Quantenna sdk-v37.4.1.89: Files from outside the Linux tree

mkdir drivers/qtn
cp -pr
/usr/local/hdd1/home/danielmentz/quantenna-sdk-v37.4.1.89.pristine/drivers/*
drivers/qtn

mkdir arch/arc/plat-qtn/sdk-qsr1000
cp -pr
/usr/local/hdd1/home/danielmentz/quantenna-sdk-v37.4.1.89.pristine/common/
arch/arc/plat-qtn/sdk-qsr1000
cp -pr
/usr/local/hdd1/home/danielmentz/quantenna-sdk-v37.4.1.89.pristine/include/
arch/arc/plat-qtn/sdk-qsr1000

rm arch/arc/include/asm/board-ruby
ln -s ../../../../drivers/qtn/ruby/ arch/arc/include/asm/board-ruby

rm -rf arch/arc/plat-qtn/sdk-qsr1000/common/doxygen
diff --git a/arch/arc/include/asm/board-ruby b/arch/arc/include/asm/board-ruby
index 1ca4289..b4672d4 120000
--- a/arch/arc/include/asm/board-ruby
+++ b/arch/arc/include/asm/board-ruby
@@ -1 +1 @@
-../../../../../drivers/ruby/
\ No newline at end of file
+drivers/qtn/ruby/
\ No newline at end of file
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/Makefile b/arch/arc/plat-qtn/sdk-qsr1000/common/Makefile
new file mode 100755
index 0000000..b25a070
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/Makefile
@@ -0,0 +1,91 @@
+#
+#   common/Makefile
+# 
+#   Copyright (c) Quantenna Communications Incorporated 2007.
+#   All rights reserved.
+# 
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+# 
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+# 
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software
+#  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+# 
+#  This file defines the major, minor and build numbers for this release, and
+#  can create a C header file corresponding to those values. 
+#
+
+# This section is AWK'd by the release script, so keep the format to be
+# <start of line><label><space>:=<space><number>
+UMS_RELEASE_MAJOR := 0
+UMS_RELEASE_MINOR := 6
+UMS_RELEASE_BUILD := 1
+
+HFILE := ums_release
+
+PDFTK = $(shell which pdftk)
+ENSCRIPT = $(shell which enscript)
+PS2PDF = $(shell which ps2pdf)
+
+$(HFILE).h: Makefile
+	echo "/* This file is auto-generated by common/Makefile */" > $(HFILE).h
+	echo "#ifndef __$(HFILE)_H" >> $(HFILE).h
+	echo "#define __$(HFILE)_H" >> $(HFILE).h
+	echo "#define UMS_RELEASE_MAJOR	 ($(UMS_RELEASE_MAJOR))" >> $(HFILE).h
+	echo "#define UMS_RELEASE_MINOR	 ($(UMS_RELEASE_MINOR))" >> $(HFILE).h
+	echo "#define UMS_RELEASE_BUILD	 ($(UMS_RELEASE_BUILD))" >> $(HFILE).h
+	echo "#endif" >> $(HFILE).h
+
+header_version: check_enscript check_ps2pdf
+	-rm rev-num.pdf .tmp.rev-num .tmp.rev-num.ps
+	echo "$(REV_NUM)" > .tmp.rev-num
+	enscript -B --margins 67:200:260:240 -f Courier24 -p ./.tmp.rev-num.ps .tmp.rev-num
+	ps2pdf .tmp.rev-num.ps rev-num.pdf
+	-rm .tmp.rev-num .tmp.rev-num.ps
+
+clean:
+	rm -f $(HFILE).h
+	
+distclean: clean
+
+doco_pktlogger_i: check_pdftk header_version
+	REV_NUM=$(REV_NUM) make -e -C doxygen/pktlogger_doc Quantenna_pktlogger-INTERNAL-ONLY.pdf
+
+doco_pktlogger_ext_no_muc: check_pdftk header_version
+	REV_NUM=$(REV_NUM) make -e -C doxygen/pktlogger_doc Quantenna_pktlogger-external-no-muc.pdf
+
+doco_pktlogger_ext: check_pdftk header_version
+	REV_NUM=$(REV_NUM) make -e -C doxygen/pktlogger_doc Quantenna_pktlogger.pdf
+
+check_pdftk:
+	@if [ "$(PDFTK)" = "" ]; then \
+		echo "Please install pdftk to generate internal documentation"; \
+		exit 1; \
+	fi
+
+check_enscript:
+	@if [ "$(ENSCRIPT)" = "" ]; then \
+		echo "Please install enscript"; \
+		exit 1; \
+	fi
+
+check_ps2pdf:
+	@if [ "$(PS2PDF)" = "" ]; then \
+		echo "Please install ps2pdf"; \
+		exit 1; \
+	fi
+
+doco_qcsapi: check_pdftk header_version
+	REV_NUM=$(REV_NUM) make -e -C doxygen/qcsapi_doc
+
+ALL_DOCS = doco_pktlogger_ext doco_qcsapi doco_pktlogger_i doco_pktlogger_ext_no_muc
+doco: $(ALL_DOCS)
+.PHONY: doco $(ALL_DOCS)
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/common_mem.h b/arch/arc/plat-qtn/sdk-qsr1000/common/common_mem.h
new file mode 100644
index 0000000..7b785c4
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/common_mem.h
@@ -0,0 +1,277 @@
+/*
+ * (C) Copyright 2014 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes the Ruby and Topaz platforms.
+ * Used by both run-time and boot loader images.
+ *
+ * Do not put run-time specific definitions in this file.
+ */
+
+#ifndef __COMMON_MEM_H
+#define __COMMON_MEM_H
+
+#include "ruby_config.h"
+
+/* Platform memory */
+/* SRAM */
+#define RUBY_SRAM_UNIFIED_BEGIN			0x98000000
+#define RUBY_SRAM_UNIFIED_NOCACHE_BEGIN		0x60000000
+#define RUBY_SRAM_ALIAS_NOCACHE_BEGIN		0xf8000000
+#define RUBY_SRAM_FLIP_BEGIN			0x88000000
+#define RUBY_SRAM_FLIP_NOCACHE_BEGIN		0x60000000
+#define RUBY_SRAM_NOFLIP_BEGIN			0x80000000
+#define RUBY_SRAM_NOFLIP_NOCACHE_BEGIN		0x60000000
+#define RUBY_SRAM_BANK_SIZE			(64 * 1024)
+
+#define RUBY_SRAM_SIZE			(8 * RUBY_SRAM_BANK_SIZE)
+#define RUBY_SRAM_BANK_SAFE_SIZE	RUBY_SRAM_BANK_SIZE
+
+/* DDR */
+#define RUBY_DRAM_UNIFIED_BEGIN			0x80000000
+#define RUBY_DRAM_UNIFIED_NOCACHE_BEGIN		0x40000000
+#define RUBY_DRAM_ALIAS_NOCACHE_BEGIN		0xd0000000
+#define RUBY_DRAM_FLIP_BEGIN			0x80000000
+#define RUBY_DRAM_FLIP_NOCACHE_BEGIN		0x40000000
+#define RUBY_DRAM_NOFLIP_BEGIN			0x0
+#define RUBY_DRAM_NOFLIP_NOCACHE_BEGIN		0x40000000
+#if TOPAZ_MMAP_UNIFIED && TOPAZ_SUPPORT_256MB_DDR
+#define RUBY_MAX_DRAM_SIZE			DDR_256MB
+#else
+#define RUBY_MAX_DRAM_SIZE			DDR_128MB
+#endif
+#define RUBY_MIN_DRAM_SIZE			DDR_64MB
+
+#if TOPAZ_MMAP_UNIFIED && TOPAZ_MMAP_ALIAS
+	#define RUBY_SRAM_BEGIN			RUBY_SRAM_UNIFIED_BEGIN
+	#define RUBY_SRAM_BUS_BEGIN		RUBY_SRAM_UNIFIED_BEGIN
+	#define RUBY_SRAM_NOCACHE_BEGIN		RUBY_SRAM_ALIAS_NOCACHE_BEGIN
+	#define RUBY_DRAM_BEGIN			RUBY_DRAM_UNIFIED_BEGIN
+	#define RUBY_DRAM_BUS_BEGIN		RUBY_DRAM_UNIFIED_BEGIN
+	#define RUBY_DRAM_NOCACHE_BEGIN		RUBY_DRAM_ALIAS_NOCACHE_BEGIN
+#elif TOPAZ_MMAP_UNIFIED
+	#define RUBY_SRAM_BEGIN			RUBY_SRAM_UNIFIED_BEGIN
+	#define RUBY_SRAM_BUS_BEGIN		RUBY_SRAM_UNIFIED_BEGIN
+	#define RUBY_SRAM_NOCACHE_BEGIN		RUBY_SRAM_UNIFIED_NOCACHE_BEGIN
+	#define RUBY_DRAM_BEGIN			RUBY_DRAM_UNIFIED_BEGIN
+	#define RUBY_DRAM_BUS_BEGIN		RUBY_DRAM_UNIFIED_BEGIN
+	#define RUBY_DRAM_NOCACHE_BEGIN		RUBY_DRAM_UNIFIED_NOCACHE_BEGIN
+#elif RUBY_MMAP_FLIP
+	#define RUBY_SRAM_BEGIN			RUBY_SRAM_FLIP_BEGIN
+	#define RUBY_SRAM_BUS_BEGIN		RUBY_SRAM_NOFLIP_BEGIN
+	#define RUBY_SRAM_NOCACHE_BEGIN		RUBY_SRAM_FLIP_NOCACHE_BEGIN
+	#define RUBY_DRAM_BEGIN			RUBY_DRAM_FLIP_BEGIN
+	#define RUBY_DRAM_BUS_BEGIN		RUBY_DRAM_NOFLIP_BEGIN
+	#define RUBY_DRAM_NOCACHE_BEGIN		RUBY_DRAM_FLIP_NOCACHE_BEGIN
+#else
+	#define RUBY_SRAM_BEGIN			RUBY_SRAM_NOFLIP_BEGIN
+	#define RUBY_SRAM_BUS_BEGIN		RUBY_SRAM_NOFLIP_BEGIN
+	#define RUBY_SRAM_NOCACHE_BEGIN		RUBY_SRAM_NOFLIP_NOCACHE_BEGIN
+	#define RUBY_DRAM_BEGIN			RUBY_DRAM_NOFLIP_BEGIN
+	#define RUBY_DRAM_BUS_BEGIN		RUBY_DRAM_NOFLIP_BEGIN
+	#define RUBY_DRAM_NOCACHE_BEGIN		RUBY_DRAM_NOFLIP_NOCACHE_BEGIN
+#endif
+
+/*****************************************************************************/
+/* SPI memory mapped                                                         */
+/*****************************************************************************/
+#define RUBY_SPI_FLASH_ADDR     0x90000000
+
+ /* Hardware */
+#define RUBY_HARDWARE_BEGIN	0xC0000000
+
+#define	ROUNDUP(x, y)		((((x)+((y)-1))/(y))*(y))
+
+/* Config space */
+#define CONFIG_ARC_CONF_SIZE		(8 * 1024)
+/* Config area for Universal H/W ID */
+#define CONFIG_ARC_CONF_BASE		(0x80000000 + CONFIG_ARC_CONF_SIZE)
+
+#define CONFIG_ARC_KERNEL_PAGE_SIZE	(8 * 1024)
+
+#define RUBY_KERNEL_LOAD_DRAM_BEGIN	(RUBY_DRAM_BEGIN + 0x3000000)
+
+/* Safety offset from stack top address */
+#define RUBY_STACK_INIT_OFFSET		4
+
+/* DDR layout */
+#define CONFIG_ARC_NULL_BASE		0x00000000
+#define CONFIG_ARC_NULL_SIZE		(64 * 1024)
+#define CONFIG_ARC_NULL_END		(CONFIG_ARC_NULL_BASE + CONFIG_ARC_NULL_SIZE)
+
+/* PCIe BDA area */
+#define CONFIG_ARC_PCIE_BASE		(RUBY_DRAM_BEGIN + CONFIG_ARC_NULL_END)
+#define CONFIG_ARC_PCIE_SIZE		(64 * 1024) /* minimal PCI BAR size */
+#if ((CONFIG_ARC_PCIE_BASE & (64 * 1024 - 1)) != 0)
+	#error "The reserved region for PCIe BAR should be 64k aligned!"
+#endif
+
+/*
+ * CONFIG_ARC_MUC_STACK_OFFSET_UBOOT must be equal to CONFIG_ARC_MUC_STACK_OFFSET
+ * and RUBY_CRUMBS_OFFSET_UBOOT must be equal to RUBY_CRUMBS_OFFSET.
+ * Their values can be obtained with host/utilities/ruby_mem_helper.
+ */
+#if TOPAZ_RX_ACCELERATE
+	/* Must be equal to CONFIG_ARC_MUC_STACK_OFFSET */
+	#define CONFIG_ARC_MUC_STACK_OFFSET_UBOOT	(0x0003F7C0)
+	/* MuC stack, included in CONFIG_ARC_MUC_SRAM_SIZE */
+	#define CONFIG_ARC_MUC_STACK_SIZE		(4 * 1024)
+#else
+	/* Must be equal to CONFIG_ARC_MUC_STACK_OFFSET */
+	#define CONFIG_ARC_MUC_STACK_OFFSET_UBOOT	(0x0003FFA0)
+	/* MuC stack, included in CONFIG_ARC_MUC_SRAM_SIZE */
+	#define CONFIG_ARC_MUC_STACK_SIZE		(6 * 1024)
+#endif
+
+#define CONFIG_ARC_MUC_STACK_INIT_UBOOT		(RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_STACK_OFFSET_UBOOT)
+
+/* Must be equal to RUBY_CRUMBS_OFFSET */
+#define RUBY_CRUMBS_OFFSET_UBOOT	(0x0003FFC0)
+
+#define RUBY_UBOOT_PIGGY_MAX_SIZE		0x14000
+#define RUBY_CRUMBS_ADDR_UBOOT			(RUBY_SRAM_BEGIN + RUBY_CRUMBS_OFFSET_UBOOT)
+
+/*
+ * Crumb structure, sits at the end of 4th SRAM bank. Each core can use it to
+ * store the last run function to detect bus hangs.
+ */
+#define RUBY_CRUMBS_SIZE		64
+
+#ifndef __ASSEMBLY__
+	struct ruby_crumbs_percore {
+		unsigned long	blink;
+		unsigned long	status32;
+		unsigned long	sp;
+	};
+
+	struct ruby_crumbs_mem_section {
+		unsigned long	start;
+		unsigned long	end;
+	};
+
+	struct ruby_crumbs {
+		struct ruby_crumbs_percore	lhost;
+		struct ruby_crumbs_percore	muc;
+		struct ruby_crumbs_percore	dsp;
+		/*
+		 * allow (somewhat) intelligent parsing of muc stacks by
+		 * specifying the text section
+		 */
+		struct ruby_crumbs_mem_section	muc_dram;
+		struct ruby_crumbs_mem_section	muc_sram;
+
+		/*
+		 * magic token; if set incorrectly we probably have
+		 * random values after power-on
+		 */
+		unsigned long			magic;
+	};
+
+	#define	RUBY_CRUMBS_MAGIC	0x7c97be8f
+
+#endif /* __ASSEMBLY__ */
+
+/* Utility functions */
+#ifndef __ASSEMBLY__
+	#if defined(AUC_BUILD) || defined(RUBY_MINI)
+		#define NO_RUBY_WEAK	1
+	#else
+		#define NO_RUBY_WEAK	0
+	#endif
+
+	#define RUBY_BAD_BUS_ADDR	((unsigned  long)0)
+	#define RUBY_BAD_VIRT_ADDR	((void*)RUBY_BAD_BUS_ADDR)
+	#define RUBY_ERROR_ADDR		((unsigned long)0xefefefef)
+
+	#if defined(__CHECKER__)
+		#define RUBY_INLINE			static inline __attribute__((always_inline))
+		#define RUBY_WEAK(name)			RUBY_INLINE
+	#elif defined(__GNUC__)
+		/*GCC*/
+		#define RUBY_INLINE			static inline __attribute__((always_inline))
+		#if NO_RUBY_WEAK
+			#define RUBY_WEAK(name)		RUBY_INLINE
+		#else
+			#define RUBY_WEAK(name)		__attribute__((weak))
+		#endif
+	#else
+		/*MCC*/
+		#define RUBY_INLINE			static _Inline
+		#if NO_RUBY_WEAK
+			#define RUBY_WEAK(name)		RUBY_INLINE
+		#else
+			#define RUBY_WEAK(name)		pragma Weak(name);
+		#endif
+		#pragma Offwarn(428)
+	#endif
+
+	#define ____in_mem_range(addr, start, size)	\
+		(((addr) >= (start)) && ((addr) < (start) + (size)))
+
+	#if defined(STATIC_CHECK) || defined(__CHECKER__)
+		RUBY_INLINE int __in_mem_range(unsigned long addr, unsigned long start, unsigned long size)
+		{
+			return (((addr) >= (start)) && ((addr) < (start) + (size)));
+		}
+	#else
+		#define __in_mem_range ____in_mem_range
+	#endif
+
+	#if RUBY_MMAP_FLIP
+		RUBY_INLINE unsigned long virt_to_bus(const void *addr)
+		{
+			unsigned long ret = (unsigned long)addr;
+			if (__in_mem_range(ret, RUBY_SRAM_FLIP_BEGIN, RUBY_SRAM_SIZE)) {
+				ret = ret - RUBY_SRAM_FLIP_BEGIN + RUBY_SRAM_NOFLIP_BEGIN;
+			} else if (__in_mem_range(ret, RUBY_DRAM_FLIP_BEGIN, RUBY_MAX_DRAM_SIZE)) {
+				ret = ret - RUBY_DRAM_FLIP_BEGIN + RUBY_DRAM_NOFLIP_BEGIN;
+			} else if (ret < RUBY_HARDWARE_BEGIN) {
+				ret = RUBY_BAD_BUS_ADDR;
+			}
+			return ret;
+		}
+		RUBY_WEAK(bus_to_virt) void* bus_to_virt(unsigned long addr)
+		{
+			unsigned long ret = addr;
+			if (__in_mem_range(ret, RUBY_SRAM_NOFLIP_BEGIN, RUBY_SRAM_SIZE)) {
+				ret = ret - RUBY_SRAM_NOFLIP_BEGIN + RUBY_SRAM_FLIP_BEGIN;
+			} else if (__in_mem_range(ret, RUBY_DRAM_NOFLIP_BEGIN, RUBY_MAX_DRAM_SIZE)) {
+				ret = ret - RUBY_DRAM_NOFLIP_BEGIN + RUBY_DRAM_FLIP_BEGIN;
+			} else if (ret < RUBY_HARDWARE_BEGIN) {
+				ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
+			}
+			return (void*)ret;
+		}
+	#else
+		/* Map 1:1, (x) address must be higher than 0x8000_0000. */
+		#define virt_to_bus(x) ((unsigned long)(x))
+		#define bus_to_virt(x) ((void *)(x))
+	#endif /* #if RUBY_MMAP_FLIP */
+
+	#ifndef __GNUC__
+		/* MCC */
+		#pragma Popwarn()
+	#endif
+
+#endif /* #ifndef __ASSEMBLY__ */
+
+#endif /* __COMMON_MEM_H */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/current_platform.h b/arch/arc/plat-qtn/sdk-qsr1000/common/current_platform.h
new file mode 100644
index 0000000..13899f3
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/current_platform.h
@@ -0,0 +1,12 @@
+#define TOPAZ_PLATFORM
+#define TOPAZ_FPGA_PLATFORM 0
+#define TOPAZ_EMAC_NULL_BUF_WR
+#undef TOPAZ_FPGA_UMCTL1
+#define PLATFORM_WMAC_MODE ap
+#undef PLATFORM_DEFAULT_BOARD_ID
+#define ARC_HW_REV_NEEDS_TLBMISS_FIX
+#define TOPAZ_SUPPORT_UMM 0
+#define TOPAZ_SUPPORT_256MB_DDR 0
+#define FLASH_SUPPORT_64KB
+#define WPA_TKIP_SUPPORT 0
+#define SIGMA_TESTBED_SUPPORT 0
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/pktlogger/pktlogger_nl_common.h b/arch/arc/plat-qtn/sdk-qsr1000/common/pktlogger/pktlogger_nl_common.h
new file mode 100644
index 0000000..22e842c
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/pktlogger/pktlogger_nl_common.h
@@ -0,0 +1,135 @@
+/* (C)2016 Quantenna Communications Inc. */
+
+/* Common structures for netdebug - userspace/kernelspace interface */
+#ifndef __PKTLOGGER_NL_COMMON_H__
+#define __PKTLOGGER_NL_COMMON_H__
+
+
+#define PKTLOGGER_MSG_MAGIC 0x79FFA904
+
+/* Magic, version, type, length then variable value. */
+struct pktlogger_nl_hdr_t
+{
+	uint32_t magic;
+	uint32_t mver;
+	uint32_t mtype;
+	uint32_t mlen;
+	uint32_t mseq;
+	uint8_t  msg[0];
+};
+
+enum pktlogger_nl_msg
+{
+	PKTLOGGER_NETLINK_MTYPE_QUERY        = 1,
+	PKTLOGGER_NETLINK_MTYPE_CONFIG       = 2,
+	PKTLOGGER_NETLINK_MTYPE_CONFIG_ONE   = 3,
+	PKTLOGGER_NETLINK_MTYPE_PTYPE_CONFIG = 4
+};
+
+struct pktlogger_nl_query_t
+{
+	struct pktlogger_nl_hdr_t hdr;
+	uint32_t query_num;
+	uint32_t arg1;
+	uint32_t arg2;
+	uint8_t data[0];
+};
+
+struct pktlogger_nl_pktlog_config_t
+{
+	uint16_t type;
+	uint16_t flags;
+	char     name[16];
+	uint32_t rate;
+	uint32_t history;
+	uint16_t struct_bsize;
+	uint16_t struct_vsize;
+};
+
+struct pktlogger_nl_radio_config_t
+{
+	uint32_t destip;	/* Network endian destination IP address for pktlogger data.       */
+	uint32_t srcip;
+	uint8_t  destmac[6];	/* Destination MAC address.                                        */
+	uint8_t  srcmac[6];	/* Source MAC address.                                             */
+	uint16_t destport;	/* UDP dest port for this radio.                                   */
+	uint16_t srcport;	/* UDP src port for this radio.                                    */
+	uint32_t pktlog_ver_cnt;/*  0x000000FF: Number of entries in the pktlog_configs array,
+				0x00FFFF00: reserved
+				0xFF000000: Version of pktlog_types.                               */
+	char     radioname[16];	/* Radio name (eg, wifi0, wifi1, wifi2).
+				Filled out on query, ignored on configuration.                     */
+	struct   pktlogger_nl_pktlog_config_t pktlog_configs[16];	/* Per-pktlogger config.
+									Pointer to the first element. */
+};
+
+struct pktlogger_nl_config_t
+{
+	uint32_t rev;
+	uint32_t rcontrol;
+	struct pktlogger_nl_radio_config_t per_radio[3];
+};
+
+struct pktlogger_nl_config_one_t
+{
+	uint32_t radio_index;
+	struct pktlogger_nl_pktlog_config_t config;
+};
+
+struct pktlogger_nl_config_set_t
+{
+	struct pktlogger_nl_hdr_t hdr;
+	struct pktlogger_nl_config_t config;
+};
+
+struct pktlogger_nl_config_oneset_t
+{
+	struct pktlogger_nl_hdr_t hdr;
+	struct pktlogger_nl_config_one_t config;
+};
+
+enum pktlogger_nl_query
+{
+	PKTLOGGER_QUERY_STRUCT       = 0,
+	PKTLOGGER_QUERY_CONFIG       = 1,
+	PKTLOGGER_QUERY_CONFIG_ONE   = 2,
+	PKTLOGGER_QUERY_PTYPE_CONFIG = 3
+};
+
+/* pktlogger header - for all incoming data frames */
+struct pktlogger_nl_pktlogger_hdr
+{
+	struct udphdr hdr;
+	uint8_t type;
+	uint8_t opmode;
+	/**
+	 * The source address (the bridge MAC address).
+	 */
+	unsigned char src[6];
+	u_int32_t version;
+	u_int32_t builddate;
+	/**
+	 * Identifying string to easily see in packet dumps that this is a packetlogger packet.
+	 */
+	char buildstring[32];
+	u_int8_t flags;
+
+	/**
+	 * Epoch timestamp.
+	 */
+	u_int32_t timestamp;
+	/**
+	 * TSF timestamp low bytes.
+	 */
+	u_int32_t tsf_lo;
+	/**
+	 * TSF timestamp high bytes.
+	 */
+	u_int32_t tsf_hi;
+
+	u_int32_t platform;
+	u_int32_t stats_len;
+	char padding[3];     /* Word align data start */
+} __attribute__((__packed__));
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/queue.h b/arch/arc/plat-qtn/sdk-qsr1000/common/queue.h
new file mode 100644
index 0000000..c384592
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/queue.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: queue.h 1441 2006-02-06 16:03:21Z mrenzmann $
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define	_SYS_QUEUE_H_
+
+#ifdef MUC_BUILD
+#include <stddef.h>
+#endif
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ *				SLIST	LIST	STAILQ	TAILQ
+ * _HEAD			+	+	+	+
+ * _HEAD_INITIALIZER		+	+	+	+
+ * _ENTRY			+	+	+	+
+ * _INIT			+	+	+	+
+ * _EMPTY			+	+	+	+
+ * _FIRST			+	+	+	+
+ * _NEXT			+	+	+	+
+ * _PREV			-	-	-	+
+ * _LAST			-	-	+	+
+ * _FOREACH			+	+	+	+
+ * _FOREACH_SAFE		+	+	+	+
+ * _FOREACH_REVERSE		-	-	-	+
+ * _FOREACH_REVERSE_SAFE	-	-	-	+
+ * _INSERT_HEAD			+	+	+	+
+ * _INSERT_BEFORE		-	+	-	+
+ * _INSERT_AFTER		+	+	+	+
+ * _INSERT_TAIL			-	-	+	+
+ * _CONCAT			-	-	+	+
+ * _REMOVE_HEAD			+	-	+	-
+ * _REMOVE			+	+	+	+
+ *
+ */
+
+#ifdef MUC_BUILD
+# define QUEUE_MACRO_LOCK_DEBUG	0
+#else
+# define QUEUE_MACRO_LOCK_DEBUG	0
+#endif
+
+/*
+ * Additional fields to try and locate unprotected accesses of fields.
+ * These macros provide hooks to a function which is checked when the
+ * various queue macros are used.
+ */
+#if QUEUE_MACRO_LOCK_DEBUG
+# define QUEUE_MACRO_LOCK_FIELD(prefix)					\
+		int prefix##lockcontext;
+
+# define QUEUE_MACRO_LOCK_INIT(prefix, x)	(x)->prefix##lockcontext = 0
+struct os_spinlock;
+int queue_debug_muc_context_check(int* context, const char* file, const int line);
+# define MUC_CONTEXT_CHECK_IN_INTERRUPT		0x00000001
+# define MUC_CONTEXT_CHECK_NOT_IN_INTERRUPT	0x00000002
+# define MUC_CONTEXT_CHECK(context)		queue_debug_muc_context_check(&(context), __FILE__, __LINE__)
+# define QUEUE_MACRO_LOCK_ASSERT(context)	MUC_CONTEXT_CHECK(context)
+#elif defined(SYSTEM_BUILD)
+# define QUEUE_MACRO_LOCK_FIELD(prefix)	int dummy
+# define QUEUE_MACRO_LOCK_INIT(prefix, x)	int dummy
+# define QUEUE_MACRO_LOCK_ASSERT(context)	__queue_macro_lock_assert_noop()
+static __inline int __queue_macro_lock_assert_noop(void) {
+	return 1;
+}
+#else
+# define QUEUE_MACRO_LOCK_FIELD(prefix)
+# define QUEUE_MACRO_LOCK_INIT(prefix, x)
+# define QUEUE_MACRO_LOCK_ASSERT(context)	__queue_macro_lock_assert_noop()
+static __inline__ int __queue_macro_lock_assert_noop(void) {
+	return 1;
+}
+#endif
+
+#define	QUEUE_MACRO_DEBUG 0
+#if QUEUE_MACRO_DEBUG
+/* Store the last 2 places the queue element or head was altered */
+struct qm_trace {
+	char *lastfile;
+	int lastline;
+	char *prevfile;
+	int prevline;
+};
+
+#define	TRACEBUF	struct qm_trace trace;
+#define	TRASHIT(x)	do {(x) = (void *)-1;} while (0)
+
+#define	QMD_TRACE_HEAD(head) do {					\
+	(head)->trace.prevline = (head)->trace.lastline;		\
+	(head)->trace.prevfile = (head)->trace.lastfile;		\
+	(head)->trace.lastline = __LINE__;				\
+	(head)->trace.lastfile = __FILE__;				\
+} while (0)
+
+#define	QMD_TRACE_ELEM(elem) do {					\
+	(elem)->trace.prevline = (elem)->trace.lastline;		\
+	(elem)->trace.prevfile = (elem)->trace.lastfile;		\
+	(elem)->trace.lastline = __LINE__;				\
+	(elem)->trace.lastfile = __FILE__;				\
+} while (0)
+
+#else
+#define	QMD_TRACE_ELEM(elem)
+#define	QMD_TRACE_HEAD(head)
+#define	TRACEBUF
+#define	TRASHIT(x)
+#endif	/* QUEUE_MACRO_DEBUG */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define	SLIST_HEAD(name, type)						\
+struct name {								\
+	struct type *slh_first;	/* first element */			\
+	QUEUE_MACRO_LOCK_FIELD(slh_);					\
+}
+
+#define	SLIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define	SLIST_ENTRY(type)						\
+struct {								\
+	struct type *sle_next;	/* next element */			\
+}
+
+#define SLIST_LOCK_ASSERT(head)		\
+	QUEUE_MACRO_LOCK_ASSERT((head)->slh_lockcontext)
+
+/*
+ * Singly-linked List functions.
+ */
+#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
+
+#define	SLIST_FIRST(head)	((head)->slh_first)
+
+#define	SLIST_FOREACH(var, head, field)					\
+	for ((var) = SLIST_FIRST((head));				\
+	    SLIST_LOCK_ASSERT((head)) && (var);				\
+	    (var) = SLIST_NEXT((var), field))
+
+#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = SLIST_FIRST((head));				\
+	    SLIST_LOCK_ASSERT((head)) &&				\
+	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
+	    (var) = (tvar))
+
+#define	SLIST_FOREACH_PREVPTR(var, varp, head, field)			\
+	for ((varp) = &SLIST_FIRST((head));				\
+	    SLIST_LOCK_ASSERT((head)) &&				\
+	    ((var) = *(varp)) != NULL;					\
+	    (varp) = &SLIST_NEXT((var), field))
+
+#define	SLIST_INIT(head) do {						\
+	SLIST_FIRST((head)) = NULL;					\
+} while (0)
+
+#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
+	SLIST_LOCK_ASSERT((head));					\
+	SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);	\
+	SLIST_NEXT((slistelm), field) = (elm);				\
+} while (0)
+
+#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
+	SLIST_LOCK_ASSERT((head));					\
+	SLIST_NEXT((elm), field) = SLIST_FIRST((head));			\
+	SLIST_FIRST((head)) = (elm);					\
+} while (0)
+
+#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
+
+#define	SLIST_REMOVE(head, elm, type, field) do {			\
+	SLIST_LOCK_ASSERT((head));					\
+	if (SLIST_FIRST((head)) == (elm)) {				\
+		SLIST_REMOVE_HEAD((head), field);			\
+	}								\
+	else {								\
+		struct type *curelm = SLIST_FIRST((head));		\
+		while (SLIST_NEXT(curelm, field) != (elm))		\
+			curelm = SLIST_NEXT(curelm, field);		\
+		SLIST_NEXT(curelm, field) =				\
+		    SLIST_NEXT(SLIST_NEXT(curelm, field), field);	\
+	}								\
+} while (0)
+
+#define	SLIST_REMOVE_HEAD(head, field) do {				\
+	SLIST_LOCK_ASSERT((head));					\
+	SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);	\
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define	STAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *stqh_first;/* first element */			\
+	struct type **stqh_last;/* addr of last next element */		\
+	QUEUE_MACRO_LOCK_FIELD(stqh_);	/* debug locking primitive */	\
+}
+
+#define	STAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).stqh_first }
+
+#define	STAILQ_ENTRY(type)						\
+struct {								\
+	struct type *stqe_next;	/* next element */			\
+}
+
+#define STAILQ_LOCK_ASSERT(head)	\
+	QUEUE_MACRO_LOCK_ASSERT((head)->stqh_lockcontext)
+#if QUEUE_MACRO_LOCK_DEBUG
+# define STAILQ_INIT_LOCK_DEBUG(head, lock)	(head)->stqh_lock = lock;
+#else
+# define STAILQ_INIT_LOCK_DEBUG(head, intref)
+#endif
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define	STAILQ_CONCAT(head1, head2) do {				\
+	if (!STAILQ_EMPTY((head2))) {					\
+		*(head1)->stqh_last = (head2)->stqh_first;		\
+		(head1)->stqh_last = (head2)->stqh_last;		\
+		STAILQ_INIT((head2));					\
+	}								\
+} while (0)
+
+#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
+
+#define	STAILQ_FIRST(head)	((head)->stqh_first)
+
+#define	STAILQ_FOREACH(var, head, field)				\
+	for((var) = STAILQ_FIRST((head));				\
+	   STAILQ_LOCK_ASSERT((head)) && (var);				\
+	   (var) = STAILQ_NEXT((var), field))
+
+
+#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = STAILQ_FIRST((head));				\
+	    STAILQ_LOCK_ASSERT((head)) &&				\
+	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
+	    (var) = (tvar))
+
+#define	STAILQ_INIT(head) do {						\
+	STAILQ_FIRST((head)) = NULL;					\
+	(head)->stqh_last = &STAILQ_FIRST((head));			\
+	QUEUE_MACRO_LOCK_INIT(stqh_, (head));				\
+} while (0)
+
+#define	STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {		\
+	STAILQ_LOCK_ASSERT(head);					\
+	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
+	STAILQ_NEXT((tqelm), field) = (elm);				\
+} while (0)
+
+#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
+	STAILQ_LOCK_ASSERT(head);					\
+	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)	\
+		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
+	STAILQ_FIRST((head)) = (elm);					\
+} while (0)
+
+#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
+	STAILQ_LOCK_ASSERT(head);					\
+	STAILQ_NEXT((elm), field) = NULL;				\
+	*(head)->stqh_last = (elm);					\
+	(head)->stqh_last = &STAILQ_NEXT((elm), field);			\
+} while (0)
+
+#define	STAILQ_LAST(head, type, field)					\
+	(STAILQ_EMPTY((head)) ?						\
+		NULL :							\
+	        ((struct type *)					\
+		((char *)((head)->stqh_last) - __offsetof(struct type, field))))
+
+#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
+
+#define	STAILQ_REMOVE(head, elm, type, field) do {			\
+	STAILQ_LOCK_ASSERT(head);					\
+	if (STAILQ_FIRST((head)) == (elm)) {				\
+		STAILQ_REMOVE_HEAD((head), field);			\
+	}								\
+	else {								\
+		struct type *curelm = STAILQ_FIRST((head));		\
+		while (STAILQ_NEXT(curelm, field) != (elm))		\
+			curelm = STAILQ_NEXT(curelm, field);		\
+		if ((STAILQ_NEXT(curelm, field) =			\
+		     STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
+			(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
+	}								\
+} while (0)
+
+
+#define	STAILQ_REMOVE_AFTER(head, elm, field) do {			\
+	STAILQ_LOCK_ASSERT(head);					\
+	if (STAILQ_NEXT(elm, field)) {		\
+		if ((STAILQ_NEXT(elm, field) =			\
+		    STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)\
+			(head)->stqh_last = &STAILQ_NEXT((elm), field); \
+	}								\
+} while (0)
+
+
+#define	STAILQ_REMOVE_HEAD(head, field) do {				\
+	STAILQ_LOCK_ASSERT(head);					\
+	if ((STAILQ_FIRST((head)) =					\
+	     STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)		\
+		(head)->stqh_last = &STAILQ_FIRST((head));		\
+} while (0)
+
+#define	STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do {			\
+	STAILQ_LOCK_ASSERT(head);					\
+	if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL)	\
+		(head)->stqh_last = &STAILQ_FIRST((head));		\
+} while (0)
+
+/*
+ * List declarations.
+ */
+#define	ATH_LIST_HEAD(name, type)					\
+struct name {								\
+	struct type *lh_first;	/* first element */			\
+	QUEUE_MACRO_LOCK_FIELD(lh_);					\
+}
+
+#define	LIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define	LIST_ENTRY(type)						\
+struct {								\
+	struct type *le_next;	/* next element */			\
+	struct type **le_prev;	/* address of previous next element */	\
+}
+
+#define LIST_LOCK_ASSERT(head)	\
+	QUEUE_MACRO_LOCK_ASSERT((head)->lh_lockcontext)
+
+/*
+ * List functions.
+ */
+
+#define	LIST_EMPTY(head)	((head)->lh_first == NULL)
+
+#define	LIST_FIRST(head)	((head)->lh_first)
+
+#define	LIST_FOREACH(var, head, field)					\
+	for ((var) = LIST_FIRST((head));				\
+	    LIST_LOCK_ASSERT((head)) && (var);				\
+	    (var) = LIST_NEXT((var), field))
+
+#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = LIST_FIRST((head));				\
+	    LIST_LOCK_ASSERT((head)) &&					\
+	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
+	    (var) = (tvar))
+
+#define	LIST_INIT(head) do {						\
+	LIST_FIRST((head)) = NULL;					\
+} while (0)
+
+#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
+	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+		LIST_NEXT((listelm), field)->field.le_prev =		\
+		    &LIST_NEXT((elm), field);				\
+	LIST_NEXT((listelm), field) = (elm);				\
+	(elm)->field.le_prev = &LIST_NEXT((listelm), field);		\
+} while (0)
+
+#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.le_prev = (listelm)->field.le_prev;		\
+	LIST_NEXT((elm), field) = (listelm);				\
+	*(listelm)->field.le_prev = (elm);				\
+	(listelm)->field.le_prev = &LIST_NEXT((elm), field);		\
+} while (0)
+
+#define	LIST_INSERT_HEAD(head, elm, field) do {				\
+	LIST_LOCK_ASSERT((head));					\
+	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)	\
+		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+	LIST_FIRST((head)) = (elm);					\
+	(elm)->field.le_prev = &LIST_FIRST((head));			\
+} while (0)
+
+#define	LIST_NEXT(elm, field)	((elm)->field.le_next)
+
+#define	LIST_REMOVE(elm, field) do {					\
+	if (LIST_NEXT((elm), field) != NULL)				\
+		LIST_NEXT((elm), field)->field.le_prev = 		\
+		    (elm)->field.le_prev;				\
+	*(elm)->field.le_prev = LIST_NEXT((elm), field);		\
+} while (0)
+
+/*
+ * Tail queue declarations.
+ */
+#define	TAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *tqh_first;	/* first element */			\
+	struct type **tqh_last;	/* addr of last next element */		\
+	TRACEBUF							\
+	QUEUE_MACRO_LOCK_FIELD(tqh_);					\
+}
+
+#define	TAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).tqh_first }
+#if (!defined(TAILQ_ENTRY))
+#define	TAILQ_ENTRY(type)						\
+struct {								\
+	struct type *tqe_next;	/* next element */			\
+	struct type **tqe_prev;	/* address of previous next element */	\
+	TRACEBUF							\
+}
+#endif
+
+#define TAILQ_LOCK_ASSERT(head)	\
+	QUEUE_MACRO_LOCK_ASSERT((head)->tqh_lockcontext)
+
+/*
+ * Tail queue functions.
+ */
+#define	TAILQ_CONCAT(head1, head2, field) do {				\
+	if (!TAILQ_EMPTY(head2)) {					\
+		*(head1)->tqh_last = (head2)->tqh_first;		\
+		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
+		(head1)->tqh_last = (head2)->tqh_last;			\
+		TAILQ_INIT((head2));					\
+		QMD_TRACE_HEAD(head1);					\
+		QMD_TRACE_HEAD(head2);					\
+	}								\
+} while (0)
+
+#define	TAILQ_EMPTY(head)	((head)->tqh_first == NULL)
+
+#define	TAILQ_FIRST(head)	((head)->tqh_first)
+
+#define	TAILQ_FOREACH(var, head, field)					\
+	for ((var) = TAILQ_FIRST((head));				\
+	    TAILQ_LOCK_ASSERT((head)) && (var);				\
+	    (var) = TAILQ_NEXT((var), field))
+
+#define	TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = TAILQ_FIRST((head));				\
+	    TAILQ_LOCK_ASSERT((head)) &&				\
+	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
+	    (var) = (tvar))
+
+#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
+	for ((var) = TAILQ_LAST((head), headname);			\
+	    TAILQ_LOCK_ASSERT((head)) && (var);				\
+	    (var) = TAILQ_PREV((var), headname, field))
+
+#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
+	for ((var) = TAILQ_LAST((head), headname);			\
+	    TAILQ_LOCK_ASSERT((head)) &&				\
+	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
+	    (var) = (tvar))
+
+#define	TAILQ_INIT(head) do {						\
+	TAILQ_FIRST((head)) = NULL;					\
+	(head)->tqh_last = &TAILQ_FIRST((head));			\
+	QMD_TRACE_HEAD(head);						\
+} while (0)
+
+#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	TAILQ_LOCK_ASSERT((head));					\
+	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+		TAILQ_NEXT((elm), field)->field.tqe_prev =		\
+		    &TAILQ_NEXT((elm), field);				\
+	else {								\
+		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
+		QMD_TRACE_HEAD(head);					\
+	}								\
+	TAILQ_NEXT((listelm), field) = (elm);				\
+	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);		\
+	QMD_TRACE_ELEM(&(elm)->field);					\
+	QMD_TRACE_ELEM(&listelm->field);				\
+} while (0)
+
+#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
+	TAILQ_NEXT((elm), field) = (listelm);				\
+	*(listelm)->field.tqe_prev = (elm);				\
+	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);		\
+	QMD_TRACE_ELEM(&(elm)->field);					\
+	QMD_TRACE_ELEM(&listelm->field);				\
+} while (0)
+
+#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
+	TAILQ_LOCK_ASSERT((head));					\
+	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)	\
+		TAILQ_FIRST((head))->field.tqe_prev =			\
+		    &TAILQ_NEXT((elm), field);				\
+	else								\
+		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
+	TAILQ_FIRST((head)) = (elm);					\
+	(elm)->field.tqe_prev = &TAILQ_FIRST((head));			\
+	QMD_TRACE_HEAD(head);						\
+	QMD_TRACE_ELEM(&(elm)->field);					\
+} while (0)
+
+#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
+	TAILQ_LOCK_ASSERT((head));					\
+	TAILQ_NEXT((elm), field) = NULL;				\
+	(elm)->field.tqe_prev = (head)->tqh_last;			\
+	*(head)->tqh_last = (elm);					\
+	(head)->tqh_last = &TAILQ_NEXT((elm), field);			\
+	QMD_TRACE_HEAD(head);						\
+	QMD_TRACE_ELEM(&(elm)->field);					\
+} while (0)
+
+#define	TAILQ_LAST(head, headname)					\
+	(*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define	TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define	TAILQ_PREV(elm, headname, field)				\
+	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define	TAILQ_REMOVE(head, elm, field) do {				\
+	TAILQ_LOCK_ASSERT((head));					\
+	if ((TAILQ_NEXT((elm), field)) != NULL)				\
+		TAILQ_NEXT((elm), field)->field.tqe_prev =		\
+		    (elm)->field.tqe_prev;				\
+	else {								\
+		(head)->tqh_last = (elm)->field.tqe_prev;		\
+		QMD_TRACE_HEAD(head);					\
+	}								\
+	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);		\
+	TRASHIT((elm)->field.tqe_next);					\
+	TRASHIT((elm)->field.tqe_prev);					\
+	QMD_TRACE_ELEM(&(elm)->field);					\
+} while (0)
+
+
+#ifdef _KERNEL
+
+/*
+ * XXX insque() and remque() are an old way of handling certain queues.
+ * They bogusly assume that all queue heads look alike.
+ */
+
+struct quehead {
+	struct quehead *qh_link;
+	struct quehead *qh_rlink;
+};
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+
+static __inline void
+insque(void *a, void *b)
+{
+	struct quehead *element = (struct quehead *)a,
+		 *head = (struct quehead *)b;
+
+	element->qh_link = head->qh_link;
+	element->qh_rlink = head;
+	head->qh_link = element;
+	element->qh_link->qh_rlink = element;
+}
+
+static __inline void
+remque(void *a)
+{
+	struct quehead *element = (struct quehead *)a;
+
+	element->qh_link->qh_rlink = element->qh_rlink;
+	element->qh_rlink->qh_link = element->qh_link;
+	element->qh_rlink = 0;
+}
+
+#else /* !(__GNUC__ || __INTEL_COMPILER) */
+
+void	insque(void *a, void *b);
+void	remque(void *a);
+
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_QUEUE_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_arasan_emac_ahb.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_arasan_emac_ahb.h
new file mode 100755
index 0000000..15dae08
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_arasan_emac_ahb.h
@@ -0,0 +1,437 @@
+/*
+ *  drivers/net/arasan_emac_ahb.h
+ *
+ *  Copyright (c) Quantenna Communications Incorporated 2007.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __COMMON_RUBY_ARASAN_EMAC_AHB_H
+#define __COMMON_RUBY_ARASAN_EMAC_AHB_H	1
+
+#include "ruby_platform.h"
+#ifdef TOPAZ_AMBER_IP
+#include <include/qtn/amber.h>
+#endif
+
+extern __inline__ void __mydelay(unsigned long loops)
+{
+	__asm__ __volatile__ ( "1: \n\t"
+			     "sub.f %0, %1, 1\n\t"
+			     "jpnz 1b"
+			     : "=r" (loops)
+			     : "0" (loops));
+}
+
+/*
+ * Division by multiplication: you don't have to worry about loss of
+ * precision.
+ *
+ * Use only for very small delays ( < 1 msec).  Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays.  This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+static inline void __const_myudelay(unsigned long xloops)
+{
+	__asm__ ("mpyhu %0, %1, %2"
+		 : "=r" (xloops)
+		 : "r" (xloops), "r" (1<<20));	/* Number derived from loops per jiffy */
+	__mydelay(xloops * 100);		/* Jiffies per sec */
+}
+
+static inline void __myudelay(unsigned long usecs)
+{
+	__const_myudelay(usecs * 4295);	/* 2**32 / 1000000 */
+}
+#ifndef MAX_UDELAY_MS
+#define MAX_UDELAY_MS	5
+#endif
+
+#ifndef mydelay
+#define mydelay(n) (\
+	(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? __myudelay((n)*1000) : \
+	({unsigned long __ms=(n); while (__ms--) __myudelay(1000);}))
+#endif
+
+#define		DELAY_40MILLISEC	(40)
+#define		DELAY_50MILLISEC	(50)
+
+/* Arasan Gigabit AHB controller register offsets */
+#define EMAC_DMA_CONFIG			0x0000
+#define EMAC_DMA_CTRL			0x0004
+#define EMAC_DMA_STATUS_IRQ		0x0008
+#define EMAC_DMA_INT_ENABLE		0x000C
+#define EMAC_DMA_TX_AUTO_POLL		0x0010
+#define EMAC_DMA_TX_POLL_DEMAND		0x0014
+#define EMAC_DMA_RX_POLL_DEMAND		0x0018
+#define EMAC_DMA_TX_BASE_ADDR		0x001C
+#define EMAC_DMA_RX_BASE_ADDR		0x0020
+#define EMAC_DMA_MISSED_FRAMES		0x0024
+#define EMAC_DMA_STOP_FLUSHES		0x0028
+#define EMAC_DMA_RX_IRQ_MITIGATION	0x002C
+#define EMAC_DMA_CUR_TXDESC_PTR		0x0030
+#define EMAC_DMA_CUR_TXBUF_PTR		0x0034
+#define EMAC_DMA_CUR_RXDESC_PTR		0x0038
+#define EMAC_DMA_CUR_RXBUF_PTR		0x003C
+
+#define EMAC_MAC_GLOBAL_CTRL		0x0100
+#define EMAC_MAC_TX_CTRL		0x0104
+#define EMAC_MAC_RX_CTRL		0x0108
+#define EMAC_MAC_MAX_FRAME_SIZE		0x010C
+#define EMAC_MAC_TX_JABBER_SIZE		0x0110
+#define EMAC_MAC_RX_JABBER_SIZE		0x0114
+#define EMAC_MAC_ADDR_CTRL		0x0118
+#define EMAC_MAC_ADDR1_HIGH		0x0120
+#define EMAC_MAC_ADDR1_MED		0x0124
+#define EMAC_MAC_ADDR1_LOW		0x0128
+#define EMAC_MAC_ADDR2_HIGH		0x012C
+#define EMAC_MAC_ADDR2_MED		0x0130
+#define EMAC_MAC_ADDR2_LOW		0x0134
+#define EMAC_MAC_ADDR3_HIGH		0x0138
+#define EMAC_MAC_ADDR3_MED		0x013C
+#define EMAC_MAC_ADDR3_LOW		0x0140
+#define EMAC_MAC_ADDR4_HIGH		0x0144
+#define EMAC_MAC_ADDR4_MED		0x0148
+#define EMAC_MAC_ADDR4_LOW		0x014C
+#define EMAC_MAC_TABLE1			0x0150
+#define EMAC_MAC_TABLE2			0x0154
+#define EMAC_MAC_TABLE3			0x0158
+#define EMAC_MAC_TABLE4			0x015C
+#define EMAC_MAC_FLOW_CTRL		0x0160
+#define EMAC_MAC_FLOW_PAUSE_GENERATE	0x0164
+#define EMAC_MAC_FLOW_SA_HIGH		0x0168
+#define EMAC_MAC_FLOW_SA_MED		0x016C
+#define EMAC_MAC_FLOW_SA_LOW		0x0170
+#define EMAC_MAC_FLOW_DA_HIGH		0x0174
+#define EMAC_MAC_FLOW_DA_MED		0x0178
+#define EMAC_MAC_FLOW_DA_LOW		0x017C
+#define EMAC_MAC_FLOW_PAUSE_TIMEVAL	0x0180
+#define EMAC_MAC_MDIO_CTRL		0x01A0
+#define EMAC_MAC_MDIO_DATA		0x01A4
+#define EMAC_MAC_RXSTAT_CTRL		0x01A8
+#define EMAC_MAC_RXSTAT_DATA_HIGH	0x01AC
+#define EMAC_MAC_RXSTAT_DATA_LOW	0x01B0
+#define EMAC_MAC_TXSTAT_CTRL		0x01B4
+#define EMAC_MAC_TXSTAT_DATA_HIGH	0x01B8
+#define EMAC_MAC_TXSTAT_DATA_LOW	0x01BC
+#define EMAC_MAC_TX_ALMOST_FULL		0x01C0
+#define EMAC_MAC_TX_START_THRESHOLD	0x01C4
+#define EMAC_MAC_RX_START_THRESHOLD	0x01C8
+#define EMAC_MAC_INT			0x01E0
+#define EMAC_MAC_INT_ENABLE		0x01E4
+
+#ifndef __ASSEMBLY__
+
+/* Common structure for tx and rx descriptors */
+struct emac_desc {
+	volatile u32 status;
+	volatile u32 control;
+	volatile u32 bufaddr1;
+	volatile u32 bufaddr2;
+};
+
+enum DmaRxDesc {
+	/* status field */
+	RxDescOwn = (1 << 31),
+	RxDescFirstDesc = (1 << 30),
+	RxDescLastDesc = (1 << 29),
+	RxDescStatusLenErr = (1 << 23),
+	RxDescStatusJabberErr = (1 << 22),
+	RxDescStatusMaxLenErr = (1 << 21),
+	RxDescStatusCRCErr = (1 << 20),
+	RxDescStatusRuntFrame = (1 << 15),
+	RxDescStatusAlignErr = (1 << 14),
+	RxDescStatusShift = 14,
+	RxDescStatusMask = 0x7fff,
+	RxDescFrameLenShift = 0,
+	RxDescFrameLenMask = 0x3fff,
+	/* control field */
+	RxDescEndOfRing = (1 << 26),
+	RxDescChain2ndAddr = (1 << 25),
+	RxDescBuf2SizeShift = 12,
+	RxDescBuf2SizeMask = 0xfff,
+	RxDescBuf1SizeShift = 0,
+	RxDescBuf1SizeMask = 0xfff,
+};
+
+enum DmaTxDesc {
+	/* status field */
+	TxDescOwn = (1 << 31),
+	TxDescStatusShift = 0,
+	TxDescStatusMask = 0x8fffffff,
+	/* control field */
+	TxDescIntOnComplete = (1 << 31),
+	TxDescLastSeg = (1 << 30),
+	TxDescFirstSeg = (1 << 29),
+	TxDescCrcDisable = (1 << 28),
+	TxDescPadDisable = (1 << 27),
+	TxDescEndOfRing = (1 << 26),
+	TxDescChain2ndAddr = (1 << 25),
+	TxDescForceEopErr = (1 << 24),
+	TxDescBuf2SizeShift = 12,
+	TxDescBuf2SizeMask = 0xfff,
+	TxDescBuf1SizeShift = 0,
+	TxDescBuf1SizeMask = 0xfff,
+};
+
+enum AraMacRegVals {
+	/* DMA config register */
+	DmaSoftReset = 1,
+	Dma1WordBurst = (0x01 << 1),
+	Dma4WordBurst = (0x04 << 1),
+	Dma16WordBurst = (0x10 << 1),
+	DmaRoundRobin = (1 << 15),
+	DmaWait4Done = (1 << 16),
+	DmaStrictBurst = (1 << 17),
+	Dma64BitMode = (1 << 18),
+	/* DMA control register */
+	DmaStartTx = (1 << 0),
+	DmaStartRx = (1 << 1),
+	/* DMA status/interrupt & interrupt mask registers */
+	DmaTxDone = (1 << 0),
+	DmaNoTxDesc = (1 << 1),
+	DmaTxStopped = (1 << 2),
+	DmaRxDone = (1 << 4),
+	DmaNoRxDesc = (1 << 5),
+	DmaRxStopped = (1 << 6),
+	DmaRxMissedFrame = (1 << 7),
+	DmaMacInterrupt = (1 << 8),
+	DmaAllInts = DmaTxDone | DmaNoTxDesc | DmaTxStopped | DmaRxDone | 
+		DmaNoRxDesc | DmaRxStopped | DmaRxMissedFrame | DmaMacInterrupt,
+	DmaTxStateMask = (7 << 16),
+	DmaTxStateStopped = (0 << 16),
+	DmaTxStateFetchDesc = (1 << 16),
+	DmaTxStateFetchData = (2 << 16),
+	DmaTxStateWaitEOT = (3 << 16),
+	DmaTxStateCloseDesc = (4 << 16),
+	DmaTxStateSuspended = (5 << 16),
+	DmaRxStateMask = (15 << 21),
+	DmaRxStateStopped = (0 << 21),
+	DmaRxStateFetchDesc = (1 << 21),
+	DmaRxStateWaitEOR = (2 << 21),
+	DmaRxStateWaitFrame = (3 << 21),
+	DmaRxStateSuspended = (4 << 21),
+	DmaRxStateCloseDesc = (5 << 21),
+	DmaRxStateFlushBuf = (6 << 21),
+	DmaRxStatePutBuf = (7 << 21),
+	DmaRxStateWaitStatus = (8 << 21),
+	/* MAC global control register */
+	MacSpeed10M = (0 << 0),
+	MacSpeed100M = (1 << 0),
+	MacSpeed1G = (2 << 0),
+	MacSpeedMask = (3 << 0),
+	MacFullDuplex = (1 << 2),
+	MacResetRxStats = (1 << 3),
+	MacResetTxStats = (1 << 4),
+	/* MAC TX control */
+	MacTxEnable = (1 << 0),
+	MacTxInvertFCS = (1 << 1),
+	MacTxDisableFCSInsertion = (1 << 2),
+	MacTxAutoRetry = (1 << 3),
+	MacTxIFG96 = (0 << 4),
+	MacTxIFG64 = (1 << 4),
+	MacTxIFG128 = (2 << 4),
+	MacTxIFG256 = (3 << 4),
+	MacTxPreamble7 = (0 << 6),
+	MacTxPreamble3 = (2 << 6),
+	MacTxPreamble5 = (3 << 6),
+	/* MAC RX control */
+	MacRxEnable = (1 << 0),
+	MacRxStripFCS = (1 << 2),
+	MacRxStoreAndForward = (1 << 3),
+	MacAccountVLANs = (1 << 6),
+	/* MAC address control */
+	MacAddr1Enable = (1 << 0),
+	MacAddr2Enable = (1 << 1),
+	MacAddr3Enable = (1 << 2),
+	MacAddr4Enable = (1 << 3),
+	MacInverseAddr1Enable = (1 << 4),
+	MacInverseAddr2Enable = (1 << 5),
+	MacInverseAddr3Enable = (1 << 6),
+	MacInverseAddr4Enable = (1 << 7),
+	MacPromiscuous = (1 << 8),
+	/* MAC flow control */
+	MacFlowDecodeEnable = (1 << 0),
+	MacFlowGenerationEnable = (1 << 1),
+	MacAutoFlowGenerationEnable = (1 << 2),
+	MacFlowMulticastMode = (1 << 3),
+	MacBlockPauseFrames = (1 << 4),
+	/* MDIO control register values */
+	MacMdioCtrlPhyMask = 0x1f,
+	MacMdioCtrlPhyShift = 0,
+	MacMdioCtrlRegMask = 0x1f,
+	MacMdioCtrlRegShift = 5,
+	MacMdioCtrlRead = (1 << 10),
+	MacMdioCtrlWrite = 0,
+	MacMdioCtrlClkMask = 0x3,
+	MacMdioCtrlClkShift = 11,
+	MacMdioCtrlStart = (1 << 15),
+	/* MDIO data register values */
+	MacMdioDataMask = 0xffff,
+	/* MAC interrupt & interrupt mask values */
+	MacUnderrun = (1 << 0),
+	MacJabber = (1 << 0),
+	/* RX statistics counter control */
+	RxStatReadBusy = (1 << 15),
+	/* TX statistics counter control */
+	TxStatReadBusy = (1 << 15),
+};
+
+enum ArasanTxStatisticsCounters {
+	FramesSentOK = 0,
+	FramesSentTotal = 1,
+	OctetsSentOK = 2,
+	FramesSentError = 3,
+	FramesSentSingleCol = 4,
+	FramesSentMultipleCol = 5,
+	FramesSentLateCol = 6,
+	FramesSentExcessiveCol = 7,
+	FramesSentUnicast = 8,
+	FramesSentMulticast = 9,
+	FramesSentBroadcast = 10,
+	FramesSentPause = 11,
+	TxLastStatCounter = 11,
+};
+
+enum ArasanRxStatisticsCounters {
+	FramesRxOK = 0,
+	FramesRxTotal = 1,
+	FramesRxCrcErr = 2,
+	FramesRxAlignErr = 3,
+	FramesRxErrTotal = 4,
+	OctetsRxOK = 5,
+	OctetsRxTotal = 6,
+	FramesRxUnicast = 7,
+	FramesRxMulticast = 8,
+	FramesRxBroadcast = 9,
+	FramesRxPause = 10,
+	FramesRxLenErr = 11,
+	FramesRxUndersized = 12,
+	FramesRxOversized = 13,
+	FramesRxFragments = 14,
+	FramesRxJabber = 15,
+	FramesRx64bytes = 16,
+	FramesRx65to127bytes = 17,
+	FramesRx128to255bytes = 18,
+	FramesRx256to511bytes = 19,
+	FramesRx512to1023bytes = 20,
+	FramesRx1024to1518bytes = 21,
+	FramesRxOver1518bytes = 22,
+	FramesRxDroppedBufFull = 23,
+	FramesRxTruncatedBufFull = 24,
+	RxLastStatCounter = 24,
+};
+
+extern int mdc_clk_divisor;
+static inline void arasan_initialize_release_reset(uint32_t emac0_cfg,
+		uint32_t emac1_cfg, uint32_t rgmii_timing, uint32_t ext_reset)
+{
+	uint32_t emac_cfg = emac0_cfg | emac1_cfg;
+	unsigned long reset_mask;
+	uint32_t mii_value = 0x481 | ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift);
+
+	if (!(emac_cfg & EMAC_IN_USE)) {
+		return;
+	}
+
+	/* both interfaces (if enabled) must use same mii config so we can just or here */
+	writel(RUBY_SYS_CTL_MASK_MII, RUBY_SYS_CTL_MASK);
+	if (emac0_cfg & EMAC_PHY_MII) {
+		writel(RUBY_SYS_CTL_MASK_MII_EMAC0, RUBY_SYS_CTL_CTRL);
+	}
+	if (emac1_cfg & EMAC_PHY_MII) {
+		writel(RUBY_SYS_CTL_MASK_MII_EMAC1, RUBY_SYS_CTL_CTRL);
+	}
+	if (!(emac0_cfg & EMAC_PHY_MII) && !(emac1_cfg & EMAC_PHY_MII)){
+		writel(0, RUBY_SYS_CTL_CTRL);
+	}
+	/* Have PLL clock signal go out */
+	writel_topaz(TOPAZ_SYS_CTL_PLLCLKOUT_EN, RUBY_SYS_CTL_MASK);
+	writel_topaz(TOPAZ_SYS_CTL_PLLCLKOUT_EN, RUBY_SYS_CTL_CTRL);
+
+	/*
+	 * if RGMII mode, we need to configure the clock before we release reset and also
+	 * make sure we actually reset the block
+	 */
+	writel(rgmii_timing, RUBY_SYS_CTL_GMII_CLKDLL);
+
+	/* Release Ethernet busses from reset separately to emacs */
+	reset_mask = RUBY_SYS_CTL_RESET_NETSS | RUBY_SYS_CTL_RESET_IOSS;
+	writel(reset_mask, RUBY_SYS_CTL_CPU_VEC_MASK);
+	writel(reset_mask, RUBY_SYS_CTL_CPU_VEC);
+
+	if (emac1_cfg & EMAC_IN_USE) {
+		/*
+		 * emac1 only or emac0 + emac1 configurations both require emac0
+		 * to be taken out of reset, since both PHYs use a shared mdio bus
+		 * starting from emac0
+		 */
+		reset_mask = RUBY_SYS_CTL_RESET_ENET0 | RUBY_SYS_CTL_RESET_ENET1;
+	} else if (emac0_cfg & EMAC_IN_USE) {
+		reset_mask = RUBY_SYS_CTL_RESET_ENET0;
+	}
+#ifdef TOPAZ_AMBER_IP
+	amber_bus_flush_req(TOPAZ_AMBER_BUS_FLUSH_RGMII);
+#endif
+	if (ext_reset) {
+		reset_mask |= RUBY_SYS_CTL_RESET_EXT;
+	}
+	if (reset_mask && (readl(RUBY_SYS_CTL_CPU_VEC) & reset_mask) != reset_mask) {
+		writel(reset_mask, RUBY_SYS_CTL_CPU_VEC_MASK);
+		if (ext_reset) {
+			writel(RUBY_SYS_CTL_RESET_EXT, RUBY_SYS_CTL_CPU_VEC);
+			mydelay(DELAY_40MILLISEC);
+			reset_mask &= ~RUBY_SYS_CTL_RESET_EXT;
+		}
+		writel(0, RUBY_SYS_CTL_CPU_VEC);
+		mydelay(DELAY_50MILLISEC);
+	}
+	/* Bring the EMAC out of reset */
+	writel(reset_mask, RUBY_SYS_CTL_CPU_VEC_MASK);
+	writel(reset_mask, RUBY_SYS_CTL_CPU_VEC);
+
+	writel(0, RUBY_SYS_CTL_MASK);
+	writel(0, RUBY_SYS_CTL_CPU_VEC_MASK);
+#ifdef TOPAZ_AMBER_IP
+	amber_bus_flush_release(TOPAZ_AMBER_BUS_FLUSH_RGMII);
+#endif
+
+	/*
+	 * Trigger dummy MDIO read to set MDC clock
+	 */
+	writel(mii_value, RUBY_ENET0_BASE_ADDR + EMAC_MAC_MDIO_CTRL);
+
+	/*
+	 * Remove EMAC DMA from soft reset; all other EMAC register
+	 * writes result in bus hang if the EMAC is in soft reset
+	 */
+	if (emac0_cfg & EMAC_IN_USE) {
+		writel(0x0, RUBY_ENET0_BASE_ADDR + EMAC_DMA_CONFIG);
+		writel(0x0, RUBY_ENET0_BASE_ADDR + EMAC_DMA_CTRL);
+	}
+	if (emac1_cfg & EMAC_IN_USE) {
+		writel(0x0, RUBY_ENET1_BASE_ADDR + EMAC_DMA_CONFIG);
+		writel(0x0, RUBY_ENET1_BASE_ADDR + EMAC_DMA_CTRL);
+	}
+
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_board_cfg.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_board_cfg.h
new file mode 100644
index 0000000..3528dc4
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_board_cfg.h
@@ -0,0 +1,119 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __RUBY_BOARD_CFG_H
+#define __RUBY_BOARD_CFG_H
+
+#include "ruby_platform.h"
+
+#define	SPI1_NOT_IN_USE			(0)
+#define	SPI1_IN_USE			(BIT(0))
+
+/*
+ * A copy of this structure, named qdpc_pcie_board_cfg_t, exists in qdpc_config.h; the two must stay identical.
+ * The copy exists because the host driver cannot include this header directly.
+ */
+typedef struct board_cfg {
+	int bc_board_id;
+	char *bc_name;		/* optional name of cfg */
+	int bc_ddr_type;	/* ID */
+	int bc_ddr_speed;	/* speed in MHz */
+	int bc_ddr_size;	/* in bytes */
+	int bc_emac0;		/* in use? */
+	int bc_emac1;		/* in use? */
+	int bc_phy0_addr;	/* address */
+	int bc_phy1_addr;	/* address */
+	int bc_spi1;		/* in use? */
+	int bc_wifi_hw;		/* WiFi hardware type */
+	int bc_uart1;		/* in use? */
+	int bc_pcie;		/* in use? */
+	int bc_rgmii_timing;	/* special timing value for RGMII */
+} board_cfg_t;
+
+#define BOARD_CFG_STRUCT_NUM_FIELDS	(sizeof(struct board_cfg) / sizeof(int))
+
+/* These are index into cfg array */
+#define BOARD_CFG_ID			(0)
+#define BOARD_CFG_NAME			(1)
+#define BOARD_CFG_DDR_TYPE		(2)
+#define BOARD_CFG_DDR_SPEED		(3)
+#define BOARD_CFG_DDR_SIZE		(4)
+#define BOARD_CFG_EMAC0			(5)
+#define BOARD_CFG_EMAC1			(6)
+#define BOARD_CFG_PHY0_ADDR		(7)
+#define BOARD_CFG_PHY1_ADDR		(8)
+#define BOARD_CFG_SPI1			(9)
+#define BOARD_CFG_WIFI_HW		(10)
+#define BOARD_CFG_UART1			(11)
+#define BOARD_CFG_PCIE			(12)
+#define BOARD_CFG_RGMII_TIMING		(13)
+#define BOARD_CFG_EXT_LNA_GAIN		(14)
+#define BOARD_CFG_TX_ANTENNA_NUM	(15)
+#define BOARD_CFG_FLASH_SIZE		(16)
+#define BOARD_CFG_TX_ANTENNA_GAIN	(17)
+#define BOARD_CFG_EXT_LNA_BYPASS_GAIN	(18)
+#define BOARD_CFG_RFIC			(19)
+#define BOARD_CFG_CALSTATE_VPD		(20)
+
+#define BOARD_CFG_FIELD_NAMES	{	\
+	"bc_board_id",			\
+	"bc_name",			\
+	"bc_ddr_type",			\
+	"bc_ddr_speed",			\
+	"bc_ddr_size",			\
+	"bc_emac0",			\
+	"bc_emac1",			\
+	"bc_phy0_addr",			\
+	"bc_phy1_addr",			\
+	"bc_spi1",			\
+	"bc_wifi_hw",			\
+	"bc_uart1",			\
+	"bc_pcie",			\
+	"bc_rgmii_timing",		\
+	"bc_ext_lna_gain",		\
+	"bc_tx_antenna_num",		\
+	"bc_flash_cfg",			\
+	"bc_tx_antenna_gain",		\
+	"bc_ext_lna_bypass_gain",	\
+	"bc_rfic",			\
+	"bc_tx_power_cal",		\
+}
+
+#define RUBY_BDA_VERSION		0x1000
+#define RUBY_BDA_NAMELEN		32
+#define QTN_MAX_BOOTLINE                (256)
+
+/*
+ * quantenna board configuration information,
+ * shared between u-boot and linux kernel.
+ */
+typedef struct qtn_board_cfg_info {
+	uint16_t	bda_len;			/* Size of BDA block */
+	uint16_t	bda_version;			/* BDA version */
+	uint8_t		rsvd[36];
+	board_cfg_t	bda_boardcfg;
+	uint32_t	bda_flashsz;
+	char		bda_boardname[RUBY_BDA_NAMELEN];
+	char            bda_bootline[QTN_MAX_BOOTLINE];
+} __attribute__ ((packed)) ruby_bda_t;
+
+#endif /* __RUBY_BOARD_CFG_H */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_board_db.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_board_db.h
new file mode 100644
index 0000000..9999df1
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_board_db.h
@@ -0,0 +1,1184 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _RUBY_BOARD_DB_
+#define _RUBY_BOARD_DB_
+
+#include "ruby_platform.h"
+
+#ifdef UBOOT_BOARD_AUTOCONFIG
+/*
+ * UBoot dynamic board config
+ * Placed here to allow for simplified maintenance
+ */
+struct ruby_cfgstr_map {
+	const char *name;
+	const int val;
+};
+#define CFGSTR(x)	{ #x, (x) }
+
+struct ruby_board_param {
+	const struct ruby_cfgstr_map * const p_map;
+	const uint32_t p_index;
+};
+
+const struct ruby_cfgstr_map g_cfgstr_board_id_cfg[] = {
+	{ "UNIVERSAL_ID", QTN_RUBY_UNIVERSAL_BOARD_ID },
+	{ 0 , 0 }
+};
+
+/* Board name */
+static const struct ruby_cfgstr_map g_cfgstr_name_cfg[] = {
+	{ "hw_QTN_test", 0 },
+	{ 0 , 0 }
+};
+
+/* DDR Configuration strings */
+/* DDR Type */
+static const struct ruby_cfgstr_map g_cfgstr_ddr_cfg[] = {
+	CFGSTR(DDR_16_ETRON),
+	CFGSTR(DDR_32_MICRON),
+	CFGSTR(DDR_16_MICRON),
+	CFGSTR(DDR_32_ETRON),
+	CFGSTR(DDR_32_SAMSUNG),
+	CFGSTR(DDR_16_SAMSUNG),
+	CFGSTR(DDR_16_HYNIX),
+	CFGSTR(DDR3_16_WINBOND),
+	CFGSTR(DDR3_32_WINBOND),
+	CFGSTR(DEFAULT_DDR_CFG),
+	{ 0 , 0 }
+};
+
+/* DDR Size */
+static const struct ruby_cfgstr_map g_cfgstr_ddr_size[] = {
+	CFGSTR(DDR_32MB),
+	CFGSTR(DDR_64MB),
+	CFGSTR(DDR_128MB),
+	CFGSTR(DDR_256MB),
+	CFGSTR(DDR_AUTO),
+	CFGSTR(DEFAULT_DDR_SIZE),
+	{ 0 , 0 }
+};
+
+/* DDR Speed */
+static const struct ruby_cfgstr_map g_cfgstr_ddr_speed[] = {
+	CFGSTR(DDR_160),
+	CFGSTR(DDR_250),
+	CFGSTR(DDR_320),
+	CFGSTR(DDR_400),
+	CFGSTR(DDR3_320MHz),
+	CFGSTR(DDR3_400MHz),
+	CFGSTR(DDR3_500MHz),
+	CFGSTR(DDR3_640MHz),
+	CFGSTR(DDR3_800MHz),
+	CFGSTR(DEFAULT_DDR_SPEED),
+	{ 0 , 0 }
+};
+
+/* EMAC configuration strings */
+static const struct ruby_cfgstr_map g_cfgstr_emac_cfg[] = {
+	{ "EMAC_IN_USE", EMAC_IN_USE },
+	{ "EMAC_RGMII_AN", EMAC_IN_USE },
+	{ "EMAC_NOT_IN_USE", EMAC_NOT_IN_USE },
+	{ "EMAC_MII_AN", (EMAC_IN_USE | EMAC_PHY_MII) },
+	{ "EMAC_MII_100M", (EMAC_IN_USE | EMAC_PHY_NOT_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB) },
+	{ "EMAC_MII_100M_PHY", (EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB) },
+	{ "EMAC_AR8327_RGMII", (EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_AR8327) },
+	{ "EMAC_RTL8363S_RGMII", (EMAC_IN_USE |  EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE) },
+	{ "EMAC_RTL8363SB_RGMII_P0", (EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_RTL8363SB_P0) },
+	{ "EMAC_RTL8363SB_RGMII_P1", (EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_RTL8363SB_P1) },
+	{ "EMAC_RTL8363SB_RGMII_BONDED", (EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE |
+			EMAC_PHY_RTL8363SB_P0 | EMAC_PHY_RTL8363SB_P1 | EMAC_BONDED) },
+	{ "EMAC_RTL8211E_RGMII", (EMAC_IN_USE |  EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE) },
+	{ "EMAC_88E6071_MII", (EMAC_MV88E6071) },
+	{ "EMAC_B2B_RGMII", (EMAC_IN_USE |  EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE) },
+	{ "EMAC_B2B_RGMII_100M", (EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE) },
+	{ "EMAC_B2B_RGMII_1000M", (EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE) },
+	{ "EMAC_AR8236_MII", (EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_AR8236) },
+	{ "EMAC_MII_GPIO1_RST", (EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_GPIO1_RESET) },
+	{ "EMAC_MII_100M_GPIO13_RST", (EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_GPIO13_RESET | EMAC_PHY_FORCE_100MB) },
+	{ "DEFAULT_EMAC", (EMAC_NOT_IN_USE) },
+	{ 0 , 0 }
+};
+
+
+static const struct ruby_cfgstr_map g_cfgstr_emac_phyaddr[] = {
+	CFGSTR(24),
+	CFGSTR(31),
+	CFGSTR(EMAC_PHY_ADDR_SCAN),
+	{ "EMAC_PHY0_ADDR", 1 },
+	{ "EMAC_PHY1_ADDR", 3 },
+	{ "DEFAULT_PHY_ADDR", EMAC_PHY_ADDR_SCAN },
+	{ 0 , 0 }
+};
+
+/* Wireless PHY */
+static const struct ruby_cfgstr_map g_cfgstr_rfpa_cfg[] = {
+	CFGSTR(QTN_RUBY_BRINGUP_RWPA),
+	CFGSTR(QTN_RUBY_REF_RWPA),
+	CFGSTR(QTN_RUBY_SIGE),
+	CFGSTR(QTN_RUBY_WIFI_NONE),
+	CFGSTR(QTN_TPZ_SE5003L1),
+	CFGSTR(QTN_TPZ_SE5003L1_INV),
+	CFGSTR(QTN_TPZ_SKY85703),
+	CFGSTR(QTN_TPZ_DBS),
+	CFGSTR(QTN_TPZ_SKY85405_BPF840),
+	CFGSTR(QTN_TPZ_DBS),
+	CFGSTR(QTN_TPZ_SE5502L),
+	CFGSTR(QTN_TPZ_SKY85710_NG),
+	CFGSTR(QTN_TPZ_DBS_5591),
+	CFGSTR(QTN_TPZ_DBS_5591),
+	CFGSTR(QTN_TPZ_DBS_NXP_BGU7224_BGU7258),
+	CFGSTR(QTN_TPZ_2_4GHZ_NXP_BGU7224),
+	CFGSTR(QTN_TPZ_5GHZ_NXP_BGU7258),
+	CFGSTR(QTN_TPZ_5GHZ_SKY85728),
+	CFGSTR(QTN_TPZ_DBS_SKY85806_SKY85811),
+	{ "DEFAULT_WIFI_HW", QTN_RUBY_REF_RWPA },
+	{ 0 , 0 }
+};
+
+/* SPI config */
+static const struct ruby_cfgstr_map g_cfgstr_spi_cfg[] = {
+	CFGSTR(SPI1_IN_USE),
+	CFGSTR(SPI1_NOT_IN_USE),
+	{ 0 , 0 }
+};
+
+/* UART Config */
+static const struct ruby_cfgstr_map g_cfgstr_uart_cfg[] = {
+	CFGSTR(UART1_NOT_IN_USE),
+	CFGSTR(UART1_IN_USE),
+	{ "DEFAULT_UART1", UART1_NOT_IN_USE },
+	{ 0 , 0 }
+};
+
+/* RGMII Timing Config */
+static const struct ruby_cfgstr_map g_cfgstr_rgmii_cfg[] = {
+	{ "RGMII_DEFAULT_S2p7ns_H1p1ns", CONFIG_ARCH_RGMII_DEFAULT },
+	{ "RGMII_S2p4ns_H1p4ns", CONFIG_ARCH_RGMII_DLL_TIMING },
+	{ "RGMII_S1p8ns_H1p9ns", CONFIG_ARCH_RGMII_S1P8NS_H1P9NS },
+	{ "RGMII_P1RX00TX0E", CONFIG_ARCH_RGMII_P1RX00TX0E },
+	{ "RGMII_710F", CONFIG_ARCH_RGMII_710F },
+	{ "RGMII_NODELAY", CONFIG_ARCH_RGMII_NODELAY },
+	{ "DEFAULT_RGMII_TIMING", CONFIG_ARCH_RGMII_DEFAULT },
+	{ 0 , 0 }
+};
+
+
+/* PCIE Config */
+static const struct ruby_cfgstr_map g_cfgstr_pcie_cfg[] = {
+	CFGSTR(PCIE_NOT_IN_USE),
+	CFGSTR(PCIE_ENDPOINT),
+	CFGSTR(PCIE_ROOTCOMPLEX),
+	{ 0 , 0 }
+};
+
+/* Flash config */
+static const struct ruby_cfgstr_map g_cfgstr_flash_cfg[] = {
+	CFGSTR(FLASH_SIZE_JEDEC),
+	CFGSTR(FLASH_32MB),
+	CFGSTR(FLASH_16MB),
+	CFGSTR(FLASH_8MB),
+	CFGSTR(FLASH_4MB),
+	CFGSTR(FLASH_2MB),
+	CFGSTR(DEFAULT_FLASH_SIZE),
+	{ 0 , 0 }
+};
+
+static const struct ruby_cfgstr_map g_cfgstr_tx_antenna_num[] = {
+	{ "TX_ANTENNA_NUM_1", 1 },
+	{ "TX_ANTENNA_NUM_2", 2 },
+	{ "TX_ANTENNA_NUM_3", 3 },
+	{ "TX_ANTENNA_NUM_4", 4 },
+	{ "DEFAULT_TX_ANTENNA_NUM", 4 },
+	{ 0 , 0 }
+};
+
+#define TX_ANTENNA_GAIN_1_1dB	4506
+static const struct ruby_cfgstr_map g_cfgstr_tx_antenna_gain[] = {
+	CFGSTR(TX_ANTENNA_GAIN_1_1dB),
+	{ "DEFAULT_TX_ANTENNA_GAIN", TX_ANTENNA_GAIN_1_1dB },
+	{ 0 , 0 }
+};
+
+#define LNA_gain_12dB		12
+static const struct ruby_cfgstr_map g_cfgstr_ext_lna_gain[] = {
+	CFGSTR(LNA_gain_12dB),
+	{ "DEFAULT_EXT_LNA_GAIN", LNA_gain_12dB },
+	{ 0 , 0 }
+};
+
+#define LNA_gain_BYPASS_N5dB	-5
+static const struct ruby_cfgstr_map g_cfgstr_ext_lna_bypass_gain[] = {
+	CFGSTR(LNA_gain_BYPASS_N5dB),
+	{ "DEFAULT_EXT_LNA_BYPASS_GAIN", LNA_gain_BYPASS_N5dB },
+	{ 0 , 0 }
+};
+
+#define RFIC_NOT_IN_USE		0
+#define RFIC_V4_IN_USE		4
+#define RFIC_V6_IN_USE		6
+static const struct ruby_cfgstr_map g_cfgstr_rfic[] = {
+	CFGSTR(RFIC_NOT_IN_USE),
+	CFGSTR(RFIC_V4_IN_USE),
+	CFGSTR(RFIC_V6_IN_USE),
+	{ "DEFAULT_RFIC", RFIC_V4_IN_USE },
+	{ 0 , 0 }
+};
+
+#define CALSTATE_VPD_LOG	0
+#define CALSTATE_VPD_LINEAR	1
+static const struct ruby_cfgstr_map g_cfgstr_txpow_cal[] = {
+	CFGSTR(CALSTATE_VPD_LOG),
+	CFGSTR(CALSTATE_VPD_LINEAR),
+	{ "DEFAULT_CALSTATE_VPD", CALSTATE_VPD_LOG },
+	{ 0 , 0 }
+};
+
+static const struct ruby_board_param g_custom_board_params[] = {
+	{ g_cfgstr_board_id_cfg, BOARD_CFG_ID },
+	{ g_cfgstr_name_cfg, BOARD_CFG_NAME },
+	{ g_cfgstr_ddr_cfg, BOARD_CFG_DDR_TYPE },
+	{ g_cfgstr_ddr_speed, BOARD_CFG_DDR_SPEED },
+	{ g_cfgstr_ddr_size, BOARD_CFG_DDR_SIZE },
+	{ g_cfgstr_emac_cfg, BOARD_CFG_EMAC0 },
+	{ g_cfgstr_emac_cfg, BOARD_CFG_EMAC1 },
+	{ g_cfgstr_emac_phyaddr, BOARD_CFG_PHY0_ADDR },
+	{ g_cfgstr_emac_phyaddr, BOARD_CFG_PHY1_ADDR },
+	{ g_cfgstr_rfpa_cfg, BOARD_CFG_WIFI_HW },
+	{ g_cfgstr_spi_cfg, BOARD_CFG_SPI1 },
+	{ g_cfgstr_uart_cfg, BOARD_CFG_UART1 },
+	{ g_cfgstr_rgmii_cfg, BOARD_CFG_RGMII_TIMING },
+	{ g_cfgstr_pcie_cfg, BOARD_CFG_PCIE },
+	{ g_cfgstr_flash_cfg, BOARD_CFG_FLASH_SIZE },
+	{ g_cfgstr_tx_antenna_num, BOARD_CFG_TX_ANTENNA_NUM },
+	{ g_cfgstr_tx_antenna_gain, BOARD_CFG_TX_ANTENNA_GAIN },
+	{ g_cfgstr_ext_lna_gain, BOARD_CFG_EXT_LNA_GAIN },
+	{ g_cfgstr_ext_lna_bypass_gain, BOARD_CFG_EXT_LNA_BYPASS_GAIN },
+	{ g_cfgstr_rfic, BOARD_CFG_RFIC },
+	{ g_cfgstr_txpow_cal, BOARD_CFG_CALSTATE_VPD },
+	{0, 0 }
+};
+
+static board_cfg_t g_custom_board_cfg = {
+	.bc_board_id	= QTN_RUBY_AUTOCONFIG_ID,
+	.bc_name	= "Autoconfigured board",
+	.bc_ddr_type	= DEFAULT_DDR_CFG,
+	.bc_ddr_speed	= DEFAULT_DDR_SPEED,
+	.bc_ddr_size	= DDR_AUTO,
+	.bc_emac0	= EMAC_NOT_IN_USE,
+	.bc_emac1	= EMAC_NOT_IN_USE,
+	.bc_spi1	= SPI1_NOT_IN_USE,
+	.bc_wifi_hw	= QTN_RUBY_WIFI_NONE,
+	.bc_uart1	= UART1_NOT_IN_USE,
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,
+};
+
+#endif
+
+
+#define QTN_BOARD_DB					{		\
+	{ /* 0 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD,			\
+	.bc_name	= "micron32-160, emac0-24, pa0",		\
+	.bc_ddr_type	= DDR_32_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_32_320,		\
+	.bc_name	= "micron32-250, emac0-24, pa0",		\
+	.bc_ddr_type	= DDR_32_MICRON,				\
+	.bc_ddr_speed	= DDR_250,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 2 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_16_320,		\
+	.bc_name	= "micron16-250, emac0-24, pa0",		\
+	.bc_ddr_type	= DDR_16_MICRON,				\
+	.bc_ddr_speed	= DDR_250,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 3 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_16_160,		\
+	.bc_name	= "micron16-160, emac0-24, pa0",		\
+	.bc_ddr_type	= DDR_16_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 4 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_ETRON,			\
+	.bc_name	= "etron16-250, emac0-24, pa0",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_250,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 5 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_ETRON_320,		\
+	.bc_name	= "etron16-320, emac0-24, pa0",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 6 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_ETRON_160,		\
+	.bc_name	= "etron16-160, emac0-24, pa0",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 7 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_16_200,		\
+	.bc_name	= "micron16-200, emac0-24, pa0",		\
+	.bc_ddr_type	= DDR_16_MICRON,				\
+	.bc_ddr_speed	= DDR_200,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 8 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_32_200,		\
+	.bc_name	= "micron32-200, emac0-24, pa0",		\
+	.bc_ddr_type	= DDR_32_MICRON,				\
+	.bc_ddr_speed	= DDR_200,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 9 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_PCIE,			\
+	.bc_name	= "etron16-160, pcie, pa2, phy loopbk",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE | PCIE_USE_PHY_LOOPBK,		\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{								\
+	/* test arbitration settings */					\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_32_160_ARB,		\
+	.bc_name	= "micron32-160, emac0-24, pa0, arb",		\
+	.bc_ddr_type	= DDR_32_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{								\
+	/* test emac1 */						\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_32_160_ARB_1,		\
+	.bc_name	= "micron32-160, emac1-31, pa0, arb",		\
+	.bc_ddr_type	= DDR_32_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_phy1_addr	= 31,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{	/* 12 - arb, 16bit emac1 */				\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_16_160_ARB_1,		\
+	.bc_name	= "micron16-160, emac1-31, pa0, arb",		\
+	.bc_ddr_type	= DDR_16_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= 31,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{	/* 13 - arb, 16bit emac1 */				\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_32_160_ARB_0,		\
+	.bc_name	= "micron16-160, emac0-24, pa0, arb",		\
+	.bc_ddr_type	= DDR_16_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{	/* 14 */						\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_ETRON_160_EMAC1,	\
+	.bc_name	= "etron16-160, emac0-24, pa0",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 15 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_ETRON_250_EMAC1,	\
+	.bc_name	= "etron16-250, emac1-31, pa0",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_250,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_phy1_addr	= 31,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 16 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_ETRON_32_320_EMAC1,	\
+	.bc_name	= "etron32-320, emac1-31, pa0",			\
+	.bc_ddr_type	= DDR_32_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_phy1_addr	= 31,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 17 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_ETRON32_160,			\
+	.bc_name	= "etron32-160, emac0-24, pa0",			\
+	.bc_ddr_type	= DDR_32_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE | PCIE_USE_PHY_LOOPBK,		\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 18 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_ETRON32_320,			\
+	.bc_name	= "etron32-250, emac0-24, pa0",			\
+	.bc_ddr_type	= DDR_32_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE | PCIE_USE_PHY_LOOPBK,		\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 19 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_MICRON_DUALEMAC,	\
+	.bc_name	= "micron32-160, emac0, emac1, pa0",		\
+	.bc_ddr_type	= DDR_32_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 20 */							\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_MICRON_DUALEMAC_MII,	\
+	.bc_name	= "micron32-160, emac0-mii-100, emac1-mii-100, pa0",	\
+	.bc_ddr_type	= DDR_32_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB,	\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB,	\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,			\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{								\
+	/* 21 Bringup board with dual EMAC loopback */			\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_MICRON_DUALEMAC_LOOPBACK,		\
+	.bc_name	= "micron16-160, pcie, emac0, emac1, pa0, phy loopback",	\
+	.bc_ddr_type	= DDR_16_MICRON,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,				\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_phy1_addr	= 31,						\
+	.bc_phy0_addr	= 24,						\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_BRINGUP_RWPA,		\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE | PCIE_USE_PHY_LOOPBK,\
+	 },{	/* 22 */												\
+	.bc_board_id	= QTN_RUBY_BRINGUP_BOARD_16_160_DUALEMAC,	\
+	.bc_name        = "etron16-160, emac1,emac0, pa0",			\
+	.bc_ddr_type    = DDR_16_ETRON,								\
+	.bc_ddr_speed   = DDR_160,									\
+	.bc_ddr_size    = DDR_64MB,									\
+	.bc_emac0       = EMAC_IN_USE,								\
+	.bc_emac1       = EMAC_IN_USE,								\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,						\
+	.bc_phy1_addr   = EMAC_PHY_ADDR_SCAN,						\
+	.bc_spi1        = SPI1_IN_USE,								\
+	.bc_wifi_hw     = QTN_RUBY_BRINGUP_RWPA,					\
+	.bc_uart1       = UART1_IN_USE,								\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,				\
+	 },{ /* 1000 */							\
+	.bc_board_id	= QTN_RUBY_REFERENCE_DESIGN_BOARD,		\
+	.bc_name	= "etron16-160, emac1, pa1",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1001 */							\
+	.bc_board_id	= QTN_RUBY_REFERENCE_DESIGN_BOARD_250,		\
+	.bc_name	= "etron16-250, emac1, pa1",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_250,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1002 */							\
+	.bc_board_id	= QTN_RUBY_REF_BOARD_DUAL_CON,			\
+	.bc_name	= "etron32-160, emac1, pa1",			\
+	.bc_ddr_type	= DDR_32_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_pcie	= PCIE_IN_USE,					\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1003 */							\
+	.bc_board_id	= QTN_RUBY_REFERENCE_DESIGN_BOARD_320,		\
+	.bc_name	= "etron16-320, emac1, pa1",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1004 */							\
+	.bc_board_id	= QTN_RUBY_ETRON_32_320_EMAC1,			\
+	.bc_name	= "etron32-320, emac1, pa1",			\
+	.bc_ddr_type	= DDR_32_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1005 */							\
+	.bc_board_id	= QTN_RUBY_ETRON_32_250_EMAC1,			\
+	.bc_name	= "etron32-250, emac1, pa1",			\
+	.bc_ddr_type	= DDR_32_ETRON,					\
+	.bc_ddr_speed	= DDR_250,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1006 */							\
+	.bc_board_id	= QTN_RUBY_REFERENCE_DESIGN_BOARD_RGMII_DLL,	\
+	.bc_name	= "etron16-160, emac1-rgmii-dll, pa2",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DLL_TIMING,		\
+	 },{ /* 1007 */							\
+	.bc_board_id	= QTN_RUBY_QHS710_5S5_SIGE_DDR250,		\
+	.bc_name	= "etron16-250, emac1, pa2",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_250,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1008 */							\
+	.bc_board_id	= QTN_RUBY_QHS710_5S5_SIGE_DDR320,		\
+	.bc_name	= "etron16-320, emac1, pa2",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1009 */							\
+	.bc_board_id	= QTN_RUBY_OHS711_PCIE_320DDR,			\
+	.bc_name	= "etron16-320, pcie, pa2",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1170 */							\
+	.bc_board_id	= QTN_RUBY_QHS713_5S1_PCIERC_DDR160,		\
+	.bc_name	= "etron16-160, emac1, pcie-rc, pa1",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE | PCIE_RC_MODE,			\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1171 */							\
+	.bc_board_id	= QTN_RUBY_OHS711_5S13_PCIE_DDR320,		\
+	.bc_name	= "etron16-320, pcie-ep, pa2",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1172 */							\
+	.bc_board_id	= QTN_RUBY_QHS713_5S1_PCIERC_DDR320,		\
+	.bc_name	= "etron16-320, emac1, pcie-rc, pa1",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_320,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE | PCIE_RC_MODE,			\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1200 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_0,				\
+	.bc_name	= "etron16-160, emac1-mii, pa1",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_MII,			\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1201 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_1,				\
+	.bc_name	= "etron16-160, emac0-mii, pa1",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_MII,			\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1202 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_2,				\
+	.bc_name	= "etron16-160, emac1 88e6071-mii, pa1",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_MV88E6071,				\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1203 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_3,				\
+	.bc_name	= "etron16-160, emac1 ar8236-mii, pa1",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_AR8236,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1204 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_4,				\
+	.bc_name	= "etron16-160, pcie, pa2",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1205 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_5,				\
+	.bc_name	= "etron16-160, emac1, mii-100, pa1",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_NOT_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1206 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_6,				\
+	.bc_name	= "etron16-160, emac1, mii-100, pa2",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_NOT_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1207 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_7,				\
+	.bc_name	= "etron16-160, emac1, pa2",			\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE,					\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1208 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_8,				\
+	.bc_name	= "etron16-160, emac1 ar8327-rgmii, pa1",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_AR8327,     \
+	.bc_phy1_addr   = EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_REF_RWPA,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1209 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_9,				\
+	.bc_name	= "etron16-160, emac1 rtl8363s-rgmii , pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE |  EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1210 */						\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_10,			\
+	.bc_name	= "etron16-160, emac1 back-to-back-rgmii, pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE |  EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1211 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_11,			\
+	.bc_name	= "etron16-160, emac0-mii, pa2",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_MII,			\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1212 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_12,			\
+	.bc_name	= "etron16-160, emac1 88e6071-mii, pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_MV88E6071,				\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1213 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_13,			\
+	.bc_name	= "etron16-160, emac0, emac1 b2b-rgmii 100M, pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE,	\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1214 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_14,			\
+	.bc_name	= "etron16-160, emac0-mii-gpio1rst, emac1-mii-gpio13rst-100M, pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_GPIO1_RESET,		\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_GPIO13_RESET | EMAC_PHY_FORCE_100MB,	\
+	.bc_phy0_addr	= 1,				\
+	.bc_phy1_addr	= 2,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1215 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_15,			\
+	.bc_name	= "etron16-160, emac0, emac1 b2b-rgmii 1000M, pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE,	\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_710F,			\
+	 },{ /* 1216 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_16,			\
+	.bc_name	= "etron16-160, emac1 b2b-rgmii 100M, pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1217 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_17,			\
+	.bc_name	= "etron16-160, emac1 b2b-rgmii 1000M, pa2",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_710F,			\
+	 },{ /* 1218 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_18,			\
+	.bc_name	= "etron16-160, emac1-mii, pa2",		\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_NOT_IN_USE,				\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1219 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_19,			\
+	.bc_name	= "samsung16-160, emac1, mii-100, pa2",		\
+	.bc_ddr_type	= DDR_16_SAMSUNG,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_NOT_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB,	\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	 },{ /* 1220 */							\
+	.bc_board_id	= QTN_RUBY_ODM_BOARD_20,			\
+	.bc_name	= "etron16-160, emac0, emac1 b2b-rgmii 1000M, no wifi",	\
+	.bc_ddr_type	= DDR_16_ETRON,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE,	\
+	.bc_phy0_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_phy1_addr	= EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw	= QTN_RUBY_WIFI_NONE,				\
+	.bc_uart1	= UART1_NOT_IN_USE,				\
+	.bc_pcie	= PCIE_IN_USE | PCIE_RC_MODE,			\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1221 */							\
+	.bc_board_id = QTN_RUBY_ODM_BOARD_21,				\
+	.bc_name = "etron16-160, emac1 ar8236-mii, pa2",		\
+	.bc_ddr_type = DDR_16_ETRON,					\
+	.bc_ddr_speed = DDR_160,					\
+	.bc_ddr_size = DDR_64MB,					\
+	.bc_emac1 = EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_AR8236,                                          \
+	.bc_phy1_addr = EMAC_PHY_ADDR_SCAN,				\
+	.bc_wifi_hw = QTN_RUBY_SIGE,                                    \
+	.bc_uart1 = UART1_NOT_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1222 */							\
+	.bc_board_id    = QTN_RUBY_ODM_BOARD_22,                        \
+	.bc_name        = "etron16-160, emac1 ar8327-rgmii, pa2",       \
+	.bc_ddr_type    = DDR_16_ETRON,                                 \
+	.bc_ddr_speed   = DDR_160,                                      \
+	.bc_ddr_size    = DDR_64MB,                                     \
+	.bc_emac1       = EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_AR8327, \
+	.bc_phy1_addr   = EMAC_PHY_ADDR_SCAN,                           \
+	.bc_wifi_hw     = QTN_RUBY_SIGE,                                \
+	.bc_uart1       = UART1_NOT_IN_USE,                             \
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,                   \
+	 },{ /* 1223 */							\
+	.bc_board_id	= QTN_TOPAZ_FPGAA_BOARD,		\
+	.bc_name	= "FPGA-A(hw_config_id:1223) DDR3, EMAC1, WMAC, RGMII-1G", \
+	.bc_ddr_type	= DDR3_16_WINBOND,					\
+	.bc_ddr_speed	= DDR3_320MHz,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE, \
+	.bc_emac1	= EMAC_NOT_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE, \
+	.bc_phy0_addr	= TOPAZ_FPGAA_PHY0_ADDR,			\
+	.bc_phy1_addr	= TOPAZ_FPGAA_PHY1_ADDR,			\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1224 */							\
+	.bc_board_id	= QTN_TOPAZ_FPGAB_BOARD,		\
+	.bc_name	= "FPGA-B(hw_config_id:1224) DDR3, EMAC0, WMAC, RGMII-1G", \
+	.bc_ddr_type	= DDR3_16_WINBOND,					\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB, \
+	.bc_emac1	= EMAC_NOT_IN_USE | EMAC_PHY_FORCE_100MB,	\
+	.bc_phy0_addr	= TOPAZ_FPGAB_PHY0_ADDR,			\
+	.bc_phy1_addr	= TOPAZ_FPGAB_PHY1_ADDR,			\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1225 */							\
+	.bc_board_id	= QTN_TOPAZ_DUAL_EMAC_FPGAA_BOARD,		\
+	.bc_name	= "FPGA-A(hw_config_id:1225) DDR3, EMAC0, EMAC1, WMAC, RGMII-1G", \
+	.bc_ddr_type	= DDR3_16_WINBOND,				\
+	.bc_ddr_speed	= DDR3_320MHz,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_FPGAA_ONLY, \
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_FPGAA_ONLY, \
+	.bc_phy0_addr	= TOPAZ_FPGAA_PHY0_ADDR,			\
+	.bc_phy1_addr	= TOPAZ_FPGAA_PHY1_ADDR,			\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1226 */							\
+	.bc_board_id	= QTN_TOPAZ_DUAL_EMAC_FPGAB_BOARD,		\
+	.bc_name	= "FPGA-B(hw_config_id:1226) DDR3, EMAC0, EMAC1, WMAC, RGMII-1G", \
+	.bc_ddr_type	= DDR3_16_WINBOND,				\
+	.bc_ddr_speed	= DDR3_320MHz,					\
+	.bc_ddr_size	= DDR_64MB,					\
+	.bc_emac0	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_FPGAB_ONLY, \
+	.bc_emac1	= EMAC_IN_USE | EMAC_PHY_FORCE_100MB | EMAC_PHY_FPGAB_ONLY, \
+	.bc_phy0_addr	= TOPAZ_FPGAB_PHY0_ADDR,			\
+	.bc_phy1_addr	= TOPAZ_FPGAB_PHY1_ADDR,			\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1227 */							\
+	.bc_board_id	= QTN_TOPAZ_RC_BOARD,				\
+	.bc_name	= "FPGA-A(hw_config_id:1227) DDR3, EMAC1, WMAC, RGMII-1G", \
+	.bc_ddr_type	= DDR3_16_WINBOND,				\
+	.bc_ddr_speed	= DDR3_320MHz,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= TOPAZ_PHY0_ADDR,				\
+	.bc_phy1_addr	= TOPAZ_PHY1_ADDR,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_WIFI_NONE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_pcie        = PCIE_IN_USE | PCIE_RC_MODE,			\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	 },{ /* 1228 */							\
+	.bc_board_id	= QTN_TOPAZ_EP_BOARD,				\
+	.bc_name	= "FPGA-B(hw_config_id:1228) DDR3, EMAC0, WMAC, RGMII-1G", \
+	.bc_ddr_type	= DDR3_16_WINBOND,				\
+	.bc_ddr_speed	= DDR_160,					\
+	.bc_ddr_size	= DDR_128MB,					\
+	.bc_emac0	= EMAC_IN_USE,					\
+	.bc_emac1	= EMAC_NOT_IN_USE,				\
+	.bc_phy0_addr	= TOPAZ_PHY0_ADDR,				\
+	.bc_phy1_addr	= TOPAZ_PHY1_ADDR,				\
+	.bc_spi1	= SPI1_IN_USE,					\
+	.bc_wifi_hw	= QTN_RUBY_SIGE,				\
+	.bc_uart1	= UART1_IN_USE,					\
+	.bc_pcie	= PCIE_IN_USE,					\
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,			\
+	} ,{ /* 1229 */							\
+	.bc_board_id    = QTN_TOPAZ_BB_BOARD, \
+	.bc_name        = "BB-EVK(hw_config_id:1229) DDR3, EMAC0, WMAC, RGMII-1G", \
+	.bc_ddr_type    = DDR3_16_WINBOND, \
+	.bc_ddr_speed   = DDR3_400MHz, \
+	.bc_ddr_size    = DDR_128MB, \
+	.bc_emac0       = EMAC_IN_USE, \
+	.bc_emac1       = EMAC_IN_USE, \
+	.bc_phy0_addr   = TOPAZ_PHY0_ADDR, \
+	.bc_phy1_addr   = TOPAZ_PHY1_ADDR, \
+	.bc_spi1        = SPI1_IN_USE, \
+	.bc_wifi_hw     = QTN_RUBY_WIFI_NONE, \
+	.bc_uart1       = UART1_IN_USE, \
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_NODELAY, \
+	} ,{ /* 1230 */ \
+	.bc_board_id    = QTN_TOPAZ_RF_BOARD, \
+	.bc_name        = "RF-EVK(hw_config_id:1230) DDR3, EMAC0, WMAC, RGMII-1G", \
+	.bc_ddr_type    = DDR3_16_WINBOND, \
+	.bc_ddr_speed   = DDR3_500MHz, \
+	.bc_ddr_size    = DDR_128MB, \
+	.bc_emac0       = EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE, \
+	.bc_emac1       = EMAC_IN_USE, \
+	.bc_phy0_addr   = TOPAZ_PHY0_ADDR, \
+	.bc_phy1_addr   = TOPAZ_PHY1_ADDR, \
+	.bc_spi1        = SPI1_IN_USE, \
+	.bc_wifi_hw     = QTN_TPZ_SE5003L1_INV, \
+	.bc_uart1       = UART1_IN_USE, \
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_NODELAY, \
+	} ,{ /* 1231 */ \
+	.bc_board_id    = QTN_TOPAZ_QHS840_5S1, \
+	.bc_name        = "QHS840_5S1 RDK", \
+	.bc_ddr_type    = DDR3_16_WINBOND, \
+	.bc_ddr_speed   = DDR3_500MHz, \
+	.bc_ddr_size    = DDR_128MB, \
+	.bc_emac0       = EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE | EMAC_PHY_RTL8363SB_P0, \
+	.bc_wifi_hw     = QTN_TPZ_SE5003L1, \
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_NODELAY, \
+	} ,{ /* 2008 */ \
+	.bc_board_id    = QTN_TOPAZ_RGMII_RFIC6, \
+	.bc_name        = "TOPAZ RGMII RFIC6", \
+	.bc_ddr_type    = DDR_NO_INIT, \
+	.bc_ddr_speed   = DEFAULT_DDR_SPEED, \
+	.bc_ddr_size    = DDR_128MB, \
+	.bc_emac0       = EMAC_IN_USE | EMAC_PHY_FORCE_1000MB | EMAC_PHY_NOT_IN_USE, \
+	.bc_emac1       = EMAC_NOT_IN_USE, \
+	.bc_phy0_addr   = TOPAZ_PHY0_ADDR, \
+	.bc_phy1_addr   = TOPAZ_PHY1_ADDR, \
+	.bc_spi1        = SPI1_IN_USE, \
+	.bc_wifi_hw     = QTN_TPZ_DBS_SKY85806_SKY85811, \
+	.bc_uart1       = UART1_IN_USE, \
+	.bc_rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT,		\
+	}\
+}
+
+#endif /* _RUBY_BOARD_DB_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_config.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_config.h
new file mode 100644
index 0000000..4caec1b
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_config.h
@@ -0,0 +1,187 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Ruby platform.
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef __RUBY_CONFIG_H
+#define __RUBY_CONFIG_H
+
+#include "topaz_config.h"
+
+/*******************************************************************/
+
+#if TOPAZ_MMAP_UNIFIED
+	#define RUBY_MMAP_FLIP		0
+#else
+	#if !(defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD))
+		#define RUBY_MMAP_FLIP		1
+	#else
+		#define RUBY_MMAP_FLIP		0
+	#endif
+#endif
+
+/* Set to 1 if MuC need to enable TLB, otherwise set to 0 */
+#define RUBY_MUC_TLB_ENABLE		1
+
+/*******************************************************************/
+
+#ifdef RUBY_PLATFORM
+
+	#if RUBY_FPGA_PLATFORM
+		#define RUBY_SERIAL_BAUD	38400
+		#define RUBY_FIXED_DEV_CLK	12500000
+		#define RUBY_FIXED_CPU_CLK	40000000
+		#define RUBY_FPGA_DDR
+	#else
+		#define RUBY_SERIAL_BAUD	115200
+		#define RUBY_FIXED_DEV_CLK	125000000
+		#define RUBY_FIXED_CPU_CLK	400000000
+		#define RUBY_ASIC_DDR
+	#endif /* #if RUBY_FPGA_PLATFORM */
+
+	#define UPF_SPD_FLAG	0
+	#define DEFAULT_BAUD	RUBY_SERIAL_BAUD
+
+#endif /* #ifdef RUBY_PLATFORM */
+
+/*******************************************************************/
+/* Define some constants for Linux ARC kernel */
+#define CONFIG_ARC700_SERIAL_BAUD	RUBY_SERIAL_BAUD
+#define CONFIG_ARC700_CLK		RUBY_FIXED_CPU_CLK
+#define CONFIG_ARC700_DEV_CLK		RUBY_FIXED_DEV_CLK
+
+/*******************************************************************/
+
+/* RGMII related defines */
+#define CONFIG_ARCH_RUBY_ENET_RGMII
+
+#define CONFIG_ARCH_RGMII_DEFAULT	0x8F8F8F8F
+#define CONFIG_ARCH_RGMII_DLL_TIMING	0x8F8D8F8F
+#define CONFIG_ARCH_RGMII_S1P8NS_H1P9NS	0x8F891F1F
+#define CONFIG_ARCH_RGMII_NODELAY	0x1F1F1F1F
+#define CONFIG_ARCH_RGMII_710F		CONFIG_ARCH_RGMII_NODELAY
+#define CONFIG_ARCH_RGMII_P1RX00TX0E    0x0E8E1F1F
+
+/* EMAC related defines */
+
+/* EMAC flags */
+#define EMAC_NOT_IN_USE			(0)
+#define EMAC_IN_USE			(BIT(0))
+#define EMAC_PHY_NOT_IN_USE		(BIT(1))  // do not initialize/access phy mdio
+#define EMAC_PHY_FORCE_10MB		(BIT(2))
+#define EMAC_PHY_FORCE_100MB		(BIT(3))
+#define EMAC_PHY_FORCE_1000MB		(BIT(4))
+#define EMAC_PHY_FORCE_HDX		(BIT(5))
+#define EMAC_PHY_RESET			(BIT(6)) // force PHY reset
+#define EMAC_PHY_MII			(BIT(7)) // default is rgmii
+#define EMAC_PHY_AUTO_MASK		(EMAC_PHY_FORCE_10MB | EMAC_PHY_FORCE_100MB | EMAC_PHY_FORCE_1000MB)
+#define EMAC_PHY_AR8236			(BIT(8))
+#define EMAC_PHY_AR8327			(BIT(9))
+#define EMAC_PHY_GPIO1_RESET		(BIT(10))
+#define EMAC_PHY_GPIO13_RESET		(BIT(11))
+#define EMAC_PHY_NO_COC			(BIT(12)) // do not adjust link speed for power savings
+#define EMAC_PHY_MV88E6071		(BIT(13))
+#define EMAC_PHY_FPGAA_ONLY		(BIT(15))
+#define EMAC_PHY_FPGAB_ONLY		(BIT(16))
+#define EMAC_PHY_RTL8363SB_P0		(BIT(18))
+#define EMAC_PHY_RTL8363SB_P1		(BIT(19))
+#define EMAC_BONDED			(BIT(20))
+#define EMAC_PHY_RTL8365MB		(BIT(21))
+#define EMAC_PHY_RTL8211DS		(BIT(22))
+#define EMAC_PHY_RTL8367RB		(BIT(23))
+#define EMAC_PHY_CUSTOM			(BIT(31))
+
+#define EMAC_MV88E6071			(EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_NOT_IN_USE |	\
+						EMAC_PHY_NO_COC | EMAC_PHY_FORCE_100MB | EMAC_PHY_MV88E6071)
+#define EMAC_SLOW_PHY			(EMAC_PHY_FORCE_10MB|EMAC_PHY_FORCE_100MB|EMAC_PHY_MII)
+
+/* force phy addr scan */
+#define EMAC_PHY_ADDR_SCAN		(32)	// scan bus for addr
+
+/* Flash memory sizes */
+#define FLASH_64MB			(64*1024*1024)
+#define FLASH_32MB			(32*1024*1024)
+#define FLASH_16MB			(16*1024*1024)
+#define FLASH_8MB			(8*1024*1024)
+#define FLASH_4MB			(4*1024*1024)
+#define FLASH_2MB			(2*1024*1024)
+#define FLASH_256KB			(256*1024)
+#define FLASH_64KB			(64*1024)
+#define DEFAULT_FLASH_SIZE		(FLASH_8MB)
+#define FLASH_SIZE_JEDEC		(0)
+
+/* DDR memory sizes */
+#define DDR_256MB			(256*1024*1024)
+#define DDR_128MB			(128*1024*1024)
+#define DDR_64MB			(64*1024*1024)
+#define DDR_46MB			(46*1024*1024)
+#define DDR_32MB			(32*1024*1024)
+#define DDR_AUTO			(0)
+#define DEFAULT_DDR_SIZE		(DDR_64MB)
+
+/* Other DDR defines */
+#define DDR3_800MHz		800
+#define DDR3_640MHz		640
+#define DDR3_500MHz		500
+#define DDR3_400MHz		400
+#define DDR3_320MHz		320
+#define DDR_400			400
+#define DDR_320			320
+#define DDR_250			250
+#define DDR_200			200
+#define DDR_160			160
+#define DDR_125			125
+#define DEFAULT_DDR_SPEED	(DDR_160)
+
+#define	DDR_32_MICRON		0
+#define DDR_16_MICRON		1
+#define DDR_16_ETRON		2
+#define DDR_16_SAMSUNG		3
+#define DDR_32_ETRON		4
+#define DDR_32_SAMSUNG		5
+#define DDR_16_HYNIX		6
+#define DDR3_16_WINBOND		7
+#define DDR3_32_WINBOND		8
+#define DDR_NO_INIT		9
+#define DEFAULT_DDR_CFG		(DDR_16_MICRON)
+
+/* UART1 defines */
+#define	UART1_NOT_IN_USE	0
+#define	UART1_IN_USE		1
+
+#define PCIE_NOT_IN_USE		0
+#define PCIE_IN_USE		(BIT(0))
+#define PCIE_USE_PHY_LOOPBK	(BIT(1))
+#define PCIE_RC_MODE		(BIT(2))
+#define PCIE_ENDPOINT		(PCIE_IN_USE | PCIE_USE_PHY_LOOPBK)
+#define PCIE_ROOTCOMPLEX	(PCIE_IN_USE | PCIE_RC_MODE | PCIE_USE_PHY_LOOPBK)
+
+/*******************************************************************/
+
+#define CONFIG_USE_SPI1_FOR_IPC	PLATFORM_REG_SWITCH(1, 0)
+
+#endif // #ifndef __RUBY_CONFIG_H
+
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_flip.S b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_flip.S
new file mode 100644
index 0000000..31b57a2
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_flip.S
@@ -0,0 +1,92 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <config.h>
+#include <asm/arcregs.h>
+#include <asm/arch/platform.h>
+#include "start.inl"
+
+#if RUBY_MMAP_FLIP || TOPAZ_MMAP_UNIFIED
+
+ruby_flip_mmap:
+	.globl ruby_flip_mmap
+	/* Code must be position-independent! */
+
+	/*
+	* Flush and invalidate data cache.
+	* Please make sure that instructions which touch
+	* d-cache are NOT used until flipping is done.
+	*/
+	/* Set flush mode for invalidate operation */
+	lr      r1, [ARC_REG_DC_CTRL]
+	bset    r1, r1, 0x6
+	sr      r1, [ARC_REG_DC_CTRL]
+	/* Start invalidate operation */
+	mov     r1, 0x1
+	sr      r1, [ARC_REG_DC_IVDC]
+	/* Check while cache invalidating will be finished */
+dcache_flush_continue:
+	lr      r1, [ARC_REG_DC_CTRL]
+	and     r1, r1, ARC_DC_FLUSH_STATUS_BIT
+	brne    r1, 0x0, dcache_flush_continue
+
+	/* Prepare flipping.
+	 * After code is finished, memory maps will change as follows:
+	 *     Flip map:
+	 *         SRAM 0x8000_0000 -> 0x8800_0000
+	 *         DRAM 0x0         -> 0x8000_0000
+	 *     Unified map:
+	 *         SRAM 0x8000_0000 -> 0x9800_0000
+	 *         DRAM 0x0         -> 0x8000_0000
+	 */
+	mov     r1, RUBY_SYS_CTL_BASE_ADDR_NOMAP
+	mov     r2, FLIPBIT | RUBY_SYS_CTL_REMAP(0x3)
+	st.di   r2, [r1, RUBY_SYS_CTL_MASK - RUBY_SYS_CTL_BASE_ADDR]
+	mov     r2, FLIPBIT
+
+.align ARC_ICACHE_LINE_LEN
+	/* Do flipping.
+	* Align to cache line to ensure we don't hit memory during following instructions.
+	* Code must fit into 1 cache line (32 bytes).
+	*/
+	st.di   r2, [r1, RUBY_SYS_CTL_CTRL - RUBY_SYS_CTL_BASE_ADDR]
+	ld.di   r2, [r1, RUBY_SYS_CTL_CTRL - RUBY_SYS_CTL_BASE_ADDR] /* read back to clear pipeline */
+	sync
+	j       boot_continue		/* jump to absolute addr in sram */
+	/* Align to cache line so code occupy strictly 1 cache line. */
+.align ARC_ICACHE_LINE_LEN
+
+boot_continue:
+	/* Finalize flipping. */
+	mov     r2, 0x0
+	st.di   r2, [r1, RUBY_SYS_CTL_MASK - RUBY_SYS_CTL_BASE_ADDR]
+
+	/* Let's discard instruction cache.
+	*/
+	mov     r2, 0x1
+	sr      r2, [ARC_REG_IC_IVIC] /* invalidate i-cache */
+	lr      r2, [ARC_REG_IC_CTRL] /* read will be not completed until i-cache is invalidated */
+
+	/* Done. We are now sitting in different addresses. */
+	b	ruby_boot
+#endif // #if RUBY_MMAP_FLIP || TOPAZ_MMAP_UNIFIED
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_mem.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_mem.h
new file mode 100644
index 0000000..b23561d
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_mem.h
@@ -0,0 +1,517 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Ruby platform.
+ * Has to be used by runtime firmware.
+ */
+
+#ifndef __RUBY_MEM_H
+#define __RUBY_MEM_H
+
+#include "common_mem.h"
+
+/* FIXME: Move CPU related macros to a separate header file. */
+#define ARC_DCACHE_LINE_LENGTH			32
+
+/* NEVTBD - put in real XYMEM values */
+#define RUBY_DSP_XYMEM_BEGIN			0xD0000000
+#define RUBY_DSP_XYMEM_END			0xDFFFFFFF
+
+/* SRAM layout */
+
+#ifdef QTN_RC_ENABLE_HDP
+#define TOPAZ_HBM_BUF_EMAC_RX_COUNT_S		(14)
+#define TOPAZ_HBM_BUF_WMAC_RX_COUNT_S		(0)
+#else
+#define TOPAZ_HBM_BUF_EMAC_RX_COUNT_S		(13)
+#define TOPAZ_HBM_BUF_WMAC_RX_COUNT_S		(11)
+#endif
+#define TOPAZ_HBM_EMAC_TX_DONE_COUNT_S		(12)
+
+#define TOPAZ_HBM_BUF_EMAC_RX_COUNT		(1 << TOPAZ_HBM_BUF_EMAC_RX_COUNT_S)
+#define TOPAZ_HBM_BUF_WMAC_RX_COUNT		(1 << TOPAZ_HBM_BUF_WMAC_RX_COUNT_S)
+#define TOPAZ_HBM_EMAC_TX_DONE_COUNT		(1 << TOPAZ_HBM_EMAC_TX_DONE_COUNT_S)
+
+/* dedicated SRAM space for HBM pointer pools */
+#define TOPAZ_HBM_POOL_PTR_SIZE			4	/* sizeof(void *), 32 bit arch */
+#define TOPAZ_HBM_POOL_EMAC_RX_START		0x00000000
+#define TOPAZ_HBM_POOL_EMAC_RX_SIZE		(TOPAZ_HBM_BUF_EMAC_RX_COUNT * TOPAZ_HBM_POOL_PTR_SIZE)
+#define TOPAZ_HBM_POOL_EMAC_RX_END		(TOPAZ_HBM_POOL_EMAC_RX_START + TOPAZ_HBM_POOL_EMAC_RX_SIZE)
+#define TOPAZ_HBM_POOL_WMAC_RX_START		TOPAZ_HBM_POOL_EMAC_RX_END
+#define TOPAZ_HBM_POOL_WMAC_RX_SIZE		(TOPAZ_HBM_BUF_WMAC_RX_COUNT * TOPAZ_HBM_POOL_PTR_SIZE)
+#define TOPAZ_HBM_POOL_WMAC_RX_END		(TOPAZ_HBM_POOL_WMAC_RX_START + TOPAZ_HBM_POOL_WMAC_RX_SIZE)
+#define TOPAZ_HBM_POOL_EMAC_TX_DONE_START	TOPAZ_HBM_POOL_WMAC_RX_END
+#define TOPAZ_HBM_POOL_EMAC_TX_DONE_SIZE	(TOPAZ_HBM_EMAC_TX_DONE_COUNT * TOPAZ_HBM_POOL_PTR_SIZE)
+#define TOPAZ_HBM_POOL_EMAC_TX_DONE_END		(TOPAZ_HBM_POOL_EMAC_TX_DONE_START + TOPAZ_HBM_POOL_EMAC_TX_DONE_SIZE)
+#define TOPAZ_FWT_SW_START			TOPAZ_HBM_POOL_EMAC_TX_DONE_END
+#define TOPAZ_FWT_SW_SIZE			(4096)
+#define TOPAZ_FWT_SW_END			(TOPAZ_FWT_SW_START + TOPAZ_FWT_SW_SIZE)
+
+#define CONFIG_MUC_EXTRA_RES_BASE		TOPAZ_FWT_SW_END
+#define CONFIG_MUC_EXTRA_RESERVE_SIZE		(8 * 1024)
+#define CONFIG_MUC_EXTRA_RES_END		(CONFIG_MUC_EXTRA_RES_BASE + CONFIG_MUC_EXTRA_RESERVE_SIZE)
+
+#define CONFIG_ARC_KERNEL_SRAM_B1_BASE		ROUNDUP(CONFIG_MUC_EXTRA_RES_END, CONFIG_ARC_KERNEL_PAGE_SIZE)
+#define CONFIG_ARC_KERNEL_SRAM_B1_SIZE		(22 * 1024)
+#define CONFIG_ARC_KERNEL_SRAM_B1_END		(CONFIG_ARC_KERNEL_SRAM_B1_BASE + CONFIG_ARC_KERNEL_SRAM_B1_SIZE)
+#define CONFIG_ARC_KERNEL_SRAM_B2_BASE		CONFIG_ARC_KERNEL_SRAM_B1_END
+#define CONFIG_ARC_KERNEL_SRAM_B2_END		ROUNDUP(CONFIG_ARC_KERNEL_SRAM_B2_BASE, RUBY_SRAM_BANK_SIZE)
+#define CONFIG_ARC_KERNEL_SRAM_B2_SIZE		(CONFIG_ARC_KERNEL_SRAM_B2_END - CONFIG_ARC_KERNEL_SRAM_B2_BASE)
+#define CONFIG_ARC_MUC_SRAM_B1_BASE		ROUNDUP(CONFIG_ARC_KERNEL_SRAM_B2_END, CONFIG_ARC_KERNEL_PAGE_SIZE)
+#define CONFIG_ARC_MUC_SRAM_B1_END		ROUNDUP(CONFIG_ARC_MUC_SRAM_B1_BASE + 1, RUBY_SRAM_BANK_SIZE)
+#define CONFIG_ARC_MUC_SRAM_B1_SIZE		(CONFIG_ARC_MUC_SRAM_B1_END - CONFIG_ARC_MUC_SRAM_B1_BASE)
+#define CONFIG_ARC_MUC_SRAM_B2_BASE		ROUNDUP(CONFIG_ARC_MUC_SRAM_B1_END, RUBY_SRAM_BANK_SIZE)
+#define CONFIG_ARC_MUC_SRAM_B2_SIZE		(RUBY_SRAM_BANK_SAFE_SIZE - RUBY_CRUMBS_SIZE)
+#define CONFIG_ARC_MUC_SRAM_B2_END		(CONFIG_ARC_MUC_SRAM_B2_BASE + CONFIG_ARC_MUC_SRAM_B2_SIZE)
+#define CONFIG_ARC_AUC_SRAM_BASE		ROUNDUP(CONFIG_ARC_MUC_SRAM_B2_END, RUBY_SRAM_BANK_SIZE)
+
+#define CONFIG_ARC_AUC_MU_SRAM_SIZE		(3 * RUBY_SRAM_BANK_SIZE)
+#define CONFIG_ARC_AUC_MU_SRAM_END		(CONFIG_ARC_AUC_SRAM_BASE + CONFIG_ARC_AUC_MU_SRAM_SIZE)
+#define CONFIG_ARC_AUC_NOMU_SRAM_SIZE		(4 * RUBY_SRAM_BANK_SIZE)
+#define CONFIG_ARC_AUC_NOMU_SRAM_END		(CONFIG_ARC_AUC_SRAM_BASE + CONFIG_ARC_AUC_NOMU_SRAM_SIZE)
+
+/* MU TxBF qmatrix is stored at the last bank of SRAM, DSP writes to it, has to use SRAM BUS addr.
+ * WARN: CONFIG_ARC_MU_QMAT_* defines are only valid if MU-enabled AuC FW is running,
+ * since in non-MU AuC FW case MU QMat SRAM bank is used by AuC.
+ */
+#define CONFIG_ARC_MU_QMAT_BASE		(RUBY_SRAM_BUS_BEGIN + 7 * RUBY_SRAM_BANK_SIZE)
+#define CONFIG_ARC_MU_QMAT_SIZE		RUBY_SRAM_BANK_SIZE
+#define CONFIG_ARC_MU_QMAT_END		(CONFIG_ARC_MU_QMAT_BASE + CONFIG_ARC_MU_QMAT_SIZE)
+
+#define CONFIG_ARC_SRAM_END		RUBY_SRAM_SIZE
+
+#if TOPAZ_RX_ACCELERATE
+	/* TODO FIXME - MuC crashed when copying data between SRAM and DDR */
+	#define CONFIG_ARC_MUC_STACK_OFFSET		(CONFIG_ARC_MUC_SRAM_B2_END - 2048)
+#else
+	#define CONFIG_ARC_MUC_STACK_OFFSET		(CONFIG_ARC_MUC_SRAM_B2_END)
+#endif
+
+#if CONFIG_ARC_MUC_STACK_OFFSET_UBOOT != CONFIG_ARC_MUC_STACK_OFFSET
+	#error "CONFIG_ARC_MUC_STACK_OFFSET_UBOOT must be equal to CONFIG_ARC_MUC_STACK_OFFSET!"
+#endif
+
+#define CONFIG_ARC_MUC_STACK_INIT	(RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_STACK_OFFSET)
+
+#define RUBY_CRUMBS_OFFSET		(CONFIG_ARC_MUC_SRAM_B2_END)
+
+#if RUBY_CRUMBS_OFFSET != RUBY_CRUMBS_OFFSET_UBOOT
+	#error "RUBY_CRUMBS_OFFSET_UBOOT must be equal to RUBY_CRUMBS_OFFSET!"
+#endif
+
+#define RUBY_CRUMBS_ADDR		(RUBY_SRAM_BEGIN + RUBY_CRUMBS_OFFSET)
+
+/* DDR layout  */
+#define CONFIG_ARC_PCIE_RSVD_SIZE	(64 * 1024)
+#define CONFIG_ARC_DSP_BASE		(CONFIG_ARC_NULL_END + CONFIG_ARC_PCIE_RSVD_SIZE)
+#define CONFIG_ARC_DSP_SIZE		(768 * 1024)
+#define CONFIG_ARC_DSP_END		(CONFIG_ARC_DSP_BASE + CONFIG_ARC_DSP_SIZE)
+#define CONFIG_ARC_MUC_BASE		CONFIG_ARC_DSP_END
+#ifdef TOPAZ_128_NODE_MODE
+#define CONFIG_ARC_MUC_SIZE		((3 * 1024 * 1024) + (584 * 1024))
+#else
+#define CONFIG_ARC_MUC_SIZE		((2 * 1024 * 1024) + (772 * 1024))
+#endif
+#define MUC_DRAM_RX_RESVERED_RELOC_SIZE		(8 * 1024)
+#define CONFIG_ARC_MUC_END		(CONFIG_ARC_MUC_BASE + CONFIG_ARC_MUC_SIZE)
+#define CONFIG_ARC_MUC_MAPPED_BASE	CONFIG_ARC_MUC_BASE
+#define CONFIG_ARC_MUC_MAPPED_SIZE	(RUBY_MAX_DRAM_SIZE - CONFIG_ARC_MUC_MAPPED_BASE)
+
+#define CONFIG_ARC_AUC_BASE		CONFIG_ARC_MUC_END
+#define CONFIG_ARC_AUC_SIZE		(1024 * 1024 + 768 * 1024 + 40 * 1024)
+#define CONFIG_ARC_AUC_END		(CONFIG_ARC_AUC_BASE + CONFIG_ARC_AUC_SIZE)
+#define TOPAZ_HBM_BUF_ALIGN		(1 * 1024)
+
+#define TOPAZ_HBM_BUF_EMAC_RX_POOL	0
+#define TOPAZ_HBM_BUF_WMAC_RX_POOL	1
+#define TOPAZ_HBM_AUC_FEEDBACK_POOL	2
+#define TOPAZ_HBM_EMAC_TX_DONE_POOL	3
+
+#define TOPAZ_HBM_BUF_EMAC_RX_SIZE	(4 * 1024)
+#define TOPAZ_HBM_BUF_WMAC_RX_SIZE	(17 * 1024)
+
+#define TOPAZ_HBM_BUF_META_SIZE		64		/* keep it 2^n */
+#define TOPAZ_HBM_POOL_GUARD_SIZE	(64 * 1024)
+
+#define TOPAZ_HBM_BUF_EMAC_RX_TOTAL	(TOPAZ_HBM_BUF_EMAC_RX_COUNT *	\
+						TOPAZ_HBM_BUF_EMAC_RX_SIZE)
+#define TOPAZ_HBM_BUF_WMAC_RX_TOTAL	(TOPAZ_HBM_BUF_WMAC_RX_COUNT *	\
+						TOPAZ_HBM_BUF_WMAC_RX_SIZE)
+#define TOPAZ_HBM_BUF_META_BASE		CONFIG_ARC_AUC_END
+
+#define TOPAZ_HBM_BUF_META_EMAC_RX_BASE		(TOPAZ_HBM_BUF_META_BASE + TOPAZ_HBM_BUF_META_SIZE)
+#define TOPAZ_HBM_BUF_META_EMAC_RX_BASE_VIRT	(RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_META_EMAC_RX_BASE)
+#define TOPAZ_HBM_BUF_META_EMAC_RX_TOTAL	(TOPAZ_HBM_BUF_EMAC_RX_COUNT * \
+							TOPAZ_HBM_BUF_META_SIZE)
+#define TOPAZ_HBM_BUF_META_EMAC_RX_END		(TOPAZ_HBM_BUF_META_EMAC_RX_BASE + \
+							TOPAZ_HBM_BUF_META_EMAC_RX_TOTAL)
+
+#define TOPAZ_HBM_BUF_META_WMAC_RX_BASE		(TOPAZ_HBM_BUF_META_EMAC_RX_END + TOPAZ_HBM_BUF_META_SIZE)
+#define TOPAZ_HBM_BUF_META_WMAC_RX_BASE_VIRT	(RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_META_WMAC_RX_BASE)
+#define TOPAZ_HBM_BUF_META_WMAC_RX_TOTAL	(TOPAZ_HBM_BUF_WMAC_RX_COUNT * \
+							TOPAZ_HBM_BUF_META_SIZE)
+#define TOPAZ_HBM_BUF_META_WMAC_RX_END		(TOPAZ_HBM_BUF_META_WMAC_RX_BASE + \
+							TOPAZ_HBM_BUF_META_WMAC_RX_TOTAL)
+
+#define TOPAZ_HBM_BUF_META_END		(TOPAZ_HBM_BUF_META_WMAC_RX_END + TOPAZ_HBM_BUF_META_SIZE)
+#define TOPAZ_HBM_BUF_META_TOTAL	(TOPAZ_HBM_BUF_META_END - TOPAZ_HBM_BUF_META_BASE)
+
+#define TOPAZ_HBM_BUF_BASE		ROUNDUP(TOPAZ_HBM_BUF_META_END, TOPAZ_HBM_BUF_ALIGN)
+
+#define TOPAZ_HBM_BUF_EMAC_RX_BASE	(TOPAZ_HBM_BUF_BASE + TOPAZ_HBM_POOL_GUARD_SIZE)
+#define TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT	(RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE)
+#define TOPAZ_HBM_BUF_EMAC_RX_END	(TOPAZ_HBM_BUF_EMAC_RX_BASE +	\
+						TOPAZ_HBM_BUF_EMAC_RX_TOTAL)
+
+#define TOPAZ_HBM_BUF_WMAC_RX_BASE	(TOPAZ_HBM_BUF_EMAC_RX_END + TOPAZ_HBM_POOL_GUARD_SIZE)
+#define TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT	(RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE)
+#define TOPAZ_HBM_BUF_WMAC_RX_END	(TOPAZ_HBM_BUF_WMAC_RX_BASE +	\
+						TOPAZ_HBM_BUF_WMAC_RX_TOTAL)
+
+#define TOPAZ_HBM_BUF_END		(TOPAZ_HBM_BUF_WMAC_RX_END + TOPAZ_HBM_POOL_GUARD_SIZE)
+
+#define TOPAZ_FWT_MCAST_ENTRIES		2048
+#define TOPAZ_FWT_MCAST_FF_ENTRIES	1	/* one for all FF addresses */
+#define TOPAZ_FWT_MCAST_IPMAP_ENT_SIZE	64	/* sizeof(struct topaz_fwt_sw_ipmap) */
+#define TOPAZ_FWT_MCAST_TQE_ENT_SIZE	20	/* sizeof(struct topaz_fwt_sw_mcast_entry) */
+/* Tables are cache-line aligned to ensure proper memory flushing. */
+#define TOPAZ_FWT_MCAST_IPMAP_SIZE	\
+	ROUNDUP(TOPAZ_FWT_MCAST_ENTRIES * TOPAZ_FWT_MCAST_IPMAP_ENT_SIZE,	\
+			ARC_DCACHE_LINE_LENGTH)
+#define TOPAZ_FWT_MCAST_TQE_SIZE	\
+	ROUNDUP(TOPAZ_FWT_MCAST_ENTRIES * TOPAZ_FWT_MCAST_TQE_ENT_SIZE,		\
+			ARC_DCACHE_LINE_LENGTH)
+#define TOPAZ_FWT_MCAST_TQE_FF_SIZE	\
+	ROUNDUP(TOPAZ_FWT_MCAST_FF_ENTRIES * TOPAZ_FWT_MCAST_TQE_ENT_SIZE,	\
+			ARC_DCACHE_LINE_LENGTH)
+
+#define TOPAZ_FWT_MCAST_IPMAP_BASE	TOPAZ_HBM_BUF_END
+#define TOPAZ_FWT_MCAST_IPMAP_END	(TOPAZ_FWT_MCAST_IPMAP_BASE + TOPAZ_FWT_MCAST_IPMAP_SIZE)
+#define TOPAZ_FWT_MCAST_TQE_BASE	TOPAZ_FWT_MCAST_IPMAP_END
+#define TOPAZ_FWT_MCAST_TQE_END		(TOPAZ_FWT_MCAST_TQE_BASE + TOPAZ_FWT_MCAST_TQE_SIZE)
+#define TOPAZ_FWT_MCAST_TQE_FF_BASE	TOPAZ_FWT_MCAST_TQE_END
+#define TOPAZ_FWT_MCAST_TQE_FF_END	(TOPAZ_FWT_MCAST_TQE_FF_BASE + TOPAZ_FWT_MCAST_TQE_FF_SIZE)
+#define TOPAZ_FWT_MCAST_END		TOPAZ_FWT_MCAST_TQE_FF_END
+
+/* Offset from DDR beginning, from which memory start to belong to Linux */
+#define CONFIG_ARC_KERNEL_MEM_BASE	TOPAZ_FWT_MCAST_END
+
+#if TOPAZ_HBM_BUF_EMAC_RX_BASE & (TOPAZ_HBM_BUF_ALIGN - 1)
+	#error EMAC Buffer start not aligned
+#endif
+#if TOPAZ_HBM_BUF_WMAC_RX_BASE & (TOPAZ_HBM_BUF_ALIGN - 1)
+	#error WMAC Buffer start not aligned
+#endif
+#define CONFIG_ARC_UBOOT_RESERVED_SPACE	(8 * 1024)
+
+/* Linux kernel u-boot image start address, for uncompressed images */
+#define CONFIG_ARC_KERNEL_BOOT_BASE	ROUNDUP(CONFIG_ARC_KERNEL_MEM_BASE, \
+						CONFIG_ARC_KERNEL_PAGE_SIZE)
+/* Linux kernel image start */
+#define CONFIG_ARC_KERNEL_BASE		(CONFIG_ARC_KERNEL_BOOT_BASE + CONFIG_ARC_UBOOT_RESERVED_SPACE)
+#define CONFIG_ARC_KERNEL_MAX_SIZE	(RUBY_MAX_DRAM_SIZE - CONFIG_ARC_KERNEL_MEM_BASE)
+#define CONFIG_ARC_KERNEL_MIN_SIZE	(RUBY_MIN_DRAM_SIZE - CONFIG_ARC_KERNEL_MEM_BASE)
+
+/* AuC tightly coupled memory specification */
+#define TOPAZ_AUC_IMEM_ADDR		0xE5000000
+#define TOPAZ_AUC_IMEM_SIZE		(32 * 1024)
+/* BBIC4 RevB AuC DMEM bottom 4KB: 0xE510_0000 to 0xE510_0FFF is aliased with Wmac1 TCM 0xE514_0000
+ * exclude the bottom 4K from DMEM, and reduce the size from 16KB to 12KB
+ */
+#define TOPAZ_AUC_DMEM_ADDR		0xE5101000
+#define TOPAZ_AUC_DMEM_SIZE		(12 * 1024)
+#define TOPAZ_REVB_DMEM_SIZE_RESERVED	(4 *1024)
+/***************/
+
+/* Utility functions */
+#ifndef __ASSEMBLY__
+
+	#if defined(__CHECKER__)
+		#define __sram_text
+		#define __sram_data
+	#elif defined(__GNUC__)
+		/*GCC*/
+		#if defined(CONFIG_ARCH_RUBY_NUMA) && defined(__KERNEL__) && defined(__linux__)
+			/* Kernel is compiled with -mlong-calls option, so we can make calls between code fragments placed in different memories */
+			#define __sram_text_sect_name	".sram.text"
+			#define __sram_data_sect_name	".sram.data"
+			#define __sram_text
+			#define __sram_data
+		#else
+			#define __sram_text_sect_name	".text"
+			#define __sram_data_sect_name	".data"
+			#define __sram_text
+			#define __sram_data
+		#endif
+	#else
+		#pragma Offwarn(428)
+	#endif
+
+	#define __in_mem_range_sram(addr)		__in_mem_range(addr, RUBY_SRAM_BEGIN, RUBY_SRAM_SIZE)
+	#define __in_mem_range_sram_nocache(addr)	__in_mem_range(addr, RUBY_SRAM_NOCACHE_BEGIN, RUBY_SRAM_SIZE)
+	#define __in_mem_range_dram(addr)		__in_mem_range(addr, RUBY_DRAM_BEGIN, RUBY_MAX_DRAM_SIZE)
+	#define __in_mem_range_dram_nocache(addr)	__in_mem_range(addr, RUBY_DRAM_NOCACHE_BEGIN, RUBY_MAX_DRAM_SIZE)
+	RUBY_INLINE int is_valid_mem_addr(unsigned long addr)
+	{
+		if (__in_mem_range(addr, RUBY_SRAM_BEGIN, RUBY_SRAM_SIZE)) {
+			return 1;
+		} else if (__in_mem_range(addr, RUBY_DRAM_BEGIN, RUBY_MAX_DRAM_SIZE)) {
+			return 1;
+		}
+		return 0;
+	}
+
+	#if TOPAZ_MMAP_UNIFIED
+		RUBY_WEAK(virt_to_nocache) void* virt_to_nocache(const void *addr)
+		{
+			unsigned long ret = (unsigned long)addr;
+			if (__in_mem_range(ret, RUBY_SRAM_BEGIN, RUBY_SRAM_SIZE)) {
+				ret = ret - RUBY_SRAM_BEGIN + RUBY_SRAM_NOCACHE_BEGIN;
+			} else if (__in_mem_range(ret, RUBY_DRAM_BEGIN, RUBY_MAX_DRAM_SIZE)) {
+				ret = ret - RUBY_DRAM_BEGIN + RUBY_DRAM_NOCACHE_BEGIN;
+			} else if (ret < RUBY_HARDWARE_BEGIN) {
+				ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
+			}
+			return (void*)ret;
+		}
+		RUBY_WEAK(nocache_to_virt) void* nocache_to_virt(const void *addr)
+		{
+			unsigned long ret = (unsigned long)addr;
+			if (__in_mem_range(ret, RUBY_SRAM_NOCACHE_BEGIN, RUBY_SRAM_SIZE)) {
+				ret = ret - RUBY_SRAM_NOCACHE_BEGIN + RUBY_SRAM_BEGIN;
+			} else if (__in_mem_range(ret, RUBY_DRAM_NOCACHE_BEGIN, RUBY_MAX_DRAM_SIZE)) {
+				ret = ret - RUBY_DRAM_NOCACHE_BEGIN + RUBY_DRAM_BEGIN;
+			} else if (ret < RUBY_HARDWARE_BEGIN) {
+				ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
+			}
+			return (void*)ret;
+		}
+	#endif
+
+	#if RUBY_MUC_TLB_ENABLE
+		#if TOPAZ_MMAP_UNIFIED
+#define is_muc_nocached_address(addr) \
+	(__in_mem_range((unsigned long )addr, RUBY_SRAM_NOCACHE_BEGIN, RUBY_SRAM_SIZE) || \
+	 (__in_mem_range((unsigned long )addr, RUBY_DRAM_NOCACHE_BEGIN, RUBY_MAX_DRAM_SIZE)))
+#define is_muc_cached_address(addr) \
+	((unsigned long)addr > RUBY_DRAM_BEGIN) && ((unsigned long)addr < RUBY_HARDWARE_BEGIN)
+			#define muc_to_nocache virt_to_nocache
+			#define nocache_to_muc nocache_to_virt
+		#else
+#define is_muc_nocached_address(addr) \
+	((muc_to_nocache(addr) == RUBY_BAD_VIRT_ADDR) ? 1 : 0)
+#define is_muc_cached_address(addr) \
+	((nocache_to_muc(addr) == RUBY_BAD_VIRT_ADDR) ? 1 : 0)
+
+			RUBY_WEAK(muc_to_nocache) void* muc_to_nocache(const void *addr)
+			{
+				unsigned long ret = (unsigned long)addr;
+				if (__in_mem_range(ret, RUBY_SRAM_NOFLIP_BEGIN, RUBY_SRAM_SIZE)) {
+					ret = ret - RUBY_SRAM_NOFLIP_BEGIN + RUBY_SRAM_NOFLIP_NOCACHE_BEGIN;
+				} else if (__in_mem_range(ret, RUBY_DRAM_NOFLIP_BEGIN, RUBY_MAX_DRAM_SIZE)) {
+					ret = ret - RUBY_DRAM_NOFLIP_BEGIN + RUBY_DRAM_NOFLIP_NOCACHE_BEGIN;
+				} else if (ret < RUBY_HARDWARE_BEGIN) {
+					ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
+				}
+				return (void*)ret;
+			}
+			RUBY_WEAK(nocache_to_muc) void* nocache_to_muc(const void *addr)
+			{
+				unsigned long ret = (unsigned long)addr;
+				if (__in_mem_range(ret, RUBY_SRAM_NOFLIP_NOCACHE_BEGIN, RUBY_SRAM_SIZE)) {
+					ret = ret - RUBY_SRAM_NOFLIP_NOCACHE_BEGIN + RUBY_SRAM_NOFLIP_BEGIN;
+				} else if (__in_mem_range(ret, RUBY_DRAM_NOFLIP_NOCACHE_BEGIN, RUBY_MAX_DRAM_SIZE)) {
+					ret = ret - RUBY_DRAM_NOFLIP_NOCACHE_BEGIN + RUBY_DRAM_NOFLIP_BEGIN;
+				} else if (ret < RUBY_HARDWARE_BEGIN) {
+					ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
+				}
+				return (void*)ret;
+			}
+		#endif
+		#ifndef MUC_BUILD
+			RUBY_INLINE unsigned long muc_to_lhost(unsigned long addr)
+			{
+				void *tmp = nocache_to_muc((void*)addr);
+				if (tmp != RUBY_BAD_VIRT_ADDR) {
+					addr = (unsigned long)tmp;
+				}
+				return (unsigned long)bus_to_virt(addr);
+			}
+		#endif // #ifndef MUC_BUILD
+	#else
+		#define muc_to_nocache(x) ((void*)(x))
+		#define nocache_to_muc(x) ((void*)(x))
+		#ifndef MUC_BUILD
+			#define muc_to_lhost(x)   ((unsigned long)bus_to_virt((unsigned long)(x)))
+		#endif // #ifndef MUC_BUILD
+	#endif // #if RUBY_MUC_TLB_ENABLE
+
+	#ifndef __GNUC__
+		/*MCC*/
+		#pragma Popwarn()
+	#endif
+
+#endif // #ifndef __ASSEMBLY__
+
+/*
+ * "Write memory barrier" instruction emulation.
+ * Ruby platform has complex net of connected buses.
+ * Write transactions are buffered.
+ * qtn_wmb() guarantees that all issued earlier and pending writes
+ * to system controller, to SRAM and to DDR are completed
+ * before qtn_wmb() is finished.
+ * For complete safety Linux's wmb() should be defined
+ * through qtn_wmb(), but I afraid it would kill performance.
+ */
+#ifndef __ASSEMBLY__
+	#define RUBY_SYS_CTL_SAFE_READ_REGISTER 0xE0000000
+	#if defined(__GNUC__) && defined(__i386__)
+		#define qtn_wmb()		do {} while(0)
+		static inline unsigned long _qtn_addr_wmb(unsigned long *addr) { return *addr; }
+		#define qtn_addr_wmb(addr)	_qtn_addr_wmb((unsigned long *)(addr))
+		#define qtn_pipeline_drain()	do {} while(0)
+	#elif defined(__GNUC__)
+		/*GCC*/
+		#if defined(__arc__)
+			#define qtn_wmb() \
+			({ \
+				unsigned long temp; \
+				__asm__ __volatile__ ( \
+					"ld.di %0, [%1]\n\t" \
+					"ld.di %0, [%2]\n\t" \
+					"ld.di %0, [%3]\n\t" \
+					"sync\n\t" \
+					: "=r"(temp) \
+					: "i"(RUBY_DRAM_BEGIN + CONFIG_ARC_KERNEL_MEM_BASE), "i"(RUBY_SRAM_BEGIN + CONFIG_ARC_KERNEL_SRAM_B1_BASE), "i"(RUBY_SYS_CTL_SAFE_READ_REGISTER) \
+					: "memory"); \
+			})
+			#define qtn_addr_wmb(addr) \
+			({ \
+				unsigned long temp; \
+				__asm__ __volatile__ ( \
+					"ld.di %0, [%1]\n\t" \
+					"sync\n\t" \
+					: "=r"(temp) \
+					: "r"(addr) \
+					: "memory"); \
+				temp; \
+			})
+			#define qtn_pipeline_drain() \
+			({ \
+				__asm__ __volatile__ ( \
+					"sync\n\t" \
+					: : : "memory"); \
+			})
+		#else
+			#define qtn_wmb()
+			#define qtn_addr_wmb(addr)	*((volatile uint32_t*)addr)
+			#define qtn_pipeline_drain()
+		#endif
+	#else
+		/*MCC*/
+		#if _ARCVER >= 0x31/*ARC7*/
+			#define _qtn_pipeline_drain() \
+				sync
+		#else
+			#define _qtn_pipeline_drain() \
+				nop_s; nop_s; nop_s
+		#endif
+		_Asm void qtn_wmb(void)
+		{
+			/*r12 is temporary register, so we can use it inside this function freely*/
+			ld.di %r12, [RUBY_DRAM_BEGIN + CONFIG_ARC_MUC_BASE]
+			ld.di %r12, [RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_SRAM_B1_BASE]
+			ld.di %r12, [RUBY_SYS_CTL_SAFE_READ_REGISTER]
+			_qtn_pipeline_drain()
+		}
+		_Asm u_int32_t qtn_addr_wmb(unsigned long addr)
+		{
+			%reg addr;
+			ld.di %r0, [addr]
+			_qtn_pipeline_drain()
+		}
+		_Asm void qtn_pipeline_drain(void)
+		{
+			_qtn_pipeline_drain()
+		}
+	#endif
+#endif
+
+/*
+ * Problem - writing to first half of cache way trash second half.
+ * Idea is to lock second half.
+ * Need make sure that invalidation does not unlock these lines (whole
+ * cache invalidation unlocks), or need to re-lock lines back.
+ * Also side effect - half of lines will be cached, half - not.
+ * So may need to shuffle data to make hot data cacheable.
+ */
+#define TOPAZ_CACHE_WAR_OFFSET	2048
+#ifndef __ASSEMBLY__
+#ifdef __GNUC__
+RUBY_INLINE void qtn_cache_topaz_war_dcache_lock(unsigned long aux_reg, unsigned long val)
+{
+	unsigned long addr;
+	unsigned long way_iter;
+	unsigned long line_iter;
+
+	asm volatile (
+		"	sr	%4, [%3]\n"
+		"	mov	%0, 0xA0000000\n"
+		"	mov	%1, 0\n"
+		"1:	add	%0, %0, 2048\n"
+		"	mov	%2, 0\n"
+		"2:	sr	%0, [0x49]\n"
+		"	add	%0, %0, 32\n"
+		"	add	%2, %2, 1\n"
+		"	cmp	%2, 64\n"
+		"	bne	2b\n"
+		"	add	%1, %1, 1\n"
+		"	cmp	%1, 4\n"
+		"	bne	1b\n"
+		: "=r"(addr), "=r"(way_iter), "=r"(line_iter)
+		: "r"(aux_reg), "r"(val)
+	);
+}
+#else
+_Inline _Asm  void qtn_cache_topaz_war_dcache_lock(unsigned long aux_reg, unsigned long val)
+{
+	% reg aux_reg, reg val;
+
+	sr	val, [aux_reg]
+	mov	%r0, 0xA0000000
+	mov	%r1, 0
+	1:	add	%r0, %r0, 2048
+	mov	%r2, 0
+	2:	sr	%r0, [0x49]
+	add	%r0, %r0, 32
+	add	%r2, %r2, 1
+	cmp	%r2, 64
+	bne	2b
+	add	%r1, %r1, 1
+	cmp	%r1, 4
+	bne	1b
+}
+#endif // #ifdef __GNUC__
+#endif // #ifndef __ASSEMBLY__
+
+#endif // #ifndef __RUBY_MEM_H
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_partitions.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_partitions.h
new file mode 100644
index 0000000..1af2bc5
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_partitions.h
@@ -0,0 +1,102 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Ruby platform.
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef __RUBY_PARTITIONS_H
+#define __RUBY_PARTITIONS_H
+
+#define F64K_UBOOT_PIGGY_PARTITION_SIZE	0x5000
+
+#if defined(FLASH_SUPPORT_256KB)
+	#define F256K_UBOOT_PIGGY_PARTITION_SIZE 0x5000
+	#define F256K_ENV_PARTITION_SIZE	0x18000
+#endif
+#define F64K_ENV_PARTITION_SIZE         0x6000
+
+#define UBOOT_TEXT_PARTITION_SIZE	0x20000
+#define UBOOT_TINY_TEXT_PARTITION_SIZE	UBOOT_TEXT_PARTITION_SIZE
+#if defined(FLASH_SUPPORT_256KB)
+	#define UBOOT_ENV_PARTITION_SIZE	0x40000
+#else
+	#define UBOOT_ENV_PARTITION_SIZE        0x10000
+#endif
+#define UBOOT_ENV_PARTITION_ADDR	UBOOT_TEXT_PARTITION_SIZE
+
+/*
+ * Make sure CONFIG_ENV_SIZE in file carve_env_partition.sh is the same value
+ */
+#if defined(FLASH_SUPPORT_256KB)
+	#define BOOT_CFG_SIZE			(96 * 1024)
+#elif defined(FLASH_SUPPORT_64KB)
+	#define BOOT_CFG_SIZE			(24 * 1024)
+#else
+	#define BOOT_CFG_SIZE			(64 * 1024)
+#endif
+
+#if defined(FLASH_SUPPORT_256KB)
+	#define BOOT_CFG_BASE_SIZE	(24 * 1024)
+#else
+	#define BOOT_CFG_BASE_SIZE      (16 * 1024)
+#endif
+
+#define BOOT_CFG_DATA_SIZE		(BOOT_CFG_SIZE - sizeof(u32))
+#define BOOT_CFG_DEF_START		(0x1000)
+
+#define RUBY_MIN_DATA_PARTITION_SIZE	(512 * 1024)
+#define IMAGES_START_ADDR		(UBOOT_ENV_PARTITION_ADDR + UBOOT_ENV_PARTITION_SIZE * 2)
+#define NON_IMAGE_SIZE			(UBOOT_TEXT_PARTITION_SIZE +		\
+						UBOOT_ENV_PARTITION_SIZE * 2 +	\
+						RUBY_MIN_DATA_PARTITION_SIZE)
+#define TINY_CFG_NON_IMAGE_SIZE		(UBOOT_TINY_TEXT_PARTITION_SIZE +	\
+						UBOOT_ENV_PARTITION_SIZE * 2 +	\
+						UBOOT_TEXT_PARTITION_SIZE * 2 +	\
+						RUBY_MIN_DATA_PARTITION_SIZE)
+
+#define IMG_SIZE_8M_FLASH_2_IMG		((FLASH_8MB - NON_IMAGE_SIZE) / 2)
+#define IMG_SIZE_8M_FLASH_1_IMG		((FLASH_8MB - NON_IMAGE_SIZE) / 1)
+#define IMG_SIZE_16M_FLASH_2_IMG	((FLASH_16MB - NON_IMAGE_SIZE) / 2)
+#define IMG_SIZE_16M_FLASH_1_IMG	((FLASH_16MB - NON_IMAGE_SIZE) / 1)
+
+#define TINY_CFG_SIZE_16M_FLASH_1_IMG	(FLASH_16MB - TINY_CFG_NON_IMAGE_SIZE)
+#define UBOOT_SAFE_PARTITION_ADDR	(IMAGES_START_ADDR + TINY_CFG_SIZE_16M_FLASH_1_IMG)
+#define UBOOT_LIVE_PARTITION_ADDR	(UBOOT_SAFE_PARTITION_ADDR + UBOOT_TEXT_PARTITION_SIZE)
+
+#define MTD_PARTNAME_UBOOT_BIN		"uboot"
+#define MTD_PARTNAME_UBOOT_TINY_BIN	"uboot_tiny"
+#define MTD_PARTNAME_UBOOT_SAFETY	"uboot_safety"
+#define MTD_PARTNAME_UBOOT_LIVE		"uboot_live"
+#define MTD_PARTNAME_UBOOT_ENV		"uboot_env"
+#define MTD_PARTNAME_UBOOT_ENV_BAK	"uboot_env_bak"
+#define MTD_PARTNAME_LINUX_SAFETY	"linux_safety"
+#define MTD_PARTNAME_LINUX_LIVE		"linux_live"
+#define MTD_PARTNAME_DATA		"data"
+#define MTD_PARTNAME_EXTEND		"extend"
+
+#define IMG_SIZE_LIMIT_PLATFORM	IMG_SIZE_16M_FLASH_2_IMG
+
+#endif // #ifndef __RUBY_PARTITIONS_H
+
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pcie_bda.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pcie_bda.h
new file mode 100644
index 0000000..89091ae
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pcie_bda.h
@@ -0,0 +1,136 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Ruby PCI Express Boot Data Area
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef RUBY_PCIE_BDA_H
+#define RUBY_PCIE_BDA_H
+
+/* Area mapped by via the BAR visible to the host */
+#define RUBY_PCIE_BDA_ADDR		CONFIG_ARC_PCIE_BASE
+#define RUBY_PCIE_BDA_SIZE		CONFIG_ARC_PCIE_SIZE
+
+#define RUBY_BDA_VADDR			(RUBY_PCIE_BDA_ADDR + 0x80000000)
+
+
+#define QDPC_PCIE_BDA_VERSION	0x1000
+
+#define QDPC_BDA_PCIE_INIT		0x01
+#define QDPC_BDA_PCIE_RDY		0x02
+#define QDPC_BDA_FW_LOAD_RDY		0x03
+#define QDPC_BDA_FW_LOAD_DONE		0x04
+#define QDPC_BDA_FW_START		0x05
+#define QDPC_BDA_FW_RUN			0x06
+#define QDPC_BDA_FW_HOST_RDY		0x07
+#define QDPC_BDA_FW_TARGET_RDY		0x11
+#define QDPC_BDA_FW_TARGET_BOOT		0x12
+#define QDPC_BDA_FW_FLASH_BOOT		0x13
+#define QDPC_BDA_FW_HOST_LOAD		0x08
+#define QDPC_BDA_FW_BLOCK_DONE		0x09
+#define QDPC_BDA_FW_BLOCK_RDY		0x0A
+#define QDPC_BDA_FW_EP_RDY		0x0B
+#define QDPC_BDA_FW_BLOCK_END		0x0C
+#define QDPC_BDA_FW_CONFIG		0x0D
+#define QDPC_BDA_FW_RUNNING		0x0E
+
+#define QDPC_BDA_PCIE_FAIL		0x82
+#define QDPC_BDA_FW_LOAD_FAIL		0x85
+
+
+#define PCIE_BDA_RCMODE                 BIT(1)
+#define PCIE_BDA_MSI                    BIT(2)
+#define PCIE_BDA_BAR64                  BIT(3)
+#define PCIE_BDA_FLASH_PRESENT          BIT(4)  /* Tell the Host if EP have flash contain firmware */
+#define PCIE_BDA_FLASH_BOOT             BIT(5)  /* Tell TARGET to boot from flash */
+#define PCIE_BDA_XMIT_UBOOT             BIT(6) /* EP ask for u-boot.bin */
+#define PCIE_BDA_TARGET_FBOOT_ERR       BIT(8)  /* TARGET flash boot failed */
+#define PCIE_BDA_TARGET_FWLOAD_ERR      BIT(9)  /* TARGET firmware load failed */
+#define PCIE_BDA_HOST_NOFW_ERR          BIT(12) /* Host not find any firmware */
+#define PCIE_BDA_HOST_MEMALLOC_ERR      BIT(13) /* Host malloc firmware download memory block failed */
+#define PCIE_BDA_HOST_MEMMAP_ERR        BIT(14) /* Host pci map download memory block failed */
+#define PCIE_BDA_VER(x)                 (((x) >> 4) & 0xFF)
+#define PCIE_BDA_ERROR_MASK             0xFF00  /* take the second 8 bits as error flag */
+
+#define PCIE_DMA_OFFSET_ERROR		0xFFFF
+#define PCIE_DMA_OFFSET_ERROR_MASK	0xFFFF
+
+#define PCIE_BDA_NAMELEN		32
+
+#define QDPC_PCI_ENDIAN_DETECT_DATA	0x12345678
+#define QDPC_PCI_ENDIAN_REVERSE_DATA	0x78563412
+
+#define QDPC_PCI_ENDIAN_VALID_STATUS	0x3c3c3c3c
+#define QDPC_PCI_ENDIAN_INVALID_STATUS	0
+
+#define QDPC_PCI_LITTLE_ENDIAN		0
+#define	QDPC_PCI_BIG_ENDIAN		0xffffffff
+
+#define QDPC_SCHED_TIMEOUT		(HZ / 20)
+
+#define PCIE_DMA_ISSUE_LOG_NUM		128
+
+#define PCIE_RC_TX_QUEUE_LEN		256
+#define PCIE_TX_VALID_PKT		0x80000000
+#define PCIE_PKT_LEN_MASK		0xffff
+
+struct vmac_pkt_info {
+	uint32_t addr;
+	uint32_t info;
+};
+
+typedef struct qdpc_pcie_bda {
+	uint16_t	bda_len;			/* Size of BDA block */
+	uint16_t	bda_version;			/* BDA version */
+	uint32_t	bda_bootstate;			/* Boot state of device */
+	uint32_t	bda_dma_mask;			/* Number of addressable DMA bits */
+	uint32_t	bda_dma_offset;			/* HW specific offset for DMA engine */
+	uint32_t	bda_flags;
+	uint32_t	bda_img;			/* Current load image block */
+	uint32_t	bda_img_size;			/* Current load image block size */
+	uint32_t	bda_ep2h_irqstatus;		/* Added here to allow boot loader to use irqs if desired */
+	uint32_t	bda_h2ep_irqstatus;		/* Added here to allow boot loader to use irqs if desired */
+	uint32_t	bda_msi_addr;
+	uint8_t		reserved1[56];			/* Reserve 56 bytes to make it compatible with older version */
+	uint32_t	bda_flashsz;
+	char		bda_boardname[PCIE_BDA_NAMELEN];
+	uint32_t	bda_pci_pre_status;		/* PCI endian check previous status */
+	uint32_t	bda_pci_endian;			/* Check pci memory endian format */
+	uint32_t	bda_pci_post_status;		/* PCI endian check post status */
+	int32_t		bda_h2ep_txd_budget;		/* txdone replenish budget for ep */
+	int32_t		bda_ep2h_txd_budget;		/* txdone replenish budget for host */
+	uint32_t	bda_rc_rx_bd_base;		/* EP rx buffer descriptors base address */
+	uint32_t	bda_rc_rx_bd_num;
+	uint32_t	bda_rc_tx_bd_base;		/* RC rx buffer descriptors base address */
+	uint32_t	bda_rc_tx_bd_num;
+	uint8_t		bda_ep_link_state;
+	uint8_t		bda_rc_link_state;
+	uint8_t		bda_rc_msi_enabled;
+	uint8_t		reserved2;
+        uint32_t        bda_ep_next_pkt;		/* A pointer to RC's memory specifying next packet to be handled by EP */
+	struct vmac_pkt_info request[PCIE_RC_TX_QUEUE_LEN];
+} qdpc_pcie_bda_t;
+
+#endif
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_platform.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_platform.h
new file mode 100644
index 0000000..4cc2ebb
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_platform.h
@@ -0,0 +1,831 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Ruby platform.
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef __RUBY_PLATFORM_H
+#define __RUBY_PLATFORM_H
+
+#include "ruby_config.h"
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* Common                                                                    */
+/*****************************************************************************/
+#define RUBY_BIT(x)			(1 << (x))
+/*****************************************************************************/
+/* DRAM registers                                                            */
+/*****************************************************************************/
+#define RUBY_DDR_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xF6000000, 0xE40E0000)
+#define RUBY_DDR_CONTROL		(RUBY_DDR_BASE_ADDR + 0x0)
+#define RUBY_DDR_CONTROL_POWERDOWN_EN	RUBY_BIT(1)
+#define RUBY_DDR_SETTLE_US		(4)
+/*****************************************************************************/
+/*****************************************************************************/
+/* GPIO constants                                                            */
+/*****************************************************************************/
+#define RUBY_GPIO_MAX			(32)
+#define RUBY_GPIO_MODE1_MAX		(11)
+#define RUBY_GPIO_MODE2_MAX		(22)
+#define RUBY_GPIO_IRQ_MAX		(16)
+/*****************************************************************************/
+/* GPIO registers                                                            */
+/*****************************************************************************/
+#define RUBY_GPIO_REGS_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xF1000000, 0xE4090000)
+#define RUBY_GPIO_INPUT			(RUBY_GPIO_REGS_ADDR + 0x00)
+#define GPIO_INPUT			RUBY_GPIO_INPUT
+#define RUBY_GPIO_OMASK			(RUBY_GPIO_REGS_ADDR + 0x04)
+#define GPIO_OUTPUT_MASK		RUBY_GPIO_OMASK
+#define RUBY_GPIO_OUTPUT		(RUBY_GPIO_REGS_ADDR + 0x08)
+#define GPIO_OUTPUT			RUBY_GPIO_OUTPUT
+#define RUBY_GPIO_MODE1			(RUBY_GPIO_REGS_ADDR + 0x0c)
+#define GPIO_MODE1			RUBY_GPIO_MODE1
+#define RUBY_GPIO_MODE2			(RUBY_GPIO_REGS_ADDR + 0x10)
+#define GPIO_MODE2			RUBY_GPIO_MODE2
+#define RUBY_GPIO_AFSEL			(RUBY_GPIO_REGS_ADDR + 0x14)
+#define GPIO_ALTFN			RUBY_GPIO_AFSEL
+#define RUBY_GPIO_DEF			(RUBY_GPIO_REGS_ADDR + 0x18)
+#define	RUBY_GPIO1_PWM0			(RUBY_GPIO_REGS_ADDR + 0x20) /* AFSEL: UART1 (input) */
+#define	RUBY_GPIO9_PWM2			(RUBY_GPIO_REGS_ADDR + 0x28) /* AFSEL: UART1 (output) */
+/*****************************************************************************/
+/* GPIO pins                                                                 */
+/*****************************************************************************/
+#define RUBY_GPIO_PIN0			(0)
+#define RUBY_GPIO_PIN1			(1)
+#define RUBY_GPIO_PIN2			(2)
+#define RUBY_GPIO_PIN3			(3)
+#define RUBY_GPIO_PIN4			(4)
+#define RUBY_GPIO_PIN5			(5)
+#define RUBY_GPIO_PIN6			(6)
+#define RUBY_GPIO_PIN7			(7)
+#define RUBY_GPIO_PIN8			(8)
+#define RUBY_GPIO_PIN9			(9)
+#define RUBY_GPIO_PIN10			(10)
+#define RUBY_GPIO_PIN11			(11)
+#define RUBY_GPIO_PIN12			(12)
+#define RUBY_GPIO_PIN13			(13)
+#define RUBY_GPIO_PIN14			(14)
+#define RUBY_GPIO_PIN15			(15)
+#define RUBY_GPIO_PIN16			(16)
+#define RUBY_GPIO_PIN17			(17)
+
+#define RUBY_GPIO_UART0_SI		(0)
+#define RUBY_GPIO_UART0_SO		(8)
+#define RUBY_GPIO_UART1_SI		(1)
+#define RUBY_GPIO_UART1_SO		(9)
+/* these are for spi1, bga has dedicated spi0 pins */
+#define RUBY_GPIO_SPI_MISO		(2)
+#define RUBY_GPIO_SPI_SCK		(6)
+#define RUBY_GPIO_SPI_MOSI		(5)
+#define RUBY_GPIO_SPI_nCS		(4)
+
+#define RUBY_GPIO_LNA_TOGGLE		(7)
+
+#define RUBY_GPIO_RTD			(3)	/* Reset to default */
+#define RUBY_GPIO_WPS			(7)
+#define RUBY_GPIO_I2C_SCL		(10)
+#define RUBY_GPIO_I2C_SDA		(11)
+#define RUBY_GPIO_WLAN_DISABLE		(12)
+#define RUBY_GPIO_LED1			(13)
+#define RUBY_GPIO_RFIC_INTR		(14)
+
+#ifndef TOPAZ_AMBER_IP
+#define RUBY_GPIO_RFIC_RESET		(15)
+#else
+#define RUBY_GPIO_RFIC_RESET		(10)
+#endif
+
+#define RUBY_GPIO_LED2			(16)
+
+/*****************************************************************************/
+/* GPIO function constants                                                   */
+/*****************************************************************************/
+#define RUBY_GPIO_MODE_INPUT		(0)
+#define GPIO_MODE_INPUT				RUBY_GPIO_MODE_INPUT
+#define RUBY_GPIO_MODE_OUTPUT		(1)
+#define GPIO_MODE_OUTPUT			RUBY_GPIO_MODE_OUTPUT
+#define RUBY_GPIO_MODE_OPEN_SOURCE	(2)
+#define RUBY_GPIO_MODE_OPEN_DRAIN	(3)
+#define RUBY_GPIO_ALT_INPUT			(4)
+#define RUBY_GPIO_ALT_OUTPUT		(5)
+#define RUBY_GPIO_ALT_OPEN_SOURCE	(6)
+#define RUBY_GPIO_ALT_OPEN_DRAIN	(7)
+#define	GPIO_PIN(x)			(x)
+#define	GPIO_OUTPUT_LO			(0)
+#define	GPIO_OUTPUT_HI			(1)
+/*****************************************************************************/
+/*****************************************************************************/
+/* UART FIFO size                                                            */
+/*****************************************************************************/
+#define RUBY_UART_FIFO_SIZE		(16)
+/*****************************************************************************/
+/* UART register addresses                                                   */
+/*****************************************************************************/
+#define RUBY_UART0_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xF0000000, 0xE4080000)
+#define RUBY_UART0_RBR_THR_DLL		(RUBY_UART0_BASE_ADDR + 0x00)
+#define RUBY_UART0_DLH_IER		(RUBY_UART0_BASE_ADDR + 0x04)
+#define RUBY_UART0_IIR_FCR_LCR		(RUBY_UART0_BASE_ADDR + 0x08)
+#define RUBY_UART0_LCR			(RUBY_UART0_BASE_ADDR + 0x0c)
+#define RUBY_UART0_MCR			(RUBY_UART0_BASE_ADDR + 0x10)
+#define RUBY_UART0_LSR			(RUBY_UART0_BASE_ADDR + 0x14)
+#define RUBY_UART0_MSR			(RUBY_UART0_BASE_ADDR + 0x18)
+#define RUBY_UART0_SCR			(RUBY_UART0_BASE_ADDR + 0x1c)
+#define RUBY_UART0_LPDLL		(RUBY_UART0_BASE_ADDR + 0x20)
+#define RUBY_UART0_LPDLH		(RUBY_UART0_BASE_ADDR + 0x24)
+#define RUBY_UART0_SRBR			(RUBY_UART0_BASE_ADDR + 0x30)
+#define RUBY_UART0_STHR			(RUBY_UART0_BASE_ADDR + 0x34)
+#define RUBY_UART0_FAR			(RUBY_UART0_BASE_ADDR + 0x70)
+#define RUBY_UART0_TFR			(RUBY_UART0_BASE_ADDR + 0x74)
+#define RUBY_UART0_RFW			(RUBY_UART0_BASE_ADDR + 0x78)
+#define RUBY_UART0_USR			(RUBY_UART0_BASE_ADDR + 0x7c)
+#define RUBY_UART0_TFL			(RUBY_UART0_BASE_ADDR + 0x80)
+#define RUBY_UART0_RFL			(RUBY_UART0_BASE_ADDR + 0x84)
+#define RUBY_UART0_SRR			(RUBY_UART0_BASE_ADDR + 0x88)
+#define RUBY_UART0_SRTS			(RUBY_UART0_BASE_ADDR + 0x8c)
+#define RUBY_UART0_SBCR			(RUBY_UART0_BASE_ADDR + 0x90)
+#define RUBY_UART0_SDMAM		(RUBY_UART0_BASE_ADDR + 0x94)
+#define RUBY_UART0_SFE			(RUBY_UART0_BASE_ADDR + 0x98)
+#define RUBY_UART0_SRT			(RUBY_UART0_BASE_ADDR + 0x9c)
+#define RUBY_UART0_STET			(RUBY_UART0_BASE_ADDR + 0xa0)
+#define RUBY_UART0_HTX			(RUBY_UART0_BASE_ADDR + 0xa4)
+#define RUBY_UART0_DMASA		(RUBY_UART0_BASE_ADDR + 0xa8)
+#define RUBY_UART0_CPR			(RUBY_UART0_BASE_ADDR + 0xf4)
+#define RUBY_UART0_UCV			(RUBY_UART0_BASE_ADDR + 0xf8)
+#define RUBY_UART0_CTR			(RUBY_UART0_BASE_ADDR + 0xfc)
+
+#define RUBY_UART1_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xF5000000, 0xE40D0000)
+#define RUBY_UART1_RBR_THR_DLL		(RUBY_UART1_BASE_ADDR + 0x00)
+#define RUBY_UART1_DLH_IER		(RUBY_UART1_BASE_ADDR + 0x04)
+#define RUBY_UART1_IIR_FCR_LCR		(RUBY_UART1_BASE_ADDR + 0x08)
+#define RUBY_UART1_LCR			(RUBY_UART1_BASE_ADDR + 0x0c)
+#define RUBY_UART1_MCR			(RUBY_UART1_BASE_ADDR + 0x10)
+#define RUBY_UART1_LSR			(RUBY_UART1_BASE_ADDR + 0x14)
+#define RUBY_UART1_MSR			(RUBY_UART1_BASE_ADDR + 0x18)
+#define RUBY_UART1_SCR			(RUBY_UART1_BASE_ADDR + 0x1c)
+#define RUBY_UART1_LPDLL		(RUBY_UART1_BASE_ADDR + 0x20)
+#define RUBY_UART1_LPDLH		(RUBY_UART1_BASE_ADDR + 0x24)
+#define RUBY_UART1_SRBR			(RUBY_UART1_BASE_ADDR + 0x30)
+#define RUBY_UART1_STHR			(RUBY_UART1_BASE_ADDR + 0x34)
+#define RUBY_UART1_FAR			(RUBY_UART1_BASE_ADDR + 0x70)
+#define RUBY_UART1_TFR			(RUBY_UART1_BASE_ADDR + 0x74)
+#define RUBY_UART1_RFW			(RUBY_UART1_BASE_ADDR + 0x78)
+#define RUBY_UART1_USR			(RUBY_UART1_BASE_ADDR + 0x7c)
+#define RUBY_UART1_TFL			(RUBY_UART1_BASE_ADDR + 0x80)
+#define RUBY_UART1_RFL			(RUBY_UART1_BASE_ADDR + 0x84)
+#define RUBY_UART1_SRR			(RUBY_UART1_BASE_ADDR + 0x88)
+#define RUBY_UART1_SRTS			(RUBY_UART1_BASE_ADDR + 0x8c)
+#define RUBY_UART1_SBCR			(RUBY_UART1_BASE_ADDR + 0x90)
+#define RUBY_UART1_SDMAM		(RUBY_UART1_BASE_ADDR + 0x94)
+#define RUBY_UART1_SFE			(RUBY_UART1_BASE_ADDR + 0x98)
+#define RUBY_UART1_SRT			(RUBY_UART1_BASE_ADDR + 0x9c)
+#define RUBY_UART1_STET			(RUBY_UART1_BASE_ADDR + 0xa0)
+#define RUBY_UART1_HTX			(RUBY_UART1_BASE_ADDR + 0xa4)
+#define RUBY_UART1_DMASA		(RUBY_UART1_BASE_ADDR + 0xa8)
+#define RUBY_UART1_CPR			(RUBY_UART1_BASE_ADDR + 0xf4)
+#define RUBY_UART1_UCV			(RUBY_UART1_BASE_ADDR + 0xf8)
+#define RUBY_UART1_CTR			(RUBY_UART1_BASE_ADDR + 0xfc)
+
+/*****************************************************************************/
+/* UART Status Register - USR                                                */
+/*****************************************************************************/
+#define RUBY_USR_TX_Fifo_Empty		0x04
+#define RUBY_USR_RX_Fifo_Full		0x10
+#define RUBY_USR_TX_Fifo_nFull		0x02
+#define RUBY_USR_RX_Fifo_nEmpty		0x08
+#define RUBY_USR_Busy			0x01
+/*****************************************************************************/
+/* Line Status Register - LSR                                                */
+/*****************************************************************************/
+#define RUBY_LSR_TX_Empty		0x40
+#define RUBY_LSR_RX_Ready		0x01
+/*****************************************************************************/
+/* Line Control Register - LCR                                               */
+/*****************************************************************************/
+#define RUBY_LCR_Data_Word_Length_5	0x0
+#define RUBY_LCR_Data_Word_Length_6	0x1
+#define RUBY_LCR_Data_Word_Length_7	0x2
+#define RUBY_LCR_Data_Word_Length_8	0x3
+#define RUBY_LCR_Stop_Bit_1		0x0
+#define RUBY_LCR_Stop_Bit_2		0x4
+#define RUBY_LCR_No_Parity		0x0
+#define RUBY_LCR_Odd_Parity		0x8
+#define RUBY_LCR_Even_Parity		0x18
+#define RUBY_LCR_High_Parity		0x28
+#define RUBY_LCR_Low_Parity		0x38
+#define RUBY_LCR_Break_Disable		0x0
+#define RUBY_LCR_Break_Enable		0x40
+#define RUBY_LCR_DLAB			0x80
+/*****************************************************************************/
+/*****************************************************************************/
+/* Timer constants                                                           */
+/*****************************************************************************/
+#define RUBY_TIMER_INT_MASK		(RUBY_BIT(2))
+#define RUBY_TIMER_SINGLE		(RUBY_BIT(1))
+#define RUBY_TIMER_ENABLE		(RUBY_BIT(0))
+/*****************************************************************************/
+#define RUBY_CPU_TIMERS			(2)
+#define RUBY_NUM_TIMERS			(4)
+#define RUBY_TIMER_MUC_CCA              (3)
+#define RUBY_TIMER_FREQ                 125000000
+
+#define RUBY_TIMER_MUC_CCA_FREQ_SHIFT    2    /* shift from 1ms base */
+#define RUBY_TIMER_MUC_CCA_FREQ          (1000 << RUBY_TIMER_MUC_CCA_FREQ_SHIFT)
+#define RUBY_TIMER_MUC_CCA_LIMIT         (RUBY_TIMER_FREQ / RUBY_TIMER_MUC_CCA_FREQ)
+#define RUBY_TIMER_MUC_CCA_INTV          (1000 >> RUBY_TIMER_MUC_CCA_FREQ_SHIFT)    /* microseconds */
+#define RUBY_TIMER_MUC_CCA_CNT2MS(_v)    ((_v) >> RUBY_TIMER_MUC_CCA_FREQ_SHIFT)
+/*****************************************************************************/
+#define RUBY_TIMER_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xF3000000, 0xE40B0000)
+#define RUBY_TIMER_CHANNEL		(0x14)
+#define RUBY_TIMER_LOAD_COUNT(x)	(RUBY_TIMER_BASE_ADDR + ((x)*RUBY_TIMER_CHANNEL) + 0)
+#define RUBY_TIMER_VALUE(x)		(RUBY_TIMER_BASE_ADDR + ((x)*RUBY_TIMER_CHANNEL) + 4)
+#define RUBY_TIMER_CONTROL(x)		(RUBY_TIMER_BASE_ADDR + ((x)*RUBY_TIMER_CHANNEL) + 8)
+#define RUBY_TIMER_EOI(x)		(RUBY_TIMER_BASE_ADDR + ((x)*RUBY_TIMER_CHANNEL) + 12)
+#define RUBY_TIMER_INTSTAT(x)		(RUBY_TIMER_BASE_ADDR + ((x)*RUBY_TIMER_CHANNEL) + 16)
+/*****************************************************************************/
+#define RUBY_TIMER_GLOBAL_INT_STATUS	(RUBY_TIMER_BASE_ADDR + 0xa0)
+#define RUBY_TIMER_GLOBAL_EOI		(RUBY_TIMER_BASE_ADDR + 0xa4)
+#define RUBY_TIMER_GLOBAL_RAW_STATUS	(RUBY_TIMER_BASE_ADDR + 0xa8)
+#define RUBY_TIMER_GLOBAL_COMP_VER	(RUBY_TIMER_BASE_ADDR + 0xac)
+/*****************************************************************************/
+#define RUBY_TIMER_ORINT_EN(x)		(1 << (18 + (x)))
+/*****************************************************************************/
+/*****************************************************************************/
+/* ENET registers                                                            */
+/*****************************************************************************/
+#define RUBY_ENET0_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xED000000, 0xE4070000)
+#define RUBY_ENET1_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xE8000000, 0xE4040000)
+/*****************************************************************************/
+/*****************************************************************************/
+/* System controller registers                                               */
+/*****************************************************************************/
+#define RUBY_SYS_CTL_BASE_ADDR_NOMAP		0xE0000000
+#define RUBY_SYS_CTL_BASE_ADDR			TOPAZ_ALIAS_MAP_SWITCH(RUBY_SYS_CTL_BASE_ADDR_NOMAP, 0xE4000000)
+#define RUBY_SYS_CTL_CPU_VEC_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x00)
+#define RUBY_SYS_CTL_CPU_VEC			(RUBY_SYS_CTL_BASE_ADDR + 0x04)
+#define RUBY_SYS_CTL_MASK			(RUBY_SYS_CTL_BASE_ADDR + 0x08)
+#define RUBY_SYS_CTL_CTRL			(RUBY_SYS_CTL_BASE_ADDR + 0x0c)
+#define RUBY_SYS_CTL_RESET_CAUSE		(RUBY_SYS_CTL_BASE_ADDR + 0x10)
+#define RUBY_SYS_CTL_CSR			(RUBY_SYS_CTL_BASE_ADDR + 0x14)
+#define RUBY_SYS_CTL_DEBUG_SEL			(RUBY_SYS_CTL_BASE_ADDR + 0x18)
+#define RUBY_SYS_CTL_L2M_INT			(RUBY_SYS_CTL_BASE_ADDR + 0x1C)
+#define RUBY_SYS_CTL_L2M_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x20)
+#define RUBY_SYS_CTL_L2D_INT			(RUBY_SYS_CTL_BASE_ADDR + PLATFORM_REG_SWITCH(0x24, 0x34))
+#define RUBY_SYS_CTL_L2D_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + PLATFORM_REG_SWITCH(0x28, 0x38))
+#define RUBY_SYS_CTL_M2L_INT			(RUBY_SYS_CTL_BASE_ADDR + 0x2C)
+#define RUBY_SYS_CTL_M2L_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x30)
+#define RUBY_SYS_CTL_M2D_INT			(RUBY_SYS_CTL_BASE_ADDR + PLATFORM_REG_SWITCH(0x34, 0x24))
+#define RUBY_SYS_CTL_M2D_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + PLATFORM_REG_SWITCH(0x38, 0x28))
+#define RUBY_SYS_CTL_D2L_INT			(RUBY_SYS_CTL_BASE_ADDR + 0x3C)
+#define RUBY_SYS_CTL_D2L_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x40)
+#define RUBY_SYS_CTL_D2M_INT			(RUBY_SYS_CTL_BASE_ADDR + 0x44)
+#define RUBY_SYS_CTL_D2M_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x48)
+#define RUBY_SYS_CTL_LHOST_INT_EN		(RUBY_SYS_CTL_BASE_ADDR + 0x4C)
+#define RUBY_SYS_CTL_MUC_INT_EN			(RUBY_SYS_CTL_BASE_ADDR + 0x50)
+#define RUBY_SYS_CTL_DSP_INT_EN			(RUBY_SYS_CTL_BASE_ADDR + 0x54)
+#define RUBY_SYS_CTL_LHOST_ORINT_EN		(RUBY_SYS_CTL_BASE_ADDR + 0x58)
+#define RUBY_SYS_CTL_MUC_ORINT_EN		(RUBY_SYS_CTL_BASE_ADDR + 0x5C)
+#define RUBY_SYS_CTL_DSP_ORINT_EN		(RUBY_SYS_CTL_BASE_ADDR + 0x60)
+#define RUBY_SYS_CTL_MUC_REMAP			(RUBY_SYS_CTL_BASE_ADDR + 0x64)
+#define RUBY_SYS_CTL_DSP_REMAP			(RUBY_SYS_CTL_BASE_ADDR + 0x68)
+#define RUBY_SYS_CTL_PCIE_CFG0			(RUBY_SYS_CTL_BASE_ADDR + 0x6C)
+#define RUBY_SYS_CTL_PCIE_CFG1			(RUBY_SYS_CTL_BASE_ADDR + 0x70)
+#define RUBY_SYS_CTL_PCIE_CFG2			(RUBY_SYS_CTL_BASE_ADDR + 0x74)
+#define RUBY_SYS_CTL_PCIE_CFG3			(RUBY_SYS_CTL_BASE_ADDR + 0x78)
+#define RUBY_SYS_CTL_PCIE_CFG4			(RUBY_SYS_CTL_BASE_ADDR + 0x7C)
+#define RUBY_SYS_CTL_PLL0_CTRL			(RUBY_SYS_CTL_BASE_ADDR + 0x80)
+#define RUBY_SYS_CTL_PLL1_CTRL			(RUBY_SYS_CTL_BASE_ADDR + 0x84)
+#define RUBY_SYS_CTL_LHOST_ID			(RUBY_SYS_CTL_BASE_ADDR + 0x88)
+#define RUBY_SYS_CTL_PLL2_CTRL			(RUBY_SYS_CTL_BASE_ADDR + 0x8C)
+#define RUBY_SYS_CTL_MUC_ID			(RUBY_SYS_CTL_BASE_ADDR + 0x90)
+#define RUBY_SYS_CTL_L2M_SEM			(RUBY_SYS_CTL_BASE_ADDR + 0x94)
+#define RUBY_SYS_CTL_M2L_SEM			(RUBY_SYS_CTL_BASE_ADDR + 0x98)
+#define RUBY_SYS_CTL_L2D_SEM			(RUBY_SYS_CTL_BASE_ADDR + 0x9C)
+#define RUBY_SYS_CTL_D2L_SEM			(RUBY_SYS_CTL_BASE_ADDR + 0xA0)
+#define RUBY_SYS_CTL_M2D_SEM			(RUBY_SYS_CTL_BASE_ADDR + 0xA4)
+#define RUBY_SYS_CTL_D2M_SEM			(RUBY_SYS_CTL_BASE_ADDR + 0xA8)
+#define RUBY_SYS_CTL_INTR_INV0			(RUBY_SYS_CTL_BASE_ADDR + 0xAC)
+#define RUBY_SYS_CTL_INTR_INV1			(RUBY_SYS_CTL_BASE_ADDR + 0xB0)
+#define RUBY_SYS_CTL_GMII_CLKDLL		(RUBY_SYS_CTL_BASE_ADDR + 0xB4)
+#define RUBY_SYS_CTL_DEBUG_BUS			(RUBY_SYS_CTL_BASE_ADDR + 0xB8)
+#define RUBY_SYS_CTL_SPARE			(RUBY_SYS_CTL_BASE_ADDR + 0xBC)
+#define RUBY_SYS_CTL_PCIE_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0xC0)
+#define	RUBY_SYS_CTL_GPIO_IRQ_SEL		(RUBY_SYS_CTL_BASE_ADDR + 0xc4)
+#define RUBY_SYS_CTL_PCIE_SLV_REQ_MISC_INFO	(RUBY_SYS_CTL_BASE_ADDR + 0xCC)
+#define RUBY_SYS_CTL_DDR_CTRL			(RUBY_SYS_CTL_BASE_ADDR + 0xE8)
+#define RUBY_SYS_CTL_GPIO_INT_STATUS		(RUBY_SYS_CTL_BASE_ADDR + 0x154)
+#define RUBY_SYS_AHB_MON_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x160)
+#define RUBY_SYS_CTL_BOND_OPT			(RUBY_SYS_CTL_BASE_ADDR + 0x16C)
+
+/*****************************************************************************/
+/* System controller constants                                               */
+/*****************************************************************************/
+#define RUBY_SYS_CTL_REMAP(x)		(((x) & 0x3) << 3)
+#define RUBY_SYS_CTL_LINUX_MAP(x)	(((x) & 0x1) << 31)
+#define RUBY_SYS_CTL_SPICLK(x)		(((x) & 0x3) << 15)
+#define RUBY_SYS_CTL_CLKSEL(x)		(((x) & 0x3) << 5)
+#define RUBY_SYS_CTL_MUC_REMAP_SHIFT	15
+#define RUBY_SYS_CTL_MUC_REMAP_VAL(x)	(RUBY_BIT(31) | ((x) >> RUBY_SYS_CTL_MUC_REMAP_SHIFT))
+#define RUBY_SYS_CTL_DSP_REMAP_SHIFT	15
+#define RUBY_SYS_CTL_DSP_REMAP_VAL(x)	(RUBY_BIT(31) | ((x) >> RUBY_SYS_CTL_DSP_REMAP_SHIFT))
+/* reset bits - names match rtl */
+#define RUBY_SYS_CTL_RESET_LHOST_CORE	(RUBY_BIT(0))
+#define RUBY_SYS_CTL_RESET_LHOST_BUS	(RUBY_BIT(1))
+#define RUBY_SYS_CTL_RESET_DDR		(RUBY_BIT(2))
+#define RUBY_SYS_CTL_RESET_SRAM		(RUBY_BIT(3))
+#define RUBY_SYS_CTL_RESET_DSP		(RUBY_BIT(4))
+#define RUBY_SYS_CTL_RESET_IOSS		(RUBY_BIT(5))
+#define RUBY_SYS_CTL_RESET_NETSS	(RUBY_BIT(7))
+#define RUBY_SYS_CTL_RESET_MAC		(RUBY_BIT(8))
+#define RUBY_SYS_CTL_RESET_ENET0	(RUBY_BIT(9))
+#define RUBY_SYS_CTL_RESET_MUC		(RUBY_BIT(11))
+#define RUBY_SYS_CTL_RESET_ENET1	(RUBY_BIT(12))
+#define RUBY_SYS_CTL_RESET_PCIE		(RUBY_BIT(13))
+#define RUBY_SYS_CTL_RESET_BB		(RUBY_BIT(14))
+#define RUBY_SYS_CTL_RESET_EXT		(RUBY_BIT(15))
+/* reset useful constants */
+#define RUBY_SYS_CTL_RESET_ALL		(~0x0)
+#define RUBY_SYS_CTL_RESET_MUC_ALL	RUBY_SYS_CTL_RESET_MUC
+#define RUBY_SYS_CTL_RESET_DSP_ALL	RUBY_SYS_CTL_RESET_DSP
+/* reset cause definitions */
+#define	RUBY_SYS_CTL_RESET_CAUSE_PO	(RUBY_BIT(0))
+#define	RUBY_SYS_CTL_RESET_CAUSE_SR	(RUBY_BIT(1))
+#define	RUBY_SYS_CTL_RESET_CAUSE_WD	(RUBY_BIT(2))
+#define RUBY_SYS_CTL_INTR_TIMER_MSK(t)	(1 << (20 + (t)))
+/* sysctl vector/mask bit definitions */
+#define RUBY_SYS_CTL_MASK_BOOTMODE	(0x7 << 0)
+#define RUBY_SYS_CTL_MASK_REMAP		(0x3 << 3)
+#define RUBY_SYS_CTL_MASK_CLKSEL	(0x3 << 5)
+/* clksel: 00 = cpu(400)      bus(200) */
+#define RUBY_SYS_CTL_CLKSEL_00_BUS_FREQ	200000000
+/* clksel: 01 = cpu(320)      bus(160) */
+#define RUBY_SYS_CTL_CLKSEL_01_BUS_FREQ	160000000
+/* clksel: 10 = cpu(250)      bus(125) */
+#define RUBY_SYS_CTL_CLKSEL_10_BUS_FREQ	125000000
+/* clksel: 11 = cpu(200)      bus(100) */
+#define RUBY_SYS_CTL_CLKSEL_11_BUS_FREQ	100000000
+#define RUBY_SYS_CTL_MASK_DDRDRV	(0x1 << 7)
+#define RUBY_SYS_CTL_MASK_DDRODT	(0x3 << 8)
+#define RUBY_SYS_CTL_MASK_NODDR		(0x1 << 12)
+#define RUBY_SYS_CTL_MASK_MII		(0x3 << 13)
+#define RUBY_SYS_CTL_MASK_MII_EMAC0	(0x1 << 13)
+#define RUBY_SYS_CTL_MASK_MII_EMAC1	(0x1 << 14)
+#define RUBY_SYS_CTL_MASK_SPICLK	(0x3 << 15)
+#define RUBY_SYS_CTL_MASK_JTAGCHAIN	(0x1 << 17)
+
+#define RUBY_SYS_CTL_MASK_GMII0_TXCLK	(0x3 << 18)
+#define RUBY_SYS_CTL_MASK_GMII0_10M	(0x0 << 18)
+#define RUBY_SYS_CTL_MASK_GMII0_100M	(0x1 << 18)
+#define RUBY_SYS_CTL_MASK_GMII0_1000M	(0x2 << 18)
+
+#define RUBY_SYS_CTL_MASK_GMII1_TXCLK	(0x3 << 20)
+#define RUBY_SYS_CTL_MASK_GMII1_10M	(0x0 << 20)
+#define RUBY_SYS_CTL_MASK_GMII1_100M	(0x1 << 20)
+#define RUBY_SYS_CTL_MASK_GMII1_1000M	(0x2 << 20)
+
+#define RUBY_SYS_CTL_MASK_GMII_10M	(0)
+#define RUBY_SYS_CTL_MASK_GMII_100M	(1)
+#define RUBY_SYS_CTL_MASK_GMII_1000M	(2)
+#define RUBY_SYS_CTL_MASK_GMII_TXCLK	(3)
+#define RUBY_SYS_CTL_MASK_GMII0_SHIFT	(18)
+#define RUBY_SYS_CTL_MASK_GMII1_SHIFT	(20)
+
+#define RUBY_SYS_CTL_MASK_DDRCLK	(0x7 << 22)
+#define RUBY_SYS_CTL_MASK_LINUX_MAP	(0x1 << 31)
+
+#define RUBY_RESET_CAUSE_UART_SHIFT	(7)
+#define RUBY_RESET_CAUSE_UART(x)	(1 << (RUBY_RESET_CAUSE_UART_SHIFT + x))
+
+/* global[30:25,11] unused */
+/* for compatibility */
+#define SYSCTRL_CTRL_MASK		(RUBY_SYS_CTL_MASK - RUBY_SYS_CTL_BASE_ADDR)
+#define SYSCTRL_CTRL			(RUBY_SYS_CTL_CTRL - RUBY_SYS_CTL_BASE_ADDR)
+#define SYSCTRL_RESET_MASK		(RUBY_SYS_CTL_CPU_VEC_MASK - RUBY_SYS_CTL_BASE_ADDR)
+#define SYSCTRL_RESET			(RUBY_SYS_CTL_CPU_VEC - RUBY_SYS_CTL_BASE_ADDR)
+#define SYSCTRL_REV_NUMBER		(RUBY_SYS_CTL_CSR - RUBY_SYS_CTL_BASE_ADDR)
+#define SYSCTRL_RGMII_DLL		(RUBY_SYS_CTL_GMII_CLKDLL - RUBY_SYS_CTL_BASE_ADDR)
+/*****************************************************************************/
+/*****************************************************************************/
+/* Watchdog registers                                                        */
+/*****************************************************************************/
+#define RUBY_WDT_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xF4000000, 0xE40C0000)
+#define RUBY_WDT_CTL			(RUBY_WDT_BASE_ADDR + 0x00)
+#define RUBY_WDT_TIMEOUT_RANGE		(RUBY_WDT_BASE_ADDR + 0x04)
+#define RUBY_WDT_CURRENT_VALUE		(RUBY_WDT_BASE_ADDR + 0x08)
+#define RUBY_WDT_COUNTER_RESTART	(RUBY_WDT_BASE_ADDR + 0x0c)
+#define RUBY_WDT_INT_STAT		(RUBY_WDT_BASE_ADDR + 0x10)
+#define RUBY_WDT_INT_CLEAR		(RUBY_WDT_BASE_ADDR + 0x14)
+/*****************************************************************************/
+/* Watchdog constants                                                        */
+/*****************************************************************************/
+#define RUBY_WDT_ENABLE_IRQ_WARN	(RUBY_BIT(1))
+#define RUBY_WDT_ENABLE			(RUBY_BIT(0))
+#define RUBY_WDT_MAGIC_NUMBER		(0x76)
+#define RUBY_WDT_MAX_TIMEOUT		(0xF)
+#define RUBY_WDT_RESET_TIMEOUT		(0x8)
+/*****************************************************************************/
+/*****************************************************************************/
+/* SPI registers                                                             */
+/*****************************************************************************/
+#define RUBY_SPI_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xE2000000, 0xE4030000)
+#define RUBY_SPI_COMMIT			(RUBY_SPI_BASE_ADDR + 0x0000)
+#define RUBY_SPI_CONTROL		(RUBY_SPI_BASE_ADDR + 0x0004)
+#define RUBY_SPI_WRITE_STATUS		(RUBY_SPI_BASE_ADDR + 0x0100)
+#define RUBY_SPI_PAGE_PROGRAM		(RUBY_SPI_BASE_ADDR + 0x0200)
+#define RUBY_SPI_WRITE_DIS		(RUBY_SPI_BASE_ADDR + 0x0400)
+#define RUBY_SPI_READ_STATUS		(RUBY_SPI_BASE_ADDR + 0x0500)
+#define RUBY_SPI_WRITE_EN		(RUBY_SPI_BASE_ADDR + 0x0600)
+#define RUBY_SPI_FAST_READ		(RUBY_SPI_BASE_ADDR + 0x0B00)
+#define RUBY_SPI_WRITE_REG3		(RUBY_SPI_BASE_ADDR + 0x1100)
+#define RUBY_SPI_READ_REG3              (RUBY_SPI_BASE_ADDR + 0x1500)
+#define RUBY_SPI_SECTOR_ERASE_20	(RUBY_SPI_BASE_ADDR + 0x2000)
+#define RUBY_SPI_READ_SCUR              (RUBY_SPI_BASE_ADDR + 0x2b00)
+#define RUBY_SPI_WRITE_IBUP		(RUBY_SPI_BASE_ADDR + 0x3900)
+#define RUBY_SPI_WRITE_PRO_SEL          (RUBY_SPI_BASE_ADDR + 0x6800)
+#define RUBY_SPI_GBLOCK_LOCK            (RUBY_SPI_BASE_ADDR + 0x7e00)
+#define RUBY_SPI_GBLOCK_UNLOCK          (RUBY_SPI_BASE_ADDR + 0x9800)
+#define TOPAZ_SPI_GBLOCK_UNLOCK		(RUBY_SPI_BASE_ADDR + 0x9800)
+#define RUBY_SPI_READ_ID		(RUBY_SPI_BASE_ADDR + 0x9F00)
+#define RUBY_SPI_BULK_ERASE		(RUBY_SPI_BASE_ADDR + 0xC700)
+#define RUBY_SPI_SECTOR_ERASE_D8	(RUBY_SPI_BASE_ADDR + 0xD800)
+#define RUBY_SPI_READ_DPB               (RUBY_SPI_BASE_ADDR + 0xe000)
+#define RUBY_SPI_WRITE_DPB              (RUBY_SPI_BASE_ADDR + 0xe100)
+#define RUBY_SPI_PAGE_PROGRAM_4B	(RUBY_SPI_BASE_ADDR + 0x1200)
+#define RUBY_SPI_SECTOR_ERASE_D8_4B	(RUBY_SPI_BASE_ADDR + 0xDC00)
+#define RUBY_SPI_SECTOR_ERASE_20_4B	(RUBY_SPI_BASE_ADDR + 0x2100)
+#define RUBY_SPI_ADDRESS_MODE_4B	0x85
+#define RUBY_SPI_BOUNDARY_4B		0x1000000
+
+#define RUBY_SPI_READ_LOCK               (RUBY_SPI_BASE_ADDR + 0x2D00)
+#define RUBY_SPI_WRITE_LOCK              (RUBY_SPI_BASE_ADDR + 0x2C00)
+#define RUBY_SPI_READ_CONFIG              (RUBY_SPI_BASE_ADDR + 0x1500)
+#define RUBY_SPI_READ_SPB               (RUBY_SPI_BASE_ADDR + 0xE200)
+#define RUBY_SPI_WRITE_SPB              (RUBY_SPI_BASE_ADDR + 0xE300)
+#define RUBY_SPI_ERASE_SPB              (RUBY_SPI_BASE_ADDR + 0xE400)
+
+#define RUBY_SPI_WRITE_PASWORD          (RUBY_SPI_BASE_ADDR + 0x2800)
+#define RUBY_SPI_READ_PASSWORD			(RUBY_SPI_BASE_ADDR + 0x2700)
+#define RUBY_SPI_UNLOCK_PASSWORD		(RUBY_SPI_BASE_ADDR + 0x2900)
+#define RUBY_SPI_WRITE_SPBLOCK			(RUBY_SPI_BASE_ADDR + 0xA600)
+#define RUBY_SPI_READ_SPBLOCK			(RUBY_SPI_BASE_ADDR + 0xA700)
+
+/*
+ * UBOOT_VERSION_LOCATION:
+ * This is hardwired in u-boot's start.S; the first instruction generates a
+ * 32 bit branch instruction.  The next several locations holds a human
+ * readable ascii version string that is visible in the file and in memory.
+ * The branch target of the first instruction is the next 4 byte aligned
+ * address following the version string.
+ *
+ */
+#define UBOOT_VERSION_LOCATION		(RUBY_SPI_FLASH_ADDR + 4)
+
+/*****************************************************************************/
+/* SPI constants                                                             */
+/*****************************************************************************/
+#define RUBY_SPI_WR_IN_PROGRESS		(RUBY_BIT(0))
+#define RUBY_SPI_PROTECTION		(0x3C)
+
+/*****************************************************************************/
+/* SPI1 registers                                                             */
+/*****************************************************************************/
+#define RUBY_SPI1_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xF2000000, 0xE40A0000)
+#define RUBY_SPI1_SPCR			(RUBY_SPI1_BASE_ADDR + 0x0000)
+#define RUBY_SPI1_SPSR			(RUBY_SPI1_BASE_ADDR + 0x0004)
+#define RUBY_SPI1_SPDR			(RUBY_SPI1_BASE_ADDR + 0x0008)
+#define RUBY_SPI1_SPER			(RUBY_SPI1_BASE_ADDR + 0x000C)
+#define RUBY_SPI1_SLVN			(RUBY_SPI1_BASE_ADDR + 0x0010)
+/*****************************************************************************/
+/* SPI1 constants                                                            */
+/*****************************************************************************/
+#define RUBY_SPI1_SPCR_SPIE_BIT		7
+#define RUBY_SPI1_SPCR_SPIE		(RUBY_BIT(RUBY_SPI1_SPCR_SPIE_BIT))
+#define RUBY_SPI1_SPCR_SPE		(RUBY_BIT(6))
+#define RUBY_SPI1_SPCR_MSTR		(RUBY_BIT(4))
+#define RUBY_SPI1_SPCR_CPOL		(RUBY_BIT(3))
+#define RUBY_SPI1_SPCR_CPHA		(RUBY_BIT(2))
+#define RUBY_SPI1_SPCR_SPR(x)		((x) & 0x3)
+#define RUBY_SPI1_SPSR_SPIF		(RUBY_BIT(7))
+#define RUBY_SPI1_SPSR_WCOL		(RUBY_BIT(6))
+#define RUBY_SPI1_SPSR_WFFULL		(RUBY_BIT(3))
+#define RUBY_SPI1_SPSR_WFEMPTY		(RUBY_BIT(2))
+#define RUBY_SPI1_SPSR_RFFULL		(RUBY_BIT(1))
+#define RUBY_SPI1_SPSR_RFEMPTY		(RUBY_BIT(0))
+#define RUBY_SPI1_SPER_ICNT(x)		(((x) & 0x3) << 6)
+#define RUBY_SPI1_SPER_ESPR(x)		((x) & 0x3)
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* I2C constants                                                            */
+/*****************************************************************************/
+#define RUBY_I2C_BASE_ADDR			TOPAZ_ALIAS_MAP_SWITCH(0xF9000000, 0xE40F0000)
+#define RUBY_I2C_MEM_SIZE			(0x0A08)
+#define RUBY_I2C_ADAPTER_NUM		(0)
+
+/*****************************************************************************/
+/* Interrupts                                                                */
+/*****************************************************************************/
+#define RUBY_IRQ_RESET			(0)
+#define RUBY_IRQ_MEM_ERR		(1)
+#define RUBY_IRQ_INS_ERR		(2)
+#define RUBY_IRQ_CPUTIMER0		(3)
+#define RUBY_IRQ_CPUTIMER1		(4)
+#define RUBY_IRQ_WATCHDOG		(5)
+#define RUBY_IRQ_DMA0			(6)
+#define RUBY_IRQ_BB			(7)
+#define RUBY_IRQ_IPC_LO			(8)
+#define IRQ_MAC0_0			RUBY_IRQ_IPC_LO
+#define RUBY_IRQ_DSP			(9)
+#define RUBY_IRQ_IPC_HI			(10)
+#define RUBY_IRQ_MAC_TX_DONE		(11)
+#define RUBY_IRQ_MAC_TX_ALERT		(12)
+#define RUBY_IRQ_MAC_RX_DONE		(13)
+#define RUBY_IRQ_MAC_RX_TYPE		(14)
+#define RUBY_IRQ_MAC_MIB		(15)
+#define RUBY_IRQ_MAC_0			(16)
+#define RUBY_IRQ_MAC_1			(17)
+#define RUBY_IRQ_PCIE			(18)
+#define RUBY_IRQ_ENET0			(19)
+#define RUBY_IRQ_ENET1			(20)
+#define RUBY_IRQ_DMA1			(21)
+#define RUBY_IRQ_DMA2			(22)
+#define RUBY_IRQ_DMA3			(23)
+#define RUBY_IRQ_UART			(24)
+#define RUBY_IRQ_GPIO			(25)
+#define RUBY_IRQ_TIMER			(26)
+#define RUBY_IRQ_MISC			(27)
+/* Combined PCIe Interrupt IRQ 28 */
+#define RUBY_IRQ_MSI			(28)
+#define RUBY_IRQ_INTA			(28)
+/* Combined PCIe DMA Legacy/MSI Interrupt IRQ 22 */
+#define TOPAZ_IRQ_PCIE_DMA_INT		(22)
+#define TOPAZ_IRQ_PCIE_IPC4_INT		(29)
+/* Combined PCIe Legacy/MSI Interrupt IRQ 28 */
+#define TOPAZ_IRQ_PCIE_INT		RUBY_IRQ_INTA
+#define RUBY_IRQ_SPI			(30)
+#define RUBY_IRQ_BB_PER_PACKET		(31)
+#define RUBY_MAX_IRQ_VECTOR		(31)
+#define RUBY_IRQ_VECTORS_NUM		(RUBY_MAX_IRQ_VECTOR + 1)
+/* these are extended (shared) irqs */
+#define GPIO2IRQ(x)			((x) + RUBY_IRQ_GPIO0)
+#define RUBY_IRQ_GPIO0			(32)
+#define RUBY_IRQ_GPIO1			(33)
+#define RUBY_IRQ_GPIO2			(34)
+#define RUBY_IRQ_GPIO3			(35)
+#define RUBY_IRQ_GPIO4			(36)
+#define RUBY_IRQ_GPIO5			(37)
+#define RUBY_IRQ_GPIO6			(38)
+#define RUBY_IRQ_GPIO7			(39)
+#define RUBY_IRQ_GPIO8			(40)
+#define RUBY_IRQ_GPIO9			(41)
+#define RUBY_IRQ_GPIO10			(42)
+#define RUBY_IRQ_GPIO11			(43)
+#define RUBY_IRQ_GPIO12			(44)
+#define RUBY_IRQ_GPIO13			(45)
+#define RUBY_IRQ_GPIO14			(46)
+#define RUBY_IRQ_GPIO15			(47)
+#define RUBY_IRQ_UART0			(48)
+#define RUBY_IRQ_UART1			(49)
+#define RUBY_IRQ_TIMER0			(50)
+#define RUBY_IRQ_TIMER1			(51)
+#define RUBY_IRQ_TIMER2			(52)
+#define RUBY_IRQ_TIMER3			(53)
+#define RUBY_IRQ_MISC_I2C		(56)
+#define RUBY_IRQ_MISC_SRAM		(57)
+#define RUBY_IRQ_MISC_NETSS		(58)
+#define RUBY_IRQ_MISC_PLL1		(59)
+#define RUBY_IRQ_MISC_PLL2		(60)
+#define RUBY_IRQ_MISC_EXT_IRQ_COUNT		(5)
+#define RUBY_IRQ_MISC_EXT_IRQ_START		(56)
+#define RUBY_IRQ_MISC_RST_CAUSE_START	(26)
+
+#define QTN_IRQ_MISC_EXT_IRQ_COUNT		TOPAZ_IRQ_MISC_EXT_IRQ_COUNT
+#define QTN_IRQ_MISC_RST_CAUSE_START	TOPAZ_IRQ_MISC_RST_CAUSE_START
+
+#define RUBY_MAX_IRQ_EXT_VECTOR		(63)
+#define RUBY_IRQ_EXT_VECTORS_NUM	(RUBY_MAX_IRQ_EXT_VECTOR + 1)
+
+/* M2L interrupt register is [31:16] high prio, [15:0] low prio */
+#define RUBY_M2L_IRQ_NUM_HI		(16)
+#define RUBY_M2L_IRQ_NUM_LO		(16)
+/* M2L High priority interrupts, sent to RUBY_IRQ_IPC_HI */
+#define RUBY_M2L_IRQ_HI_REBOOT		RUBY_M2L_IPC_HI_IRQ(13)
+#define RUBY_M2L_IRQ_HI_DIE		RUBY_M2L_IPC_HI_IRQ(14)
+/* M2L Low priority interrupts, sent to RUBY_IRQ_IPC_LO */
+#define RUBY_M2L_IRQ_LO_MEAS		(8)
+#define RUBY_M2L_IRQ_LO_OCAC		(9)
+#define RUBY_M2L_IRQ_LO_CSA		(10)
+#define RUBY_M2L_IRQ_LO_SCS		(11)
+#define RUBY_M2L_IRQ_LO_SCAN		(12)
+#define RUBY_M2L_IRQ_LO_VSP		(13)
+#define RUBY_M2L_IRQ_LO_PRINT		(14)
+#define RUBY_M2L_IRQ_LO_HLINK		(15)
+
+/* these are DSP interrupts */
+#define RUBY_DSP_IRQ_TIMER0		(3)
+#define RUBY_DSP_IRQ_TIMER1		(5)
+#define RUBY_DSP_IRQ_IPC_MUC2DSP	(10)
+#define RUBY_DSP_IRQ_IPC_LHOST2DSP	(11)
+#define RUBY_DSP_IRQ_IPC_MUC2DSP_HI	(17)
+#define RUBY_DSP_IRQ_WMAC_COMBINED	(20)
+
+/* M2D High priority interrupts, sent to RUBY_DSP_IRQ_IPC_MUC2DSP_HI */
+#define RUBY_M2D_IRQ_HI_DIE		PLATFORM_REG_SWITCH(RUBY_IPC_HI_IRQ(7), 0)
+
+/* L2M Low priority interrupts */
+#define RUBY_L2M_IRQ_HLINK		(6)
+#define RUBY_L2M_IRQ_HIGH		(7)
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* BB registers                                                             */
+/*****************************************************************************/
+#define RUBY_BB_BASE_ADDR			0xE6000000
+/* FIXME: no BB2 on Ruby - this should expand to something else */
+#define UMS_REGS_BB2				RUBY_BB_BASE_ADDR
+#define RUBY_QT3_BB_GLBL_PREG_RIF_ENABLE	(RUBY_BB_BASE_ADDR + 0x1F4)
+#define RUBY_QT3_BB_GLBL_PREG_RIF_ENABLE_ON	0x1
+#define RUBY_QT3_BB_GLBL_PREG_RIF_ENABLE_OFF	0x0
+#define RUBY_QT3_BB_GLBL_PREG_INTR_STATUS	(RUBY_BB_BASE_ADDR + 0x320)
+#define RUBY_QT3_BB_FFT_INTR			(0x1000)
+#define RUBY_QT3_BB_MIMO_PREG_RX_IPG_RST_ENABLE	(RUBY_BB_BASE_ADDR + 0x50268)
+#define RUBY_QT3_BB_MIMO_BF_RX			(RUBY_BB_BASE_ADDR + 0x501FC)
+#define RUBY_QT3_BB_MIMO_BF_RX_DUMP_ENABLE	(1 << 1) /*11n HT-LF dump enable*/
+
+#define RUBY_QT3_BB_GLBL_SOFT_RST		(RUBY_BB_BASE_ADDR + 0x0008)
+#define RUBY_QT3_BB_GLBL_SPI_CTRL		(RUBY_BB_BASE_ADDR + 0x0024)
+#define RUBY_QT3_BB_TD_BASE_ADDR		(RUBY_BB_BASE_ADDR + 0x90000)
+
+#define RUBY_QT3_BB_RF1_BASE_ADDR		0xE6080000
+#define RUBY_QT3_BB_BONDING_4SS			0x01
+
+#define RUBY_QT3_BB_TD_SPARE_0			(RUBY_QT3_BB_TD_BASE_ADDR + 0x5f4)
+#define RUBY_QT3_BB_TD_SPARE_1			(RUBY_QT3_BB_TD_BASE_ADDR + 0x5f8)
+#define RUBY_QT3_BB_TD_MAX_GAIN			(RUBY_QT3_BB_TD_BASE_ADDR + 0x36c)
+
+/*****************************************************************************/
+/* Radar registers                                                           */
+/*****************************************************************************/
+#define RUBY_RADAR_CNT_L	0xE6090558
+
+/*****************************************************************************/
+/* ARC addresses                                                             */
+/*****************************************************************************/
+#define RUBY_ARC_TLB_BYPASS	0x80000000
+#define RUBY_ARC_CACHE_BYPASS	0xC0000000
+
+/*****************************************************************************/
+/*               DMA registers and bit defines.                              */
+/*****************************************************************************/
+#define RUBY_DMA_NUM_CHANNELS	(4)
+#define RUBY_DMA_BASE_ADDR		TOPAZ_ALIAS_MAP_SWITCH(0xEA000000, 0xE4060000)
+#define	RUBY_DMA_SAR(x)			(RUBY_DMA_BASE_ADDR + 0x00 + (x)*0x58)
+#define	RUBY_DMA_DAR(x)			(RUBY_DMA_BASE_ADDR + 0x08 + (x)*0x58)
+#define	RUBY_DMA_LLP(x)			(RUBY_DMA_BASE_ADDR + 0x10 + (x)*0x58)
+#define	RUBY_DMA_CTL(x)			(RUBY_DMA_BASE_ADDR + 0x18 + (x)*0x58)
+#define	RUBY_DMA_SIZE(x)		(RUBY_DMA_BASE_ADDR + 0x1c + (x)*0x58)
+#define	RUBY_DMA_SSTAT(x)		(RUBY_DMA_BASE_ADDR + 0x20 + (x)*0x58)
+#define	RUBY_DMA_DSTAT(x)		(RUBY_DMA_BASE_ADDR + 0x28 + (x)*0x58)
+#define	RUBY_DMA_SSTATAR(x)		(RUBY_DMA_BASE_ADDR + 0x30 + (x)*0x58)
+#define	RUBY_DMA_DSTATAR(x)		(RUBY_DMA_BASE_ADDR + 0x38 + (x)*0x58)
+#define	RUBY_DMA_CFG(x)			(RUBY_DMA_BASE_ADDR + 0x40 + (x)*0x58)
+#define	RUBY_DMA_SGR(x)			(RUBY_DMA_BASE_ADDR + 0x48 + (x)*0x58)
+#define	RUBY_DMA_DSR(x)			(RUBY_DMA_BASE_ADDR + 0x50 + (x)*0x58)
+
+#define RUBY_DMA_MASK_BLK		(RUBY_DMA_BASE_ADDR + 0x318)
+#define RUBY_DMA_BLK_CLR		(RUBY_DMA_BASE_ADDR + 0x340)
+#define RUBY_DMA_DMA_CFG		(RUBY_DMA_BASE_ADDR + 0x398)
+#define RUBY_DMA_CH_EN			(RUBY_DMA_BASE_ADDR + 0x3a0)
+/************** RUBY_DMA_CTL(x) bit defines. *********************************/
+#define RUBY_DMA_CTL_INT_EN		RUBY_BIT(0)
+#define RUBY_DMA_CTL_DINC		(0)
+#define RUBY_DMA_CTL_DDEC		RUBY_BIT(7)
+#define RUBY_DMA_CTL_DNOCHNG		RUBY_BIT(8)
+#define RUBY_DMA_CTL_SINC		(0)
+#define RUBY_DMA_CTL_SDEC		RUBY_BIT(9)
+#define RUBY_DMA_CTL_SNOCHNG		RUBY_BIT(10)
+#define RUBY_DMA_CTL_DMS_LHOST		(0)
+#define RUBY_DMA_CTL_DMS_MUC		RUBY_BIT(23)
+#define RUBY_DMA_CTL_SMS_LHOST		(0)		// ahb master bus 1
+#define RUBY_DMA_CTL_SMS_MUC		RUBY_BIT(25)	// ahb master bus 2
+
+/************** RUBY_DMA_CFG(x) bit defines. *********************************/
+#define RUBY_DMA_CFG_ENABLE		RUBY_BIT(0)
+
+/*****************************************************************************/
+/* PCI registers & memory regions for target driver.                         */
+/*****************************************************************************/
+#define RUBY_PCIE_REG_BASE			TOPAZ_ALIAS_MAP_SWITCH(0xE9000000, 0xE4050000)
+#define RUBY_PCIE_CONFIG_REGION			(0xCF000000)
+
+#define RUBY_PCIE_CMD_REG			(RUBY_PCIE_REG_BASE + 0x0004)
+#define RUBY_PCIE_BAR_NUM			(6)
+#define	RUBY_PCIE_BAR_BASE			(RUBY_PCIE_REG_BASE + 0x0010)
+#define	RUBY_PCIE_BAR(n)			(RUBY_PCIE_BAR_BASE + (n << 2))
+#define	RUBY_PCIE_BAR_MASK_ADDR			(RUBY_PCIE_REG_BASE + 0x1010)
+#define	RUBY_PCIE_BAR_MASK(n)			(RUBY_PCIE_BAR_MASK_ADDR + (n << 2))
+
+#define RUBY_PCIE_ATU_VIEW			(RUBY_PCIE_REG_BASE + 0x0900)
+#define RUBY_PCIE_ATU_CTL1			(RUBY_PCIE_REG_BASE + 0x0904)
+#define RUBY_PCIE_ATU_CTL2			(RUBY_PCIE_REG_BASE + 0x0908)
+#define RUBY_PCIE_ATU_BASE_LO			(RUBY_PCIE_REG_BASE + 0x090c)
+#define RUBY_PCIE_ATU_BASE_HI			(RUBY_PCIE_REG_BASE + 0x0910)
+#define RUBY_PCIE_ATU_BASE_LIMIT		(RUBY_PCIE_REG_BASE + 0x0914)
+#define RUBY_PCIE_ATU_TARGET_LO			(RUBY_PCIE_REG_BASE + 0x0918)
+#define RUBY_PCIE_ATU_TARGET_HI			(RUBY_PCIE_REG_BASE + 0x091c)
+
+#define RUBY_PCIE_ATU_OB_REGION(n)		(0x0 + n)
+#define RUBY_PCIE_ATU_IB_REGION(n)		(0x80000000 + n)
+#define RUBY_PCIE_ATU_CFG_SHIFT			RUBY_BIT(28)
+#define RUBY_PCIE_ATU_OB_ENABLE			RUBY_BIT(31)
+
+#define RUBY_PCI_RC_MEM_WINDOW			(32 << 20) /* 32MB  MEMORY window in Root Complex for pcie tree  */
+#define RUBY_PCI_RC_CFG_SIZE			(64 << 10) /* 64KB  CFG size in Root Complex for pcie tree */
+#define RUBY_PCI_RC_MEM_START			(0xc0000000) /* PCI memory region in Root Complex's kernel address space */
+
+#define RUBY_PCIE_INT_MASK			(RUBY_SYS_CTL_BASE_ADDR + 0xC0)
+#define RUBY_PCIE_MSI_ENABLE			RUBY_BIT(16)
+#define MSI_CTL_OFFSET				(0x50)
+#define RUBY_MSI_ADDR_LOWER			(RUBY_PCIE_REG_BASE + 0x820)
+#define RUBY_MSI_ADDR_UPPER			(RUBY_PCIE_REG_BASE + 0x824)
+#define RUBY_MSI_INT_ENABLE			(RUBY_PCIE_REG_BASE + 0x828)
+#define RUBY_PCIE_MSI_MASK			(RUBY_PCIE_REG_BASE + 0x82c)
+#define RUBY_PCIE_MSI_STATUS			(RUBY_PCIE_REG_BASE + 0x830)
+#define RUBY_PCIE_MSI_REGION			(0xce000000) /* msi message address */
+#define RUBY_MSI_DATA				(0x0)	     /* msi message data */
+#define RUBY_PCIE_MSI_CLEAR			RUBY_BIT(0)  /* clear msi intr */
+
+#define TOPAZ_PCIE_STAT				(RUBY_SYS_CTL_BASE_ADDR + 0x017C)
+#if TOPAZ_FPGA_PLATFORM
+	#define TOPAZ_PCIE_LINKUP		(0xe)
+#else
+	#define TOPAZ_PCIE_LINKUP		(0x7)
+#endif
+
+#define PCIE_LINK_STAT				(RUBY_PCIE_REG_BASE + 0x80)
+#define PCIE_LINK_CTL2				(RUBY_PCIE_REG_BASE + 0xa0)
+#define PCIE_ASPM_L1_CTRL			(RUBY_PCIE_REG_BASE + 0x70c)
+#define PCIE_ASPM_LINK_CTRL			(PCIE_LINK_STAT)
+#define PCIE_PORT_LINK_CTL			(RUBY_PCIE_REG_BASE + 0x710)
+#define PCIE_ASPM_L1_SUBSTATE_TIMING		(RUBY_PCIE_REG_BASE + 0xB44)
+#define PCIE_L1SUB_CTRL1			(RUBY_PCIE_REG_BASE + 0x150)
+#define PCIE_PMCSR				(RUBY_PCIE_REG_BASE + 0x44)
+
+/* PCIe link defines */
+#define PCIE_LINK_GEN1				(BIT(0))
+#define PCIE_LINK_GEN2				(BIT(1))
+#define PCIE_LINK_GEN3				(BIT(2))
+#define PCIE_LINK_MODE(x)			(((x) >> 16) & 0x7)
+
+/* ATU setting for Host Buffer Descriptor Mapping */
+#define PCIE_HOSTBD_REGION			(RUBY_PCIE_ATU_OB_REGION(2))
+#define PCIE_ATU_BAR_MIN_SIZE			0x00010000 /* 64k */
+#define PCIE_HOSTBD_SIZE			(2 * PCIE_ATU_BAR_MIN_SIZE)
+#define PCIE_HOSTBD_SIZE_MASK			(PCIE_HOSTBD_SIZE - 1)
+#define PCIE_HOSTBD_START_HI			0x00000000
+#define PCIE_HOSTBD_REGION_ENABLE		RUBY_PCIE_ATU_OB_ENABLE
+
+/* Extra system controller bits */
+#define TOPAZ_SYS_CTL_PLLCLKOUT_EN		(RUBY_BIT(10))
+
+/* Board platform and revision */
+#define TOPAZ_BOARD_REVA			0x40
+#define TOPAZ_BOARD_REVB			0x41
+#define TOPAZ_BOARD_REVA2			0x43
+
+/*
+ * WOWLAN GPIO assignment
+ * On RGMII module - GPIO_B_10 (set as output for WoWLAN)
+ * On RGMII host board - GPIO_B_10 (set as input to wake up the host)
+ *
+ * On PCIe module - WAKE# or GPIO_B_12 (set as output for WoWLAN, use WAKE# by default)
+ * On PCIe host board - GPIO_B_14 (set as input to wake up the host)
+ */
+#ifdef CONFIG_TOPAZ_PCIE_TARGET
+#define WOWLAN_GPIO_OUTPUT_PIN	12
+#else
+#define WOWLAN_GPIO_OUTPUT_PIN	10
+#endif
+#endif // #ifndef __RUBY_PLATFORM_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pm.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pm.h
new file mode 100644
index 0000000..be4cc2f
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pm.h
@@ -0,0 +1,162 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __RUBY_PM_H
+#define __RUBY_PM_H
+
+/* Power save levels */
+#define BOARD_PM_LEVEL_FORCE_NO			(0)
+#define BOARD_PM_LEVEL_NO			(1)
+#define BOARD_PM_LEVEL_SLOW_DOWN		(2)
+#define BOARD_PM_LEVEL_LATENCY_UP		(3)
+#define BOARD_PM_LEVEL_DISTANCE_DOWN		(4)
+#define BOARD_PM_LEVEL_IDLE			(5)
+#define BOARD_PM_LEVEL_SUSPEND			(6)
+
+#define BOARD_PM_LEVEL_INIT			BOARD_PM_LEVEL_FORCE_NO
+
+/* Duty level, shared between Lhost and MuC */
+#define BOARD_PM_LEVEL_DUTY			BOARD_PM_LEVEL_IDLE
+
+/* Names of power save governors */
+#define BOARD_PM_GOVERNOR_WLAN			"wlan"
+#define BOARD_PM_GOVERNOR_QDISC			"qdisc"
+#define BOARD_PM_GOVERNOR_QCSAPI		"qcsapi"
+
+/* wlan timings to switch between modes */
+#define BOARD_PM_WLAN_IDLE_TIMEOUT		(120 * HZ)
+#define BOARD_PM_WLAN_STA_IDLE_TIMEOUT		(15 * HZ)
+#define BOARD_PM_WLAN_DEFAULT_TIMEOUT		(0)
+#define BOARD_PM_WLAN_AP_IDLE_AFTER_BEACON_SCHEME	(50 * HZ)
+/* qdisc parameters to switch between modes */
+#define BOARD_PM_QDISC_TIMER_TIMEOUT		(50/*ms*/ * HZ / 1000)
+#define BOARD_PM_QDISC_SPEEDUP_THRESHOLD	(400)
+#define BOARD_PM_QDISC_SLOWDOWN_THRESHOLD	(100)
+#define BOARD_PM_QDISC_SLOWDOWN_COUNT		(80)
+#define BOARD_PM_QDISC_SLOWDOWN_TIMEOUT		(3 * HZ)
+#define BOARD_PM_QDISC_DEFAULT_TIMEOUT		(0)
+
+/* Beacon TSF setting */
+#define BOARD_PM_WAKE_BEACON_TSF_DEADLINE_PCT		(50)
+#define BOARD_PM_WAKE_BEACON_TSF_ALERT_PCT		(25)
+
+/* Default setting, shared between Lhost and MuC */
+#define BOARD_PM_PDUTY_PERIOD_MS_DEFAULT		80
+#define BOARD_PM_PDUTY_PCT_LOW_DEFAULT			80
+#define BOARD_PM_SUSPEND_PERIOD_MS_DEFAULT		100
+#define BOARD_PM_SUSPEND_PCT_LOW_DEFAULT		99
+
+/* Multiple Periods Support */
+#define BOARD_PM_PERIOD_CHANGE_INTERVAL			1
+#define BOARD_PM_PERIOD_CNT			3
+
+enum qtn_pm_param {
+	QTN_PM_CURRENT_LEVEL,
+
+	QTN_PM_SUSPEND,
+	QTN_PM_SUSPEND_PERIOD_MS,
+	QTN_PM_SUSPEND_PCT_LOW,
+	QTN_PM_SUSPEND_HRESET,
+	QTN_PM_SUSPEND_ALLCHAINS_DISABLE,
+
+	QTN_PM_PDUTY,
+	QTN_PM_PDUTY_PERIOD_MS,
+	QTN_PM_PDUTY_PCT_LOW,
+	QTN_PM_PDUTY_HRESET,
+	QTN_PM_PDUTY_RXCHAINS_DISABLE,
+
+	QTN_PM_MUC_SLEEP,
+
+	QTN_PM_RXCHAIN_IDLE_COUNT,
+	QTN_PM_RXCHAIN_IDLE_LEVEL,
+	QTN_PM_TXCHAIN_IDLE_COUNT,
+	QTN_PM_TXCHAIN_IDLE_LEVEL,
+
+	QTN_PM_PAUSE_MGMT_PROBERESP,
+	QTN_PM_PAUSE_MGMT_ASSOCRESP,
+	QTN_PM_PAUSE_MGMT_AUTH,
+	QTN_PM_PAUSE_DATA = QTN_PM_PAUSE_MGMT_AUTH,
+
+	/* For Multiple Periods Support */
+	QTN_PM_PERIOD_CHANGE_INTERVAL,	/* How long period setting will be changed(unit: second) */
+	QTN_PM_PERIOD_CNT,	/* How many periods in period group(Max 3) */
+	QTN_PM_PERIOD_GROUP,	/* Period group(Max 3 periods, each <= 255ms, unit: millisecond)*/
+
+	QTN_PM_IOCTL_MAX
+};
+
+#define QTN_PM_PARAM_NAMES	{						\
+	"level",			/* QTN_PM_CURRENT_LEVEL */		\
+	"suspend_level",		/* QTN_PM_SUSPEND */			\
+	"suspend_period",		/* QTN_PM_SUSPEND_PERIOD_MS */		\
+	"suspend_pct",			/* QTN_PM_SUSPEND_PCT_LOW */		\
+	"suspend_hreset",		/* QTN_PM_SUSPEND_HRESET */		\
+	"suspend_allchains",		/* QTN_PM_SUSPEND_ALLCHAINS_DISABLE */	\
+	"pduty_level",			/* QTN_PM_PDUTY */			\
+	"pduty_period",			/* QTN_PM_PDUTY_PERIOD_MS */		\
+	"pduty_pct",			/* QTN_PM_PDUTY_PCT_LOW */		\
+	"pduty_hreset",			/* QTN_PM_PDUTY_HRESET */		\
+	"pduty_rxchains",		/* QTN_PM_PDUTY_RXCHAINS_DISABLE */	\
+	"muc_sleep_level",		/* QTN_PM_MUC_SLEEP */			\
+	"rxchain_count",		/* QTN_PM_RXCHAIN_IDLE_COUNT */		\
+	"rxchain_level",		/* QTN_PM_RXCHAIN_IDLE_LEVEL */		\
+	"txchain_count",		/* QTN_PM_TXCHAIN_IDLE_COUNT */		\
+	"txchain_level",		/* QTN_PM_TXCHAIN_IDLE_LEVEL */		\
+	"pause_proberesp",		/* QTN_PM_PAUSE_MGMT_PROBERESP */	\
+	"pause_assocresp",		/* QTN_PM_PAUSE_MGMT_ASSOCRESP */	\
+	"pause_auth",			/* QTN_PM_PAUSE_MGMT_AUTH */		\
+	"period_change_interval",	/* QTN_PM_PERIOD_CHANGE_INTERVAL */	\
+	"period_cnt",			/* QTN_PM_PERIOD_CNT */	\
+	"period_group"			/* QTN_PM_PERIOD_GROUP */	\
+}
+
+#define QTN_PM_PARAM_DEFAULTS	{							\
+	BOARD_PM_LEVEL_INIT,			/* QTN_PM_CURRENT_LEVEL */		\
+	BOARD_PM_LEVEL_SUSPEND,			/* QTN_PM_SUSPEND */			\
+	BOARD_PM_SUSPEND_PERIOD_MS_DEFAULT,	/* QTN_PM_SUSPEND_PERIOD_MS */		\
+	BOARD_PM_SUSPEND_PCT_LOW_DEFAULT,	/* QTN_PM_SUSPEND_PCT_LOW */		\
+	1,					/* QTN_PM_SUSPEND_HRESET */		\
+	1,					/* QTN_PM_SUSPEND_ALLCHAINS_DISABLE */	\
+	BOARD_PM_LEVEL_DUTY,			/* QTN_PM_PDUTY */			\
+	BOARD_PM_PDUTY_PERIOD_MS_DEFAULT,	/* QTN_PM_PDUTY_PERIOD_MS */		\
+	BOARD_PM_PDUTY_PCT_LOW_DEFAULT,		/* QTN_PM_PDUTY_PCT_LOW */		\
+	0,					/* QTN_PM_PDUTY_HRESET */		\
+	1,					/* QTN_PM_PDUTY_RXCHAINS_DISABLE */	\
+	BOARD_PM_LEVEL_LATENCY_UP,		/* QTN_PM_MUC_SLEEP */			\
+	4,					/* QTN_PM_RXCHAIN_IDLE_COUNT */		\
+	BOARD_PM_LEVEL_DISTANCE_DOWN,		/* QTN_PM_RXCHAIN_IDLE_LEVEL */		\
+	4,					/* QTN_PM_TXCHAIN_IDLE_COUNT */		\
+	BOARD_PM_LEVEL_DISTANCE_DOWN,		/* QTN_PM_TXCHAIN_IDLE_LEVEL */		\
+	60000,					/* QTN_PM_PAUSE_MGMT_PROBERESP */	\
+	5000,					/* QTN_PM_PAUSE_MGMT_ASSOCRESP */	\
+	5000,					/* QTN_PM_PAUSE_MGMT_AUTH */		\
+	BOARD_PM_PERIOD_CHANGE_INTERVAL,	/* QTN_PM_PERIOD_CHANGE_INTERVAL */	\
+	BOARD_PM_PERIOD_CNT,			/* QTN_PM_PERIOD_CNT */	\
+	0x50321E				/* QTN_PM_PERIOD_GROUP(30ms, 50ms, 80ms) */	\
+}
+
+#define QTN_PM_UNPACK_PARAM(x)		((x) & 0xFF)
+#define QTN_PM_UNPACK_VALUE(x)		((x) >> 8)
+#define QTN_PM_PACK_PARAM_VALUE(p, v)	(((p) & 0xFF) | (((v) << 8) & 0xFFFFFF00))
+
+#endif /* __RUBY_PM_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pm_mproc.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pm_mproc.h
new file mode 100644
index 0000000..869303c
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_pm_mproc.h
@@ -0,0 +1,75 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __RUBY_PM_MPROC_H
+#define __RUBY_PM_MPROC_H
+
+#if defined(__KERNEL__) || defined(MUC_BUILD)
+	#include <qtn/mproc_sync.h>
+	#include "ruby_pm.h"
+
+#if QTN_SEM_TRACE
+#define qtn_pm_duty_try_lock(_cpu) qtn_pm_duty_try_lock_dbg(_cpu, __FILE__, __LINE__)
+	RUBY_WEAK(qtn_pm_duty_try_lock_dbg) int
+	qtn_pm_duty_try_lock_dbg(QTN_SOC_CPU current_cpu, char *caller, int caller_line)
+#else
+	RUBY_WEAK(qtn_pm_duty_try_lock) int
+	qtn_pm_duty_try_lock(QTN_SOC_CPU current_cpu)
+#endif
+	{
+		int ret = 0;
+		unsigned long flags;
+		uint32_t fail_sem = 0;
+
+		if (__qtn_mproc_sync_spin_try_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags, &fail_sem)) {
+			u_int32_t lock = qtn_mproc_sync_addr(&qtn_mproc_sync_shared_params_get()->pm_duty_lock);
+			if (!qtn_mproc_sync_mem_read(lock)) {
+				qtn_mproc_sync_mem_write_wmb(lock, 1);
+				ret = 1;
+			}
+			__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+		}
+
+		return ret;
+	}
+
+#if QTN_SEM_TRACE
+#define qtn_pm_duty_unlock(_cpu) qtn_pm_duty_unlock_dbg(_cpu, __FILE__, __LINE__)
+	RUBY_WEAK(qtn_pm_duty_unlock_dbg) void
+	qtn_pm_duty_unlock_dbg(QTN_SOC_CPU current_cpu, char *caller, int caller_line)
+#else
+	RUBY_WEAK(qtn_pm_duty_unlock) void
+	qtn_pm_duty_unlock(QTN_SOC_CPU current_cpu)
+#endif
+	{
+		u_int32_t lock = qtn_mproc_sync_addr(&qtn_mproc_sync_shared_params_get()->pm_duty_lock);
+		unsigned long flags;
+
+		__qtn_mproc_sync_spin_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+		qtn_mproc_sync_mem_write_wmb(lock, 0);
+		__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+	}
+
+#endif	// defined(__KERNEL__) || defined(MUC_BUILD)
+
+#endif /* __RUBY_PM_MPROC_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_spi_api.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_spi_api.h
new file mode 100644
index 0000000..e4a7c34
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_spi_api.h
@@ -0,0 +1,147 @@
+/*
+ *(C) Copyright 2014 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __RUBY_SPI_API_H__
+#define __RUBY_SPI_API_H__
+
+/*
+ * Swap bytes
+ */
+
+#define SWAP32(x)	((((x) & 0x000000ff) << 24)  | \
+			(((x)  & 0x0000ff00) << 8)   | \
+			(((x)  & 0x00ff0000) >> 8)   | \
+			(((x)  & 0xff000000) >> 24))
+
+
+#define SPI_WR_IN_PROGRESS	(BIT(0))
+#define SPI_SR_QUAD_MODE	(BIT(6))
+#define SPI_SCUR_WPSEL		(BIT(7))
+#define SPI_PROTECTION		(0x3c)
+#define SPI_WPS_SELECT		(0x4)
+#define SPI_SECTOR_SIZE_4K	4
+#define ADDRESS_MASK		(0x0fffffff)
+#define SPI_WRITE_DELAY         (9)
+#define SPI_FLASH_ADDR		(0x90000000)
+#define SECTOR_MASK		(0x01ffff00)
+#define SECTOR_ERASE_WAIT_TIME	(90)
+#define CHIP_ERASE_WAIT_TIME	(50)
+#define SPI_WRITE_TIMEOUT       (1) /*sec*/
+#define SPI_ERASE_TIMEOUT       (5) /*sec*/
+#define SPI_UNLOCK_TIMEOUT      (5) /*sec*/
+#define MX_25L12805_ID		(0xc22018)
+#define MX_25L25635_ID		(0xc22019)
+#define MX_25L6405_ID		(0xc22017)
+#define M25P32_ID		(0x202016)
+#define W25Q128_ID		(0xef4018)
+#define	MX25L512E		(0xc22010)
+#define S25FL129P		(0x012018)
+#define SPI_SECTOR_64K		(64 * 1024)
+#define SPI_SECTOR_COUNT_256	(256)
+#define SPI_SECTOR_4K		(4 * 1024)
+#define SPI_SECTOR_COUNT_4K	(4 * 1024)
+#define SPI_SECTOR_INDEX	(16)
+#define SPI_SR_QUAD_MODE_MASK(X)	(((X) & 0xfffff0ff) | (2<<8))
+#define SPI_SR_SINGLE_MODE_MASK(X)	((X) & 0xfffff0ff)
+#define RUBY_SPI_READ_SCUR_MASK(X)	(((X) & 0xffffff00) | 2)
+#define RUBY_SPI_READ_DPB_MASK(X)	(((X) & 0xffffff00) | 0x86)
+#define RUBY_SPI_GBLOCK_LOCK_MASK(X)	(((X) & 0xffffff00) | 0x1)
+#define RUBY_SPI_GBLOCK_UNLOCK_MASK(X)	(((X) & 0xffffff00) | 0x1)
+#define RUBY_SPI_WRITE_PRO_SEL_MASK(X)	(((X) & 0xffffff00) | 1)
+#define RUBY_SPI_WRITE_WPS_SEL_MASK(X)	(((X) & 0xffffff00) | 2)   /* writing 2 bytes */
+#define RUBY_SPI_WRITE_DPB_MASK(X)	(((X) & 0xffffff00) | 0x86)
+#define RUBY_SPI_WRITE_IBUP_MASK(X)	(((X) & 0xffffff00) | 0x06)
+#define RUBY_SPI_WRITE_WPS_MASK(X)	(((X) & 0xffffff00) | 0x64)
+#define RUBY_SPI_READ_ID_MASK		(0xffffff)
+#define RUBY_SPI_READ_STATUS_MASK	(0xff)
+#define SECTOR_ERASE_OP20		(0x02)
+#define SPI_WPS_ENABLE			(0x00640000)
+#define SPI_PROTECT_MODE		"spi_protect"
+#define SPI_PROTECT_MODE_STR		17
+#define SPI_PROTECT_MODE_ENABLE		"enable"
+#define SPI_PROTECT_MODE_FLAG_DISABLE	1
+#define SPI_PROTECT_MODE_FLAG_ENABLE	0
+
+#define RUBY_SPI_PASS_CMD_MASK(X)	(((X) & 0xfffffff0) | 0x1)
+#define RUBY_SPI_PASS_ONE_MASK(X)	(((X) & 0xfffffff0) | 0x2)
+#define RUBY_SPI_PASS_TWO_MASK(X)	(((X) & 0xfffffff0) | 0x3)
+#define RUBY_SPI_PASS_FOUR_MASK(X)	(((X) & 0xfffffff0) | 0x5)
+#define RUBY_SPI_PASS_EIGHT_MASK(X)	(((X) & 0xfffffff0) | 0x9)
+#define RUBY_SPI_PASS_FIVE_MASK(X)	(((X) & 0xfffffff0) | 0x6)
+#define RUBY_SPI_PASS_ONE_ADDR_MASK(X)	(((X) & 0xfffffff0) | 0x6)
+#define RUBY_SPI_CMD_MASK(X)		(((X) & 0xff) << 8)
+#define RUBY_SPI_READ_DATA_MASK		(0xff)
+#define SPI_LOCK_SPM		(BIT(1))
+#define SPI_LOCK_PPM		(BIT(2))
+#define SPI_LOCK_SPMOTP		(BIT(3))
+
+#define SPI_MICRON_STATUS_BP_MASK		(BIT(6)|BIT(4)|BIT(3)|BIT(2))
+#define SPI_MICRON_BP_STATUS(X)			((X & 1<<3)<<3 |(X & 7<<0)<<2 )
+#define SPI_MICRON_STATUS_BP(X)			((X & 1<<6)>>3 |(X & 7<<2)>>2 )
+#define SPI_MICRON_STATUS_LOCK_MASK		(BIT(7))
+#define SPI_MICRON_STATUS_BOTTON_MASK	(BIT(5))
+
+/*
+*
+* Ruby uses 3 msb bytes to form addresses.
+* Topaz uses all 4 bytes, just skip first msb if in 3-bytes address mode.
+*
+*/
+#define SPI_MEM_ADDR(addr)      (((addr) & 0x00FFFFFF))
+#define SPI_MEM_ADDR_4B(addr)	(((addr) & 0xFFFFFFFF))
+
+enum SPI_TYPES{
+	NOT_SUPPORTED,
+	ATMEL,
+	SPANSION,
+	SST,
+	ST_MICROELECTRONICS,
+	WINBOND,
+	MACRONIX,
+	ESMT,
+	EON,
+	MICRON,
+	GD
+};
+
+struct flash_info {
+	char *name;
+	/* JEDEC id zero means "no ID" (most older chips); otherwise it has
+	 * a high byte of zero plus three data bytes: the manufacturer id,
+	 * then a two byte device id.
+	 */
+	u32 jedec_id;
+
+	/* The size listed here is what works with OPCODE_SE, which isn't
+	 * necessarily called a "sector" by the vendor.
+	 */
+	unsigned sector_size;
+	u16 n_sectors;
+
+	u16 flags;
+
+	unsigned freq;
+	enum SPI_TYPES single_unprotect_mode;
+};
+
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_spi_flash_data.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_spi_flash_data.h
new file mode 100644
index 0000000..15cf2e7
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_spi_flash_data.h
@@ -0,0 +1,150 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * MTD SPI driver for ST M25Pxx (and similar) serial flash chips
+ *
+ * Author: Mike Lavender, mike@steroidmicros.com
+ *
+ * Copyright (c) 2005, Intec Automation Inc.
+ *
+ * Some parts are based on lart.c by Abraham Van Der Merwe
+ *
+ * Cleaned up and generalized based on mtd_dataflash.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+
+/*****************************************************
+ * Structure definitions as well as data was borrowed
+ * from Linux kernel, 2.6.30 version.
+ * File name is drivers/mtd/devices/m25p80.c
+ */
+
+#ifndef __SPI_FLASH_DATA_H
+#define __SPI_FLASH_DATA_H
+
+#include "ruby_spi_api.h"
+
+#define FREQ_UNKNOWN		0
+#define FREQ_MHZ(num)		(num * 1000000)
+
+/* NOTE: double check command sets and memory organization when you add
+ * more flash chips.  This current list focusses on newer chips, which
+ * have been converging on command sets which including JEDEC ID.
+ */
+
+static struct flash_info flash_data [] = {
+
+	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
+	{ "at25fs010", 0x1f6601, 32 * 1024, 4, 0, FREQ_UNKNOWN, ATMEL, },
+	{ "at25fs040", 0x1f6604, 64 * 1024, 8, 0, FREQ_UNKNOWN, ATMEL, },
+
+	{ "at25df041a", 0x1f4401, 64 * 1024, 8, 0, FREQ_UNKNOWN, ATMEL, },
+	{ "at25df641", 0x1f4800, 64 * 1024, 128, 0, FREQ_UNKNOWN, ATMEL, },
+
+	{ "at26f004", 0x1f0400, 64 * 1024, 8, 0, FREQ_UNKNOWN, ATMEL, },
+	{ "at26df081a", 0x1f4501, 64 * 1024, 16, 0, FREQ_UNKNOWN, ATMEL, },
+	{ "at26df161a", 0x1f4601, 64 * 1024, 32, 0, FREQ_UNKNOWN, ATMEL, },
+	{ "at26df321",  0x1f4701, 64 * 1024, 64, 0, FREQ_UNKNOWN, ATMEL, },
+	{ "at25f512b",  0x1f6500,  4 * 1024, 16, SECTOR_ERASE_OP20, FREQ_MHZ(50), ATMEL, },
+
+	/* Spansion -- single (large) sector size only, at least
+	 * for the chips listed here (without boot sectors).
+	 */
+	{ "s25sl004a", 0x010212, 64 * 1024, 8, 0, FREQ_UNKNOWN, SPANSION },
+	{ "s25sl008a", 0x010213, 64 * 1024, 16, 0, FREQ_UNKNOWN, SPANSION },
+	{ "s25sl016a", 0x010214, 64 * 1024, 32, 0, FREQ_UNKNOWN, SPANSION },
+	{ "s25fl032", 0x010215, 64 * 1024, 64, 0, FREQ_UNKNOWN, SPANSION },
+	{ "s25sl064a", 0x010216, 64 * 1024, 128, 0, FREQ_UNKNOWN, SPANSION },
+	{ "s25sl12801", 0x012018, 64 * 1024, 256, 0, FREQ_MHZ(40), SPANSION }, /* S25FL129P has same jedec_id, but higher frequency - so it should be supported, but at not at maximum speed */
+	{ "s25fl256s", 0x010219, 64 * 1024, 512, 0, FREQ_UNKNOWN, SPANSION },
+
+	/* Micron */
+	{ "N25Q032", 0x20ba16, 64 * 1024, 64, 0, FREQ_UNKNOWN, MICRON },
+	{ "N25Q256", 0x20ba19, 64 * 1024, 512, 0, FREQ_MHZ(80), MICRON },
+
+	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
+	{ "sst25vf040b", 0xbf258d, 64 * 1024, 8, 0, FREQ_UNKNOWN, SST },
+	{ "sst25vf080b", 0xbf258e, 64 * 1024, 16, 0, FREQ_UNKNOWN, SST },
+	{ "sst25vf016b", 0xbf2541, 64 * 1024, 32, 0, FREQ_UNKNOWN, SST },
+	{ "sst25vf032b", 0xbf254a, 64 * 1024, 64, 0, FREQ_UNKNOWN, SST },
+
+	/* ST Microelectronics -- newer production may have feature updates */
+	{ "m25p05", 0x202010, 32 * 1024, 2, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25p10", 0x202011, 32 * 1024, 4, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25p20", 0x202012, 64 * 1024, 4, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25p40", 0x202013, 64 * 1024, 8, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25p80", 0x202014, 64 * 1024, 16, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25p16", 0x202015, 64 * 1024, 32, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25p32", 0x202016, 64 * 1024, 64, 0, FREQ_MHZ(75), ST_MICROELECTRONICS },
+	{ "m25p64", 0x202017, 64 * 1024, 128, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25p128", 0x202018, 256 * 1024, 64, 0, FREQ_MHZ(54), ST_MICROELECTRONICS },
+
+	{ "m45pe80", 0x204014, 64 * 1024, 16, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m45pe16", 0x204015, 64 * 1024, 32, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+
+	{ "m25pe80", 0x208014, 64 * 1024, 16, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+	{ "m25pe16", 0x208015, 64 * 1024, 32, 0, FREQ_UNKNOWN, ST_MICROELECTRONICS },
+
+	/* Winbond -- w25x "blocks" are 64K, except w25q128 is 4K  */
+	{ "w25x05", 0xef3010, 4 * 1024,  16, SECTOR_ERASE_OP20, FREQ_MHZ(104), WINBOND },
+	{ "w25x10", 0xef3011, 64 * 1024, 2, 0, FREQ_UNKNOWN, WINBOND },
+	{ "w25x20", 0xef3012, 4 * 1024, 64, SECTOR_ERASE_OP20, FREQ_UNKNOWN, WINBOND },
+	{ "w25x40", 0xef3013, 64 * 1024, 8, 0, FREQ_UNKNOWN, WINBOND },
+	{ "w25x80", 0xef3014, 64 * 1024, 16, 0, FREQ_UNKNOWN, WINBOND },
+	{ "w25x16", 0xef3015, 64 * 1024, 32, 0, FREQ_UNKNOWN, WINBOND },
+	{ "w25x32", 0xef3016, 64 * 1024, 64, 0, FREQ_UNKNOWN, WINBOND },
+	{ "w25x64", 0xef3017, 64 * 1024, 128, 0, FREQ_UNKNOWN, WINBOND },
+	{ "w25q64", 0xef4017, 64 * 1024, 128, 0, FREQ_MHZ(80), WINBOND },
+	{ "w25q128", 0xef4018, 64 * 1024, 256, 0, FREQ_MHZ(104), WINBOND },
+
+	/* Macronix -- MX25L "blocks" are 64K, except mx25l12836e and mx25l25635 are 4K */
+	{ "mx25l512e", 0xc22010, 4 * 1024, 16, SECTOR_ERASE_OP20, FREQ_MHZ(104), MACRONIX },
+	{ "mx25l1606e", 0xc22015, 64 * 1024, 32, 0, FREQ_MHZ(50), MACRONIX },
+	{ "mx25l6405", 0xc22017, 64 * 1024, 128, 0, FREQ_MHZ(50), MACRONIX },
+	{ "mx25l12836e", 0xc22018, 64 * 1024, 256, 0, FREQ_MHZ(104), MACRONIX },
+	{ "mx25l25635f", 0xc22019, 64 * 1024, 512, 0, FREQ_MHZ(104), MACRONIX },
+	{ "mx25l25655f", 0xc22619, 64 * 1024, 512, 0, FREQ_MHZ(104), MACRONIX },
+	{ "mx25l51245g", 0xc2201A, 64 * 1024, 1024, 0, FREQ_MHZ(104), MACRONIX },
+
+	/* ESMT -- F25L "blocks" are 64K, "sectors" are 4KiB */
+	{ "f25l05pa", 0x8c3010, 4 * 1024, 16, SECTOR_ERASE_OP20, FREQ_MHZ(50), ESMT },
+	{ "f25l64qa", 0x8c4117, 64 * 1024, 128, 0, FREQ_MHZ(50), ESMT },
+
+	/* EON -- EN25Q "blocks" are 64K, "sectors" are 4KiB */
+	{ "en25f05", 0x1c3110, 4 * 1024, 16, SECTOR_ERASE_OP20, FREQ_MHZ(100), EON },
+	{ "en25q64", 0x1c3017, 64 * 1024, 128, 0, FREQ_MHZ(104), EON },
+	{ "en25q128", 0x1c3018, 64 * 1024, 256, 0, FREQ_MHZ(104), EON },
+
+	/* GD -- GD25Q "blocks" are 64K, "sectors" are 4KiB */
+	{ "qd25q512", 0xc84010, 4 * 1024, 16, SECTOR_ERASE_OP20, FREQ_MHZ(104), GD },
+	{ "gd25q128", 0xc84018, 64 * 1024, 256, 0, FREQ_MHZ(104), GD },
+
+};
+
+#endif // #ifndef __SPI_FLASH_DATA_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_version.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_version.h
new file mode 100644
index 0000000..342d60e
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ruby_version.h
@@ -0,0 +1,17 @@
+#ifndef __RUBY_RELEASE_H__
+#define __RUBY_RELEASE_H__
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ *  Header file to hold build version information.
+ *
+ */
+
+////////////////////////////////////////////////////////////////////////////
+// Defines
+////////////////////////////////////////////////////////////////////////////
+
+#define RUBY_UBOOT_VERSION	"v37.4.0.29"
+
+#endif // __RUBY_RELEASE_H__
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_config.h b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_config.h
new file mode 100644
index 0000000..a71a123
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_config.h
@@ -0,0 +1,164 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Topaz platform.
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef __TOPAZ_CONFIG_H
+#define __TOPAZ_CONFIG_H
+
+#include "current_platform.h"
+
+#if !TOPAZ_FPGA_PLATFORM
+#undef TOPAZ_ICACHE_WORKAROUND
+#endif
+
+/*
+ * Control registers move depending on unified + alias bit
+ */
+#if TOPAZ_SUPPORT_UMM
+#define TOPAZ_MMAP_UNIFIED	1
+#else
+#define TOPAZ_MMAP_UNIFIED	0
+#endif
+
+#define TOPAZ_MMAP_ALIAS	0
+#define TOPAZ_RX_ACCELERATE	1
+
+/* If MU-MIMO done in HDP or SDP */
+#define QTN_HDP_MU		1
+
+#if TOPAZ_MMAP_UNIFIED
+	#define RUBY_MMAP_FLIP		0
+	#define TOPAZ_UBOOT_UNIFIED_MAP		1
+#else
+	#if !(defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD))
+		#define RUBY_MMAP_FLIP		1
+	#else
+		#define RUBY_MMAP_FLIP		0
+	#endif
+	#define TOPAZ_UBOOT_UNIFIED_MAP		0
+#endif
+
+#if TOPAZ_MMAP_ALIAS && (defined(__linux__) || TOPAZ_UBOOT_UNIFIED_MAP)
+	#define RUBY_SYS_CTL_MMAP_REGVAL	(TOPAZ_SYS_CTL_UNIFIED_MAP | TOPAZ_SYS_CTL_ALIAS_MAP)
+#elif TOPAZ_MMAP_UNIFIED && (defined(__linux__) || TOPAZ_UBOOT_UNIFIED_MAP)
+	#define RUBY_SYS_CTL_MMAP_REGVAL	TOPAZ_SYS_CTL_UNIFIED_MAP
+#elif RUBY_MMAP_FLIP || defined(TOPAZ_PLATFORM)
+	#define RUBY_SYS_CTL_MMAP_REGVAL	RUBY_SYS_CTL_LINUX_MAP(0x1)
+#else
+	#undef RUBY_SYS_CTL_MMAP_REGVAL
+#endif
+
+#if QTN_HDP_MU
+#define QTN_HDP_MU_FCS_WORKROUND	1
+#else
+#define QTN_HDP_MU_FCS_WORKROUND	0
+#endif
+
+#if TOPAZ_MMAP_ALIAS && !TOPAZ_MMAP_UNIFIED
+	#error Alias map requires unified map
+#endif
+
+#if TOPAZ_MMAP_ALIAS
+	#define TOPAZ_ALIAS_MAP_SWITCH(a, b)	(b)
+#else
+	#define TOPAZ_ALIAS_MAP_SWITCH(a, b)	(a)
+#endif
+
+/* Topaz fixed phy addresses */
+#define TOPAZ_FPGAA_PHY0_ADDR		2
+#define TOPAZ_FPGAA_PHY1_ADDR		3
+#define TOPAZ_FPGAB_PHY0_ADDR		4
+#define TOPAZ_FPGAB_PHY1_ADDR		1
+#define TOPAZ_PHY0_ADDR				1
+#define TOPAZ_PHY1_ADDR				3
+
+#ifndef TOPAZ_FPGA_PLATFORM
+	#define TOPAZ_FPGA_PLATFORM	0
+#endif
+
+/* Definition indicates that Topaz platform is FPGA */
+#if TOPAZ_FPGA_PLATFORM
+	/* CLK speeds are in MHz and 1/10th the speed of actual ASIC */
+	#define TOPAZ_SERIAL_BAUD	38400
+	#define TOPAZ_APB_CLK		12500000
+	#define TOPAZ_AHB_CLK		25000000
+	#define TOPAZ_CPU_CLK		50000000
+	#define RUBY_FPGA_DDR
+#else
+	#define TOPAZ_SERIAL_BAUD	115200
+	#define TOPAZ_APB_CLK		125000000
+	#define TOPAZ_AHB_CLK		250000000
+	#define TOPAZ_CPU_CLK		500000000
+	#define RUBY_ASIC_DDR
+#endif /* #if TOPAZ_FPGA_PLATFORM */
+
+/*
+ * Setting UPF_SPD_FLAG gives a developer the option to set the
+ * flag to match a UPF_ define from <linux>/include/linux/serial_core.h
+ * or set the value to 0 to use the default baud rate setting DEFAULT_BAUD
+ */
+#define UPF_SPD_FLAG	0
+#define DEFAULT_BAUD	TOPAZ_SERIAL_BAUD
+
+/*
+ * Re-use Ruby defines to simplify the number of changes required
+ * to compile new binaries for Topaz
+ */
+#define RUBY_SERIAL_BAUD	TOPAZ_SERIAL_BAUD
+#define RUBY_FIXED_DEV_CLK	TOPAZ_APB_CLK
+#define RUBY_FIXED_CPU_CLK	TOPAZ_CPU_CLK
+
+#ifdef PLATFORM_DEFAULT_BOARD_ID
+        #define DEFAULT_BOARD_ID	PLATFORM_DEFAULT_BOARD_ID
+#else
+	/* Default board id used to match Topaz setting if there is no SPI Flash */
+	#define DEFAULT_BOARD_ID	QTN_TOPAZ_BB_BOARD
+#endif /* PLATFORM_DEFAULT_BOARD_ID */
+
+#ifndef PLATFORM_ARC7_MMU_VER
+	#define PLATFORM_ARC7_MMU_VER	2
+#endif
+
+#define CONFIG_RUBY_BROKEN_IPC_IRQS	0
+
+#define RUBY_IPC_HI_IRQ(bit_num)	((bit_num) + 8)
+#define RUBY_M2L_IPC_HI_IRQ(bit_num)	(bit_num)
+
+#define PLATFORM_REG_SWITCH(reg1, reg2)	(reg2)
+
+#define writel_topaz(a, b)		writel(a, b)
+#define writel_ruby(a, b)
+
+#define QTN_VLAN_LLC_ENCAP		1
+
+#define TOPAZ_128_NODE_MODE		1
+
+#define TOPAZ_ETH_REFLECT_SW_FWD	0
+
+#define DSP_ENABLE_STATS		1
+
+#endif /* #ifndef __TOPAZ_CONFIG_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_emac.h b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_emac.h
new file mode 100644
index 0000000..b6c0bcc
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_emac.h
@@ -0,0 +1,281 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_EMAC_H
+#define __TOPAZ_EMAC_H
+
+#include "topaz_platform.h"
+#ifdef _KERNEL
+#include "ruby_arasan_emac_ahb.h"
+#endif
+
+#define TOPAZ_EMAC_NUM_DPI_FIELDS		32
+#define TOPAZ_EMAC_NUM_DPI_FILTERS		16
+#define TOPAZ_EMAC_NUM_DPI_IPTUPLES		8
+
+#define TOPAZ_EMAC_WRAP_CTRL			0x300
+# define TOPAZ_EMAC_WRAP_CTRL_VERSION			0x0000000f
+#define TOPAZ_EMAC_RXP_CTRL			0x304
+# define TOPAZ_EMAC_RXP_CTRL_ENABLE			BIT(0)
+# define TOPAZ_EMAC_RXP_CTRL_ENDIAN			BIT(1)
+# define TOPAZ_EMAC_RXP_CTRL_TQE_SYNC_EN_BP		BIT(2)
+# define TOPAZ_EMAC_RXP_CTRL_TQE_SYNC_EN_SUCC		BIT(3)
+# define TOPAZ_EMAC_RXP_CTRL_SYNC_NONE			0
+# define TOPAZ_EMAC_RXP_CTRL_SYNC_TQE			BIT(4)
+# define TOPAZ_EMAC_RXP_CTRL_SYNC_RX_DMA		BIT(5)
+# define TOPAZ_EMAC_RXP_CTRL_SYNC_RX_PARSER		(BIT(4) | BIT(5))
+# define TOPAZ_EMAC_RXP_CTRL_SRESET			BIT(8)
+#define TOPAZ_EMAC_TXP_CTRL			0x308
+# define TOPAZ_EMAC_TXP_CTRL_AHB_ENABLE			BIT(0)
+# define TOPAZ_EMAC_TXP_CTRL_SRESET			BIT(8)
+#define TOPAZ_EMAC_TXP_Q_FULL			0x320
+# define TOPAZ_EMAC_TXP_Q_FULL_BIT			BIT(31)
+#define TOPAZ_EMAC_TXP_DESC_PTR			0x324
+#define TOPAZ_EMAC_DEBUG_BUS_SEL		0x328
+#define TOPAZ_EMAC_DEBUG_BUS_SEL_TXP_PTR		0x00000001
+
+#define TOPAZ_EMAC_BUFFER_POOLS			0x32c
+# define TOPAZ_EMAC_BUFFER_POOLS_RX_REPLENISH		0x00000003
+# define TOPAZ_EMAC_BUFFER_POOLS_RX_REPLENISH_S		0
+# define TOPAZ_EMAC_BUFFER_POOLS_TX_RETURN		0x0000000c
+# define TOPAZ_EMAC_BUFFER_POOLS_TX_RETURN_S		2
+#define TOPAZ_EMAC_TXP_STATUS			0x330
+
+/*
+ * EMAC Tx-ring read/write pointer; write 0x1 to reg_0x328 before reading the pointer.
+ * Bit[12:0]  : EMAC read pointer
+ * Bit[25:13] : TQE write pointer
+ */
+#define TOPAZ_EMAC_TXP_READ_PTR(stat)			((stat) & 0x1FFF)
+#define TOPAZ_EMAC_TXP_WRITE_PTR(stat)			(((stat) >> 13) & 0x1FFF)
+
+#define TOPAZ_EMAC_DESC_LIMIT			0x334
+# define TOPAZ_EMAC_DESC_LIMIT_MASK			0x0000ffff
+#define TOPAZ_EMAC_RXP_PRIO_CTRL		0x350
+# define TOPAZ_EMAC_RXP_PRIO_CTRL_TID_SEL		0x00000003
+# define TOPAZ_EMAC_RXP_PRIO_CTRL_TID_SEL_S		0
+# define TOPAZ_EMAC_RXP_PRIO_CTRL_TID_MINMAX		0x00000004
+# define TOPAZ_EMAC_RXP_PRIO_CTRL_TID_MINMAX_S		2
+# define TOPAZ_EMAC_RXP_PRIO_CTRL_SW_TID		0x00000f00
+# define TOPAZ_EMAC_RXP_PRIO_CTRL_SW_TID_S		8
+# define TOPAZ_EMAC_RXP_PRIO_CTRL_SW_TID_SEL		BIT(12)
+
+#define TOPAZ_EMAC_RXP_OUTPORT_CTRL		0x354
+# define TOPAZ_EMAC_RXP_OP_CTRL_DA			0x00000003
+# define TOPAZ_EMAC_RXP_OP_CTRL_DA_S			0
+# define TOPAZ_EMAC_RXP_OP_CTRL_VLAN			0x0000000c
+# define TOPAZ_EMAC_RXP_OP_CTRL_VLAN_S			2
+# define TOPAZ_EMAC_RXP_OP_CTRL_IP			0x00000030
+# define TOPAZ_EMAC_RXP_OP_CTRL_IP_S			4
+# define TOPAZ_EMAC_RXP_OP_CTRL_DPI			0x000000c0
+# define TOPAZ_EMAC_RXP_OP_CTRL_DPI_S			6
+# define TOPAZ_EMAC_RXP_OP_CTRL_MCAST_EN		BIT(8)
+# define TOPAZ_EMAC_RXP_OP_CTRL_MCAST_SEL		BIT(9)
+# define TOPAZ_EMAC_RXP_OP_CTRL_MCAST_PORT		0x00003c00
+# define TOPAZ_EMAC_RXP_OP_CTRL_MCAST_PORT_S		10
+# define TOPAZ_EMAC_RXP_OP_CTRL_DYNAMIC_FAIL_PORT	0x000f0000
+# define TOPAZ_EMAC_RXP_OP_CTRL_DYNAMIC_FAIL_PORT_S	16
+# define TOPAZ_EMAC_RXP_OP_CTRL_SW_BACKDOOR_PORT	0x00f00000
+# define TOPAZ_EMAC_RXP_OP_CTRL_SW_BACKDOOR_PORT_S	20
+# define TOPAZ_EMAC_RXP_OP_CTRL_STATIC_FAIL_PORT	0x0f000000
+# define TOPAZ_EMAC_RXP_OP_CTRL_STATIC_FAIL_PORT_S	24
+# define TOPAZ_EMAC_RXP_OP_CTRL_STATIC_PORT_SEL		0x70000000
+# define TOPAZ_EMAC_RXP_OP_CTRL_STATIC_PORT_SEL_S	28
+# define TOPAZ_EMAC_RXP_OP_CTRL_STATIC_ENABLE		BIT(31)
+#ifndef __ASSEMBLY__
+union topaz_emac_rxp_outport_ctrl {
+	struct {
+		uint32_t word0;
+	} raw;
+	struct {
+		uint32_t da_prio		: 2,
+			vlan_prio		: 2,
+			ip_prio			: 2,
+			dpi_prio		: 2,
+			mcast_en		: 1,
+			mcast_sel		: 1,
+			mcast_port		: 4,
+			__unused		: 2,
+			dynamic_fail_port	: 4,
+			sw_backdoor_port	: 4,
+			static_fail_port	: 4,
+			static_port_sel		: 3,
+			static_mode_en		: 1;
+	} data;
+};
+#endif	// __ASSEMBLY__
+
+#define TOPAZ_EMAC_RXP_OUTNODE_CTRL		0x358
+union topaz_emac_rxp_outnode_ctrl {
+	struct {
+		uint32_t word0;
+	} raw;
+	struct {
+		uint32_t mcast_node		: 6,
+			 __unused		: 4,
+			 dynamic_fail_node	: 6,
+			 sw_backdoor_node	: 6,
+			 static_fail_node	: 6,
+			 static_node_sel	: 3,
+			 __unused2		: 1;
+	} data;
+};
+#define TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID		0x380
+# define TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(pcp, tid)	(((tid) & 0xf) << ((pcp) * 4))
+#define TOPAZ_EMAC_RXP_VLAN_PRI_CTRL		0x384
+# define TOPAZ_EMAC_RXP_VLAN_PRI_CTRL_TAG		0x00000003
+# define TOPAZ_EMAC_RXP_VLAN_PRI_CTRL_TAG_S		0
+#define TOPAZ_EMAC_RXP_VLAN_TAG_0_1		0x388
+# define TOPAZ_EMAC_RXP_VLAN_TAG_0			0x0000ffff
+# define TOPAZ_EMAC_RXP_VLAN_TAG_0_S			0
+# define TOPAZ_EMAC_RXP_VLAN_TAG_1			0xffff0000
+# define TOPAZ_EMAC_RXP_VLAN_TAG_1_S			16
+#define TOPAZ_EMAC_RXP_VLAN_TAG_2_3		0x38c
+# define TOPAZ_EMAC_RXP_VLAN_TAG_2			0x0000ffff
+# define TOPAZ_EMAC_RXP_VLAN_TAG_2_S			0
+# define TOPAZ_EMAC_RXP_VLAN_TAG_3			0xffff0000
+# define TOPAZ_EMAC_RXP_VLAN_TAG_3_S			16
+#define TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(x)	(0x390 + 4 * (x))
+#define TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REGS	8
+#define TOPAZ_EMAC_RXP_IP_CTRL			0x3b0
+#define TOPAZ_EMAC_RXP_DPI_TID_MAP_REG(x)	(0x3b4 + 4 * (x))
+#define TOPAZ_EMAC_RXP_DPI_TID_MAP_INDEX(x)	TOPAZ_EMAC_RXP_DPI_TID_MAP_REG((x) >> 3)
+#define TOPAZ_EMAC_RXP_TID_MAP_INDEX_SHIFT(x)	(((x) & 0x7) << 2)
+#define TOPAZ_EMAC_RXP_TID_MAP_INDEX_MASK(x)	(0xF << TOPAZ_EMAC_RXP_TID_MAP_INDEX_SHIFT(x))
+#define TOPAZ_EMAC_RXP_DPI_CTRL			0x3bc
+# define TOPAZ_EMAC_RXP_DPI_CTRL_DPI_FAIL_TID		0x0000000f
+#define TOPAZ_EMAC_RXP_STATUS			0x3c0
+#define TOPAZ_EMAC_RXP_CST_SEL			0x3c4
+#define TOPAZ_EMAC_RXP_FRAME_CNT_CLEAR		0x3cc
+# define TOPAZ_EMAC_RXP_FRAME_CNT_CLEAR_ERROR		BIT(0)
+# define TOPAZ_EMAC_RXP_FRAME_CNT_CLEAR_TOTAL		BIT(1)
+# define TOPAZ_EMAC_RXP_FRAME_CNT_CLEAR_DA_MATCH	BIT(2)
+# define TOPAZ_EMAC_RXP_FRAME_CNT_CLEAR_SA_MATCH	BIT(3)
+#define TOPAZ_EMAC_FRM_COUNT_ERRORS		0x3d0
+#define TOPAZ_EMAC_FRM_COUNT_TOTAL		0x3d4
+#define TOPAZ_EMAC_FRM_COUNT_DA_MATCH		0x3d8
+#define TOPAZ_EMAC_FRM_COUNT_SA_MATCH		0x3dc
+#define TOPAZ_EMAC_RX_DPI_FIELD_VAL(x)		(0x400 + 4 * (x))
+#define TOPAZ_EMAC_RX_DPI_FIELD_MASK(x)		(0x480 + 4 * (x))
+#define TOPAZ_EMAC_RX_DPI_FIELD_CTRL(x)		(0x500 + 4 * (x))
+#define TOPAZ_EMAC_RX_DPI_FIELD_GROUP(x)	(0x580 + 4 * (x))
+#define TOPAZ_EMAC_RX_DPI_OUT_CTRL(x)		(0x5c0 + 4 * (x))
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_NODE	0x0000007f
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_NODE_S	0
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_PORT	0x00000780
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_PORT_S	7
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_COMBO	0x00001800
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_COMBO_S	11
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_OFF		0x0
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_IPTUPLE	0x1
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_DPI		0x2
+# define TOPAZ_EMAC_RX_DPI_OUT_CTRL_BOTH	0x3
+
+#ifndef __ASSEMBLY__
+enum topaz_emac_rx_dpi_anchor {
+	TOPAZ_EMAC_RX_DPI_ANCHOR_FRAME	= 0x0,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_VLAN0	= 0x1,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_VLAN1	= 0x2,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_VLAN2	= 0x3,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_VLAN3	= 0x4,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_OTHER	= 0x5,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_LLC	= 0x6,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_IPV4	= 0x7,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_IPV6	= 0x8,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_TCP	= 0x9,
+	TOPAZ_EMAC_RX_DPI_ANCHOR_UDP	= 0xa,
+};
+
+enum topaz_emac_rx_dpi_cmp_op {
+	TOPAZ_EMAC_RX_DPI_CMP_OP_EQUAL	= 0x0,
+	TOPAZ_EMAC_RX_DPI_CMP_OP_NEQUAL	= 0x1,
+	TOPAZ_EMAC_RX_DPI_CMP_OP_GT	= 0x2,
+	TOPAZ_EMAC_RX_DPI_CMP_OP_LT	= 0x3,
+};
+
+#define TOPAZ_EMAC_RX_DPI_ANCHOR_NAMES	{		\
+	"frame", "vlan0", "vlan1", "vlan2", "vlan3",	\
+	"other", "llc", "ipv4", "ipv6", "tcp", "udp",	\
+}
+
+#define TOPAZ_EMAC_RX_DPI_CMP_OP_NAMES	{		\
+	"==", "!=", ">", "<",				\
+}
+
+
+union topaz_emac_rx_dpi_ctrl {
+	uint32_t raw;
+	struct {
+		uint32_t offset		: 9,
+			 anchor		: 4,
+			 cmp_op		: 2,
+			 enable		: 1,
+			 __unused	: 16;
+	} data;
+};
+#endif	// __ASSEMBLY__
+
+#define TOPAZ_EMAC_RX_DPI_IPT_GROUP(x)		(0x720 + 4 * (x))
+#define TOPAZ_EMAC_RX_DPI_IPT_GROUP_SRCADDR(x)	BIT((x) + 0)
+#define TOPAZ_EMAC_RX_DPI_IPT_GROUP_DESTADDR(x)	BIT((x) + 8)
+#define TOPAZ_EMAC_RX_DPI_IPT_GROUP_DESTPORT(x)	BIT((x) + 16)
+#define TOPAZ_EMAC_RX_DPI_IPT_GROUP_SRCPORT(x)	BIT((x) + 24)
+#define TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA(x)	(0x600 + 4 * (x))
+#define TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA_MAX	8
+#define TOPAZ_EMAC_RX_DPI_IPT_PORT_DEST		0x0000FFFF
+#define TOPAZ_EMAC_RX_DPI_IPT_PORT_DEST_S	0
+#define TOPAZ_EMAC_RX_DPI_IPT_PORT_SRC		0xFFFF0000
+#define TOPAZ_EMAC_RX_DPI_IPT_PORT_SRC_S	16
+#define TOPAZ_EMAC_RX_DPI_IPT_MEM_COM		0x620
+#define TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_ENT	0x0000000F
+#define TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_ENT_S	0
+#define TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_WRITE	BIT(8)
+#define TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_READ	BIT(9)
+#define TOPAZ_EMAC_RX_DPI_IPT_ENTRIES			9
+#define TOPAZ_EMAC_RX_DPI_IPT_ENTRY_SRCADDR_START	0
+#define TOPAZ_EMAC_RX_DPI_IPT_ENTRY_SRCADDR_END		4
+#define TOPAZ_EMAC_RX_DPI_IPT_ENTRY_DESTADDR_START	4
+#define TOPAZ_EMAC_RX_DPI_IPT_ENTRY_DESTADDR_END	8
+#define TOPAZ_EMAC_RX_DPI_IPT_ENTRY_PORTS		8
+
+
+#define TOPAZ_EMAC_IP_PROTO_ENTRY(x)		(0xa000 + 4 * (x))
+#define TOPAZ_EMAC_IP_PROTO_ENTRIES		256
+#define TOPAZ_EMAC_IP_PROTO_OUT_NODE		0x0000007F
+#define TOPAZ_EMAC_IP_PROTO_OUT_NODE_S		0
+#define TOPAZ_EMAC_IP_PROTO_OUT_PORT		0x00000780
+#define TOPAZ_EMAC_IP_PROTO_OUT_PORT_S		7
+#define TOPAZ_EMAC_IP_PROTO_VALID		BIT(11)
+#define TOPAZ_EMAC_IP_PROTO_VALID_S		11
+
+#define TOPAZ_EMAC_IPDSCP_HWT_SHIFT	2
+
+#define TOPAZ_EMAC_RXP_PRIO_IS_BIT2	0
+#define TOPAZ_EMAC_RXP_PRIO_IS_VLAN	1
+#define TOPAZ_EMAC_RXP_PRIO_IS_DSCP	2
+#define TOPAZ_EMAC_RXP_PRIO_IS_DPI	3
+
+extern void topaz_emac_to_lhost(uint32_t enable);
+extern int topaz_emac_get_bonding(void);
+
+#endif	// __TOPAZ_EMAC_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_mmap.S b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_mmap.S
new file mode 100644
index 0000000..12456bc
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_mmap.S
@@ -0,0 +1,94 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#if defined(__linux__) && defined (__KERNEL__)
+#if defined (DC_CTRL_FLUSH_STATUS)
+	#define ARC_DC_FLUSH_STATUS_BIT	DC_CTRL_FLUSH_STATUS
+#elif defined (BIT_DC_CTRL_FLUSH_STATUS)
+	#define ARC_DC_FLUSH_STATUS_BIT	BIT_DC_CTRL_FLUSH_STATUS
+#endif
+#endif
+
+
+topaz_unified_mmap:
+	.globl topaz_unified_mmap
+	/* Code must be position-independent! */
+
+	/*
+	* Flush and invalidate data cache.
+	* Please make sure that instructions which touch
+	* d-cache are NOT used until flipping is done.
+	*/
+	/* Set flush mode for invalidate operation */
+	lr      r3, [ARC_REG_DC_CTRL]
+	bset    r3, r3, 0x6
+	sr      r3, [ARC_REG_DC_CTRL]
+	/* Start invalidate operation */
+	mov     r3, 0x1
+	sr      r3, [ARC_REG_DC_IVDC]
+	/* Wait until the cache invalidate operation has finished */
+dcache_flush_continue:
+	lr      r3, [ARC_REG_DC_CTRL]
+	and     r3, r3, ARC_DC_FLUSH_STATUS_BIT
+	brne    r3, 0x0, dcache_flush_continue
+
+	/* Prepare flipping.
+	 * After code is finished, memory maps will change as follows:
+	 *     Flip map:
+	 *         SRAM 0x8000_0000 -> 0x8800_0000
+	 *         DRAM 0x0         -> 0x8000_0000
+	 *     Unified map:
+	 *         SRAM 0x8000_0000 -> 0x9800_0000
+	 *         DRAM 0x0         -> 0x8000_0000
+	 */
+	mov     r3, RUBY_SYS_CTL_BASE_ADDR_NOMAP
+	mov     r4, RUBY_SYS_CTL_MMAP_REGVAL
+	or      r4, r4, RUBY_SYS_CTL_REMAP(0x3)
+	st.di   r4, [r3, RUBY_SYS_CTL_MASK - RUBY_SYS_CTL_BASE_ADDR]
+	mov     r4, RUBY_SYS_CTL_MMAP_REGVAL
+
+.align 32 /*ARC_ICACHE_LINE_LEN*/
+	/* Do flipping.
+	* Align to cache line to ensure we don't hit memory during following instructions.
+	* Code must fit into 1 cache line (32 bytes).
+	*/
+	st.di   r4, [r3, RUBY_SYS_CTL_CTRL - RUBY_SYS_CTL_BASE_ADDR]
+	ld.di   r4, [r3, RUBY_SYS_CTL_CTRL - RUBY_SYS_CTL_BASE_ADDR] /* read back to clear pipeline */
+	sync
+	j       boot_continue		/* jump to absolute addr in sram */
+	/* Align to cache line so code occupy strictly 1 cache line. */
+.align 32 /* ARC_ICACHE_LINE_LEN */
+
+boot_continue:
+	/* Finalize flipping. */
+	mov     r4, 0x0
+	st.di   r4, [r3, RUBY_SYS_CTL_MASK - RUBY_SYS_CTL_BASE_ADDR]
+
+	/* Let's discard instruction cache.
+	*/
+	mov     r4, 0x1
+	sr      r4, [ARC_REG_IC_IVIC] /* invalidate i-cache */
+	lr      r4, [ARC_REG_IC_CTRL] /* read will be not completed until i-cache is invalidated */
+
+	/* Done. We are now sitting in different addresses. */
+	b	ruby_boot
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_platform.h b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_platform.h
new file mode 100644
index 0000000..be27da5
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_platform.h
@@ -0,0 +1,498 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Topaz platform.
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef __TOPAZ_PLATFORM_H
+#define __TOPAZ_PLATFORM_H
+
+#include "ruby_platform.h"
+
+#ifndef MS
+#define MS(_v, _f)		(((_v) & _f) >> _f##_S)
+#endif
+#ifndef SM
+#define SM(_v, _f)		(((_v) << _f##_S) & _f)
+#endif
+
+/*
+ * The following macro couldn't be defined via SM because of issues with nesting ##
+ * i.e. the following define does not work
+ * do{ where = (where) & (~(bitmask)) | SM(new_value, bitmask); }while(0)
+ */
+#define UPDATE_BITSET(where, bitmask, new_value) \
+	do{ where = ((where) & (~(bitmask))) | (((new_value) << bitmask##_S) & bitmask); }while(0)
+
+/* Extra reset bits */
+#define TOPAZ_SYS_CTL_RESET_AUC		(RUBY_BIT(10))
+#define TOPAZ_SYS_CTL_ALIAS_MAP		(RUBY_BIT(29))
+#define TOPAZ_SYS_CTL_UNIFIED_MAP	(RUBY_BIT(30))
+
+/* Extra system controller bits */
+#define TOPAZ_SYS_CTL_DDRCLK_S		22
+#define TOPAZ_SYS_CTL_DDRCLK		(0x7 << TOPAZ_SYS_CTL_DDRCLK_S)
+#define TOPAZ_SYS_CTL_DDRCLK_400MHZ	SM(0, TOPAZ_SYS_CTL_DDRCLK)
+#define TOPAZ_SYS_CTL_DDRCLK_320MHZ	SM(1, TOPAZ_SYS_CTL_DDRCLK)
+#define TOPAZ_SYS_CTL_DDRCLK_250MHZ	SM(2, TOPAZ_SYS_CTL_DDRCLK)
+#define TOPAZ_SYS_CTL_DDRCLK_200MHZ	SM(3, TOPAZ_SYS_CTL_DDRCLK)
+#define TOPAZ_SYS_CTL_DDRCLK_160MHZ	SM(4, TOPAZ_SYS_CTL_DDRCLK)
+
+/* Extra system controller registers */
+#define TOPAZ_SYS_CTL_M2D_2_INT		(RUBY_SYS_CTL_BASE_ADDR + 0x0184)
+#define TOPAZ_SYS_CTL_M2D_2_INT_MASK	(RUBY_SYS_CTL_BASE_ADDR + 0x0188)
+#define TOPAZ_SYS_CTL_M2D_3_INT		(RUBY_SYS_CTL_BASE_ADDR + 0x018C)
+#define TOPAZ_SYS_CTL_M2D_3_INT_MASK	(RUBY_SYS_CTL_BASE_ADDR + 0x0190)
+
+/* Temperature control registers */
+#define TOPAZ_SYS_CTL_TEMPSENS_CTL	(RUBY_SYS_CTL_BASE_ADDR + 0x0108)
+#define TOPAZ_SYS_CTL_TEMPSENS_CTL_START_CONV		0x00000001
+#define TOPAZ_SYS_CTL_TEMPSENS_CTL_SHUTDWN		0x00000002
+
+#define TOPAZ_SYS_CTL_TEMP_SENS_TEST_CTL		(RUBY_SYS_CTL_BASE_ADDR + 0x010C)
+
+#define TOPAZ_SYS_CTL_TEMP_SENS_DATA			(RUBY_SYS_CTL_BASE_ADDR + 0x0110)
+#define TOPAZ_SYS_CTL_TEMP_SENS_DATA_TEMP		0x00000FFF
+#define TOPAZ_SYS_CTL_TEMP_SENS_DATA_END_CONV		0x00001000
+#define TOPAZ_SYS_CTL_TEMP_SENS_DATA_END_CONV_S		11
+
+/* AuC SoC interrupt controller registers */
+#define TOPAZ_AUC_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x0174)
+#define TOPAZ_AUC_IPC_INT		(RUBY_SYS_CTL_BASE_ADDR + 0x0178)
+#define TOPAZ_AUC_IPC_INT_MASK(val)	((val & 0xFFFF) << 16)
+#define TOPAZ_AUC_INT_STATUS		(RUBY_SYS_CTL_BASE_ADDR + 0x00D0)
+
+/* Linux Host interrupt controller registers */
+#define TOPAZ_LH_IPC3_INT		(RUBY_SYS_CTL_BASE_ADDR + 0x14C)
+#define TOPAZ_LH_IPC3_INT_MASK		(RUBY_SYS_CTL_BASE_ADDR + 0x150)
+#define TOPAZ_IPC4_INT(base)	((base) + 0x13C)
+#define TOPAZ_IPC4_INT_MASK(base)	((base) + 0x140)
+#define TOPAZ_LH_IPC4_INT		(TOPAZ_IPC4_INT(RUBY_SYS_CTL_BASE_ADDR))
+#define TOPAZ_LH_IPC4_INT_MASK		(TOPAZ_IPC4_INT_MASK(RUBY_SYS_CTL_BASE_ADDR))
+
+/* Multi-processor hardware semaphore */
+#define TOPAZ_MPROC_SEMA		(RUBY_SYS_CTL_BASE_ADDR + 0x0170)
+
+/* MuC SoC Interrupt controller registers */
+#define TOPAZ_SYS_CTL_A2M_INT		(RUBY_SYS_CTL_BASE_ADDR + 0x0144)
+#define TOPAZ_SYS_CTL_A2M_INT_MASK	(RUBY_SYS_CTL_BASE_ADDR + 0x0148)
+
+/* PCIE SoC Interrupt controller registers */
+#define TOPAZ_SYS_CTL_PCIE_INT_STATUS	(RUBY_SYS_CTL_BASE_ADDR + 0x017c)
+#define TOPAZ_SYS_CTL_TQE_INT_STATS_BIT	(RUBY_BIT(10))
+
+#define TOPAZ_SWITCH_BASE_ADDR		0xE1000000
+#define TOPAZ_SWITCH_OUT_NODE_BITS	7		/* Up to 128 output nodes */
+#define TOPAZ_SWITCH_OUT_NODE_MAX	(1 << TOPAZ_SWITCH_OUT_NODE_BITS)
+#define TOPAZ_SWITCH_OUT_NODE_MASK	((1 << TOPAZ_SWITCH_OUT_NODE_BITS) - 1)
+#define TOPAZ_SWITCH_OUT_PORT_BITS	4		/* Up to 16 output ports. 8 are used */
+#define TOPAZ_SWITCH_OUT_PORT_MAX	(1 << TOPAZ_SWITCH_OUT_PORT_BITS)
+#define TOPAZ_SWITCH_OUT_PORT_MASK	((1 << TOPAZ_SWITCH_OUT_PORT_BITS) - 1)
+
+/* TQE */
+#define TOPAZ_TQE_BASE_ADDR		(TOPAZ_SWITCH_BASE_ADDR + 0x30000)
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL	(TOPAZ_TQE_BASE_ADDR + 0x0000)
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL_VAL				0x000000FF
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL_VAL_S			0
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL_SHIFT			24	/* reg bits [7:0] become emac ctrl [31:24] */
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL_MCAST_APPEND_CNTR_EN_S	16
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL_MCAST_APPEND_CNTR_EN		0x00010000
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL_PRI_MODE			0x0F000000
+#define TOPAZ_TQE_EMAC_TDES_1_CNTL_PRI_MODE_S			24
+#define TOPAZ_TQE_MISC			(TOPAZ_TQE_BASE_ADDR + 0x0004)
+#define TOPAZ_TQE_MISC_CLR_DONE_DLY_CYCLE_NUM		0x000003FF	/* q_avail_clr_done delay cycles */
+#define TOPAZ_TQE_MISC_CLR_DONE_DLY_CYCLE_NUM_S		0
+#define TOPAZ_TQE_MISC_RFLCT_OUT_PORT			0x000F0000	/* dest port for reflected pkts */
+#define TOPAZ_TQE_MISC_RFLCT_OUT_PORT_S			16
+#define TOPAZ_TQE_MISC_RFLCT_OUT_PORT_ENABLE		0x00100000	/* redirect emac0<->emac0 or emac1<->emac1 reflected pkts */
+#define TOPAZ_TQE_MISC_RFLCT_OUT_PORT_ENABLE_S		20
+#define TOPAZ_TQE_MISC_RFLCT_2_OUT_PORT_ENABLE		0x00200000	/* redirect emac0<->emac0/emac1 or emac1<->emac0/emac1 reflected pkts */
+#define TOPAZ_TQE_MISC_RFLCT_2_OUT_PORT_ENABLE_S	21
+#define TOPAZ_TQE_MISC_CLR_DONE_DLY_ENABLE		0x80000000	/* enable q_avail_clr_done delay */
+#define TOPAZ_TQE_MISC_CLR_DONE_DLY_ENABLE_S		31
+#define TOPAZ_TQE_WMAC_Q_STATUS_PTR	(TOPAZ_TQE_BASE_ADDR + 0x0008)
+#define TOPAZ_TQE_CPU_SEM		(TOPAZ_TQE_BASE_ADDR + 0x000c)
+#define TOPAZ_TQE_OUTPORT_EMAC0_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0010)
+#define TOPAZ_TQE_OUTPORT_EMAC1_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0014)
+#define TOPAZ_TQE_OUTPORT_WMAC_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0018)
+#define TOPAZ_TQE_OUTPORT_LHOST_CNT	(TOPAZ_TQE_BASE_ADDR + 0x001c)
+#define TOPAZ_TQE_OUTPORT_MUC_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0020)
+#define TOPAZ_TQE_OUTPORT_DSP_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0024)
+#define TOPAZ_TQE_OUTPORT_AUC_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0028)
+#define TOPAZ_TQE_OUTPORT_PCIE_CNT	(TOPAZ_TQE_BASE_ADDR + 0x002c)
+#define TOPAZ_TQE_Q_AVAIL_CLR_CNTL	(TOPAZ_TQE_BASE_ADDR + 0x0030)
+#define TOPAZ_TQE_Q_AVAIL_CLR_CNTL_TID		0xF
+#define TOPAZ_TQE_Q_AVAIL_CLR_CNTL_TID_S	0
+#define TOPAZ_TQE_Q_AVAIL_CLR_CNTL_NODE		0x7F00
+#define TOPAZ_TQE_Q_AVAIL_CLR_CNTL_NODE_S	8
+#define TOPAZ_TQE_Q_AVAIL_CLR_CNTL_CLEAR	RUBY_BIT(30)
+#define TOPAZ_TQE_Q_AVAIL_CLR_CNTL_CLEAR_DONE	RUBY_BIT(31)
+#define TOPAZ_TQE_DROP_CNT		(TOPAZ_TQE_BASE_ADDR + 0x0034)
+#define TOPAZ_TQE_DROP_EMAC0_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0040)
+#define TOPAZ_TQE_DROP_EMAC1_CNT	(TOPAZ_TQE_BASE_ADDR + 0x0044)
+#define TOPAZ_TQE_DROP_WMAC_CNT		(TOPAZ_TQE_BASE_ADDR + 0x0048)
+#define TOPAZ_TQE_DROP_LHOST_CNT	(TOPAZ_TQE_BASE_ADDR + 0x004c)
+#define TOPAZ_TQE_DROP_MUC_CNT		(TOPAZ_TQE_BASE_ADDR + 0x0050)
+#define TOPAZ_TQE_DROP_DSP_CNT		(TOPAZ_TQE_BASE_ADDR + 0x0054)
+#define TOPAZ_TQE_DROP_AUC_CNT		(TOPAZ_TQE_BASE_ADDR + 0x0058)
+#define TOPAZ_TQE_DROP_PCIE_CNT		(TOPAZ_TQE_BASE_ADDR + 0x005c)
+
+/* TQE-CPU interface */
+#define TOPAZ_TQE_CPUIF_BASE(num)		(TOPAZ_TQE_BASE_ADDR + 0x4000 + 0x1000 * (num))	// For FPGA build 72 and earlier need to use (0xE1040000 + 0x10000 * (num))
+#define TOPAZ_TQE_CPUIF_CSR(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0000)
+#define TOPAZ_TQE_CPUIF_RX_RING_SIZE(num)	(TOPAZ_TQE_CPUIF_BASE(num) + 0x0004)
+#define TOPAZ_TQE_CPUIF_RX_RING(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0008)
+#define TOPAZ_TQE_CPUIF_RX_CURPTR(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x000c)
+#define TOPAZ_TQE_CPUIF_PKT_FINISH(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0010)
+#define TOPAZ_TQE_CPUIF_Q_PTR_STATUS(num)	(TOPAZ_TQE_CPUIF_BASE(num) + 0x0014)
+#define TOPAZ_TQE_CPUIF_PPCTL0(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0020)
+#define TOPAZ_TQE_CPUIF_PPCTL1(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0024)
+#define TOPAZ_TQE_CPUIF_PPCTL2(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0028)
+#define TOPAZ_TQE_CPUIF_PPCTL3(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x002c)
+#define TOPAZ_TQE_CPUIF_PPCTL4(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0030)
+#define TOPAZ_TQE_CPUIF_PPCTL5(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0034)
+#define TOPAZ_TQE_CPUIF_TXSTART(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x0038)
+#define TOPAZ_TQE_CPUIF_STATUS(num)		(TOPAZ_TQE_CPUIF_BASE(num) + 0x003C)
+/* Some bits definitions */
+#define TOPAZ_TQE_CPUIF_CSR_IRQ_EN		RUBY_BIT(0)
+#define TOPAZ_TQE_CPUIF_CSR_IRQ_THRESHOLD(num)	(((num) & 0x7F) << 8)
+#define TOPAZ_TQE_CPUIF_CSR_IRQ_THRESHOLD_EN	RUBY_BIT(15)
+#define TOPAZ_TQE_CPUIF_CSR_RESET		RUBY_BIT(31)
+/* Aux definitions */
+#define TOPAZ_TQE_CPUIF_RXDESC_ALIGN		8	/* TQE CPU rx descriptors must be 64 bit aligned */
+
+/**
+ * Hardware Buffer Manager
+ */
+#define TOPAZ_HBM_BASE_ADDR		(TOPAZ_SWITCH_BASE_ADDR + 0x20000)
+#define TOPAZ_HBM_CSR_REG		(TOPAZ_HBM_BASE_ADDR + 0x0000)
+#define TOPAZ_HBM_CSR_Q_EN(x)		(BIT(0 + (x)))
+#define TOPAZ_HBM_CSR_INT_EN		(BIT(7))
+#define TOPAZ_HBM_CSR_OFLOW_INT_MASK(x)	(BIT(8 + (x)))
+#define TOPAZ_HBM_CSR_UFLOW_INT_MASK(x)	(BIT(12 + (x)))
+#define TOPAZ_HBM_CSR_OFLOW_INT_RAW(x)	(BIT(16 + (x)))
+#define TOPAZ_HBM_CSR_UFLOW_INT_RAW(x)	(BIT(20 + (x)))
+#define TOPAZ_HBM_CSR_INT_MSK_RAW	(0xff << 16)
+#define TOPAZ_HBM_CSR_OFLOW_INT_STATUS(x) (BIT(24 + (x)))
+#define TOPAZ_HBM_CSR_UFLOW_INT_STATUS(x) (BIT(28 + (x)))
+
+#define TOPAZ_HBM_BASE_REG(x)		(TOPAZ_HBM_BASE_ADDR + 0x0004 + ((x) * 0x10))
+#define TOPAZ_HBM_LIMIT_REG(x)		(TOPAZ_HBM_BASE_ADDR + 0x0008 + ((x) * 0x10))
+#define TOPAZ_HBM_WR_PTR(x)		(TOPAZ_HBM_BASE_ADDR + 0x000c + ((x) * 0x10))
+#define TOPAZ_HBM_RD_PTR(x)		(TOPAZ_HBM_BASE_ADDR + 0x0010 + ((x) * 0x10))
+
+#define TOPAZ_HBM_POOL(x)		(TOPAZ_HBM_BASE_ADDR + 0x0100 + ((x) * 0x4))
+#define TOPAZ_HBM_POOL_REQ(x)		(TOPAZ_HBM_BASE_ADDR + 0x0110 + ((x) * 0x4))
+#define TOPAZ_HBM_POOL_DATA(x)		(TOPAZ_HBM_BASE_ADDR + 0x0140 + ((x) * 0x4))
+
+#define TOPAZ_HBM_OVERFLOW_CNT		(TOPAZ_HBM_BASE_ADDR + 0x0190)
+#define TOPAZ_HBM_UNDERFLOW_CNT		(TOPAZ_HBM_BASE_ADDR + 0x0194)
+
+#define TOPAZ_HBM_MASTER_COUNT				9
+#define TOPAZ_HBM_POOL_COUNT				4
+#define TOPAZ_HBM_POOL_REQUEST_CNT(master, pool)	(TOPAZ_HBM_BASE_ADDR + 0x0200 + (master) * 0x20 + (pool) * 0x4)
+#define TOPAZ_HBM_POOL_RELEASE_CNT(master, pool)	(TOPAZ_HBM_BASE_ADDR + 0x0210 + (master) * 0x20 + (pool) * 0x4)
+
+#define TOPAZ_HBM_RELEASE_BUF		(BIT(0))
+#define TOPAZ_HBM_REQUEST_BUF		(BIT(1))
+#define TOPAZ_HBM_POOL_NUM(x)		((x) << 2)
+#define TOPAZ_HBM_DONE			(BIT(31))
+
+/* SoC interrupts */
+#define TOPAZ_SYS_CTL_M2L_HI_INT	PLATFORM_REG_SWITCH(RUBY_SYS_CTL_M2L_INT, (RUBY_SYS_CTL_BASE_ADDR + 0xFC))
+
+/**
+ * Forwarding Table
+ */
+#define TOPAZ_FWT_BASE_ADDR		(TOPAZ_SWITCH_BASE_ADDR + 0x0)
+#define TOPAZ_FWT_SIZE			(BIT(12))
+
+#define TOPAZ_FWT_TABLE_BASE		(TOPAZ_FWT_BASE_ADDR)
+
+#define TOPAZ_FWT_VLAN_TABLE_BASE	(TOPAZ_FWT_BASE_ADDR + 0x10000)
+#define TOPAZ_FWT_VLAN_TABLE_LIMIT	(TOPAZ_FWT_BASE_ADDR + 0x14000)
+
+#define TOPAZ_FWT_CTRL_BASE_ADDR	(TOPAZ_FWT_BASE_ADDR + 0xA000)
+
+#define TOPAZ_FWT_CPU_ACCESS		(TOPAZ_FWT_CTRL_BASE_ADDR + 0x0000)
+#define TOPAZ_FWT_CPU_ACCESS_STATE	0x0000000F
+#define TOPAZ_FWT_CPU_ACCESS_STATE_S	0
+#define TOPAZ_FWT_CPU_ACCESS_STATE_GRANTED	0x3
+#define TOPAZ_FWT_CPU_ACCESS_REQ	BIT(31)
+#define TOPAZ_FWT_TIME_STAMP_CTRL	(TOPAZ_FWT_CTRL_BASE_ADDR + 0x0004)
+#define TOPAZ_FWT_TIME_STAMP_CTRL_UNIT		0x0000001F
+#define TOPAZ_FWT_TIME_STAMP_CTRL_UNIT_S	0
+#define TOPAZ_FWT_TIME_STAMP_CTRL_SCALE		0x000003e0
+#define TOPAZ_FWT_TIME_STAMP_CTRL_SCALE_S	5
+#define TOPAZ_FWT_TIME_STAMP_DIS_AUTO_UPDATE_S	(16)
+#define TOPAZ_FWT_TIME_STAMP_CTRL_CLEAR		BIT(31)
+#define TOPAZ_FWT_TIME_STAMP_CNT	(TOPAZ_FWT_CTRL_BASE_ADDR + 0x0008)
+#define TOPAZ_FWT_HASH_CTRL		(TOPAZ_FWT_CTRL_BASE_ADDR + 0x000c)
+#define TOPAZ_FWT_HASH_CTRL_ENABLE	BIT(15)
+
+#define TOPAZ_FWT_LOOKUP_LHOST		0
+#define TOPAZ_FWT_LOOKUP_MUC		1
+#define TOPAZ_FWT_LOOKUP_DSP		2
+#define TOPAZ_FWT_LOOKUP_AUC		3
+
+#define	__TOPAZ_FWT_LOOKUP_REG(x)	(TOPAZ_FWT_CTRL_BASE_ADDR + 0x0010 + ((x) * 0x10))
+#define	__TOPAZ_FWT_LOOKUP_MAC_LO(x)	(TOPAZ_FWT_CTRL_BASE_ADDR + 0x0014 + ((x) * 0x10))
+#define	__TOPAZ_FWT_LOOKUP_MAC_HI(x)	(TOPAZ_FWT_CTRL_BASE_ADDR + 0x0018 + ((x) * 0x10))
+
+#define TOPAZ_FWT_LOOKUP_TRIG		0x00000001
+#define TOPAZ_FWT_LOOKUP_TRIG_S		0
+#define TOPAZ_FWT_LOOKUP_ENTRY_ADDR	0x7FF00000
+#define TOPAZ_FWT_LOOKUP_ENTRY_ADDR_S	20
+#define TOPAZ_FWT_LOOKUP_HASH_ADDR	0x0003FF00
+#define TOPAZ_FWT_LOOKUP_HASH_ADDR_S	8
+#define TOPAZ_FWT_LOOKUP_VALID		0x80000000
+#define TOPAZ_FWT_LOOKUP_VALID_S	31
+
+#define TOPAZ_FWT_PORT_EMAC0		(0)
+#define TOPAZ_FWT_PORT_EMAC1		(1)
+#define TOPAZ_FWT_PORT_WMAC		(2)
+#define TOPAZ_FWT_PORT_PCIE		(3)
+#define TOPAZ_FWT_PORT_LH		(4)
+#define TOPAZ_FWT_PORT_MUC		(5)
+#define TOPAZ_FWT_PORT_DSP		(6)
+#define TOPAZ_FWT_PORT_AUC		(7)
+
+#define TOPAZ_FWT_ENTRY_NXT_ENTRY	0x0FFE0000
+#define TOPAZ_FWT_ENTRY_NXT_ENTRY_S	17
+#define TOPAZ_FWT_ENTRY_VALID		0x80000000
+#define TOPAZ_FWT_ENTRY_VALID_S		31
+#define TOPAZ_FWT_ENTRY_PORTAL		0x40000000
+#define TOPAZ_FWT_ENTRY_PORTAL_S	30
+
+#define TOPAZ_FWT_ENTRY_OUT_NODE_0		0x0000007F
+#define TOPAZ_FWT_ENTRY_OUT_NODE_0_S		0
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_0		0x00000080
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_0_S	7
+#define TOPAZ_FWT_ENTRY_OUT_NODE_1		0x00007F00
+#define TOPAZ_FWT_ENTRY_OUT_NODE_1_S		8
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_1		0x00008000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_1_S	15
+#define TOPAZ_FWT_ENTRY_OUT_NODE_2		0x007F0000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_2_S		16
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_2		0x00800000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_2_S	23
+#define TOPAZ_FWT_ENTRY_OUT_NODE_3		0x7F000000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_3_S		24
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_3		0x80000000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_3_S	31
+#define TOPAZ_FWT_ENTRY_OUT_NODE_4		0x007F0000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_4_S		16
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_4		0x00800000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_4_S	23
+#define TOPAZ_FWT_ENTRY_OUT_NODE_5		0x7F000000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_5_S		24
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_5		0x80000000
+#define TOPAZ_FWT_ENTRY_OUT_NODE_VLD_5_S	31
+
+#define TOPAZ_FWT_ENTRY_OUT_PORT	0x00003C00
+#define TOPAZ_FWT_ENTRY_OUT_PORT_S	10
+
+#define TOPAZ_FWT_ENTRY_TIMESTAMP	0x000003FF
+#define TOPAZ_FWT_ENTRY_TIMESTAMP_S	0
+
+#define TOPAZ_FWT_HW_HASH_SHIFT		10
+#define TOPAZ_FWT_HW_HASH_MASK		((1 << TOPAZ_FWT_HW_HASH_SHIFT) - 1)
+#define TOPAZ_FWT_HW_LEVEL1_ENTRIES	(1 << TOPAZ_FWT_HW_HASH_SHIFT)
+#define TOPAZ_FWT_HW_LEVEL2_ENTRIES	1024
+#define TOPAZ_FWT_HW_TOTAL_ENTRIES	(TOPAZ_FWT_HW_LEVEL1_ENTRIES + TOPAZ_FWT_HW_LEVEL2_ENTRIES)
+
+/*
+ * VLAN table
+ */
+#define TOPAZ_VLAN_BASE_ADDR		(TOPAZ_SWITCH_BASE_ADDR + 0x10000)
+#define TOPAZ_VLAN_ENTRIES		(1 << 12)	/* 802.1Q VLAN ID */
+#define TOPAZ_VLAN_ENTRY_ADDR(x)	(TOPAZ_VLAN_BASE_ADDR + 4 * (x))
+#define TOPAZ_VLAN_OUT_NODE		0x0000007F
+#define TOPAZ_VLAN_OUT_NODE_S		0
+#define TOPAZ_VLAN_OUT_PORT		0x00000380
+#define TOPAZ_VLAN_OUT_PORT_S		7
+#define TOPAZ_VLAN_VALID		0x00000400
+#define TOPAZ_VLAN_VALID_S		10
+#define TOPAZ_VLAN_HW_BITMASK		0x000007ff
+
+/* TX AGG */
+#define TOPAZ_TX_AGG_BASE_ADDR				0xE5090000
+#define TOPAZ_TX_AGG_NODE_N_TID_Q_AVAIL(node)		(TOPAZ_TX_AGG_BASE_ADDR + 0x000 + 4 * (node))
+#define TOPAZ_TX_AGG_NODE_N_TID_Q_AVAIL_MASK(val)	((val & 0xFFFF) << 16)
+#define TOPAZ_TX_AGG_NODE_N_TID_Q_AVAIL_SUP(node)	(TOPAZ_TX_AGG_BASE_ADDR + 0x200 + 4 * (node))
+#define TOPAZ_TX_AGG_NODE_N_TID_Q_AVAIL_SUP_MASK(val)	((val & 0xFFFF) << 16)
+#define TOPAZ_TX_AGG_CSR				(TOPAZ_TX_AGG_BASE_ADDR + 0x460)
+#define TOPAZ_TX_AGG_TAC_MAP_MODE_64			0
+#define TOPAZ_TX_AGG_TAC_MAP_MODE_128			1
+#define TOPAZ_TX_AGG_AC					0xF0000000
+#define TOPAZ_TX_AGG_AC_S				28
+#define TOPAZ_TX_AGG_CPU_Q_ACCESS_SEM			(TOPAZ_TX_AGG_BASE_ADDR + 0x464)
+#define TOPAZ_TX_AGG_UC_Q_ACCESS_SEM			(TOPAZ_TX_AGG_BASE_ADDR + 0x468)
+#define TOPAZ_TX_AGG_TAC_CNTL				(TOPAZ_TX_AGG_BASE_ADDR + 0x46C)
+#ifdef TOPAZ_128_NODE_MODE
+#define TOPAZ_TX_AGG_TAC_CNTL_NODE(node)		((node) & 0x7F)
+#else
+#define TOPAZ_TX_AGG_TAC_CNTL_NODE(node)		((node) & 0x3F)
+#endif
+#define TOPAZ_TX_AGG_TAC_CNTL_TID(tid)			(((tid) & 0xF) << 8)
+#define TOPAZ_TX_AGG_TAC_CNTL_READ_CMD(cmd)		(((cmd) & 0x3) << 12)
+#define TOPAZ_TX_AGG_TAC_CNTL_READ_DATA_VLD		RUBY_BIT(29)
+#define TOPAZ_TX_AGG_TAC_CNTL_READ			RUBY_BIT(30)
+#define TOPAZ_TX_AGG_TAC_CNTL_WRITE			RUBY_BIT(31)
+#define TOPAZ_TX_AGG_TAC_DATA				(TOPAZ_TX_AGG_BASE_ADDR + 0x470)
+#define TOPAZ_TX_AGG_TAC_DATA_AC(__ac)			((__ac) & 0x3)
+#define TOPAZ_TX_AGG_TAC_DATA_PRIORITY(__pri)		(((__pri) & 0xFF) << 2)
+#ifdef TOPAZ_128_NODE_MODE
+#define TOPAZ_TX_AGG_TAC_DATA_AC_LO			0x00000003
+#define TOPAZ_TX_AGG_TAC_DATA_AC_LO_S			0
+#define TOPAZ_TX_AGG_TAC_DATA_PRIORITY_LO		0x0000001c
+#define TOPAZ_TX_AGG_TAC_DATA_PRIORITY_LO_S		2
+#define TOPAZ_TX_AGG_TAC_DATA_AC_HI			0x00000060
+#define TOPAZ_TX_AGG_TAC_DATA_AC_HI_S			5
+#define TOPAZ_TX_AGG_TAC_DATA_PRIORITY_HI		0x00000380
+#define TOPAZ_TX_AGG_TAC_DATA_PRIORITY_HI_S		7
+#endif
+#define TOPAZ_TX_AGG_AC_N_NODE_TID(ac)			(TOPAZ_TX_AGG_BASE_ADDR + 0x478 + 4 * (ac))
+#define TOPAZ_TX_AGG_AC_N_STAT_PTR(ac)			(TOPAZ_TX_AGG_BASE_ADDR + 0x488 + 4 * (ac))
+#define TOPAZ_TX_AGG_Q_FULL_THRESH			(TOPAZ_TX_AGG_BASE_ADDR + 0x498)
+#define TOPAZ_TX_AGG_Q_FULL_THRESH_VAL(q0, q1, q2, q3)	(((q0) & 0xF) | (((q1) & 0xF) << 4) | (((q2) & 0xF) << 8) | (((q3) & 0xF) << 12))
+#define TOPAZ_TX_AGG_CPU_IRQ_CSR			(TOPAZ_TX_AGG_BASE_ADDR + 0x49C)
+#define TOPAZ_TX_AGG_STATUS_IRQ				(TOPAZ_TX_AGG_BASE_ADDR + 0x4A0)
+#define TOPAZ_TX_AGG_AC_N_NODE_TID_NO_SEL(ac)		(TOPAZ_TX_AGG_BASE_ADDR + 0x4A4 + 4 * (ac))
+#define TOPAZ_TX_AGG_TAC_CNTL_READ_CMD_NODE_TAB		0
+#define TOPAZ_TX_AGG_TAC_CNTL_READ_CMD_AVAIL_LO		1
+#define TOPAZ_TX_AGG_TAC_CNTL_READ_CMD_AVAIL_HI		3
+#define TOPAZ_TX_AGG_MAX_NODE_NUM			128
+#define TOPAZ_TX_AGG_HALF_MAX_NODE_NUM			(TOPAZ_TX_AGG_MAX_NODE_NUM >> 1)
+
+/*
+ * MuC/Lhost new interrupts.
+ * Old interrupts (even changed number) are in ruby_platform, RUBY_IRQ_*
+ */
+#define	TOPAZ_IRQ_TQE					(5)
+#define TOPAZ_IRQ_HDMA0					(RUBY_IRQ_DMA0)
+#define TOPAZ_IRQ_HBM					(RUBY_IRQ_DMA1)
+#define TOPAZ_IRQ_HDMA1					(RUBY_IRQ_DMA3)
+#define TOPAZ_IRQ_PCIE					(28)
+#define TOPAZ_IRQ_IPC_A2M				(18)
+#define TOPAZ_IQR_TQE_DSP				(19)
+#define	TOPAZ_IRQ_PCIE_DMA				(RUBY_IRQ_DMA2)
+#define	TOPAZ_IRQ_IPC4					(29)
+#define	TOPAZ_MUC_IRQ_BB_PER_PKT			(31)
+#define TOPAZ_HBM_INT_EN				RUBY_BIT(31)
+#define TOPAZ_PCIE_INTX_CLR_MASK			RUBY_BIT(11)
+#define	TOPAZ_PCIE_INT_MASK				RUBY_PCIE_INT_MASK
+#define	TOPAZ_PCIE_MSI_MASK				RUBY_PCIE_MSI_MASK
+#define TOPAZ_PCIE_MSI_EN				RUBY_BIT(0)
+#define TOPAZ_PCIE_MSI_BASE				0xE9000050
+#define TOPAZ_PCIE_MSI_CAP				(TOPAZ_PCIE_MSI_BASE + 0x0)
+
+#define TOPAZ_PCIE_EXP_DEVCTL				(0xE9000078)
+
+/* MSI defines to be used in Topaz PCIe host driver */
+#define	TOPAZ_PCIE_MSI_REGION				RUBY_PCIE_MSI_REGION
+#define	TOPAZ_MSI_ADDR_LOWER				RUBY_MSI_ADDR_LOWER
+#define	TOPAZ_MSI_ADDR_UPPER				RUBY_MSI_ADDR_UPPER
+#define	TOPAZ_MSI_INT_ENABLE				RUBY_MSI_INT_ENABLE
+
+/* AHB Bus monitors */
+#define TOPAZ_BUSMON_INTR_STATUS			(RUBY_SYS_CTL_BASE_ADDR + 0x015c)
+#define TOPAZ_BUSMON_INTR_MASK				(RUBY_SYS_CTL_BASE_ADDR + 0x0160)
+#define TOPAZ_BUSMON_INTR_MASK_TIMEOUT_EN(master)	BIT((master) * 2 + 0)
+#define TOPAZ_BUSMON_INTR_MASK_RANGE_CHECK_EN(master)	BIT((master) * 2 + 1)
+#define TOPAZ_BUSMON_DEBUG_VIEW				(RUBY_SYS_CTL_BASE_ADDR + 0x0164)
+#define TOPAZ_BUSMON_DEBUG_VIEW_MASTER(x)		(((x) & 0x3) << 0)
+#define TOPAZ_BUSMON_DEBUG_VIEW_DATA_SEL(x)		(((x) & 0x7) << 2)
+#define TOPAZ_BUSMON_DEBUG_STATUS			(RUBY_SYS_CTL_BASE_ADDR + 0x0168)
+#define TOPAZ_BUSMON_CTL_BASE_ADDR			(RUBY_SYS_CTL_BASE_ADDR + 0x0200)
+#define TOPAZ_BUSMON_CTL(core)				(TOPAZ_BUSMON_CTL_BASE_ADDR + ((core) * 0x40))
+#define __TOPAZ_BUSMON_CTL_RANGE(core, range)		(TOPAZ_BUSMON_CTL(core) + 0x8 + ((range) * 0x8))
+#define TOPAZ_BUSMON_CTL_RANGE_LOW(core, range)		(__TOPAZ_BUSMON_CTL_RANGE((core), (range)) + 0x0)
+#define TOPAZ_BUSMON_CTL_RANGE_HIGH(core, range)	(__TOPAZ_BUSMON_CTL_RANGE((core), (range)) + 0x4)
+#define TOPAZ_BUSMON_HREADY_EN				BIT(0)
+#define TOPAZ_BUSMON_TIMER_INT_EN			BIT(1)
+#define TOPAZ_BUSMON_TIMER_ERROR_EN			BIT(2)
+#define TOPAZ_BUSMON_ADDR_CHECK_EN			BIT(3)
+#define TOPAZ_BUSMON_REGION_VALID(x)			(((x) & 0xF) << 4)
+#define TOPAZ_BUSMON_TIMEOUT(cycles)			(((cycles) & 0x3FF) << 8)
+#define TOPAZ_BUSMON_BLOCK_TRANS_EN			BIT(18)
+#define TOPAZ_BUSMON_OUTSIDE_ADDR_CHECK			BIT(19)
+
+/* AHB Bus monitor masters */
+#define TOPAZ_BUSMON_LHOST				0
+#define TOPAZ_BUSMON_MUC				1
+#define TOPAZ_BUSMON_DSP				2
+#define TOPAZ_BUSMON_AUC				3
+#define TOPAZ_BUSMON_WMAC				4
+#define TOPAZ_BUSMON_PCIE				5
+#define TOPAZ_BUSMON_SWE				6
+#define TOPAZ_BUSMON_EMAC				7
+
+#define TOPAZ_BUSMON_MASTER_NAMES	{ "lhost", "muc", "dsp", "auc", "wmac", "pcie", "swe", "emac" }
+
+/* AHB Bus monitor debug data select */
+#define TOPAZ_BUSMON_ADDR				0
+#define TOPAZ_BUSMON_WR_L32				1
+#define TOPAZ_BUSMON_WR_H32				2
+#define TOPAZ_BUSMON_RD_L32				3
+#define TOPAZ_BUSMON_RD_H32				4
+#define TOPAZ_BUSMON_CTRL0				5
+#define TOPAZ_BUSMON_CTRL1				6
+#define TOPAZ_BUSMON_CTRL2				7
+#define TOPAZ_BUSMON_DEBUG_MAX				8
+
+/* GPIO Registers */
+#define RUBY_GPIO3_PWM1					(RUBY_GPIO1_PWM0 + 4)
+#define RUBY_GPIO12_PWM3				(RUBY_GPIO1_PWM0 + 12)
+#define RUBY_GPIO13_PWM4				(RUBY_GPIO1_PWM0 + 16)
+#define RUBY_GPIO15_PWM5				(RUBY_GPIO1_PWM0 + 20)
+#define RUBY_GPIO16_PWM6				(RUBY_GPIO1_PWM0 + 24)
+#define RUBY_GPIO_PWM_LOW_SHIFT				(0)
+#define RUBY_GPIO_PWM_HIGH_SHIFT			(8)
+#define RUBY_GPIO_PWM_ENABLE				(BIT(16))
+#define RUBY_GPIO_PWM_MAX_COUNT				(255)
+
+#ifdef TOPAZ_AMBER_IP
+#define	AMBER_GPIO11_PWM0				(RUBY_GPIO_REGS_ADDR + 0x20)
+#define AMBER_GPIO12_PWM1				(RUBY_GPIO_REGS_ADDR + 0x24)
+#define	AMBER_GPIO13_PWM2				(RUBY_GPIO_REGS_ADDR + 0x28)
+#define AMBER_GPIO14_PWM3				(RUBY_GPIO_REGS_ADDR + 0x2C)
+#define AMBER_GPIO15_PWM4				(RUBY_GPIO_REGS_ADDR + 0x30)
+#define AMBER_GPIO16_PWM5				(RUBY_GPIO_REGS_ADDR + 0x34)
+#define AMBER_GPIO17_PWM6				(RUBY_GPIO_REGS_ADDR + 0x38)
+#endif
+
+/* Interrupt lines */
+#define TOPAZ_IRQ_MISC_WDT				(57)
+#define TOPAZ_IRQ_MISC_SPI1				(58)
+#define TOPAZ_IRQ_MISC_AHB_MON				(61)
+#define TOPAZ_IRQ_MISC_HBM				(62)
+#define TOPAZ_IRQ_MISC_FWT				(63)
+#define TOPAZ_IRQ_MISC_EXT_IRQ_COUNT			(8)
+#define TOPAZ_IRQ_MISC_RST_CAUSE_START			(9)
+
+/* RESET CAUSE */
+#define TOPAZ_SYS_CTL_INTR_TIMER_MSK(t)		(1 << (3 + (t)))
+
+#endif /* #ifndef __TOPAZ_PLATFORM_H */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_reset.h b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_reset.h
new file mode 100644
index 0000000..fab7393
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_reset.h
@@ -0,0 +1,94 @@
+/*
+ * (C) Copyright 2015 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/* This header file defines reset function to be used on Topaz */
+
+#ifndef __TOPAZ_RESET_H
+#define __TOPAZ_RESET_H
+
+#include <include/qtn/mproc_sync_base.h>
+#ifdef TOPAZ_AMBER_IP
+#include <include/qtn/amber.h>
+#endif
+
+static void topaz_set_reset_vec(int enable, unsigned long reset)
+{
+#ifdef TOPAZ_AMBER_IP
+	unsigned long flush_mask = 0;
+
+	switch (reset) {
+	case TOPAZ_SYS_CTL_RESET_AUC:
+		flush_mask = TOPAZ_AMBER_BUS_FLUSH_AUC;
+		break;
+	case RUBY_SYS_CTL_RESET_DSP_ALL:
+		flush_mask = TOPAZ_AMBER_BUS_FLUSH_DSP;
+		break;
+	case RUBY_SYS_CTL_RESET_MUC_ALL:
+		flush_mask = TOPAZ_AMBER_BUS_FLUSH_MUC;
+		break;
+	case RUBY_SYS_CTL_RESET_ENET0:
+		flush_mask = TOPAZ_AMBER_BUS_FLUSH_RGMII;
+		break;
+	case RUBY_SYS_CTL_RESET_IOSS:
+		flush_mask = TOPAZ_AMBER_BUS_FLUSH_BRIDGE | TOPAZ_AMBER_BUS_FLUSH_DMA;
+		break;
+	case RUBY_SYS_CTL_RESET_MAC:
+		flush_mask = TOPAZ_AMBER_BUS_FLUSH_WMAC;
+		break;
+	case RUBY_SYS_CTL_RESET_BB:
+		flush_mask = 0;
+		break;
+	default:
+		/* In the case we accidentally get here - request/release flush for everything to be safe */
+		flush_mask = TOPAZ_AMBER_BUS_FLUSH_AUC |
+			TOPAZ_AMBER_BUS_FLUSH_DSP |
+			TOPAZ_AMBER_BUS_FLUSH_MUC |
+			TOPAZ_AMBER_BUS_FLUSH_RGMII |
+			TOPAZ_AMBER_BUS_FLUSH_BRIDGE |
+			TOPAZ_AMBER_BUS_FLUSH_DMA |
+			TOPAZ_AMBER_BUS_FLUSH_WMAC |
+			TOPAZ_AMBER_BUS_FLUSH_LHOST;
+		qtn_mproc_sync_log("%s:%u: error - invalid reset flag 0x%08x\n", __FILE__, __LINE__, reset);
+		break;
+	}
+
+	if (!enable && flush_mask) {
+		/* Need to request bus flush before switching off */
+		amber_bus_flush_req(flush_mask);
+	}
+#endif
+
+	qtn_mproc_sync_mem_write(RUBY_SYS_CTL_CPU_VEC_MASK, reset);
+	qtn_mproc_sync_mem_write_wmb(RUBY_SYS_CTL_CPU_VEC, enable ? reset : 0);
+	qtn_mproc_sync_mem_write_wmb(RUBY_SYS_CTL_CPU_VEC_MASK, 0);
+
+#ifdef TOPAZ_AMBER_IP
+	if (enable && flush_mask) {
+		/* Need to release bus flush after switching on */
+		amber_bus_flush_release(flush_mask);
+	}
+#endif
+
+}
+#endif // #ifndef __TOPAZ_RESET_H
+
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_rfic6_config b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_rfic6_config
new file mode 100644
index 0000000..0b7d498
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/topaz_rfic6_config
@@ -0,0 +1,149 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Topaz platform.
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef __TOPAZ_CONFIG_H
+#define __TOPAZ_CONFIG_H
+
+#include "current_platform.h"
+
+#if !TOPAZ_FPGA_PLATFORM
+#undef TOPAZ_ICACHE_WORKAROUND
+#endif
+
+/*
+ * Control registers move depending on unified + alias bit
+ */
+
+#define TOPAZ_MMAP_UNIFIED	0
+#define TOPAZ_MMAP_ALIAS	0
+#define TOPAZ_RX_ACCELERATE	1
+
+/*
+ * VSP/QTM
+ * Macro TOPAZ_QTM is used to help identify changes between original VSP and QTM.
+ * In Lhost kernel driver, it must be used within CONFIG_QVSP(in kernel .config).
+ * CONFIG_QVSP	TOPAZ_QTM	ruby		topaz
+ * Y		1		invalid		*QTM works
+ * Y		0		*VSP works	VSP alive but doesn't work for HDP
+ * N		1		invalid		*no VSP/QTM
+ * N		0		*no VSP		no VSP/QTM, and no QTM changes in MuC and AuC
+ * So generally, solely changing CONFIG_QVSP works for both ruby and topaz as indicated by *.
+ * But to thoroughly clean QTM code in AuC and MuC, disable TOPAZ_QTM in topaz below.
+ */
+	#define TOPAZ_QTM		1
+
+/*
+ * HBM buffer process in MuC requires that TOPAZ_AUC_RX is dependent on TOPAZ_RX_ACCELERATE, so let's
+ * enable TOPAZ_AUC_RX only when TOPAZ_RX_ACCELERATE is enabled.
+ */
+#if TOPAZ_RX_ACCELERATE
+#define TOPAZ_AUC_RX	1
+#else
+#define TOPAZ_AUC_RX	0
+#endif
+
+#if TOPAZ_MMAP_ALIAS && !TOPAZ_MMAP_UNIFIED
+	#error Alias map requires unified map
+#endif
+
+#if TOPAZ_MMAP_ALIAS
+	#define TOPAZ_ALIAS_MAP_SWITCH(a, b)	(b)
+#else
+	#define TOPAZ_ALIAS_MAP_SWITCH(a, b)	(a)
+#endif
+
+/* Topaz fixed phy addresses */
+#define TOPAZ_FPGAA_PHY0_ADDR		2
+#define TOPAZ_FPGAA_PHY1_ADDR		3
+#define TOPAZ_FPGAB_PHY0_ADDR		4
+#define TOPAZ_FPGAB_PHY1_ADDR		1
+#define TOPAZ_PHY0_ADDR				1
+#define TOPAZ_PHY1_ADDR				3
+
+#ifndef TOPAZ_FPGA_PLATFORM
+	#define TOPAZ_FPGA_PLATFORM	0
+#endif
+
+/* Definition indicates that Topaz platform is FPGA */
+#if TOPAZ_FPGA_PLATFORM
+	/* CLK speeds are in MHz and 1/10th the speed of actual ASIC */
+	#define TOPAZ_SERIAL_BAUD	38400
+	#define TOPAZ_APB_CLK		12500000
+	#define TOPAZ_AHB_CLK		25000000
+	#define TOPAZ_CPU_CLK		50000000
+	#define RUBY_FPGA_DDR
+#else
+	#define TOPAZ_SERIAL_BAUD	115200
+	#define TOPAZ_APB_CLK		125000000
+	#define TOPAZ_AHB_CLK		250000000
+	#define TOPAZ_CPU_CLK		500000000
+	#define RUBY_ASIC_DDR
+#endif /* #if TOPAZ_FPGA_PLATFORM */
+
+/*
+ * Setting UPF_SPD_FLAG gives a developer the option to set the
+ * flag to match a UPF_ define from <linux>/include/linux/serial_core.h
+ * or set the value to 0 to use the default baud rate setting DEFAULT_BAUD
+ */
+#define UPF_SPD_FLAG	0
+#define DEFAULT_BAUD	TOPAZ_SERIAL_BAUD
+
+/*
+ * Re-use Ruby defines to simplify the number of changes required
+ * to compile new binaries for Topaz
+ */
+#define RUBY_SERIAL_BAUD	TOPAZ_SERIAL_BAUD
+#define RUBY_FIXED_DEV_CLK	TOPAZ_APB_CLK
+#define RUBY_FIXED_CPU_CLK	TOPAZ_CPU_CLK
+
+#ifdef PLATFORM_DEFAULT_BOARD_ID
+        #define DEFAULT_BOARD_ID	PLATFORM_DEFAULT_BOARD_ID
+#else
+	/* Default board id used to match Topaz setting if there is no SPI Flash */
+	#define DEFAULT_BOARD_ID	QTN_TOPAZ_BB_BOARD
+#endif /* PLATFORM_DEFAULT_BOARD_ID */
+
+#ifndef PLATFORM_ARC7_MMU_VER
+	#define PLATFORM_ARC7_MMU_VER	2
+#endif
+
+#define CONFIG_RUBY_BROKEN_IPC_IRQS	0
+
+#define RUBY_IPC_HI_IRQ(bit_num)	((bit_num) + 8)
+#define RUBY_M2L_IPC_HI_IRQ(bit_num)	(bit_num)
+
+#define PLATFORM_REG_SWITCH(reg1, reg2)	(reg2)
+
+#define writel_topaz(a, b)		writel(a, b)
+#define writel_ruby(a, b)
+
+#define QTN_VLAN_LLC_ENCAP		1
+
+#define TOPAZ_128_NODE_MODE		1
+
+#endif /* #ifndef __TOPAZ_CONFIG_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/uboot_header.h b/arch/arc/plat-qtn/sdk-qsr1000/common/uboot_header.h
new file mode 100644
index 0000000..478cf06
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/uboot_header.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2015 Quantenna Communications, Inc.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This code is taken from u-boot/include/image.h file
+ */
+#ifndef UBOOT_HEADER_H
+#define UBOOT_HEADER_H
+
+#ifndef __ASSEMBLY__
+#define IH_MAGIC      0x27051956      /* Image Magic Number           */
+#define IH_NMLEN              32      /* Image Name Length            */
+
+/*
+ * Legacy format image header,
+ * all data in network byte order (aka natural aka bigendian).
+ */
+typedef struct image_header {
+	uint32_t        ih_magic;       /* Image Header Magic Number    */
+	uint32_t        ih_hcrc;        /* Image Header CRC Checksum    */
+	uint32_t        ih_time;        /* Image Creation Timestamp     */
+	uint32_t        ih_size;        /* Image Data Size              */
+	uint32_t        ih_load;        /* Data  Load  Address          */
+	uint32_t        ih_ep;          /* Entry Point Address          */
+	uint32_t        ih_dcrc;        /* Image Data CRC Checksum      */
+	uint8_t         ih_os;          /* Operating System             */
+	uint8_t         ih_arch;        /* CPU architecture             */
+	uint8_t         ih_type;        /* Image Type                   */
+	uint8_t         ih_comp;        /* Compression Type             */
+	uint8_t         ih_name[IH_NMLEN];      /* Image Name           */
+} image_header_t;
+
+static inline uint32_t image_get_header_size(void)
+{
+#define MAX_KNOWN_PAGE_SIZE 8192
+#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
+	return ROUND_UP(sizeof(image_header_t), MAX_KNOWN_PAGE_SIZE);
+}
+
+struct early_flash_config {
+	uint32_t	method;
+	uint32_t	ipaddr;
+	uint32_t	serverip;
+	uint8_t		reserved[8];
+	uint8_t		built_time_utc_sec[11];
+	uint8_t		uboot_type;
+} __attribute__ ((packed));
+#endif /* __ASSEMBLY__ */
+
+#define RUBY_BOOT_METHOD_TRYLOOP        0
+#define RUBY_BOOT_METHOD_TFTP           1
+#define RUBY_BOOT_METHOD_BOOTP          2
+#define RUBY_BOOT_METHOD_MAX            3
+
+#endif /* UBOOT_HEADER_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/common/ums_platform.h b/arch/arc/plat-qtn/sdk-qsr1000/common/ums_platform.h
new file mode 100755
index 0000000..ff8e7bd
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/common/ums_platform.h
@@ -0,0 +1,347 @@
+/*
+ *  common/ums_platform.h
+ *
+ *  Copyright (c) Quantenna Communications Incorporated 2007.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This file holds the hardware specific memory map and common declarations
+ * for the UMS build system.  The defines here are used in the bootrom,
+ * the U-Boot bootloader and the linux kernel.
+ *
+ * This file should only contain definitions that are assembler-friendly.
+ */
+
+#ifndef __UMS_PLATFORM_H
+#define __UMS_PLATFORM_H	1
+
+/* ================ System boot modes ================ */
+/* These are used in the boot rom which is hardcoded into
+ * the chip.  Do not change them unless the chip changes.
+ */
+#define BMODE_PRODUCTION_TEST 0
+#define BMODE_SERIAL_ICC 1
+#define BMODE_NOR 2
+
+/* ================ Physical address map ================ */
+#define UMS_DDR				0x00000000
+#define UMS_SRAM			0x80000000
+#define UMS_SRAM_SIZE			0x00080000
+#define UMS_EBB_CS0			0x90000000
+#define UMS_EBB_CS1			0x91000000
+#define UMS_EBB_CS2			0x92000000
+#define UMS_EBB_CS3			0x93000000
+#define UMS_EBB_CS4			0x94000000
+#define UMS_BOOTROM			0xA0000000
+#define UMS_ARM_ITCM			0xB0000000
+#define UMS_ITCM_SIZE			0x00008000
+#define UMS_ARM_DTCM			0xB0100000
+#define UMS_DTCM_SIZE			0x00008000
+#define UMS_REGS_DSP_UART		0xD0000000
+#define UMS_REGS_DSP_GPIO		0xD1000000
+#define UMS_REGS_DSP_SPI		0xD3000000
+#define UMS_REGS_DSP_CTRLRESET		0xD8000000
+#define UMS_REGS_DSP_MSP		0xD9000000
+#define UMS_REGS_DSP_XMEM		0xDCF80000
+#define UMS_REGS_DSP_YMEM		0xDCFA0000
+#define UMS_REGS_SYSCTRL		0xE0000000
+#define UMS_REGS_DDR			0xE1000000
+#define UMS_REGS_EBI			0xE2000000
+#define UMS_REGS_SRAM			0xE3000000
+#define UMS_REGS_MB			0xE4000000
+#define UMS_REGS_MAC			0xE5000000
+#define UMS_REGS_BB			0xE6000000
+#define UMS_REGS_RADAR			0xE6080000
+#define UMS_REGS_BB2			0xE6800000
+#define UMS_REGS_RADAR2			0xE6880000
+#define UMS_REGS_ICC			0xE8000000
+#define UMS_REGS_USB			0xE9000000
+#define UMS_REGS_ULA			0xEC000000
+#define UMS_REGS_ULA_MB         0xEC200000
+#define UMS_REGS_ETHERNET0		0xED000000
+#define UMS_REGS_ARM_UART0		0xF0000000
+#define UMS_REGS_ARM_GPIO		0xF1000000
+#define UMS_REGS_ARM_SPI		0xF2000000
+#define UMS_REGS_ARM_TIMERS		0xF3000000
+#define UMS_REGS_ARM_WATCHDOG		0xF4000000
+#define UMS_REGS_ARM_UART1		0xF5000000
+#define UMS_REGS_ARM_DMA		0xF8000000
+#define UMS_REGS_ARM_DSPI2C		0xF9000000
+#define UMS_REGS_ARM_VICS		0xFFF00000
+
+/* Explicit virtual address mappings for TCMs */
+#define UMS_ARM_ITCM_VA			0xFB000000
+#define UMS_ARM_DTCM_VA			0xFB100000
+
+#define UMS_ARM_SRAM_AREA		UMS_SRAM + CONFIG_ARCH_UMS_MUC_SRAM_REQUIREMENT
+#define UMS_ARM_SRAM_AREA_VA		IO_ADDRESS(UMS_ARM_SRAM_AREA)
+
+/* !!! FIXME_UMS - at present SRAM lives in IO address space */
+#define UMS_IO_AREA_START		UMS_SRAM
+
+/* ============== Interrupt functions ============== */
+
+/* Set bits in these values to make an interrupt a FIQ rather than an IRQ.
+   SELECT0 is for interrupts 0-31, SELECT1 for the others.
+*/
+#define FIQ_SELECT0 (0)
+#define FIQ_SELECT1 (0)
+
+#define VIC0_OFFSET	0x000FF000
+#define VIC1_OFFSET	0x000FE000
+#define INTERRUPT_VA0(a) (IO_ADDRESS(UMS_REGS_ARM_VICS) + VIC0_OFFSET + (a))
+#define INTERRUPT_VA1(a) (IO_ADDRESS(UMS_REGS_ARM_VICS) + VIC1_OFFSET + (a))
+#define PL192_IRQSTATUS (0)
+#define PL192_INTSELECT (0x0c)
+#define PL192_ENABLE (0x10)
+#define PL192_DISABLE (0x14)
+#define PL192_SOFTINT (0x18)
+#define PL192_SOFTINT_CLEAR (0x1c)
+#define PL192_PROTECTION (0x20)
+#define PL192_PRIORITY_MASK (0x24)
+#define PL192_PRIORITY_DAISY (0x28)
+#define PL192_VECTOR_ADDR (0x100)
+#define PL192_VECTOR_PRIORITY (0x200)
+#define PL192_VECTORADDRESS	0x0F00
+
+/* ============== Timer functions ============== */
+
+#define TIMER_VA(a) (IO_ADDRESS(UMS_REGS_ARM_TIMERS) + (a))
+
+#define TIMER_PRESCALAR_ENABLE (0x00)
+#define TIMER_PRESCALAR0 (0x04)
+#define TIMER_PRESCALAR1 (0x08)
+#define TIMER_CONTROL0 (0x0c)
+#define TIMER_VALUE0 (0x10)
+#define TIMER_CONTROL1 (0x14)
+#define TIMER_VALUE1 (0x18)
+#define TIMER_CONTROL2 (0x1c)
+#define TIMER_VALUE2 (0x20)
+#define TIMER_CONTROL3 (0x24)
+#define TIMER_VALUE3 (0x28)
+#define TIMER_INT_ENABLE (0x2c)
+#define TIMER_INT_STATUS (0x30)
+#define TIMER_INT_CLEAR (0x34)
+
+/* GPIO block register offsets */
+#define GPIO_INPUT		(0x00)
+#define GPIO_OUTPUT_MASK	(0x04)
+#define GPIO_OUTPUT		(0x08)
+#define GPIO_MODE1		(0x0c)
+#define GPIO_MODE2		(0x10)
+#define GPIO_ALTFN		(0x14)
+#define GPIO_ALTFN_DEFVAL	(0x18)
+
+/* GPIO special function GPIO line assignments (ARM GPIO block) */
+#define GPIO_UART0_SI		(0)
+#define GPIO_UART0_nRI		(1)
+#define GPIO_UART0_DSR		(2)
+#define GPIO_UART0_nDCD		(3)
+#define GPIO_UART0_nCTS		(4)
+#define GPIO_SPI_MISO		(5)
+#define GPIO_UART1_SI		(6)
+#define GPIO_UART0_SO		(8)
+#define GPIO_UART0_nRTS		(9)
+#define GPIO_UART0_nDTR		(10)
+#define GPIO_SPI_SCK		(11)
+#define GPIO_SPI_MOSI		(12)
+#define GPIO_UART1_SO		(13)
+#define GPIO_SPI_nCS		(14)
+
+/* alternate use for gpio5 */
+#define GPIO_RGMII_MODE		(5)
+
+/* GPIO mode register values */
+#define GPIO_MODE_INPUT		(0)
+#define GPIO_MODE_OUTPUT	(1)
+#define GPIO_MODE_OSOURCE	(2)
+#define GPIO_MODE_ODRAIN	(3)
+
+/* SPI controller register offsets */
+#define SPI_SPCR		(0x00)
+#define SPI_SPSR		(0x04)
+#define SPI_SPDR		(0x08)
+#define SPI_SPER		(0x0c)
+#define SPI_SLVN		(0x10)
+
+/* SPI status register bits */
+#define SPI_SPSR_RFEMPTY	(1 << 0)
+#define SPI_SPSR_RFFULL		(1 << 1)
+#define SPI_SPSR_WFEMPTY	(1 << 2)
+#define SPI_SPSR_WFFULL		(1 << 3)
+
+/* SPI control register bits */
+#define SPI_SPCR_SPR(x)		(((x) & 3) << 0)
+#define SPI_SPCR_CPHA		(1 << 2)
+#define SPI_SPCR_CPOL		(1 << 3)
+#define SPI_SPCR_MSTR		(1 << 4)
+#define SPI_SPCR_SPE		(1 << 6)
+#define SPI_SPCR_SPIE		(1 << 7)
+
+/* SPI extended control register bits */
+#define SPI_SPER_ESPR(x)	(((x) & 3) << 0)
+#define SPI_SPER_ICNT(x)	(((x) & 3) << 6)
+
+/* System controller register offset and bit position definitions */
+#define SYSCTRL_RESET_MASK	(0x00)
+#define SYSCTRL_RESET		(0x04)
+#define SYSCTRL_CTRL_MASK	(0x08)
+#define SYSCTRL_CTRL		(0x0c)
+#define SYSCTRL_RESET_CAUSE	(0x10)
+#define SYSCTRL_REV_NUMBER	(0x14)
+#define SYSCTRL_RGMII_DELAY	(0x1c)
+
+/* Reset bit positions for RESET_MASK and RESET_VEC registers */
+#define SYSCTRL_ARM_RUN		(1 << 0)
+#define SYSCTRL_EBI_RUN		(1 << 1)
+#define SYSCTRL_DDR_RUN		(1 << 2)
+#define SYSCTRL_SRAM_RUN	(1 << 3)
+#define SYSCTRL_DSPSS_RUN	(1 << 4)
+#define SYSCTRL_DSP_RUN		(1 << 5)
+#define SYSCTRL_MUC_RUN		(1 << 6)
+#define SYSCTRL_NETSS_RUN	(1 << 7)
+#define SYSCTRL_MMC_RUN		(1 << 8)
+#define SYSCTRL_ETHERNET_RUN	(1 << 9)
+#define SYSCTRL_IOSS_RUN	(1 << 10)
+#define SYSCTRL_ICC_RUN		(1 << 12)
+#define SYSCTRL_USB_RUN		(1 << 13)
+#define SYSCTRL_RESET_OUT   (1 << 31)
+
+/* System controller control register */
+#define SYSCTRL_BOOT_MODE(x)	(((x) & 7) << 0)
+#define SYSCTRL_REMAP(x)	(((x) & 3) << 3)
+#define SYSCTRL_CLKSEL(x)	(((x) & 3) << 5)
+#define SYSCTRL_ARM_IS_2X	(1 << 7)
+#define SYSCTRL_DSP_CLK		(1 << 8)
+#define SYSCTRL_MAC_CLK(x)	(((x) & 7) << 9)
+#define SYSCTRL_REMAP_SRAM	(1 << 12)
+#define SYSCTRL_ULPI_ENABLE	(1 << 13)
+#define SYSCTRL_ARM_GPIO_ENABLE	(1 << 14)
+#define SYSCTRL_DSP_GPIO_ENABLE	(1 << 15)
+#define SYSCTRL_EBI_MUXMODE	(1 << 16)
+#define SYSCTRL_ARBITER_MODE(x)	(((x) & 0xf) << 17)
+#define SYSCTRL_SPLIT_DISABLE	(1 << 21)
+#define SYSCTRL_EXT_USBCLK	(1 << 22)
+#define SYSCTRL_PCIE_ENABLE	(1 << 23)
+#define SYSCTRL_NETBUS_SWAP	(1 << 24)
+#define SYSCTRL_IOBUS_SWAP	(1 << 25)
+#define SYSCTRL_DSPBUS_SWAP	(1 << 26)
+
+#define SYSCTRL_REMAP_DDR	(0)
+#define SYSCTRL_REMAP_ROM	(1)
+#define SYSCTRL_REMAP_NOR	(2)
+#define SYSCTRL_REMAP_NAND	(3)
+
+/* Reset cause definitions */
+#define SYSCTRL_HARD_RESET	(1 << 0)
+#define SYSCTRL_SOFT_RESET	(1 << 1)
+#define SYSCTRL_WATCHDOG	(1 << 2)
+#define SYSCTRL_PLL_DRIFT	(1 << 3)
+#define SYSCTRL_EBI_STRAP(x)	(((x) & 3) >> 16)
+
+/* bbic2 bit to switch between 100M and 1000M */
+#define SYS_CTL_GMII_CLK_SEL	(1 << 23)
+#define SYS_CTL_FORCE_RGMII		(0xc0000000)
+
+/* Chip revision macros - use with SYSCTRL_REV_NUMBER */
+#define SYSCTRL_CHIP_MINOR(x)	((x) & 0xff)
+#define SYSCTRL_CHIP_MAJOR(x)	(((x) & 0xff) >> 8)
+#define SYSCTRL_CHIP_TYPE(x)	(((x) & 0xff) >> 16)
+#define SYSCTRL_CHIP_TYPE_UMS	(0)
+
+/* UART register offsets */
+#define PL011_DR	(0x00)
+#define PL011_RSR_ECR	(0x04)
+#define PL011_FR	(0x18)
+#define PL011_ILPR	(0x20)
+#define PL011_IBRD	(0x24)
+#define PL011_FBRD	(0x28)
+#define PL011_LCR_H	(0x2c)
+#define PL011_CR	(0x30)
+#define PL011_IFLS	(0x34)
+#define PL011_IMSC	(0x38)
+#define PL011_RIS	(0x3c)
+#define PL011_MIS	(0x40)
+#define PL011_ICR	(0x44)
+#define PL011_DMACR	(0x48)
+#define PL011_PERIPHID0	(0xfe0)
+#define PL011_PERIPHID1	(0xfe4)
+#define PL011_PERIPHID2	(0xfe8)
+#define PL011_PERIPHID3	(0xfec)
+#define PL011_CELLID0	(0xff0)
+#define PL011_CELLID1	(0xff4)
+#define PL011_CELLID2	(0xff8)
+#define PL011_CELLID3	(0xffc)
+
+/* Static memory controller offsets */
+#define PL241_DIRCMD	(0x1010)
+#define PL241_SETCYC	(0x1014)
+#define PL241_SETOPM	(0x1018)
+#define PL241_SETCYC0	(0x1100)
+#define PL241_SETOPM0	(0x1104)
+
+/* ICC register offsets */
+#define ICC_SRC		(0x00)
+#define ICC_DST		(0x04)
+#define ICC_CTRL	(0x08)
+#define ICC_ISR		(0x0C)
+#define ICC_MASKED_ISR	(0x10)
+#define ICC_IEN		(0x14)
+#define ICC_CLR_RIP	(0x18)
+#define ICC_RD_CMD	(0x20)
+#define ICC_BUSY_FLAG	(1 << 31) /* Busy bit in CTRL register */
+#define ICC_RD_COMPLETE	(1 << 1)  /* Read complete bit in ISR register */
+#define ICC_MAX_XFER	(0x8000) /* Max ICC length = 64kB !!! FIXME */
+
+/* MAC register offsets */
+#define UMS_MAC_IMEM	(0x00000)
+#define UMS_MAC_DMEM	(0x20000)
+#define UMS_MAC_PKTMEM	(0x30000)
+#define UMS_MAC_TXMEM	(0x40000)
+#define UMS_MAC_GBLCTRL	(0x42000)
+#define UMS_MAC0_TXREGS	(0x50000)
+#define UMS_MAC0_RXREGS	(0x52000)
+#define UMS_MAC0_SHARED	(0x53000)
+#define UMS_MAC1_TXREGS	(0x60000)
+#define UMS_MAC1_RXREGS	(0x62000)
+#define UMS_MAC1_SHARED	(0x63000)
+#define UMS_MAC_DMA	(0x70000)
+#define UMS_MAC_HOSTIF	(0x71000)
+
+/* BB register offsets*/
+/* MAY need revisit XXX */
+#define UMS_BB_SPI  (0x40000)
+#define UMS_BB_GAIN (0x50000)
+#define UMS_BB_XREF (0x60000)
+#define UMS_BB_RFIC (0x70000)
+#define UMS_BB_RDR  (0x80000)
+#define UMS_BB_COMPQ_MEM  (0xB0000)
+
+/* MBX register offsets */
+#define UMS_MBX_DSP_POP  (0x0000)
+#define UMS_MBX_DSP_PUSH (0x0040)
+#define UMS_MBX_CTRL     (0x0080)
+#define UMS_MBX_STATUS   (0x0084)
+#define UMS_MBX_INT_MSK  (0x0088)
+#define UMS_MBX_INT_CLR  (0x008C)
+
+/* MBX register bitfields */
+#define UMS_MBX_INT0 (1 << 0)
+#define UMS_MBX_INT1 (1 << 1)
+
+#define UMS_MBX_DSP_TO_ARM_EMPTY ( 1 << 24 )
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/Makefile b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/Makefile
new file mode 100644
index 0000000..97415c3
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/Makefile
@@ -0,0 +1,23 @@
+#
+# Copyright (C)2014 Quantenna Communications
+#
+
+TOPDIR=../..
+
+MUC_STATS_HEADER=./muc_txrx_stats.h
+MUC_COMMON_STAT_HEADER=./qtn_muc_stats_print.h
+
+.PHONY: FORCE
+
+all: ${MUC_COMMON_STAT_HEADER}
+
+${MUC_COMMON_STAT_HEADER}: ${MUC_STATS_HEADER} ${MUC_STATS_HEADER}.raw
+
+${MUC_STATS_HEADER}: ${MUC_STATS_HEADER}.raw
+
+${MUC_STATS_HEADER}.raw: FORCE
+	@cat ${MUC_STATS_HEADER} | \
+		${TOPDIR}/host/scripts/struct_get.sh | \
+		grep -v "^[[:blank:]]*$$" > $@
+	./muc_dbg_parse
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/auc_debug_stats.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/auc_debug_stats.h
new file mode 100755
index 0000000..fd26ce5
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/auc_debug_stats.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2013 Quantenna Communications, Inc.
+ */
+
+#ifndef _AUC_DEBUG_STATS_H_
+#define _AUC_DEBUG_STATS_H_
+
+#include <qtn/auc_share_def.h>
+
+/*
+ * Firmware updates counters through such macros as AUC_DBG_INC(), AUC_DBG_INC_OFFSET(), AUC_DBG_INC_COND(), etc.
+ * Other CPU (e.g. Lhost) can read structure and dump counters.
+ * Feel free to add more counters here.
+ * Good to have counters organized and grouped using name prefix.
+ */
+struct auc_dbg_counters
+{
+	/* pktlogger expects task_alive_counters to be the first member of this struct */
+	uint32_t task_alive_counters[AUC_TID_NUM];
+	uint32_t task_false_trigger[AUC_TID_NUM];
+	uint32_t tqew_ac[4];
+	uint32_t tqew_ac_avail[4];
+	uint32_t tqew_air_humble;
+	uint32_t tqew_air_suppress;
+	uint32_t tqew_air_use_idletime;
+	uint32_t tqew_air_dequeue_only;
+	uint32_t tqew_pkt_pending_for_txdone;
+	uint32_t tqew_descr_alloc_fail;
+	uint32_t tqew_ring_alloc_fail;
+	uint32_t tqew_pop_alloc_fail;
+	uint32_t tqew_pop_sw_limit;
+	uint32_t tqew_pop_empty;
+	uint32_t tqew_available_set;
+	uint32_t tqew_available_reset;
+	uint32_t tqew_rx;
+	uint32_t tqew_drop;
+	uint32_t tqew_free;
+	uint32_t tqew_buf_invalid;
+	uint32_t wmac_tx_done[4];
+	uint32_t agg_aggregate_flag;
+	uint32_t agg_aggressive_agg;
+	uint32_t hdrs_available_recent_min;
+	uint32_t agg_states[QTN_AUC_TID_TX_STATE_MAX + 1];
+	uint32_t ethq_push;
+	uint32_t ethq_pop;
+	uint32_t agg_aggregate_mpdu;
+	uint32_t agg_aggregate_msdu;
+	uint32_t agg_singleton_mpdu;
+	uint32_t agg_singleton_mgmt;
+	uint32_t agg_singleton_ctl;
+	uint32_t agg_singleton_probe;
+	uint32_t agg_4K_amsdu;
+	uint32_t agg_8K_amsdu;
+	uint32_t agg_11K_amsdu;
+	uint32_t tx_feedback_success;
+	uint32_t tx_feedback_fail;
+	uint32_t tx_done_status_success;
+	uint32_t tx_done_status_timeout;
+	uint32_t tx_done_status_xretry;
+	uint32_t tx_done_status_timeout_xretry;
+	uint32_t tx_done_pkt_chain_reset;
+	uint32_t tx_done_pkt_chain_success;
+	uint32_t tx_done_pkt_chain_drop_tid_down;
+	uint32_t tx_done_pkt_chain_drop_xattempts;
+	uint32_t tx_done_singleton_finish;
+	uint32_t tx_done_singleton_swretry;
+	uint32_t tx_done_aggregate_finish;
+	uint32_t tx_done_aggregate_hwretry;
+	uint32_t tx_done_aggregate_swretry;
+	uint32_t tx_done_mpdu_swretry;
+	uint32_t tx_sample;
+	uint32_t tx_bw_sample;
+	uint32_t tx_swretry_lower_bw;
+	uint32_t tx_swretry_agg_exceed;
+	uint32_t tx_scale_base_20m;
+	uint32_t tx_scale_base_40m;
+	uint32_t tx_scale_base_80m;
+	uint32_t tx_scale_max;
+	uint32_t tx_scale_overstep;
+	uint32_t alloc_tqew_fast;
+	uint32_t free_tqew_fast;
+	uint32_t alloc_tqew_slow;
+	uint32_t free_tqew_slow;
+	uint32_t alloc_tqew_local;
+	uint32_t free_tqew_local;
+	uint32_t alloc_hdr_fast;
+	uint32_t free_hdr_fast;
+	uint32_t alloc_hdr_slow;
+	uint32_t free_hdr_slow;
+	uint32_t alloc_msdu_hdr_failed;
+	uint32_t alloc_mpdu_hdr_failed;
+	uint32_t alloc_tid_superfast;
+	uint32_t free_tid_superfast;
+	uint32_t alloc_tid_fast;
+	uint32_t free_tid_fast;
+	uint32_t alloc_tid_slow;
+	uint32_t free_tid_slow;
+	uint32_t alloc_node_rate_fast;
+	uint32_t free_node_rate_fast;
+	uint32_t alloc_node_rate_slow;
+	uint32_t free_node_rate_slow;
+	uint32_t alloc_node_superfast;
+	uint32_t free_node_superfast;
+	uint32_t alloc_node_fast;
+	uint32_t free_node_fast;
+	uint32_t alloc_fcs;
+	uint32_t free_fcs;
+	uint32_t alloc_mac_descr;
+	uint32_t free_mac_descr;
+	uint32_t tx_mac_push;
+	uint32_t tx_mac_idle;
+	uint32_t tx_mac_rts;
+	uint32_t tx_mac_cts2self;
+	uint32_t tx_vlan_drop;
+	uint32_t tx_acm_drop;
+	uint32_t tx_ps_drop;
+	uint32_t ocs_tx_suspend;
+	uint32_t ocs_tx_resume;
+	uint32_t ocs_singleton_suspend;
+	uint32_t ocs_ampdu_suspend;
+	uint32_t ocs_frame_created;
+	uint32_t pwr_mgmt_awake;
+	uint32_t pwr_mgmt_sleep;
+	uint32_t pwr_mgmt_tx;
+	uint32_t pspoll_rx;
+	uint32_t dtim_q_push;
+	uint32_t dtim_q_pop;
+	uint32_t dtim_trigger;
+	uint32_t dtim_q_overflow;
+	uint32_t tx_restrict_dropped;
+	uint32_t tx_throt_dropped;
+	uint32_t tx_block_singleton;
+	uint32_t tx_force_unblock_tid;
+	uint32_t tx_ctl_pkt_hbm_alloc_fails;
+	uint32_t tx_ctl_pkt_alloc_descr_fails;
+	uint32_t tx_bar_alloc_ctl_pkt_fails;
+	uint32_t tx_valid_bit_not_set;
+
+	uint32_t wmm_ps_tx;
+	uint32_t wmm_ps_tx_null_frames;
+	uint32_t wmm_ps_tx_more_data_frames;
+	uint32_t wmm_ps_tx_eosp_frames;
+
+	/*
+	 * Mu Tx & Done & Retry
+	 */
+	uint32_t mu_tx_su_count;	/* Can't find a buddy, so this AMPDU is sent as SU */
+
+	uint32_t mu_tx_send_mu_fail;	/* Can't be sent as MU, send them as SU */
+
+	uint32_t mu_tx_push_count;
+	uint32_t mu_tx_done_count;
+
+	uint32_t mu_tx_done_succ;	/* The succ/fail counters for AMPDUs sent via WMAC1 */
+	uint32_t mu_tx_done_fail;
+	uint32_t mu_tx_sample;            /* mu sampling phy rate count */
+	uint32_t mu_bar_bitmap_non_zero;
+	uint32_t mu_bar_bitmap_zero;
+	uint32_t mu_mac_wmac1_ipc_push;
+	uint32_t mu_mac_wmac1_auc_push;
+	uint32_t mu_wmac1_resets;
+
+	uint32_t mu_tx_swretry_agg_exceed;
+
+	uint32_t mu_tx_buddy_try;
+	uint32_t mu_tx_buddy_fail_wmac;
+	uint32_t mu_tx_buddy_fail_ptid;
+	uint32_t mu_tx_buddy_fail_rate;
+	uint32_t mu_tx_buddy_fail_create_agg;
+
+	uint32_t mu_tx_buddy_mu_only_timeout;
+
+	uint32_t mu_tx_another_q_push_succ;
+	uint32_t mu_tx_another_q_push_fail;	/* If current cont_q is not ready, try another cont_q */
+	uint32_t mu_tx_buddy_multi_tid;
+
+	/* For debug, remove it before submitting */
+	uint32_t mu_tx_wmac_0_done_count;
+	uint32_t mu_tx_wmac_0_bitmap_non_zero;
+	uint32_t mu_tx_wmac_0_bitmap_zero;
+	uint32_t mu_tx_wmac_0_done_timeout;
+	uint32_t mu_tx_wmac_0_done_succ;
+	uint32_t mu_tx_wmac_0_done_fail;
+
+	uint32_t mu_tx_wmac_1_done_succ;
+	uint32_t mu_tx_wmac_1_done_fail;
+
+	uint32_t mu_tx_wmac_0_mpdu_total;
+	uint32_t mu_tx_wmac_0_mpdu_succ;
+
+	uint32_t mu_tx_wmac_1_mpdu_total;
+	uint32_t mu_tx_wmac_1_mpdu_succ;
+
+	uint32_t mu_tx_qnum[AUC_FW_WMAC_TX_QNUM];
+	uint32_t tqe_sema_fails;
+};
+#endif // #ifndef _AUC_DEBUG_STATS_H_
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/auc_share_def.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/auc_share_def.h
new file mode 100755
index 0000000..3fc17e3
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/auc_share_def.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013 Quantenna Communications, Inc.
+ */
+
+#ifndef _AUC_SHARE_DEF_H_
+#define _AUC_SHARE_DEF_H_
+
+/* Define how many TIDs have and which is timer TID */
+#define AUC_TID_FIRST			0
+#define AUC_TID_NUM			20
+#define AUC_TID_TIMER			19
+
+#ifndef __ASSEMBLY__
+/* WMAC parameters */
+#define AUC_FW_WMAC_TX_QNUM		4
+#define AUC_FW_WMAC_TX_QDEEP		4
+
+#define AUC_FW_WMAC_RX_Q_MGMT		0
+#define AUC_FW_WMAC_RX_Q_CTRL		1
+#define AUC_FW_WMAC_RX_Q_DATA		2
+#define AUC_FW_WMAC_RX_QNUM		3
+#define AUC_FW_WMAC_RX_QDEEP_MGMT		8
+#define AUC_FW_WMAC_RX_QDEEP_CTRL		8
+#define AUC_FW_WMAC_RX_QDEEP_DATA		64
+#define AUC_FW_WMAC_RX_DESC_NUM	(AUC_FW_WMAC_RX_QDEEP_MGMT + \
+	AUC_FW_WMAC_RX_QDEEP_CTRL + AUC_FW_WMAC_RX_QDEEP_DATA)
+#endif
+
+/* Used to define 'state' field of qtn_auc_per_tid_data */
+#define QTN_AUC_TID_TX_STATE_IDLE			0		/* idle state, this is init state, please keep zero value */
+#define QTN_AUC_TID_TX_STATE_RUN			1		/* sending state */
+#define QTN_AUC_TID_TX_STATE_WAIT_TIMER_AGG		2		/* waiting on agg timer firing */
+#define QTN_AUC_TID_TX_STATE_WAIT_TX_DONE		3		/* waiting on tx done firing */
+#define QTN_AUC_TID_TX_STATE_WAIT_TX_RESUME		4		/* waiting on ocs tx resume */
+#define QTN_AUC_TID_TX_STATE_MAX			4		/* maximum value of tx states */
+
+/* Used to define 'tqew_state' field of qtn_auc_per_tid_data */
+#define QTN_AUC_TID_TQEW_STATE_RUN			0
+#define QTN_AUC_TID_TQEW_STATE_WAIT_TX_DONE		1
+#define QTN_AUC_TID_TQEW_STATE_MAX			1
+
+#endif // #ifndef _AUC_SHARE_DEF_H_
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/bootcfg.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/bootcfg.h
new file mode 100644
index 0000000..966720a
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/bootcfg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2009 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ *  Syscfg module - uses config sector for a common filesystem between linux and
+ *  uboot.
+ */
+
+
+typedef u32 bootcfg_t;
+
+/******************************************************************************
+	Function:   bootcfg_create
+	Purpose:	create file
+ 	Returns:	0 if successful			
+  	Note:  	    if size is zero, the proc entry is created but
+  	            no data is allocated until the first write
+ *****************************************************************************/
+int bootcfg_create(const char *filename,u32 size);
+
+/******************************************************************************
+	Function:   bootcfg_delete
+	Purpose:	delete file
+ 	Returns:	0 if successful			
+  	Note:  	    
+ *****************************************************************************/
+int bootcfg_delete(const char *token);
+
+/******************************************************************************
+   Function:    bootcfg_get_var
+   Purpose:     Get variable from environment
+   Returns:     NULL if variable not found, pointer to storage otherwise
+   Note:        variable value copied to storage
+ *****************************************************************************/
+char* bootcfg_get_var(const char *variable, char *storage);
+
+/******************************************************************************
+   Function:    bootcfg_set_var
+   Purpose:     Set variable to environment
+   Returns:     0 if successful
+   Note:        value is stored in the environment under the given name
+ *****************************************************************************/
+int bootcfg_set_var(const char *var, const char *value);
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/dmautil.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/dmautil.h
new file mode 100644
index 0000000..dab25bd
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/dmautil.h
@@ -0,0 +1,122 @@
+/*
+ *  Copyright (c) Quantenna Communications, Inc. 2012
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __QTN_DMA_UTIL_H
+#define __QTN_DMA_UTIL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#define ALIGNED_DMA_DESC(typeq, type)		\
+	struct aligned_dma_##type {		\
+		uint32_t desc_count;		\
+		typeq type *descs;		\
+		unsigned long aligned_vdescs;	\
+		unsigned long unaligned_vdescs;	\
+		unsigned long descs_dma_addr;	\
+	}
+
+typedef ALIGNED_DMA_DESC(, void) aligned_dma_descs;
+
+#define ALIGNED_DMA_DESC_ALLOC(_ptr, _count, _align, _is_sram)			\
+	dmautil_aligned_dma_desc_alloc((aligned_dma_descs *)(_ptr),		\
+			sizeof((_ptr)->descs[0]), (_count), (_align), (_is_sram))
+int dmautil_aligned_dma_desc_alloc(aligned_dma_descs *d,
+		unsigned int desc_size, unsigned int desc_count,
+		unsigned int align, bool is_sram);
+
+#define ALIGNED_DMA_DESC_FREE(_ptr)	dmautil_aligned_dma_desc_free((aligned_dma_descs *)(_ptr))
+void dmautil_aligned_dma_desc_free(aligned_dma_descs *d);
+
+/*
+ * Alignment helpers
+ */
+__always_inline static unsigned long align_val_up(unsigned long val, unsigned long step)
+{
+	return ((val + step - 1) & (~(step - 1)));
+}
+__always_inline static unsigned long align_val_down(unsigned long val, unsigned long step)
+{
+	return (val & (~(step - 1)));
+}
+__always_inline static void* align_buf_dma(void *addr)
+{
+	return (void*)align_val_up((unsigned long)addr, dma_get_cache_alignment());
+}
+__always_inline static unsigned long align_buf_dma_offset(void *addr)
+{
+	return ((char *)align_buf_dma(addr) - (char *)addr);
+}
+__always_inline static void* align_buf_cache(void *addr)
+{
+	return (void*)align_val_down((unsigned long)addr, dma_get_cache_alignment());
+}
+__always_inline static unsigned long align_buf_cache_offset(void *addr)
+{
+	return ((char *)addr - (char *)align_buf_cache(addr));
+}
+__always_inline static unsigned long align_buf_cache_size(void *addr, unsigned long size)
+{
+	return align_val_up(size + align_buf_cache_offset(addr), dma_get_cache_alignment());
+}
+
+__always_inline static void flush_dcache_sizerange_safe(void *p, size_t size)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_wback((unsigned long)p, size);
+#else
+	uintptr_t op_start = (uintptr_t) align_buf_cache(p);
+	size_t op_size = align_buf_cache_size(p, size);
+	flush_dcache_range(op_start, op_start + op_size);
+#endif
+}
+
+__always_inline static void flush_and_inv_dcache_sizerange_safe(void *p, size_t size)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_wback_inv((unsigned long)p, size);
+#else
+	uintptr_t op_start = (uintptr_t) align_buf_cache(p);
+	size_t op_size = align_buf_cache_size(p, size);
+	flush_and_inv_dcache_range(op_start, op_start + op_size);
+#endif
+
+}
+
+__always_inline static void inv_dcache_sizerange_safe(void *p, size_t size)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_inv((unsigned long)p, size);
+#else
+	uintptr_t op_start = (uintptr_t) align_buf_cache(p);
+	size_t op_size = align_buf_cache_size(p, size);
+	inv_dcache_range(op_start, op_start + op_size);
+#endif
+}
+
+#endif	// __ASSEMBLY__
+#endif	// __QTN_DMA_UTIL_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/dsp_stats.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/dsp_stats.h
new file mode 100644
index 0000000..7f003f2
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/dsp_stats.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016 Quantenna Communications, Inc.
+ */
+
+#ifndef _DSP_STATS_H_
+#define _DSP_STATS_H_
+
+#include "qtn/txbf_common.h"
+
+#define DSP_ACT_RX_DBG_SIZE	10
+
+#if DSP_ENABLE_STATS
+struct qtn_dsp_stats {
+	uint32_t dsp_ndp_rx;
+
+	/* Per-node DSP stats */
+
+	/* Total number of feedbacks received */
+	uint32_t dsp_act_rx[DSP_ACT_RX_DBG_SIZE];
+
+	/* Number of SU feedbacks */
+	uint32_t dsp_act_rx_su[DSP_ACT_RX_DBG_SIZE];
+
+	/* Number of MU group selection feedbacks */
+	uint32_t dsp_act_rx_mu_grp_sel[DSP_ACT_RX_DBG_SIZE];
+
+	/* Number of MU precoding feedbacks */
+	uint32_t dsp_act_rx_mu_prec[DSP_ACT_RX_DBG_SIZE];
+
+	/* Number of bad feedbacks, i.e. those that meet neither SU nor MU criteria */
+	uint32_t dsp_act_rx_bad[DSP_ACT_RX_DBG_SIZE];
+
+	/*
+	 * Number of feedbacks that were not placed into the cache for any reason. Counters for two reasons
+	 * are just below
+	 */
+	uint32_t dsp_act_rx_mu_drop[DSP_ACT_RX_DBG_SIZE];
+
+	/* The number of MU feedbacks not placed into the cache because the previous one has not yet expired */
+	uint32_t dsp_act_rx_mu_nexp[DSP_ACT_RX_DBG_SIZE];
+
+	/* The number of MU feedback not placed into the cache due to cache is locked */
+	uint32_t dsp_act_rx_mu_lock_cache[DSP_ACT_RX_DBG_SIZE];
+
+	/*
+	 * The number of precoding feedbacks released unused, i.e. that did not participate in QMat calculation.
+	 * It means the buddy feedback either was not received or was received after the cache expiration time
+	 */
+	uint32_t dsp_act_rx_mu_rel_nuse[DSP_ACT_RX_DBG_SIZE];
+
+	/* The number of MU feedbacks for which dsp_qmat_check_act_len fails */
+	uint32_t dsp_act_rx_inval_len[DSP_ACT_RX_DBG_SIZE];
+
+	uint32_t dsp_del_mu_node_rx;
+	uint32_t dsp_ipc_in;
+	uint32_t dsp_ipc_out;
+	uint32_t dsp_sleep_in;
+	uint32_t dsp_sleep_out;
+	uint32_t dsp_act_tx;
+	uint32_t dsp_ndp_discarded;
+	uint32_t dsp_ndp_inv_len;
+	uint32_t dsp_ndp_max_len;
+	uint32_t dsp_ndp_inv_bw;
+	uint32_t dsp_act_free_tx;
+	uint32_t dsp_inst_mu_grp_tx;
+	uint32_t dsp_qmat_invalid;
+	uint32_t dsp_su_feedback_proc_time;
+/* Number of QMats currently installed */
+	int32_t dsp_sram_qmat_num;
+/*
+ * Number of times dsp_sram_qmat_num becomes negative. A non-zero value signals that the number
+ * of QMat de-installations exceeds the number of installations. This is an error condition, but not a critical one
+ */
+	uint32_t dsp_err_neg_qmat_num;
+	uint32_t dsp_flag;
+	/* Interrupts */
+	uint32_t dsp_ipc_int;
+	uint32_t dsp_timer_int;
+	uint32_t dsp_timer1_int;
+	uint32_t dsp_last_int;
+
+	uint32_t dsp_exc;
+	/* registers */
+	uint32_t dsp_status32;
+	uint32_t dsp_status32_l1;
+	uint32_t dsp_status32_l2;
+	uint32_t dsp_ilink1;
+	uint32_t dsp_ilink2;
+	uint32_t dsp_blink;
+	uint32_t dsp_sp;
+	uint32_t dsp_time;
+
+	uint32_t dsp_point;
+	uint32_t dsp_stat_bad_stack;
+
+	int16_t dspmu_D_user1[4];
+	int16_t dspmu_D_user2[4];
+	int16_t dspmu_max_intf_user1;
+	int16_t dspmu_max_intf_user2;
+	int16_t rank_criteria;
+	int16_t pad;
+	uint32_t dsp_trig_mu_grp_sel;
+	uint32_t dsp_mu_rank_success;
+	uint32_t dsp_mu_rank_fail;
+
+	/* The number of failed group installations */
+	uint32_t dsp_mu_grp_inst_fail;
+
+	/* Per-MU group DSP stats */
+	/* The number of successful group installations */
+	uint32_t dsp_mu_grp_inst_success[QTN_MU_QMAT_MAX_SLOTS];
+	/* The number of successful QMat installations */
+	uint32_t dsp_mu_grp_update_success[QTN_MU_QMAT_MAX_SLOTS];
+	/* The number of failed QMat installations */
+	uint32_t dsp_mu_grp_update_fail[QTN_MU_QMAT_MAX_SLOTS];
+	/* Group's AID 0 */
+	uint32_t dsp_mu_grp_aid0[QTN_MU_QMAT_MAX_SLOTS];
+	/* Group's AID 1 */
+	uint32_t dsp_mu_grp_aid1[QTN_MU_QMAT_MAX_SLOTS];
+	/* Group's rank */
+	int32_t dsp_mu_grp_rank[QTN_MU_QMAT_MAX_SLOTS];
+
+	/*
+	 * Distribution (histogram) of MU QMat copying time
+	 0:  0- 3us
+	 1:  4- 7us
+	 ...............
+	 3: 12+ us
+	 */
+#define DSP_MU_QMAT_COPY_TIME_HIST_WIDTH_US	4
+	uint32_t dsp_mu_qmat_qmem_copy_time_hist[4];
+	uint32_t dsp_mu_qmat_qmem_copy_time_max;
+
+	/*
+	 * Distribution (histogram) of MU QMat calculation and installation time
+	 0:  0- 5ms
+	 1:  6-11ms
+	 ...............
+	 7: 42+ ms
+	 */
+#define DSP_MU_QMAT_INST_TIME_HIST_WIDTH_MS	6
+	uint32_t dsp_mu_qmat_inst_time_hist[8];
+	uint32_t dsp_mu_qmat_inst_time_max;
+
+	uint32_t dsp_mu_grp_inv_act;
+	uint32_t dsp_act_cache_expired[2];
+	uint32_t dsp_mu_grp_upd_done;
+	uint32_t dsp_mu_node_del;
+
+	uint32_t dsp_mimo_ctrl_fail;
+	uint32_t dsp_mu_fb_80mhz;
+	uint32_t dsp_mu_fb_40mhz;
+	uint32_t dsp_mu_fb_20mhz;
+	uint32_t dsp_mu_drop_20mhz;
+};
+#endif
+
+
+#endif	/* _DSP_STATS_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/emac_debug.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/emac_debug.h
new file mode 100644
index 0000000..6de7aee
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/emac_debug.h
@@ -0,0 +1,31 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _QTN_EMAC_DEBUG_H
+#define _QTN_EMAC_DEBUG_H
+
+#include <qtn/skb_recycle.h>
+
+uint32_t qtn_eth_rx_lost_get(struct net_device *dev);
+
+#endif	// _QTN_EMAC_DEBUG_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/hardware_revision.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/hardware_revision.h
new file mode 100644
index 0000000..d754fbd
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/hardware_revision.h
@@ -0,0 +1,74 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __RUBY_VERSION_H
+#define __RUBY_VERSION_H
+
+#ifndef __ASSEMBLY__
+
+#include "mproc_sync_base.h"
+
+RUBY_INLINE int get_hardware_revision(void)
+{
+	volatile struct shared_params* sp = qtn_mproc_sync_shared_params_get();
+	if (sp) {
+		return sp->hardware_revision;
+	} else {
+		return HARDWARE_REVISION_UNKNOWN;
+	}
+}
+
+#ifdef __KERNEL__
+RUBY_INLINE int _read_hardware_revision(void)
+{
+	int ret = HARDWARE_REVISION_UNKNOWN;
+	uint32_t board_rev = readl(RUBY_SYS_CTL_CSR);
+
+	if ((board_rev & CHIP_ID_MASK) == CHIP_ID_RUBY) {
+		uint32_t spare1 = readl(RUBY_QT3_BB_TD_SPARE_1);
+		if ((spare1 & CHIP_REV_ID_MASK)  == REV_ID_RUBY_A) {
+			ret = HARDWARE_REVISION_RUBY_A;
+		} else if ((spare1 & CHIP_REV_ID_MASK) == REV_ID_RUBY_B) {
+			ret = HARDWARE_REVISION_RUBY_B;
+		} else if ((spare1 & CHIP_REV_ID_MASK) == REV_ID_RUBY_D){
+			ret = HARDWARE_REVISION_RUBY_D;
+		}
+	} else if ((board_rev & CHIP_ID_MASK) == CHIP_ID_TOPAZ) {
+		switch (board_rev & CHIP_REV_ID_MASK) {
+			case REV_ID_TOPAZ_A:
+				ret = HARDWARE_REVISION_TOPAZ_A;
+				break;
+			case REV_ID_TOPAZ_B:
+				ret = HARDWARE_REVISION_TOPAZ_B;
+				break;
+			case REV_ID_TOPAZ_A2:
+				ret = HARDWARE_REVISION_TOPAZ_A2;
+				break;
+		}
+	}
+	return ret;
+}
+#endif //__KERNEL__
+
+#endif	// __ASSEMBLY__
+#endif	// __RUBY_VERSION_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/iputil.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/iputil.h
new file mode 100644
index 0000000..42791c7
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/iputil.h
@@ -0,0 +1,392 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012 Quantenna Communications. Inc.                 **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _IPUTIL_H_
+#define _IPUTIL_H_
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <asm/unaligned.h>
+#endif
+
+#include <linux/inetdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net80211/if_ethersubr.h>
+#if defined(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <linux/in6.h>
+#include <linux/inet.h>
+#include <net/addrconf.h>
+#endif
+
+#define IPUTIL_HDR_VER_4	4
+#define IPUTIL_HDR_VER_6	6
+
+#define IPUTIL_V4_ADDR_SSDP		htonl(0xEFFFFFFA) /* 239.255.255.250 */
+#define IPUTIL_V4_ADDR_MULTICAST(_addr)	\
+	((_addr & htonl(0xF0000000)) == htonl(0xE0000000)) /* 224.0.0.0/4 */
+#define IPUTIL_V6_ADDR_MULTICAST(_addr)	\
+	((_addr & htonl(0xFF000000)) == htonl(0xFF000000)) /* ff00::/8 - see __ipv6_addr_type() */
+#define IPUTIL_V4_ADDR_LNCB(_addr)	\
+	((_addr & htonl(0xFFFFFF00)) == htonl(0xE0000000)) /* 224.0.0.0/24 */
+#define IPUTIL_V6_ADDR_LNCB(_addr)     \
+	((_addr & htonl(0xFF0F0000)) == htonl(0xFF020000)) /* ffx2::/8 - see __ipv6_addr_type() */
+
+#define IPUTIL_V4_FRAG_OFFSET(_fh)	(ntohs(_fh->frag_off) & ~0x7)
+#define IPUTIL_V4_FRAG_MF(_fh)		(ntohs(_fh->frag_off) & IP6_MF)
+
+#define IPUTIL_V6_FRAG_OFFSET(_fh)	(ntohs(_fh->frag_off) & ~0x7)
+#define IPUTIL_V6_FRAG_MF(_fh)		(ntohs(_fh->frag_off) & IP6_MF)
+
+#define NIPV6OCTA_FMT "%pI6"
+#define NIPV6OCTA(_ipv6_addr_) _ipv6_addr_
+
+#define IPUTIL_V4_ADDR_LEN 4
+
+#ifdef CONFIG_IPV6
+int iputil_v6_skip_exthdr(const struct ipv6hdr *ipv6h, int start, uint8_t *nexthdrp,
+				int total_len, __be32 *ip_id, uint8_t *more_frags);
+int iputil_v6_ntop(char *buf, const struct in6_addr *addr);
+int iputil_v6_ntop_port(char *buf, const struct in6_addr *addr, __be16 port);
+int iputil_eth_is_v6_mld(void *iphdr, uint32_t data_len);
+
+int iputil_ipv6_is_neigh_msg(struct ipv6hdr *ipv6, struct icmp6hdr *icmpv6);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+int iputil_ipv6_is_neigh_sol_msg(uint8_t dup_addr_detect,
+				const struct in6_addr *target,
+				const struct in6_addr *daddr);
+#else
+int iputil_ipv6_is_neigh_sol_msg(uint8_t dad, struct nd_msg *msg, struct ipv6hdr *ipv6);
+#endif
+#endif
+
+int iputil_v4_pton(const char *ip_str, __be32 *ipaddr);
+int iputil_v4_ntop_port(char *buf, __be32 addr, __be16 port);
+
+/*
+ * IPv6 broadcasts are scoped multicast.
+ * +--------+----+----+---------------------------------------------+
+ * | 8      | 4  | 4  |                 112 bits                    |
+ * +------ -+----+----+---------------------------------------------+
+ * |11111111|flgs|scop|                 group ID                    |
+ * +--------+----+----+---------------------------------------------+
+ *
+ *  Scope:
+ *  1: Interface-Local (loopback)
+ *  2: Link-Local
+ *  4: Admin-Local
+ *  5: Site-Local
+ *  8: Organization-Local
+ *  E: Global
+ *  0,3,F: reserved
+ *  others: unassigned, are available for administrators to define additional multicast regions.
+ *
+ *  RFC4291 http://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xml
+ */
+#ifdef CONFIG_IPV6
+static inline int iputil_mac_is_v6_local(const struct ipv6hdr *ipv6h)
+{
+	const struct in6_addr *ipaddr = &ipv6h->daddr;
+
+	return ((ipaddr->in6_u.u6_addr8[0] == 0xff) &&
+		(ipaddr->in6_u.u6_addr8[1] > 0x01) &&
+		(ipaddr->in6_u.u6_addr8[1] < 0x0E));
+}
+#endif
+
+static inline int iputil_is_v4_ssdp(const void *addr, const void *iph)
+{
+	static const char ssdp_addr[] = {0x01, 0x00, 0x5E, 0x7F, 0xFF, 0xFA};
+
+	if (unlikely(!memcmp(addr, ssdp_addr, sizeof(ssdp_addr)))) {
+		const struct iphdr *ipv4h = iph;
+		uint32_t daddr = get_unaligned((uint32_t *)&ipv4h->daddr);
+
+		if (daddr == IPUTIL_V4_ADDR_SSDP) {
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_IPV6
+/*
+ * IPv6 SSDP is 0xff0?::c
+ */
+static inline int iputil_is_v6_ssdp(const unsigned char *dest, const struct ipv6hdr *ipv6h)
+{
+	static const uint8_t ssdp6_addr[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00, 0x00, 0x0c};
+	const struct in6_addr *ipaddr = &ipv6h->daddr;
+
+	return ((memcmp(dest, &ssdp6_addr, sizeof(ssdp6_addr)) == 0) &&
+		((__constant_ntohl(ipaddr->in6_u.u6_addr32[0]) & 0xfff0ffff) == 0xff000000) &&
+		(ipaddr->in6_u.u6_addr32[1] == 0) && (ipaddr->in6_u.u6_addr32[2] == 0) &&
+		(ipaddr->in6_u.u6_addr32[3] == __constant_htonl(0xc)));
+}
+#endif
+
+static inline int iputil_is_ssdp(const void *addr, const void *iph)
+{
+	if (iputil_is_v4_ssdp(addr, iph)) {
+		return 1;
+	}
+
+#ifdef CONFIG_IPV6
+	if (unlikely(iputil_is_v6_ssdp(addr, iph))) {
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_IPV6
+static inline
+void iputil_in6_addr_copy(struct in6_addr *dst, const struct in6_addr *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+/*
+ * IPv6 all-nodes multicast address
+ * the link-local scope address to reach all nodes is 0xff02::1
+ */
+static inline int iputil_ipv6_is_ll_all_nodes_mc(const unsigned char *dest, void *iph)
+{
+	struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
+	static const uint8_t ll_all_nodes_mac_addr[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00, 0x00, 0x01};
+	struct in6_addr *ipaddr = &ipv6h->daddr;
+
+	return ((memcmp(dest, ll_all_nodes_mac_addr, sizeof(ll_all_nodes_mac_addr)) == 0) &&
+		(__constant_ntohl(ipaddr->in6_u.u6_addr32[0]) == 0xff020000) &&
+		(ipaddr->in6_u.u6_addr32[1] == 0) && (ipaddr->in6_u.u6_addr32[2] == 0) &&
+		(ipaddr->in6_u.u6_addr32[3] == __constant_htonl(0x1)));
+}
+#endif
+
+/* Check for a local network control block MAC address */
+static inline int iputil_is_lncb(const uint8_t *addr, const void *iph)
+{
+	static const char lncb_addr[] = {0x01, 0x00, 0x5E, 0x00, 0x00};
+
+	if (unlikely(!memcmp(addr, lncb_addr, sizeof(lncb_addr)))) {
+		const struct iphdr *ipv4h = iph;
+		uint32_t daddr = get_unaligned((uint32_t *)&ipv4h->daddr);
+
+		return IPUTIL_V4_ADDR_LNCB(daddr);
+	}
+
+#ifdef CONFIG_IPV6
+	{
+		static const char ipmc6_addr[] = {0x33, 0x33};
+
+		if (unlikely(!memcmp(addr, ipmc6_addr, sizeof(ipmc6_addr)))) {
+			const struct ipv6hdr *ipv6h = iph;
+			uint32_t daddr = get_unaligned((uint32_t *)ipv6h->daddr.s6_addr32);
+
+			return IPUTIL_V6_ADDR_LNCB(daddr);
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static inline int iputil_is_multicast(void *iph)
+{
+	const struct iphdr *ipv4h = iph;
+
+	if (ipv4h->version == 4) {
+		uint32_t daddr = get_unaligned((uint32_t *)&ipv4h->daddr);
+
+		return IPUTIL_V4_ADDR_MULTICAST(daddr);
+	}
+
+#ifdef CONFIG_IPV6
+	if (ipv4h->version == 6) {
+		struct ipv6hdr *ipv6h = iph;
+		__be32 daddr = get_unaligned(ipv6h->daddr.s6_addr32);
+
+		return IPUTIL_V6_ADDR_MULTICAST(daddr);
+	}
+#endif
+	return 0;
+}
+
+static inline size_t iputil_hdrlen(void *iph, uint32_t data_len)
+{
+	const struct iphdr *ipv4h = iph;
+#ifdef CONFIG_IPV6
+	const struct ipv6hdr *ip6hdr_p = iph;
+	uint8_t nexthdr;
+	int nhdr_off;
+#endif
+
+	if (likely(ipv4h->version == 4)) {
+		return (ipv4h->ihl << 2);
+	}
+
+#ifdef CONFIG_IPV6
+	/*
+	 * This is the base IPv6 header. If the next header is an option header, its length must be
+	 * accounted for explicitly elsewhere.
+	 */
+	if (ipv4h->version == 6) {
+		nhdr_off = iputil_v6_skip_exthdr(ip6hdr_p,
+			sizeof(struct ipv6hdr),
+			&nexthdr, data_len, NULL, NULL);
+		return nhdr_off;
+	}
+#endif
+	return 0;
+}
+
+static inline int iputil_mac_is_v6_multicast(const uint8_t *mac)
+{
+	const char ipmc6_addr[] = {0x33, 0x33};
+
+	return mac[0] == ipmc6_addr[0] &&
+		mac[1] == ipmc6_addr[1];
+}
+
+static inline int iputil_mac_is_v4_multicast(const uint8_t *mac)
+{
+	const char ipmc4_addr[] = {0x01, 0x00, 0x5E};
+
+	return mac[0] == ipmc4_addr[0] &&
+		mac[1] == ipmc4_addr[1] &&
+		mac[2] == ipmc4_addr[2];
+}
+
+static inline int iputil_eth_is_type(const struct ether_header *eh, const uint16_t ether_type)
+{
+	if (eh->ether_type == __constant_htons(ETH_P_8021Q)) {
+		return (*(&eh->ether_type + 2) == ether_type);
+	}
+
+	return (eh->ether_type == ether_type);
+}
+
+static inline int iputil_eth_is_v6_multicast(const struct ether_header *eh)
+{
+
+	return iputil_eth_is_type(eh, __constant_htons(ETH_P_IPV6)) &&
+		iputil_mac_is_v6_multicast(eh->ether_dhost);
+}
+
+static inline int iputil_eth_is_v4_multicast(const struct ether_header *eh)
+{
+	return iputil_eth_is_type(eh, __constant_htons(ETH_P_IP)) &&
+		iputil_mac_is_v4_multicast(eh->ether_dhost);
+}
+
+static inline int iputil_eth_is_multicast(const struct ether_header *eh)
+{
+	if (iputil_eth_is_v4_multicast(eh)) {
+		return 1;
+	}
+
+#ifdef CONFIG_IPV6
+	if (iputil_eth_is_v6_multicast(eh)) {
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+static inline int iputil_eth_is_ipv4or6(uint16_t ether_type)
+{
+	return ether_type == __constant_htons(ETH_P_IP) ||
+		ether_type == __constant_htons(ETH_P_IPV6);
+}
+
+/* Multicast data traffic, with the most common types of non-streaming mc filtered out */
+static inline int iputil_is_mc_data(const struct ether_header *eh, void *iph)
+{
+	return iputil_eth_is_multicast(eh) &&
+		!iputil_is_lncb(eh->ether_dhost, iph) &&
+		!iputil_is_ssdp(eh->ether_dhost, iph);
+}
+
+uint8_t iputil_proto_info(void *iph, void *data,
+	void **proto_data, uint32_t *ip_id, uint8_t *more_frags);
+
+static inline struct igmphdr *iputil_igmp_hdr(struct iphdr *p_iphdr)
+{
+	return (struct igmphdr *)((unsigned int*)p_iphdr + p_iphdr->ihl);
+}
+
+struct dhcp_message {
+	uint8_t op;
+	uint8_t htype;
+	uint8_t hlen;
+	uint8_t hops;
+	uint32_t xid;
+	uint16_t secs;
+	uint16_t flags;
+	uint32_t ciaddr;
+	uint32_t yiaddr;
+	uint32_t siaddr;
+	uint32_t giaddr;
+	uint8_t chaddr[16];
+	uint8_t sname[64];
+	uint8_t file[128];
+	uint32_t cookie;
+	uint8_t options[0];
+}__attribute__ ((packed));
+
+#define DHCPSERVER_PORT		67
+#define DHCPCLIENT_PORT		68
+
+#define DHCPV6SERVER_PORT	547
+#define DHCPV6CLIENT_PORT	546
+
+#define BOOTREQUEST		1
+#define DHCPREQUEST		3
+#define ARPHRD_ETHER		1
+#define DHCP_BROADCAST_FLAG	0x8000
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/lhost_muc_comm.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/lhost_muc_comm.h
new file mode 100644
index 0000000..4268b53
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/lhost_muc_comm.h
@@ -0,0 +1,1129 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ */
+
+/*
+ * This file contains host definitions which are common between the
+ * host driver and the microcontroller/MAC code.
+ */
+#ifndef _LHOST_MUC_COMM_H
+#define _LHOST_MUC_COMM_H
+
+#include "qtn_uc_comm.h"
+#include "qtn_cca.h"
+#include "qtn_wmm_ac.h"
+#include "net80211/ieee80211.h"
+#include "net80211/ieee80211_crypto.h"
+#include "muc_txrx_stats.h"
+#include "qtn/qvsp_common.h"
+#include "qtn/shared_defs.h"
+#include "qtn/txbf_common.h"
+
+/* packed definitions for each compiler */
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+# define PACKED	__packed
+# define LM(a,b)	(b)
+# define lhost_volatile
+# define muc_volatile	volatile
+#else
+# define PACKED __attribute__ ((packed))
+# define LM(a,b)	(a)
+# define lhost_volatile	volatile
+# define muc_volatile
+#endif // #if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+
+#define HOST_TXD_NUMSEG		2
+
+#define QTN_BBIC_11N		0x30
+#define QTN_BBIC_11AC		0x40
+
+#define QTN_VSP_STATS_TID_NUM	4
+
+#define QTN_VSP_TIDS	{ 6, 5, 0, 1 }
+#define QTN_VSP_STATS_TID2IDX	{0, 1, -1, -1, -1, 2, 3, -1}	/* make sure no tids using same index */
+
+struct qtn_vsp_per_node_stats {
+	struct qtn_per_tid_stats per_tid_stats[QTN_VSP_STATS_TID_NUM];
+};
+
+struct qtn_vsp_stats {
+#define QVSP_FAT_MAX		1000
+#define QVSP_FAT_NONE		((uint32_t)(-1))
+	uint32_t	fat;		/* free airtime */
+	uint32_t	intf_ms;	/* interference */
+#if TOPAZ_QTM
+	struct qtn_vsp_per_node_stats per_node_stats[QTN_NCIDX_MAX];
+#endif
+};
+
+/**
+ * \brief This enumeration represents the mode in use on the device.
+ *
+ * This enumeration is used to set the correct bandwidth.
+ */
+
+enum {
+        QTN_11NAC_DISABLE  = 0,
+        QTN_11NAC_ENABLE = 1,
+};
+
+/* Host tx descriptor */
+struct host_txdesc {
+	uint32_t	hd_version:8;	/* Descriptor version */
+	uint32_t	hd_tid:4;	/* packet tid */
+	uint32_t	hd_txstatus:2;	/* Transmit status: 1 sent to MuC, 2 tx success */
+#define QTN_TXSTATUS_TX_ON_MUC 1
+#define QTN_TXSTATUS_TX_SUCCESS 2
+	uint32_t	hd_wmmac:2;	/* Reserved for WMM AC*/
+	uint32_t	hd_pktlen:16;	/* Pkt len (incl. all headers) */
+	uint32_t	hd_node_idx;	/* local node index */
+	uint16_t	hd_seglen[HOST_TXD_NUMSEG];	/* Segment lengths */
+	uint32_t	hd_segaddr[HOST_TXD_NUMSEG];	/* Phys addr of each seg */
+	uint32_t	hd_ts;		/* Timestamp of the pkt */
+	uint32_t	hd_nextpa;	/* Phys addr of next host tx descr in fwd dir */
+	uint32_t	hd_nextpa_rev;	/* Phys addr of next host tx descr in rev dir */
+	void		*hd_nextva_rev;	/* Virtual addr (LHOST view) of next host tx descr in rev dir */
+	uint32_t	hd_pa;		/* Physical addr of this host tx descr */
+	void		*hd_va;		/* Virtual addr (LHOST view) of this host tx descr */
+	uint32_t		hd_status;	/* Status of HTxD */
+	void		(*hd_muc_txdone_cb)(void *, uint32_t, uint32_t); /* MuC callback after txdone */
+	uint32_t	hd_muc_cb_arg1; /* parameter for hd_muc_txdone_cb */
+	uint32_t	hd_muc_cb_arg2; /* parameter for hd_muc_txdone_cb */
+	uint32_t	hd_txtsf;	/* record the tsf_lo on that frame was sent successfully */
+	uint8_t		hd_mpdu[128];
+	uint8_t		hd_msdu[128];
+	uint8_t		hd_dma[128];
+#define	HTXD_FLAG_AMSDU_DEST_CAPABLE	0x00000002	/* Can be used for AMSDU destination (append to) */
+#define	HTXD_FLAG_AMSDU_SRC_CAPABLE	0x00000004	/* Can be used for AMSDU (copy from) */
+#define HTXD_FLAG_NO_UPDATE_NAV		0x00000008	/* Don't update NAV for this frame */
+#define HTXD_FLAG_NO_RETRY		0x00000010	/* Don't retry this frame if tx failed */
+#define HTXD_FLAG_NO_RETURN		0x00000020	/* Don't return txdesc from MuC to lhost */
+#define HTXD_FLAG_IMM_RETURN		0x00000040	/* Immediately return txdesc from Muc to lhost */
+	uint32_t	hd_flags;
+};
+
+#define QTN_AMSDU_DEST_CAPABLE_SIZE		ETHER_MAX_LEN
+#define QTN_AMSDU_DEST_CAPABLE_GUARD_SIZE	64
+#define QTN_AMSDU_SRC_FRAME_SIZE		(QTN_AMSDU_DEST_CAPABLE_SIZE / 10)
+#define QTN_AMSDU_DEST_CAPABLE_OCCUPY_SIZE	(QTN_AMSDU_DEST_CAPABLE_SIZE / 3 * 2)
+
+#define HTXD_FLAG_SET(_htxd, _flag) \
+	(((struct host_txdesc *)(_htxd))->hd_flags |= (_flag))
+#define	HTXD_FLAG_CLR(_htxd, _flag) \
+	(((struct host_txdesc *)(_htxd))->hd_flags &= ~(_flag))
+#define	HTXD_FLAG_GET(_htxd, _flag) \
+	(((struct host_txdesc *)(_htxd))->hd_flags & (_flag))
+#define	HTXD_FLAG_ISSET(_htxd, _flag) \
+	(!!(((struct host_txdesc *)(_htxd))->hd_flags & (_flag)))
+#define HTXD_FLAG_KEEP_ONLY(_htxd, _flag) \
+	(((struct host_txdesc *)(_htxd))->hd_flags &= (_flag))
+
+
+/* host_ioctl_hifinfo */
+
+#define NAMESIZE		16
+#define VERSION_SIZE		16
+#define MAC_ADDR_LEN		6
+#define MAC_STR_BUF_SIZE	18
+
+#define	HOST_NUM_IOCTLQ		1	/* Number of ioctl q's */
+/*
+ * LHost -> MuC TX queues are per node to allow variable backpressure per node.
+ * One universal management data frame tx mailbox, and one ioctl mailbox
+ */
+#define HOST_NUM_MGMTQ			1
+#define	HOST_NUM_DATAQ			(QTN_NCIDX_MAX)
+#define HOST_NUM_DATASEM		1
+
+#define HOST_IOCTL_INDEX_BASE		0
+#define HOST_MGMT_INDEX_BASE		(HOST_IOCTL_INDEX_BASE + HOST_NUM_IOCTLQ)
+#define HOST_DATA_INDEX_BASE		(HOST_MGMT_INDEX_BASE + HOST_NUM_MGMTQ)
+#define	HOST_NUM_HOSTIFQ		(HOST_NUM_DATAQ + HOST_NUM_IOCTLQ + HOST_NUM_MGMTQ)
+#define HOST_MBOX_SIZE			(sizeof(uint32_t) * HOST_NUM_HOSTIFQ)
+
+#define QTN_PHY_RATE_PROP_SCALE		1024
+#define QTN_MUC_NODE_PKT_LIMIT_MIN	16
+#define QTN_MUC_NODE_PKT_LIMIT_DEFAULT	64
+#define QTN_MUC_NODE_PKT_LIMIT_MAX	128
+
+#define IEEE80211_TXPOW_ENCODE(x)	((255 * 65536) + (x * 256) + 1)
+#define IEEE80211_TXPOW_DECODE(x)	(((x) - (255 * 65536) - 1) / 256)
+#define RF_MIXER_VAL_HI		0x1
+#define RF_MIXER_VAL_LO		0x7
+#define RF_PGA_VAL_HI		0x3
+#define RF_PGA_VAL_LO		0x0
+#define IEEE80211_LOWGAIN_TXPOW_MAX	10
+#define IEEE80211_LOWGAIN_TXPOW_MIN	9
+
+#define IEEE80211_CHAN_SEC_SHIFT	4
+#define IEEE80211_24G_CHAN_SEC_SHIFT	1
+
+struct host_ioctl_hifinfo {
+	uint32_t	hi_mboxstart;			/* Start address for mbox */
+	uint32_t	hi_rxdoneirq;			/* IRQ map for rx done */
+	uint32_t	hi_txdoneirq;			/* IRQ map for tx done */
+	uint32_t	hi_rxfifo;			/* Rx FIFO location */
+	uint32_t	hi_scanirq;			/* IRQ map for Scan */
+	uint32_t	hi_scanfifo;			/* Scan FIFO location */
+	uint32_t	hi_dspgpios;
+	uint32_t	hi_vsp_stats_phys;
+	uint32_t	hi_vapnode_idx;			/* node_idx of the vap node for tx */
+	uint8_t		hi_vapid;
+	char		hi_name[NAMESIZE];		/* Device name */
+	char		hi_version[VERSION_SIZE];	/* basic firmware version */
+	char		hi_algover[VERSION_SIZE];	/* calibration algorithm version */
+	uint8_t		hi_macaddr[MAC_ADDR_LEN];
+	uint8_t		hi_semmap[HOST_NUM_HOSTIFQ];	/* Mapping of semaphores */
+};
+
+typedef int (*scan_done_fn)(int sc_devid, void *chan, int type, int status);
+
+struct host_scandesc {
+	uint8_t	sd_type;
+	uint8_t	sd_devid;
+	uint8_t	status;
+	uint8_t	____pad;
+	uint8_t*	sd_data;
+	scan_done_fn *sd_ppfn;
+	struct host_scandesc *sd_next;
+};
+
+struct host_rxdesc {
+	uint8_t			hw_desc[128]; /* need to be aligned on 8 bytes */
+	uint8_t			*skbuff;
+	uint8_t			*rd_buffer;
+	uint32_t		rs_statword;
+	struct host_rxdesc	*rd_next;
+	struct host_rxdesc	*rd_pa;
+	struct host_rxdesc	*rd_va;
+	void			*node;		/* Where the frame was from */
+	uint8_t			gain_db;
+};
+
+struct host_descfifo {
+	struct host_rxdesc	*df_fifo;		/* Pointer to first descriptor in linked list */
+	volatile uint32_t	df_numelems;		/* Num elems on fifo */
+	volatile uint32_t	df_size;		/* Size of fifo */
+	struct host_rxdesc * volatile hrdstart; /* the ptr to the host_rxdesc linked list  ready for indication */
+};
+
+struct host_scanfifo {
+	uint32_t	sf_req;	/* Pointer to request mailbox */
+	uint32_t	sf_res;	/* Pointer to result mailbox */
+	uint8_t	sf_sem;	/* Semaphore for Scan fifo */
+	uint8_t	tx_sem; /* Semaphore for Scan fifo */
+	uint8_t	____pad[2];
+};
+
+struct host_rxfifo {
+	struct host_descfifo *rf_fifo;	/* Data Descriptor fifo */
+	uint8_t	rf_sem;		/* Semaphore for rx fifo */
+	uint8_t	____pad[3];
+};
+
+struct host_ndp_mesg {
+	uint8_t	macaddr_ta[6];
+	uint8_t	bw;
+	uint8_t	rxgain;
+	uint8_t	mcs;
+	uint8_t	____pad[3];
+};
+
+
+struct host_ioctl {
+	lhost_volatile uint32_t	ioctl_dev;	/* Device to run IOCTL on */
+	lhost_volatile uint32_t	ioctl_command;	/* Command type */
+	lhost_volatile uint32_t	ioctl_arg1;	/* Single valued arg 1 */
+	lhost_volatile uint32_t	ioctl_arg2;	/* Single valued arg 2 */
+	volatile uint32_t		ioctl_argp;	/* Argument payload pointer */
+	volatile uint32_t		ioctl_status;	/* Status from other side */
+	volatile uint32_t		ioctl_rc;	/* Command return code */
+	lhost_volatile struct host_ioctl *ioctl_next;	/* link to next msg in chain */
+};
+
+struct qtn_vap_args {
+	char vap_name[17];
+	uint8_t vap_id;
+	uint8_t vap_macaddr[IEEE80211_ADDR_LEN];
+};
+
+struct qtn_setparams_args
+{
+	int	ni_param;
+	int	ni_value;
+	int	ni_len;
+	unsigned char ni_data[64];
+};
+
+struct qtn_baparams_args {
+	unsigned char ni_addr[8];
+	enum ieee80211_ba_state	state;
+	int	tid;
+	int	type;
+	int	start_seq_num;
+	int	window_size;
+	int	lifetime;
+	uint16_t flags;
+};
+
+#define QTN_HLINK_RC_DONE		0x00000001
+#define QTN_HLINK_RC_ERR		0x00000002
+#define QTN_HLINK_STATUS_AVAIL		1
+
+#define	IOCTL_DEV_VAPCREATE		4	/* Create a vap */
+#define IOCTL_DEV_DEVOPEN		5	/* Bring the device up */
+#define IOCTL_DEV_BEACON_START		6	/* Start Beacon */
+#define	IOCTL_DEV_NEWASSOC		7	/* New associated node */
+#define	IOCTL_DEV_NEWBSSID		8	/* New BSSID */
+#define IOCTL_DEV_SEND_NDP_ANNOUNCEMENT	10	/* Send NDP announcement */
+#define IOCTL_DEV_SETPARAMS		11	/* Configure Parameters */
+#define IOCTL_DEV_GETPARAMS		12	/* Configure Parameters */
+#define IOCTL_DEV_BA_ADDED_TX		13
+#define IOCTL_DEV_BA_ADDED_RX		14
+#define IOCTL_DEV_BA_REMOVED_TX		15
+#define IOCTL_DEV_BA_REMOVED_RX		16
+#define	IOCTL_DEV_CHANGE_CHANNEL	17
+#define	IOCTL_DEV_SETKEY		18
+#define IOCTL_DEV_CALCMD		19	/* Send the cal cmd */
+#define	IOCTL_DEV_DELKEY		20
+#define	IOCTL_DEV_CMD			21	/* General commands */
+#define IOCTL_DEV_DISASSOC		22	/* Configure node */
+#define	IOCTL_DEV_SMPS			23	/* MIMO power save mode change */
+#define	IOCTL_DEV_FORCEMICERROR		24
+#define IOCTL_DEV_SET_SCANMODE		25
+#define IOCTL_DEV_XMITCTL		26	/* transmission control (turning on or off) */
+#define IOCTL_DEV_BEACON_STOP		27	/* Stop transmitting beacons */
+#define IOCTL_DEV_SET_MACADDR		30
+#define IOCTL_DEV_KILL_MUC		31
+#define IOCTL_DEV_DUMP_LOG		32
+#define IOCTL_DEV_SET_HRFLAGS		33
+#define IOCTL_DEV_SAMPLE_CHANNEL	34
+#define	IOCTL_DEV_CHANGE_CHAN_DEFERRED	35
+#define	IOCTL_DEV_WMM_PARAMS		36
+#define IOCTL_DEV_VAPDELETE		37	/* Delete a vap */
+#define IOCTL_DEV_STORE_TXPOW		38	/* Store the Tx power, short-range workaround*/
+#define IOCTL_DEV_USE_RTS_CTS		39	/* Enable-disable RTS-CTS */
+#define IOCTL_DEV_RST_QUEUE_DEPTH	40
+#define IOCTL_DEV_SET_POWER_SAVE	41	/* send request to MuC to change power save level */
+#define IOCTL_DEV_VSP			42	/* Configure QVSP */
+#define IOCTL_DEV_SET_11G_ERP           43      /* set 11bg ERP on/off */
+#define IOCTL_DEV_BGSCAN_CHANNEL	44
+#define IOCTL_DEV_SET_OCAC		46
+#define IOCTL_DEV_MEAS_CHANNEL		47	/* notify MUC to execute measurement */
+#define IOCTL_DEV_GET_LINK_MARGIN_INFO	48	/* get rssi info */
+#define	IOCTL_DEV_SET_TDLS_PARAM	49	/* set tdls related parameters */
+#define	IOCTL_DEV_GET_TDLS_PARAM	50	/* get tdls related parameters */
+#define	IOCTL_DEV_POWER_SAVE		51	/* enter/leave power save state */
+#define	IOCTL_DEV_REMAIN_CHANNEL	52	/* Remain on target channel */
+#define IOCTL_DEV_SCS_UPDATE_SCAN_STATS	53
+#define IOCTL_DEV_SET_SCANMODE_STA	54
+#define IOCTL_DEV_GET_MU_GRP		55	/* get MU groups and other related data */
+#define IOCTL_DEV_SET_RX_GAIN_PARAMS	56	/* Set RX gain params */
+#define IOCTL_DEV_GET_MU_ENABLE		57	/* get MU enable flag */
+#define IOCTL_DEV_GET_PRECODE_ENABLE	58	/* get MU precode enable flag */
+#define IOCTL_DEV_GET_MU_USE_EQ		59	/* get EQ enable flag */
+#define IOCTL_DEV_SET_CHAN_POWER_TABLE	60	/* Set MuC power table */
+#define	IOCTL_DEV_ENABLE_VLAN		61	/* Set Global Vlan mode */
+#define	IOCTL_DEV_NODE_UPDATE		62	/* Update node information again after association */
+#define IOCTL_DEV_AIRTIME_CONTROL       63      /* control node tx airtime accumulation start|stop */
+#define IOCTL_DEV_SUSPEND_OFF_CHANNEL   64      /* suspend/resume all off-channel mechanisms globally */
+#define IOCTL_DEV_MU_GROUP_UPDATE	65	/* Update MU groups: nodes and qmats */
+#define IOCTL_DEV_FLUSH_DATA		66	/* periodically flush data */
+#define IOCTL_DEV_GET_TX_MAXAMSDU	67	/* get the TX max msdu size */
+#define IOCTL_DEV_SAMPLE_CHANNEL_CANCEL	68	/* cancel ongoing off channel sampling, when it finished, MuC notifies LHost by irq */
+#define IOCTL_DEV_UPDATE_OCAC_STATE_IE	69	/* OCAC: Update OCAC State IE at MuC */
+
+#define IOCTL_DEV_CMD_MEMDBG_DUMP	1	/* Dump MuC memory */
+#define IOCTL_DEV_CMD_MEMDBG_DUMPCFG	2	/* Configuration for dumping MuC memory */
+#define IOCTL_DEV_CMD_MEMDBG_DUMPNODES	3	/* Configuration for dumping MuC nodes */
+#define IOCTL_DEV_CMD_SET_DRV_DBG	4	/* Set MUC debug message level*/
+#define IOCTL_DEV_CMD_GET_DRV_DBG	5	/* Get MUC debug message level*/
+#define IOCTL_DEV_CMD_RF_REG_DUMP	6	/* Dump Rfic6 write register */
+
+#define	IOCTL_DEVATTACH_DEVFLAG_MASK			0xFFFF0000
+#define	IOCTL_DEVATTACH_DEVFLAG_MASK_S			16
+#define	IOCTL_DEVATTACH_DEVID_MASK			0x000000FF
+#define	IOCTL_DEVATTACH_DEV_RFCHIP_FREQID_MASK		0x00000F00
+#define	IOCTL_DEVATTACH_DEV_RFCHIP_FREQID_MASK_S	8
+#define	IOCTL_DEVATTACH_DEV_RFCHIP_VERID_MASK		0x0000F000
+#define	IOCTL_DEVATTACH_DEV_RFCHIP_VERID_MASK_S		12
+#define	IOCTL_DEVATTACH_IRQNUM				0x000000FF
+#define	IOCTL_DEVATTACH_IRQREG				0x00000F00
+#define	IOCTL_DEVATTACH_IRQREG_S			8
+#define	IOCTL_DEVATTACH_NMBOX_MASK			0x000000FF
+
+#define QTN_CHAN_IEEE			(0xFF << 0)
+#define QTN_CHAN_IEEE_S			(0)
+#define QTN_CHAN_PWR			(0xFF << 8)
+#define QTN_CHAN_PWR_S			(8)
+
+#define QTNCHAN_TO_IEEENUM(chan)	(MS(chan, QTN_CHAN_IEEE))
+
+#define QTN_CHAN_FLG_DFS		0x20000000
+#define QTN_CHAN_FLG_HT40		0x40000000
+#define QTN_CHAN_FLG_PRI_HI		0x80000000
+#define QTN_CHAN_FLG_RSV01		0x01000000
+#define QTN_CHAN_FLG_RSV02		0x02000000
+#define QTN_CHAN_FLG_RSV04		0x04000000
+#define QTN_CHAN_FLG_RSV08		0x08000000
+#define QTN_CHAN_FLG_RSV10		0x10000000
+
+#define QTN_CHAN_FLG_VHT80		0x00800000
+
+#define QTN_BAND_FREQ			(0xFF << 0)
+#define QTN_BAND_FREQ_S			(0)
+
+#define	IOCTL_HLINK_DEVATTACH		1	/* Attach device */
+#define	IOCTL_HLINK_DEVDETACH		2	/* Detach device */
+#define	IOCTL_HLINK_DEVCHANGE		3	/* Change device state/flags */
+#define IOCTL_HLINK_LOGATTACH		4	/* Attach Log */
+#define IOCTL_HLINK_TEMP_ATTACH		5	/* Share temperature struct */
+#define IOCTL_HLINK_SVCERRATTACH	6	/* Attach svcerr */
+#define IOCTL_HLINK_RTNLEVENT		7	/* RTNL event */
+#define IOCTL_HLINK_NDP_FRAME		8	/* NDP frame */
+#define IOCTL_HLINK_FOPS_REQ		9	/* Recv File I/O req */
+#define IOCTL_HLINK_MIC_ERR		10	/* TKIP MIC failure detected */
+#define IOCTL_HLINK_BOOTED		11	/* MuC boot complete */
+#define IOCTL_HLINK_DROP_BA		12	/* drop BA */
+#define IOCTL_HLINK_DISASSOC_STA	13	/* disassociate station with a given aid */
+#define IOCTL_HLINK_RFIC_CAUSED_REBOOT  14      /* detected RFIC abnormal reset, reboot the system */
+#define IOCTL_HLINK_BA_ADD_START	15	/* start Tx ADDBA REQ sequence */
+#define IOCTL_HLINK_PEER_RTS		16	/* Peer RTS enable or disable */
+#define IOCTL_HLINK_DYN_WMM		17	/* Dynamic WMM enable or disable */
+#define IOCTL_HLINK_TDLS_EVENTS		18	/* TDLS Events from MuCfw */
+#define IOCTL_HLINK_RATE_TRAIN		19	/* Per-node rate training hash */
+#define IOCTL_HLINK_CSA_COMPLETE	20	/* channel switch count down complete */
+#define IOCTL_HLINK_OCAC_BACKOFF_DONE	21	/* OCAC: BACKOFF -> NONE transition done */
+#define IOCTL_HLINK_CCA_STATS		22	/* Getting CCA stats */
+
+enum {
+	BW_INVALID = 0,
+	BW_HT20 = 20,
+	BW_HT40 = 40,
+	BW_HT80 = 80,
+	BW_HT160 = 160
+};
+
+struct qtn_off_chan_info {
+	uint32_t	freq_band;
+	uint32_t	channel;
+	uint32_t	dwell_msecs;
+	uint32_t	muc_status;
+#define QTN_OFF_CHAN_FLAG_ACTIVE		0x0001
+#define QTN_OFF_CHAN_FLAG_PASSIVE_FAST		0x0002
+#define QTN_OFF_CHAN_FLAG_PASSIVE_NORMAL	0x0004
+#define QTN_OFF_CHAN_FLAG_PASSIVE_SLOW		0x0008
+#define QTN_OFF_CHAN_FLAG_PASSIVE_ONESHOT	0x0010
+#define QTN_OFF_CHAN_TURNOFF_RF			0x0020
+#define QTN_OFF_CHAN_FLAG_MASK			(QTN_OFF_CHAN_FLAG_ACTIVE |		\
+						QTN_OFF_CHAN_FLAG_PASSIVE_FAST |	\
+						QTN_OFF_CHAN_FLAG_PASSIVE_NORMAL |	\
+						QTN_OFF_CHAN_FLAG_PASSIVE_SLOW |	\
+						QTN_OFF_CHAN_FLAG_PASSIVE_ONESHOT)
+	uint16_t	flags;
+};
+
+/* Fixed bw command offset */
+#define QTN_BW_FIXED_BW		0x3
+#define QTN_BW_FIXED_BW_S	0
+#define QTN_BW_FIXED_EN		0x10
+#define QTN_BW_FIXED_EN_S	4
+
+struct qtn_csa_info {
+	uint64_t	req_tsf;		/* aim to change channels at this tsf */
+	uint64_t	switch_tsf;		/* tsf just after channel change completed */
+	uint32_t	pre_notification_tu;	/* pre-switch notification to lhost in TU */
+	uint32_t	post_notification_tu;	/* post channel change notification */
+	uint32_t	freq_band;		/* frequency band info */
+	uint32_t	channel;		/* channel to switch to */
+	uint8_t		sta_dfs_strict_mode;
+#define QTN_CSA_STATUS_MUC_SCHEDULED		0x00000001
+#define QTN_CSA_STATUS_MUC_ERROR_SCHED		0x00000010
+#define QTN_CSA_STATUS_MUC_PRE			0x00000002
+#define QTN_CSA_STATUS_MUC_SWITCHED		0x00000004
+#define QTN_CSA_STATUS_MUC_POST			0x00000008
+#define QTN_CSA_STATUS_MUC_ERROR_SW		0x00000010
+#define QTN_CSA_STATUS_MUC_CANCELLED		0x00000020
+#define QTN_CSA_STATUS_MUC_COMPLETE		0x00000040
+	uint32_t	muc_status;		/* status written by MuC */
+
+#define QTN_CSA_RESTART_QUEUE			0x00000001
+#define QTN_CSA_STATUS_LHOST_PRE_DONE		0x00000002
+#define QTN_CSA_STATUS_LHOST_SWITCH_DONE	0x00000004
+#define QTN_CSA_STATUS_LHOST_POST_DONE		0x00000008
+#define QTN_CSA_CANCEL				0x00000010
+#define QTN_CSA_STATUS_LHOST_ACTIVE		0x00000020
+#define QTN_CSA_STATUS_LHOST_UNITS_OFFSET	0x00000040
+	uint32_t	lhost_status;		/* flags written by lhost */
+};
+
+#define MEAS_RPI_HISTOGRAM_SIZE		8
+
+enum meas_reason {
+	QTN_MEAS_REASON_SUCC = 0,
+	QTN_MEAS_REASON_OFF_CHANNEL_UNSUPPORT,
+	QTN_MEAS_REASON_DURATION_TOO_SHORT,
+	QTN_MEAS_REASON_TIMER_SCHED_FAIL,
+	QTN_MEAS_REASON_TYPE_UNSUPPORT,
+	QTN_MEAS_REASON_MAX,
+};
+
+enum meas_type {
+	QTN_MEAS_TYPE_BASIC = 0,
+	QTN_MEAS_TYPE_CCA,
+	QTN_MEAS_TYPE_RPI,
+	QTN_MEAS_TYPE_CHAN_LOAD,
+	QTN_MEAS_TYPE_NOISE_HIS,
+	QTN_MEAS_TYPE_MAX,
+};
+
+struct meas_time_slice {
+	uint32_t meas_slice;	/* time slice */
+	uint32_t meas_time_pri;	/* prime time count based on meas_slice */
+	uint32_t meas_time_sec; /* secondary time count based on meas_slice */
+};
+
+struct qtn_meas_chan_info {
+	uint32_t work_channel;			/* working channel to return to */
+	int32_t	meas_type;			/* measurement type */
+	int32_t	meas_reason;			/* measurement reason */
+	struct meas_time_slice time_slice;	/* time slice for measurement long duration */
+	uint32_t meas_channel;
+	uint64_t meas_start_tsf;
+	uint32_t meas_dur_ms;
+	union {
+		struct {
+			uint32_t cca_busy_cnt;
+			uint32_t cca_try_cnt;
+			uint32_t cca_try_ms;
+			uint32_t cca_busy_ms;
+		} cca_and_chanload;
+		uint8_t rpi_counts[MEAS_RPI_HISTOGRAM_SIZE];
+		int32_t basic_radar_num;
+		uint8_t basic;
+	} inter_data;
+};
+
+enum scs_lot_tsf_pos {
+	SCS_LOG_TSF_POS_LHOST_TASK_KICKOFF,
+	SCS_LOG_TSF_POS_LHOST_IOCTL2MUC,
+	SCS_LOG_TSF_POS_MUC_POLL_IOCTL_FROM_LHOST,
+	SCS_LOG_TSF_POS_MUC_QOSNULL_SENT,
+	SCS_LOG_TSF_POS_MUC_SMPL_START_BEFORE_CHAN_CHG,
+	SCS_LOG_TSF_POS_MUC_SMPL_START_AFTER_CHAN_CHG,
+	SCS_LOG_TSF_POS_MUC_SMPL_FINISH_BEFORE_CHAN_CHG,
+	SCS_LOG_TSF_POS_MUC_SMPL_FINISH_AFTER_CHAN_CHG,
+	SCS_LOG_TSF_POS_LHOST_CCA_INTR,
+	SCS_LOG_TSF_POS_LHOST_CCA_WORK,
+	SCS_LOG_TSF_POS_NUM
+};
+
+#define IEEE80211_SCS_LOG_TSF(_ic, _sample, _pos)	((_ic)->ic_get_tsf(&((_sample)->tsf[(_pos)])))
+#define QDRV_SCS_LOG_TSF(_sample, _pos)			(hal_get_tsf(&((_sample)->tsf[(_pos)])))
+#define MUC_SCS_LOG_TSF(_qh, _sample, _pos)		(hal_get_tsf((_qh), &((_sample)->tsf[(_pos)])))
+
+struct qtn_samp_chan_info {
+	struct qtn_off_chan_info base;		/* off channel base info, must be placed as the first field */
+#define QTN_CCA_STATUS_IDLE		0x0
+#define QTN_CCA_STATUS_HOST_IOCTL_SENT	0x1
+#define QTN_CCA_STATUS_MUC_SCHEDULED	0x2
+#define QTN_CCA_STATUS_MUC_STARTED	0x3
+#define QTN_CCA_STATUS_MUC_COMPLETE	0x4
+#define QTN_CCA_STATUS_MUC_CANCELLING	0x5
+#define QTN_CCA_STATUS_MUC_CANCELLED	0x6
+
+	uint64_t		start_tsf;	/* tsf at which to start sampling */
+
+	struct out_cca_info	result;		/* results structure for CCA measurement */
+#define QTN_CCA_TYPE_BACKGROUND		0x1
+#define QTN_CCA_TYPE_DIRECTLY		0x2
+	uint32_t		type;
+
+	uint32_t		qosnull_txdesc_host;	/* qosnull frame for channel sampling */
+	uint32_t		qosnull_txdesc_bus;	/* qosnull frame in phyaddr */
+	uint16_t		qosnull_frame_len;	/* the frame length of qosnull */
+	uint16_t		tx_node_idx;		/* the node index that qosnull frame to */
+	uint32_t		qosnull_txtsf;		/* the tsf_lo read from MAC on that qosnull frame was sent successfully */
+	uint32_t		qosnull_nav;		/* the large NAV in qosnull frame */
+	uint64_t		tsf[SCS_LOG_TSF_POS_NUM];	/* timestamps used for precise time control and profiling */
+};
+
+#define QTN_SCS_ASSOC_STA_MAX	12
+
+struct qtn_scs_vsp_node_stats {
+	uint32_t ni_associd;
+	uint32_t tx_usecs;
+	uint32_t rx_usecs;
+};
+
+struct qtn_scs_vsp_info {
+	uint32_t	num_of_assoc;
+	struct  qtn_scs_vsp_node_stats scs_vsp_node_stats[QTN_SCS_ASSOC_STA_MAX];
+};
+
+struct qtn_scs_scan_info {
+	uint32_t	bw_sel;
+	uint32_t	cca_idle;
+	uint32_t	cca_busy;
+	uint32_t	cca_tx;
+	uint32_t	cca_intf;
+	uint32_t	cca_try;
+	uint32_t	cca_pri;
+	uint32_t	cca_sec20;
+	uint32_t	cca_sec40;
+	uint32_t	bcn_rcvd;
+	uint32_t	crc_err;
+	uint32_t	lpre_err;
+	uint32_t	spre_err;
+};
+
+#define QTN_SCS_MAX_OC_INFO	32
+struct qtn_scs_data_history {
+#define QTN_SCS_FILTER_WINDOW_SZ	5
+#define QTN_SCS_FILTER_MEDIAN_IDX	(QTN_SCS_FILTER_WINDOW_SZ / 2)
+	uint32_t idx;
+	uint32_t buffer[QTN_SCS_FILTER_WINDOW_SZ];
+};
+
+struct qtn_scs_stats_history {
+	struct qtn_scs_data_history lp_errs[IEEE80211_MAX_DUAL_CHANNELS];
+	struct qtn_scs_data_history sp_errs[IEEE80211_MAX_DUAL_CHANNELS];
+};
+
+struct qtn_scs_oc_info {
+	uint32_t	off_channel;
+	uint32_t	off_chan_bw_sel;
+	uint32_t	off_chan_cca_busy;
+	uint32_t	off_chan_cca_sample_cnt;
+	uint32_t	off_chan_cca_try_cnt;
+	uint32_t	off_chan_beacon_recvd;
+	uint32_t	off_chan_crc_errs;
+	uint32_t	off_chan_sp_errs;
+	uint32_t	off_chan_lp_errs;
+	uint32_t	off_chan_cca_pri;
+	uint32_t	off_chan_cca_sec;
+	uint32_t	off_chan_cca_sec40;
+};
+/* Smart channel selection data shared between Lhost and MuC */
+struct qtn_scs_info {
+	uint32_t	oc_info_count;
+	struct qtn_scs_oc_info oc_info[QTN_SCS_MAX_OC_INFO];
+	uint32_t	bw_sel;
+	uint32_t	cca_try;
+	uint32_t	cca_busy;
+	uint32_t	cca_idle;
+	uint32_t	cca_tx;
+	uint32_t	cca_interference;
+	uint32_t	cca_pri;
+	uint32_t	cca_sec20;
+	uint32_t	cca_sec40;
+	uint32_t	beacon_recvd;
+	uint32_t	tx_usecs;
+	uint32_t	rx_usecs;
+	struct qtn_scs_vsp_info scs_vsp_info;
+};
+
+struct qtn_scs_info_set {
+	uint32_t	valid_index; /* 0 or 1 */
+	struct qtn_scs_info scs_info[2];
+	struct qtn_scs_scan_info scan_info[IEEE80211_CHAN_MAX];
+	struct qtn_scs_stats_history stats_history;
+};
+
+struct qtn_remain_chan_info {
+	uint32_t freq_band;
+	uint32_t data_channel;		/* Data channel to return to */
+	uint32_t off_channel;		/* The required remain channel */
+	uint32_t duration_usecs;	/* Duration in microseconds to stay on remain channel */
+	uint64_t start_tsf;		/* tsf at which to switch to remain channel */
+	uint32_t timeout_usecs;		/* timeout in microseconds to return base channel */
+
+#define QTN_REM_CHAN_STATUS_IDLE		0x0
+#define QTN_REM_CHAN_STATUS_HOST_IOCTL_SENT	0x1
+#define QTN_REM_CHAN_STATUS_MUC_SCHEDULED	0x2
+#define QTN_REM_CHAN_STATUS_MUC_STARTED	0x3
+#define QTN_REM_CHAN_STATUS_MUC_COMPLETE	0x4
+#define QTN_REM_CHAN_STATUS_MUC_CANCELLED	0x5
+	uint32_t status;		/* channel switch status */
+
+	uint8_t peer_mac[IEEE80211_ADDR_LEN];	/* peer node mac address */
+};
+
+
+#define QTN_CCA_CNT2MS(_cnt)   RUBY_TIMER_MUC_CCA_CNT2MS(_cnt)
+#define QTN_CCA_INTV           RUBY_TIMER_MUC_CCA_INTV
+
+enum scan_chan_tsf_pos {
+	SCAN_CHAN_TSF_LHOST_HOSTLINK_IOCTL = 0,
+	SCAN_CHAN_TSF_MUC_IOCTL_PROCESS,
+	SCAN_CHAN_TSF_MUC_SEND_START_FRM,
+	SCAN_CHAN_TSF_MUC_SEND_START_FRM_DONE,
+	SCAN_CHAN_TSF_MUC_GOTO_OFF_CHAN,
+	SCAN_CHAN_TSF_MUC_GOTO_OFF_CHAN_DONE,
+	SCAN_CHAN_TSF_MUC_SEND_PRBREQ_FRM,
+	SCAN_CHAN_TSF_MUC_SEND_PRBREQ_FRM_DONE,
+	SCAN_CHAN_TSF_MUC_GOTO_DATA_CHAN,
+	SCAN_CHAN_TSF_MUC_GOTO_DATA_CHAN_DONE,
+	SCAN_CHAN_TSF_MUC_SEND_FINISH_FRM,
+	SCAN_CHAN_TSF_MUC_SEND_FINISH_FRM_DONE,
+	SCAN_CHAN_TSF_LHOST_INTERRUPT,
+	SCAN_CHAN_TSF_LHOST_SCANWORK,
+	SCAN_CHAN_TSF_LOG_NUM
+};
+
+struct off_chan_tsf_dbg {
+	int pos_index;
+	char *log_name;
+};
+
+#define QDRV_SCAN_LOG_TSF(_scan, _pos)		(hal_get_tsf(&((_scan)->tsf[(_pos)])))
+#define MUC_SCAN_LOG_TSF(_qh, _scan, _pos)	(hal_get_tsf((_qh), &((_scan)->tsf[(_pos)])))
+
+struct qtn_scan_chan_info {
+	struct qtn_off_chan_info base;		/* off channel base info, must be placed as the first field */
+#define TIME_MARGIN_BEFORE_STARTFRM	3000	/* microseconds, time overhead for others before start frame is sent*/
+#define TIME_MARGIN_AFTER_STARTFRM	1000	/* microseconds, time overhead for others after start frame is sent*/
+#define TIME_OFFSET_SEND_PROBE_REQ	3000	/* microseconds, the time offset for sending probe_req frame
+						 * after switching to off channel*/
+#define TIME_OFFSET_SEND_START_FRM	5000	/* microseconds, the time offset for sending start frame
+						 * after set NETDEV_F_PAUSE_TX flag */
+#define TIME_DUR_FOR_ALL_BEACONS	25000	/* microseconds, the time duration for transmitting all beacons */
+#define TIME_MIN_WAIT_PROBE_REP		5000	/* microseconds, the minimal time for waiting for the probe
+						 * response frame on scanning channel */
+#define QTN_SCAN_CHAN_MUC_IDLE			0x0
+#define QTN_SCAN_CHAN_MUC_STARTED		0x1
+#define QTN_SCAN_CHAN_MUC_PROBING		0x2
+#define QTN_SCAN_CHAN_MUC_COMPLETED		0x3
+#define QTN_SCAN_CHAN_MUC_FAILED		0x4
+#define QTN_SCAN_CHAN_MUC_SCHEDULED		0x5
+
+	uint32_t	scan_flags;
+	uint32_t	start_txdesc_host;	/* The frame sent before go scan channel,
+						 * e.g. pwrsav frame in STA mode */
+	uint32_t	start_txdesc_bus;	/* Start frame in phyaddr */
+	uint16_t	start_node_idx;		/* the node index that frame to */
+	uint16_t	start_frame_len;	/* frame length */
+	uint32_t	prbreq_txdesc_host;	/* probe request frame for active scanning */
+	uint32_t	prbreq_txdesc_bus;	/* probe request frame in phyaddr */
+	uint16_t	prbreq_node_idx;	/* the node index that frame to */
+	uint16_t	prbreq_frame_len;	/* frame length */
+	uint32_t	finish_txdesc_host;	/* The frame sent after back data channel,
+						 * e.g. the frame to announce waking up in STA mode */
+	uint32_t	finish_txdesc_bus;	/* Complete frame in phyaddr */
+	uint16_t	finish_node_idx;		/* the node index that frame to */
+	uint16_t	finish_frame_len;	/* frame length */
+	uint64_t	tsf[SCAN_CHAN_TSF_LOG_NUM];
+};
+
+enum qtn_ocac_tsf_log {
+	OCAC_TSF_LOG_GOTO_OFF_CHAN = 0,
+	OCAC_TSF_LOG_GOTO_OFF_CHAN_DONE,
+	OCAC_TSF_LOG_GOTO_DATA_CHAN,
+	OCAC_TSF_LOG_GOTO_DATA_CHAN_DONE,
+	OCAC_TSF_LOG_NUM
+};
+
+struct qtn_ocac_info {
+	uint32_t		freq_band;	/* frequency band, written by lhost */
+	uint32_t		off_channel;	/* The off channel, "0" means to stop ocac in MuC, written by lhost*/
+	uint32_t		qosnull_txdesc_host;	/* qosnull frame in virtual address, written by lhost */
+	uint32_t		qosnull_txdesc_bus;	/* qosnull frame in physical address, written by lhost */
+	uint16_t		qosnull_frame_len;	/* the frame length of qosnull */
+	uint16_t		tx_node_idx;		/* the node index that qosnull frame to */
+	uint16_t		dwell_time;	/* the required time on off channel in one beacon interval, written by lhost */
+	uint16_t		secure_dwell;	/* milliseconds, the time on off channel within on off-channel action, using
+							qosnull frame with large NAV to protect the traffic */
+	uint16_t		threshold_fat;	/* the fat threshold to run off-channel CAC, written by lhost */
+	uint16_t		threshold_traffic;	/* the traffic threshold to run off-channel CAC, written by lhost */
+	uint16_t		threshold_fat_dec;	/* the threshold for consecutive fat decrease, written by lhost */
+	uint16_t		traffic_ctrl;	/* whether to send qosnull or not, written by lhost */
+	uint16_t		offset_txhalt;	/* milliseconds, the offset after beacon to halt tx, written by lhost */
+	uint16_t		offset_offchan;	/* milliseconds, the offset after halt tx to switch off channel, written by lhost */
+
+#define QTN_OCAC_ON_DATA_CHAN	0x1
+#define QTN_OCAC_ON_OFF_CHAN	0x2
+	uint16_t		chan_status;	/* current on which channel, written by MuC */
+	uint16_t		actual_dwell_time;	/* the actual time on off channel, written by MuC */
+	uint64_t		tsf_log[OCAC_TSF_LOG_NUM];	/* event tsf log, written by MuC */
+};
+
+enum bmps_state_e {
+	BMPS_STATE_OFF		= 0,		/* STA exits BMPS mode */
+	BMPS_STATE_WAKE		= 1,		/* in BMPS mode, and is fully powered on */
+						/* with PM set to 0 */
+	BMPS_STATE_SLEEP	= 2,		/* in BMPS mode, and is fully powered off */
+						/* with PM set to 1 */
+	BMPS_STATE_WAKE_TO_SLEEP	= 3,	/* in BMPS mode, and is transitting from */
+						/* power-on to power-off by sending Null frame */
+						/* with PM=1 to AP */
+	BMPS_STATE_LEAK_WINDOW	= 4,		/* in BMPS mode, and Null frame with PM=1 */
+						/* has been sent, TX path is paused, */
+						/* but RX patgh is still running to */
+						/* receive packets from leaky AP */
+	BMPS_STATE_SLEEP_TO_WAKE	= 5,	/* in BMPS mode, and is transitting from */
+						/* power-off to power-on by sending */
+						/* Null frame with PM=0 to AP */
+	BMPS_STATE_BEACON_SNOOP	= 6,		/* in BMPS mode, and RX chain is powered up */
+						/* to receive beacon */
+	BMPS_STATE_MAX		= BMPS_STATE_BEACON_SNOOP,
+};
+
+struct qtn_bmps_info {
+	uint32_t	null_txdesc_host;	/* null frame in virtual address */
+	uint32_t	null_txdesc_bus;	/* null frame in physical address */
+	uint16_t	null_frame_len;		/* the frame length of null */
+	uint16_t	tx_node_idx;		/* the node index that null frame to */
+	enum bmps_state_e	state;		/* shared BMPS status */
+};
+
+struct qtn_rf_rxgain_params
+{
+	uint8_t lna_on_indx;
+	uint8_t max_gain_idx;
+	int16_t cs_thresh_dbm;
+	int16_t cca_prim_dbm;
+	int16_t cca_sec_scs_off_dbm;
+	int16_t cca_sec_scs_on_dbm;
+};
+
+/* MuC fops requst */
+#define MUC_FOPS_MAX_FNAME_SIZE (50)
+enum {
+	MUC_FOPS_OPEN = 0,
+	MUC_FOPS_READ,
+	MUC_FOPS_WRITE,
+	MUC_FOPS_LSEEK,
+	MUC_FOPS_CLOSE,
+};
+
+enum {
+	MUC_FOPS_PENDING = 0x011ADDED,
+	MUC_FOPS_DONE    = 0xF035D0DE,
+};
+
+enum {
+	MUC_FOPS_RDONLY = 0x0,
+	MUC_FOPS_WRONLY = 0x1,
+	MUC_FOPS_RDWR 	= 0x2,
+	MUC_FOPS_APPEND = 0x4,
+};
+
+struct muc_fops_req	{
+	volatile int32_t ret_val;
+	volatile int32_t fd;
+	volatile uint32_t req_state;
+	volatile char *data_buff;
+};
+
+enum qdrv_cmd_muc_memdbgcnf_s {
+	QDRV_CMD_MUC_MEMDBG_STATUS,
+	QDRV_CMD_MUC_MEMDBG_FD_MAX,
+	QDRV_CMD_MUC_MEMDBG_NODE_MAX,
+	QDRV_CMD_MUC_MEMDBG_DUMP_MAX,
+	QDRV_CMD_MUC_MEMDBG_RATETBL,
+	QDRV_CMD_MUC_MEMDBG_MSG_SEND,
+	QDRV_CMD_MUC_MEMDBG_TRACE,
+	QDRV_CMD_MUC_MEMDBG_LAST
+};
+
+/* The following file indexes and file lists must all be kept in sync */
+#define FOPS_FD_EP_SAMPLES		9
+#define FOPS_FD_DCACHE_SAMPLES		10
+#ifdef PROFILE_MUC_SAMPLE_IPTR_AUC
+#define FOPS_FD_IPTR_SAMPLES		15
+#else
+#define FOPS_FD_IPTR_SAMPLES		11
+#endif
+#define FOPS_FD_UBOOT_ENV		12
+
+#ifdef TOPAZ_RFIC6_PLATFORM
+#define LHOST_CAL_FILES         {       \
+        NULL,                           \
+        "/proc/bootcfg/bf_factor",      \
+        "/tmp/txpower.txt",             \
+        "/proc/bootcfg/txpower.cal",    \
+        "/proc/bootcfg/dc_iq.cal",      \
+        "/mnt/jffs2/mon.out",           \
+        "/mnt/jffs2/gmon.out",          \
+        "/mnt/jffs2/pecount.out",       \
+        "/proc/bootcfg/pdetector.cal",  \
+        "/mnt/jffs2/profile_ep_muc",    \
+        "/mnt/jffs2/profile_dcache_muc",\
+        "/mnt/jffs2/profile_iptr_muc",  \
+        "/proc/bootcfg/env",            \
+        "/etc/mtest",           \
+        "/proc/bootcfg/rx_iq.cal", \
+				"/mnt/jffs2/profile_iptr_auc",	\
+				"/proc/bootcfg/bf_factor_2g",	\
+				"/tmp/txpower_2g.txt",		\
+				"/proc/bootcfg/dc_iq_2g.cal",	\
+				"/proc/bootcfg/pdetector_2g.cal",\
+				"/tmp/bond_opt.txt",	        \
+				"/mnt/jffs2/txlo_lpcalib.cal",	        \
+				"/proc/bootcfg/rc_calib.cal",	\
+}
+#else
+#define LHOST_CAL_FILES		{	\
+	NULL,				\
+	"/proc/bootcfg/bf_factor",	\
+	"/tmp/txpower.txt",		\
+	"/proc/bootcfg/txpower.cal",	\
+	"/proc/bootcfg/dc_iq.cal",	\
+	"/mnt/jffs2/mon.out",		\
+	"/mnt/jffs2/gmon.out",		\
+	"/mnt/jffs2/pecount.out",	\
+	"/proc/bootcfg/pdetector.cal",	\
+	"/mnt/jffs2/profile_ep_muc",	\
+	"/mnt/jffs2/profile_dcache_muc",\
+	"/mnt/jffs2/profile_iptr_muc",	\
+	"/proc/bootcfg/env",		\
+  "/etc/mtest",           \
+	"/proc/bootcfg/rx_iq.cal",	\
+	"/mnt/jffs2/profile_iptr_auc",	\
+}
+#endif
+
+#define MUC_CAL_FILES		{	\
+	NULL,				\
+	NULL,				\
+	NULL,				\
+	NULL,				\
+	NULL,				\
+	"mon.out",			\
+	"gmon.out",			\
+	NULL,				\
+	NULL,				\
+	NULL,				\
+	NULL,				\
+	NULL,				\
+	NULL,				\
+}
+
+enum tdls_ioctl_params {
+	IOCTL_TDLS_STATUS = 1,
+	IOCTL_TDLS_UAPSD_IND_WND,
+	IOCTL_TDLS_PTI_CTRL,
+	IOCTL_TDLS_PTI,
+	IOCTL_TDLS_PTI_PENDING,
+	IOCTL_TDLS_DBG_LEVEL,
+	IOCTL_TDLS_PTI_DELAY,
+	IOCTL_TDLS_PTI_EVENT = 100
+};
+
+struct qtn_tdls_args {
+	uint8_t		ni_macaddr[IEEE80211_ADDR_LEN];
+	uint16_t	ni_ncidx;
+	uint32_t	tdls_cmd;
+	uint32_t	tdls_params;
+};
+
+struct qtn_node_args
+{
+	/* header */
+	uint8_t	ni_macaddr[IEEE80211_ADDR_LEN];
+	uint8_t	ni_bssid[IEEE80211_ADDR_LEN];
+	uint8_t	ni_nrates;
+	uint8_t	ni_rates[IEEE80211_RATE_MAXSIZE];
+	uint8_t	ni_htnrates;
+	uint8_t	ni_htrates[IEEE80211_HT_RATE_MAXSIZE];
+	uint16_t	ni_associd;	/* assoc response */
+	uint16_t	ni_node_idx;
+	uint16_t	ni_flags;	/* special-purpose state */
+	struct wmm_params	wmm_params[WME_NUM_AC];
+	uint8_t	ni_implicit_ba_rx; /* The RX side of the implicit BA. Zero for no implicit RX BA */
+	uint8_t	ni_implicit_ba_tx; /* The TX side of the implicit BA. Zero for no implicit TX BA */
+	uint16_t	ni_raw_bintval;		/* raw beacon interval */
+	uint16_t	ni_implicit_ba_size; /* Size of the implicit BAs */
+	uint8_t	ni_qtn_ie_flags;
+	uint8_t ni_vendor;
+	uint8_t ni_bbf_disallowed;      /* flag to disallow BBF */
+	uint8_t ni_std_bf_disallowed;      /* flag to disallow standard BF */
+	uint8_t ni_uapsd;	/* U-APSD per-node flags matching WMM STA Qos Info field */
+	uint8_t	ni_htcap[sizeof(struct ieee80211_htcap)];	/* Processed HT capabilities */
+	uint8_t	ni_htinfo[sizeof(struct ieee80211_htinfo)];	/* Processed HT info */
+	uint8_t	ni_vhtcap[sizeof(struct ieee80211_vhtcap)];	/* Processed VHT capabilities */
+	uint8_t	ni_vhtop[sizeof(struct ieee80211_vhtop)];	/* Processed VHT operational info */
+	struct qtn_node_shared_stats *ni_shared_stats;
+	uint32_t	ni_ver_sw;
+	uint32_t	ni_qtn_flags;
+	uint32_t	ni_tdls_status;
+	uint8_t		ni_mu_grp[sizeof(struct ieee80211_vht_mu_grp)];
+	uint16_t	ni_rsn_caps;		/* optional rsn capabilities */
+	uint8_t		rsn_ucastcipher;	/* selected unicast cipher */
+	uint16_t	tdls_peer_associd;	/* tdls peer AID allocated by AP, unique in BSS */
+	uint32_t	ni_rate_train;
+	uint32_t	ni_rate_train_peer;
+};
+
+struct qtn_beacon_args
+{
+	struct wmm_params		wmm_params[WME_NUM_AC];
+	uint32_t			bintval;
+	uint32_t			bo_tim_len;
+	uint32_t			bo_htcap;
+	uint32_t			bo_htinfo;
+	uint32_t			bo_vhtcap;
+	uint32_t			bo_vhtop;
+	uint32_t			bc_ie_head;		/* Beacon ie link list head */
+	uint32_t			bc_ie_buf_start;	/* Beacon ie buffer start for MuC */
+};
+
+struct qtn_key {
+	u_int8_t wk_keylen;		/* key length in bytes */
+	u_int8_t wk_flags;
+#define IEEE80211_KEY_XMIT	0x01	/* key used for xmit */
+#define IEEE80211_KEY_RECV	0x02	/* key used for recv */
+#define IEEE80211_KEY_GROUP	0x04	/* key used for WPA group operation */
+#define IEEE80211_KEY_SWCRYPT	0x10	/* host-based encrypt/decrypt */
+#define IEEE80211_KEY_SWMIC	0x20	/* host-based enmic/demic */
+	u_int16_t wk_keyix;		/* key index */
+	u_int8_t wk_key[IEEE80211_KEYBUF_SIZE + IEEE80211_MICBUF_SIZE];
+#define wk_txmic    wk_key + IEEE80211_KEYBUF_SIZE + 0  /* XXX can't () right */
+#define wk_rxmic    wk_key + IEEE80211_KEYBUF_SIZE + 8  /* XXX can't () right */
+	u_int64_t wk_keyrsc[IEEE80211_TID_SIZE];    /* key receive sequence counter */
+	u_int64_t wk_keytsc;		/* key transmit sequence counter */
+	u_int32_t wk_cipher;		/* cipher */
+	u_int32_t wk_ncidx;		/* node cache index */
+};
+#define IEEE80211_KEY_COMMON		/* common flags passed in by apps */\
+	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV | IEEE80211_KEY_GROUP)
+
+struct qtn_key_args
+{
+	struct qtn_key	key;
+	uint8_t	wk_addr[IEEE80211_ADDR_LEN];
+};
+
+struct qtn_power_save_args
+{
+	uint32_t enable;
+	uint8_t ni_addr[IEEE80211_ADDR_LEN];
+};
+
+struct lhost_txdesc
+{
+	struct host_txdesc	hw_desc;	/* shared between muc and lhost */
+	struct sk_buff		*skb;
+	struct lhost_txdesc	*next;
+};
+
+#define MUC_TXSTATUS_READY		0x0
+#define MUC_TXSTATUS_DONE		0x1
+
+#define MUC_RXSTATUS_DONE		0x1
+
+#define MUC_RXSTATUS_MIC_ERR		0x00000002
+#define MUC_RXSTATUS_MIC_ERR_S		1
+#define MUC_RXSTATUS_NCIDX		0x00000FFC
+#define MUC_RXSTATUS_NCIDX_S		2
+#define MUC_RXSTATUS_RXLEN		0xFFFF0000
+#define MUC_RXSTATUS_RXLEN_S		16
+
+struct qtn_link_margin_info {
+	uint32_t	mcs;
+	uint32_t	bw;
+	int		rssi_avg;
+	int		reason;
+#define QTN_LINK_MARGIN_REASON_SUCC		0
+#define QTN_LINK_MARGIN_REASON_NOSUCHNODE	1
+	uint8_t		mac_addr[IEEE80211_ADDR_LEN];
+};
+
+#define QTN_RESERVED_DEVIDS		2
+#define QTN_WLANID_FROM_DEVID(devid)	\
+	((devid < QTN_RESERVED_DEVIDS)? 0 : (devid - QTN_RESERVED_DEVIDS))
+
+struct qtn_mu_grp_args {
+	/* MU group ID. 0 means the group is not used and grp_ni is empty*/
+	uint8_t grp_id;
+	/* mu QMat installation status */
+	/* QMat is not installed and not used */
+#define MU_QMAT_DISABLED	0
+	/* QMat is installed and used */
+#define MU_QMAT_ENABLED		1
+	/* QMat is installed, used but not updated */
+#define MU_QMAT_FREEZED		2
+	/* QMat is installed, not used and not updated */
+#define MU_QMAT_NOT_USED	3
+	uint8_t qmat_installed;
+	/* the index of the grp_ni[], is also the user position */
+	uint16_t aid[IEEE80211_MU_GRP_NODES_MAX];
+	uint8_t ncidx[IEEE80211_MU_GRP_NODES_MAX];
+	/* matrix addr offsets in sram */
+	unsigned int u0_1ss_u1_1ss;
+	unsigned int u0_2ss_u1_1ss;
+	unsigned int u0_3ss_u1_1ss;
+	unsigned int u0_1ss_u1_2ss;
+	unsigned int u0_1ss_u1_3ss;
+	unsigned int u0_2ss_u1_2ss;
+	/* stats */
+	uint32_t upd_cnt;
+	int32_t rank;
+};
+
+enum grp_op {
+	MU_GRP_NONE = 0,
+	MU_GRP_INST,
+	MU_GRP_DELE,
+};
+
+struct qtn_mu_group_update_args {
+	enum grp_op op;
+	struct upd_grp {
+		int grp_id;
+		unsigned int ap_devid;
+		uint8_t ap_macaddr[IEEE80211_ADDR_LEN];
+		struct upd_nodes {
+			int as_sta;
+			struct ieee80211_vht_mu_grp grp;
+			uint8_t macaddr[IEEE80211_ADDR_LEN];
+		} nodes[QTN_MU_NODES_PER_GROUP];
+	} groups[QTN_MU_QMAT_MAX_SLOTS];
+};
+
+struct qtn_cca_stats {
+	uint32_t	cca_sample_period;
+	uint32_t	cca_pri_cnt;
+	uint32_t	cca_sec_cnt;
+	uint32_t	cca_sec40_cnt;
+	uint32_t	cca_sample_cnt;
+	uint32_t	cca_try_cnt;
+	uint32_t	cca_csw_cnt;
+	uint32_t	cca_off_try_cnt;
+	uint32_t	cca_meas_cnt;
+	uint32_t	cca_busy_cnt;
+	uint32_t	cca_idle_cnt;
+	uint32_t	cca_tx_cnt;
+	uint32_t	rx_time_cnt;
+	uint32_t	tx_time_cnt;
+	uint32_t	cca_pri;
+	uint32_t	cca_sec;
+	uint32_t	cca_sec40;
+	uint32_t	cca_busy;
+	uint32_t	cca_fat;
+	uint32_t	cca_intf;
+	uint32_t	cca_trfc;
+};
+
+#ifdef CONFIG_NAC_MONITOR
+#define MAX_NAC_STA 128
+	struct nac_stats_entry {
+		uint8_t  nac_valid;
+		uint8_t  nac_avg_rssi;
+		uint8_t  nac_channel;
+		uint8_t  nac_packet_type;
+		uint64_t nac_timestamp;
+		uint8_t  nac_txmac[IEEE80211_ADDR_LEN];
+		uint16_t reserved;
+	};
+	struct nac_mon_info{
+		uint16_t nac_on_time;
+		uint16_t nac_cycle_time;
+		uint16_t nac_monitor_on;
+		uint16_t reserved;
+		struct nac_stats_entry nac_stats[MAX_NAC_STA];
+	};
+#endif
+
+#endif	// _LHOST_MUC_COMM_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/log_table/1024_10log10_table.txt b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/log_table/1024_10log10_table.txt
new file mode 100644
index 0000000..e95c1b3
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/log_table/1024_10log10_table.txt
@@ -0,0 +1,4096 @@
+0,
+3083,
+4886,
+6166,
+7158,
+7969,
+8654,
+9248,
+9772,
+10240,
+10664,
+11051,
+11407,
+11737,
+12044,
+12331,
+12600,
+12854,
+13095,
+13323,
+13540,
+13747,
+13945,
+14134,
+14315,
+14490,
+14658,
+14819,
+14975,
+15126,
+15272,
+15413,
+15550,
+15683,
+15812,
+15937,
+16059,
+16177,
+16293,
+16406,
+16515,
+16623,
+16727,
+16829,
+16929,
+17027,
+17123,
+17216,
+17308,
+17398,
+17486,
+17572,
+17657,
+17740,
+17822,
+17902,
+17981,
+18058,
+18134,
+18209,
+18282,
+18355,
+18426,
+18496,
+18565,
+18633,
+18700,
+18765,
+18830,
+18894,
+18957,
+19020,
+19081,
+19141,
+19201,
+19260,
+19318,
+19376,
+19432,
+19488,
+19543,
+19598,
+19652,
+19705,
+19758,
+19810,
+19861,
+19912,
+19962,
+20012,
+20061,
+20110,
+20158,
+20205,
+20252,
+20299,
+20345,
+20391,
+20436,
+20480,
+20525,
+20569,
+20612,
+20655,
+20697,
+20740,
+20781,
+20823,
+20864,
+20904,
+20945,
+20984,
+21024,
+21063,
+21102,
+21141,
+21179,
+21217,
+21254,
+21291,
+21328,
+21365,
+21401,
+21437,
+21473,
+21508,
+21543,
+21578,
+21613,
+21647,
+21681,
+21715,
+21749,
+21782,
+21815,
+21848,
+21881,
+21913,
+21945,
+21977,
+22009,
+22040,
+22071,
+22102,
+22133,
+22163,
+22194,
+22224,
+22254,
+22284,
+22313,
+22343,
+22372,
+22401,
+22429,
+22458,
+22487,
+22515,
+22543,
+22571,
+22598,
+22626,
+22653,
+22681,
+22708,
+22734,
+22761,
+22788,
+22814,
+22840,
+22866,
+22892,
+22918,
+22944,
+22969,
+22995,
+23020,
+23045,
+23070,
+23094,
+23119,
+23144,
+23168,
+23192,
+23216,
+23240,
+23264,
+23288,
+23311,
+23335,
+23358,
+23382,
+23405,
+23428,
+23450,
+23473,
+23496,
+23518,
+23541,
+23563,
+23585,
+23607,
+23629,
+23651,
+23673,
+23695,
+23716,
+23737,
+23759,
+23780,
+23801,
+23822,
+23843,
+23864,
+23885,
+23905,
+23926,
+23946,
+23967,
+23987,
+24007,
+24027,
+24047,
+24067,
+24087,
+24107,
+24126,
+24146,
+24165,
+24185,
+24204,
+24223,
+24242,
+24261,
+24280,
+24299,
+24318,
+24337,
+24355,
+24374,
+24392,
+24411,
+24429,
+24447,
+24466,
+24484,
+24502,
+24520,
+24538,
+24555,
+24573,
+24591,
+24608,
+24626,
+24643,
+24661,
+24678,
+24695,
+24713,
+24730,
+24747,
+24764,
+24781,
+24798,
+24815,
+24831,
+24848,
+24865,
+24881,
+24898,
+24914,
+24930,
+24947,
+24963,
+24979,
+24995,
+25011,
+25028,
+25043,
+25059,
+25075,
+25091,
+25107,
+25122,
+25138,
+25154,
+25169,
+25185,
+25200,
+25215,
+25231,
+25246,
+25261,
+25276,
+25291,
+25307,
+25322,
+25336,
+25351,
+25366,
+25381,
+25396,
+25410,
+25425,
+25440,
+25454,
+25469,
+25483,
+25498,
+25512,
+25526,
+25541,
+25555,
+25569,
+25583,
+25597,
+25611,
+25625,
+25639,
+25653,
+25667,
+25681,
+25695,
+25708,
+25722,
+25736,
+25749,
+25763,
+25777,
+25790,
+25804,
+25817,
+25830,
+25844,
+25857,
+25870,
+25883,
+25897,
+25910,
+25923,
+25936,
+25949,
+25962,
+25975,
+25988,
+26001,
+26013,
+26026,
+26039,
+26052,
+26064,
+26077,
+26090,
+26102,
+26115,
+26127,
+26140,
+26152,
+26165,
+26177,
+26189,
+26202,
+26214,
+26226,
+26238,
+26251,
+26263,
+26275,
+26287,
+26299,
+26311,
+26323,
+26335,
+26347,
+26359,
+26370,
+26382,
+26394,
+26406,
+26417,
+26429,
+26441,
+26452,
+26464,
+26476,
+26487,
+26499,
+26510,
+26522,
+26533,
+26544,
+26556,
+26567,
+26578,
+26590,
+26601,
+26612,
+26623,
+26634,
+26646,
+26657,
+26668,
+26679,
+26690,
+26701,
+26712,
+26723,
+26734,
+26745,
+26755,
+26766,
+26777,
+26788,
+26799,
+26809,
+26820,
+26831,
+26841,
+26852,
+26863,
+26873,
+26884,
+26894,
+26905,
+26915,
+26926,
+26936,
+26946,
+26957,
+26967,
+26978,
+26988,
+26998,
+27008,
+27019,
+27029,
+27039,
+27049,
+27059,
+27069,
+27080,
+27090,
+27100,
+27110,
+27120,
+27130,
+27140,
+27150,
+27160,
+27169,
+27179,
+27189,
+27199,
+27209,
+27219,
+27228,
+27238,
+27248,
+27257,
+27267,
+27277,
+27286,
+27296,
+27306,
+27315,
+27325,
+27334,
+27344,
+27353,
+27363,
+27372,
+27382,
+27391,
+27400,
+27410,
+27419,
+27429,
+27438,
+27447,
+27456,
+27466,
+27475,
+27484,
+27493,
+27502,
+27512,
+27521,
+27530,
+27539,
+27548,
+27557,
+27566,
+27575,
+27584,
+27593,
+27602,
+27611,
+27620,
+27629,
+27638,
+27647,
+27656,
+27665,
+27673,
+27682,
+27691,
+27700,
+27709,
+27717,
+27726,
+27735,
+27743,
+27752,
+27761,
+27769,
+27778,
+27787,
+27795,
+27804,
+27812,
+27821,
+27829,
+27838,
+27846,
+27855,
+27863,
+27872,
+27880,
+27889,
+27897,
+27905,
+27914,
+27922,
+27931,
+27939,
+27947,
+27955,
+27964,
+27972,
+27980,
+27988,
+27997,
+28005,
+28013,
+28021,
+28029,
+28037,
+28046,
+28054,
+28062,
+28070,
+28078,
+28086,
+28094,
+28102,
+28110,
+28118,
+28126,
+28134,
+28142,
+28150,
+28158,
+28166,
+28174,
+28181,
+28189,
+28197,
+28205,
+28213,
+28221,
+28228,
+28236,
+28244,
+28252,
+28259,
+28267,
+28275,
+28283,
+28290,
+28298,
+28306,
+28313,
+28321,
+28329,
+28336,
+28344,
+28351,
+28359,
+28366,
+28374,
+28382,
+28389,
+28397,
+28404,
+28412,
+28419,
+28426,
+28434,
+28441,
+28449,
+28456,
+28464,
+28471,
+28478,
+28486,
+28493,
+28500,
+28508,
+28515,
+28522,
+28530,
+28537,
+28544,
+28551,
+28559,
+28566,
+28573,
+28580,
+28587,
+28595,
+28602,
+28609,
+28616,
+28623,
+28630,
+28637,
+28645,
+28652,
+28659,
+28666,
+28673,
+28680,
+28687,
+28694,
+28701,
+28708,
+28715,
+28722,
+28729,
+28736,
+28743,
+28750,
+28757,
+28763,
+28770,
+28777,
+28784,
+28791,
+28798,
+28805,
+28812,
+28818,
+28825,
+28832,
+28839,
+28846,
+28852,
+28859,
+28866,
+28873,
+28879,
+28886,
+28893,
+28900,
+28906,
+28913,
+28920,
+28926,
+28933,
+28940,
+28946,
+28953,
+28959,
+28966,
+28973,
+28979,
+28986,
+28992,
+28999,
+29005,
+29012,
+29018,
+29025,
+29031,
+29038,
+29044,
+29051,
+29057,
+29064,
+29070,
+29077,
+29083,
+29090,
+29096,
+29102,
+29109,
+29115,
+29122,
+29128,
+29134,
+29141,
+29147,
+29153,
+29160,
+29166,
+29172,
+29179,
+29185,
+29191,
+29197,
+29204,
+29210,
+29216,
+29222,
+29229,
+29235,
+29241,
+29247,
+29253,
+29260,
+29266,
+29272,
+29278,
+29284,
+29290,
+29296,
+29303,
+29309,
+29315,
+29321,
+29327,
+29333,
+29339,
+29345,
+29351,
+29357,
+29363,
+29369,
+29375,
+29381,
+29387,
+29393,
+29399,
+29405,
+29411,
+29417,
+29423,
+29429,
+29435,
+29441,
+29447,
+29453,
+29459,
+29465,
+29471,
+29477,
+29482,
+29488,
+29494,
+29500,
+29506,
+29512,
+29518,
+29523,
+29529,
+29535,
+29541,
+29547,
+29552,
+29558,
+29564,
+29570,
+29575,
+29581,
+29587,
+29593,
+29598,
+29604,
+29610,
+29616,
+29621,
+29627,
+29633,
+29638,
+29644,
+29650,
+29655,
+29661,
+29667,
+29672,
+29678,
+29683,
+29689,
+29695,
+29700,
+29706,
+29711,
+29717,
+29723,
+29728,
+29734,
+29739,
+29745,
+29750,
+29756,
+29761,
+29767,
+29772,
+29778,
+29783,
+29789,
+29794,
+29800,
+29805,
+29811,
+29816,
+29822,
+29827,
+29833,
+29838,
+29843,
+29849,
+29854,
+29860,
+29865,
+29870,
+29876,
+29881,
+29886,
+29892,
+29897,
+29903,
+29908,
+29913,
+29919,
+29924,
+29929,
+29935,
+29940,
+29945,
+29950,
+29956,
+29961,
+29966,
+29972,
+29977,
+29982,
+29987,
+29993,
+29998,
+30003,
+30008,
+30013,
+30019,
+30024,
+30029,
+30034,
+30039,
+30045,
+30050,
+30055,
+30060,
+30065,
+30070,
+30076,
+30081,
+30086,
+30091,
+30096,
+30101,
+30106,
+30111,
+30116,
+30122,
+30127,
+30132,
+30137,
+30142,
+30147,
+30152,
+30157,
+30162,
+30167,
+30172,
+30177,
+30182,
+30187,
+30192,
+30197,
+30202,
+30207,
+30212,
+30217,
+30222,
+30227,
+30232,
+30237,
+30242,
+30247,
+30252,
+30257,
+30262,
+30267,
+30272,
+30277,
+30281,
+30286,
+30291,
+30296,
+30301,
+30306,
+30311,
+30316,
+30321,
+30325,
+30330,
+30335,
+30340,
+30345,
+30350,
+30355,
+30359,
+30364,
+30369,
+30374,
+30379,
+30383,
+30388,
+30393,
+30398,
+30403,
+30407,
+30412,
+30417,
+30422,
+30426,
+30431,
+30436,
+30441,
+30445,
+30450,
+30455,
+30459,
+30464,
+30469,
+30474,
+30478,
+30483,
+30488,
+30492,
+30497,
+30502,
+30506,
+30511,
+30516,
+30520,
+30525,
+30530,
+30534,
+30539,
+30544,
+30548,
+30553,
+30557,
+30562,
+30567,
+30571,
+30576,
+30580,
+30585,
+30590,
+30594,
+30599,
+30603,
+30608,
+30612,
+30617,
+30622,
+30626,
+30631,
+30635,
+30640,
+30644,
+30649,
+30653,
+30658,
+30662,
+30667,
+30671,
+30676,
+30680,
+30685,
+30689,
+30694,
+30698,
+30703,
+30707,
+30712,
+30716,
+30720,
+30725,
+30729,
+30734,
+30738,
+30743,
+30747,
+30752,
+30756,
+30760,
+30765,
+30769,
+30774,
+30778,
+30782,
+30787,
+30791,
+30795,
+30800,
+30804,
+30809,
+30813,
+30817,
+30822,
+30826,
+30830,
+30835,
+30839,
+30843,
+30848,
+30852,
+30856,
+30861,
+30865,
+30869,
+30873,
+30878,
+30882,
+30886,
+30891,
+30895,
+30899,
+30903,
+30908,
+30912,
+30916,
+30921,
+30925,
+30929,
+30933,
+30937,
+30942,
+30946,
+30950,
+30954,
+30959,
+30963,
+30967,
+30971,
+30975,
+30980,
+30984,
+30988,
+30992,
+30996,
+31001,
+31005,
+31009,
+31013,
+31017,
+31021,
+31026,
+31030,
+31034,
+31038,
+31042,
+31046,
+31050,
+31055,
+31059,
+31063,
+31067,
+31071,
+31075,
+31079,
+31083,
+31087,
+31091,
+31096,
+31100,
+31104,
+31108,
+31112,
+31116,
+31120,
+31124,
+31128,
+31132,
+31136,
+31140,
+31144,
+31148,
+31152,
+31156,
+31161,
+31165,
+31169,
+31173,
+31177,
+31181,
+31185,
+31189,
+31193,
+31197,
+31201,
+31205,
+31209,
+31213,
+31217,
+31221,
+31224,
+31228,
+31232,
+31236,
+31240,
+31244,
+31248,
+31252,
+31256,
+31260,
+31264,
+31268,
+31272,
+31276,
+31280,
+31284,
+31288,
+31291,
+31295,
+31299,
+31303,
+31307,
+31311,
+31315,
+31319,
+31323,
+31327,
+31330,
+31334,
+31338,
+31342,
+31346,
+31350,
+31354,
+31357,
+31361,
+31365,
+31369,
+31373,
+31377,
+31381,
+31384,
+31388,
+31392,
+31396,
+31400,
+31403,
+31407,
+31411,
+31415,
+31419,
+31423,
+31426,
+31430,
+31434,
+31438,
+31441,
+31445,
+31449,
+31453,
+31457,
+31460,
+31464,
+31468,
+31472,
+31475,
+31479,
+31483,
+31487,
+31490,
+31494,
+31498,
+31502,
+31505,
+31509,
+31513,
+31516,
+31520,
+31524,
+31528,
+31531,
+31535,
+31539,
+31542,
+31546,
+31550,
+31553,
+31557,
+31561,
+31565,
+31568,
+31572,
+31576,
+31579,
+31583,
+31587,
+31590,
+31594,
+31598,
+31601,
+31605,
+31608,
+31612,
+31616,
+31619,
+31623,
+31627,
+31630,
+31634,
+31638,
+31641,
+31645,
+31648,
+31652,
+31656,
+31659,
+31663,
+31666,
+31670,
+31674,
+31677,
+31681,
+31684,
+31688,
+31691,
+31695,
+31699,
+31702,
+31706,
+31709,
+31713,
+31716,
+31720,
+31724,
+31727,
+31731,
+31734,
+31738,
+31741,
+31745,
+31748,
+31752,
+31755,
+31759,
+31762,
+31766,
+31769,
+31773,
+31776,
+31780,
+31783,
+31787,
+31790,
+31794,
+31797,
+31801,
+31804,
+31808,
+31811,
+31815,
+31818,
+31822,
+31825,
+31829,
+31832,
+31836,
+31839,
+31843,
+31846,
+31849,
+31853,
+31856,
+31860,
+31863,
+31867,
+31870,
+31874,
+31877,
+31880,
+31884,
+31887,
+31891,
+31894,
+31898,
+31901,
+31904,
+31908,
+31911,
+31915,
+31918,
+31921,
+31925,
+31928,
+31932,
+31935,
+31938,
+31942,
+31945,
+31948,
+31952,
+31955,
+31959,
+31962,
+31965,
+31969,
+31972,
+31975,
+31979,
+31982,
+31985,
+31989,
+31992,
+31995,
+31999,
+32002,
+32005,
+32009,
+32012,
+32015,
+32019,
+32022,
+32025,
+32029,
+32032,
+32035,
+32039,
+32042,
+32045,
+32049,
+32052,
+32055,
+32058,
+32062,
+32065,
+32068,
+32072,
+32075,
+32078,
+32081,
+32085,
+32088,
+32091,
+32094,
+32098,
+32101,
+32104,
+32108,
+32111,
+32114,
+32117,
+32121,
+32124,
+32127,
+32130,
+32133,
+32137,
+32140,
+32143,
+32146,
+32150,
+32153,
+32156,
+32159,
+32163,
+32166,
+32169,
+32172,
+32175,
+32179,
+32182,
+32185,
+32188,
+32191,
+32195,
+32198,
+32201,
+32204,
+32207,
+32210,
+32214,
+32217,
+32220,
+32223,
+32226,
+32230,
+32233,
+32236,
+32239,
+32242,
+32245,
+32249,
+32252,
+32255,
+32258,
+32261,
+32264,
+32267,
+32271,
+32274,
+32277,
+32280,
+32283,
+32286,
+32289,
+32292,
+32296,
+32299,
+32302,
+32305,
+32308,
+32311,
+32314,
+32317,
+32320,
+32324,
+32327,
+32330,
+32333,
+32336,
+32339,
+32342,
+32345,
+32348,
+32351,
+32354,
+32358,
+32361,
+32364,
+32367,
+32370,
+32373,
+32376,
+32379,
+32382,
+32385,
+32388,
+32391,
+32394,
+32397,
+32400,
+32403,
+32407,
+32410,
+32413,
+32416,
+32419,
+32422,
+32425,
+32428,
+32431,
+32434,
+32437,
+32440,
+32443,
+32446,
+32449,
+32452,
+32455,
+32458,
+32461,
+32464,
+32467,
+32470,
+32473,
+32476,
+32479,
+32482,
+32485,
+32488,
+32491,
+32494,
+32497,
+32500,
+32503,
+32506,
+32509,
+32512,
+32515,
+32518,
+32521,
+32524,
+32527,
+32530,
+32533,
+32536,
+32538,
+32541,
+32544,
+32547,
+32550,
+32553,
+32556,
+32559,
+32562,
+32565,
+32568,
+32571,
+32574,
+32577,
+32580,
+32583,
+32586,
+32588,
+32591,
+32594,
+32597,
+32600,
+32603,
+32606,
+32609,
+32612,
+32615,
+32618,
+32620,
+32623,
+32626,
+32629,
+32632,
+32635,
+32638,
+32641,
+32644,
+32646,
+32649,
+32652,
+32655,
+32658,
+32661,
+32664,
+32667,
+32669,
+32672,
+32675,
+32678,
+32681,
+32684,
+32687,
+32690,
+32692,
+32695,
+32698,
+32701,
+32704,
+32707,
+32709,
+32712,
+32715,
+32718,
+32721,
+32724,
+32727,
+32729,
+32732,
+32735,
+32738,
+32741,
+32743,
+32746,
+32749,
+32752,
+32755,
+32758,
+32760,
+32763,
+32766,
+32769,
+32772,
+32774,
+32777,
+32780,
+32783,
+32786,
+32788,
+32791,
+32794,
+32797,
+32800,
+32802,
+32805,
+32808,
+32811,
+32813,
+32816,
+32819,
+32822,
+32825,
+32827,
+32830,
+32833,
+32836,
+32838,
+32841,
+32844,
+32847,
+32849,
+32852,
+32855,
+32858,
+32860,
+32863,
+32866,
+32869,
+32871,
+32874,
+32877,
+32880,
+32882,
+32885,
+32888,
+32891,
+32893,
+32896,
+32899,
+32901,
+32904,
+32907,
+32910,
+32912,
+32915,
+32918,
+32921,
+32923,
+32926,
+32929,
+32931,
+32934,
+32937,
+32939,
+32942,
+32945,
+32948,
+32950,
+32953,
+32956,
+32958,
+32961,
+32964,
+32966,
+32969,
+32972,
+32974,
+32977,
+32980,
+32982,
+32985,
+32988,
+32990,
+32993,
+32996,
+32998,
+33001,
+33004,
+33006,
+33009,
+33012,
+33014,
+33017,
+33020,
+33022,
+33025,
+33028,
+33030,
+33033,
+33036,
+33038,
+33041,
+33044,
+33046,
+33049,
+33051,
+33054,
+33057,
+33059,
+33062,
+33065,
+33067,
+33070,
+33072,
+33075,
+33078,
+33080,
+33083,
+33086,
+33088,
+33091,
+33093,
+33096,
+33099,
+33101,
+33104,
+33106,
+33109,
+33112,
+33114,
+33117,
+33119,
+33122,
+33125,
+33127,
+33130,
+33132,
+33135,
+33137,
+33140,
+33143,
+33145,
+33148,
+33150,
+33153,
+33156,
+33158,
+33161,
+33163,
+33166,
+33168,
+33171,
+33173,
+33176,
+33179,
+33181,
+33184,
+33186,
+33189,
+33191,
+33194,
+33196,
+33199,
+33202,
+33204,
+33207,
+33209,
+33212,
+33214,
+33217,
+33219,
+33222,
+33224,
+33227,
+33229,
+33232,
+33235,
+33237,
+33240,
+33242,
+33245,
+33247,
+33250,
+33252,
+33255,
+33257,
+33260,
+33262,
+33265,
+33267,
+33270,
+33272,
+33275,
+33277,
+33280,
+33282,
+33285,
+33287,
+33290,
+33292,
+33295,
+33297,
+33300,
+33302,
+33305,
+33307,
+33310,
+33312,
+33315,
+33317,
+33320,
+33322,
+33325,
+33327,
+33330,
+33332,
+33334,
+33337,
+33339,
+33342,
+33344,
+33347,
+33349,
+33352,
+33354,
+33357,
+33359,
+33362,
+33364,
+33366,
+33369,
+33371,
+33374,
+33376,
+33379,
+33381,
+33384,
+33386,
+33389,
+33391,
+33393,
+33396,
+33398,
+33401,
+33403,
+33406,
+33408,
+33410,
+33413,
+33415,
+33418,
+33420,
+33423,
+33425,
+33427,
+33430,
+33432,
+33435,
+33437,
+33439,
+33442,
+33444,
+33447,
+33449,
+33452,
+33454,
+33456,
+33459,
+33461,
+33464,
+33466,
+33468,
+33471,
+33473,
+33476,
+33478,
+33480,
+33483,
+33485,
+33487,
+33490,
+33492,
+33495,
+33497,
+33499,
+33502,
+33504,
+33507,
+33509,
+33511,
+33514,
+33516,
+33518,
+33521,
+33523,
+33526,
+33528,
+33530,
+33533,
+33535,
+33537,
+33540,
+33542,
+33544,
+33547,
+33549,
+33551,
+33554,
+33556,
+33559,
+33561,
+33563,
+33566,
+33568,
+33570,
+33573,
+33575,
+33577,
+33580,
+33582,
+33584,
+33587,
+33589,
+33591,
+33594,
+33596,
+33598,
+33601,
+33603,
+33605,
+33608,
+33610,
+33612,
+33615,
+33617,
+33619,
+33622,
+33624,
+33626,
+33628,
+33631,
+33633,
+33635,
+33638,
+33640,
+33642,
+33645,
+33647,
+33649,
+33652,
+33654,
+33656,
+33658,
+33661,
+33663,
+33665,
+33668,
+33670,
+33672,
+33674,
+33677,
+33679,
+33681,
+33684,
+33686,
+33688,
+33690,
+33693,
+33695,
+33697,
+33700,
+33702,
+33704,
+33706,
+33709,
+33711,
+33713,
+33715,
+33718,
+33720,
+33722,
+33725,
+33727,
+33729,
+33731,
+33734,
+33736,
+33738,
+33740,
+33743,
+33745,
+33747,
+33749,
+33752,
+33754,
+33756,
+33758,
+33761,
+33763,
+33765,
+33767,
+33770,
+33772,
+33774,
+33776,
+33779,
+33781,
+33783,
+33785,
+33787,
+33790,
+33792,
+33794,
+33796,
+33799,
+33801,
+33803,
+33805,
+33807,
+33810,
+33812,
+33814,
+33816,
+33819,
+33821,
+33823,
+33825,
+33827,
+33830,
+33832,
+33834,
+33836,
+33838,
+33841,
+33843,
+33845,
+33847,
+33849,
+33852,
+33854,
+33856,
+33858,
+33860,
+33863,
+33865,
+33867,
+33869,
+33871,
+33874,
+33876,
+33878,
+33880,
+33882,
+33885,
+33887,
+33889,
+33891,
+33893,
+33895,
+33898,
+33900,
+33902,
+33904,
+33906,
+33909,
+33911,
+33913,
+33915,
+33917,
+33919,
+33922,
+33924,
+33926,
+33928,
+33930,
+33932,
+33935,
+33937,
+33939,
+33941,
+33943,
+33945,
+33947,
+33950,
+33952,
+33954,
+33956,
+33958,
+33960,
+33962,
+33965,
+33967,
+33969,
+33971,
+33973,
+33975,
+33977,
+33980,
+33982,
+33984,
+33986,
+33988,
+33990,
+33992,
+33995,
+33997,
+33999,
+34001,
+34003,
+34005,
+34007,
+34009,
+34012,
+34014,
+34016,
+34018,
+34020,
+34022,
+34024,
+34026,
+34028,
+34031,
+34033,
+34035,
+34037,
+34039,
+34041,
+34043,
+34045,
+34047,
+34050,
+34052,
+34054,
+34056,
+34058,
+34060,
+34062,
+34064,
+34066,
+34068,
+34071,
+34073,
+34075,
+34077,
+34079,
+34081,
+34083,
+34085,
+34087,
+34089,
+34091,
+34094,
+34096,
+34098,
+34100,
+34102,
+34104,
+34106,
+34108,
+34110,
+34112,
+34114,
+34116,
+34118,
+34121,
+34123,
+34125,
+34127,
+34129,
+34131,
+34133,
+34135,
+34137,
+34139,
+34141,
+34143,
+34145,
+34147,
+34149,
+34151,
+34154,
+34156,
+34158,
+34160,
+34162,
+34164,
+34166,
+34168,
+34170,
+34172,
+34174,
+34176,
+34178,
+34180,
+34182,
+34184,
+34186,
+34188,
+34190,
+34192,
+34194,
+34196,
+34199,
+34201,
+34203,
+34205,
+34207,
+34209,
+34211,
+34213,
+34215,
+34217,
+34219,
+34221,
+34223,
+34225,
+34227,
+34229,
+34231,
+34233,
+34235,
+34237,
+34239,
+34241,
+34243,
+34245,
+34247,
+34249,
+34251,
+34253,
+34255,
+34257,
+34259,
+34261,
+34263,
+34265,
+34267,
+34269,
+34271,
+34273,
+34275,
+34277,
+34279,
+34281,
+34283,
+34285,
+34287,
+34289,
+34291,
+34293,
+34295,
+34297,
+34299,
+34301,
+34303,
+34305,
+34307,
+34309,
+34311,
+34313,
+34315,
+34317,
+34319,
+34321,
+34323,
+34325,
+34327,
+34329,
+34331,
+34333,
+34335,
+34337,
+34339,
+34341,
+34343,
+34345,
+34347,
+34349,
+34351,
+34352,
+34354,
+34356,
+34358,
+34360,
+34362,
+34364,
+34366,
+34368,
+34370,
+34372,
+34374,
+34376,
+34378,
+34380,
+34382,
+34384,
+34386,
+34388,
+34390,
+34392,
+34394,
+34395,
+34397,
+34399,
+34401,
+34403,
+34405,
+34407,
+34409,
+34411,
+34413,
+34415,
+34417,
+34419,
+34421,
+34423,
+34425,
+34427,
+34428,
+34430,
+34432,
+34434,
+34436,
+34438,
+34440,
+34442,
+34444,
+34446,
+34448,
+34450,
+34452,
+34454,
+34455,
+34457,
+34459,
+34461,
+34463,
+34465,
+34467,
+34469,
+34471,
+34473,
+34475,
+34476,
+34478,
+34480,
+34482,
+34484,
+34486,
+34488,
+34490,
+34492,
+34494,
+34496,
+34497,
+34499,
+34501,
+34503,
+34505,
+34507,
+34509,
+34511,
+34513,
+34515,
+34516,
+34518,
+34520,
+34522,
+34524,
+34526,
+34528,
+34530,
+34532,
+34533,
+34535,
+34537,
+34539,
+34541,
+34543,
+34545,
+34547,
+34549,
+34550,
+34552,
+34554,
+34556,
+34558,
+34560,
+34562,
+34564,
+34565,
+34567,
+34569,
+34571,
+34573,
+34575,
+34577,
+34579,
+34580,
+34582,
+34584,
+34586,
+34588,
+34590,
+34592,
+34593,
+34595,
+34597,
+34599,
+34601,
+34603,
+34605,
+34606,
+34608,
+34610,
+34612,
+34614,
+34616,
+34618,
+34619,
+34621,
+34623,
+34625,
+34627,
+34629,
+34631,
+34632,
+34634,
+34636,
+34638,
+34640,
+34642,
+34643,
+34645,
+34647,
+34649,
+34651,
+34653,
+34654,
+34656,
+34658,
+34660,
+34662,
+34664,
+34665,
+34667,
+34669,
+34671,
+34673,
+34675,
+34676,
+34678,
+34680,
+34682,
+34684,
+34686,
+34687,
+34689,
+34691,
+34693,
+34695,
+34696,
+34698,
+34700,
+34702,
+34704,
+34706,
+34707,
+34709,
+34711,
+34713,
+34715,
+34716,
+34718,
+34720,
+34722,
+34724,
+34725,
+34727,
+34729,
+34731,
+34733,
+34735,
+34736,
+34738,
+34740,
+34742,
+34744,
+34745,
+34747,
+34749,
+34751,
+34753,
+34754,
+34756,
+34758,
+34760,
+34761,
+34763,
+34765,
+34767,
+34769,
+34770,
+34772,
+34774,
+34776,
+34778,
+34779,
+34781,
+34783,
+34785,
+34787,
+34788,
+34790,
+34792,
+34794,
+34795,
+34797,
+34799,
+34801,
+34803,
+34804,
+34806,
+34808,
+34810,
+34811,
+34813,
+34815,
+34817,
+34818,
+34820,
+34822,
+34824,
+34826,
+34827,
+34829,
+34831,
+34833,
+34834,
+34836,
+34838,
+34840,
+34841,
+34843,
+34845,
+34847,
+34848,
+34850,
+34852,
+34854,
+34855,
+34857,
+34859,
+34861,
+34862,
+34864,
+34866,
+34868,
+34869,
+34871,
+34873,
+34875,
+34876,
+34878,
+34880,
+34882,
+34883,
+34885,
+34887,
+34889,
+34890,
+34892,
+34894,
+34896,
+34897,
+34899,
+34901,
+34903,
+34904,
+34906,
+34908,
+34910,
+34911,
+34913,
+34915,
+34916,
+34918,
+34920,
+34922,
+34923,
+34925,
+34927,
+34929,
+34930,
+34932,
+34934,
+34935,
+34937,
+34939,
+34941,
+34942,
+34944,
+34946,
+34948,
+34949,
+34951,
+34953,
+34954,
+34956,
+34958,
+34960,
+34961,
+34963,
+34965,
+34966,
+34968,
+34970,
+34972,
+34973,
+34975,
+34977,
+34978,
+34980,
+34982,
+34983,
+34985,
+34987,
+34989,
+34990,
+34992,
+34994,
+34995,
+34997,
+34999,
+35001,
+35002,
+35004,
+35006,
+35007,
+35009,
+35011,
+35012,
+35014,
+35016,
+35017,
+35019,
+35021,
+35023,
+35024,
+35026,
+35028,
+35029,
+35031,
+35033,
+35034,
+35036,
+35038,
+35039,
+35041,
+35043,
+35044,
+35046,
+35048,
+35050,
+35051,
+35053,
+35055,
+35056,
+35058,
+35060,
+35061,
+35063,
+35065,
+35066,
+35068,
+35070,
+35071,
+35073,
+35075,
+35076,
+35078,
+35080,
+35081,
+35083,
+35085,
+35086,
+35088,
+35090,
+35091,
+35093,
+35095,
+35096,
+35098,
+35100,
+35101,
+35103,
+35105,
+35106,
+35108,
+35110,
+35111,
+35113,
+35115,
+35116,
+35118,
+35120,
+35121,
+35123,
+35124,
+35126,
+35128,
+35129,
+35131,
+35133,
+35134,
+35136,
+35138,
+35139,
+35141,
+35143,
+35144,
+35146,
+35148,
+35149,
+35151,
+35152,
+35154,
+35156,
+35157,
+35159,
+35161,
+35162,
+35164,
+35166,
+35167,
+35169,
+35170,
+35172,
+35174,
+35175,
+35177,
+35179,
+35180,
+35182,
+35184,
+35185,
+35187,
+35188,
+35190,
+35192,
+35193,
+35195,
+35197,
+35198,
+35200,
+35201,
+35203,
+35205,
+35206,
+35208,
+35210,
+35211,
+35213,
+35214,
+35216,
+35218,
+35219,
+35221,
+35222,
+35224,
+35226,
+35227,
+35229,
+35231,
+35232,
+35234,
+35235,
+35237,
+35239,
+35240,
+35242,
+35243,
+35245,
+35247,
+35248,
+35250,
+35251,
+35253,
+35255,
+35256,
+35258,
+35260,
+35261,
+35263,
+35264,
+35266,
+35268,
+35269,
+35271,
+35272,
+35274,
+35276,
+35277,
+35279,
+35280,
+35282,
+35283,
+35285,
+35287,
+35288,
+35290,
+35291,
+35293,
+35295,
+35296,
+35298,
+35299,
+35301,
+35303,
+35304,
+35306,
+35307,
+35309,
+35311,
+35312,
+35314,
+35315,
+35317,
+35318,
+35320,
+35322,
+35323,
+35325,
+35326,
+35328,
+35329,
+35331,
+35333,
+35334,
+35336,
+35337,
+35339,
+35341,
+35342,
+35344,
+35345,
+35347,
+35348,
+35350,
+35352,
+35353,
+35355,
+35356,
+35358,
+35359,
+35361,
+35362,
+35364,
+35366,
+35367,
+35369,
+35370,
+35372,
+35373,
+35375,
+35377,
+35378,
+35380,
+35381,
+35383,
+35384,
+35386,
+35387,
+35389,
+35391,
+35392,
+35394,
+35395,
+35397,
+35398,
+35400,
+35401,
+35403,
+35405,
+35406,
+35408,
+35409,
+35411,
+35412,
+35414,
+35415,
+35417,
+35418,
+35420,
+35422,
+35423,
+35425,
+35426,
+35428,
+35429,
+35431,
+35432,
+35434,
+35435,
+35437,
+35439,
+35440,
+35442,
+35443,
+35445,
+35446,
+35448,
+35449,
+35451,
+35452,
+35454,
+35455,
+35457,
+35459,
+35460,
+35462,
+35463,
+35465,
+35466,
+35468,
+35469,
+35471,
+35472,
+35474,
+35475,
+35477,
+35478,
+35480,
+35481,
+35483,
+35484,
+35486,
+35488,
+35489,
+35491,
+35492,
+35494,
+35495,
+35497,
+35498,
+35500,
+35501,
+35503,
+35504,
+35506,
+35507,
+35509,
+35510,
+35512,
+35513,
+35515,
+35516,
+35518,
+35519,
+35521,
+35522,
+35524,
+35525,
+35527,
+35528,
+35530,
+35531,
+35533,
+35534,
+35536,
+35538,
+35539,
+35541,
+35542,
+35544,
+35545,
+35547,
+35548,
+35550,
+35551,
+35553,
+35554,
+35556,
+35557,
+35559,
+35560,
+35562,
+35563,
+35565,
+35566,
+35568,
+35569,
+35571,
+35572,
+35573,
+35575,
+35576,
+35578,
+35579,
+35581,
+35582,
+35584,
+35585,
+35587,
+35588,
+35590,
+35591,
+35593,
+35594,
+35596,
+35597,
+35599,
+35600,
+35602,
+35603,
+35605,
+35606,
+35608,
+35609,
+35611,
+35612,
+35614,
+35615,
+35617,
+35618,
+35620,
+35621,
+35622,
+35624,
+35625,
+35627,
+35628,
+35630,
+35631,
+35633,
+35634,
+35636,
+35637,
+35639,
+35640,
+35642,
+35643,
+35645,
+35646,
+35648,
+35649,
+35650,
+35652,
+35653,
+35655,
+35656,
+35658,
+35659,
+35661,
+35662,
+35664,
+35665,
+35667,
+35668,
+35670,
+35671,
+35672,
+35674,
+35675,
+35677,
+35678,
+35680,
+35681,
+35683,
+35684,
+35686,
+35687,
+35688,
+35690,
+35691,
+35693,
+35694,
+35696,
+35697,
+35699,
+35700,
+35702,
+35703,
+35704,
+35706,
+35707,
+35709,
+35710,
+35712,
+35713,
+35715,
+35716,
+35717,
+35719,
+35720,
+35722,
+35723,
+35725,
+35726,
+35728,
+35729,
+35730,
+35732,
+35733,
+35735,
+35736,
+35738,
+35739,
+35741,
+35742,
+35743,
+35745,
+35746,
+35748,
+35749,
+35751,
+35752,
+35753,
+35755,
+35756,
+35758,
+35759,
+35761,
+35762,
+35764,
+35765,
+35766,
+35768,
+35769,
+35771,
+35772,
+35774,
+35775,
+35776,
+35778,
+35779,
+35781,
+35782,
+35783,
+35785,
+35786,
+35788,
+35789,
+35791,
+35792,
+35793,
+35795,
+35796,
+35798,
+35799,
+35801,
+35802,
+35803,
+35805,
+35806,
+35808,
+35809,
+35810,
+35812,
+35813,
+35815,
+35816,
+35818,
+35819,
+35820,
+35822,
+35823,
+35825,
+35826,
+35827,
+35829,
+35830,
+35832,
+35833,
+35834,
+35836,
+35837,
+35839,
+35840,
+35842,
+35843,
+35844,
+35846,
+35847,
+35849,
+35850,
+35851,
+35853,
+35854,
+35856,
+35857,
+35858,
+35860,
+35861,
+35863,
+35864,
+35865,
+35867,
+35868,
+35870,
+35871,
+35872,
+35874,
+35875,
+35877,
+35878,
+35879,
+35881,
+35882,
+35883,
+35885,
+35886,
+35888,
+35889,
+35890,
+35892,
+35893,
+35895,
+35896,
+35897,
+35899,
+35900,
+35902,
+35903,
+35904,
+35906,
+35907,
+35908,
+35910,
+35911,
+35913,
+35914,
+35915,
+35917,
+35918,
+35920,
+35921,
+35922,
+35924,
+35925,
+35926,
+35928,
+35929,
+35931,
+35932,
+35933,
+35935,
+35936,
+35937,
+35939,
+35940,
+35942,
+35943,
+35944,
+35946,
+35947,
+35948,
+35950,
+35951,
+35953,
+35954,
+35955,
+35957,
+35958,
+35959,
+35961,
+35962,
+35964,
+35965,
+35966,
+35968,
+35969,
+35970,
+35972,
+35973,
+35974,
+35976,
+35977,
+35979,
+35980,
+35981,
+35983,
+35984,
+35985,
+35987,
+35988,
+35989,
+35991,
+35992,
+35994,
+35995,
+35996,
+35998,
+35999,
+36000,
+36002,
+36003,
+36004,
+36006,
+36007,
+36008,
+36010,
+36011,
+36013,
+36014,
+36015,
+36017,
+36018,
+36019,
+36021,
+36022,
+36023,
+36025,
+36026,
+36027,
+36029,
+36030,
+36031,
+36033,
+36034,
+36035,
+36037,
+36038,
+36040,
+36041,
+36042,
+36044,
+36045,
+36046,
+36048,
+36049,
+36050,
+36052,
+36053,
+36054,
+36056,
+36057,
+36058,
+36060,
+36061,
+36062,
+36064,
+36065,
+36066,
+36068,
+36069,
+36070,
+36072,
+36073,
+36074,
+36076,
+36077,
+36078,
+36080,
+36081,
+36082,
+36084,
+36085,
+36086,
+36088,
+36089,
+36090,
+36092,
+36093,
+36094,
+36096,
+36097,
+36098,
+36100,
+36101,
+36102,
+36104,
+36105,
+36106,
+36108,
+36109,
+36110,
+36112,
+36113,
+36114,
+36116,
+36117,
+36118,
+36119,
+36121,
+36122,
+36123,
+36125,
+36126,
+36127,
+36129,
+36130,
+36131,
+36133,
+36134,
+36135,
+36137,
+36138,
+36139,
+36141,
+36142,
+36143,
+36144,
+36146,
+36147,
+36148,
+36150,
+36151,
+36152,
+36154,
+36155,
+36156,
+36158,
+36159,
+36160,
+36162,
+36163,
+36164,
+36165,
+36167,
+36168,
+36169,
+36171,
+36172,
+36173,
+36175,
+36176,
+36177,
+36179,
+36180,
+36181,
+36182,
+36184,
+36185,
+36186,
+36188,
+36189,
+36190,
+36192,
+36193,
+36194,
+36195,
+36197,
+36198,
+36199,
+36201,
+36202,
+36203,
+36205,
+36206,
+36207,
+36208,
+36210,
+36211,
+36212,
+36214,
+36215,
+36216,
+36217,
+36219,
+36220,
+36221,
+36223,
+36224,
+36225,
+36226,
+36228,
+36229,
+36230,
+36232,
+36233,
+36234,
+36235,
+36237,
+36238,
+36239,
+36241,
+36242,
+36243,
+36244,
+36246,
+36247,
+36248,
+36250,
+36251,
+36252,
+36253,
+36255,
+36256,
+36257,
+36259,
+36260,
+36261,
+36262,
+36264,
+36265,
+36266,
+36268,
+36269,
+36270,
+36271,
+36273,
+36274,
+36275,
+36276,
+36278,
+36279,
+36280,
+36282,
+36283,
+36284,
+36285,
+36287,
+36288,
+36289,
+36290,
+36292,
+36293,
+36294,
+36296,
+36297,
+36298,
+36299,
+36301,
+36302,
+36303,
+36304,
+36306,
+36307,
+36308,
+36310,
+36311,
+36312,
+36313,
+36315,
+36316,
+36317,
+36318,
+36320,
+36321,
+36322,
+36323,
+36325,
+36326,
+36327,
+36328,
+36330,
+36331,
+36332,
+36333,
+36335,
+36336,
+36337,
+36339,
+36340,
+36341,
+36342,
+36344,
+36345,
+36346,
+36347,
+36349,
+36350,
+36351,
+36352,
+36354,
+36355,
+36356,
+36357,
+36359,
+36360,
+36361,
+36362,
+36364,
+36365,
+36366,
+36367,
+36369,
+36370,
+36371,
+36372,
+36374,
+36375,
+36376,
+36377,
+36379,
+36380,
+36381,
+36382,
+36384,
+36385,
+36386,
+36387,
+36389,
+36390,
+36391,
+36392,
+36394,
+36395,
+36396,
+36397,
+36398,
+36400,
+36401,
+36402,
+36403,
+36405,
+36406,
+36407,
+36408,
+36410,
+36411,
+36412,
+36413,
+36415,
+36416,
+36417,
+36418,
+36420,
+36421,
+36422,
+36423,
+36424,
+36426,
+36427,
+36428,
+36429,
+36431,
+36432,
+36433,
+36434,
+36436,
+36437,
+36438,
+36439,
+36440,
+36442,
+36443,
+36444,
+36445,
+36447,
+36448,
+36449,
+36450,
+36451,
+36453,
+36454,
+36455,
+36456,
+36458,
+36459,
+36460,
+36461,
+36463,
+36464,
+36465,
+36466,
+36467,
+36469,
+36470,
+36471,
+36472,
+36474,
+36475,
+36476,
+36477,
+36478,
+36480,
+36481,
+36482,
+36483,
+36484,
+36486,
+36487,
+36488,
+36489,
+36491,
+36492,
+36493,
+36494,
+36495,
+36497,
+36498,
+36499,
+36500,
+36501,
+36503,
+36504,
+36505,
+36506,
+36508,
+36509,
+36510,
+36511,
+36512,
+36514,
+36515,
+36516,
+36517,
+36518,
+36520,
+36521,
+36522,
+36523,
+36524,
+36526,
+36527,
+36528,
+36529,
+36530,
+36532,
+36533,
+36534,
+36535,
+36536,
+36538,
+36539,
+36540,
+36541,
+36542,
+36544,
+36545,
+36546,
+36547,
+36548,
+36550,
+36551,
+36552,
+36553,
+36554,
+36556,
+36557,
+36558,
+36559,
+36560,
+36562,
+36563,
+36564,
+36565,
+36566,
+36568,
+36569,
+36570,
+36571,
+36572,
+36574,
+36575,
+36576,
+36577,
+36578,
+36580,
+36581,
+36582,
+36583,
+36584,
+36586,
+36587,
+36588,
+36589,
+36590,
+36591,
+36593,
+36594,
+36595,
+36596,
+36597,
+36599,
+36600,
+36601,
+36602,
+36603,
+36605,
+36606,
+36607,
+36608,
+36609,
+36610,
+36612,
+36613,
+36614,
+36615,
+36616,
+36618,
+36619,
+36620,
+36621,
+36622,
+36623,
+36625,
+36626,
+36627,
+36628,
+36629,
+36630,
+36632,
+36633,
+36634,
+36635,
+36636,
+36638,
+36639,
+36640,
+36641,
+36642,
+36643,
+36645,
+36646,
+36647,
+36648,
+36649,
+36650,
+36652,
+36653,
+36654,
+36655,
+36656,
+36657,
+36659,
+36660,
+36661,
+36662,
+36663,
+36665,
+36666,
+36667,
+36668,
+36669,
+36670,
+36672,
+36673,
+36674,
+36675,
+36676,
+36677,
+36678,
+36680,
+36681,
+36682,
+36683,
+36684,
+36685,
+36687,
+36688,
+36689,
+36690,
+36691,
+36692,
+36694,
+36695,
+36696,
+36697,
+36698,
+36699,
+36701,
+36702,
+36703,
+36704,
+36705,
+36706,
+36708,
+36709,
+36710,
+36711,
+36712,
+36713,
+36714,
+36716,
+36717,
+36718,
+36719,
+36720,
+36721,
+36723,
+36724,
+36725,
+36726,
+36727,
+36728,
+36729,
+36731,
+36732,
+36733,
+36734,
+36735,
+36736,
+36738,
+36739,
+36740,
+36741,
+36742,
+36743,
+36744,
+36746,
+36747,
+36748,
+36749,
+36750,
+36751,
+36752,
+36754,
+36755,
+36756,
+36757,
+36758,
+36759,
+36760,
+36762,
+36763,
+36764,
+36765,
+36766,
+36767,
+36768,
+36770,
+36771,
+36772,
+36773,
+36774,
+36775,
+36776,
+36778,
+36779,
+36780,
+36781,
+36782,
+36783,
+36784,
+36786,
+36787,
+36788,
+36789,
+36790,
+36791,
+36792,
+36793,
+36795,
+36796,
+36797,
+36798,
+36799,
+36800,
+36801,
+36803,
+36804,
+36805,
+36806,
+36807,
+36808,
+36809,
+36810,
+36812,
+36813,
+36814,
+36815,
+36816,
+36817,
+36818,
+36820,
+36821,
+36822,
+36823,
+36824,
+36825,
+36826,
+36827,
+36829,
+36830,
+36831,
+36832,
+36833,
+36834,
+36835,
+36836,
+36838,
+36839,
+36840,
+36841,
+36842,
+36843,
+36844,
+36845,
+36847,
+36848,
+36849,
+36850,
+36851,
+36852,
+36853,
+36854,
+36855,
+36857,
+36858,
+36859,
+36860,
+36861,
+36862,
+36863,
+36864,
+36866,
+36867,
+36868,
+36869,
+36870,
+36871,
+36872,
+36873,
+36874,
+36876,
+36877,
+36878,
+36879,
+36880,
+36881,
+36882,
+36883,
+36884,
+36886,
+36887,
+36888,
+36889,
+36890,
+36891,
+36892,
+36893,
+36894,
+36896,
+36897,
+36898,
+36899,
+36900,
+36901,
+36902,
+36903,
+36904,
+36906,
+36907,
+36908,
+36909,
+36910,
+36911,
+36912,
+36913,
+36914,
+36916,
+36917,
+36918,
+36919,
+36920,
+36921,
+36922,
+36923,
+36924,
+36925,
+36927,
+36928,
+36929,
+36930,
+36931,
+36932,
+36933,
+36934,
+36935,
+36936,
+36938,
+36939,
+36940,
+36941,
+36942,
+36943,
+36944,
+36945,
+36946,
+36947,
+36949,
+36950,
+36951,
+36952,
+36953,
+36954,
+36955,
+36956,
+36957,
+36958,
+36959,
+36961,
+36962,
+36963,
+36964,
+36965,
+36966,
+36967,
+36968,
+36969,
+36970,
+36971,
+36973,
+36974,
+36975,
+36976,
+36977,
+36978,
+36979,
+36980,
+36981,
+36982,
+36983,
+36985,
+36986,
+36987,
+36988,
+36989,
+36990,
+36991,
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync.h
new file mode 100644
index 0000000..633341c
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync.h
@@ -0,0 +1,724 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_MPROC_SYNC_H
+#define __QTN_MPROC_SYNC_H
+
+#ifdef __KERNEL__
+#include <linux/sched.h>
+#endif
+
+#include "mproc_sync_base.h"
+#include "semaphores.h"
+#include "shared_params.h"
+#include "topaz_tqe_cpuif.h"
+
+#ifndef __ASSEMBLY__
+
+#define QTN_MPROC_TIMEOUT	(6 * HZ)
+
+/*
+ * NOTE: functions started from "__" are internal, and must not be used by client code.
+ */
+
+/* Enum represents each CPU in system */
+typedef enum _QTN_SOC_CPU
+{
+	QTN_LHOST_SOC_CPU = (1 << 0),
+	QTN_MUC_SOC_CPU   = (1 << 1),
+	QTN_DSP_SOC_CPU   = (1 << 2)
+} QTN_SOC_CPU;
+
+#if QTN_SEM_TRACE
+#define QTN_SEM_TRACE_NUM    12
+#define QTN_SEM_TRACE_DEPTH  2
+
+#define SEM_TRACE_CPU_LHOST     0
+#define SEM_TRACE_CPU_MUC       1
+#define SEM_TRACE_CPU_DSP       2
+#define SEM_TRACE_CPU_AUC       3
+#define SEM_TRACE_CPU_NUM       4
+
+enum qtn_sem_state {
+	QTN_SEM_STARTLOCK = 0,
+	QTN_SEM_LOCKED = 1,
+	QTN_SEM_UNLOCKED = 2,
+};
+
+struct qtn_sem_trace_entry {
+	volatile uint32_t    pos;
+	volatile uint64_t    jiffies;        /* per cpu jiffies: lhost: 32b jiffies; muc: 64b jiffies; dsp: no jiffies */
+	volatile uint32_t    state;
+	volatile uint32_t    caller_file[QTN_SEM_TRACE_DEPTH];
+	volatile uint32_t    caller_line[QTN_SEM_TRACE_DEPTH];
+};
+struct qtn_sem_trace_log {
+	volatile uint32_t trace_pos[SEM_TRACE_CPU_NUM];
+	volatile uint32_t trace_idx[SEM_TRACE_CPU_NUM];
+	volatile uint32_t last_dump_pos[SEM_TRACE_CPU_NUM];
+	struct qtn_sem_trace_entry traces[SEM_TRACE_CPU_NUM][QTN_SEM_TRACE_NUM];
+};
+
+#if defined(DSP_BUILD)
+#define PER_CPU_CLK    0
+#else
+#define PER_CPU_CLK    jiffies    /* Lhost, MuC and AuC have different HZ */
+#endif
+
+#endif /* QTN_SEM_TRACE */
+
+#if defined(AUC_BUILD)
+#define PER_CPU_PRINTK                auc_os_printf
+#elif defined(MUC_BUILD)
+#define PER_CPU_PRINTK                uc_printk
+#elif !defined(DSP_BUILD)
+#define PER_CPU_PRINTK                printk
+#endif
+
+#define	HAL_REG_READ_RAW(_r)	(uint32_t)(*((volatile uint32_t *)(_r)))
+
+#if defined(CONFIG_RUBY_PCIE_TARGET)
+/*
+ * Trashed L2M_SEM_REG(0xE0000094) will lead to semaphore deadlock in Muc.
+ * We still don't know who/where/why the register been trashed.
+ * Propose a workaround as follows:
+ * Record a software copy of the L2M and M2L semaphore register, and set/clear/update
+ * the register(L2M and M2L) according to it's software copy.
+ */
+#define	CONFIG_PCIE_TARGET_SEM_TRASHED_WORKAROUND	(1)
+#define	QTN_SYNC_MAX_RW_CHECK_NUM					(10000)
+#endif
+
+#define QTN_ALL_SOC_CPU	(QTN_LHOST_SOC_CPU | QTN_MUC_SOC_CPU | QTN_DSP_SOC_CPU)
+#define QTN_MULTI_PROCESS_TQE_SEMA		0xf
+#define QTN_MULTI_PROCESSOR_SEMA_KEY_SHIFT	28
+
+/*
+ * This multi-processor semaphore register supports up to 7 semaphores,
+ * which is implemented by dedicated flops, not memory. Reading them is as slow or even slower
+ * than reading SRAM.
+ * Currently, the first semaphore is used for TQE and the other 6 semaphores are unused.
+ * However enabling other semaphores could introduce more wait cycles to each other.
+ * The semaphore lock process are:
+ * 1. Try to lock with write CPUID | CPUID << 24 to semaphore register.
+ * 2. Return immediately if successfully lock with passing read verify, otherwise step 3.
+ * 3. Read semaphore and wait for free to lock, then step 1 or timeout with a failure.
+ */
+RUBY_INLINE int
+_qtn_mproc_3way_tqe_sem_down(enum topaz_mproc_tqe_sem_id cpuid)
+{
+	const uint32_t  set_value = ((cpuid << QTN_MULTI_PROCESSOR_SEMA_KEY_SHIFT) | cpuid);
+	uint32_t        sem_set_cnt = 0;
+
+	do {
+		/*
+		 * The semaphore bits [3:0] can be set successfully only when it is unset or already
+		 * owned by current cpuid, otherwise the write has no effect.
+		 */
+		qtn_mproc_sync_mem_write(TOPAZ_MPROC_SEMA, set_value);
+		if ((qtn_mproc_sync_mem_read(TOPAZ_MPROC_SEMA) &
+				QTN_MULTI_PROCESS_TQE_SEMA) == cpuid) {
+			return 1;
+		}
+	} while (++sem_set_cnt < TQE_SEMA_GET_MAX);
+
+	return 0;
+}
+
+/*
+ * Returns 1 on success.
+ * Returns 0 if the processor did not hold the semaphore.
+ */
+RUBY_INLINE int
+_qtn_mproc_3way_tqe_sem_up(enum topaz_mproc_tqe_sem_id cpuid)
+{
+	uint32_t	value;
+
+	value = qtn_mproc_sync_mem_read(TOPAZ_MPROC_SEMA);
+	value &= QTN_MULTI_PROCESS_TQE_SEMA;
+	if (value != cpuid)
+		return 0;
+	/* Write current ID back to release HW semaphore */
+	qtn_mproc_sync_mem_write(TOPAZ_MPROC_SEMA, value << QTN_MULTI_PROCESSOR_SEMA_KEY_SHIFT);
+
+	return 1;
+}
+
+RUBY_INLINE void
+__qtn_mproc_refcnt_inc(volatile u_int32_t *refcnt)
+{
+	*refcnt = *refcnt + 1;
+	qtn_addr_wmb(qtn_mproc_sync_addr(refcnt));
+}
+
+RUBY_INLINE void
+__qtn_mproc_refcnt_dec(volatile u_int32_t *refcnt)
+{
+	u_int32_t tmp = *refcnt;
+	if (tmp == 0) {
+		qtn_mproc_sync_log("ref counter dec broken");
+	} else {
+		*refcnt = tmp - 1;
+		qtn_addr_wmb(qtn_mproc_sync_addr(refcnt));
+	}
+}
+
+RUBY_INLINE u_int32_t
+__qtn_mproc_sync_hw_sem1_addr(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set)
+{
+	switch (current_cpu)
+	{
+	case QTN_LHOST_SOC_CPU:
+		if (QTN_DSP_SOC_CPU & peer_cpu_set) {
+			return RUBY_SYS_CTL_L2D_SEM;
+		}
+		break;
+
+	case QTN_MUC_SOC_CPU:
+		if (QTN_DSP_SOC_CPU & peer_cpu_set) {
+			return RUBY_SYS_CTL_M2D_SEM;
+		}
+		break;
+
+	case QTN_DSP_SOC_CPU:
+		if (QTN_MUC_SOC_CPU & peer_cpu_set) {
+			return RUBY_SYS_CTL_D2M_SEM;
+		}
+		break;
+	}
+
+	return RUBY_BAD_BUS_ADDR;
+}
+
+RUBY_INLINE u_int32_t
+__qtn_mproc_sync_hw_sem2_addr(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set)
+{
+	switch (current_cpu)
+	{
+	case QTN_LHOST_SOC_CPU:
+		if (QTN_MUC_SOC_CPU & peer_cpu_set) {
+			return RUBY_SYS_CTL_L2M_SEM;
+		}
+		break;
+
+	case QTN_MUC_SOC_CPU:
+		if (QTN_LHOST_SOC_CPU & peer_cpu_set) {
+			return RUBY_SYS_CTL_M2L_SEM;
+		}
+		break;
+
+	case QTN_DSP_SOC_CPU:
+		if (QTN_LHOST_SOC_CPU & peer_cpu_set) {
+			return RUBY_SYS_CTL_D2L_SEM;
+		}
+		break;
+	}
+
+	return RUBY_BAD_BUS_ADDR;
+}
+
+RUBY_INLINE u_int32_t
+__qtn_mproc_sync_hw_sem_bit(u_int32_t which_sem)
+{
+	return (1 << which_sem);
+}
+
+RUBY_INLINE int
+__qtn_mproc_sync_set_hw_sem(u_int32_t sem_addr, u_int32_t which_sem)
+{
+	int ret = 0;
+#if defined(CONFIG_PCIE_TARGET_SEM_TRASHED_WORKAROUND)
+	/* check counter */
+	int check_counter = 0;
+#endif
+
+	if (sem_addr == RUBY_BAD_BUS_ADDR) {
+		ret = 1;
+	} else {
+		u_int32_t sem_bit = __qtn_mproc_sync_hw_sem_bit(which_sem);
+		u_int32_t sem_val = qtn_mproc_sync_mem_read(sem_addr);
+
+#if defined(CONFIG_PCIE_TARGET_SEM_TRASHED_WORKAROUND)
+		while (sem_val != qtn_mproc_sync_mem_read(sem_addr)) {
+			if(++check_counter > QTN_SYNC_MAX_RW_CHECK_NUM) {
+				qtn_mproc_sync_log("__qtn_mproc_sync_set_hw_sem: read semaphore mismatch...");
+				return ret;
+			} else {
+				sem_val = qtn_mproc_sync_mem_read(sem_addr);
+			}
+		}
+#endif	/* CONFIG_PCIE_TARGET_SEM_TRASHED_WORKAROUND */
+		sem_val |= sem_bit;
+
+		if (qtn_mproc_sync_mem_write_wmb(sem_addr, sem_val) & sem_bit) {
+			ret = 1;
+		}
+	}
+	return ret;
+}
+
+RUBY_INLINE void
+__qtn_mproc_sync_clear_hw_sem(u_int32_t sem_addr, u_int32_t which_sem)
+{
+#if defined(CONFIG_PCIE_TARGET_SEM_TRASHED_WORKAROUND)
+	/* check counter */
+	int check_counter = 0;
+#endif
+
+	if (sem_addr != RUBY_BAD_BUS_ADDR) {
+		u_int32_t sem_bit = __qtn_mproc_sync_hw_sem_bit(which_sem);
+		u_int32_t sem_val = qtn_mproc_sync_mem_read(sem_addr);
+
+#if defined(CONFIG_PCIE_TARGET_SEM_TRASHED_WORKAROUND)
+		while (sem_val != qtn_mproc_sync_mem_read(sem_addr)) {
+			if(++check_counter > QTN_SYNC_MAX_RW_CHECK_NUM) {
+				check_counter = 0;
+				qtn_mproc_sync_log("__qtn_mproc_sync_clear_hw_sem: read semaphore mismatch...");
+			}
+			sem_val = qtn_mproc_sync_mem_read(sem_addr);
+		}
+#endif	/* CONFIG_PCIE_TARGET_SEM_TRASHED_WORKAROUND */
+		sem_val &= ~sem_bit;
+
+		qtn_mproc_sync_mem_write_wmb(sem_addr, sem_val);
+	}
+}
+
+RUBY_INLINE int
+__qtn_mproc_sync_spin_try_lock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set,
+	u_int32_t which_sem, unsigned long *flags, uint32_t *fail_sem)
+{
+	u_int32_t sem1_addr = __qtn_mproc_sync_hw_sem1_addr(current_cpu, peer_cpu_set);
+	u_int32_t sem2_addr = __qtn_mproc_sync_hw_sem2_addr(current_cpu, peer_cpu_set);
+
+	local_irq_save(*flags);
+
+	if(!__qtn_mproc_sync_set_hw_sem(sem1_addr, which_sem)) {
+		*fail_sem = sem1_addr;
+		goto unlock1;
+	}
+
+	if(!__qtn_mproc_sync_set_hw_sem(sem2_addr, which_sem)) {
+		*fail_sem = sem2_addr;
+		goto unlock2;
+	}
+
+	return 1;
+
+unlock2:
+	__qtn_mproc_sync_clear_hw_sem(sem1_addr, which_sem);
+unlock1:
+	local_irq_restore(*flags);
+	return 0;
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_spin_lock_reg_dump(void)
+{
+#if !defined(DSP_BUILD)
+	uint32_t reg;
+
+	PER_CPU_PRINTK("Dump semaphore registers:\n");
+	for (reg = RUBY_SYS_CTL_L2M_SEM; reg <= RUBY_SYS_CTL_D2M_SEM; reg += 4) {
+		PER_CPU_PRINTK("reg 0x%08x=0x%08x\n", reg, HAL_REG_READ_RAW(reg));
+	}
+#endif
+}
+
+#if QTN_SEM_TRACE
+RUBY_INLINE struct qtn_sem_trace_log *qtn_mproc_sync_get_sem_trace_log(void)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_sem_trace_log *log;
+
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+	log = sp->sem_trace_log_bus;
+#else
+	log = sp->sem_trace_log_lhost;
+#endif
+	return log;
+}
+
+RUBY_INLINE void qtn_sem_trace_log(int state, char *caller0_file, int caller0_line, char *caller1_file, int caller1_line)
+{
+#if !defined(DSP_BUILD) || (QTN_SEM_TRACE_DSP)
+	struct qtn_sem_trace_log *log = qtn_mproc_sync_get_sem_trace_log();
+#if defined(MUC_BUILD)
+	int cpu = SEM_TRACE_CPU_MUC;
+#elif defined(DSP_BUILD)
+	int cpu = SEM_TRACE_CPU_DSP;
+#elif defined(AUC_BUILD)
+	int cpu = SEM_TRACE_CPU_AUC;
+#else
+	int cpu = SEM_TRACE_CPU_LHOST;
+#endif
+	int idx;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	idx = log->trace_idx[cpu];
+
+	log->traces[cpu][idx].pos = log->trace_pos[cpu];
+	log->traces[cpu][idx].jiffies = PER_CPU_CLK;
+	log->traces[cpu][idx].state = state;
+	log->traces[cpu][idx].caller_file[0] = (unsigned int)caller0_file;
+	log->traces[cpu][idx].caller_line[0] = caller0_line;
+	log->traces[cpu][idx].caller_file[1] = (unsigned int)caller1_file;
+	log->traces[cpu][idx].caller_line[1] = caller1_line;
+	log->trace_pos[cpu]++;
+	log->trace_idx[cpu] = (log->trace_pos[cpu]) % QTN_SEM_TRACE_NUM;
+
+	local_irq_restore(flags);
+#endif
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_spin_lock_log_dump(void)
+{
+#if !defined(DSP_BUILD)
+	struct qtn_sem_trace_log *log = qtn_mproc_sync_get_sem_trace_log();
+	int i, j, idx;
+	struct qtn_sem_trace_entry *e;
+	unsigned int file[QTN_SEM_TRACE_DEPTH];
+
+	PER_CPU_PRINTK("Dump semaphore trace log at jiffies=%u\n", (unsigned int)jiffies);
+	for (idx = 0; idx < SEM_TRACE_CPU_NUM; idx++) {
+#if !QTN_SEM_TRACE_DSP
+		if (idx == SEM_TRACE_CPU_DSP) {
+			PER_CPU_PRINTK("CPU %d semaphore trace log is not available in this build\n", idx);
+			continue;
+		}
+#endif
+		PER_CPU_PRINTK("CPU %d semaphore trace log: pos=%u, last_dump_pos=%u\n",
+				idx, log->trace_pos[idx], log->last_dump_pos[idx]);
+		for (i = 0; i < QTN_SEM_TRACE_NUM; i++) {
+			e = &log->traces[idx][i];
+			for (j = 0; j < QTN_SEM_TRACE_DEPTH; j++) {
+				file[j] = 0;
+				if (e->caller_file[j]) {
+					file[j] = e->caller_file[j];
+#if defined(MUC_BUILD)
+					if (idx != SEM_TRACE_CPU_MUC) {
+						/* have no reliable way to convert lhost/dsp/auc string addr to muc */
+						file[j] = 0;
+					}
+#elif defined(AUC_BUILD)
+					if (idx != SEM_TRACE_CPU_AUC) {
+						/* have no reliable way to convert lhost/dsp/muc string addr to auc */
+						file[j] = 0;
+					}
+#else
+					/* lhost */
+					if (idx != SEM_TRACE_CPU_LHOST) {
+						file[j] = (unsigned int)bus_to_virt(file[j]);
+					}
+#endif
+				}
+			}
+			PER_CPU_PRINTK("%d pos=%u, jiffies=%u_%u, state=%u, "
+					"caller0=0x%x %s %d, caller1=0x%x %s %d\n",
+					i, e->pos, U64_HIGH32(e->jiffies), U64_LOW32(e->jiffies),
+					e->state,
+					(unsigned int)e->caller_file[0],
+					file[0] ? (char*)file[0] : "N/A",
+					e->caller_line[0],
+					(unsigned int)e->caller_file[1],
+					file[1] ? (char*)file[1] : "N/A",
+					e->caller_line[1]
+					);
+		}
+		log->last_dump_pos[idx] = log->trace_pos[idx];
+	}
+#endif
+}
+#endif /* QTN_SEM_TRACE */
+
+RUBY_INLINE int
+__qtn_mproc_sync_spin_lock_wait(QTN_SOC_CPU current_cpu)
+{
+	int wait_shift = 0;
+	u_int32_t pm_lock_addr = qtn_mproc_sync_addr(&qtn_mproc_sync_shared_params_get()->pm_duty_lock);
+	int i;
+
+	if (unlikely(qtn_mproc_sync_mem_read(pm_lock_addr))) {
+		wait_shift = 2;
+	}
+
+	for (i = 0; i < (10 << (wait_shift + current_cpu)); ++i) {
+		qtn_pipeline_drain();
+	}
+
+	return wait_shift;
+}
+
+#if QTN_SEM_TRACE
+#define __qtn_mproc_sync_spin_lock(_cpu, _peer, _sem, _flags)  \
+	__qtn_mproc_sync_spin_lock_dbg(_cpu, _peer, _sem, _flags, __FILE__, __LINE__, caller, caller_line)
+
+RUBY_INLINE void
+__qtn_mproc_sync_spin_lock_dbg(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set,
+	u_int32_t which_sem, unsigned long *flags,
+	char *caller0_file, int caller0_line,
+	char *caller1_file, int caller1_line)
+#else
+RUBY_INLINE void
+__qtn_mproc_sync_spin_lock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set,
+	u_int32_t which_sem, unsigned long *flags)
+#endif
+{
+	/* Help to detect lockups */
+	unsigned log_counter = 0;
+	unsigned log_max_counter = 10000;
+#define LOG_MAX_COUNTER_PANIC    320000
+	int log_success = 0;
+	uint32_t fail_sem = 0;
+	int dumped =0;
+#ifdef __KERNEL__
+	unsigned long timeout_jiff;
+#endif
+
+#if QTN_SEM_TRACE
+	qtn_sem_trace_log(QTN_SEM_STARTLOCK, caller0_file, caller0_line, caller1_file, caller1_line);
+#endif
+
+	/*
+	 * We have 3 interlocked hw semaphores to be used for mutual exclusion in 3 CPU pairs.
+	 * Doesn't matter which semaphore be locked first and which second,
+	 * we can easily enter a dead-lock state.
+	 * To prevent dead-locking let's rollback if locking of whole set of 3 mutexes is failed
+	 * at any stage.
+	 * Also let's add per-processor delays after failed locking, so in case of collision
+	 * it will be resolved faster.
+	 *
+	 * I think, that hw design of hw interlocked semaphores is not very lucky.
+	 * It would be much better if we have 3 registers, 1 per CPU.
+	 * And all 3 (not 2 as now) registers be interlocked.
+	 */
+	while (!__qtn_mproc_sync_spin_try_lock(current_cpu, peer_cpu_set, which_sem, flags, &fail_sem)) {
+		unsigned int i;
+		for (i = 0; i < 10 * (current_cpu + 1); ++i) {
+			qtn_pipeline_drain();
+		}
+		if(unlikely(!__qtn_mproc_sync_spin_lock_wait(current_cpu) &&
+				(++log_counter >= log_max_counter))) {
+			log_counter = 0;
+			log_max_counter = (log_max_counter << 1);
+			if (unlikely(!log_max_counter)) {
+				log_max_counter = 1;
+			}
+			qtn_mproc_sync_log("qtn_mproc_sync: waiting for semaphore ...");
+			if ((log_max_counter >= LOG_MAX_COUNTER_PANIC) && (!dumped)) {
+				/* Don't make false alert for PM/COC feature */
+#if QTN_SEM_TRACE
+				qtn_mproc_sync_spin_lock_log_dump();
+#endif
+				qtn_mproc_sync_spin_lock_reg_dump();
+#ifdef __KERNEL__
+				timeout_jiff = jiffies + QTN_MPROC_TIMEOUT;
+				while (time_before(jiffies, timeout_jiff)) {
+					schedule();
+				}
+
+				panic("Semaphore hang detected at clk %u: cpu=%x peer=%x sem=%x flags=%x fail_sem=%x\n",
+					(unsigned int)jiffies, current_cpu, peer_cpu_set, which_sem,
+					(unsigned int)*flags, fail_sem);
+#endif
+				dumped = 1;
+			}
+			log_success = 1;
+		}
+	}
+#if QTN_SEM_TRACE
+	qtn_sem_trace_log(QTN_SEM_LOCKED, caller0_file, caller0_line, caller1_file, caller1_line);
+#endif
+	if (unlikely(log_success)) {
+		qtn_mproc_sync_log("qtn_mproc_sync: wait succeeded");
+	}
+}
+
+#if QTN_SEM_TRACE
+#define __qtn_mproc_sync_spin_unlock(_cpu, _peer, _sem, _flags)  \
+	__qtn_mproc_sync_spin_unlock_dbg(_cpu, _peer, _sem, _flags, __FILE__, __LINE__, caller, caller_line)
+
+RUBY_INLINE void
+__qtn_mproc_sync_spin_unlock_dbg(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set,
+	u_int32_t which_sem, unsigned long *flags,
+	char *caller0_file, int caller0_line,
+	char *caller1_file, int caller1_line)
+#else
+RUBY_INLINE void
+__qtn_mproc_sync_spin_unlock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set,
+	u_int32_t which_sem, unsigned long *flags)
+#endif
+{
+	/* Caller must ensure that it hold spinlock. */
+
+	__qtn_mproc_sync_clear_hw_sem(
+		__qtn_mproc_sync_hw_sem2_addr(current_cpu, peer_cpu_set),
+		which_sem);
+	__qtn_mproc_sync_clear_hw_sem(
+		__qtn_mproc_sync_hw_sem1_addr(current_cpu, peer_cpu_set),
+		which_sem);
+
+#if QTN_SEM_TRACE
+	qtn_sem_trace_log(QTN_SEM_UNLOCKED, caller0_file, caller0_line, caller1_file, caller1_line);
+#endif
+
+	local_irq_restore(*flags);
+}
+
+RUBY_INLINE int
+qtn_mproc_sync_set_hw_sem(u_int32_t sem_addr, u_int32_t which_sem)
+{
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	ret = __qtn_mproc_sync_set_hw_sem(sem_addr, which_sem);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_clear_hw_sem(u_int32_t sem_addr, u_int32_t which_sem)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__qtn_mproc_sync_clear_hw_sem(sem_addr, which_sem);
+	local_irq_restore(flags);
+}
+
+
+/*
+ * Try lock interprocessor spinlock. Spinlock is not recursive.
+ */
+RUBY_WEAK(qtn_mproc_sync_spin_try_lock) int
+qtn_mproc_sync_spin_try_lock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set, u_int32_t which_sem)
+{
+	unsigned long flags;
+	uint32_t fail_sem;
+	if (__qtn_mproc_sync_spin_try_lock(current_cpu, peer_cpu_set, which_sem, &flags, &fail_sem)) {
+		local_irq_restore(flags);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Lock interprocessor spinlock. Spinlock is not recursive.
+ */
+#if QTN_SEM_TRACE
+RUBY_WEAK(qtn_mproc_sync_spin_lock) void
+qtn_mproc_sync_spin_lock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set, u_int32_t which_sem, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_mproc_sync_spin_lock) void
+qtn_mproc_sync_spin_lock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set, u_int32_t which_sem)
+#endif
+{
+	unsigned long flags;
+	__qtn_mproc_sync_spin_lock(current_cpu, peer_cpu_set, which_sem, &flags);
+	local_irq_restore(flags);
+}
+
+/*
+ * Unlock interprocessor spinlock. Spinlock is not recursive.
+ */
+#if QTN_SEM_TRACE
+RUBY_WEAK(qtn_mproc_sync_spin_unlock) void
+qtn_mproc_sync_spin_unlock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set, u_int32_t which_sem, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_mproc_sync_spin_unlock) void
+qtn_mproc_sync_spin_unlock(QTN_SOC_CPU current_cpu, QTN_SOC_CPU peer_cpu_set, u_int32_t which_sem)
+#endif
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	__qtn_mproc_sync_spin_unlock(current_cpu, peer_cpu_set, which_sem, &flags);
+}
+
+RUBY_INLINE volatile u_int32_t*
+qtn_mproc_sync_irq_fixup_data(u_int32_t irq_reg)
+{
+#if CONFIG_RUBY_BROKEN_IPC_IRQS
+	if (irq_reg == RUBY_SYS_CTL_M2L_INT) {
+		return &qtn_mproc_sync_shared_params_get()->m2l_irq[0];
+	}
+#endif
+	return RUBY_BAD_VIRT_ADDR;
+}
+
+RUBY_INLINE volatile u_int32_t*
+qtn_mproc_sync_irq_fixup_data_ack(u_int32_t irq_reg)
+{
+#if !defined(MUC_BUILD) && !defined(DSP_BUILD)
+	return qtn_mproc_sync_irq_fixup_data(irq_reg);
+#else
+	return RUBY_BAD_VIRT_ADDR;
+#endif
+}
+
+RUBY_INLINE volatile u_int32_t*
+qtn_mproc_sync_irq_fixup_data_trigger(u_int32_t irq_reg)
+{
+#if defined(MUC_BUILD)
+	return qtn_mproc_sync_irq_fixup_data(irq_reg);
+#else
+	return RUBY_BAD_VIRT_ADDR;
+#endif
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_irq_trigger(u_int32_t irq_reg, u_int32_t irqno)
+{
+	u_int32_t req = (1 << (irqno + 16)) | (1 << irqno);
+	qtn_mproc_sync_mem_write_wmb(irq_reg, req);
+}
+
+RUBY_INLINE u_int32_t
+qtn_mproc_sync_irq_ack_nolock(u_int32_t irq_reg, u_int32_t mask)
+{
+	u_int32_t status = qtn_mproc_sync_mem_read(irq_reg) & (mask << 16);
+	u_int32_t ret = (status >> 16);
+	if (likely(ret)) {
+		qtn_mproc_sync_mem_write_wmb(irq_reg, status & 0xFFFF0000);
+	}
+	return ret;
+}
+
+RUBY_INLINE u_int32_t
+qtn_mproc_sync_irq_ack(u_int32_t irq_reg, u_int32_t mask)
+{
+	return qtn_mproc_sync_irq_ack_nolock(irq_reg, mask);
+}
+
+RUBY_INLINE u_int32_t
+qtn_mproc_sync_irq_ack_all(u_int32_t irq_reg)
+{
+	return qtn_mproc_sync_irq_ack(irq_reg, 0xFFFFFFFF);
+}
+
+#endif // #ifndef __ASSEMBLY__
+
+#endif // #ifndef __QTN_MPROC_SYNC_H
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync_base.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync_base.h
new file mode 100644
index 0000000..3c5a5c9
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync_base.h
@@ -0,0 +1,167 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_MPROC_SYNC_BASE_H
+#define __QTN_MPROC_SYNC_BASE_H
+
+#ifndef __ASSEMBLY__
+
+#include "../common/ruby_mem.h"
+#include "../common/topaz_platform.h"
+
+/*
+ * Functions from this module use local_irq_save()/local_irq_restore()
+ * as synchronization primitives within CPU.
+ * This works only for uniprocessor systems.
+ * If we would ever use SMP inside our SoC, we must
+ * switch to something like spin_lock_irqsave()/spin_lock_irqrestore().
+ */
+#if defined(MUC_BUILD) || defined(DSP_BUILD)
+	#include "os/os_arch_arc.h"
+	#define local_irq_save(_flags)		do { (_flags) = _save_disable_all(); } while(0)
+	#define local_irq_restore(_flags)	do { _restore_enable((_flags)); } while(0)
+#elif defined(AUC_BUILD)
+	/* AuC now has no user-defined ISR. No need to synchronize. */
+	#define local_irq_save(_flags)		do { (void)_flags; } while(0)
+	#define local_irq_restore(_flags)	do { (void)_flags; } while(0)
+#else
+	/* Linux target. Functions defined here already. */
+#endif // #if defined(MUC_BUILD) || defined(DSP_BUILD)
+
+
+RUBY_INLINE void
+qtn_mproc_sync_log(const char *msg)	/* log 'msg' with a prefix naming the CPU this build targets */
+{
+#if defined(MUC_BUILD)
+	extern int uc_printk(const char *fmt, ...);
+	uc_printk("MuC: %s\n", msg);
+#elif defined(DSP_BUILD)
+	#ifdef DSP_DEBUG	/* DSP logging is compiled out unless DSP_DEBUG is set */
+	extern void dsp_serial_puts(const char *str);
+	dsp_serial_puts("DSP: ");
+	dsp_serial_puts(msg);
+	dsp_serial_puts("\n");
+	#endif
+#elif defined(AUC_BUILD)
+	extern int auc_os_printf(const char *fmt, ...);
+	auc_os_printf("AuC: %s\n", msg);
+#elif defined(ARCSHELL)	/* ARC shell build: logging intentionally disabled */
+#elif defined(__KERNEL__) && !defined(UBOOT_BUILD)
+	/* Linux target */
+	printk(KERN_INFO"LHOST: %s : %s : %s\n", KBUILD_MODNAME, KBUILD_BASENAME, msg);
+#else
+	printf("LHOST: %s\n", msg);	/* userspace / U-Boot fallback */
+#endif // #if defined(MUC_BUILD)
+}
+
+RUBY_INLINE void*
+qtn_mproc_sync_nocache(void *ptr)	/* MuC: translate 'ptr' to its non-cached alias; identity on other CPUs */
+{
+#if defined(MUC_BUILD)
+	return muc_to_nocache(ptr);	/* translation helper from the MuC platform headers */
+#else
+	return ptr;	/* other builds access the address unmodified */
+#endif
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_mem_write_16(u_int32_t addr, u_int16_t val)	/* 16-bit store to a raw bus address */
+{
+	/*
+	 * Rely on fact that this operation is atomic,
+	 * that single bus transaction handles write.
+	 */
+	*((volatile u_int16_t*)addr) = val;
+}
+
+RUBY_INLINE u_int16_t
+qtn_mproc_sync_mem_read_16(u_int32_t addr)	/* 16-bit load from a raw bus address */
+{
+	/*
+	 * Rely on fact that this operation is atomic,
+	 * that single bus transaction handles read.
+	 */
+	return *((volatile u_int16_t*)addr);
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_mem_write(u_int32_t addr, u_int32_t val)	/* 32-bit store to a raw bus address */
+{
+	/*
+	 * Rely on fact that this operation is atomic,
+	 * that single bus transaction handles write.
+	 */
+	*((volatile u_int32_t*)addr) = val;
+}
+
+RUBY_INLINE u_int32_t
+qtn_mproc_sync_mem_read(u_int32_t addr)	/* 32-bit load from a raw bus address */
+{
+	/*
+	 * Rely on fact that this operation is atomic,
+	 * that single bus transaction handles read.
+	 */
+	return *((volatile u_int32_t*)addr);
+}
+
+RUBY_INLINE u_int32_t
+qtn_mproc_sync_mem_write_wmb(u_int32_t addr, u_int32_t val)	/* write then barrier; return value comes from qtn_addr_wmb() */
+{
+	qtn_mproc_sync_mem_write(addr, val);
+	return qtn_addr_wmb(addr);	/* write memory barrier helper (see ruby_mem.h) */
+}
+
+RUBY_INLINE u_int32_t
+qtn_mproc_sync_addr(volatile void *addr)	/* collapse a pointer to the raw 32-bit address form used by the mem accessors */
+{
+	return (u_int32_t)addr;
+}
+
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+	RUBY_INLINE struct shared_params*
+	qtn_mproc_sync_shared_params_get(void)	/* embedded CPUs: fetch the pointer published in RUBY_SYS_CTL_SPARE */
+	{
+		return (struct shared_params*)qtn_mproc_sync_nocache((void*)
+			qtn_mproc_sync_mem_read(RUBY_SYS_CTL_SPARE));	/* translated through the nocache mapping (effective on MuC) */
+	}
+#else
+	extern struct shared_params *soc_shared_params;
+
+	RUBY_INLINE struct shared_params*
+	qtn_mproc_sync_shared_params_get(void)	/* Linux: use the locally cached pointer */
+	{
+		return soc_shared_params;
+	}
+
+	/* Has to be used by Linux only */
+	RUBY_INLINE void
+	qtn_mproc_sync_shared_params_set(struct shared_params *params)	/* publish the pointer for the other CPUs via the spare register */
+	{
+		qtn_mproc_sync_mem_write_wmb(RUBY_SYS_CTL_SPARE, (u_int32_t)params);
+	}
+#endif // #if defined(MUC_BUILD) || defined(DSP_BUILD)
+
+#endif // #ifndef __ASSEMBLY__
+
+#endif // #ifndef __QTN_MPROC_SYNC_BASE_H
+
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync_mutex.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync_mutex.h
new file mode 100644
index 0000000..83c2473
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/mproc_sync_mutex.h
@@ -0,0 +1,149 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * This is an implementation of Peterson's algorithm for mutual exclusion between 2 processes.
+ * Currently implemented for little-endian systems only.
+ *
+ *		//flag[] is boolean array; and turn is an integer
+ *		flag[0]   = false;
+ *		flag[1]   = false;
+ *		turn;
+ *
+ *		P0: flag[0] = true;
+ *		turn = 1;
+ *		while (flag[1] == true && turn == 1)
+ *		{
+ *			// busy wait
+ *		}
+ *		// critical section
+ *		...
+ *		// end of critical section
+ *		flag[0] = false;
+ *
+ *		P1: flag[1] = true;
+ *		turn = 0;
+ *		while (flag[0] == true && turn == 0)
+ *		{
+ *			// busy wait
+ *		}
+ *		// critical section
+ *		...
+ *		// end of critical section
+ *		flag[1] = false;
+ *
+ */
+
+#ifndef __QTN_MPROC_SYNC_MUTEX_H
+#define __QTN_MPROC_SYNC_MUTEX_H
+
+#include "mproc_sync_base.h"
+
+#ifndef __ASSEMBLY__
+
+/* Initial value must be zero. */
+typedef union __qtn_mproc_sync_mutex
+{
+	uint32_t dword;		/* whole-word view, used for masked tests in the lock loops */
+	struct
+	{
+		uint16_t raw_w0;	/* bytes 0-1: __reserved0 + flag1 (little-endian layout) */
+		uint16_t raw_w1;	/* bytes 2-3: turn + flag0, writable as a single 16-bit store */
+	} words;
+	struct
+	{
+		uint8_t __reserved0;
+		uint8_t flag1;	/* Peterson "want lock" flag for side 1 */
+		uint8_t turn;	/* Peterson turn variable (0 or 1) */
+		uint8_t flag0;	/* Peterson "want lock" flag for side 0 */
+	} bytes;
+} qtn_mproc_sync_mutex;
+
+RUBY_INLINE void
+qtn_mproc_sync_mutex_init(volatile qtn_mproc_sync_mutex *mutex)	/* reset: both flags and the turn variable cleared */
+{
+	mutex->dword = 0;
+}
+
+#if !defined(__GNUC__) && defined(_ARC)
+_Inline _Asm void
+__qtn_mproc_sync_mutex_relax(int count)	/* MetaWare ARC: busy-loop 'count' iterations in assembly */
+{
+	% reg count;
+	mov_s	%r12, count;
+1:
+	sub.f	%r12, %r12, 1;
+	bnz_s	1b;
+}
+RUBY_INLINE void
+qtn_mproc_sync_mutex_relax(int count)	/* spin-wait backoff used inside the lock loops */
+{
+	if (count) {	/* guard: count==0 would wrap and spin ~2^32 times in the asm loop */
+		__qtn_mproc_sync_mutex_relax(count);
+	}
+}
+#else
+RUBY_INLINE void
+qtn_mproc_sync_mutex_relax(int count)	/* portable backoff: drain the pipeline 'count' times */
+{
+	int i;
+	for (i = 0; i < count; ++i) {
+		qtn_pipeline_drain();
+	}
+}
+#endif // #if !defined(__GNUC__) && defined(_ARC)
+
+RUBY_INLINE void
+qtn_mproc_sync_mutex0_lock(volatile qtn_mproc_sync_mutex *mutex, int relax_count)	/* Peterson lock, side 0 */
+{
+	mutex->words.raw_w1 = 0x0101;	/* single 16-bit store: flag0 = 1 and turn = 1 (little-endian bytes 2-3) */
+
+	while ((mutex->dword & 0x00FFFF00) == 0x00010100) {	/* spin while flag1 == 1 && turn == 1 */
+		qtn_mproc_sync_mutex_relax(relax_count);
+	}
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_mutex0_unlock(volatile qtn_mproc_sync_mutex *mutex)	/* Peterson unlock, side 0: drop our flag */
+{
+	mutex->bytes.flag0 = 0;
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_mutex1_lock(volatile qtn_mproc_sync_mutex *mutex, int relax_count)	/* Peterson lock, side 1 */
+{
+	mutex->bytes.flag1 = 1;
+	mutex->bytes.turn = 0;	/* yield priority to side 0 */
+
+	while (mutex->words.raw_w1 == 0x0100) {	/* spin while flag0 == 1 && turn == 0 (bytes 2-3) */
+		qtn_mproc_sync_mutex_relax(relax_count);
+	}
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_mutex1_unlock(volatile qtn_mproc_sync_mutex *mutex)	/* Peterson unlock, side 1: drop our flag */
+{
+	mutex->bytes.flag1 = 0;
+}
+
+#endif // #ifndef __ASSEMBLY__
+
+#endif // #ifndef __QTN_MPROC_SYNC_MUTEX_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_dbg_parse b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_dbg_parse
new file mode 100755
index 0000000..98174b8
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_dbg_parse
@@ -0,0 +1,79 @@
+#!/usr/bin/perl
+# Generate qtn_muc_stats_print.h: stats-name tables parsed from muc_txrx_stats.h.raw
+$stats_header_file = 'qtn_muc_stats_print.h';	# generated output header
+$dbg_header_file = "muc_txrx_stats.h.raw";	# preprocessed input header
+
+$in_tx_struct=0;	# currently inside struct muc_tx_stats
+$tx_stats_content = "";
+$in_rx_struct=0;	# currently inside struct muc_rx_stats
+$rx_stats_content = "";
+$rxq_num = 0;	# QTN_FW_WMAC_RX_QNUM, parsed below
+
+foreach my $line (`cat muc_share_def.h`) {
+	if ($line =~ /^\#define\s+QTN_FW_WMAC_RX_QNUM\s+(\d+).*$/) {
+		$rxq_num = $1;
+	}
+}
+
+if ($rxq_num == 0) {
+	die("muc_dbg_parse: fail to find macro QTN_FW_WMAC_RX_QNUM\n");
+}
+
+open(HDRFILE,"$dbg_header_file") or die("muc_dbg_parse: cannot open $dbg_header_file: $!\n");
+while (<HDRFILE>) {
+
+	if (/\}/) {	# closing brace ends whichever struct we are inside
+		if ($in_tx_struct == 1) {
+			$tx_stats_content .= "}";
+			$in_tx_struct=0;
+		}
+		if ($in_rx_struct == 1) {
+			$rx_stats_content .= "}";
+			$in_rx_struct=0;
+		}
+	}
+
+	if ($in_tx_struct) {
+		if(/(\w+)\;/){	# scalar field: emit its name as a table entry
+			$strtmp = $1;
+			$tx_stats_content .= "	\"$strtmp\", \\\n";
+		} else {
+			die("muc_dbg_parse: fail to process tx stats item \"$_\"");
+		}
+	}
+
+	if ($in_rx_struct) {
+		if (/(\w+)\;/) {	# scalar field (array fields end in ']', so they fall through)
+			$strtmp = $1;
+			$rx_stats_content .= "	\"$strtmp\", \\\n";
+		} elsif (/(rxq_\w+)\[\w+\]\;/) {	# rxq_* arrays expand to one name per rx queue
+			$strtmp = $1;
+			for (my $i = 0; $i < $rxq_num; $i++) {
+				$rx_stats_content .= "	\"$strtmp"."[$i]\", \\\n";
+			}
+		} else {
+			die("muc_dbg_parse: fail to process rx stats item \"$_\"");
+		}
+	}
+
+	if (/^\s*struct\s+muc_tx_stats\s*\{/) {
+		$in_tx_struct=1;
+		$tx_stats_content .= "#define MUC_TX_STATS_NAMES_TABLE { \\\n";
+	}
+
+	if (/^\s*struct\s+muc_rx_stats\s*\{/) {
+		$in_rx_struct=1;
+		$rx_stats_content .= "#define MUC_RX_STATS_NAMES_TABLE { \\\n";
+	}
+
+}
+close(HDRFILE);
+
+unlink($stats_header_file);
+open(headfile,">>$stats_header_file") or die("muc_dbg_parse: cannot open $stats_header_file: $!\n");
+print headfile "/* autogenerated */\n\n";
+print headfile $tx_stats_content;
+print headfile "\n\n";
+print headfile $rx_stats_content;
+print headfile "\n";
+close(headfile);
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_phy_stats.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_phy_stats.h
new file mode 100644
index 0000000..c460727
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_phy_stats.h
@@ -0,0 +1,360 @@
+#ifndef  _MUC_PHY_STATS_H
+#define _MUC_PHY_STATS_H
+
+#include "../common/ruby_mem.h"
+#include "../common/queue.h"
+
+#define NUM_ANT 4
+#define NUM_LOG_BUFFS 0x8
+
+#define MUC_PHY_STATS_ALTERNATE		0
+#define MUC_PHY_STATS_RSSI_RCPI_ONLY	1
+#define MUC_PHY_STATS_ERROR_SUM_ONLY	2
+
+#define MUC_PHY_ERR_SUM_NOT_AVAIL	0xffffffff
+
+#define MUC_PHY_RSSI_NOT_AVAIL		(-1000)
+
+#include <qtn/muc_txrx_stats.h>
+
+/**
+ * \defgroup PHYSTATS PHY generated statistics
+ */
+/** @{ */
+
+#define QTN_STATS_MCS_SGI	0x40000000
+#define QTN_STATS_MCS_BW40	0x80000000
+
+#define QTN_PHY_STATS_MCS_PHYRATE	0xFFFF0000
+#define QTN_PHY_STATS_MCS_PHYRATE_S	16
+#define QTN_PHY_STATS_MCS_NSS	0xf000
+#define QTN_PHY_STATS_MCS_NSS_S	12
+#define QTN_PHY_STATS_MCS_BW		0xC00
+#define QTN_PHY_STATS_MCS_BW_S	10
+#define QTN_PHY_STATS_MCS_MODE		0x380
+#define QTN_PHY_STATS_MCS_MODE_S	7
+#define QTN_STATS_MCS_RATE_MASK	0x7f
+#define QTN_STATS_MCS_RATE_MASK_S 0
+
+#define QTN_PHY_STATS_MODE_11N	1
+#define QTN_PHY_STATS_MODE_11AC	2
+
+
+#define QTN_PER_NODE_STATS_POOL_SIZE	QTN_NODE_TBL_SIZE_LHOST
+
+#define EVM_TIME_MEAN_SHIFT	4
+#define EVM_TIME_MEAN_COUNT	(1 << EVM_TIME_MEAN_SHIFT)
+#define RSSI_TIME_MEAN_SHIFT	4
+#define RSSI_TIME_MEAN_COUNT	(1 << RSSI_TIME_MEAN_SHIFT)
+#define PHY_RATE_MEAN_SHIFT	4
+#define PHY_RATE_MEAN_COUNT	(1 << PHY_RATE_MEAN_SHIFT)
+
+struct qtn_node_shared_stats_tx {	/* per-node transmit statistics (one instance each for SU and MU) */
+	uint32_t max_queue;
+	/*
+	 * last_mcs are used as bitmask value
+	 * bit[31:16]		rate		(Mbps)
+	 * bit[15:12]		nss		(1~4)
+	 * bit[11:10]		bw		(0 - 20; 1-40; 2-80; 3-unknown)
+	 * bit[9:7]		mode		(1-11n, 2-11ac others-unknown)
+	 * bit[6:0]		mcs		([0,76] for 11n, [0,9] [33,76] for 11ac)
+	 */
+	uint32_t last_mcs;
+	uint32_t last_tx_scale;
+	uint32_t ralg_inv_phy_rate;
+	int32_t avg_rssi_dbm;	/* dBm, per the field name -- TODO confirm scaling */
+	uint32_t cost;
+	uint32_t pkts;
+	uint32_t txdone_failed_cum;	/* cumulative, per the _cum suffix */
+	uint32_t avg_per;
+	uint32_t pkts_per_sec;
+	uint32_t avg_tx_phy_rate;
+	uint32_t acks;
+	uint32_t tx_airtime;
+	uint32_t tx_accum_airtime;
+	/*
+	 * The number of data packets transmitted through
+	 * wireless media for each traffic category(TC).
+	 */
+#define WMM_AC_NUM 4
+	uint32_t tx_sent_data_msdu[WMM_AC_NUM];
+};
+
+struct qtn_node_shared_stats_rx {	/* per-node receive statistics (one instance each for SU and MU) */
+	int32_t last_rssi_dbm[NUM_ANT + 1];	/* NOTE(review): presumably per-antenna plus a combined slot -- confirm */
+	int32_t rssi_dbm_smoothed[NUM_ANT + 1];
+	int32_t last_rcpi_dbm[NUM_ANT + 1];
+	int32_t rcpi_dbm_smoothed[NUM_ANT + 1];
+	int32_t last_evm_dbm[NUM_ANT + 1];
+	int32_t evm_dbm_smoothed[NUM_ANT + 1];
+	int32_t last_hw_noise[NUM_ANT + 1];
+	uint32_t last_rxsym;
+	/*
+	 * last_mcs are used as bitmask value
+	 * bit[31:16]		rate		(Mbps)
+	 * bit[15:12]		nss		(1~4)
+	 * bit[11:10]		bw		(0 - 20; 1-40; 2-80; 3-unknown)
+	 * bit[9:7]		mode		(1-11n, 2-11ac others-unknown)
+	 * bit[6:0]		mcs		([0,76] for 11n, [0,9] [33,76] for 11ac)
+	 */
+	uint32_t last_mcs;
+	uint32_t pkts;
+	uint32_t pkts_cum;	/* cumulative, per the _cum suffix */
+	uint32_t inv_phy_rate_smoothed;
+	uint32_t cost;
+	uint32_t rx_airtime;
+	uint32_t rx_accum_airtime;
+};
+
+/**
+ * Per node values and statistics; updated periodically
+ * with each invocation of qtn_stats_sample, based on
+ * MuC per node stats
+ */
+struct qtn_node_shared_stats {
+	/* 0 for SU, 1 for MU */
+#define STATS_MIN	0
+#define STATS_SU	STATS_MIN
+#define STATS_MU	1
+#define STATS_MAX	2
+	struct qtn_node_shared_stats_tx tx[STATS_MAX];	/* indexed by STATS_SU / STATS_MU */
+	struct qtn_node_shared_stats_rx rx[STATS_MAX];	/* indexed by STATS_SU / STATS_MU */
+	uint64_t beacon_tbtt;
+	uint64_t beacon_tbtt_jiffies;
+	uint64_t last_rx_jiffies;
+	uint64_t dtim_tbtt;
+	uint64_t qtn_tx_bytes;
+	uint64_t qtn_rx_bytes;
+	uint32_t tim_set;
+	uint32_t dtim_set;
+	uint16_t beacon_interval;	/* NOTE(review): presumably in TUs -- confirm against users */
+	uint16_t pad;	/* explicit padding to keep the struct 32-bit aligned */
+};
+
+/**
+ * \brief PHY receive statistics
+ *
+ * These statistics are either read directly from the PHY or are generated
+ * based on PHY inputs (eg, RX vector or other structures).
+ */
+struct qtn_rx_stats {
+	/**
+	 * The count of the number of packets the PHY has received and passed
+	 * up. This is the total of the number of singleton packets (MPDU, MMPDU, control frames),
+	 * plus the total of subframes within AMPDUs, plus the number of AMSDUs which have been
+	 * passed up from the PHY.
+	 *
+	 * \note On BBIC4, it just counts single AMPDU rather than the subframes in AMPDU.
+	 */
+	u_int32_t num_pkts;
+
+	/**
+	 * count of packets with A-MSDU flag set
+	 */
+	u_int32_t num_amsdu;
+
+	/**
+	 * The average RX gain used on the previously received packet
+	 * (MMPDU, MPDU, AMPDU or singleton AMSDU).
+	 */
+	u_int32_t avg_rxgain;
+
+	/**
+	 * The number of packets received by the PHY with invalid CRC.
+	 */
+	u_int32_t cnt_mac_crc;
+
+	/**
+	 * The number of short preamble failures reported by the PHY.
+	 */
+	u_int32_t cnt_sp_fail;
+
+	/**
+	 * The number of long preamble failures reported by the PHY.
+	 */
+	u_int32_t cnt_lp_fail;
+
+	int32_t hw_noise;
+
+	u_int32_t max_init_gain;
+
+	/**
+	 * The current temperature of the system.
+	 */
+	u_int32_t sys_temp;
+
+	/**
+	 * The mode of the last received packet.
+	 * 1 - 11n
+	 * 2 - 11ac
+	 * others - unknown
+	 */
+	u_int32_t last_rx_mode;
+
+	/**
+	 * The bandwidth of the last received packet.
+	 * 0 - 20MHZ
+	 * 1 - 40MHZ
+	 * 2 - 80MHZ
+	 * others - unknown
+	 */
+	u_int32_t last_rx_bw;
+
+	/**
+	 * The MCS index of the last received packet.
+	 */
+	u_int32_t last_rx_mcs;
+
+	/**
+	 * Debug information.
+	 */
+	u_int32_t rx_gain_fields;
+
+	/**
+	 * RSSI / RCPI / EVM for frames
+	 */
+	int32_t last_rssi_evm[NUM_ANT];
+
+	int32_t last_rssi_all;
+
+	u_int32_t last_rxsym;
+};
+
+/**
+ * \brief PHY transmit statistics
+ *
+ * These statistics are either read directly from the PHY or are generated
+ * based on PHY values.
+ */
+struct qtn_tx_stats {
+	/**
+	 * The count of the number of packets (MMPDU, MPDU, AMSDU, and one for each
+	 * subframe in an AMPDU) sent to the PHY.
+	 *
+	 * \note On BBIC4, it just counts single AMPDU rather than the subframes in AMPDU.
+	 */
+	u_int32_t num_pkts;
+
+	/**
+	 * The number of times transmitted packets were deferred due to CCA.
+	 */
+	u_int32_t num_defers;
+
+	/**
+	 * The number of times packets were timed out - spent too long inside the MAC.
+	 */
+	u_int32_t num_timeouts;
+
+	/**
+	 * The number of retries - singleton retransmissions, full AMPDU retransmissions,
+	 * or partial AMPDU retransmissions.
+	 */
+	u_int32_t num_retries;
+
+	/**
+	 * The transmit power scale index used for the last packet
+	 *
+	 * \note On BBIC4, This variable is not available.
+	 */
+	u_int32_t last_tx_scale;
+
+	/**
+	 * The mode of the last transmit packet.
+	 * 1 - 11n
+	 * 2 - 11ac
+	 * others - unknown
+	 */
+	u_int32_t last_tx_mode;
+
+	/**
+	 * The bandwidth of the last transmit packet.
+	 * 0 - 20MHZ
+	 * 1 - 40MHZ
+	 * 2 - 80MHZ
+	 */
+	u_int32_t last_tx_bw;
+
+	/**
+	 * The MCS index of the last acknowledged transmit packet.
+	 */
+	u_int32_t last_tx_mcs;
+
+	/**
+	 * Rate adaptations current best throughput rate
+	 */
+	u_int32_t rate;		/* this field must be last for stat_parser.pl */
+};
+
+/** @} */
+
+struct qtn_stats {	/* one PHY stats sample: SU and MU variants of rx/tx stats */
+	u_int32_t tstamp;	/* sample timestamp */
+	struct qtn_rx_stats rx_phy_stats;	/* SU receive stats */
+	struct qtn_rx_stats mu_rx_phy_stats;	/* MU receive stats */
+	struct qtn_tx_stats tx_phy_stats;	/* SU transmit stats */
+	struct qtn_tx_stats mu_tx_phy_stats;	/* MU transmit stats */
+};
+
+struct qtn_stats_log {	/* ring of recent PHY stats samples plus pointers to MuC-side stats blocks */
+	int curr_buff; /* Index of the buffer with the latest data */
+	struct qtn_stats stat_buffs[NUM_LOG_BUFFS];	/* sample ring, written round-robin */
+	struct muc_rx_stats *rx_muc_stats;
+	struct muc_rx_rates *rx_muc_rates;
+	struct muc_rx_bf_stats *rx_muc_bf_stats;
+	struct muc_tx_stats *tx_muc_stats;
+	struct qtn_rate_tx_stats_per_sec *tx_muc_rates;
+	uint32_t *muc_su_rate_stats_read;
+	uint32_t *muc_mu_rate_stats_read;
+	uint32_t *scs_cnt;
+	uint32_t pad[7]; /* Ensure the pad makes this structure a multiple of ARC cache line size */
+};
+
+/*
+ * Micro stats: provide stats in micro view along the time axis
+ * Can be used for off-channel and other debug purpose.
+ */
+#define QTN_MICRO_STATS_GRANULARITY	1	/* ms, for trace burst in traffic */
+#define QTN_MICRO_STATS_NUM		32	/* enough for max off-channel duration */
+struct qtn_micro_stats {	/* tx/rx MSDU counters for one QTN_MICRO_STATS_GRANULARITY-ms interval */
+	/*
+	 * tx msdu: collected in tx done. With average 1.5ms aggregation timeout, this is accurate
+	 * enough for off-channel use.
+	 */
+	uint32_t tx_msdu;
+	/*
+	 * rx msdu: collected after rx reorder and mpdu decap, and amsdu decap if existing.
+	 * - Delay in rx reorder leads to different instantaneous pkt rate from what is in the
+	 *   air in ms level granularity.
+	 * - If amsdu decap is done in lhost, bbic3 or bbic4 sdp, this value is not correct.
+	 */
+	uint32_t rx_msdu;
+};
+
+struct qtn_micro_stats_log {	/* running counters plus a history of snapshots */
+	struct qtn_micro_stats latest_stats;	/* counters for the interval in progress */
+	/* snapshot */
+	uint32_t curr_idx;	/* most recently written slot in micro_stats[] */
+	struct qtn_micro_stats micro_stats[QTN_MICRO_STATS_NUM];	/* history buffer, indexed by curr_idx */
+};
+
+RUBY_INLINE int qtn_select_rssi_over_error_sums(u_int32_t timestamp, int muc_phy_stats_mode)	/* 1 = sample RSSI/RCPI this interval, 0 = sample error sums */
+{
+	int	retval = 0;
+
+	switch (muc_phy_stats_mode) {
+	case MUC_PHY_STATS_RSSI_RCPI_ONLY:
+		retval = 1;
+		break;
+	case MUC_PHY_STATS_ERROR_SUM_ONLY:
+		retval = 0;
+		break;
+	case MUC_PHY_STATS_ALTERNATE:
+	default:
+		retval = (timestamp & 0x01);	/* alternate between the two on odd/even timestamps */
+		break;
+	}
+
+	return( retval );
+}
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_share_def.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_share_def.h
new file mode 100755
index 0000000..2b4c64d
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_share_def.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2014 Quantenna Communications, Inc.
+ */
+
+#ifndef _MUC_SHARE_DEF_H_
+#define _MUC_SHARE_DEF_H_	/* fixed: was _MUUC_SHARE_DEF_H_, which left the include guard permanently open */
+
+#include "../common/ruby_mem.h"
+
+#define QTN_FW_WMAC_RX_Q_MGMT		0	/* management frame rx queue */
+#define QTN_FW_WMAC_RX_Q_CTRL		1	/* control frame rx queue */
+#define QTN_FW_WMAC_RX_Q_DATA		2	/* data frame rx queue */
+#define QTN_FW_WMAC_RX_QNUM		3	/* queue count; parsed by muc_dbg_parse */
+#define QTN_FW_WMAC_RX_QDEEP_MGMT	9	/* per-queue descriptor depths */
+#define QTN_FW_WMAC_RX_QDEEP_CTRL	9
+#define QTN_FW_WMAC_RX_QDEEP_DATA	394
+#define QTN_FW_WMAC_RX_DESC_NUM	(QTN_FW_WMAC_RX_QDEEP_MGMT + \
+	QTN_FW_WMAC_RX_QDEEP_CTRL + QTN_FW_WMAC_RX_QDEEP_DATA)
+
+#endif // #ifndef _MUC_SHARE_DEF_H_
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_txrx_stats.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_txrx_stats.h
new file mode 100644
index 0000000..5e43739
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/muc_txrx_stats.h
@@ -0,0 +1,794 @@
+/*
+ * Copyright (c) 2008-2012 Quantenna Communications, Inc.
+ */
+
+/*
+ * This file contains host definitions which are common between the
+ * host driver and the microcontroller/MAC code.
+ */
+
+/**
+ * The host tx descriptor for an ethernet packet
+ */
+
+#ifndef _MUC_TXRX_STATS_H_
+#define _MUC_TXRX_STATS_H_
+
+#include <qtn/muc_share_def.h>
+
+#ifdef ENABLE_STATS
+#define MUC_UPDATE_STATS(_a, _b)	(_a += _b)	/* accumulate into a stats counter */
+#define MUC_SETSTAT(_a, _b)		(_a = _b)	/* overwrite a stats value */
+#else
+#define MUC_UPDATE_STATS(_a, _b)	/* stats disabled: both macros compile to nothing */
+#define MUC_SETSTAT(_a, _b)
+#endif
+
+/**
+ * \defgroup MUCSTATS MuC generated statistics
+ */
+/** @{ */
+
+/**
+ * \brief MuC transmit statistics
+ *
+ * These statistics are generated on the MuC, mainly on the transmit datapath.
+ */
+struct muc_tx_stats {
+	/**
+	 * The number of times the software failed to enqueue a beacon to the
+	 * hardware.
+	 *
+	 * \note If this value is non-zero it could indicate a very congested
+	 * medium.
+	 */
+	u_int32_t	bcn_enq_failed;
+
+	/**
+	 * The number of times the TX status bit is set.
+	 */
+	u_int32_t	tx_status_set;
+
+	/**
+	 * The number of interrupts from the host to indicate data is ready for
+	 * transmit.
+	 *
+	 * \note This number will generally be quite low, as the LHost->MuC
+	 * data path is poll driven rather than interrupt driven.
+	 */
+	u_int32_t	host_intr;
+	u_int32_t	tx_reserved;
+	u_int32_t	tx_reserve_fail;
+	u_int32_t	txalert_mu_ndp_update;
+	u_int32_t	txalert_mu_rpt_poll;
+	u_int32_t	txalert_mu_queue_full;
+	u_int32_t	txalert_mu_queue_fail;
+	u_int32_t	sample_rate_mu;
+	u_int32_t	sample_bw_mu;
+	u_int32_t	txdone_intr;
+	u_int32_t	txalert_intr;
+	u_int32_t	txalert_tasklet;
+	u_int32_t	txalert_bcn_update;
+	u_int32_t	txalert_ndp_update;
+	u_int32_t	tx_ndp_q_occupied;
+	u_int32_t	tx_ndp_start;
+	u_int32_t	tx_pwr;
+	u_int32_t	bcn_scheme_power_save;
+	u_int32_t	bcn_scheme;
+
+	u_int32_t	fd_acquire;
+	u_int32_t	fd_release;
+	u_int32_t	fd_acq_fail;
+	u_int32_t	fd_acq_fail_frms;
+	u_int32_t	fd_acq_hal_fail;
+	u_int32_t	fd_acq_hal_fail_frms;
+	u_int32_t	ba_send;
+	u_int32_t	fd_free_nodeclean;
+	u_int32_t	tx_restrict_probe;
+	u_int32_t	tx_restrict_mode;
+	u_int32_t	tx_restrict_delay;
+	u_int32_t	tx_sample_pkts;
+	u_int32_t	tx_sample_bytes;
+	u_int32_t	tx_underflow;
+	u_int32_t	tx_hal_enqueued;
+	u_int32_t	txbf_mode;
+	u_int32_t	psel_matrix;
+	u_int32_t	sample_rate;
+	u_int32_t	sample_bw;
+	uint32_t	ra_flags;
+	u_int32_t	fd_balance;
+	uint32_t	invalid_delay;
+	uint32_t	halt_tx;
+	uint32_t	resume_tx;
+	uint32_t	rfctrl_on;
+	uint32_t	rfctrl_off;
+	uint32_t	go_offchan;
+	uint32_t	go_datachan;
+	uint32_t	defer_cc;
+	uint32_t	deferred_cc_done;
+	uint32_t	off_chan_sample;
+	uint32_t	off_chan_scan;
+	uint32_t	off_chan_cac;
+	uint32_t	cca_pri;
+	uint32_t	cca_sec;
+	uint32_t	cca_sec40;
+	uint32_t	cca_busy;
+	uint32_t	cca_fat;
+	uint32_t	cca_intf;
+	uint32_t	cca_trfc;
+	/**
+	 * These counter show the information of MU frames.
+	 */
+	uint32_t	mu_prec_snd_tx;
+	uint32_t	mu_prec_snd_wait_done;
+	uint32_t	mu_grp_sel_snd_tx;
+	uint32_t	mu_grp_sel_snd_wait_done;
+
+	uint32_t	oc_auctx_timeout;
+	uint32_t	oc_auctx_overwrite;
+	uint32_t	oc_auctx_fail;
+	uint32_t	gi_cnt;			/* times GI has been set for any node */
+	uint32_t	gi_ncidx;		/* last node to have GI set */
+	uint32_t	gi_val;			/* SGI enabled state for this node */
+	uint32_t	select_state_ncidx;	/* last node to have qn_select state set */
+	uint32_t	select_state_val;	/* PPPC state for this node */
+	uint32_t	pppc_scale_cnt;		/* times Tx gain scaling has been set for any node */
+	uint32_t	pppc_scale_ncidx;	/* last node to have Tx gain scaling set */
+	uint32_t	pppc_scale_val;		/* Tx gain scaling for this node (0 is max) */
+	uint32_t	pppc_scale_last_gput;		/* The last goodput used by PPPC */
+	uint32_t	pppc_scale_last_gput_idx;	/* The PPPC index of the last goodput value */
+	uint32_t	pppc_scale_base_cnt;		/* times Tx gain scaling base has been set for any node */
+	uint32_t	pppc_scale_base_20m;	/* Combined tx scale bases for different bf/nss cases in 20MHz */
+	uint32_t	pppc_scale_base_40m;	/* Combined tx scale bases for different bf/nss cases in 40MHz */
+	uint32_t	pppc_scale_base_80m;	/* Combined tx scale bases for different bf/nss cases in 80MHz */
+	uint32_t	pppc_scale_base_copy;	/* combined the flags indicating the tx scale bases are copied bfoff 1ss cases */
+	uint32_t	pppc_scale_overstep;	/* tx scale exceed the maximum scale indices */
+	uint32_t	pppc_scale_rollback;	/* tx scale roll back because scale index over step */
+	uint32_t	pppc_0_gput;		/* times pppc comparing goodput and both are zero */
+	uint32_t	tx_max_power;
+	uint32_t	nc_csr_read_count;	/* number of times Node Cache was read */
+	uint32_t	nc_csr_write_count;	/* number of times Node Cache was written to */
+	uint32_t	nc_csr_done_watermark;	/* Node cache done retries high watermark */
+	uint32_t	nc_csr_watermark_count; /* Number of times read retries reached max */
+	uint32_t	auc_dtim_notify;
+	uint32_t	auc_ps_notify;
+	uint32_t	tx_beacon_done;
+	uint32_t	sfs_peer_rts;
+	uint32_t	sfs_peer_rts_flags;
+	uint32_t	sfs_local_rts;
+	uint32_t	sfs_local_rts_flags;
+	uint32_t	sfs_dyn_wmm;
+	uint32_t	sfs_dyn_wmm_flags;
+	uint32_t	auc_wmm_ps_notify;
+	uint32_t	tx_wmm_ps_null_frames;
+	uint32_t	qtn_bcn_stop;
+	uint32_t	mu_grp_snd_queue_is_not_empty;
+	uint32_t	mu_prec_snd_queue_is_not_empty;
+	uint32_t	mu_group_delete;
+	uint32_t	mu_group_install;
+	uint32_t	mu_group_rate_node_updates;
+	uint32_t	mu_update_rates_mu;
+	uint32_t	mu_update_rates_su;
+	uint32_t	autocs_sample_bits;
+	uint32_t	autocs_adjust_bits;
+	uint32_t	autocs_step_size;
+	uint32_t	autocs_cs_thresh;
+	uint32_t	autocs_min_rssi;
+	uint32_t	bmps_null_tx_success;
+	uint32_t	bmps_null_tx_fail;
+	uint32_t	bmps_null_tx_timeout;
+	uint32_t	txqueue_g1q0_deadline_frozen;	/* beacon deadline register frozen counter */
+	uint32_t	auc_ipc_retry;
+	uint32_t	auc_ipc_hwm;
+	uint32_t	auc_ipc_send_delay;
+	uint32_t	auc_ipc_send_delay_hwm;
+};
+
+/**
+ * \brief MuC receive statistics
+ *
+ * These statistics are generated on the MuC, mainly on the receive datapath. This set of statistics
+ * also include low-level debugging facilities used internally.
+ */
+struct muc_rx_stats {
+	/**
+	 * This counter shows the number of descriptors taken from the host,
+	 * 'popped' from the top of the list.
+	 */
+	u_int32_t	rxdesc_pop_from_host;
+
+	/**
+	 * This counter shows the number of descriptors pushed to the hardware
+	 * for receive buffers.
+	 */
+	u_int32_t	rxdesc_get_from_queue;
+	u_int32_t	rxdesc_push_to_host;
+	u_int32_t	rxdesc_non_aggr_push_to_host;
+	u_int32_t	rxdesc_flush_to_host;
+	u_int32_t	rxdesc_reuse_push;
+	u_int32_t	rxdesc_reuse_pop;
+
+	/**
+	 * This counter shows the number of packets received with a bad duration.
+	 * A bad duration is where the duration field is all 1's - that is,
+	 * a packet which violates the 802.11 standard.
+	 */
+	u_int32_t	rxdesc_status_bad_dur;
+	u_int32_t	rxdesc_status_bad_len;
+	u_int32_t	rxdesc_status_crc_err;
+	u_int32_t	rxdesc_status_cmic_err;
+	u_int32_t	rxdesc_status_cmic_no_crc_err;
+	u_int32_t	rxdesc_status_retry;
+	u_int32_t	agg_stored;
+	u_int32_t	agg_duplicate;
+
+	u_int32_t	accel_mpdu;
+	u_int32_t	accel_msdu;
+	u_int32_t	accel_fwt_lu_timeout;
+	u_int32_t	accel_mcast_send;
+	u_int32_t	accel_mcast_drop;
+	u_int32_t	accel_no_match;
+	u_int32_t	accel_drop;
+	u_int32_t	accel_err;
+
+	u_int32_t	rate_train_chk;
+	u_int32_t	rate_train_err;
+	u_int32_t	rate_train_delay;
+	u_int32_t	rate_train_none;
+	u_int32_t	rate_train_hash_bad;
+	u_int32_t	rate_train_hash_good;
+
+	/**
+	 * This counter shows the number of MPDUs within an AMPDU that have been
+	 * discarded due to the sequence number being outside ('below') the current
+	 * receive sequence window.
+	 */
+	u_int32_t	agg_oldpkts;
+
+	/**
+	 * This counter shows the number of MPDUs within an AMPDU that have been
+	 * discarded due to the sequence number being off by > 2047 (half the sequence
+	 * space).
+	 */
+	u_int32_t	agg_very_oldpkts;
+	u_int32_t	agg_evict_in_order;
+	u_int32_t	agg_evict_in_move;
+
+	/**
+	 * This counter shows the number of received subframes within the
+	 * receive window that are missing when the window is moved.
+	 *
+	 * This counter represents one source receive packet loss.
+	 */
+	u_int32_t	agg_evict_empty;
+
+	/**
+	 * This counter shows the number of received subframes within the
+	 * receive window that are evicted due to timeout. Timeout is used
+	 * to ensure we don't sit with a stuck receive aggregate window when
+	 * the transmitter has stopped re-transmitting a given subframe.
+	 */
+	u_int32_t	agg_timeout;
+	u_int32_t	agg_rxwin_reset;
+	u_int32_t	rx_qnum_err;
+	u_int32_t	rx_mgmt;
+	u_int32_t	rx_ctrl;
+	u_int32_t	rx_pspoll;
+	u_int32_t	rx_pwr_mgmt;
+	u_int32_t	rx_delba;
+	/**
+	 * This counter shows the number of times the powersave bit is set
+	 * in the frame control field of packets received.
+	 *
+	 * \note This counter will generally be one greater than rx_pwr_mgmt_reset
+	 * when we have a single PS client associated and in power save.
+	 *
+	 * \sa rx_pwr_mgmt_reset
+	 */
+	u_int32_t	rx_pwr_mgmt_set;
+
+	/**
+	 * This counter shows the number of times the powersave bit of a
+	 * currently power save client is reset.
+	 *
+	 * \note This counter will generally be one less than rx_pwr_mgmt_set
+	 * when we have a single PS client associated and in power save mode.
+	 *
+	 * \sa rx_pwr_mgmt_set
+	 */
+	u_int32_t	rx_pwr_mgmt_reset;
+	u_int32_t	rx_desc_underflow;
+	u_int32_t	rx_desc_linkerr;
+	u_int32_t	rx_notify;
+	u_int32_t	rx_df_numelems;
+	u_int32_t	last_recv_seq;
+
+	/**
+	 * This counter shows the number of packets received for an unknown
+	 * node - that is - one which we do not have an association with.
+	 */
+	u_int32_t	rx_node_not_found;
+
+	/**
+	 * This counter shows the number of duplicates of non-QoS packets we
+	 * received and discarded.
+	 */
+	u_int32_t	rx_non_qos_duplicate;
+
+	/**
+	 * This counter shows the number of received NDPs.
+	 */
+	u_int32_t	rx_11n_ndp;
+	u_int32_t	rx_11ac_ndp;
+	u_int32_t	rx_ndp_inv_slot;
+	u_int32_t	rx_11n_ndp_no_capt;
+	u_int32_t	rx_ndp_sw_processed;
+	u_int32_t	rx_ndp_lockup;
+	u_int32_t	rx_11n_bf_act;
+	u_int32_t	rx_11ac_bf_act;
+	u_int32_t	rx_bf_act_inv_slot;
+
+	/**
+	 * This counter shows the number of received AMSDUs. This counter does
+	 * not count the number of subframes within the AMSDU.
+	 */
+	u_int32_t	rx_amsdu;
+	u_int32_t	rx_data;
+	u_int32_t	prev_rx_data;
+	u_int32_t	rx_recv_qnull;
+	u_int32_t	rx_recv_act;
+	u_int32_t	rx_recv_bcn;
+	u_int32_t	rx_recv_auth;
+	u_int32_t	rx_recv_assoc_req;
+	u_int32_t	rx_recv_assoc_res;
+	u_int32_t	rx_recv_deauth;
+	u_int32_t	rx_recv_disassoc;
+
+	/**
+	 * This counter shows the number of packets received where the MCS as
+	 * indicated in the PLCP is invalid (> 76).
+	 */
+	u_int32_t	rx_mcs_gt_76;
+	u_int32_t	tkip_keys;		/* Keep count of TKIP keys installed - for debug */
+	u_int32_t	rx_tkip_mic_err;	/* Number of TKIP packets RX with MIC error - the number reported to the higher layers */
+	u_int32_t	icv_errs; /* The number of raw ICV errors reported by the hardware */
+	u_int32_t	tmic_errs; /* The number of raw TMIC errors reported by the hardware */
+	u_int32_t	cmic_errs;
+	u_int32_t	crc_errs;
+
+	/**
+	 * This counter shows the number of transmit block ACK agreements
+	 * installed.
+	 *
+	 * If the upper bit is set, at least one implicit block ACK has been
+	 * established with a Quantenna peer.
+	 *
+	 * \note This number only increments - when block ACK agreements are
+	 * removed, this counter does not decrement.
+	 */
+	u_int32_t	ba_tx;
+
+	/**
+	 * This counter shows the number of receive block ACK agreements
+	 * installed.
+	 *
+	 * If the upper bit is set, at least one implicit block ACK has been
+	 * established with a Quantenna peer.
+	 *
+	 * \note This number only increments - when block ACK agreements are
+	 * removed, this counter does not decrement.
+	 */
+	u_int32_t	ba_rx;
+
+	/**
+	 * The number of times a block ACK has been rejected due to an out of
+	 * resource situation.
+	 */
+	u_int32_t	ba_rx_fail;
+	u_int32_t	sec_oflow;
+	u_int32_t	str_oflow;
+	u_int32_t	oflow_fixup_timeout;
+	u_int32_t	rxdone_intr;
+	u_int32_t	rxtypedone_intr;
+	u_int32_t	ipc_a2m_intr;
+	u_int32_t	tqe_intr;
+	u_int32_t	tqe_in_port_lhost;
+	u_int32_t	tqe_in_port_bad;
+	u_int32_t	tqe_a2m_type_txfb;
+	u_int32_t	tqe_a2m_type_rxpkt;
+	u_int32_t	tqe_a2m_type_unknown;
+	u_int32_t	tqe_reschedule_task;
+	u_int32_t	tqe_desc_unowned;
+
+	/**
+	 * \internal
+	 *
+	 * The number of interrupts from the baseband to the MuC.
+	 *
+	 * \note This should not be distributed externally - the following
+	 * fields are for internal debugging ONLY.
+	 */
+	u_int32_t	bb_intr;
+
+	/**
+	 * \internal
+	 *
+	 * The number of DLEAF overflow interrupts from the baseband.
+	 */
+	u_int32_t	bb_irq_dleaf_oflow;
+	u_int32_t	bb_irq_leaf_uflow;
+	u_int32_t	bb_irq_leaf_ldpc_uflow;
+	u_int32_t	bb_irq_tx_td_oflow_intr;
+	u_int32_t	bb_irq_tx_td_uflow_intr;
+	u_int32_t	bb_irq_rx_sm_wdg_intr;
+	/* BB spends more than 14.4ms (short GI)/16ms (long GI) to receive one packet */
+	u_int32_t	bb_irq_rx_long_dur;
+	u_int32_t	bb_irq_rx_long_dur_11ac;
+	u_int32_t	bb_irq_rx_long_dur_11n;
+	u_int32_t	bb_irq_rx_long_dur_11n_qtn;
+	/* BB reset due to spending more than 14.4ms/16ms to receive a packet. */
+	u_int32_t	bb_irq_rx_sym_exceed_rst;
+	u_int32_t	bb_irq_tx_sm_wdg_intr;
+
+	/**
+	 * \internal
+	 *
+	 * The number of BB state machine watchdogs that have kicked in.
+	 */
+	u_int32_t	bb_irq_main_sm_wdg_intr;
+	u_int32_t	bb_irq_hready_wdg_intr;
+	u_int32_t	mac_irq_rx_sec_buff_oflow;
+	u_int32_t	mac_irq_rx_strq_oflow;
+	u_int32_t	mac_irq_rx_bb_uflow_intr;
+	u_int32_t	mac_irq_rx_bb_oflow_intr;
+	u_int32_t	bb_irq_hready_wdg_reset;
+
+	/**
+	 * \internal
+	 *
+	 * This counter is incremented once at the start of the main watchdog state machine.
+	 *
+	 * \sa sreset_wdg_end
+	 */
+	u_int32_t	sreset_wdg_begin;
+
+	/**
+	 * \internal
+	 *
+	 * This counter is incremented once at the end of the main watchdog state machine.
+	 *
+	 * \sa sreset_wdg_begin
+	 */
+	u_int32_t	sreset_wdg_end;
+	u_int32_t	sreset_wdg_in_place;
+	u_int32_t	sreset_wdg_tx_beacon_hang;
+
+	/**
+	 * \internal
+	 *
+	 * The number of transmit hangs causing soft reset.
+	 *
+	 * Transmit hang is between 400 to 900ms from the time of sending a packet to the hardware
+	 * without receiving a tx done interrupt.
+	 */
+	u_int32_t	sreset_wdg_tx_hang;
+
+	/**
+	 * \internal
+	 *
+	 * The number of packet memory corruption causing soft reset.
+	 *
+	 * For unknown reason, packet memory may be corrupted. When packet memory corruption is detected,
+	 * soft reset is triggered, and this counter is incremented once.
+	 */
+	u_int32_t	sreset_wdg_pm_corrupt;
+
+	/**
+	 * \internal
+	 *
+	 * The number of packet transmit control memory corruption causing soft reset.
+	 *
+	 * For unknown reason, transmit control memory may be corrupted. When transmit control memory corruption is detected,
+	 * soft reset is triggered, and this counter incremented once.
+	 */
+	u_int32_t	sreset_wdg_tcm_corrupt;
+
+	/**
+	 * \internal
+	 *
+	 * The number of receive hangs causing a soft reset.
+	 *
+	 * Receive hang is > 70s without receiving a single packet.
+	 *
+	 * Note that this can trigger in idle situations, but should not affect anything because
+	 * the link is idle.
+	 */
+	u_int32_t	sreset_wdg_rx_done;
+	u_int32_t	sreset_wdg_in_place_try;
+	u_int32_t	sreset_wdg_tasklet_sched_1;
+	u_int32_t	sreset_wdg_tasklet_sched_2;
+	u_int32_t	sreset_tasklet_sched;
+	u_int32_t	sreset_tasklet_begin;
+	u_int32_t	sreset_tasklet_end;
+
+	/**
+	 * \internal
+	 *
+	 * This counter is incremented when a BB hard reset is requested
+	 * to occur in the middle of a soft reset sequence
+	 */
+	u_int32_t	hreset_req;
+
+	/**
+	 * \internal
+	 *
+	 * This counter is incremented at the start of a soft reset.
+	 *
+	 * There should always be a corresponding increment in the sreset_end
+	 * counter, or there is a problem.
+	 *
+	 * \sa sreset_end
+	 */
+	u_int32_t	sreset_begin;
+
+	/**
+	 * \internal
+	 *
+	 * This counter is incremented at the end of a soft reset.
+	 *
+	 * There should always be a corresponding increment in the sreset_begin
+	 * counter, or there is a problem.
+	 *
+	 * \sa sreset_begin
+	 */
+	u_int32_t	sreset_end;
+
+	/**
+	 * \internal
+	 *
+	 * This counter is incremented each time DMA RX is in progress when a
+	 * soft reset is triggered.
+	 */
+	u_int32_t	sreset_dma_rx_inprog;
+
+	/**
+	 * \internal
+	 *
+	 * This counter is incremented each time DMA TX is in progress when a
+	 * soft reset is triggered.
+	 */
+	u_int32_t	sreset_dma_tx_inprog;
+	u_int32_t	sreset_dma_rx_max_wait;
+	u_int32_t	sreset_dma_tx_max_wait;
+	u_int32_t	sreset_dma_tx_hang;
+	u_int32_t	sreset_dma_rx_hang;
+	u_int32_t	sreset_dma_rx_wait_timeout;
+	u_int32_t	sreset_dma_tx_wait_timeout;
+	u_int32_t	sreset_drop_not_valid;
+	u_int32_t	sreset_drop_bad_addr;
+	u_int32_t	rf_cmpvtune_out;
+	u_int32_t	rf_cal_freq;
+	u_int32_t	ac_max;
+	u_int32_t	ac_min;
+	u_int32_t	ac_cur;
+	u_int32_t	ac_adj;
+	u_int32_t	rx_gain;
+	u_int32_t	rd_cache_indx;
+	u_int32_t	logger_sreset_wmac1_dma_rx_inprog;
+	u_int32_t	logger_sreset_wmac1_dma_tx_inprog;
+	u_int32_t	logger_sreset_wmac1_dma_rx_max_wait;
+	u_int32_t	logger_sreset_wmac1_dma_tx_max_wait;
+	u_int32_t	logger_sreset_wmac1_dma_tx_hang;
+	u_int32_t	logger_sreset_wmac1_dma_rx_hang;
+	u_int32_t	logger_sreset_wmac1_dma_rx_wait_timeout;
+	u_int32_t	logger_sreset_wmac1_dma_tx_wait_timeout;
+	/**
+	 * These counters show information about MU frames.
+	 */
+	u_int32_t	mu_rx_pkt;
+
+	/**
+	 * \internal
+	 *
+	 * These counters monitor power duty cycling
+	 */
+	u_int32_t	pduty_sleep;
+	u_int32_t	pduty_rxoff;
+	u_int32_t	pduty_period;
+	u_int32_t	pduty_pct;
+
+	/**
+	 * \internal
+	 *
+	 * These counters are incremented when a soft-ring operation is triggered
+	 */
+	u_int32_t	soft_ring_push_to_tqe;
+	u_int32_t	soft_ring_empty;
+	u_int32_t	soft_ring_not_empty;
+	u_int32_t	soft_ring_add_force;
+	u_int32_t	soft_ring_add_to_head;
+	u_int32_t	soft_ring_add_continue;
+	u_int32_t	soft_ring_free_pool_empty;
+	u_int32_t	mimo_ps_mode_switch;	/* times STA switch MIMO power-save mode by HT action */
+
+	u_int32_t	rx_vlan_drop;
+	u_int32_t	auto_cca_state;
+	u_int32_t	auto_cca_th;
+	u_int32_t	auto_cca_spre;
+	u_int32_t	auto_cca_intf;
+
+	/**
+	 * \internal
+	 *
+	 * These counters monitor memory allocation.
+	 */
+	u_int32_t	total_dmem_alloc;
+	u_int32_t	total_dram_alloc;
+	u_int32_t	dmem_alloc_fails;
+	u_int32_t	dram_alloc_fails;
+	u_int32_t	total_dmem_free;
+	u_int32_t	total_dram_free;
+
+	/* RX frames BW mode*/
+	u_int32_t	rx_bw_80;
+	u_int32_t	rx_bw_40;
+	u_int32_t	rx_bw_20;
+
+	/* U-APSD rx stats */
+	uint32_t	rx_wmm_ps_trigger;
+	uint32_t	rx_wmm_ps_set;
+	uint32_t	rx_wmm_ps_reset;
+
+	uint32_t	rx_intr_next_ptr_0;
+	uint32_t	rx_hbm_pool_depleted;
+
+	uint32_t	rxq_intr[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_fill[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_nobuf[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_stop[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_pkt[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_bad_status[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_pkt_oversize[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_pkt_delivered[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_status_hole_chk_num[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_status_hole_chk_step_sum[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_status_hole_chk_step_max[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_status_hole[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_status_hole_max_size[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_process_max[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_process_sum[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_process_num[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_process_limited[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rxq_desc_chain_empty[QTN_FW_WMAC_RX_QNUM];
+	uint32_t	rx_data_last_seqfrag;
+	uint32_t	rx_data_last_ip_id;
+	uint32_t	rx_opmode_notify;
+
+	/**
+	 * This counter is incremented once per packet which is sent via the
+	 * external filter (HotSpot functionality).
+	 */
+	uint32_t	accel_l2_ext_filter;
+	uint32_t	accel_mc_send_l2_ext_filter;
+
+	/**
+	 * This counter is incremented once per multicast packet dropped without
+	 * forwarding to the external filter (HotSpot functionality).
+	 */
+	uint32_t	accel_mc_drop_l2_ext_filter;
+
+	uint32_t	rx_frame_addressed_to_wrong_bss;
+};
+
+#define MUC_LEGACY_NUM_RATES	12
+#define MUC_HT_NUM_RATES	77
+#define MUC_VHT_NUM_RATES	40
+struct muc_rx_rates {
+	u_int32_t rx_mcs[MUC_HT_NUM_RATES];
+	u_int32_t rx_mcs_11ac[MUC_VHT_NUM_RATES];
+};
+
+#define QTN_STATS_NUM_BF_SLOTS	10
+struct muc_rx_bf_stats {
+	u_int32_t	rx_bf_valid[QTN_STATS_NUM_BF_SLOTS];
+	u_int32_t	rx_bf_aid[QTN_STATS_NUM_BF_SLOTS];
+	u_int32_t	rx_bf_ng[QTN_STATS_NUM_BF_SLOTS];
+	u_int32_t	rx_bf_11n_ndp[QTN_STATS_NUM_BF_SLOTS];
+	u_int32_t	rx_bf_11ac_ndp[QTN_STATS_NUM_BF_SLOTS];
+	u_int32_t	rx_bf_11n_act[QTN_STATS_NUM_BF_SLOTS];
+	/* Total number of 11ac BF feedbacks */
+	u_int32_t	rx_bf_11ac_act[QTN_STATS_NUM_BF_SLOTS];
+	/* Number of MU group selection BF feedbacks */
+	u_int32_t	rx_bf_11ac_grp_sel[QTN_STATS_NUM_BF_SLOTS];
+	/* Number of MU precoding BF feedbacks */
+	u_int32_t	rx_bf_11ac_prec[QTN_STATS_NUM_BF_SLOTS];
+	/* Number of SU BF feedbacks */
+	u_int32_t	rx_bf_11ac_su[QTN_STATS_NUM_BF_SLOTS];
+	/* Number of corrupted BF feedbacks */
+	u_int32_t	rx_bf_11ac_bad[QTN_STATS_NUM_BF_SLOTS];
+	/* Number of MuC to DSP IPC failures while sending BF feedbacks */
+	u_int32_t	rx_bf_11ac_dsp_fail[QTN_STATS_NUM_BF_SLOTS];
+	/* Number of times QMat for this node has been updated (the node was added to MU group) */
+	u_int32_t	mu_grp_add[QTN_STATS_NUM_BF_SLOTS];
+	/* Number of times the node was removed from MU group */
+	u_int32_t	mu_grp_del[QTN_STATS_NUM_BF_SLOTS];
+	u_int32_t	msg_buf_alloc_fail;
+};
+
+/** @} */
+
+extern struct muc_rx_stats uc_rx_stats;
+extern struct muc_rx_rates uc_rx_rates;
+extern struct muc_rx_bf_stats uc_rx_bf_stats;
+extern struct muc_tx_stats uc_tx_stats;
+extern struct qtn_rate_tx_stats_per_sec uc_tx_rates;
+extern uint32_t uc_su_rate_stats_read;
+extern uint32_t uc_mu_rate_stats_read;
+
+/*
+ * Rate adaption data collected for packet logger
+ * NOTE: Any changes to these definitions will require changes to stat_parser.pl
+ */
+#define RATES_STATS_NUM_ADAPTATIONS	16
+#define RATES_STATS_NUM_TX_RATES	6
+#define RATES_STATS_NUM_RX_RATES	8	/* Must be a multiple of word size */
+#define RATES_STATS_EVM_CNT		4
+
+/*
+ * Currently only two user positions are supported for MU group
+ * the following define should be aligned
+ * with IEEE80211_MU_GRP_NODES_MAX (4) in future.
+ * for now we don't want to take care about 2x extra zero-filled
+ * huge arrays in rate stats
+ */
+#define RATES_STATS_MAX_USER_IN_GROUP   2
+
+/**
+ * \addtogroup MUCSTATS
+ */
+/** @{ */
+struct qtn_rate_stats_mcs_data {
+	uint16_t	mcs_rate;
+	uint16_t	rate_index;
+	uint16_t	state;
+	uint16_t	pkt_total;
+	uint16_t	pkt_error;
+	uint16_t	pkt_hw_retry;
+	uint16_t	pkt_sample;
+	uint16_t	avg_per;
+} __attribute__((packed));
+
+struct qtn_rate_su_tx_stats {
+	uint32_t			seq_no;
+	uint32_t			timestamp;
+	uint32_t			flags;
+	uint16_t			sampling_index;
+	uint16_t			sampling_rate;
+	struct qtn_rate_stats_mcs_data	mcs_data[RATES_STATS_NUM_TX_RATES];
+} __attribute__((packed));
+
+struct qtn_rate_mu_tx_stats {
+	struct qtn_rate_su_tx_stats group_stats[RATES_STATS_MAX_USER_IN_GROUP];
+} __attribute__((packed));
+
+struct qtn_rate_gen_stats {
+	u_int16_t   rx_mcs_rates[RATES_STATS_NUM_RX_RATES];
+	u_int32_t  rx_mcs[RATES_STATS_NUM_RX_RATES];
+	u_int32_t  rx_crc;
+	u_int32_t  rx_sp_errors;
+	u_int32_t  rx_lp_errors;
+	u_int32_t  rx_evm[RATES_STATS_EVM_CNT];
+	u_int32_t  tx_subframe_success;
+	u_int32_t  tx_subframe_fail;
+	u_int32_t  tx_mgmt_success;
+	u_int32_t  tx_hw_retry;
+	u_int32_t  tx_sw_retry;
+} __attribute__((packed));
+
+struct qtn_rate_tx_stats_per_sec {
+	struct qtn_rate_su_tx_stats  stats_su[RATES_STATS_NUM_ADAPTATIONS];
+	struct qtn_rate_mu_tx_stats  stats_mu[RATES_STATS_NUM_ADAPTATIONS];
+};
+/** @} */
+
+#endif	/* _STATS_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qdrv_bld.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qdrv_bld.h
new file mode 100644
index 0000000..268656a
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qdrv_bld.h
@@ -0,0 +1,6 @@
+/* Automatically generated file.  Do not edit. */
+#define QDRV_BLD_NAME	"v37.4.1.89"
+#define QDRV_BLD_LABEL	"v37.4.1.89"
+#define QDRV_BLD_VER	(0x25040159)
+#define QDRV_BLD_REV	"53858"
+#define QDRV_BLD_TYPE	(QDRV_BLD_TYPE_SDK)
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qdrv_sch.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qdrv_sch.h
new file mode 100644
index 0000000..e68a59e
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qdrv_sch.h
@@ -0,0 +1,461 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2011 - 2012 Quantenna Communications, Inc.          **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef __QDRV_SCH_H
+#define __QDRV_SCH_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#ifdef CONFIG_IPV6
+#include <linux/ipv6.h>
+#endif
+#include <net/pkt_sched.h>
+
+#include "net80211/if_ethersubr.h"
+#include "net80211/if_llc.h"
+
+#include <linux/if_vlan.h>
+
+#include <qtn/qtn_global.h>
+
+#include <qtn/qdrv_sch_data.h>
+#include "qdrv_sch_const.h"
+#include <qtn/iputil.h>
+#include <qtn/qtn_net_packet.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_vlan_cpuif.h>
+#include <common/topaz_emac.h>
+#include <asm/hardware.h>
+#define ETHER_TYPE_UNKNOWN			0XFFFF
+#define IPTOS_PREC_SHIFT			5
+
+#define IP_DSCP_SHIFT		2
+#define IP_DSCP(_pri)           (((_pri) & 0xFF) >> IP_DSCP_SHIFT)
+#define IP_DSCP_MAPPING_SIZE	IP_DSCP_NUM >> 1
+#define VLANID_INDEX_INITVAL	4096
+/* 4-bits are used to store TID */
+#define QDRV_IP_DSCP_MAPPING_SHIFT	4
+#define QDRV_IP_DSCP_INDEX_SHIFT	3
+
+#define QDRV_SCH_RESERVED_TOKEN_PER_USER	64
+
+#define QDRV_SCH_EMAC0_IN_USE	1
+#define QDRV_SCH_EMAC1_IN_USE	2
+
+int qdrv_sch_module_init(void);
+void qdrv_sch_module_exit(void);
+
+extern __sram_data uint8_t qdrv_sch_tos2ac[];
+extern __sram_data uint8_t qdrv_vap_vlan_max;
+extern __sram_data uint8_t qdrv_sch_dscp2tid[QTN_MAX_BSS_VAPS][IP_DSCP_MAPPING_SIZE];
+extern __sram_data uint16_t qdrv_sch_vlan2index[QTN_MAX_BSS_VAPS];
+
+/*
+ * Refactoring of emac_wr(struct emac_common *arapc, int reg, u32 val)
+ * We can't visit the emac_common structure here.
+ */
+__always_inline static void qtn_emac_wr(uint32_t vbase, int reg, u32 val)
+{
+	writel(val, IO_ADDRESS(vbase + reg));
+	/* HW bug workaround - dummy access breaks up bus transactions. */
+	readl(RUBY_SYS_CTL_BASE_ADDR);
+}
+
+__always_inline static uint32_t qtn_emac_rd(uint32_t vbase, int reg)
+{
+	/*
+	 * HW bug workaround - sometimes we can't get the correct register value,
+	 * so we need to do an extra readl
+	 */
+	readl(RUBY_SYS_CTL_BASE_ADDR);
+	return readl(IO_ADDRESS(vbase + reg));
+}
+
+static inline struct Qdisc *qdrv_tx_sch_vap_get_qdisc(const struct net_device *dev)
+{
+	/* This assumes 1 tx netdev queue per vap */
+	return netdev_get_tx_queue(dev, 0)->qdisc;
+}
+
+static inline int qdrv_sch_tclass_to_ac(const uint8_t dscp)
+{
+	int wme_ac;
+	uint8_t dot1p_up;
+	static const uint8_t qdrv_sch_dscp2dot1p[] = {
+		/* 000xxx */
+		0, 0, 0, 0, 0, 0, 0, 0,
+		/* 001xxx */
+		1, 1, 1, 1, 1, 1, 1, 1,
+		/* 010xxx */
+		2, 2, 2, 2, 2, 2, 2, 2,
+		/* 011xxx */
+		3, 3, 3, 3, 3, 3, 3, 3,
+		/* 100xxx */
+		4, 4, 4, 4, 4, 4, 4, 4,
+		/* 101xxx */
+		5, 5, 5, 5, 5, 5, 5, 5,
+		/* 110xxx */
+		6, 6, 6, 6, 6, 6, 6, 6,
+		/* 111xxx */
+		7, 7, 7, 7, 7, 7, 7, 7
+	};
+
+	dot1p_up = qdrv_sch_dscp2dot1p[IP_DSCP(dscp)];
+	if (dot1p_up < IEEE8021P_PRIORITY_NUM)
+		wme_ac = qdrv_sch_tos2ac[dot1p_up];
+	else
+		wme_ac = qdrv_sch_tos2ac[IPTOS_PREC(dscp) >> IPTOS_PREC_SHIFT];
+
+	return wme_ac;
+}
+
+static inline int
+qdrv_sch_classify_ctrl(struct sk_buff *skb)
+{
+	uint16_t ether_type = QTN_SKB_CB_ETHERTYPE(skb);
+	uint8_t ip_protocol;
+
+	if (likely(iputil_eth_is_ipv4or6(ether_type))) {
+		ip_protocol = QTN_SKB_CB_IPPROTO(skb);
+		if (unlikely((ip_protocol == IPPROTO_ICMP) ||
+				(ip_protocol == IPPROTO_ICMPV6) ||
+				(ip_protocol == IPPROTO_IGMP))) {
+			return 1;
+		}
+	} else if ((ether_type == __constant_htons(ETH_P_ARP)) ||
+			(ether_type == __constant_htons(ETH_P_PAE))) {
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline uint8_t qdrv_sch_mask_gettid(uint8_t ifindex, uint8_t dscp)
+{
+	uint8_t index;
+	uint32_t curval;
+	uint8_t	tid;
+
+	index = (dscp >> 1);
+	curval = qdrv_sch_dscp2tid[ifindex][index];
+
+	if (dscp & 0x1)
+		tid  = (curval & 0xf);
+	else
+		tid = (curval >> QDRV_IP_DSCP_MAPPING_SHIFT) & 0xf;
+
+	return tid;
+}
+
+/* Multiple VLAN tags are not currently supported */
+static inline void topaz_tqe_vlan_gettid(void *data, uint8_t *tid, uint8_t *vlan_index)
+{
+	struct vlan_ethhdr *vhd;
+	uint16_t vid = 0;
+	uint8_t ip_dscp = 0;
+	int i;
+	const void *iphdr = NULL;
+	const struct ether_header *eh = data;
+	const uint16_t *ether_type = &eh->ether_type;
+
+	*vlan_index = 0;
+	if (*ether_type == __constant_htons(ETH_P_8021Q)) {
+		ether_type += 2;
+		vhd = data;
+		vid = __constant_htons(vhd->h_vlan_TCI) & VLAN_VID_MASK;
+		for (i = 0; i < qdrv_vap_vlan_max; i++) {
+			if (qdrv_sch_vlan2index[i] == vid) {
+				*vlan_index = i;
+				break;
+			}
+		}
+	}
+
+	iphdr = ether_type + 1;
+	if (*ether_type == __constant_htons(ETH_P_IP)) {
+		const struct qtn_ipv4 *ipv4 = (const struct qtn_ipv4 *) iphdr;
+		ip_dscp = IP_DSCP(ipv4->dscp);
+	} else if (*ether_type == __constant_htons(ETH_P_IPV6)) {
+		const struct qtn_ipv6 *ipv6 = (const struct qtn_ipv6 *) iphdr;
+		ip_dscp = qtn_ipv6_tclass(ipv6);
+	} else if ((*ether_type == __constant_htons(ETH_P_ARP)) || (*ether_type == __constant_htons(ETH_P_PAE))) {
+		*tid = WME_AC_TO_TID(WMM_AC_VO);
+		return;
+	} else {
+		*tid = WME_AC_TO_TID(WMM_AC_BE);
+		return;
+	}
+
+	*tid = qdrv_sch_mask_gettid(*vlan_index, ip_dscp);
+}
+
+static inline void
+qdrv_sch_classify(struct sk_buff *skb, uint16_t ether_type, uint8_t *data_start)
+{
+	uint8_t wme_ac = WME_AC_BE;
+	uint8_t tid;
+	uint8_t vlan_index;
+
+	if (M_FLAG_ISSET(skb, M_CLASSIFY)) {
+		return;
+	}
+	M_FLAG_SET(skb, M_CLASSIFY);
+
+	QTN_SKB_CB_ETHERTYPE(skb) = ether_type;
+
+	if (ether_type == __constant_htons(ETH_P_IP)) {
+		struct iphdr *iphdr_p = (struct iphdr *)data_start;
+
+		if ((skb->len >= (data_start - skb->data) + sizeof(*iphdr_p)) &&
+				(iphdr_p->version == 4))
+			QTN_SKB_CB_IPPROTO(skb) = iphdr_p->protocol;
+	}
+#ifdef CONFIG_IPV6
+	else if (ether_type == __constant_htons(ETH_P_IPV6)) {
+		struct ipv6hdr *ipv6hdr_p = (struct ipv6hdr *)data_start;
+
+		if (skb->len >= (data_start - skb->data) + sizeof(struct ipv6hdr) &&
+				(ipv6hdr_p->version == 6)) {
+			uint8_t nexthdr;
+
+			iputil_v6_skip_exthdr(ipv6hdr_p, sizeof(struct ipv6hdr), &nexthdr,
+				(skb->len - ((uint8_t *)ipv6hdr_p - skb->data)), NULL, NULL);
+			QTN_SKB_CB_IPPROTO(skb) = nexthdr;
+		}
+	}
+#endif
+
+	if (qdrv_sch_classify_ctrl(skb)) {
+		wme_ac = QTN_AC_MGMT;
+	} else {
+		topaz_tqe_vlan_gettid(skb->data, &tid, &vlan_index);
+		wme_ac = TID_TO_WMM_AC(tid);
+	}
+
+	skb->priority = wme_ac;
+}
+
+static inline void
+qdrv_sch_classify_bk(struct sk_buff *skb)
+{
+	M_FLAG_SET(skb, M_CLASSIFY);
+	skb->priority = QDRV_BAND_AC_BK;
+}
+
+/*
+ * Skip over L2 headers in a buffer
+ *   Returns Ethernet type and a pointer to the payload
+ */
+static inline void *
+qdrv_sch_find_data_start(struct sk_buff *skb,
+		struct ether_header *eh, u16 *ether_type)
+{
+	struct llc *llc_p;
+	struct vlan_ethhdr *vlan_ethhdr_p;
+
+	if (ntohs(eh->ether_type) < ETHER_MAX_LEN) {
+		llc_p = (struct llc *)(eh + 1);
+		if ((skb->len >= LLC_SNAPFRAMELEN) &&
+		    (llc_p->llc_dsap == LLC_SNAP_LSAP) &&
+		    (llc_p->llc_ssap == LLC_SNAP_LSAP)) {
+			*ether_type = llc_p->llc_un.type_snap.ether_type;
+			return (void *)((char *)(eh + 1) - sizeof(ether_type) + LLC_SNAPFRAMELEN);
+		} else {
+			*ether_type = ETHER_TYPE_UNKNOWN;
+			return (void *)(eh + 1);
+		}
+	} else if (ntohs(eh->ether_type) == ETH_P_8021Q) {
+		vlan_ethhdr_p = (struct vlan_ethhdr *)eh;
+		*ether_type = vlan_ethhdr_p->h_vlan_encapsulated_proto;
+		skb->vlan_tci = ntohs(get_unaligned((__be16 *)(&vlan_ethhdr_p->h_vlan_TCI)));
+		return (void *)(vlan_ethhdr_p + 1);
+	} else {
+		*ether_type = eh->ether_type;
+		return (void *)(eh + 1);
+	}
+}
+
+static inline uint8_t qdrv_dscp2tid_default(const uint8_t dscp)
+{
+	const uint8_t tclass = dscp << IP_DSCP_SHIFT;
+	const uint8_t ac = qdrv_sch_tclass_to_ac(tclass);
+	const uint8_t tid = WME_AC_TO_TID(ac);
+
+	return tid;
+}
+
+/* Each byte contains 2 4-bit DSCP mapping values */
+static inline void qdrv_dscp2tid_setvalue(uint8_t ifindex , uint8_t dscp, uint8_t tid)
+{
+	uint8_t curval = 0;
+	uint8_t index = 0;
+
+	index = dscp >> 1;
+	curval = qdrv_sch_dscp2tid[ifindex][index];
+
+	if (dscp & 0x1) {
+		qdrv_sch_dscp2tid[ifindex][index] = (curval & ~0xf) | tid;
+	} else {
+		qdrv_sch_dscp2tid[ifindex][index] = (curval & ~(0xf << QDRV_IP_DSCP_MAPPING_SHIFT)) |
+			(tid << QDRV_IP_DSCP_MAPPING_SHIFT);
+	}
+}
+
+static inline void qdrv_dscp2tid_map_init(void)
+{
+	uint8_t ifindex;
+	uint8_t dscp;
+	uint8_t tid = 0;
+
+	for (dscp = 0; dscp < IP_DSCP_NUM; dscp++) {
+		tid =  qdrv_dscp2tid_default(dscp);
+		qdrv_dscp2tid_setvalue(0, dscp, tid);
+	}
+
+	for (ifindex = 1; ifindex < QTN_MAX_BSS_VAPS; ifindex++) {
+		memcpy(&qdrv_sch_dscp2tid[ifindex][0], &qdrv_sch_dscp2tid[0][0], sizeof(qdrv_sch_dscp2tid[0]));
+	}
+}
+
+#if !defined(CONFIG_TOPAZ_PCIE_HOST) && !defined(CONFIG_TOPAZ_PCIE_TARGET)
+/* conversion is only needed for tables that are different to the first table */
+static inline void qdrv_sch_set_vlanpath(void)
+{
+	int i;
+	union topaz_vlan_entry vlan_entry;
+	uint16_t vid;
+
+	/*
+	 * This comparison must be done again if the user changes wifi0 config to
+	 * be the same as some other interfaces.
+	 */
+	for (i = 1; i < QTN_MAX_BSS_VAPS; i++) {
+		vid = qdrv_sch_vlan2index[i];
+		if (vid == VLANID_INDEX_INITVAL) {
+			continue;
+		}
+
+		vlan_entry = topaz_vlan_get_entry(vid);
+		if (memcmp(&qdrv_sch_dscp2tid[0][0], &qdrv_sch_dscp2tid[i][0], sizeof(qdrv_sch_dscp2tid[0]))) {
+			vlan_entry.data.valid = 1;
+			vlan_entry.data.out_port = TOPAZ_TQE_LHOST_PORT;
+		} else {
+			vlan_entry.data.valid = 0;
+		}
+		topaz_vlan_clear_entry(vid);
+		topaz_vlan_set_entry(vid, vlan_entry);
+	}
+}
+
+/*
+ * Configure the HW DSCP to TID table, which is used for wifi0
+ * and any other TIDs that use the same config.
+ */
+static inline void qdrv_sch_set_dscp_hwtbl(uint8_t dscp, uint8_t tid, uint32_t reg_base)
+{
+	uint32_t dscp_reg_val = 0;
+	uint8_t dscp_reg_index = dscp >> QDRV_IP_DSCP_INDEX_SHIFT;
+	uint8_t dscp_nibble_index = dscp - (dscp_reg_index << QDRV_IP_DSCP_INDEX_SHIFT);
+
+	dscp_reg_val = qtn_emac_rd(reg_base, TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(dscp_reg_index));
+
+	dscp_reg_val &= ~(0xF <<
+		(dscp_nibble_index << TOPAZ_EMAC_IPDSCP_HWT_SHIFT));
+	dscp_reg_val |= (tid & 0xF) <<
+		(dscp_nibble_index << TOPAZ_EMAC_IPDSCP_HWT_SHIFT);
+
+	qtn_emac_wr(reg_base, TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(dscp_reg_index), dscp_reg_val);
+}
+#endif
+
+static inline void qdrv_sch_mask_settid(uint8_t ifindex, uint8_t dscp, uint8_t tid,
+		uint32_t emac_in_use)
+{
+	qdrv_dscp2tid_setvalue(ifindex, dscp, tid);
+#if !defined(CONFIG_TOPAZ_PCIE_HOST) && !defined(CONFIG_TOPAZ_PCIE_TARGET)
+	qdrv_sch_set_vlanpath();
+	if (ifindex == 0) {
+		if (emac_in_use & QDRV_SCH_EMAC0_IN_USE) {
+			qdrv_sch_set_dscp_hwtbl(dscp, tid, RUBY_ENET0_BASE_ADDR);
+		}
+		if (emac_in_use & QDRV_SCH_EMAC1_IN_USE) {
+			qdrv_sch_set_dscp_hwtbl(dscp, tid, RUBY_ENET1_BASE_ADDR);
+		}
+	}
+#endif
+}
+
+int qdrv_sch_node_is_active(const struct qdrv_sch_node_band_data *nbd,
+				const struct qdrv_sch_node_data *nd, uint8_t band);
+int qdrv_sch_enqueue_node(struct qdrv_sch_node_data *nd, struct sk_buff *skb,
+				bool is_over_quota, bool is_low_rate);
+
+void qdrv_sch_complete(struct qdrv_sch_node_data *nd, struct sk_buff *skb,
+					uint8_t under_quota);
+/*
+ * If qdrv_sch_dequeue_nostat is called, accounting in the qdisc
+ * is not complete until qdrv_sch_complete is called
+ */
+struct sk_buff *qdrv_sch_dequeue_nostat(struct qdrv_sch_shared_data *sd, struct Qdisc* sch);
+int qdrv_sch_flush_node(struct qdrv_sch_node_data *nd);
+int qdrv_sch_requeue(struct qdrv_sch_shared_data *sd, struct sk_buff *skb, struct Qdisc *sch);
+
+const char *qdrv_sch_tos2ac_str(int tos);
+void qdrv_sch_set_ac_map(int tos, int aid);
+
+int qdrv_sch_set_dscp2ac_map(const uint8_t vapid, uint8_t *ip_dscp, uint8_t listlen, uint8_t ac);
+int qdrv_sch_get_dscp2ac_map(const uint8_t vapid, uint8_t *dscp2ac);
+
+void qdrv_sch_set_dscp2tid_map(const uint8_t vapid, const uint8_t *dscp2tid);
+void qdrv_sch_get_dscp2tid_map(const uint8_t vapid, uint8_t *dscp2tid);
+
+void qdrv_tx_sch_node_data_init(struct Qdisc *sch, struct qdrv_sch_shared_data *sd,
+				struct qdrv_sch_node_data *nd, uint32_t users);
+void qdrv_tx_sch_node_data_exit(struct qdrv_sch_node_data *nd, uint32_t users);
+struct qdrv_sch_shared_data *qdrv_sch_shared_data_init(int16_t tokens, uint16_t rdt);
+void qdrv_sch_shared_data_exit(struct qdrv_sch_shared_data *sd);
+
+#endif
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_arc_processor.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_arc_processor.h
new file mode 100644
index 0000000..8091de5
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_arc_processor.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _QTN_ARC_PROCESSOR_H_
+#define _QTN_ARC_PROCESSOR_H_
+
+#if defined(STATIC_CHECK)
+uint32_t get_sp(void);
+uint32_t get_ilink1(void);
+uint32_t get_ilink2(void);
+uint32_t get_blink(void);
+uint32_t get_status32(void);
+uint8_t arc_read_uncached_8(const uint8_t *addr);
+void arc_write_uncached_8(uint8_t *addr, uint8_t value);
+uint16_t arc_read_uncached_16(const uint16_t *addr);
+void arc_write_uncached_16(uint16_t *addr, uint32_t value);
+uint32_t arc_read_uncached_32(const uint32_t *addr);
+void arc_write_uncached_32(uint32_t *addr, uint32_t value);
+#elif defined(_ARC)
+
+_Inline _Asm uint32_t get_sp(void)
+{
+	mov %r0, %r28
+}
+
+_Inline _Asm uint32_t get_ilink1(void)
+{
+	mov %r0, %r29
+}
+
+_Inline _Asm uint32_t get_ilink2(void)
+{
+	mov %r0, %r30
+}
+
+_Inline _Asm uint32_t get_blink(void)
+{
+	mov %r0, %r31
+}
+
+_Inline _Asm uint32_t get_status32(void)
+{
+	lr %r0, [%status32]
+}
+
+_Inline _Asm uint8_t arc_read_uncached_8(const uint8_t *addr)
+{
+	%reg addr
+	ldb.di %r0, [addr]
+}
+
+_Inline _Asm void arc_write_uncached_8(uint8_t *addr, uint8_t value)
+{
+	%reg addr, value
+	stb.di value, [addr]
+}
+
+_Inline _Asm uint16_t arc_read_uncached_16(const uint16_t *addr)
+{
+	%reg addr
+	ldw.di %r0, [addr]
+}
+
+_Inline _Asm void arc_write_uncached_16(uint16_t *addr, uint32_t value)
+{
+	%reg addr, value
+	stw.di value, [addr]
+}
+
+_Inline _Asm uint32_t arc_read_uncached_32(const uint32_t *addr)
+{
+	%reg addr
+	ld.di %r0, [addr]
+}
+
+_Inline _Asm void arc_write_uncached_32(uint32_t *addr, uint32_t value)
+{
+	%reg addr, value
+	st.di value, [addr]
+}
+
+#else
+/* implementations provided elsewhere */
+#endif	// STATIC_CHECK
+#endif	// _QTN_ARC_PROCESSOR_H_
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_auc_stats_fields.default.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_auc_stats_fields.default.h
new file mode 100644
index 0000000..d85a899
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_auc_stats_fields.default.h
@@ -0,0 +1,235 @@
+	{ 0xe51011f0, "sleep" },
+	{ 0xe5101228, "jiffies" },
+	{ 0xe5103404, "IRQ_0" },
+	{ 0xe5103408, "IRQ_1" },
+	{ 0xe510340c, "IRQ_2" },
+	{ 0xe5103410, "IRQ_3" },
+	{ 0xe5103414, "IRQ_4" },
+	{ 0xe5103418, "IRQ_5" },
+	{ 0xe510341c, "IRQ_6" },
+	{ 0xe5103420, "IRQ_7" },
+	{ 0xe5103424, "IRQ_8" },
+	{ 0xe5103428, "IRQ_9" },
+	{ 0xe510342c, "IRQ_10" },
+	{ 0xe5103430, "IRQ_11" },
+	{ 0xe5103434, "IRQ_12" },
+	{ 0xe5103438, "IRQ_13" },
+	{ 0xe510343c, "IRQ_14" },
+	{ 0xe5103440, "IRQ_15" },
+	{ 0xe5103444, "IRQ_16" },
+	{ 0xe5103448, "IRQ_17" },
+	{ 0xe510344c, "IRQ_18" },
+	{ 0xe5103450, "IRQ_19" },
+	{ 0xe5103460,	"task_alive_counters[0]" },
+	{ 0xe5103464,	"task_alive_counters[1]" },
+	{ 0xe5103468,	"task_alive_counters[2]" },
+	{ 0xe510346c,	"task_alive_counters[3]" },
+	{ 0xe5103470,	"task_alive_counters[4]" },
+	{ 0xe5103474,	"task_alive_counters[5]" },
+	{ 0xe5103478,	"task_alive_counters[6]" },
+	{ 0xe510347c,	"task_alive_counters[7]" },
+	{ 0xe5103480,	"task_alive_counters[8]" },
+	{ 0xe5103484,	"task_alive_counters[9]" },
+	{ 0xe5103488,	"task_alive_counters[10]" },
+	{ 0xe510348c,	"task_alive_counters[11]" },
+	{ 0xe5103490,	"task_alive_counters[12]" },
+	{ 0xe5103494,	"task_alive_counters[13]" },
+	{ 0xe5103498,	"task_alive_counters[14]" },
+	{ 0xe510349c,	"task_alive_counters[15]" },
+	{ 0xe51034a0,	"task_alive_counters[16]" },
+	{ 0xe51034a4,	"task_alive_counters[17]" },
+	{ 0xe51034a8,	"task_alive_counters[18]" },
+	{ 0xe51034ac,	"task_alive_counters[19]" },
+	{ 0xe51034b0,	"task_false_trigger[0]" },
+	{ 0xe51034b4,	"task_false_trigger[1]" },
+	{ 0xe51034b8,	"task_false_trigger[2]" },
+	{ 0xe51034bc,	"task_false_trigger[3]" },
+	{ 0xe51034c0,	"task_false_trigger[4]" },
+	{ 0xe51034c4,	"task_false_trigger[5]" },
+	{ 0xe51034c8,	"task_false_trigger[6]" },
+	{ 0xe51034cc,	"task_false_trigger[7]" },
+	{ 0xe51034d0,	"task_false_trigger[8]" },
+	{ 0xe51034d4,	"task_false_trigger[9]" },
+	{ 0xe51034d8,	"task_false_trigger[10]" },
+	{ 0xe51034dc,	"task_false_trigger[11]" },
+	{ 0xe51034e0,	"task_false_trigger[12]" },
+	{ 0xe51034e4,	"task_false_trigger[13]" },
+	{ 0xe51034e8,	"task_false_trigger[14]" },
+	{ 0xe51034ec,	"task_false_trigger[15]" },
+	{ 0xe51034f0,	"task_false_trigger[16]" },
+	{ 0xe51034f4,	"task_false_trigger[17]" },
+	{ 0xe51034f8,	"task_false_trigger[18]" },
+	{ 0xe51034fc,	"task_false_trigger[19]" },
+	{ 0xe5103500,	"tqew_ac[0]" },
+	{ 0xe5103504,	"tqew_ac[1]" },
+	{ 0xe5103508,	"tqew_ac[2]" },
+	{ 0xe510350c,	"tqew_ac[3]" },
+	{ 0xe5103510,	"tqew_ac_avail[0]" },
+	{ 0xe5103514,	"tqew_ac_avail[1]" },
+	{ 0xe5103518,	"tqew_ac_avail[2]" },
+	{ 0xe510351c,	"tqew_ac_avail[3]" },
+	{ 0xe5103520,	"tqew_air_humble" },
+	{ 0xe5103524,	"tqew_air_suppress" },
+	{ 0xe5103528,	"tqew_air_use_idletime" },
+	{ 0xe510352c,	"tqew_air_dequeue_only" },
+	{ 0xe5103530,	"tqew_pkt_pending_for_txdone" },
+	{ 0xe5103534,	"tqew_descr_alloc_fail" },
+	{ 0xe5103538,	"tqew_ring_alloc_fail" },
+	{ 0xe510353c,	"tqew_pop_alloc_fail" },
+	{ 0xe5103540,	"tqew_pop_sw_limit" },
+	{ 0xe5103544,	"tqew_pop_empty" },
+	{ 0xe5103548,	"tqew_available_set" },
+	{ 0xe510354c,	"tqew_available_reset" },
+	{ 0xe5103550,	"tqew_rx" },
+	{ 0xe5103554,	"tqew_drop" },
+	{ 0xe5103558,	"tqew_free" },
+	{ 0xe510355c,	"tqew_buf_invalid" },
+	{ 0xe5103560,	"wmac_tx_done[0]" },
+	{ 0xe5103564,	"wmac_tx_done[1]" },
+	{ 0xe5103568,	"wmac_tx_done[2]" },
+	{ 0xe510356c,	"wmac_tx_done[3]" },
+	{ 0xe5103570,	"agg_aggregate_flag" },
+	{ 0xe5103574,	"agg_aggressive_agg" },
+	{ 0xe5103578,	"hdrs_available_recent_min" },
+	{ 0xe510357c,	"agg_states[0]" },
+	{ 0xe5103580,	"agg_states[1]" },
+	{ 0xe5103584,	"agg_states[2]" },
+	{ 0xe5103588,	"agg_states[3]" },
+	{ 0xe510358c,	"agg_states[4]" },
+	{ 0xe5103590,	"ethq_push" },
+	{ 0xe5103594,	"ethq_pop" },
+	{ 0xe5103598,	"agg_aggregate_mpdu" },
+	{ 0xe510359c,	"agg_aggregate_msdu" },
+	{ 0xe51035a0,	"agg_singleton_mpdu" },
+	{ 0xe51035a4,	"agg_singleton_mgmt" },
+	{ 0xe51035a8,	"agg_singleton_ctl" },
+	{ 0xe51035ac,	"agg_singleton_probe" },
+	{ 0xe51035b0,	"agg_4K_amsdu" },
+	{ 0xe51035b4,	"agg_8K_amsdu" },
+	{ 0xe51035b8,	"agg_11K_amsdu" },
+	{ 0xe51035bc,	"tx_feedback_success" },
+	{ 0xe51035c0,	"tx_feedback_fail" },
+	{ 0xe51035c4,	"tx_done_status_success" },
+	{ 0xe51035c8,	"tx_done_status_timeout" },
+	{ 0xe51035cc,	"tx_done_status_xretry" },
+	{ 0xe51035d0,	"tx_done_status_timeout_xretry" },
+	{ 0xe51035d4,	"tx_done_pkt_chain_reset" },
+	{ 0xe51035d8,	"tx_done_pkt_chain_success" },
+	{ 0xe51035dc,	"tx_done_pkt_chain_drop_tid_down" },
+	{ 0xe51035e0,	"tx_done_pkt_chain_drop_xattempts" },
+	{ 0xe51035e4,	"tx_done_singleton_finish" },
+	{ 0xe51035e8,	"tx_done_singleton_swretry" },
+	{ 0xe51035ec,	"tx_done_aggregate_finish" },
+	{ 0xe51035f0,	"tx_done_aggregate_hwretry" },
+	{ 0xe51035f4,	"tx_done_aggregate_swretry" },
+	{ 0xe51035f8,	"tx_done_mpdu_swretry" },
+	{ 0xe51035fc,	"tx_sample" },
+	{ 0xe5103600,	"tx_bw_sample" },
+	{ 0xe5103604,	"tx_swretry_lower_bw" },
+	{ 0xe5103608,	"tx_swretry_agg_exceed" },
+	{ 0xe510360c,	"tx_scale_base_20m" },
+	{ 0xe5103610,	"tx_scale_base_40m" },
+	{ 0xe5103614,	"tx_scale_base_80m" },
+	{ 0xe5103618,	"tx_scale_max" },
+	{ 0xe510361c,	"tx_scale_overstep" },
+	{ 0xe5103620,	"alloc_tqew_fast" },
+	{ 0xe5103624,	"free_tqew_fast" },
+	{ 0xe5103628,	"alloc_tqew_slow" },
+	{ 0xe510362c,	"free_tqew_slow" },
+	{ 0xe5103630,	"alloc_tqew_local" },
+	{ 0xe5103634,	"free_tqew_local" },
+	{ 0xe5103638,	"alloc_hdr_fast" },
+	{ 0xe510363c,	"free_hdr_fast" },
+	{ 0xe5103640,	"alloc_hdr_slow" },
+	{ 0xe5103644,	"free_hdr_slow" },
+	{ 0xe5103648,	"alloc_msdu_hdr_failed" },
+	{ 0xe510364c,	"alloc_mpdu_hdr_failed" },
+	{ 0xe5103650,	"alloc_tid_superfast" },
+	{ 0xe5103654,	"free_tid_superfast" },
+	{ 0xe5103658,	"alloc_tid_fast" },
+	{ 0xe510365c,	"free_tid_fast" },
+	{ 0xe5103660,	"alloc_tid_slow" },
+	{ 0xe5103664,	"free_tid_slow" },
+	{ 0xe5103668,	"alloc_node_rate_fast" },
+	{ 0xe510366c,	"free_node_rate_fast" },
+	{ 0xe5103670,	"alloc_node_rate_slow" },
+	{ 0xe5103674,	"free_node_rate_slow" },
+	{ 0xe5103678,	"alloc_node_superfast" },
+	{ 0xe510367c,	"free_node_superfast" },
+	{ 0xe5103680,	"alloc_node_fast" },
+	{ 0xe5103684,	"free_node_fast" },
+	{ 0xe5103688,	"alloc_fcs" },
+	{ 0xe510368c,	"free_fcs" },
+	{ 0xe5103690,	"alloc_mac_descr" },
+	{ 0xe5103694,	"free_mac_descr" },
+	{ 0xe5103698,	"tx_mac_push" },
+	{ 0xe510369c,	"tx_mac_idle" },
+	{ 0xe51036a0,	"tx_mac_rts" },
+	{ 0xe51036a4,	"tx_mac_cts2self" },
+	{ 0xe51036a8,	"tx_vlan_drop" },
+	{ 0xe51036ac,	"tx_acm_drop" },
+	{ 0xe51036b0,	"tx_ps_drop" },
+	{ 0xe51036b4,	"ocs_tx_suspend" },
+	{ 0xe51036b8,	"ocs_tx_resume" },
+	{ 0xe51036bc,	"ocs_singleton_suspend" },
+	{ 0xe51036c0,	"ocs_ampdu_suspend" },
+	{ 0xe51036c4,	"ocs_frame_created" },
+	{ 0xe51036c8,	"pwr_mgmt_awake" },
+	{ 0xe51036cc,	"pwr_mgmt_sleep" },
+	{ 0xe51036d0,	"pwr_mgmt_tx" },
+	{ 0xe51036d4,	"pspoll_rx" },
+	{ 0xe51036d8,	"dtim_q_push" },
+	{ 0xe51036dc,	"dtim_q_pop" },
+	{ 0xe51036e0,	"dtim_trigger" },
+	{ 0xe51036e4,	"dtim_q_overflow" },
+	{ 0xe51036e8,	"tx_restrict_dropped" },
+	{ 0xe51036ec,	"tx_throt_dropped" },
+	{ 0xe51036f0,	"tx_block_singleton" },
+	{ 0xe51036f4,	"tx_force_unblock_tid" },
+	{ 0xe51036f8,	"tx_ctl_pkt_hbm_alloc_fails" },
+	{ 0xe51036fc,	"tx_ctl_pkt_alloc_descr_fails" },
+	{ 0xe5103700,	"tx_bar_alloc_ctl_pkt_fails" },
+	{ 0xe5103704,	"tx_valid_bit_not_set" },
+	{ 0xe5103708,	"wmm_ps_tx" },
+	{ 0xe510370c,	"wmm_ps_tx_null_frames" },
+	{ 0xe5103710,	"wmm_ps_tx_more_data_frames" },
+	{ 0xe5103714,	"wmm_ps_tx_eosp_frames" },
+	{ 0xe5103718,	"mu_tx_su_count" },
+	{ 0xe510371c,	"mu_tx_send_mu_fail" },
+	{ 0xe5103720,	"mu_tx_push_count" },
+	{ 0xe5103724,	"mu_tx_done_count" },
+	{ 0xe5103728,	"mu_tx_done_succ" },
+	{ 0xe510372c,	"mu_tx_done_fail" },
+	{ 0xe5103730,	"mu_tx_sample" },
+	{ 0xe5103734,	"mu_bar_bitmap_non_zero" },
+	{ 0xe5103738,	"mu_bar_bitmap_zero" },
+	{ 0xe510373c,	"mu_mac_wmac1_ipc_push" },
+	{ 0xe5103740,	"mu_mac_wmac1_auc_push" },
+	{ 0xe5103744,	"mu_wmac1_resets" },
+	{ 0xe5103748,	"mu_tx_swretry_agg_exceed" },
+	{ 0xe510374c,	"mu_tx_buddy_try" },
+	{ 0xe5103750,	"mu_tx_buddy_fail_wmac" },
+	{ 0xe5103754,	"mu_tx_buddy_fail_ptid" },
+	{ 0xe5103758,	"mu_tx_buddy_fail_rate" },
+	{ 0xe510375c,	"mu_tx_buddy_fail_create_agg" },
+	{ 0xe5103760,	"mu_tx_buddy_mu_only_timeout" },
+	{ 0xe5103764,	"mu_tx_another_q_push_succ" },
+	{ 0xe5103768,	"mu_tx_another_q_push_fail" },
+	{ 0xe510376c,	"mu_tx_buddy_multi_tid" },
+	{ 0xe5103770,	"mu_tx_wmac_0_done_count" },
+	{ 0xe5103774,	"mu_tx_wmac_0_bitmap_non_zero" },
+	{ 0xe5103778,	"mu_tx_wmac_0_bitmap_zero" },
+	{ 0xe510377c,	"mu_tx_wmac_0_done_timeout" },
+	{ 0xe5103780,	"mu_tx_wmac_0_done_succ" },
+	{ 0xe5103784,	"mu_tx_wmac_0_done_fail" },
+	{ 0xe5103788,	"mu_tx_wmac_1_done_succ" },
+	{ 0xe510378c,	"mu_tx_wmac_1_done_fail" },
+	{ 0xe5103790,	"mu_tx_wmac_0_mpdu_total" },
+	{ 0xe5103794,	"mu_tx_wmac_0_mpdu_succ" },
+	{ 0xe5103798,	"mu_tx_wmac_1_mpdu_total" },
+	{ 0xe510379c,	"mu_tx_wmac_1_mpdu_succ" },
+	{ 0xe51037a0,	"mu_tx_qnum[0]" },
+	{ 0xe51037a4,	"mu_tx_qnum[1]" },
+	{ 0xe51037a8,	"mu_tx_qnum[2]" },
+	{ 0xe51037ac,	"mu_tx_qnum[3]" },
+	{ 0xe51037b0,	"tqe_sema_fails" },
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_auc_stats_fields.nomu.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_auc_stats_fields.nomu.h
new file mode 100644
index 0000000..b828291
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_auc_stats_fields.nomu.h
@@ -0,0 +1,235 @@
+	{ 0xe51011cc, "sleep" },
+	{ 0xe5101200, "jiffies" },
+	{ 0xe51033dc, "IRQ_0" },
+	{ 0xe51033e0, "IRQ_1" },
+	{ 0xe51033e4, "IRQ_2" },
+	{ 0xe51033e8, "IRQ_3" },
+	{ 0xe51033ec, "IRQ_4" },
+	{ 0xe51033f0, "IRQ_5" },
+	{ 0xe51033f4, "IRQ_6" },
+	{ 0xe51033f8, "IRQ_7" },
+	{ 0xe51033fc, "IRQ_8" },
+	{ 0xe5103400, "IRQ_9" },
+	{ 0xe5103404, "IRQ_10" },
+	{ 0xe5103408, "IRQ_11" },
+	{ 0xe510340c, "IRQ_12" },
+	{ 0xe5103410, "IRQ_13" },
+	{ 0xe5103414, "IRQ_14" },
+	{ 0xe5103418, "IRQ_15" },
+	{ 0xe510341c, "IRQ_16" },
+	{ 0xe5103420, "IRQ_17" },
+	{ 0xe5103424, "IRQ_18" },
+	{ 0xe5103428, "IRQ_19" },
+	{ 0xe5103438,	"task_alive_counters[0]" },
+	{ 0xe510343c,	"task_alive_counters[1]" },
+	{ 0xe5103440,	"task_alive_counters[2]" },
+	{ 0xe5103444,	"task_alive_counters[3]" },
+	{ 0xe5103448,	"task_alive_counters[4]" },
+	{ 0xe510344c,	"task_alive_counters[5]" },
+	{ 0xe5103450,	"task_alive_counters[6]" },
+	{ 0xe5103454,	"task_alive_counters[7]" },
+	{ 0xe5103458,	"task_alive_counters[8]" },
+	{ 0xe510345c,	"task_alive_counters[9]" },
+	{ 0xe5103460,	"task_alive_counters[10]" },
+	{ 0xe5103464,	"task_alive_counters[11]" },
+	{ 0xe5103468,	"task_alive_counters[12]" },
+	{ 0xe510346c,	"task_alive_counters[13]" },
+	{ 0xe5103470,	"task_alive_counters[14]" },
+	{ 0xe5103474,	"task_alive_counters[15]" },
+	{ 0xe5103478,	"task_alive_counters[16]" },
+	{ 0xe510347c,	"task_alive_counters[17]" },
+	{ 0xe5103480,	"task_alive_counters[18]" },
+	{ 0xe5103484,	"task_alive_counters[19]" },
+	{ 0xe5103488,	"task_false_trigger[0]" },
+	{ 0xe510348c,	"task_false_trigger[1]" },
+	{ 0xe5103490,	"task_false_trigger[2]" },
+	{ 0xe5103494,	"task_false_trigger[3]" },
+	{ 0xe5103498,	"task_false_trigger[4]" },
+	{ 0xe510349c,	"task_false_trigger[5]" },
+	{ 0xe51034a0,	"task_false_trigger[6]" },
+	{ 0xe51034a4,	"task_false_trigger[7]" },
+	{ 0xe51034a8,	"task_false_trigger[8]" },
+	{ 0xe51034ac,	"task_false_trigger[9]" },
+	{ 0xe51034b0,	"task_false_trigger[10]" },
+	{ 0xe51034b4,	"task_false_trigger[11]" },
+	{ 0xe51034b8,	"task_false_trigger[12]" },
+	{ 0xe51034bc,	"task_false_trigger[13]" },
+	{ 0xe51034c0,	"task_false_trigger[14]" },
+	{ 0xe51034c4,	"task_false_trigger[15]" },
+	{ 0xe51034c8,	"task_false_trigger[16]" },
+	{ 0xe51034cc,	"task_false_trigger[17]" },
+	{ 0xe51034d0,	"task_false_trigger[18]" },
+	{ 0xe51034d4,	"task_false_trigger[19]" },
+	{ 0xe51034d8,	"tqew_ac[0]" },
+	{ 0xe51034dc,	"tqew_ac[1]" },
+	{ 0xe51034e0,	"tqew_ac[2]" },
+	{ 0xe51034e4,	"tqew_ac[3]" },
+	{ 0xe51034e8,	"tqew_ac_avail[0]" },
+	{ 0xe51034ec,	"tqew_ac_avail[1]" },
+	{ 0xe51034f0,	"tqew_ac_avail[2]" },
+	{ 0xe51034f4,	"tqew_ac_avail[3]" },
+	{ 0xe51034f8,	"tqew_air_humble" },
+	{ 0xe51034fc,	"tqew_air_suppress" },
+	{ 0xe5103500,	"tqew_air_use_idletime" },
+	{ 0xe5103504,	"tqew_air_dequeue_only" },
+	{ 0xe5103508,	"tqew_pkt_pending_for_txdone" },
+	{ 0xe510350c,	"tqew_descr_alloc_fail" },
+	{ 0xe5103510,	"tqew_ring_alloc_fail" },
+	{ 0xe5103514,	"tqew_pop_alloc_fail" },
+	{ 0xe5103518,	"tqew_pop_sw_limit" },
+	{ 0xe510351c,	"tqew_pop_empty" },
+	{ 0xe5103520,	"tqew_available_set" },
+	{ 0xe5103524,	"tqew_available_reset" },
+	{ 0xe5103528,	"tqew_rx" },
+	{ 0xe510352c,	"tqew_drop" },
+	{ 0xe5103530,	"tqew_free" },
+	{ 0xe5103534,	"tqew_buf_invalid" },
+	{ 0xe5103538,	"wmac_tx_done[0]" },
+	{ 0xe510353c,	"wmac_tx_done[1]" },
+	{ 0xe5103540,	"wmac_tx_done[2]" },
+	{ 0xe5103544,	"wmac_tx_done[3]" },
+	{ 0xe5103548,	"agg_aggregate_flag" },
+	{ 0xe510354c,	"agg_aggressive_agg" },
+	{ 0xe5103550,	"hdrs_available_recent_min" },
+	{ 0xe5103554,	"agg_states[0]" },
+	{ 0xe5103558,	"agg_states[1]" },
+	{ 0xe510355c,	"agg_states[2]" },
+	{ 0xe5103560,	"agg_states[3]" },
+	{ 0xe5103564,	"agg_states[4]" },
+	{ 0xe5103568,	"ethq_push" },
+	{ 0xe510356c,	"ethq_pop" },
+	{ 0xe5103570,	"agg_aggregate_mpdu" },
+	{ 0xe5103574,	"agg_aggregate_msdu" },
+	{ 0xe5103578,	"agg_singleton_mpdu" },
+	{ 0xe510357c,	"agg_singleton_mgmt" },
+	{ 0xe5103580,	"agg_singleton_ctl" },
+	{ 0xe5103584,	"agg_singleton_probe" },
+	{ 0xe5103588,	"agg_4K_amsdu" },
+	{ 0xe510358c,	"agg_8K_amsdu" },
+	{ 0xe5103590,	"agg_11K_amsdu" },
+	{ 0xe5103594,	"tx_feedback_success" },
+	{ 0xe5103598,	"tx_feedback_fail" },
+	{ 0xe510359c,	"tx_done_status_success" },
+	{ 0xe51035a0,	"tx_done_status_timeout" },
+	{ 0xe51035a4,	"tx_done_status_xretry" },
+	{ 0xe51035a8,	"tx_done_status_timeout_xretry" },
+	{ 0xe51035ac,	"tx_done_pkt_chain_reset" },
+	{ 0xe51035b0,	"tx_done_pkt_chain_success" },
+	{ 0xe51035b4,	"tx_done_pkt_chain_drop_tid_down" },
+	{ 0xe51035b8,	"tx_done_pkt_chain_drop_xattempts" },
+	{ 0xe51035bc,	"tx_done_singleton_finish" },
+	{ 0xe51035c0,	"tx_done_singleton_swretry" },
+	{ 0xe51035c4,	"tx_done_aggregate_finish" },
+	{ 0xe51035c8,	"tx_done_aggregate_hwretry" },
+	{ 0xe51035cc,	"tx_done_aggregate_swretry" },
+	{ 0xe51035d0,	"tx_done_mpdu_swretry" },
+	{ 0xe51035d4,	"tx_sample" },
+	{ 0xe51035d8,	"tx_bw_sample" },
+	{ 0xe51035dc,	"tx_swretry_lower_bw" },
+	{ 0xe51035e0,	"tx_swretry_agg_exceed" },
+	{ 0xe51035e4,	"tx_scale_base_20m" },
+	{ 0xe51035e8,	"tx_scale_base_40m" },
+	{ 0xe51035ec,	"tx_scale_base_80m" },
+	{ 0xe51035f0,	"tx_scale_max" },
+	{ 0xe51035f4,	"tx_scale_overstep" },
+	{ 0xe51035f8,	"alloc_tqew_fast" },
+	{ 0xe51035fc,	"free_tqew_fast" },
+	{ 0xe5103600,	"alloc_tqew_slow" },
+	{ 0xe5103604,	"free_tqew_slow" },
+	{ 0xe5103608,	"alloc_tqew_local" },
+	{ 0xe510360c,	"free_tqew_local" },
+	{ 0xe5103610,	"alloc_hdr_fast" },
+	{ 0xe5103614,	"free_hdr_fast" },
+	{ 0xe5103618,	"alloc_hdr_slow" },
+	{ 0xe510361c,	"free_hdr_slow" },
+	{ 0xe5103620,	"alloc_msdu_hdr_failed" },
+	{ 0xe5103624,	"alloc_mpdu_hdr_failed" },
+	{ 0xe5103628,	"alloc_tid_superfast" },
+	{ 0xe510362c,	"free_tid_superfast" },
+	{ 0xe5103630,	"alloc_tid_fast" },
+	{ 0xe5103634,	"free_tid_fast" },
+	{ 0xe5103638,	"alloc_tid_slow" },
+	{ 0xe510363c,	"free_tid_slow" },
+	{ 0xe5103640,	"alloc_node_rate_fast" },
+	{ 0xe5103644,	"free_node_rate_fast" },
+	{ 0xe5103648,	"alloc_node_rate_slow" },
+	{ 0xe510364c,	"free_node_rate_slow" },
+	{ 0xe5103650,	"alloc_node_superfast" },
+	{ 0xe5103654,	"free_node_superfast" },
+	{ 0xe5103658,	"alloc_node_fast" },
+	{ 0xe510365c,	"free_node_fast" },
+	{ 0xe5103660,	"alloc_fcs" },
+	{ 0xe5103664,	"free_fcs" },
+	{ 0xe5103668,	"alloc_mac_descr" },
+	{ 0xe510366c,	"free_mac_descr" },
+	{ 0xe5103670,	"tx_mac_push" },
+	{ 0xe5103674,	"tx_mac_idle" },
+	{ 0xe5103678,	"tx_mac_rts" },
+	{ 0xe510367c,	"tx_mac_cts2self" },
+	{ 0xe5103680,	"tx_vlan_drop" },
+	{ 0xe5103684,	"tx_acm_drop" },
+	{ 0xe5103688,	"tx_ps_drop" },
+	{ 0xe510368c,	"ocs_tx_suspend" },
+	{ 0xe5103690,	"ocs_tx_resume" },
+	{ 0xe5103694,	"ocs_singleton_suspend" },
+	{ 0xe5103698,	"ocs_ampdu_suspend" },
+	{ 0xe510369c,	"ocs_frame_created" },
+	{ 0xe51036a0,	"pwr_mgmt_awake" },
+	{ 0xe51036a4,	"pwr_mgmt_sleep" },
+	{ 0xe51036a8,	"pwr_mgmt_tx" },
+	{ 0xe51036ac,	"pspoll_rx" },
+	{ 0xe51036b0,	"dtim_q_push" },
+	{ 0xe51036b4,	"dtim_q_pop" },
+	{ 0xe51036b8,	"dtim_trigger" },
+	{ 0xe51036bc,	"dtim_q_overflow" },
+	{ 0xe51036c0,	"tx_restrict_dropped" },
+	{ 0xe51036c4,	"tx_throt_dropped" },
+	{ 0xe51036c8,	"tx_block_singleton" },
+	{ 0xe51036cc,	"tx_force_unblock_tid" },
+	{ 0xe51036d0,	"tx_ctl_pkt_hbm_alloc_fails" },
+	{ 0xe51036d4,	"tx_ctl_pkt_alloc_descr_fails" },
+	{ 0xe51036d8,	"tx_bar_alloc_ctl_pkt_fails" },
+	{ 0xe51036dc,	"tx_valid_bit_not_set" },
+	{ 0xe51036e0,	"wmm_ps_tx" },
+	{ 0xe51036e4,	"wmm_ps_tx_null_frames" },
+	{ 0xe51036e8,	"wmm_ps_tx_more_data_frames" },
+	{ 0xe51036ec,	"wmm_ps_tx_eosp_frames" },
+	{ 0xe51036f0,	"mu_tx_su_count" },
+	{ 0xe51036f4,	"mu_tx_send_mu_fail" },
+	{ 0xe51036f8,	"mu_tx_push_count" },
+	{ 0xe51036fc,	"mu_tx_done_count" },
+	{ 0xe5103700,	"mu_tx_done_succ" },
+	{ 0xe5103704,	"mu_tx_done_fail" },
+	{ 0xe5103708,	"mu_tx_sample" },
+	{ 0xe510370c,	"mu_bar_bitmap_non_zero" },
+	{ 0xe5103710,	"mu_bar_bitmap_zero" },
+	{ 0xe5103714,	"mu_mac_wmac1_ipc_push" },
+	{ 0xe5103718,	"mu_mac_wmac1_auc_push" },
+	{ 0xe510371c,	"mu_wmac1_resets" },
+	{ 0xe5103720,	"mu_tx_swretry_agg_exceed" },
+	{ 0xe5103724,	"mu_tx_buddy_try" },
+	{ 0xe5103728,	"mu_tx_buddy_fail_wmac" },
+	{ 0xe510372c,	"mu_tx_buddy_fail_ptid" },
+	{ 0xe5103730,	"mu_tx_buddy_fail_rate" },
+	{ 0xe5103734,	"mu_tx_buddy_fail_create_agg" },
+	{ 0xe5103738,	"mu_tx_buddy_mu_only_timeout" },
+	{ 0xe510373c,	"mu_tx_another_q_push_succ" },
+	{ 0xe5103740,	"mu_tx_another_q_push_fail" },
+	{ 0xe5103744,	"mu_tx_buddy_multi_tid" },
+	{ 0xe5103748,	"mu_tx_wmac_0_done_count" },
+	{ 0xe510374c,	"mu_tx_wmac_0_bitmap_non_zero" },
+	{ 0xe5103750,	"mu_tx_wmac_0_bitmap_zero" },
+	{ 0xe5103754,	"mu_tx_wmac_0_done_timeout" },
+	{ 0xe5103758,	"mu_tx_wmac_0_done_succ" },
+	{ 0xe510375c,	"mu_tx_wmac_0_done_fail" },
+	{ 0xe5103760,	"mu_tx_wmac_1_done_succ" },
+	{ 0xe5103764,	"mu_tx_wmac_1_done_fail" },
+	{ 0xe5103768,	"mu_tx_wmac_0_mpdu_total" },
+	{ 0xe510376c,	"mu_tx_wmac_0_mpdu_succ" },
+	{ 0xe5103770,	"mu_tx_wmac_1_mpdu_total" },
+	{ 0xe5103774,	"mu_tx_wmac_1_mpdu_succ" },
+	{ 0xe5103778,	"mu_tx_qnum[0]" },
+	{ 0xe510377c,	"mu_tx_qnum[1]" },
+	{ 0xe5103780,	"mu_tx_qnum[2]" },
+	{ 0xe5103784,	"mu_tx_qnum[3]" },
+	{ 0xe5103788,	"tqe_sema_fails" },
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_bb_mutex.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_bb_mutex.h
new file mode 100644
index 0000000..6606a85
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_bb_mutex.h
@@ -0,0 +1,222 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_BB_MUTEX_H
+#define __QTN_BB_MUTEX_H
+
+#include "mproc_sync.h"
+
+#ifndef __ASSEMBLY__
+
+#define QTN_BB_RESET_REGISTER_VAL		0x7A200
+#define QTN_BB_HRESET_REGISTER_VAL		0x10000
+
+struct qtn_bb_mutex
+{
+	volatile u_int32_t ref_counter;
+	volatile u_int32_t collision_counter;
+	volatile u_int32_t enter_counter;
+};
+
+RUBY_INLINE struct qtn_bb_mutex*
+qtn_bb_mutex_get(void)
+{
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+	return (struct qtn_bb_mutex*)qtn_mproc_sync_nocache
+		(qtn_mproc_sync_shared_params_get()->bb_mutex_bus);
+#else
+	/* Linux target */
+	return qtn_mproc_sync_shared_params_get()->bb_mutex_lhost;
+#endif
+}
+
+/*
+ * Atomically increase reference counter, acquiring reader mutex if not previously held
+ */
+#if QTN_SEM_TRACE
+#define qtn_bb_mutex_enter(_cpu)    qtn_bb_mutex_enter_dbg(_cpu, __FILE__, __LINE__)
+RUBY_WEAK(qtn_bb_mutex_enter_dbg) int
+qtn_bb_mutex_enter_dbg(QTN_SOC_CPU current_cpu, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_bb_mutex_enter) int
+qtn_bb_mutex_enter(QTN_SOC_CPU current_cpu)
+#endif
+{
+	struct qtn_bb_mutex *bb_mutex = qtn_bb_mutex_get();
+	unsigned long flags;
+
+	__qtn_mproc_sync_spin_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+	if (bb_mutex->ref_counter == 0) {
+		++(bb_mutex->enter_counter);
+	} else {
+		++(bb_mutex->collision_counter);
+	}
+	__qtn_mproc_refcnt_inc(&bb_mutex->ref_counter);
+	__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+
+	return 0;
+}
+
+/*
+ * Atomically decrease reference counter, releasing reader mutex if transitioning to 0
+ */
+#if QTN_SEM_TRACE
+#define qtn_bb_mutex_leave(_cpu)    qtn_bb_mutex_leave_dbg(_cpu, __FILE__, __LINE__)
+RUBY_WEAK(qtn_bb_mutex_leave_dbg) int
+qtn_bb_mutex_leave_dbg(QTN_SOC_CPU current_cpu, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_bb_mutex_leave) int
+qtn_bb_mutex_leave(QTN_SOC_CPU current_cpu)
+#endif
+{
+	struct qtn_bb_mutex *bb_mutex = qtn_bb_mutex_get();
+	unsigned long flags;
+
+	__qtn_mproc_sync_spin_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+	__qtn_mproc_refcnt_dec(&bb_mutex->ref_counter);
+	__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+
+	return 0;
+}
+
+/*
+ * Enable RIFS mode. Safe to call using any processor.
+ * Make sure that SoC support this mode before calling.
+ */
+#if QTN_SEM_TRACE
+#define qtn_rifs_mode_enable(_cpu)   qtn_rifs_mode_enable_dbg(_cpu, __FILE__, __LINE__)
+RUBY_WEAK(qtn_rifs_mode_enable_dbg) void
+qtn_rifs_mode_enable_dbg(QTN_SOC_CPU current_cpu, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_rifs_mode_enable) void
+qtn_rifs_mode_enable(QTN_SOC_CPU current_cpu)
+#endif
+{
+	unsigned long flags;
+
+	__qtn_mproc_sync_spin_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+	qtn_mproc_sync_mem_write(RUBY_QT3_BB_GLBL_PREG_RIF_ENABLE, RUBY_QT3_BB_GLBL_PREG_RIF_ENABLE_ON);
+	__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+}
+
+/*
+ * Disable RIFS mode. Safe to call from any processor.
+ * Make sure that the SoC supports this mode before calling.
+ */
+#if QTN_SEM_TRACE
+#define qtn_rifs_mode_disable(_cpu)   qtn_rifs_mode_disable_dbg(_cpu, __FILE__, __LINE__)
+RUBY_WEAK(qtn_rifs_mode_disable_dbg) void
+qtn_rifs_mode_disable_dbg(QTN_SOC_CPU current_cpu, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_rifs_mode_disable) void
+qtn_rifs_mode_disable(QTN_SOC_CPU current_cpu)
+#endif
+{
+	unsigned long flags;
+
+	__qtn_mproc_sync_spin_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+	qtn_mproc_sync_mem_write(RUBY_QT3_BB_GLBL_PREG_RIF_ENABLE, RUBY_QT3_BB_GLBL_PREG_RIF_ENABLE_OFF);
+	__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, &flags);
+}
+
+/*
+ * Acquiring writer mutex
+ */
+#if QTN_SEM_TRACE
+#define qtn_bb_mutex_reset_enter(_cpu, _flags)   qtn_bb_mutex_reset_enter_dbg(_cpu, _flags, __FILE__, __LINE__)
+RUBY_WEAK(qtn_bb_mutex_reset_enter_dbg) void
+qtn_bb_mutex_reset_enter_dbg(QTN_SOC_CPU current_cpu, unsigned long *flags, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_bb_mutex_reset_enter) void
+qtn_bb_mutex_reset_enter(QTN_SOC_CPU current_cpu, unsigned long *flags)
+#endif
+{
+	struct qtn_bb_mutex *bb_mutex = qtn_bb_mutex_get();
+
+	while (1) {
+		__qtn_mproc_sync_spin_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, flags);
+		if (bb_mutex->ref_counter == 0) {
+			break;
+		}
+		__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, flags);
+	}
+}
+
+/*
+ * Try to acquire the writer mutex (on success, qtn_bb_mutex_reset_leave() must be called to release it)
+ */
+#if QTN_SEM_TRACE
+#define qtn_bb_mutex_reset_try_enter(_cpu, _flags)   qtn_bb_mutex_reset_try_enter_dbg(_cpu, _flags, __FILE__, __LINE__)
+RUBY_WEAK(qtn_bb_mutex_reset_try_enter_dbg) int
+qtn_bb_mutex_reset_try_enter_dbg(QTN_SOC_CPU current_cpu, unsigned long *flags, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_bb_mutex_reset_try_enter) int
+qtn_bb_mutex_reset_try_enter(QTN_SOC_CPU current_cpu, unsigned long *flags)
+#endif
+{
+	int ret = 0;
+	struct qtn_bb_mutex *bb_mutex = qtn_bb_mutex_get();
+
+	__qtn_mproc_sync_spin_lock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, flags);
+	if (bb_mutex->ref_counter == 0) {
+		ret = 1;
+	} else {
+		__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, flags);
+	}
+
+	return ret;
+}
+
+/*
+ * Release writer mutex
+ */
+#if QTN_SEM_TRACE
+#define qtn_bb_mutex_reset_leave(_cpu, _flags)   qtn_bb_mutex_reset_leave_dbg(_cpu, _flags, __FILE__, __LINE__)
+RUBY_WEAK(qtn_bb_mutex_reset_leave_dbg) void
+qtn_bb_mutex_reset_leave_dbg(QTN_SOC_CPU current_cpu, unsigned long *flags, char *caller, int caller_line)
+#else
+RUBY_WEAK(qtn_bb_mutex_reset_leave) void
+qtn_bb_mutex_reset_leave(QTN_SOC_CPU current_cpu, unsigned long *flags)
+#endif
+{
+	__qtn_mproc_sync_spin_unlock(current_cpu, QTN_ALL_SOC_CPU, QTN_SEM_BB_MUTEX_SEMNUM, flags);
+}
+
+/*
+ * Return true if reset needed.
+ */
+RUBY_INLINE int
+qtn_bb_mutex_is_reset(QTN_SOC_CPU current_cpu, u_int32_t status)
+{
+	return (status & QTN_BB_RESET_REGISTER_VAL);
+}
+
+RUBY_INLINE int
+qtn_bb_mutex_is_hreset(QTN_SOC_CPU current_cpu, u_int32_t status)
+{
+	return (status & QTN_BB_HRESET_REGISTER_VAL);
+}
+
+#endif // #ifndef __ASSEMBLY__
+
+#endif // #ifndef __QTN_BB_MUTEX_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_buffers.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_buffers.h
new file mode 100644
index 0000000..102b336
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_buffers.h
@@ -0,0 +1,106 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_BUFFERS_H
+#define __QTN_BUFFERS_H
+
+#include <common/topaz_config.h>
+
+#define QTN_BUF_USE_11AC_SIZE			1
+#define TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT		1
+
+#define TOPAZ_HBM_SKB_ALLOCATOR			2
+
+#if TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT
+	/*
+	 * Buffer allocation in Topaz when using the switch must be extremely
+	 * careful such that the sum of all possible buffer pileup does not
+	 * exceed the number of possible payloads. Otherwise the hardware will
+	 * start processing null pointers.
+	 */
+	#define TOPAZ_HBM_PAYLOAD_COUNT_S	TOPAZ_HBM_BUF_EMAC_RX_COUNT_S
+	#define QTN_BUFS_EMAC_TX_RING_S		9
+	#define QDRV_MAX_QUEUED_MGMT_FRAMES	256
+	#define QTN_BUFS_WMAC_RX_RING_S		10
+	#define QTN_BUFS_WMAC_TX_NON_LHOST_S	9
+	#define QTN_BUFS_WMAC_TX_QDISC_S	10
+	#define QDRV_TX_SCH_RED_MASK		((1 << 7) - 1)
+#elif QTN_BUF_USE_11AC_SIZE
+	/* Topaz with 11ac buffers, no acceleration */
+	#define TOPAZ_HBM_PAYLOAD_COUNT_S	TOPAZ_HBM_BUF_EMAC_RX_COUNT_S
+	#define QTN_BUFS_EMAC_TX_RING_S		9
+	#define QDRV_MAX_QUEUED_MGMT_FRAMES	256
+	#define QTN_BUFS_WMAC_RX_RING_S		10
+	#define QTN_BUFS_WMAC_TX_NON_LHOST_S	9
+	#define QTN_BUFS_WMAC_TX_QDISC_S	11
+	#define QDRV_TX_SCH_RED_MASK		((1 << 7) - 1)
+#else
+	/* Ruby or Topaz with 11n buffers no acceleration */
+	#define TOPAZ_HBM_PAYLOAD_COUNT_S	12
+	#define QTN_BUFS_EMAC_TX_RING_S		7
+	#define QDRV_MAX_QUEUED_MGMT_FRAMES	512
+	#define QTN_BUFS_WMAC_RX_RING_S		11
+	#define QTN_BUFS_WMAC_TX_NON_LHOST_S	9
+	#define QTN_BUFS_WMAC_TX_QDISC_S	11
+	#define QDRV_TX_SCH_RED_MASK		((1 << 7) - 1)
+#endif
+
+#define TOPAZ_HBM_PAYLOAD_COUNT		(1 << TOPAZ_HBM_PAYLOAD_COUNT_S)
+
+#ifdef QTN_RC_ENABLE_HDP
+#define QTN_BUFS_EMAC_RX_RING_S		12
+#else
+#define QTN_BUFS_EMAC_RX_RING_S		8
+#endif
+#define QTN_BUFS_EMAC_TX_QDISC_S	7
+#define QTN_BUFS_PCIE_TQE_RX_RING_S	12
+#if defined (CONFIG_TOPAZ_PCIE_TARGET)
+#define QTN_BUFS_LHOST_TQE_RX_RING_S	8
+#elif defined (CONFIG_TOPAZ_PCIE_HOST)
+#define QTN_BUFS_LHOST_TQE_RX_RING_S	10
+#elif defined(TOPAZ_VB_CONFIG) || defined(TOPAZ_RGMII_CONFIG) ||  \
+		defined(TOPAZ_VZN_CONFIG) || defined(TOPAZ_RFIC6_CONFIG)
+#define QTN_BUFS_LHOST_TQE_RX_RING_S	11
+#else
+#define QTN_BUFS_LHOST_TQE_RX_RING_S	9
+#endif
+
+#define QTN_BUFS_LHOST_TQE_RX_RING	(1 << QTN_BUFS_LHOST_TQE_RX_RING_S)
+#define QTN_BUFS_EMAC_RX_RING		(1 << QTN_BUFS_EMAC_RX_RING_S)
+#define QTN_BUFS_EMAC_TX_RING		(1 << QTN_BUFS_EMAC_TX_RING_S)
+#define QTN_BUFS_EMAC_TX_QDISC		(1 << QTN_BUFS_EMAC_TX_QDISC_S)
+#define QTN_BUFS_WMAC_RX_RING		(1 << QTN_BUFS_WMAC_RX_RING_S)
+#define QTN_BUFS_WMAC_TX_NON_LHOST	(1 << QTN_BUFS_WMAC_TX_NON_LHOST_S)
+#define QTN_BUFS_WMAC_TX_QDISC		(1 << QTN_BUFS_WMAC_TX_QDISC_S)
+#define QTN_BUFS_PCIE_TQE_RX_RING	(1 << QTN_BUFS_PCIE_TQE_RX_RING_S)
+
+#define QTN_BUFS_CPUS_TOTAL		(1 * (QTN_BUFS_LHOST_TQE_RX_RING))
+#define QTN_BUFS_EMAC_TOTAL		(2 * (QTN_BUFS_EMAC_RX_RING + QTN_BUFS_EMAC_TX_RING + QTN_BUFS_EMAC_TX_QDISC))
+#define QTN_BUFS_WMAC_TOTAL		(1 * (QTN_BUFS_WMAC_RX_RING + QTN_BUFS_WMAC_TX_NON_LHOST + QTN_BUFS_WMAC_TX_QDISC + QDRV_MAX_QUEUED_MGMT_FRAMES))
+#define QTN_BUFS_ALLOC_TOTAL		(QTN_BUFS_CPUS_TOTAL /*+ QTN_BUFS_EMAC_TOTAL*/ + QTN_BUFS_WMAC_TOTAL)
+
+#if TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT && (TOPAZ_HBM_PAYLOAD_COUNT <= QTN_BUFS_ALLOC_TOTAL)
+	#error "Payload buffers distribution error"
+#endif
+
+#endif	// __QTN_BUFFERS_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_cca.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_cca.h
new file mode 100644
index 0000000..1520931
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_cca.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ *
+ * Shared data structure between lhost, MuC and the ADM module for CCA measurement
+ */
+
+#ifndef _QTN_CCA_H
+#define _QTN_CCA_H
+
+struct out_cca_info {
+	u_int64_t	start_tsf;
+	u_int64_t	end_tsf;
+	u_int32_t	cnt_pri_cca;
+	u_int32_t	cnt_sec_cca;
+	u_int32_t	cca_sample_cnt;
+};
+
+#endif	// _QTN_CCA_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_debug.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_debug.h
new file mode 100644
index 0000000..3f1b55e
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_debug.h
@@ -0,0 +1,240 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2008 - 2012 Quantenna Communications, Inc           **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Date        : 21/03/12                                                   **
+**  File        : qtn_debug.h                                                **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef QTN_DEBUG_H_
+#define QTN_DEBUG_H_
+
+/* When set to 1 LHOST formats AuC print output. It is not possible to use %s and %pM
+conversion specifiers. Also the number of arguments printed is limited to 8 and therefore
+stack size is limited to 32.
+When set to 0 AuC formats the output and passes the formatted line to the LHOST, which
+prints it. */
+#define AUC_LHOST_PRINT_FORMAT	1
+#define PRINT_STACK_SIZE	32
+
+#if defined(MUC_BUILD)
+#define	DBGFN		uc_printk
+#elif defined(AUC_BUILD)
+#define	DBGFN		auc_os_printf
+#else
+#define	DBGFN		printk
+#endif
+
+#ifndef __GNUC__
+#define __FUNCTION__	""
+#endif
+
+#define DBGFMT  "%s: "
+#define DBGEFMT "%s: ERROR - "
+#define DBGWFMT "%s: WARNING - "
+#define DBGARG  __func__
+#define DBGMACVAR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define DBGMACFMT(a) \
+	(a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define DBGMACFMT_LE(a) \
+	(a)[5], (a)[4], (a)[3], (a)[2], (a)[1], (a)[0]
+
+#define DBGFMT_BYTEFLD3_P	"%u.%u.%u"
+#define DBGFMT_BYTEFLD3_V(_v)	(_v >> 16) & 0xff, (_v >> 8) & 0xff, _v & 0xff
+#define DBGFMT_BYTEFLD4_P	"%u.%u.%u.%u"
+#define DBGFMT_BYTEFLD4_V(_v)	(_v >> 24) & 0xff, (_v >> 16) & 0xff, (_v >> 8) & 0xff, _v & 0xff
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+typedef enum {
+	DBG_LM_QDRV = 1,
+	DBG_LM_QPCIE,
+	DBG_LM_QRADAR,
+	DBG_LM_QBOOTCFG,
+	DBG_LM_QADM,
+	DBG_LM_QWLAN,
+	DBG_LM_QMACFW,
+	DBG_LM_MAX
+} dbg_log_module;
+
+typedef const struct
+{
+	dbg_log_module dbg_module_id;
+	const char *dbg_module_name;
+} dbg_module_table_t;
+extern unsigned int g_dbg_log_module;
+
+#if defined(MUC_BUILD)
+extern unsigned int g_dbg_log_level;
+extern unsigned int g_dbg_log_func;
+#else
+extern unsigned int g_dbg_log_level[DBG_LM_MAX];
+extern unsigned int g_dbg_log_func[DBG_LM_MAX];
+#endif
+extern dbg_module_table_t dbg_module_name_entry[];
+
+#define DBG_LL_EMERG					0
+#define DBG_LL_ALERT					1
+#define DBG_LL_ERR					2
+#define DBG_LL_WARNING					3
+#define DBG_LL_CRIT					4
+#define DBG_LL_NOTICE					5
+#define DBG_LL_INFO					6
+#define DBG_LL_HIDDEN					7
+#define DBG_LL_DEBUG					8
+#define DBG_LL_TRIAL					9
+#define DBG_LL_ALL					10
+
+#define DBG_LF_00					0x00000001
+#define DBG_LF_01					0x00000002
+#define DBG_LF_02					0x00000004
+#define DBG_LF_03					0x00000008
+#define DBG_LF_04					0x00000010
+#define DBG_LF_05					0x00000020
+#define DBG_LF_06					0x00000040
+#define DBG_LF_07					0x00000080
+#define DBG_LF_08					0x00000100
+#define DBG_LF_09					0x00000200
+#define DBG_LF_10					0x00000400
+#define DBG_LF_11					0x00000800
+#define DBG_LF_12					0x00001000
+#define DBG_LF_13					0x00002000
+#define DBG_LF_14					0x00004000
+#define DBG_LF_15					0x00008000
+#define DBG_LF_16					0x00010000
+#define DBG_LF_17					0x00020000
+#define DBG_LF_18					0x00040000
+#define DBG_LF_19					0x00080000
+#define DBG_LF_20					0x00100000
+#define DBG_LF_21					0x00200000
+#define DBG_LF_22					0x00400000
+#define DBG_LF_23					0x00800000
+#define DBG_LF_24					0x01000000
+#define DBG_LF_25					0x02000000
+#define DBG_LF_26					0x04000000
+#define DBG_LF_27					0x08000000
+#define DBG_LF_28					0x10000000
+#define DBG_LF_29					0x20000000
+#define DBG_LF_30					0x40000000
+#define DBG_LF_31					0x80000000
+#define DBG_LF_ALL					0xFFFFFFFF
+
+#define DBG_LOG_FUNC (g_dbg_log_func[DBG_LM - 1])
+#define DBG_LOG_LEVEL (g_dbg_log_level[DBG_LM - 1])
+#define DBG_LOG_FUNC_TEST(flag) (g_dbg_log_func[DBG_LM - 1] & (flag))
+
+#if defined(QTN_DEBUG)
+
+#define DBGPRINTF_RAW(ll, lf, fmt, ...)						\
+	do {									\
+		if((g_dbg_log_module & (BIT(DBG_LM - 1))) &&			\
+				(DBG_LOG_LEVEL >= (ll)) &&			\
+				(DBG_LOG_FUNC_TEST(lf))) {			\
+			DBGFN(fmt, ##__VA_ARGS__);				\
+		}								\
+	} while(0)
+
+#define DBGPRINTF(ll, lf, fmt, ...)						\
+	do {									\
+		if((g_dbg_log_module & (BIT(DBG_LM - 1))) &&			\
+				(DBG_LOG_LEVEL >= (ll)) &&			\
+				(DBG_LOG_FUNC_TEST(lf))) {			\
+			DBGFN(DBGFMT fmt, DBGARG, ##__VA_ARGS__);		\
+		}								\
+	} while(0)
+
+#define DBGPRINTF_E(fmt, ...)							\
+	do {									\
+		if (DBG_LOG_LEVEL >= DBG_LL_ERR)				\
+			DBGFN(DBGEFMT fmt, DBGARG, ##__VA_ARGS__);		\
+	} while(0)
+
+#define DBGPRINTF_W(fmt, ...)							\
+	do {									\
+		if (DBG_LOG_LEVEL >= DBG_LL_WARNING)				\
+			DBGFN(DBGWFMT fmt, DBGARG, ##__VA_ARGS__);		\
+	} while(0)
+
+#define DBGPRINTF_N(fmt, ...)							\
+	DBGFN(fmt, ##__VA_ARGS__);
+
+#define DBGPRINTF_LIMIT_E(fmt, ...)						\
+	do {									\
+		if ((DBG_LOG_LEVEL >= DBG_LL_ERR) && (net_ratelimit()))		\
+			DBGFN(DBGEFMT fmt, DBGARG, ##__VA_ARGS__);		\
+	} while(0)
+
+#define DBGPRINTF_LIMIT(ll, lf, fmt, ...)					\
+	do {									\
+		if ((g_dbg_log_module & BIT(DBG_LM - 1)) &&			\
+			DBG_LOG_FUNC_TEST(lf) &&				\
+			DBG_LOG_LEVEL >= (ll) && (net_ratelimit()))		\
+			DBGFN(DBGFMT fmt, DBGARG, ##__VA_ARGS__);		\
+	} while(0)
+#else
+#define DBGPRINTF(ll, lf, fmt, args...)
+#define DBGPRINTF_E(fmt, args...)
+#define DBGPRINTF_W(fmt, args...)
+#define DBGPRINTF_LIMIT_E(fmt, args...)
+#define DBGPRINTF_LIMIT(ll, lf, fmt, args...)
+#endif
+
+#define HERE(x) do {							\
+	DBGFN("%s:%d:%s %s = %d 0x%x\n",				\
+			__FILE__, __LINE__, __FUNCTION__, (#x),		\
+			(int) (x), (unsigned int) (x));			\
+} while(0)
+
+#define HERES(x) do {							\
+	DBGFN("%s:%d:%s %s = '%s'\n",					\
+			__FILE__, __LINE__, __FUNCTION__, (#x), (x));	\
+} while(0)
+
+#define HERE_REG(addr)	do {						\
+	DBGFN("%s:%d:%s reg 0x%08lx = 0x%08lx (%s)\n",			\
+			__FILE__, __LINE__, __FUNCTION__,		\
+			(unsigned long) (addr),				\
+			(unsigned long) readl(addr), (#addr));		\
+} while(0)
+
+#endif /* QTN_DEBUG_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_decap.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_decap.h
new file mode 100644
index 0000000..920505b
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_decap.h
@@ -0,0 +1,412 @@
+#ifndef __QTN_DECAP_H__
+#define __QTN_DECAP_H__
+
+#include <net80211/ieee80211.h>
+#include <net80211/if_ethersubr.h>
+#include <net80211/if_llc.h>
+
+#include <qtn/qtn_net_packet.h>
+#include <qtn/qtn_vlan.h>
+
+/*
+ * Length of received frame that requires dcache invalidate on receive.
+ * The amount that must be read is:
+ * - VLAN encap case: MAX_VLANS * (LLC + 2b) + LLC
+ * - 802.11 MPDU, no amsdu: LLC + max l3 depth
+ * - 802.11 AMSDU: msdu header + LLC + max l3 depth
+ *
+ * The max of these three is the VLAN case. There is also an assumption
+ * here that if VLANs are processed, there is no need to process L3 header
+ */
+#define QTN_RX_LLC_DCACHE_INV_LEN	(((LLC_SNAPFRAMELEN + 2) * QTN_MAX_VLANS) + LLC_SNAPFRAMELEN)
+#define QTN_RX_MPDU_DCACHE_INV_LEN	(QTN_RX_LLC_DCACHE_INV_LEN + sizeof(struct ieee80211_qosframe_addr4))
+#define QTN_RX_MSDU_DCACHE_INV_LEN	(QTN_RX_LLC_DCACHE_INV_LEN + sizeof(struct ether_header))
+
+struct qtn_rx_decap_info {
+	void		*start;
+	uint16_t	len;
+	struct ether_header eh;			/* the eth header to be written to the packet */
+	uint32_t	vlanh[QTN_MAX_VLANS];	/* space for vlan headers (must be after eh) */
+	const void	*l3hdr;			/* pointer to layer 3 header in the payload */
+	uint16_t	l3_ether_type;		/* l3 header type (may not match eh.ether_type for 802.3) */
+	int8_t		tid;
+	int8_t		nvlans;
+	uint16_t	vlan_tci;		/* to which VLAN the msdu belongs */
+	uint8_t		first_msdu	:1,	/* first msdu in an amsdu */
+			last_msdu	:1,	/* last msdu in an amsdu */
+			decapped	:1,	/* start is decapped eh, not wireless header */
+			check_3addr_br	:1;	/* requires 3 address bridge dest mac set */
+};
+
+static __inline__ uint16_t
+qtn_rx_decap_newhdr_size(const struct qtn_rx_decap_info *const di)
+{
+	return sizeof(struct ether_header) + (sizeof(struct qtn_8021q) * di->nvlans);
+}
+
+static __inline__ const struct qtn_8021q *
+qtn_rx_decap_vlan(const struct qtn_rx_decap_info *const di, int8_t index)
+{
+	const struct qtn_8021q *v = (const void *) &di->eh.ether_type;
+	return &v[index];
+}
+
+static __inline__ uint16_t qtn_rx_decap_header_size(const struct ieee80211_qosframe_addr4 *const wh)
+{
+	uint16_t size;
+	const uint8_t dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+
+	size = sizeof(struct ieee80211_frame);
+
+	if (dir == IEEE80211_FC1_DIR_DSTODS)
+		size += IEEE80211_ADDR_LEN;
+	if (IEEE80211_QOS_HAS_SEQ(wh)) {
+		size += sizeof(uint16_t);
+		if ((wh->i_fc[1] & IEEE80211_FC1_ORDER) == IEEE80211_FC1_ORDER)
+			/* Frame has HT control field in the header */
+			size += sizeof(uint32_t);
+	}
+
+	return size;
+}
+
+#define DECAP_VLAN_ACTION_NONE	0
+#define DECAP_VLAN_ADD_TAG	BIT(0)
+#define DECAP_VLAN_STRIP_TAG	BIT(1)
+#define DECAP_VLAN_REPLACE_TAG	(DECAP_VLAN_ADD_TAG | DECAP_VLAN_STRIP_TAG)
+
+#define DECAP_PRIO_TAGGED	0
+#define DECAP_NON_PRIO_TAGGED	1
+#define DECAP_UNTAGGED		2
+#define DECAP_TAG_MAX		3
+
+static const uint8_t decap_vlan_action[DECAP_TAG_MAX][DECAP_TAG_MAX] = {
+	/* Rx: priority tagged */
+	{DECAP_VLAN_ACTION_NONE, DECAP_VLAN_REPLACE_TAG, DECAP_VLAN_STRIP_TAG},
+	/* Rx: non-priority tagged */
+	{DECAP_VLAN_REPLACE_TAG, DECAP_VLAN_ACTION_NONE, DECAP_VLAN_STRIP_TAG},
+	/* Rx: untagged */
+	{DECAP_VLAN_ADD_TAG, DECAP_VLAN_ADD_TAG, DECAP_VLAN_ACTION_NONE}
+};
+
+static __inline__ uint8_t
+qtn_rx_decap_vlan_action(uint16_t ether_type_l3, const uint8_t *vlan_tci, uint16_t ptci, struct qtn_vlan_info *vlan_info,
+	uint16_t *pkt_tci, uint16_t *out_tci)
+{
+	uint8_t rx;
+	uint8_t tx;
+	uint16_t tag_tci;
+	int tagrx;
+
+	if (ether_type_l3 == htons(ETHERTYPE_8021Q)) {
+		tag_tci = ((vlan_tci[1] << 0) | (vlan_tci[0] << 8));
+		if ((tag_tci & QVLAN_MASK_VID) != QVLAN_PRIO_VID) {
+			*pkt_tci = tag_tci;
+			rx = DECAP_NON_PRIO_TAGGED;
+		} else {
+			*pkt_tci = ptci;
+			rx = DECAP_PRIO_TAGGED;
+		}
+	} else {
+		*pkt_tci = ptci;
+		rx = DECAP_UNTAGGED;
+	}
+
+	tagrx = qtn_vlan_get_tagrx(vlan_info->vlan_tagrx_bitmap, *pkt_tci & QVLAN_MASK_VID);
+	if (tagrx == QVLAN_TAGRX_UNTOUCH) {
+		return DECAP_VLAN_ACTION_NONE;
+	} else if (tagrx == QVLAN_TAGRX_TAG) {
+		tx = DECAP_NON_PRIO_TAGGED;
+		*out_tci = *pkt_tci;
+	} else {
+		tagrx = qtn_vlan_get_tagrx(vlan_info->vlan_tagrx_bitmap, QVLAN_PRIO_VID);
+		if (tagrx == QVLAN_TAGRX_TAG) {
+			tx = DECAP_PRIO_TAGGED;
+			*out_tci = QVLAN_PRIO_VID | (ptci & ~QVLAN_MASK_VID);
+		} else {
+			tx = DECAP_UNTAGGED;
+		}
+	}
+
+	return decap_vlan_action[rx][tx];
+}
+
+#define LLC_ENCAP_RFC1042	0x0
+#define LLC_ENCAP_BRIDGE_TUNNEL	0xF8
+
+/*
+ * Remove the LLC/SNAP header (if present) and replace with an Ethernet header
+ *
+ * See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation.
+ *   Ethernet-II SNAP header (RFC1042 for most Ethertypes)
+ *   Bridge-Tunnel header (for Ethertypes ETH_P_AARP and ETH_P_IPX)
+ *   No encapsulation header if Ethertype < 0x600 (=length)
+ */
+static __inline__ void *
+qtn_rx_decap_set_eth_hdr(struct qtn_rx_decap_info *di, const uint8_t *llc, const uint16_t llclen,
+				uint16_t ptci, struct qtn_vlan_info *vlan_info, uint8_t vlan_enabled,
+				void *token, void **rate_train)
+{
+	uint16_t *newhdrp = &di->eh.ether_type;
+	int8_t llc_l3_gap = 0;
+	uint16_t ether_type_l3;
+
+	uint8_t last_byte = llc[5];
+	uint16_t ether_type_eh;
+	bool is_llc_snap_e;
+	uint8_t vlan_hdr = 0;
+
+	ether_type_l3 = (llc[6] << 0) | (llc[7] << 8);
+	ether_type_eh = ether_type_l3;
+
+	di->nvlans = 0;
+	di->vlan_tci = 0;
+
+	/*
+	 * For EAPOL and VLAN frames we do not want to add 802.1Q header.
+	 * Otherwise, the frame won't go through a driver.
+	 */
+	if (vlan_enabled) {
+		uint16_t out_tci = 0;
+		uint8_t action = qtn_rx_decap_vlan_action(ether_type_l3, &llc[8], ptci,
+			vlan_info, &di->vlan_tci, &out_tci);
+
+		if (action & DECAP_VLAN_STRIP_TAG) {
+			ether_type_l3 = ((llc[10] << 0) | (llc[11] << 8));
+			ether_type_eh = ether_type_l3;
+			vlan_hdr = 4;
+		}
+
+		if (action & DECAP_VLAN_ADD_TAG) {
+			if (ether_type_l3 != htons(ETHERTYPE_PAE)) {
+				*newhdrp++ = htons(ETHERTYPE_8021Q);
+				*newhdrp++ = htons(out_tci);
+				di->nvlans++;
+			}
+		}
+	}
+
+	/*
+	* Common part of the header - RFC1042 (final byte is 0x0) or
+	* bridge tunnel encapsulation (final byte is 0xF8)
+	*/
+	is_llc_snap_e = llc[0] == LLC_SNAP_LSAP && llc[1] == LLC_SNAP_LSAP &&
+		llc[2] == LLC_UI && llc[3] == 0x0 && llc[4] == 0x0;
+
+	if (likely(is_llc_snap_e &&
+				((last_byte == LLC_ENCAP_BRIDGE_TUNNEL) ||
+				 (last_byte == LLC_ENCAP_RFC1042 &&
+				  ether_type_eh != htons(ETHERTYPE_AARP) &&
+				  ether_type_eh != htons(ETHERTYPE_IPX))))) {
+		if (last_byte == LLC_ENCAP_RFC1042 && ether_type_eh == htons(ETHERTYPE_802A)) {
+			struct oui_extended_ethertype *pe = (struct oui_extended_ethertype *)&llc[8];
+			if (pe->oui[0] == (QTN_OUI & 0xff) &&
+					pe->oui[1] == ((QTN_OUI >> 8) & 0xff) &&
+					pe->oui[2] == ((QTN_OUI >> 16) & 0xff) &&
+					pe->type == ntohs(QTN_OUIE_TYPE_TRAINING)) {
+				/* Pass back pointer to start of training data */
+				if (rate_train)
+					*rate_train = (pe + 1);
+				return NULL;
+			}
+		}
+
+		llc += (LLC_SNAPFRAMELEN + vlan_hdr);
+		*newhdrp++ = ether_type_eh;
+	} else {
+		ether_type_eh = htons(llclen);
+		*newhdrp++ = ether_type_eh;
+		llc_l3_gap = LLC_SNAPFRAMELEN;
+	}
+
+	di->l3hdr = llc + llc_l3_gap;
+	di->l3_ether_type = ether_type_l3;
+	di->start = (void *) (llc - qtn_rx_decap_newhdr_size(di));
+
+	return di->start;
+}
+
+
+typedef int (*decap_handler_t)(struct qtn_rx_decap_info *, void *);
+
+#define QTN_RX_DECAP_AMSDU	(0)
+#define QTN_RX_DECAP_MPDU	(-1)
+#define QTN_RX_DECAP_TRAINING	(-2)
+#define QTN_RX_DECAP_NOT_DATA	(-3)
+#define QTN_RX_DECAP_RUNT	(-4)
+#define QTN_RX_DECAP_ABORTED	(-5)
+#define QTN_RX_DECAP_ERROR(x)	((x) <= QTN_RX_DECAP_NOT_DATA)
+
+#ifndef QTN_RX_DECAP_FNQUAL
+#ifdef __KERNEL__
+#define QTN_RX_DECAP_FNQUAL	static __sram_text
+#define	qtn_rx_decap_inv_dcache_safe(a,b)
+#else
+#define QTN_RX_DECAP_FNQUAL	static __inline__
+#define	qtn_rx_decap_inv_dcache_safe	invalidate_dcache_range_safe
+#endif
+#endif
+
+QTN_RX_DECAP_FNQUAL int qtn_rx_decap(const struct ieee80211_qosframe_addr4 *const wh_copy,
+		const void *const rxdata, const uint16_t rxlen,
+		uint16_t ptci, struct qtn_vlan_info *vlan_info, uint8_t vlan_enabled,
+		decap_handler_t handler, void *token, void **rate_train)
+{
+	const uint8_t *llc;
+	const uint8_t type = wh_copy->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+	const uint8_t subtype = wh_copy->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+	const uint8_t dir = wh_copy->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+	uint8_t qosctrl0 = 0;
+	int8_t tid;
+	bool is_amsdu = false;
+	size_t header_size;
+	int msdu;
+	struct qtn_rx_decap_info __di[2];
+	int dii	= 0;
+	uint8_t *decap_start;
+
+	/* only attempt to decap data frames */
+	if (unlikely(type != IEEE80211_FC0_TYPE_DATA ||
+				!(subtype == IEEE80211_FC0_SUBTYPE_DATA ||
+				  subtype == IEEE80211_FC0_SUBTYPE_QOS))) {
+		return QTN_RX_DECAP_NOT_DATA;
+	}
+
+	/* find qos ctrl field */
+	if (IEEE80211_QOS_HAS_SEQ(wh_copy)){
+		if (IEEE80211_IS_4ADDRESS(wh_copy)) {
+			qosctrl0 = ((struct ieee80211_qosframe_addr4 *) wh_copy)->i_qos[0];
+		} else {
+			qosctrl0 = ((struct ieee80211_qosframe *) wh_copy)->i_qos[0];
+		}
+		tid = qosctrl0 & IEEE80211_QOS_TID;
+		if (qosctrl0 & IEEE80211_QOS_A_MSDU_PRESENT) {
+			is_amsdu = true;
+		}
+	} else {
+		tid = WME_TID_NONQOS;
+	}
+
+	header_size = qtn_rx_decap_header_size(wh_copy);
+
+	if (unlikely(header_size >= rxlen)) {
+		return QTN_RX_DECAP_RUNT;
+	}
+
+	if (!is_amsdu) {
+		const uint8_t *wh_eth_src;
+		const uint8_t *wh_eth_dest;
+		struct qtn_rx_decap_info *di = &__di[dii];
+
+		switch (dir) {
+		case IEEE80211_FC1_DIR_DSTODS:
+			wh_eth_dest = wh_copy->i_addr3;
+			wh_eth_src = wh_copy->i_addr4;
+			if (IEEE80211_ADDR_EQ(wh_copy->i_addr1, wh_copy->i_addr3))
+				di->check_3addr_br = 1;
+			break;
+		case IEEE80211_FC1_DIR_TODS:
+			wh_eth_dest = wh_copy->i_addr3;
+			wh_eth_src = wh_copy->i_addr2;
+			break;
+		case IEEE80211_FC1_DIR_NODS:
+			wh_eth_dest = wh_copy->i_addr1;
+			wh_eth_src = wh_copy->i_addr2;
+			break;
+		case IEEE80211_FC1_DIR_FROMDS:
+			wh_eth_src = wh_copy->i_addr3;
+			wh_eth_dest = wh_copy->i_addr1;
+			di->check_3addr_br = 1;
+			break;
+		default:
+			return QTN_RX_DECAP_ABORTED;
+		}
+
+		IEEE80211_ADDR_COPY(di->eh.ether_dhost, wh_eth_dest);
+		IEEE80211_ADDR_COPY(di->eh.ether_shost, wh_eth_src);
+		llc = ((uint8_t *) rxdata) + header_size;
+		decap_start = qtn_rx_decap_set_eth_hdr(di, llc, rxlen - header_size,
+							ptci, vlan_info, vlan_enabled, token, rate_train);
+		if (unlikely(!decap_start)) {
+			return QTN_RX_DECAP_TRAINING;
+		}
+
+		di->len = (((uint8_t *) rxdata) + rxlen) - decap_start;
+		di->tid = tid;
+		di->first_msdu = 1;
+		di->last_msdu = 1;
+		di->decapped = 1;
+
+		if (handler(di, token)) {
+			return QTN_RX_DECAP_ABORTED;
+		}
+
+		return QTN_RX_DECAP_MPDU;
+	} else {
+		/* amsdu */
+		struct ether_header *msdu_header;
+		struct ether_header *next_msdu_header;
+		struct qtn_rx_decap_info *prev_di = NULL;
+		uint16_t msdu_len;
+		uint16_t subframe_len;
+		uint16_t subframe_padding;
+		uint16_t total_decapped_len = header_size;
+
+		MUC_UPDATE_STATS(uc_rx_stats.rx_amsdu, 1);
+		next_msdu_header = (struct ether_header *)(((uint8_t *)rxdata) + header_size);
+		for (msdu = 0; total_decapped_len < rxlen; msdu++) {
+			struct qtn_rx_decap_info *di = &__di[dii];
+
+			msdu_header = next_msdu_header;
+			llc = (uint8_t *)(msdu_header + 1);
+			qtn_rx_decap_inv_dcache_safe(msdu_header, QTN_RX_MSDU_DCACHE_INV_LEN);
+			msdu_len = ntohs(msdu_header->ether_type);
+			subframe_len = sizeof(*msdu_header) + msdu_len;
+			if (subframe_len < sizeof(*msdu_header) ||
+					subframe_len > (rxlen - total_decapped_len) ||
+					subframe_len > (ETHER_JUMBO_MAX_LEN + LLC_SNAPFRAMELEN)) {
+				break;
+			}
+			subframe_padding = ((subframe_len + 0x3) & ~0x3) - subframe_len;
+			next_msdu_header = (struct ether_header *)(llc + msdu_len + subframe_padding);
+			/* decapped length includes subframe padding */
+			total_decapped_len = ((uint8_t *)next_msdu_header) - ((uint8_t *)rxdata);
+
+			decap_start = qtn_rx_decap_set_eth_hdr(di, llc, msdu_len, ptci, vlan_info, vlan_enabled,
+								token, rate_train);
+			if (unlikely(!decap_start)) {
+				return QTN_RX_DECAP_TRAINING;
+			}
+
+			if (prev_di) {
+				if (handler(prev_di, token)) {
+					return QTN_RX_DECAP_ABORTED;
+				}
+			}
+
+			IEEE80211_ADDR_COPY(di->eh.ether_dhost, msdu_header->ether_dhost);
+			IEEE80211_ADDR_COPY(di->eh.ether_shost, msdu_header->ether_shost);
+			di->len = ((uint8_t *)next_msdu_header - decap_start) - subframe_padding;
+			di->tid = tid;
+			di->first_msdu = (prev_di == NULL);
+			di->last_msdu = 0;
+			di->decapped = 1;
+			di->check_3addr_br = 0;
+			prev_di = di;
+			dii = !dii;
+		}
+		if (prev_di) {
+			prev_di->last_msdu = 1;
+			if (handler(prev_di, token)) {
+				return QTN_RX_DECAP_ABORTED;
+			}
+		} else {
+			return QTN_RX_DECAP_ABORTED;
+		}
+
+		return QTN_RX_DECAP_AMSDU;
+	}
+}
+
+#endif	// __QTN_DECAP_H__
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_fw_info.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_fw_info.h
new file mode 100644
index 0000000..722abe7
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_fw_info.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016 Quantenna Communications, Inc.
+ */
+
+/*
+ * This file defines a firmware information section, which contains
+ * firmware version for sanity checking.
+ */
+#ifndef _QTN_FW_INFO_H_
+#define _QTN_FW_INFO_H_
+
+#define FW_INFO_SECTION		".fwinfo"
+/*
+ * Defining a memory address for this section makes sure it is easy
+ * to find and won't be stripped; we won't actually load the section.
+ */
+#define FW_INFO_MEM_ADDR	0xFFFF0000
+#define FW_INFO_MEM_SIZE	0xFFFF
+#define FW_INFO_MEMORY		FWINFO_MEM: ORIGIN=FW_INFO_MEM_ADDR LENGTH=FW_INFO_MEM_SIZE
+#define FW_INFO_ADD_SECTION	GROUP : { FW_INFO_SECTION ALIGN(4): { *(FW_INFO_SECTION) } } > FWINFO_MEM
+
+#ifndef __ASSEMBLY__
+#include <qtn/qdrv_bld.h>
+
+#define FW_INFO_IDENT		"FWINFO"
+#define FW_INFO_IDENT_SIZE	8
+#define FW_INFO_REV		1
+
+struct qtn_fw_info {
+	char fwinfo_ident[FW_INFO_IDENT_SIZE];
+	uint32_t fwinfo_rev;
+	uint32_t fwinfo_fw_version;
+};
+
+#define FW_INFO_DATA_SIZE (sizeof(struct qtn_fw_info))
+
+#define FW_INFO_SEGMENT_FOUND(_vaddr, _filesz, _pdata) \
+	(((_vaddr) == FW_INFO_MEM_ADDR) && \
+	 ((_filesz) >= FW_INFO_DATA_SIZE) && \
+	 !strncmp(((struct qtn_fw_info *)(_pdata))->fwinfo_ident, FW_INFO_IDENT, strlen(FW_INFO_IDENT)))
+
+#define FW_INFO_CHECK_DATA(_fwinfo, _match, _print)	do {			\
+	if ((_fwinfo)->fwinfo_rev != FW_INFO_REV)				\
+		_print("Mismatched fw info revision 0x%x - expected 0x%x\n",	\
+			(_fwinfo)->fwinfo_rev, FW_INFO_REV);			\
+	else if ((_fwinfo)->fwinfo_fw_version != QDRV_BLD_VER)			\
+		_print("Firmware version 0x%x does not match lhost version 0x%x\n",	\
+				(_fwinfo)->fwinfo_fw_version, QDRV_BLD_VER);	\
+	else			\
+		_match = 1;	\
+} while(0)
+
+#define FW_INFO_ADD_DATA	\
+	struct qtn_fw_info _fwinfo = {	\
+		FW_INFO_IDENT,		\
+		FW_INFO_REV,		\
+		QDRV_BLD_VER		\
+	}
+#endif /* __ASSEMBLY__ */
+#endif /* _QTN_FW_INFO_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_global.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_global.h
new file mode 100644
index 0000000..272d7e0
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_global.h
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2011-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _QTN_GLOBAL_H
+#define _QTN_GLOBAL_H
+
+#define QTN_DEFAULT_LEGACY_RETRY_COUNT 4
+
+#define QTN_GLOBAL_INIT_SELECT_GI_ENABLE	2
+#define QTN_GLOBAL_INIT_SELECT_PPPC_ENABLE	1
+
+#define QTN_GLOBAL_PSEL_MATRIX_ENABLE		1
+#define QTN_GLOBAL_INIT_DEF_MATRIX		1
+
+#define QTN_AC_BE_INHERIT_VO_NO_STA		4
+#define QTN_AC_BE_INHERIT_VI_NO_STA		3
+#define QTN_AC_BE_INHERIT_VO			2
+#define QTN_AC_BE_INHERIT_VI			1
+#define QTN_AC_BE_INHERIT_DISABLE		0
+#define QTN_AC_BE_INHERIT_Q2Q_ENABLE		1
+#define QTN_AC_BE_INHERIT_Q2Q_DISABLE		0
+#define QTN_TXBF_IOT_DISABLE                    0
+#define QTN_TXBF_IOT_ENABLE                     1
+#define QTN_GLOBAL_MU_ENABLE			1
+#define QTN_GLOBAL_MU_DISABLE			0
+#define QTN_GLOBAL_MU_INITIAL_STATE		QTN_GLOBAL_MU_DISABLE
+
+#define QTN_AUTO_CS_ENABLE			1
+#define QTN_AUTO_CS_DISABLE			0
+
+/*
+ * The MuC TX Queuing algorithm is selected by setting
+ * g_tx_queuing_alg. Values are:
+ * 0 = Round robin
+ * 1 = Equal airtime
+ * 2 = Greedy best
+ * 3 = Round robin (filling to make a power of 4)
+ * x >= 4: algorithm chosen by (val % 4), with airtime
+ * debugs enabled, printed every (val) seconds.
+ */
+#define QTN_TX_QUEUING_ALG_ROUND_ROBIN		0
+#define QTN_TX_QUEUING_ALG_EQUAL_AIRTIME	1
+#define QTN_TX_QUEUING_ALG_GREEDY_BEST		2
+#define QTN_TX_QUEUING_ALGS			4
+#if QTN_TX_QUEUING_ALGS & (QTN_TX_QUEUING_ALGS - 1)
+	#error QTN_TX_QUEUING_ALGS should be a power of 2
+#endif
+#define QTN_GLOBAL_INIT_TX_QUEUING_ALG		QTN_TX_QUEUING_ALG_ROUND_ROBIN
+
+#define QTN_TX_AIRTIME_XMIT_BUMP_USECS		100
+
+#define QTN_TX_BUF_RETURN_MIN			100
+/* must be greater than the above to prevent stalling */
+#define QDRV_TX_LOW_RATE_TOKENS_MAX		QTN_TX_BUF_RETURN_MIN + 28
+
+#define QTN_GLOBAL_RATE_NSS_MAX		4
+#define QTN_2X2_GLOBAL_RATE_NSS_MAX	2
+#define QTN_3X3_GLOBAL_RATE_NSS_MAX	3
+#define QTN_RX_REORDER_BUF_TIMEOUT_US		200000
+#define QTN_RX_REORDER_BUF_TIMEOUT_US_VI	800000
+#define QTN_PROBE_RES_MAX_RETRY_COUNT	4
+#define QTN_TX_SWRETRY_AGG_MAX		8	/* high value for zero PER */
+#define QTN_TX_SWRETRY_NOAGG_MAX	1	/* add tx restrict check if this is increased */
+#define QTN_TX_SWRETRY_SUSPEND_XMIT	4	/* sw retry when the sending frames are suspended */
+#define QTN_TX_MSDU_EXPIRY		0	/* allow MSDUs to time out? */
+#define QTN_TX_AGGREGATION		1	/* allow aggregation? */
+#define QTN_CALSTATE_CALIB		1
+#define QTN_CALSTATE_PROD		3
+#define QTN_CALSTATE_DEFAULT		QTN_CALSTATE_PROD
+#define QTN_CALSTATE_IS_PROD()		(likely(g_qtn_params.g_calstate == QTN_CALSTATE_PROD))
+#define QTN_CALSTATE_VPD_LOG		0
+#define QTN_CALSTATE_VPD_LINEAR		1
+#define QTN_CALSTATE_MIN_TX_POWER	7
+#define QTN_CALSTATE_MAX_TX_POWER	23
+#define QTN_EMI_POWER_SWITCH_ENABLE	1
+#define QTN_OPTI_DFLT_ID		0
+
+#define QTN_TX_AMSDU_DISABLED		0
+#define QTN_TX_AMSDU_ADAPTIVE		1
+#define QTN_TX_AMSDU_FIXED		0xff
+
+#define QTN_SEL_PPPC_STEP_DEF		1
+#define QTN_SEL_PPPC_MAX_STEPS		4
+
+#define QTN_INST_1SS_DEF_MAT_THRESH_DEFAULT	2	/* dbm */
+
+#define QTN_FLAG_ACT_FRAME_RTS_CTS		0x00000001
+#define QTN_FLAG_ACT_FRAME_NO_LDPC		0x00000002
+#define QTN_FLAG_MCS_UEQM_DISABLE		0x00000004
+#define QTN_FLAG_AUC_TX				0x00000008
+#define QTN_FLAG_RA_BW_SWITCHING_ENABLE_11N	0x00000010
+#define QTN_FLAG_RA_BW_SWITCHING_ENABLE_11AC	0x00000020
+#define QTN_GLOBAL_MUC_FLAGS_DEFAULT		QTN_FLAG_RA_BW_SWITCHING_ENABLE_11N | \
+						QTN_FLAG_RA_BW_SWITCHING_ENABLE_11AC
+#define QTN_NDPA_IN_HT_VHT_FORMAT	0
+#define QTN_NDPA_IN_LEGACY_FORMAT	1
+
+#define QTN_DBG_MODE_SEND_PWR_MGT		0x00000001
+#define QTN_DBG_MODE_ACCEPT_PWR_MGT		0x00000002
+#define QTN_DBG_MODE_TX_PKT_LOSS		0x00000004
+#define QTN_DBG_MODE_DELBA_ON_TX_PKT_LOSS	0x00000008
+#define QTN_DBG_MODE_CCA_FORCE			0x00000010
+#define QTN_DBG_MODE_INJECT_INV_NDP		0x00000020
+
+#define QTN_DBG_FD_CHECK_PERIODIC	0x00000001
+#define QTN_DBG_FD_DUMP_OLD		0x00000002
+#define QTN_DBG_FD_CHECK_ONESHOT	0x00000004
+#define QTN_DBG_FD_DUMP_BCN_FAIL	0x00000008
+#define QTN_DBG_FD_DUMP_VERBOSE	0x00000010 /* + top byte is the FD to dump */
+#define QTN_DBG_DUMP_SC		0x00000020
+#define QTN_DBG_DUMP_AGEQ		0x00000040
+#define QTN_DBG_FD_FLAG_MASK		0x0000FFFF
+
+#define QTN_HW_UPDATE_NDPA_DUR  0x0
+#define	QTN_SU_TXBF_TX_CNT_DEF_THRSHLD 2
+#define QTN_MU_TXBF_TX_CNT_DEF_THRSHLD 2
+
+#define QTN_RX_BAR_SYNC_DISABLE	0
+#define QTN_RX_BAR_SYNC_QTN	1
+#define QTN_RX_BAR_SYNC_ALL	2
+
+#if (defined(MUC_BUILD) || defined(SYSTEM_BUILD))
+
+#define QTN_RX_GAIN_MIN_THRESHOLD		16
+#define QTN_RX_GAIN_MAX_THRESHOLD		44
+#define QTN_RX_GAIN_TIMER_INTV			1000 /* msecs */
+/* counter for delay in RFIC6 500Mhz */
+#define QTN_RFIC6_DELAY_MICRO_S			500
+#define QTN_RFIC6_DELAY_MILI_S			QTN_RFIC6_DELAY_MICRO_S * 1000
+
+#define SIZE_D1(x)	(sizeof(x)/sizeof(x[0]))
+#define SIZE_D2(x)	(sizeof(x[0])/sizeof(x[0][0]))
+
+struct qtn_gain_settings {
+	uint8_t	gain_flags;
+/* Enable SW workaround for short range association */
+#define QTN_AUTO_PWR_ADJUST_EN		0x1
+/* Hardware supports automatic RX gain */
+#define QTN_SHORTRANGE_SCANCNT_HW	0x2
+	uint32_t	gain_cumulative; /* Cumulative gain for all rx pkts */
+	uint32_t	gain_num_pkts;	 /* Number of pkts for which cumulative gain was considered */
+	uint32_t	gain_timer;
+	uint32_t	gain_min_thresh;
+	uint32_t	gain_max_thresh;
+	uint32_t	gain_timer_intv;
+	uint32_t	gain_low_txpow;
+	int		ext_lna_gain;
+	int		ext_lna_bypass_gain;
+};
+
+#define QTN_SCS_MAX_OC_STATS	32
+/* off channel params */
+struct qtn_scs_oc_stats {
+	uint32_t	oc_pri_chan;
+	uint32_t	oc_bw_sel;
+	uint32_t	oc_crc_cnt;
+	uint32_t	oc_lp_cnt;
+	uint32_t	oc_sp_cnt;
+	uint32_t	oc_cca_pri;
+	uint32_t	oc_cca_sec;
+	uint32_t	oc_cca_sec40;
+	uint32_t	oc_cca_busy;
+	uint32_t	oc_cca_smpl;
+	uint32_t	oc_cca_try;
+	uint32_t	oc_bcn_recvd;
+};
+
+struct qtn_cca_counts {
+	uint32_t	cca_pri_cnt;
+	uint32_t	cca_sec_cnt;
+	uint32_t	cca_sec40_cnt;
+	uint32_t	cca_busy_cnt;
+	uint32_t	cca_sample_cnt;
+	uint32_t	cca_try_cnt;
+	uint32_t	cca_csw_cnt;
+	uint32_t	cca_off_pri_cnt;
+	uint32_t	cca_off_sec_cnt;
+	uint32_t	cca_off_sec40_cnt;
+	uint32_t	cca_off_busy_cnt;
+	uint32_t	cca_off_sample_cnt;
+	uint32_t	cca_off_res_cnt;
+	uint32_t	cca_off_try_cnt;
+	uint32_t	cca_meas_cnt;
+};
+
+struct qtn_scs_params {
+	uint32_t	cca_pri_cnt;
+	uint32_t	cca_sec_cnt;
+	uint32_t	cca_sec40_cnt;
+	uint32_t	cca_busy_cnt;
+	uint32_t	cca_sample_cnt;
+	uint32_t	cca_try_cnt;
+	uint32_t	cca_csw_cnt;
+	uint32_t	cca_off_res_cnt;
+	uint32_t	cca_off_try_cnt;
+	uint32_t	cca_meas_cnt;
+	uint32_t	tx_usecs;
+	uint32_t	rx_usecs;
+	uint32_t	bcn_recvd;
+	uint32_t	oc_stats_index;
+	struct qtn_scs_oc_stats oc_stats[QTN_SCS_MAX_OC_STATS];
+};
+
+struct qtn_vsp_params {
+	uint32_t	vsp_flags;
+#define QTN_VSP_VSP_EN		0x01
+#define QTN_VSP_FAT_DEBUG	0x02
+#define QTN_VSP_NDCST_DEBUG	0x04
+#define QTN_VSP_INTF_DEBUG	0x08
+#define QTN_VSP_RTS_CTS_IN_USE	0x10
+	uint32_t	timer_intv;
+	uint32_t	check_secs;
+	uint32_t	check_scale;
+	uint32_t	fat_last;
+	uint32_t	intf_ms;
+	uint32_t	cca_pri_cnt;
+	uint32_t	cca_sec_cnt;
+	uint32_t	cca_sec40_cnt;
+	uint32_t	cca_busy_cnt;
+	uint32_t	cca_sample_cnt;
+	uint32_t	cca_try_cnt;
+	uint32_t	cca_csw_cnt;
+	uint32_t	cca_off_res_cnt;
+	uint32_t	cca_off_try_cnt;
+	uint32_t	cca_meas_cnt;
+	uint32_t	tot_tx_ms;
+};
+
+
+#define QTN_AUTO_CCA_SHORT_PREAMBLE_THRESHOLD	15000	/* If higher than this value, increase the CCA threshold */
+#define QTN_AUTO_CCA_INTF_THRESHOLD		250	/* If higher than this value, increase the CCA threshold */
+
+#define QTN_AUTO_CCA_THRESHOLD_MAX		0x10000	/* The max cca threshold we can set */
+
+
+struct qtn_auto_cca_params {
+#define QTN_AUTO_CCA_FLAGS_DISABLE			0x0
+#define QTN_AUTO_CCA_FLAGS_ENABLE			0x1
+#define QTN_AUTO_CCA_FLAGS_DEBUG			0x2
+#define QTN_AUTO_CCA_FLAGS_SAMPLE_ONLY			0x4
+	uint32_t	flags;
+
+	uint32_t	spre_threshold;
+	uint32_t	cca_intf_threshold;
+	uint32_t	cca_threshold_max;
+};
+
+struct qtn_wowlan_params {
+	uint16_t	host_state;
+	uint16_t	wowlan_match;
+	uint16_t	l2_ether_type;
+	uint16_t	l3_udp_port;
+};
+
+#define QTN_AUTO_CCA_PARARMS_DEFAULT		\
+	{ QTN_AUTO_CCA_FLAGS_ENABLE, QTN_AUTO_CCA_SHORT_PREAMBLE_THRESHOLD, \
+		QTN_AUTO_CCA_INTF_THRESHOLD, QTN_AUTO_CCA_THRESHOLD_MAX}
+
+struct qtn_global_param {
+	uint32_t	g_legacy_retry_count;
+	uint32_t	g_dbg_check_flags;
+	uint32_t	g_dbg_stop_flags;
+	uint32_t	g_dbg_mode_flags;
+	uint8_t		g_select_gi_enable;
+	uint8_t		g_select_pppc_enable;
+	uint8_t		g_rate_ht_nss_max;
+	uint8_t		g_rate_vht_nss_max;
+	uint32_t	g_rx_agg_timeout;
+	uint32_t	g_muc_flags;
+	struct qtn_scs_params scs_params;
+	struct qtn_vsp_params vsp_params;
+	uint8_t		g_slow_eth_war;
+	uint8_t		g_tx_swretry_agg_max;
+	uint8_t		g_tx_swretry_noagg_max;
+	uint8_t		g_tx_swretry_suspend_xmit;
+	uint8_t		g_tx_msdu_expiry;
+	uint8_t		g_tx_aggregation;
+	uint32_t	g_iot_tweaks;
+	uint8_t		g_calstate;
+	uint8_t		g_psel_mat_enable;
+	uint32_t	g_ack_policy;
+	uint32_t	g_dbg_fd_flags;
+	uint32_t	g_qtn_disassoc_fd_threshold;
+	uint32_t	g_qtn_qn_fd_threshold;
+	int32_t         g_2_tx_chains_mimo_mode;
+	uint8_t		g_calstate_tx_power;
+	uint8_t		g_min_tx_power;
+	uint8_t		g_max_tx_power;
+	uint8_t		g_emi_power_switch_enable;
+	uint8_t		g_dyn_agg_timeout;
+	int32_t		g_sifs_mode;
+	uint8_t		g_tx_amsdu;
+	uint32_t	g_ralg_dbg_aid;
+	uint8_t		g_select_pppc_step_option;
+	uint8_t         g_11g_erp;
+	uint8_t		g_single_agg_queuing;
+	uint8_t		g_def_matrix;
+	uint32_t	g_tx_restrict;
+	uint32_t	g_tx_restrict_fd_limit;
+	uint32_t	g_tx_restrict_rate;	/* Max packets per second in Tx restrict mode */
+	uint32_t	g_tx_restrict_attempts;
+	uint32_t        g_rts_threshold;        /* RTS threshold */
+	uint8_t		g_tx_queuing_alg;
+	uint8_t         g_1bit_enable;          /* enable/disable 1bit */
+	uint32_t        g_carrier_id;		/* Get/Set carrier ID */
+	uint8_t		g_rx_accelerate;
+	uint8_t		g_rx_accel_lu_sa;
+	uint8_t		g_tx_ac_inheritance;	/* promote AC_BE traffic to vo/vi */
+	uint8_t         g_txbf_iot;             /* turn on/off TxBF IOT with non QTN node */
+	uint8_t		g_tx_ac_q2q_inheritance;/* promote AC_BE traffic to vo/vi */
+	uint8_t		g_tx_1ss_amsdu_supp;	/* enable-disable 1ss AMSDU support - Non-qtn clients */
+	uint32_t        g_vht_ndpa_dur;         /* manual update VHT NDPA duration, if it is 0, then HW auto update */
+	uint32_t        g_su_txbf_pkt_cnt;      /* Tx operation count threshold to a SU TxBF station */
+	uint32_t        g_mu_txbf_pkt_cnt;      /* Tx operation count threshold to a MU TxBF station */
+	struct qtn_auto_cca_params	g_auto_cca_params;
+	struct qtn_wowlan_params wowlan_params;
+	uint8_t		g_rx_optim;
+	uint8_t		g_airfair;
+	uint8_t		g_cca_fixed;
+	uint8_t		g_ndpa_legacy_format;	/* Configure HT-VHT / Legacy frame format for NDP announcements */
+	uint8_t		g_inst_1ss_def_mat_en;		/* enable default 1ss matrix feature */
+	uint8_t		g_inst_1ss_def_mat_thresh;	/* the threshold to install default 1ss matrix */
+	uint32_t        g_mu_enable;            /* enable/disable MU Tx */
+	uint8_t		g_l2_ext_filter;	/* L2 external filter */
+	uint8_t		g_l2_ext_filter_port;	/* L2 external filter port */
+	uint8_t		g_rate_train_dbg;
+	uint8_t		g_rx_optim_pkt_stats;
+	uint8_t		g_mrc_enable;
+	uint32_t	g_auto_cs_enable;	/* enable/disable auto cs threshold */
+	uint8_t		g_beaconing_scheme;
+	uint32_t	g_muc_sys_dbg;
+	uint32_t	g_rx_bar_sync;		/* sync rx reorder window on receiving BAR */
+	uint32_t	g_qtn_opti_mode;
+	char		*g_last_field;		/* Add all new fields before this one */
+};
+
+/* Please keep this structure in sync with qtn_global_param */
+#define G_PARAMS_INIT	{			\
+	QTN_DEFAULT_LEGACY_RETRY_COUNT,		\
+	0,					\
+	0,					\
+	0,					\
+	QTN_GLOBAL_INIT_SELECT_GI_ENABLE,	\
+	QTN_GLOBAL_INIT_SELECT_PPPC_ENABLE,	\
+	QTN_GLOBAL_RATE_NSS_MAX,		\
+	QTN_GLOBAL_RATE_NSS_MAX,		\
+	QTN_RX_REORDER_BUF_TIMEOUT_US,		\
+	QTN_GLOBAL_MUC_FLAGS_DEFAULT,		\
+	{0,},					\
+	{0,},					\
+	0,					\
+	QTN_TX_SWRETRY_AGG_MAX,			\
+	QTN_TX_SWRETRY_NOAGG_MAX,		\
+	QTN_TX_SWRETRY_SUSPEND_XMIT,		\
+	QTN_TX_MSDU_EXPIRY,			\
+	QTN_TX_AGGREGATION,			\
+	QTN_IOT_DEFAULT_TWEAK,			\
+	QTN_CALSTATE_DEFAULT,			\
+	QTN_GLOBAL_PSEL_MATRIX_ENABLE,		\
+	1,					\
+	0,					\
+	50,					\
+	64,					\
+	1,					\
+	QTN_CALSTATE_VPD_LOG,			\
+	QTN_CALSTATE_MIN_TX_POWER,		\
+	QTN_CALSTATE_MAX_TX_POWER,		\
+	QTN_EMI_POWER_SWITCH_ENABLE,		\
+	0,					\
+	2,					\
+	QTN_TX_AMSDU_ADAPTIVE,			\
+	0,					\
+	QTN_SEL_PPPC_STEP_DEF,			\
+	0,					\
+	0,					\
+	QTN_GLOBAL_INIT_DEF_MATRIX,		\
+	1,					\
+	IEEE80211_NODE_TX_RESTRICT_LIMIT,	\
+	IEEE80211_TX_RESTRICT_RATE,		\
+	IEEE80211_NODE_TX_RESTRICT_RETRY,	\
+	IEEE80211_RTS_THRESH_OFF,		\
+	QTN_GLOBAL_INIT_TX_QUEUING_ALG,		\
+	1,					\
+	0,					\
+	1,					\
+	1,					\
+	QTN_AC_BE_INHERIT_VO,			\
+	QTN_TXBF_IOT_ENABLE,			\
+	QTN_AC_BE_INHERIT_Q2Q_ENABLE,		\
+	QTN_TX_AMSDU_DISABLED,			\
+	QTN_HW_UPDATE_NDPA_DUR,			\
+	QTN_SU_TXBF_TX_CNT_DEF_THRSHLD,	        \
+	QTN_MU_TXBF_TX_CNT_DEF_THRSHLD,	        \
+	QTN_AUTO_CCA_PARARMS_DEFAULT,		\
+	{0, 0, 0x0842, 0xffff},			\
+	0,					\
+	QTN_AUC_AIRFAIR_DFT,			\
+	0,					\
+	QTN_NDPA_IN_LEGACY_FORMAT,		\
+	1,					\
+	QTN_INST_1SS_DEF_MAT_THRESH_DEFAULT,	\
+	QTN_GLOBAL_MU_INITIAL_STATE,		\
+	0,					\
+	TOPAZ_TQE_EMAC_0_PORT,			\
+	0,					\
+	0,					\
+	1,					\
+	QTN_AUTO_CS_ENABLE,			\
+	0,					\
+	0,					\
+	QTN_RX_BAR_SYNC_QTN,			\
+	QTN_OPTI_DFLT_ID,			\
+	"end"					\
+}
+
+extern struct qtn_global_param g_qtn_params;
+extern volatile __uncached__ struct qtn_gain_settings g_gain;
+extern struct qtn_cca_counts g_cca_counts;
+extern struct qtn_cca_stats g_qtn_cca_stats;
+extern uint32_t g_qtn_rxtime_usecs;
+extern uint32_t g_qtn_txtime_usecs;
+extern uint8_t g_rf_mixer_gain;
+extern uint8_t g_afe_pga_gain; 
+extern uint32_t g_rf_xmit_status;
+extern int vlan_enabled_bus;
+
+#endif	/* defined(MUC_BUILD) */
+
+/*
+ * SKBs on the power save queue are tagged with an age and timed out.  We reuse the
+ * hardware checksum field in the mbuf packet header to store this data.
+ */
+#define skb_age csum_offset
+
+#define M_AGE_SET(skb,v)	(skb->skb_age = v)
+#define M_AGE_GET(skb)		(skb->skb_age)
+#define M_AGE_SUB(skb,adj)	(skb->skb_age -= adj)
+
+#define QTN_2G_FIRST_OPERATING_CHAN 1
+#define QTN_2G_LAST_OPERATING_CHAN  14
+#define QTN_4G_FIRST_OPERATING_CHAN 183
+#define QTN_4G_LAST_OPERATING_CHAN  196
+#define QTN_5G_FIRST_OPERATING_CHAN 36
+#define QTN_5G_LAST_UNII1_OPERATING_CHAN 48
+#define QTN_5G_LAST_UNII2_OPERATING_CHAN 140
+#define QTN_5G_LAST_OPERATING_CHAN  169
+
+/* RFIC chip ID */
+#define RFIC5_EAGLE_PROJ_ID (2)
+#define RFIC6_PROJ_ID	(3)
+#define RFIC6_VER_A	(0)
+#define RFIC6_VER_B	(2)
+#define RFIC6_VER_C	(3)
+#define RFIC6_VER_D	(4)
+#define RFIC6_VER_E	(5)
+#define RFIC6_VER_MAX	(7)
+
+/* MU-MIMO WMAC index */
+enum {
+	WMAC_ID_0	= 0,
+	WMAC_ID_1	= 1,
+	WMAC_ID_MAX
+};
+
+#endif	/* _QTN_GLOBAL_MUC_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_math.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_math.h
new file mode 100644
index 0000000..14630d1
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_math.h
@@ -0,0 +1,57 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2008 - 2008 Quantenna Communications Inc            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna                                                  **
+**  Date        : 12/04/08                                                   **
+**  File        : qtn_math.h                                                 **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+#ifndef _QDRV_MATH_H
+#define _QDRV_MATH_H
+
+#ifndef ABS
+#define ABS(x) (((x) > 0) ? (x) : 0 -(x))
+#endif
+
+void convert_evm_db(u_int32_t evm_reg, int n_sym, int *evm_int, int *evm_frac);
+void average_evm_db(const uint32_t *evm_array, int n_sym, int *evm_int, int *evm_frac);
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_math.inl b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_math.inl
new file mode 100644
index 0000000..2fd8ea5
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_math.inl
@@ -0,0 +1,455 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2008 - 2008 Quantenna Communications Inc            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna                                                  **
+**  Date        : 12/04/08                                                   **
+**  File        : qtn_math.inl                                               **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#include <qtn/qtn_math.h>
+#include <qtn/muc_phy_stats.h>
+
+#ifndef MAX
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef MIN
+#define MIN(a, b) (((a) > (b)) ? (b) : (a))
+#endif
+
+#define NUM_VALID_ONE_STREAM 1
+#define NUM_VALID_TWO_STREAM 2
+#define NUM_VALID_THREE_STREAM 3
+#define NUM_VALID_FOUR_STREAM 4
+
+#define NUM_BITS_FOR_FRACTION 4
+#define NUM_BITS_EVM_MANT_ONE 2048
+#define NUM_BITS_EVM_MANT_SHIFT 12
+#define NUM_BITS_EVM_EXP_SHIFT 16
+#define NUM_BITS_GUARD_TINY_SHIFT 7
+
+static const u_int16_t lut_1024_10log10[] = {
+	#include "./log_table/1024_10log10_table.txt"
+};
+
+u_int8_t highest_one_bit_pos(u_int32_t val)
+{
+	u_int32_t shift;
+	u_int32_t pos = 0;
+
+	if (val == 0) return 0;
+
+	shift = (val & 0xFFFF0000) ? 16 : 0; val >>= shift; pos |= shift;
+	shift = (val & 0xFF00    ) ?  8 : 0; val >>= shift; pos |= shift;
+	shift = (val & 0xF0      ) ?  4 : 0; val >>= shift; pos |= shift;
+	shift = (val & 0xC       ) ?  2 : 0; val >>= shift; pos |= shift;
+	shift = (val & 0x2       ) ?  1 : 0;                pos |= shift;
+	pos++;
+	return (u_int8_t) pos;
+}
+
+u_int32_t rshift_round(u_int32_t x, int shift)
+{
+	u_int32_t z;
+		
+	if (shift == 0)
+		return x;
+
+	z = x >> (shift - 1);
+	z += (z & 1);
+	z >>= 1;
+	return z;
+}
+
+int linear_to_10log10(u_int32_t x, int8_t nbit_frac_in, int8_t nbit_frac_out)
+{
+	u_int8_t shift;
+
+	if (x <= 0)
+		return (int)0x80000001;  // 10*log10(0) = -infinity
+
+	shift = MAX(highest_one_bit_pos(x) - 8, 0);
+
+	//printk("shift = %d , x = %d, lut_1024_10log10[(x >> shift) - 1] = %d \n\n", shift, x, lut_1024_10log10[(x >> shift) - 1]);
+
+	// y = round((1024*10*log10(x/2^shift) + (shift-nbit_frac_in)*1024*10*log10(2)) / 2^(10-nbit_frac_out))
+	// 49321 = round(16*1024*10*log10(2))
+	return rshift_round((int)(lut_1024_10log10[(x >> shift) - 1] + (((shift - nbit_frac_in) * 49321) >> 4)), 10 - nbit_frac_out) ;
+}
+
+int divide_by_16_x_10000(int x)
+{
+	return (x * 625); //10000/16
+};
+
+
+u_int16_t conv_linear_mantissa(long val, short se)
+{
+	u_int16_t evm_out;
+	short     sht_count;
+	long shift_mask;
+
+	if (val == 0) return 0;
+
+	if (se < 1 || se > 32)
+		return 0; // se is in [1:32]
+
+	shift_mask = 0x000007FF; // MSB mask for 2048
+
+	if (se > NUM_BITS_EVM_MANT_SHIFT ) {
+		sht_count = se - NUM_BITS_EVM_MANT_SHIFT  ;
+		shift_mask = shift_mask << sht_count;
+		evm_out = (u_int16_t) (val & shift_mask) >> sht_count;
+	} else {
+		sht_count = NUM_BITS_EVM_MANT_SHIFT  - se ;
+		shift_mask = shift_mask >> sht_count;
+		evm_out = (u_int16_t) (val & shift_mask) << sht_count;
+	}
+
+	return  (evm_out);
+}
+
+void average_evm_db(const uint32_t *evm_array, int n_sym, int *evm_int, int *evm_frac)
+{
+	int  man[4], exp[4], evm_exp_val[4];
+	int  y, x, evm_4bit_fraction, fraction, db_sign = 0;
+	int  k, valid_evm_cnt = 0, min_exp_val =0, guard_bit_shift = 0;
+	int  evm_exp_sum=0, evm_exp_mul=0, evm_mant_sum=0, evm_mant_mul=0;
+	long evm_tmp_sum=0, evm_tmp_mul=0, linear_evm_val[4] ={0,0,0,0};
+
+	if (n_sym < 3)
+		return;
+
+	for ( k = 0; k < 4; k++) {
+		if (evm_array[k] != MUC_PHY_ERR_SUM_NOT_AVAIL) {// skip invalid EVM values
+
+			valid_evm_cnt++;
+			man[k] = (u_int32_t)(evm_array[k] >> 5);
+			exp[k] = (evm_array[k] - man[k] * 32);
+			man[k] += NUM_BITS_EVM_MANT_ONE;
+
+			if ( exp[k] > NUM_BITS_EVM_EXP_SHIFT )
+				linear_evm_val[k] =  (man[k]) << (exp[k] - NUM_BITS_EVM_EXP_SHIFT );
+			else
+				linear_evm_val[k] = (man[k]) >> (NUM_BITS_EVM_EXP_SHIFT - exp[k]);
+
+			exp[k] = exp[k] - NUM_BITS_EVM_EXP_SHIFT;
+
+			//printk("reg=%x, man = %d, exp = %d, n_sym=%d\n", evm_array[k], man[k], exp[k], n_sym);
+		}
+	}
+
+	if ( valid_evm_cnt == NUM_VALID_ONE_STREAM ) { // only one stream
+		y = linear_to_10log10((man[0]), 0, NUM_BITS_FOR_FRACTION)  +
+			((exp[0] - 11) * linear_to_10log10(2, 0, NUM_BITS_FOR_FRACTION)) ;
+	} else {
+
+		switch (valid_evm_cnt) {
+
+			case NUM_VALID_TWO_STREAM: //log(b+a) - log(a*b)
+				// get log(a + b)
+				evm_mant_sum = (long) (linear_evm_val[0]);
+				evm_mant_sum += (long) (linear_evm_val[1]);
+				evm_exp_sum = (int) highest_one_bit_pos(evm_mant_sum );
+				evm_mant_sum = conv_linear_mantissa(evm_mant_sum, evm_exp_sum);
+				evm_exp_sum -= NUM_BITS_EVM_MANT_SHIFT ;
+				// get log(a*b)
+				evm_tmp_mul = (long) (man[0]*man[1]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+
+				evm_exp_mul = (int) highest_one_bit_pos( evm_mant_mul );
+				evm_mant_mul = conv_linear_mantissa(evm_mant_mul, evm_exp_mul);
+				evm_exp_mul =  (1) + (evm_exp_mul - NUM_BITS_EVM_MANT_SHIFT );  // 1 time shift 12 bits
+				evm_exp_mul += (exp[0]+exp[1]);
+
+				break;
+
+			case NUM_VALID_THREE_STREAM: // log(bc+ab+ac) -log(abc)
+				// get 3 terms product: bc
+				evm_tmp_mul = (long) (man[1]*man[2]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_exp_val[0] = 1+(exp[1]+exp[2]);
+
+				min_exp_val = MIN(evm_exp_val[0], min_exp_val);
+				linear_evm_val[0] = (evm_mant_mul);
+
+				// get 3 terms product: ac
+				evm_tmp_mul = (long) (man[0]*man[2]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_exp_val[1] = 1+(exp[0]+exp[2]);
+
+				min_exp_val = MIN(evm_exp_val[1], min_exp_val);
+				linear_evm_val[1] = (evm_mant_mul);
+
+				// get 3 terms product: ab
+				evm_tmp_mul = (long) (man[0]*man[1]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_exp_val[2] = 1+(exp[0]+exp[1]);
+
+				min_exp_val = MIN(evm_exp_val[2], min_exp_val);
+				linear_evm_val[2] = (evm_mant_mul);
+
+				// check the tiny input cases
+				guard_bit_shift = min_exp_val + NUM_BITS_EVM_MANT_SHIFT;
+
+				if ( guard_bit_shift < NUM_BITS_GUARD_TINY_SHIFT )
+				       // min 7 bits for log table
+				       guard_bit_shift  = (-min_exp_val - NUM_BITS_EVM_MANT_SHIFT) + NUM_BITS_GUARD_TINY_SHIFT;
+				else   guard_bit_shift = 0;
+
+				// left-shift up to bit 30
+				guard_bit_shift = MIN(guard_bit_shift, (30-NUM_BITS_EVM_MANT_SHIFT));
+
+				for ( k = 0; k < valid_evm_cnt ; k++) {
+				  linear_evm_val[k] = (long)(linear_evm_val[k]) << guard_bit_shift;
+				  if ( evm_exp_val[k] >= 0 )
+				    linear_evm_val[k] <<=  (evm_exp_val[k]);
+				  else
+				    linear_evm_val[k] >>=  (-evm_exp_val[k]);
+				}
+
+
+				// get summation of the 2-term products: bc + ac + ab
+				evm_tmp_sum = (long) (linear_evm_val[0]);
+				evm_tmp_sum += (long) (linear_evm_val[1]);
+				evm_tmp_sum += (long) (linear_evm_val[2]);
+				evm_exp_sum = (int) highest_one_bit_pos( evm_tmp_sum );
+				evm_mant_sum = conv_linear_mantissa(evm_tmp_sum, evm_exp_sum);
+				evm_exp_sum -= (NUM_BITS_EVM_MANT_SHIFT  + guard_bit_shift);
+
+				// get 3 term products : a*b*c
+				evm_tmp_mul = (long) (man[0]*man[1]) ;
+				evm_mant_mul = (int) ( evm_tmp_mul) >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_tmp_mul = (long) (evm_mant_mul * man[2]);
+				evm_mant_mul = (int) ( evm_tmp_mul) >> NUM_BITS_EVM_MANT_SHIFT ;
+
+				evm_exp_mul = (int) highest_one_bit_pos( evm_mant_mul );
+				evm_mant_mul = conv_linear_mantissa(evm_mant_mul, evm_exp_mul);
+				evm_exp_mul =  (2) + (evm_exp_mul - NUM_BITS_EVM_MANT_SHIFT );  // 2 times shift 12 bits 
+				evm_exp_mul += (exp[0]+exp[1]+exp[2]);
+
+				break;
+
+			case NUM_VALID_FOUR_STREAM: // log(bcd+acd+abd+abc) -log(abcd)
+
+				// get 3 terms product: bcd
+				evm_tmp_mul = (long) (man[1]*man[2]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_tmp_mul = (long) evm_mant_mul * (man[3]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+
+				evm_exp_val[0] = 2+(exp[1]+exp[2]+exp[3]);
+				min_exp_val = MIN(evm_exp_val[0], min_exp_val);
+				linear_evm_val[0] = (long) (evm_mant_mul);
+
+
+				// get 3 terms product: acd
+				evm_tmp_mul = (long) (man[0]*man[2]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_tmp_mul = (long) evm_mant_mul * (man[3]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+
+				evm_exp_val[1] = 2+(exp[0]+exp[2]+exp[3]);
+				min_exp_val = MIN(evm_exp_val[1], min_exp_val);
+				linear_evm_val[1] = (long) (evm_mant_mul);
+
+				// get 3 terms product: abd
+				evm_tmp_mul = (long) (man[0]*man[1]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_tmp_mul = (long) evm_mant_mul * (man[3]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+
+				evm_exp_val[2] = 2+(exp[0]+exp[1]+exp[3]);
+				min_exp_val = MIN(evm_exp_val[2], min_exp_val);
+				linear_evm_val[2] = (long) (evm_mant_mul);
+
+				// get 3 terms product: abc
+				evm_tmp_mul = (long) (man[0]*man[1]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_tmp_mul = (long) evm_mant_mul * (man[2]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+
+				evm_exp_val[3] = 2+(exp[0]+exp[1]+exp[2]);
+				min_exp_val = MIN(evm_exp_val[3], min_exp_val);
+				linear_evm_val[3] = (long) (evm_mant_mul);
+
+				// check the tiny input cases
+				guard_bit_shift = min_exp_val + NUM_BITS_EVM_MANT_SHIFT;
+
+				if ( guard_bit_shift < NUM_BITS_GUARD_TINY_SHIFT )
+				       // min 7 bits for log table
+				       guard_bit_shift  = (-min_exp_val - NUM_BITS_EVM_MANT_SHIFT) + NUM_BITS_GUARD_TINY_SHIFT;
+				else   guard_bit_shift = 0;
+
+				// left-shift up to bit 30
+				guard_bit_shift = MIN(guard_bit_shift, (30-NUM_BITS_EVM_MANT_SHIFT));
+
+				for ( k = 0; k < valid_evm_cnt ; k++) {
+				  linear_evm_val[k] = (long)(linear_evm_val[k]) << guard_bit_shift;
+				  if ( evm_exp_val[k] >= 0 )
+				    linear_evm_val[k] <<=  (evm_exp_val[k]);
+				  else
+				    linear_evm_val[k] >>=  (-evm_exp_val[k]);
+				}
+
+
+				// summation of 3 term products : bcd+acd+abd+abc
+				evm_mant_sum  = (long) (linear_evm_val[0]);
+				evm_mant_sum += (long) (linear_evm_val[1]);
+				evm_mant_sum += (long) (linear_evm_val[2]);
+				evm_mant_sum += (long) (linear_evm_val[3]);
+
+				evm_exp_sum = (int) highest_one_bit_pos( evm_mant_sum );
+				evm_mant_sum = conv_linear_mantissa(evm_mant_sum, evm_exp_sum);
+				evm_exp_sum -= (NUM_BITS_EVM_MANT_SHIFT +  guard_bit_shift);
+
+				// get 4 terms products : a*b*c*d
+				evm_tmp_mul = (long) (man[0]*man[1]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_tmp_mul = (long) evm_mant_mul * (man[2]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+				evm_tmp_mul = (long) evm_mant_mul * (man[3]);
+				evm_mant_mul = (int) evm_tmp_mul >> NUM_BITS_EVM_MANT_SHIFT ;
+
+				evm_exp_mul = (int) highest_one_bit_pos( evm_mant_mul );
+				evm_mant_mul = conv_linear_mantissa(evm_mant_mul, evm_exp_mul);
+				evm_exp_mul =  (3) + (evm_exp_mul - NUM_BITS_EVM_MANT_SHIFT );  // 3 times shift 12 bits 
+				evm_exp_mul += (exp[0]+exp[1]+exp[2]+exp[3]);
+
+				break;
+		}
+
+		//printk("sum   = (%d, %d)\n", evm_mant_sum, evm_exp_sum);
+		//printk("prod  = (%d, %d)\n", (int) evm_mant_mul, evm_exp_mul);
+
+		y  = -linear_to_10log10((evm_mant_sum + NUM_BITS_EVM_MANT_ONE), 0, NUM_BITS_FOR_FRACTION);
+		y -=  (evm_exp_sum) * linear_to_10log10(2, 0, NUM_BITS_FOR_FRACTION);
+
+		y +=  linear_to_10log10((evm_mant_mul + NUM_BITS_EVM_MANT_ONE), 0, NUM_BITS_FOR_FRACTION);
+		y +=  (evm_exp_mul) * linear_to_10log10(2, 0, NUM_BITS_FOR_FRACTION);
+	}
+
+	x = linear_to_10log10(n_sym-3, 0, NUM_BITS_FOR_FRACTION);
+
+	evm_4bit_fraction = y - x;
+
+	// fix bug: negative dB shift error
+	if ( evm_4bit_fraction < 0 ) {
+		db_sign = 1;
+		evm_4bit_fraction = ABS(evm_4bit_fraction);
+	}
+
+	y = (evm_4bit_fraction >> NUM_BITS_FOR_FRACTION);
+	fraction = evm_4bit_fraction - (y << NUM_BITS_FOR_FRACTION);
+
+	if ( db_sign ==1 )
+		*evm_int = -y;
+	else
+		*evm_int = y;
+
+	*evm_frac = divide_by_16_x_10000(fraction);
+
+	//printk("int = %d, frac = %d\n", *evm_int, *evm_frac);
+}
+
+void convert_evm_db(u_int32_t evm_reg, int n_sym, int *evm_int, int *evm_frac)
+{
+	int man, exp, log_table_index, y, x, evm_4bit_fraction, fraction,db_sign = 0;
+
+	if (n_sym < 3)
+		return;
+
+	man = (u_int32_t)(evm_reg >> 5);
+	exp = (evm_reg - man * 32);
+
+	//printk("man = %d, exp = %d\n", man, exp);
+
+
+	log_table_index = (2048 + man) ;
+
+	y = linear_to_10log10(log_table_index, 0, NUM_BITS_FOR_FRACTION);
+	y += ((exp - 16) * linear_to_10log10(2, 0, NUM_BITS_FOR_FRACTION));
+	y -= (11 * linear_to_10log10(2, 0, NUM_BITS_FOR_FRACTION));
+	x = linear_to_10log10(n_sym-3, 0, NUM_BITS_FOR_FRACTION);
+
+	//printk("y = %d, x = %d\n", y, x);
+
+	evm_4bit_fraction = y - x;
+
+	// fix bug: negative dB shift error
+	if ( evm_4bit_fraction < 0 ) {
+		db_sign = 1;
+		evm_4bit_fraction = ABS(evm_4bit_fraction);
+	}
+
+	y = (evm_4bit_fraction >> NUM_BITS_FOR_FRACTION);
+	fraction = evm_4bit_fraction - (y << NUM_BITS_FOR_FRACTION);
+
+	if ( db_sign==1 )
+		*evm_int = -y;
+	else
+		*evm_int =  y;
+
+	*evm_frac = divide_by_16_x_10000(fraction);
+
+	//printk("int = %d, frac = %d\n", *evm_int, *evm_frac);
+}
+
+#ifdef FLOAT_SUPPORT
+inline double pow_int(int x, int y)
+{
+	unsigned int n;
+	double z;
+
+	if (y >= 0)
+		n = (unsigned int)y;
+	else
+		n = (unsigned int)(-y);
+
+	for (z = 1; ; x *= x) {
+		if ((n & 1) != 0)
+			z *= x;
+		if ((n >>= 1) == 0)
+			return (y < 0 ? 1 / z : z);
+	}
+};
+#endif //#ifdef FLOAT_SUPPORT
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_muc_stats_print.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_muc_stats_print.h
new file mode 100644
index 0000000..3d6df91
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_muc_stats_print.h
@@ -0,0 +1,381 @@
+/* autogenerated */
+
+/*
+ * Printable names for the MuC TX statistics counters.  Autogenerated:
+ * entry order must match the field order of the corresponding TX stats
+ * structure, so do not hand-edit, insert, or reorder entries here.
+ */
+#define MUC_TX_STATS_NAMES_TABLE { \
+	"bcn_enq_failed", \
+	"tx_status_set", \
+	"host_intr", \
+	"tx_reserved", \
+	"tx_reserve_fail", \
+	"txalert_mu_ndp_update", \
+	"txalert_mu_rpt_poll", \
+	"txalert_mu_queue_full", \
+	"txalert_mu_queue_fail", \
+	"sample_rate_mu", \
+	"sample_bw_mu", \
+	"txdone_intr", \
+	"txalert_intr", \
+	"txalert_tasklet", \
+	"txalert_bcn_update", \
+	"txalert_ndp_update", \
+	"tx_ndp_q_occupied", \
+	"tx_ndp_start", \
+	"tx_pwr", \
+	"bcn_scheme_power_save", \
+	"bcn_scheme", \
+	"fd_acquire", \
+	"fd_release", \
+	"fd_acq_fail", \
+	"fd_acq_fail_frms", \
+	"fd_acq_hal_fail", \
+	"fd_acq_hal_fail_frms", \
+	"ba_send", \
+	"fd_free_nodeclean", \
+	"tx_restrict_probe", \
+	"tx_restrict_mode", \
+	"tx_restrict_delay", \
+	"tx_sample_pkts", \
+	"tx_sample_bytes", \
+	"tx_underflow", \
+	"tx_hal_enqueued", \
+	"txbf_mode", \
+	"psel_matrix", \
+	"sample_rate", \
+	"sample_bw", \
+	"ra_flags", \
+	"fd_balance", \
+	"invalid_delay", \
+	"halt_tx", \
+	"resume_tx", \
+	"rfctrl_on", \
+	"rfctrl_off", \
+	"go_offchan", \
+	"go_datachan", \
+	"defer_cc", \
+	"deferred_cc_done", \
+	"off_chan_sample", \
+	"off_chan_scan", \
+	"off_chan_cac", \
+	"cca_pri", \
+	"cca_sec", \
+	"cca_sec40", \
+	"cca_busy", \
+	"cca_fat", \
+	"cca_intf", \
+	"cca_trfc", \
+	"mu_prec_snd_tx", \
+	"mu_prec_snd_wait_done", \
+	"mu_grp_sel_snd_tx", \
+	"mu_grp_sel_snd_wait_done", \
+	"oc_auctx_timeout", \
+	"oc_auctx_overwrite", \
+	"oc_auctx_fail", \
+	"gi_cnt", \
+	"gi_ncidx", \
+	"gi_val", \
+	"select_state_ncidx", \
+	"select_state_val", \
+	"pppc_scale_cnt", \
+	"pppc_scale_ncidx", \
+	"pppc_scale_val", \
+	"pppc_scale_last_gput", \
+	"pppc_scale_last_gput_idx", \
+	"pppc_scale_base_cnt", \
+	"pppc_scale_base_20m", \
+	"pppc_scale_base_40m", \
+	"pppc_scale_base_80m", \
+	"pppc_scale_base_copy", \
+	"pppc_scale_overstep", \
+	"pppc_scale_rollback", \
+	"pppc_0_gput", \
+	"tx_max_power", \
+	"nc_csr_read_count", \
+	"nc_csr_write_count", \
+	"nc_csr_done_watermark", \
+	"nc_csr_watermark_count", \
+	"auc_dtim_notify", \
+	"auc_ps_notify", \
+	"tx_beacon_done", \
+	"sfs_peer_rts", \
+	"sfs_peer_rts_flags", \
+	"sfs_local_rts", \
+	"sfs_local_rts_flags", \
+	"sfs_dyn_wmm", \
+	"sfs_dyn_wmm_flags", \
+	"auc_wmm_ps_notify", \
+	"tx_wmm_ps_null_frames", \
+	"qtn_bcn_stop", \
+	"mu_grp_snd_queue_is_not_empty", \
+	"mu_prec_snd_queue_is_not_empty", \
+	"mu_group_delete", \
+	"mu_group_install", \
+	"mu_group_rate_node_updates", \
+	"mu_update_rates_mu", \
+	"mu_update_rates_su", \
+	"autocs_sample_bits", \
+	"autocs_adjust_bits", \
+	"autocs_step_size", \
+	"autocs_cs_thresh", \
+	"autocs_min_rssi", \
+	"bmps_null_tx_success", \
+	"bmps_null_tx_fail", \
+	"bmps_null_tx_timeout", \
+	"txqueue_g1q0_deadline_frozen", \
+	"auc_ipc_retry", \
+	"auc_ipc_hwm", \
+	"auc_ipc_send_delay", \
+	"auc_ipc_send_delay_hwm", \
+}
+
+/*
+ * Printable names for the MuC RX statistics counters.  Autogenerated:
+ * entry order must match the field order of the corresponding RX stats
+ * structure (including the per-queue [0..2] arrays), so do not hand-edit,
+ * insert, or reorder entries here.
+ */
+#define MUC_RX_STATS_NAMES_TABLE { \
+	"rxdesc_pop_from_host", \
+	"rxdesc_get_from_queue", \
+	"rxdesc_push_to_host", \
+	"rxdesc_non_aggr_push_to_host", \
+	"rxdesc_flush_to_host", \
+	"rxdesc_reuse_push", \
+	"rxdesc_reuse_pop", \
+	"rxdesc_status_bad_dur", \
+	"rxdesc_status_bad_len", \
+	"rxdesc_status_crc_err", \
+	"rxdesc_status_cmic_err", \
+	"rxdesc_status_cmic_no_crc_err", \
+	"rxdesc_status_retry", \
+	"agg_stored", \
+	"agg_duplicate", \
+	"accel_mpdu", \
+	"accel_msdu", \
+	"accel_fwt_lu_timeout", \
+	"accel_mcast_send", \
+	"accel_mcast_drop", \
+	"accel_no_match", \
+	"accel_drop", \
+	"accel_err", \
+	"rate_train_chk", \
+	"rate_train_err", \
+	"rate_train_delay", \
+	"rate_train_none", \
+	"rate_train_hash_bad", \
+	"rate_train_hash_good", \
+	"agg_oldpkts", \
+	"agg_very_oldpkts", \
+	"agg_evict_in_order", \
+	"agg_evict_in_move", \
+	"agg_evict_empty", \
+	"agg_timeout", \
+	"agg_rxwin_reset", \
+	"rx_qnum_err", \
+	"rx_mgmt", \
+	"rx_ctrl", \
+	"rx_pspoll", \
+	"rx_pwr_mgmt", \
+	"rx_delba", \
+	"rx_pwr_mgmt_set", \
+	"rx_pwr_mgmt_reset", \
+	"rx_desc_underflow", \
+	"rx_desc_linkerr", \
+	"rx_notify", \
+	"rx_df_numelems", \
+	"last_recv_seq", \
+	"rx_node_not_found", \
+	"rx_non_qos_duplicate", \
+	"rx_11n_ndp", \
+	"rx_11ac_ndp", \
+	"rx_ndp_inv_slot", \
+	"rx_11n_ndp_no_capt", \
+	"rx_ndp_sw_processed", \
+	"rx_ndp_lockup", \
+	"rx_11n_bf_act", \
+	"rx_11ac_bf_act", \
+	"rx_bf_act_inv_slot", \
+	"rx_amsdu", \
+	"rx_data", \
+	"prev_rx_data", \
+	"rx_recv_qnull", \
+	"rx_recv_act", \
+	"rx_recv_bcn", \
+	"rx_recv_auth", \
+	"rx_recv_assoc_req", \
+	"rx_recv_assoc_res", \
+	"rx_recv_deauth", \
+	"rx_recv_disassoc", \
+	"rx_mcs_gt_76", \
+	"tkip_keys", \
+	"rx_tkip_mic_err", \
+	"icv_errs", \
+	"tmic_errs", \
+	"cmic_errs", \
+	"crc_errs", \
+	"ba_tx", \
+	"ba_rx", \
+	"ba_rx_fail", \
+	"sec_oflow", \
+	"str_oflow", \
+	"oflow_fixup_timeout", \
+	"rxdone_intr", \
+	"rxtypedone_intr", \
+	"ipc_a2m_intr", \
+	"tqe_intr", \
+	"tqe_in_port_lhost", \
+	"tqe_in_port_bad", \
+	"tqe_a2m_type_txfb", \
+	"tqe_a2m_type_rxpkt", \
+	"tqe_a2m_type_unknown", \
+	"tqe_reschedule_task", \
+	"tqe_desc_unowned", \
+	"bb_intr", \
+	"bb_irq_dleaf_oflow", \
+	"bb_irq_leaf_uflow", \
+	"bb_irq_leaf_ldpc_uflow", \
+	"bb_irq_tx_td_oflow_intr", \
+	"bb_irq_tx_td_uflow_intr", \
+	"bb_irq_rx_sm_wdg_intr", \
+	"bb_irq_rx_long_dur", \
+	"bb_irq_rx_long_dur_11ac", \
+	"bb_irq_rx_long_dur_11n", \
+	"bb_irq_rx_long_dur_11n_qtn", \
+	"bb_irq_rx_sym_exceed_rst", \
+	"bb_irq_tx_sm_wdg_intr", \
+	"bb_irq_main_sm_wdg_intr", \
+	"bb_irq_hready_wdg_intr", \
+	"mac_irq_rx_sec_buff_oflow", \
+	"mac_irq_rx_strq_oflow", \
+	"mac_irq_rx_bb_uflow_intr", \
+	"mac_irq_rx_bb_oflow_intr", \
+	"bb_irq_hready_wdg_reset", \
+	"sreset_wdg_begin", \
+	"sreset_wdg_end", \
+	"sreset_wdg_in_place", \
+	"sreset_wdg_tx_beacon_hang", \
+	"sreset_wdg_tx_hang", \
+	"sreset_wdg_pm_corrupt", \
+	"sreset_wdg_tcm_corrupt", \
+	"sreset_wdg_rx_done", \
+	"sreset_wdg_in_place_try", \
+	"sreset_wdg_tasklet_sched_1", \
+	"sreset_wdg_tasklet_sched_2", \
+	"sreset_tasklet_sched", \
+	"sreset_tasklet_begin", \
+	"sreset_tasklet_end", \
+	"hreset_req", \
+	"sreset_begin", \
+	"sreset_end", \
+	"sreset_dma_rx_inprog", \
+	"sreset_dma_tx_inprog", \
+	"sreset_dma_rx_max_wait", \
+	"sreset_dma_tx_max_wait", \
+	"sreset_dma_tx_hang", \
+	"sreset_dma_rx_hang", \
+	"sreset_dma_rx_wait_timeout", \
+	"sreset_dma_tx_wait_timeout", \
+	"sreset_drop_not_valid", \
+	"sreset_drop_bad_addr", \
+	"rf_cmpvtune_out", \
+	"rf_cal_freq", \
+	"ac_max", \
+	"ac_min", \
+	"ac_cur", \
+	"ac_adj", \
+	"rx_gain", \
+	"rd_cache_indx", \
+	"logger_sreset_wmac1_dma_rx_inprog", \
+	"logger_sreset_wmac1_dma_tx_inprog", \
+	"logger_sreset_wmac1_dma_rx_max_wait", \
+	"logger_sreset_wmac1_dma_tx_max_wait", \
+	"logger_sreset_wmac1_dma_tx_hang", \
+	"logger_sreset_wmac1_dma_rx_hang", \
+	"logger_sreset_wmac1_dma_rx_wait_timeout", \
+	"logger_sreset_wmac1_dma_tx_wait_timeout", \
+	"mu_rx_pkt", \
+	"pduty_sleep", \
+	"pduty_rxoff", \
+	"pduty_period", \
+	"pduty_pct", \
+	"soft_ring_push_to_tqe", \
+	"soft_ring_empty", \
+	"soft_ring_not_empty", \
+	"soft_ring_add_force", \
+	"soft_ring_add_to_head", \
+	"soft_ring_add_continue", \
+	"soft_ring_free_pool_empty", \
+	"mimo_ps_mode_switch", \
+	"rx_vlan_drop", \
+	"auto_cca_state", \
+	"auto_cca_th", \
+	"auto_cca_spre", \
+	"auto_cca_intf", \
+	"total_dmem_alloc", \
+	"total_dram_alloc", \
+	"dmem_alloc_fails", \
+	"dram_alloc_fails", \
+	"total_dmem_free", \
+	"total_dram_free", \
+	"rx_bw_80", \
+	"rx_bw_40", \
+	"rx_bw_20", \
+	"rx_wmm_ps_trigger", \
+	"rx_wmm_ps_set", \
+	"rx_wmm_ps_reset", \
+	"rx_intr_next_ptr_0", \
+	"rx_hbm_pool_depleted", \
+	"rxq_intr[0]", \
+	"rxq_intr[1]", \
+	"rxq_intr[2]", \
+	"rxq_fill[0]", \
+	"rxq_fill[1]", \
+	"rxq_fill[2]", \
+	"rxq_nobuf[0]", \
+	"rxq_nobuf[1]", \
+	"rxq_nobuf[2]", \
+	"rxq_stop[0]", \
+	"rxq_stop[1]", \
+	"rxq_stop[2]", \
+	"rxq_pkt[0]", \
+	"rxq_pkt[1]", \
+	"rxq_pkt[2]", \
+	"rxq_bad_status[0]", \
+	"rxq_bad_status[1]", \
+	"rxq_bad_status[2]", \
+	"rxq_pkt_oversize[0]", \
+	"rxq_pkt_oversize[1]", \
+	"rxq_pkt_oversize[2]", \
+	"rxq_pkt_delivered[0]", \
+	"rxq_pkt_delivered[1]", \
+	"rxq_pkt_delivered[2]", \
+	"rxq_status_hole_chk_num[0]", \
+	"rxq_status_hole_chk_num[1]", \
+	"rxq_status_hole_chk_num[2]", \
+	"rxq_status_hole_chk_step_sum[0]", \
+	"rxq_status_hole_chk_step_sum[1]", \
+	"rxq_status_hole_chk_step_sum[2]", \
+	"rxq_status_hole_chk_step_max[0]", \
+	"rxq_status_hole_chk_step_max[1]", \
+	"rxq_status_hole_chk_step_max[2]", \
+	"rxq_status_hole[0]", \
+	"rxq_status_hole[1]", \
+	"rxq_status_hole[2]", \
+	"rxq_status_hole_max_size[0]", \
+	"rxq_status_hole_max_size[1]", \
+	"rxq_status_hole_max_size[2]", \
+	"rxq_process_max[0]", \
+	"rxq_process_max[1]", \
+	"rxq_process_max[2]", \
+	"rxq_process_sum[0]", \
+	"rxq_process_sum[1]", \
+	"rxq_process_sum[2]", \
+	"rxq_process_num[0]", \
+	"rxq_process_num[1]", \
+	"rxq_process_num[2]", \
+	"rxq_process_limited[0]", \
+	"rxq_process_limited[1]", \
+	"rxq_process_limited[2]", \
+	"rxq_desc_chain_empty[0]", \
+	"rxq_desc_chain_empty[1]", \
+	"rxq_desc_chain_empty[2]", \
+	"rx_data_last_seqfrag", \
+	"rx_data_last_ip_id", \
+	"rx_opmode_notify", \
+	"accel_l2_ext_filter", \
+	"accel_mc_send_l2_ext_filter", \
+	"accel_mc_drop_l2_ext_filter", \
+	"rx_frame_addressed_to_wrong_bss", \
+}
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_net_packet.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_net_packet.h
new file mode 100644
index 0000000..3ebc872
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_net_packet.h
@@ -0,0 +1,171 @@
+#ifndef __QTN_NET_PACKET_H__
+#define __QTN_NET_PACKET_H__
+
+#include <qtn/qtn_global.h>
+
+/*
+ * Well-known EtherType values (host byte order).  Each definition is
+ * guarded so that an existing system/toolchain definition wins.
+ */
+#ifndef ETHERTYPE_ARP
+#define	ETHERTYPE_ARP	0x0806		/* ARP protocol */
+#endif
+
+#ifndef ETHERTYPE_AARP
+#define ETHERTYPE_AARP	0x80f3		/* Appletalk AARP */
+#endif
+
+#ifndef ETHERTYPE_PAE
+#define	ETHERTYPE_PAE	0x888e		/* EAPOL PAE/802.1x */
+#endif
+
+#ifndef ETHERTYPE_IP
+#define	ETHERTYPE_IP	0x0800		/* IP protocol */
+#endif
+
+#ifndef ETHERTYPE_IPV6
+#define	ETHERTYPE_IPV6	0x86DD		/* IPv6 protocol */
+#endif
+
+#ifndef ETHERTYPE_IPX
+#define ETHERTYPE_IPX	0x8137		/* IPX over DIX */
+#endif
+
+#ifndef ETHERTYPE_802A
+#define ETHERTYPE_802A	0x88B7
+#endif
+
+#ifndef ETHERTYPE_8021Q
+#define	ETHERTYPE_8021Q	0x8100          /* 802.1Q VLAN header */
+#endif
+
+#ifndef ETHERTYPE_8021AD
+#define ETHERTYPE_8021AD	0x88A8  /* 802.1AD VLAN S-TAG header */
+#endif
+
+#ifndef ETHERTYPE_WAKE_ON_LAN
+#define ETHERTYPE_WAKE_ON_LAN	0X0842
+#endif
+
+/* Overlay views of an IPv4 address: one 32-bit word, two halves, or bytes. */
+union qtn_ipv4_addr {
+	uint32_t ip32;
+	uint16_t ip16[2];
+	uint8_t ip8[4];
+};
+
+/*
+ * IPv4 header as it appears on the wire (fields in network byte order).
+ * NOTE(review): the flags:3/fragoffset:13 bitfields rely on the compiler
+ * packing them MSB-first within the uint16_t - confirm for this ABI.
+ */
+struct qtn_ipv4 {
+	uint8_t	vers_ihl;
+	uint8_t dscp;
+	uint16_t length;
+	uint16_t ident;
+	uint16_t flags:3,
+		 fragoffset:13;
+	uint8_t ttl;
+	uint8_t proto;
+	uint16_t csum;
+	union qtn_ipv4_addr srcip;
+	union qtn_ipv4_addr dstip;
+	uint32_t opt[0];	/* flexible tail: options, when IHL > 5 */
+};
+
+/* Overlay views of an IPv6 address: 64/32/16-bit words or raw bytes. */
+union qtn_ipv6_addr {
+	uint64_t ip64[2];
+	uint32_t ip32[4];
+	uint16_t ip16[8];
+	uint8_t ip8[16];
+};
+
+/* Fixed 40-byte IPv6 header (fields in network byte order). */
+struct qtn_ipv6 {
+	uint16_t vers_tclass_flowlabel[2];
+	uint16_t length;
+	uint8_t next_hdr;
+	uint8_t hop_limit;
+	union qtn_ipv6_addr srcip;
+	union qtn_ipv6_addr dstip;
+};
+
+/*
+ * Extract the 8-bit Traffic Class from the first 16-bit word of the IPv6
+ * header (4 version bits, then 8 traffic-class bits).
+ * NOTE(review): assumes the uint16_t load yields the wire (big-endian)
+ * bit order - confirm for little-endian hosts.
+ */
+RUBY_INLINE uint8_t qtn_ipv6_tclass(const struct qtn_ipv6 *ipv6)
+{
+	return ((ipv6->vers_tclass_flowlabel[0]) >> 4) & 0xFF;
+}
+
+/* IANA IP protocol numbers used by the packet classifier. */
+#define QTN_IP_PROTO_ICMP	1
+#define QTN_IP_PROTO_IGMP	2
+#define QTN_IP_PROTO_TCP	6
+#define QTN_IP_PROTO_UDP	17
+#define QTN_IP_PROTO_IPV6FRAG	44
+#define QTN_IP_PROTO_ICMPV6	58
+#define QTN_IP_PROTO_RAW	255
+
+/* Maximum number of stacked VLAN tags handled. */
+#define QTN_MAX_VLANS	4
+
+/* 802.1Q tag as seen on the wire: TPID followed by the TCI. */
+struct qtn_8021q {
+	uint16_t tpid;
+	uint16_t tci;
+};
+
+/* UDP header (all fields in network byte order). */
+struct qtn_udp {
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint16_t length;
+	uint16_t csum;
+};
+
+/*
+ * Map an IPv4 multicast address (4 bytes, network order) to its Ethernet
+ * multicast MAC: fixed 01:00:5e prefix plus the low 23 bits of the IP.
+ */
+RUBY_INLINE void qtn_mcast_ipv4_to_mac(uint8_t *const mac_be,
+		const uint8_t *const ipv4)
+{
+	mac_be[0] = 0x01;
+	mac_be[1] = 0x00;
+	mac_be[2] = 0x5E;
+	mac_be[3] = ipv4[1] & 0x7F;	/* drop the top bit: only 23 IP bits fit */
+	mac_be[4] = ipv4[2];
+	mac_be[5] = ipv4[3];
+}
+
+/*
+ * Map an IPv6 multicast address (16 bytes, network order) to its Ethernet
+ * multicast MAC: fixed 33:33 prefix plus the last 4 bytes of the address.
+ */
+RUBY_INLINE void qtn_mcast_ipv6_to_mac(uint8_t *const mac_be,
+		const uint8_t *const ipv6)
+{
+	mac_be[0] = 0x33;
+	mac_be[1] = 0x33;
+	mac_be[2] = ipv6[12];
+	mac_be[3] = ipv6[13];
+	mac_be[4] = ipv6[14];
+	mac_be[5] = ipv6[15];
+}
+
+/*
+ * Reconstruct an IPv4 multicast address from a 01:00:5e MAC plus the 5-bit
+ * alias 'ip_map' (bit 0 restores IP bit 23; bits 4:1 restore IP bits 27:24,
+ * the inverse of qtn_mcast_ipv4_alias()).  The top nibble is forced to 0xE
+ * (224.0.0.0/4 multicast range).
+ */
+RUBY_INLINE void qtn_mcast_mac_to_ipv4(uint8_t *const ipv4,
+		const uint8_t *const mac_be, const uint8_t ip_map)
+{
+	ipv4[0] = ((ip_map >> 1) & 0xF) | (0xE << 4);
+	ipv4[1] = (mac_be[3] & 0x7F) | ((ip_map & 1) << 7);
+	ipv4[2] = mac_be[4];
+	ipv4[3] = mac_be[5];
+}
+
+/*
+ * Dispatch to the IPv4 or IPv6 multicast MAC mapping based on the frame's
+ * EtherType (compared in network byte order).  Unknown types leave mac_be
+ * untouched.
+ */
+RUBY_INLINE void qtn_mcast_to_mac(uint8_t *mac_be, const void *addr, uint16_t ether_type)
+{
+	if (ether_type == htons(ETHERTYPE_IP)) {
+		qtn_mcast_ipv4_to_mac(mac_be, addr);
+	} else if (ether_type == htons(ETHERTYPE_IPV6)) {
+		qtn_mcast_ipv6_to_mac(mac_be, addr);
+	} else {
+		/* invalid address family */
+	}
+}
+
+/*
+ * IPV4 extra metadata per entry
+ * Size derive from ipv4 address[27:23]
+ * ipv4[0] is always 0xe followed by 5 bits that define the ipv4
+ * table size so in this way we can differentiate between similar multicast mac addresses
+ * the other 23 bits assemble the mac multicast id.
+ */
+/*
+ * Pack IP bits 27:23 into a 5-bit alias: bit 0 comes from ipv4[1] bit 7,
+ * bits 4:1 from the low nibble of ipv4[0].  These are exactly the bits the
+ * 01:00:5e MAC mapping discards, disambiguating colliding MAC addresses.
+ */
+RUBY_INLINE uint8_t qtn_mcast_ipv4_alias(const uint8_t *ipv4)
+{
+	return ((ipv4[1] >> 7) | (ipv4[0] & 0xF) << 1);
+}
+
+/*
+ * True (non-zero) when the EtherType (network byte order) is a VLAN TPID:
+ * 802.1Q C-TAG (0x8100) or 802.1ad S-TAG (0x88A8).
+ */
+RUBY_INLINE uint8_t qtn_ether_type_is_vlan(const uint16_t type)
+{
+	return ((type == htons(ETHERTYPE_8021Q))
+			|| (type == htons(ETHERTYPE_8021AD)));
+}
+
+#endif	// __QTN_NET_PACKET_H__
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_pcap.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_pcap.h
new file mode 100644
index 0000000..300905a
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_pcap.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2013 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __QTN_PCAP_H__
+#define __QTN_PCAP_H__
+
+/* Compile-time switch for the generic packet-capture ring (0 = disabled). */
+#define QTN_GENPCAP	0
+
+#ifdef MUC_BUILD
+	/* On the MuC, copies go through the firmware memcpy. */
+	#define qtn_pcap_memcpy uc_memcpy
+	extern struct qtn_genpcap *g_qtn_genpcap_state;
+#else
+	#define qtn_pcap_memcpy memcpy
+#endif	/* MUC_BUILD */
+
+/*
+ * Capture ring state.  payloads_count_s and payload_size_s are log2 of the
+ * slot count and slot size respectively (slots addressed as
+ * (1 << payload_size_s) * index); payloads_written counts total packets,
+ * wrapping over the ring modulo the slot count.
+ */
+struct qtn_genpcap {
+	uint8_t active;
+	uint8_t payloads_count_s;	/* log2(number of slots) */
+	uint8_t payload_size_s;		/* log2(bytes per slot) */
+	uint8_t ___pad;
+	uint8_t *payloads_vaddr;	/* virtual base of the slot array */
+	uint8_t *payloads_paddr;	/* physical base of the slot array */
+	unsigned long payloads_written;
+};
+
+/* Buffer handed to the driver: virtual + DMA address of the same memory. */
+struct qtn_genpcap_args {
+	void *vaddr;
+	dma_addr_t paddr;
+};
+
+
+/* Per-packet header stored in front of each captured payload. */
+struct qtn_pcap_hdr {
+	uint64_t tsf;	/* capture timestamp (TSF) */
+	uint16_t incl;	/* bytes actually stored (possibly truncated) */
+	uint16_t orig;	/* original packet length */
+};
+
+/* Payload bytes available per ring slot, after the per-packet header. */
+static __inline__ unsigned long qtn_pcap_max_payload(const struct qtn_genpcap *state)
+{
+	return (1 << state->payload_size_s) - sizeof(struct qtn_pcap_hdr);
+}
+
+/*
+ * Claim the next ring slot (payloads_written modulo the power-of-two slot
+ * count), stamp its TSF, and return the slot's per-packet header so the
+ * caller can fill in lengths and payload.
+ * NOTE(review): the slot pointer is computed from payloads_paddr (the
+ * physical base), not payloads_vaddr - confirm this is intentional for the
+ * CPU doing the writes.
+ */
+static __inline__ struct qtn_pcap_hdr *
+qtn_pcap_add_packet_start(struct qtn_genpcap *state, uint64_t tsf)
+{
+	unsigned int pkt_index;
+	struct qtn_pcap_hdr *hdr;
+
+	pkt_index = state->payloads_written % (1 << state->payloads_count_s);
+	state->payloads_written++;
+
+	hdr = (void *) (state->payloads_paddr + ((1 << state->payload_size_s) * pkt_index));
+	hdr->tsf = tsf;
+
+	return hdr;
+}
+
+/*
+ * Capture one packet: claim a slot, record the original length, then copy
+ * the payload truncated to the slot capacity (incl <= orig, pcap-style).
+ */
+static __inline__ void qtn_pcap_add_packet(struct qtn_genpcap *state,
+		const void *payload, uint16_t len, uint64_t tsf)
+{
+	struct qtn_pcap_hdr *hdr;
+
+	hdr = qtn_pcap_add_packet_start(state, tsf);
+	hdr->orig = len;
+	if (len >= qtn_pcap_max_payload(state))
+		hdr->incl = qtn_pcap_max_payload(state);
+	else
+		hdr->incl = len;
+
+	/* Payload follows immediately after the per-packet header. */
+	qtn_pcap_memcpy((hdr + 1), payload, hdr->incl);
+}
+
+#endif	/* __QTN_PCAP_H__ */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_pcap_public.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_pcap_public.h
new file mode 100644
index 0000000..d43b1b1
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_pcap_public.h
@@ -0,0 +1,83 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2013 Quantenna Communications Inc                   **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _QDRV_PCAP_PUBLIC_H
+#define _QDRV_PCAP_PUBLIC_H
+
+/* Classic libpcap file (global) header layout. */
+struct pcap_hdr {
+	uint32_t magic_number;
+	uint16_t version_major;	/* major version number */
+	uint16_t version_minor;	/* minor version number */
+	int32_t thiszone;	/* GMT to local correction */
+	uint32_t sigfigs;	/* accuracy of timestamps */
+	uint32_t snaplen;	/* max length of captured packets, in octets */
+	uint32_t network;	/* data link type */
+};
+/* Native-endian pcap magic and the DLT_* link types this code emits. */
+#define PCAP_HDR_MAGIC			0xa1b2c3d4
+#define PCAP_HDR_LINKTYPE_ETHER		1
+#define PCAP_HDR_LINKTYPE_80211		105
+#define PCAP_HDR_LINKTYPE_80211_RTAP	127
+
+/* Per-record header preceding each captured packet in the pcap file. */
+struct pcaprec_hdr {
+	uint32_t ts_sec;	/* timestamp, seconds */
+	uint32_t ts_usec;	/* timestamp, microseconds */
+	uint32_t incl_len;	/* bytes stored in file */
+	uint32_t orig_len;	/* original packet length */
+};
+
+/*
+ * Build a pcap v2.4 global header (by value) with the given snap length.
+ * Link type is fixed to raw 802.11 (DLT 105); zone/sigfigs are zero.
+ */
+static __inline__ struct pcap_hdr qtn_pcap_mkhdr(uint32_t max_pkt)
+{
+	struct pcap_hdr h;
+
+	h.magic_number = PCAP_HDR_MAGIC;
+	h.version_major = 2;
+	h.version_minor = 4;
+	h.thiszone = 0;
+	h.sigfigs = 0;
+	h.snaplen = max_pkt;
+	h.network = PCAP_HDR_LINKTYPE_80211;
+
+	return h;
+}
+
+
+#endif	/* _QDRV_PCAP_PUBLIC_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_skb_cb.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_skb_cb.h
new file mode 100644
index 0000000..973e520
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_skb_cb.h
@@ -0,0 +1,71 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_SKB_CB_H
+#define __QTN_SKB_CB_H
+
+/*
+ * Quantenna per-skb control block: classification results plus a bitmask
+ * of M_* processing flags (values defined inline below).
+ */
+struct qtn_skb_cb {
+	uint8_t		encap;		/* Encapsulation type */
+	uint8_t		ip_protocol;	/* IP protocol */
+	uint16_t	ether_type;	/* Ethernet Type */
+	void		*ni;		/* Node structure pointer */
+	uint32_t	flags;
+#define M_LINK0			0x0001	/* frame needs WEP encryption */
+#define M_FF			0x0002	/* fast frame */
+#define M_PWR_SAV		0x0004	/* bypass powersave handling */
+#define M_UAPSD			0x0008	/* frame flagged for u-apsd handling */
+#define M_RAW			0x0010
+#define M_CLASSIFY		0x0020	/* Packet has been classified */
+#define M_VSP_CHK		0x0040	/* VSP check done */
+#define M_VSP_NOT_FOUND		0x0080	/* VSP stream not found - usually first pkts from Eth */
+#define M_VSP_RX_BSS		0x0100	/* VSP stream originating from the BSS */
+#define M_RATE_TRAINING		0x0200	/* Empty data frame used to do rate training */
+#define M_NO_AMSDU		0x0400	/* AMSDU is prohibited for this frame */
+#define M_ENQUEUED_SCH		0x0800	/* Enqueued in qdrv_sch */
+#define M_ENQUEUED_MUC		0x1000	/* Enqueued to MuC */
+#define	M_TX_DONE_IMM_INT	0x2000	/* Immediately interrupt lhost when tx done */
+#define M_VLAN_TAGGED		0x4000	/* skb belongs to some VLAN */
+#define M_ORIG_OUTSIDE		0x8000	/* skb is not from local protocol stack */
+#define M_ORIG_BR		0x10000	/* skb is sent from bridge interfaces */
+#define M_NO_L2_LRN		0x20000	/* MAC learning disabled */
+#define M_HAS_MISALIGN		0x40000 /* skb has been word-aligned */
+
+};
+
+/* Encapsulation type values and accessors for qtn_cb.encap. */
+#define QTN_SKB_ENCAP_ETH		0
+#define QTN_SKB_ENCAP_80211_MGMT	1
+#define QTN_SKB_ENCAP_80211_DATA	2
+#define QTN_SKB_ENCAP(_skb)		((_skb)->qtn_cb.encap)
+#define QTN_SKB_ENCAP_IS_80211(_skb)	((_skb)->qtn_cb.encap > 0)
+#define QTN_SKB_ENCAP_IS_80211_MGMT(_skb) \
+					((_skb)->qtn_cb.encap == QTN_SKB_ENCAP_80211_MGMT)
+
+/* Accessors for the remaining qtn_cb classification fields. */
+#define QTN_SKB_CB_NI(_skb)		((_skb)->qtn_cb.ni)
+#define QTN_SKB_CB_ETHERTYPE(_skb)	((_skb)->qtn_cb.ether_type)
+#define QTN_SKB_CB_IPPROTO(_skb)	((_skb)->qtn_cb.ip_protocol)
+
+/* M_* flag manipulation on qtn_cb.flags (set / clear / test / mask). */
+#define M_FLAG_SET(_skb, _flag)		((_skb)->qtn_cb.flags |= _flag)
+#define M_FLAG_CLR(_skb, _flag)		((_skb)->qtn_cb.flags &= ~_flag)
+#define M_FLAG_GET(_skb, _flag)		((_skb)->qtn_cb.flags & _flag)
+#define M_FLAG_ISSET(_skb, _flag)	(!!((_skb)->qtn_cb.flags & _flag))
+#define M_FLAG_KEEP_ONLY(_skb, _flag)	((_skb)->qtn_cb.flags &= _flag)
+
+/* Convenience wrappers for the powersave-bypass flag. */
+#define M_PWR_SAV_SET(skb)		M_FLAG_SET((skb), M_PWR_SAV)
+#define M_PWR_SAV_CLR(skb)		M_FLAG_CLR((skb), M_PWR_SAV)
+#define M_PWR_SAV_GET(skb)		M_FLAG_GET((skb), M_PWR_SAV)
+
+#endif /* #ifndef __QTN_SKB_CB_H */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_skb_size.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_skb_size.h
new file mode 100644
index 0000000..7733127
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_skb_size.h
@@ -0,0 +1,51 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_SKB_SIZE_H
+#define __QTN_SKB_SIZE_H
+
+#include "qtn_buffers.h"
+
+/* Usable payload bytes in one receive buffer. */
+#define RX_BUF_SIZE_PAYLOAD	(4400)
+
+/*
+ * sk_buff size for Ruby qdrv and arasan driver.
+ * These should be the same since they share the same recycle list.
+ *
+ * In Topaz, HBM is used for buffer allocation
+ */
+/* Payload plus slack for cache-line alignment of the buffer start. */
+#define RX_BUF_SIZE		(RX_BUF_SIZE_PAYLOAD + 2 * L1_CACHE_BYTES - 1)
+
+/*
+ * Optimization for buffer allocation.
+ * Used to modify kernel to have kmalloc() cache entry of this size.
+ */
+#define RX_BUF_SIZE_KMALLOC	(roundup((RX_BUF_SIZE) + 256, 256))
+
+/*
+ * EMAC buffer limits for software rx/tx, not hardware rxp/txp
+ */
+#define RUBY_EMAC_NUM_TX_BUFFERS	(1 << 8)
+#define RUBY_EMAC_NUM_RX_BUFFERS	(1 << 10)
+
+#endif // #ifndef __QTN_SKB_SIZE_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_trace.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_trace.h
new file mode 100644
index 0000000..2c9855e
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_trace.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+
+
+#ifndef _IF_QTN_TRACE_H_
+#define _IF_QTN_TRACE_H_
+
+/*
+ * Trace event identifiers.  The high byte groups events by subsystem
+ * (0x01 tx, 0x02 rx, 0x03 rx intr, 0x04 reset, ...); the low bits number
+ * events within a group.  Note the values are not all sequential, and
+ * QTN_TRACE_EV_PPPC_PWR_INDEX deliberately-or-accidentally shares
+ * 0x0B000003 with QTN_TRACE_EV_PROBE_PPPC_END (legal in C, but the two
+ * events are indistinguishable in a trace dump).
+ */
+enum qtn_trace_types {
+	QTN_TRACE_EV_NONE		= 0x00000000,
+	QTN_TRACE_EV_TX_PKT		= 0x01000001,
+	QTN_TRACE_EV_TX_PKT_SZ		= 0x01000002,
+	QTN_TRACE_EV_TX_PKT_BA		= 0x01000003,
+	QTN_TRACE_EV_RX_PKT		= 0x02000001,
+	QTN_TRACE_EV_RX_BAD_PKT		= 0x02000002,
+	QTN_TRACE_EV_RX_NOT_VALID	= 0x02000003,
+	QTN_TRACE_EV_RX_MAX_DUR		= 0x02000004,
+	QTN_TRACE_EV_RX_BAD_LEN		= 0x02000005,
+	QTN_TRACE_EV_RX_BAD_MCS_1	= 0x02000006,
+	QTN_TRACE_EV_RX_BAD_MCS_2	= 0x02000008,
+	QTN_TRACE_EV_RX_BAD_MCS_3	= 0x02000009,
+	QTN_TRACE_EV_RX_BAD_MCS_4	= 0x0200000a,
+	QTN_TRACE_EV_RX_BAD_MCS_5	= 0x0200000b,
+	QTN_TRACE_EV_RX_PS_POLL		= 0x02000007,
+	QTN_TRACE_EV_RX_INTR_SECOVRFL	= 0x03000001,
+	QTN_TRACE_EV_RX_INTR_STRQOFLOW	= 0x03000002,
+	QTN_TRACE_EV_RST_BCN		= 0x04000001,
+	QTN_TRACE_EV_RST_TX		= 0x04000002,
+	QTN_TRACE_EV_RST_RX		= 0x04000003,
+	QTN_TRACE_EV_RST_PM		= 0x04000004,
+	QTN_TRACE_EV_RST_SCHED1		= 0x04000005,
+	QTN_TRACE_EV_RST_SCHED2		= 0x04000006,
+	QTN_TRACE_EV_RST_START		= 0x04000007,
+	QTN_TRACE_EV_RST_END		= 0x04000008,
+	QTN_TRACE_EV_BB_INT		= 0x05000001,
+	QTN_TRACE_EV_RX_DONE_INT	= 0x06000001,
+	QTN_TRACE_EV_RX_TYPEDONE_INT	= 0x06000002,
+	QTN_TRACE_EV_TX_DONE_INT	= 0x07000001,
+	QTN_TRACE_EV_TX_DONE_DEPTH	= 0x07000002,
+	QTN_TRACE_EV_TX_DONE_INHW	= 0x07000003,
+	QTN_TRACE_EV_TX_DONE_CNT	= 0x07000004,
+	QTN_TRACE_EV_TX_XATTEMPTS	= 0x07000005,
+	QTN_TRACE_EV_TX_PROBE_RESP	= 0x07000006,
+	QTN_TRACE_EV_WDOG_TX_START	= 0x08000001,
+	QTN_TRACE_EV_WDOG_TX_DONE	= 0x08000002,
+	QTN_TRACE_EV_MCST_DEFER		= 0x09000001,
+	QTN_TRACE_EV_HW_WDOG_WARN	= 0x0A000001,
+	QTN_TRACE_EV_PROBE_STATE	= 0x0B000001,
+	QTN_TRACE_EV_PROBE_PPPC_START	= 0x0B000002,
+	QTN_TRACE_EV_PROBE_PPPC_END	= 0x0B000003,
+	QTN_TRACE_EV_PROBE_SGI_START	= 0x0B000004,
+	QTN_TRACE_EV_PROBE_SGI_END	= 0x0B000005,
+	QTN_TRACE_EV_PPPC_PWR_INDEX	= 0x0B000003,
+	QTN_TRACE_EV_RA_START		= 0x0C000001,
+	QTN_TRACE_EV_RA_END		= 0x0C000002,
+	QTN_TRACE_EV_RA_MCS_SAMPLE	= 0x0C000003,
+	QTN_TRACE_EV_RF_TXPWR_CAL_START	= 0x0D000001,
+	QTN_TRACE_EV_RF_TXPWR_CAL_END	= 0x0D000002,
+	QTN_TRACE_EV_RF_TXPD_CAL_START	= 0x0D000003,
+	QTN_TRACE_EV_RF_TXPD_CAL_END	= 0x0D000004,
+	QTN_TRACE_EV_RF_VCO_CAL_START	= 0x0D000005,
+	QTN_TRACE_EV_RF_VCO_CAL_END	= 0x0D000006,
+	QTN_TRACE_EV_RF_GAIN_AD_START	= 0x0D000007,
+	QTN_TRACE_EV_RF_GAIN_AD_END	= 0x0D000008,
+	QTN_TRACE_EV_PS_STATE		= 0x0E000001,
+	QTN_TRACE_EV_RST_TCM		= 0x0E000002,
+};
+
+/* Bit flags that can trigger trace capture on specific drop points. */
+enum qtn_trace_trigger {
+	QTN_TRACE_TRIGGER_DROP_QDRV_SCH = 0x00000001,
+};
+
+#if QTN_ENABLE_TRACE_BUFFER
+/* Debugs for tracing activity */
+
+/* Number of records in the circular trace buffer. */
+#define QTN_TRACE_BUF_SIZE	75
+
+/* Write cursor into qtn_trace_buffer (defined elsewhere). */
+extern uint32_t qtn_trace_index;
+/* One trace record: timestamp, event id, and event-specific payload. */
+struct qtn_trace_record {
+	uint32_t	tsf;
+	uint32_t	event;
+	uint32_t	data;
+};
+extern struct qtn_trace_record qtn_trace_buffer[QTN_TRACE_BUF_SIZE];
+
+/* Public entry point; casts the payload to uint32_t before recording. */
+#define QTN_TRACE(sc, event, data)	qtn_trace((sc), (event), (uint32_t)(data))
+
+#define QTN_TRACE_SET(field, value)	do { (field) = (value); } while(0)
+
+# ifdef MUC_BUILD
+#  include "qtn/if_qtnvar.h"
+# endif
+
+/*
+ * qtn_trace(): append one record to the circular buffer.  The signature is
+ * build-dependent (MuC passes a qtn_softc and uses the HW TSF; the LHost
+ * passes a qdrv_mac and stamps jiffies), so the '#ifdef' below selects the
+ * prototype while the shared 'static __inline__' prefix precedes it.
+ * Note the index is advanced before the store, so writing starts at
+ * slot 1 and slot 0 is only reached after the first wrap.
+ */
+static __inline__
+# ifdef MUC_BUILD
+void qtn_trace(struct qtn_softc *sc, uint32_t event, uint32_t data)
+{
+	qtn_trace_index++;
+	if (qtn_trace_index >= QTN_TRACE_BUF_SIZE) {
+		qtn_trace_index = 0;
+	}
+	qtn_trace_buffer[qtn_trace_index].tsf = hal_get_tsf_lo(sc->sc_qh);
+	qtn_trace_buffer[qtn_trace_index].event = event;
+	qtn_trace_buffer[qtn_trace_index].data = data;
+}
+# else
+void qtn_trace(struct qdrv_mac *mac, uint32_t event, uint32_t data)
+{
+	qtn_trace_index++;
+	if (qtn_trace_index >= QTN_TRACE_BUF_SIZE) {
+		qtn_trace_index = 0;
+	}
+	qtn_trace_buffer[qtn_trace_index].tsf = jiffies; /* FIXME: hal_get_tsf_lo(sc->sc_qh); */
+	qtn_trace_buffer[qtn_trace_index].event = event;
+	qtn_trace_buffer[qtn_trace_index].data = data;
+}
+# endif //MUC_BUILD
+
+#else //QTN_ENABLE_TRACE_BUFFER
+
+/* Tracing disabled: both macros compile to nothing. */
+#define QTN_TRACE(sc, type, data)	do {} while(0)
+
+#define QTN_TRACE_SET(field, value)	do {} while(0)
+
+#endif //QTN_ENABLE_TRACE_BUFFER
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_uc_comm.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_uc_comm.h
new file mode 100644
index 0000000..3c35434
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_uc_comm.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012 Quantenna Communications, Inc.
+ */
+
+#ifndef _QTN_UC_COMM_H
+#define _QTN_UC_COMM_H
+
+/* Number of wireless MAC units on this platform. */
+#define MAC_UNITS		1
+
+/*
+ * Node-table sizing: node cache index space, LHost node-table size, and
+ * the shift separating node id from TID, selected by 128- vs 64-node mode.
+ */
+#if defined(TOPAZ_128_NODE_MODE)
+#define QTN_NCIDX_MAX			128
+#define QTN_NODE_TBL_SIZE_LHOST		118
+#define QTN_NODETID_NODE_SHIFT		7
+#else
+#define QTN_NCIDX_MAX			64
+#define QTN_NODE_TBL_SIZE_LHOST		56
+#define QTN_NODETID_NODE_SHIFT		6
+#endif
+/* VAP limits; each VAP consumes one node-table entry. */
+#define QTN_MAX_BSS_VAPS		8
+#define QTN_MAX_WDS_VAPS		8
+#define QTN_MAX_VAPS			((QTN_MAX_BSS_VAPS) + (QTN_MAX_WDS_VAPS))
+#define QTN_NODE_TBL_MUC_HEADRM		3 /* Allow for delayed delete on MUC */
+#define QTN_NODE_TBL_SIZE_MUC		((QTN_NODE_TBL_SIZE_LHOST) + (QTN_NODE_TBL_MUC_HEADRM))
+/* Association limit: LHost table entries not reserved for VAPs. */
+#define QTN_ASSOC_LIMIT			((QTN_NODE_TBL_SIZE_LHOST) - (QTN_MAX_VAPS))
+
+#endif // #ifndef _QTN_UC_COMM_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_vlan.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_vlan.h
new file mode 100644
index 0000000..bc5980b
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_vlan.h
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2014 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _QTN_VLAN_H_
+#define _QTN_VLAN_H_
+
+#include "../common/ruby_mem.h"
+#include <qtn/qtn_debug.h>
+#include <qtn/qtn_uc_comm.h>
+#include <qtn/qtn_net_packet.h>
+#if defined(__KERNEL__) || defined(MUC_BUILD) || defined(AUC_BUILD)
+#include <qtn/topaz_tqe_cpuif.h>
+#endif
+#if defined(__KERNEL__)
+#include <qtn/dmautil.h>
+#endif
+
+#define QVLAN_MODE_ACCESS		0
+#define QVLAN_MODE_TRUNK		1
+#define QVLAN_MODE_HYBRID		2
+#define QVLAN_MODE_DYNAMIC		3
+#define QVLAN_CMD_DEF_PRIORITY		4
+#define QVLAN_MODE_MAX			QVLAN_CMD_DEF_PRIORITY
+#define QVLAN_MODE_DISABLED		(QVLAN_MODE_MAX + 1)
+#define QVLAN_SHIFT_MODE		16
+#define QVLAN_MASK_MODE			0xffff0000
+#define QVLAN_MASK_VID			0x00000fff
+
+#define QVLAN_MODE(x)			(uint16_t)((x) >> QVLAN_SHIFT_MODE)
+#define QVLAN_VID(x)			(uint16_t)((x) & QVLAN_MASK_VID)
+
+#define QVLAN_MODE_STR_ACCESS	"Access mode"
+#define QVLAN_MODE_STR_TRUNK	"Trunk mode"
+#define QVLAN_MODE_STR_HYBRID	"Hybrid mode"
+#define QVLAN_MODE_STR_DYNAMIC	"Dynamic mode"
+
+#define QVLAN_PRIO_VID			0
+#define QVLAN_DEF_PVID			1
+
+#define QVLAN_VID_MAX			4096
+#define QVLAN_VID_MAX_S			12
+#define QVLAN_VID_ALL			0xffff
+
+#ifndef NBBY
+#define NBBY		8
+#endif
+
+#ifndef NBDW
+#define NBDW		32
+#endif
+
+#ifdef CONFIG_TOPAZ_DBDC_HOST
+#define VLAN_INTERFACE_MAX	(QTN_MAX_VAPS + 2 + MAX_QFP_NETDEV)
+#define QFP_VDEV_IDX(dev_id)	(QTN_MAX_VAPS + 2 + (dev_id))
+#else
+#define VLAN_INTERFACE_MAX	(QTN_MAX_VAPS + 2)
+#endif
+#define WMAC_VDEV_IDX_MAX	QTN_MAX_VAPS
+#define EMAC_VDEV_IDX(port)	(QTN_MAX_VAPS + (port))
+#define PCIE_VDEV_IDX		(QTN_MAX_VAPS + 0)
+
+#ifndef howmany
+#define howmany(x, y)			(((x) + ((y) - 1)) / (y))
+#endif
+
+#define bitsz_var(var)			(sizeof(var) * 8)
+#define bitsz_ptr(ptr)			bitsz_var((ptr)[0])
+
+#define set_bit_a(a, i)			((a)[(i) / bitsz_ptr(a)] |= 1 << ((i) % bitsz_ptr(a)))
+#define clr_bit_a(a, i)			((a)[(i) / bitsz_ptr(a)] &= ~(1 << ((i) % bitsz_ptr(a))))
+#define is_set_a(a, i)			((a)[(i) / bitsz_ptr(a)] & (1 << ((i) % bitsz_ptr(a))))
+#define is_clr_a(a, i)			(is_set_a(a, i) == 0)
+
+struct qtn_vlan_stats {
+	uint32_t lhost;
+	uint32_t muc;
+};
+
+struct qtn_vlan_user_interface {
+	unsigned long bus_addr;
+	uint8_t mode;
+};
+
+#define QVLAN_PRIO_MAX	7
+struct qtn_vlan_dev {
+	uint8_t		idx;
+	uint8_t		port;
+	uint16_t	pvid;
+	uint16_t	priority;
+#define QVLAN_DEV_F_DYNAMIC	BIT(0)
+	uint16_t	flags;
+	unsigned long	bus_addr;
+	int		ifindex;
+	union {
+		uint32_t	member_bitmap[howmany(QVLAN_VID_MAX, NBDW)];
+		uint16_t	node_vlan[QTN_NCIDX_MAX];
+	}u;
+	uint32_t	tag_bitmap[howmany(QVLAN_VID_MAX, NBDW)];
+	struct qtn_vlan_stats ig_pass;
+	struct qtn_vlan_stats ig_drop;
+	struct qtn_vlan_stats eg_pass;
+	struct qtn_vlan_stats eg_drop;
+	struct qtn_vlan_stats magic_invalid;
+	void		*user_data;
+};
+#define QVLAN_IS_DYNAMIC(vdev)		((vdev)->flags & QVLAN_DEV_F_DYNAMIC)
+
+struct qtn_vlan_pkt {
+#define QVLAN_PKT_MAGIC			0xf8
+	uint8_t		magic;
+#define QVLAN_PKT_TAGGED		BIT(0)
+#define QVLAN_PKT_ZERO_TAGGED		BIT(1)
+#define QVLAN_PKT_SKIP_CHECK		BIT(2)
+#define QVLAN_PKT_TXACTION_UNTOUCH	0
+#define QVLAN_PKT_TXACTION_UNTAG	BIT(3)
+#define QVLAN_PKT_TXACTION_TAG		BIT(4)
+#define QVLAN_PKT_TXACTION_REPLACE	(QVLAN_PKT_TXACTION_UNTAG | QVLAN_PKT_TXACTION_TAG)
+#define QVLAN_PKT_TXACTION_VLAN0	BIT(5)
+	uint8_t	flag;
+#define QVLAN_PKT_VID_MASK		0x0fff
+#define QVLAN_PKT_PRIORITY_MASK		0xe000
+#define QVLAN_PKT_PRIORITY_SHIFT	13
+	uint16_t	vlan_info;
+} __packed;
+
+#define QVLAN_PKTCTRL_LEN	sizeof(struct qtn_vlan_pkt)
+
+struct qtn_vlan_info {
+#define QVLAN_TAGRX_UNTOUCH		0
+#define QVLAN_TAGRX_STRIP		1
+#define QVLAN_TAGRX_TAG			2
+#define QVLAN_TAGRX_BITMASK		0x3
+#define QVLAN_TAGRX_BITWIDTH		2
+#define QVLAN_TAGRX_BITSHIFT		1
+#define QVLAN_TAGRX_NUM_PER_DW		(32 / QVLAN_TAGRX_BITWIDTH)
+#define QVLAN_TAGRX_NUM_PER_DW_S	4
+	uint32_t vlan_tagrx_bitmap[howmany(QVLAN_VID_MAX * QVLAN_TAGRX_BITWIDTH, NBDW)];
+};
+
+RUBY_INLINE int qvlan_tagrx_index(int vid)
+{
+	return (vid >> QVLAN_TAGRX_NUM_PER_DW_S);
+}
+
+RUBY_INLINE int qvlan_tagrx_shift(int vid)
+{
+	int shift;
+
+	shift = vid & (QVLAN_TAGRX_NUM_PER_DW - 1);
+	return (shift << QVLAN_TAGRX_BITSHIFT);
+}
+
+/*
+ * Must be in sync with qcsapi_vlan_config in qcsapi.h
+ *  -- Whenever 'struct qtn_vlan_config' changes, qcsapi.h changes as well
+ */
+struct qtn_vlan_config {
+	uint32_t	vlan_cfg;
+	uint32_t	priority;
+	union {
+		struct vlan_dev_config {
+			uint32_t	member_bitmap[howmany(QVLAN_VID_MAX, NBDW)];
+			uint32_t	tag_bitmap[howmany(QVLAN_VID_MAX, NBDW)];
+		} dev_config;
+		uint32_t	tagrx_config[howmany(QVLAN_VID_MAX * QVLAN_TAGRX_BITWIDTH, NBDW)];
+	} u;
+};
+
+RUBY_INLINE void qtn_vlan_config_htonl(struct qtn_vlan_config *vcfg, int tagrx)
+{
+	unsigned int i;
+
+	vcfg->vlan_cfg = htonl(vcfg->vlan_cfg);
+	vcfg->priority = htonl(vcfg->priority);
+
+	if (tagrx) {
+		for (i = 0; i < ARRAY_SIZE(vcfg->u.tagrx_config); i++)
+			vcfg->u.tagrx_config[i] = htonl(vcfg->u.tagrx_config[i]);
+	} else {
+		for (i = 0; i < ARRAY_SIZE(vcfg->u.dev_config.member_bitmap); i++)
+			vcfg->u.dev_config.member_bitmap[i] = htonl(vcfg->u.dev_config.member_bitmap[i]);
+
+		for (i = 0; i < ARRAY_SIZE(vcfg->u.dev_config.tag_bitmap); i++)
+			vcfg->u.dev_config.tag_bitmap[i] = htonl(vcfg->u.dev_config.tag_bitmap[i]);
+	}
+}
+
+RUBY_INLINE void qtn_vlan_config_ntohl(struct qtn_vlan_config *vcfg, int tagrx)
+{
+	unsigned int i;
+
+	vcfg->vlan_cfg = ntohl(vcfg->vlan_cfg);
+	vcfg->priority = ntohl(vcfg->priority);
+
+	if (tagrx) {
+		for (i = 0; i < ARRAY_SIZE(vcfg->u.tagrx_config); i++)
+			vcfg->u.tagrx_config[i] = ntohl(vcfg->u.tagrx_config[i]);
+	} else {
+		for (i = 0; i < ARRAY_SIZE(vcfg->u.dev_config.member_bitmap); i++)
+			vcfg->u.dev_config.member_bitmap[i] = ntohl(vcfg->u.dev_config.member_bitmap[i]);
+
+		for (i = 0; i < ARRAY_SIZE(vcfg->u.dev_config.tag_bitmap); i++)
+			vcfg->u.dev_config.tag_bitmap[i] = ntohl(vcfg->u.dev_config.tag_bitmap[i]);
+	}
+}
+
+/*
+* VLAN forward/drop table
+*|	traffic direction	|  frame	|  Access(MBSS/Dynamic mode)	  | Trunk(Passthrough mode)
+*|--------------------------------------------------------------------------------------------------------------
+*|	wifi tx			|  no vlan	|  drop				  | forward
+*|--------------------------------------------------------------------------------------------------------------
+*|				|  vlan tagged	| compare tag with PVID:	  | compare tag against VID list
+*|				|		| 1.equal:untag and forward	  | 1.Found:forward
+*|				|		| 2.not equal:drop		  | 2.Not found:drop
+*|--------------------------------------------------------------------------------------------------------------
+*|	wifi rx			|  no vlan	| Add PVID tag and forward	  | forward
+*|--------------------------------------------------------------------------------------------------------------
+*|				|  vlan tagged	| Compare tag with PVID:	  | compare tag against VID list
+*|				|		| 1.equal:forward		  | 1. Found:forward
+*|				|		| 2.not equal:drop		  | 2. Not found:drop
+*|--------------------------------------------------------------------------------------------------------------
+*/
+
+#define QVLAN_BYTES_PER_VID		((QTN_MAX_BSS_VAPS + NBBY - 1) / NBBY)
+#define QVLAN_BYTES_PER_VID_SHIFT	0
+
+RUBY_INLINE int
+qtn_vlan_is_valid(int vid)
+{
+	return (vid >= 0 && vid < QVLAN_VID_MAX);
+}
+
+RUBY_INLINE int
+qtn_vlan_is_member(volatile struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	return !!is_set_a(vdev->u.member_bitmap, vid);
+}
+
+RUBY_INLINE int
+qtn_vlan_is_tagged_member(volatile struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	return !!is_set_a(vdev->tag_bitmap, vid);
+}
+
+RUBY_INLINE int
+qtn_vlan_is_pvid(volatile struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	return vdev->pvid == vid;
+}
+
+RUBY_INLINE int
+qtn_vlan_is_mode(volatile struct qtn_vlan_dev *vdev, uint16_t mode)
+{
+	return ((struct qtn_vlan_user_interface *)vdev->user_data)->mode == mode;
+}
+
+#if defined(__KERNEL__) || defined(MUC_BUILD) || defined(AUC_BUILD)
+RUBY_INLINE int
+qtn_vlan_port_indexable(uint8_t port)
+{
+	return ((port == TOPAZ_TQE_EMAC_0_PORT)
+		|| (port == TOPAZ_TQE_EMAC_1_PORT)
+		|| (port == TOPAZ_TQE_PCIE_PORT)
+		|| (port == TOPAZ_TQE_DSP_PORT));
+}
+#endif
+
+RUBY_INLINE int
+qtn_vlan_get_tagrx(uint32_t *tagrx_bitmap, uint16_t vlanid)
+{
+	return (tagrx_bitmap[vlanid >> QVLAN_TAGRX_NUM_PER_DW_S] >>
+				((vlanid & (QVLAN_TAGRX_NUM_PER_DW - 1)) << QVLAN_TAGRX_BITSHIFT)) &
+		QVLAN_TAGRX_BITMASK;
+}
+
+RUBY_INLINE void
+qtn_vlan_gen_group_addr(uint8_t *mac, uint16_t vid, uint8_t vapid)
+{
+	uint16_t encode;
+
+	mac[0] = 0xff;
+	mac[1] = 0xff;
+	mac[2] = 0xff;
+	mac[3] = 0xff;
+
+	encode = ((uint16_t)vapid << QVLAN_VID_MAX_S) | vid;
+	mac[4] = encode >> 8;
+	mac[5] = (uint8_t)(encode & 0xff);
+}
+
+RUBY_INLINE int
+qtn_vlan_is_group_addr(const uint8_t *mac)
+{
+	return (mac[0] == 0xff && mac[1] == 0xff
+		&& mac[2] == 0xff && mac[3] == 0xff
+		&& mac[4] != 0xff);
+}
+
+#if defined(__KERNEL__) || defined(MUC_BUILD) || defined(AUC_BUILD)
+RUBY_INLINE struct qtn_vlan_pkt*
+qtn_vlan_get_info(const void *data)
+{
+	struct qtn_vlan_pkt *pkt;
+#if defined(AUC_BUILD)
+#pragma Off(Behaved)
+#endif
+	pkt = (struct qtn_vlan_pkt *)((const uint8_t *)data - QVLAN_PKTCTRL_LEN);
+#if defined(AUC_BUILD)
+#pragma On(Behaved)
+#endif
+	return pkt;
+}
+
+RUBY_INLINE void
+qtn_vlan_inc_stats(struct qtn_vlan_stats *stats) {
+#if defined(__KERNEL__)
+	stats->lhost++;
+#elif defined(MUC_BUILD)
+	stats->muc++;
+#endif
+}
+
+RUBY_INLINE int
+qtn_vlan_magic_check(struct qtn_vlan_dev *outdev, struct qtn_vlan_pkt *pkt)
+{
+	if (unlikely(pkt->magic != QVLAN_PKT_MAGIC)) {
+		qtn_vlan_inc_stats(&outdev->magic_invalid);
+		return 0;
+	}
+
+	return 1;
+}
+
+RUBY_INLINE int
+qtn_vlan_vlanid_check(struct qtn_vlan_dev *vdev, uint16_t ncidx, uint16_t vlanid)
+{
+	if (QVLAN_IS_DYNAMIC(vdev))
+		return (vdev->u.node_vlan[ncidx] == vlanid);
+	else
+		return qtn_vlan_is_member(vdev, vlanid);
+}
+
+#define QVLAN_PKT_TXACTION_INVALID		0xff
+/*
+ * VLAN encapsulation action table
+ * | Frame Non-Zero tagged | Frame Zero tagged | Tx Tag | VLAN 0 Tx Tag | Action     |
+ * | 0                     | 0                 | 0      | 0             | DONT_TOUCH |
+ * | 0                     | 0                 | 0      | 1             | ADD_TAG_0  |
+ * | 0                     | 0                 | 1      | 0             | ADD_TAG    |
+ * | 0                     | 0                 | 1      | 1             | ADD_TAG    |
+ * | 0                     | 1                 | 0      | 0             | STRIP      |
+ * | 0                     | 1                 | 0      | 1             | DONT_TOUCH |
+ * | 0                     | 1                 | 1      | 0             | REPLACE    |
+ * | 0                     | 1                 | 1      | 1             | REPLACE    |
+ * | 1                     | 0                 | 0      | 0             | STRIP      |
+ * | 1                     | 0                 | 0      | 1             | REPLACE_0  |
+ * | 1                     | 0                 | 1      | 0             | DONT_TOUCH |
+ * | 1                     | 0                 | 1      | 1             | DONT_TOUCH |
+ */
+static const uint8_t qvlan_tx_actions[] = {
+	QVLAN_PKT_TXACTION_UNTOUCH,
+	QVLAN_PKT_TXACTION_TAG | QVLAN_PKT_TXACTION_VLAN0,
+	QVLAN_PKT_TXACTION_TAG,
+	QVLAN_PKT_TXACTION_TAG,
+
+	QVLAN_PKT_TXACTION_UNTAG,
+	QVLAN_PKT_TXACTION_UNTOUCH,
+	QVLAN_PKT_TXACTION_REPLACE,
+	QVLAN_PKT_TXACTION_REPLACE,
+
+	QVLAN_PKT_TXACTION_UNTAG,
+	QVLAN_PKT_TXACTION_REPLACE | QVLAN_PKT_TXACTION_VLAN0,
+	QVLAN_PKT_TXACTION_UNTOUCH,
+	QVLAN_PKT_TXACTION_UNTOUCH,
+
+	QVLAN_PKT_TXACTION_INVALID,
+	QVLAN_PKT_TXACTION_INVALID,
+	QVLAN_PKT_TXACTION_INVALID,
+	QVLAN_PKT_TXACTION_INVALID
+};
+
+RUBY_INLINE void
+qtn_vlan_tx_action(struct qtn_vlan_dev *vdev, struct qtn_vlan_pkt *pkt)
+{
+	uint8_t index = 0;
+	uint8_t action;
+	uint16_t vlanid = (pkt->vlan_info & QVLAN_PKT_VID_MASK);
+
+	index |= (qtn_vlan_is_tagged_member(vdev, QVLAN_PRIO_VID) ? BIT(0): 0);
+	index |= (qtn_vlan_is_tagged_member(vdev, vlanid) ? BIT(1) : 0);
+	index |= ((pkt->flag & QVLAN_PKT_ZERO_TAGGED) ? BIT (2) : 0);
+	index |= ((pkt->flag & QVLAN_PKT_TAGGED) ? BIT(3) : 0);
+
+	action = qvlan_tx_actions[index];
+#if defined(AUC_BUILD)
+	AUC_OS_ASSERT(action != QVLAN_PKT_TXACTION_INVALID, "VLAN action invalid\n");
+#elif defined(MUC_BUILD)
+	OS_ASSERT(action != QVLAN_PKT_TXACTION_INVALID, ("VLAN action invalid\n"));
+#elif defined(__KERNEL__)
+	KASSERT((action != QVLAN_PKT_TXACTION_INVALID), ("VLAN action invalid\n"));
+#endif
+
+	pkt->flag |= action;
+}
+
+RUBY_INLINE int
+qtn_vlan_egress(struct qtn_vlan_dev *outdev, uint16_t ncidx, void *data,
+	int need_encap_info, int cache_op)
+{
+	struct qtn_vlan_pkt *pkt = qtn_vlan_get_info(data);
+
+	if (!qtn_vlan_magic_check(outdev, pkt)
+			|| (pkt->flag & QVLAN_PKT_SKIP_CHECK)
+			|| qtn_vlan_vlanid_check(outdev, ncidx, pkt->vlan_info & QVLAN_PKT_VID_MASK)) {
+		qtn_vlan_inc_stats(&outdev->eg_pass);
+
+		if (need_encap_info)
+			qtn_vlan_tx_action(outdev, pkt);
+
+		if (cache_op) {
+#if defined(__KERNEL__)
+			flush_and_inv_dcache_sizerange_safe(pkt, QVLAN_PKTCTRL_LEN);
+#elif defined(MUC_BUILD)
+			flush_and_inv_dcache_range_safe(pkt, QVLAN_PKTCTRL_LEN);
+#endif
+		}
+
+		return 1;
+	}
+
+	qtn_vlan_inc_stats(&outdev->eg_drop);
+	return 0;
+}
+
+#endif
+
+#if defined(__KERNEL__) || defined(MUC_BUILD)
+RUBY_INLINE int
+qtn_vlan_ingress(struct qtn_vlan_dev *indev, uint16_t ncidx, void *data,
+		uint16_t known_vlan, uint16_t known_vlan_tci, uint8_t cache_op)
+{
+	struct ether_header *eh = (struct ether_header *)data;
+	struct qtn_vlan_pkt *pkt;
+	uint16_t vlan_tci = 0;
+	uint16_t vlan_id = QVLAN_PRIO_VID;
+	uint16_t flag = 0;
+
+	if (eh->ether_type == htons(ETHERTYPE_8021Q)) {
+		vlan_tci = ntohs(*(uint16_t *)(eh + 1));
+		vlan_id = vlan_tci & QVLAN_PKT_VID_MASK;
+
+		flag |= (vlan_id == QVLAN_PRIO_VID ? QVLAN_PKT_ZERO_TAGGED : QVLAN_PKT_TAGGED);
+
+		if (vlan_id == QVLAN_PRIO_VID && known_vlan) {
+			vlan_tci = known_vlan_tci;
+			vlan_id = vlan_tci & QVLAN_PKT_VID_MASK;
+		}
+	} else if (known_vlan) {
+		vlan_tci = known_vlan_tci;
+		vlan_id = vlan_tci & QVLAN_PKT_VID_MASK;
+	} else {
+		vlan_tci = (indev->priority << QVLAN_PKT_PRIORITY_SHIFT);
+	}
+
+	if (vlan_id == QVLAN_PRIO_VID) {
+		vlan_tci |= indev->pvid;
+	} else if (!qtn_vlan_vlanid_check(indev, ncidx, vlan_id)) {
+		qtn_vlan_inc_stats(&indev->ig_drop);
+		return 0;
+	}
+
+	pkt = qtn_vlan_get_info(data);
+	pkt->magic = QVLAN_PKT_MAGIC;
+	pkt->flag = flag;
+	pkt->vlan_info = vlan_tci;
+
+	if (cache_op) {
+#if defined(__KERNEL__)
+		flush_and_inv_dcache_sizerange_safe(pkt, QVLAN_PKTCTRL_LEN);
+#elif defined(MUC_BUILD)
+		flush_and_inv_dcache_range_safe(pkt, QVLAN_PKTCTRL_LEN);
+#endif
+	}
+
+	qtn_vlan_inc_stats(&indev->ig_pass);
+	return 1;
+}
+#endif
+
+#if defined(__KERNEL__)
+extern uint8_t vlan_enabled;
+extern struct qtn_vlan_dev *vdev_tbl_lhost[VLAN_INTERFACE_MAX];
+extern struct qtn_vlan_dev *vdev_tbl_bus[VLAN_INTERFACE_MAX];
+extern struct qtn_vlan_dev *vport_tbl_lhost[TOPAZ_TQE_NUM_PORTS];
+extern struct qtn_vlan_dev *vport_tbl_bus[TOPAZ_TQE_NUM_PORTS];
+extern struct qtn_vlan_info qtn_vlan_info;
+
+extern struct qtn_vlan_dev *switch_alloc_vlan_dev(uint8_t port, uint8_t idx, int ifindex);
+extern void switch_free_vlan_dev(struct qtn_vlan_dev *dev);
+extern void switch_free_vlan_dev_by_idx(uint8_t idx);
+extern struct qtn_vlan_dev *switch_vlan_dev_get_by_port(uint8_t port);
+extern struct qtn_vlan_dev *switch_vlan_dev_get_by_idx(uint8_t idx);
+
+extern int switch_vlan_add_member(struct qtn_vlan_dev *vdev, uint16_t vid, uint8_t tag);
+extern int switch_vlan_del_member(struct qtn_vlan_dev *vdev, uint16_t vid);
+extern int switch_vlan_tag_member(struct qtn_vlan_dev *vdev, uint16_t vid);
+extern int switch_vlan_untag_member(struct qtn_vlan_dev *vdev, uint16_t vid);
+extern int switch_vlan_set_pvid(struct qtn_vlan_dev *vdev, uint16_t vid);
+extern int switch_vlan_set_priority(struct qtn_vlan_dev *vdev, uint8_t priority);
+
+extern int switch_vlan_register_node(uint16_t ncidx, struct qtn_vlan_dev *vdev);
+extern void switch_vlan_unregister_node(uint16_t ncidx);
+extern struct qtn_vlan_dev *switch_vlan_dev_from_node(uint16_t ncidx);
+
+/* dynamic VLAN support */
+extern void switch_vlan_dyn_enable(struct qtn_vlan_dev *vdev);
+extern void switch_vlan_dyn_disable(struct qtn_vlan_dev *vdev);
+extern int switch_vlan_set_node(struct qtn_vlan_dev *vdev, uint16_t ncidx, uint16_t vlan);
+extern int switch_vlan_clr_node(struct qtn_vlan_dev *vdev, uint16_t ncidx);
+
+extern struct sk_buff *switch_vlan_to_proto_stack(struct sk_buff *, int copy);
+extern struct sk_buff *switch_vlan_from_proto_stack(struct sk_buff *, struct qtn_vlan_dev *, uint16_t ncidx, int copy);
+extern void switch_vlan_reset(void);
+extern void switch_vlan_dev_reset(struct qtn_vlan_dev *vdev, uint8_t mode);
+extern void switch_vlan_emac_to_lhost(uint32_t enable);
+#endif
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_wmm_ac.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_wmm_ac.h
new file mode 100644
index 0000000..bab24ee
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_wmm_ac.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013 Quantenna Communications, Inc.
+ */
+
+#ifndef _QTN_WMM_AC_H
+#define _QTN_WMM_AC_H
+
+#define WMM_AC_BE	0
+#define WMM_AC_BK	1
+#define WMM_AC_VI	2
+#define WMM_AC_VO	3
+#define WMM_AC_NUM	4
+#define QTN_AC_MGMT	WMM_AC_VO
+#define WMM_AC_INVALID	WMM_AC_NUM
+
+#define QTN_AC_ORDER	{ WMM_AC_VO, WMM_AC_VI, WMM_AC_BE, WMM_AC_BK }
+
+#define QTN_TID_BE	0
+#define QTN_TID_BK	1
+#define QTN_TID_2	2
+#define QTN_TID_3	3
+#define QTN_TID_WLAN	4	/* 802.11 encap'ed data from wlan driver */
+#define QTN_TID_VI	5
+#define QTN_TID_VO	6
+#define QTN_TID_MGMT	7
+#define QTN_TID_IS_80211(tid)	((tid == QTN_TID_MGMT) || (tid == QTN_TID_WLAN))
+
+#define QTN_TID_ORDER	{ \
+	QTN_TID_MGMT,	\
+	QTN_TID_WLAN,	\
+	QTN_TID_VO,	\
+	QTN_TID_VI,	\
+	QTN_TID_BE,	\
+	QTN_TID_BK	\
+}
+
+#define QTN_TID_ORDER_DATA { \
+	QTN_TID_VO,	\
+	QTN_TID_VI,	\
+	QTN_TID_BE,	\
+	QTN_TID_BK	\
+}
+
+#define QTN_TID_ORDER_POLL { \
+	QTN_TID_VO,	\
+	QTN_TID_VI,	\
+	QTN_TID_BE,	\
+	QTN_TID_BK,	\
+	QTN_TID_WLAN,	\
+	QTN_TID_MGMT	\
+}
+
+#define WMM_AC_TO_TID(_ac) (			\
+	(_ac == WMM_AC_VO) ? QTN_TID_VO :	\
+	(_ac == WMM_AC_VI) ? QTN_TID_VI :	\
+	(_ac == WMM_AC_BK) ? QTN_TID_BK :	\
+	QTN_TID_BE)
+
+#define TID_TO_WMM_AC(_tid) (		\
+	(_tid == QTN_TID_BK)	? WMM_AC_BK :	\
+	(_tid == QTN_TID_VI)	? WMM_AC_VI :	\
+	(_tid == QTN_TID_VO)	? WMM_AC_VO :	\
+	(_tid == QTN_TID_WLAN)	? QTN_AC_MGMT :	\
+	(_tid == QTN_TID_MGMT)	? QTN_AC_MGMT :	\
+	WMM_AC_BE)
+
+#define QTN_TID_COLLAPSE(_tid)	WMM_AC_TO_TID(TID_TO_WMM_AC(_tid))
+
+#define AC_TO_QTN_QNUM(_ac)		\
+	(((_ac) == WME_AC_BE) ? 1 :	\
+	 ((_ac) == WME_AC_BK) ? 0 :	\
+	  (_ac))
+
+#define QTN_TID_MAP_UNUSED(_tid) ( \
+	(_tid == QTN_TID_2) ? QTN_TID_BK : \
+	(_tid == QTN_TID_3) ? QTN_TID_BE : \
+	(_tid))
+
+#endif	/* _QTN_WMM_AC_H */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_wowlan.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_wowlan.h
new file mode 100644
index 0000000..cf15486
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qtn_wowlan.h
@@ -0,0 +1,55 @@
+#ifndef __QTN_WOWLAN_H__
+#define __QTN_WOWLAN_H__
+
+#include <qtn/qtn_net_packet.h>
+#define WOWLAN_MATCH_TYPE_DEFAULT	0
+#define WOWLAN_MATCH_TYPE_L2		1
+#define WOWLAN_MATCH_TYPE_UDP		2
+
+#ifndef IEEE80211_ADDR_BCAST
+#define	IEEE80211_ADDR_BCAST(a)	((a)[0] == 0xff && (a)[1] == 0xff && (a)[2] == 0xff && \
+					(a)[3] == 0xff && (a)[4] == 0xff && (a)[5] == 0xff)
+#endif
+RUBY_INLINE uint16_t get_udp_dst_port(const void *iphdr)
+{
+	const struct qtn_ipv4 *ipv4 = (const struct qtn_ipv4 *)iphdr;
+	const uint8_t proto = ipv4->proto;
+
+	if (proto == QTN_IP_PROTO_UDP) {
+		const struct qtn_udp *udp = (struct qtn_udp *)((uint8_t *)ipv4 + sizeof(struct qtn_ipv4));
+		return udp->dst_port;
+	}
+	return 0;
+}
+
+RUBY_INLINE uint8_t wowlan_is_magic_packet(uint16_t ether_type, const void *eth_hdr, const void *iphdr,
+		uint16_t wowlan_match_type, uint16_t config_ether_type, uint16_t config_udp_port)
+{
+	const struct ether_header *eh = (struct ether_header *)eth_hdr;
+
+	if (wowlan_match_type == WOWLAN_MATCH_TYPE_DEFAULT) {
+		if (IEEE80211_ADDR_BCAST(eh->ether_dhost))/*broadcast*/
+			return 1;
+		if (ether_type == htons(ETHERTYPE_WAKE_ON_LAN))/* ether type is 0x0842*/
+			return 1;
+		if (ether_type == htons(ETHERTYPE_IP)) {
+			uint16_t udp_dst = get_udp_dst_port(iphdr);
+			if (udp_dst == htons(7) || udp_dst == htons(9))
+				return 1;
+		}
+	} else if (wowlan_match_type == WOWLAN_MATCH_TYPE_L2) {
+		if (ether_type == htons(config_ether_type))/* ether type is 0x0842 or user defined*/
+			return 1;
+	} else if (wowlan_match_type == WOWLAN_MATCH_TYPE_UDP) {
+		if (ether_type == htons(ETHERTYPE_IP)) {
+			uint16_t udp_dst = get_udp_dst_port(iphdr);
+			if (((config_udp_port == 0xffff) && (udp_dst == htons(7) || udp_dst == htons(9))) ||
+						(udp_dst == htons(config_udp_port)))
+				return 1;
+
+		}
+	}
+
+	return 0;
+}
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qvsp.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qvsp.h
new file mode 100644
index 0000000..d982684
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qvsp.h
@@ -0,0 +1,158 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2011-2013 Quantenna Communications, Inc             **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef _QVSP_DRV_H_
+#define _QVSP_DRV_H_
+
+#ifdef CONFIG_QVSP
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+
+#include <net80211/if_ethersubr.h>
+
+#include "qtn/qvsp_data.h"
+#include "qtn/shared_defs.h"
+
+#define QVSP_BA_THROT_TIMER_INTV	25	/* unit: ms */
+
+struct qvsp_s;
+struct qvsp_strm;
+struct qvsp_ext_s;
+struct ieee80211_node;
+struct qdrv_wlan;
+
+enum qdrv_vsp_check_type {
+	QDRV_VSP_CHECK_ENABLE,
+};
+
+#define QVSP_CHECK_FUNC_PROTOTYPE(_fn)			\
+	int (_fn)(struct qvsp_ext_s *qvsp,		\
+			enum qvsp_if_e qvsp_if,		\
+			struct sk_buff *skb,		\
+			void *data_start,		\
+			uint32_t pktlen,		\
+			uint8_t ac)
+
+typedef void (*_fn_qvsp_inactive_flags_changed_handler)(struct qvsp_ext_s *qvsp_ext);
+
+/* This definition must be kept in sync with QVSP_INACTIVE_REASON */
+struct qvsp_ext_s {
+
+#define QVSP_INACTIVE_CFG	0x00000001
+#define QVSP_INACTIVE_WDS	0x00000002
+#define QVSP_INACTIVE_COC	0x00000004
+	uint32_t				inactive_flags;
+	_fn_qvsp_inactive_flags_changed_handler	flags_changed;
+};
+
+struct qvsp_wrapper {
+	struct qvsp_ext_s	*qvsp;
+	QVSP_CHECK_FUNC_PROTOTYPE(*qvsp_check_func);
+};
+
+static inline void
+__qvsp_inactive_flag_update(struct qvsp_ext_s *qvsp_ext, uint32_t flag, int set)
+{
+	unsigned long irq_flags;
+	unsigned long old_flags;
+	unsigned long update = 0;
+
+	if (qvsp_ext && flag) {
+		local_irq_save(irq_flags);
+
+		old_flags = qvsp_ext->inactive_flags;
+		if (set) {
+			qvsp_ext->inactive_flags |= flag;
+		} else {
+			qvsp_ext->inactive_flags &= ~flag;
+		}
+
+		local_irq_restore(irq_flags);
+
+		update = ((old_flags == 0) != (qvsp_ext->inactive_flags == 0));
+		if (update && qvsp_ext->flags_changed) {
+			qvsp_ext->flags_changed(qvsp_ext);
+		}
+	}
+}
+
+#define qvsp_inactive_flag_set(_qvsp, _flag) \
+		__qvsp_inactive_flag_update((struct qvsp_ext_s *)(_qvsp), (_flag), 1)
+
+#define qvsp_inactive_flag_clear(_qvsp, _flag) \
+		__qvsp_inactive_flag_update((struct qvsp_ext_s *)(_qvsp), (_flag), 0)
+
+static __always_inline int
+__qvsp_is_active(struct qvsp_ext_s *qvsp_ext)
+{
+	return (qvsp_ext && (qvsp_ext->inactive_flags == 0));
+}
+#define qvsp_is_active(_qvsp)  __qvsp_is_active((struct qvsp_ext_s *)(_qvsp))
+
+static __always_inline int
+__qvsp_is_enabled(struct qvsp_ext_s *qvsp_ext)
+{
+	return (qvsp_ext && ((qvsp_ext->inactive_flags & QVSP_INACTIVE_CFG) == 0));
+}
+#define qvsp_is_enabled(_qvsp) \
+		__qvsp_is_enabled((struct qvsp_ext_s *)(_qvsp))
+
+
+
+int qvsp_strm_check_add(struct qvsp_s *qvsp, enum qvsp_if_e qvsp_if, struct ieee80211_node *ni,
+			struct sk_buff *skb, struct ether_header *eh, struct iphdr *iphdr_p,
+			int pktlen, uint8_t ac, int32_t tid);
+void qvsp_cmd_strm_state_set(struct qvsp_s *qvsp, uint8_t strm_state,
+			const struct ieee80211_qvsp_strm_id *strm_id, struct ieee80211_qvsp_strm_dis_attr *attr);
+void qvsp_cmd_vsp_configure(struct qvsp_s *qvsp, uint32_t index, uint32_t value);
+void qvsp_cmd_vsp_cfg_set(struct qvsp_s *qvsp, uint32_t index, uint32_t value);
+int qvsp_cmd_vsp_cfg_get(struct qvsp_s *qvsp, uint32_t index, uint32_t *value);
+void qvsp_fat_set(struct qvsp_s *qvsp, uint32_t fat, uint32_t intf_ms, uint8_t chan);
+void qvsp_node_del(struct qvsp_s *qvsp, struct ieee80211_node *ni);
+void qvsp_reset(struct qvsp_s *qvsp);
+void qvsp_change_stamode(struct qvsp_s *qvsp, uint8_t stamode);
+int qvsp_netdbg_init(struct qvsp_s *qvsp,
+		void (*cb_logger)(void *token, void *vsp_data, uint32_t size),
+		uint32_t interval);
+void qvsp_netdbg_exit(struct qvsp_s *qvsp);
+void qvsp_disable(struct qvsp_s *qvsp);
+struct qvsp_s *qvsp_init(int (*ioctl_fn)(void *token, uint32_t param, uint32_t value),
+			void *ioctl_fn_token, struct net_device *dev, uint8_t stamode,
+			void (*cb_cfg)(void *token, uint32_t index, uint32_t value),
+			void (*cb_strm_ctrl)(void *token, struct ieee80211_node *ni, uint8_t strm_state,
+				struct ieee80211_qvsp_strm_id *strm_id, struct ieee80211_qvsp_strm_dis_attr *attr),
+			void (*cb_strm_ext_throttler)(void *token, struct ieee80211_node *node,
+				uint8_t strm_state, const struct ieee80211_qvsp_strm_id *strm_id,
+				struct ieee80211_qvsp_strm_dis_attr *attr, uint32_t throt_intvl),
+			uint32_t ieee80211node_size, uint32_t ieee80211vap_size
+			);
+void qvsp_exit(struct qvsp_s **qvsp, struct net_device *dev);
+
+void qvsp_wrapper_init(struct qvsp_ext_s *qvsp_ext, QVSP_CHECK_FUNC_PROTOTYPE(fn));
+void qvsp_wrapper_exit(void);
+void qvsp_node_init(struct ieee80211_node *ni);
+
+void qvsp_3rdpt_register_cb(struct qvsp_s *qvsp,
+		void *wme_token,
+		int (*cb_3rdpt_get_method)(struct ieee80211_node *ni, uint8_t *throt_session_dur, uint8_t *throt_winsize),
+		int (*cb_ba_throt)(struct ieee80211_node *ni, int32_t tid, int intv, int dur, int win_size),
+		int (*cb_wme_throt)(void *qw, uint32_t ac, uint32_t enable,
+					uint32_t aifs, uint32_t cwmin, uint32_t cwmax, uint32_t txoplimit,
+					uint32_t add_qwme_ie)
+		);
+
+#if TOPAZ_QTM
+void qvsp_strm_tid_check_add(struct qvsp_s *qvsp, struct ieee80211_node *ni, uint8_t node, uint8_t tid,
+		uint32_t pkts, uint32_t bytes, uint32_t sent_pkts, uint32_t sent_bytes);
+#endif
+#endif /* CONFIG_QVSP */
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qvsp_common.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qvsp_common.h
new file mode 100644
index 0000000..2dc41dd
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/qvsp_common.h
@@ -0,0 +1,36 @@
+/*
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications, Inc.                             **
+**  File        : qvsp_common.h                                              **
+**  Description : Video Screen Protection                                    **
+**                                                                           **
+*******************************************************************************
+*/
+
+#ifndef _QVSP_COMMON_H_
+#define _QVSP_COMMON_H_
+
+/*
+ * Default stream airtime cost in msec per sec to send or receive at 8 Mbps.
+ * Constants are binary for efficiency and do not need to be accurate.  They only need to
+ * scale so that stream cost roughly equates to used airtime, in order to estimate the
+ * effect of disabling or re-enabling a stream.
+ */
+#define BYTES_PER_KIB			(1024)		/* Kibibytes */
+#define BYTES_PER_MIB			(1024 * 1024)	/* Mebibytes */
+#define QVSP_STRM_COST_UNIT_MIB		(8)		/* arbitrary (optimised) cost unit */
+#define QVSP_STRM_COST_UNIT_BYTES	(QVSP_STRM_COST_UNIT_MIB * BYTES_PER_MIB)
+#define QVSP_NODE_COST_DFLT		(1000)
+
+struct qtn_per_tid_stats {
+	uint32_t tx_throt_pkts;
+	uint32_t tx_throt_bytes;
+	uint32_t tx_sent_pkts;
+	uint32_t tx_sent_bytes;
+};
+
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/registers.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/registers.h
new file mode 100644
index 0000000..7f68c73
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/registers.h
@@ -0,0 +1,320 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2008 - 2010 Quantenna Communications Inc            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications Inc                               **
+**  File        : registers.h                                                **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _QTN_REGISTERS_H
+#define _QTN_REGISTERS_H
+
+#include <asm/io.h>
+
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+#include <qtn/registers_muc.h>
+#endif
+
+#if (CONFIG_ARC)
+# define CONFIG_RUBY 1
+#else
+# define CONFIG_ENVY 1
+#endif
+
+#define QTN_BIT(_i)				(1L << (_i))
+
+#define SYS_REG_BASE		0xE0000000
+#define SYS_RESET_VECTOR_MASK	(SYS_REG_BASE + 0x0)
+#define SYS_RESET_VECTOR	(SYS_REG_BASE + 0x4)
+#define SYS_CONTROL_MASK	(SYS_REG_BASE + 0x8)
+#define SYS_CONTROL_REG		(SYS_REG_BASE + 0xC)
+
+#define ULA_RESET_VECTOR_MASK 0xE0000000
+#define ULA_RESET_VECTOR 0xE0000004
+
+#define NDP_PKT_SRC_BBO (0xE60B0000)
+#define NDP_PKT_SRC_BB1 (0xE68B0000)
+
+#define DSP_MASTER_GPIO_ENABLE (1<<15)
+
+#define MUC_BASE_ADDR 0xE5000000
+#define MUC_OFFSET_CTRL_REG 0x42000
+
+#define MUC_CTRL_REG_SIZE 0xFF
+
+/* Interrupts */
+#define MUC_INT0  0
+#define MUC_INT1  1
+#define MUC_INT2  2
+#define MUC_INT3  3
+#define MUC_INT4  4
+#define MUC_INT5  5
+#define MUC_INT6  6
+#define MUC_INT7  7
+#define MUC_INT8  8
+#define MUC_INT9  9
+#define MUC_INT10 10
+#define MUC_INT11 11
+#define MUC_INT12 12
+#define MUC_INT13 13
+#define MUC_INT14 14
+
+/* UMS reset Masks */
+#define ARMSS_RESET 0x00000001   
+#define EBI_RESET   0x00000002
+#define DRAM_RESET  0x00000004   
+#define SRAM_RESET  0x00000008   
+#define DSPSS_RESET 0x00000010   
+#define DSP_RESET   0x00000020   
+#define MUC_RESET   0x00000040   
+#define NETSS_RESET 0x00000080   
+#define MMC_RESET   0x00000100   
+#define ENET_RESET  0x00000200   
+#define IOSS_RESET  0x00000400   
+#define PCI_RESET   0x00000800   
+#define SDIO_RESET  0x00001000   
+#define USB_RESET   0x00002000 
+#define BB_RESET    0x00004000  
+#define MB_RESET    0x00008000  
+#define ULA_RESET   0x00010000  
+
+struct muc_ctrl_reg
+{
+	volatile u32 int_mask;
+	volatile u32 int_status;
+	volatile u32 host_sem0;
+	volatile u32 host_sem1;
+	volatile u32 uc_sem0;
+	volatile u32 uc_sem1;
+	volatile u32 mac0_host_int_pri;
+	volatile u32 mac0_host_int_mask;
+	volatile u32 mac0_host_int_status;
+	volatile u32 mac0_host_int_gen;
+	volatile u32 mac1_host_int_pri; //Not in Ruby
+	volatile u32 mac1_host_int_mask; //Not in Ruby
+	volatile u32 mac1_host_int_status; //Not in Ruby
+	volatile u32 mac1_host_int_gen; //Not in Ruby
+	volatile u32 soft_rst;
+	volatile u32 lpbck_cntl; //Not in Ruby
+#if CONFIG_RUBY
+	volatile u32 global_tim0;
+	volatile u32 global_tim1;
+	volatile u32 global_csr;
+	volatile u32 mac_debug_sel;
+#endif
+} __attribute__ ((packed));
+
+struct ruby_sys_ctrl_reg
+{
+	volatile u32 reset_vector_mask; /* 0xE0000000 */
+	volatile u32 reset_vector;
+	volatile u32 system_control_mask;
+	volatile u32 system_control;
+	volatile u32 reset_cause;       /* 0xE0000010 */
+	volatile u32 csr;
+	volatile u32 debug_select;
+	volatile u32 l2m_int;
+	volatile u32 l2m_int_mask;      /* 0xE0000020 */
+	volatile u32 l2d_int;
+	volatile u32 l2d_int_mask;
+	volatile u32 m2l_int;
+	volatile u32 m2l_int_mask;      /* 0xE0000030 */
+	volatile u32 m2d_int;
+	volatile u32 m2d_int_mask;
+	volatile u32 d2l_int;
+	volatile u32 d2l_int_mask;      /* 0xE0000040 */
+	volatile u32 d2m_int;
+	volatile u32 d2m_int_mask;
+	volatile u32 lhost_int_en;
+	volatile u32 muc_int_en;        /* 0xE0000050 */
+	volatile u32 dsp_int_en;
+	volatile u32 lhost_int_or_en;
+	volatile u32 muc_int_or_en;
+	volatile u32 dsp_int_or_en;     /* 0xE0000060 */
+	volatile u32 muc_remap;
+	volatile u32 dsp_remap;
+	volatile u32 pcie_cfg_0;
+	volatile u32 pcie_cfg_1;        /* 0xE0000070 */
+	volatile u32 pcie_cfg_2;
+	volatile u32 pcie_cfg_3;
+	volatile u32 pcie_cfg_4;
+	volatile u32 pll0_ctrl;         /* 0xE0000080 */
+	volatile u32 pll1_ctrl;
+	volatile u32 proc_id;
+	volatile u32 pll2_ctrl;
+	volatile u32 reserved1;         /* 0xE0000090 */
+	volatile u32 l2m_sem;
+	volatile u32 m2l_sem;
+	volatile u32 l2d_sem;
+	volatile u32 d2l_sem;           /* 0xE00000A0 */
+	volatile u32 m2d_sem;
+	volatile u32 d2m_sem;
+	volatile u32 intr_inv0;
+	volatile u32 intr_inv1;         /* 0xE00000B0 */
+	volatile u32 gmii_clkdll;
+	volatile u32 debug_bus;
+	volatile u32 spare;
+	volatile u32 pcie_int_mask;     /* 0xE00000C0 */
+} __attribute__ ((packed));
+
+#define NETDEV_F_ALLOC		0x0001	/* dev allocated */
+#define	NETDEV_F_VALID		0x0002	/* dev is valid */
+#define	NETDEV_F_RUNNING	0x0004	/* dev is running */
+#define	NETDEV_F_PROMISC	0x0008	/* dev is in promiscuous mode */
+#define	NETDEV_F_ALLMULTI	0x0010
+#define	NETDEV_F_UP		0x0020	/* dev is up */
+#define	NETDEV_F_EXTERNAL	0x0040	/* dev is exposed to tcp/ip stack */
+
+#define SCAN_BASEBAND_RESET	0x0000
+#define SCAN_CHAN_CHANGE	0x0001
+
+extern void enable_host_irq(int devid, int irq);
+extern void disable_host_irq(int devid, int irq);
+
+#define BB_BASE_ADDR(_i)	(0xE6000000 + (_i * 0x800000))
+
+#define BB_SPI_BASE(_i)		(BB_BASE_ADDR(_i) + 0x40000)
+#define BB_RF_BASE(_i)		(BB_BASE_ADDR(_i) + 0x70000)
+#define BB_RDR_BASE(_i)		(BB_BASE_ADDR(_i) + 0x80000)
+
+/*
+ * Define RFIC version register.
+ * Expected to map across multiple versions of the RFIC
+ */
+
+#define RFIC_VERSION				(BB_SPI_BASE(0) + 0X0018)
+
+/*Define SPI registers */
+
+#define SPI_VCO_FREQ_CC_OUTPUT(_i)		(BB_SPI_BASE(_i) + 0X0004)
+#define SPI_LX_PLL_COUNTER_DATA_LSB(_i)		(BB_SPI_BASE(_i) + 0X0008)
+#define SPI_LX_PLL_COUNTER_DATA_MSB(_i)		(BB_SPI_BASE(_i) + 0X000C)
+#define SPI_RC_TUNING_DATA_OUT(_i)		(BB_SPI_BASE(_i) + 0X0010)
+#define SPI_CTRL_DATA_AGC_TST(_i)		(BB_SPI_BASE(_i) + 0X00C4)
+#define SPI_READ_AGC_RX1(_i)			(BB_SPI_BASE(_i) + 0X00C8)
+#define SPI_READ_AGC_RX2(_i)			(BB_SPI_BASE(_i) + 0X00CC)
+#define SPI_RX12_AGC_OUT(_i)			(BB_SPI_BASE(_i) + 0X00D0)
+#define SPI_ET_DAC_TX12(_i)			(BB_SPI_BASE(_i) + 0X00D4)
+#define SPI_RX1_DAC_I(_i)			(BB_SPI_BASE(_i) + 0X00D8)
+#define SPI_RX1_DAC_Q(_i)			(BB_SPI_BASE(_i) + 0X00DC)
+#define SPI_RX2_DAC_I(_i)			(BB_SPI_BASE(_i) + 0X00E0)
+#define SPI_RX2_DAC_Q(_i)			(BB_SPI_BASE(_i) + 0X00E4)
+#define SPI_PDN_RX_BUS_CTRL_BUS_1(_i)		(BB_SPI_BASE(_i) + 0X00E8)
+#define SPI_PDN_RX_BUS_CTRL_BUS_2(_i)		(BB_SPI_BASE(_i) + 0X00EC)
+#define SPI_PDN_RX_BUS_CTRL_BUS_3(_i)		(BB_SPI_BASE(_i) + 0x00F0)
+#define SPI_PDN_RX_BUS_CTRL_BUS_4(_i)		(BB_SPI_BASE(_i) + 0x00F4)
+#define SPI_MUX_IN(_i)				(BB_SPI_BASE(_i) + 0x00F8)
+#define SPI_MUX_OUT(_i)				(BB_SPI_BASE(_i) + 0x00FC)
+#define SPI_DAC_RX1TB1_I(_i)			(BB_SPI_BASE(_i) + 0x0100)
+#define SPI_DAC_RX1TB2_I(_i)			(BB_SPI_BASE(_i) + 0x0104)
+#define SPI_DAC_RX1TB3_I(_i)			(BB_SPI_BASE(_i) + 0x0108)
+#define SPI_DAC_RX1TB4_I(_i)			(BB_SPI_BASE(_i) + 0x010C)
+#define SPI_DAC_RX1TB1_Q(_i)			(BB_SPI_BASE(_i) + 0x0110)
+#define SPI_DAC_RX1TB2_Q(_i)			(BB_SPI_BASE(_i) + 0x0114)
+#define SPI_DAC_RX1TB3_Q(_i)			(BB_SPI_BASE(_i) + 0x0118)
+#define SPI_DAC_RX1TB4_Q(_i)			(BB_SPI_BASE(_i) + 0x011C)
+#define SPI_DAC_RX2TB1_I(_i)			(BB_SPI_BASE(_i) + 0x0120)
+#define SPI_DAC_RX2TB2_I(_i)			(BB_SPI_BASE(_i) + 0x0124)
+#define SPI_DAC_RX2TB3_I(_i)			(BB_SPI_BASE(_i) + 0x0128)
+#define SPI_DAC_RX2TB4_I(_i)			(BB_SPI_BASE(_i) + 0x012C)
+#define SPI_DAC_RX2TB1_Q(_i)			(BB_SPI_BASE(_i) + 0x0130)
+#define SPI_DAC_RX2TB2_Q(_i)			(BB_SPI_BASE(_i) + 0x0134)
+#define SPI_DAC_RX2TB3_Q(_i)			(BB_SPI_BASE(_i) + 0x0138)
+#define SPI_DAC_RX2TB4_Q(_i)			(BB_SPI_BASE(_i) + 0x013C)
+#define SPI_RX_PDN_11_0(_i)			(BB_SPI_BASE(_i) + 0x0140)
+#define SPI_RX_PDN_23_12(_i)			(BB_SPI_BASE(_i) + 0x0144)
+#define SPI_TX1_PDN_11_0(_i)			(BB_SPI_BASE(_i) + 0x0148)
+#define SPI_TX1_PDN_23_12(_i)			(BB_SPI_BASE(_i) + 0x014C)
+#define SPI_TX2_PDN_11_0(_i)			(BB_SPI_BASE(_i) + 0X0150)
+#define SPI_TX2_PDN_23_12(_i)			(BB_SPI_BASE(_i) + 0X0154)
+#define SPI_ALX_PDN_11_0(_i)			(BB_SPI_BASE(_i) + 0X0158)
+#define SPI_ALX_PDN_23_12(_i)			(BB_SPI_BASE(_i) + 0X015C)
+#define SPI_PDN_VAL_23_0(_i)			(BB_SPI_BASE(_i) + 0X0160)
+#define SPI_RX_CAL_OVRD_VAL(_i)			(BB_SPI_BASE(_i) + 0X0164)
+#define SPI_AGC_CTRL_DAC_RC_TST(_i)		(BB_SPI_BASE(_i) + 0X0168)
+#define SPI_PLL_FRACTIONAL(_i)			(BB_SPI_BASE(_i) + 0X016C)
+#define SPI_PLL_N_CH(_i)			(BB_SPI_BASE(_i) + 0X0170)
+#define SPI_CP_D_U_VCO(_i)			(BB_SPI_BASE(_i) + 0X0174)
+#define SPI_VCO_CLK_PFD_LPF(_i)			(BB_SPI_BASE(_i) + 0X0178)
+#define SPI_BG_BIAS_EXT_PTAT(_i)		(BB_SPI_BASE(_i) + 0X017C)
+#define SPI_CURR_OPT_DAC_TX_RX_SSB(_i)		(BB_SPI_BASE(_i) + 0X0180)
+#define SPI_RXMX_BBMX_VBIAS_ADJ(_i)		(BB_SPI_BASE(_i) + 0X0184)
+#define SPI_TX_SSB_DAC(_i)			(BB_SPI_BASE(_i) + 0X0188)
+#define SPI_BYP_PWR_OFS_PROBE_IQ(_i)		(BB_SPI_BASE(_i) + 0X018C)
+#define SPI_LNA12_GAIN_FREQ_CURR_LOOPBK(_i)	(BB_SPI_BASE(_i) + 0X0190)
+#define SPI_ICT_WCT_TX1_PA(_i)			(BB_SPI_BASE(_i) + 0X0194)
+#define SPI_ICT_WCT_TX2_PA(_i)			(BB_SPI_BASE(_i) + 0X0198)
+#define SPI_GCT_FCT_TX1_PA(_i)			(BB_SPI_BASE(_i) + 0X019C)
+#define SPI_WCT_TX12_PA(_i)			(BB_SPI_BASE(_i) + 0X01A0)
+#define SPI_MOD_ICT_ABS_VRRP(_i)		(BB_SPI_BASE(_i) + 0X01A4)
+#define SPI_MOD_CCT_RCT(_i)			(BB_SPI_BASE(_i) + 0X01A8)
+#define SPI_MOD_LO_GCT_TX_1MA(_i)		(BB_SPI_BASE(_i) + 0X01AC)
+#define SPI_MODDAC_1M_IQ(_i)			(BB_SPI_BASE(_i) + 0X01B0)
+#define SPI_MODDAC_1A_IQ(_i)			(BB_SPI_BASE(_i) + 0X01B4)
+#define SPI_MOD_LO_GCT_TX_2MA(_i)		(BB_SPI_BASE(_i) + 0X01B8)
+#define SPI_MODDAC_2M_IQ(_i)			(BB_SPI_BASE(_i) + 0X01BC)
+#define SPI_MODDAC_2A_IQ(_i)			(BB_SPI_BASE(_i) + 0X01C0)
+#define SPI_RX12_MUX_DATA_CNTL(_i)		(BB_SPI_BASE(_i) + 0X01C4)
+
+// timer register definition
+#define ARM_TIMER_BASE_ADDR				(0XF3000000)
+#define ARM_TIMER_MEM_LEN					(0x30)
+#define ARM_TIMER_PRESCALE_EN				(0x0000)
+#define ARM_TIMER_PRESCALE_0				(0x0004)
+#define ARM_TIMER_PRESCALE_1				(0x0008)
+#define ARM_TIMER_CTL(_i)					(0x000c + (_i)*8)
+#define ARM_TIMER_ENABLE					(QTN_BIT(0))
+#define ARM_TIMER_PERIODIC					(QTN_BIT(1))
+#define ARM_TIMER_PRESCALER_1				(QTN_BIT(2))
+
+#define ARM_TIMER_CNT(_i)					(0x0010 + (_i)*8)
+
+#if CONFIG_RUBY
+#define  QT3_BB_GLBL_SOFT_RST		0xE6000008
+#define  QT3_BB_TD_PA_CONF		0xE609048C
+
+/* The following definitions used to install the steering vectors */
+#define QT3_RF_NUM_CHAINS		4
+#define QT3_BB_Q_MATRIX_MEM		0xE6100000
+#define QT3_BB_Q_MATRIX_SIZE		0x8000
+#endif
+
+#endif /* _QTN_REGISTERS_H */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/ruby_cpumon.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/ruby_cpumon.h
new file mode 100644
index 0000000..07f6fdb
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/ruby_cpumon.h
@@ -0,0 +1,49 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012 Quantenna Communications, Inc.                 **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef __QTN_CPUMON_H
+#define __QTN_CPUMON_H
+
+void ruby_cpumon_get_cycles(uint64_t *sleep, uint64_t *awake);
+
+#endif	/* __QTN_CPUMON_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/semaphores.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/semaphores.h
new file mode 100644
index 0000000..0f69b29
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/semaphores.h
@@ -0,0 +1,58 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2011 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications Inc                               **
+**  File        : semaphores.h                                               **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _QTN_SEMAPHORES_H_
+#define _QTN_SEMAPHORES_H_
+
+
+#define QTN_SEM_HOST_LINK_SEMNUM	0
+#define QTN_SEM_NUMFREESEM		31
+#define QTN_SEM_BB_MUTEX_SEMNUM		31
+
+
+#endif // #ifndef _QTN_SEMAPHORES_H_
+
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_defs.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_defs.h
new file mode 100644
index 0000000..27a2657
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_defs.h
@@ -0,0 +1,735 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2010 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications Inc                               **
+**  File        : shared_defs.h                                              **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _SHARED_DEFS_H_
+#define _SHARED_DEFS_H_
+
+#include "shared_defs_common.h"
+
+#define QTN_SWITCH_CHANNEL_TIME_AVG	3750	/* microseconds */
+
+#define IEEE80211_MAX_NAV	32767
+
+/* SCS (ACI/CCI Detection and Mitigation) APIs */
+enum qtn_vap_scs_cmds {
+	IEEE80211_SCS_SET_ENABLE = 1,
+	IEEE80211_SCS_SET_DEBUG_ENABLE,
+	IEEE80211_SCS_SET_SAMPLE_ENABLE,
+	IEEE80211_SCS_SET_SAMPLE_DWELL_TIME,
+	IEEE80211_SCS_SET_SAMPLE_INTERVAL,
+	IEEE80211_SCS_SET_THRSHLD_SMPL_PKTNUM,
+	IEEE80211_SCS_SET_THRSHLD_PRI_CCA,
+	IEEE80211_SCS_SET_THRSHLD_SEC_CCA,
+	IEEE80211_SCS_SET_THRSHLD_SMPL_AIRTIME,
+	IEEE80211_SCS_SET_WF_CCA,
+	IEEE80211_SCS_SET_WF_RSSI,
+	IEEE80211_SCS_SET_WF_CRC_ERR,
+	IEEE80211_SCS_SET_WF_LPRE,
+	IEEE80211_SCS_SET_WF_SPRE,
+	IEEE80211_SCS_SET_WF_RETRIES,
+	IEEE80211_SCS_SET_WF_DFS,
+	IEEE80211_SCS_SET_WF_MAX_TX_PWR,
+	IEEE80211_SCS_SET_REPORT_ONLY,
+	IEEE80211_SCS_SET_CCA_INTF_RATIO,
+	IEEE80211_SCS_SET_CCA_IDLE_THRSHLD,
+	IEEE80211_SCS_SET_CCA_INTF_LO_THR,
+	IEEE80211_SCS_SET_CCA_INTF_HI_THR,
+	IEEE80211_SCS_SET_CCA_SMPL_DUR,
+	IEEE80211_SCS_GET_REPORT,
+	IEEE80211_SCS_GET_INTERNAL_STATS,
+	IEEE80211_SCS_SET_CCA_INTF_SMTH_FCTR,
+	IEEE80211_SCS_RESET_RANKING_TABLE,
+	IEEE80211_SCS_SET_CHAN_MTRC_MRGN,
+	IEEE80211_SCS_SET_RSSI_SMTH_FCTR,
+	IEEE80211_SCS_SET_ATTEN_ADJUST,
+	IEEE80211_SCS_SET_ATTEN_SWITCH_ENABLE,
+	IEEE80211_SCS_SET_THRSHLD_ATTEN_INC,
+	IEEE80211_SCS_SET_THRSHLD_DFS_REENTRY,
+	IEEE80211_SCS_SET_THRSHLD_DFS_REENTRY_MINRATE,
+	IEEE80211_SCS_SET_PMBL_ERR_SMTH_FCTR,
+	IEEE80211_SCS_SET_PMBL_ERR_RANGE,
+	IEEE80211_SCS_SET_PMBL_ERR_MAPPED_INTF_RANGE,
+	IEEE80211_SCS_SET_THRSHLD_LOAD,
+	IEEE80211_SCS_SET_PMBL_ERR_WF,
+	IEEE80211_SCS_SET_THRSHLD_AGING_NOR,
+	IEEE80211_SCS_SET_THRSHLD_AGING_DFSREENT,
+	IEEE80211_SCS_SET_THRSHLD_DFS_REENTRY_INTF,
+	IEEE80211_SCS_SET_PMP_RPT_CCA_SMTH_FCTR,
+	IEEE80211_SCS_SET_PMP_RX_TIME_SMTH_FCTR,
+	IEEE80211_SCS_SET_PMP_TX_TIME_SMTH_FCTR,
+	IEEE80211_SCS_SET_PMP_STATS_STABLE_PERCENT,
+	IEEE80211_SCS_SET_PMP_STATS_STABLE_RANGE,
+	IEEE80211_SCS_SET_PMP_STATS_CLEAR_INTERVAL,
+	IEEE80211_SCS_SET_PMP_TXTIME_COMPENSATION,
+	IEEE80211_SCS_SET_PMP_RXTIME_COMPENSATION,
+	IEEE80211_SCS_SET_PMP_TDLSTIME_COMPENSATION,
+	IEEE80211_SCS_SET_SWITCH_CHANNEL_MANUALLY,
+	IEEE80211_SCS_SET_AS_RX_TIME_SMTH_FCTR,
+	IEEE80211_SCS_SET_AS_TX_TIME_SMTH_FCTR,
+	IEEE80211_SCS_SET_STATS_START,
+	IEEE80211_SCS_SET_CCA_IDLE_SMTH_FCTR,
+	IEEE80211_SCS_SET_PMBL_ERR_THRSHLD,
+	IEEE80211_SCS_SET_CCA_INTF_DFS_MARGIN,
+	IEEE80211_SCS_SET_LEAVE_DFS_CHAN_MTRC_MRGN,
+	IEEE80211_SCS_SET_CCA_THRESHOLD_TYPE,
+	IEEE80211_SCS_SET_SAMPLE_TYPE,
+	IEEE80211_SCS_SET_BURST_ENABLE,
+	IEEE80211_SCS_SET_BURST_WINDOW,
+	IEEE80211_SCS_SET_BURST_THRESH,
+	IEEE80211_SCS_SET_BURST_PAUSE,
+	IEEE80211_SCS_SET_BURST_SWITCH,
+	IEEE80211_SCS_SET_MAX
+};
+
+#define IEEE80211_SCS_STATE_INIT			0
+#define IEEE80211_SCS_STATE_RESET			1
+#define IEEE80211_SCS_STATE_CHANNEL_SWITCHING		2
+#define IEEE80211_SCS_STATE_MEASUREMENT_CHANGE_CLEAN	3    /* param change */
+#define IEEE80211_SCS_STATE_PERIOD_CLEAN		4
+
+#define IEEE80211_SCS_COMPARE_INIT_TIMER	5
+#define IEEE80211_SCS_COMPARE_TIMER_INTVAL	2
+#define IEEE80211_CCA_SAMPLE_DUR		IEEE80211_SCS_COMPARE_TIMER_INTVAL /* seconds */
+#define IEEE80211_SCS_CHAN_CURRENT		0
+#define IEEE80211_SCS_CHAN_ALL			0xFF
+#define IEEE80211_SCS_THRSHLD_MAX		100	/* metric */
+#define IEEE80211_SCS_THRSHLD_MIN		1	/* metric */
+#define IEEE80211_SCS_SMPL_DWELL_TIME_MAX	24	/* milliseconds, limited by max NAV reservation */
+#define IEEE80211_SCS_SMPL_DWELL_TIME_MIN	5	/* milliseconds */
+#define IEEE80211_SCS_SMPL_DWELL_TIME_DEFAULT	20	/* milliseconds */
+#define IEEE80211_SCS_SMPL_INTV_MAX		3600	/* seconds */
+#define IEEE80211_SCS_SMPL_INTV_MIN		1	/* seconds */
+#define IEEE80211_SCS_SMPL_INTV_DEFAULT		5	/* seconds */
+#define IEEE80211_SCS_THRSHLD_SMPL_PKTNUM_DEFAULT	16	/* packet number */
+#define IEEE80211_SCS_THRSHLD_SMPL_PKTNUM_MAX	1000	/* packet number */
+#define IEEE80211_SCS_THRSHLD_SMPL_PKTNUM_MIN	1	/* packet number */
+#define IEEE80211_SCS_THRSHLD_SMPL_AIRTIME_DEFAULT	200	/* ms */
+#define IEEE80211_SCS_THRSHLD_SMPL_AIRTIME_MAX	1000	/* ms */
+#define IEEE80211_SCS_THRSHLD_SMPL_AIRTIME_MIN	1	/* ms */
+#define IEEE80211_SCS_THRSHLD_PMBL_ERR_MAX	10000	/* count */
+#define IEEE80211_SCS_THRSHLD_PMBL_ERR_MIN	1	/* count */
+
+/*
+ * Packet rate threshold is determined by how many packets we can hold in buffer without drop
+ * during off-channel period. It is limited by:
+ * - sw queue length of each node/tid
+ * - global resource shared by all node/tid, such as tqew descriptors and msdu headers.
+ * Current value doesn't apply to the scenario when tqew descriptors are already used up by large
+ * number of stations.
+ */
+#define IEEE80211_SCS_THRSHLD_SMPL_TX_PKTRATE	(1024 - 128)	/* margin = 128 + hw ring size */
+#define IEEE80211_SCS_THRSHLD_SMPL_RX_PKTRATE	IEEE80211_SCS_THRSHLD_SMPL_TX_PKTRATE /* assume qtn peer */
+#define IEEE80211_SCS_THRSHLD_ATTEN_INC_DFT	5	/* db */
+#define IEEE80211_SCS_THRSHLD_ATTEN_INC_MIN     0       /* db */
+#define IEEE80211_SCS_THRSHLD_ATTEN_INC_MAX     20      /* db */
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_DFT	60	/* seconds */
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_MIN   0       /* seconds */
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_MAX   0xffff  /* seconds */
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_INTF_MIN   0
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_INTF_MAX   100
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_INTF_DFT   40
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_MINRATE_UNIT	100	/* kbps */
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_MINRATE_DFT	5	/* unit: 100kbps */
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_MINRATE_MIN   0       /* unit: 100kbps */
+#define IEEE80211_SCS_THRSHLD_DFS_REENTRY_MINRATE_MAX   0xffff  /* unit: 100kbps */
+#define IEEE80211_SCS_THRSHLD_AGING_MIN         0
+#define IEEE80211_SCS_THRSHLD_AGING_MAX         0xFFFF
+#define IEEE80211_SCS_THRSHLD_AGING_NOR_DFT     (60 * 6)
+#define IEEE80211_SCS_THRSHLD_AGING_DFSREENT_DFT  5
+#define IEEE80211_SCS_CCA_DUR_MAX		10	/* seconds */
+#define IEEE80211_SCS_CCA_DUR_MIN		2	/* seconds */
+#define IEEE80211_SCS_CCA_INTF_SCALE		1000	/* milliseconds */
+#define IEEE80211_SCS_SENDING_QOSNULL_TIME_AVG	1000	/* microseconds */
+#define IEEE80211_SCS_SMPL_TIME_MARGIN		2000	/* microseconds */
+#define IEEE80211_SCS_SMPL_TIME_OFFSET_SEND_QOSNULL	5000	/* microseconds */
+#define IEEE80211_SCS_SMPL_TIME_SENDING_ALL_BEACONS	25000	/* microseconds, the time duration for transmitting all beacons */
+#define IEEE80211_CCA_INTF_SMTH_FCTR_NOXP_DFT	75
+#define IEEE80211_CCA_INTF_SMTH_FCTR_XPED_DFT	90
+#define IEEE80211_CCA_INTF_SMTH_FCTR_MIN	0
+#define IEEE80211_CCA_INTF_SMTH_FCTR_MAX	100
+#define IEEE80211_SCS_CHAN_MTRC_MRGN_MAX	100
+#define IEEE80211_SCS_CHAN_MTRC_MRGN_DFT	15
+#define IEEE80211_SCS_LEAVE_DFS_CHAN_MTRC_MRGN_DFT	25
+#define IEEE80211_SCS_RSSI_SMTH_FCTR_UP_DFT	75
+#define IEEE80211_SCS_RSSI_SMTH_FCTR_DOWN_DFT	25
+#define IEEE80211_SCS_RSSI_SMTH_FCTR_MAX	100
+#define IEEE80211_SCS_ATTEN_ADJUST_MIN		-20
+#define IEEE80211_SCS_ATTEN_ADJUST_MAX		20
+#define IEEE80211_SCS_ATTEN_ADJUST_DFT		5
+#define IEEE80211_SCS_BRCM_RXGLITCH_THRSHLD_SCALE_DFT    40
+#define IEEE80211_SCS_PMBL_ERR_SMTH_FCTR_MIN    0
+#define IEEE80211_SCS_PMBL_ERR_SMTH_FCTR_MAX    100
+#define IEEE80211_SCS_PMBL_ERR_SMTH_FCTR_DFT    66
+#define IEEE80211_SCS_CCA_IDLE_SMTH_FCTR_MIN    0
+#define IEEE80211_SCS_CCA_IDLE_SMTH_FCTR_MAX    100
+#define IEEE80211_SCS_CCA_IDLE_SMTH_FCTR_DFT    50
+#define IEEE80211_SCS_PMP_RPT_CCA_SMTH_FCTR_MAX    100
+#define IEEE80211_SCS_PMP_RPT_CCA_SMTH_FCTR_DFT    66
+#define IEEE80211_SCS_PMP_RX_TIME_SMTH_FCTR_MAX    100
+#define IEEE80211_SCS_PMP_RX_TIME_SMTH_FCTR_DFT    66
+#define IEEE80211_SCS_PMP_TX_TIME_SMTH_FCTR_MAX    100
+#define IEEE80211_SCS_PMP_TX_TIME_SMTH_FCTR_DFT    66
+#define IEEE80211_SCS_PMP_STATS_STABLE_PERCENT_MAX  100
+#define IEEE80211_SCS_PMP_STATS_STABLE_PERCENT_DFT  30
+#define IEEE80211_SCS_PMP_STATS_STABLE_RANGE_MAX    1000
+#define IEEE80211_SCS_PMP_STATS_STABLE_RANGE_DFT    50
+#define IEEE80211_SCS_PMP_STATS_CLEAR_INTERVAL_MAX  3600 /* seconds */
+#define IEEE80211_SCS_PMP_STATS_CLEAR_INTERVAL_DFT  60 /* seconds */
+#define IEEE80211_SCS_AS_RX_TIME_SMTH_FCTR_MAX    100
+#define IEEE80211_SCS_AS_RX_TIME_SMTH_FCTR_DFT    50
+#define IEEE80211_SCS_AS_TX_TIME_SMTH_FCTR_MAX    100
+#define IEEE80211_SCS_AS_TX_TIME_SMTH_FCTR_DFT    50
+
+#define IEEE80211_SCS_SMTH_RBS_TIME			80
+
+#define IEEE80211_SCS_PMBL_ERR_RANGE_MIN        1000
+#define IEEE80211_SCS_PMBL_ERR_RANGE_MAX        0xFFFF
+#define IEEE80211_SCS_PMBL_ERR_RANGE_DFT        5000
+#define IEEE80211_SCS_PMBL_ERR_MAPPED_INTF_RANGE_MIN  0
+#define IEEE80211_SCS_PMBL_ERR_MAPPED_INTF_RANGE_MAX  100
+#define IEEE80211_SCS_PMBL_ERR_MAPPED_INTF_RANGE_DFT  40
+#define IEEE80211_SCS_PMBL_ERR_WF_MIN           0
+#define IEEE80211_SCS_PMBL_ERR_WF_MAX           100
+#define IEEE80211_SCS_PMBL_SHORT_WF_DFT         0
+#define IEEE80211_SCS_PMBL_LONG_WF_DFT          100
+#define IEEE80211_SCS_THRSHLD_LOADED_MIN        0
+#define IEEE80211_SCS_THRSHLD_LOADED_MAX        1000
+#define IEEE80211_SCS_THRSHLD_LOADED_DFT        20
+
+#define IEEE80211_SCS_CHAN_POWER_CUTPOINT       15
+#define IEEE80211_SCS_NORMALIZE(_v, _duration)       (((_v) < (0xFFFFFFFF / IEEE80211_SCS_CCA_INTF_SCALE)) ?  \
+							((_v) * IEEE80211_SCS_CCA_INTF_SCALE / (_duration)) : \
+							((_v) / (_duration) * IEEE80211_SCS_CCA_INTF_SCALE))
+
+#define IEEE80211_SCS_SMOOTH(_old, _new, _fctr)	(((_old) * (_fctr) + (_new) * (100 - (_fctr))) / 100)
+
+#define IEEE80211_SCS_OFFCHAN_WHOLE_DUR(_dwell_us)	((_dwell_us) +					\
+							(2 * QTN_SWITCH_CHANNEL_TIME_AVG) +		\
+							IEEE80211_SCS_SENDING_QOSNULL_TIME_AVG +	\
+							IEEE80211_SCS_SMPL_TIME_MARGIN)
+
+#define IEEE80211_SCS_VALUE_S			0
+#define IEEE80211_SCS_VALUE_M			0xffff
+#define IEEE80211_SCS_WF_VALUE_M		0xff
+#define IEEE80211_SCS_COMMAND_S			16
+#define IEEE80211_SCS_COMMAND_M			0xffff
+
+#define IEEE80211_SCS_NA_CC			0x0
+#define IEEE80211_SCS_STA_CCA_REQ_CC		0x1
+#define IEEE80211_SCS_SELF_CCA_CC               0x2
+#define IEEE80211_SCS_ATTEN_INC_CC		0x4
+#define IEEE80211_SCS_BRCM_STA_TRIGGER_CC	0x8
+#define IEEE80211_SCS_CCA_INTF_CC               (IEEE80211_SCS_STA_CCA_REQ_CC | IEEE80211_SCS_SELF_CCA_CC)
+#define IEEE80211_SCS_INTF_CC                   (IEEE80211_SCS_CCA_INTF_CC | IEEE80211_SCS_BRCM_STA_TRIGGER_CC)
+
+#define IEEE80211_SCS_BURST_ENABLE_MIN		0
+#define IEEE80211_SCS_BURST_ENABLE_MAX		1
+#define	IEEE80211_SCS_BURST_ENABLE_DEFAULT	0
+#define IEEE80211_SCS_BURST_WINDOW_MIN		(1)	/* minutes */
+#define IEEE80211_SCS_BURST_WINDOW_MAX		(300)	/* minutes */
+#define	IEEE80211_SCS_BURST_WINDOW_DEFAULT	(180)	/* minutes */
+#define IEEE80211_SCS_BURST_THRESH_MIN		2
+#define IEEE80211_SCS_BURST_THRESH_MAX		100
+#define	IEEE80211_SCS_BURST_THRESH_DEFAULT	3
+#define IEEE80211_SCS_BURST_PAUSE_MIN		(30)	/* minutes */
+#define IEEE80211_SCS_BURST_PAUSE_MAX		(600)	/* minutes */
+#define	IEEE80211_SCS_BURST_PAUSE_DEFAULT	(60)	/* minutes */
+#define IEEE80211_SCS_BURST_SWITCH_MIN		0
+#define IEEE80211_SCS_BURST_SWITCH_MAX		1
+#define	IEEE80211_SCS_BURST_SWITCH_DEFAULT	0
+
+#define IEEE80211_SCS_UNSTABLE_INTF			0x00000001
+#define IEEE80211_SCS_UNSTABLE_INTF_OUTDATED		0x00000002
+#define IEEE80211_SCS_UNSTABLE_INTF_INVALID		0x00000004
+#define IEEE80211_SCS_UNSTABLE_IDLE			0x00000008
+#define IEEE80211_SCS_UNSTABLE_IDLE_OUTDATED		0x00000010
+#define IEEE80211_SCS_UNSTABLE_OTHERSTIME		0x00000020
+#define IEEE80211_SCS_UNSTABLE_OTHERSTIME_OUTDATED	0x00000040
+#define IEEE80211_SCS_UNSTABLE_TDLS_TX			0x00000080
+#define IEEE80211_SCS_UNSTABLE_TDLS_RX			0x00000100
+#define IEEE80211_SCS_UNSTABLE_TDLS_OUTDATED		0x00000200
+
+#define	IEEE80211_REMAIN_CHAN_MAX_RSV_PERD	4
+#define	IEEE80211_REMAIN_CHAN_MIN_RSV_PERD	3
+
+enum ieee80211_scs_update_mode {
+	IEEE80211_SCS_OFFCHAN,		/* off-channel, use smoothing and omit current channel */
+	IEEE80211_SCS_COCHAN,		/* co-channel mode */
+	IEEE80211_SCS_INIT_SCAN,	/* like off-channel but include current channel */
+};
+
+#define SCSLOG_CRIT                             0
+#define SCSLOG_NOTICE                           1
+#define SCSLOG_INFO                             2
+#define SCSLOG_VERBOSE                          3
+#define SCSLOG_LEVEL_MAX                        3
+#if !defined(MUC_BUILD) && !defined(DSP_BUILD) && !defined(AUC_BUILD)
+#define SCSDBG(_level, _fmt, ...)            do {               \
+		if (ic->ic_scs.scs_debug_enable >= (_level)) {  \
+			DBGFN("SCS: " _fmt, ##__VA_ARGS__);     \
+		}                                               \
+	} while (0)
+#endif
+
+
+/* OCAC (Off-channel CAC) APIs */
+enum qtn_ocac_cmds {
+	IEEE80211_OCAC_SET_ENABLE = 1,
+	IEEE80211_OCAC_SET_DISABLE,
+	IEEE80211_OCAC_SET_DEBUG_LEVEL,
+	IEEE80211_OCAC_SET_DWELL_TIME,
+	IEEE80211_OCAC_SET_DURATION,
+	IEEE80211_OCAC_SET_THRESHOLD_FAT,
+	IEEE80211_OCAC_SET_DUMP_COUNTS,
+	IEEE80211_OCAC_SET_CAC_TIME,
+	IEEE80211_OCAC_SET_THRESHOLD_TRAFFIC,
+	IEEE80211_OCAC_SET_TIMER_INTERVAL,
+	IEEE80211_OCAC_SET_DUMP_TSFLOG,
+	IEEE80211_OCAC_SET_DUMP_CFG,
+	IEEE80211_OCAC_SET_TRAFFIC_CONTROL,
+	IEEE80211_OCAC_SET_THRESHOLD_CCA_INTF,
+	IEEE80211_OCAC_SET_REPORT_ONLY,
+	IEEE80211_OCAC_SET_DUMP_CCA_COUNTS,
+	IEEE80211_OCAC_SET_OFFSET_TXHALT,
+	IEEE80211_OCAC_SET_OFFSET_OFFCHAN,
+	IEEE80211_OCAC_SET_THRESHOLD_FAT_DEC,
+	IEEE80211_OCAC_SET_TIMER_EXPIRE_INIT,
+	IEEE80211_OCAC_SET_SECURE_DWELL_TIME,
+	IEEE80211_OCAC_SET_BEACON_INTERVAL,
+	IEEE80211_OCAC_SET_WEATHER_DURATION,
+	IEEE80211_OCAC_SET_WEATHER_CAC_TIME,
+	IEEE80211_OCAC_SET_WEATHER_DWELL_TIME,
+	IEEE80211_OCAC_SET_ENABLE_AUTO_DFS,
+	IEEE80211_OCAC_SET_MAX
+};
+
+enum qtn_ocac_get_cmds {
+	IEEE80211_OCAC_GET_STATUS = 1,
+	IEEE80211_OCAC_GET_AVAILABILITY,
+};
+
+#define IEEE80211_OCAC_CLEAN_STATS_STOP		0
+#define IEEE80211_OCAC_CLEAN_STATS_START	1
+#define IEEE80211_OCAC_CLEAN_STATS_RESET	2
+
+
+#define IEEE80211_OCAC_DWELL_TIME_MIN		5	/* milliseconds */
+#define IEEE80211_OCAC_DWELL_TIME_MAX		200	/* milliseconds */
+#define IEEE80211_OCAC_DWELL_TIME_DEFAULT	40	/* milliseconds */
+#define IEEE80211_OCAC_WEA_DWELL_TIME_DEFAULT	46	/* milliseconds */
+
+#define IEEE80211_OCAC_SECURE_DWELL_TIME_MIN		5	/* milliseconds */
+#define IEEE80211_OCAC_SECURE_DWELL_TIME_MAX		23	/* milliseconds */
+#define IEEE80211_OCAC_SECURE_DWELL_TIME_DEFAULT	23	/* milliseconds */
+
+#define IEEE80211_OCAC_DURATION_MIN		1	/* seconds */
+#define IEEE80211_OCAC_DURATION_MAX		64800	/* seconds */
+#define IEEE80211_OCAC_DURATION_DEFAULT		720	/* seconds */
+
+#define IEEE80211_OCAC_CAC_TIME_MIN		1	/* seconds */
+#define IEEE80211_OCAC_CAC_TIME_MAX		64800	/* seconds */
+#define IEEE80211_OCAC_CAC_TIME_DEFAULT		240	/* seconds */
+
+#define IEEE80211_OCAC_WEA_DURATION_MIN		60	/* seconds */
+#define IEEE80211_OCAC_WEA_DURATION_MAX		86400	/* seconds */
+#define IEEE80211_OCAC_WEA_DURATION_DEFAULT	11520	/* seconds */
+
+#define IEEE80211_OCAC_WEA_CAC_TIME_MIN		1	/* seconds */
+#define IEEE80211_OCAC_WEA_CAC_TIME_MAX		86400	/* seconds */
+#define IEEE80211_OCAC_WEA_CAC_TIME_DEFAULT	4329	/* seconds */
+
+#define IEEE80211_OCAC_THRESHOLD_FAT_MIN	1	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_FAT_MAX	100	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_FAT_DEFAULT	90	/* percent */
+
+#define IEEE80211_OCAC_THRESHOLD_TRAFFIC_MIN		1	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_TRAFFIC_MAX		100	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_TRAFFIC_DEFAULT	30	/* percent */
+
+#define IEEE80211_OCAC_OFFSET_TXHALT_MIN		2	/* milliseconds */
+#define IEEE80211_OCAC_OFFSET_TXHALT_MAX		80	/* milliseconds */
+#define IEEE80211_OCAC_OFFSET_TXHALT_DEFAULT		10	/* milliseconds */
+
+#define IEEE80211_OCAC_OFFSET_OFFCHAN_MIN		2	/* milliseconds */
+#define IEEE80211_OCAC_OFFSET_OFFCHAN_MAX		80	/* milliseconds */
+#define IEEE80211_OCAC_OFFSET_OFFCHAN_DEFAULT		5	/* milliseconds */
+
+#define IEEE80211_OCAC_TRAFFIC_CTRL_DEFAULT		1	/* on */
+
+#define IEEE80211_OCAC_THRESHOLD_CCA_INTF_MIN		1	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_CCA_INTF_MAX		100	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_CCA_INTF_DEFAULT	20	/* percent */
+
+#define IEEE80211_OCAC_THRESHOLD_FAT_DEC_MIN		1	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_FAT_DEC_MAX		100	/* percent */
+#define IEEE80211_OCAC_THRESHOLD_FAT_DEC_DEFAULT	10	/* percent */
+
+#define IEEE80211_OCAC_TIMER_INTERVAL_MIN		1	/* seconds */
+#define IEEE80211_OCAC_TIMER_INTERVAL_MAX		100	/* seconds */
+#define IEEE80211_OCAC_TIMER_INTERVAL_DEFAULT		2	/* seconds */
+
+#define IEEE80211_OCAC_BEACON_INTERVAL_MIN		100	/* TUs */
+#define IEEE80211_OCAC_BEACON_INTERVAL_MAX		1000	/* TUs */
+#define IEEE80211_OCAC_BEACON_INTERVAL_DEFAULT		100	/* TUs */
+
+#define IEEE80211_OCAC_TIMER_EXPIRE_INIT_MIN		1	/* seconds */
+#define IEEE80211_OCAC_TIMER_EXPIRE_INIT_MAX		65000	/* seconds */
+#define IEEE80211_OCAC_TIMER_EXPIRE_INIT_DEFAULT	2	/* seconds */
+
+#define	IEEE80211_OBSS_PASSIVE_DWELL_DEFAULT		20
+#define	IEEE80211_OBSS_ACTIVE_DWELL_DEFAULT		10
+#define	IEEE80211_OBSS_TRIGGER_INTERVAL_DEFAULT		200
+#define	IEEE80211_OBSS_PASSIVE_TOTAL_DEFAULT		200
+#define	IEEE80211_OBSS_ACTIVE_TOTAL_DEFAULT		20
+#define	IEEE80211_OBSS_CHANNEL_WIDTH_DELAY_DEFAULT	5
+#define	IEEE80211_OBSS_ACTIVITY_THRESHOLD_DEFAULT	25
+
+#define IEEE80211_OCAC_VALUE_S			0
+#define IEEE80211_OCAC_VALUE_M			0xffff
+#define IEEE80211_OCAC_COMMAND_S		16
+#define IEEE80211_OCAC_COMMAND_M		0xffff
+#define IEEE80211_OCAC_COMPRESS_VALUE_F		0x8000
+#define IEEE80211_OCAC_COMPRESS_VALUE_M		0x7fff
+
+#define IEEE80211_OCAC_TIME_MARGIN		2000	/* microseconds */
+
+#define OCACLOG_CRIT				0
+#define OCACLOG_WARNING				1
+#define OCACLOG_NOTICE				2
+#define OCACLOG_INFO				3
+#define OCACLOG_VERBOSE				4
+#define OCACLOG_LEVEL_MAX			4
+#if !defined(MUC_BUILD) && !defined(DSP_BUILD) && !defined(AUC_BUILD)
+#define OCACDBG(_level, _fmt, ...)            do {               \
+		if (ic->ic_ocac.ocac_cfg.ocac_debug_level >= (_level)) {  \
+			DBGFN("DFS_s_radio: " _fmt, ##__VA_ARGS__);     \
+		}                                               \
+        } while (0)
+#endif
+
+#define QTN_M2A_EVENT_TYPE_DTIM		1
+#define	QTN_M2A_PS_EVENT_PM_ENABLE	2		/* enable power management */
+#define	QTN_M2A_PS_EVENT_PM_DISABLE	3		/* disable power management */
+#define	QTN_M2A_PS_EVENT_PS_POLL	4		/* ps poll */
+#define	QTN_M2A_EVENT_TYPE_UAPSD_SP	5		/* U-APSD SP */
+#define QTN_M2A_EVENT_PTID_FLAG_SET     6               /* Set per-TID flag(muc) */
+#define QTN_M2A_EVENT_TYPE_TXBA_DISABLE	7		/* per VAP TX BA est control */
+
+/* Common definitions for flags used to indicate ieee80211_node's states */
+#define	IEEE80211_NODE_AUTH		0x0001	/* authorized for data */
+#define	IEEE80211_NODE_QOS		0x0002	/* QoS enabled */
+#define	IEEE80211_NODE_ERP		0x0004	/* ERP enabled */
+#define	IEEE80211_NODE_HT		0x0008	/* HT enabled */
+/* NB: this must have the same value as IEEE80211_FC1_PWR_MGT */
+#define	IEEE80211_NODE_PWR_MGT		0x0010	/* power save mode enabled */
+#define	IEEE80211_NODE_PS_DELIVERING	0x0040	/* STA out of PS, getting delivery */
+#define	IEEE80211_NODE_PS_POLL		0x0080	/* power save ps poll mode */
+#define	IEEE80211_NODE_AREF		0x0020	/* authentication ref held */
+#define IEEE80211_NODE_2_TX_CHAINS      0x0400  /* this node needs to use 2 TX chain only, for IOT purpose */
+#define IEEE80211_NODE_UAPSD		0x1000
+#define IEEE80211_NODE_WDS_PEER		0x2000	/* this node is the wds peer in a wds vap */
+#define IEEE80211_NODE_VHT		0x4000	/* VHT enabled */
+#define IEEE80211_NODE_TPC		0x8000	/* indicate tpc capability */
+
+/* Common definitions for ext_flags */
+#define IEEE80211_NODE_TDLS_PTI_REQ	0x0001	/* Should send PTI request to peer */
+#define IEEE80211_NODE_TDLS_PTI_PENDING	0x0002	/* PTI request sent to peer but not yet responded to */
+#define IEEE80211_NODE_UAPSD_SP_IN_PROGRESS	0x0004	/* U-APSD SP in progress */
+#define IEEE80211_NODE_TDLS_PTI_RESP	0x0008	/* PTI response frame received */
+#define	IEEE80211_NODE_TDLS_AUTH	0x0010	/* authorized for TDLS link data */
+#define IEEE80211_NODE_OP_MODE_NOTI	0x0020	/* operation mode notification */
+#define IEEE80211_NODE_BSS_TRANSITION   0x0040  /* bss transition */
+#define	IEEE80211_NODE_TDLS_PTI_MASK	0x000B	/* Mask for TDLS PTI bits */
+
+#define QTN_VAP_PRIORITY_RESERVED	2	/* reserve the low values for internal use */
+#define QTN_VAP_PRIORITY_NUM		4
+#define QTN_VAP_PRIORITY_MGMT		(QTN_VAP_PRIORITY_RESERVED + QTN_VAP_PRIORITY_NUM)
+#define QTN_TACMAP_HW_PRI_NUM		8	/* hw limitation for 128 node mode */
+#define QTN_TACMAP_PRI_PER_VAP		8	/* for maximum 8 TIDs */
+#define QTN_TACMAP_SW_PRI_BASE		64	/* values below this are used for "bad apple" nodes */
+
+/* Quantenna specific flags (ni_qtn_flags), do not modify in Auc */
+#define QTN_IS_BCM_NODE			0x0000001
+#define QTN_IS_INTEL_5100_NODE		0x0000002
+#define QTN_IS_INTEL_5300_NODE		0x0000004
+#define QTN_IS_GALAXY_NOTE_4_NODE	0x0000008
+#define QTN_IS_NOT_4ADDR_CAPABLE_NODE	0x0000010
+#define QTN_AC_BE_INHERITANCE_UPTO_VO	0x0000020
+#define QTN_AC_BE_INHERITANCE_UPTO_VI	0x0000040
+#define QTN_IS_INTEL_NODE		0x0000080
+#define QTN_IS_REALTEK_NODE		0x0000100
+#define	QTN_NODE_TX_RESTRICTED		0x0000200 /* restricted tx enabled */
+#define	QTN_NODE_TX_RESTRICT_RTS	0x0000400 /* use RTS to confirm node is lost */
+#define QTN_OPTI_NODE			0x0000800
+#define QTN_NODE_RXAMSDU_SUPPORT	0x0001000 /* node supports A-MSDU rx, so we may TX A-MSDU to it — NOTE(review): name says RX, original comment said TX; verify */
+#define QTN_NODE_11N_TXAMSDU_OFF	0x0002000
+#define	QTN_NODE_TXOP_RESTRICTED	0x0004000
+/*
+ * Bits that can be updated again by Lhost after association creation. Explicit definition helps
+ * avoid overwriting bits maintained by MuC itself.
+ */
+#define QTN_FLAGS_UPDATABLE_BITS	(QTN_IS_INTEL_NODE)
+
+/* QTN bandwidth definition - make sure this is up-to-date with regards
+ * to txbf_common.h
+ */
+#define QTN_BW_20M	0
+#define QTN_BW_40M	1
+#define QTN_BW_80M	2
+#define QTN_BW_MAX	QTN_BW_80M
+
+#define QTN_MAILBOX_INVALID	0xffffffff	/* Invalid value to indicate mailbox is disabled */
+
+enum ni_tdls_status {
+	IEEE80211_TDLS_NODE_STATUS_NONE = 0,
+	IEEE80211_TDLS_NODE_STATUS_INACTIVE = 1,
+	IEEE80211_TDLS_NODE_STATUS_STARTING = 2,
+	IEEE80211_TDLS_NODE_STATUS_ACTIVE = 3,
+	IEEE80211_TDLS_NODE_STATUS_IDLE = 4
+};
+
+/* WoWLAN APIs */
+enum qtn_vap_wowlan_cmds {
+	IEEE80211_WOWLAN_HOST_POWER_SAVE = 1,
+	IEEE80211_WOWLAN_MATCH_TYPE,
+	IEEE80211_WOWLAN_L2_ETHER_TYPE,
+	IEEE80211_WOWLAN_L3_UDP_PORT,
+	IEEE80211_WOWLAN_MAGIC_PATTERN,
+	IEEE80211_WOWLAN_MAGIC_PATTERN_GET,
+	IEEE80211_WOWLAN_SET_MAX
+};
+/*
+ * Definitions relating to individual fields from phy_stats,
+ * shared between the Q driver and the APIs.
+ */
+
+/*
+ * Error Sum needs to be reported together with the corresponding Number of
+ * Symbols; getting them in separate operations would introduce a race condition
+ * where the Error Sum and the Number of Symbols came from different
+ * PHY stat blocks.
+ */
+
+#define QTN_PHY_AVG_ERROR_SUM_NSYM_NAME			"avg_error_sum_nsym"
+
+#define QTN_PHY_EVM_MANTISSA_SHIFT		5
+#define QTN_PHY_EVM_EXPONENT_MASK		0x1f
+
+enum qtn_phy_stat_field {
+	QTN_PHY_NOSUCH_FIELD = -1,
+	QTN_PHY_AVG_ERROR_SUM_NSYM_FIELD,
+};
+
+#define QTN_M2A_TX_SCALE_BITS	4
+#define QTN_M2A_TX_SCALE_MASK	((1 << QTN_M2A_TX_SCALE_BITS) - 1)
+
+/* only for little endian */
+#if defined(AUC_BUILD)
+#define U64_LOW32(_v)		((uint32_t)(_v))
+#define U64_HIGH32(_v)		((uint32_t)((_v) >> 32))
+#else
+#define U64_LOW32(_v)		(((uint32_t*)&(_v))[0])
+#define U64_HIGH32(_v)		(((uint32_t*)&(_v))[1])
+#endif
+
+#define U64_COMPARE_GE(_a, _b)	((U64_HIGH32(_a) > U64_HIGH32(_b)) ||	\
+				((U64_HIGH32(_a) == U64_HIGH32(_b)) && (U64_LOW32(_a) >= U64_LOW32(_b))))
+
+#define U64_COMPARE_GT(_a, _b)	((U64_HIGH32(_a) > U64_HIGH32(_b)) ||	\
+				((U64_HIGH32(_a) == U64_HIGH32(_b)) && (U64_LOW32(_a) > U64_LOW32(_b))))
+
+#define U64_COMPARE_LE(_a, _b)	((U64_HIGH32(_a) < U64_HIGH32(_b)) ||	\
+				((U64_HIGH32(_a) == U64_HIGH32(_b)) && (U64_LOW32(_a) <= U64_LOW32(_b))))
+
+#define U64_COMPARE_LT(_a, _b)	((U64_HIGH32(_a) < U64_HIGH32(_b)) ||	\
+				((U64_HIGH32(_a) == U64_HIGH32(_b)) && (U64_LOW32(_a) < U64_LOW32(_b))))
+
+#ifndef MAC2STR
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02X:%02X:%02X:%02X:%02X:%02X"
+#define MACSTRL "%02x:%02x:%02x:%02x:%02x:%02x"	/* for MuC and Auc which don't support "X" */
+#endif
+
+/*
+ * VSP/QTM
+ * Macro TOPAZ_QTM is used to help identify changes between original VSP and QTM.
+ * In Lhost kernel driver, it must be used within CONFIG_QVSP(in kernel .config).
+ * CONFIG_QVSP	TOPAZ_QTM	ruby		topaz
+ * Y		1		invalid		*QTM works
+ * Y		0		*VSP works	VSP alive but doesn't work for HDP
+ * N		1		invalid		*no VSP/QTM
+ * N		0		*no VSP		no VSP/QTM, and no QTM changes in MuC and AuC
+ * So generally, solely changing CONFIG_QVSP works for both ruby and topaz as indicated by *.
+ * But to thoroughly clean QTM code in AuC and MuC, disable TOPAZ_QTM in topaz below.
+ */
+#define TOPAZ_QTM		1
+
+#define COMPILE_TIME_ASSERT(constant_expr)	\
+do {						\
+	switch(0) {				\
+		case 0:				\
+		case constant_expr:		\
+		;				\
+	}					\
+} while(0)
+
+/**@addtogroup DFSAPIs
+ *@{*/
+/**
+ * Reason for channel change
+ */
+#define CSW_REASON_MASK 0xff
+#define CSW_SCS_FLAG_SHIFT 16
+#define CSW_SCS_FLAG_MASK 0xff0000
+#define CSW_SCS_FLAG_STRING_MAX 64
+
+#define	CSW_REASON_GET_SCS_FLAG(_reason) (((_reason) & CSW_SCS_FLAG_MASK) >> CSW_SCS_FLAG_SHIFT)
+#define CSW_REASON_SET_SCS_FLAG(_scs_flag, _reason)	((((_scs_flag) << CSW_SCS_FLAG_SHIFT) & CSW_SCS_FLAG_MASK) | (_reason))
+enum ieee80211_csw_reason {
+	/**
+	 * Reason is unknown
+	 */
+	IEEE80211_CSW_REASON_UNKNOWN,
+	/**
+	 * Smart channel selection
+	 */
+	IEEE80211_CSW_REASON_SCS,
+	/**
+	 * Radar detection
+	 */
+	IEEE80211_CSW_REASON_DFS,
+	/**
+	 * Channel set by user
+	 */
+	IEEE80211_CSW_REASON_MANUAL,
+	/**
+	 * Configuration change
+	 */
+	IEEE80211_CSW_REASON_CONFIG,
+	/**
+	 * Scan initiated by user
+	 */
+	IEEE80211_CSW_REASON_SCAN,
+	/**
+	 * Off-channel CAC
+	 */
+	IEEE80211_CSW_REASON_OCAC,
+	/**
+	 * Channel switch announcement
+	 */
+	IEEE80211_CSW_REASON_CSA,
+	/**
+	 * TDLS Channel switch announcement
+	 */
+	IEEE80211_CSW_REASON_TDLS_CS,
+	/**
+	 * Transition to COC mode
+	 */
+	IEEE80211_CSW_REASON_COC,
+	/**
+	 * Number of values
+	 */
+	IEEE80211_CSW_REASON_MAX
+};
+/**@}*/
+
+/*
+ * Reasons for channel switches that are not recorded and therefore
+ * should not be listed in QCSAPI documentation
+ */
+enum ieee80211_csw_reason_private {
+	IEEE80211_CSW_REASON_SAMPLING = IEEE80211_CSW_REASON_MAX,
+	IEEE80211_CSW_REASON_OCAC_RUN,
+	IEEE80211_CSW_REASON_BGSCAN,
+};
+
+/* Keep this in sync with swfeat_desc */
+enum swfeat {
+	SWFEAT_ID_MODE_AP,
+	SWFEAT_ID_MODE_STA,
+	SWFEAT_ID_MODE_REPEATER,
+	SWFEAT_ID_PCIE_RC,
+	SWFEAT_ID_VHT,
+	SWFEAT_ID_2X2,
+	SWFEAT_ID_2X4,
+	SWFEAT_ID_3X3,
+	SWFEAT_ID_4X4,
+	SWFEAT_ID_HS20,
+	SWFEAT_ID_WPA2_ENT,
+	SWFEAT_ID_MESH,
+	SWFEAT_ID_TDLS,
+	SWFEAT_ID_OCAC,
+	SWFEAT_ID_QHOP,
+	SWFEAT_ID_QSV,
+	SWFEAT_ID_QSV_NEIGH,
+	SWFEAT_ID_MU_MIMO,
+	SWFEAT_ID_DUAL_CHAN_VIRT,
+	SWFEAT_ID_DUAL_CHAN,
+	SWFEAT_ID_DUAL_BAND_VIRT,
+	SWFEAT_ID_DUAL_BAND,
+	SWFEAT_ID_QTM_PRIO,
+	SWFEAT_ID_QTM,
+	SWFEAT_ID_SPEC_ANALYZER,
+	SWFEAT_ID_MAX
+};
+
+#define SWFEAT_MAP_SIZE (SWFEAT_ID_MAX / 8 + 1)
+
+/* Used to scale temperature measurements */
+#define QDRV_TEMPSENS_COEFF    100000
+#define QDRV_TEMPSENS_COEFF10  (10 * QDRV_TEMPSENS_COEFF)
+
+/* Define the max software retry number for aggregation and none-aggregation frames */
+#define	QTN_TX_SW_ATTEMPTS_AGG_MAX 8
+#define QTN_TX_SW_ATTEMPTS_NOAGG_MAX 1
+
+/* Aligns the supplied size to the specified power_of_two */
+#define QTN_ALIGN_TO(size_to_align, power_of_two) \
+	(((size_to_align) + (power_of_two) - 1) & ~((power_of_two) - 1))
+
+#define FIELD_ARRAY_SIZE(t, a)	(sizeof((((t*)0)->a))/sizeof(((((t*)0)->a))[0]))
+
+#endif /* _SHARED_DEFS_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_defs_common.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_defs_common.h
new file mode 100644
index 0000000..6d15bf7
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_defs_common.h
@@ -0,0 +1,156 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2014 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications Inc                               **
+**  File        : shared_defs.h                                              **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _SHARED_DEFS_COMMON_H_
+#define _SHARED_DEFS_COMMON_H_
+
+/*
+ * Default board type is 0 to match the default (fallback) from get_bootval.
+ * Script returns 0 if the parameter is not defined.
+ */
+#define  QTN_RUBY_BOARD_TYPE_DEFAULT			0
+
+#define  QTN_RUBY_BRINGUP_BOARD				0
+#define  QTN_RUBY_BRINGUP_BOARD_32_320			1
+#define  QTN_RUBY_BRINGUP_BOARD_16_320			2
+#define  QTN_RUBY_BRINGUP_BOARD_16_160			3
+#define  QTN_RUBY_BRINGUP_BOARD_ETRON			4
+#define  QTN_RUBY_BRINGUP_BOARD_ETRON_320		5
+#define  QTN_RUBY_BRINGUP_BOARD_ETRON_160		6
+#define  QTN_RUBY_BRINGUP_BOARD_16_200			7
+#define  QTN_RUBY_BRINGUP_BOARD_32_200			8
+#define  QTN_RUBY_BRINGUP_BOARD_PCIE			9
+/* diag board ids */
+#define  QTN_RUBY_BRINGUP_BOARD_32_160_ARB		10
+#define  QTN_RUBY_BRINGUP_BOARD_32_160_ARB_1		11
+#define  QTN_RUBY_BRINGUP_BOARD_16_160_ARB_1		12
+#define  QTN_RUBY_BRINGUP_BOARD_32_160_ARB_0		13
+#define  QTN_RUBY_BRINGUP_BOARD_ETRON_160_EMAC1		14
+#define  QTN_RUBY_BRINGUP_BOARD_ETRON_250_EMAC1		15
+#define  QTN_RUBY_BRINGUP_BOARD_ETRON_32_320_EMAC1	16
+#define  QTN_RUBY_BRINGUP_ETRON32_160			17
+#define  QTN_RUBY_BRINGUP_ETRON32_320			18
+#define  QTN_RUBY_BRINGUP_BOARD_MICRON_DUALEMAC		19
+#define  QTN_RUBY_BRINGUP_BOARD_MICRON_DUALEMAC_MII	20
+#define  QTN_RUBY_BRINGUP_BOARD_MICRON_DUALEMAC_LOOPBACK 21
+#define  QTN_RUBY_BRINGUP_BOARD_16_160_DUALEMAC		22
+
+
+#define  QTN_RUBY_REFERENCE_DESIGN_BOARD		1000
+#define  QTN_RUBY_REFERENCE_DESIGN_BOARD_250		1001
+#define  QTN_RUBY_REF_BOARD_DUAL_CON			1002
+#define  QTN_RUBY_REFERENCE_DESIGN_BOARD_320		1003
+#define  QTN_RUBY_ETRON_32_320_EMAC1			1004
+#define  QTN_RUBY_ETRON_32_250_EMAC1			1005
+#define  QTN_RUBY_REFERENCE_DESIGN_BOARD_RGMII_DLL	1006
+#define  QTN_RUBY_QHS710_5S5_SIGE_DDR250		1007
+#define  QTN_RUBY_QHS710_5S5_SIGE_DDR320		1008
+#define  QTN_RUBY_OHS711_PCIE_320DDR			1009
+/* pcie reference ids */
+#define  QTN_RUBY_QHS713_5S1_PCIERC_DDR160		1170
+#define  QTN_RUBY_OHS711_5S13_PCIE_DDR320		1171 /* duplicate of 1009 */
+#define  QTN_RUBY_QHS713_5S1_PCIERC_DDR320		1172
+
+#define  QTN_RUBY_ODM_BOARD_0				1200
+#define  QTN_RUBY_ODM_BOARD_1				1201
+#define  QTN_RUBY_ODM_BOARD_2				1202
+#define  QTN_RUBY_ODM_BOARD_3				1203
+#define  QTN_RUBY_ODM_BOARD_4				1204
+#define  QTN_RUBY_ODM_BOARD_5				1205
+#define  QTN_RUBY_ODM_BOARD_6				1206
+#define  QTN_RUBY_ODM_BOARD_7				1207
+#define  QTN_RUBY_ODM_BOARD_8				1208
+#define  QTN_RUBY_ODM_BOARD_9				1209
+#define  QTN_RUBY_ODM_BOARD_10				1210
+#define  QTN_RUBY_ODM_BOARD_11				1211
+#define  QTN_RUBY_ODM_BOARD_12				1212
+#define  QTN_RUBY_ODM_BOARD_13				1213
+#define  QTN_RUBY_ODM_BOARD_14				1214
+#define  QTN_RUBY_ODM_BOARD_15				1215
+#define  QTN_RUBY_ODM_BOARD_16				1216
+#define  QTN_RUBY_ODM_BOARD_17				1217
+#define  QTN_RUBY_ODM_BOARD_18				1218
+#define  QTN_RUBY_ODM_BOARD_19				1219
+#define  QTN_RUBY_ODM_BOARD_20				1220
+#define  QTN_RUBY_ODM_BOARD_21				1221
+#define  QTN_RUBY_ODM_BOARD_22				1222
+#define  QTN_TOPAZ_FPGAA_BOARD				1223
+#define  QTN_TOPAZ_FPGAB_BOARD				1224
+#define  QTN_TOPAZ_DUAL_EMAC_FPGAA_BOARD		1225
+#define  QTN_TOPAZ_DUAL_EMAC_FPGAB_BOARD		1226
+#define  QTN_TOPAZ_RC_BOARD				1227
+#define  QTN_TOPAZ_EP_BOARD				1228
+#define  QTN_TOPAZ_BB_BOARD				1229
+#define  QTN_TOPAZ_RF_BOARD				1230
+#define  QTN_TOPAZ_QHS840_5S1				1231
+#define  QTN_TOPAZ_RGMII_RFIC6				2008
+
+#define		QTN_RUBY_AUTOCONFIG_ID				32768
+#define		QTN_RUBY_UNIVERSAL_BOARD_ID			65535
+
+#define  QTN_RUBY_NOSUCH_BOARD_TYPE			-1
+
+#define  QTN_RUBY_BRINGUP_RWPA				0
+#define  QTN_RUBY_REF_RWPA				1
+#define  QTN_RUBY_SIGE					2
+#define  QTN_RUBY_UNDEFINED				3
+#define  QTN_RUBY_WIFI_NONE				4
+#define	 QTN_TPZ_SE5003L1				5
+#define	 QTN_TPZ_SE5003L1_INV				6
+#define  QTN_TPZ_SKY85703				7
+#define  QTN_TPZ_SKY85405_BPF840			8
+#define  QTN_TPZ_DBS					9	/* BBIC4 + RFIC6 */
+#define  QTN_TPZ_SE5502L				10	/* BBIC4 + RFIC5 */
+#define  QTN_TPZ_SKY85710_NG				11
+#define	 QTN_TPZ_DBS_5591				13	/* BBIC4 A2 + RFIC6 */
+#define	 QTN_TPZ_DBS_NXP_BGU7224_BGU7258		14	/* BBIC4 A2 + RFIC6  DBS support*/
+#define	 QTN_TPZ_2_4GHZ_NXP_BGU7224			15	/* BBIC4 A2 + RFIC6 2.4ghz only */
+#define	 QTN_TPZ_5GHZ_NXP_BGU7258			16	/* BBIC4 A2 + RFIC6 5ghz only */
+#define	 QTN_TPZ_5GHZ_SKY85728				17	/* BBIC4 A2 + RFIC4 5ghz only and BBIC4 A2 + RFIC6 5ghz only */
+#define  QTN_TPZ_DBS_SKY85806_SKY85811			21	/* BBIC4 + RFIC6 E0 2x4 DBS */
+
+#endif /* _SHARED_DEFS_COMMON_H_ */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_params.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_params.h
new file mode 100644
index 0000000..5970371
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_params.h
@@ -0,0 +1,284 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2010 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications Inc                               **
+**  File        : shared_params.h                                            **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _SHARED_PARAMS_H_
+#define _SHARED_PARAMS_H_
+
+#ifndef SYSTEM_BUILD
+#include "../common/ruby_config.h"
+#endif
+#include <qtn/shared_defs.h>
+#include <qtn/topaz_shared_params.h>
+
+/*
+ * Forward declarations.
+ */
+struct qtn_txbf_mbox;
+struct qtn_bb_mutex;
+struct qtn_csa_info;
+struct qtn_samp_chan_info;
+struct qtn_scs_info;
+struct qtn_scs_info_set;
+struct qtn_ocac_info;
+
+#define QTN_SEM_TRACE 1
+/*
+ * As DSP accesses the semaphore with high frequency, to minimize performance impact, it would be better to
+ * enable DSP sem trace log only in non-release build.
+ */
+#define QTN_SEM_TRACE_DSP 0
+#if QTN_SEM_TRACE
+struct qtn_sem_trace_log;
+#endif
+
+#ifndef IEEE80211_ADDR_LEN
+	#define IEEE80211_ADDR_LEN	6
+#endif
+
+#define MUC_FCS_CORRUPTION	0x00000001
+#define MUC_QOS_Q_MERGE		0x00000002
+
+#define MUC_PROFILE_DROP	0x08000000
+#define MUC_PROFILE_DCACHE	0x10000000
+#define MUC_PROFILE_P		0x20000000
+#define MUC_PROFILE_EP		0x40000000
+#define MUC_PROFILE_IPTR	0x80000000
+
+#define MUC_BOOT_WAIT_SECS	20
+
+#define CHIP_ID_RUBY			0x30
+#define CHIP_ID_TOPAZ			0x40
+#define REV_ID_RUBY_A			0x0
+#define REV_ID_RUBY_B			0x1
+#define REV_ID_RUBY_D			0x3
+#define REV_ID_TOPAZ_A			0x0
+#define REV_ID_TOPAZ_B			0x1
+#define REV_ID_TOPAZ_A2			0x3
+#define HARDWARE_REVISION_UNKNOWN	0
+#define HARDWARE_REVISION_RUBY_A	1
+#define HARDWARE_REVISION_RUBY_B	2
+#define HARDWARE_REVISION_RUBY_D	3
+#define HARDWARE_REVISION_TOPAZ_A	4
+#define HARDWARE_REVISION_TOPAZ_B	5
+#define HARDWARE_REVISION_TOPAZ_A2	6
+#define CHIP_ID_MASK			0xF0
+#define CHIP_ID_SW_MASK			0xC0	/* bits set in sw, for downgrade only */
+#define CHIP_REV_ID_MASK		0x0F
+
+#define HW_OPTION_BONDING_RUBY_PROD	(HARDWARE_REVISION_RUBY_D << 8)
+#define HW_OPTION_BONDING_RUBY_UNKNOWN	(HW_OPTION_BONDING_RUBY_PROD | 0x00000000)
+#define HW_OPTION_BONDING_RUBY_2x4STA	(HW_OPTION_BONDING_RUBY_PROD | 0x00000001)
+#define HW_OPTION_BONDING_RUBY_4SS	(HW_OPTION_BONDING_RUBY_PROD | 0x00000002)
+
+#define HW_PLAT_TOPAZ_QV860		0x00
+#define HW_PLAT_TOPAZ_QV860_2X2		0x80	/* downgrade only */
+#define HW_PLAT_TOPAZ_QV860_2X4		0x40	/* downgrade only */
+#define HW_PLAT_TOPAZ_QV860_3X3		0xF0	/* downgrade only */
+#define HW_PLAT_TOPAZ_QD840		0x01
+#define HW_PLAT_TOPAZ_QV880		0x32
+#define HW_PLAT_TOPAZ_QV880_2X2		0xb2	/* downgrade only */
+#define HW_PLAT_TOPAZ_QV880_2X4		0x72	/* downgrade only */
+#define HW_PLAT_TOPAZ_QV880_3X3		0xF2	/* downgrade only */
+#define HW_PLAT_TOPAZ_QV920		0x03
+#define HW_PLAT_TOPAZ_QV920_2X4		0x43	/* downgrade only */
+#define HW_PLAT_TOPAZ_QV840		0x04
+#define HW_PLAT_TOPAZ_QV840_2X4		0x44	/* downgrade only */
+#define HW_PLAT_TOPAZ_QV940		0x05
+#define HW_PLAT_TOPAZ_QV840C		0x06
+
+#define HW_OPTION_BONDING_TOPAZ_PROD	(HARDWARE_REVISION_TOPAZ_B << 8)
+enum hw_opt_t {
+	HW_OPTION_BONDING_TOPAZ_QV860 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV860),
+	HW_OPTION_BONDING_TOPAZ_QV860_2X2 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV860_2X2),
+	HW_OPTION_BONDING_TOPAZ_QV860_2X4 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV860_2X4),
+	HW_OPTION_BONDING_TOPAZ_QV860_3X3 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV860_3X3),
+	HW_OPTION_BONDING_TOPAZ_QD840 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QD840),
+	HW_OPTION_BONDING_TOPAZ_QV880 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV880),
+	HW_OPTION_BONDING_TOPAZ_QV880_2X2 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV880_2X2),
+	HW_OPTION_BONDING_TOPAZ_QV880_2X4 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV880_2X4),
+	HW_OPTION_BONDING_TOPAZ_QV880_3X3 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV880_3X3),
+	HW_OPTION_BONDING_TOPAZ_QV920 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV920),
+	HW_OPTION_BONDING_TOPAZ_QV920_2X4 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV920_2X4),
+	HW_OPTION_BONDING_TOPAZ_QV840 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV840),
+	HW_OPTION_BONDING_TOPAZ_QV840_2X4 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV840_2X4),
+	HW_OPTION_BONDING_TOPAZ_QV940 = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV940),
+	HW_OPTION_BONDING_TOPAZ_QV840C = (HW_OPTION_BONDING_TOPAZ_PROD | HW_PLAT_TOPAZ_QV840C),
+};
+
+#define HW_OPTION_BONDING_NOT_SET	0xFFFFFFFF
+
+typedef struct shared_params
+{
+	u_int32_t		tqe_sem_en; /*replaced for TQE SWR lh_flags; */
+	u_int16_t		lh_chip_id;
+	u_int8_t		rf_chip_id;
+	u_int8_t		vco_lock_detect_mode;
+	u_int8_t		lh_wifi_hw;
+	u_int8_t		lh_num_devices;
+	u_int8_t		lh_mac_0[ IEEE80211_ADDR_LEN ];
+	u_int8_t		lh_mac_1[ IEEE80211_ADDR_LEN ];
+
+	u_int32_t		uc_flags;
+	u_int32_t		uc_hostlink;
+
+	u_int32_t		m2l_hostlink_mbox;
+	u_int32_t		m2l_printbuf_producer;
+
+	u_int64_t		last_chan_sw_tsf;
+	u_int32_t		l2m_sem;
+	u_int32_t		m2l_sem;
+
+	int			hardware_revision;
+	uint32_t		hardware_options;
+	uint8_t			swfeat_map[SWFEAT_MAP_SIZE];
+	int8_t			shortrange_scancnt;
+	uint8_t			slow_ethernet_war;
+	uint8_t			calstate;
+	uint8_t			post_rfloop;
+#define QTN_IOT_INTEL5100_TWEAK		0x00000001
+#define QTN_IOT_INTEL6200_TWEAK		0x00000002
+#define QTN_IOT_INTEL6300_TWEAK		0x00000004
+#define QTN_IOT_INTELFD_TWEAK		0x00000008
+#define QTN_IOT_INTEL_SEND_NCW_ACTION   0x00000010   /* IOT action: send Notify Channel Width Action frame to Intel */
+#define QTN_IOT_BCM_TWEAK		0x00000020   /* Disable aggregation on Broadcom MBP clients */
+#define QTN_IOT_INTEL_NOAGG2TXCHAIN_TWEAK     0x00000040   /* NO Aggregation & 2 Tx chain restriction for some Intel */
+#define QTN_IOT_BCM_NO_3SS_MCS_TWEAK	0x00000080   /* Disable 3ss MCS for Broadcom MBP clients */
+#define QTN_IOT_BCM_AMSDU_DUTY_TWEAK	0x00000100   /* AMSDU duty cycle tweak */
+#define QTN_IOT_BCM_MBA_AMSDU_TWEAK	0x00000200   /* MBA doesn't work with 7.9k AMSDU with security mode */
+#define QTN_IOT_RLNK_NO_3SS_MCS_TWEAK	0x00000400   /* Disable 3ss MCS for Ralink clients */
+#define QTN_IOT_RTK_NO_AMSDU_TWEAK	0x00000800   /* Disable A-MSDU for Realtek devices */
+#define QTN_IOT_DEFAULT_TWEAK		(QTN_IOT_BCM_MBA_AMSDU_TWEAK | \
+					 QTN_IOT_BCM_AMSDU_DUTY_TWEAK | \
+					 QTN_IOT_RLNK_NO_3SS_MCS_TWEAK | \
+					 QTN_IOT_RTK_NO_AMSDU_TWEAK \
+					)
+	uint32_t		iot_tweaks;
+	uint32_t		fw_no_mu; /* use FW compiled without MU-MIMO support */
+
+	struct qtn_txbf_mbox*	txbf_mbox_lhost;
+	struct qtn_txbf_mbox*	txbf_mbox_bus;
+
+	struct qtn_muc_dsp_mbox *muc_dsp_mbox_lhost;
+	struct qtn_muc_dsp_mbox *muc_dsp_mbox_bus;
+
+	struct qtn_bb_mutex*	bb_mutex_lhost;
+	struct qtn_bb_mutex*	bb_mutex_bus;
+
+	struct qtn_csa_info*	csa_lhost;
+	struct qtn_csa_info*	csa_bus;
+
+	struct qtn_samp_chan_info*	chan_sample_lhost;
+	struct qtn_samp_chan_info*	chan_sample_bus;
+
+	struct qtn_scan_chan_info*	chan_scan_lhost;
+	struct qtn_scan_chan_info*	chan_scan_bus;
+
+	struct qtn_scs_info_set*	scs_info_lhost;
+	struct qtn_scs_info_set*	scs_info_bus;
+
+	struct qtn_remain_chan_info*	remain_chan_lhost;
+	struct qtn_remain_chan_info*	remain_chan_bus;
+	struct qtn_ocac_info*	ocac_lhost;
+	struct qtn_ocac_info*	ocac_bus;
+
+	struct qtn_bmps_info*	bmps_lhost;
+	struct qtn_bmps_info*	bmps_bus;
+
+	struct qtn_meas_chan_info*	chan_meas_lhost;
+	struct qtn_meas_chan_info*	chan_meas_bus;
+
+#if QTN_SEM_TRACE
+	struct qtn_sem_trace_log *sem_trace_log_lhost;
+	struct qtn_sem_trace_log *sem_trace_log_bus;
+#endif
+
+	struct qtn_vlan_dev **vdev_lhost;
+	struct qtn_vlan_dev **vdev_bus;
+	struct qtn_vlan_dev **vport_lhost;
+	struct qtn_vlan_dev **vport_bus;
+	struct topaz_ipmac_uc_table *ipmac_table_bus;
+	struct qtn_vlan_info *vlan_info;
+
+#if CONFIG_RUBY_BROKEN_IPC_IRQS
+	u_int32_t		m2l_irq[2];
+#endif
+
+	void *			p_debug_1;
+	int			debug_1_arg;
+	void *			p_debug_2;
+	int			debug_2_arg;
+
+	u_int32_t		pm_duty_lock;
+
+#define QTN_EXT_LNA_GAIN_MAX	126
+	int			ext_lna_gain;
+	int			ext_lna_bypass_gain;
+	int			tx_power_cal;
+	int			hardware_id;
+	int			min_tx_power;
+	int			max_tx_power;
+#define QTN_FW_VERSION_LENGTH	32
+	char			fw_version[QTN_FW_VERSION_LENGTH + 1];
+#ifndef SYSTEM_BUILD
+	shared_params_auc	auc;
+#endif
+
+	int			cca_adjusting_flag;
+	int			active_tid_num;
+	uint32_t		bb_param;
+	uint32_t		cs_thresh_base_val;	/* Carrier sense threshold base value */
+	uint32_t		qtn_hal_pm_corrupt;
+	uint32_t		qtn_hal_pm_corrupt_debug;
+	uint32_t		free_airtime;		/* in ms */
+	struct nac_mon_info	*nac_mon_info;
+	struct nac_mon_info	*nac_mon_info_bus;
+	uint16_t		chan_util;	/* channel utilization */
+} shared_params;
+
+#define QTN_RATE_TRAIN_DATA_LEN		64
+#define QTN_RATE_TRAIN_BYTE		0x2A
+
+#endif /* _SHARED_PARAMS_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_print_buf.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_print_buf.h
new file mode 100644
index 0000000..aef5085
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/shared_print_buf.h
@@ -0,0 +1,16 @@
+#ifndef __SHARED_PRINT_BUF
+#define __SHARED_PRINT_BUF
+
+struct shared_print_producer {
+	u32	produced;
+	u32	bufsize;
+	char*	buf;		/* producer address space ptr */
+};
+
+struct shared_print_consumer {
+	const volatile struct shared_print_producer * producer;
+	u32 consumed;
+	char* buf;		/* consumer address space ptr */
+};
+
+#endif // __SHARED_PRINT_BUF
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/skb_recycle.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/skb_recycle.h
new file mode 100644
index 0000000..5537cc2
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/skb_recycle.h
@@ -0,0 +1,279 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_SKBUFF_H
+#define __QTN_SKBUFF_H
+
+#include <common/queue.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/interrupt.h>
+
+#include "qtn_global.h"
+#include "qtn_skb_size.h"
+#include "qtn_buffers.h"
+
+#define SKB_RECYCLE_STATS
+
+#ifdef SKB_RECYCLE_STATS
+#define SKB_RECYCLE_STAT(x)	((x) = (x) + 1)
+#else
+#define SKB_RECYCLE_STAT(x)
+#endif
+
+#define QTN_SKB_RECYCLE_ENABLE		0
+
+/**
+ * \addtogroup LHOST_STATS
+ */
+/** @{ */
+
+/**
+ * \brief Linux SKB recycle statistics.
+ *
+ * These statistics are generated and counted per-interface. They are used
+ * primarily as a performance metric - the more recylcling that succeeds, the
+ * more efficient the system will be (less chance for bottlenecks/dropped
+ * packets).
+ */
+struct qtn_skb_recycle_stats {
+	/**
+	 * The number of packets for the given interface that have been
+	 * successfully recycled.
+	 */
+	u32 free_recycle_pass;
+
+	/**
+	 * The number of packets for the given interface that have failed to
+	 * be recycled. If this counter is constantly increasing at a rapid
+	 * rate (approx. the same as the per-packet count of traffic streams),
+	 * then this can indicate a performance issue.
+	 */
+	u32 free_recycle_fail;
+
+	/**
+	 * This counter shows the number of undersized packets that have been
+	 * incorrectly pushed to the recycle code.
+	 *
+	 * \note It is an error for this counter to be anything other than
+	 * zero.
+	 */
+	u32 free_recycle_fail_undersize;
+
+	/**
+	 * This counter shows the number of packets that have been allocated
+	 * off the shared buffer pool.
+	 */
+	u32 alloc_recycle;
+
+	/**
+	 * This counter shows the number of packets that have dropped back to
+	 * the kernel alloc function due to not having any packets in the
+	 * recycle pool.
+	 */
+	u32 alloc_kmalloc;
+};
+/** @} */
+
+struct qtn_skb_recycle_list {
+	struct sk_buff_head list;			/* Shared buffers between wireless and Ethernet driver */
+	int		max;				/* Maximum size of the skb_list */
+	struct qtn_skb_recycle_stats stats_qdrv;	/* skb free/alloc stats for qdrv */
+	struct qtn_skb_recycle_stats stats_eth;		/* skb free/alloc stats for ethernet driver */
+#if defined(CONFIG_RUBY_PCIE_HOST) || defined(CONFIG_RUBY_PCIE_TARGET) \
+	|| defined(CONFIG_TOPAZ_PCIE_HOST) || defined(CONFIG_TOPAZ_PCIE_TARGET)
+	struct qtn_skb_recycle_stats stats_pcie;	/* skb free/alloc stats for pcie driver */
+#endif
+	struct qtn_skb_recycle_stats stats_kfree;	/* skb free stats for the kfree_skb collector */
+	int (*recycle_func)(struct qtn_skb_recycle_list *recycle_list,
+				struct sk_buff *skb);	/* skb recycling check function */
+};
+
+
+/* Define RX buffer size and mapping */
+__inline__ static unsigned long rx_buf_map_size(void)
+{
+	return RX_BUF_SIZE + roundup(NET_IP_ALIGN, dma_get_cache_alignment());
+}
+__inline__ static unsigned long qtn_rx_buf_size(void)
+{
+	/*
+	 * Make sure that we can flush cache (both beginning and ending
+	 * must be aligned on cache line) - otherwise flush would not work.
+	 * Also make sure that we can reserve NET_IP_ALIGN
+	 * at the beginning of data after do cache flush.
+	 * Without NET_IP_ALIGN reserved, the IP header will not be aligned
+	 * and network stack can kick unaligned access exception, which is expensive
+	 * or even would crash kernel if unaligned access handler is not implemented.
+	 */
+	return rx_buf_map_size() + dma_get_cache_alignment() - 1;
+}
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,30)
+static inline struct dst_entry *skb_dst(struct sk_buff *skb)
+{
+	return skb->dst;
+}
+#endif
+
+#ifdef CONFIG_QTN_SKB_RECYCLE
+static __inline__ int __qtn_skb_recyclable_check(struct qtn_skb_recycle_stats *stats, struct sk_buff *skb)
+{
+	if (!QTN_SKB_RECYCLE_ENABLE) {
+		return 0;
+	}
+
+	if (unlikely(skb->next)) {
+		printk(KERN_EMERG "skb being recycled: 0x%p is queued\n", skb);
+		return 0;
+	}
+
+	if (!skb->is_recyclable ||
+			skb->next ||
+			skb_dst(skb) ||
+			skb_shared(skb) ||
+			skb_is_nonlinear(skb) ||
+			skb_shinfo(skb)->nr_frags ||
+			skb_shinfo(skb)->frag_list ||
+			skb_shinfo(skb)->gso_size ||
+			skb_cloned(skb) ||
+			atomic_read(&(skb_shinfo(skb)->dataref)) != 1) {
+		return 0;
+	}
+
+	/* check for undersize skb; this should never happen, and indicates problems elsewhere */
+	if (skb_end_pointer(skb) - skb->head < qtn_rx_buf_size()) {
+		SKB_RECYCLE_STAT(stats->free_recycle_fail_undersize);
+		return 0;
+	}
+
+	return 1;
+}
+
+static __inline__ struct sk_buff *qtn_skb_recycle_list_pop(
+		struct qtn_skb_recycle_list *recycle_list,
+		struct qtn_skb_recycle_stats *stats)
+{
+	struct sk_buff *skb = NULL;
+	unsigned long flags;
+
+	if (!QTN_SKB_RECYCLE_ENABLE) {
+		return NULL;
+	}
+
+	spin_lock_irqsave(&recycle_list->list.lock, flags);
+	skb = __skb_dequeue(&recycle_list->list);
+	if (skb) {
+		SKB_RECYCLE_STAT(stats->alloc_recycle);
+	} else {
+		SKB_RECYCLE_STAT(stats->alloc_kmalloc);
+	}
+	spin_unlock_irqrestore(&recycle_list->list.lock, flags);
+
+	return skb;
+}
+
+/*
+ * Push a used skb onto the recycle list. returns 1 if it was pushed onto the list
+ */
+static __inline__ int qtn_skb_recycle_list_push(struct qtn_skb_recycle_list *recycle_list,
+		struct qtn_skb_recycle_stats *stats, struct sk_buff *skb)
+{
+	int pushed = 0;
+	unsigned long flags;
+	struct skb_shared_info *shinfo;
+
+	if (!QTN_SKB_RECYCLE_ENABLE) {
+		return 0;
+	}
+
+	spin_lock_irqsave(&recycle_list->list.lock, flags);
+
+	if (skb_queue_len(&recycle_list->list) < recycle_list->max) {
+		if (__qtn_skb_recyclable_check(stats, skb)) {
+			if (skb->destructor) {
+				WARN_ON(in_irq());
+				skb->destructor(skb);
+				skb->destructor = NULL;
+			}
+
+			skb->len = 0;
+			skb->priority = 0;
+			skb->dest_port = 0;
+			skb->src_port = 0;
+			skb->is_recyclable = 0;
+			skb->tail = skb->data = skb->head;
+			skb->vlan_tci = 0;
+			skb->orig_dev = NULL;
+			skb_reserve(skb, NET_SKB_PAD);
+
+			memset(skb->cb, 0, sizeof(skb->cb));
+			memset(&skb->qtn_cb, 0, sizeof(skb->qtn_cb));
+
+			shinfo = skb_shinfo(skb);
+			memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+
+			__skb_queue_tail(&recycle_list->list, skb);
+			pushed = 1;
+		}
+	}
+
+	spin_unlock_irqrestore(&recycle_list->list.lock, flags);
+
+	if (pushed) {
+		SKB_RECYCLE_STAT(stats->free_recycle_pass);
+	} else {
+		SKB_RECYCLE_STAT(stats->free_recycle_fail);
+	}
+
+	return pushed;
+}
+
+static __inline__ struct qtn_skb_recycle_list *qtn_get_shared_recycle_list(void)
+{
+	extern struct qtn_skb_recycle_list __qtn_skb_recycle_list;
+	return &__qtn_skb_recycle_list;
+}
+
+#else // CONFIG_QTN_SKB_RECYCLE
+static __inline__ struct sk_buff *qtn_skb_recycle_list_pop(
+		struct qtn_skb_recycle_list *recycle_list,
+		struct qtn_skb_recycle_stats *stats)
+{
+	return NULL;
+}
+
+static __inline__ int qtn_skb_recycle_list_push(struct qtn_skb_recycle_list *recycle_list,
+		struct qtn_skb_recycle_stats *stats, struct sk_buff *skb)
+{
+	return 0;
+}
+
+static __inline__ struct qtn_skb_recycle_list *qtn_get_shared_recycle_list(void)
+{
+	return NULL;
+}
+#endif // CONFIG_QTN_SKB_RECYCLE
+
+#endif // __QTN_SKBUFF_H
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_congest_queue.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_congest_queue.h
new file mode 100644
index 0000000..190d1ae
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_congest_queue.h
@@ -0,0 +1,156 @@
+/**
+ * Copyright (c) 2012-2013 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#ifndef __DRIVERS_NET_TOPAZ_CONGEST_H
+#define __DRIVERS_NET_TOPAZ_CONGEST_H
+
+#include <asm/param.h>
+#include <qtn/topaz_tqe_cpuif.h>
+
+/* Total number of congestion queues we will pre-allocate */
+#define TOPAZ_CONGEST_QUEUE_NUM	(6)
+/* Maximum number of packets in one queue */
+#define TOPAZ_CONGEST_PKT_MAX	(2048)
+/* Maximum number of packets in all congest queues */
+#define TOPAZ_CONGEST_TOTAL_PKT_MAX	(2048)
+/* Maximum number of congest queue for unicast frame */
+#define TOPAZ_CONGEST_MAX_UNICAST_QCOUNT	(3)
+
+/* Budget for the number of packets consumed per softirq invocation */
+#define TOPAZ_SOFTIRQ_BUDGET  8
+
+#define TOPAZ_PCIE_NODE_MAX	(128)
+#define TOPAZ_PCIE_TID_MAX	(16)
+
+#define TOPAZ_CONGEST_QUEUE_STATS_QLEN		0
+#define TOPAZ_CONGEST_QUEUE_STATS_ENQFAIL	1
+
+struct topaz_congest_q_elem {
+	union topaz_tqe_cpuif_ppctl ppctl;
+};
+
+/* Congestion queue descriptor */
+struct topaz_congest_q_desc {
+	struct topaz_congest_q_elem elems[TOPAZ_CONGEST_PKT_MAX];
+	uint32_t head;	/* The index of packet to be sent	*/
+	uint32_t tail;	/* The index of packet to be append	*/
+	uint32_t qlen;	/* Total number of pending requests per queue*/
+
+	uint32_t valid;
+	uint32_t index;
+	uint32_t node_id;
+	uint32_t tid;
+
+	uint32_t congest_xmit;	/* Packet number forwarded successfully */
+	uint32_t congest_drop;	/* Packet number dropped due to transmission time-out */
+	uint32_t congest_enq_fail;	/* packet number dropped due to enqueue failure */
+
+	uint32_t last_retry_success;	/* 0: Fail, 1: Success */
+	unsigned long retry_timeout;
+	uint32_t is_unicast;
+
+	struct topaz_congest_queue *congest_queue;
+};
+
+struct topaz_congest_queue {
+	struct vmac_priv *vmp;
+	int (*xmit_func)(union topaz_tqe_cpuif_ppctl *);
+	int (*tasklet_extra_proc)(void *);
+
+	struct topaz_congest_q_desc queues[TOPAZ_CONGEST_QUEUE_NUM];
+
+	/* A pointer array, if queues[node_id][tid] is not NULL, node-tid queue is congested
+	* and it will point to attached congested queue.
+	*/
+	struct topaz_congest_q_desc* ptrs[TOPAZ_PCIE_NODE_MAX][TOPAZ_PCIE_TID_MAX];
+	struct tasklet_struct congest_tx;
+
+	/* Counters */
+	uint32_t func_entry;	/* tasklet hook function called count */
+	uint32_t cnt_retries;	/* tried times on triggering to TQE */
+	uint32_t xmit_entry;
+
+	int logs[TOPAZ_CONGEST_PKT_MAX]; /* Used to check queue fullness */
+
+	uint32_t congest_timeout;
+	uint32_t tasklet_budget;
+
+	uint32_t total_qlen;	/* Total number of pending requests in all queues*/
+
+	uint32_t unicast_qcount;	/* Total congest queue count of unicast frame */
+	uint32_t max_unicast_qcount;	/* Max unicast congest queue count*/
+};
+
+struct qdrv_tqe_cgq {
+	uint32_t	congest_qlen;
+};
+/**
+* Return zero if the node-tid pair is not congested, non-zero otherwise.
+*/
+RUBY_INLINE int
+topaz_queue_congested(struct topaz_congest_queue *congest_queue, uint32_t node_id, uint32_t tid)
+{
+	BUG_ON(node_id >= TOPAZ_PCIE_NODE_MAX);
+	BUG_ON(tid >= TOPAZ_PCIE_TID_MAX);
+
+	return (int)congest_queue->ptrs[node_id][tid];
+}
+
+RUBY_INLINE struct topaz_congest_q_desc*
+topaz_get_congest_queue(struct topaz_congest_queue *congest_queue, uint32_t node_id, uint32_t tid)
+{
+	return congest_queue->ptrs[node_id][tid];
+}
+
+static inline uint32_t get_timestamp(void)
+{
+	return read_new_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+/**
+* Return NULL if failed
+*/
+extern struct topaz_congest_queue* topaz_congest_queue_init(void);
+
+extern void topaz_congest_queue_exit(struct topaz_congest_queue* congest_queue);
+
+/**
+* Push ppctl into congestion queue.
+*/
+
+extern int topaz_congest_enqueue(struct topaz_congest_q_desc* queue, union topaz_tqe_cpuif_ppctl *ppctl);
+
+extern void topaz_congest_node(struct topaz_congest_queue *queue);
+extern void topaz_congest_dump(struct topaz_congest_queue *queue);
+
+extern struct topaz_congest_q_desc* topaz_congest_alloc_unicast_queue(struct topaz_congest_queue *congest_queue,
+																		uint32_t node_id,
+																		uint32_t tid);
+
+extern struct topaz_congest_q_desc* topaz_congest_alloc_queue(struct topaz_congest_queue *congest_queue, uint32_t node_id, uint32_t tid);
+
+extern int topaz_congest_queue_xmit(struct topaz_congest_q_desc *queue, uint32_t budget);
+
+extern void reg_congest_queue_stats(void (*fn)(void *, uint32_t, uint8_t, uint32_t), void *ctx);
+
+extern struct topaz_congest_queue* topaz_congest_queue_get(void);
+
+extern void topaz_hbm_congest_queue_put_buf(const union topaz_tqe_cpuif_ppctl *ppctl);
+
+extern void topaz_congest_set_unicast_queue_count(uint32_t qnum);
+#endif
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_dpi.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_dpi.h
new file mode 100644
index 0000000..b96228c
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_dpi.h
@@ -0,0 +1,96 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_DPI_H
+#define __TOPAZ_DPI_H
+
+#include <common/topaz_emac.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <compat.h>
+
+struct topaz_dpi_field_def {
+	uint32_t val;
+	uint32_t mask;
+	union topaz_emac_rx_dpi_ctrl ctrl;
+};
+
+#define TOPAZ_DPI_ANCHOR_FRAME_START	0x0000
+#define TOPAZ_DPI_ANCHOR_VLAN0		0x0001
+#define TOPAZ_DPI_ANCHOR_VLAN1		0x0002
+#define TOPAZ_DPI_ANCHOR_VLAN2		0x0003
+#define TOPAZ_DPI_ANCHOR_VLAN3		0x0004
+#define TOPAZ_DPI_ANCHOR_OTHER		0x0005
+#define TOPAZ_DPI_ANCHOR_LLC		0x0006
+#define TOPAZ_DPI_ANCHOR_IPV4		0x0007
+#define TOPAZ_DPI_ANCHOR_IPV6		0x0008
+#define TOPAZ_DPI_ANCHOR_TCP		0x0009
+#define TOPAZ_DPI_ANCHOR_UDP		0x000a
+
+#define TOPAZ_DPI_CMPOP_EQ		0x00
+#define TOPAZ_DPI_CMPOP_NE		0x01
+#define TOPAZ_DPI_CMPOP_GT		0x02
+#define TOPAZ_DPI_CMPOP_LT		0x03
+
+#define TOPAZ_DPI_DISABLE		0x0
+#define TOPAZ_DPI_ENABLE		0x1
+
+struct topaz_dpi_filter_request {
+	uint8_t out_port;
+	uint8_t out_node;
+	uint8_t tid;
+	struct topaz_dpi_field_def *fields;
+	unsigned int field_count;
+	struct in6_addr srcaddr;
+	struct in6_addr destaddr;
+	uint16_t srcport;
+	uint16_t destport;
+};
+
+int topaz_dpi_filter_add(unsigned int emac,
+		const struct topaz_dpi_filter_request *req);
+void topaz_dpi_filter_del(unsigned int emac, int filter_no);
+int topaz_dpi_init(unsigned int emac);
+
+static inline void topaz_dpi_iptuple_poll_complete(unsigned long base)
+{
+	while (readl(base + TOPAZ_EMAC_RX_DPI_IPT_MEM_COM)
+			& (TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_WRITE | TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_READ)) {}
+
+}
+
+static inline void topaz_dpi_iptuple_read_entry(unsigned long base, uint8_t entry)
+{
+	writel(TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_READ | SM(entry, TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_ENT),
+			base + TOPAZ_EMAC_RX_DPI_IPT_MEM_COM);
+	topaz_dpi_iptuple_poll_complete(base);
+}
+
+static inline void topaz_dpi_iptuple_write_entry(unsigned long base, uint8_t entry)
+{
+	writel(TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_WRITE | SM(entry, TOPAZ_EMAC_RX_DPI_IPT_MEM_COM_ENT),
+			base + TOPAZ_EMAC_RX_DPI_IPT_MEM_COM);
+	topaz_dpi_iptuple_poll_complete(base);
+}
+
+#endif	/* __TOPAZ_DPI_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt.h
new file mode 100644
index 0000000..338815d8
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt.h
@@ -0,0 +1,50 @@
+/*
+ * (C) Copyright 2013 Quantenna Communications Inc.
+ */
+
+#ifndef __TOPAZ_FWT_H
+#define __TOPAZ_FWT_H
+
+#include <qtn/topaz_fwt_cpuif.h>
+
+typedef void (*fwt_notify_swap)(uint16_t dst_index, uint16_t src_index);
+
+/*
+ * The FWT algorithm keeps first-level entries occupied first for a fast look-up.
+ * When a first-level entry must be deleted while it has a successor entry at the
+ * second level, the second-level entry is copied over the first one and the
+ * second-level entry is then deleted. The FWT interface registers this overwrite
+ * callback so the same entry indices can be mirrored in both tables.
+ * @param cbk_func: callback function invoked to overwrite index-table entries
+ */
+void topaz_fwt_register_overwrite(fwt_notify_swap cbk_func);
+
+int topaz_fwt_add_entry(const uint8_t *mac_be, uint8_t out_port,
+		const uint8_t *out_node, unsigned int out_node_count, uint8_t portal);
+
+int topaz_fwt_del_entry(const uint8_t *mac_id);
+
+uint16_t topaz_fwt_hash(const uint8_t *mac_le);
+
+int topaz_get_mac_be_from_index(uint16_t index, uint8_t *mac_be);
+
+void topaz_update_node(uint16_t index, uint8_t node_index,uint8_t node,bool enable);
+
+void topaz_set_portal(uint16_t index, uint8_t portal);
+
+void topaz_fwt_sw_entry_set(uint16_t index, uint8_t out_port,
+		const uint8_t *out_nodes, unsigned int out_node_count, uint8_t portal);
+void topaz_fwt_sw_entry_del(uint16_t fwt_index);
+
+int topaz_sw_lookup(const uint8_t *mac_be);
+
+void topaz_fwt_sw_entry_set_multicast(uint16_t fwt_index, uint16_t mcast_index);
+
+int topaz_update_entry(uint16_t index, uint8_t port, uint8_t portal,
+		uint8_t node_index , uint8_t node_num, bool enable);
+
+int topaz_fwt_get_timestamp(uint16_t index);
+int topaz_fwt_init(void);
+
+#endif	/* __TOPAZ_FWT_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_cpuif.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_cpuif.h
new file mode 100644
index 0000000..e207e6a
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_cpuif.h
@@ -0,0 +1,771 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_FWT_CPUIF_PLATFORM_H
+#define __TOPAZ_FWT_CPUIF_PLATFORM_H
+
+#include <common/topaz_platform.h>
+#include <qtn/mproc_sync_base.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/qtn_net_packet.h>
+#include <qtn/lhost_muc_comm.h>
+
+#if defined(__linux__)
+	#define TOPAZ_FWT_LOCAL_CPU	TOPAZ_FWT_LOOKUP_LHOST
+#elif defined(ARCSHELL)
+	#define	TOPAZ_FWT_LOCAL_CPU	TOPAZ_FWT_LOOKUP_LHOST
+#elif defined(MUC_BUILD)
+	#define	TOPAZ_FWT_LOCAL_CPU	TOPAZ_FWT_LOOKUP_MUC
+#elif defined(DSP_BUILD)
+	#define	TOPAZ_FWT_LOCAL_CPU	TOPAZ_FWT_LOOKUP_DSP
+#elif defined(AUC_BUILD)
+	#define	TOPAZ_FWT_LOCAL_CPU	TOPAZ_FWT_LOOKUP_AUC
+#else
+	#error No TOPAZ_FWT_LOCAL_CPU set
+#endif
+
+#define	TOPAZ_FWT_LOOKUP_REG		__TOPAZ_FWT_LOOKUP_REG(TOPAZ_FWT_LOCAL_CPU)
+#define	TOPAZ_FWT_LOOKUP_MAC_LO		__TOPAZ_FWT_LOOKUP_MAC_LO(TOPAZ_FWT_LOCAL_CPU)
+#define	TOPAZ_FWT_LOOKUP_MAC_HI		__TOPAZ_FWT_LOOKUP_MAC_HI(TOPAZ_FWT_LOCAL_CPU)
+/* Hardware limitation for node entries */
+#define TOPAZ_FWT_MAX_NODE_ENTRY (6)
+
+/** Macro to get a number with its 'bits' LSB bits set */
+#define SET_LSB_BITS(bits) ((1 << (bits)) - 1)
+
+/** Macro to update a bit-field with a new value */
+#define TOPAZ_FWT_SET_BIT_FIELD(var, start_bit, width, value) \
+		(((var) & ~(SET_LSB_BITS(width)  << (start_bit))) | \
+				((value) & SET_LSB_BITS(width)) << (start_bit))
+
+/** Macro to extract a bit-field */
+#define TOPAZ_FWT_GET_BIT_FIELD(data, start_offset, width)\
+		(((data) >> (start_offset)) & SET_LSB_BITS(width))
+
+#if defined(ARCSHELL)
+typedef unsigned int uint32_t;
+typedef unsigned short uint16_t;
+typedef unsigned char uint8_t;
+#define ETH_ALEN 6
+#define inline _Inline
+#define unlikely
+#define likely
+#elif defined(MUC_BUILD)
+#define ETH_ALEN 6
+#endif
+
+/*
+ * Forwarding Table manipulation
+ *
+ * FWT has 2048 entries. First 1024 are mac address crc hash done by hardware.
+ * Next 1024 are linked list (nxt_entry) for when hash collision occurs.
+ *
+ * Basic forwarding table flow is:
+ * 1) Packet received
+ * 2) crc32 of incoming packet macaddr forms a 10 bit number [0 .. 1023]
+ * 3) This number is the index of the first FWT entry in the physical table. MAC address is compared.
+ *   a) If MAC matches, this is the fwt entry
+ *   b) If not, follow next entry index. Must be [1024 .. 2047], repeat.
+ * 4) Interpret FWT if present
+ *
+ * In sw maintain tailqs to know which slots are occupied
+ */
+
+/*
+ * FWT timestamp field is 10 bits. FWT clock is at 250MHz on BBIC4 ASIC.
+ * UNIT affects clock ticks per timer reg clock tick.
+ * SCALE affects shifting of the above register when applied to the timestamp field
+ * when updating it.
+ * ASIC: Values of 0xe unit and 13 (0xd) scale result in a time wrap time of 1 minute.
+ * FPGA: 0xc unit, 0xa scale results in very roughly 1m wrap
+ */
+
+#define TOPAZ_FWT_TIMESTAMP_BITS	10
+#define TOPAZ_FWT_TIMESTAMP_MASK	((1 << TOPAZ_FWT_TIMESTAMP_BITS) - 1)
+
+#if TOPAZ_FPGA_PLATFORM
+	#define TOPAZ_FWT_TIMESTAMP_UNIT	0xc
+	#define TOPAZ_FWT_TIMESTAMP_SCALE	0xa
+#else
+	#define TOPAZ_FWT_TIMESTAMP_UNIT	0x1b /* (3500 ticks 14 usec) */
+	#define TOPAZ_FWT_TIMESTAMP_SCALE	0x13 /* (2^19) */
+	/*
+	 * Resolution Calculation:
+	 * (per tick) * (FWT clock [sec]) * (bits store location) = Resolution[sec]
+	 * (3500)     * (1 / (250*10^6))  * (2^19)                = 7.34 [sec]
+	 * */
+
+	#define TOPAZ_FWT_RESOLUTION_MSEC	7340 /* Derive from unit & scale */
+#endif
+
+RUBY_INLINE uint32_t topaz_fwt_get_scaled_timestamp(void)
+{
+#ifdef CONSOLE_TEST
+	return 0;
+#else
+	uint32_t tsr = qtn_mproc_sync_mem_read(TOPAZ_FWT_TIME_STAMP_CNT);
+	return (tsr >> TOPAZ_FWT_TIMESTAMP_SCALE) & TOPAZ_FWT_TIMESTAMP_MASK;
+#endif
+}
+
+union topaz_fwt_entry {
+	struct {
+		uint32_t	word0;	/* macaddr[30:1] */
+		uint32_t	word1;	/* valid bit(31), portal(30), next_index(27:16), macaddr[47:32](15:0) */
+		uint32_t	word2;	/* out_node + valid 0, 1, 2, 3 */
+		uint32_t	word3;	/* out_node + valid 4, 5, outport, timestamp(9:0) */
+	} raw;
+	struct {
+		uint8_t		mac_le[ETH_ALEN];
+		uint16_t	__unused1	:1,
+				next_index	:11,
+				__unused2	:2,
+				portal		:1,
+				valid		:1;
+		uint8_t		out_node_0	:7,
+				out_node_vld_0	:1;
+		uint8_t		out_node_1	:7,
+				out_node_vld_1	:1;
+		uint8_t		out_node_2	:7,
+				out_node_vld_2	:1;
+		uint8_t		out_node_3	:7,
+				out_node_vld_3	:1;
+		uint16_t	timestamp	:10,
+				out_port	:4,
+				__unused3	:2;
+		uint8_t		out_node_4	:7,
+				out_node_vld_4	:1;
+		uint8_t		out_node_5	:7,
+				out_node_vld_5	:1;
+	} data;
+};
+
+#define FWT_ZERO_ENTRY_INIT	{ { 0, 0, 0, 0 } }
+
+RUBY_INLINE union topaz_fwt_entry *topaz_fwt_get_hw_entry(uint16_t index)
+{
+#ifdef CONSOLE_TEST
+	extern union topaz_fwt_entry test_hw_fwt[TOPAZ_FWT_HW_TOTAL_ENTRIES];
+	return &test_hw_fwt[index];
+#else
+	union topaz_fwt_entry *e;
+	e = (union topaz_fwt_entry *)(TOPAZ_FWT_TABLE_BASE + index * sizeof(*e));
+	return e;
+#endif
+}
+
+RUBY_INLINE int topaz_fwt_is_valid(const union topaz_fwt_entry *e)
+{
+	return e->raw.word1 & TOPAZ_FWT_ENTRY_VALID;
+}
+
+RUBY_INLINE uint16_t topaz_fwt_next_index(const union topaz_fwt_entry *e)
+{
+	return MS(e->raw.word1, TOPAZ_FWT_ENTRY_NXT_ENTRY);
+}
+
+RUBY_INLINE const uint8_t *topaz_fwt_macaddr(const union topaz_fwt_entry *e)
+{
+	return (void *) e;
+}
+
+RUBY_INLINE void topaz_fwt_set_next_index(union topaz_fwt_entry *e, uint16_t index)
+{
+	unsigned long word1 = e->raw.word1 & ~TOPAZ_FWT_ENTRY_NXT_ENTRY;
+	e->raw.word1 = word1 | SM(index, TOPAZ_FWT_ENTRY_NXT_ENTRY);
+}
+
+RUBY_INLINE void topaz_fwt_copy_entry(union topaz_fwt_entry *dest, const union topaz_fwt_entry *src,
+					int words)
+{
+	int i;
+#pragma Off(Behaved)
+	uint32_t *d = &dest->raw.word0;
+	const uint32_t *s = &src->raw.word0;
+#pragma On(Behaved)
+
+	for (i = 0; i < words; i++) {
+		*d++ = *s++;
+	}
+}
+
+RUBY_INLINE void topaz_fwt_insert_entry(const union topaz_fwt_entry *newent, uint16_t index)
+{
+	union topaz_fwt_entry *hw_ent = topaz_fwt_get_hw_entry(index);
+	topaz_fwt_copy_entry(hw_ent, newent, 4);
+}
+
+/*
+ * Software FWT mirror. Used by MuC for Rx path
+ * acceleration, without accessing the FWT memory
+ */
+union topaz_fwt_sw_entry {
+	uint16_t raw;
+	struct {
+		uint8_t	valid		:1,
+			mcast		:1,
+			vsp		:1,
+			portal		:1,
+			port		:4;
+		uint8_t	__pad		:1,
+			node		:7;
+	} unicast;
+	struct {
+		uint16_t valid		:1,
+			 mcast		:1,
+			 index		:14;
+	} multicast;
+};
+
+/* 23 bits of multicast ipv4 -> mac leaves 5 bits of ambiguity */
+#define TOPAZ_FWT_SW_IP_ALIAS_ENTRIES	32
+#define TOPAZ_FWT_SW_NODE_MAX		128
+#define TOPAZ_BITS_PER_WD		(32)
+#define TOPAZ_FWT_SW_NODE_BITMAP_SIZE	(TOPAZ_FWT_SW_NODE_MAX / TOPAZ_BITS_PER_WD)
+
+struct topaz_fwt_sw_mcast_entry {
+	uint32_t node_bitmap[TOPAZ_FWT_SW_NODE_BITMAP_SIZE];
+	uint8_t port_bitmap;
+	uint8_t flood_forward;
+	uint8_t seen;
+#ifdef CONFIG_TOPAZ_DBDC_HOST
+	uint8_t dev_bitmap;
+#else
+	uint8_t __pad[1];
+#endif
+};
+
+#ifdef CONFIG_TOPAZ_DBDC_HOST
+RUBY_INLINE int topaz_fwt_sw_mcast_dev_is_set(struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t dev_id)
+{
+	return (e->dev_bitmap & (1 << dev_id));
+}
+
+RUBY_INLINE int topaz_fwt_sw_mcast_dev_is_empty(struct topaz_fwt_sw_mcast_entry *const e)
+{
+	return (e->dev_bitmap == 0);
+}
+
+RUBY_INLINE void topaz_fwt_sw_mcast_dev_set(struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t dev_id)
+{
+	e->dev_bitmap |= (1 << dev_id);
+}
+
+RUBY_INLINE void topaz_fwt_sw_mcast_dev_clear(struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t dev_id)
+{
+	e->dev_bitmap &= ~(1 << dev_id);
+}
+#endif
+
+struct topaz_fwt_sw_alias_table {
+	int16_t mcast_entry_index[TOPAZ_FWT_SW_IP_ALIAS_ENTRIES];
+};
+
+RUBY_INLINE int8_t topaz_fwt_mcast_to_ip_alias(const void *addr, uint16_t ether_type)
+{
+	if (ether_type == htons(ETHERTYPE_IP)) {
+		return qtn_mcast_ipv4_alias(addr);
+	} else if (ether_type == htons(ETHERTYPE_IPV6)) {
+		return 0;
+	} else {
+		return -1;
+	}
+}
+
+RUBY_INLINE int topaz_fwt_sw_alias_table_index_valid(int16_t index)
+{
+	return index >= 0 && index < TOPAZ_FWT_MCAST_ENTRIES;
+}
+
+RUBY_INLINE int topaz_fwt_sw_mcast_entry_index_valid(int16_t index)
+{
+	return index >= 0 && index < TOPAZ_FWT_MCAST_ENTRIES;
+}
+
+RUBY_INLINE uint8_t topaz_fwt_sw_mcast_entry_nodes_clear(const struct topaz_fwt_sw_mcast_entry *e)
+{
+	unsigned int i;
+	for (i = 0; i < TOPAZ_FWT_SW_NODE_BITMAP_SIZE; i++) {
+		if (e->node_bitmap[i]) {
+			return 0;
+		}
+	}
+	return 1;
+}
+
+RUBY_INLINE int topaz_fwt_sw_alias_table_empty(const struct topaz_fwt_sw_alias_table *alias_table)
+{
+	unsigned int i;
+	for (i = 0; i < TOPAZ_FWT_SW_IP_ALIAS_ENTRIES; i++) {
+		if (topaz_fwt_sw_mcast_entry_index_valid(alias_table->mcast_entry_index[i])) {
+			return 0;
+		}
+	}
+	return 1;
+}
+
+RUBY_INLINE int topaz_fwt_sw_mcast_port_is_set(const uint8_t port_bitmap, const uint8_t port)
+{
+	return (port_bitmap & (1 << port));
+}
+
+RUBY_INLINE void topaz_fwt_sw_mcast_port_set(struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t port)
+{
+	e->port_bitmap |= (1 << port);
+}
+
+RUBY_INLINE void topaz_fwt_sw_mcast_port_clear(struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t port)
+{
+	e->port_bitmap &= ~(1 << port);
+}
+
+RUBY_INLINE int topaz_fwt_sw_mcast_port_has_nodes(const uint8_t port)
+{
+	return (port == TOPAZ_TQE_WMAC_PORT);
+}
+
+RUBY_INLINE void
+topaz_fwt_sw_mcast_flood_forward_set(struct topaz_fwt_sw_mcast_entry *const e, const uint8_t enable)
+{
+	e->flood_forward = enable;
+}
+
+RUBY_INLINE int
+topaz_fwt_sw_mcast_is_flood_forward(const struct topaz_fwt_sw_mcast_entry *const e)
+{
+	return (e->flood_forward);
+}
+
+RUBY_INLINE uint32_t topaz_fwt_sw_mcast_do_per_node(
+	void (*handler)(const void *token1, void *token2, uint8_t node, uint8_t port, uint8_t tid),
+	const struct topaz_fwt_sw_mcast_entry *mcast_ent,
+	const void *token1, void *token2, uint8_t in_node, uint8_t port, uint8_t tid)
+{
+	uint8_t node;
+	uint8_t node_cnt = 0;
+	uint32_t bitmap;
+	uint8_t i;
+	uint8_t j;
+
+	for (i = 0; i < TOPAZ_FWT_SW_NODE_BITMAP_SIZE; i++) {
+		bitmap = mcast_ent->node_bitmap[i];
+		j = 0;
+		while (bitmap) {
+			if (bitmap & 0x1) {
+				node = (i * TOPAZ_BITS_PER_WD) + j;
+				if ((in_node == 0) || (node != in_node)) {
+					handler(token1, token2, port, node, tid); /* NOTE(review): prototype order is (node, port, tid) but call passes (port, node, tid) — both uint8_t so this compiles; verify against every handler implementation */
+					node_cnt++;
+				}
+			}
+			bitmap >>= 1;
+			j++;
+		}
+	}
+
+	return node_cnt;
+}
+
+RUBY_INLINE int topaz_fwt_sw_mcast_node_is_set(const struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t port, const uint8_t node)
+{
+	if (port == TOPAZ_TQE_WMAC_PORT) {
+		return (e->node_bitmap[node / TOPAZ_BITS_PER_WD] &
+				(1 << (node % TOPAZ_BITS_PER_WD)));
+	}
+	return 0;
+}
+
+RUBY_INLINE void topaz_fwt_sw_mcast_node_set(struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t port, const uint16_t node)
+{
+	if (port == TOPAZ_TQE_WMAC_PORT) {
+		e->node_bitmap[node / TOPAZ_BITS_PER_WD] |= (1 << (node % TOPAZ_BITS_PER_WD));
+	}
+}
+
+RUBY_INLINE void topaz_fwt_sw_mcast_node_clear(struct topaz_fwt_sw_mcast_entry *const e,
+						const uint8_t port, const uint16_t node)
+{
+	if (port == TOPAZ_TQE_WMAC_PORT) {
+		e->node_bitmap[node / TOPAZ_BITS_PER_WD] &= ~(1 << (node % TOPAZ_BITS_PER_WD));
+		if (!topaz_fwt_sw_mcast_is_flood_forward(e) &&
+				topaz_fwt_sw_mcast_entry_nodes_clear(e)) {
+			topaz_fwt_sw_mcast_port_clear(e, port);
+		}
+	}
+}
+
+RUBY_INLINE union topaz_fwt_sw_entry *topaz_fwt_sw_entry_get(uint16_t index)
+{
+	union topaz_fwt_sw_entry *fwt = (void *) (RUBY_SRAM_BEGIN + TOPAZ_FWT_SW_START);
+	return &fwt[index];
+}
+
+RUBY_INLINE struct topaz_fwt_sw_mcast_entry *topaz_fwt_sw_mcast_ff_entry_get(void)
+{
+	return (void *)(RUBY_DRAM_BEGIN + TOPAZ_FWT_MCAST_TQE_FF_BASE);
+}
+
+RUBY_INLINE struct topaz_fwt_sw_mcast_entry *topaz_fwt_sw_mcast_entry_get(uint16_t index)
+{
+	struct topaz_fwt_sw_mcast_entry *fwt = (void *) (RUBY_DRAM_BEGIN + TOPAZ_FWT_MCAST_TQE_BASE);
+	return &fwt[index];
+}
+
+RUBY_INLINE struct topaz_fwt_sw_alias_table *topaz_fwt_sw_alias_table_get(uint16_t index)
+{
+	struct topaz_fwt_sw_alias_table *fwt = (void *) (RUBY_DRAM_BEGIN + TOPAZ_FWT_MCAST_IPMAP_BASE);
+	return &fwt[index];
+}
+
+RUBY_INLINE uint8_t topaz_fwt_sw_count_bits(uint32_t x)
+{
+	uint8_t bits_set = 0;
+
+	while (x) {
+		bits_set++;
+		x &= x - 1;
+	}
+
+	return bits_set;
+}
+
+RUBY_INLINE void __topaz_fwt_hash_set(int enable)
+{
+	uint32_t reg = enable ? TOPAZ_FWT_HASH_CTRL_ENABLE : 0;
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_HASH_CTRL, reg);
+}
+
+RUBY_INLINE void __topaz_fwt_hw_lookup_write_be(const uint8_t *mac_be)
+{
+	uint32_t lo = mac_be[5] | (mac_be[4] << 8) | (mac_be[3] << 16) | (mac_be[2] << 24);
+	uint32_t hi = mac_be[1] | (mac_be[0] << 8);
+
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_LOOKUP_MAC_LO, lo);
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_LOOKUP_MAC_HI, hi);
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_LOOKUP_REG, SM(1, TOPAZ_FWT_LOOKUP_TRIG));
+}
+
+RUBY_INLINE void __topaz_fwt_hw_lookup_write_le(const uint8_t *mac_le)
+{
+	uint32_t lo = mac_le[0] | (mac_le[1] << 8) | (mac_le[2] << 16) | (mac_le[3] << 24);
+	uint32_t hi = mac_le[4] | (mac_le[5] << 8);
+
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_LOOKUP_MAC_LO, lo);
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_LOOKUP_MAC_HI, hi);
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_LOOKUP_REG, SM(1, TOPAZ_FWT_LOOKUP_TRIG));
+}
+
+RUBY_INLINE void topaz_fwt_reverse_mac(uint8_t *dest, const uint8_t *src)
+{
+	int i;
+	for (i = 0; i < ETH_ALEN; i++) {
+		dest[ETH_ALEN - i - 1] = src[i];
+	}
+}
+
+RUBY_INLINE void topaz_fwt_setup_entry(union topaz_fwt_entry *ent, const uint8_t *mac_be,
+					uint8_t out_port, const uint8_t *out_nodes,
+					unsigned int out_node_count, uint8_t portal)
+{
+	ent->raw.word0 = 0;
+	ent->raw.word1 = 0;
+	ent->raw.word2 = 0;
+	ent->raw.word3 = 0;
+
+#pragma Off(Behaved)
+	topaz_fwt_reverse_mac(ent->data.mac_le, mac_be);
+#pragma On(Behaved)
+	ent->data.valid = 1;
+	ent->data.portal = portal;
+
+#define __topaz_fwt_setup_entry_set_out_node(x)			\
+	do {							\
+		if (x < out_node_count) {			\
+			ent->data.out_node_##x = out_nodes[x];	\
+			ent->data.out_node_vld_##x = 1;		\
+		}						\
+	} while(0)
+
+	__topaz_fwt_setup_entry_set_out_node(0);
+	__topaz_fwt_setup_entry_set_out_node(1);
+	__topaz_fwt_setup_entry_set_out_node(2);
+	__topaz_fwt_setup_entry_set_out_node(3);
+	__topaz_fwt_setup_entry_set_out_node(4);
+	__topaz_fwt_setup_entry_set_out_node(5);
+
+	ent->data.out_port = out_port;
+	ent->data.timestamp = topaz_fwt_get_scaled_timestamp();
+}
+
+union topaz_fwt_lookup {
+	struct {
+		uint32_t	word0;
+	} raw;
+	struct {
+		uint32_t	trig		:1,
+				__unused	:7,
+				hash_addr	:10,
+				__unused2	:2,
+				entry_addr	:11,
+				valid		:1;
+	} data;
+};
+
+#define FWT_ZERO_LOOKUP_INIT	{ { 0 } }
+
+RUBY_INLINE union topaz_fwt_lookup __topaz_fwt_hw_lookup_rd(void)
+{
+	union topaz_fwt_lookup u;
+	u.raw.word0 = qtn_mproc_sync_mem_read(TOPAZ_FWT_LOOKUP_REG);
+	return u;
+}
+
+RUBY_INLINE union topaz_fwt_lookup __topaz_fwt_hw_lookup_wait_be(const uint8_t *mac_be, int *timeout)
+{
+	unsigned long timeouts = 0;
+	union topaz_fwt_lookup u;
+	union topaz_fwt_lookup zero_lookup = FWT_ZERO_LOOKUP_INIT;
+
+	__topaz_fwt_hw_lookup_write_be(mac_be);
+	while (1) {
+		u = __topaz_fwt_hw_lookup_rd();
+		if (u.data.trig == 0) {
+			*timeout = 0;
+			return u;
+		} else {
+			qtn_pipeline_drain();
+			if (unlikely(timeouts++ > 1000)) {
+				*timeout = 1;
+				return zero_lookup;
+			}
+		}
+	}
+
+	return zero_lookup;
+}
+
+RUBY_INLINE union topaz_fwt_lookup topaz_fwt_hw_lookup_wait_be(const uint8_t *mac_be, int *timeout, uint8_t *false_miss)
+{
+#ifndef TOPAZ_DISABLE_FWT_WAR
+	/*
+	 * This is to workaround the FWT lookup false issue:
+	 * It seems when EMAC is under heavy VLAN traffic, Lhost and MuC
+	 * may get false miss from FWT -- FWT returns invalid while a MAC
+	 * address truly exists in it. A double check reduces count of false
+	 * misses significantly.
+	 */
+	uint8_t retries = 1;
+#else
+	uint8_t retries = 0;
+#endif
+	union topaz_fwt_lookup u;
+
+	do {
+		u = __topaz_fwt_hw_lookup_wait_be(mac_be, timeout);
+	} while (!u.data.valid && retries--);
+
+#if !defined(TOPAZ_DISABLE_FWT_WAR) && !defined(MUC_BUILD)
+	*false_miss += (retries == 0);
+#endif
+
+	return u;
+}
+
+RUBY_INLINE uint32_t __topaz_fwt_cpu_access_rd(void)
+{
+	return qtn_mproc_sync_mem_read(TOPAZ_FWT_CPU_ACCESS);
+}
+
+RUBY_INLINE void __topaz_fwt_set_4addrmode(union topaz_fwt_entry *ent, uint8_t portal)
+{
+	ent->data.portal = !!portal;
+}
+
+RUBY_INLINE void __topaz_fwt_set_port(union topaz_fwt_entry *ent, uint8_t port)
+{
+	ent->data.out_port = port;
+}
+
+
+RUBY_INLINE void __topaz_fwt_set_node(union topaz_fwt_entry *ent,
+		uint8_t node_index, uint8_t node_num, bool enable)
+{
+#define ____topaz_fwt_set_node(n)				\
+	case n:	do {						\
+			ent->data.out_node_##n = node_num;	\
+			ent->data.out_node_vld_##n = !!enable;	\
+		} while(0);					\
+	break
+
+	switch (node_index) {
+		____topaz_fwt_set_node(0);
+		____topaz_fwt_set_node(1);
+		____topaz_fwt_set_node(2);
+		____topaz_fwt_set_node(3);
+		____topaz_fwt_set_node(4);
+		____topaz_fwt_set_node(5);
+	default:
+		break;
+	}
+}
+
+RUBY_INLINE int __topaz_fwt_cpu_access_start_wait(void)
+{
+#ifndef CONSOLE_TEST
+	unsigned long timeouts = 0;
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_CPU_ACCESS, TOPAZ_FWT_CPU_ACCESS_REQ);
+	while (timeouts++ < 1000) {
+		uint32_t reg = __topaz_fwt_cpu_access_rd();
+		if (MS(reg, TOPAZ_FWT_CPU_ACCESS_STATE) == TOPAZ_FWT_CPU_ACCESS_STATE_GRANTED) {
+			return 0;
+		}
+		qtn_pipeline_drain();
+	}
+
+	return -1;
+#else
+	return 0;
+#endif
+}
+
+RUBY_INLINE void __topaz_fwt_cpu_access_stop(void)
+{
+#ifndef CONSOLE_TEST
+	qtn_mproc_sync_mem_write(TOPAZ_FWT_CPU_ACCESS, 0);
+#endif
+}
+
+RUBY_INLINE int topaz_fwt_cpu_access_lock_wait_irqsave(unsigned long *flags)
+{
+#ifndef CONSOLE_TEST
+	int rc;
+
+	local_irq_save(*flags);
+	rc = __topaz_fwt_cpu_access_start_wait();
+	if (rc) {
+		local_irq_restore(*flags);
+	}
+	return rc;
+#else
+	(void)flags;
+	return 0;
+#endif
+}
+
+RUBY_INLINE void topaz_fwt_cpu_access_unlock_irqrestore(unsigned long *flags)
+{
+#ifndef CONSOLE_TEST
+	__topaz_fwt_cpu_access_stop();
+	local_irq_restore(*flags);
+#else
+	(void)flags;
+#endif
+}
+#ifndef TOPAZ_TEST_ASSERT_EQUAL
+# define TOPAZ_TEST_ASSERT_EQUAL(a, b)	if ((a) != (b)) { return -1; }
+#endif
+RUBY_INLINE int topaz_fwt_lookup_bitfield_test(const union topaz_fwt_lookup *e)
+{
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word0, TOPAZ_FWT_LOOKUP_TRIG), e->data.trig);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word0, TOPAZ_FWT_LOOKUP_ENTRY_ADDR), e->data.entry_addr);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word0, TOPAZ_FWT_LOOKUP_HASH_ADDR), e->data.hash_addr);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word0, TOPAZ_FWT_LOOKUP_VALID), e->data.valid);
+
+	return 0;
+}
+
+RUBY_INLINE int topaz_fwt_entry_bitfield_test(const union topaz_fwt_entry *e)
+{
+	TOPAZ_TEST_ASSERT_EQUAL(!!topaz_fwt_is_valid(e), e->data.valid);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word1, TOPAZ_FWT_ENTRY_NXT_ENTRY), e->data.next_index);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_0), e->data.out_node_0);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_VLD_0), e->data.out_node_vld_0);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_1), e->data.out_node_1);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_VLD_1), e->data.out_node_vld_1);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_2), e->data.out_node_2);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_VLD_2), e->data.out_node_vld_2);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_3), e->data.out_node_3);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word2, TOPAZ_FWT_ENTRY_OUT_NODE_VLD_3), e->data.out_node_vld_3);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word3, TOPAZ_FWT_ENTRY_OUT_NODE_4), e->data.out_node_4);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word3, TOPAZ_FWT_ENTRY_OUT_NODE_VLD_4), e->data.out_node_vld_4);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word3, TOPAZ_FWT_ENTRY_OUT_NODE_5), e->data.out_node_5);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word3, TOPAZ_FWT_ENTRY_OUT_NODE_VLD_5), e->data.out_node_vld_5);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word3, TOPAZ_FWT_ENTRY_OUT_PORT), e->data.out_port);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw.word3, TOPAZ_FWT_ENTRY_TIMESTAMP), e->data.timestamp);
+#ifdef __KERNEL__
+	TOPAZ_TEST_ASSERT_EQUAL(memcmp(topaz_fwt_macaddr(e), e->data.mac_le, ETH_ALEN), 0);
+#endif
+
+	return 0;
+}
+
+struct topaz_ipmac_uc_entry {
+	struct topaz_ipmac_uc_entry *next;
+	union {
+		uint8_t		ipv4_addr[4];
+		uint8_t		ipv6_addr[16];
+	}u;
+	uint8_t		mac_addr[MAC_ADDR_LEN];
+	uint16_t	type;
+	struct topaz_ipmac_uc_entry *lhost_next;
+};
+
+#define TOPAZ_IPMAC_UC_HASH_SLOT	128
+#define TOPAZ_IPMAC_UC_HASH_SLOT_MASK	0x7f
+#define TOPAZ_IPMAC_UC_HASH_SIZE	(TOPAZ_IPMAC_UC_HASH_SLOT * sizeof(void *))
+
+#define TOPAZ_IPMAC_UC_ENTRY_SIZE	sizeof(struct topaz_ipmac_uc_entry)
+#define TOPAZ_IPMAC_UC_ENTRY_COUNT	31
+
+struct topaz_ipmac_uc_table {
+	struct topaz_ipmac_uc_entry *slots[TOPAZ_IPMAC_UC_HASH_SLOT];
+	struct topaz_ipmac_uc_entry entries[TOPAZ_IPMAC_UC_ENTRY_COUNT];
+	uint32_t	update_cnt_lhost;
+	uint32_t	update_cnt_muc;
+};
+
+#define TOPAZ_IPMAC_UC_TBL_SIZE		(sizeof(struct topaz_ipmac_uc_table))
+
+/*
+ * The hash works under the assumption that in most cases, hosts behind
+ * the STA are in the same IP subnet. The host number differs so we have
+ * a good chance to have diverse MSB byte of a be IP address
+ */
+RUBY_INLINE uint16_t topaz_ipmac_uc_hash(__be32 ipaddr)
+{
+	return ((ipaddr >> 24) & TOPAZ_IPMAC_UC_HASH_SLOT_MASK);
+}
+
+RUBY_INLINE uint16_t topaz_ipmac_ipv6uc_hash(const uint8_t *ipv6_addr)
+{
+	return (ipv6_addr[15] & TOPAZ_IPMAC_UC_HASH_SLOT_MASK);
+}
+
+#endif	/* __TOPAZ_FWT_CPUIF_PLATFORM_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_db.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_db.h
new file mode 100644
index 0000000..e16698f
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_db.h
@@ -0,0 +1,287 @@
+/*
+ * (C) Copyright 2013 Quantenna Communications Inc.
+ */
+
+#ifndef FWT_DB_H_
+#define FWT_DB_H_
+
+
+#ifndef CONSOLE_TEST
+#include <linux/types.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+
+#include <common/queue.h>
+
+#include <qtn/br_types.h>
+#include <qtn/topaz_fwt.h>
+#include <qtn/dmautil.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_fwt_cpuif.h>
+#include <qtn/qtn_net_packet.h>
+
+typedef enum
+{
+	FWT_DB_MLT_PORT_WMAC = 0,
+	FWT_DB_MLT_PORT_MAX,
+}fwt_db_mlt_ports;
+
+/* Set success return status */
+#define FWT_DB_STATUS_SUCCESS (1)
+
+/* Set Invalid node number */
+#define FWT_DB_INVALID_NODE (0xFF)
+
+/* Set Invalid IPV4 value */
+#define FWT_DB_INVALID_IPV4 (0xFF)
+
+/* Size of IPV4 address */
+#define FWT_DB_IPV4_SIZE (4)
+
+/*
+ * LHost FWT entry copy.
+ * Sufficient for unicast; multicast with multiple ports/nodes
+ * is handled in topaz_fwt_sw_mcast_entry
+ */
+typedef struct fwt_db_entry {
+	uint8_t mac_id[ETH_ALEN];
+	uint8_t out_port;
+	uint8_t out_node;
+	int16_t fwt_index;
+	int16_t alias_table_index;
+	uint32_t false_miss;
+	uint8_t portal	:1,
+		valid	:1,
+#ifdef CONFIG_TOPAZ_DBDC_HOST
+		mcast	:1,
+		dev_id  :DEV_ID_BITS;
+#else
+		mcast	:1;
+#endif
+} fwt_db_entry;
+
+/* node list indexed by the fwt */
+typedef struct fwt_db_node_element {
+	uint16_t index;
+	uint8_t ip_alias;
+	uint8_t port;
+	STAILQ_ENTRY(fwt_db_node_element) next;
+} fwt_db_node_element;
+
+typedef struct {
+	fwt_db_node_element *element;
+	bool in_use;
+	int node_index;
+} fwt_db_node_iterator;
+
+static inline struct topaz_fwt_sw_alias_table *
+fwt_db_get_sw_alias_table(struct fwt_db_entry *db)
+{
+	if (db && topaz_fwt_sw_alias_table_index_valid(db->alias_table_index)) {
+		return topaz_fwt_sw_alias_table_get(db->alias_table_index);
+	}
+	return NULL;
+}
+
+static inline struct topaz_fwt_sw_mcast_entry *
+fwt_db_get_sw_mcast(struct fwt_db_entry *db, uint8_t ipmap_index)
+{
+	struct topaz_fwt_sw_alias_table *ipmap = fwt_db_get_sw_alias_table(db);
+	if (ipmap) {
+		int16_t mcast_index = ipmap->mcast_entry_index[ipmap_index];
+		if (topaz_fwt_sw_mcast_entry_index_valid(mcast_index)) {
+			return topaz_fwt_sw_mcast_entry_get(mcast_index);
+		}
+	}
+
+	return NULL;
+}
+
+static inline struct topaz_fwt_sw_mcast_entry *fwt_db_get_sw_mcast_ff(void)
+{
+	return topaz_fwt_sw_mcast_ff_entry_get();
+}
+
+static inline void topaz_fwt_sw_alias_table_flush(struct topaz_fwt_sw_alias_table *p)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_wback(virt_to_phys(p), sizeof(*p));
+#else
+	flush_dcache_sizerange_safe(p, sizeof(*p));
+#endif
+}
+
+static inline void topaz_fwt_sw_mcast_flush(struct topaz_fwt_sw_mcast_entry *p)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_wback(virt_to_phys(p), sizeof(*p));
+#else
+	flush_dcache_sizerange_safe(p, sizeof(*p));
+#endif
+}
+
+static inline void fwt_mcast_to_mac(uint8_t *mac_be, const struct br_ip *group)
+{
+	return qtn_mcast_to_mac(mac_be, &group->u, group->proto);
+}
+
+static inline int8_t fwt_mcast_to_ip_alias(const struct br_ip *group)
+{
+	if (group == NULL) {
+		return -1;
+	} else {
+		return topaz_fwt_mcast_to_ip_alias(&group->u, group->proto);
+	}
+}
+
+fwt_db_node_element *fwt_db_create_node_element(void);
+void fwt_db_free_node_element(fwt_db_node_element *node_element);
+int fwt_db_is_node_exists_list(uint8_t node_index, uint16_t table_index,
+		uint8_t ip_alias, uint8_t port);
+/*
+ * Initialise the fwt_db database
+ */
+void fwt_db_init(void);
+
+/*
+ * Get IP Flood-forwarding configuration
+ * @Param buf: location of print buffer
+ * @Param buflen: size of print buffer
+ * @return number of characters printed
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+int fwt_db_get_ipff(struct seq_file *sfile);
+#else
+int fwt_db_get_ipff(char *buf, int buflen);
+#endif
+
+/*
+ * Print FWT Database to console
+ * @return number of existing valid entries
+ */
+int fwt_db_print(int is_mult);
+
+/*
+ * Print FWT node hash list to console
+ * @return number of existing valid entries
+ */
+int fwt_db_node_table_print(void);
+
+/*
+ * Print the FWT multicast entries to a buffer
+ * @return number of bytes written
+ */
+int fwt_db_get_mc_list(char *buf, int buflen);
+
+/*
+ * Calculate ageing time by the FWT HW timestamp
+ * @Param fwt_index: FWT table index
+ */
+int fwt_db_calculate_ageing_scale(int fwt_index);
+/*
+ * Insert new table entry
+ * @param index: the HW FWT index.
+ * Note: the FWT SW table use the FWT HW index algorithm for matching the table entries
+ * @param element: FWT SW table element that reflect both FWT HW table entry and additional data
+ * @return Success / Failure indication
+ */
+int fwt_db_table_insert(uint16_t index, fwt_db_entry *element);
+/*
+ * Acquire iterator to run over the list database from node index entry
+ * @param node_index: node number represented as a hash list index
+ * @return: Iterator database element
+ */
+fwt_db_node_iterator *fwt_db_iterator_acquire(uint8_t node_index);
+
+/*
+ * Release iterator to mark elements on the database can be erase or modify safely.
+ */
+void fwt_db_iterator_release(void);
+
+/*
+ * Give back current element and advance iterator to the next one.
+ * service function for running over the database.
+ * Note: Node_index is a part of the iterator
+ * @param iterator: Iterator database element
+ * @return: Current iterator database element
+ */
+fwt_db_node_element *fwt_db_iterator_next(fwt_db_node_iterator **iterator);
+
+/* Add a new node entry
+ * @param node_num:
+ * @param table_index: the HW FWT table index
+ */
+void fwt_db_add_new_node(uint8_t node_num, uint16_t table_index,
+		const int8_t ip_alias, uint8_t port, fwt_db_node_element *const new_element);
+
+/* Remove specific node from the hash list,
+ * Note: does not involve removing node from the fwt table.
+ * @param node_index: the hash list entry point that represent the node number
+ * @return: Function returns the number of elements that were removed.(Debug feature)
+ */
+int fwt_db_clear_node(uint8_t node_index);
+
+/* In cases where fwt entry is remove we need to maintain the specific index from
+ * the node table since its not relevant anymore.
+ * We conduct the maintenance procedure in order to take advantage of the knowledge of the specific node index
+ * so we can avoid going through the whole database.
+ * @param node_index: the hash list entry point that represent the node number
+ * @param table_index: the specific fwt index to be removed
+ */
+void fwt_db_delete_index_from_node_table(uint8_t node_index, uint16_t table_index,
+		uint8_t ip_alias, uint8_t port);
+
+fwt_db_node_element *fwt_db_get_table_index_from_node(uint8_t node_num);
+
+/*
+ * Delete fwt table entry.
+ * @param index: the HW FWT index.
+ * Note: the FWT SW table use the FWT HW index algorithm for matching the table entries
+ */
+void fwt_db_delete_table_entry(uint16_t index);
+/*
+ * Get table entry.
+ * @param index: the HW FWT index.
+ * Note: return ptr from database. Handle with care.
+ * @return indexed fwt database entry
+ */
+fwt_db_entry *fwt_db_get_table_entry(uint16_t index);
+
+/* Initialize the fwt db entry */
+int fwt_db_init_entry(fwt_db_entry *entry);
+
+/*
+ * Update parameters to existing entry
+ * @param index: the HW FWT index that match the SW one
+ * @param port: TQE output port.
+ * @param node: node number.
+ * @param portal: 4addr mode flag.
+ */
+int fwt_db_update_params(uint16_t index, uint8_t port, uint8_t node, uint8_t portal);
+
+/*
+ * Return the existing entry if present, otherwise create a new multicast entry
+ */
+struct topaz_fwt_sw_mcast_entry *fwt_db_get_or_add_sw_mcast(struct fwt_db_entry *db,
+		int8_t ip_alias);
+
+/*
+ * Free multicast entry, and possibly the alias_table if it becomes empty.
+ * Returns 1 if there are no multicast entries present under this db anymore.
+ */
+int fwt_db_delete_sw_mcast(struct fwt_db_entry *db, uint8_t ipmap_index);
+
+/*
+ * Get MAC addresses of nodes behind associated node
+ * @param index: node index
+ * @param num_entries: returns number of entries found
+ * @param max_req: maximum entries requested
+ * @param flags: bit 0 - results overflowed/truncated, bit 1 - 4addr node
+ * @param buf: buffer to store macs
+ */
+int fwt_db_get_macs_behind_node(const uint8_t index, uint32_t *num_entries, uint32_t max_req,
+					uint32_t *flags, uint8_t *buf);
+
+#endif /* CONSOLE_TEST */
+
+#endif /* FWT_DB_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_if.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_if.h
new file mode 100644
index 0000000..746e275
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_if.h
@@ -0,0 +1,89 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2013 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef FWT_INTERFACE_H_
+#define FWT_INTERFACE_H_
+
+#include <qtn/br_types.h>
+
+/* Must match FWT_IF_KEY_xxx macros */
+typedef enum fwt_if_usr_cmd {
+	FWT_IF_CMD_CLEAR = 0,
+	FWT_IF_CMD_ON,
+	FWT_IF_CMD_OFF,
+	FWT_IF_CMD_PRINT,
+	FWT_IF_CMD_ADD_STATIC_MC,
+	FWT_IF_CMD_DEL_STATIC_MC,
+	FWT_IF_CMD_GET_MC_LIST,
+	FWT_IF_CMD_ADD,
+	FWT_IF_CMD_DELETE,
+	FWT_IF_CMD_AUTO,
+	FWT_IF_CMD_MANUAL,
+	FWT_IF_CMD_4ADDR,
+	FWT_IF_CMD_DEBUG,
+	FWT_IF_CMD_HELP,
+	FWT_IF_CMD_AGEING,
+	FWT_IF_MAX_CMD,
+} fwt_if_usr_cmd;
+
+#include <linux/types.h>
+
+#define FWT_IF_USER_NODE_MAX (6)
+
+struct fwt_if_id {
+	uint8_t mac_be[ETH_ALEN];
+	struct br_ip ip;
+};
+
+struct fwt_if_common {
+	struct fwt_if_id id;
+	uint8_t port;
+	uint8_t node[FWT_IF_USER_NODE_MAX];
+	uint32_t param;
+	void *extra;
+};
+
+typedef int (*fwt_if_sw_cmd_hook)(fwt_if_usr_cmd cmd, struct fwt_if_common *data);
+
+void fwt_if_register_cbk_t(fwt_if_sw_cmd_hook cbk_func);
+
+#endif /* FWT_INTERFACE_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_sw.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_sw.h
new file mode 100644
index 0000000..8185ed1
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_fwt_sw.h
@@ -0,0 +1,152 @@
+/*
+ * (C) Copyright 2013 Quantenna Communications Inc.
+ */
+
+#ifndef FWT_SW_H_
+#define FWT_SW_H_
+
+
+#include <linux/types.h>
+#include <qtn/topaz_fwt_db.h>
+#include <qtn/topaz_fwt_if.h>
+
+/* Success definition in FWT Interface is return positive value */
+#define FWT_IF_SUCCESS(x)	((x) >= 0)
+/* Error definition in FWT Interface is return negative value */
+#define FWT_IF_ERROR(x)		(!(FWT_IF_SUCCESS(x)))
+
+typedef enum
+{
+	FWT_SW_4_ADDR_DEPRECATE = 0,
+	FWT_SW_4_ADDR_SUPPORT,
+	FWT_SW_4_ADDR_MAX,
+}fwt_sw_4addr_status;
+
+/* Portal represent support in 4 address mode */
+#define FWT_SW_DEFAULT_4ADDR	FWT_SW_4_ADDR_DEPRECATE
+
+/*
+ * Register overwrite entries for second level entry delete protocol from FWT table
+ * @param dst_index: Index to be overwritten
+ * @param src_index: Source index
+ * */
+void topaz_fwt_register_overwrite_entries(uint16_t dst_index,uint16_t src_index);
+
+/*
+ * Register a node
+ * @param node_num: node number
+ * @param vap_idx: vap index
+ */
+void fwt_sw_register_node(uint16_t node_num);
+
+/*
+ * Unregister a node
+ * @param node_num: node number
+ * @param vap_idx: vap index
+ */
+void fwt_sw_unregister_node(uint16_t node_num);
+
+/*
+ * Add device to the FWT.
+ * If successful, update the FWT mirror table and the node list at the fwt database
+ *
+ * @param mac_be: MAC address in big endian presentation
+ * @param port_id: the port number. Note: must match the FWT HW presentation.
+ * @param node_num: the node number
+ * @param ip_map: Multicast aliasing bit identifier.
+ * @return success / failure indication.
+ */
+int fwt_sw_add_device(const uint8_t *mac_be, uint8_t port_id, uint8_t node_num,
+		const struct br_ip *group);
+
+/*
+ * Set 4 address support for specific MAC ID
+ * @param mac_be: mac id in big endian presentation
+ * @param addr: Support indication for 4 address method
+ */
+int fwt_sw_set_4_address_support(const uint8_t *mac_be, fwt_sw_4addr_status addr);
+/*
+ * Reset call will clear both HW and Software FWT tables
+ */
+void fwt_sw_reset(void);
+
+/*
+ * Print both node list and FWT table
+ */
+int fwt_sw_print(void);
+
+
+/* Indicate expiry time
+ * @param mac_be: mac id in big endian presentation
+ */
+int fwt_sw_get_timestamp(const uint8_t *mac_be);
+
+/*
+ * Delete device entry from both HW and SW FWT tables
+ * @param mac_be: MAC in big endian
+ */
+int fwt_sw_delete_device(const uint8_t *mac_be);
+
+/*
+ * Update or insert new FWT table entry from multicast IGMP message.
+ * @param node: node number.
+ * @param port_id: output port.
+ * @param group: Indication for the multicast address by group id.
+ */
+int fwt_sw_join_multicast(uint8_t node, uint8_t port_id,
+		const struct br_ip *group);
+/*
+ * Remove or delete node from the FWT table entry from multicast IGMP message.
+ * @param node: node number.
+ * @param port_id: output port.
+ * @param group: Indication for the multicast address by group id.
+ */
+int fwt_sw_leave_multicast(uint8_t node, uint8_t port_id,
+		const struct br_ip *group);
+
+typedef int (*fwt_sw_4addr_callback_t)(void *token, const uint8_t *mac_be, uint8_t port_id,
+		uint8_t node_num);
+
+typedef uint8_t (*fwt_sw_remapper_t)(uint8_t in_port, const uint8_t *mac_be);
+
+void fwt_sw_register_port_remapper(uint8_t port, fwt_sw_remapper_t remapper);
+
+/**
+ * Callback to determine whether a FWT table entry should be added as 4 address mode or not.
+ * Typically registered by qdrv
+ * @param callback: callback function pointer to register
+ * @param token: will always be provided to the callback when invoked, as the first argument
+ */
+void fwt_sw_4addr_callback_set(fwt_sw_4addr_callback_t callback, void *token);
+
+/* Get number of current entries */
+uint16_t fwt_sw_get_entries_cnt(void);
+
+int fwt_sw_cmd(fwt_if_usr_cmd cmd, struct fwt_if_common *data);
+
+int fwt_sw_get_index_from_mac_be(const uint8_t *mac_be);
+
+/*
+ * Fast way to get fwt entry for unicast packet
+ * @param src_mac_be: source mac address of the packet
+ * @param dst_mac_be: destination mac address of the packet
+ */
+fwt_db_entry *fwt_sw_fast_get_ucast_entry(const unsigned char *src_mac_be,
+		const unsigned char *dst_mac_be);
+
+void fwt_sw_update_false_miss(int index, uint8_t false_miss);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+int fwt_sw_get_ipff(struct seq_file *sfile);
+#else
+int fwt_sw_get_ipff(char *buf, int buflen);
+#endif
+
+extern dma_addr_t ipmac_hash_bus;
+extern dma_addr_t ipmac_base_bus;
+
+int fwt_sw_update_uc_ipmac(const uint8_t *mac_be, const uint8_t *ip, uint16_t type);
+
+void fwt_sw_remove_uc_ipmac(const uint8_t *ip, uint16_t type);
+
+#endif /* FWT_SW_H_ */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_hbm.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_hbm.h
new file mode 100644
index 0000000..283dc81
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_hbm.h
@@ -0,0 +1,89 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_HBM_H
+#define __TOPAZ_HBM_H
+
+#include <linux/skbuff.h>
+#include <qtn/qtn_buffers.h>
+#include <qtn/topaz_hbm_cpuif.h>
+
+#include <qtn/dmautil.h>
+#include <asm/cacheflush.h>
+
+#define topaz_hbm_attach_skb(buf_virt, pool, headroom)		\
+	_topaz_hbm_attach_skb((buf_virt), (pool), 1, (headroom)	\
+			QTN_SKB_ALLOC_TRACE_ARGSRC)
+#define topaz_hbm_attach_skb_no_invalidate(buf_virt, pool, headroom)		\
+	_topaz_hbm_attach_skb((buf_virt), (pool), 0, (headroom) 	\
+			QTN_SKB_ALLOC_TRACE_ARGSRC)
+struct sk_buff *_topaz_hbm_attach_skb(void *buf_virt, int8_t pool, int inv, uint8_t headroom
+		QTN_SKB_ALLOC_TRACE_ARGS);
+
+#define topaz_hbm_attach_skb_bus(buf_bus, pool)	\
+	_topaz_hbm_attach_skb_bus((buf_bus), (pool)	\
+			QTN_SKB_ALLOC_TRACE_ARGSRC)
+static inline struct sk_buff *
+_topaz_hbm_attach_skb_bus(void *buf_bus, int8_t pool
+		QTN_SKB_ALLOC_TRACE_ARGS)
+{
+	void *buf_virt;
+
+	if (unlikely(buf_bus == NULL)) {
+		return NULL;
+	}
+
+	buf_virt = bus_to_virt((uintptr_t) buf_bus);
+	if (unlikely(buf_virt == RUBY_BAD_VIRT_ADDR)) {
+		return NULL;
+	}
+
+	return _topaz_hbm_attach_skb(buf_virt, pool, 1, 0
+			QTN_SKB_ALLOC_TRACE_ARGVARS);
+}
+
+static inline void topaz_hbm_flush_skb_cache(struct sk_buff *skb)
+{
+	uintptr_t flush_start = (uintptr_t) align_buf_cache(skb->head);
+	uintptr_t flush_end = align_val_up((uintptr_t) skb_end_pointer(skb),
+			dma_get_cache_alignment());
+	if (!skb->cache_is_cleaned)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		dma_cache_wback_inv(flush_start, flush_end - flush_start);
+#else
+		flush_and_inv_dcache_range(flush_start, flush_end);
+#endif
+}
+
+void topaz_hbm_filter_txdone_pool(void);
+void topaz_hbm_filter_txdone_buf(void *const buf_bus);
+unsigned int topaz_hbm_pool_available(int8_t pool);
+#ifdef TOPAZ_EMAC_NULL_BUF_WR
+extern void (*topaz_emac_null_buf_del_cb)(void);
+#endif
+
+void topaz_hbm_release_buf_safe(void *const pkt_bus);
+
+struct sk_buff *topaz_hbm_attach_skb_quarantine(void *buf_virt, int pool, int len, uint8_t **whole_frm_hdr);
+
+#endif	/* __TOPAZ_HBM_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_hbm_cpuif.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_hbm_cpuif.h
new file mode 100644
index 0000000..64488e0
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_hbm_cpuif.h
@@ -0,0 +1,793 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_HBM_CPUIF_PLATFORM_H
+#define __TOPAZ_HBM_CPUIF_PLATFORM_H
+
+#include "mproc_sync.h"
+#include "qtn_buffers.h"
+#include "qtn_arc_processor.h"
+
+/**
+ * HBM Requestors
+ * 0	- LHost
+ * 1	- MuC
+ * 2	- emac0	hw wired
+ * 3	- emac1	hw wired
+ * 4	- wmac	hw wired
+ * 5	- tqe	hw wired
+ * 6	- AuC
+ * 7	- DSP
+ * 8	- PCIE(?)
+ *
+ * Note: qdrv_pktlogger_get_hbm_stats must be updated if this list is changed.
+ */
+#define TOPAZ_HBM_REQUESTOR_NAMES	{ "lhost", "muc", "emac0", "emac1", "wmac", "tqe", "AuC", "DSP", "PCIe" }
+
+#if defined(__linux__)
+	#define TOPAZ_HBM_LOCAL_CPU	0
+	#define topaz_hbm_local_irq_save	local_irq_save
+	#define topaz_hbm_local_irq_restore	local_irq_restore
+#elif defined(MUC_BUILD)
+	#define	TOPAZ_HBM_LOCAL_CPU	1
+	#define topaz_hbm_local_irq_save(x)	do { (x) = _save_disable(); } while(0)
+	#define topaz_hbm_local_irq_restore(x)	do { _restore_enable((x)); } while(0)
+#elif defined(AUC_BUILD)
+	#define	TOPAZ_HBM_LOCAL_CPU	6
+	#define topaz_hbm_local_irq_save(x)	do { (void)(x); } while(0)
+	#define topaz_hbm_local_irq_restore(x)	do { (void)(x); } while(0)
+#elif defined(DSP_BUILD)
+	#define	TOPAZ_HBM_LOCAL_CPU	7
+#else
+	#error No TOPAZ_HBM_LOCAL_CPU set
+#endif
+
+#define TOPAZ_HBM_NUM_POOLS			4
+
+#ifdef QTN_RC_ENABLE_HDP
+#define TOPAZ_HBM_PAYLOAD_HEADROOM		128
+#else
+#define TOPAZ_HBM_PAYLOAD_HEADROOM		64
+#endif
+
+#define TOPAZ_HBM_PAYLOAD_END_GUARD_SIZE	32		/* equal to cacheline size */
+#define TOPAZ_HBM_BUF_GUARD_MAGIC		0xDEADBEEF
+#define TOPAZ_HBM_BUF_CORRUPTED_MAGIC		0xDEADBEEF
+#define TOPAZ_HBM_BUF_PAYLOAD_POISON		0xA5
+#define TOPAZ_HBM_BUF_PAYLOAD_POISON32		(TOPAZ_HBM_BUF_PAYLOAD_POISON |                \
+							(TOPAZ_HBM_BUF_PAYLOAD_POISON << 8) |  \
+							(TOPAZ_HBM_BUF_PAYLOAD_POISON << 16) | \
+							(TOPAZ_HBM_BUF_PAYLOAD_POISON << 24))
+
+#define TOPAZ_HBM_BUF_EXTERNAL_META		1	/* move meta data outside of buffer */
+#define TOPAZ_HBM_BUF_WMAC_RX_QUARANTINE	1	/* quarantine wmac rx buffers when deliver important packets */
+
+#if defined(CONFIG_TOPAZ_PCIE_TARGET) || defined(CONFIG_TOPAZ_DBDC_HOST)
+/*
+ * Checking emac rx pool buffers lead to performance impact in PCIe, so limit
+ * the check for wmac rx pool only.
+ */
+#define TOPAZ_HBM_BUF_MAGIC_CHK_ALLPOOL		0
+#else
+#define TOPAZ_HBM_BUF_MAGIC_CHK_ALLPOOL		1
+#endif
+
+#define TOPAZ_HBM_DEBUG_DUMP			0	/* inline debug dump functions */
+#define TOPAZ_HBM_DEBUG_STAMPS			0	/* extra trace info in meta data */
+
+#if TOPAZ_HBM_DEBUG_STAMPS
+#define TOPAZ_HBM_OWNER_MUC_FREE	0x9
+#define TOPAZ_HBM_OWNER_AUC_FREE	0xa
+#define TOPAZ_HBM_OWNER_LH_TX_TQE	0xb
+#define TOPAZ_HBM_OWNER_LH_RX_TQE	0xc
+#define TOPAZ_HBM_OWNER_LH_RX_MBOX	0xd
+#define TOPAZ_HBM_OWNER_INIT		0xe
+#define TOPAZ_HBM_OWNER_FREE		0xf
+#endif
+
+#define TOPAZ_HBM_ERR_NONE		0
+#define TOPAZ_HBM_ERR_PTR		(-1)
+#define TOPAZ_HBM_ERR_MAGIC		(-2)
+#define TOPAZ_HBM_ERR_TAILGUARD		(-3)
+
+/*
+ * The usage of the HBM buffer headroom and meta data, depending on TOPAZ_HBM_BUF_EXTERNAL_META:
+ * 1. When it is 1, all the below meta data except magic in head and tail, and pointer to meta are
+ * in separate memory region, outside of the buffer.
+ * 2. When it is 0, all the below meta data are in HBM buffer headroom.
+ * To avoid define different offset values for above 2 cases, we use the same definition. The only
+ * difference is where the meta data is stored.
+ */
+enum QTN_HBM_BUF_HEADROOM_OFFSET {
+	HBM_HR_OFFSET_ENQ_CNT = 1,	/* in word */
+	HBM_HR_OFFSET_FREE_CNT = 2,	/* in word */
+	HBM_HR_OFFSET_OCS_FRM_ID = 3,	/* in word */
+	HBM_HR_OFFSET_FREE_JIFF = 4,	/* debugging; jiffies of last free. leaked buffer heuristic */
+	HBM_HR_OFFSET_OWNER = 5,	/* debugging; buffer owner */
+	HBM_HR_OFFSET_SIZE = 6,		/* debugging; buffer size */
+	HBM_HR_OFFSET_STATE = 7,	/* state about the buffer */
+	HBM_HR_OFFSET_META_PTR = 8,	/* pointer and back pointer between buffer and meta, bus addr */
+	HBM_HR_OFFSET_MAGIC = 9,	/* the magic, keep it biggest thus first in headroom */
+	HBM_HR_OFFSET_MAX = HBM_HR_OFFSET_MAGIC,
+};
+
+#define QTN_HBM_SANITY_BAD_HR_MAGIC	BIT(0)
+#define QTN_HBM_SANITY_BAD_TAIL_GUARD	BIT(1)
+#define QTN_HBM_SANITY_BAD_ALREADY	BIT(31)
+
+#define TOPAZ_HBM_BUF_DUMP_MAX		0xFFFF
+#define TOPAZ_HBM_BUF_DUMP_DFT		512U
+#define TOPAZ_HBM_BUF_DUMP_TAIL_DFT	2048U
+
+#if TOPAZ_HBM_DEBUG_DUMP
+#if defined(DSP_BUILD)
+#elif defined(AUC_BUILD)
+	#define CPU_PRINT auc_os_printf
+	#define CPU_INV_DCACHE_RANGE(_v, _range)
+	#define CPU_HZ		AUC_CPU_TIMER_HZ
+#elif defined(MUC_BUILD)
+	#define CPU_PRINT uc_printk
+	#define CPU_INV_DCACHE_RANGE invalidate_dcache_range_safe
+	#define CPU_HZ		HZ
+#else
+	#ifdef __KERNEL__
+		#include <qtn/dmautil.h>
+		#define CPU_PRINT printk
+		#define CPU_INV_DCACHE_RANGE inv_dcache_sizerange_safe
+		#define CPU_HZ		HZ
+	#endif
+#endif
+#endif // TOPAZ_HBM_DEBUG_DUMP
+
+RUBY_INLINE uint32_t topaz_hbm_buf_offset_from_start_bus(void *buf_bus, uint8_t pool, uint8_t is_aligned)
+{
+	if (is_aligned) {
+		return ((uint32_t)buf_bus) & (TOPAZ_HBM_BUF_ALIGN - 1);
+	}
+	if (pool == TOPAZ_HBM_BUF_WMAC_RX_POOL) {
+		return (((uint32_t)buf_bus) - (RUBY_DRAM_BUS_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE)) % TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+	} else if (pool == TOPAZ_HBM_BUF_EMAC_RX_POOL) {
+		return (((uint32_t)buf_bus) - (RUBY_DRAM_BUS_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE)) % TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+	} else {
+		return 0;
+	}
+}
+
+RUBY_INLINE int topaz_hbm_buf_identify_buf_bus(const void *buf_bus, uint32_t *sizep, uint32_t *idxp)
+{
+	if (__in_mem_range((uint32_t)buf_bus, TOPAZ_HBM_BUF_EMAC_RX_BASE, TOPAZ_HBM_BUF_EMAC_RX_TOTAL)) {
+		*sizep = TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+		*idxp = (((uint32_t)buf_bus) - TOPAZ_HBM_BUF_EMAC_RX_BASE) / TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+		return TOPAZ_HBM_BUF_EMAC_RX_POOL;
+	} else if (__in_mem_range((uint32_t)buf_bus, TOPAZ_HBM_BUF_WMAC_RX_BASE, TOPAZ_HBM_BUF_WMAC_RX_TOTAL)) {
+		*sizep = TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+		*idxp = (((uint32_t)buf_bus) - TOPAZ_HBM_BUF_WMAC_RX_BASE) / TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+		return TOPAZ_HBM_BUF_WMAC_RX_POOL;
+	} else {
+		return -1;
+	}
+}
+
+RUBY_INLINE int topaz_hbm_buf_identify_buf_virt(const void *buf_virt, uint32_t *sizep, uint32_t *idxp)
+{
+	if (__in_mem_range((uint32_t)buf_virt, TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT, TOPAZ_HBM_BUF_EMAC_RX_TOTAL)) {
+		*sizep = TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+		*idxp = (((uint32_t)buf_virt) - TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT) / TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+		return TOPAZ_HBM_BUF_EMAC_RX_POOL;
+	} else if (__in_mem_range((uint32_t)buf_virt, TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT, TOPAZ_HBM_BUF_WMAC_RX_TOTAL)) {
+		*sizep = TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+		*idxp = (((uint32_t)buf_virt) - TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT) / TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+		return TOPAZ_HBM_BUF_WMAC_RX_POOL;
+	} else {
+		return -1;
+	}
+}
+
+RUBY_INLINE int topaz_hbm_buf_ptr_valid(const void *buf_virt)
+{
+	uint32_t offset;
+
+	if (__in_mem_range((uint32_t)buf_virt, TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT, TOPAZ_HBM_BUF_EMAC_RX_TOTAL)) {
+		offset = (((uint32_t)buf_virt) - TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT) % TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+	} else if (__in_mem_range((uint32_t)buf_virt, TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT, TOPAZ_HBM_BUF_WMAC_RX_TOTAL)) {
+		offset = (((uint32_t)buf_virt) - TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT) % TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+	} else {
+		return 0;
+	}
+
+	return (offset == TOPAZ_HBM_PAYLOAD_HEADROOM);
+}
+
+#if TOPAZ_HBM_BUF_EXTERNAL_META
+RUBY_INLINE void* topaz_hbm_buf_get_meta(const void *buf_virt)
+{
+	uint32_t idx;
+
+	if (__in_mem_range((uint32_t)buf_virt, TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT, TOPAZ_HBM_BUF_EMAC_RX_TOTAL)) {
+		idx = (((uint32_t)buf_virt) - TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT) / TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+		return (void*)(TOPAZ_HBM_BUF_META_EMAC_RX_BASE_VIRT + TOPAZ_HBM_BUF_META_SIZE +
+			idx * TOPAZ_HBM_BUF_META_SIZE);
+	} else if (__in_mem_range((uint32_t)buf_virt, TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT, TOPAZ_HBM_BUF_WMAC_RX_TOTAL)) {
+		idx = (((uint32_t)buf_virt) - TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT) / TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+		return (void*)(TOPAZ_HBM_BUF_META_WMAC_RX_BASE_VIRT + TOPAZ_HBM_BUF_META_SIZE +
+			idx * TOPAZ_HBM_BUF_META_SIZE);
+	} else {
+		return NULL;
+	}
+}
+
+/*
+ * A fast way to get the meta address.
+ * However this assume the meta address in buffer headroom is not corrupted as long as the magic in
+ * buffer headroom is not corrupted. So this is not 100% correct, but it can be used to speed up for
+ * some non-real-world test.
+ */
+RUBY_INLINE void* topaz_hbm_buf_get_meta_fast(const void *buf_virt)
+{
+	uint32_t *magicp = (uint32_t*)buf_virt - HBM_HR_OFFSET_MAGIC;
+
+	/* assume ptr is valid when magic is not corrupted */
+	if (likely(arc_read_uncached_32(magicp) == TOPAZ_HBM_BUF_GUARD_MAGIC)) {
+		uint32_t *meta_ptr_p = (uint32_t*)buf_virt - HBM_HR_OFFSET_META_PTR;
+		return (void*)bus_to_virt(arc_read_uncached_32(meta_ptr_p));
+	}
+
+	return topaz_hbm_buf_get_meta(buf_virt);
+}
+#else
+#define topaz_hbm_buf_get_meta_fast(_buf_virt)	(_buf_virt)
+#define topaz_hbm_buf_get_meta(_buf_virt)	(_buf_virt)
+#endif
+
+RUBY_INLINE uint32_t topaz_hbm_buf_offset_from_start_virt(void *buf_virt, uint8_t pool, uint8_t is_aligned)
+{
+	return topaz_hbm_buf_offset_from_start_bus((void *)virt_to_bus(buf_virt), pool, is_aligned);
+}
+
+RUBY_INLINE void *topaz_hbm_payload_store_align_bus(void *buf_bus, uint8_t pool, uint8_t is_aligned)
+{
+	return ((uint8_t *)buf_bus) - topaz_hbm_buf_offset_from_start_bus(buf_bus, pool, is_aligned)
+			+ TOPAZ_HBM_PAYLOAD_HEADROOM;
+}
+
+RUBY_INLINE void *topaz_hbm_payload_store_align_virt(void *buf_virt, uint8_t pool, uint8_t is_aligned)
+{
+	return ((uint8_t *)buf_virt) - topaz_hbm_buf_offset_from_start_virt(buf_virt, pool, is_aligned)
+			+ TOPAZ_HBM_PAYLOAD_HEADROOM;
+}
+
+RUBY_INLINE unsigned long topaz_hbm_payload_store_align_from_index(int8_t pool, uint16_t index)	/* map (pool, index) to the payload DRAM address; 0 when pool or index is out of range */
+{
+	if (pool == TOPAZ_HBM_BUF_EMAC_RX_POOL && index < TOPAZ_HBM_BUF_EMAC_RX_COUNT) {
+		return RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE +
+			(index * TOPAZ_HBM_BUF_EMAC_RX_SIZE) + TOPAZ_HBM_PAYLOAD_HEADROOM;
+	} else if (pool == TOPAZ_HBM_BUF_WMAC_RX_POOL && index < TOPAZ_HBM_BUF_WMAC_RX_COUNT) {
+		return RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE +
+			(index * TOPAZ_HBM_BUF_WMAC_RX_SIZE) + TOPAZ_HBM_PAYLOAD_HEADROOM;
+	}
+	return 0;	/* unknown pool or index past the pool's buffer count */
+}
+
+RUBY_INLINE long topaz_hbm_payload_buff_ptr_offset_bus(void *buf_bus, uint8_t pool, void *align_bus)	/* signed distance from buf_bus to the canonical payload address; align_bus (optional) supplies the alignment reference */
+{
+	unsigned long buf_align = (unsigned long)topaz_hbm_payload_store_align_bus(
+			align_bus ? align_bus : buf_bus, pool, !!align_bus);	/* is_aligned only when a separate reference pointer is given */
+
+	return buf_align - (unsigned long)buf_bus;
+}
+
+RUBY_INLINE long topaz_hbm_payload_buff_ptr_offset_virt(void *buf_virt, uint8_t pool, void *align_virt)	/* virtual-address wrapper for topaz_hbm_payload_buff_ptr_offset_bus() */
+{
+	return topaz_hbm_payload_buff_ptr_offset_bus((void *)virt_to_bus(buf_virt), pool,
+			align_virt ? (void *)virt_to_bus(align_virt) : NULL);
+}
+
+RUBY_INLINE int __topaz_hbm_is_done(void)	/* non-zero when this CPU's HBM request register has the DONE bit set */
+{
+	return qtn_mproc_sync_mem_read(TOPAZ_HBM_POOL_REQ(TOPAZ_HBM_LOCAL_CPU)) & TOPAZ_HBM_DONE;
+}
+
+RUBY_INLINE void __topaz_hbm_release_buf(void *buf, uint8_t pool)	/* post a buffer-release request; caller must ensure the engine is idle (see __topaz_hbm_wait) */
+{
+	/* assumes previous operations are complete */
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_POOL_DATA(TOPAZ_HBM_LOCAL_CPU),
+			(unsigned long) buf);	/* buffer pointer into the data register first */
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_POOL_REQ(TOPAZ_HBM_LOCAL_CPU),
+			TOPAZ_HBM_POOL_NUM(pool) | TOPAZ_HBM_RELEASE_BUF);	/* then kick the release */
+}
+
+RUBY_INLINE void __topaz_hbm_request_start(uint8_t pool)	/* post a buffer-allocation request for the given pool; result read via __topaz_hbm_rd_buf() */
+{
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_POOL_REQ(TOPAZ_HBM_LOCAL_CPU),
+			TOPAZ_HBM_POOL_NUM(pool) | TOPAZ_HBM_REQUEST_BUF);
+}
+
+RUBY_INLINE void *__topaz_hbm_rd_buf(uint8_t pool)	/* fetch the buffer pointer produced by the last request; 'pool' is unused here */
+{
+	/* must be preceded by __topaz_hbm_rd_req, then polling on __topaz_hbm_is_done */
+	return (void *) qtn_mproc_sync_mem_read(TOPAZ_HBM_POOL_DATA(TOPAZ_HBM_LOCAL_CPU));
+}
+
+RUBY_INLINE void __topaz_hbm_wait(void)	/* spin until the HBM engine reports DONE; logs once on long waits */
+{
+	unsigned int timeouts = 0;
+	unsigned int timeout_reached = 0;
+
+	while (1) {
+		int i;
+		if (__topaz_hbm_is_done()) {
+			/* if a timeout was logged earlier, record that we recovered */
+			if (unlikely(timeout_reached)) {
+				qtn_mproc_sync_log("__topaz_hbm_wait succeeded");
+			}
+			return;
+		}
+
+		/* busy wait until buffer is available */
+		for (i = 0; i < 10; i++) {
+#if defined(AUC_BUILD) && defined(_ARC)
+			/*
+			 * This is a workaround for MetaWare C Compiler v7.4.0
+			 * bug in Zero-Delay Loop code generation for ARC 600 family cores.
+			 * Without it the LP_START register will be written to, two
+			 * instruction before the end address of the loop, but at least three
+			 * instructions are required according to the ARC ISA Programmer's
+			 * Reference.
+			 */
+			_nop();
+			_nop();
+			_nop();
+#else
+			qtn_pipeline_drain();
+#endif
+		}
+
+		if (unlikely(timeout_reached == 0 && timeouts++ == 1000)) {
+			timeout_reached = 1;
+			qtn_mproc_sync_log("__topaz_hbm_wait timeout");
+		}
+	}
+}
+
+RUBY_INLINE int __topaz_hbm_put_buf_nowait(void *buf, uint8_t pool)	/* non-blocking release: 0 on success, -EBUSY if the engine is still busy */
+{
+	if (__topaz_hbm_is_done()) {
+		__topaz_hbm_release_buf(buf, pool);
+		return 0;
+	}
+	return -EBUSY;
+}
+
+RUBY_INLINE void __topaz_hbm_put_buf(void *buf, uint8_t pool)	/* blocking release: wait for the engine, then post the buffer back to 'pool' */
+{
+	__topaz_hbm_wait();
+	__topaz_hbm_release_buf(buf, pool);
+}
+
+#if defined(MUC_BUILD)
+RUBY_INLINE void topaz_hbm_put_buf(void *buf, uint8_t pool)	/* public release entry point; inline on MuC, weak symbol elsewhere so it can be overridden */
+#else
+RUBY_WEAK(topaz_hbm_put_buf) void topaz_hbm_put_buf(void *buf, uint8_t pool)
+#endif
+{
+	unsigned long flags;
+
+	topaz_hbm_local_irq_save(flags);	/* the data/req register pair must not be interleaved with another request */
+	__topaz_hbm_put_buf(buf, pool);
+	topaz_hbm_local_irq_restore(flags);
+}
+
+RUBY_INLINE void *__topaz_hbm_get_buf(uint8_t pool)	/* blocking allocate: wait, request, wait for completion, then read the result */
+{
+	__topaz_hbm_wait();
+	__topaz_hbm_request_start(pool);
+	__topaz_hbm_wait();
+	return __topaz_hbm_rd_buf(pool);
+}
+
+#if defined(MUC_BUILD)
+RUBY_INLINE void *topaz_hbm_get_buf(uint8_t pool)	/* public allocate entry point; inline on MuC, weak symbol elsewhere so it can be overridden */
+#else
+RUBY_WEAK(topaz_hbm_get_buf) void *topaz_hbm_get_buf(uint8_t pool)
+#endif
+{
+	unsigned long flags;
+	void *buf;
+
+	topaz_hbm_local_irq_save(flags);	/* serialize the request/read sequence against interrupts */
+	buf = __topaz_hbm_get_buf(pool);
+	topaz_hbm_local_irq_restore(flags);
+
+	return buf;
+}
+
+RUBY_INLINE void topaz_hbm_init(void *pool_list_bus, uint16_t payload_count_s, uint8_t pool, int full)	/* program one HBM pool: ring base, capacity (2^payload_count_s), fill state, then enable */
+{
+	unsigned long csr;
+	const uint16_t payload_count = BIT(payload_count_s);	/* capacity is given as a power-of-two shift */
+
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_BASE_REG(pool), (unsigned long) pool_list_bus);	/* bus address of the pool's pointer list */
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_LIMIT_REG(pool), payload_count);
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_WR_PTR(pool), full ? payload_count : 0);	/* 'full' pre-populates the ring */
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_RD_PTR(pool), 0);
+
+	csr = qtn_mproc_sync_mem_read(TOPAZ_HBM_CSR_REG);
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_HBM_CSR_REG, csr | TOPAZ_HBM_CSR_Q_EN(pool));	/* enable last, after the ring is consistent */
+}
+
+RUBY_INLINE uint32_t topaz_hbm_pool_buf_whole_size(int8_t pool)	/* whole per-buffer size (headroom + payload + tail guard) for a pool; 0 for unknown pools */
+{
+	if (pool == TOPAZ_HBM_BUF_EMAC_RX_POOL) {
+		return TOPAZ_HBM_BUF_EMAC_RX_SIZE;
+	} else if (pool == TOPAZ_HBM_BUF_WMAC_RX_POOL) {
+		return TOPAZ_HBM_BUF_WMAC_RX_SIZE;
+	} else {
+		return 0;
+	}
+}
+
+RUBY_INLINE uint32_t topaz_hbm_pool_buf_max_size(int8_t pool)	/* maximum usable payload bytes: whole size minus headroom and end guard; 0 for unknown pools */
+{
+	uint32_t size;
+
+	size = topaz_hbm_pool_buf_whole_size(pool);
+	if (!size)
+		return 0;
+
+	return size -
+		TOPAZ_HBM_PAYLOAD_HEADROOM -
+		TOPAZ_HBM_PAYLOAD_END_GUARD_SIZE;
+}
+
+RUBY_INLINE int8_t topaz_hbm_pool_valid(int8_t pool)	/* true iff 'pool' is a valid pool index [0, TOPAZ_HBM_NUM_POOLS) */
+{
+	return pool >= 0 && pool < TOPAZ_HBM_NUM_POOLS;
+}
+
+RUBY_INLINE int8_t topaz_hbm_payload_get_pool_bus(const void *buf_bus)	/* classify a bus address by which pool's DRAM region contains it; -1 if neither */
+{
+	const unsigned long v = (const unsigned long) buf_bus;
+	if (__in_mem_range(v, RUBY_DRAM_BUS_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE, TOPAZ_HBM_BUF_EMAC_RX_TOTAL)) {
+		return TOPAZ_HBM_BUF_EMAC_RX_POOL;
+	} else if (__in_mem_range(v, RUBY_DRAM_BUS_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE, TOPAZ_HBM_BUF_WMAC_RX_TOTAL)) {
+		return TOPAZ_HBM_BUF_WMAC_RX_POOL;
+	} else {
+		return -1;	/* not an HBM payload address */
+	}
+}
+
+RUBY_INLINE int8_t topaz_hbm_payload_get_free_pool_bus(const void *buf_bus)	/* pool to release this buffer to: both RX pools free via the EMAC TX-done pool; -1 otherwise */
+{
+	int8_t orig_pool = topaz_hbm_payload_get_pool_bus(buf_bus);
+	if (orig_pool == TOPAZ_HBM_BUF_EMAC_RX_POOL || orig_pool == TOPAZ_HBM_BUF_WMAC_RX_POOL) {
+		return TOPAZ_HBM_EMAC_TX_DONE_POOL;
+	}
+	return -1;
+}
+
+RUBY_INLINE void topaz_hbm_put_payload_aligned_bus(void *buf_bus, int8_t pool)	/* release an already-aligned payload pointer (bus); silently ignores invalid pools */
+{
+	if (likely(topaz_hbm_pool_valid(pool))) {
+		topaz_hbm_put_buf(topaz_hbm_payload_store_align_bus(buf_bus, pool, 1), pool);
+	}
+}
+
+RUBY_INLINE void topaz_hbm_put_payload_realign_bus(void *buf_bus, int8_t pool)	/* release a possibly mid-buffer pointer (bus), realigning it first; ignores invalid pools */
+{
+	if (likely(topaz_hbm_pool_valid(pool))) {
+		topaz_hbm_put_buf(topaz_hbm_payload_store_align_bus(buf_bus, pool, 0), pool);
+	}
+}
+
+RUBY_INLINE void topaz_hbm_put_payload_aligned_virt(void *buff_virt, int8_t pool)	/* virtual-address wrapper for topaz_hbm_put_payload_aligned_bus() */
+{
+	topaz_hbm_put_payload_aligned_bus((void *) virt_to_bus(buff_virt), pool);
+}
+
+RUBY_INLINE void topaz_hbm_put_payload_realign_virt(void *buff_virt, int8_t pool)	/* virtual-address wrapper for topaz_hbm_put_payload_realign_bus() */
+{
+	topaz_hbm_put_payload_realign_bus((void *) virt_to_bus(buff_virt), pool);
+}
+
+#ifdef __KERNEL__
+#define topaz_hbm_get_payload_bus(pool)		__topaz_hbm_get_payload_bus((pool), __FILE__, __LINE__, __FUNCTION__)
+RUBY_INLINE void *__topaz_hbm_get_payload_bus(int8_t pool, const char *file, const int line, const char *func)	/* allocate a payload (bus addr); NULL and a rate-limited log on invalid pool */
+{
+	if (likely(topaz_hbm_pool_valid(pool))) {
+		return topaz_hbm_get_buf(pool);
+	}
+	if (printk_ratelimit()) {
+		printk("%s:%u %s: null buffer from pool %hhd\n",	/* was "%u%s": line number and function ran together */
+				file, line, func, pool);
+	}
+	return NULL;
+}
+
+#define topaz_hbm_get_payload_virt(pool)	__topaz_hbm_get_payload_virt((pool), __FILE__, __LINE__, __FUNCTION__)
+RUBY_INLINE void *__topaz_hbm_get_payload_virt(int8_t pool, const char *file, const int line, const char *func)	/* allocate a payload and translate to a virtual address; NULL and a rate-limited log on failure */
+{
+	void *buf_bus = topaz_hbm_get_payload_bus(pool);
+	if (unlikely(!buf_bus)) {
+		if (printk_ratelimit()) {
+			printk("%s:%u %s: null buffer from pool %hhd\n",	/* was "%u%s": line number and function ran together */
+					file, line, func, pool);
+		}
+		return NULL;
+	}
+	return bus_to_virt((unsigned long) buf_bus);
+}
+
+#else
+
+RUBY_INLINE void *topaz_hbm_get_payload_bus(int8_t pool)	/* non-kernel build: allocate a payload (bus addr); NULL on invalid pool, no logging */
+{
+	if (likely(topaz_hbm_pool_valid(pool))) {
+		return topaz_hbm_get_buf(pool);
+	}
+	return NULL;
+}
+
+RUBY_INLINE void *topaz_hbm_get_payload_virt(int8_t pool)	/* non-kernel build: allocate a payload and translate to a virtual address; NULL on failure */
+{
+	void *buf_bus = topaz_hbm_get_payload_bus(pool);
+	if (unlikely(!buf_bus)) {
+		return NULL;
+	}
+	return bus_to_virt((unsigned long) buf_bus);
+}
+#endif
+
+RUBY_INLINE int hbm_buf_check_wmac_rx_buf_overrun(void *v, int fix)	/* check (and optionally restore) the last 4-byte tail guard of a WMAC RX buffer */
+{
+	uint32_t *guardp;
+
+	/* only check last 4 bytes guard */
+	guardp =(uint32_t*)((uint32_t)v + TOPAZ_HBM_BUF_WMAC_RX_SIZE - TOPAZ_HBM_PAYLOAD_HEADROOM - 4);
+	if (likely(arc_read_uncached_32(guardp) == TOPAZ_HBM_BUF_GUARD_MAGIC)) {
+		return TOPAZ_HBM_ERR_NONE;
+	}
+
+	/*
+	 * It is best if we do the buffer pointer check first, but as we only do the overrun check after wmac rx,
+	 * if it is bad, the memory is already corrupted.
+	 */
+
+	if (fix) {
+		arc_write_uncached_32(guardp, TOPAZ_HBM_BUF_GUARD_MAGIC);	/* rewrite the guard so the same corruption is not reported twice */
+	}
+
+	return TOPAZ_HBM_ERR_TAILGUARD;
+}
+
+RUBY_INLINE int hbm_buf_check_buf_magic(void *v)	/* verify the headroom guard magic; TOPAZ_HBM_ERR_NONE or TOPAZ_HBM_ERR_MAGIC */
+{
+	uint32_t *magicp = (uint32_t*)v - HBM_HR_OFFSET_MAGIC;
+
+	if (likely(arc_read_uncached_32(magicp) == TOPAZ_HBM_BUF_GUARD_MAGIC)) {
+		return TOPAZ_HBM_ERR_NONE;
+	}
+
+	return TOPAZ_HBM_ERR_MAGIC;
+}
+
+RUBY_INLINE void hbm_buf_fix_buf_magic(void *v)	/* unconditionally rewrite the headroom guard magic */
+{
+	uint32_t *magicp = (uint32_t*)v - HBM_HR_OFFSET_MAGIC;
+
+	arc_write_uncached_32(magicp, TOPAZ_HBM_BUF_GUARD_MAGIC);
+}
+
+#if TOPAZ_HBM_DEBUG_DUMP
+/* assume v is 4 bytes aligned */
+RUBY_INLINE void topaz_buf_dump_range(const void *v, int len)	/* hex-dump 'len' bytes; no-op on DSP, word dump on AuC, byte dump elsewhere */
+{
+#if defined(DSP_BUILD)
+#elif defined(AUC_BUILD)
+	int i;
+	const uint32_t *d32;
+	int dump_loop;
+
+	d32 = v;
+	dump_loop = ((len + 3) >> 2) >> 3;	/* groups of 8 words; a trailing partial group (< 32 bytes) is not printed */
+	for (i = 0; i < dump_loop; i++) {
+		CPU_PRINT("0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+			d32[0], d32[1], d32[2], d32[3], d32[4], d32[5], d32[6], d32[7]);
+		d32 += 8;
+	}
+#else
+	int i;
+	const uint8_t *d;
+
+	d = v;
+	for (i = 0; i < len; ) {
+		if (!(i % 32))
+			CPU_PRINT("%08x ", (i - i % 32));	/* offset label at the start of each 32-byte row */
+		++i;
+		CPU_PRINT("%02x%s", *d++, (i % 32) == 0 ? "\n" : " ");
+	}
+	CPU_PRINT("\n");
+#endif
+}
+
+RUBY_INLINE void topaz_hbm_buf_show(void *v, const uint32_t len, const uint32_t tail_len)	/* dump one HBM buffer: headroom/meta accounting fields, guard status, then up to 'len' payload bytes and an optional tail window */
+{
+#if defined(DSP_BUILD)
+#else
+	const uint32_t *_p = v;
+	const uint32_t *_m = topaz_hbm_buf_get_meta(_p);	/* meta area holding the accounting fields below */
+	const uint32_t *enqueuep = _m - HBM_HR_OFFSET_ENQ_CNT;
+	const uint32_t *freep = _m - HBM_HR_OFFSET_FREE_CNT;
+	const uint32_t *jiffp = _m - HBM_HR_OFFSET_FREE_JIFF;
+	const uint32_t *ownerp = _m - HBM_HR_OFFSET_OWNER;
+	const uint32_t *sizep = _m - HBM_HR_OFFSET_SIZE;
+	const uint32_t *magicp = _p - HBM_HR_OFFSET_MAGIC;	/* note: magic lives in the buffer headroom, not in meta */
+	const uint32_t ec = arc_read_uncached_32(enqueuep);
+	const uint32_t fc = arc_read_uncached_32(freep);
+	const uint32_t jc = arc_read_uncached_32(jiffp);
+	const uint32_t oc = arc_read_uncached_32(ownerp);
+	const uint32_t sz = arc_read_uncached_32(sizep);
+	const uint32_t magic = arc_read_uncached_32(magicp);
+	uint32_t *guardp;
+	uint32_t guard;
+	int dump_bytes;
+	uint32_t whole_size;
+	uint32_t payload_size;
+	int pool;
+	uint32_t idx;
+
+	pool = topaz_hbm_buf_identify_buf_virt(v, &whole_size, &idx);
+	if (pool < 0) {
+		return;	/* not an HBM buffer: nothing to show */
+	}
+	payload_size = whole_size - TOPAZ_HBM_PAYLOAD_HEADROOM;
+	CPU_INV_DCACHE_RANGE((void*)v, payload_size);	/* read fresh data, not stale cache lines */
+	dump_bytes = (len == TOPAZ_HBM_BUF_DUMP_MAX) ? payload_size : len;
+
+	/* only check last 4 bytes guard */
+	guardp =(uint32_t*)((uint32_t)_p + payload_size - 4);
+	guard = arc_read_uncached_32(guardp);
+
+	CPU_PRINT("buf start 0x%x pool %d idx %u size %u dump %u\n",
+			(unsigned int)v, pool, idx, whole_size, dump_bytes);
+#ifdef __KERNEL__
+	CPU_PRINT("%p ec %u fp %u own %08x size %u j %u (%u s ago)\n",
+			v, ec, fc, oc, sz, jc, (((uint32_t) jiffies) - jc) / CPU_HZ);
+#else
+	/* free jiffies is only set by Lhost, so no way to do jiffies diff */
+	CPU_PRINT("%p ec %u fp %u own %08x size %u j %u (local j %u)\n",
+			v, ec, fc, oc, sz, jc, ((uint32_t) jiffies));
+#endif
+	if (magic != TOPAZ_HBM_BUF_GUARD_MAGIC) {
+		CPU_PRINT("magic %x corrupted\n", magic);
+	}
+	if (guard != TOPAZ_HBM_BUF_GUARD_MAGIC) {
+		CPU_PRINT("guard %x corrupted\n", guard);
+	}
+
+	topaz_buf_dump_range(v, dump_bytes);
+
+	if (tail_len) {
+		uint32_t tail;	/* dump the last 'tail_len' bytes of the payload as well */
+
+		tail = (uint32_t)v;
+		tail += payload_size;
+		tail -= tail_len;
+
+		CPU_PRINT("buf tail 0x%x\n", tail);
+		topaz_buf_dump_range((void*)tail, tail_len);
+	}
+#endif
+}
+
+/*
+ * Full sanity check suitable for all buffers.
+ * Debug build use only, not suitable for release because of performance impact.
+ */
+RUBY_INLINE int hbm_buf_check_sanity(void *v)	/* full guard check (headroom magic + tail guard); returns a QTN_HBM_SANITY_* bitmask, 0 when clean */
+{
+	const uint32_t *magicp = (uint32_t*)v - HBM_HR_OFFSET_MAGIC;
+	uint32_t *_m;
+	uint32_t *statep;
+	uint32_t state;
+	uint32_t magic;
+	uint32_t payload_size;
+	uint32_t *guardp;
+	uint32_t guard;
+	uint32_t size = 0;
+	uint32_t idx = 0;
+	uint32_t bad = 0;
+	int pool;
+
+	magic = arc_read_uncached_32(magicp);
+	if (unlikely(magic != TOPAZ_HBM_BUF_GUARD_MAGIC)) {
+		bad |= QTN_HBM_SANITY_BAD_HR_MAGIC;
+	}
+
+	pool = topaz_hbm_buf_identify_buf_virt(v, &size, &idx);	/* NOTE(review): pool < 0 / size == 0 is not checked before use below -- confirm callers only pass HBM buffers */
+	payload_size = size - TOPAZ_HBM_PAYLOAD_HEADROOM;
+	/* only check last 4 bytes guard */
+	guardp =(uint32_t*)((uint32_t)v + payload_size - 4);
+	guard = arc_read_uncached_32(guardp);
+	if (unlikely(guard != TOPAZ_HBM_BUF_GUARD_MAGIC)) {
+		bad |= QTN_HBM_SANITY_BAD_TAIL_GUARD;
+	}
+
+	if (likely(!bad))
+		return 0;
+
+	/* avoid multiple alert */
+	_m = topaz_hbm_buf_get_meta(v);
+	statep = (uint32_t*)_m - HBM_HR_OFFSET_STATE;	/* per-buffer record of already-reported corruption bits */
+	state = arc_read_uncached_32(statep);
+	if ((bad & (~state)) == 0) {
+		return (bad | QTN_HBM_SANITY_BAD_ALREADY);	/* every bad bit was reported before; don't dump again */
+	}
+
+	/* new corruption */
+	arc_write_uncached_32(statep, bad);
+	CPU_PRINT("ERROR: hbm buffer %x corrupted, pool %d, idx %u\n",
+			(unsigned int)v, pool, idx);
+
+	topaz_hbm_buf_show(v, TOPAZ_HBM_BUF_DUMP_DFT, 0);
+
+	/* new corruption of tail guard */
+	if ((bad & QTN_HBM_SANITY_BAD_TAIL_GUARD) && !(state & QTN_HBM_SANITY_BAD_TAIL_GUARD)) {
+		/* find the corruption extent */
+		int i;
+		int j = 0;
+		int lines = (size * 4) / 16;	/* NOTE(review): scans 4x the buffer size in 16-byte lines -- intended overscan? confirm */
+		uint32_t pos = 0;
+		for (i = 0; i < lines ; i++) {
+			for (j = 0; j < 4; j++) {
+				pos = (uint32_t)v + i * 16 + j * 4;
+				if (*(uint32_t*)pos != (uint32_t)TOPAZ_HBM_BUF_PAYLOAD_POISON32)	/* first word no longer holding the poison pattern */
+					break;
+			}
+			if (j == 4)
+				break;	/* a fully-poisoned line: corruption ended before it */
+		}
+		CPU_PRINT("guess tail corruption length %d %x\n", (i * 16) + (j * 4), pos);
+	}
+
+	return bad;
+}
+#endif // TOPAZ_HBM_DEBUG_DUMP
+
+#if TOPAZ_HBM_DEBUG_STAMPS
+RUBY_INLINE void topaz_hbm_debug_stamp(void *buf, uint8_t port, uint32_t size)	/* shift a 4-bit port id into the buffer's owner history; optionally record its size */
+{
+	uint32_t *p = buf;
+	uint32_t *_m = topaz_hbm_buf_get_meta(p);
+	uint32_t *ownerp = _m - HBM_HR_OFFSET_OWNER;
+	uint32_t *sizep = _m - HBM_HR_OFFSET_SIZE;
+
+	arc_write_uncached_32(ownerp, (arc_read_uncached_32(ownerp) << 4) | (port & 0xF));	/* owner word keeps the last 8 port ids, newest in the low nibble */
+	if (size) {
+		arc_write_uncached_32(sizep, size);	/* 0 means "leave the recorded size unchanged" */
+	}
+}
+
+#else
+#define topaz_hbm_debug_stamp(_buf, _port, _size)
+#endif /* TOPAZ_HBM_DEBUG_STAMPS */
+
+#endif	/* __TOPAZ_HBM_CPUIF_PLATFORM_H */
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_ipprt.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_ipprt.h
new file mode 100644
index 0000000..206f344
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_ipprt.h
@@ -0,0 +1,92 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_IPPROTO_TABLE_H
+#define __TOPAZ_IPPROTO_TABLE_H
+
+#include <common/topaz_emac.h>
+#include <qtn/mproc_sync_base.h>
+
+union topaz_ipprt_entry {	/* one EMAC IP-protocol routing table entry; raw register word overlaid with bitfields */
+	uint32_t raw;
+	struct {
+		uint32_t	out_node	:7,	/* destination node */
+				out_port	:4,	/* destination port */
+				valid		:1,	/* entry in use */
+				__unused	:21;
+	} data;
+};
+
+
+#define TOPAZ_IPPRT_ENTRY_INIT	{ 0 }
+
+RUBY_INLINE void topaz_ipprt_set_entry(uint8_t emac, uint8_t ip_proto, union topaz_ipprt_entry e)	/* write one IP-protocol routing entry to EMAC0 (emac==0) or EMAC1 */
+{
+	uint32_t emac_base = emac ? RUBY_ENET1_BASE_ADDR : RUBY_ENET0_BASE_ADDR;
+	qtn_mproc_sync_mem_write(emac_base + TOPAZ_EMAC_IP_PROTO_ENTRY(ip_proto), e.raw);
+}
+
+RUBY_INLINE void topaz_ipprt_clear_entry(uint8_t emac, uint8_t ip_proto)	/* zero one IP-protocol routing entry (clears the valid bit too) */
+{
+	uint32_t emac_base = emac ? RUBY_ENET1_BASE_ADDR : RUBY_ENET0_BASE_ADDR;
+	qtn_mproc_sync_mem_write(emac_base + TOPAZ_EMAC_IP_PROTO_ENTRY(ip_proto), 0x0);
+}
+
+RUBY_INLINE void topaz_ipprt_clear_all_entries(uint8_t emac)	/* zero every IP-protocol routing entry of the given EMAC */
+{
+	int proto;
+	for (proto = 0; proto < TOPAZ_EMAC_IP_PROTO_ENTRIES; ++proto) {
+		topaz_ipprt_clear_entry(emac, proto);
+	}
+}
+
+RUBY_INLINE void topaz_ipprt_set(uint8_t emac, uint8_t ip_proto, uint8_t out_port, uint8_t out_node)	/* convenience: build a valid entry from port/node and write it */
+{
+	union topaz_ipprt_entry e = TOPAZ_IPPRT_ENTRY_INIT;
+	e.data.out_node = out_node;
+	e.data.out_port = out_port;
+	e.data.valid = 1;
+	topaz_ipprt_set_entry(emac, ip_proto, e);
+}
+
+RUBY_INLINE union topaz_ipprt_entry topaz_ipprt_get_entry(uint8_t emac, uint8_t ip_proto)	/* read back one IP-protocol routing entry */
+{
+	uint32_t emac_base = emac ? RUBY_ENET1_BASE_ADDR : RUBY_ENET0_BASE_ADDR;
+	union topaz_ipprt_entry e;
+	e.raw = qtn_mproc_sync_mem_read(emac_base + TOPAZ_EMAC_IP_PROTO_ENTRY(ip_proto));
+	return e;
+}
+
+#ifndef TOPAZ_TEST_ASSERT_EQUAL
+# define TOPAZ_TEST_ASSERT_EQUAL(a, b)	if ((a) != (b)) { return -1; }
+#endif
+RUBY_INLINE int topaz_ipprt_entry_bitfield_test(const union topaz_ipprt_entry *e)	/* self-test: bitfield layout must match the register mask/shift macros; 0 on success, -1 on mismatch */
+{
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw, TOPAZ_EMAC_IP_PROTO_OUT_NODE), e->data.out_node);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw, TOPAZ_EMAC_IP_PROTO_OUT_PORT), e->data.out_port);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw, TOPAZ_EMAC_IP_PROTO_VALID), e->data.valid);
+
+	return 0;
+}
+
+#endif	/* __TOPAZ_IPPROTO_TABLE_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_qfp.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_qfp.h
new file mode 100644
index 0000000..f15a2e9
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_qfp.h
@@ -0,0 +1,143 @@
+/*
+ * (C) Copyright 2015 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_QFP_H__
+#define __TOPAZ_QFP_H__
+
+
+#define ioremap ioremap_nocache
+
+/**
+ * \brief Initialize topaz PCIe QFP interface.
+ *
+ * Initialize topaz PCIe QFP interface. This function should be called before
+ * any other QFP API call.
+ *
+ * \param pci_dev pci_dev pci_dev structure point to PCIe adapter
+ * \param msi flag for using legacy interrupt (0) or MSI (1)
+ *
+ * \return 0 on success.
+ *
+ */
+extern int qfp_init(struct pci_dev * pci_dev, int msi);
+
+/**
+ * \brief De-initialize topaz PCIe QFP interface.
+ *
+ * De-initialize topaz PCIe QFP interface.
+ *
+ * \param pci_dev pci_dev pci_dev structure point to PCIe adapter
+ * \param msi flag for using legacy interrupt (0) or MSI (1)
+ *
+ *
+ */
+extern void qfp_deinit(struct pci_dev * pci_dev, int msi);
+
+/**
+ * \brief register master netdev to QFP
+ *
+ * Register the master netdev with QFP. After calling this function, packets
+ * received or transmitted through this netdev will be accelerated by QFP.
+ *
+ * The caller should call this function right before calling register_netdev()
+ * for the master netdev.
+ *
+ * \param netdev pointer to master netdev
+ *
+ * \return 0 on success and other for failure
+ */
+extern int qfp_register_netdev(struct net_device * net_dev);
+
+/**
+ * \brief un-register master netdev from QFP
+ *
+ * Un-register master netdev from QFP.
+ *
+ * The caller should call this function right after calling unregister_netdev()
+ * for the master netdev.
+ *
+ * \param netdev pointer to master netdev
+ *
+ * \return 0 on success and other for failure
+ */
+extern void qfp_unregister_netdev(struct net_device * net_dev);
+
+/**
+ * \brief register virtual netdev to QFP
+ *
+ * Register a virtual netdev with QFP. After calling this function, packets
+ * received or transmitted through this netdev will be accelerated by QFP. This
+ * function is used to create a virtual netdev for a VAP.
+ *
+ * The caller should call this function right before calling register_netdev()
+ * for the virtual netdev.
+ *
+ * \param netdev pointer to virtual netdev
+ *
+ * \return 0 on success and other for failure
+ */
+extern int qfp_register_virtual_netdev(struct net_device * net_dev);
+
+/**
+ * \brief un-register virtual netdev from QFP
+ *
+ * Un-register virtual netdev from QFP.
+ *
+ * The caller should call this function right after calling unregister_netdev()
+ * for the virtual netdev.
+ *
+ * \param netdev pointer to virtual netdev
+ *
+ * \return 0 on success and other for failure
+ */
+extern void qfp_unregister_virtual_netdev(struct net_device * net_dev);
+
+/**
+ * \brief allocate skb.
+ *
+ * Allocate an skb from QFP; every skb that will later be handed to QFP must be
+ * allocated by calling this function. The caller should call this function
+ * instead of any Linux skb allocation function for RX packets.
+ *
+ * \param size max size of bytes for payload of skb
+ *
+ * \return pointer to a skb or NULL for failure
+ */
+extern struct sk_buff * qfp_alloc_skb(unsigned int size);
+
+/**
+ * \brief Receive skb to QFP.
+ *
+ * Hand an skb that was allocated with qfp_alloc_skb() over to QFP. The caller
+ * should call this function instead of calling netif_rx() or netif_receive_skb().
+ * The caller loses its reference to the skb when this function returns success;
+ * the caller should still call netif_rx() or netif_receive_skb() when this
+ * function returns failure.
+ *
+ * \param skb pointer to skb need to received to QFP
+ *
+ * \return 0 on success; -1 on failure
+ */
+extern int qfp_rx(struct sk_buff * skb);
+
+#endif
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_shared_params.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_shared_params.h
new file mode 100644
index 0000000..f2dc0cb
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_shared_params.h
@@ -0,0 +1,426 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications Inc                               **
+**  File        : topaz_shared_params.h                                            **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _TOPAZ_SHARED_PARAMS_H_
+#define _TOPAZ_SHARED_PARAMS_H_
+
+#include <qtn/mproc_sync_mutex.h>
+#include <qtn/qtn_uc_comm.h>
+#include <qtn/qtn_wmm_ac.h>
+
+enum shared_params_auc_ipc_cmd	/* IPC command codes; M2A commands first, A2M commands start at 0x100 */
+{
+	SHARED_PARAMS_IPC_NONE_CMD		= 0,
+
+	/* beginning of M2A IPC config commands */
+	SHARED_PARAMS_IPC_M2A_CFG_PARAMS_MIN,
+	SHARED_PARAMS_IPC_M2A_SLOW_NODE_CREATE_CMD,
+	SHARED_PARAMS_IPC_M2A_NODE_CREATE_CMD,
+	SHARED_PARAMS_IPC_M2A_NODE_DESTROY_CMD,
+	SHARED_PARAMS_IPC_M2A_SLOW_TID_CREATE_CMD,
+	SHARED_PARAMS_IPC_M2A_TID_CREATE_CMD,
+	SHARED_PARAMS_IPC_M2A_TID_DESTROY_CMD,
+	SHARED_PARAMS_IPC_M2A_TID_ACTIVATE_CMD,
+	SHARED_PARAMS_IPC_M2A_TID_DEACTIVATE_CMD,
+	SHARED_PARAMS_IPC_M2A_TID_CHECK_IDLE_CMD,
+	SHARED_PARAMS_IPC_M2A_TID_BA_CTL_CMD,
+	SHARED_PARAMS_IPC_M2A_TX_SCALE_CMD,
+	SHARED_PARAMS_IPC_M2A_TX_SCALE_BASE_CMD,
+	SHARED_PARAMS_IPC_M2A_TX_SCALE_MAX_CMD,
+	SHARED_PARAMS_IPC_M2A_TX_AGG_TIMEOUT_CMD,
+	SHARED_PARAMS_IPC_M2A_TX_DBG_CMD,
+	SHARED_PARAMS_IPC_M2A_TX_QOS_SCH_CMD,
+	SHARED_PARAMS_IPC_M2A_TX_AGG_DURATION,
+	SHARED_PARAMS_IPC_M2A_FCS_GIVE_CMD,
+	SHARED_PARAMS_IPC_M2A_NODE_RATEDATA_CHANGE_CMD,
+	SHARED_PARAMS_IPC_M2A_OCS_TX_SUSPEND_CMD,
+	SHARED_PARAMS_IPC_M2A_TQEW_DESCR_LIMIT_CMD,
+	SHARED_PARAMS_IPC_M2A_ENABLE_VLAN_CMD,
+	SHARED_PARAMS_IPC_M2A_MU_GRP_UPDATE_CMD,
+	SHARED_PARAMS_IPC_M2A_MU_DBG_FLAG_UPDATE_CMD,
+	SHARED_PARAMS_IPC_M2A_MU_AIRTIME_PADDING_UPDATE_CMD,
+	/* end of M2A IPC config commands */
+	SHARED_PARAMS_IPC_M2A_CFG_PARAMS_MAX,
+
+	SHARED_PARAMS_IPC_M2A_MU_QMAT_UPDATE_CMD,
+	SHARED_PARAMS_IPC_M2A_SRESET_BEGIN_CMD,
+	SHARED_PARAMS_IPC_M2A_SRESET_END_CMD,
+	SHARED_PARAMS_IPC_M2A_PAUSE_ON_CMD,
+	SHARED_PARAMS_IPC_M2A_PAUSE_OFF_CMD,
+
+	/*
+	 * The following commands are used in the A2M IPC interrupt. They are kept in the
+	 * same enum so that most code can be shared between the A2M and M2A IPC paths.
+	 */
+	SHARED_PARAMS_IPC_A2M_FIRST_CMD = 0x100,
+	SHARED_PARAMS_IPC_A2M_AUC_BOOTED_CMD,
+	SHARED_PARAMS_IPC_A2M_BA_ADD_START_CMD,
+	SHARED_PARAMS_IPC_A2M_PANIC,
+	SHARED_PARAMS_IPC_A2M_TDLS_PTI_CMD,
+	SHARED_PARAMS_IPC_A2M_BB_RESET,
+#if QTN_HDP_MU_FCS_WORKROUND
+	SHARED_PARAMS_IPC_A2M_PUSH_WMAC1_FCS,
+#endif
+	SHARED_PARAMS_IPC_A2M_LAST_CMD,
+};
+
+enum qtn_exp_mat_cmd {	/* expansion-matrix control sub-commands */
+	EXP_MAT_DIS_CMD	= 0,
+	EXP_MAT_DEL_CMD= EXP_MAT_DIS_CMD,	/* delete is an alias of disable */
+	EXP_MAT_EN_CMD,
+	EXP_MAT_ADD_CMD,
+	EXP_MAT_FRZ_CMD,
+	EXP_MAT_NUSE_CMD,
+};
+
+#define AUC_IPC_CMD_BA_NODE		0x000000FF
+#define AUC_IPC_CMD_BA_NODE_S		0
+#define AUC_IPC_CMD_BA_TID		0x00000F00
+#define AUC_IPC_CMD_BA_TID_S		8
+#define AUC_IPC_CMD_BA_STATE		0x0000F000
+#define AUC_IPC_CMD_BA_STATE_S		12
+#define AUC_IPC_CMD_BA_FLAGS		0xFFFF0000
+#define AUC_IPC_CMD_BA_FLAGS_S		16
+#define AUC_IPC_CMD_BA_SUBFRM_MAX	0x000000FF
+#define AUC_IPC_CMD_BA_SUBFRM_MAX_S	0
+#define AUC_IPC_CMD_BA_WINSIZE		0x000FFF00
+#define AUC_IPC_CMD_BA_WINSIZE_S	8
+#define AUC_IPC_CMD_BA_AMSDU		0x00100000
+#define AUC_IPC_CMD_BA_AMSDU_S		20
+#define AUC_IPC_CMD_BA_AGG_TIMEOUT	0x0000FFFF
+#define AUC_IPC_CMD_BA_AGG_TIMEOUT_S	0
+#define AUC_IPC_CMD_AGG_TIMEOUT_UNIT	100	/* us */
+
+#define QTN_BA_ARGS_F_IMPLICIT		BIT(0)
+#define QTN_BA_ARGS_F_AMSDU		BIT(1)
+#define QTN_BA_ARGS_F_BLOCK_SINGLETON	BIT(2)
+
+#define AUC_IPC_CMD_AGGTIMEOUT_BE	0x0000FFFF
+#define AUC_IPC_CMD_AGGTIMEOUT_BE_S	0
+#define AUC_IPC_CMD_AGGTIMEOUT_BK	0xFFFF0000
+#define AUC_IPC_CMD_AGGTIMEOUT_BK_S	16
+#define AUC_IPC_CMD_AGGTIMEOUT_VI	0x0000FFFF
+#define AUC_IPC_CMD_AGGTIMEOUT_VI_S	0
+#define AUC_IPC_CMD_AGGTIMEOUT_VO	0xFFFF0000
+#define AUC_IPC_CMD_AGGTIMEOUT_VO_S	16
+
+/*
+ * AuC tx tunable params
+ */
+#define AUC_QOS_SCH_PARAM	0xF0000000
+#define AUC_QOS_SCH_PARAM_S	28
+#define AUC_QOS_SCH_VALUE	0x0FFFFFFF
+#define AUC_QOS_SCH_VALUE_S	0
+#define AUC_QOS_SCH_PARAM_AIRTIME_FAIRNESS	1
+#define AUC_QOS_SCH_PARAM_MERCY_RATIO		3
+#define AUC_QOS_SCH_PARAM_TID_THROT		4
+#define AUC_QOS_SCH_PARAM_AIRTIME_INTRABSS_LOAD_THRSH	5
+#define AUC_QOS_SCH_PARAM_AIRTIME_MARGIN	6
+#define AUC_QOS_SCH_PARAM_AIRTIME_TWEAK		7
+#define AUC_TX_AGG_BASE				8
+#define AUC_TX_AGG_FLAG				(AUC_TX_AGG_BASE + 0)
+#define AUC_TX_AGG_DYN_EAGER_THRSH		(AUC_TX_AGG_BASE + 1)
+#define AUC_TX_AGG_ADAP_SWITCH			(AUC_TX_AGG_BASE + 2)
+#define AUC_TX_OPTIM_FLAG				(AUC_TX_AGG_BASE + 3)
+
+#define QTN_AUC_THROT_NODE	0x0FF00000
+#define QTN_AUC_THROT_NODE_S	20
+#define QTN_AUC_THROT_TID	0x000F0000
+#define QTN_AUC_THROT_TID_S	16
+#define QTN_AUC_THROT_INTVL	0x0000F800
+#define QTN_AUC_THROT_INTVL_S	11
+#define QTN_AUC_THROT_QUOTA	0x000007FF
+#define QTN_AUC_THROT_QUOTA_S	0
+
+#define QTN_AUC_THROT_INTVL_MAX		(0x1F)
+#define QTN_AUC_THROT_INTVL_UNIT	(1 * 5)		/* ms */
+#define QTN_AUC_THROT_QUOTA_MAX		(0x7FF)
+#define QTN_AUC_THROT_QUOTA_UNIT	(1024 * 5)	/* byte */
+
+#define QTN_AUC_AIRFAIR_DFT	1
+#define QTN_AUC_AGG_ADAP_SWITCH_DFT	0
+#define QTN_AUC_TQEW_DESCR_LIMIT_PERCENT_DFT 75
+#define QTN_AUC_OPTIM_FLAG_DFT	0
+
+/*
+ * M2A event setting per-TID flags
+ */
+#define M2A_TIDFLAG_NODE        0x000000FF
+#define M2A_TIDFLAG_NODE_S      0
+#define M2A_TIDFLAG_TID         0x00000F00
+#define M2A_TIDFLAG_TID_S       8
+#define M2A_TIDFLAG_FLAG        0x00FF0000
+#define M2A_TIDFLAG_FLAG_S      16
+#define M2A_TIDFLAG_VAL         0xFF000000
+#define M2A_TIDFLAG_VAL_S       24
+
+enum shared_params_auc_ipc_irq	/* M2A IPC interrupt bit assignments */
+{
+	SHARED_PARAMS_IPC_M2A_SRESET_IRQ	= 0,
+	SHARED_PARAMS_IPC_M2A_CONFIG_IRQ,
+	SHARED_PARAMS_IPC_M2A_PAUSE_IRQ
+};
+
+enum shared_params_a2m_ipc_irq	/* A2M IPC interrupt bit assignments */
+{
+	/*
+	 * Currently only use 1 bit of IPC register and use "cmd" to expand the ipc usage.
+	 * This makes the top half and bottom half simple.
+	 */
+	SHARED_PARAMS_IPC_A2M_CFG_IRQ	= 0,
+};
+
+/*
+ * Command structure for both A2M and M2A IPC
+ */
+typedef struct shared_params_auc_ipc
+{
+	uint32_t cmd; /* "enum shared_params_auc_ipc_cmd" type, but want to ensure 32-bit size */
+	uint32_t arg1;
+	uint32_t arg2;
+	uint32_t arg3;
+	uint32_t ret;
+} shared_params_auc_ipc;
+
+struct qtn_auc_per_node_data_s;
+struct qtn_auc_misc_data_s;
+struct qtn_auc_per_mac_data_s;
+struct qtn_auc_mu_grp_tbl_elem_s;
+struct qtn_hal_tcm;
+
+typedef struct qtn_shared_node_stats {
+	/* Write by Muc only */
+	uint32_t qtn_rx_pkts;
+	uint32_t qtn_rx_bytes;
+	uint32_t qtn_rx_ucast;
+	uint32_t qtn_rx_bcast;
+	uint32_t qtn_rx_mcast;
+	uint32_t qtn_tx_pkts;
+	uint32_t qtn_tx_bytes;
+	uint32_t qtn_rx_vlan_pkts;
+
+	uint32_t qtn_tx_mcast; /* Lhost */
+	uint32_t qtn_muc_tx_mcast; /* Muc */
+	/*
+	 * The number of dropped data packets failed to transmit through
+	 * wireless media for each traffic category(TC).
+	 */
+	uint32_t qtn_tx_drop_data_msdu[WMM_AC_NUM]; /* AuC */
+} qtn_shared_node_stats_t;
+
+typedef struct qtn_shared_vap_stats {
+	/* Write by Muc only */
+	uint32_t qtn_rx_pkts;
+	uint32_t qtn_rx_bytes;
+	uint32_t qtn_rx_ucast;
+	uint32_t qtn_rx_bcast;
+	uint32_t qtn_rx_mcast;
+	uint32_t qtn_rx_dropped;
+	uint32_t qtn_tx_pkts;
+	uint32_t qtn_tx_bytes;
+
+	uint32_t qtn_tx_mcast; /* Lhost */
+	uint32_t qtn_muc_tx_mcast; /* Muc */
+	uint32_t qtn_tx_dropped; /* Auc */
+} qtn_shared_vap_stats_t;
+
+typedef struct shared_params_auc
+{
+#define SHARED_PARAMS_AUC_CONFIG_ASSERT_EN		BIT(0)
+#define SHARED_PARAMS_AUC_CONFIG_PRINT_EN		BIT(1)
+	u_int32_t				auc_config;
+	u_int32_t				a2l_printbuf_producer;
+	uint32_t				auc_tqe_sem_en;
+#define SHARED_PARAMS_AUC_IPC_STUB			((shared_params_auc_ipc*)1)
+	struct shared_params_auc_ipc		*m2a_ipc;	/* M2A */
+	struct shared_params_auc_ipc		*a2m_ipc;	/* A2M */
+	/*
+	 * 'ma_shared_buf' is used to transfer data btw MuC and AuC in IPC call.
+	 * So far it is used to pass node position in node cache and ieee80211
+	 * vht group. The buffer size is defined to exactly match those data:
+	 * sizeof(struct ieee80211_mu_groups_update)
+	 */
+#define MA_SHARED_BUF_SIZE	(150)
+	uint8_t					(*ma_shared_buf)[MA_SHARED_BUF_SIZE];
+	struct qtn_auc_per_node_data_s		**auc_per_node_data_ptr;
+	struct qtn_auc_misc_data_s		*auc_misc_data_ptr;
+	struct qtn_auc_per_mac_data_s		*auc_per_mac_data_ptr;
+	qtn_mproc_sync_mutex			*auc_per_node_mutex;
+	struct qtn_hal_tcm			*hal_tcm;
+	uint32_t				*auc_last_ilink1_p;
+	uint32_t				*auc_last_ilink2_p;
+	qtn_shared_node_stats_t			*node_stats;
+	qtn_shared_vap_stats_t			*vap_stats;
+	uint32_t				*per_ac_traffic_prev_second;
+	struct qtn_auc_mu_grp_tbl_elem_s	*mu_grp_tbl;
+	struct qtn_hal_tcm                      *hal_wmac1_tcm;
+	struct qtn_vlan_dev			**vdev_bus;
+	struct qtn_vlan_dev			**vport_bus;
+} shared_params_auc;
+
+RUBY_INLINE void
+qtn_mproc_sync_auc_ipc_set_arg1(volatile struct shared_params_auc_ipc *ipc, uint32_t arg1)
+{
+	ipc->arg1 = arg1;
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_auc_ipc_set_args(volatile struct shared_params_auc_ipc *ipc,
+		uint32_t arg1, uint32_t arg2, uint32_t arg3)
+{
+	ipc->arg1 = arg1;
+	ipc->arg2 = arg2;
+	ipc->arg3 = arg3;
+	ipc->ret = 0;
+}
+
+RUBY_INLINE uint32_t
+qtn_mproc_sync_auc_ipc_get_arg1(volatile struct shared_params_auc_ipc *ipc)
+{
+	return ipc->arg1;
+}
+
+RUBY_INLINE uint32_t
+qtn_mproc_sync_auc_ipc_get_arg2(volatile struct shared_params_auc_ipc *ipc)
+{
+	return ipc->arg2;
+}
+
+RUBY_INLINE uint32_t
+qtn_mproc_sync_auc_ipc_get_arg3(volatile struct shared_params_auc_ipc *ipc)
+{
+	return ipc->arg3;
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_auc_ipc_set_cmd(volatile struct shared_params_auc_ipc *ipc,
+		enum shared_params_auc_ipc_cmd cmd)
+{
+	ipc->cmd = cmd;
+}
+
+RUBY_INLINE int
+qtn_mproc_sync_auc_ipc_wait_ready(volatile struct shared_params_auc_ipc *ipc,
+		enum shared_params_auc_ipc_cmd cmd, int relax_count, uint32_t loop_count)
+{
+	uint32_t cnt = 0;
+
+	while (ipc->cmd != cmd) {
+		if ((loop_count > 0) && (cnt >= loop_count)) {
+			return -1;
+		}
+
+		qtn_mproc_sync_mutex_relax(relax_count);
+
+		cnt++;
+	}
+
+	return 0;
+}
+
+RUBY_INLINE enum shared_params_auc_ipc_cmd
+qtn_mproc_sync_auc_ipc_wait_mready(volatile struct shared_params_auc_ipc *ipc,
+		int relax_count, int loop_count)
+{
+	enum shared_params_auc_ipc_cmd cmd;
+	int loop = 0;
+
+	while(loop++ < loop_count) {
+		cmd = ipc->cmd;
+		if (cmd > SHARED_PARAMS_IPC_M2A_CFG_PARAMS_MIN &&
+				cmd < SHARED_PARAMS_IPC_M2A_CFG_PARAMS_MAX) {
+			return cmd;
+		}
+
+		qtn_mproc_sync_mutex_relax(relax_count);
+	}
+
+	return SHARED_PARAMS_IPC_NONE_CMD;
+}
+
+RUBY_INLINE int
+qtn_mproc_sync_auc_ipc_wait_done(volatile struct shared_params_auc_ipc *ipc,
+		int relax_count, uint32_t loop_count)
+{
+	return qtn_mproc_sync_auc_ipc_wait_ready(ipc,
+		SHARED_PARAMS_IPC_NONE_CMD, relax_count, loop_count);
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_auc_ipc_done(volatile struct shared_params_auc_ipc *ipc)
+{
+	qtn_mproc_sync_auc_ipc_set_cmd(ipc, SHARED_PARAMS_IPC_NONE_CMD);
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_auc_ipc_req(volatile struct shared_params_auc_ipc *ipc,
+		enum shared_params_auc_ipc_cmd cmd, int relax_count)
+{
+	qtn_mproc_sync_auc_ipc_set_cmd(ipc, cmd);
+	qtn_mproc_sync_auc_ipc_wait_done(ipc, relax_count, 0);
+}
+
+RUBY_INLINE void
+qtn_mproc_sync_auc_ipc_ack(volatile struct shared_params_auc_ipc *ipc,
+		enum shared_params_auc_ipc_cmd cmd, int relax_count)
+{
+	qtn_mproc_sync_auc_ipc_wait_ready(ipc, cmd, relax_count, 0);
+	qtn_mproc_sync_auc_ipc_done(ipc);
+}
+
+RUBY_INLINE int
+qtn_mproc_sync_auc_ipc_init_wait(volatile struct shared_params_auc *params, int relax_count)
+{
+	while (!params->m2a_ipc) {
+		qtn_mproc_sync_mutex_relax(relax_count);
+	}
+	return (params->m2a_ipc != SHARED_PARAMS_AUC_IPC_STUB);
+}
+
+#define topaz_mgmt_fcs_offset(buf, len)	roundup_ptr(((unsigned long) (buf)) + (len))
+
+#endif /* _TOPAZ_SHARED_PARAMS_H_ */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_tqe.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_tqe.h
new file mode 100644
index 0000000..36a7a0a
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_tqe.h
@@ -0,0 +1,73 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_TQE_H
+#define __TOPAZ_TQE_H
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/br_types.h>
+
+typedef void (*tqe_port_handler)(void *token,
+		const union topaz_tqe_cpuif_descr *descr,
+		struct sk_buff *skb, uint8_t *whole_frm_hdr);
+
+typedef const struct topaz_fwt_sw_mcast_entry*(
+		*tqe_fwt_get_mcast_hook)(uint16_t fwt_index, const void *addr, uint16_t ether_type);
+typedef const struct fwt_db_entry*(
+		*tqe_fwt_get_ucast_hook)(const unsigned char *src_mac_be, const unsigned char *dst_mac_be);
+typedef const struct fwt_db_entry*(
+		*tqe_fwt_get_from_mac_hook)(const unsigned char *mac_be);
+typedef int(*tqe_mac_reserved_hook)(const uint8_t *addr);
+typedef struct topaz_fwt_sw_mcast_entry*(
+		*tqe_fwt_get_mcast_ff_hook)(void);
+typedef void(*tqe_fwt_false_miss_hook)(int fwt_index, uint8_t false_miss);
+
+typedef int (*tqe_fwt_add_from_mac_hook)(const uint8_t *mac_be, uint8_t port_id,
+			uint8_t node_idx, const struct br_ip *group);
+typedef int (*tqe_fwt_del_from_mac_hook)(const uint8_t *mac_be);
+
+int tqe_port_add_handler(enum topaz_tqe_port port, tqe_port_handler handler, void *token);
+void tqe_port_remove_handler(enum topaz_tqe_port port);
+int tqe_tx(union topaz_tqe_cpuif_ppctl *ppctl, struct sk_buff *skb);
+void tqe_register_fwt_cbk(tqe_fwt_get_mcast_hook mcast_cbk_func,
+				tqe_fwt_get_mcast_ff_hook mcast_ff_get_cbk_func,
+				tqe_fwt_false_miss_hook false_miss_func);
+void tqe_register_ucastfwt_cbk(tqe_fwt_get_ucast_hook cbk_func);
+void tqe_register_macfwt_cbk(tqe_fwt_get_from_mac_hook cbk_func,
+			tqe_fwt_add_from_mac_hook add_func, tqe_fwt_del_from_mac_hook del_func);
+void tqe_register_mac_reserved_cbk(tqe_mac_reserved_hook cbk_func);
+int tqe_rx_multicast(void *queue, const union topaz_tqe_cpuif_descr *desc);
+void tqe_port_register(const enum topaz_tqe_port port);
+void tqe_port_unregister(const enum topaz_tqe_port port);
+void tqe_reg_multicast_tx_stats(void (*fn)(void *ctx, uint8_t), void *ctx);
+void tqe_port_set_group(const enum topaz_tqe_port port, int32_t group);
+void tqe_port_clear_group(const enum topaz_tqe_port port);
+uint32_t switch_tqe_multi_proc_sem_down(char * funcname, int linenum);
+uint32_t switch_tqe_multi_proc_sem_up(void);
+int tqe_rx_l2_ext_filter(union topaz_tqe_cpuif_descr *desc, struct sk_buff *skb);
+void tqe_rx_call_port_handler(union topaz_tqe_cpuif_descr *desc,
+		struct sk_buff *skb, uint8_t *whole_frm_hdr);
+
+#endif	/* __TOPAZ_TQE_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_tqe_cpuif.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_tqe_cpuif.h
new file mode 100644
index 0000000..0d41069
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_tqe_cpuif.h
@@ -0,0 +1,570 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_TQE_CPUIF_PLATFORM_H
+#define __TOPAZ_TQE_CPUIF_PLATFORM_H
+
+#include "mproc_sync_base.h"
+
+enum topaz_tqe_port {
+	TOPAZ_TQE_FIRST_PORT	= 0,
+
+	TOPAZ_TQE_EMAC_0_PORT	= 0,
+	TOPAZ_TQE_EMAC_1_PORT	= 1,
+	TOPAZ_TQE_WMAC_PORT	= 2,
+	TOPAZ_TQE_PCIE_PORT	= 3,
+	TOPAZ_TQE_LHOST_PORT	= 4,
+	TOPAZ_TQE_MUC_PORT	= 5,
+	TOPAZ_TQE_DSP_PORT	= 6,
+	TOPAZ_TQE_AUC_PORT	= 7,
+
+	TOPAZ_TQE_NUM_PORTS	= 8,
+	TOPAZ_TQE_DROP_PORT	= 15
+};
+
+enum topaz_mproc_tqe_sem_id
+{
+	TOPAZ_MPROC_TQE_SEM_INVALID	= 0,
+	TOPAZ_MPROC_TQE_SEM_LHOST	= 1,
+	TOPAZ_MPROC_TQE_SEM_MUC		= 2,
+	TOPAZ_MPROC_TQE_SEM_AUC		= 3
+};
+
+/* bits of port_id  */
+#define PORT_ID_BITS                       8
+/* high 3 bits in port_id is used to save dev_id for 2.4G VAP */
+#define DEV_ID_BITS                        3
+
+#define MAX_QFP_NETDEV                     (1 << DEV_ID_BITS)
+#define MAX_DEV_ID                         (1 << DEV_ID_BITS)
+#define MAX_PORT_ID                        (1 << (PORT_ID_BITS - DEV_ID_BITS))
+
+#define GET_BIT_FIELD(var, offset, width) \
+		(((var) >> (offset)) & ((1 << (width)) - 1))
+
+#define INJECT_DEV_ID_TO_PORT_ID(port_id, dev_id, target) \
+		do {\
+			BUG_ON((port_id) >= MAX_PORT_ID || (dev_id) >= MAX_DEV_ID); \
+			(target) = (port_id) | ((dev_id) << (PORT_ID_BITS - DEV_ID_BITS)); \
+		} while (0)
+#define EXTRACT_PORT_ID_FROM_PORT_ID(port_id) \
+		GET_BIT_FIELD((port_id), 0, (PORT_ID_BITS - DEV_ID_BITS))
+#define EXTRACT_DEV_ID_FROM_PORT_ID(port_id) \
+		GET_BIT_FIELD((port_id), (PORT_ID_BITS - DEV_ID_BITS), DEV_ID_BITS)
+
+#define TOPAZ_TQE_PORT_NAMES	{ "emac0", "emac1", "wmac", "pcie", "lhost", "muc", "dsp", "auc", \
+					"", "", "", "", "", "", "", "drop", }
+#define TOPAZ_TQE_PORT_IS_EMAC(_port)	(((_port) == TOPAZ_TQE_EMAC_0_PORT) || \
+						((_port) == TOPAZ_TQE_EMAC_1_PORT))
+#define TOPAZ_TQE_PORT_IS_WMAC(_port)	((_port) == TOPAZ_TQE_WMAC_PORT)
+
+#if defined(__linux__)
+	#define TOPAZ_TQE_LOCAL_CPU	TOPAZ_TQE_LHOST_PORT
+#elif defined(ARCSHELL)
+	#define TOPAZ_TQE_LOCAL_CPU	TOPAZ_TQE_LHOST_PORT
+#elif defined(MUC_BUILD)
+	#define TOPAZ_TQE_LOCAL_CPU	TOPAZ_TQE_MUC_PORT
+#elif defined(DSP_BUILD)
+	#define TOPAZ_TQE_LOCAL_CPU	TOPAZ_TQE_DSP_PORT
+#elif defined(AUC_BUILD)
+	#define TOPAZ_TQE_LOCAL_CPU	TOPAZ_TQE_AUC_PORT
+#else
+	#error No TOPAZ_TQE_LOCAL_CPU set
+#endif
+
+union topaz_tqe_cpuif_descr
+{
+	struct
+	{
+		uint32_t dw0;
+		uint32_t dw1;
+		uint32_t dw2;
+		uint32_t dw3;
+	} raw;
+	struct
+	{
+		signed buff_ptr_offset:16;
+		unsigned misc_user:10;
+		unsigned __reserved1:5;
+		unsigned own:1;
+		unsigned length:16;
+		enum topaz_tqe_port in_port:4;
+		unsigned need_to_free:1;
+		unsigned __reserved2:3;
+		unsigned control:8;
+		void *pkt;
+		union topaz_tqe_cpuif_descr *next;
+	} data;
+};
+
+#define TQE_MISCUSER_A2M_TYPE		0x300
+#define TQE_MISCUSER_A2M_TYPE_S		8
+#define TQE_MISCUSER_A2M_TYPE_PARAM	0x0FF
+#define TQE_MISCUSER_A2M_TYPE_PARAM_S	0
+
+#define TQE_MISCUSER_A2M_TYPE_TXFEEDBACK	0
+#define TQE_MISCUSER_A2M_TYPE_RXPKT		1
+#define TQE_MISCUSER_A2M_TYPE_TXPKT		2
+
+#define TQE_MISCUSER_M2L_DATA_NODE_IDX		0x7F
+#define TQE_MISCUSER_M2L_DATA_NODE_IDX_S	0
+#if TOPAZ_SWITCH_OUT_NODE_MASK != TQE_MISCUSER_M2L_DATA_NODE_IDX
+	#error Node cache index misc_user must support 128 entries
+#endif
+#define TQE_MISCUSER_M2L_DATA_3ADDR_BR		0x80
+#define TQE_MISCUSER_M2L_DATA_3ADDR_BR_S	7
+#define TQE_MISCUSER_M2L_DROP			0x100
+#define TQE_MISCUSER_M2L_DROP_S			8
+
+#define TQE_MISCUSER_L2A_NO_AMSDU	0x002
+#define TQE_MISCUSER_L2A_RATE_TRAINING	0x008
+#define TQE_MISCUSER_L2A_RESERVED_FOR_A2A		0x10	/* place holder for A2A below */
+
+#define TQE_MISCUSER_M2A_MGMT_SKIP_RATE_RETRY1		0x01
+#define TQE_MISCUSER_M2A_MGMT_SKIP_RATE_RETRY1_S	0
+#define TQE_MISCUSER_M2A_MGMT_OCS_FRAME			0x02
+#define TQE_MISCUSER_M2A_MGMT_OCS_FRAME_S		1
+#define TQE_MISCUSER_M2A_EVENT_VIA_TQE			0x04
+#define TQE_MISCUSER_M2A_EVENT_VIA_TQE_S		2
+#define TQE_MISCUSER_M2A_MGMT_PROBE_FRAME		0x08
+#define TQE_MISCUSER_M2A_MGMT_PROBE_FRAME_S		3
+#define TQE_MISCUSER_M2A_RESERVED_FOR_A2A		0x10	/* place holder for A2A below */
+#define TQE_MISCUSER_M2A_MGMT_GROUP			0x20
+#define TQE_MISCUSER_M2A_MGMT_GROUP_S			5
+
+/*
+ * At the ethq stage, only tqew descriptor is available for use. Some place holder have been added
+ * above in M2A and L2A define to reserve bits.
+ */
+#define TQE_MISCUSER_A2A_GROUP				0x10
+
+
+#define TQE_MISCUSER_DTIM_GROUP		(TQE_MISCUSER_M2A_MGMT_GROUP | TQE_MISCUSER_A2A_GROUP)
+
+union topaz_tqe_cpuif_q_ptr_status
+{
+	uint32_t raw;
+	struct {
+		unsigned write_idx:15;
+		unsigned write_idx_wrap:1;
+		unsigned read_idx:15;
+		unsigned read_idx_wrap:1;
+	} data;
+};
+
+union topaz_tqe_cpuif_status
+{
+	uint32_t raw;
+	struct {
+		unsigned available:16;
+		unsigned __reserved1:14;
+		unsigned empty:1;
+		unsigned full:1;
+	} data;
+};
+
+union topaz_tqe_cpuif_tx_start
+{
+#define TOPAZ_TQE_CPUIF_TX_START_NREADY		RUBY_BIT(0)
+#define TOPAZ_TQE_CPUIF_TX_START_NOT_SUCCESS	RUBY_BIT(30)
+#define TOPAZ_TQE_CPUIF_TX_START_SUCCESS	RUBY_BIT(31)
+#define TOPAZ_TQE_CPUIF_TX_START_DELIVERED	(TOPAZ_TQE_CPUIF_TX_START_NOT_SUCCESS | TOPAZ_TQE_CPUIF_TX_START_SUCCESS)
+	uint32_t raw;
+	struct {
+		unsigned nready:1;
+		unsigned __reserved1:29;
+		unsigned not_success:1;
+		unsigned success:1;
+	} data;
+};
+
+union topaz_tqe_cpuif_ppctl
+{
+	struct
+	{
+#define TOPAZ_TQE_CPUIF_SM(val, mask, shift)	(((uint32_t)(val) & (mask)) << (shift))
+#define TOPAZ_TQE_CPUIF_PPCTL_DW0(descr)	TOPAZ_TQE_CPUIF_SM(descr, 0xFFFFFFFF, 0)
+		uint32_t ppctl0;
+#define TOPAZ_TQE_CPUIF_PPCTL_DW1(pkt)		TOPAZ_TQE_CPUIF_SM(pkt, 0xFFFFFFFF, 0)
+		uint32_t ppctl1;
+#define TOPAZ_TQE_CPUIF_PPCTL_DW2(out_pri, out_node, out_port, out_portal, out_node_1, out_node_1_en, out_node_2, out_node_2_en) \
+						TOPAZ_TQE_CPUIF_SM(out_pri, 0xF, 0) | \
+						TOPAZ_TQE_CPUIF_SM(out_node, 0x7F, 4) | \
+						TOPAZ_TQE_CPUIF_SM(out_port, 0xF, 11) | \
+						TOPAZ_TQE_CPUIF_SM(out_portal, 0x1, 15) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_1, 0x7F, 16) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_1_en, 0x1, 23) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_2, 0x7F, 24) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_2_en, 0x1, 31)
+		uint32_t ppctl2;
+#define TOPAZ_TQE_CPUIF_PPCTL_DW3(out_node_3, out_node_3_en, out_node_4, out_node_4_en, out_node_5, out_node_5_en, out_node_6, out_node_6_en) \
+						TOPAZ_TQE_CPUIF_SM(out_node_3, 0x7F, 0) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_3_en, 0x1, 7) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_4, 0x7F, 8) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_4_en, 0x1, 15) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_5, 0x7F, 16) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_5_en, 0x1, 23) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_6, 0x7F, 24) | \
+						TOPAZ_TQE_CPUIF_SM(out_node_6_en, 0x1, 31)
+		uint32_t ppctl3;
+#define TOPAZ_TQE_CPUIF_PPCTL_DW4(buff_ptr_offset, sa_match, da_match, mcast, free, buff_pool_num, tqe_free) \
+						TOPAZ_TQE_CPUIF_SM(buff_ptr_offset, 0xFFFF, 0) | \
+						TOPAZ_TQE_CPUIF_SM(sa_match, 0x1, 24) | \
+						TOPAZ_TQE_CPUIF_SM(da_match, 0x1, 25) | \
+						TOPAZ_TQE_CPUIF_SM(mcast, 0x1, 26) | \
+						TOPAZ_TQE_CPUIF_SM(free, 0x1, 27) | \
+						TOPAZ_TQE_CPUIF_SM(buff_pool_num, 0x3, 28) | \
+						TOPAZ_TQE_CPUIF_SM(tqe_free, 0x1, 30)
+		uint32_t ppctl4;
+#define TOPAZ_TQE_CPUIF_PPCTL_DW5(length, misc_user) \
+						TOPAZ_TQE_CPUIF_SM(length, 0xFFFF, 0) | \
+						TOPAZ_TQE_CPUIF_SM(misc_user, 0x3FF, 16)
+		uint32_t ppctl5;
+	} raw;
+	struct
+	{
+		void *descr;
+		void *pkt;
+		unsigned out_pri:4;
+		unsigned out_node_0:7;
+		enum topaz_tqe_port out_port:4;
+		unsigned portal:1;
+		unsigned out_node_1:7;
+		unsigned out_node_1_en:1;
+		unsigned out_node_2:7;
+		unsigned out_node_2_en:1;
+		unsigned out_node_3:7;
+		unsigned out_node_3_en:1;
+		unsigned out_node_4:7;
+		unsigned out_node_4_en:1;
+		unsigned out_node_5:7;
+		unsigned out_node_5_en:1;
+		unsigned out_node_6:7;
+		unsigned out_node_6_en:1;
+		signed buff_ptr_offset:16;
+		unsigned __reserved1:8;
+		unsigned sa_match:1;
+		unsigned da_match:1;
+		unsigned mcast:1;
+		unsigned free:1;
+		unsigned buff_pool_num:2;
+		unsigned tqe_free:1;
+		unsigned __reserved2:1;
+		unsigned length:16;
+		unsigned misc_user:10;
+		unsigned __reserved3:6;
+	} data;
+};
+
+RUBY_INLINE void
+topaz_tqe_cpuif_ppctl_clear(union topaz_tqe_cpuif_ppctl *pp)
+{
+	pp->raw.ppctl0 = 0;
+	pp->raw.ppctl1 = 0;
+	pp->raw.ppctl2 = 0;
+	pp->raw.ppctl3 = 0;
+	pp->raw.ppctl4 = 0;
+	pp->raw.ppctl5 = 0;
+}
+
+RUBY_INLINE void
+topaz_tqe_cpuif_ppctl_init(union topaz_tqe_cpuif_ppctl *pp,
+		uint8_t port, const uint8_t *const nodes, uint8_t node_count, uint8_t pri,
+		uint8_t portal, uint8_t free, uint8_t buff_pool, uint8_t tqe_free, uint16_t misc_user)
+{
+	pp->raw.ppctl0 = TOPAZ_TQE_CPUIF_PPCTL_DW0(0);
+	pp->raw.ppctl1 = TOPAZ_TQE_CPUIF_PPCTL_DW1(0);
+	pp->raw.ppctl2 = TOPAZ_TQE_CPUIF_PPCTL_DW2(pri, nodes ? nodes[0] : 0, port, portal, 0, 0, 0, 0);
+	pp->raw.ppctl3 = TOPAZ_TQE_CPUIF_PPCTL_DW3(0, 0, 0, 0, 0, 0, 0, 0);
+	pp->raw.ppctl4 = TOPAZ_TQE_CPUIF_PPCTL_DW4(0, 0, 0, (node_count > 1), free, buff_pool, tqe_free);
+	pp->raw.ppctl5 = TOPAZ_TQE_CPUIF_PPCTL_DW5(0, misc_user);
+#if 0
+#define	_outnode(_i)	do {							\
+		if ((_i) <= node_count) {					\
+			pp->data.out_node_##_i = nodes[(_i)-1];			\
+			pp->data.out_node_##_i##_en = 1;			\
+		}								\
+	} while(0)
+
+	if(nodes) {
+		/* Multicast nodes number range from 1->6. unicast set to 0*/
+		_outnode(1);
+		_outnode(2);
+		_outnode(3);
+		_outnode(4);
+		_outnode(5);
+		_outnode(6);
+	}
+#undef	_outnode
+#endif
+}
+
+RUBY_INLINE int
+topaz_tqe_cpuif_port_to_num(enum topaz_tqe_port port)
+{
+	if (port == TOPAZ_TQE_PCIE_PORT) {
+		return 4;
+	} else {
+		return port - TOPAZ_TQE_LHOST_PORT;
+	}
+}
+
+RUBY_INLINE void
+__topaz_tqe_cpuif_setup_irq(enum topaz_tqe_port port, int enable, unsigned int threshold)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	uint32_t csr = qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_CSR(num));
+
+	csr &= ~TOPAZ_TQE_CPUIF_CSR_IRQ_THRESHOLD(~0x0);
+	if (threshold) {
+		csr |= TOPAZ_TQE_CPUIF_CSR_IRQ_THRESHOLD_EN;
+		csr |= TOPAZ_TQE_CPUIF_CSR_IRQ_THRESHOLD(threshold);
+	} else {
+		csr &= ~TOPAZ_TQE_CPUIF_CSR_IRQ_THRESHOLD_EN;
+	}
+
+	if (enable) {
+		csr |= TOPAZ_TQE_CPUIF_CSR_IRQ_EN;
+	} else {
+		csr &= ~TOPAZ_TQE_CPUIF_CSR_IRQ_EN;
+	}
+
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_TQE_CPUIF_CSR(num), csr); /* can be used to disable/enable, so better to have barrier*/
+}
+
+RUBY_INLINE void
+topaz_tqe_cpuif_setup_irq(int enable, unsigned int threshold)
+{
+	__topaz_tqe_cpuif_setup_irq(TOPAZ_TQE_LOCAL_CPU, enable, threshold);
+}
+
+RUBY_INLINE void
+__topaz_tqe_cpuif_setup_reset(enum topaz_tqe_port port, int reset)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	uint32_t csr = qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_CSR(num));
+	if (reset) {
+		qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_CSR(num), csr | TOPAZ_TQE_CPUIF_CSR_RESET);
+	} else {
+		qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_CSR(num), csr & ~TOPAZ_TQE_CPUIF_CSR_RESET);
+	}
+}
+
+RUBY_INLINE void
+topaz_tqe_cpuif_setup_reset(int reset)
+{
+	__topaz_tqe_cpuif_setup_reset(TOPAZ_TQE_LOCAL_CPU, reset);
+}
+
+RUBY_INLINE void
+__topaz_tqe_cpuif_setup_ring(enum topaz_tqe_port port, union topaz_tqe_cpuif_descr *base, uint16_t count)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_RX_RING(num), (uint32_t)base);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_RX_RING_SIZE(num), count);
+}
+
+RUBY_INLINE void
+topaz_tqe_cpuif_setup_ring(union topaz_tqe_cpuif_descr *base, uint16_t count)
+{
+	__topaz_tqe_cpuif_setup_ring(TOPAZ_TQE_LOCAL_CPU, base, count);
+}
+
+RUBY_INLINE uint16_t
+__topaz_tqe_cpuif_get_ring_size(enum topaz_tqe_port port)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	return qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_RX_RING_SIZE(num));
+}
+
+RUBY_INLINE uint16_t
+topaz_tqe_cpuif_get_ring_size(void)
+{
+	return __topaz_tqe_cpuif_get_ring_size(TOPAZ_TQE_LOCAL_CPU);
+}
+
+RUBY_INLINE union topaz_tqe_cpuif_descr*
+__topaz_tqe_cpuif_get_curr(enum topaz_tqe_port port)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	return (union topaz_tqe_cpuif_descr*)
+		qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_RX_CURPTR(num));
+}
+
+RUBY_INLINE union topaz_tqe_cpuif_descr*
+topaz_tqe_cpuif_get_curr(void)
+{
+	return __topaz_tqe_cpuif_get_curr(TOPAZ_TQE_LOCAL_CPU);
+}
+
+RUBY_INLINE void
+__topaz_tqe_cpuif_put_back(enum topaz_tqe_port port, union topaz_tqe_cpuif_descr * descr)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_PKT_FINISH(num), (uint32_t)descr);
+}
+
+RUBY_INLINE void
+topaz_tqe_cpuif_put_back(union topaz_tqe_cpuif_descr * descr)
+{
+	__topaz_tqe_cpuif_put_back(TOPAZ_TQE_LOCAL_CPU, descr);
+}
+
+RUBY_INLINE union topaz_tqe_cpuif_q_ptr_status
+__topaz_tqe_cpuif_get_q_ptr_status(enum topaz_tqe_port port)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	union topaz_tqe_cpuif_q_ptr_status status;
+	status.raw = qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_Q_PTR_STATUS(num));
+	return status;
+}
+
+RUBY_INLINE union topaz_tqe_cpuif_q_ptr_status
+topaz_tqe_cpuif_get_q_ptr_status(void)
+{
+	return __topaz_tqe_cpuif_get_q_ptr_status(TOPAZ_TQE_LOCAL_CPU);
+}
+
+RUBY_INLINE union topaz_tqe_cpuif_status
+__topaz_tqe_cpuif_get_status(enum topaz_tqe_port port)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	union topaz_tqe_cpuif_status status;
+	status.raw = qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_STATUS(num));
+	return status;
+}
+
+RUBY_INLINE union topaz_tqe_cpuif_status
+topaz_tqe_cpuif_get_status(void)
+{
+	return __topaz_tqe_cpuif_get_status(TOPAZ_TQE_LOCAL_CPU);
+}
+
+RUBY_INLINE int
+__topaz_tqe_cpuif_tx_nready(enum topaz_tqe_port port)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	return (qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_TXSTART(num)) &
+		TOPAZ_TQE_CPUIF_TX_START_NREADY);
+}
+
+RUBY_INLINE int
+topaz_tqe_cpuif_tx_nready(void)
+{
+	return __topaz_tqe_cpuif_tx_nready(TOPAZ_TQE_LOCAL_CPU);
+}
+
+RUBY_INLINE int
+__topaz_tqe_cpuif_tx_success(enum topaz_tqe_port port)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	uint32_t tx_start = qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_TXSTART(num));
+
+	if ((tx_start & TOPAZ_TQE_CPUIF_TX_START_NREADY) ||
+			!(tx_start & TOPAZ_TQE_CPUIF_TX_START_DELIVERED)) {
+		return -1;
+	} else if (tx_start & TOPAZ_TQE_CPUIF_TX_START_SUCCESS) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+RUBY_INLINE int
+topaz_tqe_cpuif_tx_success(void)
+{
+	return __topaz_tqe_cpuif_tx_success(TOPAZ_TQE_LOCAL_CPU);
+}
+
+RUBY_INLINE int
+__topaz_tqe_cpuif_ppctl_write(enum topaz_tqe_port port, const union topaz_tqe_cpuif_ppctl *ctl)
+{
+	int num = topaz_tqe_cpuif_port_to_num(port);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_PPCTL0(num), ctl->raw.ppctl0);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_PPCTL1(num), ctl->raw.ppctl1);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_PPCTL2(num), ctl->raw.ppctl2);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_PPCTL3(num), ctl->raw.ppctl3);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_PPCTL4(num), ctl->raw.ppctl4);
+	qtn_mproc_sync_mem_write_wmb(TOPAZ_TQE_CPUIF_PPCTL5(num), ctl->raw.ppctl5);
+	return num;
+}
+
+RUBY_INLINE int
+topaz_tqe_cpuif_ppctl_write(const union topaz_tqe_cpuif_ppctl *ctl)
+{
+	return __topaz_tqe_cpuif_ppctl_write(TOPAZ_TQE_LOCAL_CPU, ctl);
+}
+
+RUBY_INLINE void
+__topaz_tqe_cpuif_tx_start(enum topaz_tqe_port port, const union topaz_tqe_cpuif_ppctl *ctl)
+{
+	int num = __topaz_tqe_cpuif_ppctl_write(port, ctl);
+	qtn_mproc_sync_mem_write(TOPAZ_TQE_CPUIF_TXSTART(num), TOPAZ_TQE_CPUIF_TX_START_NREADY);
+}
+
+RUBY_INLINE void
+topaz_tqe_cpuif_tx_start(const union topaz_tqe_cpuif_ppctl *ctl)
+{
+	__topaz_tqe_cpuif_tx_start(TOPAZ_TQE_LOCAL_CPU, ctl);
+}
+
+#define TQE_SEMA_GET_MAX			0xFFFF
+
+#define QTN_WAIT_TQE_CPUIF_LOOP_MASK		0xFFFF
+RUBY_INLINE void topaz_tqe_wait(void)
+{
+	uint32_t loop = 0;
+
+	while (topaz_tqe_cpuif_tx_nready()) {
+		loop++;
+		if ((loop & ~QTN_WAIT_TQE_CPUIF_LOOP_MASK) &&
+				!((loop) & QTN_WAIT_TQE_CPUIF_LOOP_MASK)) {
+#ifdef __KERNEL__
+			printk("stuck in topaz_tqe_wait()\n");
+#endif
+#ifdef MUC_BUILD
+			uc_printk("stuck in topaz_tqe_wait()\n");
+#endif
+		}
+	}
+}
+
+RUBY_INLINE void topaz_tqe_emac_reflect_to(const uint8_t out_port, const int bonded)
+{
+	if (out_port < TOPAZ_TQE_NUM_PORTS) {
+		uint32_t done_dly = qtn_mproc_sync_mem_read(TOPAZ_TQE_MISC);
+
+		done_dly &= ~TOPAZ_TQE_MISC_RFLCT_OUT_PORT;
+		done_dly |= SM(out_port, TOPAZ_TQE_MISC_RFLCT_OUT_PORT) |
+						TOPAZ_TQE_MISC_RFLCT_OUT_PORT_ENABLE;
+		if (bonded) {
+			done_dly |= TOPAZ_TQE_MISC_RFLCT_2_OUT_PORT_ENABLE;
+		}
+		qtn_mproc_sync_mem_write(TOPAZ_TQE_MISC, done_dly);
+#if defined (__KERNEL__) && defined (DEBUG)
+		printk("TOPAZ_TQE_MISC: 0x%x\n", done_dly);
+#endif
+	}
+}
+#endif /* #ifndef __TOPAZ_TQE_CPUIF_PLATFORM_H */
+
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_vlan_cpuif.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_vlan_cpuif.h
new file mode 100644
index 0000000..e0c8bf4
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/topaz_vlan_cpuif.h
@@ -0,0 +1,88 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TOPAZ_VLAN_CPUIF_PLATFORM_H
+#define __TOPAZ_VLAN_CPUIF_PLATFORM_H
+
+#include <common/topaz_platform.h>
+#include <qtn/mproc_sync_base.h>
+
+union topaz_vlan_entry {
+	uint32_t raw;
+	struct {
+		uint32_t	out_node	:7,
+				out_port	:3,
+				valid		:1,
+				__unused	:22;
+	} data;
+};
+
+#define TOPAZ_VLAN_ENTRY_INIT	{ 0 }
+
+RUBY_INLINE void topaz_vlan_set_entry(uint16_t vlan_id, union topaz_vlan_entry e)
+{
+	qtn_mproc_sync_mem_write(TOPAZ_VLAN_ENTRY_ADDR(vlan_id), e.raw);
+}
+
+RUBY_INLINE void topaz_vlan_clear_entry(uint16_t vlan_id)
+{
+	qtn_mproc_sync_mem_write(TOPAZ_VLAN_ENTRY_ADDR(vlan_id), 0x0);
+}
+
+RUBY_INLINE void topaz_vlan_clear_all_entries(void)
+{
+	int vlan_id;
+	for (vlan_id = 0; vlan_id < TOPAZ_VLAN_ENTRIES; ++vlan_id) {
+		topaz_vlan_clear_entry(vlan_id);
+	}
+}
+
+RUBY_INLINE void topaz_vlan_set(uint16_t vlan_id, uint8_t out_port, uint8_t out_node)
+{
+	union topaz_vlan_entry e = TOPAZ_VLAN_ENTRY_INIT;
+	e.data.out_node = out_node;
+	e.data.out_port = out_port;
+	e.data.valid = 1;
+	topaz_vlan_set_entry(vlan_id, e);
+}
+
+RUBY_INLINE union topaz_vlan_entry topaz_vlan_get_entry(uint16_t vlan_id)
+{
+	union topaz_vlan_entry e;
+	e.raw = qtn_mproc_sync_mem_read(TOPAZ_VLAN_ENTRY_ADDR(vlan_id));
+	return e;
+}
+
+#ifndef TOPAZ_TEST_ASSERT_EQUAL
+# define TOPAZ_TEST_ASSERT_EQUAL(a, b)	if ((a) != (b)) { return -1; }
+#endif
+RUBY_INLINE int topaz_vlan_entry_bitfield_test(const union topaz_vlan_entry *e)
+{
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw, TOPAZ_VLAN_OUT_NODE), e->data.out_node);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw, TOPAZ_VLAN_OUT_PORT), e->data.out_port);
+	TOPAZ_TEST_ASSERT_EQUAL(MS(e->raw, TOPAZ_VLAN_VALID), e->data.valid);
+
+	return 0;
+}
+
+#endif	/* __TOPAZ_VLAN_CPUIF_PLATFORM_H */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/txbf_common.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/txbf_common.h
new file mode 100644
index 0000000..57dc1ff
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/txbf_common.h
@@ -0,0 +1,702 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2008 - 2009 Quantenna Communications Inc            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Date        : 01/28/09                                                   **
+**  File        : txbf_common.h                                              **
+**  Description :                                                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _TXBF_API_H_
+#define _TXBF_API_H_
+
+/*
+ * Enable installation of a fixed Q matrix in place of the derived ones.
+ * The matrix should be put in the file drivers/qdrv/fixedqmat.h
+ * The format of the data should be word values comma delimited.
+ */
+#define TXBF_CANNED_QMAT
+#undef TXBF_CANNED_QMAT
+
+/* Until we get 2 tx antennas working on a 2x4 station */
+#define HAL_2x4STA_USE_4_TX_ANTENNAS
+
+/*
+ * Comment this line to disable locking.
+ * Or set to 1 to enable manual locking.
+ * Or set to 0 to enable hw-centric automatic locking.
+ */
+#define QTN_TXBF_FFT_LOCK_MANUAL	(1)
+
+/* VHTTXBFTBD - 11ac BF enabled by default */
+#define TXBF_ENABLE_VHT_BF
+
+/* Use sw generated VHT BF path */
+/* #define TXBF_VHT_SW_FEEDBACK */
+/* #define TXBF_VHT_SW_UNCOMPRESSED */	/* for debugging */
+
+/* Expansion Matrix Modes */
+#define TXBF_MODE_NO_MATRIX		0
+#define TXBF_MODE_DEFAULT_MATRIX	1
+#define TXBF_MODE_BBF			2
+#define TXBF_MODE_STD_BF		3
+
+/* Enable (1) both hw and sw generated VHT BF feedback, used for debugging */
+#define TXBF_VHT_HW_AND_SW_FEEDBACK	0
+
+/*
+ * Lifecycle states of a Tx beamforming buffer as a sounding/action frame
+ * moves between the MAC and the DSP.
+ */
+enum txbf_buff_state
+{
+	/* Tx BF buffer for the frame is free */
+	TXBF_BUFF_FREE		= 0,
+	/* The frame is stored in Tx BF buffer for processing and can not be released*/
+	TXBF_BUFF_IN_USE	= 1,
+	/* Not used */
+	TXBF_DMA_FROM_BB	= 2,
+	/* NDP only. The frame is being processed by DSP */
+	TXBF_DSP_PROC		= 3,
+	/* DSP completes frame processing */
+	TXBF_DSP_DONE		= 4,
+	/* For action frame only. Action frame is stored in action frame cache */
+	TXBF_DSP_STORED		= 5
+};
+
+#define TXBF_BF_VER1	1	/* Envy */
+#define TXBF_BF_VER2	2	/* 2 stream Ruby */
+#define TXBF_BF_VER3	3	/* 4 stream non-tone grouping Ruby */
+#define TXBF_BF_VER4	4	/* 4 stream tone grouping Ruby and later */
+/*
+ * Version 4 means the action frames generated are now standards compliant
+ * and the BF parameters are derived from the various fields and association
+ * exchange, rather than from using a new version for each combination
+ */
+
+/*
+ * These structures are shared between Linux, MuC and DSP.
+ */
+/*
+ * Per-NDP (sounding) receive information.  Shared between Linux, MuC and
+ * DSP (see comment above), so field order and sizes are part of the
+ * cross-processor ABI - do not reorder or resize.
+ */
+struct txbf_ndp_info
+{
+	char bw_mode;
+	char rxgain;
+	char MN;
+	char hwnoise;
+	char max_gain;
+	char macaddr[6];		/* peer MAC address - presumably the NDP transmitter; confirm */
+	signed char reg_scale_fac;
+	unsigned char Nsts;		/* NOTE(review): likely 802.11 Nsts (space-time streams) - confirm */
+	unsigned char Ness;		/* NOTE(review): likely 802.11 Ness (extension streams) - confirm */
+};
+
+#define TXBF_MUC_DSP_SHMEM_START (0x80060000)
+
+#define QTN_MU_QMAT_MAX_SLOTS		3
+
+/* Beamforming message types */
+#define QTN_TXBF_NDP_RX_MSG		1
+#define QTN_TXBF_ACT_FRM_TX_MSG		2
+#define QTN_TXBF_ACT_FRM_RX_MSG		3
+#define QTN_TXBF_ACT_FRM_FREE_MSG	4
+#define QTN_TXBF_DEL_MU_NODE_MSG	5
+#define QTN_TXBF_MU_GRP_UPD_DONE_MSG	6
+#define QTN_TXBF_TRIG_MU_GRP_SEL_MSG	7
+#define QTN_TXBF_RATE_TRAIN_MSG		8
+#define QTN_TXBF_RATE_TRAIN_HASH_MSG	9
+#define QTN_TXBF_NDP_DISCARD_MSG	10
+#define QTN_TXBF_SET_OPTI_NODE_MSG	11
+
+#define QTN_TXBF_ACT_FRM_XTRA_HDR_LEN	10
+
+#define QTN_TXBF_MODE_HT		0
+#define QTN_TXBF_MODE_VHT		1
+
+#define QTN_TXBF_NO_EXPMAT		0xFFFF
+
+#define MU_NDPA_TOKEN_MASK		0x1F
+
+/* Number of 10ms timeslots used on the DSP to process feedback */
+#define QTN_TXBF_SU_DSP_TIMESLOTS	1
+#define QTN_TXBF_MU_DSP_TIMESLOTS	2
+
+/* For backward compatibility: the SU token value was historically chosen as 0x33,
+so a set bit 5 now indicates SU sounding. */
+#define MU_NDPA_SU_MASK			0x20
+#define MU_NDPA_GRP_SND_MASK		0x10
+#define IS_MU_GRP_SND(token)		((token) & MU_NDPA_GRP_SND_MASK)
+
+/* TODO: Needs reworking. Some fields (at least mu_grp_id) are used only by
+distinct message type */
+/*
+ * Beamforming IPC message exchanged between MuC and DSP, one per
+ * sounding/action frame.  msg_type selects among the QTN_TXBF_*_MSG
+ * values defined above; per the TODO in the original code, several
+ * fields are only meaningful for particular message types.
+ */
+struct txbf_pkts
+{
+	unsigned msg_type;		/* one of QTN_TXBF_*_MSG */
+	unsigned state;			/* presumably enum txbf_buff_state - confirm */
+	unsigned bf_ver;		/* TXBF_BF_VER* */
+	unsigned bf_mode;
+	unsigned act_frame_phys;
+	unsigned buffer_start;
+	unsigned act_frame_len;
+	unsigned skb;
+	unsigned qmat_offset;
+	unsigned inst_1ss_def_mat;
+	unsigned success;
+	unsigned ndp_phys;
+	unsigned nstream;
+	unsigned bf_mimo_nc;
+	unsigned bf_mimo_nr;
+	unsigned bf_tone_grp;
+	unsigned bf_coeff_size;
+	unsigned bf_nss_snr[4];
+	unsigned bf_compressed;
+	unsigned bf_codebook;
+	unsigned pkt_indx;
+	unsigned short aid;		/* association ID of the peer */
+	unsigned node_bw;
+	unsigned bw_pri_40m_lower;
+	unsigned bw_pri_20m_lower;
+	unsigned txbf_skip_dftmat_flag;
+	unsigned txbf_2x4sta_flag;
+	unsigned txbf_qmat_install_wait;
+	struct txbf_ndp_info ndp_info;
+	char act_frame_sa[6];		/* action frame source address */
+	char act_frame_bssid[6];	/* action frame BSSID */
+	char slot;
+	uint8_t mu_grp_id[QTN_MU_QMAT_MAX_SLOTS];
+	uint8_t vapid;
+	unsigned counter;
+};
+
+#define IEEE80211_ADDR_LEN	6	/* bytes in an 802.11 MAC address */
+/*
+ * Rate-training message.  Layout note: with 32-bit pointers the fields
+ * before 'padding' total 49 bytes, so padding[15] rounds the structure up
+ * to 64 bytes - one cache line, per the original trailing comment
+ * (TODO confirm 32-bit pointer assumption on this platform).
+ */
+struct qtn_rate_train_info
+{
+	unsigned msg_type;
+	unsigned state;
+	char src[IEEE80211_ADDR_LEN];
+	char dst[IEEE80211_ADDR_LEN];
+	unsigned ver;
+	unsigned nonce;
+	unsigned hash;
+	unsigned stamp;
+	unsigned ni;
+	void *next; /* Chaining for retry on mbox busy */
+	int index;
+	char devid;
+	char padding[15]; /* Cache aligned */
+};
+
+/* Beamforming control parameters. */
+struct txbf_ctrl {
+	unsigned bf_tone_grp;	/* tone grouping (Ng) */
+	unsigned svd_mode;	/* presumably a mask of SVD_MODE_SET() bits - confirm */
+	unsigned bfoff_thresh;	/* threshold for disabling BF - TODO confirm units */
+};
+
+/*
+ * One NDP sample word as stored in BB memory: 12-bit signed imaginary and
+ * real parts, each preceded by a 4-bit pad, packed into 32 bits.
+ */
+typedef struct
+{
+	signed int pad1:4;
+	signed int im:12;	/* imaginary component */
+	signed int pad2:4;
+	signed int re:12;	/* real component */
+} ndp_format;
+
+/* Access an NDP sample either as decoded fields or as the raw word. */
+typedef union
+{
+	ndp_format ndp;
+	int wrd;
+} bbmem_ndp;
+
+
+/*
+ * Steering-vector sample pair as laid out across two 32-bit words of BB
+ * memory: BB0 coefficients in "compq" format interleaved with BB1
+ * coefficients, for two streams (s0/s1).  The existing per-field comments
+ * give the bit budget of each chunk; stream-1 real parts are split into
+ * lo/hi nibbles across word boundaries.
+ */
+typedef struct
+{
+	/* Compq format BB0 20 bits */
+	signed int bb0_re_s0:8;
+	signed int bb0_im_s0:8;
+	signed int bb0_re_s1_lo:4;
+	/* Interleaved BB1 12 bits */
+	signed int bb1_re_s1_hi:4;
+	signed int bb1_im_s1:8;
+	/* Compq format BB0 12 bits */
+	signed int bb0_re_s1_hi:4;
+	signed int bb0_im_s1:8;
+	/* Interleaved BB1 20 bits */
+	signed int bb1_re_s0:8;
+	signed int bb1_im_s0:8;
+	signed int bb1_re_s1_lo:4;
+}st_format;
+
+/* Access a steering-vector pair either as decoded fields or raw words. */
+typedef union
+{
+	st_format st;
+	unsigned wrd[2];
+} bbmem_st;
+
+/*
+ * Maximum streams supported for different matrix types
+ */
+#define QTN_MAX_STREAMS			4
+#define QTN_MAX_40M_VHT_STREAMS		2
+#define QTN_MAX_20M_VHT_STREAMS		2
+#define QTN_MAX_IOT_QMAT_STREAMS	3
+
+/*
+ * Default decimation used for matrices in Q memory. Some matrices
+ * may use more decimation if space is a problem
+ */
+#define QTN_TXBF_DEFAULT_QMAT_NG	1
+#define QTN_TXBF_MAX_QMAT_NG		2
+
+#define NDP_TO_STVEC_SIZE_RATIO 4
+
+#define NDP_START_DELAY		2		/* in seconds  */
+
+#define STVEC_SIZE_BYTES_1STRM_20M 0x100	/* Assumes NG 1 matrices */
+#define STVEC_MAX_NODES		10
+
+/*
+ * Matrix sizes for NG 1 matrices
+ */
+#define STVEC_SIZE_BYTES_1STRM_40M (STVEC_SIZE_BYTES_1STRM_20M << 1)
+#define STVEC_SIZE_BYTES_1STRM_80M (STVEC_SIZE_BYTES_1STRM_40M << 1)
+#define STVEC_SIZE_BYTES_2STRM_20M (STVEC_SIZE_BYTES_1STRM_20M << 1)
+#define STVEC_SIZE_BYTES_2STRM_40M (STVEC_SIZE_BYTES_2STRM_20M << 1)
+#define STVEC_SIZE_BYTES_2STRM_80M (STVEC_SIZE_BYTES_2STRM_40M << 1)
+#define STVEC_SIZE_BYTES_3STRM_20M (STVEC_SIZE_BYTES_2STRM_20M + STVEC_SIZE_BYTES_1STRM_20M)
+#define STVEC_SIZE_BYTES_3STRM_40M (STVEC_SIZE_BYTES_3STRM_20M << 1)
+#define STVEC_SIZE_BYTES_3STRM_80M (STVEC_SIZE_BYTES_3STRM_40M << 1)
+#define STVEC_SIZE_BYTES_4STRM_20M (STVEC_SIZE_BYTES_2STRM_20M << 1)
+#define STVEC_SIZE_BYTES_4STRM_40M (STVEC_SIZE_BYTES_2STRM_40M << 1)
+#define STVEC_SIZE_BYTES_4STRM_80M (STVEC_SIZE_BYTES_4STRM_40M << 1)
+#define STVEC_SIZE_BYTES_1STRM_MAX STVEC_SIZE_BYTES_1STRM_80M
+#define STVEC_SIZE_BYTES_2STRM_MAX STVEC_SIZE_BYTES_2STRM_80M
+#define STVEC_SIZE_BYTES_3STRM_MAX STVEC_SIZE_BYTES_3STRM_80M
+#define STVEC_SIZE_BYTES_4STRM_MAX STVEC_SIZE_BYTES_4STRM_80M
+
+#ifndef QTN_BW_20M
+# define QTN_BW_20M 0
+# define QTN_BW_40M 1
+# define QTN_BW_80M 2
+#endif
+#define QTN_BW_SW_MAX QTN_BW_80M
+
+#define NDP_SIZE_BYTES_BASE	1024
+#define NDP_SIZE_BYTES_20M	(NDP_SIZE_BYTES_BASE << QTN_BW_20M)
+#define NDP_SIZE_BYTES_40M	(NDP_SIZE_BYTES_BASE << QTN_BW_40M)
+#define NDP_SIZE_BYTES_80M	(NDP_SIZE_BYTES_BASE << QTN_BW_80M)
+#define NDP_SIZE_BYTES_MAX	(NDP_SIZE_BYTES_BASE << QTN_BW_SW_MAX)
+
+/*
+ * Q matrix defines for 80 MHz nodes using NG 1
+ */
+#define QTN_TXBF_QMAT80_1STRM_OFFSET(offset)		(offset)
+#define QTN_TXBF_QMAT80_1STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_1STRM_80M + \
+							STVEC_SIZE_BYTES_1STRM_40M + \
+							STVEC_SIZE_BYTES_1STRM_20M)
+#define QTN_TXBF_QMAT80_2STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_2STRM_80M + \
+							STVEC_SIZE_BYTES_2STRM_40M + \
+							STVEC_SIZE_BYTES_2STRM_20M)
+#define QTN_TXBF_QMAT80_3STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_3STRM_80M)
+#define QTN_TXBF_QMAT80_4STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_4STRM_80M)
+#define QTN_TXBF_QMAT80_1STRM_40M_OFFSET(offset)	(offset + \
+							STVEC_SIZE_BYTES_1STRM_80M)
+#define QTN_TXBF_QMAT80_1STRM_20M_OFFSET(offset)	(offset + \
+							STVEC_SIZE_BYTES_1STRM_80M + \
+							STVEC_SIZE_BYTES_1STRM_40M)
+#define QTN_TXBF_QMAT80_2STRM_OFFSET(offset)		(offset + \
+							QTN_TXBF_QMAT80_1STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT80_2STRM_40M_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT80_1STRM_MAT_TOTAL + \
+							STVEC_SIZE_BYTES_2STRM_80M)
+#define QTN_TXBF_QMAT80_2STRM_20M_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT80_1STRM_MAT_TOTAL + \
+							STVEC_SIZE_BYTES_2STRM_80M + \
+							STVEC_SIZE_BYTES_2STRM_40M)
+#define QTN_TXBF_QMAT80_3STRM_OFFSET(offset)		(offset + \
+							QTN_TXBF_QMAT80_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_2STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT80_4STRM_OFFSET(offset)		(offset + \
+							QTN_TXBF_QMAT80_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_2STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_3STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT80_TOTAL_SIZE			(QTN_TXBF_QMAT80_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_2STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_3STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_4STRM_MAT_TOTAL)
+
+/*
+ * Q matrix defines for 80 MHz nodes using NG 2
+ * (same layout as the NG 1 set above, with each matrix half the size).
+ */
+#define QTN_TXBF_QMAT80_NG2_1STRM_OFFSET(offset)	(offset)
+#define QTN_TXBF_QMAT80_NG2_1STRM_MAT_TOTAL		((STVEC_SIZE_BYTES_1STRM_80M / 2) + \
+							( STVEC_SIZE_BYTES_1STRM_40M / 2) + \
+							(STVEC_SIZE_BYTES_1STRM_20M / 2))
+#define QTN_TXBF_QMAT80_NG2_2STRM_MAT_TOTAL		((STVEC_SIZE_BYTES_2STRM_80M / 2) + \
+							(STVEC_SIZE_BYTES_2STRM_40M / 2) + \
+							(STVEC_SIZE_BYTES_2STRM_20M / 2))
+#define QTN_TXBF_QMAT80_NG2_3STRM_MAT_TOTAL		(STVEC_SIZE_BYTES_3STRM_80M / 2)
+#define QTN_TXBF_QMAT80_NG2_4STRM_MAT_TOTAL		(STVEC_SIZE_BYTES_4STRM_80M / 2)
+#define QTN_TXBF_QMAT80_NG2_1STRM_40M_OFFSET(offset)	(offset + \
+							(STVEC_SIZE_BYTES_1STRM_80M / 2))
+#define QTN_TXBF_QMAT80_NG2_1STRM_20M_OFFSET(offset)	(offset + \
+							(STVEC_SIZE_BYTES_1STRM_80M / 2) + \
+							(STVEC_SIZE_BYTES_1STRM_40M / 2))
+#define QTN_TXBF_QMAT80_NG2_2STRM_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT80_NG2_1STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT80_NG2_2STRM_40M_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT80_NG2_1STRM_MAT_TOTAL + \
+							(STVEC_SIZE_BYTES_2STRM_80M / 2))
+#define QTN_TXBF_QMAT80_NG2_2STRM_20M_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT80_NG2_1STRM_MAT_TOTAL + \
+							(STVEC_SIZE_BYTES_2STRM_80M / 2) + \
+							(STVEC_SIZE_BYTES_2STRM_40M / 2))
+#define QTN_TXBF_QMAT80_NG2_3STRM_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT80_NG2_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_NG2_2STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT80_NG2_4STRM_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT80_NG2_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_NG2_2STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_NG2_3STRM_MAT_TOTAL)
+/*
+ * NOTE(review): the first term below is QTN_TXBF_QMAT80_1STRM_MAT_TOTAL
+ * (the NG1 size) rather than QTN_TXBF_QMAT80_NG2_1STRM_MAT_TOTAL - this
+ * looks like a copy-paste slip.  Since the NG1 total is twice the NG2
+ * total it only over-states the overall size (harmless over-reservation),
+ * but confirm against consumers before changing it.
+ */
+#define QTN_TXBF_QMAT80_NG2_TOTAL_SIZE			(QTN_TXBF_QMAT80_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_NG2_2STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_NG2_3STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT80_NG2_4STRM_MAT_TOTAL)
+/*
+ * Q matrix defines for 40 MHz nodes using NG 1
+ */
+#define QTN_TXBF_QMAT40_1STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_1STRM_40M + \
+							STVEC_SIZE_BYTES_1STRM_20M)
+#define QTN_TXBF_QMAT40_2STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_2STRM_40M + \
+							STVEC_SIZE_BYTES_2STRM_20M)
+#define QTN_TXBF_QMAT40_3STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_3STRM_40M)
+#define QTN_TXBF_QMAT40_4STRM_MAT_TOTAL			(STVEC_SIZE_BYTES_4STRM_40M)
+#define QTN_TXBF_QMAT40_1STRM_OFFSET(offset)		(offset)
+#define QTN_TXBF_QMAT40_1STRM_40M_OFFSET(offset)	(offset)
+#define QTN_TXBF_QMAT40_1STRM_20M_OFFSET(offset)	(offset + \
+							STVEC_SIZE_BYTES_1STRM_40M)
+#define QTN_TXBF_QMAT40_2STRM_OFFSET(offset)		(offset + \
+							QTN_TXBF_QMAT40_1STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT40_2STRM_40M_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT40_1STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT40_2STRM_20M_OFFSET(offset)	(offset + \
+							QTN_TXBF_QMAT40_1STRM_MAT_TOTAL + \
+							STVEC_SIZE_BYTES_2STRM_40M)
+#define QTN_TXBF_QMAT40_3STRM_OFFSET(offset)		(offset + \
+							QTN_TXBF_QMAT40_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT40_2STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT40_4STRM_OFFSET(offset)		(offset + \
+							QTN_TXBF_QMAT40_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT40_2STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT40_3STRM_MAT_TOTAL)
+#define QTN_TXBF_QMAT40_TOTAL_SIZE			(QTN_TXBF_QMAT40_1STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT40_2STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT40_3STRM_MAT_TOTAL + \
+							QTN_TXBF_QMAT40_4STRM_MAT_TOTAL)
+
+/*
+ * Defines for dividing Q memory into slots for nodes using standard BF
+ */
+#define QTN_TXBF_QMAT_SLOT_SIZE			MAX(QTN_TXBF_QMAT40_TOTAL_SIZE, \
+							QTN_TXBF_QMAT80_TOTAL_SIZE / 2)
+
+#define QTN_TXBF_QMAT_SLOTS_USED(qn)	(MAX(1, ((qn)->qn_node.ni_bw_cap >>		\
+					MAX(((qn)->qn_expmat.ng - QTN_TXBF_DEFAULT_QMAT_NG), 0))))
+
+#define QTN_TXBF_QMAT_SLOT(idx)		((idx) * QTN_TXBF_QMAT_SLOT_SIZE)
+
+#define QTN_TXBF_QMAT_OFFSET_SHIFT		6
+
+/*
+ * Defines for fixed matrix (BBF and default) sizes
+ */
+#define QTN_TXBF_QMAT_MIN_OFFSET		(1 << QTN_TXBF_QMAT_OFFSET_SHIFT)
+
+#define QTN_TXBF_1SS_WORDS_PER_TONE		2
+#define QTN_TXBF_2SS_WORDS_PER_TONE		4
+#define QTN_TXBF_3SS_WORDS_PER_TONE		6
+#define QTN_TXBF_4SS_WORDS_PER_TONE		8
+#define QTN_TXBF_IOT_QMAT_1SS_WORDS		(QTN_TXBF_IOT_QMAT_TONES * \
+							QTN_TXBF_1SS_WORDS_PER_TONE)
+#define QTN_TXBF_IOT_QMAT_2SS_WORDS		(QTN_TXBF_IOT_QMAT_TONES * \
+							QTN_TXBF_2SS_WORDS_PER_TONE)
+#define QTN_TXBF_IOT_QMAT_3SS_WORDS		(QTN_TXBF_IOT_QMAT_TONES * \
+							QTN_TXBF_3SS_WORDS_PER_TONE)
+#define QTN_TXBF_IOT_QMAT_4SS_WORDS		(QTN_TXBF_IOT_QMAT_TONES * \
+							QTN_TXBF_4SS_WORDS_PER_TONE)
+#define QTN_TXBF_IOT_QMAT_1SS_MEM		MAX((QTN_TXBF_IOT_QMAT_1SS_WORDS * 4), \
+                                                        QTN_TXBF_QMAT_MIN_OFFSET)
+#define QTN_TXBF_IOT_QMAT_2SS_MEM		MAX((QTN_TXBF_IOT_QMAT_2SS_WORDS * 4), \
+                                                        QTN_TXBF_QMAT_MIN_OFFSET)
+#define QTN_TXBF_IOT_QMAT_3SS_MEM		MAX((QTN_TXBF_IOT_QMAT_3SS_WORDS * 4), \
+                                                        QTN_TXBF_QMAT_MIN_OFFSET)
+#define QTN_TXBF_IOT_QMAT_4SS_MEM		MAX((QTN_TXBF_IOT_QMAT_4SS_WORDS * 4), \
+                                                        QTN_TXBF_QMAT_MIN_OFFSET)
+
+#define QTN_TXBF_QMAT_FIXED_MAT_START		(QTN_TXBF_QMAT_SLOT_SIZE * STVEC_MAX_NODES)
+
+/*
+ * Fixed 2x4 node matrix definitions
+ */
+/*
+ * 80MHz 2x4 matrices need to start in normal BF area to fit,
+ * this is OK, as they are used on the station only at present
+ */
+#define QTN_TXBF_QMAT_2x4STA_1_STRM_OFFSET	(QTN_TXBF_QMAT_SLOT_SIZE * (STVEC_MAX_NODES - 1))
+#define QTN_TXBF_QMAT_2x4STA_2_STRM_OFFSET	(QTN_TXBF_QMAT_2x4STA_1_STRM_OFFSET + \
+						STVEC_SIZE_BYTES_1STRM_80M)
+
+#define QTN_TXBF_2x4STA_1SS_TONE_DATA	{0x00400040, 0x00000000}
+#define QTN_TXBF_2x4STA_2SS_TONE_DATA	{0x00000040, 0x00400000, 0x00000000, 0x00000000}
+
+/*
+ * Fixed default matrix offset definitions
+ */
+#define QTN_TXBF_QMAT_STD_START			QTN_TXBF_QMAT_FIXED_MAT_START
+#define QTN_TXBF_QMAT_STD_1_STRM_OFFSET		QTN_TXBF_QMAT_STD_START
+#define QTN_TXBF_QMAT_STD_2_STRM_OFFSET		(QTN_TXBF_QMAT_STD_1_STRM_OFFSET + \
+						QTN_TXBF_IOT_QMAT_1SS_MEM)
+#define QTN_TXBF_QMAT_STD_3_STRM_OFFSET		(QTN_TXBF_QMAT_STD_2_STRM_OFFSET + \
+						QTN_TXBF_IOT_QMAT_2SS_MEM)
+#define QTN_TXBF_QMAT_STD_4_STRM_OFFSET		(QTN_TXBF_QMAT_STD_3_STRM_OFFSET + \
+						QTN_TXBF_IOT_QMAT_3SS_MEM)
+
+#define QTN_TXBF_IOT_QMAT_START			(QTN_TXBF_QMAT_STD_4_STRM_OFFSET + \
+						QTN_TXBF_IOT_QMAT_4SS_MEM)
+#define QTN_TXBF_IOT_QMAT_TONES			2	/* number of tones for fixed matrices */
+
+/*
+ * BBF slot and matrix offset definitions
+ *
+ * For each slot there is space for the probed matrix, plus the 1, 2 and 3 streams
+ * matrices for the index being used by that node
+ */
+#define QTN_TXBF_IOT_QMAT_MAX_SLOTS		9
+#define QTN_TXBF_IOT_QMAT_NG			7
+#define QTN_TXBF_IOT_QMAT_PER_SS		18
+#define QTN_TXBF_IOT_QMAT_PROBE_MEM		QTN_TXBF_IOT_QMAT_3SS_MEM
+#define QTN_TXBF_IOT_QMAT_SLOT_SIZE		(QTN_TXBF_IOT_QMAT_1SS_MEM + \
+						QTN_TXBF_IOT_QMAT_2SS_MEM + \
+						QTN_TXBF_IOT_QMAT_3SS_MEM + \
+						QTN_TXBF_IOT_QMAT_PROBE_MEM)
+#define QTN_TXBF_IOT_QMAT_BASE_OFFSET(slot)	(QTN_TXBF_IOT_QMAT_START + \
+						(QTN_TXBF_IOT_QMAT_SLOT_SIZE * (slot)))
+#define QTN_TXBF_IOT_QMAT_1SS_OFFSET(slot)	(QTN_TXBF_IOT_QMAT_BASE_OFFSET(slot))
+#define QTN_TXBF_IOT_QMAT_2SS_OFFSET(slot)	(QTN_TXBF_IOT_QMAT_BASE_OFFSET(slot) + \
+						QTN_TXBF_IOT_QMAT_1SS_MEM)
+#define QTN_TXBF_IOT_QMAT_3SS_OFFSET(slot)	(QTN_TXBF_IOT_QMAT_BASE_OFFSET(slot) + \
+						QTN_TXBF_IOT_QMAT_1SS_MEM + \
+						QTN_TXBF_IOT_QMAT_2SS_MEM)
+#define QTN_TXBF_IOT_QMAT_PROBE_OFFSET(slot)	(QTN_TXBF_IOT_QMAT_BASE_OFFSET(slot) + \
+						QTN_TXBF_IOT_QMAT_1SS_MEM + \
+						QTN_TXBF_IOT_QMAT_2SS_MEM + \
+						QTN_TXBF_IOT_QMAT_3SS_MEM)
+
+#define FFT_TONE_20M_LO 1
+#define FFT_TONE_20M_HI 28
+
+#define FFT_TONE_40M_LO 2
+#define FFT_TONE_40M_HI 58
+
+#define QTN_TXBF_TONES_PER_CHAN 64
+#define QTN_TXBF_MAX_TONES	128
+#define QTN_TXBF_MIN_TONES	1
+#define	QTN_TXBF_MODE	4 /* 80MHz Mode */
+
+/*
+ * SVD processing modes.  Each enumerator is a bit *position*: query a
+ * mode mask with SVD_MODE_GET(mask, mode) and build one with
+ * SVD_MODE_SET(mode) - see the macros directly below.
+ */
+enum {
+	SVD_MODE_STREAM_MIXING =0,
+	SVD_MODE_TWO_STREAM,
+	SVD_MODE_PER_ANT_SCALE,
+	SVD_MODE_CHANNEL_INV,
+	SVD_MODE_BYPASS,
+};
+
+#define SVD_MODE_GET(X,S) 	(( X >> S) & 1)
+#define SVD_MODE_SET(S) 	(1 << S)
+
+#if !defined(QTN_TXBF_FFT_LOCK_MANUAL) || (QTN_TXBF_FFT_LOCK_MANUAL == 1)
+	/* Locking is disabled or manual locking */
+	#define QT3_BB_MIMO_BF_RX_INIT_VAL	(0x0A)
+	#define QT4_BB_MIMO_BF_RX_INIT_VAL	(QT3_BB_MIMO_BF_RX_INIT_VAL)
+#else
+	/* Automatic, hw-centric locking. */
+	#define QT3_BB_MIMO_BF_RX_INIT_VAL	(0x0B)
+	#define QT4_BB_MIMO_BF_RX_INIT_VAL	(QT3_BB_MIMO_BF_RX_INIT_VAL)
+#endif
+
+/* should be 64 bytes aligned (expmat ptr in TxVector drops lower 5 bits)*/
+/*
+ * NOTE(review): dropping the lower 5 bits implies 32-byte granularity,
+ * which disagrees with the "64 bytes" in the comment above - confirm the
+ * actual alignment requirement against the TxVector definition.
+ */
+struct phys_qmat_layout {
+	uint64_t length;	/* byte length of body[] that follows */
+	int8_t body[0];		/* flexible array: matrix bytes */
+} __packed;
+
+// TODO: describe bodies as 2 dimensional arrays
+/*
+ * Fixed-size variants of phys_qmat_layout for specific NxM antenna
+ * configurations.  Observed pattern: an NxM body is sized for (N + M)
+ * streams at 80 MHz (e.g. 1x1 -> 2STRM, 2x1 -> 3STRM, 2x2 -> 4STRM) -
+ * presumably total rows + columns of the matrix; confirm against the
+ * DSP code that fills these.
+ */
+struct phys_qmat_1x1 {
+	uint64_t length;
+	int8_t body[STVEC_SIZE_BYTES_2STRM_80M];
+} __packed;
+
+struct phys_qmat_2x1 {
+	uint64_t length;
+	int8_t body[STVEC_SIZE_BYTES_3STRM_80M];
+} __packed;
+
+struct phys_qmat_3x1 {
+	uint64_t length;
+	int8_t body[STVEC_SIZE_BYTES_4STRM_80M];
+} __packed;
+
+struct phys_qmat_1x2 {
+	uint64_t length;
+	int8_t body[STVEC_SIZE_BYTES_3STRM_80M];
+} __packed;
+
+struct phys_qmat_1x3 {
+	uint64_t length;
+	int8_t body[STVEC_SIZE_BYTES_4STRM_80M];
+} __packed;
+
+struct phys_qmat_2x2 {
+	uint64_t length;
+	int8_t body[STVEC_SIZE_BYTES_4STRM_80M];
+} __packed;
+
+/* Minimal placeholder matrix (16-byte body). */
+struct phys_qmat_dummy {
+	uint64_t length;
+	int8_t body[0x10];
+} __packed;
+
+/* structure to hold MU group qmatrix info */
+/*
+ * MU group Q-matrix slot descriptor.  Packed: shared across processors,
+ * so field order/sizes are ABI.
+ */
+struct qtn_sram_qmat {
+	uint8_t valid;   /* set to 1 when it is occupied, 0 indicates it is free */
+	uint8_t grp_id;  /* MU group id */
+	uint16_t tk;	/* Token */
+	uint16_t u0_aid;  /* user position 0 AID */
+	uint16_t u1_aid;  /* user position 1 AID */
+	int32_t rank;	/* group rank metric - TODO confirm units/scale */
+/* Number of following Q matrix elements */
+#define MU_QMAT_ELEM_NUM	6
+	/* matrix starting addresses in sram */
+	uint32_t u0_1ss_u1_1ss;
+	uint32_t u0_2ss_u1_1ss;
+	uint32_t u0_3ss_u1_1ss;
+	uint32_t u0_1ss_u1_2ss;
+	uint32_t u0_1ss_u1_3ss;
+	uint32_t u0_2ss_u1_2ss;
+	uint32_t dsp_cnt;
+} __packed;
+
+/*
+ * AID pair plus rank - mirrors the u0_aid/u1_aid/rank fields of
+ * qtn_sram_qmat; presumably used to report/compare candidate MU group
+ * rankings (confirm against DSP group-selection code).
+ */
+struct qtn_grp_rank {
+	uint16_t u0_aid;
+	uint16_t u1_aid;
+	int32_t rank;
+} __packed;
+
+#define TXBF_MAX_NC 4
+#define TXBF_MAX_NR 4
+#define TXBF_MAX_NG 3
+#define TXBF_MAX_BW 4	/* Multiple of 20 MHz channels */
+
+#define TXBF_EXPMAT_TYPE_0_BYPASS 0
+#define TXBF_EXPMAT_TYPE_2_QMEM_MODE 2
+#define TXBF_EXPMAT_TYPE_5_QREG_MODE 5
+
+#define QTN_TXBF_QMAT_STD_1SS_TONE_DATA	{0x00400040, 0x00400040, 0x00400040, 0x00400040}
+#define QTN_TXBF_QMAT_STD_2SS_TONE_DATA	{0x002D002D, 0x2D00002D, 0x00D3002D, 0xD300002D, \
+					0x002D002D, 0x2D00002D, 0x00D3002D, 0xD300002D}
+#define QTN_TXBF_QMAT_STD_3SS_TONE_DATA	{0x00250025, 0x00250025, 0x00DB2500, 0x00DB0025, \
+					0x00250025, 0x00DBDB00, 0x00250025, 0x00250025, \
+					0x00DB2500, 0x00DB0025, 0x00250025, 0x00DBDB00}
+#define QTN_TXBF_QMAT_STD_4SS_TONE_DATA	{0x00000040, 0x00000000, 0x00400000, 0x00000000, \
+					0x00000000, 0x00000040, 0x00000000, 0x00400000, \
+					0x00000040, 0x00000000, 0x00400000, 0x00000000, \
+					0x00000000, 0x00000040, 0x00000000, 0x00400000}
+
+/*
+ * Fill expmat_ss[] with the Q-matrix memory offsets (relative to
+ * qmat_base_offset) for 1..N spatial streams, chosen by the node's
+ * bandwidth capability (node_bw), the bandwidth of the current exchange
+ * (bw) and the tone grouping (ng; NG1 vs NG2 layouts).
+ *
+ * Returns the highest usable stream index (streams - 1):
+ *  - 40 MHz on an 80 MHz node: fills [0]-[1], returns
+ *    QTN_MAX_40M_VHT_STREAMS - 1;
+ *  - 20 MHz on a wider node: fills [0]-[1], returns
+ *    QTN_MAX_20M_VHT_STREAMS - 1;
+ *  - full bandwidth: fills [0]-[1] always, [2]-[3] only when
+ *    max == QTN_MAX_STREAMS, and returns QTN_MAX_STREAMS - 1.
+ *
+ * NOTE(review): the full-bandwidth path returns QTN_MAX_STREAMS - 1 even
+ * when max < QTN_MAX_STREAMS, leaving expmat_ss[2]/[3] unwritten -
+ * callers presumably clamp by their own stream count; confirm.
+ */
+static __inline__ uint8_t qtn_txbf_get_bf_qmat_offsets(uint32_t *expmat_ss, uint8_t max,
+		uint32_t qmat_base_offset, uint8_t node_bw, uint8_t bw, uint8_t ng)
+{
+	if ((bw == QTN_BW_40M) && (node_bw == QTN_BW_80M)) {
+		if (ng == QTN_TXBF_DEFAULT_QMAT_NG) {
+			expmat_ss[0] = QTN_TXBF_QMAT80_1STRM_40M_OFFSET(qmat_base_offset);
+			expmat_ss[1] = QTN_TXBF_QMAT80_2STRM_40M_OFFSET(qmat_base_offset);
+		} else {
+			expmat_ss[0] = QTN_TXBF_QMAT80_NG2_1STRM_40M_OFFSET(qmat_base_offset);
+			expmat_ss[1] = QTN_TXBF_QMAT80_NG2_2STRM_40M_OFFSET(qmat_base_offset);
+		}
+		return (QTN_MAX_40M_VHT_STREAMS - 1);
+	} else if ((bw == QTN_BW_20M) && (node_bw > QTN_BW_20M)) {
+		if ((node_bw == QTN_BW_80M) && (ng == QTN_TXBF_DEFAULT_QMAT_NG)) {
+			expmat_ss[0] = QTN_TXBF_QMAT80_1STRM_20M_OFFSET(qmat_base_offset);
+			expmat_ss[1] = QTN_TXBF_QMAT80_2STRM_20M_OFFSET(qmat_base_offset);
+		} else if (node_bw == QTN_BW_80M) {
+			expmat_ss[0] = QTN_TXBF_QMAT80_NG2_1STRM_20M_OFFSET(qmat_base_offset);
+			expmat_ss[1] = QTN_TXBF_QMAT80_NG2_2STRM_20M_OFFSET(qmat_base_offset);
+		} else {
+			/* 40 MHz node sending 20 MHz: NG1/NG2 share the 40M layout */
+			expmat_ss[0] = QTN_TXBF_QMAT40_1STRM_20M_OFFSET(qmat_base_offset);
+			expmat_ss[1] = QTN_TXBF_QMAT40_2STRM_20M_OFFSET(qmat_base_offset);
+		}
+		return (QTN_MAX_20M_VHT_STREAMS - 1);
+	}
+	if (node_bw == QTN_BW_80M) {
+		if (ng == QTN_TXBF_DEFAULT_QMAT_NG) {
+			expmat_ss[0] = QTN_TXBF_QMAT80_1STRM_OFFSET(qmat_base_offset);
+			expmat_ss[1] = QTN_TXBF_QMAT80_2STRM_OFFSET(qmat_base_offset);
+		} else {
+			expmat_ss[0] = QTN_TXBF_QMAT80_NG2_1STRM_OFFSET(qmat_base_offset);
+			expmat_ss[1] = QTN_TXBF_QMAT80_NG2_2STRM_OFFSET(qmat_base_offset);
+		}
+		if ((max == QTN_MAX_STREAMS) && (ng == QTN_TXBF_DEFAULT_QMAT_NG)) {
+			expmat_ss[2] = QTN_TXBF_QMAT80_3STRM_OFFSET(qmat_base_offset);
+			expmat_ss[3] = QTN_TXBF_QMAT80_4STRM_OFFSET(qmat_base_offset);
+		} else if (max == QTN_MAX_STREAMS) {
+			expmat_ss[2] = QTN_TXBF_QMAT80_NG2_3STRM_OFFSET(qmat_base_offset);
+			expmat_ss[3] = QTN_TXBF_QMAT80_NG2_4STRM_OFFSET(qmat_base_offset);
+		}
+	} else {
+		expmat_ss[0] = QTN_TXBF_QMAT40_1STRM_OFFSET(qmat_base_offset);
+		expmat_ss[1] = QTN_TXBF_QMAT40_2STRM_OFFSET(qmat_base_offset);
+		if (max == QTN_MAX_STREAMS) {
+			expmat_ss[2] = QTN_TXBF_QMAT40_3STRM_OFFSET(qmat_base_offset);
+			expmat_ss[3] = QTN_TXBF_QMAT40_4STRM_OFFSET(qmat_base_offset);
+		}
+	}
+
+	return (QTN_MAX_STREAMS - 1);
+}
+
+unsigned dsp_rt_hash(volatile struct qtn_rate_train_info *p_rate_info);
+void dsp_process_muc_opti_msg(volatile struct qtn_rate_train_info *muc_dsp_msg);
+void dsp_opti_check(void);
+
+#endif /* _TXBF_API_H_ */
+
diff --git a/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/txbf_mbox.h b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/txbf_mbox.h
new file mode 100644
index 0000000..4641758
--- /dev/null
+++ b/arch/arc/plat-qtn/sdk-qsr1000/include/qtn/txbf_mbox.h
@@ -0,0 +1,374 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __QTN_TXBF_MBOX_H
+#define __QTN_TXBF_MBOX_H
+
+#include "mproc_sync.h"
+#include "txbf_common.h"
+#include "dsp_stats.h"
+
+#define QTN_TXBF_MBOX_BAD_IDX			((u_int32_t)-1)
+
+#define QTN_TXBF_MUC_TO_DSP_MBOX_INT		(0)
+#define QTN_TXBF_DSP_TO_HOST_MBOX_INT		(0)
+#define QTN_TXBF_DSP_TO_MUC_MBOX_INT		(0)
+
+#define QTN_RATE_MUC_DSP_MSG_RING_SIZE		(32)
+
+/*
+ * QTN_MAX_MU_SND_NODES nodes (6) + QTN_MAX_SND_NODES (10) + 3 for IPC cmd 19.
+ * That value still caused buffer allocation failures, probably due to poor
+ * DSP performance; with 3 more entries no allocation failure is seen in the
+ * 4-STA case.
+ */
+#define QTN_TXBF_MUC_DSP_MSG_RING_SIZE		(6 + 10 + 3 + 3)
+
+#define QTN_TXBF_NDP_DATA_BUFS			(1)
+
+/* MU group install/delete IPC from DSP to LHost */
+#define QTN_TXBF_DSP_TO_HOST_INST_MU_GRP        1
+#define QTN_TXBF_DSP_TO_HOST_DELE_MU_GRP        2
+
+#ifndef __ASSEMBLY__
+
+#if DSP_ENABLE_STATS && !defined(QTN_RC_ENABLE_HDP)
+#define DSP_UPDATE_STATS(_a, _b)	(qtn_txbf_mbox_get()->dsp_stats._a += (_b))
+#define DSP_SETSTAT(_a, _b)		(qtn_txbf_mbox_get()->dsp_stats._a =  (_b))
+#else
+#define DSP_UPDATE_STATS(_a, _b)
+#define DSP_SETSTAT(_a, _b)
+#endif
+
+/*
+ * TxBF message box shared between the LHost, MuC and DSP cores.
+ * Holds single-entry IPC mailboxes, the BF message buffer pool, MU grouping
+ * state and DSP tuning knobs.  Fields are volatile because multiple cores
+ * access the structure concurrently (see qtn_txbf_mbox_get()).
+ */
+struct qtn_txbf_mbox
+{
+	/* Write index in txbf_msg_bufs array. Updated only by a sender */
+	volatile u_int32_t wr;
+
+	#define MUC_TO_DSP_ACT_MBOX_SIZE	12
+	/* Single-entry mailboxes; QTN_TXBF_MBOX_BAD_IDX marks an empty slot */
+	volatile u_int32_t muc_to_dsp_action_frame_mbox[MUC_TO_DSP_ACT_MBOX_SIZE];
+	volatile u_int32_t muc_to_dsp_ndp_mbox;
+	volatile u_int32_t muc_to_dsp_del_grp_node_mbox;
+	volatile u_int32_t muc_to_dsp_gr_upd_done_mbox;
+	volatile u_int32_t muc_to_trig_mu_grp_sel_mbox;
+	volatile u_int32_t dsp_to_host_mbox;
+
+	/* Message buffer pool cycled round-robin by qtn_txbf_mbox_alloc_msg_buf() */
+	volatile struct txbf_pkts txbf_msg_bufs[QTN_TXBF_MUC_DSP_MSG_RING_SIZE];
+
+	volatile struct txbf_ctrl bfctrl_params;
+
+	/* Debug verbosity level */
+#define DEBUG_LVL_NO	0
+#define DEBUG_LVL_ALL	1
+	volatile uint32_t debug_level;
+
+	/* Bit flags for debug_flag below */
+#define MU_QMAT_FREEZE				0x00000001
+#define MU_MANUAL_RANK				0x00000002
+#define MU_FREEZE_RANK				0x00000004
+#define MU_QMAT_ZERO_STA0			0x00000010
+#define MU_QMAT_ZERO_STA1			0x00000020
+#define MU_QMAT_PRINT_CHMAT			0x00000100
+#define MU_QMAT_PRINT_PRECMAT			0x00000200
+#define MU_QMAT_PRINT_SNR			0x00000400
+#define MU_QMAT_PRINT_RANK			0x00000800
+#define MU_QMAT_PRINT_STUFFMEM			0x00001000
+#define MU_QMAT_PRINT_ACTFRM			0x00002000
+#define MU_MATLAB_PROCESS			0x00004000
+#define MU_V_ANGLE				0x00008000
+#define MU_PROJ_PREC_MUEQ_NEED_MASK		0x000F0000
+#define MU_PROJ_PREC_MUEQ_NEED_NC0_MASK		0x00030000
+#define MU_PROJ_PREC_MUEQ_NEED_NC0_SHIFT	16
+#define MU_PROJ_PREC_MUEQ_NEED_NC1_MASK		0x000C0000
+#define MU_PROJ_PREC_MUEQ_NEED_NC1_SHIFT	18
+#define MU_PRINT_RANK_INFO			0x00100000
+/* NOTE(review): 0x00300000 overlaps MU_PRINT_RANK_INFO (0x00100000) — confirm intended */
+#define MU_LIMIT_GRP_ENTRY			0x00300000
+	volatile uint32_t debug_flag;
+	volatile struct qtn_sram_qmat mu_grp_qmat[QTN_MU_QMAT_MAX_SLOTS];
+	/* Used for testing to set rank for STA pairs manually */
+	volatile struct qtn_grp_rank mu_grp_man_rank[QTN_MU_QMAT_MAX_SLOTS];
+#if DSP_ENABLE_STATS
+	volatile struct qtn_dsp_stats dsp_stats;
+#endif
+
+	/* MU precoding/ranking algorithm selectors */
+#define MU_ALGORITHM_AUTO		0x00000000
+#define MU_ALGORITHM_PROJECTION		0x00000001
+#define MU_ALGORITHM_ITERATION		0x00000002
+#define MU_PRECODING_ALGORITHM_DEFAULT	MU_ALGORITHM_PROJECTION
+#define MU_RANKING_ALGORITHM_DEFAULT	MU_ALGORITHM_AUTO
+/* in case of adding algorithms above please update below equation accordingly */
+#define MU_ALLOWED_ALG(x) ((x)<=MU_ALGORITHM_ITERATION)
+	volatile uint32_t ranking_algorithm_to_use;
+	volatile uint32_t precoding_algorithm_to_use;
+	/* Rank-selection criteria for MU grouping */
+#define RANK_CRIT_ONE_AND_ONE	0x00000000
+#define RANK_CRIT_TWO_AND_ONE	0x00000001
+#define RANK_CRIT_THREE_AND_ONE	0x00000002
+#define RANK_CRIT_ONE_AND_TWO	0x00000003
+#define RANK_CRIT_ONE_AND_THREE	0x00000004
+#define RANK_CRIT_TWO_AND_TWO	0x00000005
+#define RANK_CRIT_MAX_MU_SUB_MAX_SU	0x00000006
+#define RANK_CRIT_DEFAULT	RANK_CRIT_TWO_AND_TWO
+#define RANK_CRIT_NO_USER_CONF	0x0000000f
+	volatile uint32_t rank_criteria_to_use;
+
+	volatile uint32_t mu_prec_cache_max_time;
+	volatile int32_t mu_rank_tolerance;
+};
+
+/* Index value presumably reserved for the "opti" message slot below — confirm */
+#define QTN_MUC_DSP_OPTI_MSG_INDEX 0xFF
+/* MuC<->DSP rate-training mailbox; buffers are D-cache-line aligned so each
+ * can be invalidated/flushed independently */
+struct qtn_muc_dsp_mbox
+{
+	volatile u_int32_t muc_to_dsp_mbox;
+	volatile u_int32_t dsp_to_muc_mbox;
+	volatile struct qtn_rate_train_info muc_dsp_msg_bufs[QTN_RATE_MUC_DSP_MSG_RING_SIZE]
+				__attribute__ ((aligned (ARC_DCACHE_LINE_LENGTH) ));
+	volatile struct qtn_rate_train_info muc_dsp_opti_msg
+				__attribute__ ((aligned (ARC_DCACHE_LINE_LENGTH) ));
+};
+
+#define QTN_TXBF_MBOX_PROCESSED 1
+#define QTN_TXBF_MBOX_NOT_PROCESSED 0
+
+#if !defined(MUC_BUILD) && !defined(DSP_BUILD) && !defined(AUC_BUILD)
+
+#if CONFIG_USE_SPI1_FOR_IPC
+	#define QTN_TXBF_D2L_IRQ	RUBY_IRQ_SPI
+	#define QTN_TXBF_D2L_IRQ_NAME	"DSP(spi)"
+#else
+	#define QTN_TXBF_D2L_IRQ	RUBY_IRQ_DSP
+	#define QTN_TXBF_D2L_IRQ_NAME	"DSP(d2l)"
+#endif
+
+/*
+ * LHost-side one-time init of the DSP->LHost doorbell path.
+ * Leaves the IRQ disabled; qtn_txbf_lhost_irq_enable() turns it on.
+ */
+RUBY_INLINE void
+qtn_txbf_lhost_init(void)
+{
+#if CONFIG_USE_SPI1_FOR_IPC
+	/* Initialize SPI controller, keep IRQ disabled */
+	qtn_mproc_sync_mem_write(RUBY_SPI1_SPCR,
+		RUBY_SPI1_SPCR_SPE | RUBY_SPI1_SPCR_MSTR |
+		RUBY_SPI1_SPCR_SPR(0));
+	qtn_mproc_sync_mem_write(RUBY_SPI1_SPER,
+		RUBY_SPI1_SPER_ESPR(0));
+#else
+	/* Ack any pending D2L interrupt, and keep the IRQ masked */
+	qtn_mproc_sync_mem_write(RUBY_SYS_CTL_D2L_INT,
+		qtn_mproc_sync_mem_read(RUBY_SYS_CTL_D2L_INT));
+	qtn_mproc_sync_mem_write(RUBY_SYS_CTL_D2L_INT_MASK,
+		~(1 << QTN_TXBF_DSP_TO_HOST_MBOX_INT));
+#endif
+}
+
+/*
+ * Acknowledge the DSP->LHost interrupt and return a bitmask of the
+ * interrupt(s) that fired.
+ */
+RUBY_INLINE u_int32_t
+qtn_txbf_lhost_irq_ack(struct qdrv_mac *mac)
+{
+#if CONFIG_USE_SPI1_FOR_IPC
+	/*
+	 * Only single interrupt is supported now.
+	 * If need to support more interrupts then something like
+	 * 'status' in RAM, guarded by semaphores has to be implemented.
+	 * This should be avoided, as it is performance penalty.
+	 */
+	qtn_mproc_sync_mem_write(RUBY_SPI1_SPSR,
+		qtn_mproc_sync_mem_read(RUBY_SPI1_SPSR));
+	return (1 << QTN_TXBF_DSP_TO_HOST_MBOX_INT);
+#else
+	return qtn_mproc_sync_irq_ack_all((u_int32_t)mac->mac_host_dsp_int_status);
+#endif
+}
+
+/* Unmask the DSP->LHost doorbell IRQ (SPI IRQ when SPI1 carries the IPC). */
+RUBY_INLINE void
+qtn_txbf_lhost_irq_enable(struct qdrv_mac *mac)
+{
+#if CONFIG_USE_SPI1_FOR_IPC
+	set_bit(RUBY_SPI1_SPCR_SPIE_BIT, (void*)RUBY_SPI1_SPCR);
+#else
+	set_bit(QTN_TXBF_DSP_TO_HOST_MBOX_INT, (void*)mac->mac_host_dsp_int_mask);
+#endif
+}
+
+/* Mask the DSP->LHost doorbell IRQ; counterpart of qtn_txbf_lhost_irq_enable(). */
+RUBY_INLINE void
+qtn_txbf_lhost_irq_disable(struct qdrv_mac *mac)
+{
+#if CONFIG_USE_SPI1_FOR_IPC
+	clear_bit(RUBY_SPI1_SPCR_SPIE_BIT, (void*)RUBY_SPI1_SPCR);
+#else
+	clear_bit(QTN_TXBF_DSP_TO_HOST_MBOX_INT, (void*)mac->mac_host_dsp_int_mask);
+#endif
+}
+
+#endif // #if !defined(MUC_BUILD) && !defined(DSP_BUILD) && !defined(AUC_BUILD)
+
+/*
+ * Claim a free message buffer from the shared pool.
+ * Scans from the last write position (mbox->wr) so slots are used
+ * round-robin; marks the chosen slot TXBF_BUFF_IN_USE and returns it,
+ * or NULL when every slot is busy.
+ * NOTE(review): the scan and the state update are not atomic — appears to
+ * assume a single producer; confirm.
+ */
+RUBY_INLINE volatile struct txbf_pkts *
+qtn_txbf_mbox_alloc_msg_buf(volatile struct qtn_txbf_mbox* mbox) {
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mbox->txbf_msg_bufs); i++) {
+		int j = (i + mbox->wr) % ARRAY_SIZE(mbox->txbf_msg_bufs);
+		if (mbox->txbf_msg_bufs[j].state == TXBF_BUFF_FREE) {
+			mbox->wr = j;
+			mbox->txbf_msg_bufs[j].state = TXBF_BUFF_IN_USE;
+			return &mbox->txbf_msg_bufs[j];
+		}
+	}
+
+	return NULL;
+}
+
+/* Return a message buffer to the pool by marking its slot free. */
+RUBY_INLINE void
+qtn_txbf_mbox_free_msg_buf(volatile struct txbf_pkts *msg) {
+	msg->state = TXBF_BUFF_FREE;
+}
+
+/* Return the pool's last write position (index of the most recently claimed slot). */
+RUBY_INLINE u_int32_t
+qtn_txbf_mbox_get_index(volatile struct qtn_txbf_mbox* mbox) {
+	return mbox->wr;
+}
+
+/*
+ * Return the shared TxBF mailbox: via the uncached bus alias on the
+ * MuC/DSP/AuC firmware builds, via the LHost (Linux) mapping otherwise.
+ */
+RUBY_INLINE volatile struct qtn_txbf_mbox*
+qtn_txbf_mbox_get(void)
+{
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+	return qtn_mproc_sync_nocache
+		(qtn_mproc_sync_shared_params_get()->txbf_mbox_bus);
+#else
+	/* Linux target */
+	return qtn_mproc_sync_shared_params_get()->txbf_mbox_lhost;
+#endif
+}
+
+/*
+ * Return the shared MuC<->DSP mailbox; same per-core address selection as
+ * qtn_txbf_mbox_get().
+ */
+RUBY_INLINE volatile struct qtn_muc_dsp_mbox*
+qtn_muc_dsp_mbox_get(void)
+{
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+	return qtn_mproc_sync_nocache
+		(qtn_mproc_sync_shared_params_get()->muc_dsp_mbox_bus);
+#else
+	/* Linux target */
+	return qtn_mproc_sync_shared_params_get()->muc_dsp_mbox_lhost;
+#endif
+}
+
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+/*
+ * Post index 'idx' into a single-entry MuC<->DSP mailbox and raise the
+ * peer's interrupt (M2D when built for the MuC, D2M otherwise).
+ * Returns 1 if posted, 0 if the mailbox still holds an unconsumed index.
+ */
+RUBY_INLINE int
+qtn_muc_dsp_mbox_send(u_int32_t mbox, u_int32_t idx)
+{
+	int ret = 0;
+
+	/* mailbox is free only while it reads QTN_TXBF_MBOX_BAD_IDX */
+	if (qtn_mproc_sync_mem_read(mbox) == QTN_TXBF_MBOX_BAD_IDX) {
+		qtn_mproc_sync_mem_write_wmb(mbox, idx);
+#if defined(MUC_BUILD)
+		qtn_mproc_sync_irq_trigger(RUBY_SYS_CTL_M2D_INT,
+			QTN_TXBF_MUC_TO_DSP_MBOX_INT);
+#else
+		qtn_mproc_sync_irq_trigger(RUBY_SYS_CTL_D2M_INT,
+			QTN_TXBF_DSP_TO_MUC_MBOX_INT);
+#endif
+		ret = 1;
+	}
+
+	return ret;
+}
+#endif
+
+#if defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD)
+/*
+ * Post index 'idx' into a single-entry TxBF mailbox and ring the peer:
+ * MuC -> DSP uses the M2D interrupt; DSP -> LHost uses either a dummy SPI
+ * write (when SPI1 carries the IPC) or the D2L interrupt.
+ * Returns 1 if posted, 0 if the mailbox still holds an unconsumed index.
+ */
+RUBY_INLINE int
+qtn_txbf_mbox_send(u_int32_t mbox, u_int32_t idx)
+{
+	int ret = 0;
+
+	/* mailbox is free only while it reads QTN_TXBF_MBOX_BAD_IDX */
+	if (qtn_mproc_sync_mem_read(mbox) == QTN_TXBF_MBOX_BAD_IDX) {
+		qtn_mproc_sync_mem_write_wmb(mbox, idx);
+#if defined(MUC_BUILD)
+		qtn_mproc_sync_irq_trigger(RUBY_SYS_CTL_M2D_INT,
+			QTN_TXBF_MUC_TO_DSP_MBOX_INT);
+#else
+	#if CONFIG_USE_SPI1_FOR_IPC
+		qtn_mproc_sync_mem_write(RUBY_SPI1_SPDR, 0x1/*value is not important*/);
+	#else
+		qtn_mproc_sync_irq_trigger(RUBY_SYS_CTL_D2L_INT,
+			QTN_TXBF_DSP_TO_HOST_MBOX_INT);
+	#endif
+#endif
+		ret = 1;
+	}
+
+	return ret;
+}
+#endif
+
+/*
+ * Drain a single-entry mailbox: return the posted index and reset the
+ * mailbox to QTN_TXBF_MBOX_BAD_IDX, or return QTN_TXBF_MBOX_BAD_IDX when
+ * the mailbox is empty.
+ */
+RUBY_INLINE u_int32_t
+qtn_txbf_mbox_recv(u_int32_t mbox)
+{
+	u_int32_t ret = QTN_TXBF_MBOX_BAD_IDX;
+
+	ret = qtn_mproc_sync_mem_read(mbox);
+	if (ret != QTN_TXBF_MBOX_BAD_IDX) {
+		/* mark the slot free so the sender may post again */
+		qtn_mproc_sync_mem_write_wmb(mbox, QTN_TXBF_MBOX_BAD_IDX);
+	}
+
+	return ret;
+}
+
+/*
+ * Lock the FFT dump memory for DSP processing.  Behavior depends on
+ * QTN_TXBF_FFT_LOCK_MANUAL: undefined = no locking; nonzero = manual
+ * (clear the dump-enable bit); zero = hardware locks automatically.
+ */
+RUBY_INLINE void
+qtn_txbf_fft_lock(void)
+{
+#ifndef QTN_TXBF_FFT_LOCK_MANUAL
+	/* Locking is disabled */
+#elif QTN_TXBF_FFT_LOCK_MANUAL
+	/* Manual, sw-centric locking: clear the FFT dump-enable bit. */
+	qtn_mproc_sync_mem_write_wmb(RUBY_QT3_BB_MIMO_BF_RX,
+		qtn_mproc_sync_mem_read(RUBY_QT3_BB_MIMO_BF_RX) & ~RUBY_QT3_BB_MIMO_BF_RX_DUMP_ENABLE);
+#else
+	/* Automatic, hw-centric locking.
+	 * Hw locks FFT memory automatically after the NDP packet is received.
+	 * No need to explicitly lock FFT.
+	 */
+#endif
+}
+
+/*
+ * Release the FFT dump memory; counterpart of qtn_txbf_fft_lock().
+ * Manual mode re-enables the dump; automatic mode acks the FFT interrupt
+ * so hardware can capture the next NDP.
+ */
+RUBY_INLINE void
+qtn_txbf_fft_unlock(void)
+{
+#ifndef QTN_TXBF_FFT_LOCK_MANUAL
+	/* Locking is disabled */
+#elif QTN_TXBF_FFT_LOCK_MANUAL
+	/* Manual, sw-centric locking: set the FFT dump-enable bit again. */
+	qtn_mproc_sync_mem_write_wmb(RUBY_QT3_BB_MIMO_BF_RX,
+		qtn_mproc_sync_mem_read(RUBY_QT3_BB_MIMO_BF_RX) | RUBY_QT3_BB_MIMO_BF_RX_DUMP_ENABLE);
+#else
+	/* Automatic, hw-centric locking. */
+	qtn_mproc_sync_mem_write_wmb(RUBY_QT3_BB_GLBL_PREG_INTR_STATUS, RUBY_QT3_BB_FFT_INTR);
+#endif
+}
+
+/*
+ * qtn_txbf_mbox can be used to set parameters for the DSP core from other cores.
+ * Ideally this mechanism should be reworked, but until that happens use these
+ * dedicated macros to access such parameters, to distinguish this use of
+ * qtn_txbf_mbox from its other purposes (IPC, BF feedback exchange).
+ */
+#define DSP_PARAM_GET(param) (qtn_txbf_mbox_get()->param)
+#define DSP_PARAM_SET(param, value) qtn_txbf_mbox_get()->param = (value)
+
+#endif // #ifndef __ASSEMBLY__
+
+#endif // #ifndef __QTN_TXBF_MBOX_H
+
+
diff --git a/drivers/qtn/Makefile b/drivers/qtn/Makefile
new file mode 100644
index 0000000..f30443a
--- /dev/null
+++ b/drivers/qtn/Makefile
@@ -0,0 +1,48 @@
+
+# Flag consumed by the per-module sub-Makefiles: the Quantenna modules are
+# built from outside the main kernel tree.
+QTN_EXTERNAL_MODULES=y
+export QTN_EXTERNAL_MODULES
+
+# Kbuild pass (KERNELRELEASE is set by the kernel build system):
+# select which module subdirectories to descend into.
+ifneq ($(KERNELRELEASE),)
+ifeq ($(CONFIG_QUANTENNA_RUBY),y)
+obj-m += ruby/
+endif
+ifeq ($(CONFIG_QUANTENNA_TOPAZ),y)
+obj-m += topaz/
+#
+# Check for fwt_ctrl.ko source code
+#
+ifneq ($(wildcard $(src)/fwt/Makefile),)
+obj-m += fwt/
+endif
+endif
+
+# Everything except the topaz_host_config board gets the full radio stack.
+ifeq ($(filter topaz_host_config,$(board_config)),)
+obj-m += qdrv/
+obj-m += wlan/
+obj-m += i2cbus/
+obj-m += se95sensor/
+obj-m += pm_interval/
+endif
+obj-m += bootcfg/
+ifeq ($(CONFIG_TOPAZ_PCIE_HOST), y)
+obj-m += pcie2/host/quantenna/
+endif
+else
+# Standalone invocation: re-enter the kernel build with M= pointing here.
+KERNELDIR	= ../linux
+CROSS		= ARCH=arc
+INSTALL		= INSTALL_MOD_PATH=../linux/modules
+MDIR		= ../drivers/
+EXTRA_CFLAGS	+= -Wall -Werror -I../include -I$(KERNELDIR)/include
+
+default: force
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(MDIR) modules KBUILD_EXTRA_SYMBOLS=$(MDIR)/extra_kos.symvers
+
+install: force
+	$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(MDIR) modules_install
+
+clean: force
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(MDIR) clean
+
+# Empty phony-style prerequisite forcing the targets above to always run.
+force:
+
+endif
diff --git a/drivers/qtn/auc/auc_print.h b/drivers/qtn/auc/auc_print.h
new file mode 100644
index 0000000..d714344
--- /dev/null
+++ b/drivers/qtn/auc/auc_print.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2009-2014 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+#ifndef __AUC_PRINT_H__
+#define __AUC_PRINT_H__
+
+#include <qtn/shared_print_buf.h>
+
+extern void (*uc_print_auc_cb)(struct shared_print_consumer* shared_buf);
+
+#endif /* __AUC_PRINT_H__ */
diff --git a/drivers/qtn/bootcfg/Makefile b/drivers/qtn/bootcfg/Makefile
new file mode 100644
index 0000000..36f96d0
--- /dev/null
+++ b/drivers/qtn/bootcfg/Makefile
@@ -0,0 +1,45 @@
+#
+# Quantenna Communications Inc. Driver Makefile
+#
+# Author: Jim Wood
+#
+
+EXTRA_CFLAGS	+= -Wall -Werror -I../drivers -I../include
+EXTRA_CFLAGS    += -mlong-calls
+EXTRA_CFLAGS += -DQTN_DEBUG
+# Kbuild pass: assemble bootcfg.ko; the MTD-backed stores are only built
+# when the kernel has MTD support.
+ifneq ($(KERNELRELEASE),)
+bootcfg-objs += bootcfg_drv.o \
+	bootcfg_store_init.o \
+	bootcfg_file.o
+
+ifeq ($(CONFIG_MTD),y)
+bootcfg-objs += bootcfg_mtd.o \
+	bootcfg_eeprom.o \
+	bootcfg_compress.o
+endif
+
+obj-m   += bootcfg.o
+else
+# Standalone invocation: re-enter the kernel build with M= set to this dir.
+# NOTE(review): CROSS uses ARCH=arm here while drivers/qtn/Makefile uses
+# ARCH=arc — confirm which standalone target this path is meant for.
+KERNELDIR	?= ../../linux-2.6.20.1
+INSTALL		= INSTALL_MOD_PATH=../linux/modules
+CROSS		= ARCH=arm CROSS_COMPILE=../buildroot/build_arm/staging_dir/bin/arm-linux-
+PWD			:= $(shell pwd)
+
+
+default:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+install:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
+	
+endif
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions
+
+depend .depend dep:
+	$(CC) $(CFLAGS) -M *.c > .depend
+
+# Pull in generated header dependencies, if present.
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
diff --git a/drivers/qtn/bootcfg/bootcfg_compress.c b/drivers/qtn/bootcfg/bootcfg_compress.c
new file mode 100644
index 0000000..21aac60
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_compress.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Create a wrapper around other bootcfg datastores which compresses on write
+ * and decompresses on read.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#include "bootcfg_drv.h"
+
+#include <qtn/bootcfg.h>
+#include <common/ruby_partitions.h>
+#include <common/ruby_version.h>
+
+#include <linux/init.h>
+#include <linux/zlib.h>
+#include <linux/zutil.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+
+/* Compression adapter state.  outer_ops is deliberately the first member so
+ * a pointer to it can be cast back to the whole struct (see get_zops_data()). */
+struct bootcfg_zops_data {
+	struct bootcfg_store_ops outer_ops;	/* must remain the first member */
+	struct bootcfg_store_ops *inner_ops;	/* wrapped (raw) datastore */
+	size_t inner_store_limit;	/* inner store capacity; 0 = no limit known */
+	z_stream inflate_stream;
+	z_stream deflate_stream;
+};
+
+/* Recover the adapter state from its embedded outer_ops pointer. */
+static struct bootcfg_zops_data *get_zops_data(struct bootcfg_store_ops *ops)
+{
+	return (struct bootcfg_zops_data*)ops;
+}
+
+/* Convenience accessor for the wrapped datastore's ops. */
+static struct bootcfg_store_ops *get_inner_ops(struct bootcfg_store_ops *ops)
+{
+	return get_zops_data(ops)->inner_ops;
+}
+
+/*
+ * Initialize the compression adapter: allocate the zlib inflate/deflate
+ * workspaces, then initialize the wrapped datastore.  On any failure the
+ * adapter object itself is freed (it was allocated by
+ * bootcfg_compression_adapter()) and a negative errno is returned.
+ *
+ * NOTE(review): *store_limit is never written here, so the caller sees
+ * "no limit"; presumably intentional because compression decouples the
+ * logical config size from the inner store capacity — confirm.
+ */
+int __init bootcfg_zadpt_init(struct bootcfg_store_ops *ops, size_t *store_limit)
+{
+	struct bootcfg_zops_data *data = get_zops_data(ops);
+	struct bootcfg_store_ops *inner_ops = get_inner_ops(ops);
+	int ret = 0;
+
+	data->inflate_stream.workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+	if (data->inflate_stream.workspace == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* zlib_deflate_workspacesize() signature differs across kernel versions */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	data->deflate_stream.workspace = kmalloc(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
+						 GFP_KERNEL);
+#else
+	data->deflate_stream.workspace = kmalloc(zlib_deflate_workspacesize(), GFP_KERNEL);
+#endif
+	if (data->deflate_stream.workspace == NULL) {
+		ret = -ENOMEM;
+		goto out_free_inf;
+	}
+
+	ret = inner_ops->init(inner_ops, &data->inner_store_limit);
+	if (ret) {
+		goto out_free_both;
+	}
+
+	return 0;
+
+out_free_both:
+	kfree(data->deflate_stream.workspace);
+out_free_inf:
+	kfree(data->inflate_stream.workspace);
+out:
+	/* ops is the adapter object allocated by bootcfg_compression_adapter() */
+	kfree(ops);
+
+	return ret;
+}
+
+/*
+ * Tear down the adapter: free the zlib workspaces, shut down the wrapped
+ * datastore, then free the adapter object itself.
+ */
+void __exit bootcfg_zadpt_exit(struct bootcfg_store_ops *ops)
+{
+	struct bootcfg_zops_data *data = get_zops_data(ops);
+	struct bootcfg_store_ops *inner_ops = get_inner_ops(ops);
+
+	kfree(data->deflate_stream.workspace);
+	kfree(data->inflate_stream.workspace);
+
+	inner_ops->exit(inner_ops);
+	kfree(ops);
+}
+
+/*
+ * Read and decompress the config image from the inner store.
+ *
+ * On-store layout: a 32-bit compressed-size header followed by the zlib
+ * stream.  The header is fetched first and sanity-checked against the
+ * staging buffer size, then the whole image is read and inflated into buf.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EIO when zlib
+ * cannot be initialized, -ENODATA when the header or stream is invalid,
+ * or the inner read's error code.
+ *
+ * Fixes vs. the previous revision: the failure message in the inflate path
+ * wrongly said "deflate failed"; also pair zlib_inflateInit() with
+ * zlib_inflateEnd() on the success path.
+ */
+static int bootcfg_zadpt_read(struct bootcfg_store_ops *ops, void* buf, const size_t bytes)
+{
+	struct bootcfg_zops_data *data = get_zops_data(ops);
+	struct bootcfg_store_ops *inner_ops = get_inner_ops(ops);
+	uint8_t *inner_buf;
+	uint32_t compressed_size = 0;
+	size_t inner_buf_size;
+	int ret = 0;
+
+	/* staging buffer is capped by the inner store's capacity, if known */
+	inner_buf_size = bytes;
+	if (data->inner_store_limit && data->inner_store_limit < inner_buf_size) {
+		inner_buf_size = data->inner_store_limit;
+	}
+
+	inner_buf = kzalloc(inner_buf_size, GFP_KERNEL);
+	if (inner_buf == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* first pass: fetch just the 4-byte size header */
+	ret = inner_ops->read(inner_ops, inner_buf, sizeof(uint32_t));
+	if (ret) {
+		goto out;
+	}
+
+	/* get compressed size (first 4 bytes) and sanity check */
+	memcpy(&compressed_size, &inner_buf[0], sizeof(uint32_t));
+	if (compressed_size > inner_buf_size - sizeof(uint32_t)) {
+		ret = -ENODATA;
+		goto out;
+	}
+
+	/* second pass: fetch header plus payload */
+	ret = inner_ops->read(inner_ops, inner_buf, compressed_size + sizeof(uint32_t));
+	if (ret) {
+		goto out;
+	}
+
+	data->inflate_stream.next_in = &inner_buf[sizeof(uint32_t)];
+	data->inflate_stream.total_in = 0;
+	data->inflate_stream.avail_in = compressed_size;
+	data->inflate_stream.next_out = buf;
+	data->inflate_stream.total_out = 0;
+	data->inflate_stream.avail_out = bytes;
+
+	ret = zlib_inflateInit(&data->inflate_stream);
+	if (ret != Z_OK) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/* single-shot inflate; anything but Z_STREAM_END means a bad stream */
+	ret = zlib_inflate(&data->inflate_stream, Z_FINISH);
+	if (ret != Z_STREAM_END) {
+		printk(KERN_ERR "%s inflate failed: ret %d\n", __FUNCTION__, ret);
+		ret = -ENODATA;
+		goto out;
+	}
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s zlib decompressed %ld bytes into %ld\n",
+			__FUNCTION__, data->inflate_stream.total_in, data->inflate_stream.total_out);
+#endif
+
+	/* pair with zlib_inflateInit() so the stream is left in a clean state */
+	zlib_inflateEnd(&data->inflate_stream);
+
+	ret = 0;
+
+out:
+	if (inner_buf) {
+		kfree(inner_buf);
+	}
+	return ret;
+}
+
+/*
+ * Compress the config image and store it in the inner datastore.
+ * On-store layout: a 32-bit compressed-size header followed by the zlib
+ * stream.  Returns 0 on success, -ENOMEM/-EIO/-ENOSPC on failure, or the
+ * inner write's error code.
+ */
+static int bootcfg_zadpt_write(struct bootcfg_store_ops *ops, const void* buf, const size_t bytes)
+{
+	struct bootcfg_zops_data *data = get_zops_data(ops);
+	struct bootcfg_store_ops *inner_ops = get_inner_ops(ops);
+	uint8_t *inner_buf = NULL;
+	size_t inner_buf_size;
+	uint32_t compressed_size;
+	int ret = 0;
+
+	/* staging buffer is capped by the inner store's capacity, if known */
+	inner_buf_size = bytes;
+	if (data->inner_store_limit && data->inner_store_limit < inner_buf_size) {
+		inner_buf_size = data->inner_store_limit;
+	}
+
+	inner_buf = kzalloc(inner_buf_size, GFP_KERNEL);
+	if (inner_buf == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* compression level 3: modest CPU cost */
+	ret = zlib_deflateInit(&data->deflate_stream, 3);
+	if (ret != Z_OK) {
+		ret = -EIO;
+		goto out;
+	}
+
+	data->deflate_stream.next_in = buf;
+	data->deflate_stream.total_in = 0;
+	data->deflate_stream.avail_in = bytes;
+	data->deflate_stream.next_out = &inner_buf[sizeof(uint32_t)];
+	data->deflate_stream.total_out = 0;
+	data->deflate_stream.avail_out = inner_buf_size - sizeof(uint32_t);
+
+	/* single-shot deflate; anything but Z_STREAM_END means it did not fit */
+	ret = zlib_deflate(&data->deflate_stream, Z_FINISH);
+	if (ret != Z_STREAM_END) {
+		printk(KERN_ERR "%s deflate failed: ret %d\n", __FUNCTION__, ret);
+		ret = -ENOSPC;
+		goto out;
+	}
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s zlib compressed %ld bytes into %ld\n",
+			__FUNCTION__, data->deflate_stream.total_in, data->deflate_stream.total_out);
+#endif
+
+	/* NOTE(review): no zlib_deflateEnd() here; kernel zlib re-initializes the
+	 * stream on every zlib_deflateInit(), so this appears benign — confirm. */
+	compressed_size = data->deflate_stream.total_out;
+	memcpy(inner_buf, &compressed_size, sizeof(compressed_size));
+
+	ret = inner_ops->write(inner_ops, inner_buf, compressed_size + sizeof(uint32_t));
+
+out:
+	if (inner_buf) {
+		kfree(inner_buf);
+	}
+	return ret;
+}
+
+
+/* Outer ops template copied into each adapter instance.  (The historical
+ * "zapdt" spelling is kept — renaming would touch other references.) */
+static const struct bootcfg_store_ops zapdt_outer_ops = {
+	.read	= bootcfg_zadpt_read,
+	.write	= bootcfg_zadpt_write,
+	.init	= bootcfg_zadpt_init,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	.exit	= bootcfg_zadpt_exit,
+#else
+	.exit	= __devexit_p(bootcfg_zadpt_exit),
+#endif
+};
+
+/*
+ * Build a compression adapter wrapping raw_accessor.  Returns the embedded
+ * outer ops (first member, so it can be cast back to the adapter), or NULL
+ * on allocation failure.
+ * NOTE(review): allocated with kmalloc — fields other than outer_ops and
+ * inner_ops stay uninitialized until init() runs; confirm init() is always
+ * called before read/write.
+ */
+struct bootcfg_store_ops *bootcfg_compression_adapter(struct bootcfg_store_ops *raw_accessor)
+{
+	struct bootcfg_zops_data *z_ops = kmalloc(sizeof(*z_ops), GFP_KERNEL);
+	if (z_ops == NULL) {
+		return NULL;
+	}
+
+	z_ops->outer_ops = zapdt_outer_ops;
+	z_ops->inner_ops = raw_accessor;
+
+	return &z_ops->outer_ops;
+}
+
diff --git a/drivers/qtn/bootcfg/bootcfg_drv.c b/drivers/qtn/bootcfg/bootcfg_drv.c
new file mode 100644
index 0000000..11dc14f8
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_drv.c
@@ -0,0 +1,1403 @@
+/*
+ * Copyright (c) 2008-2014 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Boot configuration driver: maintains the boot configuration data in RAM,
+ * exposes it via proc entries, and commits changes to the backing store.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Syscfg module - uses the config sector as a common filesystem shared
+ * between Linux and u-boot.
+ */
+
+#include "bootcfg_drv.h"
+
+#include <qtn/bootcfg.h>
+#include <common/ruby_partitions.h>
+#include <common/ruby_version.h>
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>	/* for copy_from_user */
+#include <asm/board/board_config.h>
+
+///////////////////////////////////////////////////////////////////////////////
+//             Definitions
+///////////////////////////////////////////////////////////////////////////////
+#define DRV_NAME		"bootcfg"
+#define DRV_VERSION		"1.1"
+#define DRV_AUTHOR		"Quantenna Communciations, Inc."
+#define DRV_DESC		"Boot Configuration Driver"
+
+#define BOOTCFG_PENDING_END     " writes complete\n"
+
+static struct proc_dir_entry *bootcfg_proc;
+static struct proc_dir_entry *boardparam_proc;
+static struct proc_dir_entry *bootcfg_dir;
+static struct proc_dir_entry *pending_proc;
+
+///////////////////////////////////////////////////////////////////////////////
+//          Structures and types
+///////////////////////////////////////////////////////////////////////////////
+/* One mounted bootcfg "file" — presumably a named region inside the config
+ * data area (see bootcfg_mount()); TODO confirm field semantics vs. mount. */
+typedef struct sBootCfg {
+	char entry[256];	/* file / proc entry name */
+	u32 addr;		/* presumably offset of the data within the config area */
+	u32 len;		/* length in bytes */
+	int flags;
+	struct proc_dir_entry *proc;	/* proc entry for this file */
+	struct sBootCfg *next;	/* singly-linked list of mounted files */
+} tsBootCfgFile, *ptsBootCfgFile;
+
+/* In-RAM copy of the config area.  The crc + data prefix mirrors the flash
+ * image layout: bootcfg_store_write_wq() writes the struct from its start. */
+typedef struct sBootCfgData {
+	u32 crc;		/* crc32 over data[0..size) */
+	u8 data[BOOT_CFG_DATA_SIZE];
+	int isValid;
+	int dirty;		/* nonzero while a deferred flash commit is pending */
+	int size;		/* usable bytes of data[] (store may be smaller) */
+} tsBootCfgData, *ptsBootCfgData;
+
+///////////////////////////////////////////////////////////////////////////////
+//          Global Data
+///////////////////////////////////////////////////////////////////////////////
+static tsBootCfgData gBootCfgData = { 0 };	/* the single in-RAM config image */
+
+/* NOTE(review): these spinlocks are used below, but no spin_lock_init() /
+ * DEFINE_SPINLOCK is visible in this chunk — confirm they are initialized
+ * during module init. */
+static spinlock_t gBootCfgLock;
+static spinlock_t gDefragLock;
+static spinlock_t gFlashLock;
+static spinlock_t gBootCfgVarLock;
+
+static ptsBootCfgFile gFiles = NULL;	/* list of mounted bootcfg files */
+
+static ptsBootCfgFile bootcfg_mount(const char *filename, u32 addr, u32 len);
+static int bootcfg_defragment(void);
+
+// do flash ops in background
+//static void bootcfg_do_flash(unsigned long data);
+//struct tasklet_struct work = { 0, 0, ATOMIC_INIT(0), bootcfg_do_flash, 0};
+
+/* deferred flash commit, debounced by bootcfg_store_write() */
+static void bootcfg_store_write_wq(struct work_struct *unused);
+static DECLARE_DELAYED_WORK(work, bootcfg_store_write_wq);
+
+///////////////////////////////////////////////////////////////////////////////
+//          Functions
+///////////////////////////////////////////////////////////////////////////////
+/******************************************************************************
+   Function:    bootcfg_crc32
+   Purpose:     calculate crc for bootcfg area
+   Returns:     CRC-32 of buf[0..len)
+   Note:        the pre/post inversion around the kernel crc32() makes the
+                result match the standard zlib/u-boot crc32() convention
+ *****************************************************************************/
+static u32 bootcfg_crc32(u32 crc, const u8 * buf, u32 len)
+{
+	return crc32(crc ^ 0xffffffffL, buf, len) ^ 0xffffffffL;
+}
+
+/* debounce window before a dirty image is committed to flash */
+#define BOOTCFG_COMMIT_DELAY_MS		500
+
+/******************************************************************************
+   Function:    bootcfg_store_write
+   Purpose:     Writes data to flash device
+   Returns:
+   Note:        We now do this in deferred mode
+ *****************************************************************************/
+static void bootcfg_store_write(void)
+{
+	/* cancel_delayed_work_sync() may sleep — must not run in IRQ context */
+	WARN_ON(in_interrupt());
+
+	/* keep pushing the commit into the future while updates are arriving */
+	cancel_delayed_work_sync(&work);
+
+	spin_lock(&gBootCfgLock);
+
+	gBootCfgData.dirty = 1;
+
+	spin_unlock(&gBootCfgLock);
+
+	schedule_delayed_work(&work, msecs_to_jiffies(BOOTCFG_COMMIT_DELAY_MS));
+}
+
+/* Backing datastore; installed by bootcfg_store_init(). */
+static struct bootcfg_store_ops *store_ops = NULL;
+
+/*
+ * Deferred flash commit (delayed work): snapshot the in-RAM image, compute
+ * its CRC, write it through the datastore and read it back to verify.
+ *
+ * NOTE(review): the GFP_KERNEL allocations and the datastore read/write
+ * (which can sleep, e.g. MTD flash I/O) all run while gBootCfgLock — a
+ * spinlock — is held.  That is sleeping in atomic context; confirm whether
+ * this should be a mutex instead.
+ */
+static void bootcfg_store_write_wq(struct work_struct *unused)
+{
+	int ret;
+	ptsBootCfgData data = NULL;
+	ptsBootCfgData reread_verify_data = NULL;
+	size_t crc_size;
+	size_t write_size;
+
+	spin_lock(&gBootCfgLock);
+
+	data = kmalloc(sizeof(gBootCfgData), GFP_KERNEL);
+	reread_verify_data = kmalloc(sizeof(gBootCfgData), GFP_KERNEL);
+	if (data == NULL || reread_verify_data == NULL) {
+		printk(KERN_ERR "%s out of memory\n", __FUNCTION__);
+		goto out;
+	}
+
+	/* image on flash is the CRC word followed by the data area */
+	crc_size = gBootCfgData.size;
+	write_size = gBootCfgData.size + sizeof(u32);
+	memcpy(data->data, gBootCfgData.data, crc_size);
+
+	data->crc = bootcfg_crc32(0, gBootCfgData.data, crc_size);
+
+	ret = store_ops->write(store_ops, data, write_size);
+	if (ret < 0) {
+		printk(KERN_ERR "%s %s data write failed: %d\n",
+				DRV_NAME, __FUNCTION__, ret);
+		goto out;
+	}
+
+	/* read back and compare to catch silent flash write failures */
+	ret = store_ops->read(store_ops, reread_verify_data, write_size);
+	if (ret < 0) {
+		printk(KERN_ERR "%s %s data read for verify failed: %d\n",
+				DRV_NAME, __FUNCTION__, ret);
+		goto out;
+	}
+
+	if (memcmp(data, reread_verify_data, write_size) != 0) {
+		printk(KERN_ERR "%s %s data write verify failed\n",
+				DRV_NAME, __FUNCTION__);
+		goto out;
+	}
+
+out:
+	kfree(data);
+	kfree(reread_verify_data);
+
+	/*
+	 * TBD: leaving this flag set in the event of a failure will cause subsequent callers to
+	 * hang.  A failure indication should be returned to the caller.
+	 */
+	gBootCfgData.dirty = 0;
+
+	spin_unlock(&gBootCfgLock);
+}
+
+/******************************************************************************
+   Function:    bootcfg_store_read
+   Purpose:     reads data from flash, checks crc and sets valid bit
+   Returns:
+   Note:        todo: unmap on exit
+ *****************************************************************************/
+
+/* module parameter: set empty_flash=1 to start from a blank configuration
+ * (e.g. after corruption or on a factory-fresh device) */
+static int empty_flash = 0;
+module_param(empty_flash, int, 0);
+#define VERSION_STR_SIZE 16
+
+static int bootcfg_store_read(void)
+{
+	int ret;
+	u32 crc;
+	int32_t idx = 0;
+	/* candidate env-data sizes, tried in order until the CRC matches:
+	 * the configured size, then the legacy 64K and base partition sizes */
+	uint32_t env_size[] = {gBootCfgData.size, F64K_ENV_PARTITION_SIZE - sizeof(u32),
+				BOOT_CFG_BASE_SIZE - sizeof(u32)};
+
+	ret = store_ops->read(store_ops, &gBootCfgData, gBootCfgData.size + sizeof(u32));
+	if (ret < 0 && ret != -ENODATA) {
+		printk(KERN_ERR "%s %s data read failed: %d\n",
+				DRV_NAME, __FUNCTION__, ret);
+	} else {
+		/* Check CRC against each candidate size; adopt the one that matches */
+		do {
+			crc = bootcfg_crc32(0, gBootCfgData.data, env_size[idx]);
+			if (crc == gBootCfgData.crc) {
+				gBootCfgData.size = env_size[idx];
+				break;
+			}
+		} while (++idx < ARRAY_SIZE(env_size));
+
+		if (crc != gBootCfgData.crc) {
+			printk(KERN_WARNING "%s %s: crc32 does not match crc: %x expect %x\n",
+				DRV_NAME, __FUNCTION__, crc, gBootCfgData.crc);
+		}
+
+		if (empty_flash) {
+			/*
+			 * Empty out data. This may be due to corruption,
+			 * decompression failure when eeprom is uninitialised,
+			 * or a legitimately empty flash... user can override.
+			 */
+			ret = 0;
+			/* NOTE(review): memset length is BOOT_CFG_SIZE, not
+			 * sizeof(gBootCfgData) — confirm it cannot exceed the
+			 * struct and overrun adjacent data. */
+			memset(&gBootCfgData, 0, BOOT_CFG_SIZE);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Install the backing datastore and size the in-RAM config image.
+ * A nonzero store_limit reported by the datastore caps the usable data
+ * size (minus the 4-byte CRC header); otherwise, and in any case at most,
+ * BOOT_CFG_DATA_SIZE is used.  Returns 0 or a negative errno.
+ */
+static int __init bootcfg_store_init(void)
+{
+	int ret;
+	size_t store_limit = 0;
+
+	store_ops = bootcfg_get_datastore();
+	if (store_ops == NULL) {
+		printk(KERN_ERR "%s: no datastore provided\n", __FUNCTION__);
+		return -ENODEV;
+	}
+
+	ret = store_ops->init(store_ops, &store_limit);
+	if (ret) {
+		printk(KERN_ERR "%s: datastore init failed for store, ret = %d\n",
+				__FUNCTION__, ret);
+		return ret;
+	}
+
+	if (store_limit) {
+		/* a restricted number of bytes for storage is available */
+		gBootCfgData.size = store_limit - sizeof(u32);
+	} else {
+		gBootCfgData.size = BOOT_CFG_DATA_SIZE;
+	}
+
+	/* never exceed the in-RAM buffer */
+	if (gBootCfgData.size > BOOT_CFG_DATA_SIZE) {
+		gBootCfgData.size = BOOT_CFG_DATA_SIZE;
+	}
+
+	return 0;
+}
+
+/* Release the backing datastore, if one was installed and has an exit hook. */
+static void bootcfg_store_exit(void)
+{
+	if (store_ops && store_ops->exit)
+		store_ops->exit(store_ops);
+}
+
+/******************************************************************************
+   Function:    bootcfg_get_var
+   Purpose:     Get variable from environment
+   Returns:     NULL if variable not found, pointer to storage otherwise
+   Note:        variable value copied to storage
+ *****************************************************************************/
+char *bootcfg_get_var(const char *variable, char *storage)
+{
+	char *ptr;
+	int len;
+
+	if ((len = strlen(variable)) == 0) {
+		return NULL;
+	}
+
+	/* entries are stored back to back as "name=value\0"; table ends with "\0\0" */
+	ptr = (char *)gBootCfgData.data;
+	while (*ptr) {
+		/*
+		 * Match the full name only: require '=' right after it, so that
+		 * e.g. "config_data" cannot match "config_data_end=..." (the
+		 * previous prefix-only strncmp allowed such false matches).
+		 */
+		if (strncmp(variable, ptr, len) == 0 && ptr[len] == '=') {
+			/* copy "=value" — callers expect the leading '=' */
+			strcpy(storage, &ptr[len]);
+			return storage;
+		}
+		/* advance past this entry's terminating 0 */
+		while (*ptr++) {
+		}
+	}
+	return NULL;
+}
+
+EXPORT_SYMBOL(bootcfg_get_var);
+
+/******************************************************************************
+   Function:    bootcfg_set_var
+   Purpose:     Set variable to environment
+   Returns:     NULL if variable not found, pointer to storage otherwise
+   Note:        variable value copied to storage
+ *****************************************************************************/
+int bootcfg_set_var(const char *var, const char *value)
+{
+	char *ptr, *next;
+	u32 len;
+
+	if ((len = strlen(var)) == 0) {
+		return -1;
+	}
+
+	/* entries are stored back to back as "name=value\0"; table ends with "\0\0" */
+	spin_lock(&gBootCfgVarLock);
+	ptr = (char *)gBootCfgData.data;
+
+	while (*ptr) {
+		/*
+		 * Full-name match only: require '=' after the name so that
+		 * setting/deleting "ab" cannot clobber "abc=..." (the previous
+		 * prefix-only strncmp allowed such false matches).
+		 */
+		if (strncmp(var, ptr, len) == 0 && ptr[len] == '=') {
+			/* found it, delete the entry */
+			next = ptr;
+
+			/* skip past this entry's terminating 0 */
+			while (*next++ != 0) {
+			}
+
+			/* slide the rest of the table down over the hole */
+			while (*next || *(next + 1)) {
+				*ptr++ = *next++;
+			}
+			*ptr++ = 0;
+			*ptr = 0;
+			break;
+		}
+		/* advance past this entry's terminating 0 */
+		while (*ptr++) {
+		}
+	}
+
+	/* value == NULL means delete only; otherwise append "var=value" at the end */
+	if (value != NULL) {
+		/* NOTE(review): no bounds check against gBootCfgData.size here —
+		 * a long value could overflow the env area; confirm callers
+		 * limit entry sizes. */
+		ptr = (char *)gBootCfgData.data;
+		while (*ptr) {
+			while (*ptr++) {
+			}
+		}
+		while (*var) {
+			*ptr++ = *var++;
+		}
+
+		*ptr++ = '=';
+		while (*value) {
+			*ptr++ = *value++;
+		}
+		*ptr++ = 0;	/* terminate the entry */
+		*ptr = 0;	/* double 0 marks end of table */
+	}
+	spin_unlock(&gBootCfgVarLock);
+	return 0;
+}
+
+EXPORT_SYMBOL(bootcfg_set_var);
+
+/******************************************************************************
+	Function:   bootcfg_get_end
+	Purpose:	Get end of data section
+	Returns:	current end address; 0 if the stored value is malformed
+	Note:	falls back to BOOT_CFG_DEF_START when no config_data_end
+		variable exists yet (first entry in the system)
+ *****************************************************************************/
+static u32 bootcfg_get_end(void)
+{
+	char tmpBuf[256];
+	char *val;
+	u32 addr = BOOT_CFG_DEF_START;
+
+	val = bootcfg_get_var("config_data_end", tmpBuf);
+	if (val == NULL) {
+		printk("bootcfg: first entry in system\n");
+	} else if (sscanf(val, "=0x%x", &addr) != 1) {
+		// stored value did not parse: report failure to caller
+		addr = 0;
+	}
+	return addr;
+}
+
+/******************************************************************************
+	Function:   bootcfg_delete
+	Purpose:	delete file
+	Returns:	0 if successful, -1 if the file does not exist
+	Note:	removes the env entry, unlinks the /proc node, then
+		compacts the data area and commits to the store
+ *****************************************************************************/
+int bootcfg_delete(const char *token)
+{
+	char tmpBuf[256];
+	ptsBootCfgFile prev = NULL;
+	ptsBootCfgFile next = gFiles;
+
+	spin_lock(&gBootCfgLock);
+
+	// figure out our address and len
+	if (bootcfg_get_var(token, tmpBuf) == NULL) {
+		printk("bootcfg error: %s filename not found\n", token);
+		/* BUG FIX: the original returned with gBootCfgLock still
+		 * held, deadlocking every later bootcfg operation */
+		spin_unlock(&gBootCfgLock);
+		return -1;
+	}
+	bootcfg_set_var(token, NULL);
+
+	// unmount the file
+	while (next != NULL) {
+		if (strcmp(next->entry, token) == 0) {
+			remove_proc_entry(next->entry, bootcfg_dir);
+
+			// fix links
+			if (prev == NULL) {
+				gFiles = next->next;
+			} else {
+				prev->next = next->next;
+			}
+			kfree(next);
+			break;
+		}
+		prev = next;
+		next = next->next;
+	}
+	spin_unlock(&gBootCfgLock);
+
+	// compact the data area and persist the result
+	bootcfg_defragment();
+	bootcfg_store_write();
+	return 0;
+}
+
+EXPORT_SYMBOL(bootcfg_delete);
+
+/******************************************************************************
+	Function:   bootcfg_create
+	Purpose:	create file
+ 	Returns:	0 if successful
+  	Note:  	    if size is zero, the proc entry is created but
+  	            no data is allocated until the first write
+ *****************************************************************************/
+int bootcfg_create(const char *filename, u32 size)
+{
+	char tmpBuf[256];
+	u32 addr = 0;
+	ptsBootCfgFile ptr, next;
+	spin_lock(&gBootCfgLock);
+	// refuse if any env entry (variable or file) already uses this name
+	if (bootcfg_get_var(filename, tmpBuf) != NULL) {
+		printk("bootcfg error: %s filename already taken\n", filename);
+		spin_unlock(&gBootCfgLock);
+		return -EFAULT;
+	}
+
+	if (size != 0) {
+		// need exclusive access now so nobody messes up our allocation process
+		if ((addr = bootcfg_get_end()) == 0) {
+			printk("bootcfg error - getting cfg address\n");
+			spin_unlock(&gBootCfgLock);
+			return -EFAULT;
+		}
+		// reject allocations that would run past the data area
+		if ((size + addr) > gBootCfgData.size) {
+			printk("bootcfg error - len out of range\n");
+			spin_unlock(&gBootCfgLock);
+			return -EFAULT;
+		}
+		// set end marker
+		sprintf(tmpBuf, "0x%08x", addr + size);
+		bootcfg_set_var("config_data_end", tmpBuf);
+	}
+
+	// record the file as "name=cfg <addr> <size>" in the env area
+	sprintf(tmpBuf, "cfg 0x%08x 0x%08x", addr, size);
+	bootcfg_set_var(filename, tmpBuf);
+	spin_unlock(&gBootCfgLock);
+	// make it so - someone else could come in here, but the
+	// flash data structure is intact, so should be ok
+	// NOTE(review): bootcfg_mount may return NULL; the result is linked
+	// in unchecked below - confirm list users tolerate a NULL node
+	bootcfg_store_write();
+	ptr = bootcfg_mount(filename, addr, size);
+
+	// add our file into list (append at the tail)
+	spin_lock(&gBootCfgLock);
+	if (gFiles == NULL) {
+		gFiles = ptr;
+	} else {
+		next = gFiles;
+		while (next->next != NULL) {
+			next = next->next;
+		}
+		next->next = ptr;
+	}
+	spin_unlock(&gBootCfgLock);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(bootcfg_create);
+
+/******************************************************************************
+	Function:   bootcfg_proc_write_env
+	Purpose:	write data to boot environment
+	Returns:	Number of bytes consumed, or negative errno
+	Note:	accepts "create <file> <hexsize>", "delete <file>" or
+		"<var> <value>"; a name with no value deletes the variable
+ *****************************************************************************/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static ssize_t
+bootcfg_proc_write_env(struct file *file, const char __user *buffer,
+		       size_t count, loff_t *ppos)
+#else
+static int
+bootcfg_proc_write_env(struct file *file, const char *buffer,
+			unsigned long count, void *data)
+#endif
+{
+	/* get buffer size */
+	char *procfs_buffer;
+	char tmpBuf[256];
+	char *ptr, *token, *arg;
+	u32 len;
+
+	if ((procfs_buffer = kmalloc(count + 1, GFP_KERNEL)) == NULL) {
+		printk("bootcfg error: out of memory\n");
+		return -ENOMEM;
+	}
+
+	/* write data to the buffer */
+	if (copy_from_user(procfs_buffer, buffer, count)) {
+		/* BUG FIX: report the fault instead of claiming success */
+		kfree(procfs_buffer);
+		return -EFAULT;
+	}
+
+	ptr = (char *)procfs_buffer;
+	ptr[count] = '\0';
+	token = strsep(&ptr, " \n");
+
+	// create a new file
+	if (strcmp(token, "create") == 0) {
+		// figure out our filename
+		token = strsep(&ptr, " ");
+		/* BUG FIX: strsep returns NULL once the input is exhausted;
+		 * the original passed NULL straight to bootcfg_get_var */
+		if (token == NULL) {
+			printk("bootcfg error: must supply a filename\n");
+			goto bail;
+		}
+		if (bootcfg_get_var(token, tmpBuf) != NULL) {
+			printk("bootcfg error: %s filename already taken\n",
+			       token);
+			goto bail;
+		}
+		arg = strsep(&ptr, " \n");
+		if (arg == NULL) {
+			printk("bootcfg error: must supply max size\n");
+			goto bail;
+		}
+		/* BUG FIX: check the parse so len is never used uninitialised */
+		if (sscanf(arg, "%x", &len) != 1) {
+			printk("bootcfg error: invalid max size\n");
+			goto bail;
+		}
+
+		// create the file
+		bootcfg_create(token, len);
+	} else
+		// delete a file
+	if (strcmp(token, "delete") == 0) {
+		// get our filename
+		token = strsep(&ptr, " \n");
+		if (token != NULL) {
+			bootcfg_delete(token);
+		}
+	} else {
+		// default case, we are just setting a variable
+		/* BUG FIX: ptr is NULL when no delimiter followed the name;
+		 * the original dereferenced it unconditionally */
+		if (ptr != NULL && *ptr != '\0') {
+			arg = strsep(&ptr, "\n");
+		} else {
+			arg = NULL;
+		}
+		bootcfg_set_var(token, arg);
+		bootcfg_store_write();
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	*ppos += count;
+#endif
+bail:
+	kfree(procfs_buffer);
+	return count;
+}
+
+/* Record a file's location/length in the env ("name=cfg <addr> <len>")
+ * and advance the config_data_end marker to addr + len. */
+static void bootcfg_setFile(char *file, u32 addr, u32 len)
+{
+	char tmpBuf[256];
+
+	// update our entry and end of memory marker
+	sprintf(tmpBuf, "cfg 0x%08x 0x%08x", addr, len);
+	bootcfg_set_var(file, tmpBuf);
+
+	// new end of the data area
+	sprintf(tmpBuf, "0x%08x", addr + len);
+	bootcfg_set_var("config_data_end", tmpBuf);
+}
+
+/******************************************************************************
+	Function:   bootcfg_proc_write
+	Purpose:	get data from proc file
+ 	Returns:	Number of bytes written (updates file->f_pos), or
+		negative errno on failure
+  	Note:	grows - and if needed relocates - the backing file when a
+		write extends past its current length
+ *****************************************************************************/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static ssize_t
+bootcfg_proc_write(struct file *file, const char __user *buffer,
+		   size_t count, loff_t *ppos)
+{
+	char *procfs_buffer;
+	ptsBootCfgFile id = (ptsBootCfgFile) PDE_DATA(file_inode(file));
+#else
+static int
+bootcfg_proc_write(struct file *file, const char *buffer,
+		   unsigned long count, void *data)
+{
+	char *procfs_buffer;
+	ptsBootCfgFile id = (ptsBootCfgFile) data;
+#endif
+
+	if ((procfs_buffer = kmalloc(count, GFP_KERNEL)) == NULL) {
+		printk("bootcfg error: out of memory\n");
+		return -ENOMEM;
+	}
+
+	/* copy the user data before taking any locks */
+	if (copy_from_user(procfs_buffer, buffer, count)) {
+		kfree(procfs_buffer);
+		return -EFAULT;
+	}
+
+	spin_lock(&gBootCfgLock);
+	// do we need to increase our size?
+	if ((file->f_pos + count) > id->len) {
+		int len = file->f_pos + count;
+		u32 addr = bootcfg_get_end();
+
+		if (addr + count > gBootCfgData.size) {
+			printk("bootcfg error1: file too large\n");
+			spin_unlock(&gBootCfgLock);
+			kfree(procfs_buffer);
+			return -ENOSPC;
+		}
+
+		if (id->addr == 0) {
+			/* first write: allocate at the current end of data */
+			id->addr = addr;
+			bootcfg_setFile(id->entry, addr, len);
+		} else if ((id->addr + id->len) == addr) {
+			/* file is already last: just grow it in place */
+			bootcfg_setFile(id->entry, id->addr, len);
+		} else {
+			/* file must be relocated to the end of the area */
+			u8 *orig;
+			// make a copy of our data
+			if ((orig = kmalloc(id->len, GFP_KERNEL)) == NULL) {
+				/* BUG FIX: the original fell through to
+				 * "bail" and returned count (success) */
+				spin_unlock(&gBootCfgLock);
+				kfree(procfs_buffer);
+				return -ENOMEM;
+			}
+			memcpy(orig, &gBootCfgData.data[id->addr - 4], id->len);
+
+			// remove our current file and defragment
+			bootcfg_set_var(id->entry, NULL);
+			bootcfg_defragment();
+
+			// now add our new data
+			addr = bootcfg_get_end();
+
+			// make sure we have room for data
+			if ((addr + len) > gBootCfgData.size) {
+				printk("bootcfg error2: file too large\n");
+
+				// revert to flash data and return
+				bootcfg_store_read();
+				kfree(orig);
+				/* BUG FIX: the original returned count
+				 * (success) here as well */
+				spin_unlock(&gBootCfgLock);
+				kfree(procfs_buffer);
+				return -ENOSPC;
+			}
+			bootcfg_setFile(id->entry, addr, len);
+
+			// copy our original data
+			memcpy((char *)&gBootCfgData.data[addr - 4], orig,
+			       id->len);
+
+			// update our fs info
+			id->addr = addr;
+			kfree(orig);
+		}
+		id->len = len;
+	}
+	// offset by 4 to compensate for crc
+	memcpy((char *)&gBootCfgData.data[file->f_pos + id->addr - 4],
+	       procfs_buffer, count);
+	bootcfg_store_write();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	*ppos += count;
+	proc_set_size(id->proc, file->f_pos + count);
+#else
+	id->proc->size = file->f_pos + count;
+	file->f_pos += count;
+#endif
+
+	spin_unlock(&gBootCfgLock);
+	kfree(procfs_buffer);
+	return count;
+}
+
+/******************************************************************************
+	Function:   bootcfg_get_args
+	Purpose:	parse arguments from cfg file entry
+	Returns:	filename copied to buffer, addr and len scanned and set
+	Note:	entry has the form "name=cfg 0x<addr> 0x<len>"; on a
+		malformed entry without '=', addr/len are left untouched
+ *****************************************************************************/
+static void bootcfg_get_args(char *ptr, char *buffer, u32 * addr, u32 * len)
+{
+	/* BUG FIX: also stop at end of string so a malformed entry without
+	 * an '=' cannot run past the end of the entry (and the buffer) */
+	while (*ptr && *ptr != '=') {
+		*buffer++ = *ptr++;
+	}
+	*buffer = 0;
+
+	// figure out our addr and len from entry
+	sscanf(ptr, "=cfg 0x%x 0x%x", addr, len);
+}
+
+/******************************************************************************
+	Function:   bootcfg_defragment
+	Purpose:	defragment bootcfg structure
+ 	Returns:	0 on success, -ENOMEM if the scratch copy fails
+  	Note:  	    avoid some error checking by assuming data integrity.
+  	            hopefully this is a safe assumption
+ *****************************************************************************/
+static int bootcfg_defragment(void)
+{
+	char *ptr;
+	u8 *data;
+	char tmpBuf[64];
+	u32 end = BOOT_CFG_DEF_START;
+	ptsBootCfgFile next;
+
+	// first make a copy of the entire buffer
+	// (allocated before taking the lock - GFP_KERNEL may sleep)
+	if ((data = (u8 *) kmalloc(BOOT_CFG_DATA_SIZE, GFP_KERNEL)) == NULL) {
+		return -ENOMEM;
+	}
+
+	spin_lock(&gDefragLock);
+	memcpy(data, gBootCfgData.data, BOOT_CFG_DATA_SIZE);
+
+	// loop through bootcfg file entries
+	ptr = (char *)data;
+	while (*ptr) {
+		// file entries look like "name=cfg <addr> <len>"
+		if (strstr(ptr, "=cfg") != 0) {
+			u32 addr, len;
+			char filename[256];
+
+			// get current addr and len
+			bootcfg_get_args(ptr, filename, &addr, &len);
+
+			// copy data to new (packed) location; the -4 offset
+			// matches the crc compensation in bootcfg_proc_write
+			memcpy(&gBootCfgData.data[end - 4], &data[addr - 4],
+			       len);
+
+			// save entry in env
+			sprintf(tmpBuf, "cfg 0x%08x 0x%08x", end, len);
+			bootcfg_set_var(filename, tmpBuf);
+
+			// update the in-memory file descriptor to match
+			next = gFiles;
+			while (next != NULL) {
+				if (strcmp(next->entry, filename) == 0) {
+					next->addr = end;
+					next->len = len;
+					break;
+				}
+				next = next->next;
+			}
+			if (next == NULL) {
+				// not much we can do here other than output a warning
+				// should never happen - this is also engineering mode
+				// only.  Production should never change a file size
+				// or anything more than an ip or mac address, etc.
+				printk("Bootcfg error: file information not found\n");
+			}
+			end += len;
+		}
+		// flush to 0, end marked by double 0
+		while (*ptr++) {
+		}
+	}
+
+	// update end of memory pointer
+	sprintf(tmpBuf, "0x%08x", end);
+	bootcfg_set_var("config_data_end", tmpBuf);
+
+	spin_unlock(&gDefragLock);
+	kfree(data);
+	return 0;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/******************************************************************************
+	Function:	bootcfg_proc_read
+	Purpose:	read data from bootcfg flash area
+	Returns:	bytes copied to the user, or negative errno
+	Note:	a NULL PDE data pointer means the /proc/bootcfg/env entry;
+		otherwise it is a mounted bootcfg file
+ *****************************************************************************/
+static ssize_t
+bootcfg_proc_read(struct file *file, char __user *buffer,
+		  size_t buffer_length, loff_t *ppos)
+{
+	ssize_t len = 0;
+	ptsBootCfgFile id = (ptsBootCfgFile) PDE_DATA(file_inode(file));
+
+	if (id == NULL) {
+		// this is env read
+		char *ptr = (char *)&gBootCfgData.data[0];
+		size_t line_len = 0;
+		if (*ppos > 0) {
+			// the whole environment is produced in one shot
+			return 0;
+		}
+
+		while (*ptr) {
+			line_len = strlen(ptr);
+			/* BUG FIX: stop before overrunning the user buffer;
+			 * the original ignored buffer_length entirely */
+			if (len + line_len + 1 > buffer_length) {
+				break;
+			}
+			if (copy_to_user(&buffer[len], ptr, line_len)) {
+				return -EFAULT;
+			}
+			len += line_len;
+			if (copy_to_user(&buffer[len++], "\n", 1)) {
+				return -EFAULT;
+			}
+			// skip to the next "name=value" string
+			while (*ptr++) {
+			}
+		}
+	} else {
+		// read from a mounted file
+		if (*ppos >= id->len) {
+			// end of file
+			return 0;
+		}
+		// clamp the chunk to the remaining file contents
+		if (buffer_length > id->len) {
+			len = id->len;
+		} else if ((buffer_length + *ppos) > id->len) {
+			len = id->len - *ppos;
+		} else {
+			len = buffer_length;
+		}
+		// compensate for crc
+		/* BUG FIX: check the copy result (was ignored) */
+		if (copy_to_user(buffer,
+				 (char *)&gBootCfgData.data[id->addr - 4 + *ppos],
+				 len)) {
+			return -EFAULT;
+		}
+	}
+
+	*ppos += len;
+	return len;
+}
+
+/******************************************************************************
+	Function:   boardparam_proc_read
+	Purpose:	read board parameters
+	Returns:	bytes copied to the user, 0 on generation failure,
+		or -ENOMEM
+	Note:
+ *****************************************************************************/
+static ssize_t
+boardparam_proc_read(struct file *file, char __user *buffer,
+		     size_t buffer_length, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char *procfs_buffer;
+
+	// scratch buffer sized to the caller's read request
+	if ((procfs_buffer = kmalloc(buffer_length, GFP_KERNEL)) == NULL) {
+		printk("bootcfg error: out of memory\n");
+		return -ENOMEM;
+	}
+
+	// NOTE(review): get_all_board_params is not given a buffer size -
+	// confirm its output can never exceed buffer_length
+	if ((len = get_all_board_params(procfs_buffer)) < 0) {
+		printk("Failed to generate output\n");
+		len = 0;
+		goto bail;
+	}
+	len = simple_read_from_buffer(buffer, buffer_length, ppos,
+				      procfs_buffer, len);
+bail:
+	kfree(procfs_buffer);
+	return len;
+}
+
+static void
+bootcfg_pending_proc_wait(void)
+{
+	/* Poll the dirty flag (sampled under the lock) until the deferred
+	 * commit has drained every pending write. */
+	for (;;) {
+		int dirty;
+
+		spin_lock(&gBootCfgLock);
+		dirty = gBootCfgData.dirty;
+		spin_unlock(&gBootCfgLock);
+
+		if (!dirty) {
+			break;
+		}
+		msleep(BOOTCFG_COMMIT_DELAY_MS + 1);
+	}
+}
+
+static ssize_t
+bootcfg_pending_proc_read(struct file *file, char __user *buffer,
+			  size_t buffer_length, loff_t *ppos)
+{
+	static const char done_msg[] = DRV_NAME BOOTCFG_PENDING_END;
+
+	/* block until every deferred flash commit has been flushed */
+	bootcfg_pending_proc_wait();
+
+	return simple_read_from_buffer(buffer, buffer_length, ppos,
+				       done_msg, sizeof(done_msg) - 1);
+}
+
+/* per-file operations; PDE data carries the ptsBootCfgFile */
+static const struct file_operations fops = {
+	.read = bootcfg_proc_read,
+	.write = bootcfg_proc_write,
+};
+
+/* /proc/bootcfg/env: full environment dump plus command parser */
+static const struct file_operations fops_env = {
+	.read = bootcfg_proc_read,
+	.write = bootcfg_proc_write_env,
+};
+
+/* /proc/bootcfg/boardparam: read-only board parameter dump */
+static const struct file_operations fops_boardparam = {
+	.read = boardparam_proc_read,
+};
+
+/* /proc/bootcfg/pending: read blocks until deferred writes drain */
+static const struct file_operations fops_pending = {
+	.read = bootcfg_pending_proc_read,
+};
+
+/******************************************************************************
+   Function:    bootcfg_mount
+   Purpose:     Mount /proc/bootcfg file
+   Returns:     pointer to device file, NULL on failure
+   Note:        id->entry is a fixed-size field in tsBootCfgFile; assumes
+                filename fits - TODO confirm against the struct definition
+ *****************************************************************************/
+static ptsBootCfgFile bootcfg_mount(const char *filename, u32 addr, u32 len)
+{
+	ptsBootCfgFile id;
+	// found a file, create an entry
+	if ((id =
+	     (ptsBootCfgFile) kmalloc(sizeof(tsBootCfgFile),
+				      GFP_KERNEL)) == NULL) {
+		printk("bootcfg: out of memory\n");
+		return NULL;
+	}
+	strcpy(id->entry, filename);
+	id->addr = addr;
+	id->len = len;
+	id->next = NULL;
+
+	/* BUG FIX: mode was hex 0x644 (= 03104 octal), not the intended
+	 * rw-r--r-- permissions */
+	if ((id->proc =
+	     proc_create_data(id->entry, 0644, bootcfg_dir,
+			      &fops, id)) == NULL) {
+		/* BUG FIX: the original printed id->entry after kfree(id)
+		 * (use-after-free); log before freeing */
+		printk("unable to create /proc/bootcfg/%s\n", filename);
+		kfree(id);
+		return NULL;
+	}
+	proc_set_size(id->proc, len);
+	return id;
+}
+#else
+/******************************************************************************
+	Function:	bootcfg_proc_read
+	Purpose:	read data from bootcfg flash area
+	Returns:	bytes produced for this chunk
+	Note:	legacy (pre-4.7) read_proc interface; data==NULL means the
+		/proc/bootcfg/env entry, otherwise a mounted file
+ *****************************************************************************/
+static int
+bootcfg_proc_read(char *buffer,
+		  char **buffer_location, off_t offset,
+		  int buffer_length, int *eof, void *data)
+{
+	int len = 0;
+	if (data == NULL) {
+		// this is env read
+		char *ptr = (char *)&gBootCfgData.data[0];
+
+		// the whole environment is produced in one shot
+		if (offset > 0) {
+			*eof = 1;
+			return 0;
+		}
+
+		// dump every "name=value" string, one per line
+		while (*ptr) {
+			len += sprintf(&buffer[len], "%s\n", ptr);
+			while (*ptr++) {
+			}
+		}
+
+	} else {
+		// read from file
+		ptsBootCfgFile id = (ptsBootCfgFile) data;
+		if (offset >= id->len) {
+			// end of file
+			*eof = 1;
+			return 0;
+		}
+		// clamp the chunk to the remaining file contents
+		if (buffer_length > id->len) {
+			len = id->len;
+			*eof = 1;
+		} else if ((buffer_length + offset) > id->len) {
+			len = id->len - offset;
+			*eof = 1;
+		} else {
+			len = buffer_length;
+		}
+
+		if (len > PAGE_SIZE) {
+			/*
+			 * procfs has a limitation of one physical page per copy
+			 * any buffer that exceeds the limit has to be split
+			 */
+			len = PAGE_SIZE;
+			*eof = 0;
+		}
+
+		/*
+		 * fs/proc/generic.c, L98: the value returned in *buffer_location
+		 * has to be smaller than (unsigned long)buffer
+		 * NOTE(review): storing a length in *buffer_location exploits
+		 * the legacy procfs "start" convention - confirm against the
+		 * running kernel's fs/proc/generic.c before touching this
+		 */
+		BUG_ON(PAGE_SIZE >= (unsigned long)buffer);
+		*(unsigned long *)buffer_location = len;
+
+		// compensate for crc
+		memcpy(buffer,
+			(char *)&gBootCfgData.data[id->addr - 4 + offset], len);
+
+		return len;
+	}
+	return (len + offset);
+}
+
+/******************************************************************************
+	Function:   boardparam_proc_read
+	Purpose:	read board parameters
+	Returns:	number of bytes placed in buffer, 0 on failure
+	Note:	legacy (pre-4.7) read_proc interface
+ *****************************************************************************/
+static int
+boardparam_proc_read(char *buffer,
+		  char **buffer_location, off_t offset,
+		  int buffer_length, int *eof, void *data)
+{
+	int out_len = get_all_board_params(buffer);
+
+	if (out_len < 0) {
+		printk("Failed to generate output\n");
+		return (0);
+	}
+
+	return out_len;
+}
+
+static void
+bootcfg_pending_proc_wait(void)
+{
+	/* Poll the dirty flag (sampled under the lock) until the deferred
+	 * commit has drained every pending write. */
+	for (;;) {
+		int dirty;
+
+		spin_lock(&gBootCfgLock);
+		dirty = gBootCfgData.dirty;
+		spin_unlock(&gBootCfgLock);
+
+		if (!dirty) {
+			break;
+		}
+		msleep(BOOTCFG_COMMIT_DELAY_MS + 1);
+	}
+}
+
+static int
+bootcfg_pending_proc_read(char *buffer,
+		  char **buffer_location, off_t offset,
+		  int buffer_length, int *eof, void *data)
+{
+	int written;
+
+	/* block until every deferred flash commit has been flushed */
+	bootcfg_pending_proc_wait();
+
+	written = snprintf(buffer, buffer_length, "%s writes complete\n",
+			   DRV_NAME);
+
+	*eof = 1;
+
+	return written;
+}
+
+/******************************************************************************
+	Purpose:	wrap the kernel-installed proc file operations so the
+			file position is advanced after a successful write
+			(the stock behaviour leaves the position unchanged)
+ *****************************************************************************/
+static ssize_t bootcfg_proc_file_write(struct file *file,
+				       const char __user * buffer, size_t count,
+				       loff_t * ppos);
+static ssize_t bootcfg_proc_file_read(struct file *file, char __user * buf,
+				      size_t nbytes, loff_t * ppos);
+static loff_t bootcfg_proc_file_lseek(struct file *file, loff_t offset,
+				      int orig);
+
+/* 'wrapper' is installed on every bootcfg proc file; 'internal' holds the
+ * original fops the kernel assigned, captured by bootcfg_assign_file_fops */
+struct bootcfg_file_operations {
+	const struct file_operations wrapper;
+	const struct file_operations *internal;
+};
+
+static struct bootcfg_file_operations bootcfg_proc_dir_operations = {
+	.wrapper = {
+		    .read = bootcfg_proc_file_read,
+		    .write = bootcfg_proc_file_write,
+		    .llseek = bootcfg_proc_file_lseek,
+		    },
+	.internal = NULL,
+};
+
+static ssize_t
+bootcfg_proc_file_write(struct file *file, const char __user * buffer,
+			size_t count, loff_t * ppos)
+{
+	const struct file_operations *real =
+	    bootcfg_proc_dir_operations.internal;
+	ssize_t written;
+
+	if (real == NULL) {
+		panic("No function implementation\n");
+	}
+	/* delegate, then advance the position the stock write leaves alone */
+	written = real->write(file, buffer, count, ppos);
+	if (written > 0) {
+		*ppos += written;
+	}
+	return written;
+}
+
+static ssize_t
+bootcfg_proc_file_read(struct file *file, char __user * buf, size_t nbytes,
+		       loff_t * ppos)
+{
+	const struct file_operations *real =
+	    bootcfg_proc_dir_operations.internal;
+
+	/* delegate to the fops captured in bootcfg_assign_file_fops */
+	if (real == NULL) {
+		panic("No function implementation\n");
+	}
+	return real->read(file, buf, nbytes, ppos);
+}
+
+static loff_t
+bootcfg_proc_file_lseek(struct file *file, loff_t offset, int orig)
+{
+	const struct file_operations *real =
+	    bootcfg_proc_dir_operations.internal;
+
+	/* delegate to the fops captured in bootcfg_assign_file_fops */
+	if (real == NULL) {
+		panic("No function implementation\n");
+	}
+	return real->llseek(file, offset, orig);
+}
+
+/* Install the write-position-fixing wrapper fops on a mounted proc file,
+ * capturing the kernel-assigned fops the first time through.  All bootcfg
+ * proc files are expected to start out sharing the same default fops. */
+static void bootcfg_assign_file_fops(ptsBootCfgFile id)
+{
+	if (!id || !id->proc || !id->proc->proc_fops) {
+		panic("Bad pointer.\n");
+	}
+
+	// remember the original fops on first use
+	if (!bootcfg_proc_dir_operations.internal) {
+		bootcfg_proc_dir_operations.internal = id->proc->proc_fops;
+	}
+
+	// a second, different fops would break the single-delegate scheme
+	if (bootcfg_proc_dir_operations.internal != id->proc->proc_fops) {
+		panic("Impossible\n");
+	}
+
+	id->proc->proc_fops = &(bootcfg_proc_dir_operations.wrapper);
+}
+
+/******************************************************************************
+   Function:    bootcfg_mount
+   Purpose:     Mount /proc/bootcfg file (legacy, pre-4.7)
+   Returns:     pointer to device file, NULL on failure
+   Note:        id->entry is a fixed-size field in tsBootCfgFile; assumes
+                filename fits - TODO confirm against the struct definition
+ *****************************************************************************/
+static ptsBootCfgFile bootcfg_mount(const char *filename, u32 addr, u32 len)
+{
+	ptsBootCfgFile id;
+	// found a file, create an entry
+	if ((id =
+	     (ptsBootCfgFile) kmalloc(sizeof(tsBootCfgFile),
+				      GFP_KERNEL)) == NULL) {
+		printk("bootcfg: out of memory\n");
+		return NULL;
+	}
+	strcpy(id->entry, filename);
+	id->addr = addr;
+	id->len = len;
+	id->next = NULL;
+
+	/* BUG FIX: mode was hex 0x644 (= 03104 octal), not the intended
+	 * rw-r--r-- permissions */
+	if ((id->proc =
+	     create_proc_entry(id->entry, 0644, bootcfg_dir)) == NULL) {
+		/* BUG FIX: the original called
+		 * remove_proc_entry("bootcfg", bootcfg_dir), which names a
+		 * nonexistent child of the bootcfg directory; just free the
+		 * node and report which file failed */
+		printk("unable to create /proc/bootcfg/%s\n", filename);
+		kfree(id);
+		return NULL;
+	}
+	id->proc->data = id;
+	id->proc->read_proc = bootcfg_proc_read;
+	id->proc->write_proc = bootcfg_proc_write;
+	id->proc->mode = S_IFREG | S_IRUGO;
+	id->proc->uid = 0;
+	id->proc->gid = 0;
+	id->proc->size = len;
+	bootcfg_assign_file_fops(id);
+	return id;
+}
+#endif
+
+/******************************************************************************
+   Function:    bootcfg_init
+   Purpose:     Set up crctable, and initialize module
+   Returns:     0 on success, negative errno on failure
+   Note:        mounts a /proc/bootcfg node for every "name=cfg ..." entry
+                found in the environment
+ *****************************************************************************/
+static int __init bootcfg_init(void)
+{
+	char *next;
+	int err;
+	ptsBootCfgFile nextFile = gFiles;
+
+	spin_lock_init(&gBootCfgLock);
+	spin_lock_init(&gDefragLock);
+	spin_lock_init(&gFlashLock);
+	spin_lock_init(&gBootCfgVarLock);
+
+	err = bootcfg_store_init();
+	if (err) {
+		goto out;
+	}
+
+	// read the bootcfg data
+	err = bootcfg_store_read();
+	if (err != 0) {
+		goto out_exit_store;
+	}
+	gBootCfgData.isValid = 1;
+	gBootCfgData.dirty = 0;
+
+	// create a proc entry
+	/* BUG FIX: check the proc_mkdir result before using bootcfg_dir */
+	bootcfg_dir = proc_mkdir("bootcfg", NULL);
+	if (bootcfg_dir == NULL) {
+		printk(KERN_ERR "unable to create /proc/bootcfg\n");
+		err = -ENOMEM;
+		goto out_exit_store;
+	}
+
+	/* BUG FIX: modes below were hex constants (0x644/0x444); use the
+	 * intended octal permissions */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if ((bootcfg_proc = proc_create_data("env", 0644, bootcfg_dir, &fops_env, NULL)) == NULL) {
+		printk(KERN_ERR "unable to create /proc/bootcfg/%s\n", "env");
+		goto out_exit_env;
+	}
+	proc_set_size(bootcfg_proc, 0x1000);
+
+	if ((boardparam_proc = proc_create_data("boardparam", 0444, bootcfg_dir, &fops_boardparam, NULL)) == NULL) {
+		printk(KERN_ERR "unable to create /proc/bootcfg/%s\n", "boardparam");
+		goto out_exit_boardparam;
+	}
+
+	if ((pending_proc = proc_create_data("pending", 0444, bootcfg_dir, &fops_pending, NULL)) == NULL) {
+		printk(KERN_ERR "unable to create /proc/bootcfg/%s\n", "pending");
+		goto out_exit_pending;
+	}
+#else
+	if ((bootcfg_proc = create_proc_entry("env", 0644, bootcfg_dir)) == NULL) {
+		printk(KERN_ERR "unable to create /proc/bootcfg/%s\n", "env");
+		goto out_exit_env;
+	}
+	bootcfg_proc->read_proc = bootcfg_proc_read;
+	bootcfg_proc->write_proc = bootcfg_proc_write_env;
+	bootcfg_proc->mode = S_IFREG | S_IRUGO;
+	bootcfg_proc->uid = 0;
+	bootcfg_proc->gid = 0;
+	bootcfg_proc->size = 0x1000;
+	bootcfg_proc->data = NULL;
+
+	if ((boardparam_proc = create_proc_entry("boardparam", 0444, bootcfg_dir)) == NULL) {
+		printk(KERN_ERR "unable to create /proc/bootcfg/%s\n", "boardparam");
+		goto out_exit_boardparam;
+	}
+	boardparam_proc->read_proc = boardparam_proc_read;
+	boardparam_proc->write_proc = NULL;
+	boardparam_proc->mode = S_IFREG | S_IRUGO;
+	boardparam_proc->uid = 0;
+	boardparam_proc->gid = 0;
+	boardparam_proc->data = NULL;
+
+	if ((pending_proc = create_proc_entry("pending", 0444, bootcfg_dir)) == NULL) {
+		printk(KERN_ERR "unable to create /proc/bootcfg/%s\n", "pending");
+		goto out_exit_pending;
+	}
+	pending_proc->read_proc = bootcfg_pending_proc_read;
+	pending_proc->write_proc = NULL;
+	pending_proc->mode = S_IFREG | S_IRUGO;
+	pending_proc->uid = 0;
+	pending_proc->gid = 0;
+	pending_proc->data = NULL;
+#endif
+
+	// now look for files in bootcfg and mount each one under /proc
+	next = (char *)gBootCfgData.data;
+	while (*next) {
+		if (strstr(next, "=cfg") != 0) {
+			u32 addr, len;
+			char buffer[256];
+			bootcfg_get_args(next, buffer, &addr, &len);
+			if (gFiles == NULL) {
+				gFiles = bootcfg_mount(buffer, addr, len);
+			} else {
+				nextFile = gFiles;
+				while (nextFile->next != NULL) {
+					nextFile = nextFile->next;
+				}
+				nextFile->next =
+				    bootcfg_mount(buffer, addr, len);
+			}
+
+		}
+		// flush to next entry
+		while (*next++) {
+		}
+	}
+
+	return 0;
+
+out_exit_pending:
+	remove_proc_entry("boardparam", bootcfg_dir);
+out_exit_boardparam:
+	remove_proc_entry("env", bootcfg_dir);
+out_exit_env:
+	err = -ENOMEM;
+	/* BUG FIX: the directory was created with a NULL parent, so it must
+	 * be removed with a NULL parent (was: parent = bootcfg_dir, which
+	 * names a nonexistent child entry); matches bootcfg_exit */
+	remove_proc_entry("bootcfg", NULL);
+out_exit_store:
+	bootcfg_store_exit();
+out:
+	return err;
+}
+
+/******************************************************************************
+   Function:    bootcfg_exit
+   Purpose:     exit our driver
+   Returns:
+   Note:
+ *****************************************************************************/
+static void __exit bootcfg_exit(void)
+{
+	ptsBootCfgFile nextFile = gFiles;
+	ptsBootCfgFile prev;
+
+	// NOTE(review): the datastore is shut down before waiting for
+	// pending writes below - confirm a deferred commit cannot still
+	// reach the store after this point
+	bootcfg_store_exit();
+
+	// unlink and free every mounted /proc/bootcfg file
+	while (nextFile != NULL) {
+		remove_proc_entry(nextFile->entry, bootcfg_dir);
+		prev = nextFile;
+		nextFile = nextFile->next;
+		kfree(prev);
+	}
+	remove_proc_entry("env", bootcfg_dir);
+	remove_proc_entry("boardparam", bootcfg_dir);
+	remove_proc_entry("pending", bootcfg_dir);
+	remove_proc_entry("bootcfg", NULL);
+
+	// wait for any deferred flash write to drain before unloading
+	bootcfg_pending_proc_wait();
+
+	printk(KERN_ALERT "bootcfg Driver Terminated\n");
+}
+
+/******************************************************************************
+   Function:Linux driver entries/declarations
+ *****************************************************************************/
+module_init(bootcfg_init);
+module_exit(bootcfg_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/bootcfg/bootcfg_drv.h b/drivers/qtn/bootcfg/bootcfg_drv.h
new file mode 100644
index 0000000..a2e309f
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_drv.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Create a wrapper around other bootcfg datastores which compresses on write
+ * and decompresses on read.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __BOOTCFG_DRV_H__
+#define __BOOTCFG_DRV_H__
+
+#include <linux/types.h>
+#include <linux/version.h>
+
+/*
+ * Pluggable backing-store interface for the bootcfg driver.  Each store
+ * (flash, EEPROM, compression wrapper, ...) supplies one of these.
+ */
+struct bootcfg_store_ops {
+	/* read up to 'bytes' bytes of config data into buf */
+	int (*read)(struct bootcfg_store_ops *ops, void *buf, const size_t bytes);
+	/* write 'bytes' bytes of config data from buf to the store */
+	int (*write)(struct bootcfg_store_ops *ops, const void *buf, const size_t bytes);
+	/* one-time setup; reports available capacity via *store_limit
+	 * (0 means unrestricted - see bootcfg_store_init) */
+	int (*init)(struct bootcfg_store_ops *ops, size_t *store_limit);
+	/* optional teardown; may be NULL */
+	void (*exit)(struct bootcfg_store_ops *ops);
+};
+
+/* returns the platform's raw datastore implementation */
+struct bootcfg_store_ops *bootcfg_get_datastore(void);
+/* wraps raw_accessor so data is compressed on write, decompressed on read */
+struct bootcfg_store_ops *bootcfg_compression_adapter(struct bootcfg_store_ops *raw_accessor);
+
+#endif	/* __BOOTCFG_DRV_H__ */
+
+
diff --git a/drivers/qtn/bootcfg/bootcfg_eeprom.c b/drivers/qtn/bootcfg/bootcfg_eeprom.c
new file mode 100644
index 0000000..4901f91
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_eeprom.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Bootcfg storage with AT24C64D EEPROMs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include "bootcfg_drv.h"
+
+#include <qtn/bootcfg.h>
+#include <common/ruby_partitions.h>
+#include <common/ruby_version.h>
+
+#include <linux/i2c.h>
+#include <linux/sched.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/nvmem-provider.h>
+#include <linux/platform_data/at24.h>
+#include <linux/moduleparam.h>
+#else
+#include <linux/i2c/at24.h>
+#endif
+#include <linux/sched.h>
+
+#define I2C_EEPROM_ADAPTER_NUM	0x0
+#define I2C_EEPROM_DEVICE_ADDR	0x50	/* first bits are 1010, then 3 chip select pins; 0x50 - 0x57 */
+
+#define I2C_EEPROM_SIZE_BITS	(64 * 1024)
+#define I2C_EEPROM_NBBY		8
+#define I2C_EEPROM_SIZE_BYTES	(I2C_EEPROM_SIZE_BITS / I2C_EEPROM_NBBY)
+#define I2C_EEPROM_PAGE_BYTES	32
+
+static long i2c_devaddr = I2C_EEPROM_DEVICE_ADDR;
+
+module_param(i2c_devaddr, long, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+MODULE_PARM_DESC(i2c_devaddr, "I2C device address of EEPROM");
+
+
+static struct i2c_client *dev_client = NULL;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static struct nvmem_device *eeprom_nvmem_dev = NULL;
+static spinlock_t g_eeprom_lock;
+
+static void at24_setup(struct nvmem_device *nvmem, void *context)
+{
+	eeprom_nvmem_dev = nvmem;
+}
+
+#else
+static struct memory_accessor *eeprom_mem_acc = NULL;
+static spinlock_t g_eeprom_lock;
+
+static void at24_setup(struct memory_accessor *mem_acc, void *context)
+{
+	eeprom_mem_acc = mem_acc;
+}
+#endif
+
+static struct at24_platform_data at24c64d_plat = {
+	.byte_len	= I2C_EEPROM_SIZE_BYTES,
+	.page_size	= I2C_EEPROM_PAGE_BYTES,
+	.flags		= AT24_FLAG_ADDR16,
+	.setup		= &at24_setup,
+};
+
+static struct i2c_board_info eeprom_info = {
+	I2C_BOARD_INFO("at24", I2C_EEPROM_DEVICE_ADDR),
+	.platform_data = &at24c64d_plat,
+};
+
+int __init bootcfg_eeprom_init(struct bootcfg_store_ops *ops, size_t *store_limit)
+{
+	spin_lock_init(&g_eeprom_lock);
+
+	eeprom_info.addr = i2c_devaddr;
+
+	printk("%s i2c_devaddr : 0x%x\n", __FUNCTION__, eeprom_info.addr);
+
+	dev_client = i2c_new_device(i2c_get_adapter(I2C_EEPROM_ADAPTER_NUM), &eeprom_info);
+	if (!dev_client) {
+		printk(KERN_ERR "%s: error instantiating i2c device\n",
+				__FUNCTION__);
+		goto device_client_fail;
+	}
+
+	*store_limit = I2C_EEPROM_SIZE_BYTES;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (eeprom_nvmem_dev == NULL) {
+		return -ENODEV;
+	}
+#else
+	if (eeprom_mem_acc == NULL) {
+		return -ENODEV;
+	}
+#endif
+	return 0;
+
+device_client_fail:
+	return -1;
+}
+
+void __exit bootcfg_eeprom_exit(struct bootcfg_store_ops *ops)
+{
+	i2c_unregister_device(dev_client);
+}
+
+static int bootcfg_eeprom_read(struct bootcfg_store_ops *ops, void* buf, const size_t bytes)
+{
+	int ret;
+
+	spin_lock(&g_eeprom_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	ret = nvmem_device_read(eeprom_nvmem_dev, 0, bytes, buf);
+#else
+	ret = eeprom_mem_acc->read(eeprom_mem_acc, buf, 0, bytes);
+#endif
+	spin_unlock(&g_eeprom_lock);
+
+	if (ret == bytes) {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int bootcfg_eeprom_write(struct bootcfg_store_ops *ops, const void* buf, const size_t bytes)
+{
+	int ret;
+
+	spin_lock(&g_eeprom_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	ret = nvmem_device_write(eeprom_nvmem_dev, 0, bytes, buf);
+#else
+	ret = eeprom_mem_acc->write(eeprom_mem_acc, buf, 0, bytes);
+#endif
+	spin_unlock(&g_eeprom_lock);
+
+	if (ret == bytes) {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+
+static struct bootcfg_store_ops eeprom_store_ops = {
+	.read	= bootcfg_eeprom_read,
+	.write	= bootcfg_eeprom_write,
+	.init	= bootcfg_eeprom_init,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	.exit	= bootcfg_eeprom_exit,
+#else
+	.exit	= __devexit_p(bootcfg_eeprom_exit),
+#endif
+};
+
+struct bootcfg_store_ops * __init bootcfg_eeprom_get_ops(void)
+{
+	return &eeprom_store_ops;
+}
+
diff --git a/drivers/qtn/bootcfg/bootcfg_file.c b/drivers/qtn/bootcfg/bootcfg_file.c
new file mode 100644
index 0000000..c2554a1
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_file.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Bootcfg store through filesystem
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include "bootcfg_drv.h"
+#include "bootcfg_store_init.h"
+
+#include <qtn/bootcfg.h>
+#include <common/ruby_version.h>
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/syscalls.h>
+#include <asm/uaccess.h>
+
+static const char *env_store_path;
+
+static int bootcfg_do_file_op(int (*op)(struct file *, void *, size_t), void *buf, size_t bytes)
+{
+	int rc = -1;
+	int fd;
+	struct file *file;
+	mm_segment_t old_fs;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	fd = sys_open(env_store_path, O_RDWR, 0);
+	if (fd < 0) {
+		return fd;
+	}
+
+	file = fget(fd);
+	if (file) {
+		rc = op(file, buf, bytes);
+		fput(file);
+	}
+
+	sys_close(fd);
+	set_fs(old_fs);
+
+	return rc;
+}
+
+static int bootcfg_do_file_write(struct file *file, void *buf, size_t bytes)
+{
+	int rc;
+	loff_t offset = 0;
+
+	rc = vfs_write(file, buf, bytes, &offset);
+
+	return rc < 0 ? rc : 0;
+}
+
+static int bootcfg_do_file_read(struct file *file, void *buf, size_t bytes)
+{
+	int rc;
+	loff_t offset = 0;
+
+	rc = vfs_read(file, buf, bytes, &offset);
+
+	return rc < 0 ? rc : 0;
+}
+
+static int bootcfg_file_write(struct bootcfg_store_ops *ops, const void* buf, const size_t bytes)
+{
+	return bootcfg_do_file_op(&bootcfg_do_file_write, (void *)buf, bytes);
+}
+
+static int bootcfg_file_read(struct bootcfg_store_ops *ops, void* buf, const size_t bytes)
+{
+	return bootcfg_do_file_op(&bootcfg_do_file_read, (void *)buf, bytes);
+}
+
+int __init bootcfg_file_init(struct bootcfg_store_ops *ops, size_t *store_limit)
+{
+	return 0;
+}
+
+void __exit bootcfg_file_exit(struct bootcfg_store_ops *ops)
+{
+}
+
+static struct bootcfg_store_ops file_store_ops = {
+	.read	= bootcfg_file_read,
+	.write	= bootcfg_file_write,
+	.init	= bootcfg_file_init,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	.exit	= bootcfg_file_exit,
+#else
+	.exit	= __devexit_p(bootcfg_file_exit),
+#endif
+};
+
+struct bootcfg_store_ops * __init bootcfg_file_get_ops(const char *path)
+{
+	env_store_path = path;
+
+	printk("%s: using file storage '%s'\n", __FUNCTION__, path);
+
+	return &file_store_ops;
+}
diff --git a/drivers/qtn/bootcfg/bootcfg_mtd.c b/drivers/qtn/bootcfg/bootcfg_mtd.c
new file mode 100644
index 0000000..c75f88e6
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_mtd.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Bootcfg store through mtd driver (flash partitions)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include "bootcfg_drv.h"
+#include "bootcfg_store_init.h"
+
+#include <qtn/bootcfg.h>
+#include <common/ruby_partitions.h>
+#include <common/ruby_version.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/spinlock_types.h>
+#endif
+
+#define UBOOT_MTD_DEVICE	0
+#define BOOTCFG_MTD_DEVICE	1
+
+static struct cfg_data_t {
+	spinlock_t	lock;
+	struct mtd_info *mtd;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+} cfg_data = {  __SPIN_LOCK_UNLOCKED((worker).lock), NULL };
+#else
+} cfg_data = { SPIN_LOCK_UNLOCKED, NULL };
+#endif
+
+static void erase_callback(struct erase_info *done)
+{
+	wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv;
+	wake_up(wait_q);
+}
+
+static int bootcfg_flash_write(struct bootcfg_store_ops *ops, const void *buf, const size_t bytes)
+{
+	size_t bytes_written;
+	struct erase_info erase;
+	DECLARE_WAITQUEUE(wait, current);
+	wait_queue_head_t wait_q;
+	size_t erase_size;
+	int ret = 0;
+
+	spin_lock(&cfg_data.lock);
+
+	if (cfg_data.mtd == NULL) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	erase_size = bytes + (cfg_data.mtd->erasesize - 1);
+	erase_size = erase_size - (erase_size % cfg_data.mtd->erasesize);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (cfg_data.mtd->_unlock && cfg_data.mtd->_unlock(cfg_data.mtd, 0, erase_size)) {
+#else
+	if (cfg_data.mtd->unlock && cfg_data.mtd->unlock(cfg_data.mtd, 0, erase_size)) {
+#endif
+		printk("bootcfg: %s unlock failed\n", cfg_data.mtd->name);
+		ret = -ENOLCK;
+		goto out;
+	}
+
+	init_waitqueue_head(&wait_q);
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&wait_q, &wait);
+
+	memset(&erase, 0, sizeof(struct erase_info));
+	erase.mtd = cfg_data.mtd;
+	erase.callback = erase_callback;
+	erase.addr = 0;
+	erase.len = erase_size;
+	erase.priv = (u_long) & wait_q;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	ret = cfg_data.mtd->_erase(cfg_data.mtd, &erase);
+#else
+	ret = cfg_data.mtd->erase(cfg_data.mtd, &erase);
+#endif
+	if (ret) {
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&wait_q, &wait);
+		printk(KERN_WARNING "bootcfg: erase of region [0x%x, 0x%x] "
+		       "on \"%s\" failed\n",
+		       (unsigned)erase.addr, (unsigned)erase.len, cfg_data.mtd->name);
+		ret = -EIO;
+		goto out;
+	}
+
+	schedule();		/* Wait for erase to finish. */
+	remove_wait_queue(&wait_q, &wait);
+
+	/* write to device */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (cfg_data.mtd->_write(cfg_data.mtd, 0, bytes, &bytes_written, buf)) {
+#else
+	if (cfg_data.mtd->write(cfg_data.mtd, 0, bytes, &bytes_written, buf)) {
+#endif
+		printk("bootcfg: could not write device\n");
+		ret = -EIO;
+		goto out;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (cfg_data.mtd->_lock && cfg_data.mtd->_lock(cfg_data.mtd, 0, erase_size)) {
+#else
+	if (cfg_data.mtd->lock && cfg_data.mtd->lock(cfg_data.mtd, 0, erase_size)) {
+#endif
+		printk("bootcfg: could not lock device\n");
+		ret = -ENOLCK;
+	}
+out:
+	spin_unlock(&cfg_data.lock);
+	return ret;
+}
+
+#define VERSION_STR_SIZE 16
+
+static int bootcfg_flash_read(struct bootcfg_store_ops *ops, void *buf, const size_t bytes)
+{
+	size_t bytes_read;
+	int ret = 0;
+
+	spin_lock(&cfg_data.lock);
+
+	if (cfg_data.mtd == NULL) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	ret = cfg_data.mtd->_read(cfg_data.mtd, 0, bytes, &bytes_read, buf);
+#else
+	ret = cfg_data.mtd->read(cfg_data.mtd, 0, bytes, &bytes_read, buf);
+#endif
+	if (ret) {
+		goto out;
+	}
+
+	if (bytes_read != bytes) {
+		ret = -ENODATA;
+	}
+out:
+	spin_unlock(&cfg_data.lock);
+	return ret;
+
+}
+
+int __init bootcfg_flash_init(struct bootcfg_store_ops *ops, size_t *store_limit)
+{
+	struct mtd_info *mtd;
+	uint8_t version[VERSION_STR_SIZE];
+	size_t version_bytes;
+	int ret = 0;
+
+	spin_lock(&cfg_data.lock);
+
+	mtd = get_mtd_device(NULL, UBOOT_MTD_DEVICE);
+	if (mtd == NULL) {
+		printk(KERN_ERR "%s: Could not get flash device mtd%d\n",
+				__FUNCTION__, UBOOT_MTD_DEVICE);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	cfg_data.mtd = get_mtd_device(NULL, BOOTCFG_MTD_DEVICE);
+	if (cfg_data.mtd == NULL) {
+		printk(KERN_ERR "Could not get flash device mtd%d\n", BOOTCFG_MTD_DEVICE);
+		ret = -ENODEV;
+		goto out;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (mtd->_read(mtd, 4, VERSION_STR_SIZE, &version_bytes, version)) {
+#else
+	if (mtd->read(mtd, 4, VERSION_STR_SIZE, &version_bytes, version)) {
+#endif
+		ret = -EIO;
+		put_mtd_device(cfg_data.mtd);
+		cfg_data.mtd = NULL;
+		goto out;
+	}
+
+	*store_limit = cfg_data.mtd->size;
+	/* here we need to figure out version to be backward compatible */
+	/* version previous to 1.1.2 do not have U_BOOT tag at fixed location */
+	/* also note this will not work for umsdl uboot, must be set to boot from flash */
+	/* we only check for presence of the string to make sure we are > U-boot 1.1.1 */
+	/* Hardwired the string here since the global version string was modified */
+	if (memcmp(version, "U-BOOT", 6) != 0) {
+		printk(KERN_WARNING "%s: warning, detected old U-BOOT.  bootcfg data size limited to 4k\n",
+				__FUNCTION__);
+		/* size here is 4k env, 4k data */
+		*store_limit = 8192;
+	}
+
+out:
+	if (mtd)
+		put_mtd_device(mtd);
+	spin_unlock(&cfg_data.lock);
+	return ret;
+}
+
+void __exit bootcfg_flash_exit(struct bootcfg_store_ops *ops)
+{
+	if (cfg_data.mtd)
+		put_mtd_device(cfg_data.mtd);
+}
+
+static struct bootcfg_store_ops flash_store_ops = {
+	.read	= bootcfg_flash_read,
+	.write	= bootcfg_flash_write,
+	.init	= bootcfg_flash_init,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	.exit	= bootcfg_flash_exit,
+#else
+	.exit	= __devexit_p(bootcfg_flash_exit),
+#endif
+};
+
+struct bootcfg_store_ops *__init bootcfg_flash_get_ops(void)
+{
+	return &flash_store_ops;
+}
+
diff --git a/drivers/qtn/bootcfg/bootcfg_store_init.c b/drivers/qtn/bootcfg/bootcfg_store_init.c
new file mode 100644
index 0000000..757bcf8
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_store_init.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include "bootcfg_drv.h"
+#include "bootcfg_store_init.h"
+#include <common/ruby_partitions.h>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#define STORE_NAME_MTD		"mtd"
+#define STORE_NAME_EEPROM	"eeprom"
+
+static char *store = STORE_NAME_MTD;
+
+module_param(store, charp, S_IRUGO);
+MODULE_PARM_DESC(store, "Datastore name");
+
+#define LOG_FAIL(_r)								\
+	do {									\
+		printk("%s failed at line %d, %s = %d\n",			\
+				__FUNCTION__, __LINE__, #_r, (int)(_r));	\
+	} while(0)
+
+
+#ifdef CONFIG_MTD
+static __init int bootcfg_copy_mtd_to_eeprom(void)
+{
+	struct bootcfg_store_ops *mtd;
+	struct bootcfg_store_ops *eeprom;
+	size_t mtd_size_lim = BOOT_CFG_SIZE;
+	size_t eeprom_size_lim = BOOT_CFG_SIZE;
+	uint8_t *buf = NULL;
+	int ret = -1;
+
+	buf = kmalloc(BOOT_CFG_SIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		LOG_FAIL(buf);
+		goto out;
+	}
+
+	mtd = bootcfg_flash_get_ops();
+	if (mtd == NULL) {
+		LOG_FAIL(mtd);
+		goto out;
+	}
+
+	eeprom = bootcfg_compression_adapter(bootcfg_eeprom_get_ops());
+	if (eeprom == NULL) {
+		LOG_FAIL(eeprom);
+		goto out;
+	}
+
+	ret = mtd->init(mtd, &mtd_size_lim);
+	if (ret) {
+		LOG_FAIL(ret);
+		kfree(eeprom);
+		goto out;
+	}
+
+	ret = eeprom->init(eeprom, &eeprom_size_lim);
+	if (ret) {
+		LOG_FAIL(ret);
+		goto out_exit_mtd;
+	}
+
+	ret = mtd->read(mtd, buf, mtd_size_lim);
+	if (ret) {
+		LOG_FAIL(ret);
+		goto out_exit_both;
+	}
+
+
+	ret = eeprom->write(eeprom, buf, eeprom_size_lim);
+	if (ret) {
+		LOG_FAIL(ret);
+		goto out_exit_both;
+	}
+
+out_exit_both:
+	eeprom->exit(eeprom);
+out_exit_mtd:
+	mtd->exit(mtd);
+out:
+	if (buf) {
+		kfree(buf);
+	}
+	return ret;
+}
+#endif
+
+/*
+ * Provide different storage implementation depending on module parameter
+ */
+__init struct bootcfg_store_ops *bootcfg_get_datastore(void)
+{
+#ifdef CONFIG_MTD
+	if (strcmp(store, STORE_NAME_MTD) == 0) {
+		return bootcfg_flash_get_ops();
+	} else if (strcmp(store, STORE_NAME_EEPROM) == 0) {
+		return bootcfg_compression_adapter(bootcfg_eeprom_get_ops());
+	} else if (strcmp(store, "mtd_to_eeprom") == 0) {
+		bootcfg_copy_mtd_to_eeprom();
+	} else {
+		return bootcfg_file_get_ops(store);
+	}
+	return NULL;
+#else
+	return bootcfg_file_get_ops(store);
+#endif
+}
+
diff --git a/drivers/qtn/bootcfg/bootcfg_store_init.h b/drivers/qtn/bootcfg/bootcfg_store_init.h
new file mode 100644
index 0000000..26bbf43
--- /dev/null
+++ b/drivers/qtn/bootcfg/bootcfg_store_init.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __BOOTCFG_STORE_INIT_H__
+#define __BOOTCFG_STORE_INIT_H__
+
+#include "bootcfg_drv.h"
+
+/*
+ * List of storage implementations for bootcfg driver,
+ * hidden from bootcfg common implementation.
+ */
+
+#ifdef CONFIG_MTD
+struct bootcfg_store_ops *bootcfg_flash_get_ops(void);
+struct bootcfg_store_ops *bootcfg_eeprom_get_ops(void);
+#endif
+
+struct bootcfg_store_ops *bootcfg_file_get_ops(const char *path);
+
+#endif	/* __BOOTCFG_STORE_INIT_H__ */
+
diff --git a/drivers/qtn/i2cbus/Makefile b/drivers/qtn/i2cbus/Makefile
new file mode 100644
index 0000000..aac3173
--- /dev/null
+++ b/drivers/qtn/i2cbus/Makefile
@@ -0,0 +1,39 @@
+#
+# Quantenna Communications Inc. Driver Makefile
+#
+# Author: Mats Aretun
+# 
+#
+
+EXTRA_CFLAGS	+= -Wall -Werror	\
+					-I../drivers	\
+
+ifneq ($(KERNELRELEASE),)
+
+i2cbus-objs	+= i2c-qtn-platdrv.o
+i2cbus-objs	+= i2c-qtn-core.o
+obj-m	+= i2cbus.o
+
+else
+KERNELDIR	?= ../../linux
+INSTALL		= INSTALL_MOD_PATH=../linux/modules
+CROSS		= ARCH=arc CROSS_COMPILE=/usr/local/ARC/gcc/bin/arc-linux-uclibc-
+PWD			:= $(shell pwd)
+
+default:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+install:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
+
+endif
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions
+
+depend .depend dep:
+	$(CC) $(CFLAGS) -M *.c > .depend
+
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
diff --git a/drivers/qtn/i2cbus/i2c-qtn-core.c b/drivers/qtn/i2cbus/i2c-qtn-core.c
new file mode 100644
index 0000000..c458193
--- /dev/null
+++ b/drivers/qtn/i2cbus/i2c-qtn-core.c
@@ -0,0 +1,817 @@
+/*
+ * Quantenna I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ * Copyright (C) 2014 Quantenna Communications Inc.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include "i2c-qtn-core.h"
+
+/*
+ * Registers offset
+ */
+#define DW_IC_CON		0x0
+#define DW_IC_TAR		0x4
+#define DW_IC_DATA_CMD		0x10
+#define DW_IC_SS_SCL_HCNT	0x14
+#define DW_IC_SS_SCL_LCNT	0x18
+#define DW_IC_FS_SCL_HCNT	0x1c
+#define DW_IC_FS_SCL_LCNT	0x20
+#define DW_IC_INTR_STAT		0x2c
+#define DW_IC_INTR_MASK		0x30
+#define DW_IC_RAW_INTR_STAT	0x34
+#define DW_IC_RX_TL		0x38
+#define DW_IC_TX_TL		0x3c
+#define DW_IC_CLR_INTR		0x40
+#define DW_IC_CLR_RX_UNDER	0x44
+#define DW_IC_CLR_RX_OVER	0x48
+#define DW_IC_CLR_TX_OVER	0x4c
+#define DW_IC_CLR_RD_REQ	0x50
+#define DW_IC_CLR_TX_ABRT	0x54
+#define DW_IC_CLR_RX_DONE	0x58
+#define DW_IC_CLR_ACTIVITY	0x5c
+#define DW_IC_CLR_STOP_DET	0x60
+#define DW_IC_CLR_START_DET	0x64
+#define DW_IC_CLR_GEN_CALL	0x68
+#define DW_IC_ENABLE		0x6c
+#define DW_IC_STATUS		0x70
+#define DW_IC_TXFLR		0x74
+#define DW_IC_RXFLR		0x78
+#define DW_IC_SDA_HOLD		0x7c
+#define DW_IC_TX_ABRT_SOURCE	0x80
+#define DW_IC_ENABLE_STATUS	0x9c
+#define DW_IC_COMP_PARAM_1	0xf4
+#define DW_IC_COMP_VERSION	0xf8
+#define DW_IC_SDA_HOLD_MIN_VERS	0x3131312A
+#define DW_IC_COMP_TYPE		0xfc
+#define DW_IC_COMP_TYPE_VALUE	0x44570140
+
+#define DW_IC_INTR_RX_UNDER	0x001
+#define DW_IC_INTR_RX_OVER	0x002
+#define DW_IC_INTR_RX_FULL	0x004
+#define DW_IC_INTR_TX_OVER	0x008
+#define DW_IC_INTR_TX_EMPTY	0x010
+#define DW_IC_INTR_RD_REQ	0x020
+#define DW_IC_INTR_TX_ABRT	0x040
+#define DW_IC_INTR_RX_DONE	0x080
+#define DW_IC_INTR_ACTIVITY	0x100
+#define DW_IC_INTR_STOP_DET	0x200
+#define DW_IC_INTR_START_DET	0x400
+#define DW_IC_INTR_GEN_CALL	0x800
+
+#define DW_IC_INTR_DEFAULT_MASK		(DW_IC_INTR_RX_FULL | \
+					 DW_IC_INTR_TX_EMPTY | \
+					 DW_IC_INTR_TX_ABRT | \
+					 DW_IC_INTR_STOP_DET)
+
+#define DW_IC_STATUS_ACTIVITY	0x1
+
+#define DW_IC_ERR_TX_ABRT	0x1
+
+#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+
+/*
+ * status codes
+ */
+#define STATUS_IDLE			0x0
+#define STATUS_WRITE_IN_PROGRESS	0x1
+#define STATUS_READ_IN_PROGRESS		0x2
+
+#define TIMEOUT			20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * only expected abort codes are listed here
+ * refer to the datasheet for the full list
+ */
+#define ABRT_7B_ADDR_NOACK	0
+#define ABRT_10ADDR1_NOACK	1
+#define ABRT_10ADDR2_NOACK	2
+#define ABRT_TXDATA_NOACK	3
+#define ABRT_GCALL_NOACK	4
+#define ABRT_GCALL_READ		5
+#define ABRT_SBYTE_ACKDET	7
+#define ABRT_SBYTE_NORSTRT	9
+#define ABRT_10B_RD_NORSTRT	10
+#define ABRT_MASTER_DIS		11
+#define ARB_LOST		12
+
+#define DW_IC_TX_ABRT_7B_ADDR_NOACK	(1UL << ABRT_7B_ADDR_NOACK)
+#define DW_IC_TX_ABRT_10ADDR1_NOACK	(1UL << ABRT_10ADDR1_NOACK)
+#define DW_IC_TX_ABRT_10ADDR2_NOACK	(1UL << ABRT_10ADDR2_NOACK)
+#define DW_IC_TX_ABRT_TXDATA_NOACK	(1UL << ABRT_TXDATA_NOACK)
+#define DW_IC_TX_ABRT_GCALL_NOACK	(1UL << ABRT_GCALL_NOACK)
+#define DW_IC_TX_ABRT_GCALL_READ	(1UL << ABRT_GCALL_READ)
+#define DW_IC_TX_ABRT_SBYTE_ACKDET	(1UL << ABRT_SBYTE_ACKDET)
+#define DW_IC_TX_ABRT_SBYTE_NORSTRT	(1UL << ABRT_SBYTE_NORSTRT)
+#define DW_IC_TX_ABRT_10B_RD_NORSTRT	(1UL << ABRT_10B_RD_NORSTRT)
+#define DW_IC_TX_ABRT_MASTER_DIS	(1UL << ABRT_MASTER_DIS)
+#define DW_IC_TX_ARB_LOST		(1UL << ARB_LOST)
+
+#define DW_IC_TX_ABRT_NOACK		(DW_IC_TX_ABRT_7B_ADDR_NOACK | \
+					 DW_IC_TX_ABRT_10ADDR1_NOACK | \
+					 DW_IC_TX_ABRT_10ADDR2_NOACK | \
+					 DW_IC_TX_ABRT_TXDATA_NOACK | \
+					 DW_IC_TX_ABRT_GCALL_NOACK)
+
+static char *abort_sources[] = {
+	[ABRT_7B_ADDR_NOACK] =
+		"slave address not acknowledged (7bit mode)",
+	[ABRT_10ADDR1_NOACK] =
+		"first address byte not acknowledged (10bit mode)",
+	[ABRT_10ADDR2_NOACK] =
+		"second address byte not acknowledged (10bit mode)",
+	[ABRT_TXDATA_NOACK] =
+		"data not acknowledged",
+	[ABRT_GCALL_NOACK] =
+		"no acknowledgement for a general call",
+	[ABRT_GCALL_READ] =
+		"read after general call",
+	[ABRT_SBYTE_ACKDET] =
+		"start byte acknowledged",
+	[ABRT_SBYTE_NORSTRT] =
+		"trying to send start byte when restart is disabled",
+	[ABRT_10B_RD_NORSTRT] =
+		"trying to read when restart is disabled (10bit mode)",
+	[ABRT_MASTER_DIS] =
+		"trying to use disabled adapter",
+	[ARB_LOST] =
+		"lost arbitration",
+};
+
+u32 dw_readl(struct dw_i2c_dev *dev, int offset)
+{
+	u32 value;
+
+	if (dev->accessor_flags & ACCESS_16BIT)
+		value = readw(dev->base + offset) |
+			(readw(dev->base + offset + 2) << 16);
+	else
+		value = readl(dev->base + offset);
+
+	if (dev->accessor_flags & ACCESS_SWAP)
+		return swab32(value);
+	else
+		return value;
+}
+
+void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
+{
+	if (dev->accessor_flags & ACCESS_SWAP)
+		b = swab32(b);
+
+	if (dev->accessor_flags & ACCESS_16BIT) {
+		writew((u16)b, dev->base + offset);
+		writew((u16)(b >> 16), dev->base + offset + 2);
+	} else {
+		writel(b, dev->base + offset);
+	}
+}
+
+static u32
+i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
+{
+	/*
+	 * DesignWare I2C core doesn't seem to have solid strategy to meet
+	 * the tHD;STA timing spec.  Configuring _HCNT based on tHIGH spec
+	 * will result in violation of the tHD;STA spec.
+	 */
+	if (cond)
+		/*
+		 * Conditional expression:
+		 *
+		 *   IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
+		 *
+		 * This is based on the DW manuals, and represents an ideal
+		 * configuration.  The resulting I2C bus speed will be
+		 * faster than any of the others.
+		 *
+		 * If your hardware is free from tHD;STA issue, try this one.
+		 */
+		return (ic_clk * tSYMBOL + 5000) / 10000 - 8 + offset;
+	else
+		/*
+		 * Conditional expression:
+		 *
+		 *   IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
+		 *
+		 * This is just experimental rule; the tHD;STA period turned
+		 * out to be proportional to (_HCNT + 3).  With this setting,
+		 * we could meet both tHIGH and tHD;STA timing specs.
+		 *
+		 * If unsure, you'd better to take this alternative.
+		 *
+		 * The reason why we need to take into account "tf" here,
+		 * is the same as described in i2c_dw_scl_lcnt().
+		 */
+		return (ic_clk * (tSYMBOL + tf) + 5000) / 10000 - 3 + offset;
+}
+
+static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
+{
+	/*
+	 * Conditional expression:
+	 *
+	 *   IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf)
+	 *
+	 * DW I2C core starts counting the SCL CNTs for the LOW period
+	 * of the SCL clock (tLOW) as soon as it pulls the SCL line.
+	 * In order to meet the tLOW timing spec, we need to take into
+	 * account the fall time of SCL signal (tf).  Default tf value
+	 * should be 0.3 us, for safety.
+	 */
+	return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
+}
+
+static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
+{
+	int timeout = 25;
+
+	do {
+		dw_writel(dev, enable, DW_IC_ENABLE);
+		if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
+			return;
+
+		/*
+		 * Wait 10 times the signaling period of the highest I2C
+		 * transfer supported by the driver (for 400KHz this is
+		 * 25us) as described in the DesignWare I2C databook.
+		 * Round up to 1 ms since we do not have usleep()
+		 */
+		msleep(1);
+	} while (timeout--);
+
+	dev_warn(dev->dev, "timeout in %sabling adapter\n",
+		 enable ? "en" : "dis");
+}
+
+/**
+ * i2c_dw_init() - initialize the designware i2c master hardware
+ * @dev: device private data
+ *
+ * This function configures and enables the I2C master.
+ * This function is called during I2C init function, and in case of timeout at
+ * run time.
+ */
+int i2c_dw_init(struct dw_i2c_dev *dev)
+{
+	u32 input_clock_khz;
+	u32 hcnt, lcnt;
+	u32 reg;
+
+	input_clock_khz = dev->get_clk_rate_khz(dev);
+
+	reg = dw_readl(dev, DW_IC_COMP_TYPE);
+	if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
+		/* Configure register endianness access */
+		dev->accessor_flags |= ACCESS_SWAP;
+	} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+		/* Configure register access mode 16bit */
+		dev->accessor_flags |= ACCESS_16BIT;
+	} else if (reg != DW_IC_COMP_TYPE_VALUE) {
+		dev_err(dev->dev, "Unknown Synopsys component type: "
+			"0x%08x\n", reg);
+		return -ENODEV;
+	}
+
+	/* Disable the adapter */
+	__i2c_dw_enable(dev, false);
+
+	/* set standard and fast speed dividers for high/low periods */
+
+	/* Standard-mode */
+	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+				40,	/* tHD;STA = tHIGH = 4.0 us */
+				3,	/* tf = 0.3 us */
+				0,	/* 0: DW default, 1: Ideal */
+				0);	/* No offset */
+	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+				47,	/* tLOW = 4.7 us */
+				3,	/* tf = 0.3 us */
+				0);	/* No offset */
+
+	dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
+	dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+
+	/* Fast-mode */
+	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+				6,	/* tHD;STA = tHIGH = 0.6 us */
+				3,	/* tf = 0.3 us */
+				0,	/* 0: DW default, 1: Ideal */
+				0);	/* No offset */
+	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+				13,	/* tLOW = 1.3 us */
+				3,	/* tf = 0.3 us */
+				0);	/* No offset */
+
+	dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
+	dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+
+	/* Configure Tx/Rx FIFO threshold levels */
+	dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+	dw_writel(dev, 0, DW_IC_RX_TL);
+
+	/* configure the i2c master */
+	dw_writel(dev, dev->master_cfg , DW_IC_CON);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_dw_init);
+
+/*
+ * Waiting for bus not busy
+ */
+static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
+{
+	int timeout = TIMEOUT;
+
+	while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
+		if (timeout <= 0) {
+			dev_warn(dev->dev, "timeout waiting for bus ready\n");
+			return -ETIMEDOUT;
+		}
+		timeout--;
+		msleep(1);
+	}
+
+	return 0;
+}
+
+static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	u32 ic_con, ic_tar = 0;
+
+	/* Disable the adapter */
+	__i2c_dw_enable(dev, false);
+
+	/* if the slave address is ten bit address, enable 10BITADDR */
+	ic_con = dw_readl(dev, DW_IC_CON);
+	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
+		ic_con |= DW_IC_CON_10BITADDR_MASTER;
+		/*
+		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
+		 * mode has to be enabled via bit 12 of IC_TAR register.
+		 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
+		 * detected from registers.
+		 */
+		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+	} else {
+		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+	}
+
+	dw_writel(dev, ic_con, DW_IC_CON);
+
+	/*
+	 * Set the slave (target) address and enable 10-bit addressing mode
+	 * if applicable.
+	 */
+	dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
+
+	/* Enable the adapter */
+	__i2c_dw_enable(dev, true);
+
+	/* Clear and enable interrupts */
+	i2c_dw_clear_int(dev);
+	dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
+}
+
+/*
+ * Initiate (and continue) low level master read/write transaction.
+ * This function is only called from i2c_dw_isr, and pumping i2c_msg
+ * messages into the tx buffer.  Even if the size of i2c_msg data is
+ * longer than the size of the tx buffer, it handles everything.
+ */
+static void
+i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	u32 intr_mask;
+	int tx_limit, rx_limit;
+	u32 addr = msgs[dev->msg_write_idx].addr;
+	u32 buf_len = dev->tx_buf_len;
+	u8 *buf = dev->tx_buf;
+	bool need_restart = false;
+
+	intr_mask = DW_IC_INTR_DEFAULT_MASK;
+
+	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+		/*
+		 * if target address has changed, we need to
+		 * reprogram the target address in the i2c
+		 * adapter when we are done with this transfer
+		 */
+		if (msgs[dev->msg_write_idx].addr != addr) {
+			dev_err(dev->dev,
+				"%s: invalid target address\n", __func__);
+			dev->msg_err = -EINVAL;
+			break;
+		}
+
+		if (msgs[dev->msg_write_idx].len == 0) {
+			dev_err(dev->dev,
+				"%s: invalid message length\n", __func__);
+			dev->msg_err = -EINVAL;
+			break;
+		}
+
+		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+			/* new i2c_msg */
+			buf = msgs[dev->msg_write_idx].buf;
+			buf_len = msgs[dev->msg_write_idx].len;
+
+			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
+			 * IC_RESTART_EN are set, we must manually
+			 * set restart bit between messages.
+			 */
+			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
+					(dev->msg_write_idx > 0))
+				need_restart = true;
+		}
+
+		tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR);
+		rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
+
+		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
+			u32 cmd = 0;
+
+			/*
+			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
+			 * manually set the stop bit. However, it cannot be
+			 * detected from the registers so we set it always
+			 * when writing/reading the last byte.
+			 */
+			if (dev->msg_write_idx == dev->msgs_num - 1 &&
+			    buf_len == 1)
+				cmd |= BIT(9);
+
+			if (need_restart) {
+				cmd |= BIT(10);
+				need_restart = false;
+			}
+
+			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+
+				/* avoid rx buffer overrun */
+				if (rx_limit - dev->rx_outstanding <= 0)
+					break;
+
+				dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
+				rx_limit--;
+				dev->rx_outstanding++;
+			} else
+				dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
+			tx_limit--; buf_len--;
+		}
+
+		dev->tx_buf = buf;
+		dev->tx_buf_len = buf_len;
+
+		if (buf_len > 0) {
+			/* more bytes to be written */
+			dev->status |= STATUS_WRITE_IN_PROGRESS;
+			break;
+		} else
+			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
+	}
+
+	/*
+	 * If i2c_msg index search is completed, we don't need TX_EMPTY
+	 * interrupt any more.
+	 */
+	if (dev->msg_write_idx == dev->msgs_num)
+		intr_mask &= ~DW_IC_INTR_TX_EMPTY;
+
+	if (dev->msg_err)
+		intr_mask = 0;
+
+	dw_writel(dev, intr_mask,  DW_IC_INTR_MASK);
+}
+
+static void
+i2c_dw_read(struct dw_i2c_dev *dev)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	int rx_valid;
+
+	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
+		u32 len;
+		u8 *buf;
+
+		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
+			continue;
+
+		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
+			len = msgs[dev->msg_read_idx].len;
+			buf = msgs[dev->msg_read_idx].buf;
+		} else {
+			len = dev->rx_buf_len;
+			buf = dev->rx_buf;
+		}
+
+		rx_valid = dw_readl(dev, DW_IC_RXFLR);
+
+		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
+			*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+			dev->rx_outstanding--;
+		}
+
+		if (len > 0) {
+			dev->status |= STATUS_READ_IN_PROGRESS;
+			dev->rx_buf_len = len;
+			dev->rx_buf = buf;
+			return;
+		} else
+			dev->status &= ~STATUS_READ_IN_PROGRESS;
+	}
+}
+
+static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
+{
+	unsigned long abort_source = dev->abort_source;
+	int i;
+
+	if (abort_source & DW_IC_TX_ABRT_NOACK) {
+		for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+			dev_dbg(dev->dev,
+				"%s: %s\n", __func__, abort_sources[i]);
+		return -EREMOTEIO;
+	}
+
+	for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+		dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+
+	if (abort_source & DW_IC_TX_ARB_LOST)
+		return -EAGAIN;
+	else if (abort_source & DW_IC_TX_ABRT_GCALL_READ)
+		return -EINVAL; /* wrong msgs[] data */
+	else
+		return -EIO;
+}
+
+/*
+ * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ */
+int
+i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+	int ret;
+
+	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
+
+	mutex_lock(&dev->lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	reinit_completion(&dev->cmd_complete);
+#else
+	INIT_COMPLETION(dev->cmd_complete);
+#endif
+	dev->msgs = msgs;
+	dev->msgs_num = num;
+	dev->cmd_err = 0;
+	dev->msg_write_idx = 0;
+	dev->msg_read_idx = 0;
+	dev->msg_err = 0;
+	dev->status = STATUS_IDLE;
+	dev->abort_source = 0;
+	dev->rx_outstanding = 0;
+
+	ret = i2c_dw_wait_bus_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* start the transfers */
+	i2c_dw_xfer_init(dev);
+
+	/* wait for tx to complete */
+	ret = wait_for_completion_timeout(&dev->cmd_complete, HZ);
+	if (ret == 0) {
+		dev_err(dev->dev, "controller timed out\n");
+		/* i2c_dw_init implicitly disables the adapter */
+		i2c_dw_init(dev);
+		ret = -ETIMEDOUT;
+		goto done;
+	}
+
+	/*
+	 * We must disable the adapter before unlocking the &dev->lock mutex
+	 * below. Otherwise the hardware might continue generating interrupts
+	 * which in turn causes a race condition with the following transfer.
+	 * Needs some more investigation if the additional interrupts are
+	 * a hardware bug or this driver doesn't handle them correctly yet.
+	 */
+	__i2c_dw_enable(dev, false);
+
+	if (dev->msg_err) {
+		ret = dev->msg_err;
+		goto done;
+	}
+
+	/* no error */
+	if (likely(!dev->cmd_err)) {
+		ret = num;
+		goto done;
+	}
+
+	/* We have an error */
+	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
+		ret = i2c_dw_handle_tx_abort(dev);
+		goto done;
+	}
+	ret = -EIO;
+
+done:
+	mutex_unlock(&dev->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i2c_dw_xfer);
+
+u32 i2c_dw_func(struct i2c_adapter *adap)
+{
+	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+	return dev->functionality;
+}
+EXPORT_SYMBOL_GPL(i2c_dw_func);
+
+static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
+{
+	u32 stat;
+
+	/*
+	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
+	 * The unmasked raw version of the interrupt status bits is available
+	 * in the IC_RAW_INTR_STAT register.
+	 *
+	 * That is,
+	 *   stat = dw_readl(IC_INTR_STAT);
+	 * equals to,
+	 *   stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK);
+	 *
+	 * The raw version might be useful for debugging purposes.
+	 */
+	stat = dw_readl(dev, DW_IC_INTR_STAT);
+
+	/*
+	 * Do not use the IC_CLR_INTR register to clear interrupts, or
+	 * you'll miss some interrupts, triggered during the period from
+	 * dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR).
+	 *
+	 * Instead, use the separately-prepared IC_CLR_* registers.
+	 */
+	if (stat & DW_IC_INTR_RX_UNDER)
+		dw_readl(dev, DW_IC_CLR_RX_UNDER);
+	if (stat & DW_IC_INTR_RX_OVER)
+		dw_readl(dev, DW_IC_CLR_RX_OVER);
+	if (stat & DW_IC_INTR_TX_OVER)
+		dw_readl(dev, DW_IC_CLR_TX_OVER);
+	if (stat & DW_IC_INTR_RD_REQ)
+		dw_readl(dev, DW_IC_CLR_RD_REQ);
+	if (stat & DW_IC_INTR_TX_ABRT) {
+		/*
+		 * The IC_TX_ABRT_SOURCE register is cleared whenever
+		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
+		 */
+		dev->abort_source = dw_readl(dev, DW_IC_TX_ABRT_SOURCE);
+		dw_readl(dev, DW_IC_CLR_TX_ABRT);
+	}
+	if (stat & DW_IC_INTR_RX_DONE)
+		dw_readl(dev, DW_IC_CLR_RX_DONE);
+	if (stat & DW_IC_INTR_ACTIVITY)
+		dw_readl(dev, DW_IC_CLR_ACTIVITY);
+	if (stat & DW_IC_INTR_STOP_DET)
+		dw_readl(dev, DW_IC_CLR_STOP_DET);
+	if (stat & DW_IC_INTR_START_DET)
+		dw_readl(dev, DW_IC_CLR_START_DET);
+	if (stat & DW_IC_INTR_GEN_CALL)
+		dw_readl(dev, DW_IC_CLR_GEN_CALL);
+
+	return stat;
+}
+
+/*
+ * Interrupt service routine. This gets called whenever an I2C interrupt
+ * occurs.
+ */
+irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
+{
+	struct dw_i2c_dev *dev = dev_id;
+	u32 stat, enabled;
+
+	enabled = dw_readl(dev, DW_IC_ENABLE);
+	stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
+	dev_dbg(dev->dev, "%s:  %s enabled= 0x%x stat=0x%x\n", __func__,
+		dev->adapter.name, enabled, stat);
+	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
+		return IRQ_NONE;
+
+	stat = i2c_dw_read_clear_intrbits(dev);
+
+	if (stat & DW_IC_INTR_TX_ABRT) {
+		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
+		dev->status = STATUS_IDLE;
+
+		/*
+		 * Anytime TX_ABRT is set, the contents of the tx/rx
+		 * buffers are flushed.  Make sure to skip them.
+		 */
+		dw_writel(dev, 0, DW_IC_INTR_MASK);
+		goto tx_aborted;
+	}
+
+	if (stat & DW_IC_INTR_RX_FULL)
+		i2c_dw_read(dev);
+
+	if (stat & DW_IC_INTR_TX_EMPTY)
+		i2c_dw_xfer_msg(dev);
+
+	/*
+	 * No need to modify or disable the interrupt mask here.
+	 * i2c_dw_xfer_msg() will take care of it according to
+	 * the current transmit status.
+	 */
+
+tx_aborted:
+	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+		complete(&dev->cmd_complete);
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(i2c_dw_isr);
+
+void i2c_dw_enable(struct dw_i2c_dev *dev)
+{
+	/* Enable the adapter */
+	__i2c_dw_enable(dev, true);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_enable);
+
+u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
+{
+	return dw_readl(dev, DW_IC_ENABLE);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
+
+void i2c_dw_disable(struct dw_i2c_dev *dev)
+{
+	/* Disable controller */
+	__i2c_dw_enable(dev, false);
+
+	/* Disable all interrupts */
+	dw_writel(dev, 0, DW_IC_INTR_MASK);
+	dw_readl(dev, DW_IC_CLR_INTR);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_disable);
+
+void i2c_dw_clear_int(struct dw_i2c_dev *dev)
+{
+	dw_readl(dev, DW_IC_CLR_INTR);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_clear_int);
+
+void i2c_dw_disable_int(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, 0, DW_IC_INTR_MASK);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_disable_int);
+
+u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
+{
+	return dw_readl(dev, DW_IC_COMP_PARAM_1);
+}
+EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+
+MODULE_DESCRIPTION("Quantenna I2C bus adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/i2cbus/i2c-qtn-core.h b/drivers/qtn/i2cbus/i2c-qtn-core.h
new file mode 100644
index 0000000..03b5638
--- /dev/null
+++ b/drivers/qtn/i2cbus/i2c-qtn-core.h
@@ -0,0 +1,118 @@
+/*
+ * Synopsys DesignWare I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ * Copyright (C) 2014 Quantenna Communications Inc.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+
+#define DW_IC_CON_MASTER		0x1
+#define DW_IC_CON_SPEED_STD		0x2
+#define DW_IC_CON_SPEED_FAST		0x4
+#define DW_IC_CON_10BITADDR_MASTER	0x10
+#define DW_IC_CON_RESTART_EN		0x20
+#define DW_IC_CON_SLAVE_DISABLE		0x40
+
+
+/**
+ * struct dw_i2c_dev - private i2c-designware data
+ * @dev: driver model device node
+ * @base: IO registers pointer
+ * @cmd_complete: tx completion indicator
+ * @lock: protect this struct and IO registers
+ * @clk: input reference clock
+ * @cmd_err: runtime hardware error code
+ * @msgs: points to an array of messages currently being transferred
+ * @msgs_num: the number of elements in msgs
+ * @msg_write_idx: the element index of the current tx message in the msgs
+ *	array
+ * @tx_buf_len: the length of the current tx buffer
+ * @tx_buf: the current tx buffer
+ * @msg_read_idx: the element index of the current rx message in the msgs
+ *	array
+ * @rx_buf_len: the length of the current rx buffer
+ * @rx_buf: the current rx buffer
+ * @msg_err: error status of the current transfer
+ * @status: i2c master status, one of STATUS_*
+ * @abort_source: copy of the TX_ABRT_SOURCE register
+ * @irq: interrupt number for the i2c master
+ * @adapter: i2c subsystem adapter node
+ * @tx_fifo_depth: depth of the hardware tx fifo
+ * @rx_fifo_depth: depth of the hardware rx fifo
+ * @rx_outstanding: current master-rx elements in tx fifo
+ * @ss_hcnt: standard speed HCNT value
+ * @ss_lcnt: standard speed LCNT value
+ * @fs_hcnt: fast speed HCNT value
+ * @fs_lcnt: fast speed LCNT value
+ *
+ * HCNT and LCNT parameters can be used if the platform knows more accurate
+ * values than the one computed based only on the input clock frequency.
+ * Leave them to be %0 if not used.
+ */
+struct dw_i2c_dev {
+	struct device		*dev;
+	void __iomem		*base;
+	struct completion	cmd_complete;
+	struct mutex		lock;
+	struct clk		*clk;
+	u32			(*get_clk_rate_khz) (struct dw_i2c_dev *dev);
+	int			cmd_err;
+	struct i2c_msg		*msgs;
+	int			msgs_num;
+	int			msg_write_idx;
+	u32			tx_buf_len;
+	u8			*tx_buf;
+	int			msg_read_idx;
+	u32			rx_buf_len;
+	u8			*rx_buf;
+	int			msg_err;
+	unsigned int		status;
+	u32			abort_source;
+	int			irq;
+	u32			accessor_flags;
+	struct i2c_adapter	adapter;
+	u32			functionality;
+	u32			master_cfg;
+	unsigned int		tx_fifo_depth;
+	unsigned int		rx_fifo_depth;
+	int			rx_outstanding;
+};
+
+#define ACCESS_SWAP		0x00000001
+#define ACCESS_16BIT		0x00000002
+
+extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
+extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
+extern int i2c_dw_init(struct dw_i2c_dev *dev);
+extern int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+		int num);
+extern u32 i2c_dw_func(struct i2c_adapter *adap);
+extern irqreturn_t i2c_dw_isr(int this_irq, void *dev_id);
+extern void i2c_dw_enable(struct dw_i2c_dev *dev);
+extern u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev);
+extern void i2c_dw_disable(struct dw_i2c_dev *dev);
+extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
+extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
+extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
diff --git a/drivers/qtn/i2cbus/i2c-qtn-platdrv.c b/drivers/qtn/i2cbus/i2c-qtn-platdrv.c
new file mode 100644
index 0000000..81fa74e
--- /dev/null
+++ b/drivers/qtn/i2cbus/i2c-qtn-platdrv.c
@@ -0,0 +1,280 @@
+/*
+ * Quantenna I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ * Copyright (C) 2014 Quantenna Communications Inc.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/board/platform.h>
+#include <asm/board/clock.h>
+#include <ruby/gpio.h>
+#include "i2c-qtn-core.h"
+
+static int i2c_xfer_noop = 0;
+
+static int qtn_i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	if (i2c_xfer_noop) {
+		return -EAGAIN;
+	}
+
+	return i2c_dw_xfer(adap, msgs, num);
+}
+
+static struct i2c_algorithm i2c_dw_algo = {
+	.master_xfer	= qtn_i2c_dw_xfer,
+	.functionality	= i2c_dw_func,
+};
+static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+	return qtn_clk_get_rate(dev->clk)/1000;
+}
+
+static int dw_i2c_probe(struct platform_device *pdev)
+{
+	struct dw_i2c_dev *dev;
+	struct i2c_adapter *adap;
+	struct resource *mem = NULL;
+	unsigned long mem_res_size = 0;
+	int irq, r;
+	u32 param1;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		return irq; /* -ENXIO */
+	}
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+#ifndef TOPAZ_AMBER_IP
+	r = gpio_request(RUBY_GPIO_I2C_SCL, "i2c-scl");
+	if (r) {
+			dev_err(&pdev->dev, "fail to request gpio i2c scl pin #%d\n", RUBY_GPIO_I2C_SCL);
+			goto fail_ret;
+	}
+
+	r = gpio_request(RUBY_GPIO_I2C_SDA, "i2c-sda");
+	if (r) {
+			dev_err(&pdev->dev, "fail to request gpio i2c sda pin #%d\n", RUBY_GPIO_I2C_SDA);
+			goto fail_ret;
+	}
+
+	gpio_config(RUBY_GPIO_I2C_SCL, RUBY_GPIO_ALT_INPUT);
+	gpio_config(RUBY_GPIO_I2C_SDA, RUBY_GPIO_ALT_INPUT);
+#else
+	/*
+	 * In Amber GPIO pins are not shared. No need to set up alternate function.
+	 */
+#endif
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "couldn't get memory resource\n");
+		r = -ENODEV;
+		goto fail_ret;
+	}
+	mem_res_size = resource_size(mem);
+
+	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_res_size, pdev->name)) {
+		dev_err(&pdev->dev, "can't request region for resource %pR\n", mem);
+		mem = NULL;
+		r = -EBUSY;
+		goto fail_ret;
+	}
+
+	dev->base = devm_ioremap_nocache(&pdev->dev, mem->start, mem_res_size);
+	if (IS_ERR(dev->base)) {
+		dev_err(&pdev->dev, "failed to map controller\n");
+		r = PTR_ERR(dev->base);
+		goto fail_ret;
+	}
+
+	init_completion(&dev->cmd_complete);
+	mutex_init(&dev->lock);
+	dev->dev = &pdev->dev;
+	dev->irq = irq;
+	platform_set_drvdata(pdev, dev);
+
+	dev->clk = qtn_clk_get(&pdev->dev, NULL);
+	dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+
+	if (IS_ERR(dev->clk)) {
+		dev_err(&pdev->dev, "no clk found\n");
+		r = PTR_ERR(dev->clk);
+		goto fail_ret;
+	}
+
+	dev->functionality =
+		I2C_FUNC_I2C |
+		I2C_FUNC_10BIT_ADDR |
+		I2C_FUNC_SMBUS_BYTE |
+		I2C_FUNC_SMBUS_BYTE_DATA |
+		I2C_FUNC_SMBUS_WORD_DATA |
+		I2C_FUNC_SMBUS_I2C_BLOCK;
+	dev->master_cfg =  DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+		DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
+
+	param1 = i2c_dw_read_comp_param(dev);
+
+	dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
+	dev->rx_fifo_depth = ((param1 >> 8)  & 0xff) + 1;
+	dev->adapter.nr = pdev->id;
+
+	r = i2c_dw_init(dev);
+	if (r)
+		goto fail_ret;
+
+	i2c_dw_disable_int(dev);
+	r = request_irq(dev->irq, i2c_dw_isr, IRQF_SHARED, pdev->name, dev);
+	if (r) {
+		dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
+		goto fail_ret;
+	}
+
+	adap = &dev->adapter;
+	i2c_set_adapdata(adap, dev);
+	adap->owner = THIS_MODULE;
+	adap->class = I2C_CLASS_HWMON;
+	strlcpy(adap->name, "Quantenna I2C Adapter",
+			sizeof(adap->name));
+	adap->algo = &i2c_dw_algo;
+	adap->dev.parent = &pdev->dev;
+
+	r = i2c_add_numbered_adapter(adap);
+	if (r) {
+		dev_err(&pdev->dev, "failure adding adapter\n");
+		free_irq(dev->irq, dev);
+		goto fail_ret;
+	}
+
+	return 0;
+
+fail_ret:
+	if (dev->base)
+		devm_iounmap(&pdev->dev, dev->base);
+
+	if (mem)
+		devm_release_mem_region(&pdev->dev, mem->start, mem_res_size);
+
+	kfree(dev);
+	return r;
+}
+
+static int dw_i2c_remove(struct platform_device *pdev)
+{
+	struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+
+	free_irq(dev->irq, dev);
+	i2c_del_adapter(&dev->adapter);
+
+	i2c_dw_disable(dev);
+
+#ifndef TOPAZ_AMBER_IP
+	gpio_free(RUBY_GPIO_I2C_SCL);
+	gpio_free(RUBY_GPIO_I2C_SDA);
+#else
+	/*
+	 * In Amber GPIO pins are not shared. No need to set up alternate function.
+	 */
+#endif
+
+	return 0;
+}
+
+static ssize_t qtn_i2c_xfer_noop_show(struct device_driver *ddrv, char *buf)
+{
+	return sprintf(buf, "%d\n", i2c_xfer_noop);
+}
+
+static ssize_t qtn_i2c_xfer_noop_store(struct device_driver *ddrv, const char *buf, size_t count)
+{
+	if (count > 0) {
+		i2c_xfer_noop = (buf[0] == '1');
+	}
+	if (i2c_xfer_noop) {
+		pr_info("%s suppressing i2c bus activity\n", ddrv->name);
+	}
+	return count;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+DRIVER_ATTR(i2c_xfer_noop, S_IRUSR | S_IWUSR, qtn_i2c_xfer_noop_show, qtn_i2c_xfer_noop_store);
+#else
+DRIVER_ATTR(i2c_xfer_noop, S_IRUGO | S_IWUGO, qtn_i2c_xfer_noop_show, qtn_i2c_xfer_noop_store);
+#endif
+
+static struct platform_driver qtn_i2c_driver = {
+	.probe = dw_i2c_probe,
+	.remove = dw_i2c_remove,
+	.driver		= {
+		.name	= "qtn-i2c",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init qtn_i2c_init_driver(void)
+{
+	int ret;
+
+	ret = platform_driver_probe(&qtn_i2c_driver, dw_i2c_probe);
+	if (ret < 0) {
+		return ret;
+	}
+
+	if (driver_create_file(&qtn_i2c_driver.driver, &driver_attr_i2c_xfer_noop) < 0) {
+		pr_err("qtn-i2c %s: could not register sysfs driver file\n", __FUNCTION__);
+	}
+
+	return 0;
+}
+module_init(qtn_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+	driver_remove_file(&qtn_i2c_driver.driver, &driver_attr_i2c_xfer_noop);
+	platform_driver_unregister(&qtn_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Quantenna I2C Adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_beacon_desc.h b/drivers/qtn/include/kernel/net80211/ieee80211_beacon_desc.h
new file mode 100644
index 0000000..9354cdd
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_beacon_desc.h
@@ -0,0 +1,55 @@
+/**
+  Copyright (c) 2015 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+**/
+
+#ifndef IEEE80211_BEACON_DESC_H_
+#define IEEE80211_BEACON_DESC_H_
+#include "qtn/beacon_ioctl.h"
+
+struct ieee80211_beacon_param_t {
+	struct beacon_shared_ie_t  *head;
+	struct beacon_shared_ie_t  *tail;
+	uint8_t buf[BEACON_PARAM_SIZE];		/* linear buffer for the ie list */
+	uint32_t curr;			/* current offset of using buffer */
+	uint16_t size;				/* allocated buffer size */
+};
+
+#define IEEE80211_ELEMID_VENDOR_WME		(IEEE80211_ELEMID_VENDOR << 8 | 0x0)
+#define IEEE80211_ELEMID_VENDOR_WPA		(IEEE80211_ELEMID_VENDOR << 8 | 0x1)
+#define IEEE80211_ELEMID_VENDOR_ATH		(IEEE80211_ELEMID_VENDOR << 8 | 0x2)
+#define IEEE80211_ELEMID_VENDOR_QTN		(IEEE80211_ELEMID_VENDOR << 8 | 0x3)
+#define IEEE80211_ELEMID_VENDOR_EXT_ROLE	(IEEE80211_ELEMID_VENDOR << 8 | 0x4)
+#define IEEE80211_ELEMID_VENDOR_EXT_BSSID	(IEEE80211_ELEMID_VENDOR << 8 | 0x5)
+#define IEEE80211_ELEMID_VENDOR_EXT_STATE	(IEEE80211_ELEMID_VENDOR << 8 | 0x6)
+#define IEEE80211_ELEMID_VENDOR_QTN_WME		(IEEE80211_ELEMID_VENDOR << 8 | 0x7)
+#define IEEE80211_ELEMID_VENDOR_EPIGRAM		(IEEE80211_ELEMID_VENDOR << 8 | 0x8)
+#define IEEE80211_ELEMID_VENDOR_APP		(IEEE80211_ELEMID_VENDOR << 8 | 0x9)
+#define IEEE80211_ELEMID_VENDOR_QTN_OCAC_STATE	(IEEE80211_ELEMID_VENDOR << 8 | 0xA)
+#define IEEE80211_ELEMID_VENDOR_PPS2		(IEEE80211_ELEMID_VENDOR << 8 | 0xB)
+
+int ieee80211_beacon_create_param(struct ieee80211vap *vap);
+void ieee80211_beacon_flush_param(struct ieee80211_beacon_param_t *param);
+void ieee80211_beacon_destroy_param(struct ieee80211vap *vap);
+uint8_t *ieee80211_add_beacon_desc_header(struct ieee80211_node *ni, uint8_t *frm);
+uint8_t *ieee80211_add_beacon_desc_mandatory_fields(struct ieee80211_node *ni, uint8_t *frm,
+		struct ieee80211_beacon_offsets *bo);
+uint8_t *ieee80211_add_beacon_desc_ie(struct ieee80211_node *ni, uint16_t ext_ie_id, uint8_t *frm);
+void ieee80211_dump_beacon_desc_ie(struct ieee80211_beacon_param_t *param);
+
+#endif
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_bsa.h b/drivers/qtn/include/kernel/net80211/ieee80211_bsa.h
new file mode 100644
index 0000000..e9b376b
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_bsa.h
@@ -0,0 +1,158 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2016 Quantenna Communications, Inc.                 **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  File        : ieee80211_bsa.h                                            **
+**  Description : Quantenna Band steering defines                            **
+**                                                                           **
+**  This module implements portions of the IEEE Std 802.11z specification,   **
+** as well as a proprietary discovery mechanism.                             **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef _NET80211_IEEE80211_BSA_H_
+#define _NET80211_IEEE80211_BSA_H_
+
+#define BSA_STATUS_ACTIVE		1
+#define BSA_STATUS_INACTIVE		0
+
+#define BSA_PROBE_EVENT_REQ		3
+#define BSA_CONNECT_COMPLETE_EVENT	4
+#define BSA_EVENT_BSS_TRANS_STATUS	8
+#define IEEE80211_MAX_EVENT_DATA_LEN	256
+#define CAPABILITY_INFOMATION		12
+
+#define BSA_EVENT_DEAUTH 5
+#define BSA_EVENT_DISASSOC 6
+
+#define	BSA_HT_NOT_SUPPORTED		0
+#define	BSA_HT_SUPPORTED		2
+#define BSA_VHT_NOT_SUPPORTED		0
+#define	BSA_VHT_SUPPORTED		1
+#define	BSA_WNM_NOT_SUPPORTED		0
+#define	BSA_WNM_SUPPORTED		1
+#define	BSA_MU_NOT_SUPPORTED		0
+#define	BSA_MU_SUPPORTED		1
+
+#define BSA_VHT_MCSMAP_NOT_SUPPORT 0xC000
+#define BSA_VHT_MCSMAP_MASK		0xC000
+#define DEFAULT_BANDWIDTH		40
+
+#define BSA_MACFILTER_HASHSIZE		32
+
+#define	BSA_MACFILTER_HASH(addr)	\
+	(((u_int8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % BSA_MACFILTER_HASHSIZE)
+
+enum {
+	BSA_MACFILTER_OPEN = 0,
+	BSA_MACFILTER_DENY = 1,
+	BSA_MACFILTER_ALLOW = 2,
+};
+
+enum bsa_curr_band {
+	BSA_OPER_BAND_2G = 0,
+	BSA_OPER_BAND_5G = 1,
+	BSA_OPER_BAND_MAX = 2,
+};
+
+enum bsa_disconnect_dir {
+	BSA_DISCONNECT_SELF_GENERATED = 0,
+	BSA_DISCONNECT_PEER_GENERATED = 1,
+};
+
+struct qtn_bsa_peer_event_data {
+	char		bsa_name[32];
+	uint8_t		bsa_bssid[IEEE80211_ADDR_LEN];
+	uint16_t	bsa_event_id;
+	uint32_t	offset;
+}__packed;
+
+struct qtn_bsa_probe_event_info {
+	uint8_t		bsa_sta_mac[IEEE80211_ADDR_LEN];
+	uint16_t	bsa_nss;
+	int32_t		bsa_rssi;
+	uint16_t	bsa_max_phy_rate;
+	uint16_t	reserved;
+	uint16_t	bsa_curr_band;
+	uint16_t	bsa_channel;
+	uint16_t	bsa_band_width;
+	uint16_t	bsa_bss_transition;
+	uint8_t		bsa_vht_capab;
+	uint8_t		bsa_mu_mimo_capab;
+	uint16_t	cookie_len;
+	void		*cookie;
+}__packed;
+
+struct qtn_bsa_assoc_compl_event_info {
+	uint8_t		bsa_sta_mac[IEEE80211_ADDR_LEN];
+	uint16_t	reserved;
+	int32_t		bsa_rssi;
+	uint16_t	bsa_nss;
+	uint16_t	bsa_max_phy_rate;
+	uint16_t	bsa_curr_band;
+	uint16_t	bsa_channel;
+	uint32_t	bsa_band_width;
+	uint8_t		bsa_bss_transition_support;
+	uint8_t		bsa_vht_capab;
+	uint8_t		bsa_mu_mimo_capab;
+}__packed;
+
+struct qtn_bsa_disconnect_event_info {
+	uint8_t         bsa_sta_mac[IEEE80211_ADDR_LEN];
+	uint16_t        reason_code;
+	uint8_t         direction;
+}__packed;
+
+struct bsa_deny_sta {
+	LIST_ENTRY(bsa_deny_sta) list;
+	uint8_t  bsa_macaddr[IEEE80211_ADDR_LEN];
+};
+
+uint32_t ieee80211_wlan_vht_mcs_streams(uint16_t mcsmap);
+uint32_t ieee80211_wlan_vht_rxstreams(struct ieee80211_ie_vhtcap *vhtcap);
+uint32_t ieee80211_wlan_ht_rx_maxrate(struct ieee80211_ie_htcap *htcap, uint32_t *rx_ss);
+uint32_t ieee80211_wlan_vht_rx_maxrate(struct ieee80211_ie_vhtcap *vhtcap);
+/* probe and assoc complete events sent to the BSA peer entity */
+int ieee80211_bsa_probe_event_send(struct ieee80211vap *vap,struct sk_buff *skb,
+					uint8_t *bssid,uint8_t *sta_mac, int rssi);
+int ieee80211_bsa_disconnect_event_send(struct ieee80211vap *vap, struct ieee80211_node *ni,
+                                        uint16_t reason_code, uint8_t fc_subtype, uint8_t direction);
+int ieee80211_bsa_connect_complete_event_send(struct ieee80211vap *vap,struct ieee80211_node *ni);
+int ieee80211_bsa_macfilter_check(struct ieee80211vap *vap, uint8_t mac[IEEE80211_ADDR_LEN]);
+#endif  /* BSA_IEEE80211_H */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_chan_select.h b/drivers/qtn/include/kernel/net80211/ieee80211_chan_select.h
new file mode 100755
index 0000000..02fdf5e
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_chan_select.h
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2016 Quantenna
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef IEEE80211_CHAN_SELECT_H
+#define IEEE80211_CHAN_SELECT_H
+
+
+#define CHAN_CCA_SIZE		4
+#define CHAN_NUMACIBINS		2
+
+
+enum chan_selection_scan_type {
+	CHAN_SELECT_SCAN_INVALID = 0,
+	CHAN_SELECT_SCAN_BW20 = 1,
+	CHAN_SELECT_SCAN_BW40 = 2,
+	CHAN_SELECT_SCAN_BW40_ABOVE = 3,
+	CHAN_SELECT_SCAN_BW40_BELOW = 4,
+	CHAN_SELECT_SCAN_BW80 = 5,
+	CHAN_SELECT_SCAN_BW160 = 6,
+	CHAN_SELECT_SCAN_MAX = 7,
+};
+
+struct autochan_ranking_params
+{
+	int cci_instnt_factor;
+	int aci_instnt_factor;
+	int cci_longterm_factor;
+	int aci_longterm_factor;
+	int range_factor;
+	int dfs_factor;
+	int min_cochan_rssi;
+	int maxbw_minbenefit;
+	int dense_cci_span;
+};
+
+struct chan_aci_params
+{
+	int rssi;
+	int bw;
+	int weight;
+};
+
+struct ieee80211_chanset
+{
+	int pri_chan;
+	int sec20_offset;
+	int bw;
+	int center_chan;
+	int invalid;
+	int inactive;
+	int cca_array[CHAN_CCA_SIZE];
+	int cca_pri[CHAN_NUMACIBINS];	/* Store CCA value on different RSSI strenth */
+	int cca_intf;
+	int cci_instnt;
+	int aci_instnt;
+	int cci_longterm;
+	int aci_longterm;
+	int range_cost;
+	int is_dfs;
+	int cost;
+};
+
+struct ieee80211_chanset_table
+{
+	struct ieee80211_chanset *chanset;
+	int num;
+};
+
+
+#define CHAN_SEL_LOG_ERR			0
+#define CHAN_SEL_LOG_WARN			1
+#define CHAN_SEL_LOG_INFO			2
+#define CHAN_SEL_LOG_MAX                        3
+
+#define IEEE80211_CSDBG(_level, _fmt, ...)	do {	\
+	if (ic->ic_autochan_dbg_level >= (_level)) {		\
+			printk(_fmt, ##__VA_ARGS__);	\
+		}					\
+	} while (0)
+
+void ieee80211_init_chanset_ranking_params(struct ieee80211com *ic);
+int ieee80211_chan_selection_allowed(struct ieee80211com *ic);
+int ieee80211_chanset_scan_finished(struct ieee80211com *ic);
+int ieee80211_start_chanset_scan(struct ieee80211vap *vap, int scan_flags);
+int ieee80211_start_chanset_selection(struct ieee80211vap *vap, int scan_flags);
+struct ieee80211_channel * ieee80211_chanset_pick_channel(struct ieee80211vap *vap);
+void ieee80211_clean_chanset_values(struct ieee80211com *ic);
+
+
+#endif /* IEEE80211_CHAN_SELECT_H */
+
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_dot11_msg.h b/drivers/qtn/include/kernel/net80211/ieee80211_dot11_msg.h
new file mode 100644
index 0000000..2e5a070
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_dot11_msg.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2010 Quantenna Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _NET80211_IEEE80211_DOT11_MSG_H_
+#define _NET80211_IEEE80211_DOT11_MSG_H_
+
+/* Enumeration for dot11 messages */
+enum ieee80211_dot11_msg_message {
+	IEEE80211_DOT11_MSG_CLIENT_CONNECTED = 0,
+	IEEE80211_DOT11_MSG_CLIENT_DISCONNECTED,
+	IEEE80211_DOT11_MSG_CLIENT_AUTH_FAILED,
+	IEEE80211_DOT11_MSG_CLIENT_REMOVED,
+	IEEE80211_DOT11_MSG_AP_CONNECTED,
+	IEEE80211_DOT11_MSG_AP_CONNECTION_FAILED,
+	IEEE80211_DOT11_MSG_AP_DISCONNECTED,
+};
+
+/* Enumeration for message codes */
+enum ieee80211_dot11_msg_reason {
+	IEEE80211_DOT11_MSG_REASON_DISASSOCIATED = 0,
+	IEEE80211_DOT11_MSG_REASON_DEAUTHENTICATED,
+	IEEE80211_DOT11_MSG_REASON_TKIP_CMEASURES,
+	IEEE80211_DOT11_MSG_REASON_CLIENT_TIMEOUT,
+	IEEE80211_DOT11_MSG_REASON_WPA_PASSWORD_FAIL,
+	IEEE80211_DOT11_MSG_REASON_WPA_TIMEOUT,
+	IEEE80211_DOT11_MSG_REASON_BEACON_LOSS,
+	IEEE80211_DOT11_MSG_REASON_CLIENT_SENT_DEAUTH,
+	IEEE80211_DOT11_MSG_REASON_CLIENT_SENT_DISASSOC,
+};
+
+/* FIXME: this value must correspond to the d11_r array as defined in ieee80211_wireless.c */
+#define DOT11_MAX_REASON_CODE 45
+
+#endif /* _NET80211_IEEE80211_DOT11_MSG_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_linux.h b/drivers/qtn/include/kernel/net80211/ieee80211_linux.h
new file mode 100644
index 0000000..f17217a
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_linux.h
@@ -0,0 +1,557 @@
+/*-
+ * Copyright (c) 2003-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_linux.h 2696 2007-08-30 01:59:13Z kelmo $
+ */
+#ifndef _NET80211_IEEE80211_LINUX_H_
+#define _NET80211_IEEE80211_LINUX_H_
+
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/fs.h>
+#include "linux/net/8021q/vlan.h"
+#include "compat.h"
+
+/*
+ * Compatibility definition of statistics flags
+ * (bitmask in (struct iw_quality *)->updated)
+ */
+#ifndef IW_QUAL_QUAL_UPDATED
+#define IW_QUAL_QUAL_UPDATED	0x01	/* Value was updated since last read */
+#define IW_QUAL_LEVEL_UPDATED	0x02
+#define IW_QUAL_NOISE_UPDATED	0x04
+#define IW_QUAL_QUAL_INVALID	0x10	/* Driver doesn't provide value */
+#define IW_QUAL_LEVEL_INVALID	0x20
+#define IW_QUAL_NOISE_INVALID	0x40
+#endif /* IW_QUAL_QUAL_UPDATED */
+
+#ifndef IW_QUAL_ALL_UPDATED
+#define IW_QUAL_ALL_UPDATED \
+	(IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED)
+#endif
+#ifndef IW_QUAL_ALL_INVALID
+#define IW_QUAL_ALL_INVALID \
+	(IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID | IW_QUAL_NOISE_INVALID)
+#endif
+
+void set_quality(struct iw_quality *iq, u_int rssi, int noise);
+
+/*
+ * Task deferral
+ *
+ * Deduce if tasklets are available.  If not then
+ * fall back to using the immediate work queue.
+ */
+#include <linux/interrupt.h>
+#ifdef DECLARE_TASKLET			/* native tasklets */
+#define IEEE80211_TQ_STRUCT tasklet_struct
+#define IEEE80211_INIT_TQUEUE(a,b,c)	tasklet_init((a),(b),(unsigned long)(c))
+#define IEEE80211_SCHEDULE_TQUEUE(a)	tasklet_schedule((a))
+#define IEEE80211_CANCEL_TQUEUE(a)	if (!in_interrupt()) tasklet_kill((a))
+typedef unsigned long IEEE80211_TQUEUE_ARG;
+#define mark_bh(a) do {} while (0)
+#else					/* immediate work queue */
+#define IEEE80211_TQ_STRUCT tq_struct
+#define IEEE80211_INIT_TQUEUE(a,b,c)		INIT_TQUEUE(a,b,c)
+#define IEEE80211_SCHEDULE_TQUEUE(a) do { \
+	int __macro_needmark;		\
+	__macro_needmark |= queue_task((a), &tq_immediate);	\
+	if (__macro_needmark)		\
+		mark_bh(IMMEDIATE_BH);	\
+} while(0)
+typedef void *IEEE80211_TQUEUE_ARG;
+#define	tasklet_disable(t)	do { (void) t; local_bh_disable(); } while (0)
+#define	tasklet_enable(t)	do { (void) t; local_bh_enable(); } while (0)
+/* XXX: not supporting cancel in old kernels! */
+#define IEEE80211_CANCEL_TQUEUE(a)	((a),0)
+#endif /* !DECLARE_TASKLET */
+
+#define	IEEE80211_RESCHEDULE	schedule
+
+/*
+ * Beacon handler locking definitions.
+ * Beacon locking 
+ * UAPSD locking 
+ */
+typedef spinlock_t ieee80211com_lock_t;
+#define	IEEE80211_LOCK_INIT(_ic, _name)				\
+	spin_lock_init(&(_ic)->ic_comlock)
+#define	IEEE80211_LOCK_DESTROY(_ic)
+#define	IEEE80211_LOCK_IRQ(_ic) do {				\
+	unsigned long __ilockflags;				\
+	spin_lock_irqsave(&(_ic)->ic_comlock, __ilockflags);
+#define	IEEE80211_UNLOCK_IRQ(_ic)					\
+	spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);	\
+} while (0)
+#define	IEEE80211_UNLOCK_IRQ_EARLY(_ic)					\
+	spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);
+#define IEEE80211_LOCK_BH(_ic)	spin_lock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_LOCK(_ic)	spin_lock(&(_ic)->ic_comlock)
+#define IEEE80211_UNLOCK(_ic)	spin_unlock(&(_ic)->ic_comlock)
+
+/* NB: beware, spin_is_locked() is unusable for !SMP */
+#if defined(CONFIG_SMP)
+#define	IEEE80211_LOCK_ASSERT(_ic) \
+	KASSERT(spin_is_locked(&(_ic)->ic_comlock),("ieee80211com not locked!"))
+#else
+#define	IEEE80211_LOCK_ASSERT(_ic)
+#endif
+
+#define IEEE80211_VAPS_LOCK_INIT(_ic, _name)		\
+	spin_lock_init(&(_ic)->ic_vapslock)
+#define IEEE80211_VAPS_LOCK_DESTROY(_ic)
+#define IEEE80211_VAPS_LOCK(_ic)	spin_lock(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_UNLOCK(_ic)	spin_unlock(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_LOCK_BH(_ic)	spin_lock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_UNLOCK_BH(_ic)	spin_unlock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_LOCK_IRQ(_ic)	do {	\
+	int _vaps_lockflags;			\
+	spin_lock_irqsave(&(_ic)->ic_vapslock, _vaps_lockflags);
+#define IEEE80211_VAPS_UNLOCK_IRQ(_ic)	\
+	spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags); \
+} while (0)
+#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic)	spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
+
+
+/* NB: beware, spin_is_locked() is unusable for !SMP */
+#if defined(CONFIG_SMP)
+#define IEEE80211_VAPS_LOCK_ASSERT(_ic) \
+	KASSERT(spin_is_locked(&(_ic)->ic_vapslock),("ieee80211com_vaps not locked!"))
+#else
+#define IEEE80211_VAPS_LOCK_ASSERT(_ic)
+#endif
+
+
+/*
+ * Node locking definitions.
+ */
+typedef spinlock_t ieee80211_node_lock_t;
+#define	IEEE80211_NODE_LOCK_INIT(_nt, _name)	spin_lock_init(&(_nt)->nt_nodelock)
+#define	IEEE80211_NODE_LOCK_DESTROY(_nt)
+#define	IEEE80211_NODE_LOCK(_nt)	spin_lock(&(_nt)->nt_nodelock)
+#define	IEEE80211_NODE_UNLOCK(_nt)	spin_unlock(&(_nt)->nt_nodelock)
+#define	IEEE80211_NODE_LOCK_BH(_nt)	spin_lock_bh(&(_nt)->nt_nodelock)
+#define	IEEE80211_NODE_UNLOCK_BH(_nt)	spin_unlock_bh(&(_nt)->nt_nodelock)
+#define	IEEE80211_NODE_LOCK_IRQ(_nt)	do {	\
+	unsigned long __node_lockflags;		\
+	spin_lock_irqsave(&(_nt)->nt_nodelock, __node_lockflags);
+#define	IEEE80211_NODE_UNLOCK_IRQ(_nt)		\
+	spin_unlock_irqrestore(&(_nt)->nt_nodelock, __node_lockflags); \
+} while(0)
+#define	IEEE80211_NODE_UNLOCK_IRQ_EARLY(_nt)		\
+	spin_unlock_irqrestore(&(_nt)->nt_nodelock, __node_lockflags);
+
+/* NB: beware, *_is_locked() are bogusly defined for UP+!PREEMPT */
+#if (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)) && defined(spinlock_is_locked)
+#define	IEEE80211_NODE_LOCK_ASSERT(_nt) \
+	KASSERT(spinlock_is_locked(&(_nt)->nt_nodelock), \
+		("802.11 node table not locked!"))
+#else
+#define	IEEE80211_NODE_LOCK_ASSERT(_nt)
+#endif
+
+/*
+ * Node table scangen locking definitions.
+ */
+typedef spinlock_t ieee80211_scan_lock_t;
+#define	IEEE80211_SCAN_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_scanlock)
+#define	IEEE80211_SCAN_LOCK_DESTROY(_nt)
+#define	IEEE80211_SCAN_LOCK_BH(_nt)	spin_lock_bh(&(_nt)->nt_scanlock)
+#define	IEEE80211_SCAN_UNLOCK_BH(_nt)	spin_unlock_bh(&(_nt)->nt_scanlock)
+#define	IEEE80211_SCAN_LOCK_IRQ(_nt)	do {	\
+	unsigned long __scan_lockflags;		\
+	spin_lock_irqsave(&(_nt)->nt_scanlock, __scan_lockflags);
+#define	IEEE80211_SCAN_UNLOCK_IRQ(_nt)		\
+	spin_unlock_irqrestore(&(_nt)->nt_scanlock, __scan_lockflags); \
+} while (0)
+#define	IEEE80211_SCAN_UNLOCK_IRQ_EARLY(_nt)		\
+	spin_unlock_irqrestore(&(_nt)->nt_scanlock, __scan_lockflags);
+
+/* NB: beware, spin_is_locked() is unusable for !SMP */
+#if defined(CONFIG_SMP)
+#define	IEEE80211_SCAN_LOCK_ASSERT(_nt) \
+	KASSERT(spin_is_locked(&(_nt)->nt_scanlock), ("scangen not locked!"))
+#else
+#define	IEEE80211_SCAN_LOCK_ASSERT(_nt)
+#endif
+
+/*
+ * 802.1x MAC ACL database locking definitions.
+ */
+typedef spinlock_t acl_lock_t;
+#define	ACL_LOCK_INIT(_as, _name)	spin_lock_init(&(_as)->as_lock)
+#define	ACL_LOCK_DESTROY(_as)
+#define	ACL_LOCK(_as)			spin_lock(&(_as)->as_lock)
+#define	ACL_UNLOCK(_as)			spin_unlock(&(_as)->as_lock)
+#define	ACL_LOCK_BH(_as)		spin_lock_bh(&(_as)->as_lock)
+#define	ACL_UNLOCK_BH(_as)		spin_unlock_bh(&(_as)->as_lock)
+
+/* NB: beware, spin_is_locked() is unusable for !SMP */
+#if defined(CONFIG_SMP)
+#define	ACL_LOCK_ASSERT(_as) \
+	KASSERT(spin_is_locked(&(_as)->as_lock), ("ACL not locked!"))
+#else
+#define	ACL_LOCK_ASSERT(_as)
+#endif
+
+/*
+ * Per-node power-save queue definitions.  Beware of control
+ * flow with IEEE80211_NODE_SAVEQ_LOCK/IEEE80211_NODE_SAVEQ_UNLOCK.
+ */
+#define	IEEE80211_NODE_SAVEQ_INIT(_ni, _name) do {		\
+	skb_queue_head_init(&(_ni)->ni_savedq);			\
+} while (0)
+#define	IEEE80211_NODE_SAVEQ_DESTROY(_ni)
+#define	IEEE80211_NODE_SAVEQ_QLEN(_ni)	skb_queue_len(&(_ni)->ni_savedq)
+#define	IEEE80211_NODE_SAVEQ_LOCK(_ni)				\
+	spin_lock(&(_ni)->ni_savedq.lock)
+#define	IEEE80211_NODE_SAVEQ_UNLOCK(_ni)			\
+	spin_unlock(&(_ni)->ni_savedq.lock)
+#define	IEEE80211_NODE_SAVEQ_LOCK_IRQ(_ni) do {			\
+	unsigned long __sqlockflags;				\
+	spin_lock_irqsave(&(_ni)->ni_savedq.lock, __sqlockflags);
+#define	IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(_ni)			\
+	spin_unlock_irqrestore(&(_ni)->ni_savedq.lock, __sqlockflags);\
+} while (0)
+
+/* caller MUST lock IEEE80211_NODE_SAVEQ */
+#define	IEEE80211_NODE_SAVEQ_DEQUEUE(_ni, _skb, _qlen) do {	\
+	_skb = __skb_dequeue(&(_ni)->ni_savedq);		\
+	(_qlen) = skb_queue_len(&(_ni)->ni_savedq);		\
+} while (0)
+#define	_IEEE80211_NODE_SAVEQ_ENQUEUE(_ni, _skb, _qlen, _age) do {\
+	struct sk_buff *tail = skb_peek_tail(&(_ni)->ni_savedq);\
+	if (tail != NULL) {					\
+		_age -= M_AGE_GET(tail);			\
+		__skb_append(tail, _skb, &(_ni)->ni_savedq);	\
+	} else {						\
+		__skb_queue_head(&(_ni)->ni_savedq, _skb);	\
+	}							\
+	M_AGE_SET(_skb, _age);					\
+	(_qlen) = skb_queue_len(&(_ni)->ni_savedq);		\
+} while (0)
+
+struct ieee80211com;
+struct ieee80211vap;
+
+int ieee80211_load_module(const char *);
+
+/*
+ * Node reference counting definitions.
+ *
+ * ieee80211_node_initref	initialize the reference count to 1
+ * ieee80211_node_incref	add a reference
+ * ieee80211_node_dectestref	remove a reference and return 1 if this
+ *				is the last reference, otherwise 0
+ * ieee80211_node_refcnt	reference count for printing (only)
+ */
+#define ieee80211_node_initref(_ni)	atomic_set(&(_ni)->ni_refcnt, 1)
+#define ieee80211_node_incref(_ni)	atomic_inc(&(_ni)->ni_refcnt)
+#define ieee80211_node_decref(_ni)	atomic_dec(&(_ni)->ni_refcnt)
+#define	ieee80211_node_dectestref(_ni)	atomic_dec_and_test(&(_ni)->ni_refcnt)
+#define	ieee80211_node_refcnt(_ni)	(_ni)->ni_refcnt.counter
+
+#define	le16toh(_x)	le16_to_cpu(_x)
+#define	htole16(_x)	cpu_to_le16(_x)
+#define	le32toh(_x)	le32_to_cpu(_x)
+#define	htole32(_x)	cpu_to_le32(_x)
+#define	be16toh(_x)	be16_to_cpu(_x)
+#define	htobe16(_x)	cpu_to_be16(_x)
+#define	be32toh(_x)	be32_to_cpu(_x)
+#define	htobe32(_x)	cpu_to_be32(_x)
+
+/*
+ * Linux has no equivalents to malloc types so null these out.
+ */
+#define	MALLOC_DEFINE(type, shortdesc, longdesc)
+#define	MALLOC_DECLARE(type)
+
+/*
+ * flags to malloc.
+ */
+#define	M_NOWAIT	0x0001		/* do not block */
+#define	M_WAITOK	0x0002		/* ok to block */
+#define	M_ZERO		0x0100		/* bzero the allocation */
+
+/* Debug memory alloc/free. Keeps a running total of allocated/freed
+ * dynamic memory on the WLAN driver.
+ */
+#define WLAN_MALLOC_FREE_TOT_DEBUG
+
+#ifdef WLAN_MALLOC_FREE_TOT_DEBUG
+/* Total number of bytes allocated using the MALLOC macro */
+extern int g_wlan_tot_alloc;
+extern int g_wlan_tot_alloc_cnt;
+/* Total number of bytes freed using the FREE macro */
+extern int g_wlan_tot_free;
+extern int g_wlan_tot_free_cnt;
+/* g_wlan_tot_alloc - g_wlan_tot_free - convenience value */
+extern int g_wlan_balance;
+#endif
+
+static __inline void
+ieee80211_safe_wait_ms(uint32_t wait_ms, int is_proc)
+{
+#define	IEEE80211_SAFE_WAIT_MS_MAX	1000
+	unsigned long start_time = jiffies;
+	uint32_t ret = 0;
+
+	KASSERT(wait_ms <= IEEE80211_SAFE_WAIT_MS_MAX,
+		("%s: safe wait limit exceeded\n", __func__));
+
+	while (!ret) {
+		if (is_proc) {
+			msleep(wait_ms);
+			ret = 1;
+		} else {
+			ret = time_after(jiffies,
+				start_time + msecs_to_jiffies(wait_ms));
+		}
+	}
+}
+
+/* WLAN driver malloc - chain into kmalloc while supporting extra
+ * features such as zeroing the memory. 
+ */
+static __inline void *
+ieee80211_malloc(size_t size, int flags)
+{
+#ifndef WLAN_MALLOC_FREE_TOT_DEBUG
+	void *p = kmalloc(size, flags & M_NOWAIT ? GFP_ATOMIC : GFP_KERNEL);
+	if (p && (flags & M_ZERO))
+		memset(p, 0, size);
+	return p;
+#else
+	/* Debug version of the MALLOC - add in extra fields for accounting */
+	/* The memory looks like: | Size | Magic | Block ..... | 
+	 * Where magic is 0xDEADBEEF and is used to ensure we're only
+	 * doing accounting/pointer juggling on blocks allocated using
+	 * this MALLOC
+	 */
+	void *p = kmalloc(size + (2 * sizeof(int)), flags & M_NOWAIT ? GFP_ATOMIC : GFP_KERNEL);
+	if (p)
+	{
+		int *p_size = (int *)p;
+		if (flags & M_ZERO)
+		{
+			memset(p, 0, size + (2 * sizeof(int)));
+		}
+		/* First extra word is the length of the allocation */
+		*p_size = size;
+		p_size++;
+		/* Magic value for making free work properly */
+		*p_size = 0xDEADBEEF;
+		p_size++;
+		p = p_size;
+		/* Accounting */
+		g_wlan_tot_alloc += size;
+		g_wlan_tot_alloc_cnt++;
+		/* This is NOT thread safe */
+		g_wlan_balance += size;
+	}
+	return p;
+#endif
+}
+
+#ifdef WLAN_MALLOC_FREE_TOT_DEBUG
+static __inline void
+ieee80211_free(void *addr)
+{
+	int *p_size = (int *)(addr);
+	p_size--;
+	/* Only do pointer juggling for blocks allocated using MALLOC */
+	if (*p_size == 0xDEADBEEF)
+	{
+		p_size--;
+		g_wlan_tot_free += *p_size;
+		g_wlan_tot_free_cnt++;
+		/* This is NOT thread safe */
+		g_wlan_balance -= *p_size;
+	}
+	else
+	{
+		/* Invalid - block not allocated using 'MALLOC' passed in */
+		p_size++;
+	}
+	//printk("Free %p\n", p_size);
+	kfree((void *)p_size);
+}
+#endif
+
+#define	MALLOC(_ptr, cast, _size, _type, _flags) \
+	((_ptr) = (cast)ieee80211_malloc(_size, _flags))
+#ifdef WLAN_MALLOC_FREE_TOT_DEBUG
+# define	FREE(addr, type)	ieee80211_free((addr))
+#else
+# define	FREE(addr, type)	kfree((addr))
+#endif
+
+/*
+ * This unlikely to be popular but it dramatically reduces diffs.
+ */
+#define printf(...) printk(__VA_ARGS__)
+struct ieee80211com;
+extern void if_printf(struct net_device *, const char *, ...);
+extern const char *ether_sprintf(const u_int8_t *);
+
+/*
+ * Queue write-arounds and support routines.
+ */
+struct sk_buff *ieee80211_getdataframe(struct ieee80211vap *vap, uint8_t **frm, uint8_t qos,
+					uint32_t payload_len);
+struct sk_buff *ieee80211_getmgtframe(uint8_t **frm, uint32_t payload_len);
+#define	IF_ENQUEUE(_q,_skb)	skb_queue_tail(_q,_skb)
+#define	IF_DEQUEUE(_q,_skb)	(_skb = skb_dequeue(_q))
+#define	_IF_QLEN(_q)		skb_queue_len(_q)
+#define	IF_DRAIN(_q)		skb_queue_drain(_q)
+void skb_queue_drain(struct sk_buff_head *q);
+
+#ifndef __MOD_INC_USE_COUNT
+#define	_MOD_INC_USE(_m, _err)						\
+	if (!try_module_get(_m)) {					\
+		printk(KERN_WARNING "%s: try_module_get failed\n",	\
+			__func__); \
+		_err;							\
+	}
+#define	_MOD_DEC_USE(_m)		module_put(_m)
+#else
+#define	_MOD_INC_USE(_m, _err)	MOD_INC_USE_COUNT
+#define	_MOD_DEC_USE(_m)	MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+
+#ifndef CLONE_KERNEL
+/*
+ * List of flags we want to share for kernel threads,
+ * if only because they are not used by them anyway.
+ */
+#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
+#endif
+
+#include <linux/mm.h>
+#ifndef offset_in_page
+#define	offset_in_page(p) ((unsigned long) (p) & ~PAGE_MASK)
+#endif
+
+#ifndef module_put_and_exit
+#define module_put_and_exit(code) do {	\
+	_MOD_DEC_USE(THIS_MODULE);	\
+	do_exit(code);			\
+} while (0)
+#endif
+
+/*
+ * Linux uses __BIG_ENDIAN and __LITTLE_ENDIAN while BSD uses _foo
+ * and an explicit _BYTE_ORDER.  Sorry, BSD got there first--define
+ * things in the BSD way...
+ */
+#undef _LITTLE_ENDIAN
+#define	_LITTLE_ENDIAN	1234	/* LSB first: i386, vax */
+#undef _BIG_ENDIAN
+#define	_BIG_ENDIAN	4321	/* MSB first: 68000, ibm, net */
+#include <asm/byteorder.h>
+#if defined(__LITTLE_ENDIAN)
+#define	_BYTE_ORDER	_LITTLE_ENDIAN
+#elif defined(__BIG_ENDIAN)
+#define	_BYTE_ORDER	_BIG_ENDIAN
+#else
+#error "Please fix asm/byteorder.h"
+#endif
+
+
+/*
+ * Deal with the sysctl handler api changing.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+#define	IEEE80211_SYSCTL_DECL(f, ctl, write, filp, buffer, lenp, ppos) \
+	f(struct ctl_table *ctl, int write, \
+	  void __user *buffer, size_t *lenp, loff_t *ppos)
+#define	IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
+	proc_dointvec(ctl, write, buffer, lenp, ppos)
+#define BIN_ATTR_ACCESS_DECL(f, filp, kobj, bin_attr, buf, offset, size) \
+	ssize_t f(struct file *filp, struct kobject * kobj, struct bin_attribute * bin_attr, char * buf, loff_t offset, size_t size)
+#else
+#define	IEEE80211_SYSCTL_DECL(f, ctl, write, filp, buffer, lenp, ppos) \
+	f(struct ctl_table *ctl, int write, struct file *filp, \
+	  void __user *buffer, size_t *lenp, loff_t *ppos)
+#define	IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
+	proc_dointvec(ctl, write, filp, buffer, lenp, ppos)
+#define BIN_ATTR_ACCESS_DECL(f, filp, kobj, bin_attr, buf, offset, size) \
+	ssize_t f(struct kobject * kobj, struct bin_attribute * bin_attr, char * buf, loff_t offset, size_t size)
+#endif
+
+void ieee80211_sysctl_vattach(struct ieee80211vap *);
+void ieee80211_sysctl_vdetach(struct ieee80211vap *);
+int ieee80211_proc_vcreate(struct ieee80211vap *, struct file_operations *,
+	       char *);
+void ieee80211_proc_cleanup(struct ieee80211vap *);
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define IEEE80211_VLAN_TAG_USED 1
+
+#ifndef VLAN_GROUP_ARRAY_PART_LEN
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+#define vlan_group_set_device(group, vid, dev) do { \
+	group->vlan_devices[vid] = dev; \
+} while (0);
+#endif
+
+#endif
+
+#else
+#define IEEE80211_VLAN_TAG_USED 0
+#endif
+void ieee80211_vlan_vattach(struct ieee80211vap *);
+void ieee80211_vlan_vdetach(struct ieee80211vap *);
+
+void ieee80211_ioctl_vattach(struct ieee80211vap *);
+void ieee80211_ioctl_vdetach(struct ieee80211vap *);
+void sample_rel_client_data(struct ieee80211vap *vap);
+struct ifreq;
+int ieee80211_ioctl_create_vap(struct ieee80211com *, struct ifreq *,
+	struct net_device *);
+int ieee80211_create_vap(struct ieee80211com *, char *, struct net_device *,
+	int, int);
+
+/* For explicitly logging messages */
+void ieee80211_dot11_msg_send(struct ieee80211vap *vap,
+		const char *mac_bssid,
+		const char *message,
+		const char *message_code,
+		int message_reason,
+		const char *message_description,
+		const char *auth,
+		const char *crypto);
+
+extern char *d11_m[];
+extern char *d11_c[];
+extern char *d11_r[];
+
+#endif /* _NET80211_IEEE80211_LINUX_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_monitor.h b/drivers/qtn/include/kernel/net80211/ieee80211_monitor.h
new file mode 100644
index 0000000..0ef1541
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_monitor.h
@@ -0,0 +1,247 @@
+/*-
+ * Copyright (c) 2005 John Bicket
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_monitor.h 2602 2007-07-24 14:15:34Z kelmo $
+ */
+#ifndef _NET80211_IEEE80211_MONITOR_H_
+#define _NET80211_IEEE80211_MONITOR_H_
+
+
+#ifndef ARPHRD_IEEE80211_RADIOTAP
+#define ARPHRD_IEEE80211_RADIOTAP	803 /* IEEE 802.11 + radiotap header */
+#endif /* ARPHRD_IEEE80211_RADIOTAP */
+
+#ifndef ARPHRD_IEEE80211_ATHDESC
+#define ARPHRD_IEEE80211_ATHDESC	804 /* IEEE 802.11 + atheros descriptor */
+#endif /* ARPHRD_IEEE80211_RADIOTAP */
+
+#define ATHDESC_HEADER_SIZE	32
+#include <compat.h>
+#include "net80211/ieee80211_radiotap.h"
+struct ieee80211_phy_params {
+	u_int8_t rate0;
+	u_int8_t rate1;
+	u_int8_t rate2;
+	u_int8_t rate3;
+
+	u_int8_t try0;
+	u_int8_t try1;
+	u_int8_t try2;
+	u_int8_t try3;
+
+	u_int8_t power;
+	u_int32_t flags;
+};
+
+
+
+enum {
+	DIDmsg_lnxind_wlansniffrm		= 0x00000044,
+	DIDmsg_lnxind_wlansniffrm_hosttime	= 0x00010044,
+	DIDmsg_lnxind_wlansniffrm_mactime	= 0x00020044,
+	DIDmsg_lnxind_wlansniffrm_channel	= 0x00030044,
+	DIDmsg_lnxind_wlansniffrm_rssi		= 0x00040044,
+	DIDmsg_lnxind_wlansniffrm_sq		= 0x00050044,
+	DIDmsg_lnxind_wlansniffrm_signal	= 0x00060044,
+	DIDmsg_lnxind_wlansniffrm_noise		= 0x00070044,
+	DIDmsg_lnxind_wlansniffrm_rate		= 0x00080044,
+	DIDmsg_lnxind_wlansniffrm_istx		= 0x00090044,
+	DIDmsg_lnxind_wlansniffrm_frmlen	= 0x000A0044
+};
+enum {
+	P80211ENUM_msgitem_status_no_value	= 0x00
+};
+enum {
+	P80211ENUM_truth_false			= 0x00,
+	P80211ENUM_truth_true			= 0x01
+};
+
+/*
+ * Transmit descriptor status.  This structure is filled
+ * in only after the tx descriptor process method finds a
+ * ``done'' descriptor; at which point it returns something
+ * other than HAL_EINPROGRESS.
+ *
+ * Note that ts_antenna may not be valid for all h/w.  It
+ * should be used only if non-zero.
+ */
+struct ath_tx_status {
+        u_int16_t       ts_seqnum;      /* h/w assigned sequence number */
+        u_int16_t       ts_tstamp;      /* h/w assigned timestamp */
+        u_int8_t        ts_status;      /* frame status, 0 => xmit ok */
+        u_int8_t        ts_rate;        /* h/w transmit rate index */
+        int8_t          ts_rssi;        /* tx ack RSSI */
+        u_int8_t        ts_shortretry;  /* # short retries */
+        u_int8_t        ts_longretry;   /* # long retries */
+        u_int8_t        ts_virtcol;     /* virtual collision count */
+        u_int8_t        ts_antenna;     /* antenna information */
+};
+
+/*
+ * Receive descriptor status.  This structure is filled
+ * in only after the rx descriptor process method finds a
+ * ``done'' descriptor; at which point it returns something
+ * other than HAL_EINPROGRESS.
+ *
+ * If rx_status is zero, then the frame was received ok;
+ * otherwise the error information is indicated and rs_phyerr
+ * contains a phy error code if HAL_RXERR_PHY is set.  In general
+ * the frame contents is undefined when an error occurred thought
+ * for some errors (e.g. a decryption error), it may be meaningful.
+ *
+ * Note that the receive timestamp is expanded using the TSF to
+ * 15 bits (regardless of what the h/w provides directly).
+ *
+ * rx_rssi is in units of dbm above the noise floor.  This value
+ * is measured during the preamble and PLCP; i.e. with the initial
+ * 4us of detection.  The noise floor is typically a consistent
+ * -96dBm absolute power in a 20MHz channel.
+ */
+struct ath_rx_status {
+        u_int16_t       rs_datalen;     /* rx frame length */
+        u_int16_t       rs_tstamp;      /* h/w assigned timestamp */
+        u_int8_t        rs_status;      /* rx status, 0 => recv ok */
+        u_int8_t        rs_phyerr;      /* phy error code */
+        int8_t          rs_rssi;        /* rx frame RSSI */
+        u_int8_t        rs_keyix;       /* key cache index */
+        u_int8_t        rs_rate;        /* h/w receive rate index */
+        u_int8_t        rs_antenna;     /* antenna information */
+        u_int8_t        rs_more;        /* more descriptors follow */
+};
+
+/*
+ * Definitions for the software frame/packet descriptors used by
+ * the Quantenna HAL.  This definition obscures hardware-specific
+ * details from the driver.  Drivers are expected to fill in the
+ * portions of a descriptor that are not opaque then use HAL calls
+ * to complete the work.  Status for completed frames is returned
+ * in a device-independent format.
+ */
+#define ds_txstat       ds_us.tx
+#define ds_rxstat       ds_us.rx
+
+#define HAL_RXERR_CRC           0x01    /* CRC error on frame */
+#define HAL_RXERR_PHY           0x02    /* PHY error, rs_phyerr is valid */
+#define HAL_RXERR_FIFO          0x04    /* fifo overrun */
+#define HAL_RXERR_DECRYPT       0x08    /* non-Michael decrypt error */
+#define HAL_RXERR_MIC   
+
+struct qnt_desc {
+        /*
+         * The following definitions are passed directly
+         * to the hardware and managed by the HAL.  Drivers
+         * should not touch those elements marked opaque.
+         */
+        u_int32_t       ds_link;        /* phys address of next descriptor */
+        u_int32_t       ds_data;        /* phys address of data buffer */
+        u_int32_t       ds_ctl0;        /* opaque DMA control 0 */
+        u_int32_t       ds_ctl1;        /* opaque DMA control 1 */
+        u_int32_t       ds_hw[4];       /* opaque h/w region */
+        /*
+         * The remaining definitions are managed by software;
+         * these are valid only after the rx/tx process descriptor
+         * methods return a non-EINPROGRESS  code.
+         */
+        union {
+                struct ath_tx_status tx;/* xmit status */
+                struct ath_rx_status rx;/* recv status */
+        } ds_us;
+        void            *ds_vdata;      /* virtual addr of data buffer */
+} __packed;
+
+
+typedef struct {
+        u_int32_t did;
+        u_int16_t status;
+        u_int16_t len;
+        u_int32_t data;
+} p80211item_uint32_t;
+
+typedef struct {
+        u_int32_t msgcode;
+        u_int32_t msglen;
+#define WLAN_DEVNAMELEN_MAX 16
+        u_int8_t devname[WLAN_DEVNAMELEN_MAX];
+        p80211item_uint32_t hosttime;
+        p80211item_uint32_t mactime;
+        p80211item_uint32_t channel;
+        p80211item_uint32_t rssi;
+        p80211item_uint32_t sq;
+        p80211item_uint32_t signal;
+        p80211item_uint32_t noise;
+        p80211item_uint32_t rate;
+        p80211item_uint32_t istx;
+        p80211item_uint32_t frmlen;
+} wlan_ng_prism2_header;
+
+
+
+#define ATH_RX_RADIOTAP_PRESENT (               \
+	(1 << IEEE80211_RADIOTAP_TSFT)		| \
+        (1 << IEEE80211_RADIOTAP_FLAGS)         | \
+        (1 << IEEE80211_RADIOTAP_RATE)          | \
+        (1 << IEEE80211_RADIOTAP_CHANNEL)       | \
+	(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)	| \
+	(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE)	| \
+        (1 << IEEE80211_RADIOTAP_ANTENNA)       | \
+        (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL)  | \
+        0)
+
+struct ath_rx_radiotap_header {
+        struct ieee80211_radiotap_header wr_ihdr;
+	__le64 wr_tsft;
+        u_int8_t wr_flags;
+        u_int8_t wr_rate;
+        __le16 wr_chan_freq;
+        __le16 wr_chan_flags;
+	int8_t  wr_dbm_antsignal;
+	int8_t  wr_dbm_antnoise;
+        u_int8_t wr_antenna;
+        u_int8_t wr_antsignal;
+}__attribute__((__packed__));
+
+#define ATH_TX_RADIOTAP_PRESENT (               \
+	(1 << IEEE80211_RADIOTAP_TSFT)		| \
+        (1 << IEEE80211_RADIOTAP_FLAGS)         | \
+        (1 << IEEE80211_RADIOTAP_RATE)          | \
+        (1 << IEEE80211_RADIOTAP_DBM_TX_POWER)  | \
+        (1 << IEEE80211_RADIOTAP_ANTENNA)       | \
+        0)
+
+struct ath_tx_radiotap_header {
+        struct ieee80211_radiotap_header wt_ihdr;
+	__le64 wt_tsft;
+        u_int8_t wt_flags;	
+        u_int8_t wt_rate;
+        u_int8_t wt_txpower;
+        u_int8_t wt_antenna;
+};
+
+
+void ieee80211_monitor_encap(struct ieee80211vap *, struct sk_buff *);
+
+
+#endif /* _NET80211_IEEE80211_MONITOR_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_node.h b/drivers/qtn/include/kernel/net80211/ieee80211_node.h
new file mode 100644
index 0000000..5697020
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_node.h
@@ -0,0 +1,874 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_node.h 2607 2007-07-25 15:20:59Z mrenzmann $
+ */
+#ifndef _NET80211_IEEE80211_NODE_H_
+#define _NET80211_IEEE80211_NODE_H_
+
+#include "net80211/ieee80211_ioctl.h"		/* for ieee80211_nodestats */
+#include "net80211/ieee80211_proto.h"		/* for proto macros on node */
+#include "net80211/ieee80211_var.h"
+#include "qtn/muc_phy_stats.h"
+#include "qtn/shared_defs.h"
+#include "qtn/qdrv_sch_data.h"
+#include "qtn/topaz_shared_params.h"
+
+/* #define IEEE80211_DEBUG_REFCNT */
+#define IEEE80211_NODEREF_INCR	1
+#define IEEE80211_NODEREF_DECR	0
+
+#ifdef IEEE80211_DEBUG_REFCNT
+#define REFDEBUG_ENTRY_MAX	150
+struct node_refdebug_info_entry {
+	const char *fname;
+	uint32_t line;
+	uint32_t count;
+};
+
+struct node_refdebug_info {
+	int entry_count;
+	uint32_t inc_count;
+	uint32_t dec_count;
+	struct node_refdebug_info_entry entry[REFDEBUG_ENTRY_MAX];
+};
+#endif
+void ieee80211_node_dbgref_history_dump(void);
+
+/* Define of the V optimization */
+enum ieee80211_vopt_state {
+	IEEE80211_VOPT_DISABLED	= 0,
+	IEEE80211_VOPT_ENABLED	= 1,
+	IEEE80211_VOPT_AUTO	= 2,
+};
+
+/*
+ * Each ieee80211com instance has a single timer that fires once a
+ * second.  This is used to initiate various work depending on the
+ * state of the instance: scanning (passive or active), ``transition''
+ * (waiting for a response to a management frame when operating
+ * as a station), and node inactivity processing (when operating
+ * as an AP).  For inactivity processing each node has a timeout
+ * set in its ni_inact field that is decremented on each timeout
+ * and the node is reclaimed when the counter goes to zero.  We
+ * use different inactivity timeout values depending on whether
+ * the node is associated and authorized (either by 802.1x or
+ * open/shared key authentication) or associated but yet to be
+ * authorized.  The latter timeout is shorter to more aggressively
+ * reclaim nodes that leave part way through the 802.1x exchange.
+ */
+#define	IEEE80211_INACT_WAIT	5		/* inactivity interval (secs) */
+#define	IEEE80211_INACT_INIT	(30/IEEE80211_INACT_WAIT)	/* initial */
+#define	IEEE80211_INACT_AUTH	(30/IEEE80211_INACT_WAIT)	/* associated but not authorized */
+#define	IEEE80211_INACT_RUN	(90 /IEEE80211_INACT_WAIT)	/* authorized */
+#define	IEEE80211_INACT_RUN_STA	(30 /IEEE80211_INACT_WAIT)	/* authorized for STA */
+#define	IEEE80211_INACT_RUN_WDS	(30 /IEEE80211_INACT_WAIT)	/* authorized for WDS */
+#define	IEEE80211_INACT_PROBE	(10/IEEE80211_INACT_WAIT)	/* probe */
+#define	IEEE80211_INACT_SCAN	(300/IEEE80211_INACT_WAIT)	/* scanned */
+#define IEEE80211_INACT_SEND_PKT_THRSH	3			/* The threshold that starts to send detection packet */
+
+#define	IEEE80211_TRANS_WAIT	2		/* mgt frame tx timer (secs) + random32()%HZ */
+
+#define	IEEE80211_NODE_HASHSIZE	32
+/* simple hash is enough for variation of macaddr */
+#define	IEEE80211_NODE_HASH(addr)	\
+	(((const u_int8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % \
+		IEEE80211_NODE_HASHSIZE)
+
+#define IEEE80211_NODE_TRAINING_NORMAL_MODE	1
+#define IEEE80211_NODE_TRAINING_TDLS_MODE	2
+
+struct ieee80211_rsnparms {
+	u_int8_t rsn_mcastcipher;	/* mcast/group cipher */
+	u_int8_t rsn_mcastkeylen;	/* mcast key length */
+	u_int8_t rsn_ucastcipherset;	/* unicast cipher set */
+	u_int8_t rsn_ucastcipher;	/* selected unicast cipher */
+	u_int8_t rsn_ucastkeylen;	/* unicast key length */
+	u_int8_t rsn_keymgmtset;	/* key management algorithms */
+	u_int8_t rsn_keymgmt;		/* selected key mgmt algo */
+	u_int16_t rsn_caps;		/* capabilities */
+};
+
+#ifdef CONFIG_QVSP
+struct ieee80211_ba_throt {
+	unsigned long		last_setup_jiffies;
+	uint32_t		unthroted_win_size;
+	uint32_t		throt_win_size;
+	uint32_t		throt_intv;		/* ms */
+	uint32_t		throt_dur;		/* ms */
+};
+#endif
+
+struct ieee80211_ba_tid {
+	enum ieee80211_ba_state	state;
+	unsigned long		state_deadline;
+	seqlock_t		state_lock;
+	enum ieee80211_ba_type	type;
+	u_int16_t		timeout;
+	u_int16_t		buff_size;
+	u_int16_t		seq;
+	u_int8_t		frag;
+	u_int8_t		dlg_out;
+	u_int8_t		dlg_in;
+	u_int16_t		flags;
+#ifdef CONFIG_QVSP
+	struct ieee80211_ba_throt ba_throt;
+#endif
+};
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+struct ieee80211_dotk_waitq_state {
+	int status;
+	int pending;
+};
+
+struct ieee80211_dotk_meas_state {
+	/* Radio measurement - STA Statistics */
+	struct ieee80211_dotk_waitq_state meas_state_sta;
+	/* ... */
+};
+#endif
+
+struct meas_info {
+	u_int8_t ni_meas_rep_mode;	/* report mode */
+	u_int32_t ni_meas_rep_time;	/* last receive time */
+	wait_queue_head_t meas_waitq;	/* wait queue for syncing with user space */
+	u_int8_t pending;		/* flag indicating pending request from user space */
+	u_int8_t reason;		/* 0 = success; 1 = timeout; 2 = node leave */
+
+	/* measurement result */
+	union {
+		uint8_t basic;
+		uint8_t cca;
+		uint8_t rpi[8];
+		uint8_t chan_load;
+		struct {
+			uint8_t antenna_id;
+			uint8_t anpi;
+			uint8_t ipi[11];
+		} noise_his;
+		struct {
+			uint8_t reported_frame_info;
+			uint8_t rcpi;
+			uint8_t rsni;
+			uint8_t bssid[IEEE80211_ADDR_LEN];
+			uint8_t antenna_id;
+			uint32_t parent_tsf;
+		} beacon;
+		struct {
+			uint32_t sub_ele_flag;
+			uint8_t ta[IEEE80211_ADDR_LEN];
+			uint8_t bssid[IEEE80211_ADDR_LEN];
+			uint8_t phy_type;
+			uint8_t avg_rcpi;
+			uint8_t last_rsni;
+			uint8_t last_rcpi;
+			uint8_t antenna_id;
+			uint16_t frame_count;
+		} frame_count;
+		struct {
+			uint8_t reason;
+			uint32_t tran_msdu_cnt;
+			uint32_t msdu_discard_cnt;
+			uint32_t msdu_fail_cnt;
+			uint32_t msdu_mul_retry_cnt;
+			uint32_t qos_lost_cnt;
+			uint32_t avg_queue_delay;
+			uint32_t avg_tran_delay;
+			uint8_t bin0_range;
+			uint32_t bins[6];
+		} tran_stream_cat;
+		struct {
+			uint8_t reason;
+			uint32_t mul_rec_msdu_cnt;
+			uint16_t first_seq_num;
+			uint16_t last_seq_num;
+			uint16_t mul_rate;
+		} multicast_diag;
+	} rep;
+};
+
+struct ieee80211_tpc_info {
+	struct  {
+		int8_t	min_txpow;
+		int8_t	max_txpow;
+	} tpc_sta_cap;
+
+	struct {
+		int8_t	node_txpow;
+		int8_t	node_link_margin;
+	} tpc_report;
+
+	struct {
+		u_int8_t reason;
+		u_int8_t tpc_pending;
+		wait_queue_head_t tpc_waitq;
+	} tpc_wait_info;
+};
+
+struct ieee80211_rm_link_measure_report {
+	struct {
+		int8_t tx_power;
+		int8_t link_margin;
+	} tpc_report;
+	uint8_t recv_antenna_id;
+	uint8_t tran_antenna_id;
+	uint8_t rcpi;
+	uint8_t rsni;
+};
+
+struct ieee80211_neighbor_report_item {
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint32_t bssid_info;
+	uint8_t operating_class;
+	uint8_t channel;
+	uint8_t phy_type;
+};
+
+#define IEEE80211_RM_NEIGHBOR_REPORT_ITEM_MAX	32
+
+struct ieee80211_rm_neighbor_report {
+	uint8_t report_count;
+	struct ieee80211_neighbor_report_item *item_table[IEEE80211_RM_NEIGHBOR_REPORT_ITEM_MAX];
+};
+
+struct ieee80211_node_table;
+struct ieee80211com;
+struct ieee80211vap;
+
+/**
+ * Local qdrv node statistics for smoothing
+ */
+struct qtn_node_phy_stats {
+	uint32_t avg_tx_phy_rate;
+	uint32_t avg_rx_phy_rate;
+};
+
+/*
+ * Node specific information.  Note that drivers are expected
+ * to derive from this structure to add device-specific per-node
+ * state.  This is done by overriding the ic_node_* methods in
+ * the ieee80211com structure.
+ */
+struct ieee80211_node {
+	struct ieee80211vap *ni_vap;
+	struct ieee80211com *ni_ic;
+	struct ieee80211_node_table *ni_table;
+	TAILQ_ENTRY(ieee80211_node) ni_list;
+	TAILQ_ENTRY(ieee80211_node) ni_addba_list;
+	LIST_ENTRY(ieee80211_node) ni_hash;
+	atomic_t ni_refcnt;
+	u_int ni_scangen;			/* gen# for timeout scan */
+	u_int8_t ni_authmode;			/* authentication algorithm */
+	u_int16_t ni_flags;			/* special-purpose state */
+	uint16_t ni_ext_flags;			/* extended special-purpose state */
+	u_int8_t ni_ath_flags;			/* Atheros feature flags */
+	/* NB: These must have the same values as IEEE80211_ATHC_* */
+#define IEEE80211_NODE_TURBOP	0x0001		/* Turbo prime enable */
+#define IEEE80211_NODE_COMP	0x0002		/* Compression enable */
+#define IEEE80211_NODE_FF	0x0004          /* Fast Frame capable */
+#define IEEE80211_NODE_XR	0x0008		/* Atheros WME enable */
+#define IEEE80211_NODE_AR	0x0010		/* AR capable */
+#define IEEE80211_NODE_BOOST	0x0080
+#define IEEE80211_NODE_PS_CHANGED	0x0200	/* PS state change */
+	u_int16_t ni_ath_defkeyindex;		/* Atheros def key index */
+#define IEEE80211_INVAL_DEFKEY	0x7FFF
+	u_int8_t ni_brcm_flags;			/* Broadcom feature flags */
+	u_int8_t ni_bbf_disallowed;		/* flag to disallow BBF */
+	u_int8_t ni_std_bf_disallowed;		/* flag to disallow standard BF */
+	uint16_t ni_associd;			/* assoc response */
+	uint16_t ni_node_idx;			/* local node index */
+	u_int16_t ni_txpower;			/* current transmit power (in 0.5 dBm) */
+	u_int16_t ni_vlan;			/* vlan tag */
+	u_int32_t *ni_challenge;			/* shared-key challenge */
+	u_int8_t *ni_wpa_ie;			/* captured WPA ie */
+	u_int8_t *ni_rsn_ie;			/* captured RSN ie */
+	u_int8_t *ni_osen_ie;			/* captured OSEN ie */
+	u_int8_t *ni_wme_ie;			/* captured WME ie */
+	u_int8_t *ni_wsc_ie;			/* captured WSC ie */
+	u_int8_t *ni_ath_ie;			/* captured Atheros ie */
+	u_int8_t *ni_qtn_assoc_ie;		/* captured Quantenna ie from assoc */
+	u_int8_t *ni_qtn_pairing_ie;		/* captured QTN Pairing IE from assoc */
+	u_int8_t *ni_qtn_brmacs;		/* captured QTN brmacs IE for TDLS */
+	u_int8_t ni_ext_role;			/* extender role of the node */
+	u_int8_t *ni_ext_bssid_ie;			/* captured extender bssid IE */
+	u_int8_t ni_vendor;
+	u_int16_t ni_txseqs[17];		/* tx seq per-tid */
+	u_int16_t ni_rxseqs[17];		/* rx seq previous per-tid*/
+	unsigned long ni_rxfragstamp;		/* time stamp of last rx frag */
+	struct sk_buff *ni_rxfrag;		/* rx frag reassembly */
+	struct ieee80211_rsnparms ni_rsn;	/* RSN/WPA parameters */
+	struct ieee80211_key ni_ucastkey;	/* unicast key */
+	int ni_rxkeyoff;			/* Receive key offset */
+	struct ieee80211_htcap ni_htcap;	/* parsed HT capabilities */
+	struct ieee80211_ie_htcap ni_ie_htcap;	/* received htcap IE */
+	struct ieee80211_htinfo ni_htinfo;	/* parsed HT information */
+	struct ieee80211_ie_htinfo ni_ie_htinfo;/* received htinfo IE */
+	uint32_t ni_rate_train;
+	uint32_t ni_rate_train_hash;
+	uint32_t ni_ver_sw;
+	uint16_t ni_ver_hw;
+	uint16_t ni_ver_platform_id;
+	uint32_t ni_ver_timestamp;
+	uint32_t ni_ver_flags;
+	uint8_t ni_vsp_version;
+#ifdef CONFIG_QVSP
+	uint32_t ni_vsp_ba_throt_bm;            /* VSP BA throttling bitmap */
+#endif
+
+	struct ieee80211_vhtcap		ni_vhtcap;
+	struct ieee80211_vhtop		ni_vhtop;
+	struct ieee80211_vht_mu_grp	ni_mu_grp;
+	struct ieee80211_ie_vhtcap	ni_ie_vhtcap;
+	struct ieee80211_ie_vhtop	ni_ie_vhtop;
+
+	struct qtn_node_phy_stats	ni_local_stats;
+	struct qtn_node_shared_stats	*ni_shared_stats;
+	struct qtn_node_shared_stats	*ni_shared_stats_phys;
+#ifdef CONFIG_QVSP
+#if TOPAZ_QTM
+	/* used to calculate traffic per check interval */
+	struct qtn_vsp_per_node_stats	ni_prev_vsp_stats;
+#endif
+#endif
+
+	struct qdrv_sch_node_data	ni_tx_sch;
+
+	struct ieee80211_action	ni_action;
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	/* action token */
+	u_int8_t ni_action_token;
+#endif
+	u_int8_t ni_implicit_ba; /* Implicit block ack flags as passed in by the client */
+	u_int8_t ni_implicit_ba_valid;
+	u_int16_t ni_implicit_ba_size;
+
+#ifndef WME_NUM_TID
+#define WME_NUM_TID 16
+#endif
+	/* block ack */
+	struct ieee80211_ba_tid ni_ba_rx[WME_NUM_TID];
+	struct ieee80211_ba_tid ni_ba_tx[WME_NUM_TID];
+	struct work_struct ni_tx_addba_task;
+#define IEEE80211_WDS_LINK_MAINTAIN_BA_TID 0
+	struct timer_list ni_training_timer;	/* timer for running rate training */
+#define NI_TRAINING_INIT	0x0
+#define NI_TRAINING_RUNNING	0x1
+#define NI_TRAINING_END		0x2
+	int ni_training_flag;
+
+	int ni_training_count;
+	unsigned long ni_training_start;
+	int ni_wds_ba_attempts;
+
+	spinlock_t ni_lock;
+
+	/* 11n information */
+	enum ieee80211_cwm_width ni_chwidth;	/* recommended tx channel width */
+	u_int8_t ni_newchwidth;	/* channel width changed */
+
+	/* hardware */
+	u_int32_t ni_rstamp;			/* recv timestamp */
+	u_int32_t ni_last_rx;			/* recv jiffies */
+	int32_t ni_rssi;			/* recv ssi */
+	int32_t ni_smthd_rssi;			/* Smoothed RSSI */
+	int32_t ni_atten_smoothed;		/* conservative index for rate ratio in scs channel ranking */
+	uint16_t ni_recent_cca_intf;
+	uint32_t ni_recent_cca_intf_jiffies;
+	uint16_t ni_recent_cca_intf_smthed;
+	uint16_t ni_others_rx_time_smthed;
+	uint16_t ni_others_tx_time_smthed;
+	uint16_t ni_others_time;		/* Total time (Rx + Tx) */
+	uint32_t ni_cca_intf_smth_jiffies;
+	uint32_t ni_others_time_smth_jiffies;
+	int32_t ni_recent_rxglitch_trig_consecut;    /* rxglitch consecutively exceed threshold */
+	uint32_t ni_recent_rxglitch;
+	uint32_t ni_recent_sp_fail;
+	uint32_t ni_recent_lp_fail;
+	uint16_t ni_recent_others_time;
+	uint16_t ni_recent_others_time_smth;
+	uint16_t ni_recent_tdls_tx_time;
+	uint16_t ni_recent_tdls_rx_time;
+	uint32_t ni_recent_cca_idle;
+	uint32_t ni_recent_cca_idle_smthed;
+	uint32_t ni_recent_cca_idle_smth_jiffies;
+	uint16_t ni_tdls_tx_time_smthed;
+	uint16_t ni_tdls_rx_time_smthed;
+	uint32_t ni_tdls_time_smth_jiffies;
+	int ni_hw_noise;
+
+	/* header */
+	u_int8_t ni_macaddr[IEEE80211_ADDR_LEN];
+	u_int8_t ni_bssid[IEEE80211_ADDR_LEN];
+
+	/* beacon, probe response */
+	union {
+		u_int8_t data[8];
+		__le64 tsf;
+	} ni_tstamp;				/* from last rcv'd beacon */
+
+	u_int16_t ni_raw_bintval;		/* raw beacon interval */
+	u_int16_t ni_intval;			/* beacon interval */
+	u_int16_t ni_intval_old;		/* beacon interval before first change */
+	u_int16_t ni_intval_cnt;		/* count of ni_intval != ni_intval_old */
+	unsigned long ni_intval_end;		/* end of transition interval jiffies */
+	uint64_t ni_tbtt;			/* TBTT of AP node */
+	uint64_t ni_dtim_tbtt;			/* dtim TBTT of AP node */
+
+	u_int16_t ni_capinfo;			/* capabilities */
+	u_int8_t ni_esslen;
+	u_int8_t ni_essid[IEEE80211_NWID_LEN];
+	struct ieee80211_rateset ni_rates;	/* negotiated rate set */
+	struct ieee80211_ht_rateset ni_htrates;	/* negotiated ht rate set */
+	struct ieee80211_channel *ni_chan;
+	uint8_t ni_supp_chans[IEEE80211_CHAN_BYTES];	/* supported channels bitmap */
+	uint32_t ni_chan_num;				/* supported channels number */
+	u_int16_t ni_fhdwell;			/* FH only */
+	u_int8_t ni_fhindex;			/* FH only */
+	u_int8_t ni_erp;				/* ERP from beacon/probe resp */
+	u_int16_t ni_timoff;			/* byte offset to TIM ie */
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	/* action frame, radio measurement category, sta statistics */
+	u_int8_t ni_rm_sta_seq;
+#endif
+	/* others */
+	struct sk_buff_head ni_savedq;		/* packets queued for pspoll */
+	short ni_inact;				/* inactivity mark count */
+	short ni_inact_reload;			/* inactivity reload value */
+	struct work_struct ni_inact_work;	/* inactivity workqueue */
+	int ni_txrate;				/* index to ni_rates[] */
+	struct ieee80211_nodestats ni_stats;	/* per-node statistics */
+	struct ieee80211vap *ni_prev_vap;	/* previously associated vap */
+	u_int8_t ni_uapsd;			/* U-APSD per-node flags matching WMM STA Qos Info field */
+	u_int16_t ni_uapsd_trigseq[WME_NUM_AC];	/* trigger suppression on retry */
+	__le16 ni_pschangeseq;
+
+	u_int16_t ni_linkqual;			/* link quality - provisional - currently TX PHY rate*/
+	u_int16_t ni_rx_phy_rate;
+	unsigned long ni_blacklist_timeout;	/* MAC filtering timeout in jiffies - 0 if not blacklisted*/
+	int ni_lncb_4addr;			/* Support 4-addr encap of LNCB packets (multicast) */
+	TAILQ_ENTRY(ieee80211_node) ni_lncb_lst;/* List of STAs supporting 4 address LNCB packets */
+	int is_in_lncb_lst;
+	TAILQ_ENTRY(ieee80211_node) ni_bridge_lst; /* List of bridge STAs */
+	int is_in_bridge_lst;
+	u_int64_t ni_start_time_assoc;		/* start time of association (jiffies) */
+	int ni_snr;
+	int ni_in_auth_state;
+	int ni_max_queue;
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	/* 802.11k related */
+	struct ieee80211_ie_qtn_rm_sta_all ni_qtn_rm_sta_all;
+	struct ieee80211_ie_rm_sta_grp221 ni_rm_sta_grp221;
+	wait_queue_head_t ni_dotk_waitq;
+	struct ieee80211_dotk_meas_state ni_dotk_meas_state;
+	unsigned long ni_last_update[RM_QTN_MAX+1];
+#endif
+	/* TPC related */
+	struct ieee80211_tpc_info ni_tpc_info;
+
+	/* Measurement related */
+	struct meas_info	ni_meas_info;	/* mainly for measurement report */
+
+	struct ieee80211_rm_link_measure_report ni_lm;
+	struct ieee80211_rm_neighbor_report ni_neighbor;
+
+	u_int32_t ni_ip_addr;
+#ifdef CONFIG_IPV6
+	struct in6_addr ipv6_llocal;		/* IPv6 link local address */
+#endif
+#define IEEE80211_IP_ADDR_FILTER_NONE 0
+#define IEEE80211_IP_ADDR_FILTER_DHCP_RSP 1
+#define IEEE80211_IP_ADDR_FILTER_ARP_RSP 2
+	uint8_t		ni_ip_addr_filter;
+	uint32_t	ni_qtn_flags;
+	uint32_t	last_tx_phy_rate;	/* For TDLS path selection */
+	uint32_t	rx_pkts;
+	uint32_t	tx_acks;
+	uint32_t	ni_sa_query_tid;
+	unsigned long	ni_sa_query_timeout;
+
+	unsigned long tdls_last_seen;		/* jiffies TDLS peer is last seen */
+	uint32_t tdls_last_path_sel;		/* Last path selection result */
+	int32_t tdls_path_sel_num;		/* Continuous same path selection number*/
+	uint16_t tdls_peer_associd;		/* tdls peer node's AID, allocated by AP, unique value at BSS */
+	enum ni_tdls_status tdls_status;	/* TDLS status */
+	uint8_t tdls_send_cs_req;		/* TDLS channel switch request has sent out */
+	uint8_t tdls_initiator;			/* TDLS peer as initiator */
+	uint8_t tdls_no_send_cs_resp;		/* TDLS channel switch response not send yet*/
+	unsigned long tdls_setup_start;		/* jiffies TDLS setup start */
+
+/* These values must be kept in sync with ieee80211_node_type_str */
+#define IEEE80211_NODE_TYPE_NONE	0
+#define IEEE80211_NODE_TYPE_VAP		1
+#define IEEE80211_NODE_TYPE_STA		2
+#define IEEE80211_NODE_TYPE_WDS		3
+#define IEEE80211_NODE_TYPE_TDLS	4
+	uint32_t ni_node_type;
+
+	uint8_t ni_used_auth_algo;
+	int32_t	rssi_avg_dbm;
+	uint8_t ni_wifi_mode;
+	uint8_t ni_vhtop_notif_mode;
+
+#ifdef IEEE80211_DEBUG_REFCNT
+	struct node_refdebug_info *ni_refdebug_info_p;
+#endif
+	uint8_t ni_coex;
+	uint8_t ni_obss_scan;
+	struct ieee80211_obss_scan_ie ni_obss_ie;
+#define IEEE80211_NODE_OBSS_CAPABLE	0x00000001
+#define IEEE80211_NODE_OBSS_RUNNING	0x00000002
+#define IEEE80211_NODE_OBSS_NOT_RUN	0x00000004
+
+	uint16_t ni_qtn_dfs_enabled;
+
+	uint32_t ni_rrm_capability;
+#define IEEE80211_NODE_NEIGHBOR_REPORT_CAPABLE		0x00000001
+#define IEEE80211_NODE_BEACON_ACTIVE_REPORT_CAPABLE	0x00000002
+#define IEEE80211_NODE_BEACON_PASSIVE_REPORT_CAPABLE	0x00000003
+#define IEEE80211_NODE_BEACON_TABLE_REPORT_CAPABLE	0x00000004
+	uint8_t	pending_beacon_req_token;
+
+	uint32_t ni_wnm_capability;
+#define IEEE80211_NODE_WNM_BTM_CAPABLE			0x00000001
+	struct timer_list ni_btm_resp_wait_timer;	/* timer for BTM response timeout */
+	uint8_t	ni_btm_req;				/* last btm request dialog token */
+	uint8_t *ni_tx_rsn_ie;			/* Transmitting RSN IE */
+	uint8_t *ni_rx_md_ie;			/* Mobility domain IE */
+	uint8_t *ni_tx_md_ie;			/* Mobility domain IE */
+	uint8_t *ni_rx_ft_ie;			/* Fast transition IE */
+	uint8_t *ni_tx_ft_ie;			/* Fast transition IE */
+	uint8_t ni_ft_capability;
+};
+MALLOC_DECLARE(M_80211_NODE);
+
+/*
+ * Association IDs are managed with a bit vector.
+ */
+#define	IEEE80211_NODE_AID(ni)			IEEE80211_AID((ni)->ni_associd)
+#define IEEE80211_NODE_IS_VHT(_ni)		(((_ni)->ni_flags & IEEE80211_NODE_VHT) != 0)
+#define IEEE80211_NODE_IS_HT(_ni)		(((_ni)->ni_flags & IEEE80211_NODE_HT) != 0)
+
+#define	IEEE80211_AID_SET(_ic, _b) \
+	((_ic)->ic_aid_bitmap[IEEE80211_AID(_b) / 32] |= \
+		(1 << (IEEE80211_AID(_b) % 32)))
+#define	IEEE80211_AID_CLR(_ic, _b) \
+	((_ic)->ic_aid_bitmap[IEEE80211_AID(_b) / 32] &= \
+		~(1 << (IEEE80211_AID(_b) % 32)))
+#define	IEEE80211_AID_ISSET(_ic, _b) \
+	((_ic)->ic_aid_bitmap[IEEE80211_AID(_b) / 32] & (1 << (IEEE80211_AID(_b) % 32)))
+
+#define	IEEE80211_NODE_STAT(ni,stat)		(ni->ni_stats.ns_##stat++)
+#define	IEEE80211_NODE_STAT_ADD(ni,stat,v)	(ni->ni_stats.ns_##stat += v)
+#define	IEEE80211_NODE_STAT_SET(ni,stat,v)	(ni->ni_stats.ns_##stat = v)
+
+#define WMM_UAPSD_NODE_IS_PWR_MGT(_ni) ( \
+		((_ni)->ni_flags & IEEE80211_NODE_PWR_MGT) && ((_ni)->ni_uapsd))
+#define WME_UAPSD_AC_CAN_TRIGGER(_ac, _ni) ( \
+		(WMM_UAPSD_NODE_IS_PWR_MGT(_ni)) && WME_UAPSD_AC_ENABLED((_ac), (_ni)->ni_uapsd) )
+
+#define	IEEE80211_NODE_IS_TDLS_ACTIVE(_ni)	((_ni)->tdls_status ==\
+			IEEE80211_TDLS_NODE_STATUS_ACTIVE)
+#define	IEEE80211_NODE_IS_NONE_TDLS(_ni)	((_ni)->tdls_status ==\
+			IEEE80211_TDLS_NODE_STATUS_NONE)
+#define	IEEE80211_NODE_IS_TDLS_STARTING(_ni)	((_ni)->tdls_status ==\
+			IEEE80211_TDLS_NODE_STATUS_STARTING)
+#define	IEEE80211_NODE_IS_TDLS_INACTIVE(_ni)	((_ni)->tdls_status ==\
+			IEEE80211_TDLS_NODE_STATUS_INACTIVE)
+#define	IEEE80211_NODE_IS_TDLS_IDLE(_ni)	((_ni)->tdls_status ==\
+			IEEE80211_TDLS_NODE_STATUS_IDLE)
+
+#define WME_UAPSD_NODE_MAXQDEPTH	8
+#define IEEE80211_NODE_UAPSD_USETIM(_ni) (((_ni)->ni_uapsd & 0xF) == 0xF )
+#define WME_UAPSD_NODE_INVALIDSEQ	0xffff
+#define WME_UAPSD_NODE_TRIGSEQINIT(_ni)	(memset(&(_ni)->ni_uapsd_trigseq[0], 0xff, sizeof((_ni)->ni_uapsd_trigseq)))
+
+#define IEEE80211_NODE_MU_DEL_GRP(_ni, _grp)		IEEE80211_MU_DEL_GRP(_ni->ni_mu_grp, _grp)
+#define IEEE80211_NODE_MU_ADD_GRP(_ni, _grp, _pos)	IEEE80211_MU_ADD_GRP(_ni->ni_mu_grp, _grp, _pos)
+#define IEEE80211_NODE_MU_IS_GRP_MBR(_ni, _grp)		IEEE80211_MU_IS_GRP_MBR(_ni->ni_mu_grp, _grp)
+#define IEEE80211_NODE_MU_GRP_POS(_ni, _grp)		IEEE80211_MU_GRP_POS(_ni->ni_mu_grp, _grp)
+
+static __inline int ieee80211_node_is_running(const struct ieee80211_node *ni)
+{
+	return (IEEE80211_NODE_IS_NONE_TDLS(ni) || IEEE80211_NODE_IS_TDLS_ACTIVE(ni));
+}
+
+static __inline int ieee80211_node_is_qtn(const struct ieee80211_node *ni)
+{
+	return ni->ni_qtn_assoc_ie != NULL;
+}
+
+void ieee80211_node_attach(struct ieee80211com *);
+void ieee80211_node_detach(struct ieee80211com *);
+void ieee80211_node_vattach(struct ieee80211vap *);
+void ieee80211_node_latevattach(struct ieee80211vap *);
+void ieee80211_node_vdetach(struct ieee80211vap *);
+
+static __inline int
+ieee80211_node_is_authorized(const struct ieee80211_node *ni)
+{
+	return (ni->ni_flags & IEEE80211_NODE_AUTH);
+}
+
+static __inline int
+ieee80211_node_power_save_scheme(const struct ieee80211_node *ni)
+{
+/*Will expand the bitmap to include other power save schemes*/
+#define POWER_SAVE_SCHEME_UAPSD		BIT(0)
+	uint32_t ret = 0;
+	if ((ni->ni_flags & IEEE80211_NODE_UAPSD)) {
+		ret |= POWER_SAVE_SCHEME_UAPSD;
+	}
+	return ret;
+}
+
+void _ieee80211_node_authorize(struct ieee80211_node *);
+void ieee80211_node_authorize(struct ieee80211_node *);
+void ieee80211_node_unauthorize(struct ieee80211_node *);
+void ieee80211_tdls_add_rate_detection(struct ieee80211_node *);
+void ieee80211_node_training_start(struct ieee80211_node *ni, int immediate);
+void ieee80211_restore_bw(struct ieee80211vap *vap, struct ieee80211com *ic);
+void ieee80211_create_bss(struct ieee80211vap *, struct ieee80211_channel *);
+void ieee80211_reset_bss(struct ieee80211vap *);
+int ieee80211_ibss_merge(struct ieee80211_node *);
+struct ieee80211_scan_entry;
+int ieee80211_sta_join(struct ieee80211vap *, const struct ieee80211_scan_entry *);
+void ieee80211_sta_join1(struct ieee80211vap *, struct ieee80211_node *, int reauth);
+void ieee80211_sta_join1_tasklet(IEEE80211_TQUEUE_ARG);
+void ieee80211_sta_leave(struct ieee80211_node *);
+void ieee80211_sta_fast_rejoin(unsigned long arg);
+void ieee80211_send_vht_opmode_action(struct ieee80211vap *vap,
+                                       struct ieee80211_node *ni,
+                                       uint8_t bw, uint8_t rx_nss);
+int ieee80211_send_20_40_bss_coex(struct ieee80211vap *vap);
+int ieee80211_check_40_bw_allowed(struct ieee80211vap *vap);
+void ieee80211_update_active_chanlist(struct ieee80211com *ic, int bw);
+void ieee80211_nonqtn_sta_join(struct ieee80211vap *vap, struct ieee80211_node *ni, const char *caller);
+void ieee80211_nonqtn_sta_leave(struct ieee80211vap *vap, struct ieee80211_node *ni, const char *caller);
+
+#define WDS_AGING_TIME		600   /* 10 minutes */
+#define WDS_AGING_COUNT		2
+#define WDS_AGING_STATIC	0xffff
+#define WDS_AGING_TIMER_VAL	(WDS_AGING_TIME / 2)
+
+struct ieee80211_wds_addr {
+	LIST_ENTRY(ieee80211_wds_addr) wds_hash;
+	u_int8_t		wds_macaddr[IEEE80211_ADDR_LEN];
+	struct ieee80211_node	*wds_ni;
+	u_int16_t		wds_agingcount;
+};
+
+/*
+ * Table of ieee80211_node instances.  Each ieee80211com
+ * has at least one for holding the scan candidates.
+ * When operating as an access point or in ibss mode there
+ * is a second table for associated stations or neighbors.
+ */
+struct ieee80211_node_table {
+	struct ieee80211com *nt_ic;		/* back reference */
+	ieee80211_node_lock_t nt_nodelock;	/* on node table */
+	TAILQ_HEAD(, ieee80211_node) nt_node;	/* information of all nodes */
+	ATH_LIST_HEAD(, ieee80211_node) nt_hash[IEEE80211_NODE_HASHSIZE];
+	ATH_LIST_HEAD(, ieee80211_wds_addr) nt_wds_hash[IEEE80211_NODE_HASHSIZE];
+	const char *nt_name;			/* for debugging */
+	ieee80211_scan_lock_t nt_scanlock;	/* on nt_scangen */
+	u_int nt_scangen;			/* gen# for timeout scan */
+	int nt_inact_init;			/* initial node inact setting */
+	struct timer_list nt_wds_aging_timer;	/* timer to age out wds entries */
+};
+
+struct ieee80211_node *ieee80211_alloc_node(struct ieee80211_node_table *,
+	struct ieee80211vap *, const u_int8_t *, const char*);
+struct ieee80211_node *ieee80211_tmp_node(struct ieee80211vap *,
+	const u_int8_t *);
+struct ieee80211_node *_ieee80211_tmp_node(struct ieee80211vap *,
+	const u_int8_t *, const u_int8_t *);
+struct ieee80211_node *ieee80211_dup_bss(struct ieee80211vap *,
+	const u_int8_t *);
+
+#define ieee80211_check_free_node(_held, _ni) do	\
+{							\
+	if (_held) {					\
+		ieee80211_free_node(_ni);		\
+	}						\
+} while (0)
+
+#ifdef IEEE80211_DEBUG_REFCNT
+void ieee80211_node_dbgref(const struct ieee80211_node *ni, const char *filename,
+				const int line, int is_increased);
+static __inline void
+ieee80211_ref_node_debug(struct ieee80211_node *ni, const char * filename, const int line)
+{
+	ieee80211_node_incref(ni);
+	ieee80211_node_dbgref(ni, filename, line, IEEE80211_NODEREF_INCR);
+}
+
+void ieee80211_free_node_debug(struct ieee80211_node *,
+			const char *filename,
+			int line);
+struct ieee80211_node *ieee80211_find_node_debug(struct ieee80211_node_table *,
+							const u_int8_t *macaddr,
+							const char *filename,
+							int line);
+struct ieee80211_node *ieee80211_find_node_by_node_idx_debug(struct ieee80211vap *vap,
+			uint16_t aid,
+			const char *filename,
+			int line);
+struct ieee80211_node *ieee80211_find_rxnode_debug(struct ieee80211com *ic,
+							const struct ieee80211_frame_min *wh,
+							const char *filename,
+							int line);
+struct ieee80211_node *ieee80211_find_txnode_debug(struct ieee80211vap *vap,
+			const u_int8_t *macaddr,
+			const char *filename,
+			int line);
+
+struct ieee80211_node *ieee80211_find_node_by_idx_debug(struct ieee80211com *ic,
+						  struct ieee80211vap *vap,
+						  uint16_t node_idx,
+						  const char *filename,
+						  int line);
+
+#define ieee80211_ref_node(ni) \
+	ieee80211_ref_node_debug(ni, __FILE__, __LINE__)
+
+#define ieee80211_free_node(ni) \
+	ieee80211_free_node_debug(ni, __FILE__, __LINE__)
+
+#define ieee80211_find_node(nt, mac) \
+	ieee80211_find_node_debug(nt, mac, __FILE__, __LINE__)
+
+#define ieee80211_find_node_by_node_idx(vap, aid) \
+	ieee80211_find_node_by_node_idx_debug(vap, aid, __FILE__, __LINE__)
+
+#define ieee80211_find_node_by_idx(ic, vap, aid) \
+	ieee80211_find_node_by_idx_debug(ic, vap, aid, __FILE__, __LINE__)
+
+#define ieee80211_find_rxnode(nt, wh) \
+	ieee80211_find_rxnode_debug(nt, wh, __FILE__, __LINE__)
+
+#define ieee80211_find_txnode(nt, mac) \
+	ieee80211_find_txnode_debug(nt, mac, __FILE__, __LINE__)
+
+#define ieee80211_find_node_by_ip_addr(vap, ip_addr) \
+	ieee80211_find_node_by_ip_addr_debug(vap, ip_addr, __FILE__, __LINE__)
+
+#ifdef CONFIG_IPV6
+#define ieee80211_find_node_by_ipv6_addr(vap, ipv6_addr) \
+	ieee80211_find_node_by_ipv6_addr_debug(vap, ipv6_addr, __FILE__, __LINE__)
+#endif
+
+#else /* IEEE80211_DEBUG_REFCNT */
+
+#define ieee80211_node_dbgref(ni, filename, line, is_increased)
+
+static __inline void
+ieee80211_ref_node(struct ieee80211_node *ni)
+{
+	ieee80211_node_incref(ni);
+}
+
+void ieee80211_free_node(struct ieee80211_node *);
+struct ieee80211_node *ieee80211_find_node(struct ieee80211_node_table *nt,
+						const u_int8_t *macaddr);
+struct ieee80211_node *ieee80211_find_node_by_idx(struct ieee80211com *ic,
+						  struct ieee80211vap *vap,
+						  uint16_t node_idx);
+struct ieee80211_node *ieee80211_find_node_by_node_idx(struct ieee80211vap *vap, uint16_t node_idx);
+struct ieee80211_node *ieee80211_find_rxnode(struct ieee80211com *vap,
+						const struct ieee80211_frame_min *macaddr);
+struct ieee80211_node *ieee80211_find_txnode(struct ieee80211vap *vap, const u_int8_t *macaddr);
+struct ieee80211_node *ieee80211_find_node_by_ip_addr(struct ieee80211vap *vap,
+							uint32_t ip_addr);
+
+#ifdef CONFIG_IPV6
+struct ieee80211_node *ieee80211_find_node_by_ipv6_addr(struct ieee80211vap *vap,
+							struct in6_addr *ipv6_addr);
+#endif
+
+#define ieee80211_ref_node_debug(ni, filename, line)	ieee80211_ref_node(ni)
+
+#endif /* IEEE80211_DEBUG_REFCNT */
+
+void ieee80211_sta_assocs_inc(struct ieee80211vap *vap, const char *caller);
+void ieee80211_sta_assocs_dec(struct ieee80211vap *vap, const char *caller);
+
+int ieee80211_add_wds_addr(struct ieee80211_node_table *, struct ieee80211_node *,
+	const u_int8_t *, u_int8_t);
+void ieee80211_remove_wds_addr(struct ieee80211_node_table *, const u_int8_t *);
+void ieee80211_del_wds_node(struct ieee80211_node_table *,
+	struct ieee80211_node *);
+struct ieee80211_node *ieee80211_find_wds_node(struct ieee80211_node_table *,
+	const u_int8_t *);
+
+typedef void ieee80211_iter_func(void *, struct ieee80211_node *);
+void ieee80211_iterate_nodes(struct ieee80211_node_table *,
+	ieee80211_iter_func *, void *, int ignore_blacklisted);
+#ifdef CONFIG_QVSP
+void ieee80211_node_vsp_send_action(void *arg, struct ieee80211_node *ni);
+#endif
+void ieee80211_iterate_dev_nodes(struct net_device *,
+	struct ieee80211_node_table *, ieee80211_iter_func *, void *,
+	int ignore_blacklisted);
+
+void	ieee80211_dump_node(struct ieee80211_node_table *,
+	struct ieee80211_node *);
+void	ieee80211_dump_nodes(struct ieee80211_node_table *);
+
+struct ieee80211_node *ieee80211_fakeup_adhoc_node(struct ieee80211vap *,
+	const u_int8_t macaddr[]);
+struct ieee80211_scanparams;
+struct ieee80211_node *ieee80211_add_neighbor(struct ieee80211vap *,
+	const struct ieee80211_frame *, const struct ieee80211_scanparams *);
+int ieee80211_aid_acquire(struct ieee80211com *ic, struct ieee80211_node *ni);
+void ieee80211_node_join(struct ieee80211_node *, int);
+void ieee80211_node_leave(struct ieee80211_node *);
+void ieee80211_node_set_chan(struct ieee80211com *ic, struct ieee80211_node *ni);
+uint8_t ieee80211_getrssi(struct ieee80211com *);
+void ieee80211_idx_add(struct ieee80211_node *ni, uint16_t new_idx);
+void ieee80211_node_ba_state_clear(struct ieee80211_node *ni);
+
+/*
+ * Set up the implicit block ACK agreement for the given node.
+ * @ni: the node to establish the implicit BA agreement with; the bitmap of
+ * TIDs to use is taken from the ni_implicit_ba field of the node structure.
+ */
+void ieee80211_node_implicit_ba_setup(struct ieee80211_node *ni);
+void ieee80211_node_ba_del(struct ieee80211_node *ni, uint8_t tid, uint8_t is_tx, uint16_t reason);
+
+void ieee80211_node_tx_ba_set_state(struct ieee80211_node *ni, uint8_t tid, enum ieee80211_ba_state state, unsigned delay);
+
+uint16_t ieee80211_find_aid_by_mac_addr(struct ieee80211_node_table *nt,
+		const u_int8_t *macaddr);
+void ieee80211_disconnect_node(struct ieee80211vap *vap, struct ieee80211_node *ni);
+int ieee80211_node_is_intel(struct ieee80211_node *ni);
+int ieee80211_node_is_realtek(struct ieee80211_node *ni);
+int ieee80211_node_is_opti_node(struct ieee80211_node *ni);
+#endif /* _NET80211_IEEE80211_NODE_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_power.h b/drivers/qtn/include/kernel/net80211/ieee80211_power.h
new file mode 100644
index 0000000..3b32e94
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_power.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_power.h 1441 2006-02-06 16:03:21Z mrenzmann $
+ */
+#ifndef _NET80211_IEEE80211_POWER_H_
+#define _NET80211_IEEE80211_POWER_H_
+
+struct ieee80211com;
+struct ieee80211vap;
+
+void	ieee80211_power_attach(struct ieee80211com *);
+void	ieee80211_power_detach(struct ieee80211com *);
+void	ieee80211_power_vattach(struct ieee80211vap *);
+void	ieee80211_power_latevattach(struct ieee80211vap *);
+void	ieee80211_power_vdetach(struct ieee80211vap *);
+
+struct ieee80211_node;
+
+int	ieee80211_node_saveq_drain(struct ieee80211_node *);
+int	ieee80211_node_saveq_age(struct ieee80211_node *);
+void	ieee80211_pwrsave(struct ieee80211_node *, struct sk_buff *);
+void	ieee80211_node_pwrsave(struct ieee80211_node *, int);
+void	ieee80211_sta_pwrsave(struct ieee80211vap *, int);
+#endif /* _NET80211_IEEE80211_POWER_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_proto.h b/drivers/qtn/include/kernel/net80211/ieee80211_proto.h
new file mode 100644
index 0000000..a0bc2f0
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_proto.h
@@ -0,0 +1,618 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_proto.h 2029 2007-01-30 04:01:29Z proski $
+ */
+#ifndef _NET80211_IEEE80211_PROTO_H_
+#define _NET80211_IEEE80211_PROTO_H_
+
+/* Temporary workaround: mutual header inclusion prevents defining this in its proper header. Needs a real fix. */
+#define	IEEE80211_C_11N		0x00000010	/* CAPABILITY: 11n HT available */
+/*
+ * 802.11 protocol implementation definitions.
+ */
+
+enum ieee80211_state {
+	IEEE80211_S_INIT	= 0,	/* default state */
+	IEEE80211_S_SCAN	= 1,	/* scanning */
+	IEEE80211_S_AUTH	= 2,	/* try to authenticate */
+	IEEE80211_S_ASSOC	= 3,	/* try to assoc */
+	IEEE80211_S_RUN		= 4,	/* associated */
+};
+#define	IEEE80211_S_MAX		(IEEE80211_S_RUN + 1)
+
+#define	IEEE80211_SEND_MGMT(_ni,_type,_arg) \
+	((*(_ni)->ni_ic->ic_send_mgmt)(_ni, _type, _arg))
+
+extern const char *ieee80211_mgt_subtype_name[];
+extern const char *ieee80211_ctl_subtype_name[];
+extern const char *ieee80211_state_name[IEEE80211_S_MAX];
+extern const char *ieee80211_wme_acnames[];
+extern const char *ieee80211_phymode_name[];
+
+void ieee80211_proto_attach(struct ieee80211com *);
+void ieee80211_proto_detach(struct ieee80211com *);
+void ieee80211_proto_vattach(struct ieee80211vap *);
+void ieee80211_proto_vdetach(struct ieee80211vap *);
+
+struct ieee80211_node;
+struct ieee80211vap *ieee80211_get_sta_vap(struct ieee80211com *ic);
+struct ieee80211_channel *ieee80211_doth_findchan(struct ieee80211vap *, u_int8_t);
+int ieee80211_input(struct ieee80211_node *, struct sk_buff *, int, u_int32_t);
+int ieee80211_input_all(struct ieee80211com *, struct sk_buff *, int, u_int32_t);
+int ieee80211_setup_rates(struct ieee80211_node *, const u_int8_t *,
+	const u_int8_t *, int);
+int ieee80211_parse_htcap(struct ieee80211_node *ni, u_int8_t *ie);
+int ieee80211_parse_htinfo(struct ieee80211_node *ni, u_int8_t *ie);
+void ieee80211_parse_vhtcap(struct ieee80211_node *ni, u_int8_t *ie);
+int ieee80211_parse_vhtop(struct ieee80211_node *ni, u_int8_t *ie);
+int ieee80211_parse_rates(struct ieee80211_node *ni,
+	const u_int8_t *rates, const u_int8_t *xrates);
+int ieee80211_parse_supp_chan(struct ieee80211_node *ni, uint8_t *ie);
+void ieee80211_saveie(u_int8_t **, const u_int8_t *);
+void ieee80211_saveath(struct ieee80211_node *, u_int8_t *);
+int ieee80211_input_tdls_qtnie(struct ieee80211_node *ni, struct ieee80211vap *vap,
+				struct ieee80211_ie_qtn *qtnie);
+void ieee80211_update_current_mode(struct ieee80211_node *ni);
+#if TOPAZ_RX_ACCELERATE
+int ieee80211_tdls_tqe_path_check(struct ieee80211_node *ni, struct sk_buff *skb, int rssi, uint16_t ether_type);
+#endif
+void ieee80211_recv_mgmt(struct ieee80211_node *, struct sk_buff *,
+	int, int, u_int32_t);
+void ieee80211_sta_pwrsave(struct ieee80211vap *, int);
+void ieee80211_parent_queue_xmit(struct sk_buff *);
+struct sk_buff * ieee80211_get_nulldata(struct ieee80211_node *ni);
+int ieee80211_send_nulldata(struct ieee80211_node *);
+int ieee80211_send_qosnulldata(struct ieee80211_node *, int);
+int ieee80211_send_qosnulldata_ext(struct ieee80211com *ic, uint8_t *mac, int pwr);
+void ieee80211_send_csa_frame(struct ieee80211vap *vap, u_int8_t csa_mode,
+	u_int8_t csa_chan, u_int8_t csa_count, u_int64_t tsf);
+void ieee80211_get_channel_bw_offset(struct ieee80211com *ic, int16_t *is_40, int16_t *offset);
+int ieee80211_send_mgmt(struct ieee80211_node *, int, int);
+void ieee80211_mgmt_output(struct ieee80211_node *ni, struct sk_buff *skb, int type,
+	const u_int8_t da[IEEE80211_ADDR_LEN]);
+void ieee80211_tdls_mgmt_output(struct ieee80211_node *ni,
+	struct sk_buff *skb, const uint8_t type, const uint8_t subtype,
+	const uint8_t *da, const uint8_t *bssid);
+void ieee80211_send_pspoll(struct ieee80211_node *ni);
+void ieee80211_initiate_scan(struct ieee80211vap *vap);
+struct sk_buff * ieee80211_get_qosnulldata(struct ieee80211_node *ni, int ac);
+struct sk_buff *
+ieee80211_get_probereq(struct ieee80211_node *ni,
+	const u_int8_t sa[IEEE80211_ADDR_LEN],
+	const u_int8_t da[IEEE80211_ADDR_LEN],
+	const u_int8_t bssid[IEEE80211_ADDR_LEN],
+	const u_int8_t *ssid, size_t ssidlen,
+	const void *optie, size_t optielen);
+int ieee80211_send_probereq(struct ieee80211_node *,
+	const u_int8_t sa[IEEE80211_ADDR_LEN],
+	const u_int8_t da[IEEE80211_ADDR_LEN],
+	const u_int8_t bssid[IEEE80211_ADDR_LEN],
+	const u_int8_t *, size_t, const void *, size_t);
+struct sk_buff *ieee80211_encap(struct ieee80211_node *, struct sk_buff *, int *);
+void ieee80211_pwrsave(struct ieee80211_node *, struct sk_buff *);
+void ieee80211_send_delba(struct ieee80211_node *ni, int tid, int tx, int reason);
+
+void ieee80211_reset_erp(struct ieee80211com *, enum ieee80211_phymode);
+void ieee80211_set_shortslottime(struct ieee80211com *, int);
+int ieee80211_iserp_rateset(struct ieee80211com *, struct ieee80211_rateset *);
+void ieee80211_set11gbasicrates(struct ieee80211_rateset *, enum ieee80211_phymode);
+enum ieee80211_phymode ieee80211_get11gbasicrates(struct ieee80211_rateset *);
+void ieee80211_send_pspoll(struct ieee80211_node *);
+void ieee80211_send_vht_opmode_to_all(struct ieee80211com *ic, uint8_t bw);
+void ieee80211_tkip_mic_failure(struct ieee80211vap *, int count);
+void ieee80211_send_rm_req_stastats(struct ieee80211_node *ni, u_int32_t flags);
+int32_t ieee80211_send_rm_rep_stastats(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t group_id,
+		u_int16_t duration_tu,
+		void *sub_item);
+void ieee80211_send_rm_req_cca(struct ieee80211_node *ni);
+void ieee80211_send_action_cca_report(struct ieee80211_node *ni, uint8_t token,
+		uint16_t cca_intf, uint64_t tsf, uint16_t duration, uint32_t sp_fail,
+		uint32_t lp_fail, uint16_t others_time, uint8_t *extra_ie, uint16_t ie_len);
+void ieee80211_send_action_fat_report(struct ieee80211_node *ni, uint8_t token,
+		uint16_t cca_intf, uint64_t tsf, uint16_t duration, uint16_t idle_time);
+void ieee80211_send_action_dfs_report(struct ieee80211_node *ni);
+void ieee80211_send_rm_req_stastats_all(struct ieee80211com *ic);
+void ieee80211_send_rm_req_chan_load(struct ieee80211_node *ni,
+				u_int8_t channel,
+				u_int16_t duration_ms,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_rm_req_noise_his(struct ieee80211_node *ni,
+				u_int8_t channel,
+				u_int16_t duration_ms,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_rm_req_beacon(struct ieee80211_node *ni,
+				u_int8_t op_class,
+				u_int8_t channel,
+				u_int16_t duration_ms,
+				u_int8_t mode,
+				u_int8_t *bssid,
+				u_int8_t *ssid,
+				u_int8_t ssid_len,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_rm_req_frame(struct ieee80211_node *ni,
+				u_int8_t op_class,
+				u_int8_t channel,
+				u_int16_t duration_ms,
+				u_int8_t type,
+				u_int8_t *mac_address,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_rm_req_tran_stream_cat(struct ieee80211_node *ni,
+				u_int16_t duration_ms,
+				u_int8_t *peer_sta,
+				u_int8_t tid,
+				u_int8_t bin0,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_rm_req_multicast_diag(struct ieee80211_node *ni,
+				u_int16_t duration_ms,
+				u_int8_t *group_mac,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_link_measure_request(struct ieee80211_node *ni,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_neighbor_report_request(struct ieee80211_node *ni,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail);
+void ieee80211_send_rm_rep_chan_load(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		u_int8_t channel_load);
+void ieee80211_send_rm_rep_noise_his(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		u_int8_t antenna_id,
+		u_int8_t anpi,
+		u_int8_t *ipi);
+void ieee80211_send_rm_rep_beacon(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		u_int8_t reported_frame_info,
+		u_int8_t rcpi,
+		u_int8_t rsni,
+		u_int8_t *bssid,
+		u_int8_t antenna_id,
+		u_int8_t *parent_tsf);
+void ieee80211_send_rm_rep_frame(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		void *sub_ele);
+void ieee80211_send_rm_rep_multicast_diag(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int16_t duration_tu,
+		u_int8_t *group_mac,
+		u_int8_t reason,
+		u_int32_t mul_rec_msdu_cnt,
+		u_int16_t first_seq_num,
+		u_int16_t last_seq_num,
+		u_int16_t mul_rate);
+int32_t ieee80211_send_meas_request_basic(struct ieee80211_node *ni,
+		u_int8_t channel,
+		u_int16_t tsf_offset,
+		u_int16_t duration,
+		unsigned long expire,
+		void *fn_success,
+		void *fn_fail);
+int32_t ieee80211_send_meas_request_cca(struct ieee80211_node *ni,
+		u_int8_t channel,
+		u_int16_t tsf_offset,
+		u_int16_t duration,
+		unsigned long expire,
+		void *fn_success,
+		void *fn_fail);
+int32_t ieee80211_send_meas_request_rpi(struct ieee80211_node *ni,
+		u_int8_t channel,
+		u_int16_t tsf_offset,
+		u_int16_t duration,
+		unsigned long expire,
+		void *fn_success,
+		void *fn_fail);
+int32_t ieee80211_send_meas_report_basic(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t channel,
+		u_int64_t start_tsf,
+		u_int16_t duration,
+		u_int8_t basic_report);
+int32_t ieee80211_send_meas_report_cca(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t channel,
+		u_int64_t start_tsf,
+		u_int16_t duration,
+		u_int8_t cca_report);
+int32_t ieee80211_send_meas_report_rpi(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t channel,
+		u_int64_t start_tsf,
+		u_int16_t duration,
+		u_int8_t *rpi_report);
+void ieee80211_send_neighbor_report_response(struct ieee80211_node *ni,
+					u_int8_t token,
+					u_int8_t bss_num,
+					void *table);
+void ieee80211_send_notify_chan_width_action(struct ieee80211vap *vap,
+					     struct ieee80211_node *ni,
+					     u_int32_t width);
+void ieee80211_send_sa_query (struct ieee80211_node *ni, u_int8_t action,
+					u_int16_t tid);
+int ieee80211_send_wnm_bss_tm_solicited_req(struct ieee80211_node *ni,
+		uint8_t mode,
+		uint16_t disassoc_timer,
+		uint8_t valid_int,
+		const uint8_t *bss_term_dur,
+		const char *url,
+		const uint8_t *nei_rep,
+		size_t nei_rep_len,
+		uint8_t token);
+int ieee80211_send_wnm_bss_tm_unsolicited_req(struct ieee80211_node *ni,
+		uint8_t mode,
+		uint16_t disassoc_timer,
+		uint8_t valid_int,
+		const uint8_t *bss_term_dur,
+		const char *url,
+		const uint8_t *nei_rep,
+		size_t nei_rep_len,
+		uint8_t token);
+int ieee80211_wnm_btm_create_pref_candidate_list(struct ieee80211_node *ni, uint8_t **list);
+
+/*
+ * Return the size of the 802.11 header for a management or data frame.
+ */
+static __inline int
+ieee80211_hdrsize(u_int32_t ht_capable, const void *data)
+{
+	const struct ieee80211_frame *wh = data;
+	int size = sizeof(struct ieee80211_frame);
+
+	/* NB: we don't handle control frames */
+	KASSERT((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL,
+		("%s: control frame", __func__));
+	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
+		size += IEEE80211_ADDR_LEN;
+	if (IEEE80211_QOS_HAS_SEQ(wh))
+		size += sizeof(u_int16_t);
+	if (ht_capable) {
+		if ((wh->i_fc[1] & IEEE80211_FC1_ORDER) == IEEE80211_FC1_ORDER) {
+			/* Frame has HT control field in the header */
+			size += sizeof(u_int32_t);
+		}
+	}
+	return size;
+}
+
+/*
+ * Like ieee80211_hdrsize, but handles any type of frame.
+ */
+static __inline int
+ieee80211_anyhdrsize(u_int32_t ht_capable, const void *data)
+{
+	const struct ieee80211_frame *wh = data;
+
+	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
+		switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
+		case IEEE80211_FC0_SUBTYPE_CTS:
+		case IEEE80211_FC0_SUBTYPE_ACK:
+			return sizeof(struct ieee80211_frame_ack);
+		}
+		return sizeof(struct ieee80211_frame_min);
+	} else {
+		if (ht_capable)
+			return ieee80211_hdrsize(IEEE80211_HT_CAPABLE, data);
+		else
+			return ieee80211_hdrsize(IEEE80211_NON_HT_CAPABLE, data);
+	}
+}
+
+/*
+ * Template for an in-kernel authenticator.  Authenticators
+ * register with the protocol code and are typically loaded
+ * as separate modules as needed.
+ */
+struct ieee80211_authenticator {
+	const char *ia_name;		/* printable name */
+	int (*ia_attach)(struct ieee80211vap *);
+	void (*ia_detach)(struct ieee80211vap *);
+	void (*ia_node_join)(struct ieee80211_node *);
+	void (*ia_node_leave)(struct ieee80211_node *);
+};
+void ieee80211_authenticator_register(int, const struct ieee80211_authenticator *);
+void ieee80211_authenticator_unregister(int);
+const struct ieee80211_authenticator *ieee80211_authenticator_get(int);
+
+struct eapolcom;
+/*
+ * Template for an in-kernel authenticator backend.  Backends
+ * register with the protocol code and are typically loaded
+ * as separate modules as needed.
+ */
+struct ieee80211_authenticator_backend {
+	const char *iab_name;		/* printable name */
+	int (*iab_attach)(struct eapolcom *);
+	void (*iab_detach)(struct eapolcom *);
+};
+void ieee80211_authenticator_backend_register(
+	const struct ieee80211_authenticator_backend *);
+void ieee80211_authenticator_backend_unregister(
+	const struct ieee80211_authenticator_backend *);
+const struct ieee80211_authenticator_backend *
+	ieee80211_authenticator_backend_get(const char *);
+
+/*
+ * Template for an MAC ACL policy module.  Such modules
+ * register with the protocol code and are passed the sender's
+ * address of each received frame for validation.
+ */
+struct ieee80211_aclator {
+	const char *iac_name;		/* printable name */
+	int (*iac_attach)(struct ieee80211vap *);
+	void (*iac_detach)(struct ieee80211vap *);
+	int (*iac_check)(struct ieee80211vap *,
+		const u_int8_t mac[IEEE80211_ADDR_LEN]);
+	int (*iac_add)(struct ieee80211vap *,
+		const u_int8_t mac[IEEE80211_ADDR_LEN]);
+	int (*iac_add_mac_list)(struct ieee80211vap *,
+					int, struct ieee80211_mac_addr *);
+	int (*iac_remove)(struct ieee80211vap *,
+		const u_int8_t mac[IEEE80211_ADDR_LEN]);
+	int (*iac_flush)(struct ieee80211vap *);
+	int (*iac_setpolicy)(struct ieee80211vap *, int);
+	int (*iac_getpolicy)(struct ieee80211vap *);
+};
+void ieee80211_aclator_register(const struct ieee80211_aclator *);
+void ieee80211_aclator_unregister(const struct ieee80211_aclator *);
+const struct ieee80211_aclator *ieee80211_aclator_get(const char *name);
+int ieee80211_mac_acl(struct ieee80211vap *, int);
+
+/* flags for ieee80211_fix_rate() */
+#define	IEEE80211_F_DOSORT	0x00000001	/* sort rate list */
+#define	IEEE80211_F_DOFRATE	0x00000002	/* use fixed rate */
+#define	IEEE80211_F_DONEGO	0x00000004	/* calc negotiated rate */
+#define	IEEE80211_F_DODEL	0x00000008	/* delete ignore rate */
+#define	IEEE80211_F_DOXSECT	0x00000010	/* intersection of rates */
+#define	IEEE80211_F_DOBRS	0x00000020	/* check for basic rates */
+int	ieee80211_fix_rate(struct ieee80211_node *, int);
+int	ieee80211_fix_ht_rate(struct ieee80211_node *, int);
+
+#define IEEE80211_EXPONENT_TO_VALUE(_exp) (1 << (u_int32_t)(_exp)) - 1
+#define IEEE80211_TXOP_TO_US(_txop)	(u_int32_t)(_txop) << 5
+#define IEEE80211_US_TO_TXOP(_us)	(u_int16_t)((u_int32_t)(_us)) >> 5
+
+struct chanAccParams{
+	/* XXX: is there any reason to have multiple instances of cap_info_count??? */
+	u_int8_t cap_info_count;		 		/* ver. of the current param set */
+	struct wmm_params cap_wmeParams[WME_NUM_AC];	/* WME params for each access category (AC) */
+};
+
+struct ieee80211_wme_state {
+	u_int32_t wme_flags;
+#define	WME_F_AGGRMODE	0x00000001	/* STATUS: WME aggressive mode */
+
+	u_int wme_hipri_traffic;			/* VI/VO frames in beacon interval */
+	u_int wme_hipri_switch_thresh;		/* aggressive mode switch threshold */
+	u_int wme_hipri_switch_hysteresis;	/* aggressive mode switch hysteresis */
+
+	struct chanAccParams wme_wmeChanParams;	/* configured WME parameters applied to itself */
+	struct chanAccParams wme_wmeBssChanParams; /* configured WME parameters broadcast to STAs */
+	struct chanAccParams wme_chanParams;	/* channel parameters applied to itself */
+	struct chanAccParams wme_bssChanParams;	/* channel parameters broadcast to STAs */
+	u_int8_t wme_nonAggressiveMode;   	/* don't use aggressive params and use WME params */
+
+#ifdef CONFIG_QVSP
+	uint32_t wme_throt_bm;			/* VSP WME throt bitmap */
+	struct chanAccParams wme_throt_bssChanParams;
+	uint32_t wme_throt_add_qwme_ie;
+#endif
+	/* update hardware tx params after wme state change */
+	int (*wme_update)(struct ieee80211com *);
+};
+
+void ieee80211_wme_initparams(struct ieee80211vap *);
+void ieee80211_wme_initparams_locked(struct ieee80211vap *);
+void ieee80211_wme_updateparams(struct ieee80211vap *vap, int sync_chan_param);
+void ieee80211_wme_updateparams_locked(struct ieee80211vap *);
+void ieee80211_wme_updateparams_delta(struct ieee80211vap *vap, uint8_t apply_delta);
+struct ieee80211_wme_state *ieee80211_vap_get_wmestate(struct ieee80211vap *vap);
+void ieee80211_vap_sync_chan_wmestate(struct ieee80211vap *vap);
+void ieee80211_adjust_wme_by_vappri(struct ieee80211com *ic);
+
+int ieee80211_open(struct net_device *);
+int ieee80211_init(struct net_device *, int);
+void ieee80211_start_running(struct ieee80211com *);
+int ieee80211_stop(struct net_device *);
+void ieee80211_stop_running(struct ieee80211com *);
+void ieee80211_beacon_miss(struct ieee80211com *);
+int ieee80211_new_state(struct ieee80211vap *, enum ieee80211_state, int);
+void ieee80211_print_essid(const u_int8_t *, int);
+void ieee80211_dump_pkt(struct ieee80211com *, const u_int8_t *, int, int, int);
+struct sk_buff *ieee80211_getcfframe(struct ieee80211vap *, int);
+void ieee80211_swbmiss(unsigned long arg);
+void ieee80211_swberp(unsigned long arg);
+
+
+/* used for send formatted string custom event IWEVCUSTOM */
+int ieee80211_eventf(struct net_device *dev, const char *fmt, ...);
+
+/*
+ * Beacon frames constructed by ieee80211_beacon_alloc
+ * have the following structure filled in so drivers
+ * can update the frame later w/ minimal overhead.
+ */
+struct ieee80211_beacon_offsets {
+	__le16 *bo_caps;		/* capabilities */
+	u_int8_t *bo_tim;		/* start of atim/dtim */
+	u_int8_t *bo_wme;		/* start of WME parameters */
+	u_int8_t *bo_tim_trailer;	/* start of fixed-size tim trailer */
+	u_int16_t bo_tim_len;		/* atim/dtim length in bytes */
+	u_int16_t bo_tim_trailerlen;	/* trailer length in bytes */
+	u_int8_t *bo_bss_load;          /* start of bss load */
+	u_int8_t *bo_chanswitch;	/* where channel switch IE will go */
+	u_int8_t *bo_ath_caps;		/* where ath caps is */
+	u_int8_t *bo_xr;		/* start of xr element */
+	u_int8_t *bo_cca;		/* start of clear channel assessment element. */
+	u_int8_t *bo_htcap;		/* start of HT Capability element */
+	u_int8_t *bo_htinfo;		/* start of HT Info element */
+	u_int8_t *bo_tpc_rep;		/* start of TPC Report IE*/
+	u_int8_t *bo_erp;		/* start of ERP element */
+	u_int8_t *bo_appie_buf;		/* start of APP IE buf */
+	u_int16_t bo_appie_buf_len;	/* APP IE buf length in bytes */
+	u_int8_t *bo_ocac_state;	/* start of the OCAC State IE */
+	u_int8_t *bo_rrm_enabled;	/* start of rm enabled */
+};
+struct sk_buff *ieee80211_beacon_alloc(struct ieee80211_node *,
+	struct ieee80211_beacon_offsets *);
+void ieee80211_beacon_update(struct ieee80211_node *,
+	struct ieee80211_beacon_offsets *, struct sk_buff *, int);
+void ieee80211_beacon_update_all(struct ieee80211com *);
+
+/* XXX exposed due to of beacon code botch */
+uint8_t *ieee80211_add_beacon_header(struct ieee80211_node *ni, uint8_t *frm);
+uint8_t *ieee80211_add_epigram_ie(uint8_t *frm);
+uint8_t *ieee80211_add_mandatory_field(struct ieee80211_node *ni, uint8_t *frm,
+		struct ieee80211_beacon_offsets *bo);
+u_int8_t *ieee80211_add_rates(u_int8_t *, const struct ieee80211_rateset *);
+u_int8_t *ieee80211_add_supported_chans(uint8_t *frm, struct ieee80211com *ic);
+u_int8_t *ieee80211_add_xrates(u_int8_t *, const struct ieee80211_rateset *);
+u_int8_t *ieee80211_add_bss_load(u_int8_t *, struct ieee80211vap *);
+u_int8_t *ieee80211_add_extcap(u_int8_t *);
+u_int8_t *ieee80211_add_wpa(u_int8_t *, struct ieee80211vap *);
+u_int8_t *ieee80211_add_csa(u_int8_t *, u_int8_t, u_int8_t, u_int8_t);
+u_int8_t *ieee80211_add_erp(u_int8_t *, struct ieee80211com *);
+u_int8_t *ieee80211_add_athAdvCap(u_int8_t *, u_int8_t, u_int16_t);
+u_int8_t *ieee80211_add_xr_param(u_int8_t *, struct ieee80211vap *);
+u_int8_t *ieee80211_add_xr_param(u_int8_t *, struct ieee80211vap *);
+u_int8_t *ieee80211_add_wme_param(u_int8_t *, struct ieee80211_wme_state *, int, int);
+u_int8_t *ieee80211_add_country(u_int8_t *, struct ieee80211com *);
+u_int8_t *ieee80211_add_country(u_int8_t *, struct ieee80211com *);
+u_int8_t *ieee80211_add_athAdvCap(u_int8_t *, u_int8_t, u_int16_t);
+uint8_t *ieee80211_add_qtn_ie(uint8_t *frm, struct ieee80211com *ic, uint8_t flags,
+				uint8_t my_flags, uint8_t implicit_ba, uint16_t implicit_ba_size,
+				uint32_t rate_train);
+u_int8_t *ieee80211_add_chansw_wrap(u_int8_t *, struct ieee80211com *);
+u_int8_t *ieee80211_add_wband_chanswitch(u_int8_t *, struct ieee80211com *);
+u_int8_t *ieee80211_add_vhttxpwr_envelope(u_int8_t *, struct ieee80211com *);
+#ifdef CONFIG_QVSP
+uint8_t *ieee80211_add_qtn_wme_param(struct ieee80211vap *, u_int8_t *);
+#endif
+u_int8_t *ieee80211_add_htcap(struct ieee80211_node *, u_int8_t *, struct ieee80211_htcap *, int subtype);
+u_int8_t *ieee80211_add_htinfo(struct ieee80211_node *, u_int8_t *, struct ieee80211_htinfo *);
+
+u_int8_t *ieee80211_add_vhtcap(struct ieee80211_node *, u_int8_t *, struct ieee80211_vhtcap *, uint8_t);
+u_int8_t *ieee80211_add_vhtop(struct ieee80211_node *, u_int8_t *, struct ieee80211_vhtop *);
+uint8_t *ieee80211_add_vhtop_notif(struct ieee80211_node *ni, uint8_t *frm, struct ieee80211com *ic, int band_24g);
+uint8_t *ieee80211_add_qtn_ocac_state_ie(uint8_t *frm);
+uint8_t * ieee80211_add_mdie(uint8_t *frm, struct ieee80211vap *vap);
+
+/* MU MIMO */
+void
+ieee80211_send_vht_grp_id_mgmt_action(struct ieee80211vap *vap,
+				      struct ieee80211_node *ni);
+struct ieee80211_node *ieee80211_find_node_by_aid(struct ieee80211com *ic, uint8_t aid);
+
+u_int8_t * ieee80211_add_20_40_bss_coex_ie(u_int8_t *frm, u_int8_t coex);
+void ieee80211_get_20_40_bss_into_chan_list(struct ieee80211com *ic, struct ieee80211vap *vap,
+					u_int16_t *pp_ch_list);
+u_int8_t * ieee80211_add_20_40_bss_into_ch_rep(u_int8_t *frm,
+	struct ieee80211com *ic, u_int16_t ch_list);
+u_int8_t * ieee80211_add_obss_scan_ie(u_int8_t *frm, struct ieee80211_obss_scan_ie *obss_ie);
+u_int8_t *ieee80211_add_qtn_extender_role_ie(uint8_t *frm, uint8_t role);
+u_int8_t *ieee80211_add_qtn_extender_bssid_ie(struct ieee80211vap *vap, uint8_t *frm);
+u_int8_t *ieee80211_add_qtn_extender_state_ie(uint8_t *frm, uint8_t ocac);
+int ieee80211_extender_send_event(struct ieee80211vap *vap,
+	const struct qtn_wds_ext_event_data *p_wds_event_data, uint8_t *ie);
+struct ieee80211_extender_wds_info *ieee80211_extender_find_peer_wds_info(struct ieee80211com *ic,
+	uint8_t *mac_addr);
+int ieee80211_extender_remove_peer_wds_info(struct ieee80211com *ic, uint8_t *mac_addr);
+void ieee80211_extender_notify_ext_role(struct ieee80211_node *ni);
+void ieee80211_extender_sta_update_info(struct ieee80211_node *ni,
+		const struct ieee80211_qtn_ext_role *ie_role,
+		const struct ieee80211_qtn_ext_bssid *ie_bssid);
+struct ieee80211_scanparams;
+void extender_event_data_prepare(struct ieee80211com *ic,
+			struct ieee80211_scanparams *p_scan,
+			struct qtn_wds_ext_event_data *data,
+			uint8_t cmd,
+			uint8_t *peer_mac);
+void ieee80211_extender_cleanup_wds_link(struct ieee80211vap *vap);
+void ieee80211_extender_vdetach(struct ieee80211vap *vap);
+uint8_t *ieee80211_add_qtn_tdls_sta_info(uint8_t *frm, void *sta_info);
+uint8_t *ieee80211_add_rrm_enabled(u_int8_t *, struct ieee80211vap *);
+/*
+ * Notification methods called from the 802.11 state machine.
+ * Note that while these are defined here, their implementation
+ * is OS-specific.
+ */
+void ieee80211_notify_node_join(struct ieee80211_node *, int);
+void ieee80211_notify_node_leave(struct ieee80211_node *);
+void ieee80211_notify_scan_done(struct ieee80211vap *);
+void ieee80211_notify_sta_stats(struct ieee80211_node *ni);
+void ieee80211_nofity_sta_require_leave(struct ieee80211_node *ni);
+void ieee80211_update_bss_tm(uint8_t *, int len, struct ieee80211com *ic, struct ieee80211vap *vap);
+#endif /* _NET80211_IEEE80211_PROTO_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_rate.h b/drivers/qtn/include/kernel/net80211/ieee80211_rate.h
new file mode 100644
index 0000000..bd45388
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_rate.h
@@ -0,0 +1,148 @@
+/*-
+ * Copyright (c) 2004 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004 Video54 Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: if_athrate.h 1667 2006-07-04 10:23:35Z kelmo $
+ */
+#ifndef _ATH_RATECTRL_H_
+#define _ATH_RATECTRL_H_
+
+/*
+ * Interface definitions for transmit rate control modules for the
+ * Atheros driver.
+ *
+ * A rate control module is responsible for choosing the transmit rate
+ * for each data frame.  Management+control frames are always sent at
+ * a fixed rate.
+ *
+ * An instance of the rate control module is attached to each device
+ * at attach time and detached when the device is destroyed.  The module
+ * may associate data with each device and each node (station).  Both
+ * sets of storage are opaque except for the size of the per-node storage
+ * which must be provided when the module is attached.
+ *
+ * The rate control module is notified for each state transition and
+ * station association/reassociation.  Otherwise it is queried for a
+ * rate for each outgoing frame and provided status from each transmitted
+ * frame.  Any ancillary processing is the responsibility of the module
+ * (e.g. if periodic processing is required then the module should setup
+ * it's own timer).
+ *
+ * In addition to the transmit rate for each frame the module must also
+ * indicate the number of attempts to make at the specified rate.  If this
+ * number is != ATH_TXMAXTRY then an additional callback is made to setup
+ * additional transmit state.  The rate control code is assumed to write
+ * this additional data directly to the transmit descriptor.
+ */
+
+enum {
+	IEEE80211_RATE_AMRR,
+	IEEE80211_RATE_ONOE,
+	IEEE80211_RATE_SAMPLE,
+	IEEE80211_RATE_MAX
+};
+
+struct ath_softc;
+struct ath_node;
+struct ath_desc;
+struct ieee80211vap;
+
+struct ieee80211_rate_ops {
+	int ratectl_id;
+
+	/* Attach/detach a rate control module */
+	struct ath_ratectrl *(*attach)(struct ath_softc *sc);
+	void (*detach)(struct ath_ratectrl *arc);
+
+	/* Register proc entries with a VAP */
+	void (*dynamic_proc_register)(struct ieee80211vap *vap);
+
+	/* *** State storage handling *** */
+
+	/* Initialize per-node state already allocated for the specified
+	 * node; this space can be assumed initialized to zero */
+	void (*node_init)(struct ath_softc *sc, struct ath_node *an);
+
+	/* Cleanup any per-node state prior to the node being reclaimed */
+	void (*node_cleanup)(struct ath_softc *sc, struct ath_node *an);
+
+	/* Update rate control state on station associate/reassociate 
+	 * (when operating as an ap or for nodes discovered when operating
+	 * in ibss mode) */
+	void (*newassoc)(struct ath_softc *sc, struct ath_node *an,
+			 int isnew);
+
+	/* Update/reset rate control state for 802.11 state transitions.
+	 * Important mostly as the analog to newassoc when operating
+	 * in station mode */
+	void (*newstate)(struct ieee80211vap *vap,
+			 enum ieee80211_state state);
+
+	/* *** Transmit handling *** */
+
+	/* Return the transmit info for a data packet.  If multi-rate state
+	 * is to be setup then try0 should contain a value other than ATH_TXMAXTRY
+	 * and setupxtxdesc will be called after deciding if the frame
+	 * can be transmitted with multi-rate retry. */
+	void (*findrate)(struct ath_softc *sc, struct ath_node *an,
+			 int shortPreamble, size_t frameLen,
+			 u_int8_t *rix, int *try0, u_int8_t *txrate);
+
+	/* Setup any extended (multi-rate) descriptor state for a data packet.
+	 * The rate index returned by findrate is passed back in. */
+	void (*setupxtxdesc)(struct ath_softc *sc, struct ath_node *an,
+			     struct ath_desc *ds, int shortPreamble,
+			     size_t frame_size, u_int8_t rix);
+
+	/* Update rate control state for a packet associated with the
+	 * supplied transmit descriptor.  The routine is invoked both
+	 * for packets that were successfully sent and for those that
+	 * failed (consult the descriptor for details). */
+	void (*tx_complete)(struct ath_softc *sc, struct ath_node *an,
+			    const struct ath_desc *ds);
+};
+
+struct ath_ratectrl {
+	struct ieee80211_rate_ops *ops;
+	size_t arc_space;	/* space required for per-node state */
+	size_t arc_vap_space;	/* space required for per-vap state */
+};
+
+int ieee80211_rate_register(struct ieee80211_rate_ops *ops);
+void ieee80211_rate_unregister(struct ieee80211_rate_ops *ops);
+
+struct ath_ratectrl *ieee80211_rate_attach(struct ath_softc *sc, const char *name);
+void ieee80211_rate_detach(struct ath_ratectrl *);
+#endif /* _ATH_RATECTRL_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_scan.h b/drivers/qtn/include/kernel/net80211/ieee80211_scan.h
new file mode 100644
index 0000000..d1ac89d
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_scan.h
@@ -0,0 +1,499 @@
+/*-
+ * Copyright (c) 2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_scan.h 2366 2007-05-23 08:43:05Z mrenzmann $
+ */
+#ifndef _NET80211_IEEE80211_SCAN_H_
+#define _NET80211_IEEE80211_SCAN_H_
+
+#include <net/iw_handler.h>
+#define	IEEE80211_SCAN_MAX	IEEE80211_CHAN_MAX
+
+struct ieee80211_scanner;
+
+struct ieee80211_scan_ssid {
+	int len;					/* length in bytes */
+	u_int8_t ssid[IEEE80211_NWID_LEN];	/* ssid contents */
+};
+#define	IEEE80211_SCAN_MAX_SSID	1
+
+struct ieee80211_scan_state {
+	struct ieee80211vap *ss_vap;
+	const struct ieee80211_scanner *ss_ops;	/* policy hookup, see below */
+	void *ss_priv;				/* scanner private state */
+	void *ss_scs_priv;			/* scs private state independent of the scan state */
+	u_int16_t ss_flags;
+#define	IEEE80211_SCAN_NOPICK			0x0001	/* scan only, no selection */
+#define	IEEE80211_SCAN_ACTIVE			0x0002	/* active scan (probe req) */
+#define	IEEE80211_SCAN_PICK1ST			0x0004	/* ``hey sailor'' mode */
+#define	IEEE80211_SCAN_BGSCAN			0x0008	/* bg scan, exit ps at end */
+#define	IEEE80211_SCAN_ONCE			0x0010	/* do one complete pass */
+#define	IEEE80211_SCAN_NO_DFS			0x0020	/* avoid DFS channels, AP only */
+#define IEEE80211_SCAN_DFS_ACTION		0x0040	/* scan end, do DFS action */
+#define IEEE80211_SCAN_QTN_BGSCAN		0x0080	/* quantenna background scanning required */
+#define IEEE80211_SCAN_OPCHAN			0x0100	/* quantenna scanning on operating channel only */
+#define IEEE80211_SCAN_QTN_SEARCH_MBS		0x0200	/* search MBS, no action on scan end */
+#define IEEE80211_SCAN_OBSS			0x0400	/* OBSS scan */
+/* Note: top 4 bits for internal use only */
+#define	IEEE80211_SCAN_GOTPICK			0x1000	/* got candidate, can stop */
+
+	u_int16_t ss_pick_flags;			/* pick a channel via an algorithm in a special domain */
+
+  /*
+   * Be aware only the lower 12 bits actually make it to ss_flags from
+   * the flags parameter to ieee80211_start_scan.
+   */
+	u_int8_t	ss_nssid;			/* # ssid's to probe/match */
+	struct ieee80211_scan_ssid ss_ssid[IEEE80211_SCAN_MAX_SSID];
+						/* ssid's to probe/match */
+						/* ordered channel set */
+	struct ieee80211_channel *ss_chans[IEEE80211_SCAN_MAX];
+	u_int16_t ss_next;			/* ix of next chan to scan */
+	u_int16_t ss_last;			/* ix + 1 of last chan to scan */
+	u_int16_t ss_scan_bw;			/* scan bandwidth */
+	unsigned long ss_mindwell;		/* min dwell on channel */
+	unsigned long ss_mindwell_passive;	/* min dwell time on a passive channel */
+	unsigned long ss_maxdwell;		/* max dwell on channel */
+	unsigned long ss_maxdwell_passive;	/* max dwell time on a passive channel */
+	u_int ss_duration;			/* used for calling ieee80211_start_scan() */
+	u_int8_t is_scan_valid;			/* was the channel scan initiated by supplicant */
+};
+
+/*
+ * The upper 16 bits of the flags word is used to communicate
+ * information to the scanning code that is NOT recorded in
+ * ss_flags.  It might be better to split this stuff out into
+ * a separate variable to avoid confusion.
+ */
+#define	IEEE80211_SCAN_FLUSH	0x00010000		/* flush candidate table */
+#define	IEEE80211_SCAN_NOSSID	0x00020000		/* don't update ssid list */
+#define	IEEE80211_SCAN_USECACHE	0x00040000		/* Must use a result from the cache */
+#define	IEEE80211_SCAN_KEEPMODE	0x00080000		/* Must keep the same wireless mode (11a, 11g, or 11at, etc) */
+#define	IEEE80211_SCAN_BW40	0x00100000		/* Set scan bandwidth as 40M */
+#define	IEEE80211_SCAN_BW80	0x00200000		/* Set scan bandwidth as 80M */
+#define	IEEE80211_SCAN_BW160	0x00400000		/* Set scan bandwidth as 160M */
+
+
+/*
+ * Parameters for managing cache entries:
+ *
+ * o a station with STA_FAILS_MAX failures is not considered
+ *   when picking a candidate
+ * o a station that hasn't had an update in STA_PURGE_SCANS
+ *   (background) scans is discarded
+ * o after STA_FAILS_AGE seconds we clear the failure count
+ */
+#define	STA_FAILS_MAX	2		/* assoc failures before ignored */
+#define	STA_FAILS_AGE	(2 * 60)	/* time before clearing fails (secs) */
+#define	STA_PURGE_SCANS	2		/* age for purging sta entries (scans) */
+#define	AP_PURGE_SCANS	999		/* age for purging ap entries (scans) */
+#define	AP_PURGE_SCS	60		/* age for purging ap entries (SCS) */
+
+#define RSSI_LPF_LEN	10
+#define	RSSI_EP_MULTIPLIER	(1<<7)	/* pow2 to optimize out * and / */
+#define RSSI_IN(x)		((x) * RSSI_EP_MULTIPLIER)
+#define LPF_RSSI(x, y, len)	(((x) * ((len) - 1) + (y)) / (len))
+#define RSSI_LPF(x, y) do {						\
+    if ((y) >= -20)							\
+	    x = LPF_RSSI((x), RSSI_IN((y)), RSSI_LPF_LEN);		\
+} while (0)
+#define	EP_RND(x, mul) \
+	((((x)%(mul)) >= ((mul)/2)) ? howmany(x, mul) : (x)/(mul))
+#define	RSSI_GET(x)	EP_RND(x, RSSI_EP_MULTIPLIER)
+#define	ISPROBE(_st)	((_st) == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+#define	STA_HASHSIZE	32
+/* simple hash is enough for variation of macaddr */
+#define	STA_HASH(addr)	\
+	(((const u_int8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % STA_HASHSIZE)
+
+struct ieee80211com;
+void ieee80211_scan_attach(struct ieee80211com *);
+void ieee80211_scan_detach(struct ieee80211com *);
+void ieee80211_scan_vattach(struct ieee80211vap *);
+void ieee80211_scan_vdetach(struct ieee80211vap *);
+
+void ieee80211_scan_dump_channels(const struct ieee80211_scan_state *);
+
+#define	IEEE80211_SCAN_FOREVER	0x7fffffff
+int ieee80211_start_scan(struct ieee80211vap *, int, u_int, u_int,
+	const struct ieee80211_scan_ssid ssids[]);
+int ieee80211_check_scan(struct ieee80211vap *, int, u_int, u_int,
+	const struct ieee80211_scan_ssid ssids[],
+	int (*action)(struct ieee80211vap *, const struct ieee80211_scan_entry *));
+int ieee80211_should_scan(struct ieee80211vap *vap);
+int ieee80211_bg_scan(struct ieee80211vap *);
+void ieee80211_cancel_scan(struct ieee80211vap *);
+void ieee80211_cancel_scan_no_wait(struct ieee80211vap *vap);
+
+void ieee80211_add_scs_off_chan(struct ieee80211vap *, const struct ieee80211_scanparams *,
+	const struct ieee80211_frame *, int , int , int);
+void ieee80211_scan_scs_sample(struct ieee80211vap *vap);
+void ieee80211_scan_scs_sample_cancel(struct ieee80211vap *vap);
+
+int ieee80211_scan_dfs_action(struct ieee80211vap *, const struct ieee80211_scan_entry *);
+
+struct ieee80211_scanparams;
+void ieee80211_parse_ap_scan_result(struct ieee80211_scan_state *, const struct ieee80211_scanparams *,
+	const struct ieee80211_frame *, int, int, int, int);
+void ieee80211_add_scan(struct ieee80211vap *, const struct ieee80211_scanparams *,
+	const struct ieee80211_frame *, int, int, int);
+void ieee80211_scan_timeout(unsigned long arg);
+
+void ieee80211_scan_assoc_success(struct ieee80211com *,
+	const u_int8_t mac[IEEE80211_ADDR_LEN]);
+enum {
+	IEEE80211_SCAN_FAIL_TIMEOUT	= 1,	/* no response to mgmt frame */
+	IEEE80211_SCAN_FAIL_STATUS	= 2	/* negative response to " " */
+};
+void ieee80211_scan_assoc_fail(struct ieee80211com *,
+	const u_int8_t mac[IEEE80211_ADDR_LEN], int);
+void ieee80211_scan_flush(struct ieee80211com *);
+void ieee80211_scan_remove(struct ieee80211vap *);
+struct ieee80211_channel *ieee80211_scan_pickchannel(struct ieee80211com *ic, int flags);
+void ieee80211_scan_refresh_scan_module_chan_list(struct ieee80211com *ic, struct ieee80211vap *vap);
+
+struct ieee80211_scan_entry;
+typedef int ieee80211_scan_iter_func(void *, const struct ieee80211_scan_entry *);
+int ieee80211_scan_iterate(struct ieee80211com *, ieee80211_scan_iter_func *, void *);
+
+/*
+ * Parameters supplied when adding/updating an entry in a
+ * scan cache.  Pointer variables should be set to NULL
+ * if no data is available.  Pointer references can be to
+ * local data; any information that is saved will be copied.
+ * All multi-byte values must be in host byte order.
+ */
+struct ieee80211_scanparams {
+	u_int16_t capinfo;	/* 802.11 capabilities */
+	u_int16_t fhdwell;	/* FHSS dwell interval */
+	u_int8_t chan;		/* */
+	u_int8_t bchan;
+	u_int8_t fhindex;
+	u_int8_t erp;
+	u_int16_t bintval;
+	u_int8_t timoff;
+	u_int8_t *tim;
+	u_int8_t *tstamp;
+	u_int8_t *country;
+	u_int8_t *ssid;
+	u_int8_t *rates;
+	u_int8_t *xrates;
+	u_int8_t *htcap;
+	u_int8_t *htinfo;
+	u_int8_t *csa;
+	u_int8_t *csa_tsf;
+	u_int8_t *measreq;
+	u_int8_t *wpa;
+	u_int8_t *rsn;
+	u_int8_t *wme;
+	u_int8_t *wsc;
+	u_int8_t *ath;
+	u_int8_t *qtn;
+	u_int8_t *vhtcap;
+	u_int8_t *vhtop;
+	u_int8_t *pwr_constraint;
+	u_int8_t *obss_scan;
+	u_int8_t *pairing_ie;
+	int8_t	local_max_txpwr;
+	struct ieee80211_channel *rxchan;
+	uint8_t extender_role;
+	uint8_t *ext_bssid_ie;
+	uint8_t *bssload;
+	uint8_t *mdie;
+};
+
+/*
+ * Scan cache entry format used when exporting data from a policy
+ * module; this data may be represented some other way internally.
+ */
+struct ieee80211_scan_entry {
+	u_int8_t se_macaddr[IEEE80211_ADDR_LEN];
+	u_int8_t se_bssid[IEEE80211_ADDR_LEN];
+	u_int8_t se_ssid[2 + IEEE80211_NWID_LEN];
+	u_int8_t se_rates[2 + IEEE80211_RATE_MAXSIZE];
+	u_int8_t se_xrates[2 + IEEE80211_RATE_MAXSIZE];
+	u_int32_t se_rstamp;		/* recv timestamp */
+	union {
+		u_int8_t data[8];
+		__le64 tsf;
+	} se_tstamp;			/* from last rcv'd beacon */
+	u_int16_t se_intval;		/* beacon interval (host byte order) */
+	u_int16_t se_capinfo;		/* capabilities (host byte order) */
+	struct ieee80211_channel *se_chan;/* channel where sta found */
+	u_int16_t se_timoff;		/* byte offset to TIM ie */
+	u_int16_t se_fhdwell;		/* FH only (host byte order) */
+	u_int8_t se_fhindex;		/* FH only */
+	u_int8_t se_erp;		/* ERP from beacon/probe resp */
+	int8_t se_rssi;			/* avg'd recv ssi */
+	u_int8_t se_dtimperiod;		/* DTIM period */
+	u_int8_t *se_wpa_ie;		/* captured WPA ie */
+	u_int8_t *se_rsn_ie;		/* captured RSN ie */
+	u_int8_t *se_wme_ie;		/* captured WME ie */
+	u_int8_t *se_wsc_ie;		/* captured WSC ie */
+	u_int8_t *se_htcap_ie;		/* captured HT Capability Info ie */
+	u_int8_t *se_htinfo_ie;		/* captured HT Information ie */
+	u_int8_t *se_vhtcap_ie;		/* captured VHT Capability Info ie */
+	u_int8_t *se_vhtop_ie;		/* captured VHT Operation Info ie */
+	u_int8_t *se_ath_ie;		/* captured Atheros ie */
+	u_int8_t *se_qtn_ie;		/* captured qtn ie */
+	u_int8_t se_qtn_ie_flags;	/* captured Quantenna flags */
+	u_int8_t se_is_qtn_dev;		/* 1 - is QTN device, 0 - non-QTN device */
+	u_int8_t se_ext_role;		/* 0 - default mode, 1 - MBS, 2 - RBS */
+	u_int8_t *se_ext_bssid_ie;	/* captured extender bssid ie */
+	u_int8_t *se_pairing_ie;	/* captured pairing ie */
+	u_int se_age;			/* age of entry (0 on create) */
+	int8_t local_max_txpwr;		/* local max transmit power from Beacon/Probe response */
+	u_int8_t *se_bss_load_ie;	/* bss load ie */
+	u_int8_t *se_md_ie;		/* mobility domain ie */
+};
+
+/*
+ * Template for an in-kernel scan policy module.
+ * Modules register with the scanning code and are
+ * typically loaded as needed.
+ */
+struct ieee80211_scanner {
+	const char *scan_name;		/* printable name */
+	int (*scan_attach)(struct ieee80211_scan_state *);
+	int (*scan_detach)(struct ieee80211_scan_state *);
+	int (*scan_start)(struct ieee80211_scan_state *, struct ieee80211vap *);
+	int (*scan_restart)(struct ieee80211_scan_state *, struct ieee80211vap *);
+	int (*scan_cancel)(struct ieee80211_scan_state *, struct ieee80211vap *);
+	int (*scan_end)(struct ieee80211_scan_state *, struct ieee80211vap *,
+		int (*action)(struct ieee80211vap *, const struct ieee80211_scan_entry *),
+		u_int32_t);
+	int (*scan_flush)(struct ieee80211_scan_state *);
+	void (*scan_remove)(struct ieee80211_scan_state *, struct ieee80211_node *);
+	struct ieee80211_channel *(*scan_pickchan)(struct ieee80211com *ic,
+			struct ieee80211_scan_state *, int);
+	/* add an entry to the cache */
+	int (*scan_add)(struct ieee80211_scan_state *,
+		const struct ieee80211_scanparams *,
+		const struct ieee80211_frame *, int, int, int);
+	/* age and/or purge entries in the cache */
+	void (*scan_age)(struct ieee80211_scan_state *);
+	/* note that association failed for an entry */
+	void (*scan_assoc_fail)(struct ieee80211_scan_state *,
+		const u_int8_t macaddr[IEEE80211_ADDR_LEN], int);
+	/* note that association succeed for an entry */
+	void (*scan_assoc_success)(struct ieee80211_scan_state *,
+		const u_int8_t macaddr[IEEE80211_ADDR_LEN]);
+	/* iterate over entries in the scan cache */
+	int (*scan_iterate)(struct ieee80211_scan_state *,
+		ieee80211_scan_iter_func *, void *);
+	/* lock the ap scan private data */
+	int (*scan_lock)(struct ieee80211_scan_state *);
+	/* unlock the ap scan private data */
+	void (*scan_unlock)(struct ieee80211_scan_state *,
+		int bh_disabled);
+	/* default action to take when found scan match */
+	int (*scan_default)(struct ieee80211vap *,
+		const struct ieee80211_scan_entry *);
+};
+
+
+struct sta_entry {
+	struct ieee80211_scan_entry base;
+	TAILQ_ENTRY(sta_entry) se_list;
+	LIST_ENTRY(sta_entry) se_hash;
+	u_int8_t se_fails;		/* failure to associate count */
+	u_int8_t se_seen;		/* seen during current scan */
+	u_int8_t se_notseen;		/* not seen in previous scans */
+	u_int32_t se_avgrssi;		/* LPF rssi state */
+	unsigned long se_lastupdate;	/* time of last update */
+	unsigned long se_lastfail;	/* time of last failure */
+	unsigned long se_lastassoc;	/* time of last association */
+	u_int se_scangen;		/* iterator scan gen# */
+	int se_inuse;			/* indicate that entry is in use and cannot be freed */
+	int se_request_to_free;		/* indicate that entry must be freed after se_inuse cleared up */
+};
+
+struct sta_table {
+	spinlock_t st_lock;			/* on scan table */
+	TAILQ_HEAD(, sta_entry) st_entry;	/* all entries */
+	ATH_LIST_HEAD(, sta_entry) st_hash[STA_HASHSIZE];
+	uint32_t st_entry_num;
+	spinlock_t st_scanlock;			/* on st_scangen */
+	u_int st_scangen;			/* gen# for iterator */
+	int st_newscan;
+	struct IEEE80211_TQ_STRUCT st_actiontq;	/* tasklet for "action" */
+	struct ieee80211_scan_entry st_selbss;	/* selected bss for action tasklet */
+	int (*st_action)(struct ieee80211vap *, const struct ieee80211_scan_entry *);
+};
+
+struct ap_scan_list {
+	TAILQ_HEAD(, ap_scan_entry) asl_head;
+};
+
+struct ap_scan_entry {
+	struct ieee80211_scan_entry base;
+	TAILQ_ENTRY(ap_scan_entry) ase_list;
+	unsigned long se_lastupdate;	/* time of last update */
+	u_int32_t se_avgrssi;		/* LPF rssi state */
+	u_int8_t se_seen;	/* seen during current scan */
+	u_int8_t se_notseen;	/* not seen in previous scans */
+	u_int32_t se_inuse;	/* indicate that entry is in use and cannot be freed */
+	u_int8_t se_request_to_free;	/* request to free entry after se_inuse cleared. */
+};
+
+struct ap_state {
+	unsigned int as_numbeacons[IEEE80211_CHAN_MAX];
+	uint8_t as_obss_chanlayout[IEEE80211_CHAN_MAX];
+
+	int as_maxrssi[IEEE80211_CHAN_MAX];
+	int as_cci[IEEE80211_CHAN_MAX];		/* CCI, Co-Channel Interference */
+	int as_aci[IEEE80211_CHAN_MAX];		/* ACI, Adjacent Channel Interference */
+	int as_numpkts[IEEE80211_CHAN_MAX];
+	int as_chanmetric[IEEE80211_CHAN_MAX];
+	uint32_t as_chanmetric_timestamp[IEEE80211_CHAN_MAX];
+	uint32_t as_chanmetric_pref[IEEE80211_CHAN_MAX];
+	struct IEEE80211_TQ_STRUCT as_actiontq;	/* tasklet for "action" */
+	struct ieee80211_scan_entry as_selbss;	/* selected bss for action tasklet */
+	int (*as_action)(struct ieee80211vap *, const struct ieee80211_scan_entry *);
+	struct ap_scan_list as_scan_list[IEEE80211_CHAN_MAX];
+	uint32_t as_entry_num;			/* scan entry number */
+	spinlock_t asl_lock;			/* to protect as_scan_list */
+
+	u_int8_t as_chan_xped[IEEE80211_CHAN_BYTES];	/* whether once used as working channel */
+	uint32_t as_scs_ranking_cnt;
+
+	/* CCA interference */
+#define SCS_CCA_INTF_INVALID	0xFFFF
+#define SCS_CCA_IDLE_INVALID	0xFFFF
+	uint16_t as_cca_intf_smth;
+	uint16_t as_cca_intf[IEEE80211_CHAN_MAX];
+	uint16_t as_cca_intf_pri[IEEE80211_CHAN_MAX];
+	uint16_t as_cca_intf_sec[IEEE80211_CHAN_MAX];
+	uint16_t as_cca_intf_sec40[IEEE80211_CHAN_MAX];
+	uint32_t as_cca_intf_jiffies[IEEE80211_CHAN_MAX];
+	uint32_t as_pmbl_err_ap[IEEE80211_CHAN_MAX];
+	uint32_t as_pmbl_err_sta[IEEE80211_CHAN_MAX];
+	/* tx and rx time */
+	uint32_t as_tx_ms;
+	uint32_t as_rx_ms;
+	uint32_t as_tx_ms_smth;
+	uint32_t as_rx_ms_smth;
+	/* rssi from all STAs, used to calculate attenuation */
+#define SCS_RSSI_UNINITED		-1
+#define SCS_RSSI_VALID(_rssi)		(((_rssi) < -1) && ((_rssi) > -1200))
+#define SCS_RSSI_PRECISION_RECIP	10	/* 0.1db */
+#define SCS_ATTEN_UNINITED		0
+#define SCS_ATTEN_VALID(_atten)         (_atten != SCS_ATTEN_UNINITED)
+	int8_t	as_sta_atten_num;
+	int32_t	as_sta_atten_sum;
+	int32_t	as_sta_atten_min;
+	int32_t	as_sta_atten_max;
+	int32_t as_sta_atten_expect;    /* the attenuation expected in low power channel */
+	int32_t as_dfs_reentry_cnt;
+	int32_t as_dfs_reentry_level;
+	uint32_t as_age;
+};
+
+void ieee80211_add_scan_entry(struct ieee80211_scan_entry *ise,
+			const struct ieee80211_scanparams *sp,
+			const struct ieee80211_frame *wh,
+			int subtype, int rssi, int rstamp);
+int ieee80211_scan_check_secondary_channel(struct ieee80211_scan_state *ss,
+			struct ieee80211_scan_entry *ise);
+struct ieee80211_channel *ieee80211_scan_switch_pri_chan(struct ieee80211_scan_state *ss,
+			struct ieee80211_channel *chan_pri);
+struct ieee80211_channel *ieee80211_scs_switch_pri_chan(struct ieee80211_scan_state *ss,
+			struct ieee80211_channel *chan_pri);
+int ieee80211_wps_active(uint8_t *wsc_ie);
+void ieee80211_dump_scan_res(struct ieee80211_scan_state *ss);
+int ieee80211_get_type_of_neighborhood(struct ieee80211com *ic);
+void ieee80211_check_type_of_neighborhood(struct ieee80211com *ic);
+char * ieee80211_neighborhood_type2str(int type);
+
+const struct ieee80211_scanner *ieee80211_scanner_get(enum ieee80211_opmode,
+	int);
+void ieee80211_scanner_register(enum ieee80211_opmode,
+	const struct ieee80211_scanner *);
+void ieee80211_scanner_unregister(enum ieee80211_opmode,
+	const struct ieee80211_scanner *);
+void ieee80211_scanner_unregister_all(const struct ieee80211_scanner *);
+
+struct ap_scan_iter {
+	struct ieee80211vap *vap;
+	char *current_env;
+	char *end_buf;
+	int32_t	ap_counts;
+	int mode;
+};
+
+struct iwscanreq {
+	struct ieee80211vap *vap;
+	char *current_ev;
+	char *end_buf;
+	int mode;
+	struct iw_request_info *info;
+};
+
+#define QFDR_SIWSCAN            1
+#define QFDR_SIWSCAN_SIMPLE     2
+#define QFDR_GIWSCAN            3
+#define QFDR_AP_SCAN_RESULT     4
+#define QFDR_REMOTE_CMD         0x1000
+
+struct qfdr_remote_aplist_req {
+	int32_t type;
+	char dev_name[IFNAMSIZ];
+	struct iw_request_info info;
+	int32_t extra_len;
+};
+
+struct qfdr_remote_scan_req {
+	int32_t type;
+	char dev_name[IFNAMSIZ];
+	u_int16_t flags;
+	u_int16_t length;
+	char pointer[0];
+};
+
+struct qfdr_remote_aplist_rep {
+	int32_t res;
+	int32_t type;
+	int32_t ap_counts;
+	int32_t length;
+	char extra[0];
+};
+
+typedef int (*qfdr_remote_siwscan_hook_t)(char *dev_name, struct iw_point *data);
+void ieee80211_register_qfdr_remote_siwscan_hook(qfdr_remote_siwscan_hook_t hook);
+int qfdr_siwscan_for_remote(struct qfdr_remote_scan_req *req_remote);
+
+typedef int (*qfdr_remote_giwscan_hook_t)(struct iwscanreq *req);
+void ieee80211_register_qfdr_remote_giwscan_hook(qfdr_remote_giwscan_hook_t hook);
+struct qfdr_remote_aplist_rep *qfdr_giwscan_for_remote(struct qfdr_remote_aplist_req *req_remote);
+
+typedef int (*qfdr_remote_ap_scan_results_hook_t)(struct ap_scan_iter *iter);
+void ieee80211_register_qfdr_remote_ap_scan_results_hook(qfdr_remote_ap_scan_results_hook_t hook);
+struct qfdr_remote_aplist_rep *qfdr_ap_scan_results_for_remote(struct qfdr_remote_aplist_req *req_remote);
+#endif /* _NET80211_IEEE80211_SCAN_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_tdls.h b/drivers/qtn/include/kernel/net80211/ieee80211_tdls.h
new file mode 100644
index 0000000..8fd82e3
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_tdls.h
@@ -0,0 +1,289 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2010-2013 Quantenna Communications, Inc.            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  File        : ieee80211_tdls.h                                           **
+**  Description : Tunnelled Direct-Link Setup                                **
+**                                                                           **
+**  This module implements portions of the IEEE Std 802.11z specification,   **
+** as well as a proprietary discovery mechanism.                             **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef _NET80211_IEEE80211_TDLS_H_
+#define _NET80211_IEEE80211_TDLS_H_
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "net80211/if_llc.h"
+#include "net80211/if_ethersubr.h"
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211.h"
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_dot11_msg.h"
+#include "net80211/ieee80211_linux.h"
+
+
+struct ieee80211_tdls_params {
+	/* Header fields */
+	u_int8_t	*sa;	/* source (sender) MAC address */
+	u_int8_t	*da;	/* destination MAC address */
+	/* action code */
+	u_int8_t	act;
+
+	/* Fixed length parameters */
+	u_int16_t	caps;	/* capability information field */
+	u_int16_t	reason;	/* reason code -- presumably for teardown; confirm */
+	u_int16_t	status;	/* status code (see ieee80211_tdls_status_string_get()) */
+	u_int8_t	diag_token;	/* dialog token matching request to response -- presumably */
+	u_int8_t	target_chan;	/* target channel for channel switch */
+	u_int8_t	reg_class;	/* regulatory class of the target channel */
+
+	/* TLVs from 802.11z -- each pointer references the raw IE in the received frame */
+	u_int8_t	*rates;	/* Supported Rates IE */
+	u_int8_t	*country;	/* Country IE */
+	u_int8_t	*xrates;	/* Extended Supported Rates IE */
+	u_int8_t	*supp_chan;	/* Supported Channels IE */
+	u_int8_t	*sec_chan_off;	/* Secondary Channel Offset IE */
+	u_int8_t	*rsn;	/* RSN IE */
+	u_int8_t	*ext_cap;	/* Extended Capabilities IE */
+	u_int8_t	*edca;	/* EDCA Parameter Set IE */
+	u_int8_t	*qos_cap;	/* QoS Capability IE */
+	u_int8_t	*ftie;	/* Fast BSS Transition IE (FTIE) */
+	u_int8_t	*tpk_timeout;	/* TPK Key Lifetime IE -- presumably */
+	u_int8_t	*sup_reg_class;	/* Supported Regulatory Classes IE */
+	u_int8_t	*htcap;	/* HT Capabilities IE */
+	u_int8_t	*htinfo;	/* HT Operation/Information IE */
+	u_int8_t	*vhtcap;	/* VHT Capabilities IE */
+	u_int8_t	*vhtop;	/* VHT Operation IE */
+
+	u_int8_t	*bss_2040_coex;	/* 20/40 BSS Coexistence IE */
+	struct ieee80211_ie_aid			*aid;	/* AID IE */
+	struct ieee80211_tdls_link_id		*link_id;	/* TDLS Link Identifier IE */
+	struct ieee80211_tdls_wkup_sched	*wkup_sched;	/* TDLS Wakeup Schedule IE */
+	struct ieee80211_tdls_cs_timing		*cs_timing;	/* TDLS Channel Switch Timing IE */
+	struct ieee80211_tdls_pti_ctrl		*pti_ctrl;	/* TDLS PTI Control IE */
+	struct ieee80211_tdls_pu_buf_stat	*pu_buf_stat;	/* TDLS PU Buffer Status IE */
+	struct ieee80211_ie_wbchansw		*wide_bw_cs;	/* Wide Bandwidth Channel Switch IE */
+	struct ieee80211_ie_vtxpwren		*vht_tx_pw_env;	/* VHT Transmit Power Envelope IE */
+
+	/* Proprietary TLVs */
+	u_int8_t	*qtn_info;	/* Quantenna-proprietary info element */
+	u_int8_t	*qtn_brmacs;	/* Quantenna-proprietary bridge-MACs element -- presumably */
+};
+
+#define	DEFAULT_TDLS_TIMEOUT_TIME		30
+#define	DEFAULT_TDLS_DISCOVER_INTERVAL		60
+#define DEFAULT_TDLS_LIFE_CYCLE			(6 * DEFAULT_TDLS_DISCOVER_INTERVAL)
+
+#define DEFAULT_TDLS_PATH_SEL_MODE		0
+#define DEFAULT_TDLS_PATH_SEL_PPS_THRSHLD	20
+#define DEFAULT_TDLS_PATH_SEL_RATE_THRSHLD	24
+#define DEFAULT_TDLS_RATE_DETECTION_PKT_CNT	1024
+#define DEFAULT_TDLS_RATE_DETECTION_BURST_CNT	4
+#define DEFAULT_TDLS_RATE_DETECTION_WAITING_T	12
+#define DEFAULT_TDLS_LINK_WEIGHT		5
+#define DEFAULT_TDLS_LINK_DISABLE_SCALE		2
+#define DEFAULT_TDLS_LINK_SWITCH_INV		2
+#define	DEFAULT_TDLS_PHY_RATE_WEIGHT		6
+
+#define TDLS_INVALID_CHANNEL_NUM		0
+
+#define DEFAULT_TDLS_VERBOSE			1
+#define DEFAULT_TDLS_MIN_RSSI			(-900)
+#define	DEFAULT_TDLS_CH_SW_NEGO_TIME		10000
+#define DEFAULT_TDLS_CH_SW_TIMEOUT		10000
+#define DEFAULT_TDLS_CH_SW_PROC_TIME		3000
+#define	DEFAULT_TDLS_CH_SW_OC_MARGIN		24000
+#define	DEFAULT_TDLS_FIXED_OFF_CHAN		TDLS_INVALID_CHANNEL_NUM
+
+#define	DEFAULT_TDLS_UAPSD_INDICATION_WND	(150)
+#define	DEFAULT_TDLS_SETUP_EXPIRE_DURATION	(20)
+
+const char *
+ieee80211_tdls_action_name_get(u_int8_t action);
+
+const char *
+ieee80211_tdls_status_string_get(uint8_t stats);
+
+int
+ieee80211_cfg_tdls_add(struct ieee80211vap *vap, u_int8_t *mac);
+
+int
+ieee80211_tdls_cfg_disc_int(struct ieee80211vap *vap, int value);
+
+int
+ieee80211_tdls_get_smoothed_rssi(struct ieee80211vap *vap, struct ieee80211_node *ni);
+
+int
+ieee80211_tdls_add_bridge_entry_for_peer(struct ieee80211_node *peer_ni);
+
+int
+ieee80211_tdls_disable_peer_link(struct ieee80211_node *ni);
+
+int
+ieee80211_tdls_enable_peer_link(struct ieee80211vap *vap, struct ieee80211_node *ni);
+
+void
+ieee80211_tdls_chan_switch_timeout(unsigned long arg);
+
+int
+ieee80211_tdls_send_chan_switch_req(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data);
+
+int
+ieee80211_tdls_send_chan_switch_resp(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data);
+
+void
+ieee80211_tdls_recv_disc_req(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+void
+ieee80211_tdls_recv_disc_resp(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+void
+ieee80211_tdls_recv_setup_req(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+void
+ieee80211_tdls_recv_setup_resp(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+void
+ieee80211_tdls_recv_setup_confirm(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+void
+ieee80211_tdls_recv_teardown(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+void
+ieee80211_tdls_recv_chan_switch_req(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+void
+ieee80211_tdls_recv_chan_switch_resp(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls);
+
+int
+ieee80211_tdls_send_action_frame(struct net_device *ndev,
+		struct ieee80211_tdls_action_data *data);
+
+int
+ieee80211_tdls_send_event(struct ieee80211_node *peer_ni,
+		enum ieee80211_tdls_event event, void *data);
+
+int
+ieee80211_tdls_return_to_base_channel(struct ieee80211vap *vap, int ap_disassoc);
+
+int
+ieee80211_tdls_del_key(struct ieee80211vap *vap, struct ieee80211_node *ni);
+
+void
+ieee80211_tdls_free_peer_ps_info(struct ieee80211vap *vap);
+
+void
+ieee80211_tdls_vattach(struct ieee80211vap *vap);
+
+void
+ieee80211_tdls_vdetach(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_node_leave(struct ieee80211vap *vap, struct ieee80211_node *ni);
+
+int
+ieee80211_tdls_teardown_all_link(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_free_all_peers(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_free_all_inactive_peers(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_clear_disc_timer(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_start_disc_timer(struct ieee80211vap *vap);
+
+void
+ieee80211_tdls_node_expire(unsigned long arg);
+
+int
+ieee80211_tdls_start_node_expire_timer(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_init_node_expire_timer(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_clear_node_expire_timer(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_set_link_timeout(struct ieee80211vap *vap,
+		struct ieee80211_node *ni);
+
+int
+ieee80211_tdls_pend_disassociation(struct ieee80211vap *vap,
+	enum ieee80211_state nstate, int arg);
+
+int
+ieee80211_tdls_init_chan_switch_timer(struct ieee80211vap *vap);
+
+int
+ieee80211_tdls_start_channel_switch(struct ieee80211vap *vap,
+		struct ieee80211_node *peer_ni);
+
+void
+ieee80211_tdls_update_node_status(struct ieee80211_node *ni,
+		enum ni_tdls_status stats);
+
+void
+ieee80211_tdls_update_uapsd_indicication_windows(struct ieee80211vap *vap);
+
+int ieee80211_tdls_update_link_timeout(struct ieee80211vap *vap);
+
+void ieee80211_tdls_trigger_rate_detection(unsigned long arg);
+
+#endif
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_tpc.h b/drivers/qtn/include/kernel/net80211/ieee80211_tpc.h
new file mode 100644
index 0000000..bc657c2
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_tpc.h
@@ -0,0 +1,136 @@
+/*-
+ * Copyright (c) 2013 Quantenna
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_tpc.h 5000 2013-01-25 10:22:59Z casper $
+ */
+#ifndef _NET80211_IEEE80211_TPC_H
+#define _NET80211_IEEE80211_TPC_H
+
+#include <net80211/ieee80211_var.h>	/* struct ieee80211com */
+#include <net80211/ieee80211_node.h>	/* struct ieee80211_node */
+#include <linux/timer.h>		/* struct timer_list */
+#include <linux/types.h>		/* struct list_head */
+
+#define TPC_INTERVAL_DEFAULT		30	/* default tpc request interval */
+#define TPC_INTERVAL_MIN		1	/* minimum tpc interval */
+
+/* #define USE_IEEE80211_DPRINT */
+#ifndef USE_IEEE80211_DPRINT
+#define TPC_DBG(vap, fmt, arg...)	printk(KERN_INFO "TPC:" fmt, ##arg)
+#else
+#define TPC_DBG(vap, fmt, arg...)	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_TPC, fmt, ##arg)
+#endif
+
+struct ieee80211_tpc_query_info {
+	void	*target;	/* opaque back-pointer set at init -- TODO confirm concrete type (likely struct ieee80211com *, per ieee80211_tpc_query_init) */
+	int	is_run;	/* non-zero while the periodic TPC query is running */
+	int32_t	query_interval;	/* interval between TPC requests, seconds (see TPC_INTERVAL_DEFAULT/MIN) */
+	struct timer_list query_timer;	/* kernel timer driving the periodic query */
+};
+
+struct pwr_info_per_vap
+{
+	int8_t			max_in_minpwr;	/* accumulator used by get_max_in_minpwr() -- presumably the max over per-node minimum powers; confirm */
+	struct ieee80211vap	*vap;	/* VAP this record belongs to */
+};
+
+enum ieee80211_measurement_status {
+	MEAS_STATUS_IDLE = 0,	/* no measurement in progress */
+	MEAS_STATUS_RUNNING,	/* a measurement has been triggered and is active */
+	MEAS_STATUS_DISCRAD	/* NOTE(review): likely a typo for "DISCARD"; renaming would break external users of this enumerator, so left as-is */
+};
+
+struct ieee80211_global_measure_info {
+	enum ieee80211_measurement_status status;	/* current state of the single global measurement */
+	struct ieee80211_node	*ni;	/* peer node the measurement involves */
+	u_int8_t frame_token;	/* token of the measurement request frame -- presumably; confirm */
+	u_int8_t type;	/* measurement type; selects which union arm below is valid */
+	union {
+		struct {
+			u_int8_t channel;
+			u_int64_t tsf;
+			u_int16_t duration_tu;
+		} basic;	/* Basic measurement request (channel, start TSF, duration in TUs) */
+		struct {
+			u_int8_t channel;
+			u_int64_t tsf;
+			u_int16_t duration_tu;
+		} cca;	/* CCA measurement request */
+		struct {
+			u_int8_t channel;
+			u_int64_t tsf;
+			u_int16_t duration_tu;
+		} rpi;	/* RPI histogram measurement request */
+		struct {
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int16_t upper_interval;
+			u_int16_t duration_tu;
+		} chan_load;	/* channel-load measurement request */
+		struct {
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int16_t upper_interval;
+			u_int16_t duration_tu;
+		} noise_his;	/* noise-histogram measurement request */
+	} param;	/* request parameters, keyed by 'type' */
+	union {
+		uint8_t	basic;	/* basic report value -- presumably the report map octet */
+		uint8_t cca;	/* CCA report value -- presumably the busy fraction octet */
+		uint8_t rpi[MEAS_RPI_HISTOGRAM_SIZE];	/* RPI histogram densities */
+		uint8_t chan_load;	/* channel load value */
+		struct {
+			u_int8_t anpi;	/* average noise power indicator -- presumably */
+			u_int8_t ipi[11];	/* IPI density bins */
+		} noise_his;
+	} results;	/* measurement results, keyed by 'type' */
+};
+
+int ieee80211_tpc_query_init(struct ieee80211_tpc_query_info *info, struct ieee80211com *ic, int query_interval);
+void ieee80211_tpc_query_deinit(struct ieee80211_tpc_query_info *info);
+int ieee80211_tpc_query_config_interval(struct ieee80211_tpc_query_info *info, int interval);
+int ieee80211_tpc_query_get_interval(struct ieee80211_tpc_query_info *info);
+int ieee80211_tpc_query_start(struct ieee80211_tpc_query_info *info);
+int ieee80211_tpc_query_stop(struct ieee80211_tpc_query_info *info);
+int ieee80211_tpc_query_state(struct ieee80211_tpc_query_info *info);
+int8_t ieee80211_update_tx_power(struct ieee80211com *ic, int8_t txpwr);
+int ieee80211_parse_local_max_txpwr(struct ieee80211vap *vap, struct ieee80211_scanparams *scan);
+void get_max_in_minpwr(void *arg, struct ieee80211_node *ni);
+void ieee80211_doth_measurement_init(struct ieee80211com *ic);
+void ieee80211_doth_measurement_deinit(struct ieee80211com *ic);
+void ieee80211_action_finish_measurement(struct ieee80211com *ic, u_int8_t result);
+int ieee80211_action_trigger_measurement(struct ieee80211com *ic);
+int ieee80211_action_measurement_report_fail(struct ieee80211_node *ni,
+					u_int8_t type,
+					u_int8_t report_mode,
+					u_int8_t token,
+					u_int8_t meas_token);
+
+#endif
diff --git a/drivers/qtn/include/kernel/net80211/ieee80211_var.h b/drivers/qtn/include/kernel/net80211/ieee80211_var.h
new file mode 100644
index 0000000..986d9f9
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/ieee80211_var.h
@@ -0,0 +1,2602 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_var.h 2607 2007-07-25 15:20:59Z mrenzmann $
+ */
+#ifndef _NET80211_IEEE80211_VAR_H_
+#define _NET80211_IEEE80211_VAR_H_
+
+/*
+ * Definitions for IEEE 802.11 drivers.
+ */
+#define IEEE80211_DEBUG
+
+#include <compat.h>
+#include "qtn/lhost_muc_comm.h"
+#ifdef CONFIG_QVSP
+#include "qtn/qvsp_data.h"
+#endif
+
+#include "net80211/ieee80211_linux.h"
+
+#include <common/queue.h>
+#include <common/ruby_pm.h>
+
+#include "net80211/_ieee80211.h"
+#include "net80211/ieee80211.h"
+#include "net80211/ieee80211_crypto.h"
+#include "net80211/ieee80211_ioctl.h"		/* for ieee80211_stats */
+#include "net80211/ieee80211_node.h"
+#include "net80211/ieee80211_power.h"
+#include "net80211/ieee80211_proto.h"
+#include "net80211/ieee80211_scan.h"
+#include "net80211/ieee80211_tpc.h"
+#include "net80211/ieee80211_tdls.h"
+#include "net80211/ieee80211_chan_select.h"
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#include <linux/if_bridge.h>
+#include <linux/net/bridge/br_public.h>
+#endif
+
+#define	IEEE80211_BGSCAN_INTVAL_MIN	15	/* min bg scan intvl (secs) */
+#define	IEEE80211_BGSCAN_INTVAL_DEFAULT	(5*60)	/* default bg scan intvl */
+
+#define	IEEE80211_BGSCAN_IDLE_MIN	100	/* min idle time (ms) */
+#define	IEEE80211_BGSCAN_IDLE_DEFAULT	250	/* default idle time (ms) */
+
+#define IEEE80211_USE_QTN_BGSCAN(vap)	\
+	(((vap->iv_ic)->ic_flags_ext & IEEE80211_FEXT_REPEATER) &&	\
+	((vap)->iv_opmode == IEEE80211_M_STA) &&	\
+	((vap)->iv_ic->ic_bsschan != IEEE80211_CHAN_ANYC))
+
+#define IEEE80211_COVERAGE_CLASS_MAX	31	/* max coverage class */
+#define IEEE80211_REGCLASSIDS_MAX	10	/* max regclass id list */
+
+#define	IEEE80211_PS_SLEEP		0x1	/* STA is in power saving mode */
+#define	IEEE80211_PS_MAX_QUEUE		50	/* maximum saved packets */
+
+#define	IEEE80211_XR_BEACON_FACTOR	3	/* factor between xr Beacon interval and normal beacon interval */
+#define	IEEE80211_XR_DEFAULT_RATE_INDEX	0
+#define	IEEE80211_XR_FRAG_THRESHOLD	540
+#define	IEEE80211_BLACKLIST_TIMEOUT	90	/* Default blacklist timeout (secs) */
+
+#define	IEEE80211_FIXED_RATE_NONE	-1
+
+#define IEEE80211_MIN_NON_OCCUPANCY_PERIOD	5
+#define IEEE80211_MAX_NON_OCCUPANCY_PERIOD	1800
+
+#define IEEE80211_NUM_BEACONS_TO_MISS	100 /* beacons allowed to miss before rescan */
+
+#define	IEEE80211_MS_TO_TU(x)		(((x) * 1000) / 1024)
+#define	IEEE80211_TU_TO_MS(x)		(IEEE80211_TU_TO_USEC(x) / 1000)
+#define	IEEE80211_TU_TO_USEC(x)		((x) * 1024)
+#define	IEEE80211_TU_TO_JIFFIES(x)	((IEEE80211_TU_TO_MS(x) * HZ) / 1000)
+#define	IEEE80211_JIFFIES_TO_TU(x)	IEEE80211_MS_TO_TU((x) * 1000 / HZ)
+#define IEEE80211_SEC_TO_USEC(x)	((x) * 1000 * 1000)
+#define IEEE80211_MS_TO_USEC(x)		((x) * 1000)
+#define IEEE80211_USEC_TO_MS(x)		((x) / 1000)
+#define IEEE80211_MS_TO_JIFFIES(x)	((x) * HZ / 1000)
+
+
+#define IEEE80211_RX_AGG_TIMEOUT_DEFAULT	(IEEE80211_USEC_TO_MS(QTN_RX_REORDER_BUF_TIMEOUT_US))
+
+#define IEEE80211_MAX_AMPDU_SUBFRAMES			(64)
+#define IEEE80211_TX_BA_REQUEST_RETRY_TIMEOUT		(5 * HZ)
+#define IEEE80211_TX_BA_REQUEST_NEW_ATTEMPT_TIMEOUT	(15 * HZ)
+#define IEEE80211_TX_BA_REQUEST_RELAX_TIMEOUT		(75/*ms*/ * HZ / 1000)
+#define IEEE80211_TX_BA_REQUEST_LONG_RELAX_TIMEOUT	(1 * HZ)
+
+#define	IEEE80211_APPIE_MAX	1024
+
+#define IEEE80211_QTN_NUM_RF_STREAMS	4
+
+#define IEEE80211K_RM_MEASURE_STA_TIMEOUT	(HZ / 10)
+#define IEEE80211_MEASUREMENT_REQ_TIMEOUT(offset, du)	(((offset + du) * HZ / 1000) + HZ)
+
+#define IEEE80211_RSSI_FACTOR	10
+
+#define IEEE80211_PWRCONSTRAINT_VAL(ic) \
+	(((ic)->ic_bsschan->ic_maxregpower - (ic)->ic_pwr_constraint) > 0 ? \
+	    (ic)->ic_pwr_constraint : 0)
+
+#define	SM(_v, _f)	(((_v) << _f##_S) & _f)
+#define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
+
+/* Variants of the SM and MS macros that don't require a shift position macro */
+#define	MS_OP(_v, _f)	(((_v) & (_f)) >> __builtin_ctz(_f))
+#define	SM_OP(_v, _f)	(((_v) << __builtin_ctz(_f)) & (_f))
+
+#define	MIN(_a, _b)	((_a)<(_b)?(_a):(_b))
+#define	MAX(_a, _b)	((_a)>(_b)?(_a):(_b))
+#define ABS(_x)		(((_x) > 0) ? (_x) : (0 - (_x)))
+
+#define IS_MULTIPLE_BITS_SET(_x)	(((unsigned)(_x)) & (((unsigned)(_x)) - 1))
+
+/* For Little-endian */
+#define ntohll(x)  be64_to_cpu(x)
+#define htonll(x)  cpu_to_be64(x)
+
+/* Power constraint override */
+
+#define PWR_CONSTRAINT_SAVE_INIT	0xff
+#define PWR_CONSTRAINT_PC_DEF		19
+#define PWR_CONSTRAINT_RSSI_DEF		42
+#define PWR_CONSTRAINT_OFFSET		4
+
+struct ieee80211_pc_over {
+	uint8_t					pco_set;	/* non-zero while the override is in effect -- TODO confirm */
+	struct timer_list		pco_timer;	/* timer associated with the override -- presumably bounds its lifetime */
+	uint16_t				pco_pwr_constraint;	/* overriding power constraint value (see PWR_CONSTRAINT_PC_DEF) */
+	uint8_t					pco_rssi_threshold;	/* RSSI threshold gating the override (see PWR_CONSTRAINT_RSSI_DEF) */
+	uint8_t					pco_sec_offset;	/* offset value (see PWR_CONSTRAINT_OFFSET) -- exact use not visible in this header */
+	uint8_t					pco_pwr_constraint_save;	/* saved pre-override constraint; PWR_CONSTRAINT_SAVE_INIT means nothing saved */
+};
+
+#define	IEEE80211_EXTENDER_DEFAULT_MBS_WGT		10
+#define	IEEE80211_EXTENDER_DEFAULT_RBS_WGT		6
+#define IEEE80211_EXTENDER_DEFAULT_MBS_BEST_RSSI	20
+#define	IEEE80211_EXTENDER_DEFAULT_RBS_BEST_RSSI	20
+#define IEEE80211_EXTENDER_DEFAULT_MBS_RSSI_MARGIN	6
+
+#define IEEE80211_EXTWDS_MAX_PSEUDO_RSSI		70
+#define IEEE80211_EXTWDS_MIN_PSEUDO_RSSI		0
+#define IEEE80211_EXTWDS_MBS_BEST_RATE_RSSI		75
+#define IEEE80211_EXTWDS_BEST_RATE_BDRY_RSSI		30
+
+#define IEEE80211_PSEUDO_RSSI_TRANSITON_FACTOR		90
+
+#define IEEE80211_EXTENDER_SCAN_MBS_INTERVAL		15	/* seconds */
+#define IEEE80211_EXTENDER_MBS_INVALID_TIMEOUT		5	/* seconds */
+
+#define STA_DFS_STRICT_TX_CHAN_CLOSE_TIME_MAX		1000	/* ms */
+#define STA_DFS_STRICT_TX_CHAN_CLOSE_TIME_MIN		1	/* ms */
+#define STA_DFS_STRICT_TX_CHAN_CLOSE_TIME_DEFAULT	1000	/* ms */
+
+#ifdef CONFIG_QHOP
+#define RBS_DFS_TX_CHAN_CLOSE_TIME_MAX           1000    /* ms */
+#define RBS_DFS_TX_CHAN_CLOSE_TIME_MIN           1       /* ms */
+#define RBS_DFS_TX_CHAN_CLOSE_TIME_DEFAULT       500    /* ms */
+#endif
+
+/*
+ * All Non-DFS channels are AVAILABLE by default
+ * [AP] Any DFS channel is set to AVAILABLE:
+ *	(i) only after CAC_DONE,
+ *	(ii) and no radar was found
+ * [STA] All DFS channels are NON_AVAILABLE by default.
+ *
+ */
+#define IEEE80211_CHANNEL_STATUS_NON_AVAILABLE			(0x1)
+#define IEEE80211_CHANNEL_STATUS_AVAILABLE			(0x2)
+/* A DFS channel will stay in the NOT_AVAILABLE_RADAR_DETECTED state during the non-occupancy period */
+#define IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED	(0x4)
+/* All DFS channels are marked as NOT_AVAILABLE_CAC_REQUIRED by default */
+#define IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED	(0x8)
+
+#define ieee80211_is_chan_available(channel_to_check) \
+	(IEEE80211_CHANNEL_STATUS_AVAILABLE == ic->ic_get_chan_availability_status_by_chan_num(ic, (channel_to_check)))
+#define ieee80211_is_chan_not_available(channel_to_check) \
+	(IEEE80211_CHANNEL_STATUS_NON_AVAILABLE == ic->ic_get_chan_availability_status_by_chan_num(ic, (channel_to_check)))
+#define ieee80211_is_chan_radar_detected(channel_to_check) \
+	(IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED == ic->ic_get_chan_availability_status_by_chan_num(ic, (channel_to_check)))
+#define ieee80211_is_chan_cac_required(channel_to_check) \
+	(IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED == ic->ic_get_chan_availability_status_by_chan_num(ic, (channel_to_check)))
+
+
+#define IEEE80211_COC_REASON_SUCCESS				(0)
+#define IEEE80211_COC_REASON_REGION_IS_US			(-1)
+#define IEEE80211_COC_REASON_CSA_NOT_TRIGGERED			(-2)
+#define IEEE80211_COC_BEST_NON_DFS_CHANNEL_NOT_FOUND		(-3)
+#define IEEE80211_COC_BEST_AVAILABLE_DFS_CHANNEL_NOT_FOUND	(-4)
+#define IEEE80211_COC_BEST_CHANNEL_NOT_FOUND			(-5)
+#define IEEE80211_COC_REASON_ANY				(-6)
+
+#ifndef SYSTEM_BUILD
+#define ic2dev(ic)      ((struct ieee80211vap *)(TAILQ_FIRST(&(ic)->ic_vaps)) ? \
+                        ((struct ieee80211vap *)(TAILQ_FIRST(&(ic)->ic_vaps)))->iv_dev : NULL)
+#else
+#define ic2dev(ic)      NULL
+#endif
+
+#define COC_DBG_QEVT(qevtdev, ...)    do {\
+                                                printk(__VA_ARGS__);\
+                                                ieee80211_eventf(qevtdev, __VA_ARGS__);\
+                                        } while (0)
+
+/**
+ * PM LEVEL trigger reasons from WLAN module only.
+ */
+enum IEEE80211_PM_LEVEL_CHANGE_TRIGGER_REASON {
+	IEEE80211_PM_LEVEL_REMAIN_CHANNEL_WORK = 1,
+	IEEE80211_PM_LEVEL_CCA_WORK,
+	IEEE80211_PM_LEVEL_TPUT_ABOVE_UPPER_THRSH,
+	IEEE80211_PM_LEVEL_TPUT_BELOW_LOWER_THRSH,
+	IEEE80211_PM_LEVEL_VAP_ATTACH,
+	IEEE80211_PM_LEVEL_VAP_DETACH,
+	IEEE80211_PM_LEVEL_RCVD_ADDBA_REQ,
+	IEEE80211_PM_LEVEL_RCVD_ADDBA_RESP,
+	IEEE80211_PM_LEVEL_SWBCN_MISS,
+	IEEE80211_PM_LEVEL_JOIN_BSS,
+	IEEE80211_PM_LEVEL_LEAVE_BSS,
+	IEEE80211_PM_LEVEL_INACTIVITY_IN_WDS,
+	IEEE80211_PM_LEVEL_NODE_JOIN,
+	IEEE80211_PM_LEVEL_NODE_LEFT,
+	IEEE80211_PM_LEVEL_DEVICE_INIT,
+	IEEE80211_PM_LEVEL_SWBCN_MISS_2,
+	IEEE80211_PM_LEVEL_NEW_STATE_IEEE80211_S_RUN,
+	IEEE80211_PM_LEVEL_SCAN_START,
+	IEEE80211_PM_LEVEL_SCAN_STOP,
+	IEEE80211_PM_LEVEL_SIWFREQ,
+	IEEE80211_PM_LEVEL_SIWSCAN,
+	IEEE80211_PM_LEVEL_STOP_OCAC_SDFS,
+	IEEE80211_PM_LEVEL_BCN_SCHEME_CHANGED_FOR_2VAPS,
+	IEEE80211_PM_LEVEL_OCAC_SDFS_TIMER,
+	IEEE80211_PM_LEVEL_BCN_SCHEME_CHANGED,
+	IEEE80211_PM_LEVEL_CAC_COMPLETED,
+	IEEE80211_PM_LEVEL_CSA_DFS_ACTION,
+	IEEE80211_PM_LEVEL_ICAC_COMPLETE_ACTION,
+	IEEE80211_PM_LEVEL_REASON_MAX,
+};
+
+/*
+ * 802.11 control state is split into a common portion that maps
+ * 1-1 to a physical device and one or more "Virtual AP's" (VAP)
+ * that are bound to an ieee80211com instance and share a single
+ * underlying device.  Each VAP has a corresponding OS device
+ * entity through which traffic flows and that applications use
+ * for issuing ioctls, etc.
+ */
+
+/*
+ * Data common to one or more virtual AP's.  State shared by
+ * the underlying device and the net80211 layer is exposed here;
+ * e.g. device-specific callbacks.
+ */
+struct ieee80211vap;
+
+enum ieee80211_shortrange_flags {
+	IEEE80211_BKUP_TXPOWER_NORMAL = 0,
+	IEEE80211_APPLY_LOWGAIN_TXPOWER = 1,
+	IEEE80211_APPLY_TXPOWER_NORMAL = 2,
+	IEEE80211_INIT_TXPOWER_TABLE = 3
+};
+
+enum ieee8211_scs_cnt {
+	IEEE80211_SCS_CNT_TRIGGER = 0,
+	IEEE80211_SCS_CNT_QOSNULL_NOTREADY,
+	IEEE80211_SCS_CNT_IN_SCAN,
+	IEEE80211_SCS_CNT_RADAR,
+	IEEE80211_SCS_CNT_TRAFFIC_HEAVY,
+	IEEE80211_SCS_CNT_IOCTL,
+	IEEE80211_SCS_CNT_COMPLETE,
+	IEEE80211_SCS_CNT_MAX,
+};
+
+enum ieee80211_phy_mode{
+        IEEE80211_11AC_ONLY = 0x1,
+        IEEE80211_11N_ONLY = 0x2
+};
+
+#define	IEEE80211_MAX_TDLS_NODES	16
+
+struct ieee80211_tdls_scs_stats {
+	uint8_t s_addr[IEEE80211_ADDR_LEN];	/* Sender address */
+	uint8_t r_addr[IEEE80211_ADDR_LEN];	/* Receiver address */
+	uint16_t tx_time;	/* Tx time - us */
+	uint16_t is_latest;	/* latest statistics data flag */
+} __packed;
+
+struct ieee80211_tdls_scs_entry {
+	LIST_ENTRY(ieee80211_tdls_scs_entry) entry;
+	struct ieee80211_tdls_scs_stats stats;
+};
+
+struct ieee80211_scs {
+	uint32_t		scs_smpl_dwell_time;
+	uint32_t		scs_sample_intv;
+	uint32_t		scs_thrshld_smpl_pktnum;
+	uint32_t		scs_thrshld_smpl_airtime;
+	uint32_t		scs_thrshld_atten_inc;
+	uint32_t		scs_thrshld_dfs_reentry;
+	uint32_t		scs_thrshld_dfs_reentry_minrate;
+	uint32_t		scs_thrshld_dfs_reentry_intf;
+	uint32_t		scs_thrshld_loaded;               /* unit: thousandth of air time */
+	uint32_t		scs_thrshld_aging_nor;            /* unit: minute */
+	uint32_t		scs_thrshld_aging_dfsreent;       /* unit: minute */
+	uint16_t		scs_enable;	/* 1 - channel switching can be triggered; 0 - don't change channel */
+	int16_t			scs_debug_enable;
+	uint16_t		scs_smpl_enable;
+	uint8_t			scs_stats_on;	/* 1 - scs stats on; 0 - scs stats off */
+	uint8_t			scs_report_only;
+	struct timer_list	scs_compare_timer;
+	uint32_t		scs_cca_idle_thrshld;
+	uint32_t		scs_cca_intf_hi_thrshld;
+	uint32_t		scs_cca_intf_lo_thrshld;
+	uint32_t		scs_cca_intf_ratio;
+	uint32_t		scs_cca_intf_dfs_margin;
+	uint32_t		scs_pmbl_err_thrshld;
+	uint32_t		scs_cca_sample_dur;
+#define SCS_CCA_INTF_SMTH_FCTR_NOXP		0
+#define SCS_CCA_INTF_SMTH_FCTR_XPED		1
+#define SCS_CCA_INTF_SMTH_FCTR_NUM		2
+	uint8_t			scs_cca_intf_smth_fctr[SCS_CCA_INTF_SMTH_FCTR_NUM];
+#define SCS_RSSI_SMTH_FCTR_UP			0
+#define SCS_RSSI_SMTH_FCTR_DOWN			1
+#define SCS_RSSI_SMTH_FCTR_NUM			2
+	uint8_t			scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_NUM];
+	uint8_t			scs_chan_mtrc_mrgn;
+	uint8_t			scs_leavedfs_chan_mtrc_mrgn;/* DFS-to-Non-DFS channel switch margin */
+	int8_t			scs_atten_adjust;
+	uint32_t		scs_cnt[IEEE80211_SCS_CNT_MAX];
+	uint16_t		scs_atten_sw_enable;
+	int16_t			scs_last_smpl_chan;	/* index into the channel array */
+	struct brcm_rxglitch_thrshld_pair *scs_brcm_rxglitch_thrshlds;
+	uint32_t		scs_brcm_rxglitch_thrshlds_scale;
+	uint32_t		scs_pmbl_err_smth_fctr;
+	uint32_t		scs_pmbl_err_range;
+	uint32_t		scs_pmbl_err_mapped_intf_range;    /* pmbl err range mapped to percent of cca intf */
+	uint32_t		scs_sp_wf;              /* short preamble weight factor */
+	uint32_t		scs_lp_wf;              /* long preamble weight factor */
+	uint32_t		scs_sp_err_smthed;                 /* 1s based */
+	uint32_t		scs_lp_err_smthed;                 /* 1s based */
+	uint32_t		scs_cca_intf_smthed;
+	uint32_t		scs_cca_intf_smthed_jiffies;
+	uint16_t		scs_pmp_rpt_cca_smth_fctr;
+	uint16_t		scs_pmp_rx_time_smth_fctr;
+	uint16_t		scs_pmp_tx_time_smth_fctr;
+	uint16_t		scs_pmp_stats_stable_percent;
+	uint16_t		scs_pmp_stats_stable_range;
+	uint16_t		scs_pmp_stats_clear_interval;
+	uint16_t		scs_as_rx_time_smth_fctr;
+	uint16_t		scs_as_tx_time_smth_fctr;
+	uint16_t		scs_cca_idle_smthed;
+	uint16_t		scs_cca_idle_smth_fctr;
+	uint16_t		scs_cca_threshold_type;		/* 0:normal, 1:less sensitive, 2:more sensitive */
+	uint16_t		scs_sample_type;
+	uint16_t		scs_des_smpl_chan;	/* desired OC sampling channel, index into the channel array */
+	uint16_t		scs_smpl_chan_offset;	/* the offset of a sampling channel within a channel set,
+							   e.g. for 80MHz channel set, it's one of 0, 1, 2, 3 */
+
+	uint16_t		scs_burst_enable; /* flag of enable/disable burst channel switching */
+	uint16_t		scs_burst_window; /* sliding window of time of checking burst */
+	uint16_t		scs_burst_thresh; /* burst threshold */
+	uint16_t		scs_burst_pause_time; /* pause time after burst happens */
+	uint16_t		scs_burst_force_switch; /* enable/disable to switch channel */
+	uint16_t		scs_burst_is_paused;
+	unsigned long		scs_burst_pause_jiffies;
+	uint32_t		scs_burst_queue[IEEE80211_SCS_BURST_THRESH_MAX]; /* queue for SCS event whose life time is valid */
+
+	ATH_LIST_HEAD(, ieee80211_tdls_scs_entry) scs_tdls_list[IEEE80211_NODE_HASHSIZE];
+	spinlock_t		scs_tdls_lock;
+};
+
+#define IEEE80211_SCS_CNT_INC(_scs, _id)	((_scs)->scs_cnt[_id]++)	/* bump SCS event counter _id */
+#define IEEE80211_IS_SCS_OFF_CHAN_SAMPLING(_ic)	((_ic)->ic_flags_qtn & IEEE80211_QTN_SAMP_CHAN)	/* nonzero while the off-channel sampling flag is set */
+
+#define SCS_BEST_CHAN_INVALID		0	/* sentinel: no best channel selected */
+
+#define IEEE80211_SCS_MEASURE_INIT_TIMER	3	/* initial measure timer value; units not visible here - presumably seconds */
+#define IEEE80211_SCS_MEASURE_TIMER_INTVAL	5	/* periodic measure interval; presumably seconds */
+#define IEEE80211_MAX_STA_CCA_ENABLED		2
+#define IEEE80211_CCA_IDLE_THRSHLD		40	/* CCA thresholds; presumably percent - confirm at users */
+#define IEEE80211_CCA_INTFR_HIGH_THRSHLD	50
+#define IEEE80211_CCA_INTFR_LOW_THRSHLD		30
+#define IEEE80211_CCA_INTFR_RATIO		20
+#define IEEE80211_CCA_INTFR_DFS_MARGIN		0
+#define IEEE80211_PMBL_ERR_THRSHLD		300	/* preamble-error threshold */
+
+#define SCS_NODE_TRAFFIC_IDLE        0	/* per-node traffic classification */
+#define SCS_NODE_TRAFFIC_LOADED      1
+#define SCS_NODE_TRAFFIC_TYPE_NUM    2	/* number of traffic classes */
+#define SCS_NODE_NOTINTFED           0	/* per-node interference classification */
+#define SCS_NODE_INTFED              1
+#define SCS_NODE_INTF_TYPE_NUM       2	/* number of interference classes */
+
+struct ieee80211_ocac_counts {	/* OCAC diagnostic counters; each field tallies the event its name describes (producers not visible here) */
+	uint32_t		ap_not_running;
+	uint32_t		chan_scanning;
+	uint32_t		curchan_dfs;
+	uint32_t		init_offchan;
+	uint32_t		no_offchan;
+	uint32_t		pick_offchan;
+	uint32_t		invalid_offchan;
+	uint32_t		set_bcn_intval;
+	uint32_t		restore_bcn_intval;
+	uint32_t		pm_update;
+	uint32_t		unsupported_mbssid;
+	uint32_t		beacon_scheme0;
+	uint32_t		wds_exist;
+	uint32_t		set_run;
+	uint32_t		set_pend;
+	uint32_t		skip_set_run;
+	uint32_t		skip_set_pend;
+	uint32_t		alloc_skb_error;
+	uint32_t		set_frame_error;
+	uint32_t		hostlink_err;	/* hostlink command failures/successes (presumed) */
+	uint32_t		hostlink_ok;
+	uint32_t		cac_failed;
+	uint32_t		cac_success;
+	uint32_t		radar_detected;
+	uint32_t		csw_rpt_only;	/* csw_* = channel-switch outcomes (presumed from names) */
+	uint32_t		csw_fail_intf;
+	uint32_t		csw_fail_radar;
+	uint32_t		csw_fail_csa;
+	uint32_t		csw_success;
+	uint32_t		clean_stats_reset;
+	uint32_t		clean_stats_start;
+	uint32_t		clean_stats_stop;
+	uint32_t		tasklet_off_chan;	/* tasklet_*/intr_* pairs: execution context of the event (presumed) */
+	uint32_t		tasklet_data_chan;
+	uint32_t		intr_off_chan;
+	uint32_t		intr_data_chan;
+	uint32_t		cac_in_neighbourhood;
+	uint32_t		no_channel_change_eu;
+};
+
+struct ieee80211_ocac_params {	/* tunable parameters that drive off-channel CAC scheduling */
+	uint16_t		traffic_ctrl;	/* use qosnull frame to control the traffic */
+	uint16_t		secure_dwell_ms;	/* milliseconds, the time on off channel
+								within one off-channel action, using qosnull
+								with large NAV to protect the traffic */
+	uint16_t		dwell_time_ms;	/* milliseconds, the time on off channel
+							within 1 beacon interval */
+	uint16_t		duration_secs;	/* seconds, the total time for one channel */
+	uint16_t		cac_time_secs;	/* seconds, the total time on off channel
+							for one channel */
+	uint16_t		wea_dwell_time_ms;	/* milliseconds, the time on weather channel
+							within 1 beacon interval */
+	uint32_t		wea_duration_secs;	/* seconds, the total time for weather channel */
+	uint32_t		wea_cac_time_secs;	/* seconds, the total time on off channel
+							for weather channel */
+	uint16_t		thresh_fat;	/* percent, the threshold of FAT used to decide
+							to run off-channel CAC */
+	uint16_t		thresh_traffic;	/* percent, the threshold of traffic used to
+							decide to run ocac */
+	uint16_t		thresh_fat_dec;	/* percent, the threshold of consecutive FAT decrease,
+							used to monitor the traffic variation */
+	uint16_t		thresh_cca_intf;	/* percent, the threshold of cca interference to
+							decide to jump to off channel */
+	uint16_t		offset_txhalt;	/* milliseconds, the offset after sending
+							beacon to halt tx in MuC */
+	uint16_t		offset_offchan;	/* milliseconds, the offset after halt tx to
+							switch to off channel in MuC */
+	uint16_t		timer_interval;	/* the ocac_timer interval */
+	uint16_t		beacon_interval;	/* TUs, the beacon interval for OCAC */
+	uint16_t		auto_first_dfs_channel; /* Auto SDFS channel supplied */
+};
+
+struct ieee80211_ocac_cfg {	/* user/driver OCAC configuration */
+	uint8_t			ocac_enable;	/* nonzero enables OCAC (presumed) */
+	uint8_t			ocac_debug_level;	/* verbosity for OCAC debug output */
+	uint8_t			ocac_report_only;	/* report mode, don't switch channel */
+	uint16_t		ocac_chan_ieee;		/* ieee channel number, "0" means auto */
+	uint16_t		ocac_timer_expire_init;	/* the ocac_timer expire when starting ocac */
+	char			ocac_region[4];		/* the radar mode indicated by region */
+	struct ieee80211_ocac_params	ocac_params;	/* scheduling/threshold parameters, see above */
+};
+
+#define QTN_OCAC_TSF_LOG_DEPTH	16	/* number of entries in the TSF log */
+struct ieee80211_ocac_tsflog {
+	uint32_t		log_index;	/* next slot to write (presumed circular; wrap logic not visible here) */
+	uint64_t		tsf_log[QTN_OCAC_TSF_LOG_DEPTH][OCAC_TSF_LOG_NUM];	/* logged TSF timestamps */
+};
+
+struct ieee80211_ocac_rx_state {	/* OCAC state learned from a received beacon; guarded by ocac_lock (see struct ieee80211_ocac) */
+	uint64_t		timestamp;		/* jiffies, when this beacon was received */
+	uint8_t			ta[IEEE80211_ADDR_LEN];	/* Transmitter Address: sender of this beacon */
+	uint8_t			state;			/* NONE, BACKOFF or ONGOING */
+	uint8_t			param;			/* meaning not visible here; set by the beacon receiver - confirm at producer */
+};
+
+struct ieee80211_ocac {	/* runtime state for Off-Channel CAC */
+#define OCAC_UNAVAILABLE	0x0
+#define OCAC_AVAILABLE		0x1				/* No neighbouring AP doing OCAC */
+	uint8_t			ocac_available;			/* OCAC_AVAILABLE / OCAC_UNAVAILABLE */
+	uint8_t			ocac_backoff_in_progress;	/* 1 => if BACKOFF sequence is in progress */
+	uint8_t			ocac_backoff_count;		/* [8, 64]; random backoff */
+	struct completion	ocac_backoff_completion;	/* Signalled when random backoff is done */
+
+	uint8_t			ocac_running;			/* nonzero while OCAC is active (presumed) */
+	uint8_t			ocac_bcn_intval_set;		/* beacon interval was overridden for OCAC (presumed) */
+	uint8_t			ocac_repick_dfs_chan;		/* request to re-select the DFS channel (name-based; confirm) */
+	uint32_t		ocac_accum_duration_secs;	/* seconds, the accumulated off-channel
+									CAC time for one channel*/
+	uint32_t		ocac_accum_cac_time_ms;		/* milliseconds, the accumulated time on
+									off channel for one channel*/
+	struct timer_list		ocac_timer;		/* periodic OCAC timer (interval in ocac_params) */
+	struct ieee80211_channel	*ocac_chan;		/* current off channel for CAC */
+	struct ieee80211_ocac_cfg	ocac_cfg;		/* user configuration */
+	struct ieee80211_ocac_counts	ocac_counts;		/* diagnostic counters */
+	struct ieee80211_ocac_tsflog	ocac_tsflog;		/* TSF timestamp log */
+
+	spinlock_t			ocac_lock;		/* Lock for ocac_rx_state */
+	struct ieee80211_ocac_rx_state	ocac_rx_state;
+};
+
+#define IEEE80211_SET_CHANNEL_DEFERRED_CANCEL	0x80000000	/* flag bits for deferred channel set (cf. ic_set_channel_deferred) */
+#define IEEE80211_SET_CHANNEL_TSF_OFFSET	0x40000000
+
+enum ieee80211_scan_frame_flags {	/* selects which scan-related frames an operation applies to (presumed from names) */
+	IEEE80211_SCAN_FRAME_START = 0,
+	IEEE80211_SCAN_FRAME_PRBREQ = 1,	/* probe request */
+	IEEE80211_SCAN_FRAME_FINISH = 2,
+	IEEE80211_SCAN_FRAME_ALL = 3
+};
+
+struct qtn_bgscan_param	{	/* background-scan tuning knobs; *_msecs fields are milliseconds per their names */
+	u_int16_t	dwell_msecs_active;	/* per-channel dwell, active scan */
+	u_int16_t	dwell_msecs_passive;	/* per-channel dwell, passive scan */
+	u_int16_t	duration_msecs_active;
+	u_int16_t	duration_msecs_passive_fast;	/* passive durations by scan speed tier */
+	u_int16_t	duration_msecs_passive_normal;
+	u_int16_t	duration_msecs_passive_slow;
+	u_int16_t	thrshld_fat_passive_fast;	/* FAT thresholds selecting the passive tier - presumably percent */
+	u_int16_t	thrshld_fat_passive_normal;
+	u_int16_t	debug_flags;
+};
+
+struct channel_change_event {	/* previous/current channel pair reported on a channel change */
+	u_int8_t	cce_previous;	/* channel before the switch */
+	u_int8_t	cce_current;	/* channel after the switch */
+};
+
+struct ieee80211_phy_stats {	/* one snapshot of PHY-level statistics */
+	u_int32_t	tstamp;		/* snapshot timestamp (units not visible here) */
+
+	u_int32_t	assoc;		/* association count (presumed from name) */
+
+	u_int32_t	atten;		/* attenuation (name-based; units unknown) */
+	u_int32_t	cca_total;	/* CCA accounting; total presumably = tx + rx + int + idle - confirm */
+	u_int32_t	cca_tx;
+	u_int32_t	cca_rx;
+	u_int32_t	cca_int;	/* interference portion */
+	u_int32_t	cca_idle;
+
+	u_int32_t	rx_pkts;
+	u_int32_t	rx_gain;
+	u_int32_t	rx_cnt_crc;	/* rx CRC error count (presumed) */
+	u_int32_t	rx_noise;
+
+	u_int32_t	tx_pkts;
+	u_int32_t	tx_defers;
+	u_int32_t	tx_touts;	/* tx timeouts */
+	u_int32_t	tx_retries;
+
+	u_int32_t	cnt_sp_fail;	/* sp/lp = short/long preamble (cf. scs_sp_wf / scs_lp_wf naming) */
+	u_int32_t	cnt_lp_fail;
+	u_int32_t	last_tx_scale;
+	u_int32_t	last_rx_mcs;	/* most recent rx/tx MCS indices */
+	u_int32_t	last_tx_mcs;
+
+	u_int32_t	last_rssi;	/* combined value; per-stream values below */
+	u_int32_t	last_rssi_array[IEEE80211_QTN_NUM_RF_STREAMS];	/* per-RF-stream RSSI */
+
+	u_int32_t	last_rcpi;
+
+	u_int32_t	last_evm;	/* combined value; per-stream values below */
+	u_int32_t	last_evm_array[IEEE80211_QTN_NUM_RF_STREAMS];	/* per-RF-stream EVM */
+};
+
+#define BRCM_RXGLITCH_INVALID          0xFFFFFFFF	/* sentinel: no valid glitch reading */
+#define BRCM_RXGLITCH_TOP              0xFFFFFF	/* max meaningful glitch value (presumed) */
+#define BRCM_RXGLITCH_THRSHLD_SCALE_MAX        0xFF	/* upper bound for scs_brcm_rxglitch_thrshlds_scale */
+#define BRCM_RXGLITCH_MAX_PER_INTVL            300000
+#define BRCM_RXGLITCH_NEXT_TRIG_THRSHLD        20      /* percent */
+#define BRCM_RSSI_MIN                  -120	/* lowest RSSI considered, dBm (presumed) */
+#define BRCM_RXGLITH_THRSHLD_HIPWR     0	/* threshold-table row indices: high/low power */
+#define BRCM_RXGLITH_THRSHLD_LOWPWR    1
+#define BRCM_RXGLITH_THRSHLD_PWR_NUM   2	/* number of power rows */
+#define BRCM_RXGLITH_THRSHLD_STEP      5
+struct brcm_rxglitch_thrshld_pair {	/* one (rssi, glitch-threshold) point of the BRCM threshold table */
+	int rssi;	/* dBm, presumed (cf. BRCM_RSSI_MIN) */
+	uint32_t rxglitch;	/* glitch-count threshold at this RSSI */
+};
+
+struct ip_mac_mapping {	/* singly-linked IP-to-MAC mapping entry */
+	struct		ip_mac_mapping *next;	/* next entry in the list */
+	__be32		ip_addr;	/* IPv4 address, big-endian (per __be32) */
+	u_int8_t	mac[ETH_ALEN];	/* MAC address mapped to ip_addr */
+};
+
+#define MAX_USER_DEFINED_MAGIC_LEN	256	/* max bytes in a user-defined wake pattern */
+struct ieee80211_wowlan_pattern {	/* user-defined WoWLAN wake pattern */
+	uint32_t	len;	/* valid bytes in magic_pattern (presumed <= MAX_USER_DEFINED_MAGIC_LEN; bounds-check at producer) */
+	uint8_t		magic_pattern[MAX_USER_DEFINED_MAGIC_LEN];	/* pattern bytes to match */
+};
+
+struct ieee80211_wowlan {	/* Wake-on-WLAN configuration */
+	uint16_t	host_state;	/* host power state (semantics not visible here) */
+	uint16_t	wowlan_match;	/* match-mode selector (presumed) */
+	uint16_t	L2_ether_type;	/* EtherType filter for L2 wake frames */
+	uint16_t	L3_udp_port;	/* UDP port filter for L3 wake frames */
+	struct ieee80211_wowlan_pattern	pattern;	/* user-defined wake pattern */
+};
+
+/*
+ * Channel occupancy record
+ */
+struct ieee80211_chan_occupy_record {	/* running per-channel occupancy accounting */
+	uint32_t occupy_start;			/* time in seconds when channel was selected */
+	uint8_t cur_chan;			/* channel for which time is recorded */
+	uint8_t prev_chan;			/* previous channel */
+	uint32_t duration[IEEE80211_CHAN_MAX];	/* time spent on a channel in seconds; indexed by channel (presumed) */
+	uint32_t times[IEEE80211_CHAN_MAX];	/* number of times channel was used */
+};
+
+#define DM_TXPOWER_FACTOR_MAX	8	/* valid ranges for the ieee80211_dm_factor fields below */
+#define DM_TXPOWER_FACTOR_MIN	0
+#define DM_ACI_FACTOR_MAX	0
+#define DM_ACI_FACTOR_MIN	-4
+#define DM_CCI_FACTOR_MAX	0
+#define DM_CCI_FACTOR_MIN	-4
+#define DM_DFS_FACTOR_MAX	32
+#define DM_DFS_FACTOR_MIN	0
+#define DM_BEACON_FACTOR_MAX	0
+#define DM_BEACON_FACTOR_MIN	-4
+
+#define DM_FLAG_TXPOWER_FACTOR_PRESENT	0x1	/* bits for ieee80211_dm_factor.flags */
+#define DM_FLAG_ACI_FACTOR_PRESENT	0x2
+#define DM_FLAG_CCI_FACTOR_PRESENT	0x4
+#define DM_FLAG_DFS_FACTOR_PRESENT	0x8
+#define DM_FLAG_BEACON_FACTOR_PRESENT	0x10
+
+struct ieee80211_dm_factor {
+	uint32_t flags;		/* DM_FLAG_*_PRESENT bits: which factors below carry a value */
+	int txpower_factor;	/* each factor bounded by its DM_*_FACTOR_MIN/MAX above */
+	int aci_factor;		/* ACI/CCI = adjacent/co-channel interference (presumed from acronyms) */
+	int cci_factor;
+	int dfs_factor;
+	int beacon_factor;
+};
+
+#if defined(QBMPS_ENABLE)
+
+#define BMPS_MODE_OFF		0	/* BMPS power save disabled */
+#define BMPS_MODE_MANUAL	1	/* manually enabled (presumed) */
+#define BMPS_MODE_AUTO		2	/* presumably driven by the throughput thresholds below */
+
+#define	BMPS_TPUT_THRESHOLD_UPPER	8000	/* 8 mbps */
+#define	BMPS_TPUT_THRESHOLD_LOWER	4000	/* 4 mbps */
+#define BMPS_TPUT_MEASURE_PERIOD_MS	5000	/* 5 seconds */
+
+struct bmps_tput_measure {	/* periodic throughput sample used to steer BMPS */
+	struct timer_list tput_timer;		/* STA BMPS timer for TX/RX */
+						/* tput measurement */
+	uint32_t prev_tx_bytes;			/* # of TX bytes in previous measurement */
+	uint32_t prev_rx_bytes;			/* # of RX bytes in previous measurement */
+	uint32_t tput_kbps;			/* TX & RX overall throughput in kbps */
+};
+#endif
+
+/*
+ * Operating class table for regulatory region to operating class conversion.
+ */
+struct operating_class_table {	/* one operating-class entry (regulatory region to operating class conversion) */
+	uint8_t index;	/* regional operating-class number */
+	uint8_t global_index;	/* corresponding global operating class (presumed) */
+	uint8_t bandwidth;	/* channel width for this class - presumably MHz */
+	uint8_t chan_set[IEEE80211_CHAN_BYTES];	/* bitmap of channels belonging to this class */
+	uint16_t behavior;	/* behavior-limit flags (semantics not visible here) */
+};
+
+struct region_to_oper_class {	/* maps a regulatory region to its supported operating classes */
+	const char *region_name;	/* region identifier string */
+	uint8_t class_num_5g;	/* number of valid entries in classes_5g */
+	uint8_t classes_5g[IEEE80211_OPER_CLASS_BYTES];	/* 5 GHz operating-class ids */
+	uint8_t class_num_24g;	/* number of valid entries in classes_24g */
+	uint8_t classes_24g[IEEE80211_OPER_CLASS_BYTES_24G];	/* 2.4 GHz operating-class ids */
+	const struct operating_class_table * const class_table;	/* per-class details */
+};
+
+/*
+ * Logical SSID group used for defining association limits per group of VAPs.
+ */
+struct ssid_logical_group {	/* association accounting for one logical VAP group (see comment above) */
+	u_int16_t limit;	/* max associations allowed for the group */
+	u_int16_t reserve;	/* reserved association slots (presumed) */
+	u_int16_t assocs;	/* current association count */
+};
+
+struct offchan_protect {	/* suspend/resume bookkeeping around off-channel activity */
+	struct timer_list offchan_stop_expire;	/* off channel suspend/resume expiration timer */
+	uint32_t offchan_suspend_cnt;	/* off channel suspending counter */
+	unsigned long offchan_timeout;	/* off channel suspending timeout, in jiffies */
+};
+
+struct sta_dfs_info {	/* STA-side DFS state (strict-mode radar handling) */
+	struct timer_list sta_radar_timer;	/* armed after a radar detection (presumed) */
+	struct timer_list sta_silence_timer;	/* tx-silence period timer (presumed) */
+	bool	sta_dfs_radar_detected_timer;	/* true while sta_radar_timer is pending (name-based; confirm) */
+	int	sta_dfs_radar_detected_channel;	/* channel on which radar was detected */
+	unsigned long	sta_dfs_tx_chan_close_time;	/* deadline to stop tx on the channel - presumably jiffies */
+	uint8_t	sta_dfs_strict_mode;	/* strict STA-DFS mode enabled */
+	bool	sta_dfs_strict_msr_cac;	/* measurement-based CAC in strict mode (presumed) */
+	bool	allow_measurement_report;	/* permit sending measurement reports */
+};
+
+#ifdef CONFIG_QHOP
+struct rbs_mbs_dfs_info {	/* QHop RBS/MBS DFS coordination state */
+        bool    rbs_mbs_allow_tx_frms_in_cac;	/* allow frame tx during CAC (presumed) */
+        bool    rbs_allow_qhop_report;	/* RBS may forward QHop radar reports (presumed) */
+        bool    mbs_allow_csa;	/* MBS may issue CSA (presumed) */
+        struct timer_list rbs_dfs_radar_timer;	/* RBS radar handling timer */
+	unsigned long rbs_dfs_tx_chan_close_time;	/* deadline to stop tx - presumably jiffies (cf. sta_dfs_tx_chan_close_time) */
+};
+#endif
+
+struct ieee80211_vopt_info {	/* requested vs. applied state of several optimization features */
+	uint8_t state;	/* requested vopt state */
+	uint8_t	cur_state;	/* currently applied state */
+	uint8_t	bf;	/* saved feature settings; bf/bbf presumably beamforming variants - confirm */
+	uint8_t	bbf;
+	uint8_t	pppc;
+	uint8_t airfair;	/* airtime fairness (cf. ic_airfair) */
+	uint32_t scs;	/* SCS setting snapshot */
+	uint32_t ocac;	/* OCAC setting snapshot */
+	uint32_t qtm;
+};
+
+struct ieee80211com {
+	/* MATS FIX The member ic_dev is not used in QDRV and should be removed */
+	struct net_device *ic_dev;		/* associated device */
+	struct ieee80211_channel * (*ic_findchannel)(struct ieee80211com *ic, int ieee, int mode);
+	ieee80211com_lock_t ic_comlock;		/* state update lock */
+	ieee80211com_lock_t ic_vapslock;	/* vap state machine lock */
+	TAILQ_HEAD(, ieee80211vap) ic_vaps;	/* list of vap instances */
+	enum ieee80211_phytype ic_phytype;	/* XXX wrong for multi-mode */
+	enum ieee80211_phymode ic_phymode;	/* Phy Mode */
+	enum ieee80211_phymode ic_phymode_save;	/* Phy Mode save */
+	int fixed_legacy_rate_mode;		/* Legacy rate mode */
+	int ic_radar_bw;			/* Radar mode */
+	enum ieee80211_opmode ic_opmode;	/* operation mode */
+	struct ifmedia ic_media;		/* interface media config */
+	u_int8_t ic_myaddr[IEEE80211_ADDR_LEN];
+	struct timer_list ic_inact;		/* mgmt/inactivity timer */
+	struct offchan_protect ic_offchan_protect;
+
+	uint32_t ic_ver_sw;
+	uint16_t ic_ver_hw;
+	uint16_t ic_ver_platform_id;
+	uint32_t ic_ver_timestamp;
+	uint32_t ic_ver_flags;
+
+	u_int32_t ic_flags;			/* state flags */
+	u_int32_t ic_flags_ext;			/* extension of state flags */
+	u_int32_t ic_flags_qtn;			/* Quantenna specific flags */
+	u_int32_t ic_caps;			/* capabilities */
+	enum ieee80211_vht_mcs_supported ic_vht_mcs_cap;	/* VHT MCS capability */
+	enum ieee80211_ht_nss ic_ht_nss_cap;	/* Current HT Max spatial streams */
+	enum ieee80211_vht_nss ic_vht_nss_cap;	/* Current VHT Max spatial streams */
+	enum ieee80211_vht_nss ic_vht_nss_cap_24g;	/* Current 2.4G VHT Max spatial streams*/
+	u_int8_t ic_ath_cap;			/* Atheros adv. capabilities */
+	u_int8_t ic_promisc;			/* vap's needing promisc mode */
+	u_int8_t ic_allmulti;			/* vap's needing all multicast*/
+	u_int8_t ic_nopened;			/* vap's been opened */
+	struct ieee80211_rateset ic_sup_rates[IEEE80211_MODE_MAX];
+	struct ieee80211_rateset ic_sup_xr_rates;
+	struct ieee80211_rateset ic_sup_half_rates;
+	struct ieee80211_rateset ic_sup_quarter_rates;
+	struct ieee80211_rateset ic_sup_ht_rates[IEEE80211_MODE_MAX];
+	u_int16_t		ic_modecaps;	/* set of mode capabilities */
+	u_int16_t		ic_curmode;	/* current mode */
+	u_int16_t		ic_lintval;	/* beacon interval */
+	u_int16_t		ic_lintval_backup;	/* beacon interval for backup */
+	u_int16_t		ic_holdover;	/* PM hold over duration */
+	u_int16_t		ic_bmisstimeout;/* beacon miss threshold (ms) */
+	u_int16_t		ic_txpowlimit;	/* global tx power limit */
+	u_int32_t		ic_sample_rate;	/* sampling rate in seconds */
+
+	u_int16_t		ic_newtxpowlimit; /* tx power limit to change to (in 0.5 dBm) */
+	u_int16_t		ic_uapsdmaxtriggers; /* max triggers that could arrive */
+	u_int8_t		ic_coverageclass; /* coverage class */
+	int8_t			ic_pwr_adjust_scancnt; /* Num of scans after which gain settings toggle */
+	uint32_t		ic_bcn_hang_timeout; /* Beacon hang timeout */
+
+	int rts_cts_prot; /* RTS-CTS protection support */
+
+	/* 11n Capabilities */
+	struct ieee80211_htcap ic_htcap; /* HT capabilities */
+	int ldpc_enabled; /* LDPC support */
+	int stbc_enabled; /* STBC support */
+
+	/* 11n info */
+	struct ieee80211_htinfo ic_htinfo; /* HT information */
+
+	/* 11n beamforming */
+	u_int16_t ic_bfgrouping;
+	u_int16_t ic_bfcoeffsize;
+
+	int ic_txbf_period;
+
+	/* 2.4G band vht capabilities */
+	struct ieee80211_vhtcap ic_vhtcap_24g;
+	struct ieee80211_vhtop  ic_vhtop_24g;
+
+	/* 5G band capabilities */
+	struct ieee80211_vhtcap ic_vhtcap;
+	struct ieee80211_vhtop	ic_vhtop;
+
+	struct ieee80211_scs ic_scs; /* SCS related information */
+	struct delayed_work ic_scs_sample_work;	/* SCS (ACI/CCI Detection and Mitigation) workqueue */
+	struct ieee80211_ocac ic_ocac; /* OCAC related information*/
+	uint32_t ic_11n_40_only_mode;
+	uint16_t ic_vht_opmode_notif;	/* Override OpMode Notification IE, for WFA Testbed */
+	uint8_t ic_non_ht_sta;
+	uint8_t ic_ht_20mhz_only_sta;
+	uint8_t ic_non_ht_non_member;
+	uint8_t ic_pppc_select_enable;
+	uint8_t ic_pppc_select_enable_backup;
+	uint8_t ic_pppc_step_db;
+	uint8_t	ic_gi_fixed;		/* Enable fixed GI setting */
+	uint8_t	ic_gi_select_enable;	/* Enable dynamic GI selection */
+	uint8_t ic_bw_fixed;		/* Fixed bw setting */
+	uint8_t ic_def_matrix;		/* Default expansion matrices */
+	uint8_t ic_sta_cc;		/* Channel change due to noise at STA */
+	uint8_t ic_sta_cc_brcm;		/* Channel change due to noise at brcm STA */
+	uint8_t ic_tx_qos_sched;        /* tx qos sched index for hold-time table */
+	uint8_t ic_local_rts;		/* Use RTS on local node */
+	uint8_t ic_peer_rts_mode;	/* Config for informing peer nodes to use RTS */
+	uint8_t ic_dyn_peer_rts;	/* Dynamic peer RTS current status */
+	uint8_t ic_peer_rts;		/* Inform peer nodes to use RTS */
+	uint8_t ic_dyn_wmm;		/* Dynamic WMM enabled */
+	uint8_t ic_emi_power_switch_enable;
+	uint8_t ic_dfs_channels_deactive; /* Deactive all DFS channels */
+	uint8_t ic_beaconing_scheme;
+	uint8_t ic_dfs_csa_cnt;
+	uint8_t ic_weachan_cac_allowed;
+
+	/*
+	 * Channel state:
+	 *
+	 * ic_channels is the set of available channels for the device;
+	 *    it is setup by the driver
+	 * ic_nchans is the number of valid entries in ic_channels
+	 * ic_chan_avail is a bit vector of these channels used to check
+	 *    whether a channel is available w/o searching the channel table.
+	 * ic_chan_active is a (potentially) constrained subset of
+	 *    ic_chan_avail that reflects any mode setting or user-specified
+	 *    limit on the set of channels to use/scan
+	 * ic_curchan is the current channel the device is set to; it may
+	 *    be different from ic_bsschan when we are off-channel scanning
+	 *    or otherwise doing background work
+	 * ic_bsschan is the channel selected for operation; it may
+	 *    be undefined (IEEE80211_CHAN_ANYC)
+	 * ic_prevchan is a cached ``previous channel'' used to optimize
+	 *    lookups when switching back+forth between two channels
+	 *    (e.g. for dynamic turbo)
+	 */
+	uint8_t ic_rf_chipid;                   /* RFIC chip ID */
+	int ic_nchans;				/* # entries in ic_channels */
+	struct ieee80211_channel ic_channels[IEEE80211_CHAN_MAX+1];
+	struct ieee80211req_csw_record	ic_csw_record;	/* channel switch record */
+	struct ieee80211_chan_occupy_record ic_chan_occupy_record;
+	uint32_t ic_csw_reason;				/* reason for the last channel switch */
+	u_int8_t ic_csw_mac[IEEE80211_ADDR_LEN];	/* client mac addr when SCS channel switch */
+	struct ieee80211_assoc_history ic_assoc_history;
+	u_int8_t ic_chan_avail[IEEE80211_CHAN_BYTES];
+	u_int8_t ic_chan_active[IEEE80211_CHAN_BYTES];
+
+	u_int8_t ic_chan_availability_status[IEEE80211_CHAN_MAX+1];
+
+	u_int8_t ic_chan_pri_inactive[IEEE80211_CHAN_BYTES];	/* channel not used as primary */
+	u_int8_t ic_is_inactive_usercfg[IEEE80211_CHAN_BYTES];  /* 0x1-regulatory 0x2-user override */
+	u_int8_t ic_is_inactive_autochan_only[IEEE80211_CHAN_BYTES];
+	u_int8_t ic_chan_dfs_required[IEEE80211_CHAN_BYTES];	/* channel is DFS required */
+	u_int8_t ic_chan_weather_radar[IEEE80211_CHAN_BYTES];	/* weather radar channel */
+	u_int8_t ic_chan_disabled[IEEE80211_CHAN_BYTES];	/* channels are disabled */
+
+	u_int8_t ic_chan_active_20[IEEE80211_CHAN_BYTES];
+	u_int8_t ic_chan_active_40[IEEE80211_CHAN_BYTES];
+	u_int8_t ic_chan_active_80[IEEE80211_CHAN_BYTES];
+	u_int8_t ic_bw_auto_select;
+	u_int8_t ic_max_system_bw;
+	u_int8_t ic_bss_bw;			/* BSS channel width, in station mode only */
+	struct ieee80211_vopt_info ic_vopt;
+
+	struct ieee80211_channel *ic_curchan;	/* current channel */
+	struct ieee80211_channel *ic_bsschan;	/* bss channel */
+	struct ieee80211_channel *ic_prevchan;	/* previous channel */
+	struct ieee80211_channel *ic_scanchan;	/* scanning channel */
+	int16_t ic_channoise;			/* current channel noise in dBm */
+	struct ieee80211_channel *ic_des_chan;	/* desired channel */
+	int ic_des_chan_after_init_cac;	/* Saved desired chan to switch after Initial CAC */
+	int ic_des_chan_after_init_scan;/* Saved desired chan to switch after Initial Scan */
+	int ic_ignore_init_scan_icac;/* Ignore init cac in case of set channel for wireless_conf arguement fails */
+
+	int ic_chan_is_set;
+	u_int16_t ic_des_mode;			/* desired mode */
+	/* regulatory class ids */
+	u_int ic_nregclass;			/* # entries in ic_regclassids */
+	u_int8_t ic_regclassids[IEEE80211_REGCLASSIDS_MAX];
+	struct ieee80211_channel *ic_fast_reass_chan;	/* fast reassociate channel */
+#define IEEE80211_FAST_REASS_SCAN_MAX 3
+	u_int8_t ic_fast_reass_scan_cnt;	/* Number of times tried to do fast reassoc */
+
+	/* scan-related state */
+	struct ieee80211_scan_state *ic_scan;	/* scan state */
+	enum ieee80211_roamingmode ic_roaming;	/* roaming mode */
+	unsigned long ic_lastdata;		/* time of last data frame */
+	unsigned long ic_lastscan;		/* time last scan completed */
+
+	/* NB: this is the union of all vap stations/neighbors */
+	struct ieee80211_node_table ic_sta;	/* stations/neighbors */
+
+	/* XXX multi-bss: split out common/vap parts? */
+	struct ieee80211_wme_state ic_wme;	/* WME/WMM state */
+	uint8_t ic_vap_pri_wme;			/* enable automatic adjusting wme bss param based on vap priority */
+	uint8_t ic_airfair;			/* airtime fairness */
+
+	/* XXX multi-bss: can per-vap be done/make sense? */
+	enum ieee80211_protmode	ic_protmode;	/* 802.11g protection mode */
+	u_int16_t ic_nonerpsta;			/* # non-ERP stations */
+	u_int16_t ic_longslotsta;		/* # long slot time stations */
+	u_int16_t ic_sta_assoc_limit;		/* total assoc limit per interface */
+	u_int16_t ic_sta_assoc;			/* stations associated(including WDS node) */
+	u_int16_t ic_wds_links;			/* WDS links created */
+	u_int16_t ic_dt_sta_assoc;		/* dturbo capable stations */
+	u_int16_t ic_xr_sta_assoc;		/* XR stations associated */
+	u_int16_t ic_nonqtn_sta;		/* Non-Quantenna peers */
+
+	struct ssid_logical_group ic_ssid_grp[IEEE80211_MAX_BSS_GROUP]; /* VAPs logical group assocication limits */
+
+	/* dwell times for channel scanning */
+	u_int16_t ic_mindwell_active;
+	u_int16_t ic_mindwell_passive;
+	u_int16_t ic_maxdwell_active;
+	u_int16_t ic_maxdwell_passive;
+	struct qtn_bgscan_param	ic_qtn_bgscan;
+
+	/* Adding Wireless stats per MAC. Per Vap is maintained in vap structure */
+	struct iw_statistics ic_iwstats;
+	/*
+	 * Spectrum Management.
+	 */
+	u_int16_t ic_country_code;
+	uint16_t ic_spec_country_code;	/* specific country code for EU region */
+	int ic_country_outdoor;
+	struct ieee80211_ie_country ic_country_ie; /* country info element */
+	uint8_t ic_oper_class[IEEE80211_OPER_CLASS_BYTES];	/* Supported operating class */
+	const struct region_to_oper_class *ic_oper_class_table;
+	/*
+	 *  current channel power constraint for Power Constraint IE.
+	 *
+	 *  NB: local power constraint depends on the channel, but assuming it must
+	 *     be detected dynamically, we cannot maintain a table (i.e., will not
+	 *     know value until change to channel and detect).
+	 */
+	u_int8_t ic_pwr_constraint;
+	struct ieee80211_pc_over ic_pco; /* Power constraint override related information*/
+	u_int8_t ic_chanchange_tbtt;
+	u_int8_t ic_chanchange_chan;
+
+	u_int8_t ic_csa_count;			/* last csa count */
+	u_int8_t ic_csa_mode;			/* last csa mode */
+	int32_t ic_csa_reason;
+#define IEEE80211_CSA_F_BEACON     0x1
+#define IEEE80211_CSA_F_ACTION     0x2
+	uint32_t ic_csa_flag;
+#define IEEE80211_CSA_FRM_BEACON   0
+#define IEEE80211_CSA_FRM_ACTION   1
+#define IEEE80211_CSA_FRM_MAX      2
+	int32_t ic_csa_frame[IEEE80211_CSA_FRM_MAX];
+	struct ieee80211_channel *ic_csa_chan;	/* csa channel */
+	struct completion csa_completion;		/* complete event of csa counter, wake up MuC command */
+	struct workqueue_struct *csa_work_queue;
+	struct work_struct csa_work;		/* work queue to perform csa action */
+	void (*finish_csa)(unsigned long);
+	u_int8_t ic_cca_token;			/* last cca token */
+
+	/* upcoming cca measurement */
+	u_int64_t ic_cca_start_tsf;		/* tsf at which cca measurement will occur */
+	u_int16_t ic_cca_duration_tu;		/* duration of cca measurement in TU */
+	u_int8_t ic_cca_chan;			/* channel of cca measurement */
+
+	u_int8_t ic_ieee_alt_chan;		/* if not zero jump to this channel if radar is detected */
+	u_int8_t ic_ieee_best_alt_chan;		/* tracks best alternate chan to fast-switch into if radar is detected */
+	u_int32_t ic_non_occupancy_period;	/* radar non-occupancy period. */
+
+	/* boot time CAC*/
+	int32_t ic_max_boot_cac_duration;
+	unsigned long ic_boot_cac_end_jiffy;
+        struct timer_list icac_timer;
+
+	u_int8_t ic_mode_get_phy_stats;
+
+	u_int8_t ic_legacy_retry_limit;
+	u_int8_t ic_retry_count;
+
+	u_int32_t ic_rx_agg_timeout;
+	u_int32_t ic_ndpa_dur;
+	u_int32_t ic_su_txbf_pkt_cnt;
+	u_int32_t ic_mu_txbf_pkt_cnt;
+	u_int32_t ic_tx_max_ampdu_size;
+
+	u_int16_t ic_mu_debug_level;
+	u_int8_t  ic_mu_enable;
+
+	u_int8_t ic_rts_bw_dyn;
+	u_int8_t ic_dup_rts;
+	u_int8_t ic_cts_bw;
+	u_int8_t use_non_ht_duplicate_for_mu;
+	u_int8_t rx_bws_support_for_mu_ndpa;
+
+	u_int8_t cca_fix_disable;
+	u_int8_t auto_cca_enable;
+	u_int8_t ic_opmode_bw_switch_en;
+
+	struct muc_tx_stats *ic_muc_tx_stats;
+
+
+	/* virtual ap create/delete */
+	struct ieee80211vap *(*ic_vap_create)(struct ieee80211com *,
+		const char *, int, int, int, struct net_device *);
+	void (*ic_vap_delete)(struct ieee80211vap *);
+	uint8_t (*ic_get_vap_idx)(struct ieee80211vap *);
+	/* send/recv 802.11 management frame */
+	int (*ic_send_mgmt)(struct ieee80211_node *, int, int);
+	void (*ic_recv_mgmt)(struct ieee80211_node *, struct sk_buff *, int,
+		int, u_int32_t);
+	/* send an 802.11 encapsulated frame to the driver */
+	int (*ic_send_80211)(struct ieee80211com *, struct ieee80211_node *ni,
+				struct sk_buff *skb, uint32_t priority, uint8_t is_mgmt);
+	/* reset device state after 802.11 parameter/state change */
+	int (*ic_init)(struct ieee80211com *);
+	int (*ic_reset)(struct ieee80211com *);
+	void (*ic_queue_reset)(struct ieee80211_node *);
+	/* update device state for 802.11 slot time change */
+	void (*ic_updateslot)(struct ieee80211com *);
+	/* new station association callback/notification */
+	void (*ic_newassoc)(struct ieee80211_node *, int);
+	void (*ic_disassoc)(struct ieee80211_node *);
+	void (*ic_node_update)(struct ieee80211_node *);
+	/* node state management */
+	struct ieee80211_node *(*ic_node_alloc)(struct ieee80211_node_table *,
+		struct ieee80211vap *, const uint8_t *, uint8_t tmp_node);
+	void (*ic_node_free)(struct ieee80211_node *);
+	void (*ic_qdrv_node_free)(struct ieee80211_node *);
+	void (*ic_node_cleanup)(struct ieee80211_node *);
+	u_int8_t (*ic_node_getrssi)(const struct ieee80211_node *);
+	u_int8_t (*ic_node_move_data)(const struct ieee80211_node *);
+
+	void (*ic_iterate_nodes)(struct ieee80211_node_table *, ieee80211_iter_func *,
+		void *, int ignore_blacklist);
+	void (*ic_iterate_dev_nodes)(struct net_device *,
+				     struct ieee80211_node_table *,
+				     ieee80211_iter_func *, void *, int);
+
+	/* scanning support */
+	void (*ic_initiate_scan)(struct ieee80211vap *vap);
+	void (*ic_scan_start)(struct ieee80211com *);
+	void (*ic_scan_end)(struct ieee80211com *);
+	int (*ic_check_channel)(struct ieee80211com *ic, struct ieee80211_channel *chan,
+				int fast_switch, int is_requested_chan);
+	void (*ic_set_channel)(struct ieee80211com *);
+	void (*ic_bridge_set_dest_addr)(struct sk_buff *skb, void *eh1);
+	void (*ic_get_tsf)(uint64_t *tsf);
+	int (*ic_bmps_set_frame)(struct ieee80211com *ic, struct ieee80211_node *ni,
+				struct sk_buff *skb);
+	int (*ic_bmps_release_frame)(struct ieee80211com *ic);
+	void (*ic_scs_update_scan_stats)(struct ieee80211com *ic);
+	int (*ic_sample_channel)(struct ieee80211vap *vap, struct ieee80211_channel *chan);
+	int (*ic_sample_channel_cancel)(struct ieee80211vap *vap);
+	int (*ic_bgscan_start)(struct ieee80211com *ic);
+	int (*ic_bgscan_channel)(struct ieee80211vap *vap, struct ieee80211_channel *chan,
+				 int scan_mode, int dwelltime);
+	void (*ic_set_channel_deferred)(struct ieee80211com *, u_int64_t tsf, int flags);
+	int  (*ic_set_start_cca_measurement)(struct ieee80211com *ic,
+					     const struct ieee80211_channel *cca_channel,
+					     uint64_t start_tsf, u_int32_t duration);
+	int (*ic_do_measurement)(struct ieee80211com *ic);
+	void (*ic_finish_measurement)(struct ieee80211com *ic, u_int8_t result);
+	void (*ic_send_csa_frame)(struct ieee80211vap *vap, uint8_t csa_mode,
+				  uint8_t csa_chan, uint8_t csa_count, uint64_t tsf);
+	int (*ic_set_ocac)(struct ieee80211vap *vap, struct ieee80211_channel *chan);
+	int (*ic_ocac_release_frame)(struct ieee80211com *ic, int force);
+
+	int (*ic_set_beaconing_scheme)(struct ieee80211vap *vap, int param, int value);
+
+	/* u-apsd support */
+	void (*ic_uapsd_flush)(struct ieee80211_node *);
+
+	/* set coverage class */
+	void (*ic_set_coverageclass)(struct ieee80211com *);
+
+	/* mhz to ieee conversion */
+	u_int (*ic_mhz2ieee)(struct ieee80211com *, u_int, u_int);
+	void (*ic_setparam)(struct ieee80211_node *, int, int, unsigned char *, int);
+	int (*ic_getparam)(struct ieee80211_node *, int, int *, unsigned char *, int *);
+	void (*ic_register_node)(struct ieee80211_node *ni);
+	void (*ic_unregister_node)(struct ieee80211_node *ni);
+	int (*ic_get_phy_stats)(struct net_device *dev, struct ieee80211com *ic,
+				struct ieee80211_phy_stats *ps, uint8_t all_stats);
+	int (*ic_ncbeamforming)(struct ieee80211_node *, struct sk_buff *act_frame);
+	void (*ic_htaddba)(struct ieee80211_node *, int, int);
+	void (*ic_htdelba)(struct ieee80211_node *, int, int);
+
+	void (*ic_join_bss)(struct ieee80211vap *vap);
+	void (*ic_beacon_update)(struct ieee80211vap *vap);
+	void (*ic_beacon_stop)(struct ieee80211vap *vap);
+
+	void (*ic_setkey)(struct ieee80211vap *vap, const struct ieee80211_key *k,
+		const u_int8_t mac[IEEE80211_ADDR_LEN]);
+	void (*ic_delkey)(struct ieee80211vap *vap, const struct ieee80211_key *k,
+		const u_int8_t mac[IEEE80211_ADDR_LEN]);
+
+	/* L2 external filter */
+	int (*ic_set_l2_ext_filter)(struct ieee80211vap *, int);
+	int (*ic_set_l2_ext_filter_port)(struct ieee80211vap *, int);
+	int (*ic_get_l2_ext_filter_port)(void);
+	void (*ic_send_to_l2_ext_filter)(struct ieee80211vap *, struct sk_buff *);
+
+	int (*ic_mac_reserved)(const uint8_t *addr);
+
+	/* Stats support */
+	void (*ic_get_wlanstats)(struct ieee80211com *, struct iw_statistics *);
+	/* Change of the MIMO power save mode for the STA */
+	void (*ic_smps)(struct ieee80211_node *, int);
+	/* TKIP MIC failure report */
+	void (*ic_tkip_mic_failure)(struct ieee80211vap *, int count);
+	/* DFS radar detection handling */
+	void (*ic_radar_detected)(struct ieee80211com* ic, u_int8_t new_ieee);
+	/* DFS radar selection function */
+	struct ieee80211_channel *(*ic_select_channel)(u_int8_t new_ieee);
+	/* DFS action when channel scan is done*/
+	void (*ic_dfs_action_scan_done)(void);
+	/* Check if current region belongs to EU region */
+	bool (*ic_dfs_is_eu_region)(void);
+
+
+	void (*ic_mark_channel_availability_status)(struct ieee80211com *ic, struct ieee80211_channel *chan, uint8_t usable);
+
+	void (*ic_set_chan_availability_status_by_chan_num)(struct ieee80211com *ic,
+			struct ieee80211_channel *chan, uint8_t usable);
+	int (*ic_get_chan_availability_status_by_chan_num)(struct ieee80211com *ic, struct ieee80211_channel *chan);
+
+	void (*ic_mark_channel_dfs_cac_status)(struct ieee80211com *ic, struct ieee80211_channel *chan, u_int32_t cac_flag, bool set);
+	void (*ic_dump_chan_availability_status)(struct ieee80211com *ic);
+
+	int (*ic_ap_next_cac)(struct ieee80211com *ic, struct ieee80211vap *vap,
+			unsigned long cac_period,
+			struct ieee80211_channel **qdrv_radar_cb_cac_chan,
+			u_int32_t flags);
+
+	bool (*ic_dfs_chans_available_for_cac)(struct ieee80211com *ic, struct ieee80211_channel *ch);
+	int (*ic_is_dfs_chans_available_for_dfs_reentry)(struct ieee80211com *ic, struct ieee80211vap *);
+	int  (*ic_get_init_cac_duration)(struct ieee80211com *ic);
+	void (*ic_set_init_cac_duration)(struct ieee80211com *ic, int val);
+	void (*ic_start_icac_procedure)(struct ieee80211com *ic);
+	void (*ic_stop_icac_procedure)(struct ieee80211com *ic);
+	bool (*ic_chan_compare_equality)(struct ieee80211com *ic,
+					struct ieee80211_channel *curr_chan,
+					struct ieee80211_channel *new_chan);
+
+
+	/* DFS select channel */
+	void (*ic_dfs_select_channel)(int channel);
+	void (*ic_wmm_params_update)(struct ieee80211vap *);
+	void (*ic_power_table_update)(struct ieee80211vap *vap,
+			struct ieee80211_channel *chan);
+	/* tdls parameters configuration */
+	void (*ic_set_tdls_param)(struct ieee80211_node *ni, int cmd, int value);
+	uint32_t (*ic_get_tdls_param)(struct ieee80211_node *ni, int cmd);
+	int (*ic_rxtx_phy_rate)(const struct ieee80211_node *, const int is_rx,
+			uint8_t *nss, uint8_t *mcs, uint32_t * phy_rate);
+	int (*ic_rssi)(const struct ieee80211_node *);
+	int (*ic_smoothed_rssi)(const struct ieee80211_node *);
+	int (*ic_snr)(const struct ieee80211_node *);
+	int (*ic_hw_noise)(const struct ieee80211_node *);
+	int (*ic_max_queue)(const struct ieee80211_node *);
+	u_int32_t (*ic_tx_failed)(const struct ieee80211_node *);
+	/* Convert mcs to phy rate in Kbps */
+	u_int32_t (*ic_mcs_to_phyrate)(u_int8_t bw, u_int8_t sgi, u_int8_t mcs,
+			u_int8_t nss, u_int8_t vht);
+	void (*ic_chan_switch_record)(struct ieee80211com *ic, struct ieee80211_channel *new_chan,
+			uint32_t reason);
+	void (*ic_chan_switch_reason_record)(struct ieee80211com *ic, int reason);
+	void (*ic_dfs_chan_switch_notify)(struct net_device *dev, struct ieee80211_channel *new_chan);
+	int (*ic_radar_test_mode_enabled)(void);
+	/* Count of the number of nodes allocated - for debug */
+	int ic_node_count;
+
+	void (*ic_node_auth_state_change)(struct ieee80211_node *ni, int deauth_auth);
+	void (*ic_new_assoc)(struct ieee80211_node *ni);
+
+	void (*ic_power_save)(struct ieee80211_node *ni, int enable);
+	int (*ic_remain_on_channel)(struct ieee80211com *ic, struct ieee80211_node *ni,
+			struct ieee80211_channel *off_chan, int bandwidth, uint64_t start_tsf,
+			uint32_t timeout, uint32_t duration, int flags);
+
+	int (*ic_mark_dfs_channels)(struct ieee80211com *ic, int nchans, struct ieee80211_channel *chans);
+	int (*ic_mark_weather_radar_chans)(struct ieee80211com *ic, int nchans, struct ieee80211_channel *chans);
+	void (*ic_use_rtscts)(struct ieee80211com *ic);
+	void (*ic_send_notify_chan_width_action)(struct ieee80211vap *vap, struct ieee80211_node *ni, u_int32_t width);
+
+	void (*ic_sta_set_xmit)(int enable);
+	void (*ic_set_radar)(int enable);
+	void (*ic_enable_sta_dfs)(int enable);
+	int (*ic_radar_detections_num)(uint32_t chan);
+	void (*ic_complete_cac)(void);
+	int (*ic_config_channel_list)(struct ieee80211com *ic, int ic_nchans);
+	void (*ic_set_11g_erp)(struct ieee80211vap *vap, int on);
+#ifdef CONFIG_QVSP
+	/* Functions at sta to apply commands from AP */
+	void (*ic_vsp_strm_state_set)(struct ieee80211com *ic, uint8_t strm_state,
+			const struct ieee80211_qvsp_strm_id *strm_id, struct ieee80211_qvsp_strm_dis_attr *attr);
+	void (*ic_vsp_change_stamode)(struct ieee80211com *ic, uint8_t stamode);
+	void (*ic_vsp_configure)(struct ieee80211com *ic, uint32_t index, uint32_t value);
+	void (*ic_vsp_set)(struct ieee80211com *ic, uint32_t index, uint32_t value);
+	int (*ic_vsp_get)(struct ieee80211com *ic, uint32_t index, uint32_t *value);
+
+	/* Callbacks at AP to send commands to sta */
+	void (*ic_vsp_cb_strm_ctrl)(void *token, struct ieee80211_node *node, uint8_t strm_state,
+			struct ieee80211_qvsp_strm_id *strm_id, struct ieee80211_qvsp_strm_dis_attr *attr);
+	void (*ic_vsp_cb_cfg)(void *token, uint32_t index, uint32_t value);
+
+	/* Callback to stream throttler external to vsp module */
+	void (*ic_vsp_cb_strm_ext_throttler)(void *token, struct ieee80211_node *node,
+			uint8_t strm_state, const struct ieee80211_qvsp_strm_id *strm_id,
+			struct ieee80211_qvsp_strm_dis_attr *attr, uint32_t throt_intvl);
+
+	void (*ic_vsp_cb_logger)(void *token, uint32_t index, uint32_t value);
+	void (*ic_vsp_reset)(struct ieee80211com *ic);
+	void (*ic_enable_xmit)(struct ieee80211com *ic);
+	void (*ic_disable_xmit)(struct ieee80211com *ic);
+
+	struct {
+		uint8_t		set;
+		uint32_t	value;
+	} vsp_cfg[QVSP_CFG_MAX];
+
+	/* BA throttling for 3rd party client control */
+	uint32_t ic_vsp_ba_throt_num;
+#endif
+	struct channel_change_event	ic_dfs_cce;
+	struct channel_change_event	ic_aci_cci_cce;
+
+	/* association ID bitmap */
+	u_int32_t ic_aid_bitmap[howmany(QTN_NODE_TBL_SIZE_LHOST, 32)];
+
+#ifdef DOT11K_PM_INTERVAL
+	/* Number of pm intervals(interface with pm_interval module) */
+	u_int8_t ic_pm_intervals;
+#endif
+	/* Compatibility fix with other vendor chipset */
+	uint32_t ic_vendor_fix;
+	struct ip_mac_mapping *ic_ip_mac_mapping;
+
+	/* power management */
+	uint8_t ic_pm_enabled;
+	struct delayed_work pm_work;
+	int ic_pm_state[QTN_PM_IOCTL_MAX];
+	struct timer_list ic_pm_period_change;	/* CoC period change timer */
+
+#if defined(QBMPS_ENABLE)
+	struct bmps_tput_measure ic_bmps_tput_check;	/* for BMPS tput measurement */
+#endif
+	/* hold the calling task until the scan completes */
+	wait_queue_head_t	ic_scan_comp;
+
+	struct ieee80211_node *ic_node_idx_ni[QTN_NCIDX_MAX];
+
+	/* Soc mac addr of the STB*/
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	u_int8_t soc_addr[IEEE80211_ADDR_LEN];
+	/* Soc IP addr of the STB*/
+	u_int32_t ic_soc_ipaddr;
+#endif
+	/* tpc query info */
+	struct ieee80211_tpc_query_info ic_tpc_query_info;
+	int8_t (*ic_get_local_txpow)(struct ieee80211com *ic);
+	int (*ic_get_local_link_margin)(struct ieee80211_node *ni, int8_t *result);
+	/* measurement request */
+	struct ieee80211_global_measure_info ic_measure_info;
+
+	int (*ic_get_shared_vap_stats)(struct ieee80211vap *vap);
+	int (*ic_reset_shared_vap_stats)(struct ieee80211vap *vap);
+	int (*ic_get_shared_node_stats)(struct ieee80211_node *ni);
+	int (*ic_reset_shared_node_stats)(struct ieee80211_node *ni);
+	void (*ic_get_dscp2ac_map)(const uint8_t vapid, uint8_t *dscp2ac);
+	void (*ic_set_dscp2ac_map)(const uint8_t vapid, uint8_t *ip_dscp, uint8_t listlen, uint8_t ac);
+	void (*ic_set_dscp2tid_map)(const uint8_t vapid, const uint8_t *dscp2tid);
+	void (*ic_get_dscp2tid_map)(const uint8_t vapid, uint8_t *dscp2tid);
+
+	struct timer_list	ic_ba_setup_detect;	/*timer for detecting whether it is suitable to enable/disable AMPDU*/
+
+	int (*ic_get_cca_adjusting_status)(void);
+
+	struct ieee80211_wowlan ic_wowlan; /* WOWLAN related information */
+	uint8_t ic_extender_role;		/* Extender role */
+	uint8_t ic_extender_mbs_wgt;		/* MBS RSSI weight */
+	uint8_t ic_extender_rbs_wgt;		/* RBS RSSI weight */
+	uint8_t ic_extender_mbs_best_rssi;	/* MBS best RSSI threshold */
+	uint8_t ic_extender_rbs_best_rssi;	/* RBS best RSSI threshold */
+	uint8_t ic_extender_verbose;	/* EXTENDER Debug Level */
+	uint8_t ic_extender_mbs_rssi_margin;	/* MBS RSSI margin, used in link down detection only */
+	uint8_t ic_extender_mbs_bssid[IEEE80211_ADDR_LEN];
+	uint8_t ic_extender_mbs_ocac;
+	uint8_t ic_extender_rbs_num;
+	uint8_t ic_extender_rbs_bssid[QTN_MAX_RBS_NUM][IEEE80211_ADDR_LEN];
+	unsigned long ic_extender_mbs_detected_jiffies;
+#define QTN_EXTENDER_RSSI_MAX_COUNT	10
+	uint8_t ic_extender_rssi_continue;		/* record continuous RSSI event times */
+	struct timer_list ic_extender_scan_timer;	/* timer used by RBS to search MBS */
+	uint32_t ic_scan_opchan_enable;
+	uint32_t ic_extender_bgscanintvl;
+	uint8_t  ic_extender_rbs_bw;		/* recorded bandwidth of RBS */
+	uint32_t ic_tqew_descr_limit;		/* tqew desc limit */
+
+	uint32_t ic_scan_tbl_len_max;
+
+	int ic_scan_results_check;
+	struct timer_list ic_scan_results_expire; /* scan results expire timer */
+	int hostap_wpa_state;
+
+	/* VHT related callbacks */
+	void (*ic_send_vht_grp_id_act)(struct ieee80211vap *vap, struct ieee80211_node *ni);
+	struct timer_list ic_obss_timer;
+	uint8_t ic_obss_scan_enable;
+	uint8_t ic_obss_scan_count;
+	struct ieee80211_obss_scan_ie ic_obss_ie;
+	void (*ic_coex_stats_update)(struct ieee80211com *ic, uint32_t value);
+	struct ieee80211_dm_factor ic_dm_factor;
+	uint32_t ic_vap_default_state;	/* 1 - enabled, 0 - disabled*/
+	int32_t ic_neighbor_count;	/* Neighbor APs' count */
+	uint8_t ic_neighbor_cnt_sparse; /* Threshold of neighbor AP count when it's sparse */
+	uint8_t ic_neighbor_cnt_dense;	/* Threshold of neighbor AP count when it's dense */
+
+	/* tx airtime callbacks */
+	uint32_t (*ic_tx_airtime)(const struct ieee80211_node *ni);
+	uint32_t (*ic_tx_accum_airtime)(const struct ieee80211_node *ni);
+	void     (*ic_tx_airtime_control)(struct ieee80211vap *vap, uint32_t value);
+	uint32_t (*ic_rx_airtime) (const struct ieee80211_node *ni);
+	uint32_t (*ic_rx_accum_airtime) (const struct ieee80211_node *ni);
+	/* mu group update callback */
+	void	(*ic_mu_group_update)(struct ieee80211com *ic, struct qtn_mu_group_update_args *state);
+	int (*ic_get_cca_stats)(struct net_device *dev, struct ieee80211com *ic,
+				struct qtn_exp_cca_stats *cs);
+
+	/* sync rx reorder window on receiving BAR
+	 * 0 - disabled
+	 * 1 - enabled for QTN devices only
+	 * 2 - enabled for all devices
+	 */
+	uint8_t ic_rx_bar_sync;
+	uint8_t bb_deafness_war_disable;
+	struct sta_dfs_info sta_dfs_info;
+#ifdef CONFIG_QHOP
+	struct rbs_mbs_dfs_info rbs_mbs_dfs_info;
+#endif
+	uint8_t ic_20_40_coex_enable;
+
+	void (*ic_update_ocac_state_ie)(struct ieee80211com *ic, uint8_t state, uint8_t param);
+	uint32_t ic_allow_11b;
+
+	/* Initial channel selection */
+	struct autochan_ranking_params ic_autochan_ranking_params;
+	struct ieee80211_chanset_table ic_autochan_table;
+	int ic_autochan_scan_type[CHAN_SELECT_SCAN_MAX];
+	int ic_autochan_last_scan_bw;
+	int ic_autochan_scan_flags;
+	int ic_autochan_dbg_level;
+	uint32_t ic_pm_reason;
+	uint32_t ic_coc_cc_reason;
+	uint32_t ic_coc_move_to_ndfs;
+	uint32_t ic_flags_11k;
+	uint32_t ic_flags_11v;
+
+#if defined(PLATFORM_QFDR)
+	/* flags to control rejection of authentication requests */
+#define QFDR_F_REJECT_AUTH	0x1
+#define QFDR_F_IGNORE_PROBE_REQ	0x2
+	uint8_t ic_reject_auth;
+#endif
+};
+
+/*
+ * Return the power-save duty-cycle period in TUs, converted from the
+ * millisecond value stored in ic_pm_state[QTN_PM_PDUTY_PERIOD_MS].
+ */
+static __inline__ uint32_t ieee80211_pm_period_tu(const struct ieee80211com *ic)
+{
+	return IEEE80211_MS_TO_TU(ic->ic_pm_state[QTN_PM_PDUTY_PERIOD_MS]);
+}
+
+struct vlan_group;
+struct eapolcom;
+struct ieee80211_aclator;
+
+/* Parameters for a deferred (tasklet) state-machine transition; see iv_nsparams. */
+struct ieee80211_nsparams {
+	enum ieee80211_state newstate;	/* target state for the transition */
+	int arg;			/* argument passed along with the transition */
+	int result;			/* outcome reported back by the handler */
+};
+
+#define IW_MAX_SPY 8	/* max stations tracked by IWSPY */
+/* Wireless-extensions IWSPY support: per-station RSSI tracking state. */
+struct ieee80211_spy {
+        u_int8_t mac[IW_MAX_SPY * IEEE80211_ADDR_LEN];	/* flat array of tracked MACs */
+        u_int32_t ts_rssi[IW_MAX_SPY];   /* ts of rssi value from last read */
+        u_int8_t thr_low;	/* 1 byte rssi value, 0 = threshold is off */
+        u_int8_t thr_high;	/* 1 byte rssi value */
+        u_int8_t num;		/* number of entries currently in use */
+};
+
+#define MAX_PROC_IEEE80211_SIZE 16383	/* maximum /proc buffer size */
+#define PROC_IEEE80211_PERM 0644	/* default /proc entry permissions */
+
+/* Read/write buffers backing one /proc ieee80211 entry. */
+struct proc_ieee80211_priv {
+     int rlen;		/* read-side buffer length used */
+     int max_rlen;	/* read-side buffer capacity */
+     char *rbuf;	/* read-side buffer */
+
+     int wlen;		/* write-side buffer length used */
+     int max_wlen;	/* write-side buffer capacity */
+     char *wbuf;	/* write-side buffer */
+};
+
+/* Node in the singly linked list of /proc entries owned by a VAP (iv_proc_entries). */
+struct ieee80211_proc_entry {
+	char *name;				/* entry name under the proc directory */
+	struct file_operations *fileops;	/* file operations serving this entry */
+	struct proc_dir_entry *entry;		/* the created proc entry */
+	struct ieee80211_proc_entry *next;	/* next entry in the list */
+};
+
+/* Buffer of application-supplied information elements (IEs). */
+struct ieee80211_app_ie_t {
+	u_int32_t		length;		/* buffer length */
+	struct ieee80211_ie    *ie;		/* buffer containing one or more IEs */
+};
+
+#define IEEE80211_PPQ_DEF_MAX_RETRY	1	/* default retry count for pending frames */
+/* Push entry (e) onto the head of singly linked list (x). */
+#define REPLACE_PPQ_ENTRY_HEAD(x, e) do {\
+	if ((x) == NULL) {\
+		(x) = (e);\
+	} else {\
+		(e)->next = (x);\
+		(x) = (e);\
+	}\
+} while (0)
+
+/* Reasons a queued pairing-pending frame exchange can fail (passed to fn_fail). */
+enum ppq_fail_reason {
+	PPQ_FAIL_TIMEOUT = 1,	/* no matching response before expiry */
+	PPQ_FAIL_NODELEAVE,	/* peer node left */
+	PPQ_FAIL_STOP,		/* queue stopped / deinitialized */
+	PPQ_FAIL_MAX,
+};
+
+/* Event classes for 20/40 coexistence bandwidth-switch statistics
+ * (consumed via ic_coex_stats_update) — named after the triggering event. */
+enum coex_bw_switch{
+	WLAN_COEX_STATS_BW_ACTION,
+	WLAN_COEX_STATS_BW_ASSOC,
+	WLAN_COEX_STATS_BW_SCAN,
+};
+/*
+ * One transmitted management frame awaiting its expected response.
+ * Entries form a singly linked list headed by
+ * struct ieee80211_pairing_pending_queue below.
+ */
+struct ieee80211_pairing_pending_entry {
+	struct ieee80211_pairing_pending_entry *next;	/* next pending entry */
+
+	struct sk_buff *skb;		/* the frame kept for (re)transmission */
+	struct ieee80211_node *ni;	/* destination node */
+
+	/* response parameters you expect */
+	u_int8_t expected_category;
+	u_int8_t expected_action;
+	u_int8_t expected_token;
+
+	unsigned long expire;			/* expiry interval (see ieee80211_ppqueue_pre_tx) */
+	unsigned long next_expire_jiffies;	/* absolute time of this entry's next expiry */
+	u_int32_t max_retry;			/* maximum retransmissions allowed */
+	u_int32_t retry_cnt;			/* retransmissions performed so far */
+
+	ppq_callback_success fn_success;	/* invoked when the response arrives */
+	ppq_callback_fail fn_fail;		/* invoked on failure (enum ppq_fail_reason) */
+};
+
+/* List head for pending entries, with its lock and expiry timer. */
+struct ieee80211_pairing_pending_queue {
+	struct ieee80211_pairing_pending_entry *next;	/* first pending entry */
+	spinlock_t lock;				/* protects the list */
+	struct timer_list timer;			/* expiry timer */
+	unsigned long next_expire_jiffies;		/* earliest expiry among entries */
+};
+
+/* Remove the pending entry matching this response (node + category/action/token). */
+void ieee80211_ppqueue_remove_with_response(struct ieee80211_pairing_pending_queue *queue,
+					struct ieee80211_node *ni,
+					u_int8_t category,
+					u_int8_t action,
+					u_int8_t token);
+/* Remove all pending entries addressed to a node that has left. */
+void ieee80211_ppqueue_remove_node_leave(struct ieee80211_pairing_pending_queue *queue,
+				struct ieee80211_node *ni);
+/* Remove all pending entries matching a category/action pair. */
+void ieee80211_ppqueue_remove_with_cat_action(struct ieee80211_pairing_pending_queue *queue,
+				u_int8_t category,
+				u_int8_t action);
+/* Set up / tear down a VAP's pairing-pending queue (iv_ppqueue). */
+void ieee80211_ppqueue_init(struct ieee80211vap *vap);
+void ieee80211_ppqueue_deinit(struct ieee80211vap *vap);
+/* Queue a frame before transmission, registering the expected response
+ * and the success/failure callbacks. */
+struct sk_buff *ieee80211_ppqueue_pre_tx(struct ieee80211_node *ni,
+				struct sk_buff *skb,
+				u_int8_t category,
+				u_int8_t action,
+				u_int8_t token,
+				unsigned long expire,
+				ppq_callback_success fn_success,
+				ppq_callback_fail fn_fail);
+
+/* Generic 802.11k sub-element: identifier plus variable-length payload. */
+typedef struct _ieee80211_11k_sub_element {
+	SLIST_ENTRY(_ieee80211_11k_sub_element) next;	/* list linkage */
+	uint8_t sub_id;					/* sub-element identifier */
+	uint8_t data[0]; /* variable-length sub-element payload follows */
+} ieee80211_11k_sub_element ;
+
+/* Head type for a list of 802.11k sub-elements. */
+typedef SLIST_HEAD(,_ieee80211_11k_sub_element) ieee80211_11k_sub_element_head;
+
+/* Per-peer TDLS power-save bookkeeping, kept in tdls_ps_hash buckets. */
+struct tdls_peer_ps_info {
+	LIST_ENTRY(tdls_peer_ps_info) peer_hash;	/* hash-bucket linkage */
+	uint8_t peer_addr[IEEE80211_ADDR_LEN];		/* peer MAC address */
+	uint32_t tdls_path_down_cnt;		/* Teardown counter of this TDLS link */
+	uint32_t tdls_link_disabled_ints;	/* Intervals that disable TDLS link */
+};
+
+/* Per-peer WDS extender state, kept in iv_extender_wds_hash buckets. */
+struct ieee80211_extender_wds_info {
+	LIST_ENTRY(ieee80211_extender_wds_info) peer_wds_hash;	/* hash-bucket linkage */
+	uint8_t peer_addr[IEEE80211_ADDR_LEN];			/* peer MAC address */
+	struct ieee80211_qtn_ext_role extender_ie;		/* peer's extender-role IE */
+};
+/**
+ * Interworking (802.11u) information — see the interw_info VAP field.
+ */
+struct interworking_info {
+	uint8_t an_type;                        /* access network type */
+	uint8_t hessid[IEEE80211_ADDR_LEN];     /* homogeneous essid */
+};
+
+/*
+ * Station profile specific to dual band mode.
+ * Each band will maintain profile to be referred while band change.
+ * Station profile is initialized while bootup from user configurations.
+ */
+struct ieee80211_sta_profile {
+        int phy_mode;	/* PHY mode for this band */
+        int vht;	/* VHT setting */
+        int bw;		/* bandwidth setting */
+        int vsp;	/* VSP setting */
+        int scs;	/* SCS setting */
+        int pmf;	/* PMF (802.11w) setting */
+};
+
+/* Broadcast packets-per-second rate-limiting state (see the bcast_pps VAP field). */
+struct bcast_pps_info {
+	u_int16_t	max_bcast_pps;		/* Max broadcast packets allowed per second */
+	u_int16_t	rx_bcast_counter;	/* Counter to record no. of broadcast packets processed in wireless ingress path */
+	unsigned long	rx_bcast_pps_start_time;/* Timestamp in jiffies referred to, to reset the rx_bcast_counter */
+	u_int16_t	tx_bcast_counter;	/* Counter to record no. of broadcast packets processed in EMAC/PCIe ingress path */
+	unsigned long	tx_bcast_pps_start_time;/* Timestamp in jiffies referred to, to reset the tx_bcast_counter */
+};
+
+/* Default thresholds/weights for dynamic RX A-MSDU control (iv_rx_amsdu_* fields). */
+#define IEEE80211_RX_AMSDU_THRESHOLD_CCA	500
+#define IEEE80211_RX_AMSDU_THRESHOLD_PMBL	1000
+#define IEEE80211_RX_AMSDU_PMBL_WF_SP		10
+#define IEEE80211_RX_AMSDU_PMBL_WF_LP		100
+
+/*
+ * Per-interface (virtual AP / station) state, layered on the shared
+ * struct ieee80211com.  One instance per net_device.
+ */
+struct ieee80211vap {
+	struct net_device *iv_dev;		/* associated device */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	struct rtnl_link_stats64 iv_devstats;		/* interface 64bits statistics */
+#else
+	struct net_device_stats	iv_devstats;	/* interface statistics */
+#endif
+	struct ifmedia iv_media;			/* interface media config */
+	struct iw_statistics iv_iwstats;		/* wireless statistics block */
+	struct ctl_table_header	*iv_sysctl_header;
+	struct ctl_table *iv_sysctls;
+	struct proc_dir_entry *iv_proc;
+	struct ieee80211_proc_entry *iv_proc_entries;
+	struct vlan_group *iv_vlgrp;		/* vlan group state */
+        struct ieee80211_sta_profile    iv_2_4ghz_prof;	/* 2.4ghz station profile */
+        struct ieee80211_sta_profile    iv_5ghz_prof;	/* 5ghz station profile */
+	enum ieee80211_phy_band        iv_pref_band;   /* preferred band in dual band mode */
+
+	TAILQ_ENTRY(ieee80211vap) iv_next;	/* list of vap instances */
+	u_int iv_unit;				/* virtual AP unit */
+	struct ieee80211com *iv_ic;		/* back ptr to common state */
+	u_int32_t iv_debug;			/* debug msg flags */
+
+#define QTN_RX_AMSDU_DISABLE	0
+#define QTN_RX_AMSDU_ENABLE	1
+#define QTN_RX_AMSDU_DYNAMIC	2
+	u_int32_t iv_rx_amsdu_enable;		/* RX AMSDU mode: 0-disable, 1-enable, 2-dynamic */
+	u_int32_t iv_rx_amsdu_threshold_cca;	/* the threshold of cca intf for dynamic RX AMSDU */
+	u_int32_t iv_rx_amsdu_threshold_pmbl;	/* the threshold of preamble error for dynamic RX AMSDU */
+	u_int32_t iv_rx_amsdu_pmbl_wf_lp;	/* the weight factor of long preamble for calculating pmbl error */
+	u_int32_t iv_rx_amsdu_pmbl_wf_sp;	/* the weight factor of short preamble for calculating pmbl error */
+
+	struct ieee80211_stats iv_stats;	/* statistics */
+
+	uint32_t tdls_discovery_interval;
+	uint32_t tdls_node_life_cycle;
+	uint8_t tdls_uapsd_indicat_wnd;		/* dot11TDLSPeerUAPSDIndicationWindow */
+	uint8_t tdls_path_sel_weight;			/* Weight of path selection algorithm */
+	struct timer_list tdls_rate_detect_timer;	/* TDLS rate detection timer */
+	struct delayed_work tdls_rate_detect_work;	/* TDLS rate detetion work */
+	struct delayed_work tdls_link_switch_work;	/* TDLS link switch queue work */
+	struct timer_list tdls_node_expire_timer;	/* TDLS node expire timer */
+	ATH_LIST_HEAD(, tdls_peer_ps_info) tdls_ps_hash[IEEE80211_NODE_HASHSIZE];
+	spinlock_t tdls_ps_lock;
+	uint8_t tdls_path_sel_prohibited;
+	uint32_t tdls_over_qhop_en;
+	uint32_t tdls_timeout_time;
+	uint32_t tdls_training_pkt_cnt;		/* TDLS training packet count */
+	uint32_t tdls_path_sel_pps_thrshld;	/* TDLS path select packet per second threshold */
+	uint32_t tdls_path_sel_rate_thrshld;	/* TDLS path select minium rate threshold */
+	uint32_t tdls_verbose;			/* TDLS debug info level */
+	int32_t tdls_min_valid_rssi;		/* The mininum RSSI value to allow to setup TDLS link*/
+	int32_t tdls_switch_ints;		/* The path switch intervals */
+	uint32_t tdls_phy_rate_wgt;		/* The weight of accumulated phy rate */
+	struct timer_list tdls_disassoc_timer;	/* TDLS disassoication timer */
+	enum ieee80211_state tdls_pending_state;	/* VAP pending state */
+	uint8_t tdls_fixed_off_chan;		/* TDLS fixed off channel */
+	uint8_t tdls_fixed_off_chan_bw;		/* TDLS fixed off channel bandwidth */
+	int tdls_pending_arg;			/* VAP pending argument */
+	uint8_t tdls_target_chan;		/* TDLS target off channel */
+	uint8_t tdls_off_chan_bw;		/* TDLS off channel bandwidth */
+	uint32_t tdls_cs_time;			/* TDLS channel switch time */
+	uint32_t tdls_cs_timeout;		/* TDLS channel switch timeout */
+	uint32_t tdls_cs_duration;		/* TDLS off channel duration in us */
+	uint8_t tdls_chan_switching;		/* TDLS channel switch in progress */
+	uint8_t tdls_cs_disassoc_pending;	/* Disassociation pending to wait channel switch finish */
+	struct ieee80211_node *tdls_cs_node;	/* The peer node channel switch is in progress */
+
+	int iv_monitor_nods_only;		/* in monitor mode only nods traffic */
+	int iv_monitor_txf_len;			/* in monitor mode, truncate tx packets */
+	int iv_monitor_phy_errors;		/* in monitor mode, accept phy errors */
+	int iv_monitor_crc_errors;		/* in monitor mode, accept crc errors */
+
+	int (*iv_newstate)(struct ieee80211vap *, enum ieee80211_state, int);
+	u_int8_t iv_myaddr[IEEE80211_ADDR_LEN];
+	u_int8_t iv_ds_pc_addr[IEEE80211_ADDR_LEN]; /* In STA mode, sta is connected to this PC */
+	u_int32_t iv_flags;			/* state flags */
+	u_int32_t iv_flags_ext;			/* extension of state flags */
+	u_int32_t iv_caps;			/* capabilities */
+	u_int8_t iv_ath_cap;			/* Atheros adv. capabilities */
+	enum ieee80211_opmode iv_opmode;	/* operation mode */
+	enum ieee80211_state iv_state;		/* state machine state */
+	struct timer_list iv_mgtsend;		/* mgmt frame response timer */
+	struct ieee80211_node *iv_mgmt_retry_ni;	/* mgmt frame retry parameter - ni */
+	int iv_mgmt_retry_type;		/* mgmt frame retry parameter - type */
+	int iv_mgmt_retry_arg;			/* mgmt frame retry parameter - arg */
+#define IEEE80211_MAX_MGMT_RETRY		3
+	u_int32_t iv_mgmt_retry_cnt;		/* mgmt frame retry count */
+
+						/* inactivity timer settings */
+	int iv_inact_init;			/* setting for new station */
+	int iv_inact_auth;			/* auth but not assoc setting */
+	int iv_inact_run;			/* authorized setting */
+	int iv_inact_probe;			/* inactive probe time */
+
+	int iv_des_nssid;			/* # desired ssids */
+	struct ieee80211_scan_ssid iv_des_ssid[1];/* desired ssid table */
+	u_int8_t iv_des_bssid[IEEE80211_ADDR_LEN];
+	int iv_nicknamelen;			/* XXX junk */
+	u_int8_t	 iv_nickname[IEEE80211_NWID_LEN];
+	u_int iv_bgscanidle;			/* bg scan idle threshold */
+	u_int iv_bgscanintvl;			/* bg scan min interval */
+	u_int iv_scanvalid;			/* scan cache valid threshold */
+	struct ieee80211_roam iv_roam;		/* sta-mode roaming state */
+
+	u_int16_t iv_max_aid;
+	u_int16_t iv_sta_assoc;			/* stations associated */
+	u_int16_t iv_non_qtn_sta_assoc;		/* non-qtn stations associated */
+	u_int16_t iv_sta_assoc_limit;	/* max associated stations number */
+	u_int16_t iv_ps_sta;			/* stations in power save */
+	u_int16_t iv_ps_pending;		/* ps sta's w/ pending frames */
+	u_int16_t iv_ap_buffered;		/* data buffered on AP */
+	u_int8_t *iv_tim_bitmap;		/* power-save stations w/ data*/
+	u_int16_t iv_tim_len;			/* ic_tim_bitmap size (bytes) */
+	u_int8_t iv_dtim_period;		/* DTIM period */
+	u_int8_t iv_dtim_count;			/* DTIM count from last bcn */
+						/* set/unset aid pwrsav state */
+	void (*iv_set_tim)(struct ieee80211_node *, int);
+	u_int8_t iv_uapsdinfo;			/* sta mode QoS Info flags */
+	struct ieee80211_node *iv_bss;		/* information for this node */
+	int   iv_fixed_rate;  /* 802.11 rate or -1 */
+	u_int32_t iv_rtsthreshold;
+	u_int16_t iv_fragthreshold;
+	u_int16_t iv_txmin;			/* min tx retry count */
+	u_int16_t iv_txmax;			/* max tx retry count */
+	u_int16_t iv_txlifetime;		/* tx lifetime */
+	int iv_inact_timer;			/* inactivity timer wait */
+	void *iv_opt_ie;			/* user-specified IE's */
+	u_int16_t iv_opt_ie_len;		/* length of ni_opt_ie */
+	u_int8_t iv_def_txkey;			/* default/group tx key index */
+	struct ieee80211_key iv_nw_keys[IEEE80211_WEP_NKID];
+	int (*iv_key_alloc)(struct ieee80211vap *, const struct ieee80211_key *);
+	int (*iv_key_delete)(struct ieee80211vap *, const struct ieee80211_key *,
+		const u_int8_t mac[IEEE80211_ADDR_LEN]);
+	int (*iv_key_set)(struct ieee80211vap *, const struct ieee80211_key *,
+		const u_int8_t mac[IEEE80211_ADDR_LEN]);
+	void (*iv_key_update_begin)(struct ieee80211vap *);
+	void (*iv_key_update_end)(struct ieee80211vap *);
+
+	const struct ieee80211_authenticator *iv_auth;/* authenticator glue */
+	void *iv_ec;				/* private auth state */
+	struct ieee80211vap *iv_xrvap;		/* pointer to XR VAP , if XR is enabled */
+	u_int16_t iv_xrbcnwait;			/* SWBA count incremented until it reaches XR_BECON_FACTOR */
+	struct timer_list iv_xrvapstart;	/* timer to start xr */
+	u_int8_t iv_chanchange_count; 		/* 11h counter for channel change */
+	int iv_mcast_rate; 			/* Multicast rate (Kbps) */
+
+	const struct ieee80211_aclator *iv_acl;	/* aclator glue */
+	void *iv_as;				/* private aclator state */
+
+	struct ieee80211_beacon_param_t *param;		/* beacon ie arguments list */
+	struct timer_list iv_swbmiss;		/* software beacon miss timer */
+	u_int16_t iv_swbmiss_period;		/* software beacon miss timer period */
+	struct timer_list iv_swberp;		/* software obss erp protection check timer */
+	u_int16_t iv_swberp_period;             /* software obss erp protection check period */
+#define IEEE80211_SWBMISS_WARNINGS 10	/* # of warnings before taking action on swbmiss */
+	u_int16_t iv_swbmiss_warnings;
+	int		  iv_bcn_miss_thr;			/* Beacon miss threshold */
+	u_int8_t  iv_link_loss_enabled;		/* Link loss - Controlled by user. By default is on */
+#if defined(QBMPS_ENABLE)
+	u_int8_t  iv_swbmiss_bmps_warning;	/* swbmiss warning for STA power-saving */
+	int8_t  iv_bmps_tput_high;		/* tput indication for STA power-saving */
+#endif
+
+	u_int8_t  iv_qtn_ap_cap;			    /* Quantenna flags from bcn/probe resp (station only) */
+	u_int8_t  iv_qtn_flags;					/* Quantenna capability flags */
+	u_int8_t  iv_is_qtn_dev;		/* 1 - is QTN dev, 0 - is not QTN dev */
+#define IEEE80211_QTN_AP					0x01
+#define IEEE80211_QTN_BRIDGEMODE_DISABLED	0x02
+	u_int8_t  iv_qtn_options;
+#define IEEE80211_QTN_NO_SSID_ASSOC_DISABLED	0x01
+
+	struct ieee80211_nsparams iv_nsparams;	/* new state parameters for tasklet for stajoin1 */
+	struct IEEE80211_TQ_STRUCT iv_stajoin1tq; /* tasklet for newstate action called from stajoin1tq */
+	uint16_t iv_vapnode_idx;		/* node_idx to use for tx of non specifically targetted frames */
+	uint8_t iv_ssid_group;			/* Logical group assigned for SSID, used for BSS association limits */
+	struct timer_list iv_sta_fast_rejoin;
+	uint8_t iv_sta_fast_rejoin_bssid[IEEE80211_ADDR_LEN];
+	uint8_t wds_mac[IEEE80211_ADDR_LEN];	/* peer MAC of the WDS link */
+	struct ieee80211_key iv_wds_peer_key;	/* key used with the WDS peer */
+
+	uint16_t		iv_extdr_flags;
+
+	struct ieee80211_spy iv_spy;            /* IWSPY support */
+	unsigned int		iv_nsdone;	/* Done with scheduled newstate tasklet */
+	struct ieee80211_app_ie_t app_ie[IEEE80211_APPIE_NUM_OF_FRAME];
+	u_int32_t		app_filter;
+	enum ieee80211_11n_htmode iv_htmode;	/* state machine state */
+	int32_t			iv_mcs_config;	/* MCS configuration for 11N and 11AC or -1 for autorate */
+#define IEEE80211_MCS_AUTO_RATE_ENABLE		-1
+
+	u_int32_t		iv_ht_flags;	/* HT mode mandatory flags */
+	u_int32_t		iv_vht_flags;	/* VHT mode htcap flags */
+	u_int8_t		iv_dsss_40MHz_ok;	/* is dsss/cck OKAY in 40MHz? */
+	u_int8_t		iv_non_gf_sta_present;	/* is non GF STA present? (always 1 for 88K) */
+	u_int8_t		iv_ht_anomaly_40MHz_present;	/* atleast one 20 MHz STA in 20/40 MHz BSS found */
+	u_int8_t		iv_ht_mixedmode_present; /* HT Non-HT mixed mode is desired */
+	u_int8_t		iv_dual_cts_required;	/* HT dual CTS protection is required */
+	u_int8_t		iv_lsig_txop_ok;	/* is lsig in TXOP ok */
+	u_int8_t		iv_stbc_beacon;		/* is current beacon a stbc beacon */
+	u_int16_t		iv_smps_force;		/* The overridden value for SMPS for the STA. */
+	u_int8_t		iv_implicit_ba;		/* Implicit block ack flags for the VAP. */
+	u_int16_t		iv_ba_control;		/* Block ack control - zero indicates accept no BAs,
+							   bit in position 'n' indicates accept and send BA for the given TID */
+	u_int16_t		iv_ba_old_control;	/* Old block ack control */
+	unsigned long		iv_blacklist_timeout;	/* MAC Filtering */
+	u_int16_t		iv_max_ba_win_size;	/* Maximum window size allowable */
+	u_int32_t		iv_rate_training_count; /* Rate training to new STAs - number of bursts */
+	u_int32_t		iv_rate_training_burst_count; /* Rate training to new STAs - packets per burst */
+	u_int8_t		iv_mc_legacy_rate;	/* Multicast legacy rates */
+	u_int8_t		iv_forward_unknown_mc;	/* Forward packets even if we have no bridge entry for them */
+	u_int8_t		iv_mc_to_uc;		/* Forward mcast/bcast as unicast */
+#define	IEEE80211_QTN_MC_TO_UC_LEGACY	0
+#define	IEEE80211_QTN_MC_TO_UC_NEVER	1
+#define	IEEE80211_QTN_MC_TO_UC_ALWAYS	2		/* For WFA testing only */
+	u_int8_t		iv_reliable_bcst;
+	u_int8_t		iv_ap_fwd_lncb;
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	struct ieee80211_pairing_pending_queue	iv_ppqueue;		/* pairing pending queue */
+#endif
+	struct ieee80211_app_ie_t	qtn_pairing_ie;
+	u_int32_t iv_disconn_cnt;		/* count of disconnection event */
+	u_int32_t iv_disconn_seq;		/* sequence to query disconnection count */
+
+	struct timer_list	iv_test_traffic;	/* timer to start xr */
+	u_int32_t		iv_test_traffic_period;		/* Interval of periodically sending NULL packet to all associated STAs. 0 means disable */
+	uint32_t iv_11ac_enabled;	/* Enable/disable 11AC feature on Topaz */
+	uint8_t	iv_pri;			/* vap priority, used to calculate priority for per node per tid queue */
+	uint8_t iv_pmf;                 /* VAP PMF/802.11w capability options */
+	u_int8_t		iv_local_max_txpow;	/* local max transmit power, equal to regulatory max power minus power constraint */
+	u_int16_t iv_disassoc_reason;
+	struct ieee80211_wme_state iv_wme;	/* Per-VAP WME/WMM state for AP mode */
+	spinlock_t iv_extender_wds_lock;
+	ATH_LIST_HEAD(, ieee80211_extender_wds_info) iv_extender_wds_hash[IEEE80211_NODE_HASHSIZE];
+
+	uint8_t                 iv_tx_amsdu;            /* Enable/disable A-MSDU  */
+	uint8_t			iv_tx_amsdu_11n;	/* Enable/disable A-MSDU for 11n nodes */
+	uint8_t			iv_tx_max_amsdu;	/* Max TX A-MSDU */
+
+	/* 802.11u related */
+	uint8_t			interworking;           /* 1 - Enabled, 0 - Disabled */
+	struct interworking_info interw_info;		/* Interworking information */
+
+	uint8_t			hs20_enable;		/* Enable/Disable HS2.0 */
+	uint8_t			disable_dgaf;		/* Disable Downstream Group-Addressed Forwarding - used by HS2.0 */
+	uint8_t			proxy_arp;		/* 1 - Enabled,  0- Disabled */
+	uint8_t			iv_coex;		/* 20/40 BSS coexistence control — TODO confirm exact semantics */
+	uint8_t			allow_tkip_for_vht;	/* 1 - TKIP is allowed, 0 - TKIP is not allowed */
+	uint8_t			is_block_all_assoc;	/* 1 - block, 0 - unblock */
+	uint8_t			tx_ba_disable;		/* 1 - TXBA disable, 0 - TXBA permitted */
+	uint8_t			rx_ba_decline;		/* 1 - RXBA decline, 0 - RXBA permitted */
+	uint8_t			iv_vap_state;		/* 1 - enabled, 0 - disabled */
+	uint8_t			iv_osen;		/* 1/0 - OSEN enabled/disabled */
+
+	uint8_t			sample_sta_count;
+	spinlock_t		sample_sta_lock;
+	struct list_head	sample_sta_list;
+	struct bcast_pps_info	bcast_pps;
+	uint8_t			iv_11ac_and_11n_flag;	/* 1 - IEEE80211_AC_ONLY, 2 - IEEE80211_N_ONLY */
+#define IEEE80211_FEXT_SYNC_CONFIG	0x00000001
+	uint32_t		iv_flags_ext2;
+	uint16_t		iv_mdid;		/* Mobility domain id */
+	uint8_t			iv_ft_over_ds;		/* ft over ds */
+	uint8_t			enable_iot_sts_war;	/* IOT interop workaround toggle — TODO confirm which WAR */
+#if defined(PLATFORM_QFDR)
+	uint16_t		iv_scan_only_freq;	/* WAR: trigger several following scans */
+#define QFDR_SCAN_ONLY_FREQ_ATTEMPTS	3
+	uint16_t		iv_scan_only_cnt;	/*      only for specific frequency   */
+#endif
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+	uint8_t			bsa_status;		/* bsa flag 1 - BSA is enabled, 0 - BSA is disabled */
+	ATH_LIST_HEAD(, bsa_deny_sta) deny_sta_list;
+	uint8_t			deny_sta_list_inited;
+#endif
+};
+MALLOC_DECLARE(M_80211_VAP);
+
+#define IEEE80211_BAND_IDX_MAX	7
+/* Describes a contiguous run of channels forming one band. */
+struct ieee80211_band_info {
+	uint8_t		band_chan_step;			/* step to next channel */
+	uint8_t		band_first_chan;		/* first channel in the band */
+	int16_t		band_chan_cnt;			/* channels in the band */
+};
+
+/*
+ * Note: A node table lock must be acquired or IRQ disabled to maintain atomic
+ * when calling this function, and must not be released until a node ref is taken
+ * or the returned pointer is discarded.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/* Stubbed on kernels >= 4.7: always returns NULL (reason not visible here). */
+static inline struct ieee80211_node *ieee80211_get_wds_peer_node_noref(struct ieee80211vap *iv)
+{
+	return NULL;
+}
+#else
+/*
+ * Look up the node cached under the VAP's iv_vapnode_idx without taking a
+ * reference.  Returns NULL when the index is invalid.
+ */
+static inline struct ieee80211_node *ieee80211_get_wds_peer_node_noref(struct ieee80211vap *iv)
+{
+	struct ieee80211com *ic = iv->iv_ic;
+
+	if (IEEE80211_NODE_IDX_VALID(iv->iv_vapnode_idx)) {
+		return ic->ic_node_idx_ni[IEEE80211_NODE_IDX_UNMAP(iv->iv_vapnode_idx)];
+	}
+	return NULL;
+}
+#endif
+
+/*
+ * Referenced variant of the lookup above: holds the node-table lock around
+ * the no-ref lookup and takes a node reference before returning.  May return
+ * NULL; on success the caller is responsible for dropping the reference.
+ */
+static inline struct ieee80211_node *ieee80211_get_wds_peer_node_ref(struct ieee80211vap *iv)
+{
+	struct ieee80211com *ic = iv->iv_ic;
+	struct ieee80211_node *ni = NULL;
+
+	IEEE80211_NODE_LOCK_IRQ(&ic->ic_sta);
+	ni = ieee80211_get_wds_peer_node_noref(iv);
+	if (ni)
+		ieee80211_ref_node(ni);
+	IEEE80211_NODE_UNLOCK_IRQ(&ic->ic_sta);
+
+	return ni;
+}
+
+#define	IEEE80211_ADDR_NULL(a1)		(memcmp(a1, "\x00\x00\x00\x00\x00\x00", \
+						IEEE80211_ADDR_LEN) == 0)
+#define	IEEE80211_ADDR_BCAST(a1)	(memcmp(a1, "\xff\xff\xff\xff\xff\xff", \
+						IEEE80211_ADDR_LEN) == 0)
+#define	IEEE80211_ADDR_EQ(a1, a2)	(memcmp(a1, a2, IEEE80211_ADDR_LEN) == 0)
+#define	IEEE80211_ADDR_COPY(dst, src)	memcpy(dst, src, IEEE80211_ADDR_LEN)
+#define	IEEE80211_ADDR_SET_NULL(dst)	memset(dst, 0, IEEE80211_ADDR_LEN)
+
+/* ic_flags */
+#define	IEEE80211_F_FF		0x00000001	/* CONF: ATH FF enabled */
+#define	IEEE80211_F_TURBOP	0x00000002	/* CONF: ATH Turbo enabled*/
+#define	IEEE80211_F_PROMISC	0x00000004	/* STATUS: promiscuous mode */
+#define	IEEE80211_F_ALLMULTI	0x00000008	/* STATUS: all multicast mode */
+/* NB: this is intentionally setup to be IEEE80211_CAPINFO_PRIVACY */
+#define	IEEE80211_F_PRIVACY	0x00000010	/* CONF: privacy enabled */
+#define	IEEE80211_F_PUREG	0x00000020	/* CONF: 11g w/o 11b sta's */
+#define	IEEE80211_F_XRUPDATE	0x00000040	/* CONF: update beacon XR element*/
+#define	IEEE80211_F_SCAN	0x00000080	/* STATUS: scanning */
+#define	IEEE80211_F_CCA		0x00000100	/* CONF: beacon cca info */
+#define	IEEE80211_F_SIBSS	0x00000200	/* STATUS: start IBSS */
+/* NB: this is intentionally setup to be IEEE80211_CAPINFO_SHORT_SLOTTIME */
+#define	IEEE80211_F_SHSLOT	0x00000400	/* STATUS: use short slot time*/
+#define	IEEE80211_F_PMGTON	0x00000800	/* CONF: Power mgmt enable */
+#define	IEEE80211_F_DESBSSID	0x00001000	/* CONF: des_bssid is set */
+#define	IEEE80211_F_WME		0x00002000	/* CONF: enable WME use */
+#define	IEEE80211_F_BGSCAN	0x00004000	/* CONF: bg scan enabled */
+#define	IEEE80211_F_SWRETRY	0x00008000	/* CONF: sw tx retry enabled */
+#define IEEE80211_F_TXPOW_FIXED	0x00010000	/* TX Power: fixed rate */
+#define	IEEE80211_F_IBSSON	0x00020000	/* CONF: IBSS creation enable */
+#define	IEEE80211_F_SHPREAMBLE	0x00040000	/* STATUS: use short preamble */
+#define	IEEE80211_F_DATAPAD	0x00080000	/* CONF: do alignment pad */
+#define	IEEE80211_F_USEPROT	0x00100000	/* STATUS: protection enabled */
+#define	IEEE80211_F_USEBARKER	0x00200000	/* STATUS: use barker preamble*/
+#define	IEEE80211_F_TIMUPDATE	0x00400000	/* STATUS: update beacon tim */
+#define	IEEE80211_F_WPA1	0x00800000	/* CONF: WPA enabled */
+#define	IEEE80211_F_WPA2	0x01000000	/* CONF: WPA2 enabled */
+#define	IEEE80211_F_WPA		0x01800000	/* CONF: WPA/WPA2 enabled */
+#define	IEEE80211_F_DROPUNENC	0x02000000	/* CONF: drop unencrypted */
+#define	IEEE80211_F_COUNTERM	0x04000000	/* CONF: TKIP countermeasures */
+#define	IEEE80211_F_HIDESSID	0x08000000	/* CONF: hide SSID in beacon */
+#define IEEE80211_F_NOBRIDGE    0x10000000	/* CONF: disable internal bridge */
+
+#define	IEEE80211_F_WMEUPDATE	0x20000000	/* STATUS: update beacon wme */
+#define IEEE80211_F_DOTH	0x40000000	/* enable 802.11h */
+#define IEEE80211_F_CHANSWITCH	0x80000000	/* force chanswitch */
+
+/* ic_flags_ext */
+#define	IEEE80211_FEXT_WDS	0x00000001	/* CONF: 4 addr allowed */
+#define IEEE80211_FEXT_COUNTRYIE 0x00000002	/* CONF: enable country IE */
+#define IEEE80211_FEXT_SCAN_PENDING 0x00000004	/* STATE: scan pending */
+#define	IEEE80211_FEXT_BGSCAN	0x00000008	/* STATE: enable full bgscan completion */
+#define IEEE80211_FEXT_UAPSD	0x00000010	/* CONF: enable U-APSD */
+#define IEEE80211_FEXT_SLEEP	0x00000020	/* STATUS: sleeping */
+#define IEEE80211_FEXT_EOSPDROP	0x00000040	/* drop uapsd EOSP frames for test */
+#define	IEEE80211_FEXT_MARKDFS	0x00000080	/* Enable marking of dfs interference */
+#define IEEE80211_FEXT_REGCLASS	0x00000100	/* CONF: send regclassids in country ie */
+#define IEEE80211_FEXT_ERPUPDATE 0x00000200	/* STATUS: update ERP element */
+#define IEEE80211_FEXT_SWBMISS 0x00000400	/* CONF: use software beacon timer */
+#define IEEE80211_FEXT_DROPUNENC_EAPOL 0x00000800      /* CONF: drop unencrypted eapol frames */
+#define IEEE80211_FEXT_APPIE_UPDATE 0x00001000	/* STATE: beacon APP IE updated */
+#define IEEE80211_FEXT_11N_PROTECT	0x00002000	/* Enable 11n protection */
+#define IEEE80211_FEXT_AMPDU		0x00004000	/* CONF: A-MPDU supported */
+#define IEEE80211_FEXT_AMSDU		0x00008000	/* CONF: A-MSDU supported */
+#define IEEE80211_FEXT_USEHT20		0x00010000	/* use HT20 channel in 20/40 mode*/
+#define	IEEE80211_FEXT_PURE11N		0x00020000	/* CONF: 11n w/o non ht sta's */
+#define	IEEE80211_FEXT_REPEATER		0x00040000	/* CONF: Work as a repeater */
+#define IEEE80211_FEXT_SCAN_20		0x00080000	/* Temporarily use 20MHz channel when changing channel */
+#define IEEE80211_FEXT_DFS_FAST_SWITCH	0x00100000	/* on detection of radar, select a non-DFS channel and switch immediately */
+#define IEEE80211_FEXT_SCAN_NO_DFS	0x00200000	/* on detection of radar, only scan non-DFS channels */
+#define IEEE80211_FEXT_SCAN_FAST_REASS	0x00400000	/* Fast reassociation after power up
+							   (remember the previous channel) */
+#define IEEE80211_FEXT_TPC		0x00800000	/* TPC feature enable or disable bit */
+#define IEEE80211_FEXT_TDLS_PROHIB	0x01000000	/* STATION prohibit TDLS function */
+#define IEEE80211_FEXT_TDLS_CS_PROHIB	0x02000000	/* TDLS channel switch is prohibited */
+#define IEEE80211_FEXT_TDLS_CS_PASSIVE	0x04000000	/* Passive TDLS channel switch */
+#define	IEEE80211_FEXT_AP_TDLS_PROHIB	0x08000000	/* AP prohibit TDLS function */
+#define IEEE80211_FEXT_SPECIFIC_SCAN	0x10000000	/* Just perform specific SSID scan */
+#define IEEE80211_FEXT_SCAN_40		0x20000000	/* Temporarily use 40MHz channel when changing channel */
+#define IEEE80211_FEXT_24GVHT		0x40000000	/* VHT support(256-QAM) on 2.4G band  */
+#define IEEE80211_FEXT_BG_PROTECT	0x80000000	/* 802.11bg protect */
+
+#define IEEE80211_FEXT_TDLS_DISABLED	(IEEE80211_FEXT_AP_TDLS_PROHIB | IEEE80211_FEXT_TDLS_PROHIB)
+
+#define IEEE80211_11K_NEIGHREPORT	0x00000001	/* Neighbor Report - 802.11k */
+
+#define IEEE80211_COM_NEIGHREPORT_ENABLE(_ic)	((_ic)->ic_flags_11k |= IEEE80211_11K_NEIGHREPORT)
+#define IEEE80211_COM_NEIGHREPORT_DISABLE(_ic)	((_ic)->ic_flags_11k &= ~IEEE80211_11K_NEIGHREPORT)
+#define IEEE80211_COM_NEIGHREPORT_ENABLED(_ic)	((_ic)->ic_flags_11k & IEEE80211_11K_NEIGHREPORT)
+
+/* ic 11v flags */
+#define IEEE80211_11V_BTM		0x00000001	/* BSS transition management - 802.11v - WNM */
+
+#define IEEE80211_COM_BTM_ENABLE(_ic)		((_ic)->ic_flags_11v |= IEEE80211_11V_BTM)
+#define IEEE80211_COM_BTM_DISABLE(_ic)		((_ic)->ic_flags_11v &= ~IEEE80211_11V_BTM)
+#define IEEE80211_COM_BTM_ENABLED(_ic)		((_ic)->ic_flags_11v & IEEE80211_11V_BTM)
+
+/* ic_flags_qtn */
+/* (Quantenna-specific flags) */
+#define IEEE80211_QTN_BCM_WAR		0x00000001	/* Workaround: rx odd length last aggregate */
+#define IEEE80211_QTN_RADAR_SCAN_START	0x00000002	/* Radar: Start scan after non-occupancy timer expiry */
+#define IEEE80211_QTN_PRINT_CH_INUSE	0x00000004	/* Enable printing of channels in Use. */
+#define IEEE80211_QTN_BGSCAN		0x00000008	/* Quantenna background scanning */
+#define IEEE80211_QTN_MONITOR		0x00000010	/* Quantenna sniffer mode */
+#define IEEE80211_QTN_BMPS		0x00000020	/* Quantenna STA BMPS (power-saving) mode */
+#define IEEE80211_QTN_SAMP_CHAN		0x00000040	/* Quantenna SCS sample channel */
+
+static inline int
+ieee80211_is_repeater(struct ieee80211com *ic)
+{
+	if (!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER))	/* repeater mode configured on this device? */
+		return 0;
+
+	return 1;	/* IEEE80211_FEXT_REPEATER is set */
+}
+
+static inline int
+ieee80211_is_repeater_associated(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+
+	if (!ieee80211_is_repeater(ic))	/* only meaningful in repeater mode */
+		return 0;
+
+	/* 1st VAP is for STA interface */
+	vap = TAILQ_FIRST(&ic->ic_vaps);
+	if (vap && vap->iv_state == IEEE80211_S_RUN)	/* STA VAP fully associated? */
+		return 1;
+
+	return 0;	/* no STA VAP, or it is not in RUN state */
+}
+
+#define IEEE80211_COM_UAPSD_ENABLE(_ic)		((_ic)->ic_flags_ext |= IEEE80211_FEXT_UAPSD)
+#define IEEE80211_COM_UAPSD_DISABLE(_ic)	((_ic)->ic_flags_ext &= ~IEEE80211_FEXT_UAPSD)
+#define IEEE80211_COM_UAPSD_ENABLED(_ic)	((_ic)->ic_flags_ext & IEEE80211_FEXT_UAPSD)
+#define IEEE80211_COM_GOTOSLEEP(_ic)		((_ic)->ic_flags_ext |= IEEE80211_FEXT_GOTOSLEEP)
+#define IEEE80211_COM_WAKEUP(_ic)		((_ic)->ic_flags_ext &= ~IEEE80211_FEXT_SLEEP)
+#define IEEE80211_COM_IS_SLEEPING(_ic)		((_ic)->ic_flags_ext & IEEE80211_FEXT_SLEEP)
+
+#define IEEE80211_COM_WDS_IS_NONE(_ic)		((_ic)->ic_extender_role == IEEE80211_EXTENDER_ROLE_NONE)
+#define IEEE80211_COM_WDS_IS_RBS(_ic)		((_ic)->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS)
+#define IEEE80211_COM_WDS_IS_MBS(_ic)		((_ic)->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS)
+
+#define IEEE80211_BG_PROTECT_ENABLED(_ic)	((_ic)->ic_flags_ext & IEEE80211_FEXT_BG_PROTECT)
+#define IEEE80211_11N_PROTECT_ENABLED(_ic)	((_ic)->ic_flags_ext & IEEE80211_FEXT_11N_PROTECT)
+
+#define IEEE80211_IS_TKIP_ALLOWED(_ic)          (WPA_TKIP_SUPPORT || ((_ic)->ic_country_code != CTRY_UNITED_STATES))
+
+#define IEEE80211_VAP_UAPSD_ENABLE(_v)	((_v)->iv_flags_ext |= IEEE80211_FEXT_UAPSD)
+#define IEEE80211_VAP_UAPSD_DISABLE(_v)	((_v)->iv_flags_ext &= ~IEEE80211_FEXT_UAPSD)
+#define IEEE80211_VAP_UAPSD_ENABLED(_v)	((_v)->iv_flags_ext & IEEE80211_FEXT_UAPSD)
+#define IEEE80211_VAP_GOTOSLEEP(_v)	((_v)->iv_flags_ext |= IEEE80211_FEXT_SLEEP)
+#define IEEE80211_VAP_WAKEUP(_v)	((_v)->iv_flags_ext &= ~IEEE80211_FEXT_SLEEP)
+#define IEEE80211_VAP_IS_SLEEPING(_v)	((_v)->iv_flags_ext & IEEE80211_FEXT_SLEEP)
+#define IEEE80211_VAP_EOSPDROP_ENABLE(_v)  ((_v)->iv_flags_ext |= IEEE80211_FEXT_EOSPDROP)
+#define IEEE80211_VAP_EOSPDROP_DISABLE(_v) ((_v)->iv_flags_ext &= ~IEEE80211_FEXT_EOSPDROP)
+#define IEEE80211_VAP_EOSPDROP_ENABLED(_v) ((_v)->iv_flags_ext & IEEE80211_FEXT_EOSPDROP)
+#define IEEE80211_VAP_DROPUNENC_EAPOL_ENABLE(_v)  ((_v)->iv_flags_ext |= IEEE80211_FEXT_DROPUNENC_EAPOL)
+#define IEEE80211_VAP_DROPUNENC_EAPOL_DISABLE(_v) ((_v)->iv_flags_ext &= ~IEEE80211_FEXT_DROPUNENC_EAPOL)
+#define IEEE80211_VAP_DROPUNENC_EAPOL(_v) ((_v)->iv_flags_ext & IEEE80211_FEXT_DROPUNENC_EAPOL)
+
+#define IEEE80211_VAP_WDS_ANY(_v)	((_v)->iv_opmode == IEEE80211_M_WDS)
+#define IEEE80211_VAP_WDS_IS_RBS(_v)	(((_v)->iv_opmode == IEEE80211_M_WDS) && \
+					(((_v)->iv_extdr_flags & IEEE80211_QTN_WDS_MASK) == IEEE80211_QTN_WDS_RBS))
+#define IEEE80211_VAP_WDS_IS_MBS(_v)	(((_v)->iv_opmode == IEEE80211_M_WDS) && \
+					(((_v)->iv_extdr_flags & IEEE80211_QTN_WDS_MASK) == IEEE80211_QTN_WDS_MBS))
+#define IEEE80211_VAP_WDS_BASIC(_v)   (((_v)->iv_opmode == IEEE80211_M_WDS) && \
+					(((_v)->iv_extdr_flags & IEEE80211_QTN_WDS_MASK) == IEEE80211_QTN_WDS_ONLY))
+
+static __inline__ uint16_t ieee80211_extdr_get_flags(uint32_t comb)
+{
+	return (comb & IEEE80211_QTN_EXTDR_ALLMASK);	/* low half of the combined word = flag bits */
+}
+
+static __inline__ uint16_t ieee80211_extdr_get_mask(uint32_t comb)
+{
+	return ((comb >> IEEE80211_QTN_EXTDR_MASK_SHIFT) & IEEE80211_QTN_EXTDR_ALLMASK);	/* high half = mask of bits to update */
+}
+
+static inline void
+ieee80211_vap_set_extdr_flags(struct ieee80211vap *vap, uint32_t etdr_comb)
+{
+	int flags;	/* new flag values (low half of etdr_comb) */
+	int mask;	/* which bits to update (high half of etdr_comb) */
+
+	flags = ieee80211_extdr_get_flags(etdr_comb);
+	mask = ieee80211_extdr_get_mask(etdr_comb);
+
+	vap->iv_extdr_flags &= ~mask;	/* clear only the masked bits... */
+	vap->iv_extdr_flags |= flags;	/* ...then set the new values; unmasked bits untouched */
+}
+
+#define	IEEE80211_VAP_WDS_SET_RBS(_v)	do {(_v)->iv_extdr_flags &= ~IEEE80211_QTN_WDS_MASK; \
+					     (_v)->iv_extdr_flags |= IEEE80211_QTN_WDS_RBS ;} while(0)
+#define	IEEE80211_VAP_WDS_SET_MBS(_v)	do {(_v)->iv_extdr_flags &= ~IEEE80211_QTN_WDS_MASK; \
+					     (_v)->iv_extdr_flags |= IEEE80211_QTN_WDS_MBS;} while(0)
+#define IEEE80211_VAP_WDS_SET_NONE(_v)	do {(_v)->iv_extdr_flags &= ~IEEE80211_QTN_WDS_MASK; \
+					     (_v)->iv_extdr_flags |= IEEE80211_QTN_WDS_ONLY;} while(0)
+
+/* ic_caps */
+#define	IEEE80211_C_WEP		0x00000001	/* CAPABILITY: WEP available */
+#define	IEEE80211_C_TKIP	0x00000002	/* CAPABILITY: TKIP available */
+#define	IEEE80211_C_AES		0x00000004	/* CAPABILITY: AES OCB avail */
+#define	IEEE80211_C_AES_CCM	0x00000008	/* CAPABILITY: AES CCM avail */
+#define	IEEE80211_C_11N		0x00000010	/* CAPABILITY: 11n HT available */
+#define	IEEE80211_C_CKIP	0x00000020	/* CAPABILITY: CKIP available */
+#define	IEEE80211_C_FF		0x00000040	/* CAPABILITY: ATH FF avail */
+#define	IEEE80211_C_TURBOP	0x00000080	/* CAPABILITY: ATH Turbo avail*/
+#define	IEEE80211_C_IBSS	0x00000100	/* CAPABILITY: IBSS available */
+#define	IEEE80211_C_PMGT	0x00000200	/* CAPABILITY: Power mgmt */
+#define	IEEE80211_C_HOSTAP	0x00000400	/* CAPABILITY: HOSTAP avail */
+#define	IEEE80211_C_AHDEMO	0x00000800	/* CAPABILITY: Old Adhoc Demo */
+#define	IEEE80211_C_SWRETRY	0x00001000	/* CAPABILITY: sw tx retry */
+#define	IEEE80211_C_TXPMGT	0x00002000	/* CAPABILITY: tx power mgmt */
+#define	IEEE80211_C_SHSLOT	0x00004000	/* CAPABILITY: short slottime */
+#define	IEEE80211_C_SHPREAMBLE	0x00008000	/* CAPABILITY: short preamble */
+#define	IEEE80211_C_MONITOR	0x00010000	/* CAPABILITY: monitor mode */
+#define	IEEE80211_C_TKIPMIC	0x00020000	/* CAPABILITY: TKIP MIC avail */
+#define	IEEE80211_C_WPA1	0x00800000	/* CAPABILITY: WPA1 avail */
+#define	IEEE80211_C_WPA2	0x01000000	/* CAPABILITY: WPA2 avail */
+#define	IEEE80211_C_WPA		0x01800000	/* CAPABILITY: WPA1+WPA2 avail*/
+#define	IEEE80211_C_BURST	0x02000000	/* CAPABILITY: frame bursting */
+#define	IEEE80211_C_WME		0x04000000	/* CAPABILITY: WME avail */
+#define	IEEE80211_C_WDS		0x08000000	/* CAPABILITY: 4-addr support */
+#define IEEE80211_C_WME_TKIPMIC	0x10000000	/* CAPABILITY: TKIP MIC for QoS frame */
+#define	IEEE80211_C_BGSCAN	0x20000000	/* CAPABILITY: bg scanning */
+#define	IEEE80211_C_UAPSD	0x40000000	/* CAPABILITY: UAPSD */
+#define	IEEE80211_C_UEQM	0x80000000	/* CAPABILITY: Unequal Modulation */
+/* XXX protection/barker? */
+
+#define	IEEE80211_C_CRYPTO	0x0000002f	/* CAPABILITY: crypto alg's */
+
+/* HT flags */
+#define IEEE80211_HTF_CBW_40MHZ_ONLY	0x00000001
+#define IEEE80211_HTF_SHORTGI20_ONLY	0x00000002
+#define IEEE80211_HTF_SHORTGI40_ONLY	0x00000004
+#define IEEE80211_HTF_GF_MODE_ONLY		0x00000008
+#define IEEE80211_HTF_NSS_2_ONLY		0x00000010
+#define IEEE80211_HTF_TXSTBC_ONLY		0x00000020
+#define IEEE80211_HTF_RXSTBC_ONLY		0x00000040
+#define IEEE80211_HTF_DSSS_40MHZ_ONLY	0x00000080
+#define IEEE80211_HTF_PSMP_SUPPORT_ONLY	0x00000100
+#define IEEE80211_HTF_LSIG_TXOP_ONLY	0x00000200
+#define IEEE80211_HTF_HTINFOUPDATE		0x00000400
+#define IEEE80211_HTF_SHORTGI_ENABLED	0x00000800
+#define IEEE80211_HTF_LDPC_ENABLED	0x00001000
+#define IEEE80211_HTF_LDPC_ALLOW_NON_QTN	0x00002000
+#define IEEE80211_HTF_STBC_ENABLED	0x00004000
+
+/* Key management Capabilities */
+#define WPA_KEY_MGMT_IEEE8021X BIT(0)
+#define WPA_KEY_MGMT_PSK BIT(1)
+#define WPA_KEY_MGMT_NONE BIT(2)
+#define WPA_KEY_MGMT_IEEE8021X_NO_WPA BIT(3)
+#define WPA_KEY_MGMT_WPA_NONE BIT(4)
+#define WPA_KEY_MGMT_FT_IEEE8021X BIT(5)
+#define WPA_KEY_MGMT_FT_PSK BIT(6)
+#define WPA_KEY_MGMT_IEEE8021X_SHA256 BIT(7)
+#define WPA_KEY_MGMT_PSK_SHA256 BIT(8)
+#define WPA_KEY_MGMT_WPS BIT(9)
+
+/* Atheros ABOLT definitions */
+#define IEEE80211_ABOLT_TURBO_G		0x01	/* Legacy Turbo G */
+#define IEEE80211_ABOLT_TURBO_PRIME	0x02	/* Turbo Prime */
+#define IEEE80211_ABOLT_COMPRESSION	0x04	/* Compression */
+#define IEEE80211_ABOLT_FAST_FRAME	0x08	/* Fast Frames */
+#define IEEE80211_ABOLT_BURST		0x10	/* Bursting */
+#define IEEE80211_ABOLT_WME_ELE		0x20	/* WME based cwmin/max/burst tuning */
+#define IEEE80211_ABOLT_XR		0x40	/* XR */
+#define IEEE80211_ABOLT_AR		0x80	/* AR switches out based on adjacent non-turbo traffic */
+
+/* Atheros Advanced Capabilities ABOLT definition */
+#define IEEE80211_ABOLT_ADVCAP	(IEEE80211_ABOLT_TURBO_PRIME | \
+				 IEEE80211_ABOLT_COMPRESSION | \
+				 IEEE80211_ABOLT_FAST_FRAME | \
+				 IEEE80211_ABOLT_XR | \
+				 IEEE80211_ABOLT_AR | \
+				 IEEE80211_ABOLT_BURST | \
+				 IEEE80211_ABOLT_WME_ELE)
+
+/* check if a capability was negotiated for use */
+#define	IEEE80211_ATH_CAP(vap, ni, bit) \
+	((ni)->ni_ath_flags & (vap)->iv_ath_cap & (bit))
+
+/* flags to VAP create function */
+#define IEEE80211_VAP_XR		0x10000	/* create a XR VAP without registering net device with OS */
+
+int ieee80211_ifattach(struct ieee80211com *);
+void ieee80211_ifdetach(struct ieee80211com *);
+int ieee80211_vap_setup(struct ieee80211com *, struct net_device *,
+	const char *, int, int, int);
+int ieee80211_vap_attach(struct ieee80211vap *, ifm_change_cb_t, ifm_stat_cb_t);
+void ieee80211_vap_detach(struct ieee80211vap *);
+void ieee80211_vap_detach_late(struct ieee80211vap *);
+void ieee80211_announce(struct ieee80211com *);
+void ieee80211_announce_channels(struct ieee80211com *);
+int ieee80211_media_change(void *);
+void ieee80211_media_status(void *, struct ifmediareq *);
+int ieee80211_rate2media(struct ieee80211com*, int, enum ieee80211_phymode);
+int ieee80211_media2rate(int);
+int ieee80211_mcs2media(struct ieee80211com*, int, enum ieee80211_phymode);
+int ieee80211_media2mcs(int);
+int ieee80211_mcs2rate(int mcs, int mode, int sgi, int vht);
+int ieee80211_rate2mcs(int rate, int mode, int sgi);
+u_int ieee80211_get_chanflags(enum ieee80211_phymode mode);
+u_int ieee80211_mhz2ieee(u_int, u_int);
+u_int ieee80211_chan2ieee(struct ieee80211com *,	const struct ieee80211_channel *);
+u_int ieee80211_ieee2mhz(u_int, u_int);
+struct ieee80211_channel *ieee80211_find_channel(struct ieee80211com *, int, int);
+int ieee80211_setmode(struct ieee80211com *, enum ieee80211_phymode);
+void ieee80211_reset_erp(struct ieee80211com *, enum ieee80211_phymode);
+enum ieee80211_phymode ieee80211_chan2mode(const struct ieee80211_channel *);
+int ieee80211_country_string_to_countryid( const char *input_str, u_int16_t *p_iso_code );
+int ieee80211_countryid_to_country_string( const u_int16_t iso_code, char *output_str );
+int ieee80211_region_to_operating_class(struct ieee80211com *ic, char *region_str);
+void ieee80211_get_prichan_list_by_operating_class(struct ieee80211com *ic, int bw,
+			uint8_t *chan_list, uint32_t flag);
+int ieee80211_get_current_operating_class(uint16_t iso_code, int chan, int bw);
+void ieee80211_build_countryie(struct ieee80211com *);
+int ieee80211_media_setup(struct ieee80211com *, struct ifmedia *, u_int32_t,
+	ifm_change_cb_t, ifm_stat_cb_t);
+void ieee80211_param_to_qdrv(struct ieee80211vap *vap,
+	int param, int value, unsigned char *data, int len);
+void ieee80211_param_from_qdrv(struct ieee80211vap *vap,
+	int param, int *value, unsigned char *data, int *len);
+int ieee80211_param_scs_set(struct net_device *dev, struct ieee80211vap *vap, u_int32_t value);
+int ieee80211_param_ocac_set(struct net_device *dev, struct ieee80211vap *vap, u_int32_t value);
+void get_node_info(void *s, struct ieee80211_node *ni);
+void get_node_assoc_state(void *s, struct ieee80211_node *ni);
+void get_node_ver(void *s, struct ieee80211_node *ni);
+void get_node_tx_stats(void *s, struct ieee80211_node *ni);
+void get_node_rx_stats(void *s, struct ieee80211_node *ni);
+void ieee80211_update_node_assoc_qual(struct ieee80211_node *ni);
+u_int8_t ieee80211_bridgemode_set(struct ieee80211vap *vap, u_int8_t config_change);
+void ieee80211_channel_switch_post(struct ieee80211com *ic);
+void ieee80211_eap_output(struct net_device *dev, const void *eap_msg, int eap_msg_len);
+int ieee80211_blacklist_check(struct ieee80211_node *ni);
+void ieee80211_remove_node_blacklist_timeout(struct ieee80211_node *ni);
+int ieee80211_pwr_adjust(struct ieee80211vap *vap, int rxgain_state);
+void ieee80211_pm_queue_work_custom(struct ieee80211com *ic, unsigned long delay);
+void ieee80211_pm_queue_work(struct ieee80211com *ic);
+void ieee80211_beacon_interval_set(struct ieee80211com *ic, int value);
+void ieee80211_ocac_update_params(struct ieee80211com *ic, const char *region);
+
+void
+ieee80211_set_recv_ctrlpkts(struct ieee80211vap *vap);
+struct ieee80211_channel * findchannel(struct ieee80211com *ic, int ieee, int mode);
+struct ieee80211_channel * findchannel_any(struct ieee80211com *ic, int ieee, int prefer_mode);
+struct ieee80211_channel* ieee80211_chk_update_pri_chan(struct ieee80211com *ic,
+		struct ieee80211_channel *chan, uint32_t rank_by_pwr, const char* caller, int print_warning);
+void ieee80211_scs_metric_update_timestamps(struct ap_state *as);
+void ieee80211_scs_update_tdls_stats(struct ieee80211com *ic, struct ieee80211_tdls_scs_stats *scs_stats);
+void ieee80211_scs_free_node_tdls_stats(struct ieee80211com *ic, struct ieee80211_node *ni);
+void ieee80211_scs_free_tdls_stats_list(struct ieee80211com *ic);
+int ieee80211_scs_clean_stats(struct ieee80211com *ic, uint32_t level, int clear_dfs_reentry);
+void ieee80211_scs_node_clean_stats(void *s, struct ieee80211_node *ni);
+void ieee80211_scs_show_ranking_stats(struct ieee80211com *ic, int show_input, int show_result);
+void ieee80211_show_initial_ranking_stats(struct ieee80211com *ic);
+void ieee80211_scs_update_ranking_table_by_scan(struct ieee80211com *ic);
+void ieee80211_scs_adjust_cca_threshold(struct ieee80211com *ic);
+int ieee80211_scs_get_scaled_scan_info(struct ieee80211com *ic, int chan_ieee,
+		struct qtn_scs_scan_info *p_scan_info);
+int ieee80211_dual_sec_chan_supported(struct ieee80211com *ic, int chan);
+void ieee80211_update_sec_chan_offset(struct ieee80211_channel *chan, int offset);
+int ieee80211_get_ap_sec_chan_offset(const struct ieee80211_scan_entry *se);
+int ieee80211_get_bw(struct ieee80211com *ic);
+int ieee80211_get_cap_bw(struct ieee80211com *ic);
+int ieee80211_get_max_ap_bw(const struct ieee80211_scan_entry *se);
+int ieee80211_get_max_node_bw(struct ieee80211_node *ni);
+int ieee80211_get_max_system_bw(struct ieee80211com *ic);
+int ieee80211_get_max_channel_bw(struct ieee80211com *ic, int channel);
+int ieee80211_get_max_bw(struct ieee80211vap *vap, struct ieee80211_node *ni, uint32_t chan);
+void ieee80211_update_bw_capa(struct ieee80211vap *vap, int bw);
+void ieee80211_change_bw(struct ieee80211vap *vap, int bw, int delay_chan_switch);
+int ieee80211_get_mu_grp(struct ieee80211com *ic, struct qtn_mu_grp_args *mu_grp_tbl);
+int ieee80211_find_sec_chan(struct ieee80211_channel *chan);
+int ieee80211_find_sec40u_chan(struct ieee80211_channel *chan);
+int ieee80211_find_sec40l_chan(struct ieee80211_channel *chan);
+int ieee80211_find_sec_chan_by_operating_class(struct ieee80211com *ic, int chan, uint32_t preference);
+int ieee80211_is_channel_disabled(struct ieee80211com *ic, int channel, int bw);
+
+int ieee80211_rst_dev_stats(struct ieee80211vap *vap);
+
+int ieee80211_swfeat_is_supported(uint16_t feat, uint8_t print_msg);
+
+void ieee80211_csa_finish(struct work_struct *work);
+void ieee80211_finish_csa(unsigned long arg);
+int ieee80211_enter_csa(struct ieee80211com *ic, struct ieee80211_channel *chan,
+		void (*finish_csa)(unsigned long arg), uint32_t reason,
+		uint8_t csa_count, uint8_t csa_mode, uint32_t flag);
+void ieee80211_obss_scan_timer(unsigned long arg);
+void ieee80211_start_obss_scan_timer(struct ieee80211vap *vap);
+int ieee80211_scs_pick_channel(struct ieee80211com *ic, int pick_flags, uint32_t cc_flag);
+void ieee80211_parse_cipher_key(struct ieee80211vap *vap, void *ie, uint16_t len);
+void ieee80211_ap_pick_alternate_channel(struct ieee80211com *ic,
+			struct ieee80211_channel *bestchan,
+			struct ieee80211_channel *fs1_bestchan,
+			struct ieee80211_channel *fs1_secbestchan,
+			struct ieee80211_channel *fs2_bestchan,
+			struct ieee80211_channel *fs2_secbestchan);
+void ieee80211_update_alternate_channels(struct ieee80211com *ic,
+			struct ieee80211_channel *bestchan,
+			struct ieee80211_channel **fs_bestchan,
+			struct ieee80211_channel **fs_secbestchan,
+			int (*compare_fn)(struct ieee80211com *, int, int));
+
+int ieee80211_vap_wds_mode_change(struct ieee80211vap *vap);
+char *ieee80211_wireless_get_hw_desc(void);
+struct ieee80211_channel *ieee80211_find_channel_by_ieee(struct ieee80211com *ic, int chan_ieee);
+void ieee80211_add_sec_chan_off(u_int8_t **frm, struct ieee80211com *ic, u_int8_t csa_chan);
+uint8_t ieee80211_wband_chanswitch_ie_len(uint32_t bw);
+uint8_t ieee80211_sec_chan_off_ie_len(void);
+
+void ieee80211_find_ht_pri_sec_chan(struct ieee80211vap *vap,
+		const struct ieee80211_scan_entry *se, uint8_t *pri_chan, uint8_t *sec_chan);
+uint8_t ieee80211_find_ht_center_chan(struct ieee80211vap *vap,
+		const struct ieee80211_scan_entry *se);
+int ieee80211_20_40_operation_permitted(struct ieee80211com *ic,
+	struct ieee80211_channel *chan, uint8_t se_pri_chan, uint8_t se_sec_chan);
+
+void ieee80211_off_channel_timeout(unsigned long arg);
+void ieee80211_off_channel_resume(struct ieee80211vap *vap);
+void ieee80211_off_channel_suspend(struct ieee80211vap *vap, uint32_t timeout);
+u_int8_t *ieee80211_add_qtn_pairing_ie(u_int8_t *frm, struct ieee80211_app_ie_t *pairing_ie);
+
+int get_max_supported_chwidth(struct ieee80211_node *ni);
+uint8_t recalc_opmode(struct ieee80211_node *ni, uint8_t opmode);
+
+/*
+ * Key update synchronization methods.  XXX should not be visible.
+ */
+static __inline void
+ieee80211_key_update_begin(struct ieee80211vap *vap)
+{
+	vap->iv_key_update_begin(vap);	/* driver-supplied callback: start of key update */
+}
+static __inline void
+ieee80211_key_update_end(struct ieee80211vap *vap)
+{
+	vap->iv_key_update_end(vap);	/* driver-supplied callback: end of key update */
+}
+/* Check if the channel is valid */
+static __inline int
+is_channel_valid(int chan)
+{
+	if ((chan >= IEEE80211_CHAN_MAX) || !chan)
+		return 0;
+
+	return 1;
+}
+
+static __inline int
+is_ieee80211_chan_valid(struct ieee80211_channel *chan)
+{
+	if (!chan || (chan == IEEE80211_CHAN_ANYC))	/* NULL and the "any channel" sentinel are invalid */
+		return 0;
+
+	return 1;
+}
+
+/*
+ * XXX these need to be here for IEEE80211_F_DATAPAD
+ */
+
+/*
+ * Return the space occupied by the 802.11 header and any
+ * padding required by the driver.  This works for a
+ * management or data frame.
+ */
+static __inline int
+ieee80211_hdrspace(struct ieee80211com *ic, const void *data)
+{
+	int size;	/* 802.11 header size plus any driver padding */
+	if((ic->ic_caps & IEEE80211_C_11N) == IEEE80211_C_11N)	/* HT-capable device? */
+		size = ieee80211_hdrsize(IEEE80211_HT_CAPABLE, data);
+	else
+		size = ieee80211_hdrsize(IEEE80211_NON_HT_CAPABLE, data);
+
+	if (ic->ic_flags & IEEE80211_F_DATAPAD)	/* driver wants 32-bit aligned payload */
+		size = roundup(size, sizeof(u_int32_t));
+	return size;
+}
+
+/*
+ * Like ieee80211_hdrspace, but handles any type of frame.
+ */
+static __inline int
+ieee80211_anyhdrspace(struct ieee80211com *ic, const void *data)
+{
+	int size;	/* like ieee80211_hdrspace() but valid for any frame type */
+	if((ic->ic_caps & IEEE80211_C_11N) == IEEE80211_C_11N)	/* HT-capable device? */
+		size =  ieee80211_anyhdrsize(IEEE80211_HT_CAPABLE, data);
+	else
+		size =  ieee80211_anyhdrsize(IEEE80211_NON_HT_CAPABLE, data);
+
+	if (ic->ic_flags & IEEE80211_F_DATAPAD)	/* driver wants 32-bit aligned payload */
+		size = roundup(size, sizeof(u_int32_t));
+	return size;
+}
+
+static __inline char *
+ieee80211_bw2str(int bw)
+{
+	char *bwstr = "invalid";	/* returned for any bw not listed below */
+
+	switch (bw) {
+	case BW_HT20:
+		bwstr = IEEE80211_BWSTR_20;
+		break;
+	case BW_HT40:
+		bwstr = IEEE80211_BWSTR_40;
+		break;
+	case BW_HT80:
+		bwstr = IEEE80211_BWSTR_80;
+		break;
+	case BW_HT160:
+		bwstr = IEEE80211_BWSTR_160;
+		break;
+	default:
+		break;	/* keep "invalid" */
+	}
+
+	return bwstr;	/* static string; caller must not free or modify */
+}
+
+static __inline int
+ieee80211_tx_amsdu_disabled(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (!vap->iv_tx_amsdu)	/* A-MSDU globally off for this VAP */
+		return 1;
+
+	if (!IEEE80211_NODE_IS_VHT(ni) &&
+			IEEE80211_NODE_IS_HT(ni) &&
+			!vap->iv_tx_amsdu_11n)	/* HT-only peer and 11n A-MSDU disabled */
+		return 1;
+
+	return 0;	/* A-MSDU transmit permitted to this node */
+}
+
+
+#define IEEE80211_MSG_11N	0x80000000	/* 11n mode debug */
+#define	IEEE80211_MSG_DEBUG	0x40000000	/* IFF_DEBUG equivalent */
+#define	IEEE80211_MSG_DUMPPKTS	0x20000000	/* IFF_LINK2 equivalent */
+#define	IEEE80211_MSG_CRYPTO	0x10000000	/* crypto work */
+#define	IEEE80211_MSG_INPUT	0x08000000	/* input handling */
+#define	IEEE80211_MSG_XRATE	0x04000000	/* rate set handling */
+#define	IEEE80211_MSG_ELEMID	0x02000000	/* element id parsing */
+#define	IEEE80211_MSG_NODE	0x01000000	/* node handling */
+#define	IEEE80211_MSG_ASSOC	0x00800000	/* association handling */
+#define	IEEE80211_MSG_AUTH	0x00400000	/* authentication handling */
+#define	IEEE80211_MSG_SCAN	0x00200000	/* scanning */
+#define	IEEE80211_MSG_OUTPUT	0x00100000	/* output handling */
+#define	IEEE80211_MSG_STATE	0x00080000	/* state machine */
+#define	IEEE80211_MSG_POWER	0x00040000	/* power save handling */
+#define	IEEE80211_MSG_DOT1X	0x00020000	/* 802.1X authenticator */
+#define	IEEE80211_MSG_NODEBSS	0x00010000	/* BSS node handling */
+#define	IEEE80211_MSG_RADIUS	0x00008000	/* 802.1X radius client */
+#define	IEEE80211_MSG_RADDUMP	0x00004000	/* dump 802.1X radius packets */
+#define	IEEE80211_MSG_RADKEYS	0x00002000	/* dump 802.1X keys */
+#define	IEEE80211_MSG_WPA	0x00001000	/* WPA/RSN protocol */
+#define	IEEE80211_MSG_ACL	0x00000800	/* ACL handling */
+#define	IEEE80211_MSG_WME	0x00000400	/* WME protocol */
+#define	IEEE80211_MSG_SUPG	0x00000200	/* SUPERG */
+#define	IEEE80211_MSG_DOTH	0x00000100	/* 802.11h */
+#define	IEEE80211_MSG_INACT	0x00000080	/* inactivity handling */
+#define	IEEE80211_MSG_ROAM	0x00000040	/* sta-mode roaming */
+#define IEEE80211_MSG_ACTION	0x00000020	/* action management frames */
+#define IEEE80211_MSG_TPC	0x00000010	/* transmit power control */
+#define	IEEE80211_MSG_VSP	0x00000008	/* VSP */
+#define IEEE80211_MSG_VHT	0x00000004	/* 11ac mode debug-VHT*/
+#define	IEEE80211_MSG_TDLS	0x00000002	/* TDLS */
+#define	IEEE80211_MSG_EXTDR	0x00000001	/* Extender: QHOP or Repeater */
+
+#define	IEEE80211_MSG_ANY	0xffffffff	/* anything */
+
+#define IEEE80211_TDLS_MSG_WARN	1
+#define IEEE80211_TDLS_MSG_DBG	2
+
+#define IEEE80211_EXTENDER_MSG_DISABLE	0
+#define IEEE80211_EXTENDER_MSG_WARN	1
+#define IEEE80211_EXTENDER_MSG_DBG	2
+
+#ifdef IEEE80211_DEBUG
+
+#define	ieee80211_msg(_vap, _m)	((_vap)->iv_debug & (_m))
+
+#define	ieee80211_tdls_msg(_vap, _n)	((_n) <= (_vap)->tdls_verbose)
+
+#define	ieee80211_extender_msg(_vap, _n)	((_n) <= (_vap)->iv_ic->ic_extender_verbose)
+
+#define	IEEE80211_DPRINTF(_vap, _m, _fmt, ...) do {			\
+	if (unlikely(ieee80211_msg(_vap, _m)))				\
+		ieee80211_note(_vap, _fmt, __VA_ARGS__);		\
+} while (0)
+
+#define	IEEE80211_TDLS_DPRINTF(_vap, _m, _n, _fmt, ...) do {	\
+	if (unlikely(ieee80211_msg(_vap, _m) && ieee80211_tdls_msg(_vap, _n)))	\
+		ieee80211_note(_vap, _fmt, __VA_ARGS__);		\
+} while (0)
+
+#define	IEEE80211_EXTENDER_DPRINTF(_vap, _n, _fmt, ...) do {	\
+	if (unlikely(ieee80211_extender_msg(_vap, _n))) \
+		ieee80211_note(_vap, _fmt, __VA_ARGS__);		\
+} while (0)
+
+#define	IEEE80211_NOTE(_vap, _m, _ni, _fmt, ...) do {			\
+	if (unlikely(ieee80211_msg(_vap, _m)))				\
+		ieee80211_note_mac(_vap, (_ni)->ni_macaddr, _fmt, __VA_ARGS__);\
+} while (0)
+
+#define	IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...) do {		\
+	if (unlikely(ieee80211_msg(_vap, _m)))				\
+		ieee80211_note_mac(_vap, _mac, _fmt, __VA_ARGS__);	\
+} while (0)
+
+#define	IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...) do {		\
+	if (unlikely(ieee80211_msg(_vap, _m)))				\
+		ieee80211_note_frame(_vap, _wh, _fmt, __VA_ARGS__);	\
+} while (0)
+
+#define	IEEE80211_DPRINTF_NODEREF(_ni, _func, _line) do {				\
+	if (unlikely(!_ni || !_ni->ni_vap)) {						\
+		printk("%s:%u epic fail ni=%p\n", _func, _line, _ni);			\
+		break;									\
+	} else if (unlikely(ieee80211_msg(_ni->ni_vap, (_ni == _ni->ni_vap->iv_bss) ?	\
+				IEEE80211_MSG_NODEBSS : IEEE80211_MSG_NODE))) {		\
+		ieee80211_note(_ni->ni_vap,						\
+			"[%s]%s:%u: nodecnt=%u ni=%p tbl=%u refcnt=%d\n",		\
+			ether_sprintf(_ni->ni_macaddr),					\
+			_func, _line,							\
+			_ni->ni_ic ? _ni->ni_ic->ic_node_count : 0,			\
+			_ni, (_ni->ni_table != NULL),					\
+			ieee80211_node_refcnt(_ni));					\
+	}										\
+} while (0)
+
+void ieee80211_note(struct ieee80211vap *, const char *, ...);
+void ieee80211_note_mac(struct ieee80211vap *,
+	const u_int8_t mac[IEEE80211_ADDR_LEN], const char *, ...);
+void ieee80211_note_frame(struct ieee80211vap *,
+	const struct ieee80211_frame *, const char *, ...);
+
+#define	ieee80211_msg_debug(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_DEBUG)
+#define	ieee80211_msg_dumppkts(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_DUMPPKTS)
+#define	ieee80211_msg_input(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_INPUT)
+#define	ieee80211_msg_radius(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_RADIUS)
+#define	ieee80211_msg_dumpradius(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_RADDUMP)
+#define	ieee80211_msg_dumpradkeys(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_RADKEYS)
+#define	ieee80211_msg_scan(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_SCAN)
+#define	ieee80211_msg_assoc(_vap) \
+	ieee80211_msg(_vap, IEEE80211_MSG_ASSOC)
+#else /* IEEE80211_DEBUG */
+#define	IEEE80211_DPRINTF(_vap, _m, _fmt, ...)
+#define	IEEE80211_NOTE(_vap, _m, _wh, _fmt, ...)
+#define	IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...)
+#define	IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...)
+#define	IEEE80211_DPRINTF_NODEREF(_ni, _func, _line)
+#endif /* IEEE80211_DEBUG */
+
+#ifdef CONFIG_QHOP
+/* Some prototypes QHOP implementation */
+extern int  ieee80211_scs_is_wds_rbs_node(struct ieee80211com *ic);
+extern void ieee80211_dfs_send_csa(struct ieee80211vap *vap, uint8_t new_chan);
+#endif
+
+struct ieee80211_band_info *ieee80211_get_band_info(int band_idx);
+
+#if defined(QBMPS_ENABLE)
+extern int ieee80211_wireless_set_sta_bmps(struct ieee80211vap *vap, struct ieee80211com *ic, int value);
+extern int ieee80211_sta_bmps_update(struct ieee80211vap *vap);
+#endif
+
+extern int ieee80211_is_idle_state(struct ieee80211com *ic);
+extern int ieee80211_is_on_weather_channel(struct ieee80211com *ic, struct ieee80211_channel *chan);
+
+extern uint8_t g_l2_ext_filter;
+extern uint8_t g_l2_ext_filter_port;
+#endif /* _NET80211_IEEE80211_VAR_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/if_athproto.h b/drivers/qtn/include/kernel/net80211/if_athproto.h
new file mode 100644
index 0000000..d0eba55
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/if_athproto.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2004 Atheros Communications Inc.  All rights reserved.
+ *
+ * $Id: if_athproto.h 2606 2007-07-25 15:14:52Z mrenzmann $
+ */
+
+#ifndef _NET_IF_ATH_PROTO_H_
+#define _NET_IF_ATH_PROTO_H_
+
+/*
+ * Atheros proprietary protocol info.
+ */
+
+/*
+ * Atheros RSSI, Signal, Noise, etc.
+ */
+
+#define ATH_DEFAULT_NOISE	-95	/* dBm */
+
+/*
+ * Atheros proprietary SuperG defines.
+ */
+
+#define ATH_ETH_TYPE		0x88bd
+#define ATH_SNAP_ORGCODE_0	0x00
+#define ATH_SNAP_ORGCODE_1	0x03
+#define ATH_SNAP_ORGCODE_2	0x7f
+
+struct athl2p_tunnel_hdr {
+#if (_BYTE_ORDER == _LITTLE_ENDIAN)
+	u_int32_t offset:11,
+	seqNum:11,
+	optHdrLen32:2,
+	frameType:2,
+	proto:6;
+#else /* big endian */
+	u_int32_t proto:6,
+	frameType:2,
+	optHdrLen32:2,
+	seqNum:11,
+	offset:11;
+#endif
+} __packed;
+
+/*
+ * The following defines control compiling Atheros-specific features
+ * (see BuildCaps.inc):
+ *
+ *   ATH_SUPERG_FF 
+ *      set to 1 for fast-frame
+ */
+
+#define ATH_L2TUNNEL_PROTO_FF 0
+/*
+ * FF max payload: 
+ * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
+ *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
+ * = 3066
+ */
+#define ATH_FF_MAX_HDR_PAD	4
+#define ATH_FF_MAX_SEP_PAD	6
+#define ATH_FF_MAX_HDR		30
+#define ATH_FF_MAX_PAYLOAD	3066
+#define ATH_FF_MAX_LEN (ATH_FF_MAX_PAYLOAD + IEEE80211_CRC_LEN + \
+	(IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_CRCLEN))
+
+/*
+ * default value for the minimum txq depth required for an ath_buf to be
+ * placed on the FF staging queue. this value should be >=3 due to interaction
+ * with HW compression.
+ */
+#define ATH_FF_TXQMIN		3
+
+/* 
+ * default maximum age an ath_buf is allowed to remain on the staging queue.
+ * When this number of ath_bufs have been queued on the txq, after an ath_buf
+ * was placed on the staging queue, that ath_buf on the staging queue will be
+ * flushed.
+ */
+#define ATH_FF_STAGEQAGEMAX	5
+
+/*
+ * Reserve enough buffer header length to avoid reallocation on fast-frame
+ * rx and tx.
+ */
+#define USE_HEADERLEN_RESV	1
+
+#endif /* _NET_IF_ATH_PROTO_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/if_ethersubr.h b/drivers/qtn/include/kernel/net80211/if_ethersubr.h
new file mode 100644
index 0000000..8138928
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/if_ethersubr.h
@@ -0,0 +1,80 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: if_ethersubr.h 2028 2007-01-30 03:51:52Z proski $
+ */
+
+#ifndef _NET_IF_ETHERSUBR_H_
+#define _NET_IF_ETHERSUBR_H_
+
+#include <compat.h>
+
+#define	ETHER_ADDR_LEN		6	/* length of an Ethernet address */
+#define	ETHER_TYPE_LEN		2	/* length of the Ethernet type field */
+#define	ETHER_CRC_LEN		4	/* length of the Ethernet CRC */
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+#define	ETHER_MAX_LEN		1518
+#define	ETHER_MIN_LEN		64
+#define ETHER_JUMBO_MAX_LEN	4096
+
+#define	ETHERMTU	(ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN)
+
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+struct	ether_header {
+	u_char ether_dhost[ETHER_ADDR_LEN];
+	u_char ether_shost[ETHER_ADDR_LEN];
+	__be16 ether_type;
+} __packed;
+
+#ifndef ETHERTYPE_80211MGT
+#define	ETHERTYPE_80211MGT	0x890D
+#endif
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+struct	ether_addr {
+	u_char octet[ETHER_ADDR_LEN];
+} __packed;
+
+#define	ETHER_IS_MULTICAST(addr) (*(addr) & 0x01) /* is address mcast/bcast? */
+
+#define VLAN_PRI_SHIFT	13		/* Shift to find VLAN user priority */
+#define VLAN_PRI_MASK	7		/* Mask for user priority bits in VLAN */
+
+
+#endif /* _NET_IF_ETHERSUBR_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/if_llc.h b/drivers/qtn/include/kernel/net80211/if_llc.h
new file mode 100644
index 0000000..6b17f2f
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/if_llc.h
@@ -0,0 +1,212 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: if_llc.h 2028 2007-01-30 03:51:52Z proski $
+ */
+
+/*
+ * Copyright (c) 1988, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NET_IF_LLC_H_
+#define _NET_IF_LLC_H_
+
+#include <compat.h>
+#include "if_ethersubr.h"
+
+/*
+ * IEEE 802.2 Link Level Control headers, for use in conjunction with
+ * 802.{3,4,5} media access control methods.
+ *
+ * Headers here do not use bit fields due to shortcomings in many
+ * compilers.
+ */
+
+struct llc {
+	u_int8_t llc_dsap;
+	u_int8_t llc_ssap;
+	union {
+	    struct {
+		u_int8_t control;
+		u_int8_t format_id;
+		u_int8_t class;
+		u_int8_t window_x2;
+	    } __packed type_u;
+	    struct {
+		u_int8_t num_snd_x2;
+		u_int8_t num_rcv_x2;
+	    } __packed type_i;
+	    struct {
+		u_int8_t control;
+		u_int8_t num_rcv_x2;
+	    } __packed type_s;
+	    struct {
+	        u_int8_t control;
+		/*
+		 * We cannot put the following fields in a structure because
+		 * the structure rounding might cause padding.
+		 */
+		u_int8_t frmr_rej_pdu0;
+		u_int8_t frmr_rej_pdu1;
+		u_int8_t frmr_control;
+		u_int8_t frmr_control_ext;
+		u_int8_t frmr_cause;
+	    } __packed type_frmr;
+	    struct {
+		u_int8_t control;
+		u_int8_t org_code[3];
+		__be16 ether_type;
+	    } __packed type_snap;
+	    struct {
+		u_int8_t control;
+		u_int8_t control_ext;
+	    } __packed type_raw;
+	} llc_un /* XXX __packed ??? */;
+} __packed;
+
+struct frmrinfo {
+	u_int8_t frmr_rej_pdu0;
+	u_int8_t frmr_rej_pdu1;
+	u_int8_t frmr_control;
+	u_int8_t frmr_control_ext;
+	u_int8_t frmr_cause;
+} __packed;
+
+#define QTN_OUIE_TYPE_TRAINING		0x1
+#define QTN_OUIE_TYPE_QEVENT		0x2
+#define QTN_OUIE_WIFI_CONTROL_MIN	0x40
+#define QTN_OUIE_WIFI_CONTROL_MAX	0x48
+
+struct oui_extended_ethertype {
+	uint8_t	oui[3];
+	__be16	type;
+} __attribute__((packed));
+
+struct qtn_dummy_frame {
+	struct ether_header		eh;
+	struct llc			llc;
+	struct oui_extended_ethertype	ouie;
+} __attribute__((packed));
+
+#define	llc_control		llc_un.type_u.control
+#define	llc_control_ext		llc_un.type_raw.control_ext
+#define	llc_fid			llc_un.type_u.format_id
+#define	llc_class		llc_un.type_u.class
+#define	llc_window		llc_un.type_u.window_x2
+#define	llc_frmrinfo 		llc_un.type_frmr.frmr_rej_pdu0
+#define	llc_frmr_pdu0		llc_un.type_frmr.frmr_rej_pdu0
+#define	llc_frmr_pdu1		llc_un.type_frmr.frmr_rej_pdu1
+#define	llc_frmr_control	llc_un.type_frmr.frmr_control
+#define	llc_frmr_control_ext	llc_un.type_frmr.frmr_control_ext
+#define	llc_frmr_cause		llc_un.type_frmr.frmr_cause
+#define	llc_snap		llc_un.type_snap
+
+/*
+ * Don't use sizeof(struct llc_un) for LLC header sizes
+ */
+#define LLC_ISFRAMELEN 4
+#define LLC_UFRAMELEN  3
+#define LLC_FRMRLEN    7
+#define LLC_SNAPFRAMELEN 8
+
+/*
+ * Unnumbered LLC format commands
+ */
+#define LLC_UI		0x3
+#define LLC_UI_P	0x13
+#define LLC_DISC	0x43
+#define	LLC_DISC_P	0x53
+#define LLC_UA		0x63
+#define LLC_UA_P	0x73
+#define LLC_TEST	0xe3
+#define LLC_TEST_P	0xf3
+#define LLC_FRMR	0x87
+#define	LLC_FRMR_P	0x97
+#define LLC_DM		0x0f
+#define	LLC_DM_P	0x1f
+#define LLC_XID		0xaf
+#define LLC_XID_P	0xbf
+#define LLC_SABME	0x6f
+#define LLC_SABME_P	0x7f
+
+/*
+ * Supervisory LLC commands
+ */
+#define	LLC_RR		0x01
+#define	LLC_RNR		0x05
+#define	LLC_REJ		0x09
+
+/*
+ * Info format - dummy only
+ */
+#define	LLC_INFO	0x00
+
+/*
+ * ISO PDTR 10178 contains among others
+ */
+#define LLC_X25_LSAP	0x7e
+#define LLC_SNAP_LSAP	0xaa
+#define LLC_ISO_LSAP	0xfe
+
+#endif /* _NET_IF_LLC_H_ */
diff --git a/drivers/qtn/include/kernel/net80211/if_media.h b/drivers/qtn/include/kernel/net80211/if_media.h
new file mode 100644
index 0000000..b11c0d0
--- /dev/null
+++ b/drivers/qtn/include/kernel/net80211/if_media.h
@@ -0,0 +1,530 @@
+/*	$NetBSD: if_media.h,v 1.3 1997/03/26 01:19:27 thorpej Exp $	*/
+/* $FreeBSD: src/sys/net/if_media.h,v 1.18 2002/07/14 21:58:19 kbyanc Exp $ */
+/*	$Id: if_media.h 1441 2006-02-06 16:03:21Z mrenzmann $	*/
+
+/*
+ * Copyright (c) 1997
+ *	Jonathan Stone and Jason R. Thorpe.  All rights reserved.
+ *
+ * This software is derived from information provided by Matt Thomas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by Jonathan Stone
+ *	and Jason R. Thorpe for the NetBSD Project.
+ * 4. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: if_media.h 1441 2006-02-06 16:03:21Z mrenzmann $
+ */
+
+#ifndef _NET_IF_MEDIA_H_
+#define _NET_IF_MEDIA_H_
+
+#include "net80211/ieee80211_linux.h"
+
+/*
+ * Prototypes and definitions for BSD/OS-compatible network interface
+ * media selection.
+ *
+ * Where it is safe to do so, this code strays slightly from the BSD/OS
+ * design.  Software which uses the API (device drivers, basically)
+ * shouldn't notice any difference.
+ *
+ * Many thanks to Matt Thomas for providing the information necessary
+ * to implement this interface.
+ */
+
+struct ifmediareq {
+	char ifm_name[IFNAMSIZ];	/* if name, e.g. "en0" */
+	int ifm_current;			/* current media options */
+	int ifm_mask;			/* don't care mask */
+	int ifm_status;			/* media status */
+	int ifm_active;			/* active options */
+	int ifm_count;			/* # entries in ifm_ulist array */
+	int __user *ifm_ulist;		/* media words */
+};
+#define	SIOCSIFMEDIA	_IOWR('i', 55, struct ifreq)	/* set net media */
+#define	SIOCGIFMEDIA	_IOWR('i', 56, struct ifmediareq) /* get net media */
+
+#ifdef __KERNEL__
+
+#include <common/queue.h>
+
+/*
+ * Driver callbacks for media status and change requests.
+ */
+struct net_device;
+typedef	int (*ifm_change_cb_t)(void *);
+typedef	void (*ifm_stat_cb_t)(void *, struct ifmediareq *);
+
+/*
+ * In-kernel representation of a single supported media type.
+ */
+struct ifmedia_entry {
+	LIST_ENTRY(ifmedia_entry) ifm_list;
+	int ifm_media;		/* description of this media attachment */
+	int ifm_data;		/* for driver-specific use */
+	void *ifm_aux;		/* for driver-specific use */
+};
+
+/*
+ * One of these goes into a network interface's softc structure.
+ * It is used to keep general media state.
+ */
+struct ifmedia {
+	int ifm_mask;			/* mask of changes we don't care about */
+	int ifm_media;			/* current user-set media word */
+	struct ifmedia_entry *ifm_cur;	/* currently selected media */
+	ATH_LIST_HEAD(, ifmedia_entry) ifm_list; /* list of all supported media */
+	ifm_change_cb_t	ifm_change;	/* media change driver callback */
+	ifm_stat_cb_t ifm_status;	/* media status driver callback */
+};
+
+/* Initialize an interface's struct if_media field. */
+void ifmedia_init(struct ifmedia *, int, ifm_change_cb_t, ifm_stat_cb_t);
+
+/* Remove all mediums from a struct ifmedia.  */
+void ifmedia_removeall(struct ifmedia *);
+
+/* Add one supported medium to a struct ifmedia. */
+void ifmedia_add(struct ifmedia *, int, int, void *);
+
+/* Add an array (of ifmedia_entry) media to a struct ifmedia. */
+void ifmedia_list_add(struct ifmedia *, struct ifmedia_entry *, int);
+
+/* Set default media type on initialization. */
+void ifmedia_set(struct ifmedia *, int);
+
+/* Common ioctl function for getting/setting media, called by driver. */
+int ifmedia_ioctl(struct net_device *, struct ifreq *, struct ifmedia *, u_long);
+
+#endif /* __KERNEL__ */
+
+/*
+ * if_media Options word:
+ *	Bits	Use
+ *	----	-------
+ *	0-4	Media variant
+ *	5-7	Media type
+ *	8-15	Type specific options
+ *	16-18	Mode (for multi-mode devices)
+ *	19	RFU
+ *	20-27	Shared (global) options
+ *	28-31	Instance
+ */
+
+/*
+ * Ethernet
+ */
+#define	IFM_ETHER	0x00000020
+#define	IFM_10_T	3		/* 10BaseT - RJ45 */
+#define	IFM_10_2	4		/* 10Base2 - Thinnet */
+#define	IFM_10_5	5		/* 10Base5 - AUI */
+#define	IFM_100_TX	6		/* 100BaseTX - RJ45 */
+#define	IFM_100_FX	7		/* 100BaseFX - Fiber */
+#define	IFM_100_T4	8		/* 100BaseT4 - 4 pair cat 3 */
+#define	IFM_100_VG	9		/* 100VG-AnyLAN */
+#define	IFM_100_T2	10		/* 100BaseT2 */
+#define	IFM_1000_SX	11		/* 1000BaseSX - multi-mode fiber */
+#define	IFM_10_STP	12		/* 10BaseT over shielded TP */
+#define	IFM_10_FL	13		/* 10BaseFL - Fiber */
+#define	IFM_1000_LX	14		/* 1000baseLX - single-mode fiber */
+#define	IFM_1000_CX	15		/* 1000baseCX - 150ohm STP */
+#define	IFM_1000_T	16		/* 1000baseT - 4 pair cat 5 */
+#define	IFM_HPNA_1	17		/* HomePNA 1.0 (1Mb/s) */
+/* note 31 is the max! */
+
+#define	IFM_ETH_MASTER	0x00000100	/* master mode (1000baseT) */
+
+/*
+ * Token ring
+ */
+#define	IFM_TOKEN	0x00000040
+#define	IFM_TOK_STP4	3		/* Shielded twisted pair 4m - DB9 */
+#define	IFM_TOK_STP16	4		/* Shielded twisted pair 16m - DB9 */
+#define	IFM_TOK_UTP4	5		/* Unshielded twisted pair 4m - RJ45 */
+#define	IFM_TOK_UTP16	6		/* Unshielded twisted pair 16m - RJ45 */
+#define	IFM_TOK_STP100  7		/* Shielded twisted pair 100m - DB9 */
+#define	IFM_TOK_UTP100  8		/* Unshielded twisted pair 100m - RJ45 */
+#define	IFM_TOK_ETR	0x00000200	/* Early token release */
+#define	IFM_TOK_SRCRT	0x00000400	/* Enable source routing features */
+#define	IFM_TOK_ALLR	0x00000800	/* All routes / Single route bcast */
+#define	IFM_TOK_DTR	0x00002000	/* Dedicated token ring */
+#define	IFM_TOK_CLASSIC	0x00004000	/* Classic token ring */
+#define	IFM_TOK_AUTO	0x00008000	/* Automatic Dedicate/Classic token ring */
+
+/*
+ * FDDI
+ */
+#define	IFM_FDDI	0x00000060
+#define	IFM_FDDI_SMF	3		/* Single-mode fiber */
+#define	IFM_FDDI_MMF	4		/* Multi-mode fiber */
+#define	IFM_FDDI_UTP	5		/* CDDI / UTP */
+#define	IFM_FDDI_DA	0x00000100	/* Dual attach / single attach */
+
+/*
+ * IEEE 802.11 Wireless
+ */
+#define	IFM_IEEE80211	0x00000080
+/* NB: 0,1,2 are auto, manual, none defined below */
+#define	IFM_IEEE80211_FH1	3	/* Frequency Hopping 1Mbps */
+#define	IFM_IEEE80211_FH2	4	/* Frequency Hopping 2Mbps */
+#define	IFM_IEEE80211_DS1	5	/* Direct Sequence 1Mbps */
+#define	IFM_IEEE80211_DS2	6	/* Direct Sequence 2Mbps */
+#define	IFM_IEEE80211_DS5	7	/* Direct Sequence 5.5Mbps */
+#define	IFM_IEEE80211_DS11	8	/* Direct Sequence 11Mbps */
+#define	IFM_IEEE80211_DS22	9	/* Direct Sequence 22Mbps */
+#define IFM_IEEE80211_OFDM1_50  10      /* OFDM 1.5Mbps */
+#define IFM_IEEE80211_OFDM2_25  11      /* OFDM 2.25Mbps */
+#define IFM_IEEE80211_OFDM3     12      /* OFDM 3Mbps */
+#define IFM_IEEE80211_OFDM4_50  13      /* OFDM 4.5Mbps */
+#define IFM_IEEE80211_OFDM6     14      /* OFDM 6Mbps */
+#define IFM_IEEE80211_OFDM9     15      /* OFDM 9Mbps */
+#define IFM_IEEE80211_OFDM12    16      /* OFDM 12Mbps */
+#define IFM_IEEE80211_OFDM13_5  17      /* OFDM 13.5Mbps */
+#define IFM_IEEE80211_OFDM18    18      /* OFDM 18Mbps */
+#define IFM_IEEE80211_OFDM24    19      /* OFDM 24Mbps */
+#define IFM_IEEE80211_OFDM27    20      /* OFDM 27Mbps */
+#define IFM_IEEE80211_OFDM36    21      /* OFDM 36Mbps */
+#define IFM_IEEE80211_OFDM48    22      /* OFDM 48Mbps */
+#define IFM_IEEE80211_OFDM54    23      /* OFDM 54Mbps */
+#define IFM_IEEE80211_OFDM72    24      /* OFDM 72Mbps */
+
+#define IFM_IEEE80211_OFDM_HT_LEG_6   1
+#define IFM_IEEE80211_OFDM_HT_LEG_9   2
+#define IFM_IEEE80211_OFDM_HT_LEG_12  3
+#define IFM_IEEE80211_OFDM_HT_LEG_18  4
+#define IFM_IEEE80211_OFDM_HT_LEG_24  5
+#define IFM_IEEE80211_OFDM_HT_LEG_36  6
+#define IFM_IEEE80211_OFDM_HT_LEG_48  7
+#define IFM_IEEE80211_OFDM_HT_LEG_54  8
+#define IFM_IEEE80211_OFDM_HT_0    	  9      /* 11N - MCS 0  -6.5Mbps */
+#define IFM_IEEE80211_OFDM_HT_1    	  10      /* 11N - MCS 1  -13Mbps */
+#define IFM_IEEE80211_OFDM_HT_2    	  11      /* 11N - MCS 2  -19.5Mbps */
+#define IFM_IEEE80211_OFDM_HT_3    	  12      /* 11N - MCS 3  -26Mbps */
+#define IFM_IEEE80211_OFDM_HT_4    	  13      /* 11N - MCS 4  -39Mbps */
+#define IFM_IEEE80211_OFDM_HT_5    	  14      /* 11N - MCS 5  -52Mbps */
+#define IFM_IEEE80211_OFDM_HT_6    	  15      /* 11N - MCS 6  -58.5Mbps */
+#define IFM_IEEE80211_OFDM_HT_7    	  16      /* 11N - MCS 7  -65Mbps */
+#define IFM_IEEE80211_OFDM_HT_8    	  17      /* 11N - MCS 8  -13Mbps */
+#define IFM_IEEE80211_OFDM_HT_9    	  18      /* 11N - MCS 9  -26Mbps */
+#define IFM_IEEE80211_OFDM_HT_10   	  19     /* 11N - MCS 10 -39Mbps */
+#define IFM_IEEE80211_OFDM_HT_11   	  20     /* 11N - MCS 11 -52Mbps */
+#define IFM_IEEE80211_OFDM_HT_12   	  21     /* 11N - MCS 12 -78Mbps */
+#define IFM_IEEE80211_OFDM_HT_13   	  22     /* 11N - MCS 13 -104Mbps */
+#define IFM_IEEE80211_OFDM_HT_14   	  23     /* 11N - MCS 14 -117Mbps */
+#define IFM_IEEE80211_OFDM_HT_15   	  24     /* 11N - MCS 15 -130Mbps */
+
+#define	IFM_IEEE80211_ADHOC	0x00000100	/* Operate in Adhoc mode */
+#define	IFM_IEEE80211_HOSTAP	0x00000200	/* Operate in Host AP mode */
+#define	IFM_IEEE80211_IBSS	0x00000400	/* Operate in IBSS mode */
+#define	IFM_IEEE80211_WDS	0x00000800	/* Operate in WDS mode */
+#define	IFM_IEEE80211_TURBO	0x00001000	/* Operate in turbo mode */
+#define	IFM_IEEE80211_MONITOR	0x00002000	/* Operate in monitor mode */
+
+/* operating mode for multi-mode devices */
+#define	IFM_IEEE80211_11A	0x00010000	/* 5Ghz, OFDM mode */
+#define	IFM_IEEE80211_11B	0x00020000	/* Direct Sequence mode */
+#define	IFM_IEEE80211_11G	0x00030000	/* 2Ghz, CCK mode */
+#define	IFM_IEEE80211_FH	0x00040000	/* 2Ghz, GFSK mode */
+#define	IFM_IEEE80211_11NA	0x00070000	/* 5Ghz, OFDM mode, HT */
+#define	IFM_IEEE80211_11NG	0x00080000	/* 2Ghz, OFDM mode, HT */
+#define IFM_IEEE80211_11NG_HT40PM  0x00090000	/* 2G HT40 */
+#define IFM_IEEE80211_11NA_HT40PM  0x000A0000	 /* 5G HT40 */
+#define IFM_IEEE80211_11AC_VHT20PM 0x000B0000	/* 5G VHT20 */
+#define IFM_IEEE80211_11AC_VHT40PM 0x000C0000	/* 5G VHT40 */
+#define IFM_IEEE80211_11AC_VHT80PM 0x000D0000	/* 5G VHT80 */
+#define IFM_IEEE80211_11AC_VHT160PM 0x000E0000	/* 5G VHT160 */
+
+/*
+ * Shared media sub-types
+ */
+#define	IFM_AUTO	0		/* Autoselect best media */
+#define	IFM_MANUAL	1		/* Jumper/dipswitch selects media */
+#define	IFM_NONE	2		/* Deselect all media */
+
+/*
+ * Shared options
+ */
+#define	IFM_FDX		0x00100000	/* Force full duplex */
+#define	IFM_HDX		0x00200000	/* Force half duplex */
+#define	IFM_FLAG0	0x01000000	/* Driver defined flag */
+#define	IFM_FLAG1	0x02000000	/* Driver defined flag */
+#define	IFM_FLAG2	0x04000000	/* Driver defined flag */
+#define	IFM_LOOP	0x08000000	/* Put hardware in loopback */
+
+/*
+ * Masks
+ */
+#define	IFM_NMASK	0x000000e0	/* Network type */
+#define	IFM_TMASK	0x0000001f	/* Media sub-type */
+#define	IFM_IMASK	0xf0000000	/* Instance */
+#define	IFM_ISHIFT	28		/* Instance shift */
+#define	IFM_OMASK	0x0000ff00	/* Type specific options */
+#define	IFM_MMASK	0x000f0000	/* Mode */
+#define	IFM_MSHIFT	16		/* Mode shift */
+#define	IFM_GMASK	0x0ff00000	/* Global options */
+
+/*
+ * Status bits
+ */
+#define	IFM_AVALID	0x00000001	/* Active bit valid */
+#define	IFM_ACTIVE	0x00000002	/* Interface attached to working net */
+
+/*
+ * Macros to extract various bits of information from the media word.
+ */
+#define	IFM_TYPE(x)		((x) & IFM_NMASK)
+#define	IFM_SUBTYPE(x)		((x) & IFM_TMASK)
+#define	IFM_TYPE_OPTIONS(x)	((x) & IFM_OMASK)
+#define	IFM_INST(x)		(((x) & IFM_IMASK) >> IFM_ISHIFT)
+#define	IFM_OPTIONS(x)		((x) & (IFM_OMASK|IFM_GMASK))
+#define	IFM_MODE(x)		((x) & IFM_MMASK)
+
+#define	IFM_INST_MAX		IFM_INST(IFM_IMASK)
+
+/*
+ * Macro to create a media word.
+ */
+#define	IFM_MAKEWORD(type, subtype, options, instance)			\
+	((type) | (subtype) | (options) | ((instance) << IFM_ISHIFT))
+#define	IFM_MAKEMODE(mode) \
+	(((mode) << IFM_MSHIFT) & IFM_MMASK)
+
+/*
+ * NetBSD extension not defined in the BSDI API.  This is used in various
+ * places to get the canonical description for a given type/subtype.
+ *
+ * NOTE: all but the top-level type descriptions must contain NO whitespace!
+ * Otherwise, parsing these in ifconfig(8) would be a nightmare.
+ */
+struct ifmedia_description {
+	int	ifmt_word;		/* word value; may be masked */
+	const char *ifmt_string;	/* description */
+};
+
+#define	IFM_TYPE_DESCRIPTIONS {						\
+	{ IFM_ETHER,		"Ethernet" },				\
+	{ IFM_TOKEN,		"Token ring" },				\
+	{ IFM_FDDI,		"FDDI" },				\
+	{ IFM_IEEE80211,	"IEEE 802.11 Wireless Ethernet" },	\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_ETHERNET_DESCRIPTIONS {				\
+	{ IFM_10_T,	"10baseT/UTP" },				\
+	{ IFM_10_2,	"10base2/BNC" },				\
+	{ IFM_10_5,	"10base5/AUI" },				\
+	{ IFM_100_TX,	"100baseTX" },					\
+	{ IFM_100_FX,	"100baseFX" },					\
+	{ IFM_100_T4,	"100baseT4" },					\
+	{ IFM_100_VG,	"100baseVG" },					\
+	{ IFM_100_T2,	"100baseT2" },					\
+	{ IFM_10_STP,	"10baseSTP" },					\
+	{ IFM_10_FL,	"10baseFL" },					\
+	{ IFM_1000_SX,	"1000baseSX" },					\
+	{ IFM_1000_LX,	"1000baseLX" },					\
+	{ IFM_1000_CX,	"1000baseCX" },					\
+	{ IFM_1000_T,	"1000baseTX" },					\
+	{ IFM_1000_T,	"1000baseT" },					\
+	{ IFM_HPNA_1,	"homePNA" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_ETHERNET_ALIASES {					\
+	{ IFM_10_T,	"UTP" },					\
+	{ IFM_10_T,	"10UTP" },					\
+	{ IFM_10_2,	"BNC" },					\
+	{ IFM_10_2,	"10BNC" },					\
+	{ IFM_10_5,	"AUI" },					\
+	{ IFM_10_5,	"10AUI" },					\
+	{ IFM_100_TX,	"100TX" },					\
+	{ IFM_100_T4,	"100T4" },					\
+	{ IFM_100_VG,	"100VG" },					\
+	{ IFM_100_T2,	"100T2" },					\
+	{ IFM_10_STP,	"10STP" },					\
+	{ IFM_10_FL,	"10FL" },					\
+	{ IFM_1000_SX,	"1000SX" },					\
+	{ IFM_1000_LX,	"1000LX" },					\
+	{ IFM_1000_CX,	"1000CX" },					\
+	{ IFM_1000_T,	"1000TX" },					\
+	{ IFM_1000_T,	"1000T" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_ETHERNET_OPTION_DESCRIPTIONS {			\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_TOKENRING_DESCRIPTIONS {				\
+	{ IFM_TOK_STP4,	"DB9/4Mbit" },					\
+	{ IFM_TOK_STP16, "DB9/16Mbit" },				\
+	{ IFM_TOK_UTP4,	"UTP/4Mbit" },					\
+	{ IFM_TOK_UTP16, "UTP/16Mbit" },				\
+	{ IFM_TOK_STP100, "STP/100Mbit" },				\
+	{ IFM_TOK_UTP100, "UTP/100Mbit" },				\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_TOKENRING_ALIASES {					\
+	{ IFM_TOK_STP4,	"4STP" },					\
+	{ IFM_TOK_STP16, "16STP" },					\
+	{ IFM_TOK_UTP4,	"4UTP" },					\
+	{ IFM_TOK_UTP16, "16UTP" },					\
+	{ IFM_TOK_STP100, "100STP" },					\
+	{ IFM_TOK_UTP100, "100UTP" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_TOKENRING_OPTION_DESCRIPTIONS {			\
+	{ IFM_TOK_ETR,	"EarlyTokenRelease" },				\
+	{ IFM_TOK_SRCRT, "SourceRouting" },				\
+	{ IFM_TOK_ALLR,	"AllRoutes" },					\
+	{ IFM_TOK_DTR,	"Dedicated" },					\
+	{ IFM_TOK_CLASSIC,"Classic" },					\
+	{ IFM_TOK_AUTO,	" " },						\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_FDDI_DESCRIPTIONS {					\
+	{ IFM_FDDI_SMF, "Single-mode" },				\
+	{ IFM_FDDI_MMF, "Multi-mode" },					\
+	{ IFM_FDDI_UTP, "UTP" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_FDDI_ALIASES {					\
+	{ IFM_FDDI_SMF,	"SMF" },					\
+	{ IFM_FDDI_MMF,	"MMF" },					\
+	{ IFM_FDDI_UTP,	"CDDI" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_FDDI_OPTION_DESCRIPTIONS {				\
+	{ IFM_FDDI_DA, "Dual-attach" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_IEEE80211_DESCRIPTIONS {				\
+	{ IFM_IEEE80211_FH1, "FH/1Mbps" },				\
+	{ IFM_IEEE80211_FH2, "FH/2Mbps" },				\
+	{ IFM_IEEE80211_DS1, "DS/1Mbps" },				\
+	{ IFM_IEEE80211_DS2, "DS/2Mbps" },				\
+	{ IFM_IEEE80211_DS5, "DS/5.5Mbps" },				\
+	{ IFM_IEEE80211_DS11, "DS/11Mbps" },				\
+	{ IFM_IEEE80211_DS22, "DS/22Mbps" },				\
+	{ IFM_IEEE80211_OFDM1_50, "OFDM/1.50Mbps" },			\
+	{ IFM_IEEE80211_OFDM2_25, "OFDM/2.25Mbps" },			\
+	{ IFM_IEEE80211_OFDM3, "OFDM/3Mbps" },				\
+	{ IFM_IEEE80211_OFDM4_50, "OFDM/4.5Mbps" },			\
+	{ IFM_IEEE80211_OFDM6, "OFDM/6Mbps" },				\
+	{ IFM_IEEE80211_OFDM9, "OFDM/9Mbps" },				\
+	{ IFM_IEEE80211_OFDM12, "OFDM/12Mbps" },			\
+	{ IFM_IEEE80211_OFDM13_5, "OFDM/13.5Mbps" },			\
+	{ IFM_IEEE80211_OFDM18, "OFDM/18Mbps" },			\
+	{ IFM_IEEE80211_OFDM24, "OFDM/24Mbps" },			\
+	{ IFM_IEEE80211_OFDM27, "OFDM/27Mbps" },			\
+	{ IFM_IEEE80211_OFDM36, "OFDM/36Mbps" },			\
+	{ IFM_IEEE80211_OFDM48, "OFDM/48Mbps" },			\
+	{ IFM_IEEE80211_OFDM54, "OFDM/54Mbps" },			\
+	{ IFM_IEEE80211_OFDM72, "OFDM/72Mbps" },			\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_IEEE80211_ALIASES {					\
+	{ IFM_IEEE80211_FH1, "FH1" },					\
+	{ IFM_IEEE80211_FH2, "FH2" },					\
+	{ IFM_IEEE80211_FH1, "FrequencyHopping/1Mbps" },		\
+	{ IFM_IEEE80211_FH2, "FrequencyHopping/2Mbps" },		\
+	{ IFM_IEEE80211_DS1, "DS1" },					\
+	{ IFM_IEEE80211_DS2, "DS2" },					\
+	{ IFM_IEEE80211_DS5, "DS5.5" },					\
+	{ IFM_IEEE80211_DS11, "DS11" },					\
+	{ IFM_IEEE80211_DS22, "DS22" },					\
+	{ IFM_IEEE80211_DS1, "DirectSequence/1Mbps" },			\
+	{ IFM_IEEE80211_DS2, "DirectSequence/2Mbps" },			\
+	{ IFM_IEEE80211_DS5, "DirectSequence/5.5Mbps" },		\
+	{ IFM_IEEE80211_DS11, "DirectSequence/11Mbps" },		\
+	{ IFM_IEEE80211_DS22, "DirectSequence/22Mbps" },		\
+	{ IFM_IEEE80211_OFDM1_50, "OFDM1.50Mpbs" },			\
+	{ IFM_IEEE80211_OFDM2_25, "OFDM2.25Mbps" },			\
+	{ IFM_IEEE80211_OFDM3, "OFDM3Mbps" },				\
+	{ IFM_IEEE80211_OFDM4_50, "OFDM4.5Mbps" },			\
+	{ IFM_IEEE80211_OFDM6, "OFDM6" },				\
+	{ IFM_IEEE80211_OFDM9, "OFDM9" },				\
+	{ IFM_IEEE80211_OFDM12, "OFDM12" },				\
+	{ IFM_IEEE80211_OFDM13_5, "OFDM13.5Mbps" },			\
+	{ IFM_IEEE80211_OFDM18, "OFDM18" },				\
+	{ IFM_IEEE80211_OFDM24, "OFDM24" },				\
+	{ IFM_IEEE80211_OFDM27, "OFDM27" },				\
+	{ IFM_IEEE80211_OFDM36, "OFDM36" },				\
+	{ IFM_IEEE80211_OFDM48, "OFDM48" },				\
+	{ IFM_IEEE80211_OFDM54, "OFDM54" },				\
+	{ IFM_IEEE80211_OFDM72, "OFDM72" },				\
+	{ IFM_IEEE80211_DS1, "CCK1" },					\
+	{ IFM_IEEE80211_DS2, "CCK2" },					\
+	{ IFM_IEEE80211_DS5, "CCK5.5" },				\
+	{ IFM_IEEE80211_DS11, "CCK11" },				\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_IEEE80211_OPTION_DESCRIPTIONS {			\
+	{ IFM_IEEE80211_ADHOC, "adhoc" },				\
+	{ IFM_IEEE80211_HOSTAP, "hostap" },				\
+	{ IFM_IEEE80211_IBSS, "ibss" },					\
+	{ IFM_IEEE80211_WDS, "wds" },					\
+	{ IFM_IEEE80211_TURBO, "turbo" },				\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_IEEE80211_MODE_DESCRIPTIONS {			\
+	{ IFM_IEEE80211_11A, "11a" },					\
+	{ IFM_IEEE80211_11B, "11b" },					\
+	{ IFM_IEEE80211_11G, "11g" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_SHARED_DESCRIPTIONS {				\
+	{ IFM_AUTO,	"autoselect" },					\
+	{ IFM_MANUAL,	"manual" },					\
+	{ IFM_NONE,	"none" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SUBTYPE_SHARED_ALIASES {					\
+	{ IFM_AUTO,	"auto" },					\
+	{ 0, NULL },							\
+}
+
+#define	IFM_SHARED_OPTION_DESCRIPTIONS {				\
+	{ IFM_FDX,	"full-duplex" },				\
+	{ IFM_HDX,	"half-duplex" },				\
+	{ IFM_FLAG0,	"flag0" },					\
+	{ IFM_FLAG1,	"flag1" },					\
+	{ IFM_FLAG2,	"flag2" },					\
+	{ IFM_LOOP,	"hw-loopback" },				\
+	{ 0, NULL },							\
+}
+
+#endif	/* _NET_IF_MEDIA_H_ */
diff --git a/drivers/qtn/include/kernel/qtn/br_types.h b/drivers/qtn/include/kernel/qtn/br_types.h
new file mode 100644
index 0000000..f792083
--- /dev/null
+++ b/drivers/qtn/include/kernel/qtn/br_types.h
@@ -0,0 +1,18 @@
+/*
+ * Linux ethernet bridge; shared types with Quantenna FWT
+ *
+ * (C) Copyright 2013 Quantenna Communications Inc.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _BR_TYPES_H
+#define _BR_TYPES_H
+
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+
+#endif
diff --git a/drivers/qtn/include/kernel/qtn/busmon.h b/drivers/qtn/include/kernel/qtn/busmon.h
new file mode 100644
index 0000000..01f81f8
--- /dev/null
+++ b/drivers/qtn/include/kernel/qtn/busmon.h
@@ -0,0 +1,75 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2013 Quantenna Communications, Inc                  **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef __TOPAZ_BUSMON_H
+#define __TOPAZ_BUSMON_H
+
+#define TOPAZ_BUSMON_MAX_RANGES	4
+
+struct topaz_busmon_range {
+	uintptr_t start;
+	uintptr_t end;
+};
+
+void topaz_busmon_range_check(uint8_t bus,
+                              const struct topaz_busmon_range *range,
+                              size_t nranges, bool outside);
+
+static inline void topaz_busmon_range_check_disable(uint8_t bus)
+{
+	topaz_busmon_range_check(bus, NULL, 0, 0);
+}
+
+void topaz_busmon_timeout(uint8_t bus, uint16_t timeout, bool enable);
+
+static inline void topaz_busmon_timeout_en(uint8_t bus, uint16_t timeout)
+{
+	topaz_busmon_timeout(bus, timeout, 1);
+}
+
+static inline void topaz_busmon_timeout_dis(uint8_t bus)
+{
+	topaz_busmon_timeout(bus, 0, 0);
+}
+
+#endif /* __TOPAZ_BUSMON_H */
+
diff --git a/drivers/qtn/include/kernel/qtn/qdrv_sch_data.h b/drivers/qtn/include/kernel/qtn/qdrv_sch_data.h
new file mode 100644
index 0000000..549b38a
--- /dev/null
+++ b/drivers/qtn/include/kernel/qtn/qdrv_sch_data.h
@@ -0,0 +1,105 @@
+/*SH1
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012 Quantenna Communications, Inc.                 **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH1*/
+
+#ifndef _QDRV_SCH_DATA_H
+#define _QDRV_SCH_DATA_H
+
+#include <qtn/qtn_global.h>
+#include <qdrv_sch_const.h>
+#include <common/queue.h>
+
+#define QDRV_SCH_SHARED_AC_DATA_DEQUEUE_LIMIT	4
+
+struct qdrv_sch_shared_data;
+
+struct qdrv_sch_node_band_data {
+	TAILQ_ENTRY(qdrv_sch_node_band_data) nbd_next;
+	struct sk_buff_head queue;
+	uint32_t sent;
+	uint32_t dropped;
+	uint32_t dropped_victim;
+};
+
+struct qdrv_sch_node_data {
+	struct qdrv_sch_node_band_data bands[QDRV_SCH_BANDS];
+	struct qdrv_sch_shared_data *shared_data;
+	struct Qdisc *qdisc;
+	uint16_t used_tokens;
+	uint16_t muc_queued;
+	uint8_t over_thresh;
+	uint32_t over_thresh_cnt;
+	uint32_t low_rate;
+	struct device_attribute sysfs_attr;
+};
+
+static inline struct qdrv_sch_node_data *
+qdrv_sch_get_node_data(struct qdrv_sch_node_band_data *nbd, uint8_t band)
+{
+	return container_of(nbd, struct qdrv_sch_node_data, bands[band]);
+}
+
+struct qdrv_sch_shared_band_data {
+	int consec_dequeues;
+	TAILQ_HEAD(, qdrv_sch_node_band_data) active_nodes;
+};
+
+struct qdrv_sch_shared_data {
+	struct list_head entry;
+	spinlock_t lock;
+	char dev_name[IFNAMSIZ];
+	uint8_t queuing_alg;
+	int16_t total_tokens;
+	int16_t users;
+	struct sk_buff *held_skb;
+	struct Qdisc *held_skb_sch;
+	int16_t available_tokens;
+	uint16_t reserved_tokens_per_user;
+	uint16_t random_drop_threshold;
+	void (*drop_callback)(struct sk_buff *);
+	struct qdrv_sch_shared_band_data bands[QDRV_SCH_BANDS];
+};
+
+#define qdrv_sch_shared_data_lock(qsh, flags)	spin_lock_irqsave(&qsh->lock, flags)
+#define qdrv_sch_shared_data_unlock(qsh, flags)	spin_unlock_irqrestore(&qsh->lock, flags)
+
+#endif
+
diff --git a/drivers/qtn/include/shared/compat.h b/drivers/qtn/include/shared/compat.h
new file mode 100644
index 0000000..500de12
--- /dev/null
+++ b/drivers/qtn/include/shared/compat.h
@@ -0,0 +1,135 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: compat.h 2601 2007-07-24 14:14:47Z kelmo $
+ */
+#ifndef _ATH_COMPAT_H_
+#define _ATH_COMPAT_H_
+
+/* Compatibility with older Linux kernels */
+#if defined(__KERNEL__) || (defined(__linux__) && __linux__)
+#include <linux/types.h>
+#endif
+#if !defined(__KERNEL__) || !defined (__bitwise)
+#define __le16 u_int16_t
+#define __le32 u_int32_t
+#define __le64 u_int64_t
+#define __be16 u_int16_t
+#define __be32 u_int32_t
+#define __be64 u_int64_t
+#define __force
+#endif
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({			\
+        const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+        (type *)( (char *)__mptr - offsetof(type,member) );})
+#endif
+
+/*
+ * BSD/Linux compatibility shims.  These are used mainly to
+ * minimize differences when importing necesary BSD code.
+ */
+#ifndef NBBY
+#define	NBBY	8			/* number of bits/byte */
+#endif
+
+/* roundup() appears in Linux 2.6.18 */
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#endif
+
+#ifndef roundup
+#define	roundup(x, y)	((((x)+((y)-1))/(y))*(y))  /* to any y */
+#endif
+
+#ifndef howmany
+#define	howmany(x, y)	(((x)+((y)-1))/(y))
+#endif
+
+/* Bit map related macros. */
+#define	setbit(a,i)	((a)[(i)/NBBY] |= 1<<((i)%NBBY))
+#define	clrbit(a,i)	((a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define	isset(a,i)	((a)[(i)/NBBY] & (1<<((i)%NBBY)))
+#define	isclr(a,i)	(((a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+
+#ifndef __packed
+#define	__packed	__attribute__((__packed__))
+#endif
+
+#define	__printflike(_a,_b) \
+	__attribute__ ((__format__ (__printf__, _a, _b)))
+#define	__offsetof(t,m)	offsetof(t,m)
+
+#ifndef ALIGNED_POINTER
+/*
+ * ALIGNED_POINTER is a boolean macro that checks whether an address
+ * is valid to fetch data elements of type t from on this architecture.
+ * This does not reflect the optimal alignment, just the possibility
+ * (within reasonable limits). 
+ *
+ */
+#define ALIGNED_POINTER(p,t)	1
+#endif
+
+#ifdef __KERNEL__
+#define	KASSERT(exp, msg) do {			\
+	if (unlikely(!(exp))) {			\
+		printk msg;			\
+		BUG();				\
+	}					\
+} while (0)
+#endif /* __KERNEL__ */
+
+/*
+ * NetBSD/FreeBSD defines for file version.
+ */
+#define	__FBSDID(_s)
+#define	__KERNEL_RCSID(_n,_s)
+
+/*
+ * Fixes for Linux API changes
+ */
+#ifdef __KERNEL__
+
+#include <linux/version.h>
+#define ATH_REGISTER_SYSCTL_TABLE(t) register_sysctl_table(t)
+
+#endif /* __KERNEL__ */
+
+/* FIXME: this needs changing if we need to support TCM/SRAM for time critical code */
+#define __tcm_text
+
+#endif /* _ATH_COMPAT_H_ */
diff --git a/drivers/qtn/include/shared/net80211/_ieee80211.h b/drivers/qtn/include/shared/net80211/_ieee80211.h
new file mode 100644
index 0000000..df619c8
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/_ieee80211.h
@@ -0,0 +1,1522 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: _ieee80211.h 2749 2007-10-16 08:58:14Z kelmo $
+ */
+#ifndef _NET80211__IEEE80211_H_
+#define _NET80211__IEEE80211_H_
+
+#include <compat.h>
+#ifdef __KERNEL__
+#include <linux/in6.h>
+#endif
+
+enum ieee80211_phytype {
+	IEEE80211_T_DS,			/* direct sequence spread spectrum */
+	IEEE80211_T_FH,			/* frequency hopping */
+	IEEE80211_T_OFDM,		/* orthogonal frequency division multiplexing */
+	IEEE80211_T_TURBO,		/* high rate OFDM, aka turbo mode */
+	IEEE80211_T_HT,			/* HT - full GI */
+	IEEE80211_T_MAX
+};
+#define	IEEE80211_T_CCK	IEEE80211_T_DS	/* more common nomenclature */
+
+/*
+ * XXX not really a mode; there are really multiple PHY's
+ * Please update ieee80211_chanflags when the definition of
+ * ieee80211_phymode changed
+ */
+enum ieee80211_phymode {
+	IEEE80211_MODE_AUTO	= 0,	/* autoselect */
+	IEEE80211_MODE_11A	= 1,	/* 5GHz, OFDM */
+	IEEE80211_MODE_11B	= 2,	/* 2GHz, CCK */
+	IEEE80211_MODE_11G	= 3,	/* 2GHz, OFDM */
+	IEEE80211_MODE_FH	= 4,	/* 2GHz, GFSK */
+	IEEE80211_MODE_TURBO_A	= 5,	/* 5GHz, OFDM, 2x clock dynamic turbo */
+	IEEE80211_MODE_TURBO_G	= 6,	/* 2GHz, OFDM, 2x clock  dynamic turbo*/
+	IEEE80211_MODE_11NA		= 7,	/* 5GHz, HT20 */
+	IEEE80211_MODE_11NG		= 8,	/* 2GHz, HT20 */
+	IEEE80211_MODE_11NG_HT40PM	= 9,	/* 2GHz HT40 */
+	IEEE80211_MODE_11NA_HT40PM	= 10,	/* 5GHz HT40 */
+	IEEE80211_MODE_11AC_VHT20PM	= 11,	/* 5GHz VHT20 */
+	IEEE80211_MODE_11AC_VHT40PM	= 12,	/* 5GHz VHT40 */
+	IEEE80211_MODE_11AC_VHT80PM	= 13,	/* 5GHz VHT80 */
+	IEEE80211_MODE_11AC_VHT160PM	= 14,	/* 5GHz VHT160 */
+	IEEE80211_MODE_MAX		= 15,	/* Always keep this last */
+};
+
+/*
+#define IEEE80211_BM_11A	(1 << IEEE80211_MODE_11A)
+#define IEEE80211_BM_11B	(1 << IEEE80211_MODE_11B)
+#define IEEE80211_BM_11G	(1 << IEEE80211_MODE_11G)
+#define IEEE80211_BM_11NA	(1 << IEEE80211_MODE_11NA)
+#define IEEE80211_BM_11NG	(1 << IEEE80211_MODE_11NG)
+#define IEEE80211_BM_11NG_PLUS	(1 << IEEE80211_MODE_11B)
+#define IEEE80211_BM_11NG_MINUS	(1 << IEEE80211_MODE_11B)
+#define IEEE80211_BM_11NA_PLUS	(1 << IEEE80211_MODE_11B)
+#define IEEE80211_BM_11NA_MINUS	(1 << IEEE80211_MODE_11B)
+*/
+
+#define IEEE80211_IS_11A(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11A)
+
+#define IEEE80211_IS_11NA_20(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11NA)
+
+#define IEEE80211_IS_11NA_40(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11NA_HT40PM)
+
+#define IS_IEEE80211_11NA(_c) \
+	(IEEE80211_IS_11NA_20(_c) || IEEE80211_IS_11NA_40(_c))
+
+#define IEEE80211_IS_VHT_20(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11AC_VHT20PM)
+
+#define IEEE80211_IS_VHT_40(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11AC_VHT40PM)
+
+#define IEEE80211_IS_VHT_80(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11AC_VHT80PM)
+
+#define IEEE80211_IS_VHT_160(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11AC_VHT160PM)
+
+#define IS_IEEE80211_VHT_ENABLED(_c) \
+	(IEEE80211_IS_VHT_160(_c) || IEEE80211_IS_VHT_80(_c)\
+	|| IEEE80211_IS_VHT_40(_c) || IEEE80211_IS_VHT_20(_c))
+
+#define IS_IEEE80211_5G_BAND(_c) \
+	(IEEE80211_IS_11A(_c) || IS_IEEE80211_11NA(_c) || IS_IEEE80211_VHT_ENABLED(_c))
+
+#define IEEE80211_IS_11B(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11B)
+
+#define IEEE80211_IS_11G(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11G)
+
+#define IEEE80211_IS_11NG_20(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11NG)
+
+#define IEEE80211_IS_11NG_40(_c) \
+	((_c)->ic_phymode == IEEE80211_MODE_11NG_HT40PM)
+
+#define IS_IEEE80211_11NG(_c) \
+	(IEEE80211_IS_11NG_20(_c) || IEEE80211_IS_11NG_40(_c))
+
+#define IS_IEEE80211_24G_BAND(_c) \
+	(IEEE80211_IS_11B(_c) || IEEE80211_IS_11G(_c) || IS_IEEE80211_11NG(_c))
+
+#define IEEE80211_MODE_IS_11B(m) \
+	((m) == IEEE80211_MODE_11B)
+
+#define IEEE80211_MODE_IS_11G(m) \
+	((m) == IEEE80211_MODE_11G)
+
+#define IEEE80211_MODE_IS_11NG_20(m) \
+	((m) == IEEE80211_MODE_11NG)
+
+#define IEEE80211_MODE_IS_11NG_40(m) \
+	((m) == IEEE80211_MODE_11NG_HT40PM)
+
+#define IS_IEEE80211_MODE_24G_BAND(m) \
+	(IEEE80211_MODE_IS_11B(m) || IEEE80211_MODE_IS_11G(m) || IEEE80211_MODE_IS_11NG_20(m) || IEEE80211_MODE_IS_11NG_40(m))
+
+#define IS_IEEE80211_11NG_VHT_ENABLED(_c) \
+	(IS_IEEE80211_11NG(_c) && ((_c)->ic_flags_ext & IEEE80211_FEXT_24GVHT))
+
+#define IS_IEEE80211_DUALBAND_VHT_ENABLED(_c) \
+	(IS_IEEE80211_VHT_ENABLED(_c) || IS_IEEE80211_11NG_VHT_ENABLED(_c))
+
+enum ieee80211_opmode {
+	IEEE80211_M_STA		= 1,	/* infrastructure station */
+	IEEE80211_M_IBSS	= 0,	/* IBSS (adhoc) station */
+	IEEE80211_M_AHDEMO	= 3,	/* Old lucent compatible adhoc demo */
+	IEEE80211_M_HOSTAP	= 6,	/* Software Access Point */
+	IEEE80211_M_MONITOR	= 8,	/* Monitor mode */
+	IEEE80211_M_WDS		= 2	/* WDS link */
+};
+
+enum ieee80211_phy_band {
+	IEEE80211_2_4Ghz	= 0,
+	IEEE80211_5Ghz		= 1
+};
+
+/*
+ * 802.11n
+ */
+
+enum ieee80211_11n_htmode {
+	IEEE80211_11N_HTAUTO	=  0,
+	IEEE80211_11N_HT20	=  1,
+	IEEE80211_11N_HT40PLUS	=  2,
+	IEEE80211_11N_HT40MINUS	=  3
+};
+
+enum ieee80211_cwm_mode {
+	IEEE80211_CWM_MODE20,
+	IEEE80211_CWM_MODE2040,
+	IEEE80211_CWM_MODE40,
+	IEEE80211_CWM_MODEMAX
+
+};
+
+enum ieee80211_cwm_extprotspacing {
+	IEEE80211_CWM_EXTPROTSPACING20,
+	IEEE80211_CWM_EXTPROTSPACING25,
+	IEEE80211_CWM_EXTPROTSPACINGMAX
+};
+
+enum ieee80211_cwm_width {
+	IEEE80211_CWM_WIDTH20,
+	IEEE80211_CWM_WIDTH40,
+	IEEE80211_CWM_WIDTH80,
+	IEEE80211_CWM_WIDTH160,	/* or 80+80 MHz */
+};
+
+enum ieee80211_cwm_extprotmode {
+	IEEE80211_CWM_EXTPROTNONE,	/* no protection */
+	IEEE80211_CWM_EXTPROTCTSONLY,	/* CTS to self */
+	IEEE80211_CWM_EXTPROTRTSCTS,	/* RTS-CTS */
+	IEEE80211_CWM_EXTPROTMAX
+};
+
+/* CWM (Channel Width Management) Information */
+struct ieee80211_cwm {
+
+	/* Configuration */
+	enum ieee80211_cwm_mode		cw_mode;		/* CWM mode */
+	int8_t				cw_extoffset;		/* CWM Extension Channel Offset */
+	enum ieee80211_cwm_extprotmode	cw_extprotmode;		/* CWM Extension Channel Protection Mode */
+	enum ieee80211_cwm_extprotspacing cw_extprotspacing;	/* CWM Extension Channel Protection Spacing */
+
+	/* State */
+	 enum ieee80211_cwm_width	cw_width;		/* CWM channel width */
+};
+
+
+enum ieee80211_fixed_rate_status {
+	DISABLED = 0,
+	ENABLED = 1,	/* ieee rate */
+};
+
+/* Holds the fixed rate information for each VAP */
+#define IEEE80211_MAX_FIXED_RATES		4
+struct ieee80211_fixed_rate {
+	enum ieee80211_fixed_rate_status	status;
+	u_int8_t				rate[IEEE80211_MAX_FIXED_RATES];
+	u_int32_t				retries;
+	u_int32_t				flag;
+#define IEEE80211_FIXED_RATE_F_11AC		0x1
+};
+
+/*
+ * 802.11g protection mode.
+ */
+enum ieee80211_protmode {
+	IEEE80211_PROT_NONE	= 0,	/* no protection */
+	IEEE80211_PROT_CTSONLY	= 1,	/* CTS to self */
+	IEEE80211_PROT_RTSCTS	= 2,	/* RTS-CTS */
+};
+
+/*
+ * Authentication mode.
+ */
+enum ieee80211_authmode {
+	IEEE80211_AUTH_NONE	= 0,
+	IEEE80211_AUTH_OPEN	= 1,	/* open */
+	IEEE80211_AUTH_SHARED	= 2,	/* shared-key */
+	IEEE80211_AUTH_8021X	= 3,	/* 802.1x */
+	IEEE80211_AUTH_AUTO	= 4,	/* auto-select/accept */
+	/* NB: these are used only for ioctls */
+	IEEE80211_AUTH_WPA	= 5,	/* WPA/RSN w/ 802.1x/PSK */
+};
+
+/*
+ * Roaming mode is effectively who controls the operation
+ * of the 802.11 state machine when operating as a station.
+ * State transitions are controlled either by the driver
+ * (typically when management frames are processed by the
+ * hardware/firmware), the host (auto/normal operation of
+ * the 802.11 layer), or explicitly through ioctl requests
+ * when applications like wpa_supplicant want control.
+ */
+enum ieee80211_roamingmode {
+	IEEE80211_ROAMING_DEVICE= 0,	/* driver/hardware control */
+	IEEE80211_ROAMING_AUTO	= 1,	/* 802.11 layer control */
+	IEEE80211_ROAMING_MANUAL= 2,	/* application control */
+};
+
+/*
+ * Scanning mode controls station scanning work; this is
+ * used only when roaming mode permits the host to select
+ * the bss to join/channel to use.
+ */
+enum ieee80211_scanmode {
+	IEEE80211_SCAN_DEVICE	= 0,	/* driver/hardware control */
+	IEEE80211_SCAN_BEST	= 1,	/* 802.11 layer selects best */
+	IEEE80211_SCAN_FIRST	= 2,	/* take first suitable candidate */
+};
+
+/* ba state describes the block ack state. block ack should only be sent if
+ * ba state is set to established
+ */
+enum ieee80211_ba_state	{
+	IEEE80211_BA_NOT_ESTABLISHED = 0,
+	IEEE80211_BA_ESTABLISHED = 1,
+	IEEE80211_BA_REQUESTED = 2,
+	IEEE80211_BA_FAILED = 5,
+	IEEE80211_BA_BLOCKED = 6,
+};
+
+#define IEEE80211_BA_IS_COMPLETE(_state) (\
+	(_state) == IEEE80211_BA_ESTABLISHED || \
+	(_state) == IEEE80211_BA_BLOCKED || \
+	(_state) == IEEE80211_BA_FAILED) \
+
+/* ba type describes the block acknowledgement type */
+enum ieee80211_ba_type {
+	IEEE80211_BA_DELAYED = 0,
+	IEEE80211_BA_IMMEDIATE = 1,
+};
+
+#define IEEE80211_OPER_CLASS_MAX	256
+#define	IEEE80211_OPER_CLASS_BYTES	32
+#define	IEEE80211_OPER_CLASS_BYTES_24G	8
+
+#define IEEE80211_OC_BEHAV_DFS_50_100		0x0001
+#define IEEE80211_OC_BEHAV_NOMADIC		0x0002
+#define IEEE80211_OC_BEHAV_LICEN_EXEP		0x0004
+#define IEEE80211_OC_BEHAV_CCA_ED		0x0008
+#define IEEE80211_OC_BEHAV_ITS_NONMOB		0x0010
+#define IEEE80211_OC_BEHAV_ITS_MOBILE		0x0020
+#define IEEE80211_OC_BEHAV_CHAN_LOWWER		0x0040
+#define IEEE80211_OC_BEHAV_CHAN_UPPER		0x0080
+#define IEEE80211_OC_BEHAV_80PLUS		0x0100
+#define IEEE80211_OC_BEHAV_EIRP_TXPOWENV	0x0200
+
+#define IEEE80211_OBSS_CHAN_PRI20		0x01
+#define IEEE80211_OBSS_CHAN_SEC20		0x02
+#define IEEE80211_OBSS_CHAN_PRI40		0x04
+#define IEEE80211_OBSS_CHAN_SEC40		0x08
+
+#define IEEE80211_OBSS_CHAN_MASK		0x0F
+
+#define IEEE80211_IS_OBSS_CHAN_SECONDARY(_c) \
+	(((_c) & IEEE80211_OBSS_CHAN_SEC20) == IEEE80211_OBSS_CHAN_SEC20)
+
+enum ieee80211_neighborhood_type {
+	IEEE80211_NEIGHBORHOOD_TYPE_SPARSE = 0,
+	IEEE80211_NEIGHBORHOOD_TYPE_DENSE = 1,
+	IEEE80211_NEIGHBORHOOD_TYPE_VERY_DENSE = 2,
+	IEEE80211_NEIGHBORHOOD_TYPE_MAX	= IEEE80211_NEIGHBORHOOD_TYPE_VERY_DENSE,
+	IEEE80211_NEIGHBORHOOD_TYPE_UNKNOWN = IEEE80211_NEIGHBORHOOD_TYPE_MAX + 1
+};
+
+#define IEEE80211_NEIGHBORHOOD_TYPE_SPARSE_DFT_THRSHLD	3
+#define IEEE80211_NEIGHBORHOOD_TYPE_DENSE_DFT_THRSHLD	15
+
+/* power index definition */
+enum ieee80211_power_index_beamforming {
+	PWR_IDX_BF_OFF = 0,
+	PWR_IDX_BF_ON = 1,
+	PWR_IDX_BF_MAX = 2
+};
+
+enum ieee80211_power_index_spatial_stream {
+	PWR_IDX_1SS = 0,
+	PWR_IDX_2SS = 1,
+	PWR_IDX_3SS = 2,
+	PWR_IDX_4SS = 3,
+	PWR_IDX_SS_MAX = 4
+};
+
+enum ieee80211_power_index_bandwidth {
+	PWR_IDX_20M = 0,
+	PWR_IDX_40M = 1,
+	PWR_IDX_80M = 2,
+	PWR_IDX_BW_MAX = 3
+};
+
+/* MFP capabilities (ieee80211w) */
+enum ieee80211_mfp_capabilities {
+	IEEE80211_MFP_NO_PROTECT = 0,
+	IEEE80211_MFP_PROTECT_CAPABLE = 2,
+	IEEE80211_MFP_PROTECT_REQUIRE = 3
+};
+
+/*
+ * Channels are specified by frequency and attributes.
+ */
+struct ieee80211_channel {
+	u_int16_t ic_freq;	/* setting in MHz */
+	u_int32_t ic_flags;	/* see below */
+	u_int8_t ic_ieee;	/* IEEE channel number */
+	int8_t ic_maxregpower;	/* maximum regulatory tx power in dBm */
+	int8_t ic_maxpower;	/* maximum tx power in dBm for the current bandwidth with beam-forming off */
+	int8_t ic_minpower;	/* minimum tx power in dBm */
+	int8_t ic_maxpower_normal;	/* backup max tx power for short-range workaround */
+	int8_t ic_minpower_normal;	/* backup min tx power for short-range workaround */
+	int8_t ic_maxpower_table[PWR_IDX_BF_MAX][PWR_IDX_SS_MAX][PWR_IDX_BW_MAX];	/* the maximum powers for different cases */
+	u_int32_t ic_radardetected; /* number that radar signal has been detected on this channel */
+	u_int8_t ic_center_f_40MHz;
+	u_int8_t ic_center_f_80MHz;
+	u_int8_t ic_center_f_160MHz;
+	u_int32_t ic_ext_flags;
+};
+
+#define IEEE80211_2GBAND_START_FREQ	2407
+#define IEEE80211_4GBAND_START_FREQ	4000
+#define IEEE80211_5GBAND_START_FREQ	5000
+
+#define	IEEE80211_CHAN_MAX	255
+#define	IEEE80211_CHAN_BYTES	32	/* howmany(IEEE80211_CHAN_MAX, NBBY) */
+#define	IEEE80211_CHAN_ANY	0xffff	/* token for ``any channel'' */
+#define	IEEE80211_CHAN_ANYC 	((struct ieee80211_channel *) IEEE80211_CHAN_ANY)
+
+#define IEEE80211_SUBCHANNELS_OF_20MHZ	1
+#define IEEE80211_SUBCHANNELS_OF_40MHZ	2
+#define IEEE80211_SUBCHANNELS_OF_80MHZ	4
+
+#define IEEE80211_MIN_DUAL_EXT_CHAN_24G		5
+#define IEEE80211_MAX_DUAL_EXT_CHAN_24G		9
+#define IEEE80211_MAX_DUAL_EXT_CHAN_24G_US	7
+
+#define	IEEE80211_RADAR_11HCOUNT		1
+#define IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT	10
+#define	IEEE80211_RADAR_TEST_MUTE_CHAN	36	/* Move to channel 36 for mute test */
+
+/* bits 0-3 are for private use by drivers */
+/* channel attributes */
+#define IEEE80211_CHAN_TURBO	0x00000010	/* Turbo channel */
+#define IEEE80211_CHAN_CCK	0x00000020	/* CCK channel */
+#define IEEE80211_CHAN_OFDM	0x00000040	/* OFDM channel */
+#define IEEE80211_CHAN_2GHZ	0x00000080	/* 2 GHz spectrum channel. */
+#define IEEE80211_CHAN_5GHZ	0x00000100	/* 5 GHz spectrum channel */
+#define IEEE80211_CHAN_PASSIVE	0x00000200	/* Only passive scan allowed */
+#define IEEE80211_CHAN_DYN	0x00000400	/* Dynamic CCK-OFDM channel */
+#define IEEE80211_CHAN_GFSK	0x00000800	/* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_RADAR	0x00001000	/* Status: Radar found on channel */
+#define IEEE80211_CHAN_STURBO	0x00002000	/* 11a static turbo channel only */
+#define IEEE80211_CHAN_HALF	0x00004000	/* Half rate channel */
+#define IEEE80211_CHAN_QUARTER	0x00008000	/* Quarter rate channel */
+#define IEEE80211_CHAN_HT20	0x00010000	/* HT 20 channel */
+#define IEEE80211_CHAN_HT40U	0x00020000	/* HT 40 with ext channel above */
+#define IEEE80211_CHAN_HT40D	0x00040000	/* HT 40 with ext channel below */
+#define IEEE80211_CHAN_HT40	0x00080000	/* HT 40 with ext channel above/below */
+#define IEEE80211_CHAN_DFS	0x00100000	/* Configuration: DFS-required channel */
+#define IEEE80211_CHAN_DFS_CAC_DONE	0x00200000     /* Status: CAC completed */
+#define IEEE80211_CHAN_VHT80		0x00400000	/* VHT 80 */
+#define IEEE80211_CHAN_DFS_OCAC_DONE	0x00800000	/* Status: Off-channel CAC completed */
+#define IEEE80211_CHAN_DFS_CAC_IN_PROGRESS	0x01000000	/* Status: Valid CAC is in progress */
+#define IEEE80211_CHAN_WEATHER		0x02000000	/* Configuration: weather channel for 20MHz */
+#define IEEE80211_CHAN_WEATHER_40M	0x04000000	/* Configuration: weather channel for 40MHz */
+#define IEEE80211_CHAN_WEATHER_80M	0x08000000	/* Configuration: weather channel for 80MHz */
+
+#define IEEE80211_DEFAULT_2_4_GHZ_CHANNEL	6
+#define IEEE80211_DEFAULT_5_GHZ_CHANNEL		36
+
+#define IEEE80211_MAX_2_4_GHZ_CHANNELS	13
+#define IEEE80211_MAX_5_GHZ_CHANNELS	30
+#define IEEE80211_MAX_DUAL_CHANNELS     (IEEE80211_MAX_2_4_GHZ_CHANNELS + IEEE80211_MAX_5_GHZ_CHANNELS)
+#define CHIPID_2_4_GHZ					0
+#define CHIPID_5_GHZ					1
+#define CHIPID_DUAL                                     2
+
+/*11AC - 40MHZ flags */
+#define IEEE80211_CHAN_VHT40U	IEEE80211_CHAN_HT40U	/* VHT 40 with ext channel above */
+#define IEEE80211_CHAN_VHT40D	IEEE80211_CHAN_HT40D	/* VHT 40 with ext channel below */
+#define IEEE80211_CHAN_VHT40	IEEE80211_CHAN_HT40	/* VHT 40 with ext channel above/below */
+/*11AC - 20MHZ flags */
+#define IEEE80211_CHAN_VHT20	IEEE80211_CHAN_HT20	/* VHT 20 channel */
+
+/* below are channel ext attributes(ic_ext_flags) */
+/* 11AC - 80MHZ flags */
+#define IEEE80211_CHAN_VHT80_LL	0x00000001
+#define IEEE80211_CHAN_VHT80_LU	0x00000002
+#define IEEE80211_CHAN_VHT80_UL	0x00000004
+#define IEEE80211_CHAN_VHT80_UU	0x00000008
+#define IEEE80211_CHAN_TDLS_OFF_CHAN	0x00000010
+
+/*
+ * Useful combinations of channel characteristics.
+ */
+#define	IEEE80211_CHAN_FHSS \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_GFSK)
+#define	IEEE80211_CHAN_A \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM)
+#define	IEEE80211_CHAN_B \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK)
+#define	IEEE80211_CHAN_PUREG \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM)
+#define	IEEE80211_CHAN_G \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN)
+#define IEEE80211_CHAN_108A \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO)
+#define	IEEE80211_CHAN_108G \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_TURBO)
+#define	IEEE80211_CHAN_ST \
+	(IEEE80211_CHAN_108A | IEEE80211_CHAN_STURBO)
+#define	IEEE80211_CHAN_11N \
+	(IEEE80211_CHAN_HT20)
+#define	IEEE80211_CHAN_11NG \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_HT20)
+#define	IEEE80211_CHAN_11NA \
+	(IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_HT20)
+#define	IEEE80211_CHAN_11NG_HT40U \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_HT20 | \
+	 IEEE80211_CHAN_HT40U)
+#define	IEEE80211_CHAN_11NG_HT40D \
+	(IEEE80211_CHAN_2GHZ |IEEE80211_CHAN_OFDM |  IEEE80211_CHAN_HT20 | \
+	 IEEE80211_CHAN_HT40D)
+#define	IEEE80211_CHAN_11NA_HT40U \
+	(IEEE80211_CHAN_5GHZ |IEEE80211_CHAN_OFDM |  IEEE80211_CHAN_HT20 | \
+	 IEEE80211_CHAN_HT40U)
+#define	IEEE80211_CHAN_11NA_HT40D \
+	(IEEE80211_CHAN_5GHZ |IEEE80211_CHAN_OFDM |  IEEE80211_CHAN_HT20 | \
+	 IEEE80211_CHAN_HT40D)
+#define	IEEE80211_CHAN_11NG_HT40 \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_HT20 | \
+	 IEEE80211_CHAN_HT40)
+#define	IEEE80211_CHAN_11NA_HT40 \
+	(IEEE80211_CHAN_5GHZ |IEEE80211_CHAN_OFDM |  IEEE80211_CHAN_HT20 | \
+	 IEEE80211_CHAN_HT40)
+
+#define IEEE80211_CHAN_11AC \
+        (IEEE80211_CHAN_5GHZ |IEEE80211_CHAN_OFDM |  IEEE80211_CHAN_VHT20 )
+#define IEEE80211_CHAN_11AC_VHT40 \
+	(IEEE80211_CHAN_5GHZ |IEEE80211_CHAN_OFDM |  IEEE80211_CHAN_VHT20 | \
+	 IEEE80211_CHAN_VHT40)
+#define IEEE80211_CHAN_11AC_VHT80 \
+	(IEEE80211_CHAN_5GHZ |IEEE80211_CHAN_OFDM |  IEEE80211_CHAN_VHT20 | \
+	 IEEE80211_CHAN_VHT40 | IEEE80211_CHAN_VHT80 )
+
+#define	IEEE80211_CHAN_ALL \
+	(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HT20 | \
+	 IEEE80211_CHAN_HT40U | IEEE80211_CHAN_HT40D | IEEE80211_CHAN_HT40| \
+	 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_DYN| \
+	 IEEE80211_CHAN_VHT20 | IEEE80211_CHAN_VHT40 | IEEE80211_CHAN_VHT80)
+
+#define	IEEE80211_CHAN_ALLTURBO \
+	(IEEE80211_CHAN_ALL | IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)
+
+#define IEEE80211_CHAN_ANYN \
+	(IEEE80211_CHAN_HT20 | IEEE80211_CHAN_HT40U | IEEE80211_CHAN_HT40D | \
+		IEEE80211_CHAN_HT40 )
+
+#define	IEEE80211_CHAN_HT40_DUAL_EXT \
+	(IEEE80211_CHAN_HT40U | IEEE80211_CHAN_HT40D)
+
+#define	IEEE80211_IS_CHAN_CACDONE(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_DFS_CAC_DONE) != 0)
+#define	IEEE80211_IS_CHAN_CAC_IN_PROGRESS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_DFS_CAC_IN_PROGRESS) != 0)
+
+#define IEEE80211_IS_CHAN_FHSS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_FHSS) == IEEE80211_CHAN_FHSS)
+#define	IEEE80211_IS_CHAN_A(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_A) == IEEE80211_CHAN_A)
+#define	IEEE80211_IS_CHAN_B(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_B) == IEEE80211_CHAN_B)
+#define	IEEE80211_IS_CHAN_PUREG(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_PUREG) == IEEE80211_CHAN_PUREG)
+#define	IEEE80211_IS_CHAN_G(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_G) == IEEE80211_CHAN_G)
+#define	IEEE80211_IS_CHAN_ANYG(_c) \
+	(IEEE80211_IS_CHAN_PUREG(_c) || IEEE80211_IS_CHAN_G(_c))
+#define	IEEE80211_IS_CHAN_ST(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_ST) == IEEE80211_CHAN_ST)
+#define	IEEE80211_IS_CHAN_108A(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_108A) == IEEE80211_CHAN_108A)
+#define	IEEE80211_IS_CHAN_108G(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_108G) == IEEE80211_CHAN_108G)
+
+#define	IEEE80211_IS_CHAN_2GHZ(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_2GHZ) != 0)
+#define	IEEE80211_IS_CHAN_5GHZ(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_5GHZ) != 0)
+#define	IEEE80211_IS_CHAN_OFDM(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_OFDM) != 0)
+#define	IEEE80211_IS_CHAN_CCK(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_CCK) != 0)
+#define	IEEE80211_IS_CHAN_GFSK(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_GFSK) != 0)
+#define	IEEE80211_IS_CHAN_TURBO(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_TURBO) != 0)
+#define	IEEE80211_IS_CHAN_STURBO(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_STURBO) != 0)
+#define	IEEE80211_IS_CHAN_DTURBO(_c) \
+	(((_c)->ic_flags & \
+	(IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)) == IEEE80211_CHAN_TURBO)
+#define	IEEE80211_IS_CHAN_HALF(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HALF) != 0)
+#define	IEEE80211_IS_CHAN_QUARTER(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_QUARTER) != 0)
+#define	IEEE80211_IS_CHAN_11N(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11N) == IEEE80211_CHAN_11N)
+#define	IEEE80211_IS_CHAN_11NG(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11NG) == IEEE80211_CHAN_11NG)
+#define	IEEE80211_IS_CHAN_11NA(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11NA) == IEEE80211_CHAN_11NA)
+#define IEEE80211_IS_CHAN_11AC(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_11AC) == IEEE80211_CHAN_11AC)
+#define	IEEE80211_IS_CHAN_HT40PLUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40U) == IEEE80211_CHAN_HT40U)
+#define	IEEE80211_IS_CHAN_HT40MINUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_HT40D) == IEEE80211_CHAN_HT40D)
+#define	IEEE80211_IS_CHAN_HT40(_c) \
+	(IEEE80211_IS_CHAN_HT40PLUS((_c)) || IEEE80211_IS_CHAN_HT40MINUS((_c)))
+#define	IEEE80211_IS_CHAN_11NG_HT40PLUS(_c) \
+	(IEEE80211_IS_CHAN_11NG((_c)) && IEEE80211_IS_CHAN_HT40PLUS((_c)))
+#define	IEEE80211_IS_CHAN_11NG_HT40MINUS(_c) \
+	(IEEE80211_IS_CHAN_11NG((_c)) && IEEE80211_IS_CHAN_HT40MINUS((_c)))
+#define	IEEE80211_IS_CHAN_11NA_HT40PLUS(_c) \
+	(IEEE80211_IS_CHAN_11NA((_c)) && IEEE80211_IS_CHAN_HT40PLUS((_c)))
+#define	IEEE80211_IS_CHAN_11NA_HT40MINUS(_c) \
+	(IEEE80211_IS_CHAN_11NA((_c)) && IEEE80211_IS_CHAN_HT40MINUS((_c)))
+#define IEEE80211_IS_CHAN_ANYN(_c) 1
+	/* NOTE(review): hardcoded to true — the real test (((_c)->ic_flags & IEEE80211_CHAN_ANYN)) is disabled; confirm this is intentional before relying on this predicate */
+
+#define	IEEE80211_IS_CHAN_VHT40PLUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_VHT40U) == IEEE80211_CHAN_VHT40U)
+#define	IEEE80211_IS_CHAN_VHT40MINUS(_c) \
+	(((_c)->ic_flags & IEEE80211_CHAN_VHT40D) == IEEE80211_CHAN_VHT40D)
+#define IEEE80211_IS_CHAN_VHT40(_c) \
+	(IEEE80211_IS_CHAN_VHT40PLUS(_c) || IEEE80211_IS_CHAN_VHT40MINUS(_c))
+#define IEEE80211_IS_CHAN_11AC_VHT40PLUS(_c) \
+	(IEEE80211_IS_CHAN_11AC(_c) && IEEE80211_IS_CHAN_VHT40PLUS(_c))
+#define IEEE80211_IS_CHAN_11AC_VHT40MINUS(_c) \
+	(IEEE80211_IS_CHAN_11AC(_c) && IEEE80211_IS_CHAN_VHT40MINUS(_c))
+
+#define IEEE80211_IS_CHAN_VHT80_EDGEPLUS(_c) \
+	(((_c)->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) == IEEE80211_CHAN_VHT80_LL)
+#define IEEE80211_IS_CHAN_VHT80_CNTRPLUS(_c) \
+	(((_c)->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) == IEEE80211_CHAN_VHT80_LU)
+#define IEEE80211_IS_CHAN_VHT80_CNTRMINUS(_c) \
+	(((_c)->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) == IEEE80211_CHAN_VHT80_UL)
+#define IEEE80211_IS_CHAN_VHT80_EDGEMINUS(_c) \
+	(((_c)->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) == IEEE80211_CHAN_VHT80_UU)
+#define IEEE80211_IS_CHAN_VHT80(_c) \
+	(IEEE80211_IS_CHAN_VHT80_EDGEPLUS(_c) || IEEE80211_IS_CHAN_VHT80_EDGEMINUS(_c) || \
+	 IEEE80211_IS_CHAN_VHT80_CNTRPLUS(_c) || IEEE80211_IS_CHAN_VHT80_CNTRMINUS(_c))
+#define IEEE80211_IS_CHAN_11AC_VHT80_EDGEPLUS(_c) \
+	(IEEE80211_IS_CHAN_11AC(_c) && IEEE80211_IS_CHAN_VHT80_EDGEPLUS(_c))
+#define IEEE80211_IS_CHAN_11AC_VHT80_CNTRPLUS(_c) \
+	(IEEE80211_IS_CHAN_11AC(_c) && IEEE80211_IS_CHAN_VHT80_CNTRPLUS(_c))
+#define IEEE80211_IS_CHAN_11AC_VHT80_CNTRMINUS(_c) \
+	(IEEE80211_IS_CHAN_11AC(_c) && IEEE80211_IS_CHAN_VHT80_CNTRMINUS(_c))
+#define IEEE80211_IS_CHAN_11AC_VHT80_EDGEMINUS(_c) \
+	(IEEE80211_IS_CHAN_11AC(_c) && IEEE80211_IS_CHAN_VHT80_EDGEMINUS(_c))
+
+/* mode specific macros */
+
+#define IEEE80211_IS_11NG_MODE(_mode) \
+		(((_mode) == IEEE80211_MODE_11NG) ||  \
+		 ((_mode) == IEEE80211_MODE_11NG_HT40PLM))
+
+#define IEEE80211_IS_11NA_MODE(_mode) \
+		(((_mode) == IEEE80211_MODE_11NA) ||  \
+		 ((_mode) == IEEE80211_MODE_11NA_HT40PM) )
+
+#define IEEE80211_IS_11N_MODE(_mode) \
+			(IEEE80211_IS_11NA_MODE((_mode)) || IEEE80211_IS_11NG_MODE((_mode)))
+
+/* ni_chan encoding for FH phy */
+#define	IEEE80211_FH_CHANMOD		80
+#define	IEEE80211_FH_CHAN(set,pat)	(((set) - 1) * IEEE80211_FH_CHANMOD + (pat))
+#define	IEEE80211_FH_CHANSET(chan)	((chan) / IEEE80211_FH_CHANMOD + 1)
+#define	IEEE80211_FH_CHANPAT(chan)	((chan) % IEEE80211_FH_CHANMOD)
+
+#define IEEE80211_HTCAP_TXBF_CAP_LEN	4
+
+/* Peer RTS config */
+#define IEEE80211_PEER_RTS_OFF		0
+#define IEEE80211_PEER_RTS_PMP		1
+#define IEEE80211_PEER_RTS_DYN		2
+#define IEEE80211_PEER_RTS_MAX		2
+#define IEEE80211_PEER_RTS_DEFAULT	IEEE80211_PEER_RTS_DYN
+
+/* Dynamic WMM */
+#define IEEE80211_DYN_WMM_OFF			0
+#define IEEE80211_DYN_WMM_ON			1
+#define IEEE80211_DYN_WMM_DEFAULT		IEEE80211_DYN_WMM_ON
+
+#define IEEE80211_DYN_WMM_LOCAL_AIFS_DELTA	-2
+#define IEEE80211_DYN_WMM_LOCAL_CWMIN_DELTA	-2
+#define IEEE80211_DYN_WMM_LOCAL_CWMAX_DELTA	-3
+#define IEEE80211_DYN_WMM_LOCAL_AIFS_MIN	{3, 3, 2, 1}
+#define IEEE80211_DYN_WMM_LOCAL_CWMIN_MIN	{2, 4, 2, 2}
+#define IEEE80211_DYN_WMM_LOCAL_CWMAX_MIN	{4, 6, 3, 3}
+#define IEEE80211_DYN_WMM_BSS_AIFS_DELTA	2
+#define IEEE80211_DYN_WMM_BSS_CWMIN_DELTA	2
+#define IEEE80211_DYN_WMM_BSS_CWMAX_DELTA	3
+#define IEEE80211_DYN_WMM_BSS_AIFS_MAX		{4, 5, 4, 3}
+#define IEEE80211_DYN_WMM_BSS_CWMIN_MAX		{4, 6, 4, 3}
+#define IEEE80211_DYN_WMM_BSS_CWMAX_MAX		{6, 8, 6, 5}
+
+/*
+ * 802.11 rate set.
+ */
+#define	IEEE80211_RATE_SIZE	8		/* 802.11 standard */
+#define	IEEE80211_AG_RATE_MAXSIZE	12		/* Non 11n Rates */
+
+#define	IEEE80211_RATE_MAXSIZE		30		/* max rates we'll handle */
+#define	IEEE80211_HT_RATE_MAXSIZE	77		/* Total number of 802.11n rates */
+#define	IEEE80211_HT_RATE_SIZE		128
+#define IEEE80211_SANITISE_RATESIZE(_rsz) \
+	((_rsz > IEEE80211_RATE_MAXSIZE) ? IEEE80211_RATE_MAXSIZE : _rsz)
+
+
+/* For legacy hardware - leaving it as is for now */
+
+#define IEEE80211_RATE_MCS	0x8000
+#define IEEE80211_RATE_MCS_VAL	0x7FFF
+
+/* Extract byte <idx> (0 = LSB) of <val>. NOTE(review): args are unparenthesized — pass simple terms only; a prior form without the inner parens reduced to (val & 0xff). */
+#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
+
+/*
+ * 11n A-MPDU & A-MSDU limits . XXX
+ */
+#define IEEE80211_AMPDU_LIMIT_MIN   (1 * 1024)
+#define IEEE80211_AMPDU_LIMIT_MAX   (64 * 1024 - 1)
+#define IEEE80211_AMSDU_LIMIT_MAX   4096
+
+
+/*
+ * 11ac MPDU size limit
+ */
+#define IEEE80211_MPDU_VHT_1K		1500
+#define IEEE80211_MPDU_VHT_4K		3895
+#define IEEE80211_MPDU_VHT_8K		7991
+#define IEEE80211_MPDU_VHT_11K		11454
+#define IEEE80211_MPDU_ENCAP_OVERHEAD_MAX	64 /* enough for mpdu header 36 + crypto 20 + fcs 4 */
+
+/*
+ * 11n and 11ac AMSDU sizes
+ */
+#define IEEE80211_AMSDU_NONE		0
+#define IEEE80211_AMSDU_HT_4K		3839
+#define IEEE80211_AMSDU_HT_8K		7935
+#define IEEE80211_AMSDU_VHT_1K		(IEEE80211_MPDU_VHT_1K - IEEE80211_MPDU_ENCAP_OVERHEAD_MAX)
+#define IEEE80211_AMSDU_VHT_4K		(IEEE80211_MPDU_VHT_4K - IEEE80211_MPDU_ENCAP_OVERHEAD_MAX)
+#define IEEE80211_AMSDU_VHT_8K		(IEEE80211_MPDU_VHT_8K - IEEE80211_MPDU_ENCAP_OVERHEAD_MAX)
+#define IEEE80211_AMSDU_VHT_11K		(IEEE80211_MPDU_VHT_11K - IEEE80211_MPDU_ENCAP_OVERHEAD_MAX)
+
+/*
+ * 11n MCS set limits
+ */
+#define IEEE80211_HT_MAXMCS_SET				10
+#define IEEE80211_HT_MAXMCS_SET_SUPPORTED		10
+#define IEEE80211_HT_MAXMCS_BASICSET_SUPPORTED		2
+#define IEEE80211_MCS_PER_STREAM			8
+/*
+ * B0-B2: MCS index, B3-B6: MCS set index, B8: BASIC RATE
+ */
+#define IEEE80211_HT_BASIC_RATE		0x80
+#define IEEE80211_HT_MCS_MASK		0x07
+#define IEEE80211_HT_MCS_SET_MASK	0x78
+#define IEEE80211_HT_RATE_TABLE_IDX_MASK	0x7F
+
+#define IEEE80211_HT_MCS_VALUE(_v) \
+		((_v) & IEEE80211_HT_MCS_MASK)
+
+#define IEEE80211_HT_MCS_SET_IDX(_v) \
+		(((_v) & IEEE80211_HT_MCS_SET_MASK) >> 3)
+
+#define IEEE80211_HT_IS_BASIC_RATE(_v) \
+		(((_v) & IEEE80211_HT_BASIC_RATE) == IEEE80211_HT_BASIC_RATE)
+
+/* index number in the set will be (_i - 1) if (_i != 0) */
+#define IEEE80211_HT_MCS_IDX(_m,_i) \
+		{ \
+			u_int8_t temp = (_m); \
+			_i = 0; \
+			while (temp) \
+			{ \
+				temp = temp >> 1; \
+				_i++; \
+			} \
+			if(_i) \
+				_i--; \
+			else \
+				_i = 0xFF; \
+		}
+
+/* rate table index = (MCS set index << 3) + MCS index */
+#define IEEE80211_HT_RATE_TABLE_IDX(_s,_i) \
+		(((_s) << 3) + (_i))
+
+/* Supported rate label */
+#define IEEE80211_RATE_11MBPS	22
+
+#if 0
+/* integer portion of HT rates */
+u_int16_t ht_rate_table_20MHz_400[] = {
+							7,
+							14,
+							21,
+							28,
+							43,
+							57,
+							65,
+							72,
+							14,
+							28,
+							43,
+							57,
+							86,
+							115,
+							130,
+							144
+						};
+
+u_int16_t ht_rate_table_20MHz_800[] = {
+							6,
+							13,
+							19,
+							26,
+							39,
+							52,
+							58,
+							65,
+							13,
+							26,
+							39,
+							52,
+							78,
+							104,
+							117,
+							130
+						};
+
+u_int16_t ht_rate_table_40MHz_400[] = {
+							15,
+							30,
+							45,
+							60,
+							90,
+							120,
+							135,
+							150,
+							30,
+							60,
+							90,
+							120,
+							180,
+							240,
+							270,
+							300
+						};
+
+u_int16_t ht_rate_table_40MHz_800[] = {
+							13,
+							27,
+							40,
+							54,
+							81,
+							108,
+							121,
+							135,
+							27,
+							54,
+							81,
+							108,
+							162,
+							216,
+							243,
+							270
+						};
+#endif
+
+struct ieee80211_rateset {
+	u_int8_t		rs_legacy_nrates; /* Number of legacy rates */
+	u_int8_t		rs_nrates; /* Total rates = Legacy + 11n */
+	u_int8_t		rs_rates[IEEE80211_RATE_MAXSIZE];
+};
+
+struct ieee80211_ht_rateset {
+	u_int8_t		rs_legacy_nrates; /* Number of legacy rates */
+	u_int8_t		rs_nrates; /* Total rates = Legacy + 11n */
+	u_int8_t		rs_rates[IEEE80211_HT_RATE_MAXSIZE];
+};
+
+struct ieee80211_roam {
+	int8_t			rssi11a;	/* rssi thresh for 11a bss */
+	int8_t			rssi11b;	/* for 11g sta in 11b bss */
+	int8_t			rssi11bOnly;	/* for 11b sta */
+	u_int8_t		pad1;
+	u_int8_t		rate11a;	/* rate thresh for 11a bss */
+	u_int8_t		rate11b;	/* for 11g sta in 11b bss */
+	u_int8_t		rate11bOnly;	/* for 11b sta */
+	u_int8_t		pad2;
+};
+struct ieee80211_htcap {
+	u_int16_t		cap;		/* HT capabilities */
+	u_int8_t		numtxspstr; /* Number of Tx spatial streams */
+	u_int8_t		numrxstbcstr;	/* Number of Rx stbc streams */
+	u_int8_t		pwrsave;	/* HT power save mode */
+	u_int8_t		mpduspacing;	/* MPDU density */
+	u_int16_t		maxmsdu;	/* Max MSDU size */
+	u_int16_t		maxampdu;	/* maximum rx A-MPDU factor */
+	u_int8_t		mcsset[IEEE80211_HT_MAXMCS_SET_SUPPORTED]; /* HT MCS set */
+	u_int16_t		maxdatarate;	/* HT max data rate */
+	u_int16_t		extcap;		/* HT extended capability */
+	u_int8_t		mcsparams;	/* HT MCS params */
+	u_int8_t		hc_txbf[IEEE80211_HTCAP_TXBF_CAP_LEN];	/* HT transmit beamforming capabilities */
+} __packed;
+
+struct ieee80211_htinfo {
+	u_int8_t		ctrlchannel;	/* control channel */
+	u_int8_t		byte1;		/* ht ie byte 1 */
+	u_int8_t		byte2;		/* ht ie byte 2 */
+	u_int8_t		byte3;		/* ht ie byte 3 */
+	u_int8_t		byte4;		/* ht ie byte 4 */
+	u_int8_t		byte5;		/* ht ie byte 5 */
+	u_int8_t		sigranularity;	/* signal granularity */
+	u_int8_t		choffset;	/* external channel offset */
+	u_int8_t		opmode;		/* operational mode */
+	u_int8_t		basicmcsset[IEEE80211_HT_MAXMCS_BASICSET_SUPPORTED]; /* basic MCS set */
+} __packed;
+
+/* VHT capabilities MIB */
+
+/* Maximum MPDU Length B0-1 */
+enum ieee80211_vht_maxmpdu {
+	IEEE80211_VHTCAP_MAX_MPDU_3895 = 0,
+	IEEE80211_VHTCAP_MAX_MPDU_7991,
+	IEEE80211_VHTCAP_MAX_MPDU_11454,
+	IEEE80211_VHTCAP_MAX_MPDU_RESERVED,
+};
+
+/* Supported Channel Width Set B2-3 */
+enum ieee80211_vht_chanwidth {
+	IEEE80211_VHTCAP_CW_80M_ONLY = 0,
+	IEEE80211_VHTCAP_CW_160M,
+	IEEE80211_VHTCAP_CW_160_AND_80P80M,
+	IEEE80211_VHTCAP_CW_RESERVED,
+};
+
+/* RX STBC B8-10 */
+enum ieee80211_vht_rxstbc {
+	IEEE80211_VHTCAP_RX_STBC_NA = 0,
+	IEEE80211_VHTCAP_RX_STBC_UPTO_1,
+	IEEE80211_VHTCAP_RX_STBC_UPTO_2,
+	IEEE80211_VHTCAP_RX_STBC_UPTO_3,
+	IEEE80211_VHTCAP_RX_STBC_UPTO_4,
+};
+
+/* RX STS B13-15 */
+enum ieee80211_vht_rxsts {
+	IEEE80211_VHTCAP_RX_STS_1 = 0,
+	IEEE80211_VHTCAP_RX_STS_2,
+	IEEE80211_VHTCAP_RX_STS_3,
+	IEEE80211_VHTCAP_RX_STS_4,
+	IEEE80211_VHTCAP_RX_STS_5,
+	IEEE80211_VHTCAP_RX_STS_6,
+	IEEE80211_VHTCAP_RX_STS_7,
+	IEEE80211_VHTCAP_RX_STS_8,
+	IEEE80211_VHTCAP_RX_STS_INVALID = 0xff
+};
+
+/* SOUNDING DIM B16-18 */
+enum ieee80211_vht_numsnd {
+	IEEE80211_VHTCAP_SNDDIM_1 = 0,
+	IEEE80211_VHTCAP_SNDDIM_2,
+	IEEE80211_VHTCAP_SNDDIM_3,
+	IEEE80211_VHTCAP_SNDDIM_4,
+	IEEE80211_VHTCAP_SNDDIM_5,
+	IEEE80211_VHTCAP_SNDDIM_6,
+	IEEE80211_VHTCAP_SNDDIM_7,
+	IEEE80211_VHTCAP_SNDDIM_8
+};
+
+/* Maximum A-MPDU Length exponent B23-25 */
+/* 2^(13 + Max A-MPDU) -1 */
+enum ieee80211_vht_maxampduexp {
+	IEEE80211_VHTCAP_MAX_A_MPDU_8191,		/* (2^13) -1 */
+	IEEE80211_VHTCAP_MAX_A_MPDU_16383,	/* (2^14) -1 */
+	IEEE80211_VHTCAP_MAX_A_MPDU_32767,	/* (2^15) -1 */
+	IEEE80211_VHTCAP_MAX_A_MPDU_65535,	/* (2^16) -1 */
+	IEEE80211_VHTCAP_MAX_A_MPDU_131071,	/* (2^17) -1 */
+	IEEE80211_VHTCAP_MAX_A_MPDU_262143,	/* (2^18) -1 */
+	IEEE80211_VHTCAP_MAX_A_MPDU_524287,	/* (2^19) -1 */
+	IEEE80211_VHTCAP_MAX_A_MPDU_1048575,	/* (2^20) -1 */
+};
+
+/* VHT link Adaptation capable B26-27 */
+enum ieee80211_vht_lnkadptcap {
+	IEEE80211_VHTCAP_LNKADAPTCAP_NO_FEEDBACK,
+	IEEE80211_VHTCAP_LNKADAPTCAP_RESERVED,
+	IEEE80211_VHTCAP_LNKADAPTCAP_UNSOLICITED,
+	IEEE80211_VHTCAP_LNKADAPTCAP_BOTH,
+};
+
+/* VHT MCS support values, per spatial stream */
+enum ieee80211_vht_mcs_supported {
+	IEEE80211_VHT_MCS_0_7,
+	IEEE80211_VHT_MCS_0_8,
+	IEEE80211_VHT_MCS_0_9,
+	IEEE80211_VHT_MCS_NA,	/* spatial stream not supported */
+};
+
+/* VHT NSS */
+enum ieee80211_vht_nss {
+	IEEE80211_VHT_NSS1 = 1,
+	IEEE80211_VHT_NSS2,
+	IEEE80211_VHT_NSS3,
+	IEEE80211_VHT_NSS4,
+	IEEE80211_VHT_NSS5,
+	IEEE80211_VHT_NSS6,
+	IEEE80211_VHT_NSS7,
+	IEEE80211_VHT_NSS8,
+};
+
+struct ieee80211_vhtcap {
+	u_int32_t			cap_flags;	/* VHT capability flags */
+	u_int32_t			maxmpdu;	/* see enum ieee80211_vht_maxmpdu */
+	u_int32_t			chanwidth;	/* see enum ieee80211_vht_chanwidth */
+	u_int32_t			rxstbc;		/* see enum ieee80211_vht_rxstbc */
+	u_int8_t			bfstscap;	/* see enum ieee80211_vht_rxsts */
+	u_int8_t			numsounding;	/* see enum ieee80211_vht_numsnd */
+	u_int32_t			maxampduexp;	/* see enum ieee80211_vht_maxampduexp */
+	u_int32_t			lnkadptcap;	/* see enum ieee80211_vht_lnkadptcap */
+	u_int16_t			rxmcsmap;	/* RX VHT-MCS map (VHT_SUPPORTS_MCS0_* bits) */
+	u_int16_t			rxlgimaxrate;	/* presumably RX highest long-GI rate — TODO confirm */
+	u_int16_t			txmcsmap;	/* TX VHT-MCS map (VHT_SUPPORTS_MCS0_* bits) */
+	u_int16_t			txlgimaxrate;	/* presumably TX highest long-GI rate — TODO confirm */
+	u_int8_t			bfstscap_save;	/* saved copy of bfstscap */
+} __packed;
+
+/* VHT capability macros */
+#define VHT_SUPPORTS_MCS0_9_FOR_4SS_BIT	0x0080
+#define VHT_SUPPORTS_MCS0_8_FOR_4SS_BIT	0x0040
+#define VHT_SUPPORTS_MCS0_9_FOR_3SS_BIT	0x0020
+#define VHT_SUPPORTS_MCS0_8_FOR_3SS_BIT	0x0010
+#define VHT_SUPPORTS_MCS0_9_FOR_2SS_BIT	0x0008
+#define VHT_SUPPORTS_MCS0_8_FOR_2SS_BIT	0x0004
+#define VHT_SUPPORTS_MCS0_9_FOR_1SS_BIT	0x0002
+#define VHT_SUPPORTS_MCS0_8_FOR_1SS_BIT	0x0001
+
+#define IEEE80211_VHT_HAS_4SS(rxmcsmap) \
+	!((rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_4SS_BIT) && \
+	(rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_4SS_BIT))
+
+#define IEEE80211_VHT_HAS_3SS(rxmcsmap) \
+	!((rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_3SS_BIT) && \
+	(rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_3SS_BIT))
+
+#define IEEE80211_VHT_HAS_2SS(rxmcsmap) \
+	!((rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_2SS_BIT) && \
+	(rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_2SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_8_FOR_4SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_4SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_4SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_9_FOR_4SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_4SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_4SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_8_FOR_3SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_3SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_3SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_9_FOR_3SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_3SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_3SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_8_FOR_2SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_2SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_2SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_9_FOR_2SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_2SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_2SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_8_FOR_1SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_1SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_1SS_BIT))
+
+#define IEEE80211_VHT_SUPPORTS_MCS0_9_FOR_1SS(rxmcsmap)	\
+	((rxmcsmap & VHT_SUPPORTS_MCS0_9_FOR_1SS_BIT) && \
+	!(rxmcsmap & VHT_SUPPORTS_MCS0_8_FOR_1SS_BIT))
+
+/* VHT Operation element */
+/* VHT Operation Information subfields */
+enum ieee80211_vhtop_chanwidth {
+	IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ,
+	IEEE80211_VHTOP_CHAN_WIDTH_80MHZ,
+	IEEE80211_VHTOP_CHAN_WIDTH_160MHZ,
+	IEEE80211_VHTOP_CHAN_WIDTH_80PLUS80MHZ,
+};
+
+#define IEEE80211_VHT_MAXMCS_SET_SUPPORTED	10
+
+struct ieee80211_vhtop {
+	u_int32_t			chanwidth;
+	u_int8_t			centerfreq0;
+	u_int8_t			centerfreq1;
+	u_int16_t			basicvhtmcsnssset;
+} __packed;
+
+/* VHT Operating Mode Notification element */
+#define IEEE80211_VHT_OPMODE_NOTIF_DEFAULT	0xFFFF
+
+/* Max number of MU groups */
+#define IEEE80211_MU_GRP_NUM_MAX	64
+
+/* Max number of nodes in a MU group */
+#define IEEE80211_MU_GRP_NODES_MAX	4
+
+/* GROUP IDs which are used for SU PPDU as per IEEE P802.11ac/D5.0,
+ * chapter 9.17a Group ID and partial AID in VHT PPDUs  */
+#define IEEE80211_SU_GROUP_ID_0		0u
+#define IEEE80211_SU_GROUP_ID_63	63u
+
+#define IEEE80211_MAC_ADDRESS_GROUP_BIT 0x01
+
+/* VHT MU membership & user position arrays */
+struct ieee80211_vht_mu_grp {
+#define IEEE80211_VHT_GRP_1ST_BIT_OFFSET	1
+#define IEEE80211_VHT_GRP_MAX_BIT_OFFSET	62
+#define IEEE80211_VHT_GRP_MEMBERSHIP_ARRAY_SIZE	(IEEE80211_MU_GRP_NUM_MAX/(sizeof(u_int8_t)*8))
+#define IEEE80211_VHT_USR_POS_ARRAY_SIZE	((IEEE80211_MU_GRP_NODES_MAX >> 1)*	\
+							IEEE80211_MU_GRP_NUM_MAX/(sizeof(u_int8_t)*8))
+	u_int8_t member[IEEE80211_VHT_GRP_MEMBERSHIP_ARRAY_SIZE];
+	u_int8_t pos[IEEE80211_VHT_USR_POS_ARRAY_SIZE];
+} __packed;
+
+#define QTN_MU_NODES_PER_GROUP 2 /* Max number of nodes currently supported */
+#define QTN_MU_QMAT_MAX_SLOTS 3
+
+struct ieee80211_mu_groups_update {
+	u_int8_t ncidx[QTN_MU_NODES_PER_GROUP * QTN_MU_QMAT_MAX_SLOTS];
+	struct ieee80211_vht_mu_grp grps[QTN_MU_NODES_PER_GROUP * QTN_MU_QMAT_MAX_SLOTS];
+} __packed;
+
+struct ieee80211_action_data {
+	u_int8_t cat;				/* category identifier */
+	u_int8_t action;			/* action identifier */
+	void *params;
+};
+
+struct ba_action_req {
+	u_int8_t		tid;		/* TID */
+	u_int16_t		seq;		/* sequence number of first frame to be block acked */
+	u_int8_t		frag;       /* fragment number of first frame to be block acked */
+	enum ieee80211_ba_type type;/* block ack type */
+	u_int16_t		buff_size;	/* suggested re-order buffer size */
+	u_int16_t		timeout;	/* block ack timeout if no transfer */
+};
+
+struct ba_action_resp {
+	u_int8_t		tid;		/* TID */
+	u_int16_t		seq;		/* sequence number of first frame to be block acked */
+	u_int8_t		frag;		/* fragment number of first frame to be block acked */
+	enum ieee80211_ba_type type;/* block ack type */
+	u_int16_t		buff_size;	/* actual re-order buffer size */
+	u_int16_t		reason;		/* block ack negotiation status */
+	u_int16_t		timeout;	/* negotiated block ack timeout if no transfer */
+};
+
+struct ba_action_del {
+	u_int8_t		tid;		/* TID */
+	u_int16_t		reason;		/* block ack termination reason */
+	u_int8_t		initiator;	/* initiator/ recipient of block ack negotiation */
+};
+
+struct ht_action_nc_beamforming {
+	u_int16_t		num_sts;		/* number of space time streams, Nc */
+	u_int16_t		num_txchains;	/* number of transmit chains, Nr */
+	u_int8_t		snr[2];			/* SNR for received space time streams */
+	u_int16_t		size_matrices;	/* size of beamforming matrices in bytes */
+	u_int8_t		*matrices;		/* pointer to beamforming matrices */
+	u_int8_t		bw_mode;		/* bwmode = 0 for 20Mhz and 1 for 40 M */
+
+};
+
+struct ht_action_channelswitch {
+	u_int8_t		ch_width;		/* switched channel width */
+};
+
+struct ht_action_sm_powersave {
+	u_int8_t		sm_power_mode;		/* new power mode */
+	u_int8_t		sm_power_enabled;	/* power save enabled */
+};
+
+struct ht_action_antennasel {
+	u_int8_t		antenna_sel;	/* antenna selection: bit number corresponds
+									to antenna number */
+};
+
+struct ht_action_mimo_ctrl {
+	u_int8_t		num_columns;	/* Nc in received beamforming matrices */
+	u_int8_t		num_rows;		/* Nr in received beamforming matrices */
+	u_int8_t		chan_width;	/* Channel Width 0=20, 1 =40 */
+	u_int8_t		num_grouping;	/* Ng in received beamforming matrices */
+	u_int8_t		num_coeffsize;	/* Nb in received beamforming matrices */
+	u_int8_t		snr[2];			/* SNR as seen by sender of action frame */
+	u_int32_t		matrices[1024];		/* pointer to beamforming matrices,
+										contents must be copied */
+};
+
+#ifdef CONFIG_QVSP
+/**
+ * The following structure definitions are for passing in data to the
+ * management send function to generate action frames for VSP.
+ */
+struct ieee80211_qvsp_act {
+	uint8_t oui[3];
+	uint8_t type;
+};
+
+struct ieee80211_qvsp_strm_id {
+	union {
+		struct in6_addr	ipv6;
+		__be32		ipv4;
+	} saddr;
+	union {
+		struct in6_addr ipv6;
+		__be32		ipv4;
+	} daddr;
+	__be16 sport;
+	__be16 dport;
+	uint8_t ip_version;
+	uint8_t ip_proto;
+	uint8_t ac;
+} __packed;
+
+#define IEEE8021_QVSP_MAX_ACT_ITEMS 32
+
+struct ieee80211_qvsp_strm_dis_attr {
+	uint32_t throt_policy;
+	uint32_t throt_rate;
+	uint32_t demote_rule;
+	uint32_t demote_state;
+};
+
+struct ieee80211_qvsp_act_strm_ctrl {
+	struct ieee80211_qvsp_act header;
+	uint8_t strm_state;
+	uint8_t count;
+	struct ieee80211_qvsp_strm_dis_attr dis_attr;
+	struct ieee80211_qvsp_strm_id strm_items[IEEE8021_QVSP_MAX_ACT_ITEMS];
+};
+
+struct ieee80211_qvsp_act_cfg_item {
+	uint32_t index;
+	uint32_t value;
+};
+
+struct ieee80211_qvsp_act_cfg {
+	struct ieee80211_qvsp_act header;
+	uint8_t count;
+	struct ieee80211_qvsp_act_cfg_item cfg_items[IEEE8021_QVSP_MAX_ACT_ITEMS];
+};
+#endif
+
+typedef void (*ppq_callback_success)(void *ctx);
+typedef void (*ppq_callback_fail)(void *ctx, int32_t reason);
+
+struct ieee80211_meas_request_ctrl {
+	u_int8_t meas_type;
+	unsigned long expire;
+	ppq_callback_success fn_success;
+	ppq_callback_fail fn_fail;
+	union {
+		struct _req_basic {
+			u_int64_t start_tsf;
+			u_int16_t duration_ms;
+			u_int8_t channel;
+		} basic;
+		struct _req_cca {
+			u_int64_t start_tsf;
+			u_int16_t duration_ms;
+			u_int8_t channel;
+		} cca;
+		struct _req_rpi {
+			u_int64_t start_tsf;
+			u_int16_t duration_ms;
+			u_int8_t channel;
+		} rpi;
+		struct _req_sta_stats {
+			void *sub_item;
+			u_int16_t duration_tu;
+			u_int8_t group_id;
+		} sta_stats;
+		struct _req_qtn_cca {
+			u_int16_t duration_tu;
+		} qtn_cca;
+		struct _req_chan_load {
+			u_int8_t channel;
+			u_int16_t duration_ms;
+		} chan_load;
+		struct _req_noise_his {
+			u_int8_t channel;
+			u_int16_t duration_ms;
+		} noise_his;
+		struct _req_beacon {
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int8_t duration_ms;	/* NOTE(review): u_int8_t, but every sibling request uses u_int16_t duration_ms — confirm no truncation */
+			u_int8_t mode;
+			u_int8_t bssid[6];
+			/* optional ssid sub-element */
+			uint8_t *ssid;
+			uint8_t ssid_len;
+		} beacon;
+		struct _req_frame {
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int16_t duration_ms;
+			u_int8_t type;
+			u_int8_t mac_address[6];
+		} frame;
+		struct _req_tran_stream_cat {
+			u_int16_t duration_ms;
+			u_int8_t peer_sta[6];
+			u_int8_t tid;
+			u_int8_t bin0;
+		} tran_stream_cat;
+		struct _req_multicast_diag {
+			u_int16_t duration_ms;
+			u_int8_t group_mac[6];
+		} multicast_diag;
+	} u;
+};
+
+struct ieee80211_meas_report_ctrl {
+	u_int8_t meas_type;
+	u_int8_t report_mode;
+	u_int8_t token;		/* dialog token */
+	u_int8_t meas_token;	/* measurement token */
+	u_int8_t autonomous;	/* 1: autonomous report */
+	union {
+		struct _rep_basic {
+			u_int8_t channel;
+			u_int8_t basic_report;
+			u_int16_t duration_tu;
+			u_int64_t start_tsf;
+		} basic;
+		struct _rep_cca {
+			u_int8_t channel;
+			u_int8_t cca_report;
+			u_int16_t duration_tu;
+			u_int64_t start_tsf;
+		} cca;
+		struct _rep_rpi {
+			u_int64_t start_tsf;
+			u_int16_t duration_tu;
+			u_int8_t channel;
+			u_int8_t rpi_report[8];
+		} rpi;
+		struct _rep_sta_stats {
+			void *sub_item;
+			u_int16_t duration_tu;
+			u_int8_t group_id;
+		} sta_stats;
+		struct _rep_qtn_cca {
+			u_int16_t type;
+			/* CCA radio measurement report field */
+			u_int64_t start_tsf;
+			u_int16_t duration_ms;
+			u_int8_t channel;
+			u_int8_t qtn_cca_report;
+			union {
+				struct _rep_qtn_cca_info {
+					u_int16_t others_time;
+					u_int32_t sp_fail;
+					u_int32_t lp_fail;
+				} qtn_cca_info;
+				struct _rep_qtn_fat_info {
+					u_int16_t free_airtime;
+				} qtn_fat_info;
+				struct _rep_qtn_dfs_info {
+					u_int16_t dfs_enabled;
+					u_int8_t max_txpower;
+				} qtn_dfs_info;
+			} u;
+			u_int8_t *extra_ie;
+			u_int16_t extra_ie_len;
+		} qtn_cca;
+		struct _rep_chan_load {
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int16_t duration_tu;
+			u_int8_t channel_load;
+		} chan_load;
+		struct _rep_noise_his {
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int16_t duration_tu;
+			u_int8_t antenna_id;
+			u_int8_t anpi;
+			u_int8_t ipi[11];
+		} noise_his;
+		struct _rep_beacon {
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int16_t duration_tu;
+			u_int8_t reported_frame_info;
+			u_int8_t rcpi;
+			u_int8_t rsni;
+			u_int8_t bssid[6];
+			u_int8_t antenna_id;
+			u_int8_t parent_tsf[4];
+		} beacon;
+		struct _rep_frame {
+			void *sub_item;
+			u_int8_t op_class;
+			u_int8_t channel;
+			u_int16_t duration_tu;
+		} frame;
+		struct _rep_tran_stream_cat {
+			u_int16_t duration_tu;
+			u_int8_t peer_sta[6];
+			u_int8_t tid;
+			u_int8_t reason;
+			u_int32_t tran_msdu_cnt;
+			u_int32_t msdu_discard_cnt;
+			u_int32_t msdu_fail_cnt;
+			u_int32_t msdu_mul_retry_cnt;
+			u_int32_t qos_lost_cnt;
+			u_int32_t avg_queue_delay;
+			u_int32_t avg_tran_delay;
+			u_int8_t bin0_range;
+			u_int32_t bins[6];
+		} tran_stream_cat;
+		struct _rep_multicast_diag {
+			u_int16_t duration_tu;
+			u_int8_t group_mac[6];
+			u_int8_t reason;
+			u_int32_t mul_rec_msdu_cnt;
+			u_int16_t first_seq_num;
+			u_int16_t last_seq_num;
+			u_int16_t mul_rate;
+		} multicast_diag;
+	} u;
+};
+
+struct stastats_subele_vendor {
+	u_int32_t flags;
+	u_int8_t sequence;
+};
+
+struct frame_report_subele_frame_count {
+	u_int8_t ta[6];
+	u_int8_t bssid[6];
+	u_int8_t phy_type;
+	u_int8_t avg_rcpi;
+	u_int8_t last_rsni;
+	u_int8_t last_rcpi;
+	u_int8_t antenna_id;
+	u_int16_t frame_count;
+};
+
+/* TPC actions */
+struct ieee80211_action_tpc_request {
+	unsigned long expire;
+	ppq_callback_success fn_success;
+	ppq_callback_fail fn_fail;
+};
+
+struct ieee80211_action_tpc_report {
+	uint8_t		rx_token;
+	int8_t		tx_power;
+	int8_t		link_margin;
+};
+
+struct ppq_request_param {
+	unsigned long expire;
+	ppq_callback_success fn_success;
+	ppq_callback_fail fn_fail;
+};
+
+struct ieee80211_link_measure_request {
+	struct ppq_request_param ppq;
+};
+
+struct ieee80211_link_measure_report {
+	uint8_t token;
+	struct ieee80211_action_tpc_report tpc_report;
+	uint8_t recv_antenna_id;
+	uint8_t tran_antenna_id;
+	uint8_t rcpi;
+	uint8_t rsni;
+};
+
+struct ieee80211_neighbor_report_request {
+	struct ppq_request_param ppq;
+};
+
+struct ieee80211_neighbor_report_request_item {
+	uint8_t bssid[6];
+	uint32_t bssid_info;
+	uint8_t operating_class;
+	uint8_t channel;
+	uint8_t phy_type;
+};
+
+struct ieee80211_neighbor_report_response {
+	uint8_t token;
+	uint8_t bss_num;
+	struct ieee80211_neighbor_report_request_item *neighbor_report_ptr[32];
+};
+
+#define IEEE80211_MAXIMUM_TIMESTAMP_DIFF_NC_BF	1000000
+
+#define	IEEE80211_TXPOWER_MAX	100	/* .5 dBm units */
+#define	IEEE80211_TXPOWER_MIN	0	/* kill radio */
+
+#define	IEEE80211_DTIM_MAX	15	/* max DTIM period */
+#define	IEEE80211_DTIM_MIN	1	/* min DTIM period */
+#define	IEEE80211_DTIM_DEFAULT	3	/* default DTIM period */
+
+#define	IEEE80211_BINTVAL_MAX	5000	/* max beacon interval (TU's) */
+#define	IEEE80211_BINTVAL_MIN	25	/* min beacon interval (TU's) */
+#define	IEEE80211_BINTVAL_DEFAULT 100	/* default beacon interval (TU's) */
+#define IEEE80211_BINTVAL_VALID(_bi) \
+	((IEEE80211_BINTVAL_MIN <= (_bi)) && \
+	 ((_bi) <= IEEE80211_BINTVAL_MAX))
+#define IEEE80211_BINTVAL_SANITISE(_bi) \
+	(IEEE80211_BINTVAL_VALID(_bi) ? \
+	 (_bi) : IEEE80211_BINTVAL_DEFAULT)
+
+#define IEEE80211_SCAN_TBL_LEN_MAX_DFLT	2000
+#define IEEE80211_BEACON_HANG_TIMEOUT_DFLT	(5 * IEEE80211_BINTVAL_DEFAULT)
+
+#define IEEE80211_BWSTR_20	"20"
+#define IEEE80211_BWSTR_40	"40"
+#define IEEE80211_BWSTR_80	"80"
+#define IEEE80211_BWSTR_160	"160"
+#define IEEE80211_BWSTR_80P80	"80+80"
+
+#endif /* _NET80211__IEEE80211_H_ */
diff --git a/drivers/qtn/include/shared/net80211/ieee80211.h b/drivers/qtn/include/shared/net80211/ieee80211.h
new file mode 100644
index 0000000..5aaa2a2
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/ieee80211.h
@@ -0,0 +1,4040 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _NET80211_IEEE80211_H_
+#define _NET80211_IEEE80211_H_
+#include <compat.h>
+#include "net80211/_ieee80211.h"
+#include "net80211/ieee80211_qos.h"
+#include "net80211/ieee80211_dfs_reentry.h"
+
+/*
+ * 802.11 protocol definitions.
+ */
+
+#define	IEEE80211_ADDR_LEN	6		/* size of 802.11 address */
+/* is 802.11 address multicast/broadcast? */
+#define	IEEE80211_IS_MULTICAST(_a)	(*(_a) & 0x01)
+
+/* IEEE 802.11 PLCP header */
+struct ieee80211_plcp_hdr {
+	uint16_t	i_sfd;
+	uint8_t	i_signal;
+	uint8_t	i_service;
+	uint16_t	i_length;
+	uint16_t	i_crc;
+} __packed;
+
+#define IEEE80211_PLCP_SFD	0xF3A0
+#define IEEE80211_PLCP_SERVICE  0x00
+
+struct ieee80211_mac_addr {
+	uint8_t addr[IEEE80211_ADDR_LEN];
+};
+
+/*
+ * generic definitions for IEEE 802.11 frames
+ */
+struct ieee80211_frame {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	/* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+	/* see below */
+} __packed;
+
+struct ieee80211_qosframe {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_qos[2];
+	/* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+	/* see below */
+} __packed;
+
+struct ieee80211_htframe {
+	uint8_t	i_fc[2];
+	uint8_t	i_dur[2];
+	uint8_t	i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t	i_seq[2];
+	uint8_t	i_qos[2];
+	uint8_t	i_ht[4];
+	/* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+	/* see below */
+} __packed;
+
+struct ieee80211_qoscntl {
+	uint8_t i_qos[2];
+};
+
+struct ieee80211_ht_qosframe {
+	uint8_t	i_fc[2];
+	uint8_t	i_dur[2];
+	uint8_t	i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t	i_seq[2];
+	uint8_t	i_qos[2];
+	uint8_t	i_ht[4];
+	/* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+	/* see below */
+} __packed;
+
+struct ieee80211_htcntl {
+	uint8_t	i_ht[4];
+};
+
+struct ieee80211_frame_addr4 {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_addr4[IEEE80211_ADDR_LEN];
+} __packed;
+
+
+struct ieee80211_qosframe_addr4 {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t i_seq[2];
+	uint8_t i_addr4[IEEE80211_ADDR_LEN];
+	uint8_t i_qos[2];
+} __packed;
+
+#define IEEE80211_HT_CAPABLE		1
+#define IEEE80211_NON_HT_CAPABLE	0
+
+struct ieee80211_htframe_addr4 {
+	uint8_t	i_fc[2];
+	uint8_t	i_dur[2];
+	uint8_t	i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t	i_seq[2];
+	uint8_t	i_addr4[IEEE80211_ADDR_LEN];
+	uint8_t	i_ht[4];
+} __packed;
+
+struct ieee80211_ht_qosframe_addr4 {
+	uint8_t	i_fc[2];
+	uint8_t	i_dur[2];
+	uint8_t	i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t	i_addr3[IEEE80211_ADDR_LEN];
+	uint8_t	i_seq[2];
+	uint8_t	i_addr4[IEEE80211_ADDR_LEN];
+	uint8_t	i_qos[2];
+	uint8_t	i_ht[4];
+} __packed;
+
+#define IEEE80211_IS_4ADDRESS(__wh)	\
+		(((__wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
+
+struct ieee80211_ctlframe_addr2 {
+	uint8_t i_fc[2];
+	__le16 i_aidordur; /* AID or duration */
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+} __packed;
+
+struct ieee80211_vht_su_ndpa {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_diagtoken;
+	uint8_t i_sta1_info[2];
+} __packed;
+
+struct ieee80211_bar_frame {
+        u_int8_t i_fc[2];
+        u_int8_t i_dur[2];
+        u_int8_t i_addr1[IEEE80211_ADDR_LEN];
+        u_int8_t i_addr2[IEEE80211_ADDR_LEN];
+        u_int8_t i_bar_ctl[2];
+        __le16   i_back_seq;
+} __packed;
+
+struct ieee80211_vht_mu_ndpa {
+	uint8_t i_fc[2];
+	__le16 i_dur;
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_diagtoken;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_vht_mu_rpt_poll {
+	uint8_t i_fc[2];
+	__le16 i_dur;
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	uint8_t i_fbseg_map;
+} __packed;
+
+struct ieee80211_auth {
+	uint16_t auth_alg;
+	uint16_t auth_transaction;
+	uint16_t status_code;
+	/* possibly followed by Challenge text */
+	uint8_t variable[0];
+} __packed;
+
+struct ieee80211_assoc {
+	uint16_t assoc_cap_info;
+	uint16_t assoc_status_code;
+	uint16_t assoc_aid;
+	/* information elements */
+	uint8_t variable[0];
+} __packed;
+
+#define	IEEE80211_FC0_VERSION_MASK		0x03
+#define	IEEE80211_FC0_VERSION_SHIFT		0
+#define	IEEE80211_FC0_VERSION_0			0x00
+#define	IEEE80211_FC0_TYPE_MASK			0x0c
+#define	IEEE80211_FC0_TYPE_SHIFT		2
+#define	IEEE80211_FC0_TYPE_MGT			0x00
+#define	IEEE80211_FC0_TYPE_CTL			0x04
+#define	IEEE80211_FC0_TYPE_DATA			0x08
+
+#define	IEEE80211_FC0_SUBTYPE_MASK		0xf0
+#define	IEEE80211_FC0_SUBTYPE_SHIFT		4
+/* for TYPE_MGT */
+#define	IEEE80211_FC0_SUBTYPE_ASSOC_REQ		0x00
+#define	IEEE80211_FC0_SUBTYPE_ASSOC_RESP	0x10
+#define	IEEE80211_FC0_SUBTYPE_REASSOC_REQ	0x20
+#define	IEEE80211_FC0_SUBTYPE_REASSOC_RESP	0x30
+#define	IEEE80211_FC0_SUBTYPE_PROBE_REQ		0x40
+#define	IEEE80211_FC0_SUBTYPE_PROBE_RESP	0x50
+#define	IEEE80211_FC0_SUBTYPE_BEACON		0x80
+#define	IEEE80211_FC0_SUBTYPE_ATIM		0x90
+#define	IEEE80211_FC0_SUBTYPE_DISASSOC		0xa0
+#define	IEEE80211_FC0_SUBTYPE_AUTH		0xb0
+#define	IEEE80211_FC0_SUBTYPE_DEAUTH		0xc0
+#define IEEE80211_FC0_SUBTYPE_ACTION		0xd0
+#define IEEE80211_FC0_SUBTYPE_ACTION_NOACK	0xe0
+/* for TYPE_CTL */
+#define	IEEE80211_FC0_SUBTYPE_VHT_RPT_POLL      0x40
+#define	IEEE80211_FC0_SUBTYPE_VHT_NDPA		0x50
+#define	IEEE80211_FC0_SUBTYPE_BAR		0x80
+#define	IEEE80211_FC0_SUBTYPE_BA		0x90
+#define	IEEE80211_FC0_SUBTYPE_PS_POLL		0xa0
+#define	IEEE80211_FC0_SUBTYPE_RTS		0xb0
+#define	IEEE80211_FC0_SUBTYPE_CTS		0xc0
+#define	IEEE80211_FC0_SUBTYPE_ACK		0xd0
+#define	IEEE80211_FC0_SUBTYPE_CF_END		0xe0
+#define	IEEE80211_FC0_SUBTYPE_CF_END_ACK	0xf0
+/* for TYPE_DATA (bit combination) */
+#define	IEEE80211_FC0_SUBTYPE_DATA		0x00
+#define	IEEE80211_FC0_SUBTYPE_CF_ACK		0x10
+#define	IEEE80211_FC0_SUBTYPE_CF_POLL		0x20
+#define	IEEE80211_FC0_SUBTYPE_CF_ACPL		0x30
+#define	IEEE80211_FC0_SUBTYPE_NODATA		0x40
+#define	IEEE80211_FC0_SUBTYPE_CFACK		0x50
+#define	IEEE80211_FC0_SUBTYPE_CFPOLL		0x60
+#define	IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK	0x70
+#define	IEEE80211_FC0_SUBTYPE_QOS		0x80
+#define	IEEE80211_FC0_SUBTYPE_QOS_NULL		0xc0
+
+#define	IEEE80211_FC1_DIR_MASK			0x03
+#define	IEEE80211_FC1_DIR_NODS			0x00	/* STA->STA */
+#define	IEEE80211_FC1_DIR_TODS			0x01	/* STA->AP  */
+#define	IEEE80211_FC1_DIR_FROMDS		0x02	/* AP ->STA */
+#define	IEEE80211_FC1_DIR_DSTODS		0x03	/* AP ->AP  */
+
+#define	IEEE80211_FC1_MORE_FRAG			0x04
+#define	IEEE80211_FC1_RETRY			0x08
+#define	IEEE80211_FC1_PWR_MGT			0x10
+#define	IEEE80211_FC1_MORE_DATA			0x20
+#define	IEEE80211_FC1_PROT			0x40
+#define	IEEE80211_FC1_WEP			0x40
+#define	IEEE80211_FC1_ORDER			0x80
+
+#define	IEEE80211_SEQ_FRAG_MASK			0x000f
+#define	IEEE80211_SEQ_FRAG_SHIFT		0
+#define	IEEE80211_SEQ_SEQ_MASK			0xfff0
+#define	IEEE80211_SEQ_SEQ_SHIFT			4
+#define IEEE80211_SEQ_RANGE			4096
+#define IEEE80211_SEQ_ORDERLAG			64
+
+#define IEEE80211_MU_NDPA_TOKEN_MASK		0xFC
+#define IEEE80211_MU_NDPA_TOKEN_SHIFT		2
+#define IEEE80211_MU_NDPA_RSRV_MASK		0x03
+#define IEEE80211_MU_NDPA_RSRV_SHIFT		0
+
+#define IEEE80211_SEQ_ADD(seq, offset)  \
+	(((seq) + (offset)) & (IEEE80211_SEQ_RANGE - 1))
+#define IEEE80211_SEQ_SUB(seq, offset)					\
+	(((seq) + IEEE80211_SEQ_RANGE - (offset)) & (IEEE80211_SEQ_RANGE - 1))
+#define IEEE80211_SEQ_DIFF(seq_front, seq_back)				\
+	(((seq_front) + IEEE80211_SEQ_RANGE - (seq_back)) & (IEEE80211_SEQ_RANGE - 1))
+#define IEEE80211_SEQ_INORDER_LAG(seq_front, seq_back, seq_lag)		\
+	(IEEE80211_SEQ_DIFF((seq_front), (seq_back)) < (seq_lag))
+#define IEEE80211_SEQ_INORDER(seq_front, seq_back)			\
+	IEEE80211_SEQ_INORDER_LAG((seq_front), (seq_back), IEEE80211_SEQ_ORDERLAG)
+
+#define	IEEE80211_SEQ_LEQ(a,b)	((int)((a)-(b)) <= 0)
+#define	IEEE80211_SEQ_EQ(a,b)	((a) == (b))
+
+#define	IEEE80211_NWID_LEN			32
+
+#define	IEEE80211_QOS_TXOP			0x00ff
+/* bit 8 is reserved */
+#define	IEEE80211_QOS_ACKPOLICY			0x60
+#define	IEEE80211_QOS_ACKPOLICY_S		5
+#define	IEEE80211_QOS_EOSP			0x10
+#define	IEEE80211_QOS_EOSP_S			4
+#define	IEEE80211_QOS_TID			0x0f
+#define IEEE80211_QOS_A_MSDU_PRESENT 		0x80
+#define IEEE80211_QOS_BLOCK_ACK_POLICY          0x60
+
+/* bit 1 is reserved */
+#define IEEE80211_HTC0_TRQ				0x02
+#define IEEE80211_HTC0_MAI_MASK			0x3C
+#define IEEE80211_HTC0_MAI_SHIFT		2
+#define IEEE80211_HTC0_MFSI_LOW_MASK	0xC0
+#define IEEE80211_HTC0_MFSI_LOW_SHIFT	6
+
+#define IEEE80211_HTC1_MFSI_HIGH		0x01
+#define IEEE80211_HTC1_MFB_ASEL_MASK	0xFE
+#define IEEE80211_HTC1_MFB_ASEL_SHIFT	1
+
+#define IEEE80211_HTC2_CALIB_POS_MASK	0x03
+#define IEEE80211_HTC2_CALIB_POS_SHIFT	0
+#define IEEE80211_HTC2_CALIB_SEQ_MASK	0x0C
+#define IEEE80211_HTC2_CALIB_SEQ_SHIFT	2
+/* bits 4-5 are reserved */
+#define IEEE80211_HTC2_CSI_STEER_MASK	0xC0
+#define IEEE80211_HTC2_CSI_STEER_SHIFT	6
+
+#define IEEE80211_HTC3_NDP_ANNOUNCE		0x01
+/* bits 1-5 are reserved */
+#define IEEE80211_HTC3_AC_CONSTRAINT	0x40
+#define IEEE80211_HTC3_MORE_PPDU_RDG	0x80
+
+#define IEEE80211_BW_RANGE		25
+#define IEEE80211_CHAN_SPACE		5
+#define IEEE80211_SEC_CHAN_OFFSET	4
+#define IEEE80211_40M_CENT_FREQ_OFFSET	2
+
+/*
+ * Country/Region Codes from MS WINNLS.H
+ * Numbering from ISO 3166
+ * XXX belongs elsewhere
+ *
+ * First 2 entries taken from ieee80211.c ...
+ */
+enum CountryCode {
+    CTRY_DEBUG                = 0x1ff,   /* debug, = 511 radix 10 */
+    CTRY_DEFAULT              = 0,       /* default or not defined */
+
+    CTRY_AFGHANISTAN          = 4,       /* Afghanistan */
+    CTRY_ALBANIA              = 8,       /* Albania */
+    CTRY_ALGERIA              = 12,      /* Algeria */
+    CTRY_AMERICAN_SAMOA	      = 16,      /* American Samoa */
+    CTRY_ANDORRA              = 20,      /* Andorra */
+    CTRY_ANGOLA               = 24,      /* Angola */
+    CTRY_ANGUILLA             = 660,
+    CTRY_ANTARTICA            = 10,      /* Antarctica */
+    CTRY_ANTIGUA              = 28,	     /* Antigua and Barbuda */
+    CTRY_ARGENTINA            = 32,      /* Argentina */
+    CTRY_ARMENIA              = 51,      /* Armenia */
+    CTRY_ARUBA                = 533,	 /* Aruba */
+    CTRY_AUSTRALIA            = 36,      /* Australia */
+    CTRY_AUSTRIA              = 40,      /* Austria */
+    CTRY_AZERBAIJAN           = 31,      /* Azerbaijan */
+    CTRY_BAHAMAS              = 44,	     /* Bahamas */
+    CTRY_BAHRAIN              = 48,      /* Bahrain */
+    CTRY_BANGLADESH           = 50,	     /* Bangladesh */
+    CTRY_BARBADOS             = 52,
+    CTRY_BELARUS              = 112,     /* Belarus */
+    CTRY_BELGIUM              = 56,      /* Belgium */
+    CTRY_BELIZE               = 84,      /* Belize */
+    CTRY_BENIN                = 204,
+    CTRY_BERMUDA              = 60,
+    CTRY_BHUTAN               = 64,
+    CTRY_BOLIVIA              = 68,      /* Bolivia */
+    CTRY_BOSNIA_AND_HERZEGOWINA = 70,
+    CTRY_BOTSWANA             = 72,
+    CTRY_BOUVET_ISLAND	      = 74,
+    CTRY_BRAZIL               = 76,      /* Brazil */
+    CTRY_BRITISH_INDIAN_OCEAN_TERRITORY = 86,
+    CTRY_BRUNEI_DARUSSALAM    = 96,      /* Brunei Darussalam */
+    CTRY_BULGARIA             = 100,     /* Bulgaria */
+    CTRY_BURKINA_FASO         = 854,
+    CTRY_BURUNDI              = 108,
+    CTRY_CAMBODIA             = 116,
+    CTRY_CAMEROON             = 120,
+    CTRY_CANADA               = 124,     /* Canada */
+    CTRY_CAPE_VERDE           = 132,
+    CTRY_CAYMAN_ISLANDS	      = 136,
+    CTRY_CENTRAL_AFRICAN_REPUBLIC = 140,
+    CTRY_CHAD                 = 148,
+    CTRY_CHILE                = 152,     /* Chile */
+    CTRY_CHINA                = 156,     /* People's Republic of China */
+    CTRY_CHRISTMAS_ISLAND     = 162,
+    CTRY_COCOS_ISLANDS	      = 166,
+    CTRY_COLOMBIA             = 170,     /* Colombia */
+    CTRY_COMOROS              = 174,
+    CTRY_CONGO                = 178,
+    CTRY_COOK_ISLANDS	      = 184,
+    CTRY_COSTA_RICA           = 188,     /* Costa Rica */
+    CTRY_COTE_DIVOIRE	      = 384,
+    CTRY_CROATIA              = 191,     /* Croatia */
+    CTRY_CYPRUS               = 196,
+    CTRY_CZECH                = 203,     /* Czech Republic */
+    CTRY_DENMARK              = 208,     /* Denmark */
+    CTRY_DJIBOUTI             = 262,
+    CTRY_DOMINICA             = 212,
+    CTRY_DOMINICAN_REPUBLIC   = 214,     /* Dominican Republic */
+    CTRY_ECUADOR              = 218,     /* Ecuador */
+    CTRY_EUROPE               = 200,     /* European Union */
+    CTRY_EGYPT                = 818,     /* Egypt */
+    CTRY_EL_SALVADOR          = 222,     /* El Salvador */
+    CTRY_EQUATORIAL_GUINEA    = 226,
+    CTRY_ERITREA              = 232,
+    CTRY_ESTONIA              = 233,     /* Estonia */
+    CTRY_ETHIOPIA             = 210,
+    CTRY_FALKLAND_ISLANDS     = 238,	 /* (Malvinas) */
+    CTRY_FAEROE_ISLANDS       = 234,     /* Faeroe Islands */
+    CTRY_FIJI                 = 242,
+    CTRY_FINLAND              = 246,     /* Finland */
+    CTRY_FRANCE               = 250,     /* France */
+    CTRY_FRANCE2              = 255,     /* France2 */
+    CTRY_FRENCH_GUIANA	      = 254,
+    CTRY_FRENCH_POLYNESIA     = 258,
+    CTRY_FRENCH_SOUTHERN_TERRITORIES	= 260,
+    CTRY_GABON                = 266,
+    CTRY_GAMBIA               = 270,
+    CTRY_GEORGIA              = 268,     /* Georgia */
+    CTRY_GERMANY              = 276,     /* Germany */
+    CTRY_GHANA                = 288,
+    CTRY_GIBRALTAR            = 292,
+    CTRY_GREECE               = 300,     /* Greece */
+    CTRY_GREENLAND            = 304,
+    CTRY_GRENADA              = 308,
+    CTRY_GUADELOUPE           = 312,
+    CTRY_GUAM                 = 316,
+    CTRY_GUATEMALA            = 320,     /* Guatemala */
+    CTRY_GUINEA               = 324,
+    CTRY_GUINEA_BISSAU	      = 624,
+    CTRY_GUYANA               = 328,
+    CTRY_HAITI                = 332,
+    CTRY_HONDURAS             = 340,     /* Honduras */
+    CTRY_HONG_KONG            = 344,     /* Hong Kong S.A.R., P.R.C. */
+    CTRY_HUNGARY              = 348,     /* Hungary */
+    CTRY_ICELAND              = 352,     /* Iceland */
+    CTRY_INDIA                = 356,     /* India */
+    CTRY_INDONESIA            = 360,     /* Indonesia */
+    CTRY_IRAN                 = 364,     /* Iran */
+    CTRY_IRAQ                 = 368,     /* Iraq */
+    CTRY_IRELAND              = 372,     /* Ireland */
+    CTRY_ISRAEL               = 376,     /* Israel */
+    CTRY_ITALY                = 380,     /* Italy */
+    CTRY_JAMAICA              = 388,     /* Jamaica */
+    CTRY_JAPAN                = 392,     /* Japan */
+    CTRY_JAPAN1               = 393,     /* Japan (JP1) */
+    CTRY_JAPAN2               = 394,     /* Japan (JP0) */
+    CTRY_JAPAN3               = 395,     /* Japan (JP1-1) */
+    CTRY_JAPAN4               = 396,     /* Japan (JE1) */
+    CTRY_JAPAN5               = 397,     /* Japan (JE2) */
+    CTRY_JAPAN6               = 399,	 /* Japan (JP6) */
+    CTRY_JAPAN7               = 900,	 /* Japan */
+    CTRY_JAPAN8               = 901,	 /* Japan */
+    CTRY_JAPAN9               = 902,	 /* Japan */
+    CTRY_JAPAN10	      = 903,	 /* Japan */
+    CTRY_JAPAN11	      = 904,	 /* Japan */
+    CTRY_JAPAN12	      = 905,	 /* Japan */
+    CTRY_JAPAN13	      = 906,	 /* Japan */
+    CTRY_JAPAN14	      = 907,	 /* Japan */
+    CTRY_JAPAN15	      = 908,	 /* Japan */
+    CTRY_JAPAN16	      = 909,	 /* Japan */
+    CTRY_JAPAN17	      = 910,	 /* Japan */
+    CTRY_JAPAN18	      = 911,	 /* Japan */
+    CTRY_JAPAN19	      = 912,	 /* Japan */
+    CTRY_JAPAN20	      = 913,	 /* Japan */
+    CTRY_JAPAN21	      = 914,	 /* Japan */
+    CTRY_JAPAN22	      = 915,	 /* Japan */
+    CTRY_JAPAN23	      = 916,	 /* Japan */
+    CTRY_JAPAN24	      = 917,	 /* Japan */
+    CTRY_JAPAN25	      = 918,	 /* Japan */
+    CTRY_JAPAN26	      = 919,	 /* Japan */
+    CTRY_JAPAN27	      = 920,	 /* Japan */
+    CTRY_JAPAN28	      = 921,	 /* Japan */
+    CTRY_JAPAN29	      = 922,	 /* Japan */
+    CTRY_JAPAN30	      = 923,	 /* Japan */
+    CTRY_JAPAN31	      = 924,	 /* Japan */
+    CTRY_JAPAN32	      = 925,	 /* Japan */
+    CTRY_JAPAN33	      = 926,	 /* Japan */
+    CTRY_JAPAN34	      = 927,	 /* Japan */
+    CTRY_JAPAN35	      = 928,	 /* Japan */
+    CTRY_JAPAN36	      = 929,	 /* Japan */
+    CTRY_JAPAN37	      = 930,	 /* Japan */
+    CTRY_JAPAN38	      = 931,	 /* Japan */
+    CTRY_JAPAN39	      = 932,	 /* Japan */
+    CTRY_JAPAN40	      = 933,	 /* Japan */
+    CTRY_JAPAN41	      = 934,	 /* Japan */
+    CTRY_JAPAN42	      = 935,	 /* Japan */
+    CTRY_JAPAN43	      = 936,	 /* Japan */
+    CTRY_JAPAN44	      = 937,	 /* Japan */
+    CTRY_JAPAN45	      = 938,	 /* Japan */
+    CTRY_JAPAN46	      = 939,	 /* Japan */
+    CTRY_JAPAN47	      = 940,	 /* Japan */
+    CTRY_JAPAN48	      = 941,	 /* Japan */
+    CTRY_JORDAN               = 400,     /* Jordan */
+    CTRY_KAZAKHSTAN           = 398,     /* Kazakhstan */
+    CTRY_KENYA                = 404,     /* Kenya */
+    CTRY_KOREA_NORTH          = 408,     /* North Korea */
+    CTRY_KOREA_ROC            = 410,     /* South Korea */
+    CTRY_KOREA_ROC2           = 411,     /* South Korea */
+    CTRY_KUWAIT               = 414,     /* Kuwait */
+    CTRY_LATVIA               = 428,     /* Latvia */
+    CTRY_LEBANON              = 422,     /* Lebanon */
+    CTRY_LIBYA                = 434,     /* Libya */
+    CTRY_LIECHTENSTEIN        = 438,     /* Liechtenstein */
+    CTRY_LITHUANIA            = 440,     /* Lithuania */
+    CTRY_LUXEMBOURG           = 442,     /* Luxembourg */
+    CTRY_MACAU                = 446,     /* Macau */
+    CTRY_MACEDONIA            = 807,     /* the Former Yugoslav Republic of Macedonia */
+    CTRY_MALAYSIA             = 458,     /* Malaysia */
+    CTRY_MEXICO               = 484,     /* Mexico */
+    CTRY_MONACO               = 492,     /* Principality of Monaco */
+    CTRY_MOROCCO              = 504,     /* Morocco */
+    CTRY_NEPAL                = 524,	 /* Nepal */
+    CTRY_NETHERLANDS          = 528,     /* Netherlands */
+    CTRY_NEW_ZEALAND          = 554,     /* New Zealand */
+    CTRY_NICARAGUA            = 558,     /* Nicaragua */
+    CTRY_NORWAY               = 578,     /* Norway */
+    CTRY_OMAN                 = 512,     /* Oman */
+    CTRY_PAKISTAN             = 586,     /* Islamic Republic of Pakistan */
+    CTRY_PANAMA               = 591,     /* Panama */
+    CTRY_PARAGUAY             = 600,     /* Paraguay */
+    CTRY_PERU                 = 604,     /* Peru */
+    CTRY_PHILIPPINES          = 608,     /* Republic of the Philippines */
+    CTRY_POLAND               = 616,     /* Poland */
+    CTRY_PORTUGAL             = 620,     /* Portugal */
+    CTRY_PUERTO_RICO          = 630,     /* Puerto Rico */
+    CTRY_QATAR                = 634,     /* Qatar */
+    CTRY_ROMANIA              = 642,     /* Romania */
+    CTRY_RUSSIA               = 643,     /* Russia */
+    CTRY_SAUDI_ARABIA         = 682,     /* Saudi Arabia */
+    CTRY_SINGAPORE            = 702,     /* Singapore */
+    CTRY_SLOVAKIA             = 703,     /* Slovak Republic */
+    CTRY_SLOVENIA             = 705,     /* Slovenia */
+    CTRY_SOUTH_AFRICA         = 710,     /* South Africa */
+    CTRY_SPAIN                = 724,     /* Spain */
+    CTRY_SRILANKA             = 144,	 /* Sri Lanka */
+    CTRY_SWEDEN               = 752,     /* Sweden */
+    CTRY_SWITZERLAND          = 756,     /* Switzerland */
+    CTRY_SYRIA                = 760,     /* Syria */
+    CTRY_TAIWAN               = 158,     /* Taiwan */
+    CTRY_THAILAND             = 764,     /* Thailand */
+    CTRY_TRINIDAD_Y_TOBAGO    = 780,     /* Trinidad and Tobago */
+    CTRY_TUNISIA              = 788,     /* Tunisia */
+    CTRY_TURKEY               = 792,     /* Turkey */
+    CTRY_UAE                  = 784,     /* U.A.E. */
+    CTRY_UKRAINE              = 804,     /* Ukraine */
+    CTRY_UNITED_KINGDOM       = 826,     /* United Kingdom */
+    CTRY_UNITED_STATES        = 840,     /* United States */
+    CTRY_UNITED_STATES_FCC49  = 842,     /* United States (Public Safety)*/
+    CTRY_URUGUAY              = 858,     /* Uruguay */
+    CTRY_UZBEKISTAN           = 860,     /* Uzbekistan */
+    CTRY_VENEZUELA            = 862,     /* Venezuela */
+    CTRY_VIET_NAM             = 704,     /* Viet Nam */
+    CTRY_YEMEN                = 887,     /* Yemen */
+    CTRY_ZIMBABWE             = 716      /* Zimbabwe */
+};
+
+#define IEEE80211_IE_ID_LEN_SIZE 2
+
+/*
+ * Generic information element
+ */
+struct ieee80211_ie {
+	uint8_t id;
+	uint8_t len;
+	uint8_t info[0];
+} __packed;
+
+/*
+ * Country information element.
+ */
+#define IEEE80211_COUNTRY_MAX_TRIPLETS (83)
+struct ieee80211_ie_country {
+	uint8_t country_id;
+	uint8_t country_len;
+	uint8_t country_str[3];
+	uint8_t country_triplet[IEEE80211_COUNTRY_MAX_TRIPLETS * 3];
+} __packed;
+
+/*
+ * Channel Switch Announcement information element.
+ */
+struct ieee80211_ie_csa {
+	uint8_t csa_id;	/* IEEE80211_ELEMID_CHANSWITCHANN */
+	uint8_t csa_len;	/* == 3 */
+	uint8_t csa_mode;	/* Channel Switch Mode: 1 == stop transmission until CS */
+	uint8_t csa_chan;	/* New Channel Number */
+	uint8_t csa_count;	/* TBTTs until Channel Switch happens */
+} __packed;
+
+/* for Spectrum Management Actions. Table 20e in 802.11h $7.4.1 */
+#define IEEE80211_ACTION_S_MEASUREMENT_REQUEST 0
+#define IEEE80211_ACTION_S_MEASUREMENT_REPORT  1
+#define IEEE80211_ACTION_S_TPC_REQUEST         2
+#define IEEE80211_ACTION_S_TPC_REPORT          3
+#define IEEE80211_ACTION_S_CHANSWITCHANN       4
+
+/* for csa_mode. It must be either 0 or 1. 1 means that the receiver shall stop
+ * sending until CS. 0 imposes no requirement. See 7.3.2.20 */
+#define IEEE80211_CSA_CAN_STOP_TX       0
+#define IEEE80211_CSA_MUST_STOP_TX      1
+
+/* minimal Channel Switch Count in the initial announcement */
+#define IEEE80211_CSA_PROTECTION_PERIOD 3
+
+/* maximum allowed deviance of measurement of intervals between CSA in Beacons */
+#define IEEE80211_CSA_SANITY_THRESHOLD 100
+
+/* Quantenna CSA tsf ie, to complement an 802.11h CSA ie. More timing precision */
+struct ieee80211_ie_qtn_csa_tsf {
+	uint8_t id;	/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;   /* length in bytes */
+	uint8_t qtn_ie_oui[3];		/* QTN_OUI - 0x00, 0x26, 0x86*/
+	uint8_t qtn_ie_type;		/* IE type */
+	uint64_t tsf;			/* TSF at which channel change happens. */
+} __packed;
+
+/* Quantenna SCS IE */
+#define QTN_SCS_IE_TYPE_STA_INTF_RPT		0x1
+#define QTN_SCS_IE_TYPE_STA_DFS_RPT		0x2
+#define QTN_SCS_IE_TYPE_STA_FAT_RPT		0x3
+#pragma pack(push)
+#pragma pack(1)
+struct ieee80211_ie_qtn_scs {
+	uint8_t id;			/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;                    /* length in bytes */
+	uint8_t qtn_ie_oui[3];		/* QTN_OUI - 0x00, 0x26, 0x86*/
+	uint8_t qtn_ie_type;		/* IE type */
+	uint8_t scs_ie_type;            /* for future expansion and backward compatibility */
+	/* following depends on scs_ie_type */
+	union {
+		struct {
+			uint32_t sp_fail;		/* short preamble failure in last second */
+			uint32_t lp_fail;		/* long preamble failure in last second */
+			uint16_t others_time;		/* rx + tx time for all nodes */
+		} cca_info;
+		struct {
+			uint16_t free_airtime;		/* free air time */
+		} fat_info;
+		struct {
+			uint16_t dfs_enabled;		/* whether station's DFS feature enabled */
+			uint8_t max_txpower;		/* station's tx power */
+		} dfs_info;
+	} u;
+	/* Warning: using this variable-length field would cause backward compatibility
+	 * issues in the future if we want to add new fields */
+	uint16_t extra_ie_len;		/* extra ie len */
+	uint8_t extra_ie[0];		/* tdls stats */
+};
+#pragma pack(pop)
+#define QTN_SCS_IE_LEN_MIN			7    /* till scs ie type */
+#define QTN_SCS_IE_STA_INTF_RPT_LEN_MIN		(QTN_SCS_IE_LEN_MIN + 8)
+#define QTN_SCS_IE_STA_DFS_RPT_LEN_MIN		(QTN_SCS_IE_LEN_MIN + 3)
+#define QTN_SCS_IE_STA_FAT_RPT_LEN_MIN		(QTN_SCS_IE_LEN_MIN + 2)
+
+#define IEEE80211_IS_ALL_SET(__flags__, __msb__)	\
+	(((__flags__) & ((1 << ((__msb__)+1)) - 1)) == ((1 << ((__msb__)+1)) - 1))
+
+/* does frame have QoS sequence control data */
+#define	IEEE80211_QOS_HAS_SEQ(wh) \
+	(((wh)->i_fc[0] & \
+	  (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
+	  (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
+
+#define WME_QOSINFO_COUNT	0x0f  /* Mask for Param Set Count field */
+#define	WMM_OUI_BYTES		0x00, 0x50, 0xf2
+/*
+ * WME/802.11e information element.
+ */
+struct ieee80211_ie_wme {
+	uint8_t wme_id;		/* IEEE80211_ELEMID_VENDOR */
+	uint8_t wme_len;	/* length in bytes */
+	uint8_t wme_oui[3];	/* 0x00, 0x50, 0xf2 */
+	uint8_t wme_type;	/* OUI type */
+	uint8_t wme_subtype;	/* OUI subtype */
+	uint8_t wme_version;	/* spec revision */
+	uint8_t wme_info;	/* QoS info */
+} __packed;
+
+/*
+ * WME/802.11e Tspec Element
+ */
+struct ieee80211_wme_tspec {
+	uint8_t ts_id;
+	uint8_t ts_len;
+	uint8_t ts_oui[3];
+	uint8_t ts_oui_type;
+	uint8_t ts_oui_subtype;
+	uint8_t ts_version;
+	uint8_t ts_tsinfo[3];
+	uint8_t ts_nom_msdu[2];
+	uint8_t ts_max_msdu[2];
+	uint8_t ts_min_svc[4];
+	uint8_t ts_max_svc[4];
+	uint8_t ts_inactv_intv[4];
+	uint8_t ts_susp_intv[4];
+	uint8_t ts_start_svc[4];
+	uint8_t ts_min_rate[4];
+	uint8_t ts_mean_rate[4];
+	uint8_t ts_max_burst[4];
+	uint8_t ts_min_phy[4];
+	uint8_t ts_peak_rate[4];
+	uint8_t ts_delay[4];
+	uint8_t ts_surplus[2];
+	uint8_t ts_medium_time[2];
+} __packed;
+
+/*
+ * WME AC parameter field
+ */
+
+struct ieee80211_wme_acparams {
+	uint8_t acp_aci_aifsn;
+	uint8_t acp_logcwminmax;
+	uint16_t acp_txop;
+} __packed;
+
+#define IEEE80211_WME_PARAM_LEN	24
+#define WME_NUM_TID		16	/* 16 tids */
+#define WME_NUM_AC		4	/* 4 AC categories */
+#define WME_TID_UNKNOWN		(-1)
+#define WME_TID_NONQOS		(-2)
+#define WME_TID_VALID(_tid)	(((_tid) >= 0) && ((_tid) < WME_NUM_TID))
+
+#define WME_PARAM_ACI		0x60	/* Mask for ACI field */
+#define WME_PARAM_ACI_S		5	/* Shift for ACI field */
+#define WME_PARAM_ACM		0x10	/* Mask for ACM bit */
+#define WME_PARAM_ACM_S		4	/* Shift for ACM bit */
+#define WME_PARAM_AIFSN		0x0f	/* Mask for aifsn field */
+#define WME_PARAM_AIFSN_S	0	/* Shift for aifsn field */
+#define WME_PARAM_LOGCWMIN	0x0f	/* Mask for CwMin field (in log) */
+#define WME_PARAM_LOGCWMIN_S	0	/* Shift for CwMin field */
+#define WME_PARAM_LOGCWMAX	0xf0	/* Mask for CwMax field (in log) */
+#define WME_PARAM_LOGCWMAX_S	4	/* Shift for CwMax field */
+
+#define WME_AC_TO_TID(_ac) (       \
+	((_ac) == WME_AC_VO) ? 6 : \
+	((_ac) == WME_AC_VI) ? 5 : \
+	((_ac) == WME_AC_BK) ? 1 : \
+	0)
+
+#define TID_TO_WME_AC(_tid)				\
+	((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE :	\
+	 ((_tid) < 3) ? WME_AC_BK :	\
+	 ((_tid) < 6) ? WME_AC_VI :	\
+	 WME_AC_VO)
+
+/*
+ * WME Parameter Element
+ */
+struct ieee80211_wme_param {
+	uint8_t param_id;
+	uint8_t param_len;
+	uint8_t param_oui[3];
+	uint8_t param_oui_type;
+	uint8_t param_oui_sybtype;
+	uint8_t param_version;
+	uint8_t param_qosInfo;
+	uint8_t param_reserved;
+	struct ieee80211_wme_acparams	params_acParams[WME_NUM_AC];
+} __packed;
+
+/*
+ * WME U-APSD qos info field defines
+ */
+#define WME_CAPINFO_UAPSD_EN			0x00000080
+#define WME_CAPINFO_UAPSD_VO			0x00000001
+#define WME_CAPINFO_UAPSD_VI			0x00000002
+#define WME_CAPINFO_UAPSD_BK			0x00000004
+#define WME_CAPINFO_UAPSD_BE			0x00000008
+#define WME_CAPINFO_UAPSD_ACFLAGS_SHIFT		0
+#define WME_CAPINFO_UAPSD_ACFLAGS_MASK		0xF
+#define WME_CAPINFO_UAPSD_MAXSP_SHIFT		5
+#define WME_CAPINFO_UAPSD_MAXSP_MASK		0x3
+#define WME_CAPINFO_IE_OFFSET			8
+#define WME_UAPSD_MAXSP(_qosinfo) (((_qosinfo) >> WME_CAPINFO_UAPSD_MAXSP_SHIFT) & WME_CAPINFO_UAPSD_MAXSP_MASK)
+#define WME_UAPSD_AC_ENABLED(_ac, _qosinfo) ( (1<<(3 - (_ac))) &   \
+		(((_qosinfo) >> WME_CAPINFO_UAPSD_ACFLAGS_SHIFT) & WME_CAPINFO_UAPSD_ACFLAGS_MASK) )
+
+struct ieee80211_extcap_param {
+	u_int8_t param_id;
+	u_int8_t param_len;
+	u_int8_t ext_cap[8];
+} __packed;
+
+
+#define IEEE8021P_PRIORITY_NUM			8
+#define IEEE80211_DSCP_MAX_EXCEPTIONS		21
+#define IP_DSCP_NUM				64
+
+/* byte 0 */
+#define IEEE80211_EXTCAP_20_40_COEXISTENCE      0x1
+/* byte 3 */
+#define IEEE80211_EXTCAP_BTM			0x08
+/* byte 7 */
+#define IEEE80211_EXTCAP_OPMODE_NOTIFICATION	0x40
+#define IEEE80211_EXTCAP_MAX_MSDU_IN_AMSDU	0xC0
+#define IEEE80211_EXTCAP_MAX_MSDU_IN_AMSDU_S	6
+
+/*
+ * 20/40 MHZ BSS coexistence information element.
+ */
+struct ieee80211_20_40_coex_param {
+	u_int8_t param_id;
+	u_int8_t param_len;
+	u_int8_t coex_param;
+} __packed;
+
+#define WLAN_20_40_BSS_COEX_INFO_REQ            BIT(0)
+#define WLAN_20_40_BSS_COEX_40MHZ_INTOL         BIT(1)
+#define WLAN_20_40_BSS_COEX_20MHZ_WIDTH_REQ     BIT(2)
+#define WLAN_20_40_BSS_COEX_OBSS_EXEMPT_REQ     BIT(3)
+#define WLAN_20_40_BSS_COEX_OBSS_EXEMPT_GRNT    BIT(4)
+
+/*
+ * 20/40 MHZ BSS intolerant channel report information element.
+ */
+struct ieee80211_20_40_in_ch_rep {
+	u_int8_t param_id;
+	u_int8_t param_len;
+	u_int8_t reg;
+	u_int8_t chan[0];
+} __packed;
+
+/*
+ * Overlapping BSS Scan Parameter information element.
+ */
+struct ieee80211_obss_scan_ie {
+	u_int8_t param_id;
+	u_int8_t param_len;
+	u_int16_t obss_passive_dwell;
+	u_int16_t obss_active_dwell;
+	u_int16_t obss_trigger_interval;
+	u_int16_t obss_passive_total;
+	u_int16_t obss_active_total;
+	u_int16_t obss_channel_width_delay;
+	u_int16_t obss_activity_threshold;
+} __packed;
+
+/*
+ * Atheros Advanced Capability information element.
+ */
+struct ieee80211_ie_athAdvCap {
+	uint8_t athAdvCap_id;		/* IEEE80211_ELEMID_VENDOR */
+	uint8_t athAdvCap_len;		/* length in bytes */
+	uint8_t athAdvCap_oui[3];	/* 0x00, 0x03, 0x7f */
+	uint8_t athAdvCap_type;		/* OUI type */
+	uint8_t athAdvCap_subtype;	/* OUI subtype */
+	uint8_t athAdvCap_version;	/* spec revision */
+	uint8_t athAdvCap_capability;	/* Capability info */
+	uint16_t athAdvCap_defKeyIndex;
+} __packed;
+
+/*
+ * Atheros XR information element.
+ */
+struct ieee80211_xr_param {
+	uint8_t param_id;
+	uint8_t param_len;
+	uint8_t param_oui[3];
+	uint8_t param_oui_type;
+	uint8_t param_oui_sybtype;
+	uint8_t param_version;
+	uint8_t param_Info;
+	uint8_t param_base_bssid[IEEE80211_ADDR_LEN];
+	uint8_t param_xr_bssid[IEEE80211_ADDR_LEN];
+	uint16_t param_xr_beacon_interval;
+	uint8_t param_base_ath_capability;
+	uint8_t param_xr_ath_capability;
+} __packed;
+
+/* Atheros capabilities */
+#define IEEE80211_ATHC_TURBOP	0x0001		/* Turbo Prime */
+#define IEEE80211_ATHC_COMP	0x0002		/* Compression */
+#define IEEE80211_ATHC_FF	0x0004		/* Fast Frames */
+#define IEEE80211_ATHC_XR	0x0008		/* Xtended Range support */
+#define IEEE80211_ATHC_AR	0x0010		/* Advanced Radar support */
+#define IEEE80211_ATHC_BURST	0x0020		/* Bursting - not negotiated */
+#define IEEE80211_ATHC_WME	0x0040		/* CWMin tuning */
+#define IEEE80211_ATHC_BOOST	0x0080		/* Boost */
+
+/*
+ * Quantenna Flags information element.
+ * Fields up to qtn_ie_implicit_ba_tid are backwards-compatible with Envy images.
+ */
+struct ieee80211_ie_qtn {
+	uint8_t qtn_ie_id;		/* IEEE80211_ELEMID_VENDOR */
+	uint8_t qtn_ie_len;		/* length in bytes */
+	uint8_t qtn_ie_oui[3];		/* QTN_OUI - 0x00, 0x26, 0x86 */
+	uint8_t qtn_ie_type;		/* IE type */
+	uint8_t qtn_ie_flags;		/* See below */
+
+	/* V2 fields */
+	uint8_t qtn_ie_implicit_ba_tid;/* Implicit block ACKs, set up directly after assoc */
+	uint8_t qtn_ie_my_flags;	/* See below */
+
+	/* V3 fields */
+	/* Implicit block ACK with variable size - overrides v2 implicit BA field. */
+	uint8_t qtn_ie_implicit_ba_tid_h;
+	uint8_t qtn_ie_implicit_ba_size; /* Size of implicit BA >> 2 */
+
+	/* V4 fields */
+	uint8_t qtn_ie_vsp_version;	/* VSP version */
+
+	/* V5 fields */
+	uint32_t qtn_ie_ver_sw;
+	uint16_t qtn_ie_ver_hw;
+	uint16_t qtn_ie_ver_platform_id;
+	uint32_t qtn_ie_ver_timestamp;
+	uint32_t qtn_ie_ver_flags;
+	uint32_t qtn_ie_rate_train;
+} __packed;
+
+#ifdef CONFIG_QVSP
+/*
+ * Quantenna WME information element.
+ */
+struct ieee80211_ie_qtn_wme {
+	uint8_t qtn_ie_id;		/* IEEE80211_ELEMID_VENDOR */
+	uint8_t qtn_ie_len;		/* length in bytes */
+	uint8_t qtn_ie_oui[3];		/* QTN_OUI - 0x00, 0x26, 0x86 */
+	uint8_t qtn_ie_type;		/* IE type */
+	uint8_t qtn_wme_ie_version;
+	struct ieee80211_wme_param qtn_wme_ie;
+} __packed;
+#endif
+
+#define QTN_PAIRING_TLV_HASH_LEN 32
+/*
+ * QTN Pairing TLV element.
+ *  Format:
+ *   Type(1byte)   |   len(2bytes)    |  SHA-256 hash(32bytes)
+ *      0x1        |        35        |     SHA-256 hash material of pairing
+ */
+struct ieee80211_ie_qtn_pairing_tlv {
+	uint8_t qtn_pairing_tlv_type;
+	uint16_t qtn_pairing_tlv_len;
+	uint8_t qtn_pairing_tlv_hash[QTN_PAIRING_TLV_HASH_LEN];
+} __packed;
+
+/*
+ * QTN Pairing IE
+ *  Format:
+ *  IE ID(1byte)    |     IE len(1byte)    |     IE OUI(3bytes)    | IE content(pairing)
+ *     0xdd         |       38             |        00 26 86       |     Pairing TLV
+ *
+ */
+struct ieee80211_ie_qtn_pairing {
+	uint8_t qtn_pairing_ie_id;
+	uint8_t qtn_pairing_ie_len;
+	uint8_t qtn_pairing_ie_oui[3];
+	struct ieee80211_ie_qtn_pairing_tlv qtn_pairing_tlv;
+} __packed;
+
+#define IEEE80211_QTN_IE_BA_SIZE_SH 2
+
+enum ieee80211_vsp_version {
+	IEEE80211_QTN_VSP_V_NONE,
+	IEEE80211_QTN_VSP_V1,
+};
+
+#ifdef CONFIG_QVSP
+
+/* Disable Station side control for QTM-Lite */
+#define IEEE80211_QTN_VSP_VERSION	IEEE80211_QTN_VSP_V_NONE
+struct ieee80211_ie_vsp_item {
+	uint8_t	index;
+	uint32_t	value;
+} __packed;
+
+struct ieee80211_ie_vsp {
+	uint8_t	id;
+	uint8_t	len;
+	uint8_t	oui[3];
+	uint8_t	type;
+	uint8_t	item_cnt;
+	struct ieee80211_ie_vsp_item item[0];
+} __packed;
+
+#else /* not CONFIG_QVSP */
+
+#define IEEE80211_QTN_VSP_VERSION	IEEE80211_QTN_VSP_V_NONE
+
+#endif /* CONFIG_QVSP */
+
+#define IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie) \
+	((qtnie)->qtn_ie_len <= (&(qtnie)->qtn_ie_my_flags - &(qtnie)->qtn_ie_oui[0]))
+#define IEEE80211_QTN_TYPE_ENVY(qtnie) \
+	((IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie)) || \
+	 ((qtnie)->qtn_ie_my_flags & IEEE80211_QTN_ENVY))
+
+#define IEEE80211_QTN_FLAGS_ENVY	(IEEE80211_QTN_BRIDGEMODE | IEEE80211_QTN_BF_VER1)
+#define IEEE80211_QTN_FLAGS_ENVY_DFLT	IEEE80211_QTN_BF_VER1
+#define IEEE80211_QTN_CAPS_DFLT		IEEE80211_QTN_BF_VER2 | IEEE80211_QTN_BF_VER3 | \
+					IEEE80211_QTN_BF_VER4 | IEEE80211_QTN_TX_AMSDU
+/*
+ * These flags are used in the following two fields.
+ * - qtn_ie_flags contains the sender's settings, except in an association response, where
+ *   it contains confirmation of the settings received from the peer station.  These flags
+ *   must remain backwards-compatible with Envy images.
+ * - qtn_ie_my_flags always contains the sender's settings.  It is not sent by Envy systems.
+ */
+#define IEEE80211_QTN_BRIDGEMODE	0x01		/* Use 4-addr headers */
+#define IEEE80211_QTN_BF_VER1		0x02		/* Envy beamforming */
+#define IEEE80211_QTN_BF_VER2		0x04		/* Ruby 2 stream beamforming */
+#define IEEE80211_QTN_LNCB		0x08		/* Multicast packets in the local network
+							 * control block are 4 address encapsulated.
+							 */
+#define IEEE80211_QTN_BF_VER3		0x10		/* Ruby 4 stream non-standard beamforming */
+#define IEEE80211_QTN_ENVY		0x20		/* Envy with 'my flags' field in the IE. */
+#define IEEE80211_QTN_BF_VER4		0x40		/* 4 strm standard bf with tone grouping */
+#define IEEE80211_QTN_TX_AMSDU		0x80		/* Ruby TX AMSDU */
+
+#define IEEE80211_QTN_IE_GE_V2(_qtnie)	((_qtnie->qtn_ie_len + IEEE80211_IE_ID_LEN_SIZE) >	\
+					offsetof(struct ieee80211_ie_qtn, qtn_ie_my_flags))
+#define IEEE80211_QTN_IE_GE_V3(_qtnie)	((_qtnie->qtn_ie_len + IEEE80211_IE_ID_LEN_SIZE) >	\
+					offsetof(struct ieee80211_ie_qtn, qtn_ie_implicit_ba_size))
+#define IEEE80211_QTN_IE_GE_V4(_qtnie)	((_qtnie->qtn_ie_len + IEEE80211_IE_ID_LEN_SIZE) >	\
+					offsetof(struct ieee80211_ie_qtn, qtn_ie_vsp_version))
+#define IEEE80211_QTN_IE_GE_V5(_qtnie)	((_qtnie->qtn_ie_len + IEEE80211_IE_ID_LEN_SIZE) >	\
+					offsetof(struct ieee80211_ie_qtn, qtn_ie_rate_train))
+
+/* Quantenna TDLS Discovery Response clients information element */
+struct ieee80211_ie_qtn_tdls_clients {
+	uint8_t        qtn_ie_id;              /* IEEE80211_ELEMID_VENDOR */
+	uint8_t        qtn_ie_len;
+	uint8_t        qtn_ie_oui[3];          /* 0x00, 0x26, 0x86 */
+	uint8_t        qtn_ie_type;            /* IEEE_QTN_IE_TYPE_TDLS_CLIENTS */
+	uint8_t        qtn_ie_mac_cnt;         /* Number of downstream MAC addresses */
+#define IEEE80211_QTN_IE_DOWNSTREAM_MAC_MAX    16
+	uint8_t        qtn_ie_mac[0];          /* Start of downstream MAC addresses */
+} __packed;
+
+/*
+ * Management Notification Frame
+ */
+struct ieee80211_mnf {
+	uint8_t mnf_category;
+	uint8_t mnf_action;
+	uint8_t mnf_dialog;
+	uint8_t mnf_status;
+} __packed;
+#define	MNF_SETUP_REQ	0
+#define	MNF_SETUP_RESP	1
+#define	MNF_TEARDOWN	2
+
+/*
+ * Management Action Frames
+ */
+
+/* generic frame format */
+struct ieee80211_action {
+	uint8_t	ia_category;
+	uint8_t	ia_action;
+} __packed;
+
+/* categories */
+#define IEEE80211_ACTION_CAT_SPEC_MGMT		0	/* Spectrum MGMT */
+#define IEEE80211_ACTION_CAT_QOS		1	/* qos */
+#define IEEE80211_ACTION_CAT_DLS		2	/* dls */
+#define IEEE80211_ACTION_CAT_BA			3	/* block ack */
+#define IEEE80211_ACTION_CAT_PUBLIC		4	/* Public */
+#define IEEE80211_ACTION_CAT_RM			5	/* Radio measurement */
+#define IEEE80211_ACTION_CAT_FBSS		6	/* Fast BSS */
+#define IEEE80211_ACTION_CAT_HT			7	/* HT */
+#define IEEE80211_ACTION_CAT_SA_QUERY		8	/* SA Query */
+#define IEEE80211_ACTION_CAT_PROT_DUAL_PA	9	/* Protected Dual of Public Action */
+#define IEEE80211_ACTION_CAT_WNM		10	/* WNM */
+#define IEEE80211_ACTION_CAT_UNPROT_WNM		11	/* Unprotected WNM */
+#define IEEE80211_ACTION_CAT_TDLS		12	/* TDLS */
+#define IEEE80211_ACTION_CAT_MESH		13	/* Mesh */
+#define IEEE80211_ACTION_CAT_MULTIHOP		14	/* Multihop */
+#define IEEE80211_ACTION_CAT_SELF_PROT		15	/* self protected */
+
+#define IEEE80211_ACTION_CAT_VHT		21	/* VHT */
+#define IEEE80211_ACTION_CAT_VEND_PROT	126	/* Protected Vendor specific Action frame */
+#define IEEE80211_ACTION_CAT_VENDOR		0x7F	/* Vendor specific Action frame */
+
+/* Public Action Frames (7.4.7.1) */
+#define IEEE80211_ACTION_PUB_GAS_IREQ		10  /* GAS Service Initial Request */
+#define IEEE80211_ACTION_PUB_GAS_IRESP		11  /* GAS Service Initial Response */
+#define IEEE80211_ACTION_PUB_GAS_CREQ		12  /* GAS Comeback Request */
+#define IEEE80211_ACTION_PUB_GAS_CRESP		13  /* GAS Comeback Response */
+#define IEEE80211_ACTION_PUB_TDLS_DISC_RESP	14  /* TDLS Discovery Response */
+#define IEEE80211_ACTION_PUB_20_40_COEX         0  /* 20/40 coex */
+
+static __inline__ int ieee80211_action_is_a_gas(const struct ieee80211_action *ia)
+{
+	return (ia->ia_category == IEEE80211_ACTION_CAT_PUBLIC) &&
+		(ia->ia_action >= IEEE80211_ACTION_PUB_GAS_IREQ) &&
+		(ia->ia_action <= IEEE80211_ACTION_PUB_GAS_CRESP);
+}
+
+/* TDLS Action Frame details (7.4.11) */
+#define IEEE80211_ACTION_TDLS_SETUP_REQ        0   /* Setup Request */
+#define IEEE80211_ACTION_TDLS_SETUP_RESP       1   /* Setup Response */
+#define IEEE80211_ACTION_TDLS_SETUP_CONFIRM    2   /* Setup Confirm */
+#define IEEE80211_ACTION_TDLS_TEARDOWN         3   /* Teardown */
+#define IEEE80211_ACTION_TDLS_PTI              4   /* Peer Traffic Indication */
+#define IEEE80211_ACTION_TDLS_CS_REQ           5   /* Channel Switch Request */
+#define IEEE80211_ACTION_TDLS_CS_RESP          6   /* Channel Switch Response */
+#define IEEE80211_ACTION_TDLS_PEER_PSM_REQ     7   /* Peer PSM Request */
+#define IEEE80211_ACTION_TDLS_PEER_PSM_RESP    8   /* Peer PSM Response */
+#define IEEE80211_ACTION_TDLS_PEER_TRAF_RESP   9   /* Peer Traffic Response */
+#define IEEE80211_ACTION_TDLS_DISC_REQ         10  /* Discovery Request */
+
+struct ieee80211_ie_power_capability {
+	uint8_t	id;
+	uint8_t	len;
+	uint8_t	min_txpwr;
+	uint8_t	max_txpwr;
+} __packed;
+
+struct ieee80211_ie_tpc_report {
+	uint8_t	id;
+	uint8_t	len;
+	uint8_t tran_power;
+	uint8_t link_margin;
+} __packed;
+
+#define IEEE80211_CCA_REQMODE_PARALLEL	(1 << 0)
+#define IEEE80211_CCA_REQMODE_ENABLE	(1 << 1)
+#define IEEE80211_CCA_REQMODE_REQUEST	(1 << 2)
+#define IEEE80211_CCA_REQMODE_REPORT	(1 << 3)
+#define IEEE80211_CCA_REQMODE_DURA_MAN	(1 << 4)
+
+#define IEEE80211_CCA_REPMODE_LATE	(1 << 0)
+#define IEEE80211_CCA_REPMODE_INCAP	(1 << 1)
+#define IEEE80211_CCA_REPMODE_REFUSE	(1 << 2)
+
+/* Spectrum Management */
+#define IEEE80211_CCA_MEASTYPE_BASIC	0x00	/* Basic Request */
+#define IEEE80211_CCA_MEASTYPE_CCA	0x01	/* Clear Channel Assessment Request */
+#define IEEE80211_CCA_MEASTYPE_RPI	0x02	/* Receiver Power Indicator (RPI) histogram Request */
+/* Radio Measurement */
+#define IEEE80211_RM_MEASTYPE_CH_LOAD	0x03	/* Channel Load Request */
+#define IEEE80211_RM_MEASTYPE_NOISE	0x04	/* Noise histogram Request */
+#define IEEE80211_RM_MEASTYPE_BEACON	0x05	/* Beacon Request */
+#define IEEE80211_RM_MEASTYPE_FRAME	0x06	/* Frame Request */
+#define IEEE80211_RM_MEASTYPE_STA	0x07	/* STA statistics Request */
+#define IEEE80211_RM_MEASTYPE_LCI	0x08	/* LCI Request */
+#define IEEE80211_RM_MEASTYPE_CATEGORY	0x09	/* Transmit stream/Category Request */
+#define IEEE80211_RM_MEASTYPE_MUL_DIAG	0x0A	/* Multicast diagnostics request */
+#define IEEE80211_RM_MEASTYPE_LOC_CIVIC	0x0B	/* Location Civic request */
+#define IEEE80211_RM_MEASTYPE_LOC_ID	0x0C	/* Location Identifier request */
+#define IEEE80211_RM_MEASTYPE_QTN_CCA	0xFE	/* QTN CCA extension */
+#define IEEE80211_RM_MEASTYPE_PAUSE	0xFF	/* Measurement Pause Request */
+
+/* for Radio Measurement Actions. Table 7-57a in 802.11k $7.4.6 */
+#define IEEE80211_ACTION_R_MEASUREMENT_REQUEST	0
+#define IEEE80211_ACTION_R_MEASUREMENT_REPORT	1
+#define IEEE80211_ACTION_R_LINKMEASURE_REQUEST	2
+#define IEEE80211_ACTION_R_LINKMEASURE_REPORT	3
+#define IEEE80211_ACTION_R_NEIGHBOR_REQUEST	4
+#define IEEE80211_ACTION_R_NEIGHBOR_REPORT	5
+
+struct ieee80211_action_sm_measurement_header {
+	uint8_t	ia_category;
+	uint8_t	ia_action;
+	uint8_t	am_token;
+	uint8_t	am_data[0];
+}__packed;
+
+/* RM measurement capability bits */
+/* byte 0 */
+#define IEEE80211_RM_LINK_REPORT_CAP		0x01
+#define IEEE80211_RM_NEIGH_REPORT_CAP		0x02
+#define	IEEE80211_RM_BEACON_PASSIVE_REPORT_CAP	0x10
+#define	IEEE80211_RM_BEACON_ACTIVE_REPORT_CAP	0x20
+#define	IEEE80211_RM_BEACON_TABLE_REPORT_CAP	0x40
+/* RRM - radio resource measurement enabled capabilities element */
+struct ieee80211_ie_rrm {
+	uint8_t id;
+	uint8_t len;
+	uint8_t cap[5];
+}__packed;
+
+/* RM - radio measurement request */
+struct ieee80211_action_radio_measure_request {
+	struct ieee80211_action	am_header;
+	uint8_t	am_token;
+	uint16_t	am_rep_num;
+	uint8_t	am_data[0];
+}__packed;
+
+/* RM - radio measurement report */
+struct ieee80211_action_radio_measure_report {
+	struct ieee80211_action	am_header;
+	uint8_t	am_token;
+	uint8_t	am_data[0];
+}__packed;
+
+/*
+ * 802.11h measurement request/report element
+ * 802.11k measurement request/report element
+ * common part
+ */
+struct ieee80211_ie_measure_comm {
+	uint8_t id;		/* IEEE80211_ELEMID_MEASREQ = 38 */
+	uint8_t len;		/* 14 for known types */
+	uint8_t token;	/* Non-zero number for diff. measurement reqs. */
+	uint8_t mode;	/* bits: 1 enable, 2 req, 3 report, 0,4-7 reserved */
+	uint8_t type;	/* basic = 0, cca = 1, rpi histogram = 2 */
+	uint8_t data[0];	/* variable format according to meas_type */
+} __packed;
+
+struct ieee80211_ie_measreq {
+	uint8_t chan_num;	/* channel number */
+	uint64_t start_tsf;	/* starting time in tsf */
+	uint16_t duration_tu;	/* measurement duration in TU */
+} __packed;
+
+/*
+ * 802.11k measurement request element of sta statistics
+ * for PM module collect sta statistics
+ * See 802.11k 2003 7.3.2.21.8
+ */
+struct ieee80211_ie_measreq_sta_stat {
+	uint8_t peer_mac[IEEE80211_ADDR_LEN];	/* Peer Mac Address */
+	uint16_t random_interval;	/* randomization interval */
+	uint16_t duration_tu;	/* measurement duration in TU */
+	uint8_t group_id;		/*	group identity	*/
+	uint8_t data[0];	/*	Optional sub-elements in variable length	*/
+} __packed;
+
+struct ieee80211_ie_measreq_chan_load {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint16_t random_interval_tu;
+	uint16_t duration_tu;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measreq_noise_his {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint16_t random_interval_tu;
+	uint16_t duration_tu;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measreq_beacon {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint16_t random_interval_tu;
+	uint16_t duration_tu;
+	uint8_t measure_mode;
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measreq_frame {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint16_t random_interval_tu;
+	uint16_t duration_tu;
+	uint8_t frame_request_type;
+#define FRAME_COUNT_REPORT	1
+
+	uint8_t mac_addr[IEEE80211_ADDR_LEN];
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measreq_trans_stream_cat {
+	uint16_t random_interval_tu;
+	uint16_t duration_tu;
+	uint8_t peer_sta_addr[IEEE80211_ADDR_LEN];
+	uint8_t tid;
+	uint8_t bin0_range;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measreq_multicast_diag {
+	uint16_t random_interval_tu;
+	uint16_t duration_tu;
+	uint8_t group_mac_addr[IEEE80211_ADDR_LEN];
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_subie_multicast_triggered_reporting {
+	uint8_t sub_id;
+	uint8_t len;
+	uint8_t condition;
+	uint8_t inactivity_timeout;
+	uint8_t reactivation_delay;
+} __packed;
+
+struct ieee80211_action_rm_link_measure_request {
+	struct ieee80211_action	at_header;
+	uint8_t token;
+	uint8_t tran_power_used;
+	uint8_t max_tran_power;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_action_rm_neighbor_report_request {
+	struct ieee80211_action	at_header;
+	uint8_t token;
+	uint8_t data[0];
+} __packed;
+
+/*
+ * 802.11h measurement report element
+ * see 8.4.2.24 IEEE 802.11-2012
+ */
+struct ieee80211_ie_measrep_basic {
+	uint8_t	chan_num;	/* channel number */
+	uint64_t start_tsf;	/* starting time in tsf */
+	uint16_t duration_tu;	/* measurement duration in TU */
+	uint8_t	basic_report;	/* basic report data */
+} __packed;
+
+struct ieee80211_ie_measrep_cca {
+	uint8_t	chan_num;	/* channel number */
+	uint64_t start_tsf;	/* starting time in tsf */
+	uint16_t duration_tu;	/* measurement duration in TU */
+	uint8_t	cca_report;	/* cca report data */
+#define IEEE80211_MEASURE_BASIC_REPORT_BSS		(1 << 0)
+#define IEEE80211_MEASURE_BASIC_REPORT_OFDM_PRE		(1 << 1)
+#define IEEE80211_MEASURE_BASIC_REPORT_UNDEF		(1 << 2)
+#define IEEE80211_MEASURE_BASIC_REPORT_RADAR		(1 << 3)
+#define IEEE80211_MEASURE_BASIC_REPORT_UMMEASURE	(1 << 4)
+} __packed;
+
+struct ieee80211_ie_measrep_rpi {
+	uint8_t	chan_num;	/* channel number */
+	uint64_t start_tsf;	/* starting time in tsf */
+	uint16_t duration_tu;	/* measurement duration in TU */
+	uint8_t	rpi_report[8];	/* rpi report data */
+} __packed;
+
+/*
+ * 802.11k measurement report element of sta statistics
+ * for PM module collect sta statistics
+ * See 802.11k 2003 7.3.2.22.8
+ */
+struct ieee80211_ie_measrep_sta_stat {
+	uint16_t duration_tu;	/* measurement duration in TU */
+	uint8_t group_id;		/*	group identity	*/
+	uint8_t data[0];	/*	Optional sub-elements in variable length	*/
+} __packed;
+
+#define IEEE80211_RM_MEAS_SUBTYPE_LEN_MIN	2
+
+/* Quantenna RM group ie, to complement an 802.11k group ie. Node statistics & parameters */
+struct ieee80211_ie_qtn_rm_measure_sta {
+	uint8_t id;	/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;   /* length in bytes */
+	uint8_t qtn_ie_oui[3];		/* QTN_OUI - 0x00, 0x26, 0x86 */
+	uint8_t seq;	/* sequence */
+	uint8_t type;				/* Which group (special or all) is contained in the data. */
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_qtn_rm_txstats {
+	uint64_t tx_bytes;
+	uint32_t tx_pkts;
+	uint32_t tx_discard;
+	/**
+	 * The number of dropped data packets failed to transmit through
+	 * wireless media for each traffic category(TC).
+	 */
+	uint32_t tx_wifi_drop[WME_AC_NUM];
+	uint32_t tx_err;
+	uint32_t tx_ucast;		/* unicast */
+	uint32_t tx_mcast;		/* multicast */
+	uint32_t tx_bcast;		/* broadcast */
+} __packed;
+
+struct ieee80211_ie_qtn_rm_rxstats {
+	uint64_t rx_bytes;
+	uint32_t rx_pkts;
+	uint32_t rx_discard;
+	uint32_t rx_err;
+	uint32_t rx_ucast;		/* unicast */
+	uint32_t rx_mcast;		/* multicast */
+	uint32_t rx_bcast;		/* broadcast */
+} __packed;
+
+struct ieee80211_ie_qtn_rm_sta_all {
+	struct ieee80211_ie_qtn_rm_txstats tx_stats;
+	struct ieee80211_ie_qtn_rm_rxstats rx_stats;
+	u_int32_t max_queued;
+	u_int32_t link_quality;
+	u_int32_t rssi_dbm;
+	u_int32_t bandwidth;
+	u_int32_t snr;
+	u_int32_t tx_phy_rate;
+	u_int32_t rx_phy_rate;
+	u_int32_t cca;	/* Reserved for cca */
+	u_int32_t br_ip;
+	u_int32_t rssi;
+	u_int32_t hw_noise;
+	u_int8_t soc_macaddr[IEEE80211_ADDR_LEN];
+	u_int32_t soc_ipaddr;
+} __packed;
+
+/*
+ * Statistics Group data format for STB
+ */
+struct ieee80211_ie_rm_sta_grp221 {
+	uint8_t soc_macaddr[IEEE80211_ADDR_LEN];
+	uint8_t rssi;
+	uint8_t phy_noise;
+} __packed;
+
+/* dot11Counters Group */
+struct ieee80211_rm_sta_stats_group0 {
+	uint32_t dot11TransmittedFragmentCount;
+	uint32_t dot11MulticastTransmittedFrameCount;
+	uint32_t dot11FailedCount;
+	uint32_t dot11ReceivedFragmentCount;
+	uint32_t dot11MulticastReceivedFrameCount;
+	uint32_t dot11FCSErrorCount;
+	uint32_t dot11TransmittedFrameCount;
+} __packed;
+
+/* dot11MACStatistics Group */
+struct ieee80211_rm_sta_stats_group1 {
+	uint32_t dot11RetryCount;
+	uint32_t dot11MultipleRetryCount;
+	uint32_t dot11FrameDuplicateCount;
+	uint32_t dot11RTSSuccessCount;
+	uint32_t dot11RTSFailureCount;
+	uint32_t dot11ACKFailureCount;
+} __packed;
+
+/* dot11QosCounters Group for UP0-UP7 */
+struct ieee80211_rm_sta_stats_group2to9 {
+	uint32_t dot11QosTransmittedFragmentCount;
+	uint32_t dot11QosFailedCount;
+	uint32_t dot11QosRetryCount;
+	uint32_t dot11QosMultipleRetryCount;
+	uint32_t dot11QosFrameDuplicateCount;
+	uint32_t dot11QosRTSSuccessCount;
+	uint32_t dot11QosRTSFailureCount;
+	uint32_t dot11QosACKFailureCount;
+	uint32_t dot11QosReceivedFragmentCount;
+	uint32_t dot11QosTransmittedFrameCount;
+	uint32_t dot11QosDiscardedFrameCount;
+	uint32_t dot11QosMPDUsReceivedCount;
+	uint32_t dot11QosRetriesReceivedCount;
+} __packed;
+
+/* dot11BSSAverageAccessDelay Group (only available at an AP) */
+struct ieee80211_rm_sta_stats_group10 {
+	uint32_t dot11STAStatisticsAPAverageAccessDelay;
+	uint32_t dot11STAStatisticsAverageAccessDelayBestEffort;
+	uint32_t dot11STAStatisticsAverageAccessDelayBackGround;
+	uint32_t dot11STAStatisticsAverageAccessDelayVideo;
+	uint32_t dot11STAStatisticsAverageAccessDelayVoice;
+	uint32_t dot11STAStatisticsStationCount;
+	uint32_t dot11STAStatisticsChannelUtilization;
+} __packed;
+
+struct ieee80211_rm_sta_stats_group11 {
+	uint32_t dot11TransmittedAMSDUCount;
+	uint32_t dot11FailedAMSDUCount;
+	uint32_t dot11RetryAMSDUCount;
+	uint32_t dot11MultipleRetryAMSDUCount;
+	uint32_t dot11TransmittedOctetsInAMSDUCount;
+	uint32_t dot11AMSDUAckFailureCounnt;
+	uint32_t dot11ReceivedAMSDUCount;
+	uint32_t dot11ReceivedOctetsInAMSDUCount;
+} __packed;
+
+struct ieee80211_rm_sta_stats_group12 {
+	uint32_t dot11TransmittedAMPDUCount;
+	uint32_t dot11TransmittedMPDUsInAMPDUCount;
+	uint64_t dot11TransmittedOctetsInAMPDUCount;
+	uint32_t dot11AMPDUReceivedCount;
+	uint32_t dot11MPDUInReceivedAMPDUCount;
+	uint64_t dot11ReceivedOctetsInAMPDUCount;
+	uint32_t dot11AMPDUDelimiterCRCErrorCount;
+} __packed;
+
+struct ieee80211_rm_sta_stats_group13 {
+	uint32_t dot11ImplicitBARFailureCount;
+	uint32_t dot11ExplicitBARFailureCount;
+	uint32_t dot11ChannelWidthSwitchCount;
+	uint32_t dot11TwentyMHzFrameTransmittedCount;
+	uint32_t dot11FortyMHzFrameTransmittedCount;
+	uint32_t dot11TwentyMHzFrameReceivedCount;
+	uint32_t dot11FortyMHzFrameReceivedCount;
+	uint32_t dot11PSMPUTTGrantDuration;
+	uint32_t dot11PSMPUTTUsedDuration;
+} __packed;
+
+struct ieee80211_rm_sta_stats_group14 {
+	uint32_t dot11GrantedRDGUsedCount;
+	uint32_t dot11GrantedRDGUnusedCount;
+	uint32_t dot11TransmittedFramesInGrantedRDGCount;
+	uint64_t dot11TransmittedOctetsInGrantedRDGCount;
+	uint32_t dot11DualCTSSuccessCount;
+	uint32_t dot11DualCTSFailureCount;
+	uint32_t dot11RTSLSIGSuccessCount;
+	uint32_t dot11RTSLSIGFailureCount;
+} __packed;
+
+struct ieee80211_rm_sta_stats_group15 {
+	uint32_t dot11BeamformingFrameCount;
+	uint32_t dot11STBCCTSSuccessCount;
+	uint32_t dot11STBCCTSFailureCount;
+	uint32_t dot11nonSTBCCTSSuccessCount;
+	uint32_t dot11nonSTBCCTSFailureCount;
+} __packed;
+
+struct ieee80211_rm_sta_stats_group16 {
+	uint32_t dot11RSNAStatsCMACICVErrors;
+	uint32_t dot11RSNAStatsCMACReplays;
+	uint32_t dot11RSNAStatsRobustMgmtCCMPReplays;
+	uint32_t dot11RSNAStatsTKIPICVErrors;
+	uint32_t dot11RSNAStatsTKIPReplays;
+	uint32_t dot11RSNAStatsCCMPDecryptErrors;
+	uint32_t dot11RSNAStatsCCMPReplays;
+} __packed;
+
+/*
+ * STA Statistics QTN specific
+ */
+enum RadioMeasureQTNElementID {
+	RM_QTN_TX_STATS			=	0,
+	RM_QTN_RX_STATS			=	1,
+	RM_QTN_MAX_QUEUED		=	2,
+	RM_QTN_LINK_QUALITY		=	3,
+	RM_QTN_RSSI_DBM			=	4,
+	RM_QTN_BANDWIDTH		=	5,
+	RM_QTN_SNR			=	6,
+	RM_QTN_TX_PHY_RATE		=	7,
+	RM_QTN_RX_PHY_RATE		=	8,
+	RM_QTN_CCA			=	9,
+	RM_QTN_BR_IP			=	10,
+	RM_QTN_RSSI			=	11,
+	RM_QTN_HW_NOISE			=	12,
+	RM_QTN_SOC_MACADDR		=	13,
+	RM_QTN_SOC_IPADDR		=	14,
+	RM_QTN_MAX			=	RM_QTN_SOC_IPADDR,
+	RM_QTN_UNKNOWN			=	15,
+	RM_QTN_CTRL_START		=	16,
+	RM_QTN_RESET_CNTS		=	16,
+	RM_QTN_RESET_QUEUED		=	17,
+	RM_QTN_CTRL_END			=	17,
+};
+#define RM_QTN_MEASURE_MASK ((1 << (RM_QTN_CTRL_END + 1)) - 1)
+
+/*
+ * STA Statistic for Group221 specific
+ */
+enum RadioMeasureGrp221ElementID {
+	RM_GRP221_RSSI			=	(RM_QTN_CTRL_END + 1),
+	RM_GRP221_PHY_NOISE		=	(RM_QTN_CTRL_END + 2),
+	RM_GRP221_SOC_MAC		=	(RM_QTN_CTRL_END + 3),
+};
+
+extern const uint8_t ieee80211_meas_sta_qtn_report_subtype_len[RM_QTN_CTRL_END + 1];
+
+/* Standard CCA Flag to be used */
+#define RM_STANDARD_CCA 0x1009
+#define IEEE80211_11K_CCA_INTF_SCALE 255
+/*
+ * CCA radio measurement report field
+ */
+struct cca_rm_rep_data {
+	uint8_t ch_num;
+	uint8_t tm_start[8];
+	uint8_t m_duration[2];
+	uint8_t busy_frac;
+} __packed;
+
+/* CCA report IE */
+struct ieee80211_ie_rm_measure_cca_rep {
+	uint8_t id;
+	uint8_t len;
+	uint8_t rm_token;
+	uint8_t rm_rep_mode;
+	uint8_t rm_rep_type;
+	struct cca_rm_rep_data rep_data;
+	struct ieee80211_ie_qtn_scs scs_data;
+} __packed;
+
+struct ieee80211_ie_measrep_chan_load {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint8_t start_time[8];
+	uint16_t duration_tu;
+	uint8_t channel_load;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measrep_noise_his {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint8_t start_time[8];
+	uint16_t duration_tu;
+	uint8_t antenna_id;
+	uint8_t anpi;
+	uint8_t ipi[11];
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measrep_beacon {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint8_t start_time[8];
+	uint16_t duration_tu;
+	uint8_t reported_frame_info;
+	uint8_t rcpi;
+	uint8_t rsni;
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint8_t antenna_id;
+	uint8_t parent_tsf[4];
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measrep_frame {
+	uint8_t operating_class;
+	uint8_t channel_num;
+	uint8_t start_time[8];
+	uint16_t duration_tu;
+	uint8_t data[0];
+} __packed;
+
+#define IEEE80211_FRAME_REPORT_SUBELE_FRAME_COUNT_REPORT	1
+
+struct ieee80211_subie_section_frame_entry {
+	uint8_t id;
+	uint8_t len;
+	uint8_t transmit_address[IEEE80211_ADDR_LEN];
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint8_t phy_type;
+	uint8_t avg_rcpi;
+	uint8_t last_rsni;
+	uint8_t last_rcpi;
+	uint8_t anntenna_id;
+	uint16_t frame_cnt;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_measrep_trans_stream_cat {
+	uint8_t start_time[8];
+	uint16_t duration_tu;
+	uint8_t peer_sta_address[IEEE80211_ADDR_LEN];
+	uint8_t tid;
+	uint8_t reason;
+	uint32_t tran_msdu_cnt;
+	uint32_t msdu_discarded_cnt;
+	uint32_t msdu_failed_cnt;
+	uint32_t msdu_mul_retry_cnt;
+	uint32_t qos_cf_lost_cnt;
+	uint32_t avg_queue_delay;
+	uint32_t avg_trans_delay;
+	uint8_t bin0_range;
+	uint32_t bin0;
+	uint32_t bin1;
+	uint32_t bin2;
+	uint32_t bin3;
+	uint32_t bin4;
+	uint32_t bin5;
+} __packed;
+
+struct ieee80211_ie_measrep_multicast_diag {
+	uint8_t measure_time[8];
+	uint16_t duration_tu;
+	uint8_t group_mac_addr[IEEE80211_ADDR_LEN];
+	uint8_t reason;
+	uint32_t mul_rx_msdu_cnt;
+	uint16_t first_seq_num;
+	uint16_t last_seq_num;
+	uint16_t mul_rate;
+} __packed;
+
+struct ieee80211_action_rm_link_measure_report {
+	struct ieee80211_action at_header;
+	uint8_t token;
+	struct ieee80211_ie_tpc_report tpc_report;
+	uint8_t recv_antenna_id;
+	uint8_t tran_antenna_id;
+	uint8_t rcpi;
+	uint8_t rsni;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_action_rm_neighbor_report_response {
+	struct ieee80211_action at_header;
+	uint8_t token;
+	uint8_t data[0];
+} __packed;
+
+struct ieee80211_ie_neighbor_report {
+	uint8_t id;
+	uint8_t len;
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint32_t bssid_info;
+#define BSSID_INFO_AP_NOT_REACHABLE		(1 << 0)
+#define BSSID_INFO_AP_UNKNOWN			(2 << 0)
+#define BSSID_INFO_AP_REACHABLE			(3 << 0)
+#define BSSID_INFO_SECURITY_COPY		(1 << 2)
+#define BSSID_INFO_KEY_SCOPE_COPY		(1 << 3)
+#define BSSID_INFO_CAP_SPECTRUM_MANAGEMENT	(1 << 4)
+#define BSSID_INFO_CAP_QOS			(1 << 5)
+#define BSSID_INFO_CAP_APSD			(1 << 6)
+#define BSSID_INFO_CAP_RADIO_MEASUREMENT	(1 << 7)
+#define BSSID_INFO_CAP_DELAYED_BA		(1 << 8)
+#define BSSID_INFO_CAP_IMMEDIATE_BA		(1 << 9)
+#define BSSID_INFO_MOBILITY_DOMAIN		(1 << 10)
+#define BSSID_INFO_HIGH_THROUGHPUT		(1 << 11)
+#define BSSID_INFO_VERY_HIGH_THROUGHPUT		(1 << 12)
+	uint8_t operating_class;
+	uint8_t channel;
+	uint8_t phy_type;
+	uint8_t data[0];
+} __packed;
+
+/* HT actions */
+#define IEEE80211_ACTION_HT_TXCHWIDTH		0	/* recommended transmission channel width */
+#define IEEE80211_ACTION_HT_MIMOPWRSAVE		1	/* MIMO power save */
+#define IEEE80211_ACTION_HT_NCBEAMFORMING	5	/* HT non compressed beamforming report */
+#define IEEE80211_ACTION_HT_CBEAMFORMING	6	/* HT compressed beamforming report */
+
+/* VHT actions */
+#define IEEE80211_ACTION_VHT_CBEAMFORMING	0	/* VHT compressed beamforming report */
+#define IEEE80211_ACTION_VHT_MU_GRP_ID          1       /* VHT MU GRP ID mgmt */
+#define IEEE80211_ACTION_VHT_OPMODE_NOTIFICATION	2	/* VHT Operating mode Notification */
+
+/* HT - recommended transmission channel width */
+struct ieee80211_action_ht_txchwidth {
+	struct ieee80211_action		at_header;
+	u_int8_t			at_chwidth;
+} __packed;
+
+#define IEEE80211_A_HT_TXCHWIDTH_20	0
+#define IEEE80211_A_HT_TXCHWIDTH_2040	1
+
+
+/* HT - MIMO Power Save */
+struct ieee80211_action_ht_mimopowersave {
+	struct ieee80211_action		am_header;
+	uint8_t				am_enable_mode;
+} __packed;
+
+/* HT - Non compressed beam forming */
+
+struct ht_mimo_ctrl {
+	uint16_t			am_mimoctrl;
+	uint32_t			am_timestamp;
+} __packed;
+
+#define IEEE80211_HT_MIMO_CTRL_NC_M			0x0003
+#define IEEE80211_HT_MIMO_CTRL_NC_S			0
+#define IEEE80211_HT_MIMO_CTRL_NR_M			0x000C
+#define IEEE80211_HT_MIMO_CTRL_NR_S			2
+#define IEEE80211_HT_MIMO_CTRL_CH_WIDTH_20	0x0000
+#define IEEE80211_HT_MIMO_CTRL_CH_WIDTH_40	0x0010
+#define IEEE80211_HT_MIMO_CTRL_NG_M			0x0060
+#define IEEE80211_HT_MIMO_CTRL_NG_S			5
+#define IEEE80211_HT_MIMO_CTRL_NB_M		0x0180
+#define IEEE80211_HT_MIMO_CTRL_NB_S		7
+#define IEEE80211_HT_MIMO_CTRL_CODEBOOK_M	0x0600
+#define IEEE80211_HT_MIMO_CTRL_CODEBOOK_S	9
+#define IEEE80211_HT_MIMO_CTRL_SEG_M		0x3800
+#define IEEE80211_HT_MIMO_CTRL_SEG_S		11
+
+
+enum {
+	IEEE80211_HT_MIMO_CTRL_NC_1 = 0,
+	IEEE80211_HT_MIMO_CTRL_NC_2,
+	IEEE80211_HT_MIMO_CTRL_NC_3,
+	IEEE80211_HT_MIMO_CTRL_NC_4,
+};
+
+enum {
+	IEEE80211_HT_MIMO_CTRL_NR_1 = 0,
+	IEEE80211_HT_MIMO_CTRL_NR_2,
+	IEEE80211_HT_MIMO_CTRL_NR_3,
+	IEEE80211_HT_MIMO_CTRL_NR_4,
+};
+
+enum {
+	IEEE80211_HT_MIMO_CTRL_NG_NONE = 0,
+	IEEE80211_HT_MIMO_CTRL_NG_2,
+	IEEE80211_HT_MIMO_CTRL_NG_4,
+	IEEE80211_HT_MIMO_CTRL_NG_RESERVED,
+};
+
+enum {
+	IEEE80211_HT_MIMO_CTRL_NB_4 = 0,
+	IEEE80211_HT_MIMO_CTRL_NB_2,
+	IEEE80211_HT_MIMO_CTRL_NB_6,
+	IEEE80211_HT_MIMO_CTRL_NB_8,
+};
+
+struct ieee80211_action_ht_bf {
+	struct ieee80211_action	am_header;
+	struct ht_mimo_ctrl	am_mimo_ctrl;
+	uint8_t			am_bf_report[0]; /* start of beamforming report */
+} __packed;
+
+/* VHT - Tx Beamforming */
+struct vht_mimo_ctrl {
+	uint8_t		am_mimoctrl[3];
+} __packed;
+
+/* VHT - Operating mode notification */
+struct ieee80211_action_vht_opmode_notification {
+	struct ieee80211_action		am_header;
+	u_int8_t			am_opmode;
+} __packed;
+
+#define IEEE80211_VHT_OPMODE_CHWIDTH		0x03
+#define IEEE80211_VHT_OPMODE_CHWIDTH_S		0
+#define IEEE80211_VHT_OPMODE_RXNSS		0x70
+#define IEEE80211_VHT_OPMODE_RXNSS_S		4
+#define IEEE80211_VHT_OPMODE_RXNSS_TYPE		0x80
+#define IEEE80211_VHT_OPMODE_RXNSS_TYPE_S	7
+
+#define IEEE80211_VHT_MIMO_CTRL_NC_M		0x000007
+#define IEEE80211_VHT_MIMO_CTRL_NC_S		0
+#define IEEE80211_VHT_MIMO_CTRL_NR_M		0x000038
+#define IEEE80211_VHT_MIMO_CTRL_NR_S		3
+#define IEEE80211_VHT_MIMO_CTRL_CH_BW_M		0x0000C0
+#define IEEE80211_VHT_MIMO_CTRL_CH_BW_S		6
+#define IEEE80211_VHT_MIMO_CTRL_CH_WIDTH_20	0x000000
+#define IEEE80211_VHT_MIMO_CTRL_CH_WIDTH_40	0x000040
+#define IEEE80211_VHT_MIMO_CTRL_CH_WIDTH_80	0x000080
+#define IEEE80211_VHT_MIMO_CTRL_CH_WIDTH_160	0x0000C0
+#define IEEE80211_VHT_MIMO_CTRL_NG_M		0x000300
+#define IEEE80211_VHT_MIMO_CTRL_NG_S		8
+#define IEEE80211_VHT_MIMO_CTRL_CODEBOOK_M	0x000400
+#define IEEE80211_VHT_MIMO_CTRL_CODEBOOK_S	10
+#define IEEE80211_VHT_MIMO_CTRL_FBTYPE_M	0x000800
+#define IEEE80211_VHT_MIMO_CTRL_FBTYPE_S	11
+#define IEEE80211_VHT_MIMO_CTRL_R_FB_M          0x007000
+#define IEEE80211_VHT_MIMO_CTRL_R_FB_S          12
+#define IEEE80211_VHT_MIMO_CTRL_FIRSTFB_M       0x008000
+#define IEEE80211_VHT_MIMO_CTRL_FIRSTFB_S       15
+#define IEEE80211_VHT_MIMO_CTRL_DTOKEN_M        0xFC0000
+#define IEEE80211_VHT_MIMO_CTRL_DTOKEN_S        18
+
+/* Block Ack actions */
+#define IEEE80211_ACTION_BA_ADDBA_REQ		0	/* Add block ack request */
+#define IEEE80211_ACTION_BA_ADDBA_RESP		1	/* Add block ack response */
+#define IEEE80211_ACTION_BA_DELBA			2	/* delete block ack */
+
+/* BA - Add block ack request */
+struct ieee80211_action_ba_addba_req {
+	struct ieee80211_action		am_header;
+	uint8_t	am_dlg;
+	uint16_t	am_ba_params;
+	uint16_t	am_ba_to;
+	uint16_t	am_ba_seq;
+} __packed;
+
+#define IEEE80211_A_BA_AMSDU_SUPPORTED		0x0001
+#define IEEE80211_A_BA_IMMEDIATE			0x0002
+#define IEEE80211_A_BA_DELAYED				0x0000
+#define IEEE80211_A_BA_TID_M				0x003C
+#define IEEE80211_A_BA_TID_S				2
+#define IEEE80211_A_BA_BUFF_SIZE_M			0xFFC0
+#define IEEE80211_A_BA_BUFF_SIZE_S			6
+#define IEEE80211_A_BA_FRAG_M				0x000F
+#define IEEE80211_A_BA_FRAG_S				0
+#define IEEE80211_A_BA_SEQ_M				0xFFF0
+#define IEEE80211_A_BA_SEQ_S				4
+#define IEEE80211_IOT_INTEL_AGG_MAX_FRAMES_NUM          16
+
+/* BA - Add block ack response */
+struct ieee80211_action_ba_addba_resp {
+	struct ieee80211_action		am_header;
+	uint8_t	am_dlg;
+	__le16		am_status;
+	__le16		am_ba_params;
+	__le16		am_ba_to;
+} __packed;
+
+/* BA - delete block ack request */
+struct ieee80211_action_ba_delba {
+	struct ieee80211_action		am_header;
+	__le16		am_delba_params;
+	__le16		am_reason;
+}__packed;
+
+#define IEEE80211_A_BA_INITIATOR			0x0800
+#define IEEE80211_A_BA_INITIATOR_S			11
+#define IEEE80211_A_BA_DELBA_TID			0xF000
+#define IEEE80211_A_BA_DELBA_TID_S			12
+
+/* Move to a .config file later. */
+#define CONFIG_QHOP 1
+
+#ifdef CONFIG_QHOP
+#define QDRV_ACTION_TYPE_QHOP        0x19
+#define QDRV_ACTION_QHOP_DFS_REPORT  0x1
+#define QDRV_ACTION_QHOP_SCS_REPORT  0x2
+
+struct qdrv_vendor_action_header {
+	uint8_t category;
+	uint8_t oui[3];
+	uint8_t type;
+	uint8_t action;
+} __packed;
+
+struct qdrv_vendor_action_qhop_dfs_data {
+	uint8_t cur_chan;
+} __packed;
+
+#endif
+
+#ifdef CONFIG_QVSP
+
+/**
+ * Structures for action frames used to set stream states and configure VSP.
+ *
+ * These structures are the ones that go over the medium, so must be packed.
+ */
+#define QVSP_ACTION_TYPE_VSP 0x1
+#define QVSP_ACTION_STRM_CTRL 0x1
+#define QVSP_ACTION_VSP_CTRL 0x2
+
+/**
+ * Common header for all VSP action frames.
+ */
+struct ieee80211_qvsp_act_header_s {
+	uint8_t category;
+	uint8_t oui[3];
+	uint8_t type;
+	uint8_t action;
+} __packed;
+
+struct ieee80211_qvsp_act_frm_dis_attr_s {
+	uint32_t throt_policy;
+	uint32_t throt_rate;
+	uint32_t demote_rule;
+	uint32_t demote_state;
+} __packed;
+
+/**
+ * Stream control action frame.
+ */
+struct ieee80211_qvsp_act_strm_ctrl_s {
+	struct ieee80211_qvsp_act_header_s header;
+	uint8_t strm_state;
+	uint8_t count;
+	struct ieee80211_qvsp_act_frm_dis_attr_s dis_attr;
+	struct ieee80211_qvsp_strm_id strm_items[0]; /* One or more of these entries */
+} __packed;
+
+/**
+ * Single VSP control item - set a parameter remotely.
+ */
+struct ieee80211_qvsp_act_vsp_ctrl_item_s {
+	uint32_t index;
+	uint32_t value;
+} __packed;
+
+/**
+ * VSP configuration/control action frame.
+ */
+struct ieee80211_qvsp_act_vsp_ctrl_s {
+	struct ieee80211_qvsp_act_header_s header;
+	uint8_t count;
+	uint8_t pad[3]; /* Pad for 32-bit alignment */
+	struct ieee80211_qvsp_act_vsp_ctrl_item_s ctrl_items[0]; /* One or more of these entries */
+} __packed;
+
+#endif
+
+/*
+ * 802.11w / PMF SA Query Action Frame
+ */
+#define IEEE80211_ACTION_W_SA_QUERY_REQ		0
+#define IEEE80211_ACTION_W_SA_QUERY_RESP	1
+
+struct ieee80211_action_sa_query {
+	struct ieee80211_action		at_header;
+	u_int16_t			at_tid;
+} __packed;
+
+/*
+ * Control frames.
+ */
+struct ieee80211_frame_min {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_addr1[IEEE80211_ADDR_LEN];
+	uint8_t i_addr2[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+/* 802.11v WNM */
+#define	IEEE80211_WNM_BSS_TM_CAP 19
+
+/* IEEE 802.11v - WNM Action field values */
+#define IEEE80211_WNM_EVENT_REQ				0
+#define	IEEE80211_WNM_EVENT_REPORT			1
+#define	IEEE80211_WNM_DIAGNOSTIC_REQ			2
+#define	IEEE80211_WNM_DIAGNOSTIC_REPORT			3
+#define	IEEE80211_WNM_LOCATION_CFG_REQ			4
+#define	IEEE80211_WNM_LOCATION_CFG_RESP			5
+#define	IEEE80211_WNM_BSS_TRANS_MGMT_QUERY		6
+#define	IEEE80211_WNM_BSS_TRANS_MGMT_REQ		7
+#define	IEEE80211_WNM_BSS_TRANS_MGMT_RESP		8
+#define	IEEE80211_WNM_FMS_REQ				9
+#define	IEEE80211_WNM_FMS_RESP				10
+#define	IEEE80211_WNM_COLLOCATED_INTERFERENCE_REQ	11
+#define	IEEE80211_WNM_COLLOCATED_INTERFERENCE_REPORT	12
+#define	IEEE80211_WNM_TFS_REQ				13
+#define	IEEE80211_WNM_TFS_RESP				14
+#define	IEEE80211_WNM_TFS_NOTIFY			15
+#define	IEEE80211_WNM_SLEEP_MODE_REQ			16
+#define	IEEE80211_WNM_SLEEP_MODE_RESP			17
+#define	IEEE80211_WNM_TIM_BROADCAST_REQ			18
+#define	IEEE80211_WNM_TIM_BROADCAST_RESP		19
+#define	IEEE80211_WNM_QOS_TRAFFIC_CAPAB_UPDATE		20
+#define	IEEE80211_WNM_CHANNEL_USAGE_REQ			21
+#define	IEEE80211_WNM_CHANNEL_USAGE_RESP		22
+#define	IEEE80211_WNM_DMS_REQ				23
+#define	IEEE80211_WNM_DMS_RESP				24
+#define	IEEE80211_WNM_TIMING_MEASUREMENT_REQ		25
+#define	IEEE80211_WNM_NOTIFICATION_REQ			26
+#define	IEE8E0211_WNM_NOTIFICATION_RESP			27
+
+/* IEEE 802.11v - BSS Transition Management Request - Request Mode */
+#define BTM_REQ_PREF_CAND_LIST_INCLUDED		BIT(0)
+#define BTM_REQ_ABRIDGED			BIT(1)
+#define BTM_REQ_DISASSOC_IMMINENT		BIT(2)
+#define BTM_REQ_BSS_TERMINATION_INCLUDED	BIT(3)
+#define BTM_REQ_ESS_DISASSOC_IMMINENT		BIT(4)
+
+/* IEEE Std 802.11-2012 - Table 8-253 */
+/* BTM response status codes */
+#define	BTM_RSP_ACCEPT					0
+#define	BTM_RSP_REJECT_UNSPECIFIED			1
+#define	BTM_RSP_REJECT_INSUFFICIENT_BEACON		2
+#define	BTM_RSP_REJECT_INSUFFICIENT_CAPABITY		3
+#define	BTM_RSP_REJECT_UNDESIRED			4
+#define	BTM_RSP_REJECT_DELAY_REQUEST			5
+#define	BTM_RSP_REJECT_STA_CANDIDATE_LIST_PROVIDED	6
+#define	BTM_RSP_REJECT_NO_SUITABLE_CANDIDATES		7
+#define	BTM_RSP_REJECT_LEAVING_ESS			8
+
+/* Neighbor report and BTM subelements */
+#define WNM_NEIGHBOR_TSF				1
+#define WNM_NEIGHBOR_CONDENSED_COUNTRY_STRING		2
+#define WNM_NEIGHBOR_BTM_CANDIDATE_PREFERENCE		3
+#define WNM_NEIGHBOR_BTM_TERMINATION_DURATION		4
+#define WNM_NEIGHBOR_BEARING				5
+#define WNM_NEIGHBOR_MEASUREMENT_PILOT			66
+#define WNM_NEIGHBOR_RRM_ENABLED_CAPABILITIES		70
+#define WNM_NEIGHBOR_MULTIPLE_BSSID			71
+
+/* some of BTM related values */
+#define WNM_BTM_DEFAULT_VAL_INTVAL		15
+#define WNM_BTM_DISASSOC_TIMER_VALUE		15
+#define WNM_BTM_BSS_TERMINATION_DURATION	5 /* 1 minute */
+
+
+/* Preference subelement */
+struct ieee80211_subie_pref {
+	uint8_t    subelem_id;
+	uint8_t    length;
+	uint8_t    pref;
+}__packed;
+
+/* BSS Termination duration */
+struct ieee80211_ie_btm_bss_termdur {
+	uint8_t    subelem_id;
+	uint8_t    length;
+	uint64_t   bss_term_tsf;
+	uint16_t   duration;
+}__packed;
+
+struct ieee80211_btm_query_paramset {
+	uint8_t dialog_token;
+	uint8_t reason;
+	uint8_t data[0]; /* optional prefered BSS candidate list */
+}__packed;
+
+struct ieee80211_action_btm_query {
+	struct ieee80211_action btm_header;
+	struct ieee80211_btm_query_paramset btm_query_param;
+}__packed;
+
+struct btm_request_params {
+	uint8_t		dialog_token;
+	uint8_t		request_mode;
+	uint16_t	disassoc_timer;
+	uint8_t		validity_interval;
+	uint8_t		*bss_term_dur;
+	char		*url;
+	uint8_t		*neigh_reports;
+	int		neigh_reports_length;
+};
+
+struct ieee80211_btm_req_paramset {
+	uint8_t		dialog_token;
+	uint8_t		request_mode;
+	uint16_t	disassoc_timer;
+	uint8_t		validity_interval;
+	uint8_t		info[0];
+	/* Optional BSS termination duration */
+	/* Optional session information URL */
+	/* Optional BSS Transition Candidate list (neighbor report) */
+}__packed;
+
+struct ieee80211_action_btm_req {
+	struct ieee80211_action			btm_header;
+	struct ieee80211_btm_req_paramset	btm_req_param;
+}__packed;
+
+struct ieee80211_btm_rsp_paramset {
+	uint8_t	dialog_token;
+	uint8_t status_code;
+	uint8_t bss_term_delay;
+	uint8_t data[0];
+	/* Optional Target BSSID */
+	/* Optional BSS Transition Candidate list (neighbor report) */
+}__packed;
+
+struct ieee80211_action_btm_rsp {
+	struct ieee80211_action			btm_header;
+	struct ieee80211_btm_rsp_paramset	btm_rsp_param;
+}__packed;
+
+/* IEEE 802.11r related */
+/*
+ * mobility domain information element.
+ */
+struct ieee80211_md_ie {
+	uint8_t		md_id;		/* IEEE80211_ELEMID_MOBILITY_DOMAIN */
+	uint8_t		md_len;		/* length in bytes */
+	uint16_t	md_info;	/* mobility domain id */
+	uint8_t		md_cap;		/* capability */
+}__packed;
+
+#define IEEE80211_MDIE_LEN	3
+
+/*
+ * BAR frame format
+ */
+#define IEEE80211_BAR_CTL_TID		0xF000      /* tid mask             */
+#define IEEE80211_BAR_CTL_TID_S         12      /* tid shift            */
+#define IEEE80211_BAR_CTL_NOACK		0x0001      /* no-ack policy        */
+#define IEEE80211_BAR_CTL_COMBA		0x0004      /* compressed block-ack */
+#define IEEE80211_BAR_CTL_MULTIBA	0x0006		/* Multi TID Block Ack */
+#define IEEE80211_BAR_INFO_FRAG_M	0x000F		/* fragment mask */
+#define IEEE80211_BAR_CTL_FRAG_S	0			/* fragment shift */
+#define IEEE80211_BAR_CTL_SEQ		0xFFF0		/* sequence number mask */
+#define IEEE80211_BAR_CTL_SEQ_S		4			/* sequence number shift */
+
+
+struct ieee80211_frame_bar {
+	uint8_t	i_fc[2];
+	uint8_t	i_dur[2];
+	uint8_t	i_ra[IEEE80211_ADDR_LEN];
+	uint8_t	i_ta[IEEE80211_ADDR_LEN];
+	uint16_t	i_ctl;
+	uint8_t	i_info[0];						/* variable length */
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_bar_info_simple {
+	uint16_t	i_seq;
+} __packed;
+
+struct ieee80211_frame_bar_info_tid {
+	uint16_t	i_tid;
+	uint16_t	i_seq;
+} __packed;
+
+#define IEEE80211_BAR_HDR_LEN		16
+#define IEEE80211_BAR_COMPRESSED_LEN	(sizeof(struct ieee80211_frame_bar) + \
+						sizeof(struct ieee80211_frame_bar_info_simple))
+
+/*
+ * BA frame format
+ */
+struct ieee80211_frame_ba {
+	uint8_t	i_fc[2];
+	uint8_t	i_dur[2];
+	uint8_t	i_ra[IEEE80211_ADDR_LEN];
+	uint8_t	i_ta[IEEE80211_ADDR_LEN];
+	uint16_t	i_ctl;
+	uint8_t	i_info[0];						/* variable length */
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_ba_simple {
+	uint16_t	i_seq;
+	uint8_t	i_bm[128];
+} __packed;
+
+struct ieee80211_frame_ba_comp {
+	uint16_t	i_seq;
+	uint8_t	i_bm[8];
+} __packed;
+
+struct ieee80211_frame_ba_tid {
+	uint16_t	i_tid;
+	uint16_t	i_seq;
+	uint8_t	i_bm[8];
+} __packed;
+
+struct ieee80211_frame_rts {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	uint8_t i_ta[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_cts {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_ack {
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_pspoll {
+	uint8_t i_fc[2];
+	uint8_t i_aid[2];
+	uint8_t i_bssid[IEEE80211_ADDR_LEN];
+	uint8_t i_ta[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_cfend {		/* NB: also CF-End+CF-Ack */
+	uint8_t i_fc[2];
+	uint8_t i_dur[2];	/* should be zero */
+	uint8_t i_ra[IEEE80211_ADDR_LEN];
+	uint8_t i_bssid[IEEE80211_ADDR_LEN];
+	/* FCS */
+} __packed;
+
+struct ieee80211_frame_cw {
+	uint8_t	i_fc[2];
+	uint8_t	i_dur[2];
+	uint8_t	i_ra[IEEE80211_ADDR_LEN];
+	uint8_t	i_cfc[2]; /* carried frame control */
+	/* variable control frame */
+	/* FCS */
+} __packed;
+
+/* 802.11 Management over Ethernet Payload Types (Annex U.1) */
+#define IEEE80211_SNAP_TYPE_REMOTE             1   /* Remote request/response */
+#define IEEE80211_SNAP_TYPE_TDLS               2   /* TDLS */
+
+#define IEEE80211_FCS_LEN		4
+#define IEEE80211_ENCR_HDR_AES_LEN	16
+
+/*
+ * BEACON management packets
+ *
+ *	octet timestamp[8]
+ *	octet beacon interval[2]
+ *	octet capability information[2]
+ *	information element
+ *		octet elemid
+ *		octet length
+ *		octet information[length]
+ */
+
+typedef uint8_t *ieee80211_mgt_beacon_t;
+
+#define	IEEE80211_BEACON_INTERVAL(beacon) \
+	((beacon)[8] | ((beacon)[9] << 8))
+#define	IEEE80211_BEACON_CAPABILITY(beacon) \
+	((beacon)[10] | ((beacon)[11] << 8))
+
+#define	IEEE80211_CAPINFO_ESS			0x0001
+#define	IEEE80211_CAPINFO_IBSS			0x0002
+#define	IEEE80211_CAPINFO_CF_POLLABLE		0x0004
+#define	IEEE80211_CAPINFO_CF_POLLREQ		0x0008
+#define	IEEE80211_CAPINFO_PRIVACY		0x0010
+#define	IEEE80211_CAPINFO_SHORT_PREAMBLE	0x0020
+#define	IEEE80211_CAPINFO_PBCC			0x0040
+#define	IEEE80211_CAPINFO_CHNL_AGILITY		0x0080
+#define IEEE80211_CAPINFO_SPECTRUM_MGMT		0x0100
+#define	IEEE80211_CAPINFO_WME			0x0200
+#define	IEEE80211_CAPINFO_SHORT_SLOTTIME	0x0400
+#define	IEEE80211_CAPINFO_APSD			0x0800
+#define	IEEE80211_CAPINFO_RM			0x1000
+#define	IEEE80211_CAPINFO_DSSSOFDM		0x2000
+#define	IEEE80211_CAPINFO_DELAYED_BA		0x4000
+#define	IEEE80211_CAPINFO_IMMEDIATE_BA		0x8000
+
+/* Extended Capabilities element (8.4.2.29) - bits 0 to 31 */
+#define IEEE80211_EXTCAP1_TDLS_UAPSD		0x10000000UL	/* TDLS peer U-APSD buf STA support */
+#define IEEE80211_EXTCAP1_TDLS_PSM		0x20000000UL	/* Peer PSM Support */
+#define IEEE80211_EXTCAP1_TDLS_CS		0x40000000UL	/* channel switching */
+#define IEEE80211_EXTCAP1_BSS_TRANSITION	0x00080000UL	/* bss transition */
+
+/* Extended Capabilities element (8.4.2.29) - bits 32 to 63 */
+#define IEEE80211_EXTCAP2_TDLS			0x00000020UL	/* TDLS supported */
+#define IEEE80211_EXTCAP2_TDLS_PROHIB		0x00000040UL	/* TDLS prohibited */
+#define IEEE80211_EXTCAP2_TDLS_CS_PROHIB	0x00000080UL	/* TDLS channel switch prohibited */
+#define IEEE80211_EXTCAP2_OP_MODE_NOTI		0x40000000UL	/* Operation mode notification supporting */
+
+#define IEEE8211_EXTCAP_LENGTH	8	/* Extended capabilities element length */
+
+/*
+ * 802.11i/WPA information element (maximally sized).
+ */
+struct ieee80211_ie_wpa {
+	uint8_t wpa_id;			/* IEEE80211_ELEMID_VENDOR */
+	uint8_t wpa_len;		/* length in bytes */
+	uint8_t wpa_oui[3];		/* 0x00, 0x50, 0xf2 */
+	uint8_t wpa_type;		/* OUI type */
+	uint16_t wpa_version;		/* spec revision */
+	uint32_t wpa_mcipher[1];	/* multicast/group key cipher */
+	uint16_t wpa_uciphercnt;	/* # pairwise key ciphers */
+	uint32_t wpa_uciphers[8];	/* ciphers */
+	uint16_t wpa_authselcnt;	/* authentication selector cnt*/
+	uint32_t wpa_authsels[8];	/* selectors */
+	uint16_t wpa_caps;		/* 802.11i capabilities */
+	uint16_t wpa_pmkidcnt;		/* 802.11i pmkid count */
+	uint16_t wpa_pmkids[8];		/* 802.11i pmkids */
+} __packed;
+
+/* TDLS Link Identifier element (7.3.2.62) */
+struct ieee80211_tdls_link_id {
+	uint8_t        id;                             /* IEEE80211_ELEMID_TDLS_LINK_ID */
+	uint8_t        len;                            /* 20 */
+	uint8_t        bssid[IEEE80211_ADDR_LEN];      /* BSSID */
+	uint8_t        init_sa[IEEE80211_ADDR_LEN];    /* Initiator STA MAC address */
+	uint8_t        resp_sa[IEEE80211_ADDR_LEN];    /* Responder STA MAC address */
+} __packed;
+
+/* TDLS Wakeup Schedule information element (7.3.2.63) */
+struct ieee80211_tdls_wkup_sched {
+	uint8_t        id;             /* IEEE80211_ELEMID_TDLS_WKUP_SCHED */
+	uint8_t        len;            /* 20 */
+	uint32_t       offset;         /* Offset from TSF 0 */
+	uint32_t       interval;       /* Microsecs between awake windows */
+	uint32_t       awake_slots;    /* Awake window slots */
+	uint32_t       awake_dur;      /* Max Awake Window Duration */
+	uint16_t       idle_count;     /* Idle Count */
+} __packed;
+
+/* Extender Role IE */
+struct ieee80211_ie_qtn_extender {
+	uint8_t id;		/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;		/* 5 */
+	uint8_t qtn_ie_oui[3];	/* QTN_OUI - 0x00, 0x26, 0x86*/
+	uint8_t qtn_ie_type;	/* QTN_OUI_EXTENDER_ROLE */
+	uint8_t role;		/* extender device role */
+} __packed;
+
+/* TDLS IE */
+struct ieee80211_ie_qtn_tdls_sta_info {
+	uint8_t id;		/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;		/* 6 */
+	uint8_t qtn_ie_oui[3];	/* QTN_OUI - 0x00, 0x26, 0x86 */
+	uint8_t qtn_ie_type;	/* QTN_OUI_TDLS */
+	uint16_t sta_associd;	/* station's AID, unique value at BSS */
+} __packed;
+
+/* TDLS Channel Switch Timing element (7.3.2.64) */
+struct ieee80211_tdls_cs_timing {
+	uint8_t        id;             /* IEEE80211_ELEMID_TDLS_CS_TIMING */
+	uint8_t        len;            /* 6 */
+	uint16_t       switch_time;    /* Microsecs to switch channels */
+	uint16_t       switch_timeout; /* Microsecs to timeout channel switch */
+} __packed;
+
+/* TDLS PTI Control element (7.3.2.65) */
+struct ieee80211_tdls_pti_ctrl {
+	uint8_t        id;             /* IEEE80211_ELEMID_TDLS_PTI_CTRL */
+	uint8_t        len;            /* 5 */
+	uint16_t       tid;            /* TID in last mpdu to pu sleep sta */
+	uint16_t       seq_ctrl;       /* Seq ctrl in last mpdu to sleep sta */
+} __packed;
+
+/* TDLS PU Buffer Status element (7.3.2.66) */
+struct ieee80211_tdls_pu_buf_stat {
+	uint8_t        id;             /* IEEE80211_ELEMID_TDLS_PU_BUF_STAT */
+	uint8_t        len;            /* 3 */
+	uint8_t        pu_buf_stat;    /* PU buffer status flags */
+} __packed;
+
+/* Extender Role IE */
+struct ieee80211_qtn_ext_role {
+	uint8_t id;				/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;				/* 5 */
+	uint8_t qtn_ie_oui[3];			/* QTN_OUI - 0x00, 0x26, 0x86*/
+	uint8_t qtn_ie_type;			/* QTN_OUI_EXTENDER_ROLE */
+	uint8_t role;				/* extender device role: MBS, RBS, NONE */
+} __packed;
+
+#define QTN_MAX_RBS_NUM		8
+struct ieee80211_qtn_ext_bssid {
+	uint8_t id;				/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;				/* 59 */
+	uint8_t qtn_ie_oui[3];			/* QTN_OUI - 0x00, 0x26, 0x86*/
+	uint8_t qtn_ie_type;			/* QTN_OUI_EXTENDER_BSSID */
+	uint8_t mbs_bssid[IEEE80211_ADDR_LEN];	/* BSSID of mbs */
+	uint8_t rbs_num;
+	uint8_t rbs_bssid[QTN_MAX_RBS_NUM][IEEE80211_ADDR_LEN]; /* BSSID of rbs */
+} __packed;
+
+struct ieee80211_qtn_ext_state {
+	uint8_t id;				/* IEEE80211_ELEMID_VENDOR */
+	uint8_t len;				/* 8 */
+	uint8_t qtn_ie_oui[3];			/* QTN_OUI - 0x00, 0x26, 0x86 */
+	uint8_t qtn_ie_type;			/* QTN_OUI_EXTENDER_STATE */
+#define QTN_EXT_MBS_OCAC	BIT(0)		/* MBS OCAC on-going */
+	uint8_t state1;				/* record extender specific states */
+	uint8_t __rsvd[3];
+} __packed;
+
+struct ieee80211_ie_qtn_ocac_state {
+	uint8_t id;				/* IEEE80211_ELEMID_VENDOR */
+#define OCAC_STATE_IE_LEN	6
+	uint8_t len;				/* 6 - QTN_OCAC_STATE_IE_LEN */
+	uint8_t qtn_ie_oui[3];			/* QTN_OUI - 0x00, 0x26, 0x86 */
+	uint8_t qtn_ie_type;			/* IE type - QTN_OUI_OCAC_STATE */
+#define OCAC_STATE_NONE		0
+#define OCAC_STATE_BACKOFF	1
+#define OCAC_STATE_ONGOING	2
+	uint8_t state;
+	uint8_t param;				/* Use of params depends on the current stage
+						 * OCAC_STATE_NONE   : Not used (set as 0)
+						 * OCAC_STATE_BACKOFF: Backoff before OCAC would start (in beacon count)
+						 * OCAC_STATE_ONGOING: Not used (set as 0)
+						 */
+} __packed;
+
+/*
+ * 802.11n AMPDU delimiters and frame structure
+ */
+
+/* XXX - Endianness?  */
+struct ieee80211_ampdu_delim {
+	uint8_t	dl_mpdulen[2];		/* only 12 bits */
+	uint8_t	dl_crc;
+	uint8_t	dl_uniquepat;
+} __packed;
+
+#define IEEE80211_AMPDU_DLPAT		0x4E	/* ASCII for char 'N' */
+#define	IEEE80211_AMPDU_PADMAX		3
+
+/*
+ * 802.11n HT Capability IE
+ */
+struct ieee80211_ie_htcap {
+	uint8_t	hc_id;			/* element ID */
+	uint8_t	hc_len;			/* length in bytes */
+	uint8_t	hc_cap[2];			/* HT capabilities */
+	uint8_t	hc_ampdu;		/* A-MPDU parameters */
+	uint8_t	hc_mcsset[16];		/* supported MCS set */
+	uint8_t	hc_extcap[2];		/* extended HT capabilities */
+	uint8_t	hc_txbf[4];		/* txbf capabilities */
+	uint8_t	hc_antenna;		/* antenna capabilities */
+} __packed;
+
+
+/* HT capability flags */
+#define	IEEE80211_HTCAP_C_LDPCCODING		0x0001
+#define	IEEE80211_HTCAP_C_CHWIDTH40		0x0002
+#define	IEEE80211_HTCAP_C_GREENFIELD		0x0010
+#define IEEE80211_HTCAP_C_SHORTGI20		0x0020
+#define IEEE80211_HTCAP_C_SHORTGI40		0x0040
+#define IEEE80211_HTCAP_C_TXSTBC		0x0080
+#define IEEE80211_HTCAP_C_RXSTBC		0x0100
+#define IEEE80211_HTCAP_C_DELAYEDBLKACK		0x0400
+#define IEEE80211_HTCAP_C_MAXAMSDUSIZE_8K	0x0800  /* 1 = 8K, 0 = 3839 bytes */
+#define IEEE80211_HTCAP_C_DSSSCCK40		0x1000
+#define IEEE80211_HTCAP_C_PSMP			0x2000
+#define IEEE80211_HTCAP_C_40_INTOLERANT		0x4000
+#define IEEE80211_HTCAP_C_LSIGTXOPPROT		0x8000
+
+/* STBC defines */
+#define IEEE80211_MAX_TX_STBC_SS		2
+
+/* MCS set flags */
+#define IEEE80211_HTCAP_MCS_TX_SET_DEFINED	0x01
+#define IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ	0x02
+#define IEEE80211_HTCAP_MCS_TX_UNEQ_MOD		0x10
+
+/* Maximum MSDU sizes */
+#define	IEEE80211_MSDU_SIZE_7935			7935
+#define	IEEE80211_MSDU_SIZE_3839			3839
+
+
+#define IEEE80211_HT_MCS_SET_BPSK_CR_HALF		0x01
+#define IEEE80211_HT_MCS_SET_QPSK_CR_HALF		0x02
+#define IEEE80211_HT_MCS_SET_QPSK_CR_THREEFORTH	0x04
+#define IEEE80211_HT_MCS_SET_16QAM_CR_HALF		0x08
+#define IEEE80211_HT_MCS_SET_16QAM_CR_THREEFORTH	0x10
+#define IEEE80211_HT_MCS_SET_64QAM_CR_TWOTHIRD	0x20
+#define IEEE80211_HT_MCS_SET_64QAM_CR_THREEFORTH	0x40
+#define IEEE80211_HT_MCS_SET_64QAM_CR_FIVESIXTH	0x80
+
+/* Extended capabilities flags */
+#define IEEE80211_HTCAP_E_PCO				0x0001
+#define IEEE80211_HTCAP_E_PLUS_HTC			0x0400
+#define IEEE80211_HTCAP_E_RD_RESPONSE		0x0800
+
+/* Tx Beamforming flags */
+#define IEEE80211_HTCAP_B_IMP_TXBF_RX		0x00000001
+#define IEEE80211_HTCAP_B_STAG_SOUNDING_RX	0x00000002
+#define IEEE80211_HTCAP_B_STAG_SOUNDING_TX	0x00000004
+#define IEEE80211_HTCAP_B_NDP_RX			0x00000008
+#define IEEE80211_HTCAP_B_NDP_TX			0x00000010
+#define IEEE80211_HTCAP_B_IMP_TXBF_TX		0x00000020
+#define IEEE80211_HTCAP_B_EXP_CSI_TXBF		0x00000100
+#define IEEE80211_HTCAP_B_EXP_NCOMP_STEER	0x00000200
+#define IEEE80211_HTCAP_B_EXP_COMP_STEER	0x00000400
+
+/* Antenna selection flags */
+#define IEEE80211_HTCAP_A_ASEL_CAPABLE		0x01
+#define IEEE80211_HTCAP_A_EXP_CSI_FB_ASEL	0x02
+#define IEEE80211_HTCAP_A_ANT_IND_FB_ASEL	0x04
+#define IEEE80211_HTCAP_A_EXP_CSI_FB		0x08
+#define IEEE80211_HTCAP_A_ANT_IND_FB		0x10
+#define IEEE80211_HTCAP_A_RX_ASEL			0x20
+#define IEEE80211_HTCAP_A_TX_SOUNDING_PPDU	0x40
+
+/* 11 AC related defines */
+#define IEEE80211_11AC_MCS_VAL_ERR		-1
+#define IEEE80211_HT_EQUAL_MCS_START		0
+#define IEEE80211_HT_EQUAL_MCS_2SS_MAX		15
+#define IEEE80211_HT_EQUAL_MCS_3SS_MAX		23
+#define IEEE80211_EQUAL_MCS_32			32
+#define IEEE80211_UNEQUAL_MCS_START		33
+#define IEEE80211_HT_UNEQUAL_MCS_2SS_MAX	38
+#define IEEE80211_HT_UNEQUAL_MCS_3SS_MAX	52
+#define IEEE80211_UNEQUAL_MCS_MAX		76
+#define IEEE80211_UNEQUAL_MCS_BIT		0x40
+#define IEEE80211_AC_MCS_MASK			0xFF
+#define IEEE80211_AC_MCS_SHIFT			8
+#define IEEE80211_AC_MCS_VAL_MASK		0x0F
+#define IEEE80211_AC_MCS_NSS_MASK		0xF0
+#define IEEE80211_11AC_MCS_NSS_SHIFT		4
+#define IEEE80211_AC_MCS_MAX			10
+#define IEEE80211_AC_MCS_NSS_MAX		4
+
+/* B0-1 maximum rx A-MPDU factor 2^(13+Max Rx A-MPDU Factor) - 1 */
+enum {
+	IEEE80211_HTCAP_MAXRXAMPDU_8191,	/* (2 ^ 13) - 1*/
+	IEEE80211_HTCAP_MAXRXAMPDU_16383,   /* (2 ^ 14) - 1 */
+	IEEE80211_HTCAP_MAXRXAMPDU_32767,   /* (2 ^ 15) - 1*/
+	IEEE80211_HTCAP_MAXRXAMPDU_65535,   /* (2 ^ 16) - 1*/
+};
+
+/* B2-4 MPDU spacing (usec) */
+enum {
+	IEEE80211_HTCAP_MPDUSPACING_NA,		/* No time restriction */
+	IEEE80211_HTCAP_MPDUSPACING_0_25,   /* 1/4 usec */
+	IEEE80211_HTCAP_MPDUSPACING_0_5,    /* 1/2 usec */
+	IEEE80211_HTCAP_MPDUSPACING_1,      /* 1 usec */
+	IEEE80211_HTCAP_MPDUSPACING_2,      /* 2 usec */
+	IEEE80211_HTCAP_MPDUSPACING_4,      /* 4 usec */
+	IEEE80211_HTCAP_MPDUSPACING_8,      /* 8 usec */
+	IEEE80211_HTCAP_MPDUSPACING_16,     /* 16 usec */
+};
+
+/*
+ * Rx MCS set
+ * # Supported rates IE is a 10 octet bitmap - also see mcs_stream_map[]
+ * Octet: 0        1        2        3        4 UEQM1  5 UEQM2  6 UEQM3  7 UEQM4  8 UEQM5  9 UEQM6
+ * NSS:   11111111 22222222 33333333 44444444 02222223 33333333 33333444 44444444 44444444 44444...
+ * MCS:   0        8        16       24       32       40       48       56       64       72  76
+ */
+enum {
+	IEEE80211_HT_MCSSET_20_40_NSS1,		/* CBW = 20/40 MHz, Nss = 1, Nes = 1, EQM/ No EQM */
+	IEEE80211_HT_MCSSET_20_40_NSS2,		/* CBW = 20/40 MHz, Nss = 2, Nes = 1, EQM */
+	IEEE80211_HT_MCSSET_20_40_NSS3,		/* CBW = 20/40 MHz, Nss = 3, Nes = 1, EQM */
+	IEEE80211_HT_MCSSET_20_40_NSS4,		/* CBW = 20/40 MHz, Nss = 4, Nes = 1, EQM */
+	IEEE80211_HT_MCSSET_20_40_UEQM1,	/* MCS 32 and UEQM MCSs 33 - 39 */
+	IEEE80211_HT_MCSSET_20_40_UEQM2,	/* UEQM MCSs 40 - 47 */
+	IEEE80211_HT_MCSSET_20_40_UEQM3,        /* UEQM MCSs 48 - 55 */
+	IEEE80211_HT_MCSSET_20_40_UEQM4,        /* UEQM MCSs 56 - 63 */
+	IEEE80211_HT_MCSSET_20_40_UEQM5,        /* UEQM MCSs 64 - 71 */
+	IEEE80211_HT_MCSSET_20_40_UEQM6,        /* UEQM MCSs 72 - 76 plus 3 reserved bits */
+};
+
+#define IEEE80211_HT_MCSSET_20_40_UEQM1_2SS	0x7E
+
+#define IEEE80211_HT_MCSSET_20_40_UEQM1_3SS	0x80
+#define IEEE80211_HT_MCSSET_20_40_UEQM2_3SS	0xFF
+#define IEEE80211_HT_MCSSET_20_40_UEQM3_3SS	0x1F
+
+#define IEEE80211_HT_MCSSET_20_40_UEQM3_4SS	0xE0
+#define IEEE80211_HT_MCSSET_20_40_UEQM4_4SS	0xFF
+#define IEEE80211_HT_MCSSET_20_40_UEQM5_4SS	0xFF
+#define IEEE80211_HT_MCSSET_20_40_UEQM6_4SS	0x1F
+
+#define IEEE80211_HT_HAS_2SS_UEQM_MCS(mcsset) \
+		(mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM1_2SS)
+
+#define IEEE80211_HT_HAS_3SS_UEQM_MCS(mcsset) \
+		((mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM1_3SS) ||	\
+		 (mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM2_3SS) ||	\
+		 (mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM3_3SS))
+
+#define IEEE80211_HT_HAS_4SS_UEQM_MCS(mcsset) \
+		((mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM3_4SS) ||	\
+		 (mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM4_4SS) ||	\
+		 (mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM5_4SS) ||	\
+		 (mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6] &	\
+			IEEE80211_HT_MCSSET_20_40_UEQM6_4SS))
+
+#define IEEE80211_HT_IS_1SS_NODE(mcsset) \
+		((mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] != 0) && \
+		(mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] == 0))
+
+#define IEEE80211_HT_IS_2SS_NODE(mcsset) \
+		((mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] != 0) && \
+		(mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] == 0))
+
+#define IEEE80211_HT_IS_3SS_NODE(mcsset) \
+		((mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] != 0) && \
+		(mcsset[IEEE80211_HT_MCSSET_20_40_NSS4] == 0))
+
+#define IEEE80211_HT_IS_4SS_NODE(mcsset) \
+		(mcsset[IEEE80211_HT_MCSSET_20_40_NSS4] != 0)
+
+/* B2-3 Maximum Tx spatial streams */
+enum {
+	IEEE80211_HTCAP_MCS_ONE_TX_SS,		/* One spatial stream */
+	IEEE80211_HTCAP_MCS_TWO_TX_SS,		/* Two spatial streams */
+	IEEE80211_HTCAP_MCS_THREE_TX_SS,	/* Three spatial streams */
+	IEEE80211_HTCAP_MCS_FOUR_TX_SS		/* Four spatial streams */
+};
+
+/* B2-3 power save mode */
+enum {
+	IEEE80211_HTCAP_C_MIMOPWRSAVE_STATIC = 0,	/* No MIMO (static mode) */
+	IEEE80211_HTCAP_C_MIMOPWRSAVE_DYNAMIC,		/* Precede MIMO with RTS */
+	IEEE80211_HTCAP_C_MIMOPWRSAVE_NA,		/* Not applicable        */
+	IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE		/* No limitation on MIMO (SM power save disabled) */
+};
+
+/* B8-9 Rx STBC Mode */
+enum {
+	IEEE80211_HTCAP_C_RXSTBC_NONE,			/* No STBC SS */
+	IEEE80211_HTCAP_C_RXSTBC_ONE_SS,		/* One STBC SS */
+	IEEE80211_HTCAP_C_RXSTBC_TWO_SS,		/* Two STBC SS */
+	IEEE80211_HTCAP_C_RXSTBC_THREE_SS		/* Three STBC SS */
+};
+
+/* B1-2 PCO transition time */
+enum {
+	IEEE80211_HTCAP_E_PCO_NONE,				/* No transition */
+	IEEE80211_HTCAP_E_PCO_FOUR_HUNDRED_US,	/* 400 us */
+	IEEE80211_HTCAP_E_PCO_ONE_HALF_MS,		/* 1.5 ms */
+	IEEE80211_HTCAP_E_PCO_FIVE_MS			/* 5 ms */
+};
+
+/* B8-9 MCS feedback */
+enum {
+	IEEE80211_HTCAP_E_MCS_FB_NONE,			/* No feedback */
+	IEEE80211_HTCAP_E_MCS_FB_NA,			/* Reserved */
+	IEEE80211_HTCAP_E_MCS_FB_UNSOLICITED,	/* Unsolicited feedback only*/
+	IEEE80211_HTCAP_E_MCS_FB_SOLICITED		/* Solicited and unsolicited feedback */
+};
+
+/* B6-7 Calibration */
+enum {
+	IEEE80211_HTCAP_B_CALIBRATION_NONE,			/* No support */
+	IEEE80211_HTCAP_B_CALIBRATION_RESP_ONLY,	/* Response only */
+	IEEE80211_HTCAP_B_CALIBRATION_NA,			/* Reserved */
+	IEEE80211_HTCAP_B_CALIBRATION_REQ_RESP		/* Request and response */
+};
+
+/* B11-12 explicit CSI TxBF feedback, B13-14 explicit non compressed TxBF,
+ * B15-16 explicit compressed TxBF
+ */
+enum {
+	IEEE80211_HTCAP_B_CAPABLE_NONE,			/* No support */
+	IEEE80211_HTCAP_B_CAPABLE_DELAYED,		/* delayed response only */
+	IEEE80211_HTCAP_B_CAPABLE_IMMEDIATE,	/* immediate response only */
+	IEEE80211_HTCAP_B_CAPABLE_BOTH			/* both delayed and immediate response */
+};
+
+/* B17-18 Grouping */
+enum {
+	IEEE80211_HTCAP_B_GROUPING_NONE,		/* No support */
+	IEEE80211_HTCAP_B_GROUPING_ONE_TWO,		/* groups 1 and 2 */
+	IEEE80211_HTCAP_B_GROUPING_ONE_FOUR,	/* groups 1 and 4 */
+	IEEE80211_HTCAP_B_GROUPING_ONE_TWO_FOUR	/* groups 1, 2 and 4 */
+};
+
+/* B19-20 CSI number of beamforming antennas, B21-22 non compressed number of beamforming
+ * antennas, B23-24 compressed number of beamforming antennas
+ */
+enum {
+	IEEE80211_HTCAP_B_ANTENNAS_ONE,		/* Single antenna sounding */
+	IEEE80211_HTCAP_B_ANTENNAS_TWO,		/* 2 antenna sounding */
+	IEEE80211_HTCAP_B_ANTENNAS_THREE,	/* 3 antenna sounding */
+	IEEE80211_HTCAP_B_ANTENNAS_FOUR		/* 4 antenna sounding */
+};
+
+/* B25-26 CSI Max number of beamformer rows */
+enum {
+	IEEE80211_HTCAP_B_CSI_ONE_ROW,
+	IEEE80211_HTCAP_B_CSI_TWO_ROWS,
+	IEEE80211_HTCAP_B_CSI_THREE_ROWS,
+	IEEE80211_HTCAP_B_CSI_FOUR_ROWS
+};
+
+/* B27-28 channel estimation capability */
+enum {
+	IEEE80211_HTCAP_B_ST_STREAM_ONE,	/* one space time stream */
+	IEEE80211_HTCAP_B_ST_STREAM_TWO,	/* two space time streams */
+	IEEE80211_HTCAP_B_ST_STREAM_THREE,	/* three space time streams */
+	IEEE80211_HTCAP_B_ST_STREAM_FOUR	/* four space time streams */
+};
+
+/* HT NSS */
+enum ieee80211_ht_nss {
+	IEEE80211_HT_NSS1 = 1,
+	IEEE80211_HT_NSS2 = 2,
+	IEEE80211_HT_NSS3 = 3,
+	IEEE80211_HT_NSS4 = 4
+};
+
+/* HT capability macros */
+
+/* get macros */
+/* A-MPDU spacing  B2-B4 */
+#define IEEE80211_HTCAP_MIN_AMPDU_SPACING(htcap) \
+	(((htcap)->hc_ampdu & 0x1c) >> 2)
+/* max RX A-MPDU length  B0-B1 */
+#define IEEE80211_HTCAP_MAX_AMPDU_LEN(htcap) \
+	(((htcap)->hc_ampdu & 0x03))
+/* highest supported data rate, B0-B7 in set 10, B0-B1 in set 11 */
+#define IEEE80211_HTCAP_HIGHEST_DATA_RATE(htcap) \
+	(((htcap)->hc_mcsset[10]) | (((htcap)->hc_mcsset[11] & 0x3) << 8))
+/* MCS parameters (all bits)*/
+#define IEEE80211_HTCAP_MCS_PARAMS(htcap) \
+	((htcap)->hc_mcsset[12] & 0x1F)
+/* MCS maximum spatial streams, B2-B3 in set 12 */
+#define IEEE80211_HTCAP_MCS_STREAMS(htcap) \
+	(((htcap)->hc_mcsset[12] & 0xC) >> 2)
+/* MCS set value (all bits) */
+#define IEEE80211_HTCAP_MCS_VALUE(htcap,_set) \
+	((htcap)->hc_mcsset[_set])
+/* HT capabilities (all bits) */
+#define IEEE80211_HTCAP_CAPABILITIES(htcap) \
+	(((htcap)->hc_cap[0]) | ((htcap)->hc_cap[1] << 8))
+/* B3-4 power save mode */
+#define IEEE80211_HTCAP_PWRSAVE_MODE(htcap) \
+	(((htcap)->hc_cap[0] & 0x0C) >> 2)
+/* B8-9 Rx STBC MODE */
+#define IEEE80211_HTCAP_RX_STBC_MODE(htcap) \
+	((htcap)->hc_cap[1] & 0x3)
+/* HT extended capabilities (all bits) */
+#define IEEE80211_HTCAP_EXT_CAPABILITIES(htcap) \
+	((htcap)->hc_extcap)
+/* B1-2 PCO transition time */
+#define IEEE80211_HTCAP_PCO_TRANSITION(htcap) \
+	(((htcap)->hc_extcap & 0x6) >> 1)
+/* B8-9 MCS feedback type */
+#define IEEE80211_HTCAP_MCS_FEEDBACK_TYPE(htcap) \
+	(((htcap)->hc_extcap & 0x300) >> 8)
+/* HT TxBeamForming (bits 0-13) */
+#define IEEE80211_HTCAP_TXBF_CAPABILITIES(htcap) \
+	((htcap)->hc_txbf[0] | ((htcap)->hc_txbf[1] << 8))
+/* HT TxBeamForming (bits 14-31) */
+#define IEEE80211_HTCAP_TXBF_CAPABILITIES_EXTN(htcap) \
+	((htcap)->hc_txbf[2] | ((htcap)->hc_txbf[3] << 8))
+/* B6-7 Calibration */
+#define IEEE80211_HTCAP_CALIBRATION(htcap) \
+	(((htcap)->hc_txbf[0] & 0xC0) >> 6)
+/* B11-12 explicit CSI TxBF feedback*/
+#define IEEE80211_HTCAP_EXP_CSI_TXBF(htcap) \
+	(((htcap)->hc_txbf[1] & 0x18) >> 3)
+/* B13-14 explicit non compressed TxBF */
+#define IEEE80211_HTCAP_EXP_NCOMP_TXBF(htcap) \
+	(((htcap)->hc_txbf[1] & 0x60) >> 5)
+/* B15-16 explicit compressed TxBF */
+#define IEEE80211_HTCAP_EXP_COMP_TXBF(htcap) \
+	((((htcap)->hc_txbf[1] & 0x80) >> 7) | (((htcap)->hc_txbf[2] & 0x01) << 1))
+/* B17-18 Grouping */
+#define IEEE80211_HTCAP_GROUPING(htcap) \
+	(((htcap)->hc_txbf[2] & 0x6) >> 1)
+/* B19-20 CSI number of beamforming antennas */
+#define IEEE80211_HTCAP_CSI_NUM_BF(htcap) \
+	(((htcap)->hc_txbf[2] & 0x18) >> 3)
+/* B21-22 non compressed number of beamforming antennas */
+#define IEEE80211_HTCAP_NCOM_NUM_BF(htcap) \
+	(((htcap)->hc_txbf[2] & 0x60) >> 5)
+/* B23-24 compressed number of beamforming antennas */
+#define IEEE80211_HTCAP_COMP_NUM_BF(htcap) \
+	((((htcap)->hc_txbf[2] & 0x80) >> 7) | (((htcap)->hc_txbf[3] & 0x01) << 1))
+/* B25-26 CSI Max number of beamformer rows */
+#define IEEE80211_HTCAP_CSI_BF_ROWS(htcap) \
+	(((htcap)->hc_txbf[3] & 0x6) >> 1)
+/* B27-28 channel estimation capability */
+#define IEEE80211_HTCAP_CHAN_EST(htcap) \
+	(((htcap)->hc_txbf[3] & 0x18) >> 3)
+
+/* set macros */
+/* A-MPDU spacing  B2-B4 */
+#define IEEE80211_HTCAP_SET_AMPDU_SPACING(htcap,_d) \
+	((htcap)->hc_ampdu = (((htcap)->hc_ampdu & ~0x1c)  | ((_d) << 2)))
+/* max RX A-MPDU length  B0-B1 */
+#define IEEE80211_HTCAP_SET_AMPDU_LEN(htcap,_f)	\
+	((htcap)->hc_ampdu = (((htcap)->hc_ampdu & ~0x03)  | (_f)))
+/* highest supported data rate, B0-B7 in set 10, B0-B1 in set 11 */
+#define IEEE80211_HTCAP_SET_HIGHEST_DATA_RATE(htcap,_r) \
+	((htcap)->hc_mcsset[10] = ((_r) & 0xFF)); \
+	((htcap)->hc_mcsset[11] = ((_r) & 0x3FF) >> 8)
+/* MCS set parameters (all bits) */
+#define IEEE80211_HTCAP_SET_MCS_PARAMS(htcap,_p) \
+	((htcap)->hc_mcsset[12] = (_p & 0x1F))
+/* MCS maximum spatial streams, B2-B3 in set 12 */
+#define IEEE80211_HTCAP_SET_MCS_STREAMS(htcap,_s) \
+	((htcap)->hc_mcsset[12] = ((htcap)->hc_mcsset[12] & ~0xC)| (_s << 2))
+/* MCS set value (all bits) */
+#define IEEE80211_HTCAP_SET_MCS_VALUE(htcap,_set,_value) \
+	((htcap)->hc_mcsset[_set] = (_value & 0xFF))
+/* HT capabilities (all bits) */
+#define IEEE80211_HTCAP_SET_CAPABILITIES(htcap,_cap) \
+	(htcap)->hc_cap[0] = (_cap & 0x00FF); \
+	(htcap)->hc_cap[1] = ((_cap & 0xFF00) >> 8)
+/* B2-B3 power save mode */
+#define IEEE80211_HTCAP_SET_PWRSAVE_MODE(htcap,_m) \
+	((htcap)->hc_cap[0] = (((htcap)->hc_cap[0] & ~0xC) | ((_m) << 2)))
+/* B8-9 Rx STBC MODE */
+#define IEEE80211_HTCAP_SET_RX_STBC_MODE(htcap,_m) \
+	((htcap)->hc_cap[1] = (((htcap)->hc_cap[1] & ~0x3) | (_m) ))
+/* HT extended capabilities (all bits) */
+#define IEEE80211_HTCAP_SET_EXT_CAPABILITIES(htcap,_cap) \
+	((htcap)->hc_extcap = (_cap & 0xFFFF))
+/* B1-2 PCO transition time */
+#define IEEE80211_HTCAP_SET_PCO_TRANSITION(htcap,_t) \
+	((htcap)->hc_extcap = (((htcap)->hc_extcap & ~0x6) | ((_t) << 1)))
+/* B8-9 MCS feedback type */
+#define IEEE80211_HTCAP_SET_MCS_FEEDBACK_TYPE(htcap,_t) \
+	((htcap)->hc_extcap = (((htcap)->hc_extcap & ~0x300) | ((_t) << 8)))
+/* HT TxBeamForming (all bits ) */
+#define IEEE80211_HTCAP_SET_TXBF_CAPABILITIES(htcap,_cap) \
+	(htcap)->hc_txbf[0] = ((_cap) & 0x00FF); \
+	(htcap)->hc_txbf[1] = (((_cap) & 0xFF00) >> 8)
+/* B6-7 Calibration */
+#define IEEE80211_HTCAP_SET_CALIBRATION(htcap,_t) \
+	((htcap)->hc_txbf[0] = (((htcap)->hc_txbf[0] & ~0xC0) | ((_t) << 6)))
+/* B11-12 explicit CSI TxBF feedback*/
+#define IEEE80211_HTCAP_SET_EXP_CSI_TXBF(htcap,_t) \
+	((htcap)->hc_txbf[1] = (((htcap)->hc_txbf[1] & ~0x18) | ((_t) << 3)))
+/* B13-14 explicit non compressed TxBF */
+#define IEEE80211_HTCAP_SET_EXP_NCOMP_TXBF(htcap,_t) \
+	((htcap)->hc_txbf[1] = (((htcap)->hc_txbf[1] & ~0x60) | ((_t) << 5)))
+/* B15-16 explicit compressed TxBF */
+#define IEEE80211_HTCAP_SET_EXP_COMP_TXBF(htcap,_t) \
+	(htcap)->hc_txbf[1] = (((htcap)->hc_txbf[1] & ~0x80) | ((((_t) & 0x01) << 7))); \
+	(htcap)->hc_txbf[2] = (((htcap)->hc_txbf[2] & ~0x01) | ((_t) >> 1))
+/* B17-18 Grouping */
+#define IEEE80211_HTCAP_SET_GROUPING(htcap,_t) \
+	((htcap)->hc_txbf[2] = (((htcap)->hc_txbf[2] & ~0x6) | ((_t) << 1)))
+/* B19-20 CSI number of beamforming antennas */
+#define IEEE80211_HTCAP_SET_CSI_NUM_BF(htcap,_t) \
+	((htcap)->hc_txbf[2] = (((htcap)->hc_txbf[2] & ~0x18) | ((_t) << 3)))
+/* B21-22 non compressed number of beamforming antennas */
+#define IEEE80211_HTCAP_SET_NCOMP_NUM_BF(htcap,_t) \
+	((htcap)->hc_txbf[2] = (((htcap)->hc_txbf[2] & ~0x60) | ((_t) << 5)))
+/* B23-24 compressed number of beamforming antennas */
+#define IEEE80211_HTCAP_SET_COMP_NUM_BF(htcap,_t) \
+	(htcap)->hc_txbf[2] = (((htcap)->hc_txbf[2] & ~0x80) | (((_t) & 0x01) << 7)); \
+	(htcap)->hc_txbf[3] = (((htcap)->hc_txbf[3] & ~0x01) | ((_t) >> 1))
+/* B25-26 CSI Max number of beamformer rows */
+#define IEEE80211_HTCAP_SET_CSI_BF_ROWS(htcap,_t) \
+	((htcap)->hc_txbf[3] = (((htcap)->hc_txbf[3] & ~0x6) | ((_t) << 1)))
+/* B27-28 channel estimation capability */
+#define IEEE80211_HTCAP_SET_CHAN_EST(htcap,_t) \
+	((htcap)->hc_txbf[3] = (((htcap)->hc_txbf[3] & ~0x18) | ((_t) << 3)))
+
+/*
+ * 802.11n HT Information IE
+ */
+struct ieee80211_ie_htinfo {
+	uint8_t	hi_id;			/* element ID */
+	uint8_t	hi_len;			/* length in bytes */
+	uint8_t	hi_ctrlchannel;	/* control channel */
+	uint8_t	hi_byte1;		/* ht ie byte 1 */
+	uint8_t	hi_byte2;		/* ht ie byte 2 */
+	uint8_t	hi_byte3;		/* ht ie byte 3 */
+	uint8_t	hi_byte4;		/* ht ie byte 4 */
+	uint8_t	hi_byte5;		/* ht ie byte 5 */
+	uint8_t	hi_basicmcsset[16];	/* basic MCS set */
+} __packed;
+
+#define	IEEE80211_HTINFO_CHOFF_SCN			0
+#define	IEEE80211_HTINFO_CHOFF_SCA			1
+#define	IEEE80211_HTINFO_CHOFF_SCB			3
+
+#define IEEE80211_HTINFO_B1_SEC_CHAN_OFFSET		0x03
+#define IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40		0x04
+#define IEEE80211_HTINFO_B1_RIFS_MODE			0x08
+#define IEEE80211_HTINFO_B1_CONTROLLED_ACCESS		0x10
+#define IEEE80211_HTINFO_B2_NON_GF_PRESENT		0x04
+#define IEEE80211_HTINFO_B2_OBSS_PROT			0x10
+#define IEEE80211_HTINFO_B4_DUAL_BEACON			0x40
+#define IEEE80211_HTINFO_B4_DUAL_CTS			0x80
+#define IEEE80211_HTINFO_B5_STBC_BEACON			0x01
+#define IEEE80211_HTINFO_B5_LSIGTXOPPROT		0x02
+#define IEEE80211_HTINFO_B5_PCO_ACTIVE			0x04
+#define IEEE80211_HTINFO_B5_40MHZPHASE			0x08
+
+/* get macros */
+/* control channel (all bits) */
+#define IEEE80211_HTINFO_PRIMARY_CHANNEL(htie) \
+	(htie->hi_ctrlchannel)
+/* byte 1 (all bits) */
+#define IEEE80211_HTINFO_BYTE_ONE(htie) \
+	(htie->hi_byte1)
+/* byte 2 (all bits) */
+#define IEEE80211_HTINFO_BYTE_TWO(htie) \
+	(htie->hi_byte2)
+/* byte 3 (all bits) */
+#define IEEE80211_HTINFO_BYTE_THREE(htie) \
+	(htie->hi_byte3)
+/* byte 4 (all bits) */
+#define IEEE80211_HTINFO_BYTE_FOUR(htie) \
+	(htie->hi_byte4)
+/* byte 5 (all bits) */
+#define IEEE80211_HTINFO_BYTE_FIVE(htie) \
+	(htie->hi_byte5)
+/* B5-B7, byte 1 */
+#define IEEE80211_HTINFO_B1_SIGRANULARITY(htie) \
+	(((htie)->hi_byte1 & 0xe0) >> 5)
+/* B0-B1, byte 1 */
+#define IEEE80211_HTINFO_B1_EXT_CHOFFSET(htie) \
+	(((htie)->hi_byte1 & 0x3))
+/* B0-B1, byte 2 */
+#define IEEE80211_HTINFO_B2_OP_MODE(htie) \
+	(((htie)->hi_byte2 & 0x3))
+/* MCS set value (all bits) */
+#define IEEE80211_HTINFO_BASIC_MCS_VALUE(htie,_set) \
+	((htie)->hi_basicmcsset[_set])
+
+/* set macros */
+/* control channel (all bits) */
+#define IEEE80211_HTINFO_SET_PRIMARY_CHANNEL(htie,_c) \
+	(htie->hi_ctrlchannel = _c)
+/* byte 1 (all bits) */
+#define IEEE80211_HTINFO_SET_BYTE_ONE(htie,_b) \
+	(htie->hi_byte1 = _b)
+/* byte 2 (all bits) */
+#define IEEE80211_HTINFO_SET_BYTE_TWO(htie,_b) \
+	(htie->hi_byte2 = _b)
+/* byte 3 (all bits) */
+#define IEEE80211_HTINFO_SET_BYTE_THREE(htie,_b) \
+	(htie->hi_byte3 = _b)
+/* byte 4 (all bits) */
+#define IEEE80211_HTINFO_SET_BYTE_FOUR(htie,_b) \
+	(htie->hi_byte4 = _b)
+/* byte 5 (all bits) */
+#define IEEE80211_HTINFO_SET_BYTE_FIVE(htie,_b) \
+	(htie->hi_byte5 = _b)
+/* B5-B7, byte 1 */
+#define IEEE80211_HTINFO_B1_SET_SIGRANULARITY(htie,_g)			\
+	((htie)->hi_byte1 = (((htie)->hi_byte1 & ~0xe0)  |((_g) << 5) ))
+/* B0-B1, byte 1 */
+#define IEEE80211_HTINFO_B1_SET_EXT_CHOFFSET(htie,_off)					\
+	((htie)->hi_byte1 = (((htie)->hi_byte1 & ~0x03)  |(_off)))
+/* B0-B1, byte 2 */
+#define IEEE80211_HTINFO_B2_SET_OP_MODE(htie,_m)									\
+	((htie)->hi_byte2 = (((htie)->hi_byte2 & ~0x3) | ((_m) )))
+/* Basic MCS set value (all bits) */
+#define IEEE80211_HTINFO_SET_BASIC_MCS_VALUE(htie,_set,_value) \
+	((htie)->hi_basicmcsset[_set] = (_value & 0xFF))
+
+
+/* extension channel offset (2 bit signed number) */
+enum {
+	IEEE80211_HTINFO_EXTOFFSET_NA	 = 0,	/* 0  no extension channel is present */
+	IEEE80211_HTINFO_EXTOFFSET_ABOVE = 1,   /* +1 extension channel above control channel */
+	IEEE80211_HTINFO_EXTOFFSET_UNDEF = 2,   /* -2 undefined */
+	IEEE80211_HTINFO_EXTOFFSET_BELOW = 3	/* -1 extension channel below control channel*/
+};
+
+/* operating mode */
+enum {
+	IEEE80211_HTINFO_OPMODE_NO_PROT,			/* no protection */
+	IEEE80211_HTINFO_OPMODE_HT_PROT_NON_MEM,	/* protection required (Legacy device present in other BSS) */
+	IEEE80211_HTINFO_OPMODE_HT_PROT_20_ONLY,	/* protection required ( One 20 MHZ only HT device is present in 20/40 BSS) */
+	IEEE80211_HTINFO_OPMODE_HT_PROT_MIXED,		/* protection required (Legacy device is present in this BSS) */
+};
+
+/* signal granularity */
+enum {
+	IEEE80211_HTINFO_SIGRANULARITY_5,	/* 5 ms */
+	IEEE80211_HTINFO_SIGRANULARITY_10,	/* 10 ms */
+	IEEE80211_HTINFO_SIGRANULARITY_15,	/* 15 ms */
+	IEEE80211_HTINFO_SIGRANULARITY_20,	/* 20 ms */
+	IEEE80211_HTINFO_SIGRANULARITY_25,	/* 25 ms */
+	IEEE80211_HTINFO_SIGRANULARITY_30,	/* 30 ms */
+	IEEE80211_HTINFO_SIGRANULARITY_35,	/* 35 ms */
+	IEEE80211_HTINFO_SIGRANULARITY_40,	/* 40 ms */
+};
+
+/*
+ * Management information element payloads.
+ */
+
+enum {
+	IEEE80211_ELEMID_SSID		= 0,
+	IEEE80211_ELEMID_RATES		= 1,
+	IEEE80211_ELEMID_FHPARMS	= 2,
+	IEEE80211_ELEMID_DSPARMS	= 3,
+	IEEE80211_ELEMID_CFPARMS	= 4,
+	IEEE80211_ELEMID_TIM		= 5,
+	IEEE80211_ELEMID_IBSSPARMS	= 6,
+	IEEE80211_ELEMID_COUNTRY	= 7,
+	IEEE80211_ELEMID_REQINFO	= 10,
+	IEEE80211_ELEMID_BSS_LOAD	= 11,
+	IEEE80211_ELEMID_EDCA		= 12,
+	IEEE80211_ELEMID_CHALLENGE	= 16,
+	/* 17-31 reserved for challenge text extension */
+	IEEE80211_ELEMID_PWRCNSTR	= 32,
+	IEEE80211_ELEMID_PWRCAP		= 33,
+	IEEE80211_ELEMID_TPCREQ		= 34,
+	IEEE80211_ELEMID_TPCREP		= 35,
+	IEEE80211_ELEMID_SUPPCHAN	= 36,
+	IEEE80211_ELEMID_CHANSWITCHANN	= 37,
+	IEEE80211_ELEMID_MEASREQ	= 38,
+	IEEE80211_ELEMID_MEASREP	= 39,
+	IEEE80211_ELEMID_QUIET		= 40,
+	IEEE80211_ELEMID_IBSSDFS	= 41,
+	IEEE80211_ELEMID_ERP		= 42,
+	IEEE80211_ELEMID_HTCAP		= 45,
+	IEEE80211_ELEMID_QOSCAP		= 46,
+	IEEE80211_ELEMID_RSN		= 48,
+	IEEE80211_ELEMID_XRATES		= 50,
+	IEEE80211_ELEMID_NEIGHBOR_REP	= 52,
+	IEEE80211_ELEMID_MOBILITY_DOMAIN = 54,
+	IEEE80211_ELEMID_FTIE		= 55,
+	IEEE80211_ELEMID_TIMEOUT_INT	= 56,
+	IEEE80211_ELEMID_REG_CLASSES	= 59,
+	IEEE80211_ELEMID_HTINFO		= 61,
+	IEEE80211_ELEMID_SEC_CHAN_OFF	= 62,	/* Secondary Channel Offset */
+	IEEE80211_ELEMID_RRM_ENABLED	= 70,	/* RM enabled capabilities */
+	IEEE80211_ELEMID_20_40_BSS_COEX = 72,	/* 20/40 BSS Coexistence */
+	IEEE80211_ELEMID_20_40_IT_CH_REP   = 73,  /* 20/40 BSS Intolerant channel report */
+	IEEE80211_ELEMID_OBSS_SCAN	   = 74,   /* Overlapping BSS scan parameter  */
+	IEEE80211_ELEMID_TDLS_LINK_ID	   = 101, /* TDLS Link Identifier */
+	IEEE80211_ELEMID_TDLS_WKUP_SCHED   = 102, /* TDLS Wakeup Schedule */
+	IEEE80211_ELEMID_TDLS_CS_TIMING    = 104, /* TDLS Channel Switch Timing */
+	IEEE80211_ELEMID_TDLS_PTI_CTRL	   = 105, /* TDLS PTI Control */
+	IEEE80211_ELEMID_TDLS_PU_BUF_STAT  = 106, /* TDLS PU Buffer Status */
+	IEEE80211_ELEMID_INTERWORKING	= 107,
+	IEEE80211_ELEMID_EXTCAP		= 127,
+	/* 128-129 proprietary elements used by Agere chipsets */
+	IEEE80211_ELEMID_AGERE1		= 128,
+	IEEE80211_ELEMID_AGERE2		= 129,
+	IEEE80211_ELEMID_TPC		= 150,
+	IEEE80211_ELEMID_CCKM		= 156,
+	/* 191-199 Table 8-54-Element IDs in Std 802.11ac-2013 */
+	IEEE80211_ELEMID_VHTCAP		= 191,
+	IEEE80211_ELEMID_VHTOP		= 192,
+	IEEE80211_ELEMID_EXTBSSLOAD	= 193,
+	IEEE80211_ELEMID_WBWCHANSWITCH	= 194,
+	IEEE80211_ELEMID_VHTXMTPWRENVLP	= 195,
+	IEEE80211_ELEMID_CHANSWITCHWRP	= 196,
+	IEEE80211_ELEMID_AID		= 197,
+	IEEE80211_ELEMID_QUIETCHAN	= 198,
+	IEEE80211_ELEMID_OPMOD_NOTIF	= 199,
+	/* Vendor Specific */
+	IEEE80211_ELEMID_VENDOR		= 221,	/* vendor private */
+};
+
+#define IEEE80211_2040BSSCOEX_INFO_REQ	0x01
+#define IEEE80211_2040BSSCOEX_40_intol	0x02
+#define IEEE80211_2040BSSCOEX_20_REQ	0x04
+#define IEEE80211_2040BSSCOEX_SCAN_EXEP_REQ	0x08
+#define IEEE80211_2040BSSCOEX_SCAN_EXEP_GRA	0x10
+
+#define IEEE80211_CHANSWITCHANN_BYTES 5
+#define QTN_CHANSWITCHANN_TSF_BYTES 10
+#define IEEE80211_CSA_LEN	7
+#define IEEE80211_CSA_TSF_LEN	(IEEE80211_CSA_LEN + 10)
+#define IEEE80211_SEC_CHAN_OFF_IE_LEN 3
+#define IEEE80211_WBAND_CHANSWITCH_IE_LEN 5
+#define IEEE80211_NCW_ACT_LEN   3      /* Notify Channel Width Action size */
+#define IEEE80211_MU_GRP_ID_ACT_LEN 26 /* MU grp id mgmt action size */
+
+#define IEEE80211_NODE_IDX_UNMAP(x)	(BR_SUBPORT_UNMAP(x))
+#define IEEE80211_NODE_IDX_MAP(x)	(BR_SUBPORT_MAP(x))
+#define IEEE80211_NODE_IDX_VALID(x)	((x) & 0x8000)
+#define IEEE80211_NODE_IDX_INVALID(x)	(!IEEE80211_NODE_IDX_VALID(x))
+
+/*
+ * The 802.11 spec says at most 2007 stations may be
+ * associated at once.  For most AP's this is way more
+ * than is feasible so we use a default of 128.  This
+ * number may be overridden by the driver and/or by
+ * user configuration.
+ */
+#define	IEEE80211_AID_MAX		2007
+#define	IEEE80211_AID_DEF		128
+
+#define	IEEE80211_AID(b)	((b) &~ 0xc000)
+
+struct ieee80211_tim_ie {
+	uint8_t	tim_ie;			/* IEEE80211_ELEMID_TIM */
+	uint8_t	tim_len;
+	uint8_t	tim_count;		/* DTIM count */
+	uint8_t	tim_period;		/* DTIM period */
+	uint8_t	tim_bitctl;		/* bitmap control */
+	uint8_t	tim_bitmap[IEEE80211_AID_DEF / NBBY];		/* variable-length bitmap */
+} __packed;
+
+struct ieee80211_ie_sec_chan_off {
+	uint8_t	sco_id;			/* IEEE80211_ELEMID_SEC_CHAN_OFF */
+	uint8_t	sco_len;
+	uint8_t	sco_off;		/* offset */
+} __packed;
+
+struct ieee80211_country_ie {
+	uint8_t	ie;			/* IEEE80211_ELEMID_COUNTRY */
+	uint8_t	len;
+	uint8_t	cc[3];			/* ISO CC+(I)ndoor/(O)utdoor */
+	struct {
+		uint8_t schan;			/* starting channel */
+		uint8_t nchan;			/* number channels */
+		uint8_t maxtxpwr;		/* tx power cap */
+	} __packed band[4];			/* up to 4 sub bands */
+} __packed;
+
+#define IEEE80211_CHALLENGE_LEN		128
+
+#define IEEE80211_SUPPCHAN_LEN		52
+
+#define	IEEE80211_RATE_BASIC		0x80
+#define	IEEE80211_RATE_VAL			0x7f
+#define IEEE80211_BSS_MEMBERSHIP_SELECTOR_HT_PHY 0x7F
+
+/* EPR information element flags */
+#define	IEEE80211_ERP_NON_ERP_PRESENT	0x01
+#define	IEEE80211_ERP_USE_PROTECTION	0x02
+#define	IEEE80211_ERP_LONG_PREAMBLE	0x04
+
+/* Atheros private advanced capabilities info */
+#define	ATHEROS_CAP_TURBO_PRIME		0x01
+#define	ATHEROS_CAP_COMPRESSION		0x02
+#define	ATHEROS_CAP_FAST_FRAME		0x04
+/* bits 3-6 reserved */
+#define	ATHEROS_CAP_BOOST		0x80
+
+#define IEEE80211_OUI_LEN	3
+
+#define	ATH_OUI			0x7f0300	/* Atheros OUI */
+#define	ATH_OUI_TYPE		0x01
+#define	ATH_OUI_SUBTYPE		0x01
+#define ATH_OUI_VERSION		0x00
+#define	ATH_OUI_TYPE_XR		0x03
+#define	ATH_OUI_VER_XR		0x01
+
+#define	QTN_OUI			0x862600	/* Quantenna OUI */
+#define	QTN_OUI_CFG		0x01
+#define QTN_OUI_PAIRING		0x02		/* Pairing Protection */
+#define	QTN_OUI_VSP_CTRL	0x03		/* VSP configuration */
+#define	QTN_OUI_TDLS_BRMACS	0x04		/* TDLS */
+#define QTN_OUI_TDLS		0x05		/* TDLS Information */
+#define	QTN_OUI_RM_SPCIAL	0x10		/* Radio measurement special group */
+#define	QTN_OUI_RM_ALL		0x11		/* Radio measurement all group */
+#define QTN_OUI_SCS             0x12            /* SCS status report and control */
+#define QTN_OUI_QWME            0x13            /* WME IE between QSTA */
+#define QTN_OUI_EXTENDER_ROLE	0x14		/* WDS Extender Role */
+#define QTN_OUI_EXTENDER_BSSID	0x15		/* Extender BSSID */
+#define QTN_OUI_EXTENDER_STATE	0x16		/* Extender specific states */
+#define QTN_OUI_OCAC_STATE	0x17		/* APs OCAC state - NONE, BACKOFF or ONGOING */
+
+#define QTN_OUI_EXTENDER_ROLE_NONE	0x00	/* NONE Role */
+#define QTN_OUI_EXTENDER_ROLE_MBS	0x01	/* MBS Role */
+#define QTN_OUI_EXTENDER_ROLE_RBS	0x02	/* RBS Role */
+
+#define QTN_QWME_IE_VERSION	1
+
+#define	WPA_OUI			0xf25000
+#define	WPA_RSN_OUI_TYPE	0x01
+#define	WSC_OUI_TYPE		0x04
+#define	WPA_VERSION		1		/* current supported version */
+
+#define	WPA_CSE_NULL		0x00
+#define	WPA_CSE_WEP40		0x01
+#define	WPA_CSE_TKIP		0x02
+#define	WPA_CSE_CCMP		0x04
+#define	WPA_CSE_WEP104		0x05
+#define	RSN_CSE_GROUP_NOT_ALLOW 0x07  /* Group addressed traffic not allowed */
+
+#define	WPA_ASE_NONE		0x00
+#define	WPA_ASE_8021X_UNSPEC	0x01
+#define	WPA_ASE_8021X_PSK	0x02
+#define	IEEE80211_RSN_ASE_TPK	0x07  /* TDLS TPK Handshake */
+
+#define	RSN_OUI			0xac0f00
+#define	RSN_VERSION		1		/* current supported version */
+
+#define WFA_OUI			0x9A6F50
+#define WFA_TYPE_OSEN		0x12
+#define WFA_AKM_TYPE_OSEN	0x1
+
+#define	BCM_OUI			0x4C9000	/* Apple Products */
+#define	BCM_OUI_TYPE		0x01
+#define BCM_OUI_VHT_TYPE	0x0804
+
+#define	BCM_OUI_2		0x181000	/* iPad */
+#define	BCM_OUI_2_TYPE		0x02
+
+
+#define	RSN_CSE_NULL		0x00
+#define	RSN_CSE_WEP40		0x01
+#define	RSN_CSE_TKIP		0x02
+#define	RSN_CSE_WRAP		0x03
+#define	RSN_CSE_CCMP		0x04
+#define	RSN_CSE_WEP104		0x05
+#define	RSN_CSE_BIP		0x06
+
+#define	RSN_ASE_NONE		0x00
+#define	RSN_ASE_8021X_UNSPEC	0x01
+#define	RSN_ASE_8021X_PSK	0x02
+#define RSN_ASE_FT_8021X	0x03
+#define RSN_ASE_FT_PSK		0x04
+#define	RSN_ASE_8021X_SHA256	0x05
+#define	RSN_ASE_8021X_PSK_SHA256 0x06
+
+#define	RSN_CAP_PREAUTH		0x01
+#define	RSN_CAP_MFP_REQ		0x0040
+#define	RSN_CAP_MFP_CAP		0x0080
+#define	RSN_CAP_SPP_CAP		0x0400
+#define	RSN_CAP_SPP_REQ		0x0800
+
+#define RSN_IS_MFP(_rsn_caps) (((_rsn_caps) & RSN_CAP_MFP_REQ) || ((_rsn_caps) & RSN_CAP_MFP_CAP))
+
+#define	WME_OUI			0xf25000
+#define	WME_OUI_TYPE		0x02
+#define	WME_INFO_OUI_SUBTYPE	0x00
+#define	WME_PARAM_OUI_SUBTYPE	0x01
+#define	WME_VERSION		1
+#define	WME_UAPSD_MASK		0x0f
+
+#define RLNK_OUI		0x430C00	/* Ralink OUI */
+
+#define RTK_OUI			0x4ce000	/* Realtek OUI */
+#define EDIMAX_OUI		0x021f80	/* Edimax OUI */
+
+#define PEER_VENDOR_NONE	0x00
+#define PEER_VENDOR_QTN		0x01
+#define PEER_VENDOR_BRCM	0x02
+#define PEER_VENDOR_ATH		0x04
+#define PEER_VENDOR_RLNK	0x08
+#define PEER_VENDOR_RTK		0x10
+#define PEER_VENDOR_INTEL	0x20
+
+#define PEER_VENDOR_MASK	(PEER_VENDOR_BRCM | PEER_VENDOR_ATH | PEER_VENDOR_RLNK \
+				| PEER_VENDOR_RTK | PEER_VENDOR_INTEL)
+
+/*
+ * 802.11ac VHT Capabilities element
+ */
+struct ieee80211_ie_vhtcap {
+	u_int8_t	vht_id;			/* element ID (IEEE80211_ELEMID_VHTCAP) */
+	u_int8_t	vht_len;		/* length in bytes */
+	u_int8_t	vht_cap[4];		/* VHT capabilities info */
+	u_int8_t	vht_mcs_nss_set[8];	/* supported MCS and NSS set */
+} __packed;
+
+/* VHT capabilities flags */
+#define IEEE80211_VHTCAP_C_CHWIDTH			0x0000000C
+#define IEEE80211_VHTCAP_C_RX_LDPC			0x00000010
+#define IEEE80211_VHTCAP_C_SHORT_GI_80			0x00000020
+#define IEEE80211_VHTCAP_C_SHORT_GI_160			0x00000040
+#define IEEE80211_VHTCAP_C_TX_STBC			0x00000080
+#define IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP		0x00000800
+#define IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP		0x00001000
+#define IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP		0x00080000
+#define IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP		0x00100000
+#define IEEE80211_VHTCAP_C_VHT_TXOP_PS			0x00200000
+#define IEEE80211_VHTCAP_C_PLUS_HTC_MINUS_VHT_CAP	0x00400000
+#define IEEE80211_VHTCAP_C_RX_ATN_PATTERN_CONSISTNCY	0x10000000
+#define IEEE80211_VHTCAP_C_TX_ATN_PATTERN_CONSISTNCY	0x20000000
+
+#define IEEE80211_VHTCAP_C_MU_BEAM_FORMXX_CAP_MASK	(IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP | \
+							 IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP)
+
+/* VHT mcs info extras */
+#define IEEE80211_VHTCAP_MCS_MAX			8
+#define IEEE80211_VHTCAP_MCS_DISABLED			0x03
+
+/* VHT capability macro */
+/* get macros */
+/* VHT capabilities (all bits) */
+#define IEEE80211_VHTCAP_GET_CAPFLAGS(vhtcap) \
+	(u_int32_t)((vhtcap)->vht_cap[0] | \
+	((vhtcap)->vht_cap[1] << 8) | \
+	((vhtcap)->vht_cap[2] << 16) | \
+	((vhtcap)->vht_cap[3] << 24))
+
+/* B0-1 Max. MPDU Length */
+#define IEEE80211_VHTCAP_GET_MAXMPDU(vhtcap) \
+	(enum ieee80211_vht_maxmpdu)((vhtcap)->vht_cap[0] & 0x03)
+
+/* B2-3 Supported channel width */
+#define IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap) \
+	(enum ieee80211_vht_chanwidth)(((vhtcap)->vht_cap[0] & 0x0C) >> 2)
+
+/* B4 RX LDPC support */
+#define IEEE80211_VHTCAP_GET_RXLDPC(vhtcap) \
+	(((vhtcap)->vht_cap[0] & 0x10) >> 4)
+
+/* B5 Short GI for 80MHz support */
+#define IEEE80211_VHTCAP_GET_SGI_80MHZ(vhtcap) \
+	(((vhtcap)->vht_cap[0] & 0x20) >> 5)
+
+/* B6 Short GI for 160MHz support */
+#define IEEE80211_VHTCAP_GET_SGI_160MHZ(vhtcap) \
+	(((vhtcap)->vht_cap[0] & 0x40) >> 6)
+
+/* B7 TX STBC */
+#define IEEE80211_VHTCAP_GET_TXSTBC(vhtcap) \
+	(((vhtcap)->vht_cap[0] & 0x80) >> 7)
+
+/* B8-10 RX STBC */
+#define IEEE80211_VHTCAP_GET_RXSTBC(vhtcap) \
+	(enum ieee80211_vht_rxstbc)((vhtcap)->vht_cap[1] & 0x07)
+
+/* B11 SU Beam-former */
+#define IEEE80211_VHTCAP_GET_SU_BEAMFORMER(vhtcap) \
+	(((vhtcap)->vht_cap[1] & 0x08) >> 3)
+
+/* B12 SU Beam-formee */
+#define IEEE80211_VHTCAP_GET_SU_BEAMFORMEE(vhtcap) \
+	(((vhtcap)->vht_cap[1] & 0x10) >> 4)
+
+/* B13-15 Beamformee STS capability */
+#define IEEE80211_VHTCAP_GET_BFSTSCAP(vhtcap) \
+	(u_int8_t)(((vhtcap)->vht_cap[1] & 0xE0) >> 5)
+
+/* B16-18 Number of sounding Dimensions */
+#define IEEE80211_VHTCAP_GET_NUMSOUND(vhtcap) \
+	(u_int8_t)((vhtcap)->vht_cap[2] & 0x07)
+
+/* B19 MU Beam-former VHT capability */
+#define IEEE80211_VHTCAP_GET_MU_BEAMFORMER(vhtcap) \
+	(((vhtcap)->vht_cap[2] & 0x08) >> 3)
+
+/* B20 MU Beam-formee VHT capability */
+#define IEEE80211_VHTCAP_GET_MU_BEAMFORMEE(vhtcap) \
+	(((vhtcap)->vht_cap[2] & 0x10) >> 4)
+
+/* B22 VHT variant HT control field */
+#define IEEE80211_VHTCAP_GET_HTC_VHT(vhtcap) \
+	(((vhtcap)->vht_cap[2] & 0x40) >> 6)
+
+/* B23-25 Max. A-MPDU Length Exponent */
+#define IEEE80211_VHTCAP_GET_MAXAMPDUEXP(vhtcap) \
+	(enum ieee80211_vht_maxampduexp)((((vhtcap)->vht_cap[2] & 0x80) >> 7) | \
+	(((vhtcap)->vht_cap[3] & 0x03) << 1))
+
+/* B26-27 VHT Link Adaptation capable */
+#define IEEE80211_VHTCAP_GET_LNKADPTCAP(vhtcap) \
+	(enum ieee80211_vht_lnkadptcap)(((vhtcap)->vht_cap[3] & 0x0C) >> 2)
+
+/* B28 Rx Antenna pattern consistency */
+#define IEEE80211_VHTCAP_GET_RXANTPAT(vhtcap) \
+	(((vhtcap)->vht_cap[3] & 0x10) >> 4)
+
+/* B29 Tx Antenna pattern consistency */
+#define IEEE80211_VHTCAP_GET_TXANTPAT(vhtcap) \
+	(((vhtcap)->vht_cap[3] & 0x20) >> 5)
+
+/* B0-B15 RX VHT-MCS MAP for Spatial streams 1-8 */
+#define IEEE80211_VHTCAP_GET_RX_MCS_NSS(vhtcap) \
+	(((vhtcap)->vht_mcs_nss_set[1] << 8) | \
+	((vhtcap)->vht_mcs_nss_set[0]))
+
+/* B32-B47 TX VHT-MCS MAP for Spatial streams 1-8 */
+#define IEEE80211_VHTCAP_GET_TX_MCS_NSS(vhtcap) \
+	(((vhtcap)->vht_mcs_nss_set[5] << 8) | \
+	((vhtcap)->vht_mcs_nss_set[4]))
+
+/* VHT-MCS MAP entry for RX or TX MAP */
+#define IEEE80211_VHTCAP_GET_MCS_MAP_ENTRY(mcsmap, idx) \
+	((mcsmap >> (idx * 2)) & 0x3)
+
+/* B16-B28 RX Highest supported Long GI data rates */
+#define IEEE80211_VHTCAP_GET_RX_LGIMAXRATE(vhtcap) \
+	(u_int16_t)(((vhtcap)->vht_mcs_nss_set[2]) | \
+	((vhtcap)->vht_mcs_nss_set[3] << 8))
+
+/* B48-B60 TX Highest supported Long GI data rates */
+#define IEEE80211_VHTCAP_GET_TX_LGIMAXRATE(vhtcap) \
+	(u_int16_t)(((vhtcap)->vht_mcs_nss_set[6]) | \
+	((vhtcap)->vht_mcs_nss_set[7] << 8))
+
+/* set macros */
+/* VHT capabilities (all bits) */
+#define IEEE80211_VHTCAP_SET_CAPFLAGS(vhtcap, _cap) \
+	(vhtcap)->vht_cap[0] = ((_cap) & 0x000000FF); \
+	(vhtcap)->vht_cap[1] = (((_cap) & 0x0000FF00) >> 8); \
+	(vhtcap)->vht_cap[2] = (((_cap) & 0x00FF0000) >> 16); \
+	(vhtcap)->vht_cap[3] = (((_cap) & 0xFF000000) >> 24)
+
+/* B0-1 Max. MPDU Length */
+#define IEEE80211_VHTCAP_SET_MAXMPDU(vhtcap, _m) \
+	(vhtcap)->vht_cap[0] = (((vhtcap)->vht_cap[0] & ~0x03) | ((_m) & 0x03))
+
+/* B2-3 Supported channel width */
+#define IEEE80211_VHTCAP_SET_CHANWIDTH(vhtcap, _m) \
+	(vhtcap)->vht_cap[0] = (((vhtcap)->vht_cap[0] & ~0x0C) | ((_m) & 0x03) << 2)
+
+/* B8-10 RX STBC */
+#define IEEE80211_VHTCAP_SET_RXSTBC(vhtcap, _m) \
+	(vhtcap)->vht_cap[1] = (((vhtcap)->vht_cap[1] & ~0x07) | ((_m) & 0x07))
+
+/* B13-15 Beamformee STS capability */
+#define IEEE80211_VHTCAP_SET_BFSTSCAP(vhtcap, _m) \
+	(vhtcap)->vht_cap[1] = (((vhtcap)->vht_cap[1] & ~0xE0) | ((_m) & 0x07) << 5)
+
+/* B16-18 Number of sounding Dimensions */
+#define IEEE80211_VHTCAP_SET_NUMSOUND(vhtcap, _m) \
+	(vhtcap)->vht_cap[2] = (((vhtcap)->vht_cap[2] & ~0x07) | ((_m) & 0x07))
+
+/* B23-25 Max. A-MPDU Length Exponent */
+#define IEEE80211_VHTCAP_SET_MAXAMPDUEXP(vhtcap, _m) \
+	(vhtcap)->vht_cap[2] = (((vhtcap)->vht_cap[2] & ~0x80) | ((_m) & 0x01) << 7); \
+	(vhtcap)->vht_cap[3] = (((vhtcap)->vht_cap[3] & ~0x03) | ((_m) & 0x06) >> 1)
+
+/* B26-27 VHT Link Adaptation capable */
+#define IEEE80211_VHTCAP_SET_LNKADPTCAP(vhtcap, _m) \
+	(vhtcap)->vht_cap[3] = (((vhtcap)->vht_cap[3] & ~0x0C) | ((_m) & 0x03) << 2)
+
+/* B0-B15 RX VHT-MCS MAP for Spatial streams 1-8 */
+#define IEEE80211_VHTCAP_SET_RX_MCS_NSS(vhtcap, _m) \
+	(vhtcap)->vht_mcs_nss_set[1] = (((_m) & 0xFF00) >> 8); \
+	(vhtcap)->vht_mcs_nss_set[0] = ((_m) & 0x00FF)
+
+/* B16-B28 RX Highest supported Long GI data rates */
+#define IEEE80211_VHTCAP_SET_RX_LGIMAXRATE(vhtcap, _m) \
+	(vhtcap)->vht_mcs_nss_set[2] = ((_m) & 0x00FF); \
+	(vhtcap)->vht_mcs_nss_set[3] = (((_m) & 0x1F00) >> 8)
+
+/* B32-B47 TX VHT-MCS MAP for Spatial streams 1-8 */
+#define IEEE80211_VHTCAP_SET_TX_MCS_NSS(vhtcap, _m) \
+	(vhtcap)->vht_mcs_nss_set[5] = (((_m) & 0xFF00) >> 8); \
+	(vhtcap)->vht_mcs_nss_set[4] = ((_m) & 0x00FF)
+
+/* B48-B60 TX Highest supported Long GI data rates */
+#define IEEE80211_VHTCAP_SET_TX_LGIMAXRATE(vhtcap, _m) \
+	(vhtcap)->vht_mcs_nss_set[6] = ((_m) & 0x00FF); \
+	(vhtcap)->vht_mcs_nss_set[7] = (((_m) & 0x1F00) >> 8)
+
+/* VHT MCS MAP */
+#define	IEEE80211_VHTMCS_ALL_DISABLE	(0xFFFF)
+
+/* VHT capabilities options */
+/* Defined in _ieee80211.h file */
+/*
+ * 802.11ac VHT Operation element
+ */
+struct ieee80211_ie_vhtop {
+	u_int8_t	vhtop_id;		/* element ID (IEEE80211_ELEMID_VHTOP) */
+	u_int8_t	vhtop_len;		/* length in bytes */
+	u_int8_t	vhtop_info[3];		/* VHT Operation info */
+	u_int8_t	vhtop_bvhtmcs[2];	/* basic VHT MCS and NSS set */
+} __packed;
+
+/* VHT Operation Information */
+/* Channel width Octet 1 */
+#define IEEE80211_VHTOP_SET_CHANWIDTH(vhtop, _m) \
+	(vhtop)->vhtop_info[0] = (_m)
+
+/* Channel Center Frequency Segment 0 */
+#define IEEE80211_VHTOP_SET_CENTERFREQ0(vhtop, _m) \
+	(vhtop)->vhtop_info[1] = (_m)
+
+/* Channel Center Frequency Segment 1 */
+#define IEEE80211_VHTOP_SET_CENTERFREQ1(vhtop, _m) \
+	(vhtop)->vhtop_info[2] = (_m)
+
+/* Basic VHT-MCS and NSS Set  */
+#define IEEE80211_VHTOP_SET_BASIC_MCS_NSS(vhtop, _m) \
+	(vhtop)->vhtop_bvhtmcs[0] = ((_m) & 0xFF00) >> 8; \
+	(vhtop)->vhtop_bvhtmcs[1] = ((_m) & 0x00FF)
+
+/* Get macros */
+/* Channel width Octet 1 */
+#define IEEE80211_VHTOP_GET_CHANWIDTH(vhtop) \
+	(vhtop)->vhtop_info[0]
+
+/* Channel Center Frequency Segment 0 */
+#define IEEE80211_VHTOP_GET_CENTERFREQ0(vhtop) \
+	(vhtop)->vhtop_info[1]
+
+/* Channel Center Frequency Segment 1 */
+#define IEEE80211_VHTOP_GET_CENTERFREQ1(vhtop) \
+	(vhtop)->vhtop_info[2]
+
+/* Basic VHT-MCS and NSS Set  */
+#define IEEE80211_VHTOP_GET_BASIC_MCS_NSS(vhtop) \
+	(((vhtop)->vhtop_bvhtmcs[0] << 8) | \
+	((vhtop)->vhtop_bvhtmcs[1]))
+
+/*
+ * 802.11ac VHT Operating mode notification element
+ */
+struct ieee80211_ie_vhtop_notif {
+	uint8_t	id;
+	uint8_t	len;
+	uint8_t	vhtop_notif_mode;
+} __packed;
+
+/*
+ * 802.11ac Extended BSS Load element
+ */
+struct ieee80211_ie_ebssload {
+	u_int8_t	ebl_id;			/* element ID */
+	u_int8_t	ebl_len;		/* length in bytes */
+	u_int8_t	ebl_mumimo_cnt[2];	/* MU-MIMO Capable station count */
+	u_int8_t	ebl_ss_underuse;	/* Spatial Stream Underutilization */
+	u_int8_t	ebl_20mhz_use;		/* Observable Secondary 20Mhz use */
+	u_int8_t	ebl_40mhz_use;		/* Observable Secondary 40Mhz use */
+	u_int8_t	ebl_80mhz_use;		/* Observable Secondary 80Mhz use */
+} __packed;
+
+
+/*
+ * 802.11ac Wide Bandwidth Channel Switch element
+ */
+struct ieee80211_ie_wbchansw {
+	u_int8_t	wbcs_id;		/* element ID */
+	u_int8_t	wbcs_len;		/* length in bytes */
+	u_int8_t	wbcs_newchanw;		/* New Channel Width */
+	u_int8_t	wbcs_newchancf0;	/* New Channel Center Freq 0 */
+	u_int8_t	wbcs_newchancf1;	/* New Channel Center Freq 1 */
+} __packed;
+
+
+/*
+ * 802.11ac VHT Transmit Power Envelope element
+ */
+enum {
+	IEEE80211_TX_POW_FOR_20MHZ,
+	IEEE80211_TX_POW_FOR_40MHZ,
+	IEEE80211_TX_POW_FOR_80MHZ,
+	IEEE80211_TX_POW_FOR_160MHZ
+};
+
+struct ieee80211_ie_vtxpwren {
+	u_int8_t	vtxpwren_id;		/* element ID */
+	u_int8_t	vtxpwren_len;		/* length in byte */
+	u_int8_t	vtxpwren_txpwr_info;	/* tx power info */
+	u_int8_t	vtxpwren_tp20;		/* local max tx power for 20Mhz */
+	u_int8_t	vtxpwren_tp40;		/* local max tx power for 40Mhz */
+	u_int8_t	vtxpwren_tp80;		/* local max tx power for 80Mhz */
+	u_int8_t	vtxpwren_tp160;		/* local max tx power for 160Mhz */
+} __packed;
+
+/*
+ * 802.11ac Channel Switch Wrapper element
+ */
+struct ieee80211_ie_chsw_wrapper {
+	u_int8_t			chsw_id;		/* element ID */
+	u_int8_t			chsw_len;		/* length in byte */
+} __packed;
+
+/*
+ * 802.11ac AID element
+ */
+struct ieee80211_ie_aid {
+	u_int8_t	aid_id;		/* element ID */
+	u_int8_t	aid_len;	/* length in byte */
+	u_int16_t	aid;		/* aid */
+} __packed;
+
+/*
+ * 802.11ac Quiet Channel element
+ */
+struct ieee80211_ie_quietchan {
+	u_int8_t	qc_id;		/* element ID (IEEE80211_ELEMID_QUIETCHAN) */
+	u_int8_t	qc_len;		/* length in byte */
+	u_int8_t	qc_qmode;	/* AP Quiet Mode */
+	u_int8_t	qc_qcnt;	/* AP Quiet Count */
+	u_int8_t	qc_qperiod;	/* AP Quiet Period */
+	u_int8_t	qc_qduration;	/* AP Quiet Duration */
+	u_int8_t	qc_qoffset;	/* AP Quiet Offset */
+} __packed;
+
+
+/*
+ * 802.11ac Operating Mode Notification element
+ */
+struct ieee80211_ie_opmodenotice {
+	u_int8_t	omn_id;		/* element ID (IEEE80211_ELEMID_OPMOD_NOTIF) */
+	u_int8_t	omn_len;	/* length in byte */
+	u_int8_t	opn_opmode;	/* Op Mode; NOTE(review): "opn_" prefix inconsistent with "omn_" — rename would break users */
+} __packed;
+
+enum {
+	IEEE80211_TIMEOUT_REASSOC_DEADLINE		= 1,
+	IEEE80211_TIMEOUT_KEY_LIFETIME			= 2,
+	IEEE80211_TIMEOUT_ASSOC_COMEBACK		= 3,
+};
+
+#define IEEE80211_W_ASSOC_COMEBACK_TO		1000
+
+/*
+ * 802.11w timeout information IE
+ */
+struct ieee80211_timout_int_ie {
+	u_int8_t	timout_int_ie;			/* IEEE80211_ELEMID_TIMEOUT_INT */
+	u_int8_t	timout_int_len;
+	u_int8_t	timout_int_type;		/* Timeout Interval Type */
+	u_int32_t	timout_int_value;		/* in tus */
+} __packed;
+
+struct ieee80211_ie_brcm_vht {
+	uint8_t id;
+	uint8_t len;
+	uint8_t brcm_vht_oui[3];
+	uint16_t brcm_vht_type;
+	uint8_t vht_ies[0];
+}__packed;
+
+/*
+ * Add the Quantenna OUI to a frame
+ */
+uint8_t ieee80211_oui_add_qtn(uint8_t *oui);
+
+/*
+ * AUTH management packets
+ *
+ *	octet algo[2]
+ *	octet seq[2]
+ *	octet status[2]
+ *	octet chal.id
+ *	octet chal.length
+ *	octet chal.text[253]
+ */
+
+typedef uint8_t *ieee80211_mgt_auth_t;
+
+#define	IEEE80211_AUTH_ALGORITHM(auth) \
+	((auth)[0] | ((auth)[1] << 8))
+#define	IEEE80211_AUTH_TRANSACTION(auth) \
+	((auth)[2] | ((auth)[3] << 8))
+#define	IEEE80211_AUTH_STATUS(auth) \
+	((auth)[4] | ((auth)[5] << 8))
+
+#define	IEEE80211_AUTH_ALG_OPEN		0x0000
+#define	IEEE80211_AUTH_ALG_SHARED	0x0001
+#define	IEEE80211_AUTH_ALG_FT		0x0002
+#define	IEEE80211_AUTH_ALG_LEAP		0x0080
+
+enum {
+	IEEE80211_AUTH_OPEN_REQUEST		= 1,
+	IEEE80211_AUTH_OPEN_RESPONSE		= 2,
+};
+
+enum {
+	IEEE80211_AUTH_SHARED_REQUEST		= 1,
+	IEEE80211_AUTH_SHARED_CHALLENGE		= 2,
+	IEEE80211_AUTH_SHARED_RESPONSE		= 3,
+	IEEE80211_AUTH_SHARED_PASS		= 4,
+};
+
+#define	IEEE80211_AUTH_FT	5
+
+/*
+ * Reason codes
+ *
+ * Unlisted codes are reserved
+ */
+
+enum {
+	IEEE80211_REASON_UNSPECIFIED			= 1,
+	IEEE80211_REASON_AUTH_EXPIRE			= 2,
+	IEEE80211_REASON_AUTH_LEAVE			= 3,
+	IEEE80211_REASON_ASSOC_EXPIRE			= 4,
+	IEEE80211_REASON_ASSOC_TOOMANY			= 5,
+	IEEE80211_REASON_NOT_AUTHED			= 6,
+	IEEE80211_REASON_NOT_ASSOCED			= 7,
+	IEEE80211_REASON_ASSOC_LEAVE			= 8,
+	IEEE80211_REASON_ASSOC_NOT_AUTHED		= 9,
+	IEEE80211_REASON_DISASSOC_BAD_POWER		= 10,
+	IEEE80211_REASON_DISASSOC_BAD_SUPP_CHAN		= 11,
+	IEEE80211_REASON_IE_INVALID			= 13,
+	IEEE80211_REASON_MIC_FAILURE			= 14,
+	IEEE80211_REASON_4WAY_HANDSHAKE_TIMEOUT		= 15,
+	IEEE80211_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT	= 16,
+	IEEE80211_REASON_IE_DIFFERENT			= 17,
+	IEEE80211_REASON_INVALID_GROUP_CIPHER		= 18,
+	IEEE80211_REASON_INVALID_PAIRWISE_CIPHER	= 19,
+	IEEE80211_REASON_INVALID_AKMP			= 20,
+	IEEE80211_REASON_UNSUPP_RSN_VERSION		= 21,
+	IEEE80211_REASON_INVALID_RSN_IE_CAP		= 22,
+	IEEE80211_REASON_IEEE8021X_FAILED		= 23,
+	IEEE80211_REASON_CIPHER_SUITE_REJECTED		= 24,
+	IEEE80211_REASON_TDLS_UNREACH			= 25, /* TDLS teardown due to peer unreachable */
+	IEEE80211_REASON_TDLS_UNSPEC			= 26, /* TDLS teardown for unspecified reason */
+	IEEE80211_REASON_DISASSOC_UNSPECIFIED_QOS	= 32,
+	IEEE80211_REASON_DISASSOC_QOS_AP_NO_BANDWIDTH	= 33,
+	IEEE80211_REASON_DISASSOC_LOW_ACK		= 34,
+	IEEE80211_REASON_DISASSOC_STA_EXCEED_TXOP	= 35,
+	IEEE80211_REASON_STA_LEAVE_BSS			= 36,
+	IEEE80211_REASON_STA_NOT_USE			= 37,
+	IEEE80211_REASON_STA_REQUIRE_SETUP		= 38,
+	IEEE80211_REASON_STA_TIMEOUT			= 39,
+	IEEE80211_REASON_STA_CIPHER_NOT_SUPP		= 45,
+
+
+	IEEE80211_STATUS_SUCCESS		= 0,
+	IEEE80211_STATUS_UNSPECIFIED		= 1,
+	IEEE80211_STATUS_TDLS_WKUP_REJ_ALT	= 2,  /* Wakeup sched rejected/alternative */
+	IEEE80211_STATUS_TDLS_WKUP_REJ		= 3,  /* Wakeup sched rejected */
+	IEEE80211_STATUS_SEC_DIS		= 5,  /* Security disabled */
+	IEEE80211_STATUS_LIFETIME_NOTOK		= 6,  /* Unacceptable lifetime */
+	IEEE80211_STATUS_BSS_INVALID		= 7,  /* Not in same BSS */
+	IEEE80211_STATUS_CAPINFO		= 10,
+	IEEE80211_STATUS_NOT_ASSOCED		= 11,
+	IEEE80211_STATUS_OTHER			= 12,
+	IEEE80211_STATUS_ALG			= 13,
+	IEEE80211_STATUS_SEQUENCE		= 14,
+	IEEE80211_STATUS_CHALLENGE		= 15,
+	IEEE80211_STATUS_TIMEOUT		= 16,
+	IEEE80211_STATUS_TOOMANY		= 17,
+	IEEE80211_STATUS_BASIC_RATE		= 18,
+	IEEE80211_STATUS_SP_REQUIRED		= 19,
+	IEEE80211_STATUS_PBCC_REQUIRED		= 20,
+	IEEE80211_STATUS_CA_REQUIRED		= 21,
+	IEEE80211_STATUS_TOO_MANY_STATIONS	= 22,
+	IEEE80211_STATUS_RATES			= 23,
+	IEEE80211_STATUS_SHORTSLOT_REQUIRED	= 25,
+	IEEE80211_STATUS_DSSSOFDM_REQUIRED	= 26,
+	IEEE80211_STATUS_HT_FEATURE		= 27,
+	IEEE80211_STATUS_PMF_REJECT_RETRY		= 30,
+	IEEE80211_STATUS_PMF_VIOLATION		= 31,
+	IEEE80211_STATUS_PEER_MECHANISM_REJECT	= 37,
+	IEEE80211_STATUS_TDLS_RSNIE_INVALID	= 72, /* Invalid contents of RSNIE */
+
+	/* Quantenna */
+	IEEE80211_STATUS_DENIED			= 100,
+};
+
+#define	IEEE80211_WEP_KEYLEN		5	/* 40bit */
+#define	IEEE80211_WEP_IVLEN		3	/* 24bit */
+#define	IEEE80211_WEP_KIDLEN		1	/* 1 octet */
+#define	IEEE80211_WEP_CRCLEN		4	/* CRC-32 */
+#define	IEEE80211_WEP_NKID		4	/* number of key ids */
+
+/*
+ * 802.11i defines an extended IV for use with non-WEP ciphers.
+ * When the EXTIV bit is set in the key id byte an additional
+ * 4 bytes immediately follow the IV for TKIP.  For CCMP the
+ * EXTIV bit is likewise set but the 8 bytes represent the
+ * CCMP header rather than IV+extended-IV.
+ */
+#define	IEEE80211_WEP_EXTIV		0x20
+#define	IEEE80211_WEP_EXTIVLEN		4	/* extended IV length */
+#define	IEEE80211_WEP_CCMPLEN		8	/* CCMP header */
+#define	IEEE80211_WEP_MICLEN		8	/* trailing MIC */
+#define	IEEE80211_WEP_ICVLEN		4	/* ICV */
+
+#define	IEEE80211_CRC_LEN		4
+#define IEEE80211_MAX_IE_LEN		257
+
+/*
+ * Maximum acceptable MTU is:
+ *	IEEE80211_MAX_LEN - WEP overhead - CRC -
+ *		QoS overhead - RSN/WPA overhead
+ * Min is arbitrarily chosen > IEEE80211_MIN_LEN.  The default
+ * mtu is Ethernet-compatible; it's set by ether_ifattach.
+ */
+#define	IEEE80211_MTU_MAX		3500
+#define	IEEE80211_MTU_MIN		32
+
+#define	IEEE80211_MAX_LEN		(2300 + IEEE80211_CRC_LEN + \
+	(IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_CRCLEN))
+#define	IEEE80211_ACK_LEN \
+	(sizeof(struct ieee80211_frame_ack) + IEEE80211_CRC_LEN)
+#define	IEEE80211_MIN_LEN \
+	(sizeof(struct ieee80211_frame_min) + IEEE80211_CRC_LEN)
+
+/*
+ * RTS frame length parameters.  The default is specified in
+ * the 802.11 spec.  The max may be wrong for jumbo frames.
+ */
+#define	IEEE80211_RTS_DEFAULT		512
+#define	IEEE80211_RTS_MIN		0
+#define	IEEE80211_RTS_MAX		65536
+#define	IEEE80211_RTS_THRESH_OFF	(IEEE80211_RTS_MAX + 1)
+
+/*
+ * Regulatory extension identifier for country IE.
+ */
+#define IEEE80211_REG_EXT_ID		201
+
+/*
+ * IEEE 802.11 timer synchronization function (TSF) timestamp length
+ */
+#define IEEE80211_TSF_LEN		8
+/*
+ * 802.11n defines
+ */
+#define IEEE80211_11N_BAWLEN		64
+#define IEEE80211_11N_QLENLIM		(64*1024)
+
+#define IEEE80211_11N_SEQINORDER_BAW(seq_front, seq_back)       \
+        IEEE80211_SEQ_INORDER_LAG((seq_front), (seq_back), IEEE80211_11N_BAWLEN)
+
+struct wmm_params {
+	uint8_t wmm_acm;		/* ACM parameter */
+	uint8_t wmm_aifsn;		/* AIFSN parameters */
+	uint8_t wmm_logcwmin;		/* cwmin in exponential form */
+	uint8_t wmm_logcwmax;		/* cwmax in exponential form */
+	uint16_t wmm_txopLimit;		/* txopLimit */
+	uint8_t wmm_noackPolicy;	/* No-Ack Policy: 0=ack, 1=no-ack */
+};
+
+#define IEEE80211_DEFAULT_BA_WINSIZE	64	/* use for explicit BA establishing, size and throughput is moderate */
+#define IEEE80211_DEFAULT_BA_WINSIZE_H	256	/* use for implicit BA, large size to support large aggregates and high throughput */
+#define IEEE80211_MAX_BA_WINSIZE	0x3FF
+
+/*value assignment for the little-endian*/
+#define	ADDINT16LE(frm, v) do {			\
+	frm[0] = (v) & 0xff;				\
+	frm[1] = ((v) >> 8) & 0xff;			\
+	frm += 2;							\
+} while (0)
+/* 32 bits to 32 bits */
+#define	ADDINT32LE(frm, v) do {			\
+	frm[0] = (v) & 0xff;				\
+	frm[1] = ((v) >> 8) & 0xff;			\
+	frm[2] = ((v) >> 16) & 0xff;		\
+	frm[3] = ((v) >> 24) & 0xff;		\
+	frm += 4;							\
+} while (0)
+
+
+/* value assignment */
+/* 16 bits to 16 bits */
+#define	ADDINT16(frm, v) do {			\
+	frm[1] = (v) & 0xff;				\
+	frm[0] = ((v) >> 8) & 0xff;			\
+	frm += 2;							\
+} while (0)
+/* 32 bits to 32 bits */
+#define	ADDINT32(frm, v) do {			\
+	frm[3] = (v) & 0xff;				\
+	frm[2] = ((v) >> 8) & 0xff;			\
+	frm[1] = ((v) >> 16) & 0xff;		\
+	frm[0] = ((v) >> 24) & 0xff;		\
+	frm += 4;							\
+} while (0)
+/* 8 bits to 32 bits */
+#define	ADDINT8TO32(frm, v) do {			\
+	frm[3] = (v) & 0xff;				\
+	frm[2] = 0;					\
+	frm[1] = 0;					\
+	frm[0] = 0;					\
+	frm += 4;							\
+} while (0)
+/* 16 bits to 32 bits */
+#define	ADDINT16TO32(frm, v) do {			\
+	frm[3] = (v) & 0xff;				\
+	frm[2] = ((v) >> 8) & 0xff;			\
+	frm[1] = 0;					\
+	frm[0] = 0;					\
+	frm += 4;							\
+} while (0)
+/* 32 bits to 64 bits */
+#define	ADDINT32TO64(frm, v) do {			\
+	frm[7] = (v) & 0xff;				\
+	frm[6] = ((v) >> 8) & 0xff;			\
+	frm[5] = ((v) >> 16) & 0xff;		\
+	frm[4] = ((v) >> 24) & 0xff;		\
+	frm[3] = 0;							\
+	frm[2] = 0;							\
+	frm[1] = 0;							\
+	frm[0] = 0;							\
+	frm += 8;							\
+} while (0)
+
+#define IEEE80211_IE_LEADER_STR_VHTCAP	"vhtcap_ie="
+#define IEEE80211_IE_LEADER_STR_HTCAP	"htcap_ie="
+#define IEEE80211_IE_LEADER_STR_RSN	"rsn_ie="
+#define IEEE80211_IE_LEADER_STR_WPA	"wpa_ie="
+#define IEEE80211_IE_LEADER_STR_WME	"wme_ie="
+#define IEEE80211_IE_LEADER_STR_ATH	"ath_ie="
+#define IEEE80211_IE_LEADER_STR_EXT_ROLE	"qtn_extender_role="
+
+#ifndef DSP_BUILD
+static __inline__ int ieee80211_is_bcst(const void *p)
+{
+	const uint16_t *p16 = p;
+
+	return (p16[0] == 0xFFFF) && (p16[1] == 0xFFFF) && (p16[2] == 0xFFFF);
+}
+
+/*
+ * IEEE802.11w spec - Table 8-38 and section 11.1.7
+ */
+static __inline__ int ieee80211_mgmt_is_robust(const struct ieee80211_frame *wh) {
+
+	int is_robust_mgmt = 0;
+	const uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+
+	switch (subtype){
+		case IEEE80211_FC0_SUBTYPE_DEAUTH:
+		case IEEE80211_FC0_SUBTYPE_DISASSOC:
+			is_robust_mgmt = 1;
+			break;
+		case IEEE80211_FC0_SUBTYPE_ACTION:
+		{
+			const struct ieee80211_action *ia;
+			ia = (const struct ieee80211_action *) (const void*)&wh[1];
+
+			switch (ia->ia_category) {
+				case IEEE80211_ACTION_CAT_SPEC_MGMT:
+				case IEEE80211_ACTION_CAT_QOS:
+				case IEEE80211_ACTION_CAT_DLS:
+				case IEEE80211_ACTION_CAT_BA:
+				case IEEE80211_ACTION_CAT_RM:
+				case IEEE80211_ACTION_CAT_FBSS:
+				case IEEE80211_ACTION_CAT_SA_QUERY:
+				case IEEE80211_ACTION_CAT_PROT_DUAL_PA:
+				case IEEE80211_ACTION_CAT_WNM:
+				case IEEE80211_ACTION_CAT_MESH:
+				case IEEE80211_ACTION_CAT_MULTIHOP:
+				case IEEE80211_ACTION_CAT_VEND_PROT:
+					is_robust_mgmt = 1;
+					break;
+				default:
+					is_robust_mgmt = 0;
+					break;
+			}
+			break;
+		}
+		default:
+			break;
+
+	}
+
+	return is_robust_mgmt;
+}
+#endif
+
+/* QTN Extender */
+#define	IEEE80211_QTN_WDS_MASK		0x0003
+#define	IEEE80211_QTN_EXTDR_ALLMASK	0xFFFF
+#define	IEEE80211_QTN_EXTDR_MASK_SHIFT	16
+
+#define	IEEE80211_QTN_WDS_ONLY		0x0000	/* 0 = Plain WDS; No WDS Extender */
+#define	IEEE80211_QTN_WDS_MBS		0x0001	/* 1 = MBS-Master Base Station */
+#define	IEEE80211_QTN_WDS_RBS		0x0002	/* 2 = RBS-Repeater/Remote Base Station */
+
+static __inline__ uint32_t ieee80211_extdr_combinate(uint16_t flags, uint16_t mask)
+{
+	return (mask << IEEE80211_QTN_EXTDR_MASK_SHIFT) | flags;
+}
+
+#define IEEE80211_N_RATE_PREFIX 0x7F000000
+#define IEEE80211_AC_RATE_PREFIX 0x7E000000
+#define IEEE80211_RATE_PREFIX_MASK 0xFF000000
+
+#define IEEE80211U_PARAM_IPV4ADDRTYPE_MIN	0
+#define IEEE80211U_PARAM_IPV4ADDRTYPE_MAX	7
+#define IEEE80211U_PARAM_IPV6ADDRTYPE_MIN	0
+#define IEEE80211U_PARAM_IPV6ADDRTYPE_MAX	2
+#define IEEE80211U_PARAM_IP_STATUS_MAX		2
+/* MU MIMO */
+#define IEEE80211_MU_GRP_VALID(_grp)		\
+	(((_grp) > 0) && ((_grp) < (IEEE80211_VHT_GRP_MAX_BIT_OFFSET+1)))
+
+#define IEEE80211_MU_POS_VALID(_pos) ((_pos) < 4)
+
+#define IEEE80211_MU_DEL_GRP(mu_grp, _grp) do {		\
+	(mu_grp).member[(_grp) >> 3] &= ~(1 << ((_grp) & 0x7)); \
+} while (0)
+
+#define IEEE80211_MU_ADD_GRP(mu_grp, _grp, _pos) do {	\
+	(mu_grp).member[(_grp) >> 3] |= (1 << ((_grp) & 0x7)); \
+	(mu_grp).pos[(_grp) >> 2] &= ~((0x03 << (((_grp) & 0x3) << 1))); \
+	(mu_grp).pos[(_grp) >> 2] |= (((_pos) << (((_grp) & 0x3) << 1))); \
+} while (0)
+
+#define IEEE80211_MU_IS_GRP_MBR(mu_grp, _grp)	\
+	((mu_grp).member[(_grp) >> 3] & (1 << ((_grp) & 0x7)))
+
+#define IEEE80211_MU_GRP_POS(mu_grp, _grp)	\
+	(((mu_grp).pos[(_grp) >> 2] >> (((_grp) & 0x3) << 1)) & 0x3)
+
+#define	IEEE80211_VAP_STATE_DISABLED			0
+#define	IEEE80211_VAP_STATE_ENABLED			1
+
+#define IEEE80211_MIN_BSS_GROUP	1 /* 0 - Default internal group. 1 - 31 User configurable group id */
+#define IEEE80211_MAX_BSS_GROUP	32
+
+#endif /* _NET80211_IEEE80211_H_ */
+
diff --git a/drivers/qtn/include/shared/net80211/ieee80211_crypto.h b/drivers/qtn/include/shared/net80211/ieee80211_crypto.h
new file mode 100644
index 0000000..8b176fc
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/ieee80211_crypto.h
@@ -0,0 +1,207 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_crypto.h 1441 2006-02-06 16:03:21Z mrenzmann $
+ */
+#ifndef _NET80211_IEEE80211_CRYPTO_H_
+#define _NET80211_IEEE80211_CRYPTO_H_
+
+/*
+ * 802.11 protocol crypto-related definitions.
+ */
+#define	IEEE80211_KEYBUF_SIZE	16
+#define	IEEE80211_MICBUF_SIZE	(8 + 8)		/* space for both tx+rx keys */
+#define IEEE80211_TID_SIZE	17		/* total number of TIDs */
+
+/*
+ * Old WEP-style key.  Deprecated.
+ */
+struct ieee80211_wepkey {
+	u_int wk_len;				/* key length in bytes */
+	u_int8_t wk_key[IEEE80211_KEYBUF_SIZE];
+};
+
+struct ieee80211_cipher;
+
+/*
+ * Crypto key state.  There is sufficient room for all supported
+ * ciphers (see below).  The underlying ciphers are handled
+ * separately through loadable cipher modules that register with
+ * the generic crypto support.  A key has a reference to an instance
+ * of the cipher; any per-key state is hung off wk_private by the
+ * cipher when it is attached.  Ciphers are automatically called
+ * to detach and cleanup any such state when the key is deleted.
+ *
+ * The generic crypto support handles encap/decap of cipher-related
+ * frame contents for both hardware- and software-based implementations.
+ * A key requiring software crypto support is automatically flagged and
+ * the cipher is expected to honor this and do the necessary work.
+ * Ciphers such as TKIP may also support mixed hardware/software
+ * encrypt/decrypt and MIC processing.
+ *
+ * Note: This definition must be the same as qtn_key.
+ */
+struct ieee80211_key {
+	u_int8_t wk_keylen;		/* key length in bytes */
+	u_int8_t wk_flags;
+#define	IEEE80211_KEY_XMIT	0x01	/* key used for xmit */
+#define	IEEE80211_KEY_RECV	0x02	/* key used for recv */
+#define	IEEE80211_KEY_GROUP	0x04	/* key used for WPA group operation */
+#define	IEEE80211_KEY_SWCRYPT	0x10	/* host-based encrypt/decrypt */
+#define	IEEE80211_KEY_SWMIC	0x20	/* host-based enmic/demic */
+#define IEEE80211_KEY_VLANGROUP	0x40	/* VLAN group key */
+	u_int16_t wk_keyix;		/* key index */
+	u_int8_t wk_key[IEEE80211_KEYBUF_SIZE+IEEE80211_MICBUF_SIZE];
+#define	wk_txmic	wk_key + IEEE80211_KEYBUF_SIZE + 0
+#define	wk_rxmic	wk_key + IEEE80211_KEYBUF_SIZE + 8
+	u_int64_t wk_keyrsc[IEEE80211_TID_SIZE];	/* key receive sequence counter */
+	u_int64_t wk_keytsc;				/* key transmit sequence counter */
+	u_int32_t wk_ciphertype;
+	const struct ieee80211_cipher *wk_cipher;
+	void *wk_private;				/* private cipher state */
+};
+#define	IEEE80211_KEY_COMMON				/* common flags passed in by apps */\
+	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV | IEEE80211_KEY_GROUP)
+
+/*
+ * NB: these values are ordered carefully; there are lots of
+ * of implications in any reordering.  In particular beware
+ * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
+ */
+#define	IEEE80211_CIPHER_WEP		0
+#define	IEEE80211_CIPHER_TKIP		1
+#define	IEEE80211_CIPHER_AES_OCB	2
+#define	IEEE80211_CIPHER_AES_CCM	3
+#define IEEE80211_CIPHER_AES_CMAC	4
+#define	IEEE80211_CIPHER_CKIP		5
+#define	IEEE80211_CIPHER_NONE		6	/* pseudo value */
+
+#define	IEEE80211_CIPHER_MAX		(IEEE80211_CIPHER_NONE+1)
+
+#define	IEEE80211_KEYIX_NONE		((u_int8_t) - 1)
+
+#if defined(__KERNEL__) || defined(_KERNEL)
+
+struct ieee80211com;
+struct ieee80211vap;
+struct ieee80211_node;
+struct sk_buff;
+
+void ieee80211_crypto_attach(struct ieee80211com *);
+void ieee80211_crypto_detach(struct ieee80211com *);
+void ieee80211_crypto_vattach(struct ieee80211vap *);
+void ieee80211_crypto_vdetach(struct ieee80211vap *);
+int ieee80211_crypto_newkey(struct ieee80211vap *, int, int,
+	struct ieee80211_key *);
+int ieee80211_crypto_delkey(struct ieee80211vap *, struct ieee80211_key *,
+	struct ieee80211_node *);
+int ieee80211_crypto_setkey(struct ieee80211vap *, struct ieee80211_key *,
+	const u_int8_t macaddr[IEEE80211_ADDR_LEN], struct ieee80211_node *);
+void ieee80211_crypto_delglobalkeys(struct ieee80211vap *);
+
+/*
+ * Template for a supported cipher.  Ciphers register with the
+ * crypto code and are typically loaded as separate modules
+ * (the null cipher is always present).
+ * XXX may need refcnts
+ */
+struct ieee80211_cipher {
+	const char *ic_name;		/* printable name */
+	u_int ic_cipher;			/* IEEE80211_CIPHER_* */
+	u_int ic_header;			/* size of privacy header (bytes) */
+	u_int ic_trailer;		/* size of privacy trailer (bytes) */
+	u_int ic_miclen;			/* size of mic trailer (bytes) */
+	void *(*ic_attach)(struct ieee80211vap *, struct ieee80211_key *);
+	void (*ic_detach)(struct ieee80211_key *);
+	int (*ic_setkey)(struct ieee80211_key *);
+	int (*ic_encap)(struct ieee80211_key *, struct sk_buff *, u_int8_t);
+	int (*ic_decap)(struct ieee80211_key *, struct sk_buff *, int);
+	int (*ic_enmic)(struct ieee80211_key *, struct sk_buff *, int);
+	int (*ic_demic)(struct ieee80211_key *, struct sk_buff *, int);
+};
+extern const struct ieee80211_cipher ieee80211_cipher_none;
+
+void ieee80211_crypto_register(const struct ieee80211_cipher *);
+void ieee80211_crypto_unregister(const struct ieee80211_cipher *);
+int ieee80211_crypto_available(u_int);
+
+struct ieee80211_key *ieee80211_crypto_encap(struct ieee80211_node *,
+	struct sk_buff *);
+struct ieee80211_key *ieee80211_crypto_decap(struct ieee80211_node *,
+	struct sk_buff *, int);
+
+/*
+ * Check and remove any MIC.
+ */
+static __inline int
+ieee80211_crypto_demic(struct ieee80211vap *vap, struct ieee80211_key *k,
+	struct sk_buff *skb, int hdrlen)
+{
+	const struct ieee80211_cipher *cip = k->wk_cipher;
+	return (cip->ic_miclen > 0 ? cip->ic_demic(k, skb, hdrlen) : 1);
+}
+
+/*
+ * Add any MIC.
+ */
+static __inline int
+ieee80211_crypto_enmic(struct ieee80211vap *vap, struct ieee80211_key *k,
+	struct sk_buff *skb, int force)
+{
+	const struct ieee80211_cipher *cip = k->wk_cipher;
+	return (cip->ic_miclen > 0 ? cip->ic_enmic(k, skb, force) : 1);
+}
+
+/* 
+ * Reset key state to an unused state.  The crypto
+ * key allocation mechanism ensures other state (e.g.
+ * key data) is properly setup before a key is used.
+ */
+static __inline void
+ieee80211_crypto_resetkey(struct ieee80211vap *vap, struct ieee80211_key *k,
+	u_int16_t ix)
+{
+	k->wk_cipher = &ieee80211_cipher_none;
+	k->wk_private = k->wk_cipher->ic_attach(vap, k);
+	k->wk_keyix = ix;
+	k->wk_flags = IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV;
+}
+
+/*
+ * Crypto-related notification methods.
+ */
+void ieee80211_notify_replay_failure(struct ieee80211vap *,
+	const struct ieee80211_frame *, const struct ieee80211_key *,
+	u_int64_t rsc);
+void ieee80211_notify_michael_failure(struct ieee80211vap *,
+	const struct ieee80211_frame *, u_int keyix);
+#endif /* defined(__KERNEL__) || defined(_KERNEL) */
+#endif /* _NET80211_IEEE80211_CRYPTO_H_ */
diff --git a/drivers/qtn/include/shared/net80211/ieee80211_dfs_reentry.h b/drivers/qtn/include/shared/net80211/ieee80211_dfs_reentry.h
new file mode 100644
index 0000000..9bb854a
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/ieee80211_dfs_reentry.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Common DFS re-entry definitions.
+ */
+#ifndef _IEEE80211_DFS_REENTRY_H
+#define _IEEE80211_DFS_REENTRY_H
+
+/*
+ *  DFS-reentry
+ */
+#define IEEE80211_PICK_DOMIAN_MASK	0x0007
+#define IEEE80211_PICK_ALL		0x0001		/* pick channel from all available channels */
+#define IEEE80211_PICK_DFS		0x0002		/* pick channel from available DFS channel */
+#define IEEE80211_PICK_NONDFS		0x0004		/* pick channel from available non-DFS channel */
+
+#define IEEE80211_PICK_CONTROL_MASK		0x00F8
+#define IEEE80211_PICK_SCAN_FLUSH		0x0008
+#define IEEE80211_PICK_BG_ACTIVE		0x0010
+#define IEEE80211_PICK_BG_PASSIVE_FAST		0x0020
+#define IEEE80211_PICK_BG_PASSIVE_NORMAL	0x0040
+#define IEEE80211_PICK_BG_PASSIVE_SLOW		0x0080
+#define IEEE80211_PICK_BG_MODE_MASK		0x00F0
+
+#define IEEE80211_PICK_ALGORITHM_MASK	0xFF00
+#define IEEE80211_PICK_CLEAREST		0x0100		/* pick clearest channel */
+#define IEEE80211_PICK_REENTRY		0x0200		/* pick channel again after DFS process */
+#define IEEE80211_PICK_NOPICK		0x0400		/* do not pick channel */
+#define IEEE80211_PICK_NOPICK_BG	0x0800		/* scan background and do not pick channel */
+#define IEEE80211_PICK_DEFAULT		(IEEE80211_PICK_ALL | IEEE80211_PICK_CLEAREST)
+
+#define IEEE80211_SCS_PICK_DFS_ONLY			0x1/* Pick channels from DFS set only*/
+#define IEEE80211_SCS_PICK_NON_DFS_ONLY			0x2/* Pick channels from Non-DFS set only*/
+#define IEEE80211_SCS_PICK_AVAILABLE_DFS_ONLY		0x4/* Pick channels from available DFS set*/
+#define IEEE80211_SCS_PICK_AVAILABLE_ANY_CHANNEL	0x8/* Pick channels from available DFS and Non-DFS sets*/
+#define IEEE80211_SCS_PICK_ANYWAY			0x10/* Omit channel margins during channel pick*/
+#define IEEE80211_SCS_PICK_NOT_AVAILABLE_DFS_ONLY	0x20/* Pick channels from unavailable DFS set*/
+
+/* Prefer selecting DFS only channels for bootup CAC;
+ * Below flag must be used only while calling below APIs
+ * ieee80211_scan_pickchannel, scan_pickchan, ap_pick_channel
+ */
+#define IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY		0x00110000
+
+/* Select channel from DFS and non-DFS sets which are available only*/
+/* All Non-DFS channels are available by default,
+ * DFS channels are available only after CAC-Completion events;
+ * Below flag must be used only while calling below APIs
+ * ieee80211_scan_pickchannel, scan_pickchan, ap_pick_channel
+ */
+#define IEEE80211_SCAN_PICK_AVAILABLE_ANY_CHANNEL		0x00120000
+
+/*
+ * Select any valid DFS channel from {CAC_REQUIRED, AVAILABLE} set;
+ * Below flag must be used only while calling below APIs
+ * ieee80211_scan_pickchannel, scan_pickchan, ap_pick_channel
+ */
+#define IEEE80211_SCAN_PICK_ANY_DFS				0x00130000
+
+#endif
diff --git a/drivers/qtn/include/shared/net80211/ieee80211_ioctl.h b/drivers/qtn/include/shared/net80211/ieee80211_ioctl.h
new file mode 100644
index 0000000..7c0a6f8
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/ieee80211_ioctl.h
@@ -0,0 +1,2171 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_ioctl.h 1856 2006-12-14 01:38:00Z scottr $
+ */
+#ifndef _NET80211_IEEE80211_IOCTL_H_
+#define _NET80211_IEEE80211_IOCTL_H_
+
+/*
+ * IEEE 802.11 ioctls.
+ */
+#include "net80211/_ieee80211.h"
+#include "net80211/ieee80211.h"
+#include "net80211/ieee80211_qos.h"
+#include "net80211/ieee80211_crypto.h"
+
+#pragma pack(4)
+/*
+ * Per-channel flags to differentiate chan_pri_inactive configuration
+ * between regulatory db and user configuration.
+ * By default, system uses static regulatory db configs.
+ * However driver shall always honour dynamic user configuration.
+ * In this way, user configuration will override regulatory db configs.
+ */
+enum {
+	CHAN_PRI_INACTIVE_CFG_DATABASE = 0x1,		/* configuration came from the regulatory database */
+	CHAN_PRI_INACTIVE_CFG_USER_OVERRIDE = 0x2,	/* dynamic user configuration; overrides the database */
+	CHAN_PRI_INACTIVE_CFG_AUTOCHAN_ONLY = 0x4,	/* restriction applies to automatic channel selection only */
+};
+
+/*
+ * Per/node (station) statistics available when operating as an AP.
+ */
+struct ieee80211_nodestats {
+	uint32_t ns_rx_data;		/* rx data frames */
+	uint32_t ns_rx_mgmt;		/* rx management frames */
+	uint32_t ns_rx_ctrl;		/* rx control frames */
+	uint32_t ns_rx_ucast;		/* rx unicast frames */
+	uint32_t ns_rx_mcast;		/* rx multicast frames */
+	uint32_t ns_rx_bcast;		/* rx broadcast frames */
+	uint64_t ns_rx_bytes;		/* rx data count (bytes) */
+	uint64_t ns_rx_beacons;		/* rx beacon frames */
+	uint32_t ns_rx_proberesp;	/* rx probe response frames */
+
+	uint32_t ns_rx_dup;		/* rx discard because it's a dup */
+	uint32_t ns_rx_noprivacy;	/* rx w/ wep but privacy off */
+	uint32_t ns_rx_wepfail;		/* rx wep processing failed */
+	uint32_t ns_rx_demicfail;	/* rx demic failed */
+	uint32_t ns_rx_decap;		/* rx decapsulation failed */
+	uint32_t ns_rx_defrag;		/* rx defragmentation failed */
+	uint32_t ns_rx_disassoc;	/* rx disassociation */
+	uint32_t ns_rx_deauth;		/* rx deauthentication */
+	uint32_t ns_rx_decryptcrc;	/* rx decrypt failed on crc */
+	uint32_t ns_rx_unauth;		/* rx on unauthorized port */
+	uint32_t ns_rx_unencrypted;	/* rx unencrypted w/ privacy on */
+
+	uint32_t ns_tx_data;		/* tx data frames */
+	uint32_t ns_tx_mgmt;		/* tx management frames */
+	uint32_t ns_tx_ucast;		/* tx unicast frames */
+	uint32_t ns_tx_mcast;		/* tx multicast frames */
+	uint32_t ns_tx_bcast;		/* tx broadcast frames */
+	uint64_t ns_tx_bytes;		/* tx data count (bytes) */
+	uint32_t ns_tx_probereq;	/* tx probe request frames */
+	uint32_t ns_tx_uapsd;		/* tx on uapsd queue */
+
+	uint32_t ns_tx_novlantag;	/* tx discard due to no tag */
+	uint32_t ns_tx_vlanmismatch;	/* tx discard due to bad tag */
+	uint32_t ns_tx_unauth;		/* tx on unauthorized port */
+
+	uint32_t ns_tx_eosplost;	/* uapsd EOSP retried out */
+
+	uint32_t ns_ps_discard;		/* ps discard due to age */
+
+	uint32_t ns_uapsd_triggers;	/* uapsd triggers */
+
+	/* MIB-related state */
+	uint32_t ns_tx_assoc;		/* [re]associations */
+	uint32_t ns_tx_assoc_fail;	/* [re]association failures */
+	uint32_t ns_tx_auth;		/* [re]authentications */
+	uint32_t ns_tx_auth_fail;	/* [re]authentication failures*/
+	uint32_t ns_tx_deauth;		/* deauthentications */
+	uint32_t ns_tx_deauth_code;	/* last deauth reason */
+	uint32_t ns_tx_disassoc;	/* disassociations */
+	uint32_t ns_tx_disassoc_code;	/* last disassociation reason */
+	uint32_t ns_psq_drops;		/* power save queue drops */
+	uint32_t ns_rx_action;         /* rx action */
+	uint32_t ns_tx_action;		/* tx action */
+	/*
+	 * Next few fields track the corresponding entry in struct net_device_stats,
+	 * but here for each associated node
+	 */
+	uint32_t ns_rx_errors;
+	uint32_t ns_tx_errors;
+	uint32_t ns_rx_dropped;
+	uint32_t ns_tx_dropped;
+	/*
+	 * The number of dropped data packets failed to transmit through
+	 * wireless media for each traffic category(TC).
+	 */
+	uint32_t ns_tx_wifi_drop[WME_AC_NUM];
+
+	uint32_t ns_ap_isolation_dropped;	/* dropped by AP client isolation */
+	uint32_t ns_rx_fragment_pkts;		/* rx fragmented packets */
+	uint32_t ns_rx_vlan_pkts;		/* rx VLAN-tagged packets */
+
+	uint32_t ns_rx_tdls_action;	/* rx TDLS action frames */
+	uint32_t ns_tx_tdls_action;	/* tx TDLS action frames */
+};
+
+/*
+ * Summary statistics.
+ */
+struct ieee80211_stats {
+	uint32_t is_rx_badversion;	/* rx frame with bad version */
+	uint32_t is_rx_tooshort;	/* rx frame too short */
+	uint32_t is_rx_wrongbss;	/* rx from wrong bssid */
+	uint32_t is_rx_dup;		/* rx discard because it's a dup */
+	uint32_t is_rx_wrongdir;	/* rx w/ wrong direction */
+	uint32_t is_rx_mcastecho;	/* rx discard due to mcast echo */
+	uint32_t is_rx_notassoc;	/* rx discard due to sta !assoc */
+	uint32_t is_rx_noprivacy;	/* rx w/ wep but privacy off */
+	uint32_t is_rx_unencrypted;	/* rx w/o wep and privacy on */
+	uint32_t is_rx_wepfail;		/* rx wep processing failed */
+	uint32_t is_rx_decap;		/* rx decapsulation failed */
+	uint32_t is_rx_mgtdiscard;	/* rx discard mgt frames */
+	uint32_t is_rx_ctl;		/* rx discard ctrl frames */
+	uint32_t is_rx_beacon;		/* rx beacon frames */
+	uint32_t is_rx_rstoobig;	/* rx rate set truncated */
+	uint32_t is_rx_elem_missing;	/* rx required element missing*/
+	uint32_t is_rx_elem_toobig;	/* rx element too big */
+	uint32_t is_rx_elem_toosmall;	/* rx element too small */
+	uint32_t is_rx_elem_unknown;	/* rx element unknown */
+	uint32_t is_rx_badchan;	/* rx frame w/ invalid chan */
+	uint32_t is_rx_chanmismatch;	/* rx frame chan mismatch */
+	uint32_t is_rx_nodealloc;	/* rx frame dropped */
+	uint32_t is_rx_ssidmismatch;	/* rx frame ssid mismatch  */
+	uint32_t is_rx_auth_unsupported;/* rx w/ unsupported auth alg */
+	uint32_t is_rx_auth_fail;	/* rx sta auth failure */
+	uint32_t is_rx_auth_countermeasures;/* rx auth discard due to CM */
+	uint32_t is_rx_assoc_bss;	/* rx assoc from wrong bssid */
+	uint32_t is_rx_assoc_notauth;	/* rx assoc w/o auth */
+	uint32_t is_rx_assoc_capmismatch;/* rx assoc w/ cap mismatch */
+	uint32_t is_rx_assoc_norate;	/* rx assoc w/ no rate match */
+	uint32_t is_rx_assoc_badwpaie;	/* rx assoc w/ bad WPA IE */
+	uint32_t is_rx_deauth;		/* rx deauthentication */
+	uint32_t is_rx_disassoc;	/* rx disassociation */
+	uint32_t is_rx_action;         /* rx action mgt */
+	uint32_t is_rx_badsubtype;	/* rx frame w/ unknown subtype*/
+	uint32_t is_rx_nobuf;		/* rx failed for lack of buf */
+	uint32_t is_rx_decryptcrc;	/* rx decrypt failed on crc */
+	uint32_t is_rx_ahdemo_mgt;	/* rx discard ahdemo mgt frame*/
+	uint32_t is_rx_bad_auth;	/* rx bad auth request */
+	uint32_t is_rx_unauth;		/* rx on unauthorized port */
+	uint32_t is_rx_badkeyid;	/* rx w/ incorrect keyid */
+	uint32_t is_rx_ccmpreplay;	/* rx seq# violation (CCMP) */
+	uint32_t is_rx_ccmpformat;	/* rx format bad (CCMP) */
+	uint32_t is_rx_ccmpmic;		/* rx MIC check failed (CCMP) */
+	uint32_t is_rx_tkipreplay;	/* rx seq# violation (TKIP) */
+	uint32_t is_rx_tkipformat;	/* rx format bad (TKIP) */
+	uint32_t is_rx_tkipmic;		/* rx MIC check failed (TKIP) */
+	uint32_t is_rx_tkipicv;		/* rx ICV check failed (TKIP) */
+	uint32_t is_rx_badcipher;	/* rx failed due to key type */
+	uint32_t is_rx_nocipherctx;	/* rx failed due to key !setup */
+	uint32_t is_rx_acl;		/* rx discard due to acl policy */
+	uint32_t is_rx_ffcnt;		/* rx fast frames */
+	uint32_t is_rx_badathtnl;	/* driver key alloc failed */
+	uint32_t is_tx_nobuf;		/* tx failed for lack of buf */
+	uint32_t is_tx_nonode;		/* tx failed for no node */
+	uint32_t is_tx_unknownmgt;	/* tx of unknown mgt frame */
+	uint32_t is_tx_badcipher;	/* tx failed due to key type */
+	uint32_t is_tx_nodefkey;	/* tx failed due to no defkey */
+	uint32_t is_tx_noheadroom;	/* tx failed due to no space */
+	uint32_t is_tx_ffokcnt;		/* tx fast frames sent success */
+	uint32_t is_tx_fferrcnt;	/* tx fast frames send failures */
+	uint32_t is_tx_unauth;		/* tx on unauthorized port */
+	uint32_t is_scan_active;	/* active scans started */
+	uint32_t is_scan_passive;	/* passive scans started */
+	uint32_t is_node_timeout;	/* nodes timed out inactivity */
+	uint32_t is_crypto_nomem;	/* no memory for crypto ctx */
+	uint32_t is_crypto_tkip;	/* tkip crypto done in s/w */
+	uint32_t is_crypto_tkipenmic;	/* tkip en-MIC done in s/w */
+	uint32_t is_crypto_tkipdemic;	/* tkip de-MIC done in s/w */
+	uint32_t is_crypto_tkipcm;	/* tkip counter measures */
+	uint32_t is_crypto_ccmp;	/* ccmp crypto done in s/w */
+	uint32_t is_crypto_wep;		/* wep crypto done in s/w */
+	uint32_t is_crypto_setkey_cipher;/* cipher rejected key */
+	uint32_t is_crypto_setkey_nokey;/* no key index for setkey */
+	uint32_t is_crypto_delkey;	/* driver key delete failed */
+	uint32_t is_crypto_badcipher;	/* unknown cipher */
+	uint32_t is_crypto_nocipher;	/* cipher not available */
+	uint32_t is_crypto_attachfail;	/* cipher attach failed */
+	uint32_t is_crypto_swfallback;	/* cipher fallback to s/w */
+	uint32_t is_crypto_keyfail;	/* driver key alloc failed */
+	uint32_t is_crypto_enmicfail;	/* en-MIC failed */
+	uint32_t is_ibss_capmismatch;	/* merge failed-cap mismatch */
+	uint32_t is_ibss_norate;	/* merge failed-rate mismatch */
+	uint32_t is_ps_unassoc;	/* ps-poll for unassoc. sta */
+	uint32_t is_ps_badaid;		/* ps-poll w/ incorrect aid */
+	uint32_t is_ps_qempty;		/* ps-poll w/ nothing to send */
+	uint32_t is_rx_assoc_nohtcap;	/* HT capabilities mismatch */
+	uint32_t is_rx_assoc_tkiphtreject; /* rx assoc requesting TKIP and HT capabilities */
+	uint32_t is_rx_assoc_toomany;	/* reach assoc limit */
+	uint32_t is_rx_ps_unauth;	/* ps-poll for un-authenticated STA */
+	uint32_t is_rx_tdls_stsmismatch;/* tdls status mismatch */
+	uint32_t is_rx_tdls;		/* tdls action frame */
+	uint32_t is_tx_tdls;		/* tdls action frame */
+};
+
+/*
+ * Max size of optional information elements.  We artificially
+ * constrain this; it's limited only by the max frame size (and
+ * the max parameter size of the wireless extensions).
+ */
+#define	IEEE80211_MAX_OPT_IE	256
+
+/*
+ * WPA/RSN get/set key request.  Specify the key/cipher
+ * type and whether the key is to be used for sending and/or
+ * receiving.  The key index should be set only when working
+ * with global keys (use IEEE80211_KEYIX_NONE for ``no index'').
+ * Otherwise a unicast/pairwise key is specified by the bssid
+ * (on a station) or mac address (on an ap).  The key length
+ * must include any MIC key data; otherwise it should be no
+ * more than IEEE80211_KEYBUF_SIZE.
+ */
+struct ieee80211req_key {
+	uint8_t ik_type;		/* key/cipher type */
+	uint8_t ik_pad;			/* padding for alignment */
+	uint8_t ik_keyix;		/* key index */
+	uint8_t ik_keylen;		/* key length in bytes */
+	uint8_t ik_flags;		/* key flags; see below */
+/* NB: IEEE80211_KEY_XMIT and IEEE80211_KEY_RECV defined elsewhere */
+#define	IEEE80211_KEY_DEFAULT	0x80	/* default xmit key */
+	uint8_t ik_macaddr[IEEE80211_ADDR_LEN];
+	uint16_t ik_vlan;
+	uint64_t ik_keyrsc;		/* key receive sequence counter */
+	uint64_t ik_keytsc;		/* key transmit sequence counter */
+	uint8_t ik_keydata[IEEE80211_KEYBUF_SIZE+IEEE80211_MICBUF_SIZE];
+};
+
+/*
+ * Delete a key either by index or address.  Set the index
+ * to IEEE80211_KEYIX_NONE when deleting a unicast key.
+ */
+struct ieee80211req_del_key {
+	uint8_t idk_keyix;		/* key index */
+	uint8_t idk_macaddr[IEEE80211_ADDR_LEN];
+};
+
+/*
+ * MLME state manipulation request.  IEEE80211_MLME_ASSOC
+ * only makes sense when operating as a station.  The other
+ * requests can be used when operating as a station or an
+ * ap (to affect a station).
+ */
+struct ieee80211req_mlme {
+	uint8_t im_op;			/* operation to perform */
+#define	IEEE80211_MLME_ASSOC		1	/* associate station */
+#define	IEEE80211_MLME_DISASSOC		2	/* disassociate station */
+#define	IEEE80211_MLME_DEAUTH		3	/* deauthenticate station */
+#define	IEEE80211_MLME_AUTHORIZE	4	/* authorize station */
+#define	IEEE80211_MLME_UNAUTHORIZE	5	/* unauthorize station */
+#define IEEE80211_MLME_CLEAR_STATS	6	/* clear station statistic */
+#define IEEE80211_MLME_DEBUG_CLEAR	7	/* remove the STA without deauthing (DEBUG ONLY) */
+	uint8_t im_ssid_len;		/* length of optional ssid */
+	uint16_t im_reason;		/* 802.11 reason code */
+	uint8_t im_macaddr[IEEE80211_ADDR_LEN];
+	uint8_t im_ssid[IEEE80211_NWID_LEN];
+};
+
+/* Broadcom interworkings request (info exchange with a BRCM client) */
+struct ieee80211req_brcm {
+	uint8_t ib_op;				/* operation to perform */
+#define IEEE80211REQ_BRCM_INFO        0       /* BRCM client information */
+#define IEEE80211REQ_BRCM_PKT         1       /* BRCM pkt from ap to client */
+	uint8_t ib_macaddr[IEEE80211_ADDR_LEN];
+	int ib_rssi;				/* client rssi */
+	uint32_t ib_rxglitch;			/* rx glitch count */
+	uint8_t *ib_pkt;			/* packet buffer */
+	int32_t ib_pkt_len;			/* packet buffer length in bytes */
+};
+
+#define QTN_CHAN_AVAIL_STATUS_TO_STR	{"", "Non-Available", "Available",\
+					"", "Not-Available-Radar-Detected", "",\
+					"", "", "Not-Available-CAC-Required"}
+
+/* Max channels carried in a single SCS report */
+#define IEEE80211REQ_SCS_REPORT_CHAN_NUM    32
+/* SCS report: statistics for the channel currently in use */
+struct ieee80211req_scs_currchan_rpt {
+	uint8_t iscr_curchan;		/* current IEEE channel number */
+	uint16_t iscr_cca_try;
+	uint16_t iscr_cca_idle;
+	uint16_t iscr_cca_busy;
+	uint16_t iscr_cca_intf;
+	uint16_t iscr_cca_tx;
+	uint16_t iscr_tx_ms;
+	uint16_t iscr_rx_ms;
+	uint32_t iscr_pmbl;
+};
+
+/* SCS report: per-channel ranking information */
+struct ieee80211req_scs_ranking_rpt_chan {
+	uint8_t isrc_chan;
+	uint8_t isrc_dfs;
+	uint8_t isrc_txpwr;
+	int32_t isrc_metric;
+	uint32_t isrc_metric_age;
+	/* scs part */
+	uint16_t isrc_cca_intf;
+	uint32_t isrc_pmbl_ap;
+	uint32_t isrc_pmbl_sta;
+	/* initial channel selection part */
+	unsigned int isrc_numbeacons;
+	int isrc_cci;
+	int isrc_aci;
+	/* channel usage */
+	uint32_t isrc_duration;
+	uint32_t isrc_times;
+	uint8_t isrc_chan_avail_status;
+};
+
+struct ieee80211req_scs_ranking_rpt {
+	uint8_t isr_num;	/* number of valid entries in isr_chans[] */
+	struct ieee80211req_scs_ranking_rpt_chan isr_chans[IEEE80211REQ_SCS_REPORT_CHAN_NUM];
+};
+
+/* SCS report: per-channel CCA interference at 20/40/80 MHz */
+struct ieee80211req_scs_interference_rpt_chan {
+	uint8_t isrc_chan;
+	uint16_t isrc_cca_intf_20;
+	uint16_t isrc_cca_intf_40;
+	uint16_t isrc_cca_intf_80;
+};
+struct ieee80211req_scs_interference_rpt {
+	uint8_t isr_num;	/* number of valid entries in isr_chans[] */
+	struct ieee80211req_scs_interference_rpt_chan isr_chans[IEEE80211REQ_SCS_REPORT_CHAN_NUM];
+};
+
+/* SCS report: per-channel score */
+struct ieee80211req_scs_score_rpt_chan {
+	uint8_t isrc_chan;
+	uint8_t isrc_score;
+};
+struct ieee80211req_scs_score_rpt {
+	uint8_t isr_num;	/* number of valid entries in isr_chans[] */
+	struct ieee80211req_scs_score_rpt_chan isr_chans[IEEE80211REQ_SCS_REPORT_CHAN_NUM];
+};
+
+#define SCS_MAX_TXTIME_COMP_INDEX	8
+#define SCS_MAX_RXTIME_COMP_INDEX	8
+#define SCS_MAX_TDLSTIME_COMP_INDEX	8
+/*
+ * SCS configuration parameter identifiers.
+ * Restrictions:
+ *   this structure must be kept in sync with ieee80211_scs
+ */
+enum qscs_cfg_param_e {
+	SCS_SMPL_DWELL_TIME = 0,
+	SCS_SAMPLE_INTV,
+	SCS_THRSHLD_SMPL_PKTNUM,
+	SCS_THRSHLD_SMPL_AIRTIME,
+	SCS_THRSHLD_ATTEN_INC,
+	SCS_THRSHLD_DFS_REENTRY,
+	SCS_THRSHLD_DFS_REENTRY_MINRATE,
+	SCS_THRSHLD_DFS_REENTRY_INTF,
+	SCS_THRSHLD_LOADED,
+	SCS_THRSHLD_AGING_NOR,
+	SCS_THRSHLD_AGING_DFSREENT,
+	SCS_ENABLE,
+	SCS_DEBUG_ENABLE,
+	SCS_SMPL_ENABLE,
+	SCS_REPORT_ONLY,
+	SCS_CCA_IDLE_THRSHLD,
+	SCS_CCA_INTF_HI_THRSHLD,
+	SCS_CCA_INTF_LO_THRSHLD,
+	SCS_CCA_INTF_RATIO,
+	SCS_CCA_INTF_DFS_MARGIN,
+	SCS_PMBL_ERR_THRSHLD,
+	SCS_CCA_SAMPLE_DUR,
+	SCS_CCA_INTF_SMTH_NOXP,
+	SCS_CCA_INTF_SMTH_XPED,
+	SCS_RSSI_SMTH_UP,
+	SCS_RSSI_SMTH_DOWN,
+	SCS_CHAN_MTRC_MRGN,
+	SCS_ATTEN_ADJUST,
+	SCS_ATTEN_SW_ENABLE,
+	SCS_PMBL_ERR_SMTH_FCTR,
+	SCS_PMBL_ERR_RANGE,
+	SCS_PMBL_ERR_MAPPED_INTF_RANGE,
+	SCS_SP_WF,
+	SCS_LP_WF,
+	SCS_PMP_RPT_CCA_SMTH_FCTR,
+	SCS_PMP_RX_TIME_SMTH_FCTR,
+	SCS_PMP_TX_TIME_SMTH_FCTR,
+	SCS_PMP_STATS_STABLE_PERCENT,
+	SCS_PMP_STATS_STABLE_RANGE,
+	SCS_PMP_STATS_CLEAR_INTERVAL,
+	SCS_AS_RX_TIME_SMTH_FCTR,
+	SCS_AS_TX_TIME_SMTH_FCTR,
+	SCS_CCA_IDLE_SMTH_FCTR,
+	/* NB: "COMPENSTATION" spelling is historical; kept for API compatibility */
+	SCS_TX_TIME_COMPENSTATION_START,
+	SCS_TX_TIME_COMPENSTATION_END = SCS_TX_TIME_COMPENSTATION_START+SCS_MAX_TXTIME_COMP_INDEX-1,
+	SCS_RX_TIME_COMPENSTATION_START,
+	SCS_RX_TIME_COMPENSTATION_END = SCS_RX_TIME_COMPENSTATION_START+SCS_MAX_RXTIME_COMP_INDEX-1,
+	SCS_TDLS_TIME_COMPENSTATION_START,
+	SCS_TDLS_TIME_COMPENSTATION_END = SCS_TDLS_TIME_COMPENSTATION_START+SCS_MAX_TDLSTIME_COMP_INDEX-1,
+	SCS_LEAVE_DFS_CHAN_MTRC_MRGN,
+	SCS_CCA_THRESHOD_TYPE,
+	SCS_SAMPLE_TYPE,
+	SCS_BURST_ENABLE,
+	SCS_BURST_WINDOW,
+	SCS_BURST_THRESH,
+	SCS_BURST_PAUSE_TIME,
+	SCS_BURST_FORCE_SWITCH,
+	SCS_PARAM_MAX,		/* number of parameters; must be last */
+};
+
+/* One SCS parameter value plus a flag marking it as signed */
+struct ieee80211req_scs_param_rpt {
+	uint32_t cfg_param;
+	uint32_t signed_param_flag;
+};
+
+struct ieee80211req_scs {
+	uint32_t is_op;			/* requested operation; IEEE80211REQ_SCS_* below */
+#define IEEE80211REQ_SCS_ID_UNKNOWN               0
+#define IEEE80211REQ_SCS_FLAG_GET                 0x80000000
+#define IEEE80211REQ_SCS_GET_CURRCHAN_RPT         (IEEE80211REQ_SCS_FLAG_GET | 1)
+#define IEEE80211REQ_SCS_GET_INIT_RANKING_RPT     (IEEE80211REQ_SCS_FLAG_GET | 2)
+#define IEEE80211REQ_SCS_GET_RANKING_RPT          (IEEE80211REQ_SCS_FLAG_GET | 3)
+#define IEEE80211REQ_SCS_GET_PARAM_RPT            (IEEE80211REQ_SCS_FLAG_GET | 4)
+#define IEEE80211REQ_SCS_GET_SCORE_RPT            (IEEE80211REQ_SCS_FLAG_GET | 5)
+#define IEEE80211REQ_SCS_GET_INTERFERENCE_RPT     (IEEE80211REQ_SCS_FLAG_GET | 6)
+	uint32_t *is_status;                  /* SCS specific reason for ioctl failure */
+#define IEEE80211REQ_SCS_RESULT_OK                    0
+#define IEEE80211REQ_SCS_RESULT_SYSCALL_ERR           1
+#define IEEE80211REQ_SCS_RESULT_SCS_DISABLED          2
+#define IEEE80211REQ_SCS_RESULT_NO_VAP_RUNNING        3
+#define IEEE80211REQ_SCS_RESULT_NOT_EVALUATED         4        /* channel ranking not evaluated */
+#define IEEE80211REQ_SCS_RESULT_TMP_UNAVAILABLE       5        /* when channel switch or param change */
+#define IEEE80211REQ_SCS_RESULT_APMODE_ONLY           6
+#define IEEE80211REQ_SCS_RESULT_AUTOCHAN_DISABLED     7
+	uint8_t *is_data;		/* report data (one of the *_rpt structs above) */
+	int32_t is_data_len;		/* size of is_data in bytes */
+};
+
+/* Per-channel max power, indexed by beamforming / spatial-stream / bandwidth indices */
+struct ieee80211_chan_power_table {
+	uint8_t chan_ieee;		/* IEEE channel number */
+	int8_t maxpower_table[PWR_IDX_BF_MAX][PWR_IDX_SS_MAX][PWR_IDX_BW_MAX];
+};
+
+/* DSCP-to-access-category mapping.  NB: "ieeee" (triple 'e') tag is a historical typo kept for API compatibility. */
+struct ieeee80211_dscp2ac {
+	uint8_t dscp[IP_DSCP_NUM];	/* DSCP values to map */
+	uint8_t list_len;		/* number of valid entries in dscp[] */
+	uint8_t ac;			/* target access category */
+};
+/*
+ * MAC ACL operations.
+ */
+enum {
+	IEEE80211_MACCMD_POLICY_OPEN	= 0,	/* set policy: no ACL's */
+	IEEE80211_MACCMD_POLICY_ALLOW	= 1,	/* set policy: allow traffic */
+	IEEE80211_MACCMD_POLICY_DENY	= 2,	/* set policy: deny traffic */
+	IEEE80211_MACCMD_FLUSH		= 3,	/* flush ACL database */
+	IEEE80211_MACCMD_DETACH		= 4,	/* detach ACL policy */
+};
+
+/*
+ * Set the active channel list.  Note this list is
+ * intersected with the available channel list in
+ * calculating the set of channels actually used in
+ * scanning.
+ */
+struct ieee80211req_chanlist {
+	uint8_t ic_channels[IEEE80211_CHAN_BYTES];	/* bitmap, one bit per channel */
+};
+
+/*
+ * Basic IEEE Channel info for wireless tools
+ */
+struct ieee80211_chan {
+	uint16_t ic_freq;	/* Freq setting in Mhz */
+	uint32_t ic_flags;	/* Channel flags */
+	uint8_t ic_ieee;	/* IEEE channel number */
+} __packed;
+
+/*
+ * Get the active channel list info.
+ */
+struct ieee80211req_chaninfo {
+	uint32_t ic_nchans;	/* number of valid entries in ic_chans[] */
+	struct ieee80211_chan ic_chans[IEEE80211_CHAN_MAX];
+};
+
+/*
+ * Set the active channel list for 20Mhz, 40Mhz and 80Mhz
+ */
+struct ieee80211_active_chanlist {
+	u_int8_t bw;					/* bandwidth the list applies to */
+	u_int8_t channels[IEEE80211_CHAN_BYTES];	/* bitmap, one bit per channel */
+};
+
+/*
+ * Set or Get the inactive channel list
+ */
+struct ieee80211_inactive_chanlist {
+	u_int8_t channels[IEEE80211_CHAN_MAX];
+};
+
+/*
+ * Set or get the disabled channel list.
+ * NB: "ieeee" (triple 'e') tag is a historical typo kept for API compatibility.
+ */
+struct ieeee80211_disabled_chanlist {
+	uint8_t chan[IEEE80211_CHAN_MAX];
+	uint32_t list_len;	/* number of valid entries in chan[] */
+	uint8_t flag;	/* 0: disable, 1: enable */
+	uint8_t dir;	/* 0: set, 1: get */
+};
+
+enum ieee80211_chan_control_dir
+{
+	SET_CHAN_DISABLED = 0,
+	GET_CHAN_DISABLED = 1,
+};
+/*
+ * Retrieve the WPA/RSN information element for an associated station.
+ */
+struct ieee80211req_wpaie {
+	uint8_t	wpa_macaddr[IEEE80211_ADDR_LEN];
+	uint8_t	wpa_ie[IEEE80211_MAX_OPT_IE];
+	uint8_t	rsn_ie[IEEE80211_MAX_OPT_IE];
+	uint8_t	osen_ie[IEEE80211_MAX_OPT_IE];
+	uint8_t	wps_ie[IEEE80211_MAX_OPT_IE];
+	uint8_t	qtn_pairing_ie[IEEE80211_MAX_OPT_IE];
+	uint8_t	mdie[IEEE80211_MAX_OPT_IE];
+	uint8_t	ftie[IEEE80211_MAX_OPT_IE];
+#define QTN_PAIRING_IE_EXIST 1
+#define QTN_PAIRING_IE_ABSENT 0
+	uint8_t	has_pairing_ie;		/* Indicates whether Pairing IE exists in assoc req/resp */
+};
+
+/*
+ * Retrieve per-node statistics.
+ */
+struct ieee80211req_sta_stats {
+	union {
+		/* NB: explicitly force 64-bit alignment */
+		uint8_t macaddr[IEEE80211_ADDR_LEN];
+		uint64_t pad;
+	} is_u;
+	struct ieee80211_nodestats is_stats;
+};
+/*
+ * Retrieve STA Statistics (Radio measurement) information element for an associated station.
+ */
+struct ieee80211req_qtn_rmt_sta_stats {
+	int status;
+	struct ieee80211_ie_qtn_rm_sta_all rmt_sta_stats;
+	struct ieee80211_ie_rm_sta_grp221	rmt_sta_stats_grp221;
+};
+
+/* Parameters for requesting remote STA statistics */
+struct ieee80211req_qtn_rmt_sta_stats_setpara {
+	uint32_t flags;
+	uint8_t macaddr[IEEE80211_ADDR_LEN];
+};
+
+struct ieee80211req_node_meas {
+	uint8_t mac_addr[6];		/* target node MAC address */
+
+	uint8_t type;			/* measurement type; IOCTL_MEAS_TYPE_* below */
+#define IOCTL_MEAS_TYPE_BASIC		0x0
+#define IOCTL_MEAS_TYPE_CCA		0x1
+#define IOCTL_MEAS_TYPE_RPI		0x2
+#define IOCTL_MEAS_TYPE_CHAN_LOAD	0x3
+#define IOCTL_MEAS_TYPE_NOISE_HIS	0x4
+#define IOCTL_MEAS_TYPE_BEACON		0x5
+#define IOCTL_MEAS_TYPE_FRAME		0x6
+#define IOCTL_MEAS_TYPE_CAT		0x7
+#define IOCTL_MEAS_TYPE_MUL_DIAG	0x8
+#define IOCTL_MEAS_TYPE_LINK		0x9
+#define IOCTL_MEAS_TYPE_NEIGHBOR	0xA
+
+	/* per-type measurement request parameters; only the member matching 'type' is used */
+	struct _ioctl_basic {
+		uint16_t start_offset_ms;
+		uint16_t duration_ms;
+		uint8_t channel;
+	} ioctl_basic;
+	struct _ioctl_cca {
+		uint16_t start_offset_ms;
+		uint16_t duration_ms;
+		uint8_t channel;
+	} ioctl_cca;
+	struct _ioctl_rpi {
+		uint16_t start_offset_ms;
+		uint16_t duration_ms;
+		uint8_t channel;
+	} ioctl_rpi;
+	struct _ioctl_chan_load {
+		uint16_t duration_ms;
+		uint8_t channel;
+	} ioctl_chan_load;
+	struct _ioctl_noise_his {
+		uint16_t duration_ms;
+		uint8_t channel;
+	} ioctl_noise_his;
+	struct _ioctl_beacon {
+		uint8_t op_class;
+		uint8_t channel;
+		uint16_t duration_ms;
+		uint8_t mode;
+		uint8_t bssid[IEEE80211_ADDR_LEN];
+	} ioctl_beacon;
+	struct _ioctl_frame {
+		uint8_t op_class;
+		uint8_t channel;
+		uint16_t duration_ms;
+		uint8_t type;
+		uint8_t mac_address[IEEE80211_ADDR_LEN];
+	} ioctl_frame;
+	struct _ioctl_tran_stream_cat {
+		uint16_t duration_ms;
+		uint8_t peer_sta[IEEE80211_ADDR_LEN];
+		uint8_t tid;
+		uint8_t bin0;
+	} ioctl_tran_stream_cat;
+	struct _ioctl_multicast_diag {
+		uint16_t duration_ms;
+		uint8_t group_mac[IEEE80211_ADDR_LEN];
+	} ioctl_multicast_diag;
+};
+
+/* TPC (transmit power control) request for a node */
+struct ieee80211req_node_tpc {
+	uint8_t	mac_addr[6];		/* target node MAC address */
+};
+
+/* Per-node request: either a measurement or a TPC request, selected by req_type */
+struct ieee80211req_node_info {
+	uint8_t	req_type;
+#define IOCTL_REQ_MEASUREMENT	0x0
+#define IOCTL_REQ_TPC		0x1
+	union {
+		struct ieee80211req_node_meas	req_node_meas;
+		struct ieee80211req_node_tpc	req_node_tpc;
+	} u_req_info;
+};
+
+/* One entry of an 802.11 neighbor report */
+struct ieee80211_ioctl_neighbor_report_item {
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint32_t bssid_info;
+	uint8_t operating_class;
+	uint8_t channel;
+	uint8_t phy_type;
+};
+#define IEEE80211_MAX_NEIGHBOR_REPORT_ITEM 3
+
+/* Measurement result returned for an ieee80211req_node_meas request */
+struct ieee80211rep_node_meas_result {
+	uint8_t	status;
+#define IOCTL_MEAS_STATUS_SUCC		0
+#define IOCTL_MEAS_STATUS_TIMEOUT	1
+#define IOCTL_MEAS_STATUS_NODELEAVE	2
+#define IOCTL_MEAS_STATUS_STOP		3
+
+	uint8_t report_mode;
+#define IOCTL_MEAS_REP_OK	(0)
+#define IOCTL_MEAS_REP_LATE	(1 << 0)
+#define IOCTL_MEAS_REP_INCAP	(1 << 1)
+#define IOCTL_MEAS_REP_REFUSE	(1 << 2)
+#define IOCTL_MEAS_REP_MASK	(0x07)
+
+	/* per-type result data; the valid member matches the requested measurement type */
+	union {
+		uint8_t	basic;
+		uint8_t	cca;
+		uint8_t	rpi[8];
+		uint8_t chan_load;
+		struct {
+			uint8_t antenna_id;
+			uint8_t anpi;
+			uint8_t ipi[11];
+		} noise_his;
+		struct {
+			uint8_t reported_frame_info;
+			uint8_t rcpi;
+			uint8_t rsni;
+			uint8_t bssid[IEEE80211_ADDR_LEN];
+			uint8_t antenna_id;
+			uint32_t parent_tsf;
+		} beacon;
+		struct {
+			uint32_t sub_ele_report;
+			uint8_t ta[IEEE80211_ADDR_LEN];
+			uint8_t bssid[IEEE80211_ADDR_LEN];
+			uint8_t phy_type;
+			uint8_t avg_rcpi;
+			uint8_t last_rsni;
+			uint8_t last_rcpi;
+			uint8_t antenna_id;
+			uint16_t frame_count;
+		} frame;
+		struct {
+			uint8_t reason;
+			uint32_t tran_msdu_cnt;
+			uint32_t msdu_discard_cnt;
+			uint32_t msdu_fail_cnt;
+			uint32_t msdu_mul_retry_cnt;
+			uint32_t qos_lost_cnt;
+			uint32_t avg_queue_delay;
+			uint32_t avg_tran_delay;
+			uint8_t bin0_range;
+			uint32_t bins[6];
+		} tran_stream_cat;
+		struct {
+			uint8_t reason;
+			uint32_t mul_rec_msdu_cnt;
+			uint16_t first_seq_num;
+			uint16_t last_seq_num;
+			uint16_t mul_rate;
+		} multicast_diag;
+		struct {
+			struct {
+				int8_t tx_power;
+				int8_t link_margin;
+			} tpc_report;
+			uint8_t recv_antenna_id;
+			uint8_t tran_antenna_id;
+			uint8_t rcpi;
+			uint8_t rsni;
+		} link_measure;
+		struct {
+			uint8_t item_num;
+			struct ieee80211_ioctl_neighbor_report_item item[IEEE80211_MAX_NEIGHBOR_REPORT_ITEM];
+		} neighbor_report;
+	} u_data;
+};
+
+/* TPC report returned for an ieee80211req_node_tpc request */
+struct ieee80211rep_node_tpc_result {
+	uint8_t status;
+	int8_t	tx_power;
+	int8_t	link_margin;
+};
+
+union ieee80211rep_node_info {
+	struct ieee80211rep_node_meas_result	meas_result;
+	struct ieee80211rep_node_tpc_result	tpc_result;
+};
+
+/*
+ * Station information block; the mac address is used
+ * to retrieve other data like stats, unicast key, etc.
+ */
+struct ieee80211req_sta_info {
+	uint16_t isi_len;		/* length (mult of 4) */
+	uint16_t isi_freq;		/* MHz */
+	uint16_t isi_flags;		/* channel flags */
+	uint16_t isi_state;		/* state flags */
+	uint8_t isi_authmode;		/* authentication algorithm */
+	uint8_t isi_rssi;		/* receive signal strength */
+	uint16_t isi_capinfo;		/* capabilities */
+	uint8_t isi_athflags;		/* Atheros capabilities */
+	uint8_t isi_erp;		/* ERP element */
+	uint8_t isi_macaddr[IEEE80211_ADDR_LEN];
+	uint8_t isi_nrates;		/* negotiated rates */
+	uint8_t isi_rates[IEEE80211_RATE_MAXSIZE];
+	uint8_t isi_txrate;		/* index to isi_rates[] */
+	uint16_t isi_ie_len;		/* IE length */
+	uint16_t isi_associd;		/* assoc response */
+	uint16_t isi_txpower;		/* current tx power */
+	uint16_t isi_vlan;		/* vlan tag */
+	uint16_t isi_txseqs[17];	/* seq to be transmitted */
+	uint16_t isi_rxseqs[17];	/* seq previous for qos frames*/
+	uint16_t isi_inact;		/* inactivity timer */
+	uint8_t isi_uapsd;		/* UAPSD queues */
+	uint8_t isi_opmode;		/* sta operating mode; IEEE80211_STA_OPMODE_* */
+	uint16_t isi_htcap;		/* HT capabilities */
+
+	/* XXX frag state? */
+	/* variable length IE data */
+};
+
+/* Values for ieee80211req_sta_info.isi_opmode */
+enum {
+	IEEE80211_STA_OPMODE_NORMAL,
+	IEEE80211_STA_OPMODE_XR
+};
+
+/*
+ * Retrieve per-station information; to retrieve all
+ * specify a mac address of ff:ff:ff:ff:ff:ff.
+ */
+struct ieee80211req_sta_req {
+	union {
+		/* NB: explicitly force 64-bit alignment */
+		uint8_t macaddr[IEEE80211_ADDR_LEN];
+		uint64_t pad;
+	} is_u;
+	struct ieee80211req_sta_info info[1];	/* variable length */
+};
+
+/*
+ * Get/set per-station tx power cap.
+ */
+struct ieee80211req_sta_txpow {
+	uint8_t	it_macaddr[IEEE80211_ADDR_LEN];
+	uint8_t	it_txpow;		/* tx power cap for this station */
+};
+
+/*
+ * WME parameters are set and return using i_val and i_len.
+ * i_val holds the value itself.  i_len specifies the AC
+ * and, as appropriate, then high bit specifies whether the
+ * operation is to be applied to the BSS or ourself.
+ */
+#define	IEEE80211_WMEPARAM_SELF	0x0000		/* parameter applies to self */
+#define	IEEE80211_WMEPARAM_BSS	0x8000		/* parameter applies to BSS */
+#define	IEEE80211_WMEPARAM_VAL	0x7fff		/* parameter value */
+
+/*
+ * Scan result data returned for IEEE80211_IOC_SCAN_RESULTS.
+ */
+struct ieee80211req_scan_result {
+	uint16_t isr_len;		/* length (mult of 4) */
+	uint16_t isr_freq;		/* MHz */
+	uint16_t isr_flags;		/* channel flags */
+	uint8_t isr_noise;
+	uint8_t isr_rssi;
+	uint8_t isr_intval;		/* beacon interval */
+	uint16_t isr_capinfo;		/* capabilities */
+	uint8_t isr_erp;		/* ERP element */
+	uint8_t isr_bssid[IEEE80211_ADDR_LEN];
+	uint8_t isr_nrates;
+	uint8_t isr_rates[IEEE80211_RATE_MAXSIZE];
+	uint8_t isr_ssid_len;		/* SSID length */
+	uint8_t isr_ie_len;		/* IE length */
+	uint8_t isr_pad[5];
+	/* variable length SSID followed by IE data */
+};
+
+#define IEEE80211_MAX_ASSOC_HISTORY	32
+
+/* Recent association history: MACs with parallel association timestamps */
+struct ieee80211_assoc_history {
+	uint8_t  ah_macaddr_table[IEEE80211_MAX_ASSOC_HISTORY][IEEE80211_ADDR_LEN];
+	uint32_t ah_timestamp[IEEE80211_MAX_ASSOC_HISTORY];	/* parallel to ah_macaddr_table */
+};
+
+/*
+ * Channel switch history record.
+ */
+#define CSW_MAX_RECORDS_MAX 32
+struct ieee80211req_csw_record {
+	uint32_t cnt;			/* number of records stored */
+	int32_t index;			/* most recent record index (ring buffer) */
+	uint32_t channel[CSW_MAX_RECORDS_MAX];
+	uint32_t timestamp[CSW_MAX_RECORDS_MAX];
+	uint32_t reason[CSW_MAX_RECORDS_MAX];
+	uint8_t csw_record_mac[CSW_MAX_RECORDS_MAX][IEEE80211_ADDR_LEN];
+};
+
+struct ieee80211req_radar_status {
+	uint32_t channel;
+	uint32_t flags;
+	uint32_t ic_radardetected;
+};
+
+struct ieee80211req_disconn_info {
+	uint32_t asso_sta_count;
+	uint32_t disconn_count;
+	uint32_t sequence;
+	uint32_t up_time;
+	uint32_t resetflag;
+};
+
+#define AP_SCAN_MAX_NUM_RATES 32
+/* for qcsapi_get_results_AP_scan */
+struct ieee80211_general_ap_scan_result {
+	int32_t num_bitrates;
+	int32_t bitrates[AP_SCAN_MAX_NUM_RATES];
+	int32_t num_ap_results;
+};
+
+struct ieee80211_per_ap_scan_result {
+	int8_t		ap_addr_mac[IEEE80211_ADDR_LEN];
+	int8_t		ap_name_ssid[32 + 1];
+	int32_t		ap_channel_ieee;
+	int32_t		ap_max_bw;
+	int32_t		ap_rssi;
+	int32_t		ap_flags;
+	int32_t		ap_htcap;
+	int32_t		ap_vhtcap;
+	int8_t		ap_qhop_role;
+	uint8_t		ap_ht_secoffset;
+	uint8_t		ap_chan_center1;
+	uint8_t		ap_chan_center2;
+	uint32_t	ap_bestrate;
+	int32_t		ap_num_genies;
+	uint16_t	ap_beacon_intval;
+	uint8_t		ap_dtim_intval;
+	uint8_t		ap_is_ess;
+	uint32_t	ap_last_beacon;
+	int32_t		ap_noise;
+	int8_t		ap_nonerp_present;
+	uint32_t	ap_basicrates_num;
+	uint32_t	ap_basicrates[AP_SCAN_MAX_NUM_RATES];	/*in 0.5Mbps*/
+	uint32_t	ap_suprates_num;
+	uint32_t	ap_suprates[AP_SCAN_MAX_NUM_RATES];	/*in 0.5Mbps*/
+	int8_t		ap_ie_buf[0];	/* just to remind there might be WPA/RSN/WSC IEs right behind*/
+};
+
+#define MAX_MACS_SIZE	1200 /* 200 macs */
+/* Report results of get mac address of clients behind associated node */
+struct ieee80211_mac_list {
+	/**
+	 * flags indicating
+	 * bit 0 set means addresses are behind 4 addr node
+	 * bit 1 set means results are truncated to fit to buffer
+	 */
+	uint32_t flags;
+	/**
+	 * num entries in the macaddr list below
+	 */
+	uint32_t num_entries;
+	/**
+	 * buffer to store mac addresses
+	 */
+	uint8_t macaddr[MAX_MACS_SIZE];
+};
+
+#define QTN_FREQ_RANGE_MAX_NUM	64
+
+struct	ieee80211_freq
+{
+	int32_t m;		/* Mantissa */
+	int16_t e;		/* Exponent */
+	uint8_t i;		/* List index (when in range struct) */
+	uint8_t flags;		/* Flags (fixed/auto) */
+};
+
+struct ieee80211_freq_range {
+	uint8_t num_freq;
+	struct ieee80211_freq freq[QTN_FREQ_RANGE_MAX_NUM];
+};
+
+#define NAC_MAX_STATIONS 128
+/* non associated clients information */
+struct nac_info_entry {
+	uint64_t	nac_timestamp;   /* time stamp of last packet received */
+	int8_t		nac_avg_rssi; /*average rssi in dBm */
+	uint8_t		nac_channel;  /* channel on which last seen */
+	uint8_t		nac_packet_type; /* packet type last transmitted */
+	uint8_t		nac_txmac[IEEE80211_ADDR_LEN]; /* mac address */
+};
+struct ieee80211_nac_stats_report {
+	uint8_t	nac_entries; /* number of entries filled, up to NAC_MAX_STATIONS */
+	struct nac_info_entry nac_stats[NAC_MAX_STATIONS];
+};
+
+#ifdef __FreeBSD__
+/*
+ * FreeBSD-style ioctls.
+ */
+/* the first member must be matched with struct ifreq */
+struct ieee80211req {
+	char i_name[IFNAMSIZ];	/* if_name, e.g. "wi0" */
+	uint16_t i_type;	/* req type */
+	int16_t	i_val;		/* Index or simple value */
+	int16_t	i_len;		/* Index or simple value */
+	void *i_data;		/* Extra data */
+};
+#define	SIOCS80211		 _IOW('i', 234, struct ieee80211req)
+#define	SIOCG80211		_IOWR('i', 235, struct ieee80211req)
+#define	SIOCG80211STATS		_IOWR('i', 236, struct ifreq)
+#define	SIOC80211IFCREATE	_IOWR('i', 237, struct ifreq)
+#define	SIOC80211IFDESTROY	 _IOW('i', 238, struct ifreq)
+
+#define IEEE80211_IOC_SSID		1
+#define IEEE80211_IOC_NUMSSIDS		2
+#define IEEE80211_IOC_WEP		3
+#define	IEEE80211_WEP_NOSUP		-1
+#define	IEEE80211_WEP_OFF		0
+#define	IEEE80211_WEP_ON		1
+#define	IEEE80211_WEP_MIXED		2
+#define IEEE80211_IOC_WEPKEY		4
+#define IEEE80211_IOC_NUMWEPKEYS	5
+#define IEEE80211_IOC_WEPTXKEY		6
+#define IEEE80211_IOC_AUTHMODE		7
+#define IEEE80211_IOC_STATIONNAME	8
+#define IEEE80211_IOC_CHANNEL		9
+#define IEEE80211_IOC_POWERSAVE		10
+#define	IEEE80211_POWERSAVE_NOSUP	-1
+#define	IEEE80211_POWERSAVE_OFF		0
+#define	IEEE80211_POWERSAVE_CAM		1
+#define	IEEE80211_POWERSAVE_PSP		2
+#define	IEEE80211_POWERSAVE_PSP_CAM	3
+#define	IEEE80211_POWERSAVE_ON		IEEE80211_POWERSAVE_CAM
+#define IEEE80211_IOC_POWERSAVESLEEP	11
+#define	IEEE80211_IOC_RTSTHRESHOLD	12
+#define IEEE80211_IOC_PROTMODE		13
+#define	IEEE80211_PROTMODE_OFF		0
+#define	IEEE80211_PROTMODE_CTS		1
+#define	IEEE80211_PROTMODE_RTSCTS	2
+#define	IEEE80211_IOC_TXPOWER		14	/* global tx power limit */
+#define	IEEE80211_IOC_BSSID		15
+#define	IEEE80211_IOC_ROAMING		16	/* roaming mode */
+#define	IEEE80211_IOC_PRIVACY		17	/* privacy invoked */
+#define	IEEE80211_IOC_DROPUNENCRYPTED	18	/* discard unencrypted frames */
+#define	IEEE80211_IOC_WPAKEY		19
+#define	IEEE80211_IOC_DELKEY		20
+#define	IEEE80211_IOC_MLME		21
+#define	IEEE80211_IOC_OPTIE		22	/* optional info. element */
+#define	IEEE80211_IOC_SCAN_REQ		23
+#define	IEEE80211_IOC_SCAN_RESULTS	24
+#define	IEEE80211_IOC_COUNTERMEASURES	25	/* WPA/TKIP countermeasures */
+#define	IEEE80211_IOC_WPA		26	/* WPA mode (0,1,2) */
+#define	IEEE80211_IOC_CHANLIST		27	/* channel list */
+#define	IEEE80211_IOC_WME		28	/* WME mode (on, off) */
+#define	IEEE80211_IOC_HIDESSID		29	/* hide SSID mode (on, off) */
+#define IEEE80211_IOC_APBRIDGE		30	/* AP inter-sta bridging */
+#define	IEEE80211_IOC_MCASTCIPHER	31	/* multicast/default cipher */
+#define	IEEE80211_IOC_MCASTKEYLEN	32	/* multicast key length */
+#define	IEEE80211_IOC_UCASTCIPHERS	33	/* unicast cipher suites */
+#define	IEEE80211_IOC_UCASTCIPHER	34	/* unicast cipher */
+#define	IEEE80211_IOC_UCASTKEYLEN	35	/* unicast key length */
+#define	IEEE80211_IOC_DRIVER_CAPS	36	/* driver capabilities */
+#define	IEEE80211_IOC_KEYMGTALGS	37	/* key management algorithms */
+#define	IEEE80211_IOC_RSNCAPS		38	/* RSN capabilities */
+#define	IEEE80211_IOC_WPAIE		39	/* WPA information element */
+#define	IEEE80211_IOC_STA_STATS		40	/* per-station statistics */
+#define	IEEE80211_IOC_MACCMD		41	/* MAC ACL operation */
+#define	IEEE80211_IOC_TXPOWMAX		43	/* max tx power for channel */
+#define	IEEE80211_IOC_STA_TXPOW		44	/* per-station tx power limit */
+#define	IEEE80211_IOC_STA_INFO		45	/* station/neighbor info */
+#define	IEEE80211_IOC_WME_CWMIN		46	/* WME: ECWmin */
+#define	IEEE80211_IOC_WME_CWMAX		47	/* WME: ECWmax */
+#define	IEEE80211_IOC_WME_AIFS		48	/* WME: AIFSN */
+#define	IEEE80211_IOC_WME_TXOPLIMIT	49	/* WME: txops limit */
+#define	IEEE80211_IOC_WME_ACM		50	/* WME: ACM (bss only) */
+#define	IEEE80211_IOC_WME_ACKPOLICY	51	/* WME: ACK policy (!bss only)*/
+#define	IEEE80211_IOC_DTIM_PERIOD	52	/* DTIM period (beacons) */
+#define	IEEE80211_IOC_BEACON_INTERVAL	53	/* beacon interval (ms) */
+#define	IEEE80211_IOC_ADDMAC		54	/* add sta to MAC ACL table */
+#define	IEEE80211_IOC_DELMAC		55	/* del sta from MAC ACL table */
+#define	IEEE80211_IOC_FF		56	/* ATH fast frames (on, off) */
+#define	IEEE80211_IOC_TURBOP		57	/* ATH turbo' (on, off) */
+#define	IEEE80211_IOC_APPIEBUF		58	/* IE in the management frame */
+#define	IEEE80211_IOC_FILTERFRAME	59	/* management frame filter */
+
+/*
+ * Scan result data returned for IEEE80211_IOC_SCAN_RESULTS.
+ */
+struct ieee80211req_scan_result {
+	uint16_t isr_len;		/* length (mult of 4) */
+	uint16_t isr_freq;		/* MHz */
+	uint16_t isr_flags;		/* channel flags */
+	uint8_t isr_noise;
+	uint8_t isr_rssi;
+	uint8_t isr_intval;		/* beacon interval */
+	uint16_t isr_capinfo;		/* capabilities */
+	uint8_t isr_erp;		/* ERP element */
+	uint8_t isr_bssid[IEEE80211_ADDR_LEN];
+	uint8_t isr_nrates;
+	uint8_t isr_rates[IEEE80211_RATE_MAXSIZE];
+	uint8_t isr_ssid_len;		/* SSID length */
+	uint8_t isr_ie_len;		/* IE length */
+	uint8_t isr_pad[5];
+	/* variable length SSID followed by IE data */
+};
+
+#endif /* __FreeBSD__ */
+
+#if defined(__linux__) || defined(MUC_BUILD) || defined(DSP_BUILD)
+/*
+ * Wireless Extensions API, private ioctl interfaces.
+ *
+ * NB: Even-numbered ioctl numbers have set semantics and are privileged!
+ *     (regardless of the incorrect comment in wireless.h!)
+ */
+#ifdef __KERNEL__
+#include <linux/if.h>
+#endif
+#define	IEEE80211_IOCTL_SETPARAM	(SIOCIWFIRSTPRIV+0)
+#define	IEEE80211_IOCTL_GETPARAM	(SIOCIWFIRSTPRIV+1)
+#define	IEEE80211_IOCTL_SETMODE		(SIOCIWFIRSTPRIV+2)
+#define	IEEE80211_IOCTL_GETMODE		(SIOCIWFIRSTPRIV+3)
+#define	IEEE80211_IOCTL_SETWMMPARAMS	(SIOCIWFIRSTPRIV+4)
+#define	IEEE80211_IOCTL_GETWMMPARAMS	(SIOCIWFIRSTPRIV+5)
+#define	IEEE80211_IOCTL_SETCHANLIST	(SIOCIWFIRSTPRIV+6)
+#define	IEEE80211_IOCTL_GETCHANLIST	(SIOCIWFIRSTPRIV+7)
+#define	IEEE80211_IOCTL_CHANSWITCH	(SIOCIWFIRSTPRIV+8)
+#define	IEEE80211_IOCTL_GET_APPIEBUF	(SIOCIWFIRSTPRIV+9)
+#define	IEEE80211_IOCTL_SET_APPIEBUF	(SIOCIWFIRSTPRIV+10)
+#define	IEEE80211_IOCTL_FILTERFRAME	(SIOCIWFIRSTPRIV+12)
+#define	IEEE80211_IOCTL_GETCHANINFO	(SIOCIWFIRSTPRIV+13)
+#define	IEEE80211_IOCTL_SETOPTIE	(SIOCIWFIRSTPRIV+14)
+#define	IEEE80211_IOCTL_GETOPTIE	(SIOCIWFIRSTPRIV+15)
+#define	IEEE80211_IOCTL_SETMLME		(SIOCIWFIRSTPRIV+16)
+#define	IEEE80211_IOCTL_RADAR		(SIOCIWFIRSTPRIV+17)
+#define	IEEE80211_IOCTL_SETKEY		(SIOCIWFIRSTPRIV+18)
+#define	IEEE80211_IOCTL_POSTEVENT	(SIOCIWFIRSTPRIV+19)
+#define	IEEE80211_IOCTL_DELKEY		(SIOCIWFIRSTPRIV+20)
+#define	IEEE80211_IOCTL_TXEAPOL		(SIOCIWFIRSTPRIV+21)
+#define	IEEE80211_IOCTL_ADDMAC		(SIOCIWFIRSTPRIV+22)
+#define	IEEE80211_IOCTL_STARTCCA	(SIOCIWFIRSTPRIV+23)
+#define	IEEE80211_IOCTL_DELMAC		(SIOCIWFIRSTPRIV+24)
+#define IEEE80211_IOCTL_GETSTASTATISTIC	(SIOCIWFIRSTPRIV+25)
+#define	IEEE80211_IOCTL_WDSADDMAC	(SIOCIWFIRSTPRIV+26)
+#define	IEEE80211_IOCTL_WDSDELMAC	(SIOCIWFIRSTPRIV+28)
+#define IEEE80211_IOCTL_GETBLOCK	(SIOCIWFIRSTPRIV+29)
+#define	IEEE80211_IOCTL_KICKMAC		(SIOCIWFIRSTPRIV+30)
+#define	IEEE80211_IOCTL_DFSACTSCAN	(SIOCIWFIRSTPRIV+31)
+
+#define IEEE80211_AMPDU_MIN_DENSITY	0
+#define IEEE80211_AMPDU_MAX_DENSITY	7
+
+#define IEEE80211_CCE_PREV_CHAN_SHIFT	8
+
+enum {
+	IEEE80211_PARAM_TURBO		= 1,	/* turbo mode */
+	IEEE80211_PARAM_MODE		= 2,	/* phy mode (11a, 11b, etc.) */
+	IEEE80211_PARAM_AUTHMODE	= 3,	/* authentication mode */
+	IEEE80211_PARAM_PROTMODE	= 4,	/* 802.11g protection */
+	IEEE80211_PARAM_MCASTCIPHER	= 5,	/* multicast/default cipher */
+	IEEE80211_PARAM_MCASTKEYLEN	= 6,	/* multicast key length */
+	IEEE80211_PARAM_UCASTCIPHERS	= 7,	/* unicast cipher suites */
+	IEEE80211_PARAM_UCASTCIPHER	= 8,	/* unicast cipher */
+	IEEE80211_PARAM_UCASTKEYLEN	= 9,	/* unicast key length */
+	IEEE80211_PARAM_WPA		= 10,	/* WPA mode (0,1,2) */
+	IEEE80211_PARAM_ROAMING		= 12,	/* roaming mode */
+	IEEE80211_PARAM_PRIVACY		= 13,	/* privacy invoked */
+	IEEE80211_PARAM_COUNTERMEASURES	= 14,	/* WPA/TKIP countermeasures */
+	IEEE80211_PARAM_DROPUNENCRYPTED	= 15,	/* discard unencrypted frames */
+	IEEE80211_PARAM_DRIVER_CAPS	= 16,	/* driver capabilities */
+	IEEE80211_PARAM_WMM		= 18,	/* WMM mode (on, off) */
+	IEEE80211_PARAM_HIDESSID	= 19,	/* hide SSID mode (on, off) */
+	IEEE80211_PARAM_APBRIDGE	= 20,   /* AP inter-sta bridging */
+	IEEE80211_PARAM_KEYMGTALGS	= 21,	/* key management algorithms */
+	IEEE80211_PARAM_RSNCAPS		= 22,	/* RSN capabilities */
+	IEEE80211_PARAM_INACT		= 23,	/* station inactivity timeout */
+	IEEE80211_PARAM_INACT_AUTH	= 24,	/* station auth inact timeout */
+	IEEE80211_PARAM_INACT_INIT	= 25,	/* station init inact timeout */
+	IEEE80211_PARAM_ABOLT		= 26,	/* Atheros Adv. Capabilities */
+	IEEE80211_PARAM_DTIM_PERIOD	= 28,	/* DTIM period (beacons) */
+	IEEE80211_PARAM_BEACON_INTERVAL	= 29,	/* beacon interval (ms) */
+	IEEE80211_PARAM_DOTH		= 30,	/* 11.h is on/off */
+	IEEE80211_PARAM_PWRCONSTRAINT	= 31,	/* Current Channel Pwr Constraint */
+	IEEE80211_PARAM_GENREASSOC	= 32,	/* Generate a reassociation request */
+	IEEE80211_PARAM_COMPRESSION	= 33,	/* compression */
+	IEEE80211_PARAM_FF		= 34,	/* fast frames support  */
+	IEEE80211_PARAM_XR		= 35,	/* XR support */
+	IEEE80211_PARAM_BURST		= 36,	/* burst mode */
+	IEEE80211_PARAM_PUREG		= 37,	/* pure 11g (no 11b stations) */
+	IEEE80211_PARAM_REPEATER	= 38,	/* simultaneous AP and STA mode */
+	IEEE80211_PARAM_WDS		= 39,	/* Enable 4 address processing */
+	IEEE80211_PARAM_BGSCAN		= 40,	/* bg scanning (on, off) */
+	IEEE80211_PARAM_BGSCAN_IDLE	= 41,	/* bg scan idle threshold */
+	IEEE80211_PARAM_BGSCAN_INTERVAL	= 42,	/* bg scan interval */
+	IEEE80211_PARAM_MCAST_RATE	= 43,	/* Multicast Tx Rate */
+	IEEE80211_PARAM_COVERAGE_CLASS	= 44,	/* coverage class */
+	IEEE80211_PARAM_COUNTRY_IE	= 45,	/* enable country IE */
+	IEEE80211_PARAM_SCANVALID	= 46,	/* scan cache valid threshold */
+	IEEE80211_PARAM_ROAM_RSSI_11A	= 47,	/* rssi threshold in 11a */
+	IEEE80211_PARAM_ROAM_RSSI_11B	= 48,	/* rssi threshold in 11b */
+	IEEE80211_PARAM_ROAM_RSSI_11G	= 49,	/* rssi threshold in 11g */
+	IEEE80211_PARAM_ROAM_RATE_11A	= 50,	/* tx rate threshold in 11a */
+	IEEE80211_PARAM_ROAM_RATE_11B	= 51,	/* tx rate threshold in 11b */
+	IEEE80211_PARAM_ROAM_RATE_11G	= 52,	/* tx rate threshold in 11g */
+	IEEE80211_PARAM_UAPSDINFO	= 53,	/* value for qos info field */
+	IEEE80211_PARAM_SLEEP		= 54,	/* force sleep/wake */
+	IEEE80211_PARAM_QOSNULL		= 55,	/* force sleep/wake */
+	IEEE80211_PARAM_PSPOLL		= 56,	/* force ps-poll generation (sta only) */
+	IEEE80211_PARAM_EOSPDROP	= 57,	/* force uapsd EOSP drop (ap only) */
+	IEEE80211_PARAM_MARKDFS		= 58,	/* mark a dfs interference channel when found */
+	IEEE80211_PARAM_REGCLASS	= 59,	/* enable regclass ids in country IE */
+	IEEE80211_PARAM_DROPUNENC_EAPOL	= 60,	/* drop unencrypted eapol frames */
+	IEEE80211_PARAM_SHPREAMBLE	= 61,	/* Short Preamble */
+	IEEE80211_PARAM_FIXED_TX_RATE = 62,	/* Set fixed TX rate          */
+	IEEE80211_PARAM_MIMOMODE = 63,		/* Select antenna to use      */
+	IEEE80211_PARAM_AGGREGATION	= 64,	/* Enable/disable aggregation */
+	IEEE80211_PARAM_RETRY_COUNT = 65,	/* Set retry count            */
+	IEEE80211_PARAM_VAP_DBG    = 66,		/* Set the VAP debug verbosity . */
+	IEEE80211_PARAM_VCO_CALIB = 67,		/* Set VCO calibration */
+	IEEE80211_PARAM_EXP_MAT_SEL = 68,	/* Select different exp mat */
+	IEEE80211_PARAM_BW_SEL = 69,		/* Select BW */
+	IEEE80211_PARAM_RG = 70,			/* Let software fill in the duration update*/
+	IEEE80211_PARAM_BW_SEL_MUC = 71,	/* Let software fill in the duration update*/
+	IEEE80211_PARAM_ACK_POLICY = 72,	/* 1 for ACK, zero for no ACK */
+	IEEE80211_PARAM_LEGACY_MODE = 73,	/* 1 for legacy, zero for HT*/
+	IEEE80211_PARAM_MAX_AGG_SUBFRM = 74,	/* Maximum number if subframes to allow for aggregation */
+	IEEE80211_PARAM_ADD_WDS_MAC = 75,	/* Add MAC address for WDS peer */
+	IEEE80211_PARAM_DEL_WDS_MAC = 76,	/* Delete MAC address for WDS peer */
+	IEEE80211_PARAM_TXBF_CTRL = 77,		/* Control TX beamforming */
+	IEEE80211_PARAM_TXBF_PERIOD = 78,	/* Set TX beamforming period */
+	IEEE80211_PARAM_BSSID = 79,			/* Set BSSID */
+	IEEE80211_PARAM_HTBA_SEQ_CTRL = 80, /* Control HT Block ACK */
+	IEEE80211_PARAM_HTBA_SIZE_CTRL = 81, /* Control HT Block ACK */
+	IEEE80211_PARAM_HTBA_TIME_CTRL = 82, /* Control HT Block ACK */
+	IEEE80211_PARAM_HT_ADDBA = 83,		/* ADDBA control */
+	IEEE80211_PARAM_HT_DELBA = 84,		/* DELBA control */
+	IEEE80211_PARAM_CHANNEL_NOSCAN = 85, /* Disable the scanning for fixed channels */
+	IEEE80211_PARAM_MUC_PROFILE = 86,	/* Control MuC profiling */
+	IEEE80211_PARAM_MUC_PHY_STATS = 87,	/* Control MuC phy stats */
+	IEEE80211_PARAM_MUC_SET_PARTNUM = 88,	/* set muc part num for cal */
+	IEEE80211_PARAM_ENABLE_GAIN_ADAPT = 89,	/* turn on the anlg gain tuning */
+	IEEE80211_PARAM_GET_RFCHIP_ID = 90,	/* Get RF chip frequency id */
+	IEEE80211_PARAM_GET_RFCHIP_VERID = 91,	/* Get RF chip version id */
+	IEEE80211_PARAM_ADD_WDS_MAC_DOWN = 92,	/* Add MAC address for WDS downlink peer */
+	IEEE80211_PARAM_SHORT_GI = 93,		/* Set to 1 for turning on SGI */
+	IEEE80211_PARAM_LINK_LOSS = 94,		/* Set to 1 for turning on Link Loss feature */
+	IEEE80211_PARAM_BCN_MISS_THR = 95,	/* Set to 0 for default value (50 Beacons). */
+	IEEE80211_PARAM_FORCE_SMPS = 96,	/* Force the SMPS mode to transition the mode (STA) - includes
+						 * sending out the ACTION frame to the AP. */
+	IEEE80211_PARAM_FORCEMICERROR = 97,	/* Force a MIC error - does loopback through the MUC back up to QDRV thence
+						 * through the normal TKIP MIC error path. */
+	IEEE80211_PARAM_ENABLECOUNTERMEASURES = 98, /* Enable/disable countermeasures */
+	IEEE80211_PARAM_IMPLICITBA = 99,	/* Set the implicit BA flags in the QIE */
+	IEEE80211_PARAM_CLIENT_REMOVE = 100,	/* Remove clients but DON'T deauth them */
+	IEEE80211_PARAM_SHOWMEM = 101,		/* If debug build for MALLOC/FREE, show the summary view */
+	IEEE80211_PARAM_SCANSTATUS = 102,	/* Get scanning state */
+	IEEE80211_PARAM_GLOBAL_BA_CONTROL = 103, /* Set the global BA flags */
+	IEEE80211_PARAM_NO_SSID_ASSOC = 104,	/* Enable/disable associations without SSIDs */
+	IEEE80211_PARAM_FIXED_SGI = 105,	/* Choose between node based SGI or fixed SGI */
+	IEEE80211_PARAM_CONFIG_TXPOWER = 106,	/* configure TX power for a band (start chan to stop chan) */
+	IEEE80211_PARAM_SKB_LIST_MAX = 107,	/* Configure the max len of the skb list shared b/n drivers */
+	IEEE80211_PARAM_VAP_STATS = 108,		/* Show VAP stats */
+	IEEE80211_PARAM_RATE_CTRL_FLAGS = 109,  /* Configure flags to tweak rate control algorithm */
+	IEEE80211_PARAM_LDPC = 110, /* Enabling/disabling LDPC */
+	IEEE80211_PARAM_DFS_FAST_SWITCH = 111,  /* On detection of radar, select a non-DFS channel and switch immediately */
+	IEEE80211_PARAM_11N_40_ONLY_MODE = 112, /* Support for 11n 40MHZ only mode */
+	IEEE80211_PARAM_AMPDU_DENSITY = 113,	/* AMPDU DENSITY CONTROL */
+	IEEE80211_PARAM_SCAN_NO_DFS = 114,	/* On detection of radar, avoid DFS channels; AP only */
+	IEEE80211_PARAM_REGULATORY_REGION = 115, /* set the regulatory region */
+	IEEE80211_PARAM_CONFIG_BB_INTR_DO_SRESET = 116, /* enable or disable sw reset for BB interrupt */
+	IEEE80211_PARAM_CONFIG_MAC_INTR_DO_SRESET = 117, /* enable or disable sw reset for MAC interrupt */
+	IEEE80211_PARAM_CONFIG_WDG_DO_SRESET = 118, /* enable or disable sw reset triggered by watchdog */
+	IEEE80211_PARAM_TRIGGER_RESET = 119,	/* trigger reset for MAC/BB */
+	IEEE80211_PARAM_INJECT_INVALID_FCS = 120, /* inject bad FCS to induce tx hang */
+	IEEE80211_PARAM_CONFIG_WDG_SENSITIVITY = 121, /* higher value means less sensitive */
+	IEEE80211_PARAM_SAMPLE_RATE = 122,	/* Set data sampling rate */
+	IEEE80211_PARAM_MCS_CAP = 123,		/* Configure an MCS cap rate - for debugging */
+	IEEE80211_PARAM_MAX_MGMT_FRAMES = 124,	/* Max number of mgmt frames not complete */
+	IEEE80211_PARAM_MCS_ODD_EVEN = 125,	/* Configure the rate adapt algorithm to only use odd or even MCSs */
+	IEEE80211_PARAM_BLACKLIST_GET = 126,	/* List blacklisted stations. */
+	IEEE80211_PARAM_BA_MAX_WIN_SIZE = 128,  /* Maximum BA window size allowed on TX and RX */
+	IEEE80211_PARAM_RESTRICTED_MODE = 129,	/* Enable or disable restricted mode */
+	IEEE80211_PARAM_BB_MAC_RESET_MSGS = 130, /* Enable / disable display of BB and MAC reset messages */
+	IEEE80211_PARAM_PHY_STATS_MODE = 131,	/* Mode for get_phy_stats */
+	IEEE80211_PARAM_BB_MAC_RESET_DONE_WAIT = 132, /* Set max wait for tx or rx before reset (secs) */
+	IEEE80211_PARAM_MIN_DWELL_TIME_ACTIVE = 133,  /* min dwell time for an active channel */
+	IEEE80211_PARAM_MIN_DWELL_TIME_PASSIVE = 134, /* min dwell time for a passive channel */
+	IEEE80211_PARAM_MAX_DWELL_TIME_ACTIVE = 135,  /* max dwell time for an active channel */
+	IEEE80211_PARAM_MAX_DWELL_TIME_PASSIVE = 136, /* max dwell time for a passive channel */
+	IEEE80211_PARAM_TX_AGG_TIMEOUT = 137, /* Configure timeout for TX aggregation */
+	IEEE80211_PARAM_LEGACY_RETRY_LIMIT = 138, /* Times to retry sending non-AMPDU packets (0-16) per rate */
+	IEEE80211_PARAM_TRAINING_COUNT = 139,	/* Training count for rate retry algorithm (QoS NULL to STAs after assoc) */
+	IEEE80211_PARAM_DYNAMIC_AC = 140,	/* Enable / disable dynamic 1 bit auto correlation algo */
+	IEEE80211_PARAM_DUMP_TRIGGER = 141,	/* Request immediate dump */
+	IEEE80211_PARAM_DUMP_TCM_FD = 142,	/* Dump TCM frame descriptors */
+	IEEE80211_PARAM_RXCSR_ERR_ALLOW = 143,	/* allow or disallow errors packets passed to MuC */
+	IEEE80211_PARAM_STOP_FLAGS = 144,	/* Alter flags where a debug halt would be performed on error conditions */
+	IEEE80211_PARAM_CHECK_FLAGS = 145,	/* Alter flags for additional runtime checks */
+	IEEE80211_PARAM_RX_CTRL_FILTER = 146,   /* Set the control packet filter on hal. */
+	IEEE80211_PARAM_SCS = 147,		/* ACI/CCI Detection and Mitigation*/
+	IEEE80211_PARAM_ALT_CHAN = 148,		/* set the chan to jump to if radar is detected */
+	IEEE80211_PARAM_QTN_BCM_WAR = 149, /* Workaround for BCM receiver not accepting last aggr */
+	IEEE80211_PARAM_GI_SELECT = 150,	/* Enable or disable dynamic GI selection */
+	IEEE80211_PARAM_RADAR_NONOCCUPY_PERIOD = 151,	/* Specify non-occupancy period for radar */
+	IEEE80211_PARAM_RADAR_NONOCCUPY_ACT_SCAN = 152,	/* non-occupancy expire scan/no-action */
+	IEEE80211_PARAM_MC_LEGACY_RATE = 153, /* Legacy multicast rate table */
+	IEEE80211_PARAM_LDPC_ALLOW_NON_QTN = 154, /* Allow non QTN nodes to use LDPC */
+	IEEE80211_PARAM_FWD_UNKNOWN_MC = 155,	/* forward unknown IP multicast */
+	IEEE80211_PARAM_BCST_4 = 156, /* Reliable (4 addr encapsulated) broadcast to all clients */
+	IEEE80211_PARAM_AP_FWD_LNCB = 157, /* AP forward LNCB packets from the STA to other STAs */
+	IEEE80211_PARAM_PPPC_SELECT = 158, /* Per packet power control */
+	IEEE80211_PARAM_TEST_LNCB = 159, /* Test LNCB code - leaks, drops etc. */
+	IEEE80211_PARAM_STBC = 160, /* Enabling/disabling STBC */
+	IEEE80211_PARAM_RTS_CTS = 161, /* Enabling/disabling RTS-CTS */
+	IEEE80211_PARAM_GET_DFS_CCE = 162,	/* Get most recent DFS Channel Change Event */
+	IEEE80211_PARAM_GET_SCS_CCE = 163,	/* Get most recent SCS (ACI/CCI) Channel Change Event */
+	IEEE80211_PARAM_GET_CH_INUSE = 164,	/* Enable printing of channels in Use at end of scan */
+	IEEE80211_PARAM_RX_AGG_TIMEOUT = 165,	/* RX aggregation timeout value (ms) */
+	IEEE80211_PARAM_FORCE_MUC_HALT = 166,	/* Force MUC halt debug code. */
+	IEEE80211_PARAM_FORCE_ENABLE_TRIGGERS= 167,	/* Enable trace triggers */
+	IEEE80211_PARAM_FORCE_MUC_TRACE = 168,	/* MuC trace force without halt */
+	IEEE80211_PARAM_BK_BITMAP_MODE = 169,   /* back bit map mode set */
+	IEEE80211_PARAM_UNUSED = 170,		/* Not in use anymore, can be reassigned */
+	IEEE80211_PARAM_MUC_FLAGS = 171,	/* MuC flags */
+	IEEE80211_PARAM_HT_NSS_CAP = 172,	/* Set max spatial streams for HT mode */
+	IEEE80211_PARAM_ASSOC_LIMIT = 173,	/* STA assoc limit */
+	IEEE80211_PARAM_PWR_ADJUST_SCANCNT = 174,	/* Enable power Adjust if nearby stations don't associate */
+	IEEE80211_PARAM_PWR_ADJUST = 175,	/* ioctl to adjust rx gain */
+	IEEE80211_PARAM_PWR_ADJUST_AUTO = 176,	/* Enable auto RX gain adjust when associated */
+	IEEE80211_PARAM_UNKNOWN_DEST_ARP = 177,	/* Send ARP requests for unknown destinations */
+	IEEE80211_PARAM_UNKNOWN_DEST_FWD = 178,	/* Send unknown dest pkt to all bridge STAs */
+	IEEE80211_PARAM_DBG_MODE_FLAGS = 179,	/* set/clear debug mode flags */
+	IEEE80211_PARAM_ASSOC_HISTORY = 180,	/* record of remote nodes that have associated by MAC address */
+	IEEE80211_PARAM_CSW_RECORD = 181,	/* get channel switch record data */
+	IEEE80211_PARAM_RESTRICT_RTS = 182,     /* HW xretry failures before switching to RTS mode */
+	IEEE80211_PARAM_RESTRICT_LIMIT = 183,   /* RTS xretry failures before starting restricted mode */
+	IEEE80211_PARAM_AP_ISOLATE = 184,	/* set ap isolation mode */
+	IEEE80211_PARAM_IOT_TWEAKS = 185,	/* mask to switch on / off IOT tweaks */
+	IEEE80211_PARAM_SWRETRY_AGG_MAX = 186,	/* max sw retries for ampdus */
+	IEEE80211_PARAM_SWRETRY_NOAGG_MAX = 187,/* max sw retries for non-agg mpdus */
+	IEEE80211_PARAM_BSS_ASSOC_LIMIT = 188, /* STA assoc limit for a VAP */
+	IEEE80211_PARAM_VSP_NOD_DEBUG = 190,	/* turn on/off NOD debugs for VSP */
+	IEEE80211_PARAM_CCA_PRI = 191,		/* Primary CCA threshold */
+	IEEE80211_PARAM_CCA_SEC = 192,		/* Secondary CCA threshold */
+	IEEE80211_PARAM_DYN_AGG_TIMEOUT = 193,	/* Enable feature which try to prevent unnecessary waiting of aggregate before sending */
+	IEEE80211_PARAM_HW_BONDING = 194,	/* HW bonding option */
+	IEEE80211_PARAM_PS_CMD = 195,		/* Command to enable, disable, etc probe select for matrices */
+	IEEE80211_PARAM_PWR_SAVE = 196,		/* Power save parameter ctrl */
+	IEEE80211_PARAM_DBG_FD = 197,		/* Debug FD alloc/free */
+	IEEE80211_PARAM_DISCONN_CNT = 198,	/* get count of disconnection event */
+	IEEE80211_PARAM_FAST_REASSOC = 199,	/* Do a fast reassociation */
+	IEEE80211_PARAM_SIFS_TIMING = 200,	/* SIFS timing */
+	IEEE80211_PARAM_TEST_TRAFFIC = 201,	/* Test Traffic start|stop control */
+	IEEE80211_PARAM_TX_AMSDU = 202,		/* Disable/enable AMSDU and/or Adaptive AMSDU for transmission to Quantenna clients */
+	IEEE80211_PARAM_SCS_DFS_REENTRY_REQUEST = 203,	/* DFS re-entry request from SCS */
+	IEEE80211_PARAM_QCAT_STATE = 204,	/* QCAT state information */
+	IEEE80211_PARAM_RALG_DBG = 205,		/* Rate adaptation debugging */
+	IEEE80211_PARAM_PPPC_STEP = 206,	/* PPPC step size control */
+	IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_ACTIVE = 207,  /* Quantenna bgscan dwell time for an active channel */
+	IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_PASSIVE = 208, /* Quantenna bgscan dwell time for a passive channel */
+	IEEE80211_PARAM_QTN_BGSCAN_DEBUG = 209,	/* Quantenna background scan debugging */
+	IEEE80211_PARAM_CONFIG_REGULATORY_TXPOWER = 210,	/* configure regulatory TX power for a band (start chan to stop chan) */
+	IEEE80211_PARAM_SINGLE_AGG_QUEUING = 211,	/* Queue only AMPDU fd at a time on a given tid till all sw retries are done */
+	IEEE80211_PARAM_CSA_FLAG = 212,         /* Channel switch announcement flag */
+	IEEE80211_PARAM_BR_IP_ADDR = 213,
+	IEEE80211_PARAM_REMAP_QOS = 214,	/* Command to enable, disable, qos remap feature, asked by customer */
+	IEEE80211_PARAM_DEF_MATRIX = 215,	/* Use default expansion matrices */
+	IEEE80211_PARAM_SCS_CCA_INTF = 216,	/* CCA interference for a channel */
+	IEEE80211_PARAM_CONFIG_TPC_INTERVAL = 217,	/* periodical tpc request interval */
+	IEEE80211_PARAM_TPC_QUERY = 218,	/* enable or disable tpc request periodically */
+	IEEE80211_PARAM_TPC = 219,		/* tpc feature enable/disable flag */
+	IEEE80211_PARAM_CACSTATUS = 220,	/* Get CAC status */
+	IEEE80211_PARAM_RTSTHRESHOLD = 221,	/* Get/Set RTS Threshold */
+	IEEE80211_PARAM_BA_THROT = 222,         /* Manual BA throttling */
+	IEEE80211_PARAM_TX_QUEUING_ALG = 223,	/* MuC TX queuing algorithm */
+	IEEE80211_PARAM_BEACON_ALLOW = 224,	/* To en/disable beacon rx when associated as STA*/
+	IEEE80211_PARAM_1BIT_PKT_DETECT = 225,  /* enable/disable 1bit pkt detection */
+	IEEE80211_PARAM_WME_THROT = 226,	/* Manual WME throttling */
+	IEEE80211_PARAM_ENABLE_11AC = 227,	/* Enable-disable 11AC feature in Topaz */
+	IEEE80211_PARAM_FIXED_11AC_TX_RATE = 228,	/* Set 11AC mcs */
+	IEEE80211_PARAM_GENPCAP = 229,		/* WMAC tx/rx pcap ring buffer */
+	IEEE80211_PARAM_CCA_DEBUG = 230,	/* Debug of CCA */
+	IEEE80211_PARAM_STA_DFS	= 231,		/* Enable or disable station DFS */
+	IEEE80211_PARAM_OCAC = 232,		/* Off-channel CAC */
+	IEEE80211_PARAM_CCA_STATS_PERIOD = 233,	/* the period for updating CCA stats in MuC */
+	IEEE80211_PARAM_RADAR_BW = 235,		/* Set radar filter mode */
+	IEEE80211_PARAM_TDLS_DISC_INT = 236,	/* Set TDLS discovery interval */
+	IEEE80211_PARAM_TDLS_PATH_SEL_WEIGHT = 237,	/* The weight of path selection algorithm, 0 means always to use TDLS link */
+	IEEE80211_PARAM_DAC_DBG = 238,		/* dynamic ac debug */
+	IEEE80211_PARAM_CARRIER_ID = 239,	/* Get/Set carrier ID */
+	IEEE80211_PARAM_SWRETRY_SUSPEND_XMIT = 240,	/* Max sw retries when sending frames is suspended */
+	IEEE80211_PARAM_DEACTIVE_CHAN_PRI = 241,/* Deactive channel as being used as primary channel */
+	IEEE80211_PARAM_RESTRICT_RATE = 242,	/* Packets per second sent when in Tx restrict mode */
+	IEEE80211_PARAM_AUC_RX_DBG = 243,	/* AuC rx debug command */
+	IEEE80211_PARAM_RX_ACCELERATE = 244,	/* Enable/Disable Topaz MuC rx accelerate */
+	IEEE80211_PARAM_RX_ACCEL_LOOKUP_SA = 245,	/* Enable/Disable lookup SA in FWT for rx accelerate */
+	IEEE80211_PARAM_TX_MAXMPDU = 246,		/* Set Max MPDU size to be supported */
+	/* FIXME 247 is obsolete and do not reuse */
+	IEEE80211_PARAM_SPECIFIC_SCAN = 249,	/* Just perform specific SSID scan */
+	/* FIXME 250 is obsolete and do not reuse */
+	IEEE80211_PARAM_TRAINING_START = 251,	/* restart rate training to a particular node */
+	IEEE80211_PARAM_AUC_TX_DBG = 252,	/* AuC tx debug command */
+	IEEE80211_PARAM_AC_INHERITANCE = 253,	/* promote AC_BE to use aggressive medium contention */
+	IEEE80211_PARAM_NODE_OPMODE = 254,	/* Set bandwidth and NSS used for a particular node */
+	IEEE80211_PARAM_TACMAP = 255,		/* Config TID AC and priority at TAC_MAP, debug only */
+	IEEE80211_PARAM_VAP_PRI = 256,		/* Config priority for VAP, used for TID priority at TAC_MAP */
+	IEEE80211_PARAM_AUC_QOS_SCH = 257,	/* Tune QoS scheduling in AuC */
+	IEEE80211_PARAM_TXBF_IOT = 258,         /* turn on/off TxBF IOT to non QTN node */
+	IEEE80211_PARAM_CONGEST_IDX = 259,	/* Current channel congestion index */
+	IEEE80211_PARAM_SPEC_COUNTRY_CODE = 260,	/* Set country code for EU region */
+	IEEE80211_PARAM_AC_Q2Q_INHERITANCE = 261,	/* promote AC_BE to use aggressive medium contention - Q2Q case */
+	IEEE80211_PARAM_1SS_AMSDU_SUPPORT = 262,	/* Enable-Disable AMSDU support for 1SS devices - phone and tablets */
+	IEEE80211_PARAM_VAP_PRI_WME = 263,	/* Automatic adjusting WME bss param based on VAP priority */
+	IEEE80211_PARAM_MICHAEL_ERR_CNT = 264,	/* total number of TKIP MIC errors */
+	IEEE80211_PARAM_DUMP_CONFIG_TXPOWER = 265,	/* Dump configured txpower for all channels */
+	IEEE80211_PARAM_EMI_POWER_SWITCHING = 266,	/* Enable/Disable EMI power switching */
+	IEEE80211_PARAM_CONFIG_BW_TXPOWER = 267,	/* Configure the TX powers different bandwidths */
+	IEEE80211_PARAM_SCAN_CANCEL = 268,		/* Cancel any ongoing scanning */
+	IEEE80211_PARAM_VHT_NSS_CAP = 269,	/* Set max spatial streams for VHT mode */
+	IEEE80211_PARAM_FIXED_BW = 270,		/* Configure fixed tx bandwidth without changing BSS bandwidth */
+	IEEE80211_PARAM_SFS = 271,		/* Smart Feature Select commands */
+	IEEE80211_PARAM_TUNEPD = 272,       /* Specify number of tuning packets to send for power detector tuning */
+	IEEE80211_PARAM_TUNEPD_DONE = 273,              /* Specify number of tuning packets to send for power detector tuning */
+	IEEE80211_PARAM_CONFIG_PMF = 274,       /* Enable/Disable 802.11w / PMF */
+	IEEE80211_PARAM_AUTO_CCA_ENABLE = 275,	/* Enable/disable auto-cca-threshold feature */
+	IEEE80211_PARAM_AUTO_CCA_PARAMS = 276,	/* Configure the threshold parameter  */
+	IEEE80211_PARAM_AUTO_CCA_DEBUG = 277,	/* Configure the auto-cca debug flag */
+	IEEE80211_PARAM_INTRA_BSS_ISOLATE = 278,/* Intra BSS isolation */
+	IEEE80211_PARAM_BSS_ISOLATE = 279,      /* BSS isolation */
+	IEEE80211_PARAM_BF_RX_STS = 280,	/* Set max BF sounding receive STS */
+	IEEE80211_PARAM_WOWLAN = 281,
+	IEEE80211_PARAM_WDS_MODE = 282,	/* WDS mode */
+	IEEE80211_PARAM_EXTENDER_ROLE = 283, /* EXTENDER Device role */
+	IEEE80211_PARAM_EXTENDER_MBS_BEST_RSSI = 284, /* MBS best rssi threshold */
+	IEEE80211_PARAM_EXTENDER_RBS_BEST_RSSI = 285, /* RBS best rssi threshold */
+	IEEE80211_PARAM_EXTENDER_MBS_WGT = 286, /* MBS RSSI weight */
+	IEEE80211_PARAM_EXTENDER_RBS_WGT = 287, /* RBS RSSI weight */
+	IEEE80211_PARAM_AIRFAIR = 288,              /* Set airtime fairness configuration */
+	/* FIXME 289 is obsolete and do not reuse */
+	IEEE80211_PARAM_RX_AMSDU_ENABLE = 290,      /* RX AMSDU: 0 - disable, 1 - enable, 2 - enable dynamically */
+	IEEE80211_PARAM_DISASSOC_REASON = 291,	/* Get Disassoc reason */
+	IEEE80211_PARAM_TX_QOS_SCHED = 292,	/* TX QoS hold-time table */
+	IEEE80211_PARAM_RX_AMSDU_THRESHOLD_CCA = 293,	/* The threshold of cca intf for dynamic RX AMSDU */
+	IEEE80211_PARAM_RX_AMSDU_THRESHOLD_PMBL = 294,	/* The threshold of pmbl error for dynamic RX AMSDU */
+	IEEE80211_PARAM_RX_AMSDU_PMBL_WF_SP = 295,	/* The weight factor of short preamble error for calculating the pmbl error */
+	IEEE80211_PARAM_RX_AMSDU_PMBL_WF_LP = 296,	/* The weight factor of long preamble error for calculating the pmbl error */
+	IEEE80211_PARAM_PEER_RTS_MODE = 297,		/* Mode setting for peer RTS */
+	IEEE80211_PARAM_DYN_WMM = 298,			/* Dynamic WMM enable */
+	IEEE80211_PARAM_BA_SETUP_ENABLE = 299,	/* enable the BA according the rssi threshold, 0 - disable, 1 - enable */
+	IEEE80211_PARAM_AGGRESSIVE_AGG = 300,	/* Compound aggressive agg params */
+	IEEE80211_PARAM_BB_PARAM = 301,	/* Baseband param */
+	IEEE80211_PARAM_VAP_TX_AMSDU = 302,     /* Enable/disable A-MSDU for VAP */
+	IEEE80211_PARAM_PC_OVERRIDE = 303,              /* RSSI based Power-constraint override */
+	IEEE80211_PARAM_NDPA_DUR = 304,         /* set vht NDPA duration field */
+	IEEE80211_PARAM_SU_TXBF_PKT_CNT = 305,  /* set the pkt cnt per txbf interval to fire SU sounding to a node */
+	IEEE80211_PARAM_MAX_AGG_SIZE = 306,	/* Maximum AMPDU size in bytes */
+	IEEE80211_PARAM_TQEW_DESCR_LIMIT = 307,     /* Set/Get tqew descriptors limit */
+	IEEE80211_PARAM_SCAN_TBL_LEN_MAX = 308,
+	IEEE80211_PARAM_CS_THRESHOLD = 309,	/* Carrier Sense threshold */
+	IEEE80211_PARAM_TDLS_PROHIBIT_PATH_SEL = 310,	/* Enable/Disable TDLS path selection */
+	IEEE80211_PARAM_TDLS_MODE = 311,	/* TDLS path select mode */
+	IEEE80211_PARAM_TDLS_STATUS = 312,	/* TDLS status, 0 disable, 1 enable */
+	IEEE80211_PARAM_TDLS_TIMEOUT_TIME = 313,
+	IEEE80211_PARAM_TDLS_TRAINING_PKT_CNT = 314,	/* TDLS training packet count */
+	IEEE80211_PARAM_TDLS_PATH_SEL_PPS_THRSHLD = 315,	/* TDLS path select packet per second threshold */
+	IEEE80211_PARAM_TDLS_PATH_SEL_RATE_THRSHLD = 316,	/* TDLS path select rate threshold */
+	IEEE80211_PARAM_TDLS_VERBOSE = 317,	/* TDLS debug info level */
+	IEEE80211_PARAM_TDLS_MIN_RSSI = 318,	/* TDLS minimum valid RSSI */
+	IEEE80211_PARAM_TDLS_SWITCH_INTS = 319,	/* TDLS switch intervals */
+	IEEE80211_PARAM_TDLS_RATE_WEIGHT = 320,	/* TDLS accumulated rate weight */
+	IEEE80211_PARAM_TDLS_UAPSD_INDICAT_WND = 321,	/* TDLS path select rate threshold */
+	IEEE80211_PARAM_TDLS_CS_PROHIBIT = 322,	/* Prohibit TDLS channel switch */
+	IEEE80211_PARAM_TDLS_CS_MODE = 323,	/* Set TDLS channel switch mode */
+	IEEE80211_PARAM_TDLS_OFF_CHAN = 324,	/* TDLS off channel */
+	IEEE80211_PARAM_TDLS_OFF_CHAN_BW = 325,	/* TDLS off channel bandwidth */
+	IEEE80211_PARAM_TDLS_NODE_LIFE_CYCLE = 326, /* TDLS node life cycle */
+	IEEE80211_PARAM_NODEREF_DBG = 327,	/* show history of node reference debug info */
+	IEEE80211_PARAM_SWFEAT_DISABLE = 329,	/* disable an optional software feature */
+	IEEE80211_PARAM_11N_AMSDU_CTRL = 330,   /* ctrl TX AMSDU of IP ctrl packets for 11N STAs */
+	IEEE80211_PARAM_CCA_FIXED = 331,
+	IEEE80211_PARAM_CCA_SEC40 = 332,
+	IEEE80211_PARAM_CS_THRESHOLD_DBM = 333,
+	IEEE80211_PARAM_EXTENDER_VERBOSE = 334, /* EXTENDER Debug Level */
+	IEEE80211_PARAM_FLUSH_SCAN_ENTRY = 335,	/* Flush scan entry */
+	IEEE80211_PARAM_SCAN_OPCHAN = 336,	/* Scan operating channel periodically in STA mode */
+	IEEE80211_PARAM_DUMP_PPPC_TX_SCALE_BASES = 337,	/* Dump the current PPPC tx scale bases */
+	IEEE80211_PARAM_VHT_OPMODE_BW = 338, /* Controls peer transmitter's BW */
+	IEEE80211_PARAM_HS2 = 339,		/* Enable/Disable HS2.0 */
+	IEEE80211_PARAM_DGAF_CONTROL = 340,	/* Downstream Group-Addressed Forwarding (DGAF) */
+	IEEE80211_PARAM_PROXY_ARP = 341,        /* Proxy ARP */
+	IEEE80211_PARAM_GLOBAL_FIXED_TX_SCALE_INDEX = 342,	/* Set global fixed tx scale index, regardless pppc probe index and tx scale bases */
+	IEEE80211_PARAM_RATE_TRAIN_DBG = 343,			/* Rate training */
+	IEEE80211_PARAM_NDPA_LEGACY_FORMAT = 344,	/* Configure PHY format for NDPA frame */
+	IEEE80211_PARAM_QTN_HAL_PM_CORRUPT_DEBUG = 345,	/* flag to enable debug qtn packet memory corruption */
+	IEEE80211_PARAM_UPDATE_MU_GRP = 346,	/* Update MU group/position */
+	IEEE80211_PARAM_FIXED_11AC_MU_TX_RATE = 347,	/* Set 11AC MU fixed mcs */
+	IEEE80211_PARAM_MU_DEBUG_LEVEL = 348,	/* Set 11AC MU debug level */
+	IEEE80211_PARAM_MU_ENABLE = 349,	/* Enable/disable MU transmission */
+	IEEE80211_PARAM_INST_MU_GRP_QMAT = 350,	/* Install qmat for mu group */
+	IEEE80211_PARAM_DELE_MU_GRP_QMAT = 351,	/* Delete/disable qmat in mu group */
+	IEEE80211_PARAM_GET_MU_GRP = 352,	/* Retrieve MU group and Q matrix info */
+	IEEE80211_PARAM_EN_MU_GRP_QMAT = 353,	/* Enable qmat in mu group */
+	IEEE80211_PARAM_MU_DEBUG_FLAG = 354,	/* Set or clear MU debug flag */
+	IEEE80211_PARAM_DSP_DEBUG_LEVEL = 355,	/* DSP debug verbosity level */
+	IEEE80211_PARAM_DSP_DEBUG_FLAG = 356,	/* Set DSP debug flag */
+	IEEE80211_PARAM_SET_CRC_ERR = 357,	/* Enables/disables CRC error to be passed to packet memory*/
+	IEEE80211_PARAM_MU_SWITCH_USR_POS = 358, /* Switch MU user_pos for debugging MU interference */
+	IEEE80211_PARAM_SET_GRP_SND_PERIOD = 359, /* Sets group select sounding period */
+	IEEE80211_PARAM_SET_PREC_SND_PERIOD = 360, /* Sets precoding sounding period */
+	IEEE80211_PARAM_INST_1SS_DEF_MAT_ENABLE = 361,		/* Enable install 1ss default matrix feature */
+	IEEE80211_PARAM_INST_1SS_DEF_MAT_THRESHOLD = 362,	/* Configure the threshold for install 1ss default matrix */
+	IEEE80211_PARAM_SCAN_RESULTS_CHECK_INV = 363,	/* interval to check scan results */
+	IEEE80211_PARAM_TDLS_OVER_QHOP_ENABLE = 364,	/* Enable TDLS over qhop */
+	IEEE80211_PARAM_DSP_PRECODING_ALGORITHM = 365, /*select precoding algorithm, projection(1) or BD(2)*/
+	IEEE80211_PARAM_DSP_RANKING_ALGORITHM = 366, /*select ranking algorithm, projection(1) or BD(2)*/
+	IEEE80211_PARAM_DIS_MU_GRP_QMAT = 367, /* Disable QMat for MU group */
+	IEEE80211_PARAM_GET_MU_GRP_QMAT = 368, /* Get QMat status */
+	IEEE80211_PARAM_MU_USE_EQ = 369, /* Equalizer status */
+	IEEE80211_PARAM_INITIATE_TXPOWER_TABLE = 370,	/* Initiate TX power table for a band with one single value */
+	IEEE80211_PARAM_L2_EXT_FILTER = 371,        /* External L2 Filter */
+	IEEE80211_PARAM_L2_EXT_FILTER_PORT = 372,        /* External L2 Filter port */
+	IEEE80211_PARAM_MU_AIRTIME_PADDING = 373,	/* Airtime padding for MU/SU Tx decision */
+	IEEE80211_PARAM_MU_AMSDU_SIZE = 374,        /* Set Fixed MU AMSDU size */
+	IEEE80211_PARAM_SDFS = 375,		/* Seamless DFS, same as PARAM_OCAC */
+	IEEE80211_PARAM_DSP_MU_RANK_CRITERIA = 376, /* select mu ranking criteria */
+	IEEE80211_PARAM_ENABLE_RX_OPTIM_STATS = 378,        /* Enable RX optim stats */
+	IEEE80211_PARAM_SET_UNICAST_QUEUE_NUM = 379,     /* Set Max congest queue num for unicast */
+	IEEE80211_PARAM_MRC_ENABLE = 380,        /* Set Management Frame Rate Control feature */
+	IEEE80211_PARAM_VCO_LOCK_DETECT_MODE = 381,	/* Get/Set lock detect functionality enabled/disabled */
+	IEEE80211_PARAM_OBSS_EXEMPT_REQ = 382,  /* OBSS scan exemption request*/
+	IEEE80211_PARAM_OBSS_TRIGG_SCAN_INT = 383,  /* OBSS scan exemption request*/
+	IEEE80211_PARAM_PREF_BAND = 384,	/* Preferred band on dual band mode */
+	IEEE80211_PARAM_BW_2_4GHZ = 385,	/* Bandwidth in 2.4ghz band */
+	IEEE80211_PARAM_ALLOW_VHT_TKIP = 386,	/* allow VHT even only TKIP is set as cipher, for WFA testbed */
+	IEEE80211_PARAM_AUTO_CS_ENABLE = 387,	/* Enable/disable auto-cs-threshold feature */
+	IEEE80211_PARAM_AUTO_CS_PARAMS = 388,	/* Configure the threshold parameter  */
+	IEEE80211_PARAM_QTN_BGSCAN_DURATION_ACTIVE = 389,  /* Quantenna bgscan duration for an active channel */
+	IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_FAST = 390, /* Quantenna bgscan duration for a passive channel */
+	IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_NORMAL = 391, /* Quantenna bgscan duration for a passive channel */
+	IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_SLOW = 392, /* Quantenna bgscan duration for a passive channel */
+	IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_FAST = 393, /* Quantenna bgscan fat threshold for passive fast mode */
+	IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_NORMAL = 394, /* Quantenna bgscan fat threshold for passive normal mode */
+	IEEE80211_PARAM_QTN_BLOCK_BSS = 395, /* Block any association request for specified BSS */
+	IEEE80211_PARAM_VHT_2_4GHZ = 396,	/* Quantenna 2.4G band feature -- VHT support */
+	IEEE80211_PARAM_PHY_MODE = 397,		/* Hardware phy mode */
+	IEEE80211_PARAM_BEACONING_SCHEME = 398,	/* the mapping between 8 VAPs and 4 HW event queues for beacon */
+	IEEE80211_PARAM_STA_BMPS = 399,	/* enable/disable STA BMPS */
+	IEEE80211_PARAM_40MHZ_INTOLERANT = 400,	/* 20/40 coexistence - 40 MHz intolerant */
+	IEEE80211_PARAM_ANTENNA_USAGE = 401,	/* how many antennas should be used */
+	IEEE80211_PARAM_DISABLE_TX_BA = 402,	/* enable/disable TX Block Ack establishment */
+	IEEE80211_PARAM_DECLINE_RX_BA = 403,	/* permit/decline RX Block Ack establishment */
+	IEEE80211_PARAM_VAP_STATE = 404,	/* Enable or disable a VAP */
+	IEEE80211_PARAM_TX_AIRTIME_CONTROL = 405, /* start or stop tx airtime accumulation */
+	IEEE80211_PARAM_OSEN = 406,
+	IEEE80211_PARAM_OBSS_SCAN = 407,	/* Enable or disable OBSS scan */
+	IEEE80211_PARAM_SHORT_SLOT = 408,	/* short slot */
+	IEEE80211_PARAM_SET_RTS_BW_DYN = 409,   /* set RTS bw signal bw and dynamic flag */
+	IEEE80211_PARAM_SET_CTS_BW = 410,   /* force the CTS BW by setting secondary 20/40 channel CCA busy */
+	IEEE80211_PARAM_VHT_MCS_CAP = 411,	/* Set MCS capability for VHT mode, for WFA testbed */
+	IEEE80211_PARAM_VHT_OPMODE_NOTIF = 412,	/* Override OpMode Notification IE, for WFA testbed */
+	IEEE80211_PARAM_FIRST_STA_IN_MU_SOUNDING = 413, /* select STA which will be first in mu sounding */
+	IEEE80211_PARAM_USE_NON_HT_DUPLICATE_MU = 414, /* Allows usage Non-HT duplicate for MU NDPA and Report_Poll using BW signal TA */
+	IEEE80211_PARAM_BG_PROTECT = 415,	/* 802.11g protection */
+	IEEE80211_PARAM_SET_MUC_BW = 416,	/* Set muc bandwidth */
+	IEEE80211_PARAM_11N_PROTECT = 417,	/* 802.11n protection */
+	IEEE80211_PARAM_SET_MU_RANK_TOLERANCE = 418, /* MU rank tolerance */
+	IEEE80211_PARAM_MU_NDPA_BW_SIGNALING_SUPPORT = 420, /* support of receiving NDPA with bandwidth signalling TA */
+	IEEE80211_PARAM_RESTRICT_WLAN_IP = 421,	/* Block all IP packets from wifi to bridge interfaces */
+	IEEE80211_PARAM_MC_TO_UC = 422,		/* Convert L2 multicast to unicast */
+	IEEE80211_PARAM_ENABLE_BC_IOT_WAR = 423,	/* allow STS to 4 in beacon when disabled */
+	IEEE80211_PARAM_HOSTAP_STARTED = 424,   /* hostapd state */
+	IEEE80211_PARAM_WPA_STARTED = 425,	/* wpa_supplicant state */
+	IEEE80211_PARAM_MUC_SYS_DEBUG = 427, /* system debug */
+	IEEE80211_PARAM_EP_STATUS = 428,	/* get the EP STATUS */
+	IEEE80211_PARAM_EXTENDER_MBS_RSSI_MARGIN = 429,	/* MBS RSSI margin */
+	IEEE80211_PARAM_MAX_BCAST_PPS = 430,	/* Restrict the number of broadcast pkts allowed to be processed per second */
+	IEEE80211_PARAM_OFF_CHAN_SUSPEND = 431,	/* suspend/resume all off-channel mechanisms globally */
+	IEEE80211_PARAM_BSS_GROUP_ID = 432,	/* Assigns VAP (SSID) a logical group id */
+	IEEE80211_PARAM_BSS_ASSOC_RESERVE = 433,	/* Reserve associations for specified group */
+	IEEE80211_PARAM_MAX_BOOT_CAC_DURATION = 434,	/* Max boot CAC duration in seconds */
+	IEEE80211_PARAM_RX_BAR_SYNC = 435,	/* sync rx reorder window on receiving BAR */
+	IEEE80211_PARAM_GET_REG_DOMAIN_IS_EU = 436,	/* Check if regulatory region falls under EU domain*/
+	IEEE80211_PARAM_AUC_TX_AGG_DURATION = 437,
+	IEEE80211_PARAM_GET_CHAN_AVAILABILITY_STATUS = 438, /* Channel availability status */
+	IEEE80211_PARAM_STOP_ICAC = 439,
+	IEEE80211_PARAM_STA_DFS_STRICT_MODE = 440,	/* STA DFS - strict mode operation */
+	IEEE80211_PARAM_STA_DFS_STRICT_MEASUREMENT_IN_CAC = 441, /* STA DFS - Send Measurement report if radar found during CAC */
+	IEEE80211_PARAM_STA_DFS_STRICT_TX_CHAN_CLOSE_TIME = 442, /*  STA DFS - Configure channel tx close time when radar detected */
+	IEEE80211_PARAM_NEIGHBORHOOD_THRSHD = 443, /* Set the threshold for neighborhood density type */
+	IEEE80211_PARAM_NEIGHBORHOOD_TYPE = 444, /* Get the neighborhood density type */
+	IEEE80211_PARAM_NEIGHBORHOOD_COUNT = 445,/* Get the neighbor count */
+	IEEE80211_PARAM_MU_TXBF_PKT_CNT = 446, /* set the pkt cnt per txbf interval to fire mu sounding to a node */
+	IEEE80211_PARAM_DFS_CSA_CNT = 447,	/* set CSA count for reason of IEEE80211_CSW_REASON_DFS */
+	IEEE80211_PARAM_IS_WEATHER_CHANNEL = 448, /* check if it's a weather channel */
+	IEEE80211_PARAM_COEX_20_40_SUPPORT = 449, /* Enable/Disable 20/40 bss coexistence */
+	IEEE80211_PARAM_MIN_CAC_PERIOD = 450,	/* Get Min CAC period used by WifiStack, Used only for ICAC sanity checks */
+	IEEE80211_PARAM_DEVICE_MODE = 451,	/* device mode, e.g., MBS, RBS, Repeater */
+	IEEE80211_PARAM_SYNC_CONFIG = 452,	/* Master device synchronizes BSS config with slave devices */
+	IEEE80211_PARAM_AUTOCHAN_DBG_LEVEL = 456, /* set/get debug level of channel selection */
+	IEEE80211_PARAM_NAC_MONITOR_MODE = 457,    /* non associated clients monitoring mode */
+	IEEE80211_PARAM_GET_CCA_STATS = 458, /* get CCA stats */
+	IEEE80211_PARAM_OPMODE_BW_SW_EN = 459, /* enable/disable dynamic peer BW using opmode action */
+	IEEE80211_PARAM_MAX_DEVICE_BW = 460,	/* set/get the maximum supported bandwidth */
+	IEEE80211_PARAM_BW_AUTO_SELECT = 461,	/* enable/disable bandwidth automatic selection */
+	IEEE80211_PARAM_DFS_CHANS_AVAILABLE = 462, /* Check if at least one valid DFS channel is available */
+	IEEE80211_PARAM_DYNAMIC_SIFS_TIMING = 463, /* set/get SIFS timing */
+	IEEE80211_PARAM_CUR_CHAN_CHECK_REQUIRED = 464, /* Switch to check whether current channel check is required */
+	IEEE80211_PARAM_IGNORE_ICAC_SELECTION = 465, /*  Ignore ICAC selection */
+	IEEE80211_PARAM_RBS_MBS_ALLOW_TX_FRMS_IN_CAC = 466, /* Allow QHOP report frame Tx while RBS is performing CAC */
+	IEEE80211_PARAM_DFS_CHANS_AVAILABLE_FOR_DFS_REENTRY = 467, /* Check if at least one valid DFS channel is available */
+	IEEE80211_PARAM_RBS_DFS_TX_CHAN_CLOSE_TIME = 468, /*  RBS DFS - Configure channel tx close time when radar detected */
+	IEEE80211_PARAM_AUTOCHAN_CCI_INSTNT = 469,	/* set/get auto-chan mechanism cci_instnt factor */
+	IEEE80211_PARAM_AUTOCHAN_ACI_INSTNT = 470,	/* set/get auto-chan mechanism aci_instnt factor */
+	IEEE80211_PARAM_AUTOCHAN_CCI_LONGTERM = 471,	/* set/get auto-chan mechanism cci_longterm factor */
+	IEEE80211_PARAM_AUTOCHAN_ACI_LONGTERM = 472,	/* set/get auto-chan mechanism aci_longterm factor */
+	IEEE80211_PARAM_AUTOCHAN_RANGE_COST = 473,	/* set/get auto-chan mechanism range_cost factor */
+	IEEE80211_PARAM_AUTOCHAN_DFS_COST = 474,	/* set/get auto-chan mechanism dfs_cost factor */
+	IEEE80211_PARAM_AUTOCHAN_MIN_CCI_RSSI = 475,	/* set/get auto-chan mechanism min_cochan_rssi factor */
+	IEEE80211_PARAM_AUTOCHAN_MAXBW_MINBENEFIT = 476,	/* set/get auto-chan mechanism maxbw_minbenefit factor */
+	IEEE80211_PARAM_AUTOCHAN_DENSE_CCI_SPAN = 477,	/* set/get auto-chan mechanism dense_cci_span factor */
+	IEEE80211_PARAM_WEATHERCHAN_CAC_ALLOWED = 478, /* control whether weather channels CAC is allowed or not */
+	IEEE80211_PARAM_BEACON_HANG_TIMEOUT = 479,	/* Software beacon hang checking timeout, in ms */
+	IEEE80211_PARAM_QTN_OPTI_MODE = 480, /* QTN opti mode enable */
+	IEEE80211_PARAM_VOPT = 481,	/* enable/disable V optimization */
+	IEEE80211_PARAM_VMODE = 482,		/* disable/enable v test mode */
+	IEEE80211_PARAM_BB_DEAFNESS_WAR_EN = 483, /* control whether WAR for BB deafness fast recovery is enabled or not */
+	IEEE80211_PARAM_VAP_TX_AMSDU_11N = 484,     /* Enable/disable A-MSDU for 11n nodes */
+	IEEE80211_PARAM_REJECT_AUTH = 487,		/* QFDR: reject authentication requests */
+	IEEE80211_PARAM_SCAN_ONLY_FREQ = 488,		/* QFDR: trigger several following scans only for specific frequency */
+	IEEE80211_PARAM_FIX_LEGACY_RATE = 490,		/* Set fixed legacy rate */
+	IEEE80211_PARAM_COC_MOVE_TO_NONDFS_CHANNEL = 491, /* Accept/Reject COC mode when operating in DFS channel */
+	IEEE80211_PARAM_80211K_NEIGH_REPORT = 492,	/* 802.11k - neighbor report */
+	IEEE80211_PARAM_80211V_BTM = 493,
+	IEEE80211_PARAM_MOBILITY_DOMAIN = 494,		/* Mobility domain */
+	IEEE80211_PARAM_FT_OVER_DS = 495,		/* FT over DS - 802.11r */
+	IEEE80211_PARAM_SHORT_RETRY_LIMIT = 496,	/* Set the short retry limits of the frame whose size is smaller than or equal to the RTS threshold */
+	IEEE80211_PARAM_LONG_RETRY_LIMIT = 497,	/* Set the long retry limits of the frame whose size is bigger than the RTS threshold */
+	IEEE80211_PARAM_SET_DUP_RTS = 498, /* enable/disable dup-RTS bw signal and MAC address group bit */
+};
+
+#define IEEE80211_OCAC_AUTO_WITH_FIRST_DFS_CHAN 0x8000
+
+#define IEEE80211_OFFCHAN_SUSPEND_MASK		0x80000000
+#define IEEE80211_OFFCHAN_SUSPEND_MASK_S	31
+#define IEEE80211_OFFCHAN_TIMEOUT_MASK		0x7FFFFFFF
+#define IEEE80211_OFFCHAN_TIMEOUT_DEFAULT	1 /* second*/
+#define IEEE80211_OFFCHAN_TIMEOUT_MAX		60 /* second*/
+#define IEEE80211_OFFCHAN_TIMEOUT_MIN		1 /* second*/
+#define IEEE80211_OFFCHAN_TIMEOUT_AUTH		5 /* second*/
+#define IEEE80211_OFFCHAN_TIMEOUT_EAPOL		8 /* second*/
+
+#define	SIOCG80211STATS			(SIOCDEVPRIVATE+2)
+/* NB: require in+out parameters so cannot use wireless extensions, yech */
+#define	IEEE80211_IOCTL_GETKEY		(SIOCDEVPRIVATE+3)
+#define	IEEE80211_IOCTL_GETWPAIE	(SIOCDEVPRIVATE+4)
+#define	IEEE80211_IOCTL_STA_STATS	(SIOCDEVPRIVATE+5)
+#define	IEEE80211_IOCTL_STA_INFO	(SIOCDEVPRIVATE+6)
+#define	SIOC80211IFCREATE		(SIOCDEVPRIVATE+7)
+#define	SIOC80211IFDESTROY		(SIOCDEVPRIVATE+8)
+#define	IEEE80211_IOCTL_SCAN_RESULTS	(SIOCDEVPRIVATE+9)
+#define SIOCR80211STATS                 (SIOCDEVPRIVATE+0xA) /* This define always has to sync up with SIOCRDEVSTATS in /linux/sockios.h */
+#define IEEE80211_IOCTL_GET_ASSOC_TBL	(SIOCDEVPRIVATE+0xB)
+#define IEEE80211_IOCTL_GET_RATES	(SIOCDEVPRIVATE+0xC)
+#define IEEE80211_IOCTL_SET_RATES	(SIOCDEVPRIVATE+0xD)
+#define IEEE80211_IOCTL_EXT		(SIOCDEVPRIVATE+0xF) /* This command is used to support sub-ioctls */
+
+/*
+ * ioctl command IEEE80211_IOCTL_EXT is used to support sub-ioctls.
+ * The following lists the sub-ioctl numbers
+ *
+ */
+#define SIOCDEV_SUBIO_BASE		(0)
+#define SIOCDEV_SUBIO_RST_QUEUE		(SIOCDEV_SUBIO_BASE + 1)
+#define SIOCDEV_SUBIO_RADAR_STATUS	(SIOCDEV_SUBIO_BASE + 2)
+#define SIOCDEV_SUBIO_GET_PHY_STATS	(SIOCDEV_SUBIO_BASE + 3)
+#define SIOCDEV_SUBIO_DISCONN_INFO	(SIOCDEV_SUBIO_BASE + 4)
+#define SIOCDEV_SUBIO_SET_BRCM_IOCTL	(SIOCDEV_SUBIO_BASE + 5)
+#define SIOCDEV_SUBIO_SCS	        (SIOCDEV_SUBIO_BASE + 6)
+#define SIOCDEV_SUBIO_SET_SOC_ADDR_IOCTL	(SIOCDEV_SUBIO_BASE + 7) /* Command to set the SOC addr of the STB to VAP for recording */
+#define SIOCDEV_SUBIO_SET_TDLS_OPER	(SIOCDEV_SUBIO_BASE + 8)	/* Set TDLS Operation */
+#define SIOCDEV_SUBIO_WAIT_SCAN_TIMEOUT	(SIOCDEV_SUBIO_BASE + 9)
+#define SIOCDEV_SUBIO_AP_SCAN_RESULTS	(SIOCDEV_SUBIO_BASE + 10)
+#define SIOCDEV_SUBIO_GET_11H_11K_NODE_INFO	(SIOCDEV_SUBIO_BASE + 11)
+#define SIOCDEV_SUBIO_GET_DSCP2AC_MAP	(SIOCDEV_SUBIO_BASE + 12)
+#define SIOCDEV_SUBIO_SET_DSCP2AC_MAP	(SIOCDEV_SUBIO_BASE + 13)
+#define SIOCDEV_SUBIO_SET_MARK_DFS_CHAN	(SIOCDEV_SUBIO_BASE + 14)
+#define SIOCDEV_SUBIO_WOWLAN		(SIOCDEV_SUBIO_BASE + 15)
+#define SIOCDEV_SUBIO_GET_STA_AUTH	(SIOCDEV_SUBIO_BASE + 16)
+#define SIOCDEV_SUBIO_GET_STA_VENDOR	(SIOCDEV_SUBIO_BASE + 17)
+#define SIOCDEV_SUBIO_GET_STA_TPUT_CAPS	(SIOCDEV_SUBIO_BASE + 18)
+#define SIOCDEV_SUBIO_GET_SWFEAT_MAP	(SIOCDEV_SUBIO_BASE + 19)
+#define SIOCDEV_SUBIO_DI_DFS_CHANNELS	(SIOCDEV_SUBIO_BASE + 20) /* Deactivate DFS channels */
+#define SIOCDEV_SUBIO_SET_ACTIVE_CHANNEL_LIST (SIOCDEV_SUBIO_BASE + 21)
+#define SIOCDEV_SUBIO_PRINT_SWFEAT_MAP	(SIOCDEV_SUBIO_BASE + 22)
+#define SIOCDEV_SUBIO_SEND_ACTION_FRAME (SIOCDEV_SUBIO_BASE + 23)
+#define SIOCDEV_SUBIO_GET_DRIVER_CAPABILITY (SIOCDEV_SUBIO_BASE + 24)
+#define SIOCDEV_SUBIO_SET_AP_INFO	(SIOCDEV_SUBIO_BASE + 25)
+#define SIOCDEV_SUBIO_GET_LINK_QUALITY_MAX	(SIOCDEV_SUBIO_BASE + 26)
+#define SIOCDEV_SUBIO_SET_CHANNEL_POWER_TABLE	(SIOCDEV_SUBIO_BASE + 27)
+#define SIOCDEV_SUBIO_SET_WEATHER_CHAN	(SIOCDEV_SUBIO_BASE + 28)
+#define SIOCDEV_SUBIO_GET_CHANNEL_POWER_TABLE	(SIOCDEV_SUBIO_BASE + 29)
+#define SIOCDEV_SUBIO_SETGET_CHAN_DISABLED	(SIOCDEV_SUBIO_BASE + 30)
+#define SIOCDEV_SUBIO_SET_SEC_CHAN		(SIOCDEV_SUBIO_BASE + 31)
+#define SIOCDEV_SUBIO_GET_SEC_CHAN		(SIOCDEV_SUBIO_BASE + 32)
+#define SIOCDEV_SUBIO_SET_DSCP2TID_MAP		(SIOCDEV_SUBIO_BASE + 33)
+#define SIOCDEV_SUBIO_GET_DSCP2TID_MAP		(SIOCDEV_SUBIO_BASE + 34)
+#define SIOCDEV_SUBIO_GET_TX_AIRTIME		(SIOCDEV_SUBIO_BASE + 35)
+#define SIOCDEV_SUBIO_GET_CHAN_PRI_INACT	(SIOCDEV_SUBIO_BASE + 36)
+#define SIOCDEV_SUBIO_GET_SUPP_CHAN		(SIOCDEV_SUBIO_BASE + 37)
+#define SIOCDEV_SUBIO_GET_CLIENT_MACS		(SIOCDEV_SUBIO_BASE + 38)
+#define SIOCDEV_SUBIO_SAMPLE_ALL_DATA		(SIOCDEV_SUBIO_BASE + 39)
+#define SIOCDEV_SUBIO_GET_ASSOC_DATA		(SIOCDEV_SUBIO_BASE + 40)
+#define SIOCDEV_SUBIO_GET_INTERFACE_WMMAC_STATS	(SIOCDEV_SUBIO_BASE + 41)
+#define SIOCDEV_SUBIO_GET_NAC_STATS		(SIOCDEV_SUBIO_BASE + 42)
+#define SIOCDEV_SUBIO_GET_FREQ_RANGE		(SIOCDEV_SUBIO_BASE + 43)
+#define SIOCDEV_SUBIO_SET_MAC_ADDR_ACL		(SIOCDEV_SUBIO_BASE + 44)
+#define SIOCDEV_SUBIO_SET_FT_AUTH_RESP		(SIOCDEV_SUBIO_BASE + 45)
+#define SIOCDEV_SUBIO_SET_FT_ASSOC_RESP		(SIOCDEV_SUBIO_BASE + 46)
+#define SIOCDEV_SUBIO_SET_FT_REASSOC_RESP	(SIOCDEV_SUBIO_BASE + 47)
+#define SIOCDEV_SUBIO_SET_FT_ADD_NODE		(SIOCDEV_SUBIO_BASE + 48)
+#define SIOCDEV_SUBIO_GET_CCA_STATS		(SIOCDEV_SUBIO_BASE + 49)
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+#define SIOCDEV_SUBIO_SET_BSA_STATUS		(SIOCDEV_SUBIO_BASE + 50)
+#define SIOCDEV_SUBIO_GET_BSA_INTF_INFO		(SIOCDEV_SUBIO_BASE + 51)
+#define SIOCDEV_SUBIO_SET_BSA_MAC_FILTER_POLICY (SIOCDEV_SUBIO_BASE + 52)
+#define SIOCDEV_SUBIO_UPDATE_MACFILTER_LIST	(SIOCDEV_SUBIO_BASE + 53)
+#define SIOCDEV_SUBIO_GET_BSA_FAT_INFO		(SIOCDEV_SUBIO_BASE + 54)
+#define SIOCDEV_SUBIO_GET_BSA_STA_STATS		(SIOCDEV_SUBIO_BASE + 55)
+#define SIOCDEV_SUBIO_GET_BSA_ASSOC_STA_STATS	(SIOCDEV_SUBIO_BASE + 56)
+#define SIOCDEV_SUBIO_SEND_BTM_REQ_FRM		(SIOCDEV_SUBIO_BASE + 57)
+#endif
+#define IEEE80211_AG_START_RATE_INDEX	0		/* Non 802.11n initial rate index */
+
+enum L2_EXT_FILTER_PORT {
+	L2_EXT_FILTER_EMAC_0_PORT = 0,
+	L2_EXT_FILTER_EMAC_1_PORT = 1,
+	L2_EXT_FILTER_PCIE_PORT = 2
+};
+
+#ifdef CONFIG_TOPAZ_PCIE_TARGET
+	#define L2_EXT_FILTER_DEF_PORT L2_EXT_FILTER_PCIE_PORT
+#else
+	#define L2_EXT_FILTER_DEF_PORT L2_EXT_FILTER_EMAC_0_PORT
+#endif
+
+struct ieee80211_clone_params {
+	char icp_name[IFNAMSIZ];		/* device name */
+	uint16_t icp_opmode;			/* operating mode */
+	uint16_t icp_flags;			/* see below */
+#define	IEEE80211_CLONE_BSSID	0x0001		/* allocate unique mac/bssid */
+#define	IEEE80211_NO_STABEACONS	0x0002		/* Do not setup the station beacon timers */
+};
+
+enum power_table_sel {
+	PWR_TABLE_SEL_BOOTCFG_ONLY = 0,	/* Search for power table in bootcfg only */
+	PWR_TABLE_SEL_BOOTCFG_PRIOR,	/* Search for power table in bootcfg first; if not found, then search /etc/ */
+	PWR_TABLE_SEL_IMAGE_PRIOR,	/* Search for power table in /etc/ first; if not found, then search bootcfg */
+	PWR_TABLE_SEL_IMAGE_ONLY,	/* Search for power table in /etc/ only */
+	PWR_TABLE_SEL_MAX = PWR_TABLE_SEL_IMAGE_ONLY,
+};
+
+/* APPIEBUF related definitions */
+/* Management frame type to which application IE is added */
+enum {
+	IEEE80211_APPIE_FRAME_BEACON		= 0,
+	IEEE80211_APPIE_FRAME_PROBE_REQ		= 1,
+	IEEE80211_APPIE_FRAME_PROBE_RESP	= 2,
+	IEEE80211_APPIE_FRAME_ASSOC_REQ		= 3,
+	IEEE80211_APPIE_FRAME_ASSOC_RESP	= 4,
+	IEEE80211_APPIE_FRAME_TDLS_ACT		= 5,
+	IEEE80211_APPIE_NUM_OF_FRAME		= 6
+};
+
+/* the beaconing schemes - the mapping between 8 VAPs and 4 HW TX queues for beacon */
+enum {
+	/*
+	 * Scheme 0 - default
+	 * VAP0/VAP4 - HW queue0
+	 * VAP1/VAP5 - HW queue1
+	 * VAP2/VAP6 - HW queue2
+	 * VAP3/VAP7 - HW queue3
+	 */
+	QTN_BEACONING_SCHEME_0 = 0,
+	/*
+	 * Scheme 1:
+	 * VAP0/VAP1 - HW queue0
+	 * VAP2/VAP3 - HW queue1
+	 * VAP4/VAP5 - HW queue2
+	 * VAP6/VAP7 - HW queue3
+	 */
+	QTN_BEACONING_SCHEME_1 = 1
+};
+
+/*
+ * This enum must be kept in sync with tdls_operation_string.
+ * enum ieee80211_tdls_operation - values for tdls_oper callbacks
+ * @IEEE80211_TDLS_DISCOVERY_REQ: Send a TDLS discovery request
+ * @IEEE80211_TDLS_SETUP: Setup TDLS link
+ * @IEEE80211_TDLS_TEARDOWN: Teardown a TDLS link which is already established
+ * @IEEE80211_TDLS_ENABLE_LINK: Enable TDLS link
+ * @IEEE80211_TDLS_DISABLE_LINK: Disable TDLS link
+ * @IEEE80211_TDLS_ENABLE: Enable TDLS function
+ * @IEEE80211_TDLS_DISABLE: Disable TDLS function
+ * @IEEE80211_TDLS_PTI_REQ: Send a TDLS Peer Traffic Indication Frame
+ */
+enum ieee80211_tdls_operation {
+	IEEE80211_TDLS_DISCOVERY_REQ	= 0,
+	IEEE80211_TDLS_SETUP			= 1,
+	IEEE80211_TDLS_TEARDOWN			= 2,
+	IEEE80211_TDLS_ENABLE_LINK		= 3,
+	IEEE80211_TDLS_DISABLE_LINK		= 4,
+	IEEE80211_TDLS_ENABLE			= 5,
+	IEEE80211_TDLS_DISABLE			= 6,
+	IEEE80211_TDLS_PTI_REQ			= 7,
+	IEEE80211_TDLS_SWITCH_CHAN		= 8,
+};
+
+enum ieee80211_tdls_event {
+	IEEE80211_EVENT_TDLS,
+	IEEE80211_EVENT_STATION_LOW_ACK
+};
+
+struct ieee80211_tdls_event_data {
+	char name[32];
+	uint8_t index;
+	uint8_t sub_index;
+	uint8_t peer_mac[IEEE80211_ADDR_LEN];
+	uint8_t value[0];
+} __packed;
+
+struct ieee80211_tdls_oper_data {
+	uint8_t dest_mac[IEEE80211_ADDR_LEN];
+	uint8_t oper;
+} __packed;
+
+struct ieee80211_tdls_action_data {
+	uint8_t	dest_mac[IEEE80211_ADDR_LEN];	/* Destination address of tdls action */
+	uint8_t	action;		/* TDLS action frame type */
+	uint16_t status;	/* Status code */
+	uint8_t	dtoken;		/* Dialog token */
+	uint32_t ie_buflen;	/* Subsequent IEs length*/
+	uint8_t	ie_buf[0];	/* Subsequent IEs */
+} __packed;
+
+struct ieee80211req_getset_appiebuf {
+	uint32_t app_frmtype;	/* management frame type for which buffer is added */
+	uint32_t app_buflen;	/* application-supplied buffer length */
+#define F_QTN_IEEE80211_PAIRING_IE 0x1
+	uint8_t	flags;		/* flags here is used to check whether QTN pairing IE exists */
+	uint8_t	app_buf[0];	/* application-supplied IE(s) */
+};
+
+/* Action frame payload */
+struct action_frame_payload {
+	u_int16_t	length;                 /* action frame payload length */
+	u_int8_t	data[0];                /* action frame payload data */
+}__packed;
+
+/* Structure used to send action frame from hostapd */
+struct app_action_frame_buf {
+	u_int8_t	cat;			/* action frame category */
+	u_int8_t	action;			/* action frame action */
+	u_int8_t	dst_mac_addr[IEEE80211_ADDR_LEN];
+	struct action_frame_payload frm_payload;
+}__packed;
+
+struct app_ie {
+	u_int8_t id;
+	u_int16_t len;
+	union {
+		struct {
+			u_int8_t interworking;
+			u_int8_t an_type;
+			u_int8_t hessid[IEEE80211_ADDR_LEN];
+		}__packed interw;
+	}u;
+}__packed;
+
+struct ieee80211_acl_params {
+	uint8_t				acl_policy;
+	uint32_t			num_mac_acl;
+	struct ieee80211_mac_addr	mac_acl[0];
+};
+
+struct qtn_cca_args
+{
+	uint32_t cca_channel;
+	uint32_t duration;
+};
+
+/* Flags ORed by application to set filter for receiving management frames */
+enum {
+	IEEE80211_FILTER_TYPE_BEACON		= 1<<0,
+	IEEE80211_FILTER_TYPE_PROBE_REQ		= 1<<1,
+	IEEE80211_FILTER_TYPE_PROBE_RESP	= 1<<2,
+	IEEE80211_FILTER_TYPE_ASSOC_REQ		= 1<<3,
+	IEEE80211_FILTER_TYPE_ASSOC_RESP	= 1<<4,
+	IEEE80211_FILTER_TYPE_AUTH		= 1<<5,
+	IEEE80211_FILTER_TYPE_DEAUTH		= 1<<6,
+	IEEE80211_FILTER_TYPE_DISASSOC		= 1<<7,
+	IEEE80211_FILTER_TYPE_ACTION		= 1<<8,
+	IEEE80211_FILTER_TYPE_ALL		= 0x1FF	/* used to check the valid filter bits */
+};
+
+struct ieee80211req_set_filter {
+	uint32_t app_filterype;		/* management frame filter type */
+};
+
+/* Tx Restrict */
+#define IEEE80211_TX_RESTRICT_RTS_MIN		2
+#define IEEE80211_TX_RESTRICT_RTS_DEF		6
+#define IEEE80211_TX_RESTRICT_LIMIT_MIN		2
+#define IEEE80211_TX_RESTRICT_LIMIT_DEF		12
+#define IEEE80211_TX_RESTRICT_RATE		5
+
+/* Compatibility fix bitmap for various vendor peer */
+#define VENDOR_FIX_BRCM_DHCP			0x01
+#define VENDOR_FIX_BRCM_REPLACE_IGMP_SRCMAC	0x02
+#define VENDOR_FIX_BRCM_REPLACE_IP_SRCMAC	0x04
+#define VENDOR_FIX_BRCM_DROP_STA_IGMPQUERY	0x08
+#define VENDOR_FIX_BRCM_AP_GEN_IGMPQUERY	0x10
+
+enum vendor_fix_idx {
+	VENDOR_FIX_IDX_BRCM_DHCP = 1,
+	VENDOR_FIX_IDX_BRCM_IGMP = 2,
+	VENDOR_FIX_IDX_MAX = VENDOR_FIX_IDX_BRCM_IGMP,
+};
+
+#define IEEE80211_TDLS_OVER_QHOP_ENABLE_MIN 0
+#define IEEE80211_TDLS_OVER_QHOP_ENABLE_MAX 1
+#define IEEE80211_TDLS_TIMEOUT_TIME_MIN	5
+#define IEEE80211_TDLS_TIMEOUT_TIME_MAX	3600
+#define IEEE80211_TDLS_LINK_WEIGHT_MIN	0
+#define IEEE80211_TDLS_LINK_WEIGHT_MAX	10
+#define IEEE80211_TDLS_TRAINING_PKT_CNT_MIN	16
+#define IEEE80211_TDLS_TRAINING_PKT_CNT_MAX	8192
+#define IEEE80211_TDLS_DISC_INTERVAL_MIN	60
+#define IEEE80211_TDLS_DISC_INTERVAL_MAX	3600
+#define IEEE80211_TDLS_PATH_SEL_PPS_THRSHLD_MIN	8
+#define IEEE80211_TDLS_PATH_SEL_PPS_THRSHLD_MAX	64
+#define IEEE80211_TDLS_PATH_SEL_RATE_THRSHLD_MIN	0
+#define IEEE80211_TDLS_PATH_SEL_RATE_THRSHLD_MAX	1000
+#define IEEE80211_TDLS_VERBOSE_MIN		0
+#define IEEE80211_TDLS_VERBOSE_MAX		2
+#define IEEE80211_TDLS_VALID_RSSI_MIN		(-1200)
+#define IEEE80211_TDLS_VALID_RSSI_MAX		0
+#define IEEE80211_TDLS_SWITCH_INTS_MIN		2
+#define IEEE80211_TDLS_SWITCH_INTS_MAX		10
+#define IEEE80211_TDLS_RATE_WEIGHT_MIN		0
+#define IEEE80211_TDLS_RATE_WEIGHT_MAX		10
+
+#define IEEE80211_TDLS_MODE_MIN			0
+#define IEEE80211_TDLS_MODE_MAX			1
+#define IEEE80211_TDLS_INDICATION_WINDOWS_MIN	1
+#define IEEE80211_TDLS_INDICATION_WINDOWS_MAX	20
+#define IEEE80211_TDLS_CS_PROHIBIT_MIN	0
+#define IEEE80211_TDLS_CS_PROHIBIT_MAX	2
+#define IEEE80211_TDLS_CS_OFFCHAN_MIN	0
+#define IEEE80211_TDLS_CS_OFFCHAN_MAX	255
+#define IEEE80211_TDLS_CS_OFFCHAN_BW_MIN	0
+#define IEEE80211_TDLS_CS_OFFCHAN_BW_MAX	160
+#define IEEE80211_TDLS_NODE_LIFE_CYCLE_MIN	5
+#define IEEE80211_TDLS_NODE_LIFE_CYCLE_MAX	1000
+#define IEEE80211_TDLS_CHAN_SWITCH_INTV_MIN	100
+struct ieee80211req_wowlan {
+	uint32_t is_op;
+	uint8_t *is_data;
+	int32_t is_data_len;
+};
+
+#define IEEE80211_AUTHDESCR_KEYMGMT_NONE		0x00
+#define IEEE80211_AUTHDESCR_KEYMGMT_EAP			0x01
+#define IEEE80211_AUTHDESCR_KEYMGMT_PSK			0x02
+#define IEEE80211_AUTHDESCR_KEYMGMT_WEP			0x03
+
+#define IEEE80211_AUTHDESCR_KEYPROTO_NONE		0x00
+#define IEEE80211_AUTHDESCR_KEYPROTO_WPA		0x01
+#define IEEE80211_AUTHDESCR_KEYPROTO_RSN		0x02
+
+#define IEEE80211_AUTHDESCR_ALGO_POS			0x00
+#define IEEE80211_AUTHDESCR_KEYMGMT_POS			0x01
+#define IEEE80211_AUTHDESCR_KEYPROTO_POS		0x02
+#define IEEE80211_AUTHDESCR_CIPHER_POS			0x03
+
+
+struct ieee80211req_auth_description {
+	uint8_t macaddr[IEEE80211_ADDR_LEN];
+	uint32_t description;
+};
+
+enum ieee80211_extender_role {
+	IEEE80211_EXTENDER_ROLE_NONE = 0x00,
+	IEEE80211_EXTENDER_ROLE_MBS = 0x01,
+	IEEE80211_EXTENDER_ROLE_RBS = 0x02
+};
+
+#define WDS_EXT_RECEIVED_MBS_IE		0
+#define WDS_EXT_RECEIVED_RBS_IE		1
+#define WDS_EXT_LINK_STATUS_UPDATE	2
+#define WDS_EXT_RBS_OUT_OF_BRR		3
+#define WDS_EXT_RBS_SET_CHANNEL		4
+#define WDS_EXT_CLEANUP_WDS_LINK	5
+#define WDS_EXT_STA_UPDATE_EXT_INFO	6
+
+#define IEEE80211_MAX_EXT_EVENT_DATA_LEN	256
+
+#define IEEE80211_EXTENDER_ROLE_MIN	0
+#define IEEE80211_EXTENDER_ROLE_MAX	2
+#define IEEE80211_EXTENDER_MIN_RSSI	0
+#define IEEE80211_EXTENDER_MAX_RSSI	70
+#define	IEEE80211_EXTENDER_MIN_WGT	0
+#define	IEEE80211_EXTENDER_MAX_WGT	10
+#define	IEEE80211_EXTENDER_MIN_VERBOSE	0
+#define	IEEE80211_EXTENDER_MAX_VERBOSE	2
+#define IEEE80211_EXTENDER_MIN_INTERVAL	30
+#define IEEE80211_EXTENDER_MAX_INTERVAL	300
+#define IEEE80211_EXTENDER_MIN_ROAMING	0
+#define IEEE80211_EXTENDER_MAX_ROAMING	1
+#define IEEE80211_EXTENDER_MIN_MARGIN	0
+#define IEEE80211_EXTENDER_MAX_MARGIN	12
+#define IEEE80211_EXTENDER_MIN_SHORT_RETRY_LIMIT 0
+#define IEEE80211_EXTENDER_MAX_SHORT_RETRY_LIMIT 8
+#define IEEE80211_EXTENDER_MIN_LONG_RETRY_LIMIT 0
+#define IEEE80211_EXTENDER_MAX_LONG_RETRY_LIMIT 8
+
+
+#define IEEE80211_AUTOCHAN_CCI_INSTNT_MIN	0
+#define IEEE80211_AUTOCHAN_CCI_INSTNT_MAX	100
+#define IEEE80211_AUTOCHAN_ACI_INSTNT_MIN	0
+#define IEEE80211_AUTOCHAN_ACI_INSTNT_MAX	100
+#define IEEE80211_AUTOCHAN_CCI_LONGTERM_MIN	0
+#define IEEE80211_AUTOCHAN_CCI_LONGTERM_MAX	100
+#define IEEE80211_AUTOCHAN_ACI_LONGTERM_MIN	0
+#define IEEE80211_AUTOCHAN_ACI_LONGTERM_MAX	100
+#define IEEE80211_AUTOCHAN_RANGE_COST_MIN	0
+#define IEEE80211_AUTOCHAN_RANGE_COST_MAX	100
+#define IEEE80211_AUTOCHAN_DFS_COST_MIN		-100
+#define IEEE80211_AUTOCHAN_DFS_COST_MAX		100
+#define IEEE80211_AUTOCHAN_MIN_CCI_RSSI_MIN	-120
+#define IEEE80211_AUTOCHAN_MIN_CCI_RSSI_MAX	0
+#define IEEE80211_AUTOCHAN_MAXBW_MINBENEFIT_MIN	0
+#define IEEE80211_AUTOCHAN_MAXBW_MINBENEFIT_MAX	10
+#define IEEE80211_AUTOCHAN_DENSE_CCI_SPAN_MIN	0
+#define IEEE80211_AUTOCHAN_DENSE_CCI_SPAN_MAX	160
+#define IEEE80211_AUTOCHAN_DBG_LEVEL_MIN	0
+#define IEEE80211_AUTOCHAN_DBG_LEVEL_MAX	2
+
+
+/**
+ * Structure contains data of wds extender event.
+ * @name will always be "QTN-WDS-EXT"
+ * @cmd message type.
+ * @mac specify wds peer mac address
+ * @link_status specify the wds link state.
+ * @ie_len when the message contains a WDS extender IE, ie_len is larger than 0.
+ */
+struct qtn_wds_ext_event_data {
+	char name[12];
+	uint8_t cmd;
+	uint8_t mac[IEEE80211_ADDR_LEN];
+	uint8_t extender_role;
+	uint8_t link_status;
+	uint8_t channel;
+	uint8_t bandwidth;
+	uint8_t ssid[IEEE80211_NWID_LEN + 1];
+	uint8_t ie_len;
+	uint8_t wds_extender_ie[0];
+}__packed;
+
+struct qtn_exp_cca_stats {
+	/* Percentage of air time the channel occupied by activity of own radio and other radios */
+	uint32_t	cca_fat;
+	/* Percentage of air time which is occupied by other APs and STAs except the local AP/STA and associated STAs/AP */
+	uint32_t	cca_intf;
+	/* Percentage of air time which is occupied by the local AP/STA and the associated STAs/AP */
+	uint32_t	cca_trfc;
+};
+
+struct ieee80211req_interface_wmmac_stats {
+#define WMM_AC_NUM 4
+	/**
+	 * Number of dropped data packets failed to transmit through
+	 * wireless media for each traffic category(TC).
+	 */
+	uint32_t tx_wifi_drop[WMM_AC_NUM];
+	/**
+	 * Number of sent data packets that transmit through
+	 * wireless media for each traffic category(TC).
+	 */
+	uint32_t tx_wifi_sent[WMM_AC_NUM];
+};
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+struct  ieee80211_bsa_interface_fat_info {
+	uint8_t channel;
+	uint8_t band;
+	uint16_t avg_fat;
+}__packed;
+
+#define BSA_DRIVER_CAP_BTM_SHIFT	0
+struct  ieee80211_bsa_interface_status {
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint16_t mdid;
+	uint8_t channel;
+	uint8_t band;
+	uint16_t opclass;
+	uint8_t ssid[IEEE80211_NWID_LEN];
+	uint8_t ssid_len;
+	uint8_t phytype;
+	uint16_t capinfo;
+	struct ieee80211_ie_htcap htcap;
+	struct ieee80211_ie_htinfo htop;
+	struct ieee80211_ie_vhtcap vhtcap;
+	struct ieee80211_ie_vhtop vhtop;
+	uint16_t beacon_interval;
+	uint8_t drivercap;
+}__packed;
+
+struct ieee80211_bsa_sta_info {
+	uint64_t ts_last_rx_pkt;
+	uint64_t ts_last_tx_pkt;
+	uint32_t rx_phy_rate;
+	uint32_t tx_phy_rate;
+	int32_t  rssi_dbm;
+	uint8_t sta_mac[IEEE80211_ADDR_LEN];
+}__packed;
+
+struct ieee80211_bsa_sta_stats {
+	uint8_t sta_mac[IEEE80211_ADDR_LEN];
+	uint16_t num_sta;
+	/* STA info provided by driver */
+	struct ieee80211_bsa_sta_info ieee80211_bsa_sta_info_var[IEEE80211_AID_DEF];
+}__packed;
+
+struct ieee80211_bsa_mac_filter {
+	uint8_t sta_mac[IEEE80211_ADDR_LEN];
+	uint16_t allow_mac;
+}__packed;
+
+#define BSA_BTM_CAND_PREF		3
+#define BSA_BTM_CAND_PREF_ID		3
+#define BSA_BTM_CAND_PREF_LEN		1
+#define BSA_BTM_CAND_PREF_VAL		255
+
+struct ieee80211_bsa_btm_req_frm {
+	uint8_t sta_mac[IEEE80211_ADDR_LEN];
+	uint16_t dis_assoc_timer;
+	uint8_t req_mode;
+	uint8_t val_intvl;
+	uint8_t bssid[IEEE80211_ADDR_LEN];
+	uint32_t bssid_info;
+	uint8_t opclass;
+	uint8_t channel;
+	uint8_t phytype;
+	uint8_t subele_len;
+	/* Add any subelements here and update "subele_len" */
+}__packed;
+
+struct ieee80211_bsa_btm_resp_event {
+	uint8_t bsa_sta_mac[IEEE80211_ADDR_LEN];
+	uint8_t bsa_btm_resp_status;
+}__packed;
+#endif
+
+#define IEEE80211_DEV_MODE_UNKNOWN	0
+#define IEEE80211_DEV_MODE_MBS		1
+#define IEEE80211_DEV_MODE_RBS		2
+#define IEEE80211_DEV_MODE_REPEATER	3
+#define IEEE80211_DEV_MODE_DBDC_5G_HI	4
+#define IEEE80211_DEV_MODE_DBDC_5G_LO	5
+
+struct node_txrx_airtime {
+	uint8_t  macaddr[IEEE80211_ADDR_LEN];
+	uint32_t tx_airtime;
+	uint32_t tx_airtime_accum;
+	uint32_t rx_airtime;
+	uint32_t rx_airtime_accum;
+};
+
+struct txrx_airtime {
+	uint16_t               nr_nodes;     /* number of nodes */
+	uint16_t               free_airtime; /* in ms */
+	uint32_t               total_cli_tx_airtime; /* total tx airtime of clients */
+	uint32_t               total_cli_rx_airtime; /* total rx airtime of clients */
+#define TXRX_AIRTIME_NODE_MAX	(102)                /* The value is based on QTN_ASSOC_LIMIT in qtn_uc_comm.h */
+	struct node_txrx_airtime nodes[TXRX_AIRTIME_NODE_MAX];
+};
+
+#endif /* __linux__ */
+
+#pragma pack()
+
+#endif /* _NET80211_IEEE80211_IOCTL_H_ */
diff --git a/drivers/qtn/include/shared/net80211/ieee80211_mlme_statistics.h b/drivers/qtn/include/shared/net80211/ieee80211_mlme_statistics.h
new file mode 100644
index 0000000..5809f8a
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/ieee80211_mlme_statistics.h
@@ -0,0 +1,113 @@
+/*-
+ * Copyright (c) 2014 Quantenna
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_mlme_statistics.h 1 2014-01-17 12:00:00Z vsaiapin $
+ */
+
+
+#ifndef IEEE80211MLMESTATISTICS_H
+#define IEEE80211MLMESTATISTICS_H
+
+#include <linux/ioctl.h>
+#include "net80211/ieee80211.h"
+
+//#define MLME_STATS_DEBUG					1
+#define MLME_STATS_DEVFS					1
+#define MLME_STATS_PROCFS					1
+
+struct mlme_stats_record {
+	unsigned char mac_addr[IEEE80211_ADDR_LEN];
+
+	unsigned int auth;
+	unsigned int auth_fails;
+	unsigned int assoc;
+	unsigned int assoc_fails;
+	unsigned int deauth;
+	unsigned int diassoc;
+};
+
+// Statistics entries
+enum {
+	MLME_STAT_AUTH = 0,
+	MLME_STAT_AUTH_FAILS,
+	MLME_STAT_ASSOC,
+	MLME_STAT_ASSOC_FAILS,
+	MLME_STAT_DEAUTH,
+	MLME_STAT_DIASSOC,
+
+	MLME_STAT_MAX,
+};
+
+#define MLME_STATS_IOCTL_MAGIC				'q'
+#define MLME_STATS_IOC_BASE					0x20
+#define MLME_STATS_IOC_GET_MAX_CLIENTS		_IOR(MLME_STATS_IOCTL_MAGIC, MLME_STATS_IOC_BASE, unsigned int)						/* Get the maximum possible number of clients in table */
+#define MLME_STATS_IOC_GET_CUR_CLIENTS		_IOR(MLME_STATS_IOCTL_MAGIC, MLME_STATS_IOC_BASE + 1, unsigned int)					/* Get the current number of clients in table */
+#define MLME_STATS_IOC_GET_ALL_MACS			_IOR(MLME_STATS_IOCTL_MAGIC, MLME_STATS_IOC_BASE + 2, unsigned char*)				/* Get the list of all macs for the moment */
+#define MLME_STATS_IOC_GET_CLIENT_STATS		_IOWR(MLME_STATS_IOCTL_MAGIC, MLME_STATS_IOC_BASE + 3, struct mlme_stats_record)	/* Get stats for specified mac */
+
+/**
+ * mlme_stats_update - update stats for the client
+ * @mac_addr: mac address of the client
+ * @statistics_entry: counter that needs to be updated
+ * @incrementor: value to add to the specified counter
+ *
+ * Update existing record or create new one and
+ * move it to the head of LRU list.
+ */
+extern void mlme_stats_update(unsigned char *mac_addr, unsigned int statistics_entry, unsigned int incrementor);
+
+/**
+ * mlme_stats_delayed_update - update stats for the client
+ * @mac_addr: mac address of the client
+ * @statistics_entry: counter that needs to be updated
+ * @incrementor: value to add to the specified counter
+ *
+ * Update existing record or create new one and
+ * move it to the head of LRU list.
+ */
+extern void mlme_stats_delayed_update(unsigned char *mac_addr, unsigned int statistics_entry, unsigned int incrementor);
+
+/**
+ * mlme_stats_init - init statistics facility
+ *
+ * Init all necessary stuff for the statistics facility.
+ * Needs to be called during module init.
+ */
+extern void mlme_stats_init(void);
+
+/**
+ * mlme_stats_exit - clear statistics facility stuff
+ *
+ * Clears memory and removes proc and dev file.
+ * Must be called in module exit routine.
+ */
+extern void mlme_stats_exit(void);
+
+#endif /* IEEE80211MLMESTATISTICS_H */
diff --git a/drivers/qtn/include/shared/net80211/ieee80211_qos.h b/drivers/qtn/include/shared/net80211/ieee80211_qos.h
new file mode 100644
index 0000000..ff75eec
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/ieee80211_qos.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Common QOS definitions.
+ */
+#ifndef _IEEE80211_QOS_H
+#define _IEEE80211_QOS_H
+
+/* WME stream classes */
+#define	WME_AC_BE	0		/* best effort */
+#define	WME_AC_BK	1		/* background */
+#define	WME_AC_VI	2		/* video */
+#define	WME_AC_VO	3		/* voice */
+#define	WME_AC_NUM	4
+
+enum {
+	IEEE80211_WMMPARAMS_CWMIN	= 1,
+	IEEE80211_WMMPARAMS_CWMAX	= 2,
+	IEEE80211_WMMPARAMS_AIFS	= 3,
+	IEEE80211_WMMPARAMS_TXOPLIMIT	= 4,
+	IEEE80211_WMMPARAMS_ACM		= 5,
+	IEEE80211_WMMPARAMS_NOACKPOLICY	= 6,
+};
+
+#endif
diff --git a/drivers/qtn/include/shared/net80211/ieee80211_radiotap.h b/drivers/qtn/include/shared/net80211/ieee80211_radiotap.h
new file mode 100644
index 0000000..eb7b86b
--- /dev/null
+++ b/drivers/qtn/include/shared/net80211/ieee80211_radiotap.h
@@ -0,0 +1,336 @@
+/*-
+ * Copyright (c) 2003, 2004 David Young.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL DAVID
+ * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+
+/*
+ * Modifications to fit into the linux IEEE 802.11 stack,
+ * Mike Kershaw (dragorn@kismetwireless.net)
+ */
+
+#ifndef IEEE80211RADIOTAP_H
+#define IEEE80211RADIOTAP_H
+
+/* Base version of the radiotap packet header data */
+#define PKTHDR_RADIOTAP_VERSION		0
+
+/* A generic radio capture format is desirable. There is one for
+ * Linux, but it is neither rigidly defined (there were not even
+ * units given for some fields) nor easily extensible.
+ *
+ * I suggest the following extensible radio capture format. It is
+ * based on a bitmap indicating which fields are present.
+ *
+ * I am trying to describe precisely what the application programmer
+ * should expect in the following, and for that reason I tell the
+ * units and origin of each measurement (where it applies), or else I
+ * use sufficiently weaselly language ("is a monotonically nondecreasing
+ * function of...") that I cannot set false expectations for lawyerly
+ * readers.
+ */
+
+/* The radio capture header precedes the 802.11 header.
+ * All data in the header is little endian on all platforms.
+ */
+struct ieee80211_radiotap_header {
+	uint8_t it_version;		/* Version 0. Only increases
+					 * for drastic changes,
+					 * introduction of compatible
+					 * new fields does not count.
+					 */
+	uint8_t it_pad;
+	__le16 it_len;			/* length of the whole
+					 * header in bytes, including
+					 * it_version, it_pad,
+					 * it_len, and data fields.
+					 */
+	__le32 it_present;		/* A bitmap telling which
+					 * fields are present. Set bit 31
+					 * (0x80000000) to extend the
+					 * bitmap by another 32 bits.
+					 * Additional extensions are made
+					 * by setting bit 31.
+					 */
+} __attribute__((__packed__));
+
+/* Name                                 Data type    Units
+ * ----                                 ---------    -----
+ *
+ * IEEE80211_RADIOTAP_TSFT              __le64       microseconds
+ *
+ *      Value in microseconds of the MAC's 64-bit 802.11 Time
+ *      Synchronization Function timer when the first bit of the
+ *      MPDU arrived at the MAC. For received frames, only.
+ *
+ * IEEE80211_RADIOTAP_CHANNEL           2 x uint16   MHz, bitmap
+ *
+ *      Tx/Rx frequency in MHz, followed by flags (see below).
+ *
+ * IEEE80211_RADIOTAP_FHSS              uint16       see below
+ *
+ *      For frequency-hopping radios, the hop set (first byte)
+ *      and pattern (second byte).
+ *
+ * IEEE80211_RADIOTAP_RATE              u8           500kb/s
+ *
+ *      Tx/Rx data rate
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTSIGNAL     s8           decibels from
+ *                                                   one milliwatt (dBm)
+ *
+ *      RF signal power at the antenna, decibel difference from
+ *      one milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTNOISE      s8           decibels from
+ *                                                   one milliwatt (dBm)
+ *
+ *      RF noise power at the antenna, decibel difference from one
+ *      milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTSIGNAL      u8           decibel (dB)
+ *
+ *      RF signal power at the antenna, decibel difference from an
+ *      arbitrary, fixed reference.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTNOISE       u8           decibel (dB)
+ *
+ *      RF noise power at the antenna, decibel difference from an
+ *      arbitrary, fixed reference point.
+ *
+ * IEEE80211_RADIOTAP_LOCK_QUALITY      uint16       unitless
+ *
+ *      Quality of Barker code lock. Unitless. Monotonically
+ *      nondecreasing with "better" lock strength. Called "Signal
+ *      Quality" in datasheets.  (Is there a standard way to measure
+ *      this?)
+ *
+ * IEEE80211_RADIOTAP_TX_ATTENUATION    uint16       unitless
+ *
+ *      Transmit power expressed as unitless distance from max
+ *      power set at factory calibration.  0 is max power.
+ *      Monotonically nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DB_TX_ATTENUATION uint16       decibels (dB)
+ *
+ *      Transmit power expressed as decibel distance from max power
+ *      set at factory calibration.  0 is max power.  Monotonically
+ *      nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DBM_TX_POWER      s8           decibels from
+ *                                                   one milliwatt (dBm)
+ *
+ *      Transmit power expressed as dBm (decibels from a 1 milliwatt
+ *      reference). This is the absolute power level measured at
+ *      the antenna port.
+ *
+ * IEEE80211_RADIOTAP_FLAGS             u8           bitmap
+ *
+ *      Properties of transmitted and received frames. See flags
+ *      defined below.
+ *
+ * IEEE80211_RADIOTAP_ANTENNA           u8           antenna index
+ *
+ *      Unitless indication of the Rx/Tx antenna for this packet.
+ *      The first antenna is antenna 0.
+ *
+ * IEEE80211_RADIOTAP_RX_FLAGS          uint16       bitmap
+ *
+ *     Properties of received frames. See flags defined below.
+ *
+ * IEEE80211_RADIOTAP_TX_FLAGS          uint16       bitmap
+ *
+ *     Properties of transmitted frames. See flags defined below.
+ *
+ * IEEE80211_RADIOTAP_RTS_RETRIES       u8           data
+ *
+ *     Number of rts retries a transmitted frame used.
+ *
+ * IEEE80211_RADIOTAP_DATA_RETRIES      u8           data
+ *
+ *     Number of unicast retries a transmitted frame used.
+ *
+ * IEEE80211_RADIOTAP_MCS	u8, u8, u8		unitless
+ *
+ *     Contains a bitmap of known fields/flags, the flags, and
+ *     the MCS index.
+ *
+ * IEEE80211_RADIOTAP_AMPDU_STATUS	u32, u16, u8, u8	unitless
+ *
+ *	Contains the AMPDU information for the subframe.
+ */
+enum ieee80211_radiotap_type {
+	IEEE80211_RADIOTAP_TSFT = 0,
+	IEEE80211_RADIOTAP_FLAGS = 1,
+	IEEE80211_RADIOTAP_RATE = 2,
+	IEEE80211_RADIOTAP_CHANNEL = 3,
+	IEEE80211_RADIOTAP_FHSS = 4,
+	IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5,
+	IEEE80211_RADIOTAP_DBM_ANTNOISE = 6,
+	IEEE80211_RADIOTAP_LOCK_QUALITY = 7,
+	IEEE80211_RADIOTAP_TX_ATTENUATION = 8,
+	IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9,
+	IEEE80211_RADIOTAP_DBM_TX_POWER = 10,
+	IEEE80211_RADIOTAP_ANTENNA = 11,
+	IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12,
+	IEEE80211_RADIOTAP_DB_ANTNOISE = 13,
+	IEEE80211_RADIOTAP_RX_FLAGS = 14,
+	IEEE80211_RADIOTAP_TX_FLAGS = 15,
+	IEEE80211_RADIOTAP_RTS_RETRIES = 16,
+	IEEE80211_RADIOTAP_DATA_RETRIES = 17,
+	IEEE80211_RADIOTAP_XCHANNEL = 18, /* Unofficial, used by FreeBSD */
+	IEEE80211_RADIOTAP_MCS = 19,
+	IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
+	IEEE80211_RADIOTAP_VHT = 21,
+
+	/* valid in every it_present bitmap, even vendor namespaces */
+	IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
+	IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
+	IEEE80211_RADIOTAP_EXT = 31
+};
+
+/* not (yet) defined Radiotap present flags */
+/* Bits 22 to 28 are not defined (in binary: 0001 1111 1100 0000 0000 0000 0000 0000) */
+#define IEEE80211_RADIOTAP_NOTDEFINED 0x1FC00000
+
+/* Channel flags. */
+#define	IEEE80211_RADIOTAP_CHAN_TURBO	0x0010	/* Turbo channel */
+#define	IEEE80211_RADIOTAP_CHAN_CCK	0x0020	/* CCK channel */
+#define	IEEE80211_RADIOTAP_CHAN_OFDM	0x0040	/* OFDM channel */
+#define	IEEE80211_RADIOTAP_CHAN_2GHZ	0x0080	/* 2 GHz spectrum channel. */
+#define	IEEE80211_RADIOTAP_CHAN_5GHZ	0x0100	/* 5 GHz spectrum channel */
+#define	IEEE80211_RADIOTAP_CHAN_PASSIVE	0x0200	/* Only passive scan allowed */
+#define	IEEE80211_RADIOTAP_CHAN_DYN	0x0400	/* Dynamic CCK-OFDM channel */
+#define	IEEE80211_RADIOTAP_CHAN_GFSK	0x0800	/* GFSK channel (FHSS PHY) */
+
+/* For IEEE80211_RADIOTAP_FLAGS */
+#define	IEEE80211_RADIOTAP_F_CFP	0x01	/* sent/received
+						 * during CFP
+						 */
+#define	IEEE80211_RADIOTAP_F_SHORTPRE	0x02	/* sent/received
+						 * with short
+						 * preamble
+						 */
+#define	IEEE80211_RADIOTAP_F_WEP	0x04	/* sent/received
+						 * with WEP encryption
+						 */
+#define	IEEE80211_RADIOTAP_F_FRAG	0x08	/* sent/received
+						 * with fragmentation
+						 */
+#define	IEEE80211_RADIOTAP_F_FCS	0x10	/* frame includes FCS */
+#define	IEEE80211_RADIOTAP_F_DATAPAD	0x20	/* frame has padding between
+						 * 802.11 header and payload
+						 * (to 32-bit boundary)
+						 */
+#define IEEE80211_RADIOTAP_F_BADFCS	0x40	/* frame failed FCS check */
+
+/* For IEEE80211_RADIOTAP_RX_FLAGS */
+#define IEEE80211_RADIOTAP_F_RX_BADPLCP	0x0002 /* bad PLCP */
+
+/* For IEEE80211_RADIOTAP_TX_FLAGS */
+#define IEEE80211_RADIOTAP_F_TX_FAIL	0x0001	/* failed due to excessive
+						 * retries */
+#define IEEE80211_RADIOTAP_F_TX_CTS	0x0002	/* used cts 'protection' */
+#define IEEE80211_RADIOTAP_F_TX_RTS	0x0004	/* used rts/cts handshake */
+
+
+/* For IEEE80211_RADIOTAP_MCS */
+#define IEEE80211_RADIOTAP_MCS_HAVE_BW		0x01
+#define IEEE80211_RADIOTAP_MCS_HAVE_MCS		0x02
+#define IEEE80211_RADIOTAP_MCS_HAVE_GI		0x04
+#define IEEE80211_RADIOTAP_MCS_HAVE_FMT		0x08
+#define IEEE80211_RADIOTAP_MCS_HAVE_FEC		0x10
+#define IEEE80211_RADIOTAP_MCS_HAVE_STBC	0x20
+
+#define IEEE80211_RADIOTAP_MCS_BW_MASK		0x03
+#define		IEEE80211_RADIOTAP_MCS_BW_20	0
+#define		IEEE80211_RADIOTAP_MCS_BW_40	1
+#define		IEEE80211_RADIOTAP_MCS_BW_20L	2
+#define		IEEE80211_RADIOTAP_MCS_BW_20U	3
+#define IEEE80211_RADIOTAP_MCS_SGI		0x04
+#define IEEE80211_RADIOTAP_MCS_FMT_GF		0x08
+#define IEEE80211_RADIOTAP_MCS_FEC_LDPC		0x10
+#define IEEE80211_RADIOTAP_MCS_STBC		0x20
+
+
+/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
+#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN		0x0001
+#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN		0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN		0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST		0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR		0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN	0x0020
+
+
+/* For IEEE80211_RADIOTAP_VHT */
+#define IEEE80211_RADIOTAP_VHT_HAVE_STBC	0x0001
+#define IEEE80211_RADIOTAP_VHT_HAVE_TXOP_PS	0x0002
+#define IEEE80211_RADIOTAP_VHT_HAVE_GI		0x0004
+#define IEEE80211_RADIOTAP_VHT_HAVE_SGI_NSYM_DA	0x0008
+#define IEEE80211_RADIOTAP_VHT_HAVE_LDPC_EXTRA	0x0010
+#define IEEE80211_RADIOTAP_VHT_HAVE_BF		0x0020
+#define IEEE80211_RADIOTAP_VHT_HAVE_BW		0x0040
+#define IEEE80211_RADIOTAP_VHT_HAVE_GID		0x0080
+#define IEEE80211_RADIOTAP_VHT_HAVE_PAID	0x0100
+#define IEEE80211_RADIOTAP_VHT_STBC		0x01
+#define IEEE80211_RADIOTAP_VHT_TXOP_PS		0x02
+#define IEEE80211_RADIOTAP_VHT_SGI		0x04
+#define IEEE80211_RADIOTAP_VHT_SGI_NSYM_DA	0x08
+#define IEEE80211_RADIOTAP_VHT_LDPC_EXTRA	0x10
+#define IEEE80211_RADIOTAP_VHT_BF		0x20
+#define IEEE80211_RADIOTAP_VHT_NSS		0x0f
+#define IEEE80211_RADIOTAP_VHT_MCS		0xf0
+#define IEEE80211_RADIOTAP_VHT_CODING_LDPC	0x01
+
+#define IEEE80211_RADIOTAP_VHT_BW_20		IEEE80211_RADIOTAP_MCS_BW_20
+#define IEEE80211_RADIOTAP_VHT_BW_40		IEEE80211_RADIOTAP_MCS_BW_40
+#define IEEE80211_RADIOTAP_VHT_BW_20L		IEEE80211_RADIOTAP_MCS_BW_20L
+#define IEEE80211_RADIOTAP_VHT_BW_20U		IEEE80211_RADIOTAP_MCS_BW_20U
+#define IEEE80211_RADIOTAP_VHT_BW_80		4
+#define IEEE80211_RADIOTAP_VHT_BW_40L		5
+#define IEEE80211_RADIOTAP_VHT_BW_40U		6
+#define IEEE80211_RADIOTAP_VHT_BW_20LL		7
+#define IEEE80211_RADIOTAP_VHT_BW_20LU		8
+#define IEEE80211_RADIOTAP_VHT_BW_20UL		9
+#define IEEE80211_RADIOTAP_VHT_BW_20UU		10
+#define IEEE80211_RADIOTAP_VHT_BW_160		11
+#define IEEE80211_RADIOTAP_VHT_BW_80L		12
+#define IEEE80211_RADIOTAP_VHT_BW_80U		13
+#define IEEE80211_RADIOTAP_VHT_BW_40LL		14
+#define IEEE80211_RADIOTAP_VHT_BW_40LU		15
+#define IEEE80211_RADIOTAP_VHT_BW_40UL		16
+#define IEEE80211_RADIOTAP_VHT_BW_40UU		17
+#define IEEE80211_RADIOTAP_VHT_BW_20LLL		18
+#define IEEE80211_RADIOTAP_VHT_BW_20LLU		19
+#define IEEE80211_RADIOTAP_VHT_BW_20LUL		20
+#define IEEE80211_RADIOTAP_VHT_BW_20LUU		21
+#define IEEE80211_RADIOTAP_VHT_BW_20ULL		22
+#define IEEE80211_RADIOTAP_VHT_BW_20ULU		23
+#define IEEE80211_RADIOTAP_VHT_BW_20UUL		24
+#define IEEE80211_RADIOTAP_VHT_BW_20UUU		25
+
+
+#endif				/* IEEE80211_RADIOTAP_H */
diff --git a/drivers/qtn/include/shared/qdrv_sch_const.h b/drivers/qtn/include/shared/qdrv_sch_const.h
new file mode 100644
index 0000000..8d33615
--- /dev/null
+++ b/drivers/qtn/include/shared/qdrv_sch_const.h
@@ -0,0 +1,62 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2011 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef __QDRV_SCH_CONST_H
+#define __QDRV_SCH_CONST_H
+
+#include <qtn/lhost_muc_comm.h>
+
+#define QDRV_BAND_AC_BE		WMM_AC_BE
+#define QDRV_BAND_AC_BK		WMM_AC_BK
+#define QDRV_BAND_AC_VI		WMM_AC_VI
+#define QDRV_BAND_AC_VO		WMM_AC_VO
+#define QDRV_BAND_CTRL		4
+
+#define QDRV_SCH_BANDS		5
+
+#define QDRV_SCH_PRIORITIES	4
+
+#define QDRV_SCH_MODULE_ID	0xAC000000
+#define QDRV_SCH_MODULE_MASK	0xFF000000
+
+#endif // __QDRV_SCH_CONST_H
+
diff --git a/drivers/qtn/include/shared/qtn/beacon_ioctl.h b/drivers/qtn/include/shared/qtn/beacon_ioctl.h
new file mode 100644
index 0000000..eb7d501
--- /dev/null
+++ b/drivers/qtn/include/shared/qtn/beacon_ioctl.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2015 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __BEACON_IOCTL_H__
+#define __BEACON_IOCTL_H__
+/*
+#define LHOST_DEBUG_BEACON
+#define MUC_DEBUG_BEACON
+*/
+
+#define BEACON_PARAM_SIZE		1000
+/*
+ * A general ie descriptor shared between sender (LHOST) and receiver (MuC).
+ * To avoid alignment-compatibility issues between different hosts, all fields
+ * are 32-bit aligned.
+ */
+struct beacon_shared_ie_t
+{
+	dma_addr_t	buf;			/* MuC reference to the ie buffer */
+	uint8_t *	lhost_buf;		/* LHOST reference to the ie buffer */
+	uint32_t	size;			/* total length of ie including id + len */
+	uint32_t	next_muc_addr;		/* next ie descriptor address presented in MuC addr mapping */
+	struct		beacon_shared_ie_t *next;	/* next ie descriptor */
+};
+#endif /* __BEACON_IOCTL_H__ */
diff --git a/drivers/qtn/include/shared/qtn/qvsp_data.h b/drivers/qtn/include/shared/qtn/qvsp_data.h
new file mode 100644
index 0000000..28d304e
--- /dev/null
+++ b/drivers/qtn/include/shared/qtn/qvsp_data.h
@@ -0,0 +1,677 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012-2013 Quantenna Communications, Inc.            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  File        : qvsp_data.h                                                **
+**  Description : Video Stream Protection                                    **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef __QTN_QVSP_DATA_H__
+#define __QTN_QVSP_DATA_H__
+
+#include <net80211/_ieee80211.h>
+#include <qtn/qvsp_common.h>
+#if defined(__KERNEL__)
+#include "compat.h"
+#include <linux/in6.h>
+#elif defined(MUC_BUILD)
+#else
+#include <netinet/in.h>
+#include <sys/param.h>
+#define MSEC_PER_SEC			1000L
+#endif
+
+/*
+ * Minimum rate at which to calculate node cost.
+ * If throughput is not sufficiently high, only small aggregates get transmitted which gives an
+ * artificially high and noisy node cost estimate.
+ * The threshold should be lower than QVSP_CFG_STRM_DISABLED_MAX, so that node cost continues to
+ * be updated when streams are disabled.
+ */
+#define QVSP_MIN_NODE_KBPS_UPDATE_DFLT	480
+#define QVSP_STRM_DISABLED_MAX_DFLT	(QVSP_MIN_NODE_KBPS_UPDATE_DFLT + 20)
+
+/* Log levels */
+#define LL_0				0x00
+#define LL_1				0x01
+#define LL_2				0x02
+#define LL_3				0x03
+#define LL_4				0x04
+#define LL_5				0x05
+#define LL_6				0x06
+#define LL_7				0x07
+#define LL_8				0x08
+#define LL_9				0x09
+
+enum qvsp_ioctl_state {
+	QVSP_STATE_FAT,
+	QVSP_STATE_FAT_AVAIL,
+	QVSP_STATE_FAT_INTF,
+	QVSP_STATE_STRM_TOT,
+	QVSP_STATE_STRM_QTN,
+	QVSP_STATE_STRM_ENA,
+	QVSP_STATE_STRM_DIS,
+	QVSP_STATE_STRM_DMT,
+	QVSP_STATE_READ_MAX,
+	QVSP_STATE_RESET,
+	QVSP_STATE_TEST_FAT,
+	QVSP_STATE_ALL_MAX
+};
+
+#ifndef DOXYGEN_EXCLUDE
+
+/*
+ * Restrictions:
+ *   this structure must be kept in sync with qvsp_cfg_params and qvspdoc_enumstr_cfg
+ *   _AC[0-3] fields must follow the 'global' equivalent (for macro assumptions)
+ */
+enum qvsp_cfg_param_e {
+	QVSP_CFG_ENABLED,
+	QVSP_CFG_ENABLED_ALWAYS,
+	QVSP_CFG_FAT_MIN,
+	QVSP_CFG_FAT_MIN_SOFT,
+	QVSP_CFG_FAT_MIN_SOFT_CONSEC,
+	QVSP_CFG_FAT_MIN_SAFE,
+	QVSP_CFG_FAT_MIN_CHECK_INTV,
+	QVSP_CFG_FAT_MAX_SOFT,
+	QVSP_CFG_FAT_MAX_SOFT_CONSEC,
+	QVSP_CFG_FAT_MAX_SAFE,
+	QVSP_CFG_FAT_MAX_CHECK_INTV,
+	QVSP_CFG_NODE_DATA_MIN,
+	QVSP_CFG_DISABLE_DEMOTE,
+	QVSP_CFG_DISABLE_DEMOTE_FIX_FAT,
+	QVSP_CFG_DISABLE_WAIT,
+	QVSP_CFG_DISABLE_PER_EVENT_MAX,
+	QVSP_CFG_ENABLE_WAIT,
+	QVSP_CFG_ENABLE_PER_EVENT_MAX,
+	QVSP_CFG_STRM_RMT_DIS_TCP,
+	QVSP_CFG_STRM_RMT_DIS_UDP,
+	QVSP_CFG_STRM_TPUT_MIN,
+	QVSP_CFG_STRM_DISABLED_MAX,
+	QVSP_CFG_STRM_ADPT_THROT,
+	QVSP_CFG_STRM_ADPT_THROT_STEP,
+	QVSP_CFG_STRM_ADPT_THROT_MARGIN,
+	QVSP_CFG_STRM_TPUT_SMPL_MIN,
+	QVSP_CFG_STRM_COST_RC_ADJUST,
+	QVSP_CFG_STRM_MAX,
+	QVSP_CFG_STRM_MAX_AC0,
+	QVSP_CFG_STRM_MAX_AC1,
+	QVSP_CFG_STRM_MAX_AC2,
+	QVSP_CFG_STRM_MAX_AC3,
+	QVSP_CFG_STRM_MIN,
+	QVSP_CFG_STRM_MIN_AC0,
+	QVSP_CFG_STRM_MIN_AC1,
+	QVSP_CFG_STRM_MIN_AC2,
+	QVSP_CFG_STRM_MIN_AC3,
+	QVSP_CFG_STRM_TPUT_MAX_TCP,
+	QVSP_CFG_STRM_TPUT_MAX_FIRST = QVSP_CFG_STRM_TPUT_MAX_TCP,
+	QVSP_CFG_STRM_TPUT_MAX_TCP_AC0,
+	QVSP_CFG_STRM_TPUT_MAX_TCP_AC1,
+	QVSP_CFG_STRM_TPUT_MAX_TCP_AC2,
+	QVSP_CFG_STRM_TPUT_MAX_TCP_AC3,
+	QVSP_CFG_STRM_TPUT_MAX_UDP,
+	QVSP_CFG_STRM_TPUT_MAX_UDP_AC0,
+	QVSP_CFG_STRM_TPUT_MAX_UDP_AC1,
+	QVSP_CFG_STRM_TPUT_MAX_UDP_AC2,
+	QVSP_CFG_STRM_TPUT_MAX_UDP_AC3,
+	QVSP_CFG_STRM_TPUT_MAX_LAST = QVSP_CFG_STRM_TPUT_MAX_UDP_AC3,
+	QVSP_CFG_STRM_ENABLE_WAIT,
+	QVSP_CFG_STRM_AGE_MAX,
+	QVSP_CFG_AGE_CHK_INTV,
+	QVSP_CFG_3RDPT_CTL,
+	QVSP_CFG_3RDPT_LOCAL_THROT,
+	QVSP_CFG_3RDPT_QTN,		/* treat qtn client as 3rd party client, debug use only */
+	QVSP_CFG_BA_THROT_INTV,
+	QVSP_CFG_BA_THROT_DUR_MIN,
+	QVSP_CFG_BA_THROT_DUR_STEP,
+	QVSP_CFG_BA_THROT_WINSIZE_MIN,
+	QVSP_CFG_BA_THROT_WINSIZE_MAX,
+	QVSP_CFG_WME_THROT_AC,
+	QVSP_CFG_WME_THROT_AIFSN,
+	QVSP_CFG_WME_THROT_ECWMIN,
+	QVSP_CFG_WME_THROT_ECWMAX,
+	QVSP_CFG_WME_THROT_TXOPLIMIT,
+	QVSP_CFG_WME_THROT_THRSH_DISABLED,
+	QVSP_CFG_WME_THROT_THRSH_VICTIM,
+	QVSP_CFG_EVENT_LOG_LVL,
+	QVSP_CFG_DEBUG_LOG_LVL,
+	QVSP_CFG_MAX,
+};
+
+struct qvsp_cfg_param {
+	const char	*name;
+	const char	*desc;
+	const char	*units;
+	uint32_t	default_val;
+	uint32_t	min_val;
+	uint32_t	max_val;
+};
+
+#define QVSP_CFG_PARAMS {											\
+	{ "enabled",		"QTM enabled",				"number",	0,	0,	1},	\
+	{ "enabled_always",	"QTM enabled when no QTM peers",	"number",	0,	0,	1},	\
+	{ "fat_min",		"Min free airtime",			"msps",		100,	1,	1000 },	\
+	{ "fat_min_soft",	"Soft min free airtime",		"msps",		170,	1,	1000 },	\
+	{ "fat_min_soft_consec","Consecutive soft min free airtime",	"number",	3,	1,	255 },	\
+	{ "fat_min_safe",	"Safe min free airtime",		"msps",		200,	1,	1000 },	\
+	{ "fat_min_check_intv",	"Oversubscription check interval",	"ms",		2000,	100,	60000 },\
+	{ "fat_max_soft",	"Soft max free airtime",		"msps",		350,	1,	1000 },	\
+	{ "fat_max_soft_consec","Consecutive soft max free airtime",	"number",	5,	1,	255 },	\
+	{ "fat_max_safe",	"Safe max free airtime",		"msps",		250,	1,	1000 },	\
+	{ "fat_max_check_intv",	"Undersubscription check interval",	"ms",		2000,	100,	86400000 },\
+	{ "node_data_min",	"Min data for node cost update",	"Kbps",					\
+								QVSP_MIN_NODE_KBPS_UPDATE_DFLT,	1,	10000 },\
+	{ "disable_demote",	"Demote stream to disable",		"number",	1,	0,	1 },	\
+	{ "disable_demote_fat_fix",	"Adjust FAT when demoting streams",\
+									"number",	0,	0,	1 },	\
+	{ "disable_wait",	"Min re-disable wait time",		"secs",		3,	1,	86400 },\
+	{ "disable_event_max",	"Max streams disabled per event",	"number",	1,	1,	256 },	\
+	{ "enable_wait",	"Min re-enable wait time",		"secs",		15,	1,	86400 },\
+	{ "enable_event_max",	"Max streams enabled per event",	"number",	1,	1,	256 },	\
+	{ "rmt_disable_tcp",	"Disable Rx TCP streams at STA",	"number",	1,	0,	1 },	\
+	{ "rmt_disable_udp",	"Disable Rx UDP streams at STA",	"number",	1,	0,	1 },	\
+	{ "strm_tput_min",	"Min throughput for a real stream",	"Kbps",		1000,	1,	10000 },\
+	{ "strm_disabled_max",	"Max throughput when disabled",		"Kbps",					\
+								QVSP_STRM_DISABLED_MAX_DFLT,	20,	10000 },\
+	{ "strm_adpt_throt",	"Adaptive throttling enabled",		"number",	1,	0,	1 },	\
+	{ "strm_adpt_throt_step",	"Adaptive throttling cost step",	\
+									"percent",	40,	1,	100 },\
+	{ "strm_adpt_throt_margin",	"Adaptive throttling margin",	\
+									"Kbps",		10000,	0,	100000 },\
+	{ "strm_tput_smpl_min",	"Min throughput sampling ms",		"ms",		20,	1,      1000 },\
+	{ "strm_cost_rc_adjust","Adjust stream cost for rate change",	"number",	1,	0,      1 },\
+	{ "strm_max",		"Max streams",				"cnt",		256,	1,	256 },	\
+	{ "strm_max_ac0",	"Max streams for AC 0",			"cnt",		0,	0,	256 },	\
+	{ "strm_max_ac1",	"Max streams for AC 1",			"cnt",		0,	0,	256 },	\
+	{ "strm_max_ac2",	"Max streams for AC 2",			"cnt",		0,	0,	256 },	\
+	{ "strm_max_ac3",	"Max streams for AC 3",			"cnt",		0,	0,	256 },	\
+	{ "strm_min",		"Min streams",				"cnt",		1,	0,	1000 },	\
+	{ "strm_min_ac0",	"Min streams for AC 0",			"cnt",		0,	1,	1000 },	\
+	{ "strm_min_ac1",	"Min streams for AC 1",			"cnt",		0,	1,	1000 },	\
+	{ "strm_min_ac2",	"Min streams for AC 2",			"cnt",		0,	1,	1000 },	\
+	{ "strm_min_ac3",	"Min streams for AC 3",			"cnt",		0,	1,	1000 },	\
+	{ "strm_tput_max_tcp",	"Max stream throughput for TCP",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_tcp_ac0","Max stream throughput for TCP AC 0",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_tcp_ac1","Max stream throughput for TCP AC 1",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_tcp_ac2","Max stream throughput for TCP AC 2",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_tcp_ac3","Max stream throughput for TCP AC 3",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_udp",	"Max stream throughput for UDP",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_udp_ac0","Max stream throughput for UDP AC 0",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_udp_ac1","Max stream throughput for UDP AC 1",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_udp_ac2","Max stream throughput for UDP AC 2",	"Mbps",		0,	0,	10000 },\
+	{ "strm_tput_max_udp_ac3","Max stream throughput for UDP AC 3",	"Mbps",		0,	0,	10000 },\
+	{ "strm_enable_wait",	"Min stream re-enable wait time",	"secs",		30,	1,	86400 },\
+	{ "strm_age_max",	"Max stream age",			"secs",		5,	1,	86400 },\
+	{ "age_check_intv",	"Age check interval",			"secs",		10,	1,	86400 },\
+	{ "3rd_party_ctl",	"Enable 3rd party client control",	"number",	0,	0,	1 },	\
+	{ "3rd_party_local_throt",	"Throttling 3rd party client packet also in local",\
+									"number",	0,	0,	1 },	\
+	{ "3rd_party_qtn",	"Treat qtn client as 3rd party client",	"number",	0,	0,	1 },	\
+	{ "ba_throt_intv",	"BA throttling interval",		"ms",		1000,	0,	10000 },\
+	{ "ba_throt_dur_min",	"BA throttling min duration",		"ms",		50,	0,	10000 },\
+	{ "ba_throt_dur_step",	"BA throttling duration step",		"ms",		100,	50,	10000 },\
+	{ "ba_throt_winsize_min",	"BA throttling min winsize",	"number",	1,	0,	256 },\
+	{ "ba_throt_winsize_max",	"BA throttling max winsize",	"number",	16,	1,	256 },\
+	{ "wme_throt_ac",	"WME throttling AC bitmap",		"number",	3,	0,	15 },\
+	{ "wme_throt_aifsn",	"WME throttling AIFSN",			"number",	15,	0,	15 },\
+	{ "wme_throt_ecwmin",	"WME throttling encoded cwmin",		"number",	14,	1,	14 },\
+	{ "wme_throt_ecwmax",	"WME throttling encoded cwmax",		"number",	15,	1,	15 },\
+	{ "wme_throt_txoplimit","WME throttling TXOP limit",		"number",	0,	0,	65535 },\
+	{ "wme_throt_thrsh_disabled",	"WME throttling disabled stream cost threshold",\
+									"number",	150,	0,	1000 },\
+	{ "wme_throt_thrsh_victim",	"WME throttling victim stream cost threshold",\
+									"number",	150,	0,	1000 },\
+	{ "event_level",	"Event log level",			"number",	LL_0,	LL_0,	LL_9  },\
+	{ "debug_level",	"Debug log level",			"number",	LL_3,	LL_0,	LL_9  },\
+}
+
+/* Must be in sync with call_qcsapi_vsp_if_desc */
+enum qvsp_if_e {
+	QVSP_IF_ETH_RX,
+	QVSP_IF_QDRV_TX,
+	QVSP_IF_QDRV_RX,
+	QVSP_IF_PCIE_RX,
+	QVSP_IF_MAX
+};
+
+#define QVSP_IF_DESCS	{		\
+	"eth_rx",			\
+	"qdrv_tx",			\
+	"qdrv_rx",			\
+	"pcie_rx",			\
+	"invalid"			\
+}
+
+/*
+ * These must be kept in sync with QVSP_STRM_THROT_DESCS and QVSP_STRM_THROT_DESCS_ABBR.
+ */
+enum qvsp_strm_throt_policy {
+	QVSP_STRM_THROT_NONE = 0,
+	QVSP_STRM_THROT_BINARY = 1,
+	QVSP_STRM_THROT_ADPT = 2,
+	QVSP_STRM_THROT_MAX,
+};
+
+#define QVSP_STRM_THROT_DESCS {		\
+	"None",				\
+	"Binary",			\
+	"Adaptive",			\
+}
+
+#define QVSP_STRM_THROT_DESCS_ABBR {	\
+	"N/A",				\
+	"BIN",				\
+	"ADP",				\
+}
+
+enum qvsp_rule_dir_e {
+	QVSP_RULE_DIR_ANY,
+	QVSP_RULE_DIR_TX,
+	QVSP_RULE_DIR_RX,
+};
+
+#define QVSP_RULE_DIR_DESCS	{	\
+	"Any",				\
+	"Tx",				\
+	"Rx",				\
+}
+
+enum qvsp_rule_param_e {
+	QVSP_RULE_PARAM_DIR,
+	QVSP_RULE_PARAM_VAPPRI,
+	QVSP_RULE_PARAM_AC,
+	QVSP_RULE_PARAM_PROTOCOL,
+	QVSP_RULE_PARAM_TPUT_MIN,
+	QVSP_RULE_PARAM_TPUT_MAX,
+	QVSP_RULE_PARAM_COST_MIN,
+	QVSP_RULE_PARAM_COST_MAX,
+	QVSP_RULE_PARAM_ORDER,
+	QVSP_RULE_PARAM_THROT_POLICY,
+	QVSP_RULE_PARAM_DEMOTE,
+	QVSP_RULE_PARAM_MAX,
+};
+
+struct qvsp_rule_param {
+	const char	*name;
+	const char	*desc;
+	const char	*units;
+	uint32_t	min_val;
+	uint32_t	max_val;
+};
+
+#define QVSP_RULE_PARAMS	{								\
+	{ "dir",	"Direction",		"val",		0,	2 },			\
+	{ "vappri",	"VAP Priority",		"bitmap",	0x1,	0xf },			\
+	{ "ac",		"Access Classes",	"bitmap",	0x1,	0xf },			\
+	{ "protocol",	"IP protocol - TCP(6) or UDP(17)", "val", 6,	17 },			\
+	{ "tp_min",	"Min throughput",	"Mbps",		1,	10000 },		\
+	{ "tp_max",	"Max throughput",	"Mbps",		1,	10000 },		\
+	{ "cost_min",	"Cost min",		"msps",		1,	1000 },			\
+	{ "cost_max",	"Cost max",		"msps",		1,	1000 },			\
+	{ "order",	"Match order",		"val",		0,	QVSP_RULE_ORDER_MAX - 1 },\
+	{ "throt_policy",	"Throttling policy - binary(1) or adaptive(2)",                 \
+						"val",		1,	QVSP_STRM_THROT_MAX - 1 },\
+	{ "demote",	"Demote stream",        "val",		0,	1},                     \
+}
+
+/*
+ * These must be kept in sync with QVSP_RULE_ORDER_DESCS and QVSP_RULE_ORDER_DESCS_ABBR.
+ */
+enum qvsp_rule_order_e {
+	QVSP_RULE_ORDER_GREATEST_COST_NODE,
+	QVSP_RULE_ORDER_LEAST_COST_NODE,
+	QVSP_RULE_ORDER_GREATEST_NODE_INV_PHY_RATE,
+	QVSP_RULE_ORDER_LEAST_NODE_INV_PHY_RATE,
+	QVSP_RULE_ORDER_GREATEST_COST_STREAM,
+	QVSP_RULE_ORDER_LEAST_COST_STREAM,
+	QVSP_RULE_ORDER_NEWEST,
+	QVSP_RULE_ORDER_OLDEST,
+	QVSP_RULE_ORDER_LOWEST_TPUT,
+	QVSP_RULE_ORDER_HIGHEST_TPUT,
+	QVSP_RULE_ORDER_MAX
+};
+
+#define QVSP_RULE_ORDER_DESCS	{	\
+	"greatest cost node first",	\
+	"least cost node first",	\
+	"greatest inverse PHY rate node first",	\
+	"least inverse PHY rate node first",	\
+	"greatest cost stream first",	\
+	"least cost stream first",	\
+	"newest first",			\
+	"oldest first",			\
+	"lowest throughput first",	\
+	"highest throughput first",	\
+}
+
+#define QVSP_RULE_ORDER_DESCS_ABBR {    \
+	"GCN",                          \
+	"LCN",                          \
+	"GIPR",                         \
+	"LIPR",                         \
+	"GCS",                          \
+	"LCS",                          \
+	"NS",                           \
+	"OS",                           \
+	"LT",                           \
+	"HT",                           \
+}
+
+enum qvsp_strm_state_e {
+	QVSP_STRM_STATE_NONE,
+	QVSP_STRM_STATE_DISABLED,
+	QVSP_STRM_STATE_LOW_TPUT,
+	QVSP_STRM_STATE_PRE_ENABLED,
+	QVSP_STRM_STATE_ENABLED,
+	QVSP_STRM_STATE_DELETED,
+	QVSP_STRM_STATE_MAX
+};
+
+enum qvsp_hairpin_e {
+	QVSP_HAIRPIN_NONE,
+	QVSP_HAIRPIN_UCAST,
+	QVSP_HAIRPIN_MCAST,
+};
+
+#define QVSP_RULE_DIR_DESCS	{	\
+	"Any",				\
+	"Tx",				\
+	"Rx",				\
+}
+
+/* This definition must be kept in sync with the qvsp_ext_s struct */
+#define QVSP_INACTIVE_REASON	{	\
+		"Config",		\
+		"WDS",			\
+		"CoC"			\
+}
+
+#define QVSP_3RDPT_STR		"3"
+
+#ifndef MUC_BUILD
+
+/** \addtogroup vsp_group
+ *  @{
+ */
+
+/**
+ * Defines a stream based on source and destination
+ */
+struct qvsp_hash_flds_ipv4 {
+	/** IP source address */
+	__be32			saddr;
+
+	/** IP destination address */
+	__be32			daddr;
+
+	/** UDP/TCP source port */
+	__be16			sport;
+
+	/** UDP/TCP destination port */
+	__be16			dport;
+};
+
+struct qvsp_hash_flds_ipv6 {
+	/** IP source address */
+	struct in6_addr		saddr;
+
+	/** IP destination address */
+	struct in6_addr		daddr;
+
+	/** UDP/TCP source port */
+	__be16			sport;
+
+	/** UDP/TCP destination port */
+	__be16			dport;
+};
+
+union qvsp_hash_flds {
+	struct qvsp_hash_flds_ipv4	ipv4;
+	struct qvsp_hash_flds_ipv6	ipv6;
+};
+
+/**
+ * Whitelist definition. Passing streams are compared with
+ * the stream defined in 'hflds', ANDed with netmasks
+ */
+struct qvsp_wl_flds {
+	union qvsp_hash_flds	hflds;
+
+	/** IP source CIDR bitcount */
+	uint8_t			s_cidr_bits;
+
+	/** IP destination CIDR bitcount */
+	uint8_t			d_cidr_bits;
+
+	/** IP version */
+	uint8_t			ip_version;
+};
+
+/**
+ * IPv4 whitelist tricks for netmask; store netmasks in the hashfield union
+ */
+static inline __be32 * qvsp_wl_ipv4_netmask_src(struct qvsp_wl_flds *wl)
+{
+	struct qvsp_hash_flds_ipv4 *ipv4 = &wl->hflds.ipv4;
+	return (__be32 *)&ipv4[1];
+}
+
+static inline __be32 * qvsp_wl_ipv4_netmask_dst(struct qvsp_wl_flds *wl)
+{
+	return &(qvsp_wl_ipv4_netmask_src(wl))[1];
+}
+
+struct qvsp_rule_flds {
+	uint32_t		param[QVSP_RULE_PARAM_MAX];
+};
+
+struct qvsp_strm_stats {
+	unsigned long		first_ref;
+	uint32_t		pkts;
+	uint32_t		bytes;
+	uint32_t		bytes_sent;
+	uint32_t		pkts_sent;
+};
+
+struct qvsp_stats_if {
+	uint32_t		strm_add;
+	uint32_t		strm_none;
+	uint32_t		pkt_chk;
+	uint32_t		pkt_tcp;
+	uint32_t		pkt_udp;
+	uint32_t		pkt_other;
+	uint32_t		pkt_ignore;
+	uint32_t		pkt_sent;
+	uint32_t		pkt_drop_throttle;
+	uint32_t		pkt_drop_disabled;
+	uint32_t		pkt_demoted;
+	uint32_t		pkt_frag_found;
+	uint32_t		pkt_frag_not_found;
+};
+
+struct qvsp_stats {
+	uint32_t		is_qtm;		/* 0: VSP or 1: QTM */
+	uint32_t		strm_enable;
+	uint32_t		strm_disable;
+	uint32_t		strm_disable_remote;
+	uint32_t		strm_reenable;
+	uint32_t		fat_over;
+	uint32_t		fat_under;
+	uint32_t		fat_chk_disable;
+	uint32_t		fat_chk_reenable;
+	uint32_t		fat_chk_squeeze;
+	uint32_t		fat_chk_loosen;
+	struct qvsp_stats_if	stats_if[QVSP_IF_MAX];
+};
+
+/* This structure is being deprecated and replaced with the endian-safe qvsp_strm_entry_s */
+struct qvsp_strm_info {
+	union qvsp_hash_flds	hash_flds;
+	uint16_t		node_idx;
+	uint8_t			node_mac[6];
+	uint8_t			vap_pri;
+	uint8_t			tid;
+	uint16_t		hairpin_id;
+	uint16_t		hairpin_type;
+	uint8_t			ip_version;
+	uint8_t			ip_proto;
+	uint8_t			ac_in;
+	uint8_t			ac_out;
+	uint8_t			strm_state;
+	uint8_t			disable_remote;
+	uint8_t			is_3rdpt_udp_us;
+	uint16_t		last_ref_secs;
+	uint32_t		ni_inv_phy_rate;
+	uint32_t		phy_rate_disabled;
+	uint32_t		bytes_max;
+	uint32_t		ni_cost;
+	uint16_t		cost_current;
+	uint16_t		cost_max;
+	uint8_t			hash;
+	uint8_t			dir;
+	uint32_t                throt_policy;
+	uint32_t                throt_rate;
+	uint32_t                demote_rule;
+	/* current state, might be different from demote_rule when recovering */
+	uint32_t                demote_state;
+	struct qvsp_strm_stats	prev_stats;
+};
+
+/* Endian-safe version of qvsp_strm_info */
+struct qvsp_strm_info_safe {
+	uint16_t		node_idx;
+	uint8_t			node_mac[6];
+	uint8_t			vap_pri;
+	uint8_t			tid;
+	uint16_t		hairpin_id;
+	uint16_t		hairpin_type;
+	uint8_t			ac_in;
+	uint8_t			ac_out;
+	uint8_t			strm_state;
+	uint8_t			disable_remote;
+	uint8_t			is_3rdpt_udp_us;
+	uint16_t		last_ref_secs;
+	uint32_t		ni_inv_phy_rate;
+	uint32_t		phy_rate_disabled;
+	uint32_t		bytes_max;
+	uint32_t		ni_cost;
+	uint16_t		cost_current;
+	uint16_t		cost_max;
+	uint8_t			hash;
+	uint8_t			dir;
+	uint32_t                throt_policy;
+	uint32_t                throt_rate;
+	uint32_t                demote_rule;
+	/* current state, might be different from demote_rule when recovering */
+	uint32_t                demote_state;
+	struct qvsp_strm_stats	prev_stats;
+};
+
+#define QVSP_STRM_MAX_ENTRIES	256
+struct qvsp_strms {
+	struct qvsp_strm_info_safe strms[QVSP_STRM_MAX_ENTRIES];
+};
+
+/** @}*/
+
+#endif	/* MUC_BUILD */
+
+/*
+ * Convert kilobits (Kb) to bytes
+ */
+static __inline__ uint32_t
+qvsp_kbit2b(uint32_t kbps)
+{
+	return kbps * 1000 / NBBY;
+}
+
+/*
+ * Convert bytes to kilobits (Kb)
+ */
+static __inline__ uint32_t
+qvsp_b2kbit(uint32_t bytes)
+{
+	return bytes * NBBY / 1000;
+}
+
+/*
+ * Convert bytes transferred over an interval into kilobits per second
+ */
+static __inline__ uint32_t
+qvsp_b2kbitps(uint32_t bytes, unsigned long interval)
+{
+	/* bytes * NBBY = bits; bits / interval = Kbit/s (assumes interval is in ms -- TODO confirm at callers) */
+	return bytes * NBBY / interval;
+}
+
+/*
+ * Convert bytes to megabits (Mb)
+ */
+static __inline__ uint32_t
+qvsp_b2mbit(uint32_t bytes)
+{
+	return bytes * NBBY / 1000000;
+}
+
+/*
+ * Convert inverse PHY rate to PHY rate
+ */
+static __inline__ uint32_t
+qvsp_inv2phy(uint32_t inv_phy)
+{
+	return (inv_phy == 0) ? 0 : (65536 / inv_phy);
+}
+
+/*
+ * Convert faked IP addr to Node/Tid.
+ * @ip is network/big endian.
+ */
+static __inline__ void
+qvsp_fake_ip2nodetid(const uint32_t *ip, uint8_t *node, uint8_t *tid)
+{
+	*node = ((const uint8_t*)ip)[2];
+	*tid = ((const uint8_t*)ip)[3];
+}
+
+#define QVSP_TID_FAKE_IP_VERSION	4
+#define QVSP_TID_FAKE_IP_PROTO		IPPROTO_UDP
+
+/*
+ * Convert Node/Tid to faked IP addr
+ * Returned IP addr is network/big endian.
+ */
+static __inline__ void
+qvsp_fake_nodetid2ip(uint32_t *ip, const uint8_t node, const uint8_t tid)
+{
+	((uint8_t*)ip)[0] = 192;
+	((uint8_t*)ip)[1] = 168;
+	((uint8_t*)ip)[2] = node;
+	((uint8_t*)ip)[3] = tid;
+}
+
+#ifndef NIPQUAD_FMT
+#define NIPQUAD_FMT "%d.%d.%d.%d"
+#endif
+
+#ifndef NIPQUAD_LEN
+#define NIPQUAD_LEN 15
+#endif
+
+#ifndef NIPQUAD
+#define NIPQUAD(addr) \
+	((unsigned char *)&addr)[0], \
+	((unsigned char *)&addr)[1], \
+	((unsigned char *)&addr)[2], \
+	((unsigned char *)&addr)[3]
+#endif
+
+#define QVSP_CFG_SHOW_ANYSTR	"Any"
+
+#endif	/* DOXYGEN_EXCLUDE */
+
+#endif	/* __QTN_QVSP_DATA_H__ */
+
diff --git a/drivers/qtn/include/shared/qtn/qvsp_ioctl.h b/drivers/qtn/include/shared/qtn/qvsp_ioctl.h
new file mode 100644
index 0000000..5705bdb
--- /dev/null
+++ b/drivers/qtn/include/shared/qtn/qvsp_ioctl.h
@@ -0,0 +1,58 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012-2013 Quantenna Communications, Inc.            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  File        : qvsp_ioctl.h                                               **
+**  Description : Video Stream Protection                                    **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef __QTN_QVSP_IOCTL_H__
+#define __QTN_QVSP_IOCTL_H__
+
+enum qvsp_ioctl {
+	QVSP_IOCTL_MIN = 0x2000,
+	QVSP_IOCTL_STATE_GET,
+	QVSP_IOCTL_STATE_SET,
+	QVSP_IOCTL_CFG_GET,
+	QVSP_IOCTL_CFG_SET,
+	QVSP_IOCTL_WL_ADD,
+	QVSP_IOCTL_WL_DEL,
+	QVSP_IOCTL_WL_DEL_INDEX,
+	QVSP_IOCTL_WL_GETLIST,
+	QVSP_IOCTL_RULE_ADD,
+	QVSP_IOCTL_RULE_DEL,
+	QVSP_IOCTL_RULE_DEL_INDEX,
+	QVSP_IOCTL_RULE_GETLIST,
+	QVSP_IOCTL_STRM_GETLIST,
+	QVSP_IOCTL_STRM_GETLIST_SAFE,
+	QVSP_IOCTL_STRM_GETLIST_ALL,
+	QVSP_IOCTL_STRM_GETLIST_ALL_SAFE,
+	QVSP_IOCTL_STATS_GET,
+	QVSP_IOCTL_INACTIVE_FLAGS_GET,
+	QVSP_IOCTL_MAX
+};
+
+struct qvsp_ioctl_get {
+	unsigned int	index;
+	void		*param;
+	unsigned int	count;
+};
+
+struct qvsp_ioctl_set_cfg {
+	unsigned int index;
+	unsigned int value;
+};
+
+union qvsp_ioctl_set {
+	unsigned int index;
+	struct qvsp_ioctl_set_cfg	cfg;
+	struct qvsp_wl_flds		wl;
+	struct qvsp_rule_flds		rule;
+};
+
+#endif	/* __QTN_QVSP_IOCTL_H__ */
+
diff --git a/drivers/qtn/include/shared/qtn/wlan_ioctl.h b/drivers/qtn/include/shared/qtn/wlan_ioctl.h
new file mode 100644
index 0000000..c853798
--- /dev/null
+++ b/drivers/qtn/include/shared/qtn/wlan_ioctl.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __WLAN_IOCTL_H__
+#define __WLAN_IOCTL_H__
+
+enum ieee80211_wifi_mode {
+	IEEE80211_WIFI_MODE_NONE = 0,
+	IEEE80211_WIFI_MODE_A,
+	IEEE80211_WIFI_MODE_B,
+	IEEE80211_WIFI_MODE_G,
+	IEEE80211_WIFI_MODE_NA,
+	IEEE80211_WIFI_MODE_NG,
+	IEEE80211_WIFI_MODE_AC,
+	IEEE80211_WIFI_MODE_MAX,
+};
+
+#define WLAN_WIFI_MODES_STRINGS		{		\
+	[IEEE80211_WIFI_MODE_NONE] = "-",		\
+	[IEEE80211_WIFI_MODE_A] = "a",			\
+	[IEEE80211_WIFI_MODE_B] = "b",			\
+	[IEEE80211_WIFI_MODE_G] = "g",			\
+	[IEEE80211_WIFI_MODE_NA] = "na",		\
+	[IEEE80211_WIFI_MODE_NG] = "ng",		\
+	[IEEE80211_WIFI_MODE_AC] = "ac",		\
+}
+
+#define IEEE80211_HTCAP_IE_LENGTH	28
+#define IEEE80211_VHTCAP_IE_LENGTH	14
+
+struct ieee8011req_sta_tput_caps {
+	uint8_t	macaddr[ETH_ALEN];
+	uint8_t	mode;
+	uint8_t	htcap_ie[IEEE80211_HTCAP_IE_LENGTH];
+	uint8_t	vhtcap_ie[IEEE80211_VHTCAP_IE_LENGTH];
+};
+#endif /* __WLAN_IOCTL_H__ */
diff --git a/drivers/qtn/include/shared/qtn_logging.h b/drivers/qtn/include/shared/qtn_logging.h
new file mode 100644
index 0000000..09aa171
--- /dev/null
+++ b/drivers/qtn/include/shared/qtn_logging.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2014 Quantenna
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef QTNLOGGING_H
+#define QTNLOGGING_H
+
+#define QEVT_COMMON_PREFIX		"Dot11Msg:"
+#define QEVT_ACL_PREFIX			"ACL:"
+#define QEVT_PM_PREFIX			"PM:"
+
+#endif /* QTNLOGGING_H */
diff --git a/drivers/qtn/pcie2/host/arm/Makefile b/drivers/qtn/pcie2/host/arm/Makefile
new file mode 100644
index 0000000..c2375cd
--- /dev/null
+++ b/drivers/qtn/pcie2/host/arm/Makefile
@@ -0,0 +1,30 @@
+#
+# Makefile for arm platform
+#
+
+EXTRA_CFLAGS	+= -Wall -Wno-deprecated-declarations	\
+		   -I$(src)		\
+		   -I$(src)/../../include \
+		   -I$(src)/../common
+
+EXTRA_CFLAGS    += -DQTN_TX_SKBQ_SUPPORT -DQTN_WAKEQ_SUPPORT
+
+PWD	:= $(shell pwd)
+
+default: all
+
+COMMON_DIR	:= ../common
+qdpc-host-objs   := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
+obj-m           :=  qdpc-host.o
+
+qdpc_host.o: $(qdpc-host-objs)
+	ld -r $^ -o $@
+
+all:
+	make -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+
+clean:
+	rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
+	rm -rf Module.markers  Module.symvers modules.order *~ $(qdpc-host-objs) *.o *.ko *.mod.o *.mod.c
+
diff --git a/drivers/qtn/pcie2/host/arm/qdpc_platform.c b/drivers/qtn/pcie2/host/arm/qdpc_platform.c
new file mode 100644
index 0000000..d26114a
--- /dev/null
+++ b/drivers/qtn/pcie2/host/arm/qdpc_platform.c
@@ -0,0 +1,75 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customers need to modify this file.
+ */
+
+#include <linux/interrupt.h>
+
+#include <qdpc_platform.h>
+#include <topaz_vnet.h>
+#include <qdpc_regs.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+/*
+ * Enable MSI interrupt of PCIe.
+ */
+void enable_vmac_ints(struct vmac_priv *vmp)
+{
+	volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
+
+	writel(vmp->dma_msi_imwr, dma_wrd_imwr);
+}
+
+/*
+ * Disable MSI interrupt of PCIe.
+ */
+void disable_vmac_ints(struct vmac_priv *vmp)
+{
+	volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
+
+	writel(vmp->dma_msi_dummy, dma_wrd_imwr);
+}
+
+
+/*
+ * Enable interrupt for detecting EP reset.
+ */
+void enable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Disable interrupt for detecting EP reset.
+ */
+void disable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Interrupt context for detecting EP reset.
+ * This function should do:
+ *   1. check interrupt status to see if EP reset.
+ *   2. if EP reset, handle it.
+ */
+void handle_ep_rst_int(struct net_device *ndev)
+{
+}
diff --git a/drivers/qtn/pcie2/host/arm/qdpc_platform.h b/drivers/qtn/pcie2/host/arm/qdpc_platform.h
new file mode 100644
index 0000000..b3f678b
--- /dev/null
+++ b/drivers/qtn/pcie2/host/arm/qdpc_platform.h
@@ -0,0 +1,101 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customers need to modify this file.
+ */
+#ifndef __QDPC_PFDEP_H__
+#define __QDPC_PFDEP_H__
+
+#include <linux/version.h>
+
+#include <topaz_vnet.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#define IOREMAP      ioremap_wc
+#else    /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
+#define IOREMAP      ioremap
+#endif
+
+/* IO functions */
+#ifndef readb
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#endif
+
+#ifndef readw
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#endif
+
+#ifndef readl
+#define readl(addr) (*(volatile unsigned int *) (addr))
+#endif
+
+#ifndef writeb
+#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
+#endif
+
+#ifndef writew
+#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
+#endif
+
+#ifndef writel
+#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
+#endif
+
+#ifndef virt_to_bus
+#define virt_to_bus virt_to_phys
+#endif
+
+/* Bit number and mask of MSI in the interrupt mask and status register */
+#define	QDPC_INTR_MSI_BIT		0
+#define QDPC_INTR_MSI_MASK		(1 << QDPC_INTR_MSI_BIT)
+
+/* Enable MSI interrupt of PCIe */
+extern void enable_vmac_ints(struct vmac_priv *vmp);
+/* Disable MSI interrupt of PCIe */
+extern void disable_vmac_ints(struct vmac_priv *vmp);
+
+/* Enable interrupt for detecting EP reset */
+extern void enable_ep_rst_detection(struct net_device *ndev);
+/* Disable interrupt for detecting EP reset */
+extern void disable_ep_rst_detection(struct net_device *ndev);
+/* Interrupt context for detecting EP reset */
+extern void handle_ep_rst_int(struct net_device *ndev);
+
+/* Allocated buffer size for a packet */
+#define SKB_BUF_SIZE		2048
+
+/* Transmit Queue Length */
+#define QDPC_TX_QUEUE_SIZE	180
+
+/* Receive Queue Length */
+#define QDPC_RX_QUEUE_SIZE	384
+
+/* Customer defined function	*/
+#define qdpc_platform_init()                  0
+#define qdpc_platform_exit()                  do { } while(0)
+
+/* PCIe driver update resource in PCI configure space after EP reset */
+#define qdpc_update_hw_bar(pdev, index)       do { } while(0)
+
+/* TODO: If MSI IRQ-loss issue can be fixed, remove macro below */
+/*#define QDPC_PLATFORM_IRQ_FIXUP*/
+
+#endif /* __QDPC_PFDEP_H__ */
+
diff --git a/drivers/qtn/pcie2/host/common/qdpc_init.c b/drivers/qtn/pcie2/host/common/qdpc_init.c
new file mode 100644
index 0000000..13da8ed
--- /dev/null
+++ b/drivers/qtn/pcie2/host/common/qdpc_init.c
@@ -0,0 +1,1059 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <linux/netlink.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+
+#include "qdpc_config.h"
+#include "qdpc_debug.h"
+#include "qdpc_init.h"
+#include "qdpc_regs.h"
+#include "qdpc_platform.h"
+#include "topaz_vnet.h"
+/* Firmware images requested via request_firmware() during EP boot */
+#define QDPC_TOPAZ_IMG		"topaz-linux.lzma.img"
+#define QDPC_TOPAZ_UBOOT	"u-boot.bin"
+/* Maximum bring-up attempts made by the boot thread */
+#define MAX_IMG_NUM		2
+
+#define EP_BOOT_FROM_FLASH 1
+
+/* Fallback: bus address of the start of host RAM */
+#ifndef MEMORY_START_ADDRESS
+#define MEMORY_START_ADDRESS virt_to_bus((void *)PAGE_OFFSET)
+#endif
+
+static unsigned int tlp_mps = 256;
+module_param(tlp_mps, uint, 0644);
+MODULE_PARM_DESC(tlp_mps, "Default PCIe Max_Payload_Size");
+
+/*
+ * Define EP state during host suspend
+ * 0 = EP does not power off
+ * 1 = EP power off
+ */
+#define EP_SUSPEND_MODE_RUNNING	0
+#define EP_SUSPEND_MODE_PWR_OFF	1
+static unsigned int suspend_mode = EP_SUSPEND_MODE_RUNNING;
+module_param(suspend_mode, uint, 0644);
+MODULE_PARM_DESC(suspend_mode, "Default suspend behavior");
+/* Set while suspended with EP powered off; consumed by recovery/resume */
+static unsigned int suspend_flag = 0;
+
+/* Quantenna PCIE vendor and device identifiers  */
+static struct pci_device_id qdpc_pcie_ids[] = {
+	{PCI_DEVICE(QDPC_VENDOR_ID, QDPC_DEVICE_ID),},
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qdpc_pcie_ids);
+
+/* Forward declarations */
+static int qdpc_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void qdpc_pcie_remove(struct pci_dev *pdev);
+static int qdpc_boot_thread(void *data);
+static void qdpc_nl_recv_msg(struct sk_buff *skb);
+int qdpc_init_netdev(struct net_device  **net_dev, struct pci_dev *pdev);
+pci_ers_result_t qdpc_pcie_slot_reset(struct pci_dev *dev);
+static void qdpc_pcie_shutdown(struct pci_dev *pdev);
+
+#ifdef QTN_LINK_MONITOR
+static bool is_ep_reset = false;
+static int link_monitor(void *data);
+static struct task_struct *link_monitor_thread = NULL;
+#endif
+
+char qdpc_pcie_driver_name[] = "qdpc_host";
+
+#ifdef PCIE_HOTPLUG_SUPPORTED
+static struct pci_error_handlers qdpc_err_hdl = {
+        .slot_reset = qdpc_pcie_slot_reset,
+};
+#endif
+
+static struct pci_driver qdpc_pcie_driver = {
+	.name     = qdpc_pcie_driver_name,
+	.id_table = qdpc_pcie_ids,
+	.probe    = qdpc_pcie_probe,
+	.remove   = qdpc_pcie_remove,
+#ifdef CONFIG_QTN_PM
+	.suspend  = qdpc_pcie_suspend,
+	.resume  = qdpc_pcie_resume,
+#endif
+#ifdef PCIE_HOTPLUG_SUPPORTED
+        .err_handler = &qdpc_err_hdl,
+#endif
+	.shutdown = qdpc_pcie_shutdown,
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
+struct netlink_kernel_cfg qdpc_netlink_cfg = {
+	.groups   = 0,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	.flags    = 0,
+#endif
+	.input    = qdpc_nl_recv_msg,
+	.cb_mutex = NULL,
+	.bind     = NULL,
+};
+#endif
+
+struct sock *qdpc_nl_sk = NULL;
+int qdpc_clntPid = 0;
+
+/* BDA accessors; re-bound by qdpc_pci_endian_detect() to match EP byte order */
+unsigned int (*qdpc_pci_readl)(void *addr) = qdpc_readl;
+void (*qdpc_pci_writel)(unsigned int val, void *addr) = qdpc_writel;
+
+/*
+ * Sleep-poll the boot-state register until the EP reports @state, the
+ * thread is asked to stop, or a boot error flag is raised.
+ * Returns 0 on state reached / thread stop, -1 on boot error.
+ */
+static int qdpc_bootpoll(struct vmac_priv *p, uint32_t state)
+{
+	for (;;) {
+		if (kthread_should_stop() || qdpc_isbootstate(p, state))
+			return 0;
+		if (qdpc_booterror(p))
+			return -1;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(QDPC_SCHED_TIMEOUT);
+	}
+}
+/*
+ * Decode the boot-failure flags left in the boot data area (BDA) and log
+ * a human-readable reason.  bda_flags is read once through
+ * qdpc_pci_readl() so the access honours the detected endianness.
+ */
+static void booterror(qdpc_pcie_bda_t *bda)
+{
+	/* Single MMIO read instead of up to three */
+	uint32_t flags = qdpc_pci_readl(&bda->bda_flags);
+
+	/* Error messages carry an explicit KERN_ERR level */
+	if (flags & PCIE_BDA_TARGET_FWLOAD_ERR)
+		printk(KERN_ERR "EP boot from download firmware failed!\n");
+	else if (flags & PCIE_BDA_TARGET_FBOOT_ERR)
+		printk(KERN_ERR "EP boot from flash failed! Please check if there is usable image in Target flash.\n");
+	else
+		printk(KERN_ERR "EP boot get in error, dba flag: 0x%x\n", flags);
+}
+
+/*
+ * Detect the byte order of the shared PCI memory (BDA) by handshaking
+ * with the EP, then bind qdpc_pci_readl/qdpc_pci_writel to matching
+ * accessors.  Unknown values fall back to little endian.
+ *
+ * Protocol: write a known pattern into bda_pci_endian, publish the
+ * "pre" status, then poll until the EP sets the "post" status; finally
+ * read the pattern back and interpret it.
+ */
+static void qdpc_pci_endian_detect(struct vmac_priv *priv)
+{
+	__iomem qdpc_pcie_bda_t *bda = priv->bda;
+	volatile uint32_t pci_endian;
+
+	/* The pattern must land before the status flag: hence the mmiowb() */
+	writel(QDPC_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);
+	mmiowb();
+	writel(QDPC_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);
+
+	/* Sleep-poll until the EP acknowledges (or this thread is stopped) */
+	while (readl(&bda->bda_pci_post_status) != QDPC_PCI_ENDIAN_VALID_STATUS) {
+		if (kthread_should_stop())
+			break;
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(QDPC_SCHED_TIMEOUT);
+	}
+
+	pci_endian = readl(&bda->bda_pci_endian);
+	if (pci_endian == QDPC_PCI_LITTLE_ENDIAN) {
+		qdpc_pci_readl = qdpc_readl;
+		qdpc_pci_writel = qdpc_writel;
+		printk("PCI memory is little endian\n");
+	} else if (pci_endian == QDPC_PCI_BIG_ENDIAN) {
+		qdpc_pci_readl = qdpc_le32_readl;
+		qdpc_pci_writel = qdpc_le32_writel;
+		printk("PCI memory is big endian\n");
+	} else {
+		qdpc_pci_readl = qdpc_readl;
+		qdpc_pci_writel = qdpc_writel;
+		printk("PCI memory endian value:%08x is invalid - using little endian\n", pci_endian);
+	}
+
+	/* Clear endian flags */
+	writel(0, &bda->bda_pci_pre_status);
+	writel(0, &bda->bda_pci_post_status);
+	writel(0, &bda->bda_pci_endian);
+}
+
+/*
+ * Re-base the EP's DMA offset.  The EP publishes its mapping start
+ * address tagged with PCIE_DMA_OFFSET_ERROR in bda_dma_offset when the
+ * offset still needs host-side adjustment; in that case convert it into
+ * a host-relative offset and write it back.  Otherwise leave it alone.
+ */
+static void qdpc_pci_dma_offset_reset(struct vmac_priv *priv)
+{
+	__iomem qdpc_pcie_bda_t *bda = priv->bda;
+	uint32_t offset = readl(&bda->bda_dma_offset);
+
+	if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR) {
+		printk("DMA offset : 0x%08x, no need to reset the value.\n", offset);
+		return;
+	}
+
+	/* Strip the tag, then turn the EP address into a host-relative offset */
+	offset &= ~PCIE_DMA_OFFSET_ERROR_MASK;
+
+	printk("EP map start addr : 0x%08x, Host memory start : 0x%08x\n",
+			offset, (unsigned int)MEMORY_START_ADDRESS);
+
+	writel(offset - MEMORY_START_ADDRESS, &bda->bda_dma_offset);
+}
+
+/*
+ * DMA the firmware image @name down to the EP in 1MB blocks.
+ *
+ * One bounce buffer is reused for every block: copy a chunk of the
+ * image into it, sync it for the device, publish its bus address and
+ * size in the BDA, then handshake BLOCK_RDY/BLOCK_DONE with the EP.
+ * A zero address/size block marks the end of the image.
+ *
+ * Returns SUCCESS, FAILURE (reason flagged in bda_flags for the EP to
+ * see), or the negative error code from request_firmware().
+ */
+static int qdpc_firmware_load(struct pci_dev *pdev, struct vmac_priv *priv, const char *name)
+{
+#define DMABLOCKSIZE	(1 * 1024 * 1024)
+#define NBLOCKS(size)  ((size)/(DMABLOCKSIZE) + (((size)%(DMABLOCKSIZE) > 0) ? 1 : 0))
+
+	int result = SUCCESS;
+	const struct firmware *fw;
+	__iomem qdpc_pcie_bda_t  *bda = priv->bda;
+
+	/* Request compressed firmware from user space */
+	if ((result = request_firmware(&fw, name, &pdev->dev)) == -ENOENT) {
+		/*
+		 * No firmware found in the firmware directory, skip firmware downloading process
+		 * boot from flash directly on target
+		 */
+		printk( "no firmware found skip fw downloading\n");
+		qdpc_pcie_posted_write((PCIE_BDA_HOST_NOFW_ERR |
+					qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
+		return FAILURE;
+	} else if (result == SUCCESS) {
+		uint32_t nblocks = NBLOCKS(fw->size);
+		uint32_t remaining = fw->size;
+		uint32_t count;
+		uint32_t dma_offset = qdpc_pci_readl(&bda->bda_dma_offset);
+		void *data =(void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
+			get_order(DMABLOCKSIZE));
+		const uint8_t *curdata = fw->data;
+		dma_addr_t handle = 0;
+
+		if (!data) {
+			printk(KERN_ERR "Allocation failed for memory size[%u] Download firmware failed!\n", DMABLOCKSIZE);
+			release_firmware(fw);
+			qdpc_pcie_posted_write((PCIE_BDA_HOST_MEMALLOC_ERR |
+				qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
+			return FAILURE;
+		}
+
+		/* NOTE(review): pci_dma_mapping_error() is the canonical check here */
+		handle = pci_map_single(priv->pdev, data ,DMABLOCKSIZE, PCI_DMA_TODEVICE);
+		if (!handle) {
+			printk("Pci map for memory data block 0x%p error, Download firmware failed!\n", data);
+			free_pages((unsigned long)data, get_order(DMABLOCKSIZE));
+			release_firmware(fw);
+			qdpc_pcie_posted_write((PCIE_BDA_HOST_MEMMAP_ERR |
+				qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
+			return FAILURE;
+		}
+
+		/* Announce host-driven load; wait for the EP to get ready */
+		qdpc_setbootstate(priv, QDPC_BDA_FW_HOST_LOAD);
+		qdpc_bootpoll(priv, QDPC_BDA_FW_EP_RDY);
+
+		/* Start loading firmware */
+		for (count = 0 ; count < nblocks; count++)
+		{
+			uint32_t size = (remaining > DMABLOCKSIZE) ? DMABLOCKSIZE : remaining;
+
+			memcpy(data, curdata, size);
+			/* flush dcache */
+			pci_dma_sync_single_for_device(priv->pdev, handle ,size, PCI_DMA_TODEVICE);
+
+			/* Publish the bus address as seen by the EP (hence + dma_offset) */
+			qdpc_pcie_posted_write(handle + dma_offset, &bda->bda_img);
+			qdpc_pcie_posted_write(size, &bda->bda_img_size);
+			printk("FW Data[%u]: VA:0x%p PA:0x%p Sz=%u..\n", count, (void *)curdata, (void *)handle, size);
+
+			qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_RDY);
+			qdpc_bootpoll(priv, QDPC_BDA_FW_BLOCK_DONE);
+
+			/* size == min(remaining, DMABLOCKSIZE), so this is remaining -= size */
+			remaining = (remaining < size) ? remaining : (remaining - size);
+			curdata += size;
+			printk("done!\n");
+		}
+
+		pci_unmap_single(priv->pdev,handle, DMABLOCKSIZE, PCI_DMA_TODEVICE);
+		/* Mark end of block */
+		qdpc_pcie_posted_write(0, &bda->bda_img);
+		qdpc_pcie_posted_write(0, &bda->bda_img_size);
+		qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_RDY);
+		qdpc_bootpoll(priv, QDPC_BDA_FW_BLOCK_DONE);
+
+		qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_END);
+
+		PRINT_INFO("Image. Sz:%u State:0x%x\n", (uint32_t)fw->size, qdpc_pci_readl(&bda->bda_bootstate));
+		qdpc_bootpoll(priv, QDPC_BDA_FW_LOAD_DONE);
+
+		free_pages((unsigned long)data, get_order(DMABLOCKSIZE));
+		release_firmware(fw);
+		PRINT_INFO("Image downloaded....!\n");
+	} else {
+		PRINT_ERROR("Failed to load firmware:%d\n", result);
+		return result;
+     }
+	return result;
+}
+
+/* Tie together the PCI device, its net_device and the private state. */
+static void qdpc_pcie_dev_init(struct vmac_priv *priv, struct pci_dev *pdev, struct net_device *ndev)
+{
+	priv->ndev = ndev;
+	priv->pdev = pdev;
+
+	pci_set_drvdata(pdev, ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+}
+
+/*
+ * Clamp the PCIe Max_Payload_Size to the smaller of the values
+ * advertised by the device and its parent bridge (defaulting to the
+ * tlp_mps module parameter when no parent capability is found), then
+ * program DEVCTL on both ends.
+ *
+ * @pos: offset of @pdev's own PCI Express capability.
+ */
+static void qdpc_tune_pcie_mps(struct pci_dev *pdev, int pos)
+{
+	struct pci_dev *parent = NULL;
+	int ppos = 0;
+	uint32_t dev_cap, pcap;
+	uint16_t dev_ctl, pctl;
+	unsigned int mps = tlp_mps;
+#define BIT_TO_MPS(m) (1 << ((m) + 7))
+
+	if (pdev->bus && pdev->bus->self) {
+		parent = pdev->bus->self;
+		ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+		if (ppos) {
+			pci_read_config_dword(parent, ppos + PCI_EXP_DEVCAP, &pcap);
+			pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &dev_cap);
+			printk(KERN_INFO "parent cap:%u, dev cap:%u\n",\
+					BIT_TO_MPS(pcap & PCI_EXP_DEVCAP_PAYLOAD), BIT_TO_MPS(dev_cap & PCI_EXP_DEVCAP_PAYLOAD));
+			mps = min(BIT_TO_MPS(dev_cap & PCI_EXP_DEVCAP_PAYLOAD), BIT_TO_MPS(pcap & PCI_EXP_DEVCAP_PAYLOAD));
+		}
+	}
+	printk(KERN_INFO"Setting MPS to %u\n", mps);
+
+	/*
+	* Set Max_Payload_Size
+	* Max_Payload_Size_in_effect = 1 << ( ( (dev_ctl >> 5) & 0x07) + 7);
+	*/
+	mps = (((mps >> 7) - 1) << 5);
+	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &dev_ctl);
+	dev_ctl = ((dev_ctl & ~PCI_EXP_DEVCTL_PAYLOAD) | mps);
+	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, dev_ctl);
+
+	if (parent && ppos) {
+		/* BUGFIX: the parent must be accessed at its own capability
+		 * offset (ppos), not the child's (pos). */
+		pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
+		pctl = ((pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | mps);
+		pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+	}
+}
+
+/* The only net_device this driver manages; needed by contexts (netlink
+ * receive, link monitor) that are handed no device pointer. */
+static struct net_device *g_ndev = NULL;
+
+/*
+ * PCI probe entry point.
+ *
+ * Restores a previously saved config space (recovery case), saves the
+ * fresh power-on state, allocates the net_device, checks the PCIe
+ * capability and tunes MPS, sets up interrupts/memory, registers the
+ * vmac network interface, starts the boot-handshake thread and finally
+ * creates the netlink socket used by RPC clients.
+ */
+static int qdpc_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct vmac_priv *priv = NULL;
+	struct net_device *ndev = NULL;
+	int result = SUCCESS;
+	int pos;
+
+	/* When system boots up, the state of pdev is not saved on the entry of this function.
+	 * Data structure pci_dev will keep in use when module is removed and re-installed.
+	 * So we can call pci_restore_sate() to recover its previous configuration space.
+	 */
+	if (pdev->state_saved == true) {
+		printk("Recovery: restore saved state\n");
+		pci_restore_state(pdev);
+	}
+
+	/* Save "fresh poweron" state including BAR address, etc. So the state can be
+	 * used for recovery next time.
+	 */
+	pci_save_state(pdev);
+
+	/* Allocate device structure */
+	if (!(ndev = vmac_alloc_ndev()))
+		return -ENOMEM;
+
+	g_ndev = ndev;
+	priv = netdev_priv(ndev);
+	qdpc_pcie_dev_init(priv, pdev, ndev);
+
+	/* allocate netlink data buffer */
+	priv->nl_buf = kmalloc(VMAC_NL_BUF_SIZE, GFP_KERNEL);
+	if (!priv->nl_buf) {
+		result = -ENOMEM;
+		goto out;
+	}
+
+	/* Check if the device has PCI express capability */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (!pos) {
+		PRINT_ERROR(KERN_ERR "The device %x does not have PCI Express capability\n",
+	                pdev->device);
+		result = -ENOSYS;
+		goto out;
+	} else {
+		PRINT_DBG(KERN_INFO "The device %x has PCI Express capability\n", pdev->device);
+	}
+
+	qdpc_tune_pcie_mps(pdev, pos);
+
+	/*  Wake up the device if it is in suspended state and allocate IO,
+	 *  memory regions and IRQ if not
+	 */
+	if (pci_enable_device(pdev)) {
+		PRINT_ERROR(KERN_ERR "Failed to initialize PCI device with device ID %x\n",
+				pdev->device);
+
+		result = -EIO;
+		goto out;
+	} else {
+		PRINT_DBG(KERN_INFO "Initialized PCI device with device ID %x\n", pdev->device);
+	}
+
+	/* Enable bus mastering so the EP can DMA to/from host memory */
+	pci_set_master(pdev);
+
+	/* Initialize PCIE layer  */
+	if (( result = qdpc_pcie_init_intr_and_mem(priv)) < 0) {
+		PRINT_DBG("Interrupt & Memory Initialization failed \n");
+		goto release_memory;
+	}
+
+	if (!!(result = vmac_net_init(pdev))) {
+		PRINT_DBG("Vmac netdev init fail\n");
+		goto free_mem_interrupt;
+	}
+
+	/* Create and start the thread to initiate the INIT Handshake*/
+	priv->init_thread = kthread_run(qdpc_boot_thread, priv, "qdpc_init_thread");
+	if (IS_ERR(priv->init_thread)) {
+		/* BUGFIX: kthread_run() returns ERR_PTR() on failure, never
+		 * NULL, and result must be set or probe would report success
+		 * while tearing everything down. */
+		result = PTR_ERR(priv->init_thread);
+		priv->init_thread = NULL;
+		PRINT_ERROR("Init thread creation failed \n");
+		goto free_mem_interrupt;
+	}
+
+	/* Create netlink & register with kernel */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+	priv->nl_socket = netlink_kernel_create(&init_net,
+				QDPC_NETLINK_RPC_PCI_CLNT, &qdpc_netlink_cfg);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
+	priv->nl_socket = netlink_kernel_create(&init_net,
+				QDPC_NETLINK_RPC_PCI_CLNT, THIS_MODULE, &qdpc_netlink_cfg);
+#else
+	priv->nl_socket = netlink_kernel_create(&init_net,
+				QDPC_NETLINK_RPC_PCI_CLNT, 0, qdpc_nl_recv_msg,
+				NULL, THIS_MODULE);
+#endif
+	if (priv->nl_socket) {
+		return SUCCESS;
+	}
+
+	PRINT_ERROR(KERN_ALERT "Error creating netlink socket.\n");
+	result = FAILURE;
+
+	/* BUGFIX: stop the boot thread started above before freeing the
+	 * resources it works on. */
+	if (priv->init_thread) {
+		kthread_stop(priv->init_thread);
+		priv->init_thread = NULL;
+	}
+
+free_mem_interrupt:
+	qdpc_pcie_free_mem(pdev);
+	qdpc_free_interrupt(pdev);
+
+release_memory:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
+	/* Releasing the memory region if any error occured */
+	pci_clear_master(pdev);
+#endif
+
+	pci_disable_device(pdev);
+
+out:
+	kfree(priv->nl_buf);
+	free_netdev(ndev);
+	/* BUGFIX: do not leave a dangling global on failure */
+	g_ndev = NULL;
+	/* Any failure in probe, so it can directly return in remove */
+	pci_set_drvdata(pdev, NULL);
+
+	return result;
+}
+
+/*
+ * PCI remove: stop the boot thread and netlink socket, tear down the
+ * vmac/network state, release interrupts and mapped memory, disable
+ * the device, tell the EP to reset itself via the IPC doorbell, and
+ * free the net_device.  Teardown order mirrors probe in reverse.
+ */
+static void qdpc_pcie_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct vmac_priv *vmp;
+
+	/* drvdata is cleared when probe failed; nothing to undo then */
+	if (ndev == NULL)
+		return;
+
+	vmp = netdev_priv(ndev);
+
+	/* Stop tx traffic before dismantling anything */
+	vmp->ep_ready = 0;
+	if (vmp->init_thread)
+		kthread_stop(vmp->init_thread);
+	if (vmp->nl_socket)
+		netlink_kernel_release(vmp->nl_socket);
+
+	kfree(vmp->nl_buf);
+
+	vmac_clean(ndev);
+
+	qdpc_free_interrupt(pdev);
+	qdpc_pcie_free_mem(pdev);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
+	pci_clear_master(pdev);
+#endif
+	pci_disable_device(pdev);
+
+	/* Ring the EP reset doorbell while its register is still mapped */
+	writel(TOPAZ_SET_INT(IPC_RESET_EP), (volatile void *)(vmp->ep_ipc_reg));
+	qdpc_unmap_iomem(vmp);
+
+	free_netdev(ndev);
+	g_ndev = NULL;
+
+	return;
+}
+
+/*
+ * Program the device's PCI power-management control register directly
+ * to move it between D0 and D3hot (PME enabled for D3hot).
+ * Returns 0, or -EINVAL for any other requested state (no write made).
+ */
+static inline int qdpc_pcie_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+	uint16_t pmcsr;
+	uint16_t ctrl;
+
+	pci_read_config_word(pdev, TOPAZ_PCI_PM_CTRL_OFFSET, &pmcsr);
+	ctrl = pmcsr & ~PCI_PM_CTRL_STATE_MASK;
+
+	if (state == PCI_D0)
+		ctrl |= PCI_D0;
+	else if (state == PCI_D3hot)
+		ctrl |= PCI_D3hot | PCI_PM_CTRL_PME_ENABLE;
+	else
+		return -EINVAL;
+
+	pci_write_config_word(pdev, TOPAZ_PCI_PM_CTRL_OFFSET, ctrl);
+
+	return 0;
+}
+
+/*
+ * PM suspend hook.  Notifies the EP via the IPC doorbell that the host
+ * is entering D3hot, then saves config space and powers the function
+ * down.  A call while ep_pmstate is already D3hot is a no-op.
+ */
+int qdpc_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct vmac_priv *priv;
+
+	if (ndev == NULL)
+		return -EINVAL;
+
+	priv = netdev_priv(ndev);
+	if (le32_to_cpu(*priv->ep_pmstate) == PCI_D3hot) {
+		return 0;
+	}
+
+	printk("%s start power management suspend\n", qdpc_pcie_driver_name);
+
+	/* Set ep not ready to drop packets in low power mode */
+	priv->ep_ready = 0;
+
+	ndev->flags &= ~IFF_RUNNING;
+	/* Publish the new PM state before ringing the doorbell */
+	*priv->ep_pmstate = cpu_to_le32(PCI_D3hot);
+	barrier();
+	writel(TOPAZ_SET_INT(IPC_EP_PM_CTRL), (volatile void *)(priv->ep_ipc_reg));
+
+	/* NOTE(review): the 100ms wait presumably lets the EP act on the
+	 * notification before power-down — confirm with EP firmware. */
+	msleep(100);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	qdpc_pcie_set_power_state(pdev, PCI_D3hot);
+
+	if (suspend_mode == EP_SUSPEND_MODE_PWR_OFF)
+		suspend_flag = 1;
+
+	return 0;
+}
+
+/*
+ * PM resume hook: re-enable the device, restore config space, move it
+ * to D0, notify the EP via the IPC doorbell and wait for it, optionally
+ * running slot_reset recovery when the EP was powered off in suspend.
+ * A call while ep_pmstate is already D0 is a no-op.
+ */
+int qdpc_pcie_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct vmac_priv *priv;
+	int ret;
+
+	if (ndev == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	priv = netdev_priv(ndev);
+	if (le32_to_cpu(*priv->ep_pmstate) == PCI_D0) {
+		ret = 0;
+		goto out;
+	}
+
+	printk("%s start power management resume\n", qdpc_pcie_driver_name);
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		PRINT_ERROR("%s: pci_enable_device failed on resume\n", __func__);
+		goto out;
+	}
+
+	pci_restore_state(pdev);
+	/* keep the state marked saved so later suspend/recovery can restore it */
+	pdev->state_saved = true;
+	qdpc_pcie_set_power_state(pdev, PCI_D0);
+
+	{
+		/* Publish D0 then ring the doorbell.  NOTE(review): the 5s
+		 * sleep presumably gives EP firmware time to wake — confirm. */
+		*priv->ep_pmstate = cpu_to_le32(PCI_D0);
+		barrier();
+		writel(TOPAZ_SET_INT(IPC_EP_PM_CTRL), (volatile void *)(priv->ep_ipc_reg));
+
+		msleep(5000);
+	}
+
+#ifdef PCIE_HOTPLUG_SUPPORTED
+	/* EP lost power during suspend: run the slot-reset recovery path */
+	if ( (suspend_mode == EP_SUSPEND_MODE_PWR_OFF) &&
+	     (pdev->driver && pdev->driver->err_handler && pdev->driver->err_handler->slot_reset) ) {
+		printk("slot_reset in %s(), Device name: %s\n", __FUNCTION__, dev_name(&pdev->dev));
+		if(pdev->driver->err_handler->slot_reset(pdev) == PCI_ERS_RESULT_RECOVERED)
+			printk("Recovery OK\n");
+		else {
+			printk("Recovery Error");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+#endif
+
+	/* Set ep_ready to resume tx traffic */
+	priv->ep_ready = 1;
+	ndev->flags |= IFF_RUNNING;
+
+out:
+	if (suspend_mode == EP_SUSPEND_MODE_PWR_OFF)
+		suspend_flag = 0;
+	return ret;
+}
+
+/*
+ * Module entry: run the platform hook, register the PCI driver and
+ * (when QTN_LINK_MONITOR is set) start the link-monitor thread.
+ * Returns 0 on success or a negative errno.
+ */
+static int __init qdpc_init_module(void)
+{
+	int ret;
+
+	PRINT_DBG(KERN_INFO "Quantenna pcie driver initialization\n");
+
+	if (qdpc_platform_init()) {
+		PRINT_ERROR("Platform initilization failed \n");
+		return FAILURE;
+	}
+
+	/*  Register the pci driver with device*/
+	ret = pci_register_driver(&qdpc_pcie_driver);
+	if (ret < 0) {
+		PRINT_ERROR("Could not register the driver to pci : %d\n", ret);
+		/* BUGFIX: propagate the real error instead of masking it
+		 * as -ENODEV. */
+		return ret;
+	}
+
+#ifdef QTN_LINK_MONITOR
+	link_monitor_thread = kthread_run(link_monitor, NULL, "link_monitor");
+	if (IS_ERR(link_monitor_thread)) {
+		/* kthread_run() returns ERR_PTR() on failure; the monitor is
+		 * an optional aid, so run without it rather than fail init. */
+		link_monitor_thread = NULL;
+	}
+#endif
+
+	return ret;
+}
+
+/*
+ * Module exit: run the platform exit hook, stop the optional
+ * link-monitor thread, then unregister the PCI driver (which triggers
+ * qdpc_pcie_remove for the bound device).
+ */
+static void __exit qdpc_exit_module(void)
+{
+	/* Release netlink */
+	qdpc_platform_exit();
+
+#ifdef QTN_LINK_MONITOR
+	kthread_stop(link_monitor_thread);
+	link_monitor_thread = NULL;
+#endif
+
+	pci_unregister_driver(&qdpc_pcie_driver);
+}
+
+/*
+ * Tear down driver state ahead of an EP recovery attempt: mark the EP
+ * not ready, stop the boot thread if it is still running, release the
+ * vmac state and disable the PCI device.
+ */
+void qdpc_recovery_clean(struct pci_dev *pdev, struct net_device *ndev)
+{
+	struct vmac_priv *priv = netdev_priv(ndev);
+
+	priv->ep_ready = 0;
+
+	if (priv->init_thread != NULL) {
+		kthread_stop(priv->init_thread);
+		priv->init_thread = NULL;
+	}
+
+	vmac_recovery_clean(ndev);
+	pci_disable_device(pdev);
+}
+
+/*
+ * Re-initialise the driver after an EP reset: restore config space
+ * (skipped when resuming from a powered-off suspend), re-enable the
+ * device, rebuild the vmac state and restart the firmware-boot thread.
+ * Returns SUCCESS, or -1 with the device left disabled.
+ */
+int qdpc_recovery_reinit(struct pci_dev *pdev, struct net_device *ndev)
+{
+	struct vmac_priv *priv = NULL;
+
+	if (suspend_mode == EP_SUSPEND_MODE_PWR_OFF && suspend_flag)
+		suspend_flag = 0;
+	else {
+		if (pdev->state_saved == true) {
+			pci_restore_state(pdev);
+			/* keep the saved state for the next recovery cycle */
+			pdev->state_saved = true;
+		} else {
+			printk("Recovery Error: No saved state\n");
+			goto out;
+		}
+	}
+
+	if (pci_enable_device(pdev)) {
+		printk("Recovery Error: Failed to enable PCI device\n");
+		goto out;
+	}
+
+	priv = netdev_priv(ndev);
+	/* NOTE(review): comparison against positive ENOMEM kept from the
+	 * original — confirm vmac_recovery_init() does not return -ENOMEM. */
+	if (vmac_recovery_init(priv, ndev) == ENOMEM) {
+		printk("Recovery Error: Not enough memory\n");
+		goto qdpc_recovery_err_0;
+	}
+
+	priv->init_thread = kthread_run(qdpc_boot_thread, priv, "qdpc_init_thread");
+	if (IS_ERR(priv->init_thread)) {
+		/* BUGFIX: kthread_run() returns ERR_PTR() on failure, never NULL */
+		priv->init_thread = NULL;
+		printk("Recovery Error: Thread creation failed \n");
+		goto qdpc_recovery_err_0;
+	}
+
+	return SUCCESS;
+
+qdpc_recovery_err_0:
+	pci_disable_device(pdev);
+out:
+	return -1;
+}
+
+/*
+ * Verify that PCIe config reads work again after recovery by checking
+ * the vendor/device ID dword.  Returns 0 on a match, -1 otherwise.
+ */
+static int qdpc_recovery_access_check(struct pci_dev *pdev)
+{
+	const uint32_t expected = (QDPC_DEVICE_ID << 16) | QDPC_VENDOR_ID;
+	uint32_t val = 0;
+
+	pci_read_config_dword(pdev, QDPC_VENDOR_ID_OFFSET, &val);
+
+	if (val != expected) {
+		printk("%s: PCIe read access check: Fail: VENDOR_ID read error: 0x%08x\n", __func__, val);
+		return -1;
+	}
+
+	printk("%s: PCIe read access check: Pass\n", __func__);
+	return 0;
+}
+
+/*
+ * Full EP recovery: tear down state, wait for the link to come back,
+ * verify config-space access and re-initialise the driver.
+ * Returns 0 on success, -1 on failure.
+ */
+int qdpc_pcie_recovery(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+
+	qdpc_recovery_clean(pdev, ndev);
+
+	/* Wait EP link up. If this function is called at hardirq context where 10s
+	 * delay is not allowed, please replace with link up check at Root Complex's
+	 * status register.
+	 */
+	mdelay(10000);
+
+	if (qdpc_recovery_access_check(pdev) != 0)
+		return -1;
+
+	/* Re-allocate and initialize data structure.
+	 * BUGFIX: propagate reinit failure instead of unconditionally
+	 * reporting a successful recovery. */
+	if (qdpc_recovery_reinit(pdev, ndev) != SUCCESS)
+		return -1;
+
+	return 0;
+}
+
+/* pci_error_handlers hook: map the recovery result onto pci_ers_result_t. */
+pci_ers_result_t qdpc_pcie_slot_reset(struct pci_dev *dev)
+{
+	return (qdpc_pcie_recovery(dev) == 0) ?
+		PCI_ERS_RESULT_RECOVERED : PCI_ERS_RESULT_DISCONNECT;
+}
+
+/* Shutdown hook: reuse the remove path to quiesce the device. */
+static void qdpc_pcie_shutdown(struct pci_dev *pdev)
+{
+	qdpc_pcie_remove(pdev);
+}
+
+#ifdef QTN_LINK_MONITOR
+/*
+ * Returns true once the vendor/device ID dword reads back correctly,
+ * i.e. the PCIe link is up and config space is reachable.
+ * NOTE(review): the 10s sleep after link-up is presumably to let the
+ * EP finish internal init before the caller reprobes — confirm.
+ */
+static inline bool is_pcie_linkup(struct pci_dev *pdev)
+{
+	uint32_t cs = 0;
+
+	pci_read_config_dword(pdev, QDPC_VENDOR_ID_OFFSET, &cs);
+	if (cs == QDPC_LINK_UP) {
+		msleep(10000);
+		printk("%s: PCIe link up!\n", __func__);
+		return true;
+	}
+
+	return false;
+}
+
+/* Debug aid: dump the standard config-space header, one dword per row. */
+static inline void qdpc_pcie_print_config_space(struct pci_dev *pdev)
+{
+	uint32_t val = 0;
+	int off;
+
+	for (off = QDPC_VENDOR_ID_OFFSET; off <= QDPC_INT_LINE_OFFSET; off += QDPC_ROW_INCR_OFFSET) {
+		pci_read_config_dword(pdev, off, &val);
+		printk("%s: pdev:0x%p config_space offset:0x%02x value:0x%08x\n", __func__, pdev, off, val);
+	}
+	printk("\n");
+}
+
+/*
+ * Detect an EP reset: when the link drops, the BDA endian field reads
+ * back all 1s (QDPC_LINK_DOWN), which latches is_ep_reset.
+ * NOTE(review): the config-space read result (cs) is never used; it
+ * appears to serve only as a bus access before the MMIO read — confirm.
+ */
+static inline void qdpc_pcie_check_link(struct pci_dev *pdev, struct vmac_priv *priv)
+{
+	__iomem qdpc_pcie_bda_t *bda = priv->bda;
+	uint32_t cs = 0;
+
+	pci_read_config_dword(pdev, QDPC_VENDOR_ID_OFFSET, &cs);
+	/* Endian value will be all 1s if link went down */
+	if (readl(&bda->bda_pci_endian) == QDPC_LINK_DOWN) {
+		is_ep_reset = true;
+		printk("Reset detected\n");
+	}
+}
+
+/*
+ * Kernel thread that watches for EP resets.  It sleeps until woken by
+ * qdpc_boot_done(), then polls config space; on a detected reset it
+ * removes the device, waits for the link to return and re-runs probe.
+ * NOTE(review): the is_pcie_linkup() wait loop below has no sleep of
+ * its own beyond the one inside that helper — confirm this is intended.
+ */
+static int link_monitor(void *data)
+{
+	struct net_device *ndev = NULL;
+	struct vmac_priv *priv = NULL;
+	/* NOTE(review): bda is assigned but never used in this loop */
+	__iomem qdpc_pcie_bda_t *bda = NULL;
+	struct pci_dev *pdev = NULL;
+	uint32_t cs = 0;
+
+	set_current_state(TASK_RUNNING);
+	while (!kthread_should_stop()) {
+		/* Sleep until woken (by qdpc_boot_done or kthread_stop) */
+		__set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		set_current_state(TASK_RUNNING);
+
+		ndev = g_ndev;
+		priv = netdev_priv(ndev);
+		bda = priv->bda;
+		pdev = priv->pdev;
+
+#ifdef QDPC_CS_DEBUG
+		qdpc_pcie_print_config_space(pdev);
+		msleep(5000);
+#endif
+		/* Check if reset to EP occurred */
+		while (!pci_read_config_dword(pdev, QDPC_VENDOR_ID_OFFSET, &cs)) {
+
+			if (kthread_should_stop())
+				do_exit(0);
+
+			qdpc_pcie_check_link(pdev, priv);
+			if (is_ep_reset) {
+				is_ep_reset = false;
+				qdpc_pcie_remove(pdev);
+				printk("%s: Attempting to recover from EP reset\n", __func__);
+				break;
+			}
+			msleep(500);
+		}
+
+		/* Wait for the link to come back before reprobing */
+		while(!is_pcie_linkup(pdev)) {
+		}
+
+#ifdef QDPC_CS_DEBUG
+		qdpc_pcie_print_config_space(pdev);
+#endif
+
+		qdpc_pcie_probe(pdev, NULL);
+	}
+	do_exit(0);
+}
+#endif
+
+/*
+ * Run the full boot handshake with the EP: detect shared-memory endian,
+ * fix up the DMA offset, then either let the EP boot from flash (when
+ * bda_flags advertises PCIE_BDA_FLASH_PRESENT) or download u-boot or
+ * the Linux image, and finally wait for the firmware to reach RUNNING.
+ *
+ * Returns the masked bda flags as an int; the boot thread retries while
+ * this is > 0.  Terminates the calling kthread via do_exit(-1) on fatal
+ * errors.
+ */
+static int qdpc_bringup_fw(struct vmac_priv *priv)
+{
+	__iomem qdpc_pcie_bda_t  *bda = priv->bda;
+	uint32_t bdaflg;
+	char *fwname;
+
+	qdpc_pci_endian_detect(priv);
+	qdpc_pci_dma_offset_reset(priv);
+
+	printk("Setting HOST ready...\n");
+	qdpc_setbootstate(priv, QDPC_BDA_FW_HOST_RDY);
+	qdpc_bootpoll(priv, QDPC_BDA_FW_TARGET_RDY);
+
+	if (qdpc_set_dma_mask(priv)){
+		printk("Failed to map DMA mask.\n");
+		priv->init_thread = NULL;
+		do_exit(-1);
+	}
+
+	bdaflg = qdpc_pci_readl(&bda->bda_flags);
+	if ((PCIE_BDA_FLASH_PRESENT & bdaflg) && EP_BOOT_FROM_FLASH) {
+		printk("EP have fw in flash, boot from flash\n");
+		qdpc_pcie_posted_write((PCIE_BDA_FLASH_BOOT |
+			qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
+		qdpc_setbootstate(priv, QDPC_BDA_FW_TARGET_BOOT);
+		qdpc_bootpoll(priv, QDPC_BDA_FW_FLASH_BOOT);
+		goto fw_start;
+	}
+	/* NOTE(review): bdaflg is reduced to just the XMIT_UBOOT bit here;
+	 * the later != PCIE_BDA_XMIT_UBOOT test and the return value both
+	 * rely on this — confirm intent before changing. */
+	bdaflg &= PCIE_BDA_XMIT_UBOOT;
+	fwname = bdaflg ? QDPC_TOPAZ_UBOOT : QDPC_TOPAZ_IMG;
+
+	qdpc_setbootstate(priv, QDPC_BDA_FW_TARGET_BOOT);
+	printk("EP FW load request...\n");
+	qdpc_bootpoll(priv, QDPC_BDA_FW_LOAD_RDY);
+
+	printk("Start download Firmware %s...\n", fwname);
+	if (qdpc_firmware_load(priv->pdev, priv, fwname)){
+		printk("Failed to download firmware.\n");
+		priv->init_thread = NULL;
+		do_exit(-1);
+	}
+
+fw_start:
+	qdpc_setbootstate(priv, QDPC_BDA_FW_START);
+	printk("Start booting EP...\n");
+	/* u-boot transfers skip the config/run phase */
+	if (bdaflg != PCIE_BDA_XMIT_UBOOT) {
+		if (qdpc_bootpoll(priv,QDPC_BDA_FW_CONFIG)) {
+			booterror(bda);
+			priv->init_thread = NULL;
+			do_exit(-1);
+		}
+		printk("EP boot successful, starting config...\n");
+
+		/* Save target-side MSI address for later enable/disable irq*/
+		priv->dma_msi_imwr = readl(QDPC_BAR_VADDR(priv->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET));
+		priv->dma_msi_dummy = virt_to_bus(&priv->dma_msi_data) + qdpc_pci_readl(&bda->bda_dma_offset);
+		priv->ep_pciecfg0_val = readl(QDPC_BAR_VADDR(priv->sysctl_bar, TOPAZ_PCIE_CFG0_OFFSET));
+
+		qdpc_setbootstate(priv, QDPC_BDA_FW_RUN);
+		qdpc_bootpoll(priv,QDPC_BDA_FW_RUNNING);
+		priv->ep_ready = 1;
+	}
+
+	return (int)bdaflg;
+}
+
+/*
+ * Final step of the boot thread: report the established link, wake the
+ * optional link-monitor thread and terminate the thread via do_exit(0).
+ */
+static int qdpc_boot_done(struct vmac_priv *priv)
+{
+	PRINT_INFO("Connection established with Target BBIC4 board\n");
+
+#ifdef QTN_LINK_MONITOR
+	if (link_monitor_thread)
+		wake_up_process(link_monitor_thread);
+#endif
+
+	priv->init_thread = NULL;
+	do_exit(0);
+}
+
+/*
+ * Boot-thread entry point: attempt firmware bring-up, retrying (up to
+ * MAX_IMG_NUM times) while qdpc_bringup_fw() returns a positive value.
+ */
+static int qdpc_boot_thread(void *data)
+{
+	struct vmac_priv *priv = data;
+	int attempt;
+
+	for (attempt = 0; attempt < MAX_IMG_NUM; attempt++) {
+		if (qdpc_bringup_fw(priv) <= 0)
+			break;
+	}
+
+	qdpc_boot_done(priv);
+
+	return 0;
+}
+
+/*
+ * Netlink receive handler for RPC clients.
+ *
+ * Registration messages record the sender's pid in priv; request
+ * messages are re-packaged into one or more qdpc_cmd_hdr_t-framed skbs
+ * (fragmented at the netdev MTU) and sent to the EP via dev_queue_xmit().
+ */
+static void qdpc_nl_recv_msg(struct sk_buff *skb)
+{
+	struct vmac_priv *priv = netdev_priv(g_ndev);
+	struct nlmsghdr *nlh  = (struct nlmsghdr*)skb->data;
+	struct sk_buff *skb2;
+	unsigned int data_len;
+	unsigned int offset;
+	qdpc_cmd_hdr_t *cmd_hdr;
+	uint16_t rpc_type;
+
+	/* Parsing the netlink message */
+
+	PRINT_DBG(KERN_INFO "%s line %d Netlink received pid:%d, size:%d, type:%d\n",
+		__FUNCTION__, __LINE__, nlh->nlmsg_pid, nlh->nlmsg_len, nlh->nlmsg_type);
+
+	switch (nlh->nlmsg_type) {
+		case QDPC_NL_TYPE_CLNT_STR_REG:
+		case QDPC_NL_TYPE_CLNT_LIB_REG:
+			/* Remember where to send replies for this client class */
+			if (nlh->nlmsg_type == QDPC_NL_TYPE_CLNT_STR_REG)
+				priv->str_call_nl_pid = nlh->nlmsg_pid;
+			else
+				priv->lib_call_nl_pid = nlh->nlmsg_pid;
+			return;
+		case QDPC_NL_TYPE_CLNT_STR_REQ:
+		case QDPC_NL_TYPE_CLNT_LIB_REQ:
+			break;
+		default:
+			PRINT_DBG(KERN_INFO "%s line %d Netlink Invalid type %d\n",
+				__FUNCTION__, __LINE__, nlh->nlmsg_type);
+			return;
+	}
+
+	/*
+	 * make new skbs; Fragment if necessary.
+	 * The original skb will be freed in netlink_unicast_kernel,
+	 * we hold the new skbs until DMA transfer is done
+	 */
+	offset = sizeof(struct nlmsghdr);
+	/* NOTE(review): nlmsg_len includes the nlmsg header, yet copying
+	 * starts after the header — the final fragment appears to over-read
+	 * by the header size; confirm against the EP-side protocol. */
+	data_len = nlh->nlmsg_len;
+
+	while (data_len > 0) {
+		unsigned int len = min_t(unsigned int, data_len, priv->ndev->mtu);
+		unsigned int skb2_len = len + sizeof(qdpc_cmd_hdr_t);
+
+		skb2 = alloc_skb(skb2_len, GFP_ATOMIC);
+		if (!skb2) {
+			printk(KERN_INFO "%s: skb alloc failed\n", __func__);
+			return;
+		}
+
+		data_len -= len;
+
+		/* Flag continuation fragments so the EP can reassemble */
+		rpc_type = nlh->nlmsg_type & QDPC_RPC_TYPE_MASK;
+		rpc_type |= (data_len > 0 ? QDPC_RPC_TYPE_FRAG : 0);
+
+		cmd_hdr = (qdpc_cmd_hdr_t *)skb2->data;
+		memcpy(cmd_hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN);
+		memcpy(cmd_hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN);
+		cmd_hdr->type = __constant_htons(QDPC_APP_NETLINK_TYPE);
+		cmd_hdr->len = htons((uint16_t)len);
+		cmd_hdr->rpc_type = htons(rpc_type);
+		cmd_hdr->total_len = htons((uint16_t)(nlh->nlmsg_len));
+
+		memcpy((uint8_t *)(cmd_hdr + 1), skb->data + offset, len);
+
+		offset += len;
+
+		skb_put(skb2, skb2_len);
+		skb_reset_mac_header(skb2);
+		skb_reset_network_header(skb2);
+		skb2->protocol = __constant_htons(QDPC_APP_NETLINK_TYPE);
+		skb2->dev = priv->ndev;
+
+		dev_queue_xmit(skb2);
+	}
+}
+
+/* Module entry/exit registration */
+module_init(qdpc_init_module);
+module_exit(qdpc_exit_module);
diff --git a/drivers/qtn/pcie2/host/common/qdpc_init.h b/drivers/qtn/pcie2/host/common/qdpc_init.h
new file mode 100644
index 0000000..13c2548
--- /dev/null
+++ b/drivers/qtn/pcie2/host/common/qdpc_init.h
@@ -0,0 +1,120 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __QDPC_INIT_H_
+#define __QDPC_INIT_H_
+
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include "topaz_vnet.h"
+
+#define QDPC_MODULE_NAME                 "qdpc_ruby"
+#define QDPC_DEV_NAME                    "qdpc_ruby"
+#define QDPC_MODULE_VERSION              "1.0"
+
+/* PCIe device information declarations */
+#define  QDPC_VENDOR_ID                   0x1bb5
+#define  QDPC_DEVICE_ID                   0x0008
+#define  QDPC_PCIE_NUM_BARS               6
+
+/* PCIe Configuration Space Defines */
+/* Used to indicate CS is valid and link is up */
+#define	QDPC_LINK_UP	((QDPC_DEVICE_ID << 16) | QDPC_VENDOR_ID)
+#define	QDPC_LINK_DOWN	0xffffffff /* Used to indicate link went down */
+#define	QDPC_VENDOR_ID_OFFSET	0x00
+#define	QDPC_INT_LINE_OFFSET	0x3C
+#define	QDPC_ROW_INCR_OFFSET	0x04
+#undef	QDPC_CS_DEBUG
+
+extern unsigned int (*qdpc_pci_readl)(void *addr);
+extern void (*qdpc_pci_writel)(unsigned int val, void *addr);
+
+/*
+ * End-point(EP) is little-Endian.
+ * These two macros are used for host side outbound window memory access.
+ * Outbound here is host side view-point. So memory accessed by these two macros
+ * should be on EP side.
+ * NOTE: On some platforms, outbound hardware swap(byte order swap) should be
+ * enabled for outbound memory access correctly. If enabled, Endian translation
+ * will be done by hardware, and software Endian translation should be disabled.
+ * */
+#ifdef OUTBOUND_HW_SWAP
+	#define le32_readl(x)           readl(x)
+	#define le32_writel(x, addr)    writel(x, addr)
+#else
+	#define le32_readl(x)           le32_to_cpu(readl((x)))
+	#define le32_writel(x, addr)    writel(cpu_to_le32((x)), addr)
+#endif
+
+static inline unsigned int qdpc_readl(void *addr)
+{
+	return readl(addr);
+}
+static inline void qdpc_writel(unsigned int val, void *addr)
+{
+	writel(val, addr);
+}
+static inline unsigned int qdpc_le32_readl(void *addr)
+{
+	return le32_to_cpu(readl((addr)));
+}
+static inline void qdpc_le32_writel(unsigned int val, void *addr)
+{
+	writel(cpu_to_le32((val)), addr);
+}
+
+static inline void qdpc_pcie_posted_write(uint32_t val, __iomem void *basereg)
+{
+	qdpc_pci_writel(val,basereg);
+	/* flush posted write */
+	qdpc_pci_readl(basereg);
+}
+
+static inline int qdpc_isbootstate(struct vmac_priv *p, uint32_t state) {
+	__iomem uint32_t *status = &p->bda->bda_bootstate;
+	uint32_t s = qdpc_pci_readl(status);
+	return (s == state);
+}
+static inline int qdpc_booterror(struct vmac_priv *p) {
+	__iomem uint32_t *status = &p->bda->bda_flags;
+	uint32_t s = qdpc_pci_readl(status);
+	return (s & PCIE_BDA_ERROR_MASK);
+}
+static inline void qdpc_setbootstate(struct vmac_priv *p, uint32_t state) {
+	__iomem qdpc_pcie_bda_t *bda = p->bda;
+
+	qdpc_pcie_posted_write(state, &bda->bda_bootstate);
+}
+
+/* Function prototypes */
+int qdpc_pcie_init_intr_and_mem(struct vmac_priv *priv);
+void qdpc_interrupt_target(struct vmac_priv *priv, uint32_t intr);
+void qdpc_disable_irqs(struct vmac_priv *priv);
+void qdpc_enable_irqs(struct vmac_priv *priv);
+void qdpc_free_interrupt(struct pci_dev *pdev);
+void qdpc_pcie_free_mem(struct pci_dev *pdev);
+void qdpc_init_target_buffers(void *data);
+int qdpc_send_packet(struct sk_buff *skb, struct net_device *ndev);
+void *qdpc_map_pciemem(unsigned long busaddr, size_t len);
+void qdpc_unmap_pciemem(unsigned long busaddr, void *vaddr, size_t len);
+int qdpc_unmap_iomem(struct vmac_priv *priv);
+int32_t qdpc_set_dma_mask(struct vmac_priv *priv);
+
+#endif
diff --git a/drivers/qtn/pcie2/host/common/qdpc_pcie.c b/drivers/qtn/pcie2/host/common/qdpc_pcie.c
new file mode 100644
index 0000000..34fff2f
--- /dev/null
+++ b/drivers/qtn/pcie2/host/common/qdpc_pcie.c
@@ -0,0 +1,353 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <asm/byteorder.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+#include <asm-generic/pci-dma-compat.h>
+
+#include "qdpc_config.h"
+#include "qdpc_debug.h"
+#include "qdpc_init.h"
+#include "qdpc_regs.h"
+#include <qdpc_platform.h>
+
+static int use_msi = 1;
+module_param(use_msi, int, 0644);
+MODULE_PARM_DESC(use_msi, "Set 0 to use Legacy interrupt");
+
+static int qdpc_pcie_init_intr(struct vmac_priv *priv);
+static int qdpc_pcie_init_mem(struct vmac_priv *priv);
+static int g_msi = 1;
+int32_t qdpc_pcie_init_intr_and_mem(struct vmac_priv *priv)
+{
+	struct pci_dev *pdev = priv->pdev;
+	int result  = 0;
+
+	/*  Initialize interrupts */
+	if (( result = qdpc_pcie_init_intr(priv)) < 0) {
+		PRINT_ERROR("PCIe Interrupt Initialization failed \n");
+		return result;
+	}
+
+	/* Memory Initialization */
+	if (( result = qdpc_pcie_init_mem(priv)) < 0) {
+		PRINT_ERROR("PCIe Memory Initialization failed \n");
+		qdpc_free_interrupt(pdev);
+	}
+
+	return result;
+}
+
+static int32_t qdpc_pcie_init_intr(struct vmac_priv *priv)
+{
+	struct pci_dev *pdev = priv->pdev;
+
+	priv->msi_enabled = 0; /* Set default to use Legacy INTx interrupt */
+
+	/* Check if the device has MSI capability */
+	if (use_msi) {
+		if (!pci_enable_msi(pdev)) {
+			PRINT_INFO("PCIe MSI Interrupt Enabled\n");
+			priv->msi_enabled = 1;
+		} else {
+			PRINT_ERROR("PCIe MSI Interrupt enabling failed. Fall back to Legacy IRQ\n");
+		}
+	}
+
+	if(!priv->msi_enabled) {
+		PRINT_INFO("PCIe Legacy Interrupt Enabled\n");
+		pci_intx(pdev, 1);
+	}
+
+	return 0;
+}
+
+static bool qdpc_bar_check(struct vmac_priv *priv, qdpc_bar_t *bar)
+{
+	uint32_t offset = bar->b_offset;
+	size_t len = bar->b_len;
+	dma_addr_t busaddr = bar->b_busaddr;
+	uint8_t index = bar->b_index;
+
+	if (index > 5) {
+		printk("Invalid BAR index:%u. Must be between 0 and 5\n", index);
+		return 0;
+	}
+
+	if (!len) {
+		/* NOTE:
+		  * Do not use an implicit length such as the BAR length
+		  * if the map length is too large say > 16Mb this leaves
+		  * the implementation vulnerable to
+		  * Linux and the attack of the Silent  "S" (one between the n and u)
+		  */
+		printk("Zero length BAR\n");
+		return 0;
+	}
+
+	if (busaddr) { /*initialized BAR */
+		unsigned long bar_start =  pci_resource_start(priv->pdev , index);
+		unsigned long bar_end =  pci_resource_end(priv->pdev , index);
+
+		if (!bar_start) {
+			printk("Invalid BAR address: 0x%p.\n", (void *)busaddr);
+			return 0;
+		}
+
+		if ((busaddr - offset) != bar_start) {
+			printk("Invalid BAR offset:0x%p. BAR starts at 0x%p\n",
+				(void *)(busaddr -offset), (void *)bar_start);
+			return 0;
+		}
+		/* Check the span of the BAR including the offset + length, bar_end points to the last byte of BAR */
+		if ((busaddr + len - 1) > bar_end) {
+			printk("Invalid BAR end address:0x%p. BAR ends at 0x%p\n",
+				(void *)(busaddr + len), (void *)bar_end);
+			return 0;
+		}
+	} else { /* Uninitialized BAR */
+		unsigned long bar_end =  pci_resource_end(priv->pdev , index);
+		busaddr = pci_resource_start(priv->pdev , index);
+
+		if (!busaddr) {
+			printk("Invalid BAR address: 0x%p.\n", (void *)busaddr);
+			return 0;
+		}
+
+		/* Checks that offset area is within bar */
+		if ( (busaddr + offset) > bar_end) {
+			printk("Invalid BAR offset 0x%p, extends beyond end of BAR(0x%p).\n",
+				(void *)(busaddr + offset), (void *)bar_end);
+			return 0;
+		}
+
+		/* Checks that mapped area is within bar */
+		if ((busaddr + len + offset - 1) > bar_end) {
+			printk("Mapped area 0x%p, extends beyond end of BAR(0x%p).\n",
+				(void *)(busaddr + len + offset - 1), (void *)bar_end);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+static qdpc_bar_t *qdpc_map_bar(struct vmac_priv *priv, qdpc_bar_t *bar,
+						uint8_t index, size_t len, uint32_t offset)
+{
+	void *vaddr = NULL;
+	dma_addr_t busaddr = 0;
+	qdpc_bar_t temp_bar;
+
+	memset(&temp_bar, 0 ,sizeof(qdpc_bar_t));
+
+	temp_bar.b_len = len;
+	temp_bar.b_offset = offset;
+	temp_bar.b_index = index;
+
+	if (!qdpc_bar_check(priv, &temp_bar)) {
+		printk("Failed bar mapping sanity check in %s\n", __FUNCTION__);
+		return NULL;
+	}
+
+	/* Reserve PCIe memory region*/
+	busaddr = pci_resource_start(priv->pdev , index) + offset;
+	if (!request_mem_region(busaddr, len , QDPC_DEV_NAME)) {
+		printk("Failed to reserve %u bytes of PCIe memory "
+			"region starting at 0x%p\n", (uint32_t)len, (void *)busaddr);
+		return NULL;
+	}
+
+	qdpc_update_hw_bar(priv->pdev, index);
+
+	vaddr = ioremap_nocache(busaddr, len);
+	if (!vaddr) {
+		printk("Failed to map %u bytes at BAR%u at bus address 0x%p.\n",
+			(uint32_t)len, index, (void *)busaddr);
+		release_mem_region(busaddr, len);
+		return NULL;
+	}
+
+	memset(&temp_bar, 0 ,sizeof(qdpc_bar_t));
+
+	bar->b_vaddr = vaddr;
+	bar->b_busaddr = busaddr;
+	bar->b_len = len;
+	bar->b_index = index;
+	bar->b_offset = offset;
+
+	printk("BAR:%u vaddr=0x%p busaddr=%p offset=%u len=%u\n",
+		bar->b_index, bar->b_vaddr, (void *)bar->b_busaddr,
+		bar->b_offset, (uint32_t)bar->b_len);
+	return bar;
+}
+
+static bool qdpc_unmap_bar(struct vmac_priv *priv, qdpc_bar_t *bar)
+{
+	if (!qdpc_bar_check(priv, bar)) {
+		PRINT_ERROR("Failed bar mapping sanity check in %s\n", __FUNCTION__);
+		return 0;
+	}
+
+	iounmap(bar->b_vaddr);
+	release_mem_region(bar->b_busaddr - bar->b_offset, bar->b_len);
+	memset(bar, 0 , sizeof(qdpc_bar_t));
+
+	return 1;
+
+}
+static void qdpc_map_epmem(struct vmac_priv *priv)
+{
+	printk("%s() Mapping epmem\n", __FUNCTION__);
+	qdpc_map_bar(priv, &priv->epmem_bar, QDPC_SHMEM_BAR,
+					pci_resource_len(priv->pdev, QDPC_SHMEM_BAR) , 0);
+
+	priv->bda =(qdpc_pcie_bda_t *)QDPC_BAR_VADDR(priv->epmem_bar, 0);
+	priv->bda->bda_rc_msi_enabled = g_msi;
+}
+
+static void qdpc_map_sysctl_regs(struct vmac_priv *priv)
+{
+	printk("%s() Mapping sysctl\n", __FUNCTION__);
+	qdpc_map_bar(priv, &priv->sysctl_bar, QDPC_SYSCTL_BAR, pci_resource_len(priv->pdev, QDPC_SYSCTL_BAR) , 0);
+}
+
+static void qdpc_map_dma_regs(struct vmac_priv *priv)
+{
+	printk("%s() Mapping dma registers\n", __FUNCTION__);
+	qdpc_map_bar(priv, &priv->dmareg_bar, QDPC_DMA_BAR, pci_resource_len(priv->pdev, QDPC_DMA_BAR), 0);
+}
+
+static void qdpc_unmap_epmem(struct vmac_priv *priv)
+{
+	printk("%s() Unmapping sysctl\n", __FUNCTION__);
+	priv->bda = NULL;
+	qdpc_unmap_bar(priv, &priv->epmem_bar);
+}
+
+static void qdpc_unmap_sysctl_regs(struct vmac_priv *priv)
+{
+	printk("%s() Unmapping sysctl\n", __FUNCTION__);
+
+	qdpc_unmap_bar(priv, &priv->sysctl_bar);
+}
+
+static void qdpc_unmap_dma_regs(struct vmac_priv *priv)
+{
+	printk("%s() Unmapping dma regs\n", __FUNCTION__);
+	qdpc_unmap_bar(priv, &priv->dmareg_bar);
+}
+
+int32_t qdpc_set_dma_mask(struct vmac_priv *priv) {
+	int result = 0;
+	uint64_t dma_mask = qdpc_pci_readl(&priv->bda->bda_dma_mask);
+
+	printk("Requested DMA mask:0x%llx\n", dma_mask);
+
+	result = pci_set_dma_mask(priv->pdev, dma_mask);
+	if (!result) {
+			result = pci_set_consistent_dma_mask(priv->pdev, dma_mask);
+			if (result) {
+				printk(" pci_set_consistent_dma_mask() error %d. Mask:0x%llx\n", result, dma_mask);
+				return 1;
+			}
+	} else {
+		printk(" pci_set_dma_mask() error %d. Mask:0x%llx\n", result, dma_mask);
+		return 1;
+	}
+
+	return 0;
+}
+static int32_t qdpc_pcie_init_mem(struct vmac_priv *priv)
+{
+	int ret = 0;
+
+	/* Map SynControl registers and Host to Endpoint interrupt registers to BAR-2 */
+	qdpc_map_sysctl_regs(priv);
+	qdpc_map_epmem(priv);
+	qdpc_map_dma_regs(priv);
+
+	return ret;
+}
+
+int qdpc_unmap_iomem(struct vmac_priv *priv)
+{
+	qdpc_unmap_dma_regs(priv);
+	qdpc_unmap_epmem(priv);
+	qdpc_unmap_sysctl_regs(priv);
+
+	return SUCCESS;
+}
+
+void qdpc_free_interrupt(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct vmac_priv *priv;
+
+	if (ndev == NULL)
+		return;
+
+	priv = netdev_priv(ndev);
+	if(priv->msi_enabled)
+		pci_disable_msi(pdev);
+	else
+		pci_intx(pdev, 0);
+}
+
+void qdpc_pcie_free_mem(struct pci_dev *pdev)
+{
+	return;
+}
+
+void *qdpc_map_pciemem(unsigned long busaddr, size_t len)
+{
+	/* Reserve PCIe memory region*/
+	if (!request_mem_region(busaddr, len, QDPC_DEV_NAME)) {
+		PRINT_ERROR(KERN_ERR "Failed to reserve %u bytes of "
+			"PCIe memory region starting at 0x%lx\n", (uint32_t)len, busaddr);
+		return NULL;
+	}
+	return ioremap_nocache(busaddr, len);
+}
+
+void qdpc_unmap_pciemem(unsigned long busaddr, void *vaddr, size_t len)
+{
+	if (!vaddr || !busaddr)
+		return;
+	iounmap(vaddr);
+	release_mem_region(busaddr, len);
+}
+
+void qdpc_deassert_intx(struct vmac_priv *priv)
+{
+	void *basereg = QDPC_BAR_VADDR(priv->sysctl_bar, TOPAZ_PCIE_CFG0_OFFSET);
+
+	qdpc_pcie_posted_write(priv->ep_pciecfg0_val & ~TOPAZ_ASSERT_INTX, basereg);
+}
+
diff --git a/drivers/qtn/pcie2/host/common/qdpc_regs.h b/drivers/qtn/pcie2/host/common/qdpc_regs.h
new file mode 100644
index 0000000..5325d41
--- /dev/null
+++ b/drivers/qtn/pcie2/host/common/qdpc_regs.h
@@ -0,0 +1,56 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __QDPC_REGS_H__
+#define __QDPC_REGS_H__
+
+#include <linux/bitops.h>
+#include <qdpc_platform.h>
+
+#define QDPC_SYSCTL_BAR		0
+#define QDPC_SHMEM_BAR		2
+#define QDPC_DMA_BAR		3
+
+/*
+ * NOTE: Below registers are at EP but accessed and written by RC
+ * Make sure EP code does not write them, otherwise we have race conditions
+*/
+
+/*
+ * The register is one of registers of Endpoint. Root Complex uses it
+ * to interrupt Endpoint to transmit packets.
+ */
+#define TOPAZ_IPC_OFFSET		(0x13C)
+
+/* Used to deassert Legacy INTx */
+#define TOPAZ_PCIE_CFG0_OFFSET		(0x6C)
+#define TOPAZ_ASSERT_INTX		BIT(9)
+
+/* This macro is used to set interrupt bit of register QDPC_EP_SYS_CTL_IPC4_INT */
+#define TOPAZ_SET_INT(x)		((x) | ((x) << 16))
+
+/* "DMA Write Done IMWr Address Low" register at EP side*/
+#define TOPAZ_IMWR_DONE_ADDRLO_OFFSET	(0x700 + 0x2D0)
+#define TOPAZ_IMWR_ABORT_ADDRLO_OFFSET	(0x700 + 0x2D8)
+
+/* Power management control status register */
+#define TOPAZ_PCI_PM_CTRL_OFFSET	(0x44)
+
+#endif //__QDPC_REGS_H__
+
diff --git a/drivers/qtn/pcie2/host/common/topaz_vnet.c b/drivers/qtn/pcie2/host/common/topaz_vnet.c
new file mode 100644
index 0000000..32027b8
--- /dev/null
+++ b/drivers/qtn/pcie2/host/common/topaz_vnet.c
@@ -0,0 +1,1565 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/if_bridge.h>
+#include <linux/sysfs.h>
+#include <linux/pci.h>
+
+
+#include <qdpc_platform.h>
+
+#include <asm/cache.h>		/* For cache line size definitions */
+#include <asm/cacheflush.h>	/* For cache flushing functions */
+
+#include <net/netlink.h>
+
+#include "topaz_vnet.h"
+#include "qdpc_config.h"
+#include "qdpc_init.h"
+#include "qdpc_debug.h"
+#include "qdpc_regs.h"
+#include "qdpc_version.h"
+
+#define DRV_NAME	"qdpc-host"
+
+#ifndef DRV_VERSION
+#define DRV_VERSION	"1.0"
+#endif
+
+#define DRV_AUTHOR	"Quantenna Communications Inc."
+#define DRV_DESC	"PCIe virtual Ethernet port driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+#undef __sram_text
+#define __sram_text
+
+static int __sram_text vmac_rx_poll (struct napi_struct *napi, int budget);
+static int __sram_text skb2rbd_attach(struct net_device *ndev, uint16_t i, uint32_t wrap);
+static irqreturn_t vmac_interrupt(int irq, void *dev_id);
+static void vmac_tx_timeout(struct net_device *ndev);
+static int vmac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd);
+static int vmac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd);
+static void vmac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info);
+static void free_tx_pkts(struct vmac_priv *vmp);
+static void init_tx_bd(struct vmac_priv *vmp);
+static void free_rx_skbs(struct vmac_priv *vmp);
+static int alloc_and_init_rxbuffers(struct net_device *ndev);
+static void bring_up_interface(struct net_device *ndev);
+static void shut_down_interface(struct net_device *ndev);
+static int vmac_open(struct net_device *ndev);
+static int vmac_close(struct net_device *ndev);
+static int vmac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
+static struct net_device_stats *vmac_get_stats(struct net_device *dev);
+#ifdef QTN_WAKEQ_SUPPORT
+static inline void vmac_try_wake_queue(struct net_device *ndev);
+static inline void vmac_try_stop_queue(struct net_device *ndev);
+#endif
+#ifdef RX_IP_HDR_REALIGN
+static uint32_t align_cnt = 0, unalign_cnt = 0;
+#endif
+
+
+#define RX_DONE_INTR_MSK	((0x1 << 6) -1)
+#define VMAC_BD_LEN		(sizeof(struct vmac_bd))
+
+#define QTN_GLOBAL_INIT_EMAC_TX_QUEUE_LEN 256
+#define VMAC_DEBUG_MODE
+/* Tx dump flag */
+#define DMP_FLG_TX_BD		(0x1 << ( 0)) /* vmac s 32 */
+#define DMP_FLG_TX_SKB		(0x1 << ( 1)) /* vmac s 33 */
+/* Rx dump flag */
+#define DMP_FLG_RX_BD		(0x1 << (16)) /* vmac s 48 */
+#define DMP_FLG_RX_SKB		(0x1 << (17)) /* vmac s 49 */
+#define DMP_FLG_RX_INT		(0x1 << (18)) /* vmac s 50 */
+
+#define SHOW_TX_BD		(16)
+#define SHOW_RX_BD		(17)
+#define SHOW_VMAC_STATS	(18)
+
+#ifndef QDPC_PLATFORM_IFPORT
+#define QDPC_PLATFORM_IFPORT 0
+#endif
+
+#define VMAC_TX_TIMEOUT		(180 * HZ)
+
+#ifdef VMAC_DEBUG_MODE
+
+#define dump_tx_bd(vmp) do { \
+		if (unlikely((vmp)->dbg_flg & DMP_FLG_TX_BD)) { \
+			txbd2str(vmp); \
+		} \
+	} while (0)
+
+#define dump_tx_pkt(vmp, data, len) do { \
+		if (unlikely(((vmp)->dbg_flg & DMP_FLG_TX_SKB))) \
+			dump_pkt(data, len, "Tx"); \
+	} while(0)
+
+#define dump_rx_bd(vmp) do { \
+		if (unlikely((vmp)->dbg_flg & DMP_FLG_RX_BD)) { \
+			rxbd2str(vmp); \
+		} \
+	} while (0)
+
+#define dump_rx_pkt(vmp, data, len) do { \
+		if (unlikely((vmp)->dbg_flg & DMP_FLG_RX_SKB)) \
+			dump_pkt(data, len, "Rx"); \
+	} while(0)
+
+#define dump_rx_int(vmp) do { \
+		if (unlikely((vmp)->dbg_flg & DMP_FLG_RX_INT)) \
+			dump_rx_interrupt(vmp); \
+	} while (0)
+
+#else
+#define dump_tx_bd(vmp)
+#define dump_tx_pkt(vmp, skb, len)
+#define dump_rx_bd(vmp)
+#define dump_rx_pkt(vmp, skb, len)
+#define dump_rx_int(vmp)
+#endif
+
+struct vmac_cfg vmaccfg = {
+	QDPC_RX_QUEUE_SIZE, QDPC_TX_QUEUE_SIZE, "host%d", NULL
+};
+
+static char *ethaddr = NULL;
+module_param(ethaddr, charp, S_IRUGO);
+MODULE_PARM_DESC(store, "ethaddr");
+
+#ifdef RX_IP_HDR_REALIGN
+static uint32_t rx_pkt_align = 0;
+module_param(rx_pkt_align, uint, 0644);
+MODULE_PARM_DESC(rx_pkt_align, "RX Pakcet IP header realign to 4byte boundary");
+#endif
+
+/* Alignment helper functions */
+__always_inline static unsigned long align_up_off(unsigned long val, unsigned long step)
+{
+	return (((val + (step - 1)) & (~(step - 1))) - val);
+}
+
+__always_inline static unsigned long align_down_off(unsigned long val, unsigned long step)
+{
+	return ((val) & ((step) - 1));
+}
+
+__always_inline static unsigned long align_val_up(unsigned long val, unsigned long step)
+{
+	return ((val + step - 1) & (~(step - 1)));
+}
+
+__always_inline static unsigned long align_val_down(unsigned long val, unsigned long step)
+{
+	return (val & (~(step - 1)));
+}
+
+__always_inline static void* align_buf_dma(void *addr)
+{
+	return (void*)align_val_up((unsigned long)addr, dma_get_cache_alignment());
+}
+
+__always_inline static unsigned long align_buf_dma_offset(void *addr)
+{
+	return (align_buf_dma(addr) - addr);
+}
+
+__always_inline static void* align_buf_cache(void *addr)
+{
+	return (void*)align_val_down((unsigned long)addr, dma_get_cache_alignment());
+}
+
+__always_inline static unsigned long align_buf_cache_offset(void *addr)
+{
+	return (addr - align_buf_cache(addr));
+}
+
+__always_inline static unsigned long align_buf_cache_size(void *addr, unsigned long size)
+{
+	return align_val_up(size + align_buf_cache_offset(addr), dma_get_cache_alignment());
+}
+
+/* Print the Tx Request Queue */
+static int txbd2str_range(struct vmac_priv *vmp, uint16_t s, int num)
+{
+	qdpc_pcie_bda_t *bda = vmp->bda;
+	int i;
+
+	printk("RC insert start index\t: %d\n", vmp->tx_bd_index);
+	printk("RC reclaim start index\t: %d\n", vmp->tx_reclaim_start);
+	printk("valid entries\t\t: %d\n", vmp->vmac_tx_queue_len);
+	printk("Pkt index EP handled\t: %d\n", le32_to_cpu(VMAC_REG_READ(vmp->ep_next_rx_pkt)));
+
+	printk("\t\t%8s\t%8s\t%8s\t%10s\n", "Address", "Valid", "Length", "Pkt Addr");
+
+	for (i = 0; i < num; i++) {
+		printk("\t%d\t0x%08x\t%8s\t\t%d\t0x%p\n", s, bda->request[s].addr, \
+			(bda->request[s].info & PCIE_TX_VALID_PKT) ? "Valid" : "Invalid",  \
+			bda->request[s].info & 0xffff, vmp->tx_skb[s]);
+		VMAC_INDX_INC(s, vmp->tx_bd_num);
+	}
+
+	return 0;
+}
+
+static int txbd2str(struct vmac_priv *vmp)
+{
+	uint16_t s;
+
+	s = VMAC_INDX_MINUS(vmp->tx_bd_index, 4, vmp->tx_bd_num);
+	return txbd2str_range(vmp, s, 8);
+}
+
+static int txbd2str_all(struct vmac_priv *vmp)
+{
+	return txbd2str_range(vmp, 0, vmp->tx_bd_num);
+}
+
+static int rxbd2str_range(struct vmac_priv *vmp, uint16_t s, int num)
+{
+	int i;
+	char *idxflg;
+
+	printk("rxindx\trbdaddr\t\tbuff\t\tinfo\t\trx_skb\n");
+	for (i = 0; i < num; i++) {
+		if(s == vmp->rx_bd_index)
+			idxflg = ">rbd";
+		else
+			idxflg = "";
+		printk("%2d%s\t@%p\t%08x\t%08x\t%p\n", s, idxflg,
+			&vmp->rx_bd_base[s], vmp->rx_bd_base[s].buff_addr,
+			vmp->rx_bd_base[s].buff_info, vmp->rx_skb[s]);
+
+		VMAC_INDX_INC(s, vmp->rx_bd_num);
+	}
+	return 0;
+}
+
+static int rxbd2str(struct vmac_priv *vmp)
+{
+	uint16_t s;
+	s = VMAC_INDX_MINUS(vmp->rx_bd_index, 4, vmp->rx_bd_num);
+	return rxbd2str_range(vmp, s, 8);
+}
+
+static int rxbd2str_all(struct vmac_priv *vmp)
+{
+	return rxbd2str_range(vmp, 0, vmp->rx_bd_num);
+}
+
+static int vmaccnt2str(struct vmac_priv *vmp, char *buff)
+{
+	int count;
+	count = sprintf(buff, "tx_bd_busy_cnt:\t%08x\n", vmp->tx_bd_busy_cnt);
+	count += sprintf(buff + count, "tx_stop_queue_cnt:\t%08x\n", vmp->tx_stop_queue_cnt);
+	count += sprintf(buff + count, "rx_skb_alloc_failures:\t%08x\n", vmp->rx_skb_alloc_failures);
+	count += sprintf(buff + count, "intr_cnt:\t%08x\n", vmp->intr_cnt);
+	count += sprintf(buff + count, "vmac_xmit_cnt:\t%08x\n", vmp->vmac_xmit_cnt);
+	count += sprintf(buff + count, "vmac_skb_free:\t%08x\n", vmp->vmac_skb_free);
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	count += sprintf(buff + count, "skb_recycle_cnt:\t%08x\n", vmp->skb_recycle_cnt);
+	count += sprintf(buff + count, "skb_recycle_failures:\t%08x\n", vmp->skb_recycle_failures);
+#endif
+	count += sprintf(buff + count, "vmp->txqueue_stopped=%x\n", vmp->txqueue_stopped);
+	count += sprintf(buff + count, "*vmp->txqueue_wake=%x\n", *vmp->txqueue_wake);
+#ifdef RX_IP_HDR_REALIGN
+	if(rx_pkt_align)
+		count += sprintf(buff + count, "rx iphdr aligned:%d,unalign:%d\n", align_cnt, unalign_cnt);
+#endif
+	return count;
+}
+
+static ssize_t vmac_dbg_show(struct device *dev, struct device_attribute *attr,
+						char *buff)
+{
+	struct net_device *ndev = container_of(dev, struct net_device, dev);
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	int count = 0;
+	switch (vmp->show_item) {
+	case SHOW_TX_BD: /* Print Tx Request Queue */
+		count = (ssize_t)txbd2str_all(vmp);
+		break;
+	case SHOW_RX_BD:/* show Rx BD */
+		count = (ssize_t)rxbd2str_all(vmp);
+		break;
+	case SHOW_VMAC_STATS:/* show vmac interrupt statistic info */
+		count = vmaccnt2str(vmp, buff);
+	default:
+		break;
+	}
+	return count;
+}
+
+static ssize_t vmac_dbg_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev = container_of(dev, struct net_device, dev);
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	uint8_t cmd;
+
+	cmd = (uint8_t)simple_strtoul(buf, NULL, 10);
+	if (cmd < 16) {
+		switch(cmd) {
+		case 0:
+			vmp->dbg_flg = 0; /* disable all of runtime dump */
+			break;
+		case 1:
+			napi_schedule(&vmp->napi);
+			break;
+		case 2:
+			vmp->tx_bd_busy_cnt = 0;
+			vmp->intr_cnt = 0;
+			vmp->rx_skb_alloc_failures = 0;
+		default:
+			break;
+		}
+	}
+	else if (cmd < 32) /* used for vmac_dbg_show */
+		vmp->show_item = cmd;
+	else if (cmd < 64) /* used for runtime dump */
+		vmp->dbg_flg |= (0x1 << (cmd - 32));
+	else if (cmd == 64) /* enable all of runtime dump */
+		vmp->dbg_flg = -1;
+
+	return count;
+}
+static DEVICE_ATTR(dbg, S_IWUSR | S_IRUSR, vmac_dbg_show, vmac_dbg_set); /* dev_attr_dbg */
+
+static ssize_t vmac_pm_show(struct device *dev, struct device_attribute *attr,
+						char *buff)
+{
+	struct net_device *ndev = container_of(dev, struct net_device, dev);
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	int count = 0;
+
+	count += sprintf(buff + count, "PCIE Device Power State : %s\n",
+				le32_to_cpu(*vmp->ep_pmstate) == PCI_D3hot ? "D3" : "D0");
+
+	return count;
+}
+
+static ssize_t vmac_pm_set(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev = container_of(dev, struct net_device, dev);
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	uint8_t cmd;
+
+	cmd = (uint8_t)simple_strtoul(buf, NULL, 10);
+
+	if (cmd == 0) {
+		qdpc_pcie_resume(vmp->pdev);
+	} else if (cmd == 1) {
+		pm_message_t state;
+		state.event = 0;
+		qdpc_pcie_suspend(vmp->pdev, state);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(pmctrl, S_IWUSR | S_IRUSR, vmac_pm_show, vmac_pm_set); /* dev_attr_pmctrl */
+
+static struct attribute *vmac_device_attrs[] = {
+	&dev_attr_dbg.attr,
+	&dev_attr_pmctrl.attr,
+	NULL,
+};
+
+static const struct attribute_group vmac_attr_group = {
+	.attrs = vmac_device_attrs,
+};
+
+#ifdef VMAC_DEBUG_MODE
+static void dump_pkt(char *data, int len, char *s)
+{
+	int i;
+
+	if (len > 128)
+		len = 128;
+	printk("%spkt start%p>\n", s, data);
+	for (i = 0; i < len;) {
+		printk("%02x ", data[i]);
+		if ((++i % 16) == 0)
+			printk("\n");
+	}
+	printk("<%spkt end\n", s);
+}
+
+static void dump_rx_interrupt(struct vmac_priv *vmp)
+{
+	printk("intr_cnt:\t%08x\n", vmp->intr_cnt);
+}
+#endif
+
+#define VMAC_BD_INT32_VAR 3
+
+static int alloc_bd_tbl(struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	uint32_t ucaddr;
+	uint32_t paddr;
+	int len;	/* Length of allocated Transmitted & Received descriptor array */
+
+	/* uint32_t is used to be updated by ep */
+	len = (vmp->tx_bd_num + vmp->rx_bd_num) * VMAC_BD_LEN + VMAC_BD_INT32_VAR * sizeof(uint32_t);
+	ucaddr = (uint32_t)pci_alloc_consistent(vmp->pdev, len, (dma_addr_t *)&paddr);
+	if (!ucaddr)
+		return -1;
+
+	memset((void *)ucaddr, 0, len);
+
+	vmp->addr_uncache = ucaddr;
+	vmp->uncache_len = len;
+
+	/* Update pointers related with Tx descriptor table */
+	vmp->tx_bd_base = (struct vmac_bd *)ucaddr;
+	vmp->paddr_tx_bd_base = paddr;
+	qdpc_pcie_posted_write(paddr, &vmp->bda->bda_rc_tx_bd_base);
+	init_tx_bd(vmp);
+	printk("Tx Descriptor table: uncache virtual addr: 0x%08x paddr: 0x%08x\n",
+		(uint32_t)vmp->tx_bd_base, paddr);
+
+	/* Update pointers related with Rx descriptor table */
+	ucaddr += vmp->tx_bd_num * VMAC_BD_LEN;
+	paddr += vmp->tx_bd_num * VMAC_BD_LEN;
+
+	vmp->rx_bd_base = (struct vmac_bd *)ucaddr;
+	qdpc_pcie_posted_write(paddr, &vmp->bda->bda_rc_rx_bd_base);
+	printk("Rx Descriptor table: uncache virtual addr: 0x%08x paddr: 0x%08x\n",
+		(uint32_t)vmp->rx_bd_base, paddr);
+
+	/* Update pointers used by EP's updating consumed packet index */
+	ucaddr += vmp->rx_bd_num * VMAC_BD_LEN;
+	paddr += vmp->rx_bd_num * VMAC_BD_LEN;
+
+	vmp->ep_next_rx_pkt = (uint32_t *)ucaddr;
+	qdpc_pcie_posted_write(paddr, &vmp->bda->bda_ep_next_pkt);
+	printk("EP_handled_idx: uncache virtual addr: 0x%08x paddr: 0x%08x\n",
+		(uint32_t)vmp->ep_next_rx_pkt, paddr);
+
+	ucaddr += sizeof(uint32_t);
+	paddr += sizeof(uint32_t);
+
+	vmp->txqueue_wake = (uint32_t *)ucaddr;
+
+	ucaddr += sizeof(uint32_t);
+	paddr += sizeof(uint32_t);
+	vmp->ep_pmstate = (uint32_t *)ucaddr;
+
+	return 0;
+}
+
+static void free_bd_tbl(struct vmac_priv *vmp)
+{
+	pci_free_consistent(vmp->pdev, vmp->uncache_len, (void *)vmp->addr_uncache,
+			vmp->paddr_tx_bd_base);
+}
+
+static int alloc_skb_desc_array(struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	uint32_t addr;
+	int len;
+
+	len = (vmp->tx_bd_num + vmp->rx_bd_num) * (sizeof(struct sk_buff *));
+	addr = (uint32_t)kzalloc(len, GFP_KERNEL);
+	if (!addr)
+		return -1;
+	vmp->tx_skb = (struct sk_buff **)addr;
+
+	addr += vmp->tx_bd_num * sizeof(struct sk_buff *);
+	vmp->rx_skb = (struct sk_buff **)addr;
+
+	return 0;
+}
+
+/*
+ * Free the combined skb pointer tables. rx_skb points into the same
+ * kzalloc() region as tx_skb, so a single kfree() releases both.
+ */
+static void free_skb_desc_array(struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	kfree(vmp->tx_skb);
+}
+
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+/* Pop one skb from the recycle freelist. Caller must hold rx_skb_freelist_lock. */
+static inline struct sk_buff *__vmac_rx_skb_freelist_pop(struct vmac_priv *vmp)
+{
+	struct sk_buff *skb = __skb_dequeue(&vmp->rx_skb_freelist);
+
+	return skb;
+}
+
+/*
+ * Return a transmitted skb to the Rx freelist instead of freeing it.
+ * Oversized freelist or undersized skb -> unmap and free the skb instead.
+ * NOTE(review): the queue-length checks run before the lock is taken;
+ * confirm that racing producers cannot meaningfully overshoot the cap.
+ */
+static inline int vmac_rx_skb_freelist_push(struct vmac_priv *vmp, dma_addr_t buff_addr, struct sk_buff *skb)
+{
+	unsigned long flag;
+
+	if (skb_queue_len(&vmp->rx_skb_freelist) > QTN_RX_SKB_FREELIST_MAX_SIZE) {
+		pci_unmap_single(vmp->pdev, buff_addr, skb->len, (int)DMA_BIDIRECTIONAL);
+		dev_kfree_skb(skb);
+		vmp->vmac_skb_free++;
+		return 0;
+	}
+
+	/* check for undersize skb; this should never happen, and indicates problems elsewhere */
+	if (unlikely((skb_end_pointer(skb) - skb->head) < QTN_RX_BUF_MIN_SIZE)) {
+		pci_unmap_single(vmp->pdev, buff_addr, skb->len, (int)DMA_BIDIRECTIONAL);
+		dev_kfree_skb(skb);
+		vmp->vmac_skb_free++;
+		vmp->skb_recycle_failures++;
+		return -EINVAL;
+	}
+
+	/* Reset the skb to a pristine, DMA-aligned state before reuse.
+	 * NOTE(review): direct skb->tail assignment assumes a pointer-based
+	 * tail (i.e. !NET_SKBUFF_DATA_USES_OFFSET) -- confirm for this arch. */
+	skb->len = 0;
+	skb->tail = skb->data = skb->head;
+	skb_reserve(skb, NET_SKB_PAD);
+	skb_reserve(skb, align_buf_dma_offset(skb->data));
+
+	qtn_spin_lock_bh_save(&vmp->rx_skb_freelist_lock, &flag);
+	__skb_queue_tail(&vmp->rx_skb_freelist, skb);
+	qtn_spin_unlock_bh_restore(&vmp->rx_skb_freelist_lock, &flag);
+
+	vmp->skb_recycle_cnt++;
+
+	return 0;
+}
+
+/*
+ * Top the freelist up to rx_skb_freelist_fill_level with freshly allocated,
+ * DMA-mapped skbs. Caller must hold rx_skb_freelist_lock.
+ */
+static inline void __vmac_rx_skb_freelist_refill(struct vmac_priv *vmp)
+{
+	struct sk_buff *skb = NULL;
+	int num = vmp->rx_skb_freelist_fill_level - skb_queue_len(&vmp->rx_skb_freelist);
+
+	while (num > 0) {
+		if (!(skb = dev_alloc_skb(SKB_BUF_SIZE))) {
+			vmp->rx_skb_alloc_failures++;
+			break;
+		}
+		/* Move skb->data to a cache line boundary */
+		skb_reserve(skb, align_buf_dma_offset(skb->data));
+		pci_map_single(vmp->pdev, skb->data, skb_end_pointer(skb) - skb->data, (int)DMA_FROM_DEVICE);
+		__skb_queue_tail(&vmp->rx_skb_freelist, skb);
+
+		num--;
+	}
+}
+
+/* Drop every skb still sitting on the recycle freelist (teardown path). */
+static void vmac_rx_skb_freelist_purge(struct vmac_priv *vmp)
+{
+	unsigned long flag;
+
+	qtn_spin_lock_bh_save(&vmp->rx_skb_freelist_lock, &flag);
+	__skb_queue_purge(&vmp->rx_skb_freelist);
+	qtn_spin_unlock_bh_restore(&vmp->rx_skb_freelist_lock, &flag);
+}
+#endif /* QTN_SKB_RECYCLE_SUPPORT */
+
+/* True when both 6-byte magic fields mark this frame as an in-band netlink RPC. */
+static inline bool check_netlink_magic(qdpc_cmd_hdr_t *cmd_hdr)
+{
+	return ((memcmp(cmd_hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN) == 0)
+		&& (memcmp(cmd_hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN) == 0));
+}
+
+/*
+ * Reassemble an in-band RPC fragment into priv->nl_buf and, on the final
+ * fragment, forward the whole message to the registered userspace client
+ * (string-call or lib-call pid) via netlink unicast.
+ * On any error the reassembly buffer is reset (priv->nl_len = 0).
+ */
+static void vmac_netlink_rx(struct net_device *ndev, void *buf, size_t len, uint16_t rpc_type, uint32_t total_len)
+{
+	struct vmac_priv *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	int pid = 0;
+	int frag = (rpc_type & QDPC_RPC_TYPE_FRAG_MASK);
+
+	rpc_type &= QDPC_RPC_TYPE_MASK;
+
+	/* The reassembled message must fit the preallocated buffer. */
+	if (unlikely(total_len > VMAC_NL_BUF_SIZE)) {
+		printk(KERN_INFO"%s: total length %u exceeds buffer length %u\n", __func__,
+			total_len, VMAC_NL_BUF_SIZE);
+		goto reset_nlbuf;
+	}
+
+	/* %zu: priv->nl_len + len is size_t (was mis-printed with %u). */
+	if (unlikely(priv->nl_len + len > total_len)) {
+		printk(KERN_INFO"%s: frag length %zu exceeds total length %u\n", __func__,
+			priv->nl_len + len, total_len);
+		goto reset_nlbuf;
+	}
+
+	memcpy(priv->nl_buf + priv->nl_len, buf, len);
+	priv->nl_len += len;
+
+	if (frag)
+		return;
+
+	/* last fragment -- hand it to upper layer */
+	buf = priv->nl_buf;
+	len = priv->nl_len;
+
+	skb = nlmsg_new(len, GFP_ATOMIC);
+	if (skb == NULL) {
+		DBGPRINTF("WARNING: out of netlink SKBs\n");
+		goto reset_nlbuf;
+	}
+
+	/* Fix: nlmsg_put() can return NULL; the result was dereferenced
+	 * unchecked (and the line carried a stray extra ';'). */
+	nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, len, 0);
+	if (nlh == NULL) {
+		kfree_skb(skb);
+		goto reset_nlbuf;
+	}
+	memcpy(nlmsg_data(nlh), buf, len);
+	NETLINK_CB(skb).dst_group = 0;
+
+	/* Route to whichever client registered for this RPC type. */
+	if (rpc_type == QDPC_RPC_TYPE_STRCALL)
+		pid = priv->str_call_nl_pid;
+	else if (rpc_type == QDPC_RPC_TYPE_LIBCALL)
+		pid = priv->lib_call_nl_pid;
+
+	if (unlikely(pid == 0)) {
+		kfree_skb(skb);
+		goto reset_nlbuf;
+	}
+
+	nlmsg_unicast(priv->nl_socket, skb, pid);
+
+reset_nlbuf:
+	priv->nl_len = 0;
+}
+
+/*
+ * Arm NAPI polling: if no poll is already scheduled, mask the device
+ * interrupts and queue vmac_rx_poll() on this CPU's softirq.
+ */
+static inline void vmac_napi_schedule(struct vmac_priv *vmp)
+{
+	if (napi_schedule_prep(&vmp->napi)) {
+		disable_vmac_ints(vmp);
+		__napi_schedule(&vmp->napi);
+	}
+}
+
+#ifdef QDPC_PLATFORM_IRQ_FIXUP
+/* True when the next Rx descriptor already holds a frame (EMPTY bit clear). */
+static inline int vmac_has_more_rx(struct vmac_priv *vmp)
+{
+	uint16_t i = vmp->rx_bd_index;
+	volatile struct vmac_bd *rbdp = &vmp->rx_bd_base[i];
+
+	return !(le32_to_cpu(rbdp->buff_info) & VMAC_BD_EMPTY);
+}
+/* Kick one poll at open time in case an IRQ was lost before enable. */
+static inline void vmac_irq_open_fixup(struct vmac_priv *vmp)
+{
+	vmac_napi_schedule(vmp);
+}
+/*
+ * TODO: vmac_irq_napi_fixup needs to undergo stability and
+ * especially performance test to justify its value
+*/
+/* Re-schedule NAPI if frames arrived between poll completion and IRQ re-enable. */
+static inline void vmac_irq_napi_fixup(struct vmac_priv *vmp)
+{
+	if (unlikely(vmac_has_more_rx(vmp)))
+		vmac_napi_schedule(vmp);
+}
+#else
+#define vmac_irq_open_fixup(v) do{}while(0)
+#define vmac_irq_napi_fixup(v) do{}while(0)
+#endif
+
+
+#ifdef RX_IP_HDR_REALIGN
+/*
+ * The skb buffer carries headroom padding, so moving skb data backwards by
+ * less than that padding is safe.
+ */
+static void vmac_rx_ip_align_ahead(struct sk_buff *skb, uint32_t move_bytes)
+{
+	uint8_t *pkt_src, *pkt_dst;
+	/* Data must be 2-byte aligned at minimum; odd alignment is a bug. */
+	uint8_t bytes_boundary = ((uint32_t)(skb->data)) % 4;
+	BUG_ON(bytes_boundary & 1);
+
+	/* bytes_boundary == 0 means the Ethernet header is 4-byte aligned,
+	 * so the IP header (14-byte Ethernet header follows) is 2-byte aligned;
+	 * move the whole packet move_bytes ahead for QCA NSS preference.
+	 * NOTE(review): direct skb->data/skb->tail arithmetic assumes a
+	 * pointer-based tail -- confirm for this kernel config.
+	*/
+
+	if(bytes_boundary == 0){
+		if(skb_headroom(skb) >= move_bytes){
+			pkt_src = skb->data;
+			pkt_dst = skb->data - move_bytes;
+
+			memmove(pkt_dst, pkt_src, skb->len);
+
+			skb->data -= move_bytes;
+			skb->tail -= move_bytes;
+		}
+		unalign_cnt++;
+	}
+	else if(bytes_boundary == 2){
+		align_cnt++;
+	}
+}
+#endif
+
+/*
+ * NAPI poll handler: walk the Rx descriptor ring while descriptors are
+ * non-EMPTY and budget remains. In-band netlink RPC frames are diverted to
+ * vmac_netlink_rx() (and do not count against the budget); normal frames are
+ * unmapped and handed to the stack. Each consumed descriptor is re-armed
+ * with a fresh skb via skb2rbd_attach(); on attach failure the loop stops so
+ * the descriptor can be retried on a later poll.
+ * Returns the number of stack-delivered packets.
+ */
+static int __sram_text vmac_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct vmac_priv *vmp = container_of(napi, struct vmac_priv, napi);
+	struct net_device *ndev = vmp->ndev;
+	struct ethhdr *eth;
+	qdpc_cmd_hdr_t *cmd_hdr;
+	int processed = 0;
+	uint16_t i = vmp->rx_bd_index;
+	volatile struct vmac_bd *rbdp = &vmp->rx_bd_base[i];
+	uint32_t descw1;
+
+	while (!((descw1 = le32_to_cpu(VMAC_REG_READ(&rbdp->buff_info))) & VMAC_BD_EMPTY) && (processed < budget)) {
+		struct sk_buff *skb;
+		skb = vmp->rx_skb[i];
+		if (skb) {
+			/* Offset/length of the received frame are packed into buff_info. */
+			skb_reserve(skb, VMAC_GET_OFFSET(descw1));
+			skb_put(skb, VMAC_GET_LEN(descw1));
+
+			eth = (struct ethhdr *)(skb->data);
+			if (unlikely(ntohs(eth->h_proto) == QDPC_APP_NETLINK_TYPE)) {
+				/* Double Check if it's netlink packet*/
+				cmd_hdr = (qdpc_cmd_hdr_t *)skb->data;
+				if (check_netlink_magic(cmd_hdr)) {
+					vmac_netlink_rx(ndev,
+						skb->data + sizeof(qdpc_cmd_hdr_t),
+						ntohs(cmd_hdr->len),
+						ntohs(cmd_hdr->rpc_type),
+						ntohs(cmd_hdr->total_len));
+				}
+				dev_kfree_skb(skb);
+			} else {
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+				pci_unmap_single(vmp->pdev, rbdp->buff_addr,
+					skb_end_pointer(skb) - skb->data, (int)DMA_BIDIRECTIONAL);
+#else
+				pci_unmap_single(vmp->pdev, rbdp->buff_addr,
+					skb_end_pointer(skb) - skb->data, (int)DMA_FROM_DEVICE);
+#endif /* QTN_SKB_RECYCLE_SUPPORT */
+
+#ifdef RX_IP_HDR_REALIGN
+				if (rx_pkt_align)
+					vmac_rx_ip_align_ahead(skb, 2);
+#endif
+				dump_rx_pkt(vmp, (char *)skb->data, (int)skb->len);
+
+				skb->protocol = eth_type_trans(skb, ndev);
+				processed++;
+
+
+				netif_receive_skb(skb);
+
+				ndev->stats.rx_packets++;
+				ndev->stats.rx_bytes += VMAC_GET_LEN(descw1);
+			}
+		}
+		/* Periodically ring the EP doorbell to acknowledge consumed Rx work. */
+		if ((ndev->stats.rx_packets & RX_DONE_INTR_MSK) == 0)
+			writel(TOPAZ_SET_INT(IPC_RC_RX_DONE), (volatile void *)(vmp->ep_ipc_reg));
+
+		dump_rx_bd(vmp);
+
+		ndev->last_rx = jiffies;
+
+		/*
+		 * We are done with the current buffer attached to this descriptor, so attach a new
+		 * one.
+		 */
+		if (skb2rbd_attach(ndev, i, descw1 & VMAC_BD_WRAP) == 0) {
+			if (++i >= vmp->rx_bd_num)
+				i = 0;
+			vmp->rx_bd_index = i;
+			rbdp = &vmp->rx_bd_base[i];
+		} else {
+			break;
+		}
+	}
+#ifdef QTN_WAKEQ_SUPPORT
+	vmac_try_wake_queue(ndev);
+#endif
+	/* Budget not exhausted: leave polling mode and unmask interrupts. */
+	if (processed < budget) {
+		napi_complete(napi);
+		enable_vmac_ints(vmp);
+		vmac_irq_napi_fixup(vmp);
+	}
+
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	spin_lock(&vmp->rx_skb_freelist_lock);
+	__vmac_rx_skb_freelist_refill(vmp);
+	spin_unlock(&vmp->rx_skb_freelist_lock);
+#endif
+
+	return processed;
+}
+
+/*
+ * Attach a fresh receive buffer to Rx descriptor rx_bd_index: take an skb
+ * from the recycle freelist (or allocate one), map it for DMA, and mark the
+ * descriptor EMPTY (preserving the caller-supplied WRAP bit).
+ * On allocation failure the skb slot is NULLed so a stale packet cannot be
+ * delivered, and -1 is returned.
+ */
+static int __sram_text skb2rbd_attach(struct net_device *ndev, uint16_t rx_bd_index, uint32_t wrap)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	volatile struct vmac_bd * rbdp;
+	uint32_t buff_addr;
+	struct sk_buff *skb = NULL;
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	spin_lock(&vmp->rx_skb_freelist_lock);
+	if (unlikely(!(skb = __vmac_rx_skb_freelist_pop(vmp)))) {
+		spin_unlock(&vmp->rx_skb_freelist_lock);
+		vmp->rx_skb[rx_bd_index] = NULL;/* prevent old packet from passing the packet up */
+		return -1;
+	}
+	spin_unlock(&vmp->rx_skb_freelist_lock);
+#else
+	if (!(skb = dev_alloc_skb(SKB_BUF_SIZE))) {
+		vmp->rx_skb_alloc_failures++;
+		vmp->rx_skb[rx_bd_index] = NULL;/* prevent old packet from passing the packet up */
+		return -1;
+	}
+#endif /* QTN_SKB_RECYCLE_SUPPORT */
+	skb->dev = ndev;
+
+	vmp->rx_skb[rx_bd_index] = skb;
+#ifndef QTN_SKB_RECYCLE_SUPPORT
+	/* Move skb->data to a cache line boundary */
+	skb_reserve(skb, align_buf_dma_offset(skb->data));
+#endif /* QTN_SKB_RECYCLE_SUPPORT */
+
+	/* Invalidate cache and map virtual address to bus address. */
+	rbdp = &vmp->rx_bd_base[rx_bd_index];
+
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	/* Recycled skbs were already DMA-mapped when refilled. */
+	buff_addr = virt_to_bus(skb->data);
+#else
+	buff_addr = (uint32_t)pci_map_single(vmp->pdev, skb->data,
+				skb_end_pointer(skb) - skb->data, (int)DMA_FROM_DEVICE);
+#endif
+	rbdp->buff_addr = cpu_to_le32(buff_addr);
+
+	/* TODO: packet length, currently don't check the length */
+	rbdp->buff_info =  cpu_to_le32(VMAC_BD_EMPTY | wrap);
+
+	return 0;
+}
+
+
+/*
+ * Reclaim Tx buffers the EP has consumed: walk from tx_reclaim_start up to
+ * the EP-updated index (*ep_next_rx_pkt), unmapping and freeing (or, with
+ * recycle support, pushing to the Rx freelist) each completed skb.
+ * Caller must hold tx_lock where RC_TXDONE_TIMER is enabled.
+ * NOTE(review): the bda parameter is unused here; tbdp->buff_addr is passed
+ * to the unmap without le32_to_cpu() (cpu_to_le32 was applied on the Rx
+ * path) -- confirm this host is little-endian.
+ */
+static __attribute__((section(".sram.text"))) void
+vmac_tx_teardown(struct net_device *ndev, qdpc_pcie_bda_t *bda)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	volatile struct vmac_bd *tbdp;
+	uint16_t i;
+	uint32_t end_idx = le32_to_cpu(VMAC_REG_READ(vmp->ep_next_rx_pkt));
+
+	i = vmp->tx_reclaim_start;
+
+	while (i != end_idx) {
+		struct sk_buff *skb;
+		skb = vmp->tx_skb[i];
+		if (!skb)
+			break;
+		tbdp = &vmp->tx_bd_base[i];
+		ndev->stats.tx_packets++;
+
+		ndev->stats.tx_bytes +=  skb->len;
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+		vmac_rx_skb_freelist_push(vmp, (dma_addr_t)tbdp->buff_addr, skb);
+#else
+		pci_unmap_single(vmp->pdev, (dma_addr_t)tbdp->buff_addr,
+			skb->len, (int)DMA_TO_DEVICE);
+		dev_kfree_skb(skb);
+#endif /* QTN_SKB_RECYCLE_SUPPORT */
+		vmp->tx_skb[i] = NULL;
+
+		vmp->vmac_skb_free++;
+
+		vmp->vmac_tx_queue_len--;
+
+		if (++i >= vmp->tx_bd_num)
+			i = 0;
+	}
+
+	vmp->tx_reclaim_start = i;
+}
+
+#ifdef QTN_TX_SKBQ_SUPPORT
+/*
+ * Drain up to `budget` skbs from the staging queue into vmac_tx().
+ * A NETDEV_TX_BUSY result re-queues the skb at the head and stops.
+ * Caller must hold tx_skbq_lock. Remaining work is deferred to the tasklet.
+ */
+static inline int __vmac_process_tx_skbq(struct net_device *ndev, uint32_t budget)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	struct sk_buff *skb;
+
+	while(!vmp->txqueue_stopped && (skb = __skb_dequeue(&vmp->tx_skb_queue)) != NULL) {
+		if (vmac_tx((void *)skb, ndev) != NETDEV_TX_OK) {
+			__skb_queue_head(&vmp->tx_skb_queue, skb);
+			break;
+		}
+
+		if (--budget == 0) {
+			break;
+		}
+	}
+
+	if (skb_queue_len(&vmp->tx_skb_queue) && !vmp->txqueue_stopped) {
+		tasklet_schedule(&vmp->tx_skbq_tasklet);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+/* Locked wrapper around __vmac_process_tx_skbq(). */
+static int vmac_process_tx_skbq(struct net_device *ndev, uint32_t budget)
+{
+	int ret;
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	spin_lock(&vmp->tx_skbq_lock);
+	ret = __vmac_process_tx_skbq(ndev, budget);
+	spin_unlock(&vmp->tx_skbq_lock);
+
+	return ret;
+}
+
+/* Tasklet body: drain deferred Tx work with its own (larger) budget. */
+static void __attribute__((section(".sram.text"))) vmac_tx_skbq_tasklet(unsigned long data)
+{
+	struct net_device *ndev = (struct net_device *)data;
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	vmac_process_tx_skbq(ndev, vmp->tx_skbq_tasklet_budget);
+}
+
+/*
+ * ndo_start_xmit: stage the skb on tx_skb_queue and drain within budget.
+ * NOTE(review): when the staging queue is full the skb is dropped without
+ * bumping any drop counter -- consider accounting.
+ */
+static int vmac_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	int ret;
+	unsigned long flag;
+
+	if (unlikely(skb_queue_len(&vmp->tx_skb_queue) >= vmp->tx_skbq_max_size)) {
+                dev_kfree_skb((void *)skb);
+		return NETDEV_TX_OK;
+	}
+
+	qtn_spin_lock_bh_save(&vmp->tx_skbq_lock, &flag);
+	__skb_queue_tail(&vmp->tx_skb_queue, skb);
+	ret = __vmac_process_tx_skbq(ndev, vmp->tx_skbq_budget);
+	qtn_spin_unlock_bh_restore(&vmp->tx_skbq_lock, &flag);
+
+	return ret;
+}
+#else
+/* ndo_start_xmit without the staging queue: hand straight to vmac_tx(). */
+static int vmac_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	return vmac_tx((void *)skb, ndev);
+}
+#endif
+
+/*
+ * Drop a packet that cannot be transmitted: simply release the skb.
+ * The ndev argument is accepted for interface symmetry with vmac_tx()
+ * but is not used here.
+ */
+void vmac_tx_drop(void *pkt_handle, struct net_device *ndev)
+{
+	dev_kfree_skb((struct sk_buff *)pkt_handle);
+}
+
+#ifdef QTN_WAKEQ_SUPPORT
+/*
+ * Stop the netdev Tx queue (once): clear the EP-shared wake flag, notify
+ * the EP via IPC_RC_STOP_TX, and call netif_stop_queue().
+ * txqueue_op_lock serialises against vmac_try_wake_queue().
+ */
+static inline void vmac_try_stop_queue(struct net_device *ndev)
+{
+	unsigned long flags;
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	spin_lock_irqsave(&vmp->txqueue_op_lock, flags);
+
+	if (!vmp->txqueue_stopped) {
+		vmp->txqueue_stopped = 1;
+		*vmp->txqueue_wake = 0;
+		barrier();
+		writel(TOPAZ_SET_INT(IPC_RC_STOP_TX), (volatile void *)(vmp->ep_ipc_reg));
+		vmp->tx_stop_queue_cnt++;
+		netif_stop_queue(ndev);
+	}
+	spin_unlock_irqrestore(&vmp->txqueue_op_lock, flags);
+}
+
+/*
+ * Restart the Tx queue when the queue is stopped AND the EP has set the
+ * shared wake flag; also re-kicks the staging-queue tasklet if enabled.
+ */
+static inline void vmac_try_wake_queue(struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vmp->txqueue_op_lock, flags);
+	if (vmp->txqueue_stopped && *vmp->txqueue_wake) {
+
+		vmp->txqueue_stopped = 0;
+
+		netif_wake_queue(ndev);
+#ifdef QTN_TX_SKBQ_SUPPORT
+		tasklet_schedule(&vmp->tx_skbq_tasklet);
+#endif
+	}
+	spin_unlock_irqrestore(&vmp->txqueue_op_lock, flags);
+}
+#endif
+
+/*
+ * Queue one packet to the EP over PCIe: reclaim completed Tx buffers, take
+ * the next Tx descriptor, DMA-map the skb, publish {addr, len|VALID} into
+ * the EP's request queue, and ring the EP doorbell (IPC_EP_RX_PKT).
+ * One BD slot is deliberately kept unused so a full ring can be told apart
+ * from an empty one. Returns NETDEV_TX_OK, or NETDEV_TX_BUSY with the skb
+ * left unqueued when the ring is full.
+ */
+int __attribute__((section(".sram.text")))
+vmac_tx(void *pkt_handle, struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	uint16_t i; /* tbd index */
+	volatile struct vmac_bd *tbdp; /* Tx BD pointer */
+	int len;
+	struct sk_buff *skb;
+	uint32_t baddr;
+	qdpc_pcie_bda_t *bda = vmp->bda;
+
+	/* TODO: Under current architect, register_netdev() is called
+	before EP is ready. So an variable ep_ready is added to achieve
+	defensive programming. We need to change the code segment later */
+	if (unlikely(vmp->ep_ready == 0)) {
+		vmac_tx_drop(pkt_handle, ndev);
+		return NETDEV_TX_OK;
+	}
+
+	vmp->vmac_xmit_cnt++;
+#ifdef RC_TXDONE_TIMER
+	spin_lock(&vmp->tx_lock);
+#endif
+	/* Tear down the previous skb transmitted by DMA */
+	vmac_tx_teardown(ndev, bda);
+
+	/* Reserve one entry space to differentiate full and empty case */
+	if (vmp->vmac_tx_queue_len >= vmp->tx_bd_num - 2) {
+#ifdef QTN_WAKEQ_SUPPORT
+		vmac_try_stop_queue(ndev);
+#endif
+		if (vmp->vmac_tx_queue_len >= vmp->tx_bd_num - 1) {
+#ifdef RC_TXDONE_TIMER
+			spin_unlock(&vmp->tx_lock);
+#endif
+			vmp->tx_bd_busy_cnt++;
+			printk(KERN_ERR "%s fail to get BD\n", ndev->name);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	i = vmp->tx_bd_index;
+
+	skb = (struct sk_buff *)pkt_handle;
+	vmp->tx_skb[i] = (struct sk_buff *)pkt_handle;
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	/* BIDIRECTIONAL so the buffer can later be recycled as an Rx buffer. */
+	baddr = (uint32_t)pci_map_single(vmp->pdev, skb->data, skb->len, (int)DMA_BIDIRECTIONAL);
+#else
+	baddr = (uint32_t)pci_map_single(vmp->pdev, skb->data, skb->len, (int)DMA_TO_DEVICE);
+#endif
+	len = skb->len;
+	/* Ensure the mapped data is visible before publishing the descriptor. */
+	wmb();
+
+	/* Update local descriptor array */
+	tbdp = &vmp->tx_bd_base[i];
+	tbdp->buff_addr = baddr;
+
+	/* Update remote Request Queue */
+	VMAC_REG_WRITE(&bda->request[i].addr, (baddr));
+	VMAC_REG_WRITE(&bda->request[i].info, (len | PCIE_TX_VALID_PKT));
+
+	vmp->vmac_tx_queue_len++;
+
+	dump_tx_pkt(vmp, bus_to_virt(baddr), len);
+
+	if (++i >= vmp->tx_bd_num)
+		i = 0;
+
+	vmp->tx_bd_index = i;
+
+	dump_tx_bd(vmp);
+
+	/* Doorbell: tell the EP a new request-queue entry is ready. */
+	writel(TOPAZ_SET_INT(IPC_EP_RX_PKT), (volatile void *)(vmp->ep_ipc_reg));
+
+#ifdef RC_TXDONE_TIMER
+	vmac_tx_teardown(ndev, bda);
+	mod_timer(&vmp->tx_timer, jiffies + 1);
+	spin_unlock(&vmp->tx_lock);
+#endif
+	return NETDEV_TX_OK;
+}
+
+/*
+ * Device interrupt handler (MSI or legacy INTx): check for an EP reset,
+ * deassert INTx when not using MSI, then hand Rx work to NAPI and retry
+ * waking a stopped Tx queue.
+ */
+static irqreturn_t vmac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = (struct net_device *)dev_id;
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	handle_ep_rst_int(ndev);
+
+	if(!vmp->msi_enabled) {
+		/* Deassert remote INTx message */
+		qdpc_deassert_intx(vmp);
+	}
+
+	vmp->intr_cnt++;
+
+	vmac_napi_schedule(vmp);
+#ifdef QTN_WAKEQ_SUPPORT
+	vmac_try_wake_queue(ndev);
+#endif
+	dump_rx_int(vmp);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * The Tx ring has been full longer than the watchdog timeout
+ * value. The transmitter must be hung?
+ */
+/* ndo_tx_timeout: log the event and reset trans_start so the watchdog rearms. */
+inline static void vmac_tx_timeout(struct net_device *ndev)
+{
+	printk(KERN_ERR "%s: vmac_tx_timeout: ndev=%p\n", ndev->name, ndev);
+	ndev->trans_start = jiffies;
+}
+
+#ifdef RC_TXDONE_TIMER
+/*
+ * Timer callback: reclaim completed Tx skbs. If the ring drained, stop the
+ * timer; otherwise re-ring the EP doorbell and rearm for the next jiffy.
+ */
+static void vmac_tx_buff_cleaner(struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	qdpc_pcie_bda_t *bda = vmp->bda;
+
+	spin_lock(&vmp->tx_lock);
+	vmac_tx_teardown(ndev, bda);
+
+	if (vmp->tx_skb[vmp->tx_reclaim_start] == NULL) {
+		del_timer(&vmp->tx_timer);
+	} else {
+		writel(TOPAZ_SET_INT(IPC_EP_RX_PKT), (volatile void *)(vmp->ep_ipc_reg));
+		mod_timer(&vmp->tx_timer, jiffies + 1);
+	}
+	spin_unlock(&vmp->tx_lock);
+}
+#endif
+
+/* ethtools support */
+/* Link settings are not readable on this virtual MAC. */
+static int vmac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+	return -EINVAL;
+}
+
+/* Link settings are not configurable; still require CAP_NET_ADMIN first. */
+static int vmac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	return -EINVAL;
+}
+
+/* No private ioctls are supported. */
+static int vmac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	return -EINVAL;
+}
+
+/*
+ * ethtool -i handler: report driver name/version and a synthetic bus-info
+ * string ("<driver> <mac_id>"). Fix: the ethtool_drvinfo fields are
+ * fixed-size arrays, so use bounded copies instead of strcpy()/sprintf().
+ */
+static void vmac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	info->fw_version[0] = '\0';
+	snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME, vmp->mac_id);
+	info->regdump_len = 0;
+}
+
+/* ethtool entry points (link state via the generic helper). */
+static const struct ethtool_ops vmac_ethtool_ops = {
+	.get_settings = vmac_get_settings,
+	.set_settings = vmac_set_settings,
+	.get_drvinfo = vmac_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+
+/* netdev entry points wired to the handlers above. */
+static const struct net_device_ops vmac_device_ops = {
+	.ndo_open = vmac_open,
+	.ndo_stop = vmac_close,
+	.ndo_start_xmit = vmac_xmit,
+	.ndo_do_ioctl = vmac_ioctl,
+	.ndo_tx_timeout = vmac_tx_timeout,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_get_stats = vmac_get_stats,
+};
+
+/*
+ * Allocate an Ethernet net_device with vmac_priv private data, using the
+ * alloc_netdev() signature appropriate to the running kernel (3.17 added
+ * the name-assign-type argument; 0 is passed there).
+ * Returns NULL (after logging) on failure.
+ */
+struct net_device *vmac_alloc_ndev(void)
+{
+	struct net_device * ndev;
+
+        /* Allocate device structure */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+        ndev =
+            alloc_netdev(sizeof(struct vmac_priv), vmaccfg.ifname, 0,
+                         ether_setup);
+#else
+	ndev = alloc_netdev(sizeof(struct vmac_priv), vmaccfg.ifname, ether_setup);
+#endif
+	if(!ndev)
+		printk(KERN_ERR "%s: alloc_etherdev failed\n", vmaccfg.ifname);
+
+	return ndev;
+}
+EXPORT_SYMBOL(vmac_alloc_ndev);
+
+/*
+ * Parse a textual MAC address ("xx:xx:xx:xx:xx:xx") into six bytes.
+ * Each octet is read as hex; a NULL addr yields all-zero bytes. After each
+ * octet the cursor skips one separator character if one is present.
+ */
+static void eth_parse_enetaddr(const char *addr, uint8_t *enetaddr)
+{
+	char *stop;
+	int octet;
+
+	for (octet = 0; octet < 6; ++octet) {
+		if (!addr) {
+			enetaddr[octet] = 0;
+			continue;
+		}
+		enetaddr[octet] = simple_strtoul(addr, &stop, 16);
+		addr = (*stop != '\0') ? stop + 1 : stop;
+	}
+}
+
+
+/*
+ * Main netdev bring-up, called from PCI probe with drvdata already holding
+ * the net_device. Sets the MAC address (module param or random), wires up
+ * netdev/ethtool ops, allocates the skb tables, BD tables and Rx buffers,
+ * registers the netdev and sysfs attributes, and initialises the optional
+ * reclaim timer / Tx staging queue / recycle freelist.
+ * Returns 0 on success or a negative errno, unwinding partial allocations.
+ * NOTE(review): vmp->bda and vmp->sysctl_bar are read here but set by
+ * earlier probe code outside this file -- confirm ordering. The freelist
+ * refill is performed twice (before rxbuffer init and again near the end);
+ * the second call looks redundant. The register_netdev() failure path does
+ * not netif_napi_del() the NAPI instance added just above.
+ */
+int vmac_net_init(struct pci_dev *pdev)
+{
+	struct vmac_priv *vmp = NULL;
+	struct net_device *ndev = NULL;
+	int err = -ENOMEM;
+	__iomem qdpc_pcie_bda_t *bda;
+
+	printk(KERN_INFO"%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+	ndev = (struct net_device *)pci_get_drvdata(pdev);
+	if (!ndev)
+		goto vnet_init_err_0;
+
+
+	/* MAC address: module parameter if given, otherwise random. */
+	if (ethaddr)
+		eth_parse_enetaddr(ethaddr, ndev->dev_addr);
+
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		random_ether_addr(ndev->dev_addr);
+
+	ndev->netdev_ops = &vmac_device_ops;
+	ndev->tx_queue_len = QTN_GLOBAL_INIT_EMAC_TX_QUEUE_LEN;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)
+        netdev_set_default_ethtool_ops(ndev, &vmac_ethtool_ops);
+#else
+	SET_ETHTOOL_OPS(ndev, &vmac_ethtool_ops);
+#endif
+
+	/* Initialize private data */
+	vmp = netdev_priv(ndev);
+	vmp->pdev = pdev;
+	vmp->ndev = ndev;
+
+	vmp->pcfg = &vmaccfg;
+	vmp->tx_bd_num = vmp->pcfg->tx_bd_num;
+	vmp->rx_bd_num = vmp->pcfg->rx_bd_num;
+
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	spin_lock_init(&vmp->rx_skb_freelist_lock);
+	skb_queue_head_init(&vmp->rx_skb_freelist);
+	vmp->rx_skb_freelist_fill_level = QTN_RX_SKB_FREELIST_FILL_SIZE;
+	vmp->skb_recycle_cnt = 0;
+	vmp->skb_recycle_failures = 0;
+#endif
+
+	/* The EP-side request queue has a fixed capacity. */
+	if (vmp->tx_bd_num > PCIE_RC_TX_QUEUE_LEN) {
+		printk("Error: The length of TX BD array should be no more than %d\n",
+				PCIE_RC_TX_QUEUE_LEN);
+		goto vnet_init_err_0;
+	}
+
+	vmp->ep_ipc_reg = (unsigned long)
+		QDPC_BAR_VADDR(vmp->sysctl_bar, TOPAZ_IPC_OFFSET);
+	ndev->irq = pdev->irq;
+
+	ndev->if_port = QDPC_PLATFORM_IFPORT;
+
+	ndev->watchdog_timeo = VMAC_TX_TIMEOUT;
+
+	bda = vmp->bda;
+
+	/* Publish our ring sizes to the EP through the BDA. */
+	qdpc_pcie_posted_write(vmp->tx_bd_num, &bda->bda_rc_tx_bd_num);
+	qdpc_pcie_posted_write(vmp->rx_bd_num, &bda->bda_rc_rx_bd_num);
+
+	/* Allocate Tx & Rx SKB descriptor array */
+	if (alloc_skb_desc_array(ndev))
+		goto vnet_init_err_0;
+
+	/* Allocate and initialise Tx & Rx descriptor array */
+	if (alloc_bd_tbl(ndev))
+		goto vnet_init_err_1;
+
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	__vmac_rx_skb_freelist_refill(vmp);
+#endif
+
+	if (alloc_and_init_rxbuffers(ndev))
+		goto vnet_init_err_2;
+
+	/* Initialize NAPI */
+	netif_napi_add(ndev, &vmp->napi, vmac_rx_poll, 10);
+
+	/* Register device */
+	if ((err = register_netdev(ndev)) != 0) {
+		printk(KERN_ERR "%s: Cannot register net device, error %d\n", DRV_NAME, err);
+		goto vnet_init_err_3;
+	}
+	printk(KERN_INFO"%s: Vmac Ethernet found\n", ndev->name);
+
+	/* Add the device attributes */
+	err = sysfs_create_group(&ndev->dev.kobj, &vmac_attr_group);
+	if (err) {
+		printk(KERN_ERR "Error creating sysfs files\n");
+	}
+
+	enable_ep_rst_detection(ndev);
+
+	vmp->show_item = SHOW_VMAC_STATS;
+
+#ifdef RC_TXDONE_TIMER
+	spin_lock_init(&vmp->tx_lock);
+	init_timer(&vmp->tx_timer);
+	vmp->tx_timer.data = (unsigned long)ndev;
+	vmp->tx_timer.function = (void (*)(unsigned long))&vmac_tx_buff_cleaner;
+#endif
+	spin_lock_init(&vmp->txqueue_op_lock);
+
+#ifdef QTN_TX_SKBQ_SUPPORT
+	vmp->tx_skbq_budget = QTN_RC_TX_BUDGET;
+	vmp->tx_skbq_max_size = vmp->tx_bd_num << 4;
+	vmp->tx_skbq_tasklet_budget = QTN_RC_TX_TASKLET_BUDGET;
+	spin_lock_init(&vmp->tx_skbq_lock);
+	skb_queue_head_init(&vmp->tx_skb_queue);
+	tasklet_init(&vmp->tx_skbq_tasklet, vmac_tx_skbq_tasklet, (unsigned long)ndev);
+#endif
+
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	__vmac_rx_skb_freelist_refill(vmp);
+#endif
+
+	return 0;
+
+vnet_init_err_3:
+	free_rx_skbs(vmp);
+vnet_init_err_2:
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	vmac_rx_skb_freelist_purge(vmp);
+#endif
+	free_bd_tbl(vmp);
+vnet_init_err_1:
+	free_skb_desc_array(ndev);
+vnet_init_err_0:
+	return err;
+}
+EXPORT_SYMBOL(vmac_net_init);
+
+/*
+ * Re-initialise the data path after an EP reset: republish ring sizes,
+ * reallocate the skb tables, BD tables and Rx buffers, and wake a Tx queue
+ * that was left stopped across the reset.
+ * Returns SUCCESS or a negative errno, unwinding partial allocations.
+ */
+int vmac_recovery_init(struct vmac_priv *priv, struct net_device *ndev)
+{
+	int err = -ENOMEM;
+
+	qdpc_pcie_posted_write(priv->tx_bd_num, &priv->bda->bda_rc_tx_bd_num);
+	qdpc_pcie_posted_write(priv->rx_bd_num, &priv->bda->bda_rc_rx_bd_num);
+
+	if (alloc_skb_desc_array(ndev))
+		goto vnet_recovery_err_0;
+
+	if (alloc_bd_tbl(ndev))
+		goto vnet_recovery_err_1;
+
+#ifdef QTN_WAKEQ_SUPPORT
+	if (unlikely(priv->txqueue_stopped)) {
+		printk("Recovery: Wake tx queue\n");
+		*priv->txqueue_wake = 1;
+		vmac_try_wake_queue(ndev);
+	}
+#endif
+
+	if (alloc_and_init_rxbuffers(ndev))
+		goto vnet_recovery_err_2;
+
+	return SUCCESS;
+
+vnet_recovery_err_2:
+	free_bd_tbl(priv);
+vnet_recovery_err_1:
+	free_skb_desc_array(ndev);
+vnet_recovery_err_0:
+	return err;
+}
+EXPORT_SYMBOL(vmac_recovery_init);
+
+/* Free every skb still attached to the Rx ring and reset the Rx index. */
+static void free_rx_skbs(struct vmac_priv *vmp)
+{
+	/* All Ethernet activity should have ceased before calling
+	 * this function
+	 */
+	uint16_t i;
+	for (i = 0; i < vmp->rx_bd_num; i++) {
+		if (vmp->rx_skb[i]) {
+			dev_kfree_skb(vmp->rx_skb[i]);
+			vmp->rx_skb[i] = 0;
+		}
+	}
+
+	vmp->rx_bd_index = 0;
+}
+
+/* Free every skb still attached to the Tx ring and reset all Tx state. */
+static void free_tx_pkts(struct vmac_priv *vmp)
+{
+	/* All Ethernet activity should have ceased before calling
+	 * this function
+	 */
+	uint16_t i;
+	for (i = 0; i < vmp->tx_bd_num; i++) {
+		if (vmp->tx_skb[i]) {
+			dev_kfree_skb(vmp->tx_skb[i]);
+			vmp->tx_skb[i] = 0;
+		}
+	}
+
+	vmp->tx_bd_index = 0;
+	/* NOTE(review): ep_next_rx_pkt is a pointer into the coherent BD
+	 * region; this NULLs the pointer itself rather than clearing the
+	 * shared counter (*ep_next_rx_pkt). Safe only because every caller
+	 * re-runs alloc_bd_tbl() before the pointer is next dereferenced
+	 * -- confirm intent. */
+	vmp->ep_next_rx_pkt = 0;
+	vmp->tx_reclaim_start = 0;
+	vmp->vmac_tx_queue_len = 0;
+}
+
+/* Mark every Tx descriptor EMPTY (available for a new transmit). */
+static void init_tx_bd(struct vmac_priv *vmp)
+{
+	uint16_t i;
+	for (i = 0; i< vmp->tx_bd_num; i++)
+		vmp->tx_bd_base[i].buff_info |= cpu_to_le32(VMAC_BD_EMPTY);
+}
+
+/*
+ * Zero the Rx descriptor table, attach a freshly mapped skb to every slot,
+ * and set the WRAP bit on the last descriptor to close the ring.
+ * Returns 0 on success, -1 if any attach fails.
+ */
+static int alloc_and_init_rxbuffers(struct net_device *ndev)
+{
+	uint16_t i;
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	memset((void *)vmp->rx_bd_base, 0, vmp->rx_bd_num * VMAC_BD_LEN);
+
+	/* Allocate rx buffers */
+	for (i = 0; i < vmp->rx_bd_num; i++) {
+		if (skb2rbd_attach(ndev, i, 0)) {
+			return -1;
+		}
+	}
+
+	vmp->rx_bd_base[vmp->rx_bd_num - 1].buff_info |= cpu_to_le32(VMAC_BD_WRAP);
+	return 0;
+}
+
+extern int qdpc_unmap_iomem(struct vmac_priv *priv);
+/*
+ * Full teardown on driver removal: sysfs file, netdev registration, all
+ * queued skbs, the recycle freelist, EP-reset detection, NAPI, and finally
+ * the coherent descriptor region.
+ */
+void vmac_clean(struct net_device *ndev)
+{
+	struct vmac_priv *vmp;
+
+	if (!ndev)
+		return;
+
+	vmp = netdev_priv(ndev);
+
+	device_remove_file(&ndev->dev, &dev_attr_dbg);
+
+	unregister_netdev(ndev);
+
+	free_rx_skbs(vmp);
+	free_tx_pkts(vmp);
+	free_skb_desc_array(ndev);
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	vmac_rx_skb_freelist_purge(vmp);
+#endif
+
+	disable_ep_rst_detection(ndev);
+
+	netif_napi_del(&vmp->napi);
+
+	free_bd_tbl(vmp);
+}
+
+/*
+ * Partial teardown used on EP reset: release the data-path resources that
+ * vmac_recovery_init() will rebuild, but keep the netdev registered.
+ */
+void vmac_recovery_clean(struct net_device *ndev)
+{
+	struct vmac_priv *vmp;
+
+	vmp = netdev_priv(ndev);
+
+	free_rx_skbs(vmp);
+	free_tx_pkts(vmp);
+	free_skb_desc_array(ndev);
+	free_bd_tbl(vmp);
+}
+
+/* Unmask device interrupts so send/receive can proceed once the IRQ is hooked up. */
+static void bring_up_interface(struct net_device *ndev)
+{
+	/* Interface will be ready to send/receive data, but will need hooking
+	 * up to the interrupts before anything will happen.
+	 */
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	enable_vmac_ints(vmp);
+}
+
+/* Mask device interrupts, quiescing MAC/DMA activity. */
+static void shut_down_interface(struct net_device *ndev)
+{
+	struct vmac_priv *vmp = netdev_priv(ndev);
+	/* Close down MAC and DMA activity and clear all data. */
+	disable_vmac_ints(vmp);
+}
+
+
+/*
+ * ndo_open: unmask device interrupts, enable NAPI, claim the IRQ, start
+ * the Tx queue and kick one poll in case an interrupt was already missed.
+ * On IRQ-request failure NAPI is disabled again and the error returned.
+ */
+static int vmac_open(struct net_device *ndev)
+{
+	int retval = 0;
+	struct vmac_priv *vmp = netdev_priv(ndev);
+
+	bring_up_interface(ndev);
+
+	napi_enable(&vmp->napi);
+
+	/* Todo: request_irq here */
+	retval = request_irq(ndev->irq, &vmac_interrupt, 0, ndev->name, ndev);
+	if (retval) {
+		printk(KERN_ERR "%s: unable to get IRQ %d\n",
+			ndev->name, ndev->irq);
+		goto err_out;
+	}
+
+	netif_start_queue(ndev);
+
+	vmac_irq_open_fixup(vmp);
+
+	return 0;
+err_out:
+	napi_disable(&vmp->napi);
+	return retval;
+}
+
+/*
+ * ndo_stop: disable NAPI, mask device interrupts, stop the Tx queue and
+ * release the IRQ. Mirrors vmac_open() in reverse order.
+ */
+static int vmac_close(struct net_device *ndev)
+{
+	struct vmac_priv *const vmp = netdev_priv(ndev);
+
+	napi_disable(&vmp->napi);
+
+	shut_down_interface(ndev);
+
+	netif_stop_queue(ndev);
+
+	free_irq(ndev->irq, ndev);
+
+	return 0;
+}
+
+/* ndo_get_stats: counters are maintained directly in ndev->stats. */
+static struct net_device_stats *vmac_get_stats(struct net_device *ndev)
+{
+	return &(ndev->stats);
+}
diff --git a/drivers/qtn/pcie2/host/common/topaz_vnet.h b/drivers/qtn/pcie2/host/common/topaz_vnet.h
new file mode 100644
index 0000000..928b809
--- /dev/null
+++ b/drivers/qtn/pcie2/host/common/topaz_vnet.h
@@ -0,0 +1,231 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __DRIVERS_NET_TOPAZ_VNET_H
+#define __DRIVERS_NET_TOPAZ_VNET_H	1
+
+#define ETH_TX_TIMEOUT (100*HZ)
+#define MULTICAST_FILTER_LIMIT 64
+
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <qdpc_config.h>
+#include <topaz_netcom.h>
+
+#define PROC_NAME_SIZE		(32)
+/* buff_info bit layout: bit0 EMPTY, bit1 WRAP, bits[15:8] payload offset,
+ * bits[31:16] payload length. */
+#define VMAC_BD_EMPTY		((uint32_t)0x00000001)
+#define VMAC_BD_WRAP		((uint32_t)0x00000002)
+#define VMAC_BD_MASK_LEN	((uint32_t)0xFFFF0000)
+#define VMAC_BD_MASK_OFFSET ((uint32_t)0x0000FF00)
+
+#define VMAC_GET_LEN(x)		(((x) >> 16) & 0xFFFF)
+#define VMAC_GET_OFFSET(x)	(((x) >> 8) & 0xFF)
+#define VMAC_SET_LEN(len)	(((len) & 0xFFFF) << 16)
+#define VMAC_SET_OFFSET(of)	(((of) & 0xFF) << 8)
+
+/* Ring-index arithmetic modulo ring size m. */
+#define VMAC_INDX_MINUS(x, y, m) (((x) + (m) - (y)) % (m))
+#define VMAC_INDX_INC(index, m) do {	\
+	if (++(index) >= (m))	\
+		(index) = 0;	\
+	} while(0)
+
+/*
+ * Helper macros handling memory mapped area access
+ */
+#define VMAC_REG_TST(reg, val) ( *((volatile unsigned int *)(reg)) & (val) )
+#define VMAC_REG_SET(reg, val) { volatile unsigned int *r = (unsigned int *)(reg); *r = (*r | (val)); }
+#define VMAC_REG_CLR(reg, val) { volatile unsigned int *r = (unsigned int *)(reg); *r = (*r & ~(val)); }
+#define VMAC_REG_WRITE(reg, val) { *(volatile unsigned int *)reg = (val); }
+#define VMAC_REG_READ(reg) {*(volatile unsigned int *)(reg); }
+
+/* Per-call and per-tasklet Tx drain budgets. */
+#define QTN_RC_TX_BUDGET		(16)
+#define QTN_RC_TX_TASKLET_BUDGET	(32)
+
+/* Recycle-freelist fill target, hard cap, and minimum reusable skb size. */
+#define QTN_RX_SKB_FREELIST_FILL_SIZE	(1024)
+#define QTN_RX_SKB_FREELIST_MAX_SIZE	(8192)
+#define QTN_RX_BUF_MIN_SIZE		(1536)
+
+#define VMAC_NL_BUF_SIZE		USHRT_MAX
+
+/* One mapped PCIe BAR. */
+typedef struct qdpc_bar {
+        void *b_vaddr; /* PCIe bar virtual address */
+        dma_addr_t b_busaddr; /* PCIe bar physical address */
+        size_t b_len; /* Bar resource length */
+        uint32_t b_offset; /* Offset from start of map */
+        uint8_t b_index; /* Bar Index */
+} qdpc_bar_t;
+
+#define QDPC_BAR_VADDR(bar, off) ((bar).b_vaddr +(off))
+
+/* Static per-interface configuration (ring sizes, name). */
+struct vmac_cfg {
+	uint16_t rx_bd_num;
+	uint16_t tx_bd_num;
+	char ifname[PROC_NAME_SIZE];
+	struct net_device *dev;
+};
+
+#if defined(QTN_RC_ENABLE_HDP)
+enum pkt_type {
+        PKT_SKB = 0,
+        PKT_TQE
+};
+#endif
+
+/* Transmit request entry as published to the EP. */
+struct vmac_tx_buf {
+        uint32_t handle;
+        uint16_t len;
+#if defined(QTN_RC_ENABLE_HDP)
+        uint8_t type; /* 1 payload only, 0 skb */
+        uint8_t rsv;
+#else
+	uint16_t rsv;
+#endif
+};
+
+/* Per-device driver state, embedded in the net_device private area. */
+struct vmac_priv {
+	struct sk_buff **tx_skb;/* skb having post to PCIe DMA */
+	volatile struct vmac_bd *tx_bd_base; /* Tx buffer descriptor */
+	dma_addr_t paddr_tx_bd_base; /* Physical address of Tx BD array */
+	volatile uint32_t *ep_next_rx_pkt; /* EP-updated index of last consumed Tx packet */
+	uint16_t tx_bd_index;
+	uint16_t tx_reclaim_start;
+	uint16_t tx_bd_num;
+	uint8_t txqueue_stopped;
+	volatile uint32_t *txqueue_wake; /* shared variable with EP */
+	spinlock_t txqueue_op_lock;
+	unsigned long ep_ipc_reg; /* mapped EP doorbell register */
+	uint32_t tx_bd_busy_cnt; /* tx BD unavailable */
+	uint32_t tx_stop_queue_cnt;
+#ifdef RC_TXDONE_TIMER
+	struct timer_list tx_timer;
+	spinlock_t tx_lock;
+#endif
+	uint32_t vmac_tx_queue_len;
+
+	struct sk_buff **rx_skb;
+	volatile struct vmac_bd *rx_bd_base; /* Rx buffer descriptor  */
+	uint16_t rx_bd_index;
+	uint16_t rx_bd_num;
+
+	uint32_t rx_skb_alloc_failures;
+	uint32_t intr_cnt; /* msi/legacy interrupt counter */
+	uint32_t vmac_xmit_cnt;
+	uint32_t vmac_skb_free;
+
+	/* Netlink RPC clients */
+	struct sock *nl_socket;
+	uint32_t str_call_nl_pid;
+	uint32_t lib_call_nl_pid;
+	struct napi_struct napi;
+
+	uint32_t dbg_flg;
+
+	struct net_device *ndev;
+	struct pci_dev	*pdev;
+
+	int mac_id;
+
+	uint32_t dma_msi_imwr;
+	uint32_t dma_msi_data;
+	uint32_t dma_msi_dummy;
+	uint32_t ep_pciecfg0_val; /* used to deassert Legacy irq from RC */
+
+	/* The following members aren't related to datapath */
+	struct vmac_cfg *pcfg;
+	uint8_t show_item;
+
+	uint32_t addr_uncache; /* base of the coherent BD region */
+	uint32_t uncache_len;
+
+	struct task_struct *init_thread; /* Initialization thread */
+	uint8_t msi_enabled; /* PCIe MSI: 1 - Enabled, 0 - Disabled */
+
+	qdpc_bar_t sysctl_bar;
+	qdpc_bar_t epmem_bar;
+	qdpc_bar_t dmareg_bar;
+
+	uint32_t dma_imwr;
+
+	/* io memory pointers */
+	__iomem qdpc_pcie_bda_t *bda;
+	uint32_t ep_ready;
+
+#ifdef QTN_TX_SKBQ_SUPPORT
+	struct sk_buff_head tx_skb_queue;
+	spinlock_t tx_skbq_lock;
+	struct tasklet_struct tx_skbq_tasklet;
+	uint32_t tx_skbq_budget;
+	uint32_t tx_skbq_tasklet_budget;
+	uint32_t tx_skbq_max_size;
+#endif
+
+#ifdef QTN_SKB_RECYCLE_SUPPORT
+	struct sk_buff_head rx_skb_freelist;
+	spinlock_t rx_skb_freelist_lock;
+	uint32_t rx_skb_freelist_fill_level;
+	uint32_t skb_recycle_cnt;
+	uint32_t skb_recycle_failures;
+#endif
+
+	volatile uint32_t *ep_pmstate;
+	uint8_t *nl_buf; /* netlink RPC reassembly buffer */
+	size_t nl_len;
+};
+
+#define QTN_DISABLE_SOFTIRQ		(0xABCD)
+
+/*
+ * Take the lock, additionally disabling bottom halves only when called
+ * from process context with softirqs enabled. *flag records whether the
+ * _bh variant was used so the matching unlock can mirror it.
+ */
+static inline void qtn_spin_lock_bh_save(spinlock_t *lock, unsigned long *flag)
+{
+	if (likely(irqs_disabled() || in_softirq())) {
+		spin_lock(lock);
+		*flag = 0;
+	} else {
+		spin_lock_bh(lock);
+		*flag = QTN_DISABLE_SOFTIRQ;
+        }
+}
+
+/* Release a lock taken with qtn_spin_lock_bh_save(), honouring *flag. */
+static inline void qtn_spin_unlock_bh_restore(spinlock_t *lock, unsigned long *flag)
+{
+	if (unlikely(*flag == QTN_DISABLE_SOFTIRQ)) {
+		*flag = 0;
+		spin_unlock_bh(lock);
+	} else {
+		spin_unlock(lock);
+	}
+}
+
+/* Public entry points exported by topaz_vnet.c. */
+extern struct net_device *vmac_alloc_ndev(void);
+extern int vmac_net_init(struct pci_dev *pdev);
+extern int vmac_recovery_init(struct vmac_priv *priv, struct net_device *ndev);
+extern void vmac_clean(struct net_device *ndev);
+extern void vmac_recovery_clean(struct net_device *ndev);
+extern int vmac_tx(void *pkt_handle, struct net_device *ndev);
+
+/* PCIe core register offsets used by the platform glue. */
+#define PCIE_REG_CFG_BASE		0x0
+#define PCIE_LOGIC_PORT_CFG_BASE	(PCIE_REG_CFG_BASE + 0x700)
+#define PCIE_DMA_WR_INTR_MASK		0x2c4
+
+void vmac_pcie_edma_enable(struct vmac_priv *priv);
+void qdpc_deassert_intx(struct vmac_priv *priv);
+void qdpc_pcie_edma_enable(struct vmac_priv *priv);
+int qdpc_pcie_suspend(struct pci_dev *pdev, pm_message_t state);
+int qdpc_pcie_resume(struct pci_dev *pdev);
+#endif
diff --git a/drivers/qtn/pcie2/host/intel/Makefile b/drivers/qtn/pcie2/host/intel/Makefile
new file mode 100644
index 0000000..22be6ff
--- /dev/null
+++ b/drivers/qtn/pcie2/host/intel/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for Intel platform
+#
+
+EXTRA_CFLAGS	+= -Wall		\
+		   -I$(src)		\
+		   -I$(src)/../../include \
+		   -I$(src)/../common
+
+EXTRA_CFLAGS	+= -DRC_TXDONE_TIMER -DQTN_WAKEQ_SUPPORT
+ifneq ($(CONFIG_HOTPLUG_PCI_PCIE),)
+EXTRA_CFLAGS    += -DPCIE_HOTPLUG_SUPPORTED
+endif
+
+KVERSION = $(shell uname -r)
+
+default: all
+
+COMMON_DIR	:= ../common
+qdpc-host-objs   := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
+obj-m           :=  qdpc-host.o
+
+qdpc_host.o: $(qdpc-host-objs)
+	ld -r $^ -o $@
+
+all:
+	$(MAKE) -C /lib/modules/$(KVERSION)/build M=$(PWD) modules
+
+clean:
+	$(MAKE) -C /lib/modules/$(KVERSION)/build M=$(PWD) clean
+	rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
+	rm -rf Module.markers  modules.order *~ $(qdpc-host-objs)
+
diff --git a/drivers/qtn/pcie2/host/intel/qdpc_platform.c b/drivers/qtn/pcie2/host/intel/qdpc_platform.c
new file mode 100644
index 0000000..1dde009
--- /dev/null
+++ b/drivers/qtn/pcie2/host/intel/qdpc_platform.c
@@ -0,0 +1,102 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customer needs to modify this file.
+ */
+#include <linux/interrupt.h>
+#include <qdpc_platform.h>
+#include <topaz_vnet.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+/*
+ * Enable MSI interrupt of PCIe.
+ */
+void enable_vmac_ints(struct vmac_priv *vmp)
+{
+	enable_irq(vmp->pdev->irq);
+}
+
+/*
+ * Disable MSI interrupt of PCIe.
+ */
+void disable_vmac_ints(struct vmac_priv *vmp)
+{
+	disable_irq_nosync(vmp->pdev->irq);
+}
+
+
+/*
+ * Enable interrupt for detecting EP reset.
+ */
+void enable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Disable interrupt for detecting EP reset.
+ */
+void disable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Interrupt context for detecting EP reset.
+ * This function should do:
+ *   1. check interrupt status to see if EP reset.
+ *   2. if EP reset, handle it.
+ */
+void handle_ep_rst_int(struct net_device *ndev)
+{
+}
+
+/*
+ * PCIe driver update resource in PCI configure space after EP reset.
+ * This function should be called in such case:
+ *   1. The PCI configure space can be accessed after EP reset;
+ *   2. Kernel does not support PCIe hot-plug.
+ */
+void qdpc_update_hw_bar(struct pci_dev *pdev, uint8_t index)
+{
+	struct pci_bus_region region;
+	uint32_t addr, new;
+	int offset = PCI_BASE_ADDRESS_0 + 4 * index;
+	struct resource *res = pdev->resource + index;
+
+	if (!res->flags)
+		return;
+
+	pcibios_resource_to_bus(pdev, &region, res);
+	new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
+	pci_read_config_dword(pdev, offset, &addr);
+
+	if (addr != new) {
+		printk("PCI region %d: reset to PCI address %#llx", index, (unsigned long long)region.start);
+		pci_write_config_dword(pdev, offset, new);
+		if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
+		    (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+			printk(" (64bit address)");
+			new = region.start >> 16 >> 16;
+			pci_write_config_dword(pdev, offset + 4, new);
+		}
+		printk("\n");
+	}
+}
+
diff --git a/drivers/qtn/pcie2/host/intel/qdpc_platform.h b/drivers/qtn/pcie2/host/intel/qdpc_platform.h
new file mode 100644
index 0000000..4176b40
--- /dev/null
+++ b/drivers/qtn/pcie2/host/intel/qdpc_platform.h
@@ -0,0 +1,91 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __QDPC_PFDEP_H__
+#define __QDPC_PFDEP_H__
+
+#include <linux/version.h>
+
+#include <topaz_vnet.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#define IOREMAP      ioremap_wc
+#else    /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
+#define IOREMAP      ioremap
+#endif
+
+/* IO functions */
+#ifndef readb
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#endif
+
+#ifndef readw
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#endif
+
+#ifndef readl
+#define readl(addr) (*(volatile unsigned int *) (addr))
+#endif
+
+#ifndef writeb
+#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
+#endif
+
+#ifndef writew
+#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
+#endif
+
+#ifndef writel
+#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
+#endif
+
+/* Bit number and mask of MSI in the interrupt mask and status register */
+#define	QDPC_INTR_MSI_BIT		0
+#define QDPC_INTR_MSI_MASK		(1 << QDPC_INTR_MSI_BIT)
+
+/* Enable MSI interrupt of PCIe */
+extern void enable_vmac_ints(struct vmac_priv *vmp);
+/* Disable MSI interrupt of PCIe */
+extern void disable_vmac_ints(struct vmac_priv *vmp);
+
+/* Enable interrupt for detecting EP reset */
+extern void enable_ep_rst_detection(struct net_device *ndev);
+/* Disable interrupt for detecting EP reset */
+extern void disable_ep_rst_detection(struct net_device *ndev);
+/* Interrupt context for detecting EP reset */
+extern void handle_ep_rst_int(struct net_device *ndev);
+
+/* PCIe driver update resource in PCI configure space after EP reset */
+extern void qdpc_update_hw_bar(struct pci_dev *pdev, uint8_t index);
+
+/* Allocated buffer size for a packet */
+#define SKB_BUF_SIZE		2048
+
+/* Transmit Queue Length */
+#define QDPC_TX_QUEUE_SIZE	180
+
+/* Receive Queue Length */
+#define QDPC_RX_QUEUE_SIZE	384
+
+/* Customer defined function	*/
+#define qdpc_platform_init()                  0
+#define qdpc_platform_exit()                  do { } while(0)
+
+#endif /* __QDPC_PFDEP_H__ */
+
diff --git a/drivers/qtn/pcie2/host/mipsr2/Makefile b/drivers/qtn/pcie2/host/mipsr2/Makefile
new file mode 100644
index 0000000..b6aa1f8
--- /dev/null
+++ b/drivers/qtn/pcie2/host/mipsr2/Makefile
@@ -0,0 +1,30 @@
+#
+# Makefile for mipsr2 platform
+#
+
+EXTRA_CFLAGS	+= -Wall		\
+		   -I$(src)		\
+		   -I$(src)/../../include \
+		   -I$(src)/../common	\
+		   -D__BIG_ENDIAN
+
+EXTRA_CFLAGS    += -DQTN_TX_SKBQ_SUPPORT -DQTN_WAKEQ_SUPPORT
+PWD	:= $(shell pwd)
+
+default: all
+
+COMMON_DIR	:= ../common
+qdpc-host-objs   := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
+obj-m           :=  qdpc-host.o
+
+qdpc_host.o: $(qdpc-host-objs)
+	ld -r $^ -o $@
+
+all:
+	make -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+
+clean:
+	rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
+	rm -rf Module.markers  Module.symvers modules.order *~ $(qdpc-host-objs) *.o *.ko *.mod.o *.mod.c
+
diff --git a/drivers/qtn/pcie2/host/mipsr2/qdpc_platform.c b/drivers/qtn/pcie2/host/mipsr2/qdpc_platform.c
new file mode 100644
index 0000000..2d791fe
--- /dev/null
+++ b/drivers/qtn/pcie2/host/mipsr2/qdpc_platform.c
@@ -0,0 +1,74 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customer needs to modify this file.
+ */
+
+#include <linux/interrupt.h>
+
+#include <qdpc_platform.h>
+#include <topaz_vnet.h>
+#include <qdpc_regs.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+/*
+ * Enable MSI interrupt of PCIe.
+ */
+void enable_vmac_ints(struct vmac_priv *vmp)
+{
+	volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
+
+	writel(vmp->dma_msi_imwr, dma_wrd_imwr);
+}
+
+/*
+ * Disable MSI interrupt of PCIe.
+ */
+void disable_vmac_ints(struct vmac_priv *vmp)
+{
+	volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
+	writel(vmp->dma_msi_dummy, dma_wrd_imwr);
+}
+
+
+/*
+ * Enable interrupt for detecting EP reset.
+ */
+void enable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Disable interrupt for detecting EP reset.
+ */
+void disable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Interrupt context for detecting EP reset.
+ * This function should do:
+ *   1. check interrupt status to see if EP reset.
+ *   2. if EP reset, handle it.
+ */
+void handle_ep_rst_int(struct net_device *ndev)
+{
+}
diff --git a/drivers/qtn/pcie2/host/mipsr2/qdpc_platform.h b/drivers/qtn/pcie2/host/mipsr2/qdpc_platform.h
new file mode 100644
index 0000000..b3f678b
--- /dev/null
+++ b/drivers/qtn/pcie2/host/mipsr2/qdpc_platform.h
@@ -0,0 +1,101 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customer needs to modify this file.
+ */
+#ifndef __QDPC_PFDEP_H__
+#define __QDPC_PFDEP_H__
+
+#include <linux/version.h>
+
+#include <topaz_vnet.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#define IOREMAP      ioremap_wc
+#else    /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
+#define IOREMAP      ioremap
+#endif
+
+/* IO functions */
+#ifndef readb
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#endif
+
+#ifndef readw
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#endif
+
+#ifndef readl
+#define readl(addr) (*(volatile unsigned int *) (addr))
+#endif
+
+#ifndef writeb
+#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
+#endif
+
+#ifndef writew
+#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
+#endif
+
+#ifndef writel
+#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
+#endif
+
+#ifndef virt_to_bus
+#define virt_to_bus virt_to_phys
+#endif
+
+/* Bit number and mask of MSI in the interrupt mask and status register */
+#define	QDPC_INTR_MSI_BIT		0
+#define QDPC_INTR_MSI_MASK		(1 << QDPC_INTR_MSI_BIT)
+
+/* Enable MSI interrupt of PCIe */
+extern void enable_vmac_ints(struct vmac_priv *vmp);
+/* Disable MSI interrupt of PCIe */
+extern void disable_vmac_ints(struct vmac_priv *vmp);
+
+/* Enable interrupt for detecting EP reset */
+extern void enable_ep_rst_detection(struct net_device *ndev);
+/* Disable interrupt for detecting EP reset */
+extern void disable_ep_rst_detection(struct net_device *ndev);
+/* Interrupt context for detecting EP reset */
+extern void handle_ep_rst_int(struct net_device *ndev);
+
+/* Allocated buffer size for a packet */
+#define SKB_BUF_SIZE		2048
+
+/* Transmit Queue Length */
+#define QDPC_TX_QUEUE_SIZE	180
+
+/* Receive Queue Length */
+#define QDPC_RX_QUEUE_SIZE	384
+
+/* Customer defined function	*/
+#define qdpc_platform_init()                  0
+#define qdpc_platform_exit()                  do { } while(0)
+
+/* PCIe driver update resource in PCI configure space after EP reset */
+#define qdpc_update_hw_bar(pdev, index)       do { } while(0)
+
+/* TODO: If MSI IRQ-loss issue can be fixed, remove macro below */
+/*#define QDPC_PLATFORM_IRQ_FIXUP*/
+
+#endif /* __QDPC_PFDEP_H__ */
+
diff --git a/drivers/qtn/pcie2/host/quantenna/Makefile b/drivers/qtn/pcie2/host/quantenna/Makefile
new file mode 100644
index 0000000..82815c8
--- /dev/null
+++ b/drivers/qtn/pcie2/host/quantenna/Makefile
@@ -0,0 +1,70 @@
+#
+# Makefile for Quantenna RC platform
+#
+#
+
+EXTRA_CFLAGS	+= -Wall -Werror -Wno-unknown-pragmas \
+		   -I$(src)		\
+		   -I$(src)/../../include \
+		   -I../drivers/include/shared \
+		   -I../drivers/include/kernel \
+		   -I$(src)/../common
+
+EXTRA_CFLAGS    += -mlong-calls -DQTN_WAKEQ_SUPPORT
+
+ifeq ($(board_config),topaz_host_realign_config)
+EXTRA_CFLAGS    += -DQTN_BYTEALIGN
+endif
+
+ ifneq ($(CONFIG_HOTPLUG_PCI_PCIE),)
+ EXTRA_CFLAGS    += -DPCIE_HOTPLUG_SUPPORTED
+ endif
+
+ifeq (${PCIE_HOST_CRUMBS},1)
+EXTRA_CFLAGS += -finstrument-functions
+endif
+
+#EXTRA_CFLAGS	+= -DDEBUG
+
+ifneq ($(KERNELRELEASE),)
+COMMON_DIR	:= ../common
+TQE_DIR_TO_WORK := ../../tqe
+TQE_DIR_TO_LINUX:= ../drivers/pcie2/tqe
+EXTRA_CFLAGS += -I.
+ifeq ($(CONFIG_TOPAZ_DBDC_HOST), y)
+qdpc-host-objs	+= $(if $(wildcard $(TQE_DIR_TO_LINUX)), $(TQE_DIR_TO_WORK)/topaz_qfp.o)
+else
+qdpc-host-objs	+= $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
+endif
+
+qdpc-host-objs  += $(if $(wildcard $(TQE_DIR_TO_LINUX)), $(TQE_DIR_TO_WORK)/topaz_pcie_tqe.o)
+qdpc-host-objs  += qdpc_dspload.o
+
+obj-m           :=  qdpc-host.o
+
+else
+
+KERNELDIR	?= ../../../../linux
+INSTALL		= INSTALL_MOD_PATH=../linux/modules
+CROSS		= ARCH=arc CROSS_COMPILE=/usr/local/ARC/gcc/bin/arc-linux-uclibc-
+PWD		:= $(shell pwd)
+
+default:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+install:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
+
+endif
+
+clean:
+	rm -rf *.o  *~  core  .depend  .*.cmd  *.ko  *.mod.c  .tmp_versions  Module.symvers  modules.order
+	rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions $(COMMON_DIR)/*.o
+
+depend .depend dep:
+	$(CC) $(CFLAGS) -M *.c > .depend
+
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
+
diff --git a/drivers/qtn/pcie2/host/quantenna/qdpc_dspload.c b/drivers/qtn/pcie2/host/quantenna/qdpc_dspload.c
new file mode 100644
index 0000000..f8152f2
--- /dev/null
+++ b/drivers/qtn/pcie2/host/quantenna/qdpc_dspload.c
@@ -0,0 +1,177 @@
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/syscalls.h>
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <asm/io.h>
+#include <qtn/registers.h>
+#include <qtn/shared_params.h>
+#include <qtn/topaz_fwt_sw.h>
+#include <qtn/qtn_fw_info.h>
+#include "qdpc_dspload.h"
+
+static inline unsigned long
+qdpc_dsp_to_host_addr(unsigned long dsp_addr)
+{
+        void *ret = bus_to_virt(dsp_addr);
+        if (RUBY_BAD_VIRT_ADDR == ret) {
+                panic("Converting out of range DSP address 0x%lx to host address\n", dsp_addr);
+        }
+        return virt_to_phys(ret);
+}
+
+static char* qdpc_dsp_read(struct file *file, int buflen)
+{
+	char *p = NULL;
+
+	if (!file)
+		return NULL;
+
+	p = kmalloc(buflen*sizeof(unsigned char), GFP_ATOMIC);
+	if (p)
+		file->f_op->read(file, p, buflen, &file->f_pos);
+
+	return p;
+}
+
+static int qdpc_dsp_install_firmware(struct file *file, u32 *dsp_start_addr)
+{
+	Elf32_Ehdr *ehdr;
+	Elf32_Phdr *phdr;
+	Elf32_Phdr *phdr_o;
+	u8* vaddr;
+	int i, buflen;
+	char *pdata = NULL;
+	int e_phnum = 0;
+	unsigned long p_muc;
+	int match = 0;
+
+	buflen = sizeof(Elf32_Ehdr);
+	pdata = qdpc_dsp_read(file, buflen);
+	if (!pdata) {
+		return -1;
+	}
+	ehdr = (Elf32_Ehdr *)pdata;
+	e_phnum = ehdr->e_phnum;
+	kfree(pdata);
+
+	buflen = e_phnum * sizeof(Elf32_Phdr);
+	pdata = qdpc_dsp_read(file, buflen);
+	if (!pdata) {
+		return -1;
+	}
+	phdr = (Elf32_Phdr *)pdata;
+	phdr_o = (Elf32_Phdr *)pdata;
+
+	for(i = 0; i < e_phnum; i++, phdr++) {
+		pdata = qdpc_dsp_read(file, phdr->p_filesz);
+		if (!pdata) {
+			return -1;
+		}
+
+		if (FW_INFO_SEGMENT_FOUND(phdr->p_vaddr, phdr->p_filesz, pdata)) {
+			FW_INFO_CHECK_DATA((struct qtn_fw_info *)pdata, match, printk);
+			kfree(pdata);
+			continue;
+		}
+
+		/* Skip blocks for DSP X/Y memory */
+		if ((phdr->p_vaddr >= RUBY_DSP_XYMEM_BEGIN) && (phdr->p_vaddr <= RUBY_DSP_XYMEM_END)) {
+			kfree(pdata);
+			continue;
+		}
+
+		p_muc = qdpc_dsp_to_host_addr(phdr->p_vaddr);
+		printk("p_vaddr in ELF header is %p, "
+			"remapping to 0x%lx\n", (void *)phdr->p_vaddr, p_muc);
+
+		vaddr = ioremap_nocache(p_muc, phdr->p_memsz);
+		memcpy(vaddr, pdata, phdr->p_filesz);
+		memset(vaddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+
+		iounmap(vaddr);
+		kfree(pdata);
+	}
+
+	if (phdr_o)
+		kfree(phdr_o);
+
+	if (!match)
+		panic("DSP firmware version check failed\n");
+
+	*dsp_start_addr = ehdr->e_entry;
+
+	return(0);
+}
+
+static void hal_dsp_start(u32 dsp_start_addr)
+{
+#ifdef CONFIG_ARCH_ARC
+        /* Check that we can start this address */
+        if (dsp_start_addr & ((1 << RUBY_SYS_CTL_DSP_REMAP_SHIFT) - 1)) {
+                panic("DSP address 0x%x cannot be used as entry point\n", (unsigned)dsp_start_addr);
+        }
+        /* Tells DSP from which address start execution */
+        writel(RUBY_SYS_CTL_DSP_REMAP_VAL(dsp_start_addr), RUBY_SYS_CTL_DSP_REMAP);
+#else
+        /* Swap upper and lower half words for DSP instruction */
+        dsp_start_addr = ((dsp_start_addr >> 16) & 0xFFFF) | (dsp_start_addr << 16);
+
+        /* Push the jump instr and location into the mbx */
+        *(volatile u32*)IO_ADDRESS(UMS_REGS_MB + UMS_MBX_DSP_PUSH)
+                = DSP_JUMP_INSTR_SWAP;
+        *(volatile u32*)IO_ADDRESS(UMS_REGS_MB + UMS_MBX_DSP_PUSH)
+                = dsp_start_addr;
+#endif
+}
+
+void hal_enable_dsp(void)
+{
+#ifdef CONFIG_ARCH_ARC
+        const unsigned long reset = RUBY_SYS_CTL_RESET_DSP_ALL;
+
+        qtn_txbf_lhost_init();
+
+        writel(reset, RUBY_SYS_CTL_CPU_VEC_MASK);
+        writel(reset, RUBY_SYS_CTL_CPU_VEC);
+        writel(0, RUBY_SYS_CTL_CPU_VEC_MASK);
+#else
+        /* Bring the DSP out of reset */
+        *(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = DSP_RESET;
+        *(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = DSP_RESET;
+#endif
+}
+
+int qdpc_dsp_open(void)
+{
+	struct	file *file = NULL;
+	mm_segment_t fs;
+	u32 dsp_start_addr = 0;
+
+	file = filp_open(QDCP_DSP_FILE_NAME, O_RDONLY, 0);
+	if(IS_ERR(file)) {
+		printk("error occured while opening file %s, exiting...\n", QDCP_DSP_FILE_NAME);
+		return -1;
+	}
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	qdpc_dsp_install_firmware(file, &dsp_start_addr);
+
+        hal_dsp_start(dsp_start_addr);
+        hal_enable_dsp();
+
+	filp_close(file, NULL);
+	set_fs(fs);
+
+	return 0;
+}
diff --git a/drivers/qtn/pcie2/host/quantenna/qdpc_dspload.h b/drivers/qtn/pcie2/host/quantenna/qdpc_dspload.h
new file mode 100644
index 0000000..883fb8c
--- /dev/null
+++ b/drivers/qtn/pcie2/host/quantenna/qdpc_dspload.h
@@ -0,0 +1,62 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __QDPC_DSP_H__
+#define __QDPC_DSP_H__
+
+#include <linux/version.h>
+
+#include <topaz_vnet.h>
+
+#include <qtn/topaz_tqe_cpuif.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#define IOREMAP     ioremap_nocache
+#else   /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
+#define IOREMAP     ioremap
+#endif
+
+#define QTN_TXBF_MUC_TO_DSP_MBOX_INT            (0)
+#define QTN_TXBF_DSP_TO_HOST_MBOX_INT           (0)
+
+#define QDCP_DSP_FILE_NAME "/etc/firmware/rdsp_driver.0.bin"
+
+RUBY_INLINE void
+qtn_txbf_lhost_init(void)
+{
+#if CONFIG_USE_SPI1_FOR_IPC
+        /* Initialize SPI controller, keep IRQ disabled */
+        qtn_mproc_sync_mem_write(RUBY_SPI1_SPCR,
+                RUBY_SPI1_SPCR_SPE | RUBY_SPI1_SPCR_MSTR |
+                RUBY_SPI1_SPCR_SPR(0));
+        qtn_mproc_sync_mem_write(RUBY_SPI1_SPER,
+                RUBY_SPI1_SPER_ESPR(0));
+#else
+        /* Ack, and keep IRQ disabled */
+        qtn_mproc_sync_mem_write(RUBY_SYS_CTL_D2L_INT,
+                qtn_mproc_sync_mem_read(RUBY_SYS_CTL_D2L_INT));
+        qtn_mproc_sync_mem_write(RUBY_SYS_CTL_D2L_INT_MASK,
+                ~(1 << QTN_TXBF_DSP_TO_HOST_MBOX_INT));
+#endif
+}
+
+extern int qdpc_dsp_open(void);
+
+#endif /* __QDPC_DSP_H__ */
+
diff --git a/drivers/qtn/pcie2/host/quantenna/qdpc_platform.c b/drivers/qtn/pcie2/host/quantenna/qdpc_platform.c
new file mode 100644
index 0000000..25aff26
--- /dev/null
+++ b/drivers/qtn/pcie2/host/quantenna/qdpc_platform.c
@@ -0,0 +1,360 @@
+/**
+ * Copyright (c) 2012-2013 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customer needs to modify this file.
+ */
+#include <qdpc_platform.h>
+#include <topaz_vnet.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <asm/gpio.h>
+#include <../drivers/pcie2/host/common/qdpc_regs.h>
+
+/* WPS button event reported to user space process */
+typedef enum {
+	MODE_LED_INIT = 0,
+	MODE_LED_EXIT,
+	MODE_LED_FLASH,
+} MODE_LED_OPS;
+#define MODE_LED_GPIO 6
+
+typedef enum {
+	WPS_BUTTON_NONE_EVENT = 0,
+	WPS_BUTTON_WIRELESS_EVENT,
+	WPS_BUTTON_DBGDUMP_EVENT,
+	WPS_BUTTON_INVALIDE_EVENT
+} WPS_Button_Event;
+#define WPS_BUTTON_VALID(e) (WPS_BUTTON_NONE_EVENT < (e) && (e) < WPS_BUTTON_INVALIDE_EVENT)
+
+#define WPS_BUTTON_GPIO 4
+#define QDPC_WPS_BUTTON_ACTIVE_LEVEL 0
+#define WPS_BUTTON_TIMER_INTERVAL ((3 * HZ) / 10) /* timer interval */
+
+/*
+* Queue of processes who access wps_button file
+*/
+DECLARE_WAIT_QUEUE_HEAD(WPS_Button_WaitQ);
+
+static WPS_Button_Event wps_button_event = WPS_BUTTON_NONE_EVENT;
+struct timer_list qdpc_wps_button_timer;
+static u32 qdpc_wps_button_last_level = ~QDPC_WPS_BUTTON_ACTIVE_LEVEL;
+static u32 qdpc_wps_button_down_jiffies = 0; /* records the jiffies when button down, back to 0 after button released */
+
+static int vmac_rst_rc_en = 1;
+struct work_struct detect_ep_rst_work;
+
+void enable_vmac_ints(struct vmac_priv *vmp)
+{
+	uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+
+	if(vmp->msi_enabled) {
+		temp |= BIT(10); /* MSI */
+	} else {
+		temp |= BIT(11); /* Legacy INTx */
+	}
+	writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+}
+
+void disable_vmac_ints(struct vmac_priv *vmp)
+{
+	uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+
+	if(vmp->msi_enabled) {
+		temp &= ~BIT(10); /* MSI */
+	} else {
+		temp &= ~BIT(11); /* Legacy INTx */
+	}
+	writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+}
+
+static ssize_t vmac_reset_get(struct device *dev, struct device_attribute *attr, char *buf)
+{
+        return sprintf(buf, "%u\n", vmac_rst_rc_en);
+}
+
+static ssize_t vmac_reset_set(struct device *dev,
+        struct device_attribute *attr, const char *buf, size_t count)
+{
+        uint8_t cmd;
+
+        cmd = (uint8_t)simple_strtoul(buf, NULL, 10);
+	if (cmd == 0)
+		vmac_rst_rc_en = 0;
+	else
+		vmac_rst_rc_en = 1;
+
+        return count;
+}
+DEVICE_ATTR(enable_reset, S_IWUSR | S_IRUSR, vmac_reset_get, vmac_reset_set);
+
+static void detect_ep_rst(struct work_struct *data)
+{
+	kernel_restart(NULL);
+}
+
+void enable_ep_rst_detection(struct net_device *ndev)
+{
+        uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+
+        temp |= QDPC_INTR_EP_RST_MASK;
+        writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+
+	device_create_file(&ndev->dev, &dev_attr_enable_reset);
+	INIT_WORK(&detect_ep_rst_work, detect_ep_rst);
+}
+
+void disable_ep_rst_detection(struct net_device *ndev)
+{
+        uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+
+        temp &= ~QDPC_INTR_EP_RST_MASK;
+        writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
+
+	device_remove_file(&ndev->dev, &dev_attr_enable_reset);
+}
+
+void handle_ep_rst_int(struct net_device *ndev)
+{
+	uint32_t status = readl(QDPC_RC_SYS_CTL_PCIE_INT_STAT);
+
+	if ((status & QDPC_INTR_EP_RST_MASK) == 0)
+		return;
+
+	/* Clear pending interrupt */
+	writel(QDPC_INTR_EP_RST_MASK, QDPC_RC_SYS_CTL_PCIE_INT_STAT);
+
+	printk("Detected reset of Endpoint\n");
+
+	if (vmac_rst_rc_en == 1) {
+		netif_stop_queue(ndev);
+		schedule_work(&detect_ep_rst_work);
+	}
+}
+
+static void qdpc_mode_led(struct net_device *ndev, MODE_LED_OPS op)
+{
+	static int inited = 0;
+	static u32 led_bk = 0;
+
+	switch(op) {
+	case MODE_LED_INIT:
+		if (gpio_request(MODE_LED_GPIO, ndev->name) < 0)
+			printk(KERN_INFO "%s: Failed to request GPIO%d for GPIO reset\n",
+			       ndev->name, MODE_LED_GPIO);
+
+		led_bk = gpio_get_value(MODE_LED_GPIO);
+		gpio_direction_output(MODE_LED_GPIO, led_bk);
+		inited = 1;
+
+		break;
+
+	case MODE_LED_EXIT:
+		if (inited) {
+			gpio_set_value(MODE_LED_GPIO, led_bk);
+			gpio_free(MODE_LED_GPIO);
+			inited = 0;
+		}
+
+		break;
+
+	case MODE_LED_FLASH:
+		if (inited)
+			gpio_set_value(MODE_LED_GPIO, ~gpio_get_value(MODE_LED_GPIO) & 0x01);
+
+		break;
+	}
+}
+
+
+static void qdpc_wps_button_event_wakeup(struct net_device *ndev, WPS_Button_Event event)
+{
+	struct vmac_priv *priv = netdev_priv(ndev);
+
+	if (!WPS_BUTTON_VALID(event))
+		return;
+
+	/* notify local watcher */
+	wps_button_event = event;
+	wake_up_all(&WPS_Button_WaitQ);
+
+	/* notify ep the offline dbg info, if ep is ready*/
+	if (priv->ep_ready && event == WPS_BUTTON_DBGDUMP_EVENT)
+		writel(TOPAZ_SET_INT(IPC_OFFLINE_DBG), priv->ep_ipc_reg);
+}
+
+static ssize_t qdpc_wps_button_read(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buff)
+{
+	int i = 0;
+
+	/* As usual, this read is always blocked until the WPS button is pressed
+	 * so increase the module reference to prevent it being unload during
+	 * blocking read
+	 */
+	if (!try_module_get(THIS_MODULE))
+		return 0;
+
+	/* wait for valid WPS button event */
+	wait_event_interruptible(WPS_Button_WaitQ, WPS_BUTTON_VALID(wps_button_event));
+
+	/* read back empty string in signal wakeup case */
+	for (i = 0; i < _NSIG_WORDS; i++) {
+		if (current->pending.signal.sig[i] & ~current->blocked.sig[i]) {
+			module_put(THIS_MODULE);
+			return 0;
+		}
+	}
+
+	sprintf(buff, "%d\n", wps_button_event);
+
+	/* after new event been handled, reset to none event */
+	wps_button_event = WPS_BUTTON_NONE_EVENT;
+
+	module_put(THIS_MODULE);
+
+	return strlen(buff);
+}
+
+static ssize_t qdpc_wps_button_write(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t count)
+{
+	int input = 0;
+	struct net_device *ndev;
+
+	input = simple_strtoul(buf, NULL, 10);
+
+	ndev = (struct net_device*)dev_get_drvdata(dev);
+
+	switch (input) {
+	case 1:
+		qdpc_mode_led(ndev, MODE_LED_INIT);
+
+		qdpc_mode_led(ndev, MODE_LED_FLASH);
+		msleep(300);
+		qdpc_mode_led(ndev, MODE_LED_FLASH);
+		msleep(300);
+		qdpc_mode_led(ndev, MODE_LED_FLASH);
+		msleep(300);
+
+		qdpc_mode_led(ndev, MODE_LED_EXIT);
+
+		break;
+	default:
+		printk(KERN_INFO "WPS button: unknow cmd (%d)\n", input);
+	}
+
+	return count;
+}
+
+DEVICE_ATTR(wps_button, S_IWUSR | S_IRUSR, qdpc_wps_button_read, qdpc_wps_button_write); /* dev_attr_wps_button */
+
+static void qdpc_wps_button_device_file_create(struct net_device *ndev)
+{
+	device_create_file(&(ndev->dev), &dev_attr_wps_button);
+}
+
+
+static void qdpc_wps_polling_button_notifier(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	u32 current_level;
+
+	current_level = gpio_get_value(WPS_BUTTON_GPIO);
+
+	/* records the falling edge jiffies */
+	if ((current_level == QDPC_WPS_BUTTON_ACTIVE_LEVEL)
+	    && (qdpc_wps_button_last_level != QDPC_WPS_BUTTON_ACTIVE_LEVEL)) {
+
+		qdpc_mode_led(dev, MODE_LED_INIT);
+		qdpc_wps_button_down_jiffies = jiffies;
+	}
+
+	/* at rising edge */
+	if ((current_level != QDPC_WPS_BUTTON_ACTIVE_LEVEL)
+	    && (qdpc_wps_button_last_level == QDPC_WPS_BUTTON_ACTIVE_LEVEL)) {
+
+		/* WPS button event is rising triggered -- when button
+		 * being changed from active to inactive level.
+		 *
+		 * Different press time trigger different event
+		 */
+		if ((jiffies - qdpc_wps_button_down_jiffies) >= 10 * HZ) {
+
+			/* wakeup the event waiting processes */
+			qdpc_wps_button_event_wakeup(dev, WPS_BUTTON_DBGDUMP_EVENT);
+
+			printk(KERN_INFO "WPS: button long press polling at %u\n", (unsigned int) jiffies);
+		} else {
+			/* wakeup the event waiting processes */
+			qdpc_wps_button_event_wakeup(dev, WPS_BUTTON_WIRELESS_EVENT);
+
+			printk(KERN_INFO "WPS: button short press polling at %u\n", (unsigned int) jiffies);
+		}
+
+		/* back to 0 after rising edge */
+		qdpc_wps_button_down_jiffies = 0;
+		qdpc_mode_led(dev, MODE_LED_EXIT);
+	}
+
+	/* after button down more than 10s, begin change the mode led's state to notify user to release button */
+	if (qdpc_wps_button_down_jiffies != 0 && ((jiffies - qdpc_wps_button_down_jiffies) >= 10 * HZ)) {
+		qdpc_mode_led(dev, MODE_LED_FLASH);
+	}
+
+	/* Restart the timer */
+	mod_timer(&qdpc_wps_button_timer, jiffies + WPS_BUTTON_TIMER_INTERVAL);
+
+	qdpc_wps_button_last_level = current_level;
+
+	return;
+}
+
+int qdpc_wps_button_init(struct net_device *dev)
+{
+	/*
+	 * Set up timer to poll the button.
+	 * Request the GPIO resource and export it for userspace
+	 */
+	if (gpio_request(WPS_BUTTON_GPIO, dev->name) < 0)
+		printk(KERN_INFO "%s: Failed to request GPIO%d for GPIO reset\n",
+		       dev->name, WPS_BUTTON_GPIO);
+
+	init_timer(&qdpc_wps_button_timer);
+	qdpc_wps_button_timer.function = qdpc_wps_polling_button_notifier;
+	qdpc_wps_button_timer.data = (unsigned long)dev;
+	qdpc_wps_button_timer.expires = jiffies + WPS_BUTTON_TIMER_INTERVAL;
+	add_timer(&qdpc_wps_button_timer);
+
+	/* create the device file for user-space use */
+	qdpc_wps_button_device_file_create(dev);
+
+	return 0;
+}
+
+void qdpc_wps_button_exit(void)
+{
+	del_timer(&qdpc_wps_button_timer);
+}
+
diff --git a/drivers/qtn/pcie2/host/quantenna/qdpc_platform.h b/drivers/qtn/pcie2/host/quantenna/qdpc_platform.h
new file mode 100644
index 0000000..0598b58
--- /dev/null
+++ b/drivers/qtn/pcie2/host/quantenna/qdpc_platform.h
@@ -0,0 +1,110 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __QDPC_PFDEP_H__
+#define __QDPC_PFDEP_H__
+
+#include <linux/version.h>
+
+#include <topaz_vnet.h>
+
+#include <qtn/topaz_tqe_cpuif.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#define IOREMAP     ioremap_nocache
+#else   /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
+#define IOREMAP     ioremap
+#endif
+
+/* IO functions */
+#ifndef readb
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#endif
+
+#ifndef readw
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#endif
+
+#ifndef readl
+#define readl(addr) (*(volatile unsigned int *) (addr))
+#endif
+
+#ifndef writeb
+#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
+#endif
+
+#ifndef writew
+#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
+#endif
+
+#ifndef writel
+#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
+#endif
+
+/*
+ * Interrupt
+ */
+/* Interrupt Mask and Status Register */
+#define QDPC_RC_SYS_CTL_BASE		0xe0000000
+#define QDPC_RC_SYS_CTL_PCIE_INT_MASK	(QDPC_RC_SYS_CTL_BASE + 0xC0)
+#define QDPC_RC_SYS_CTL_PCIE_INT_STAT	(QDPC_RC_SYS_CTL_BASE + 0x17C)
+
+/* Bit number and mask of MSI in the interrupt mask and status register */
+#define	QDPC_INTR_MSI_BIT		12
+#define QDPC_INTR_MSI_MASK		(1 << QDPC_INTR_MSI_BIT)
+
+/* Bit number and mask of EP-reset-detect Interrupt in the mask and status register */
+#define QDPC_INTR_EP_RST_BIT		3
+#define QDPC_INTR_EP_RST_MASK		(1 << QDPC_INTR_EP_RST_BIT)
+
+extern void enable_vmac_ints(struct vmac_priv *vmp);
+extern void disable_vmac_ints(struct vmac_priv *vmp);
+
+extern void enable_ep_rst_detection(struct net_device *ndev);
+extern void disable_ep_rst_detection(struct net_device *ndev);
+extern void handle_ep_rst_int(struct net_device *ndev);
+
+extern int qdpc_wps_button_init(struct net_device *dev);
+extern void qdpc_wps_button_exit(void);
+
+/* Allocated buffer size for a packet */
+#define SKB_BUF_SIZE		RX_BUF_SIZE
+
+/* Transmit Queue Length */
+#if defined(QTN_BYTEALIGN)
+#define QDPC_TX_QUEUE_SIZE	180
+#else
+#define QDPC_TX_QUEUE_SIZE	200
+#endif
+
+/* Receive Queue Length */
+#define QDPC_RX_QUEUE_SIZE	384
+
+/* SDP requires packets show up at Lhost */
+#define QDPC_PLATFORM_IFPORT	TOPAZ_TQE_LHOST_PORT
+
+/* Customer defined function	*/
+#define qdpc_platform_init()                  0
+#define qdpc_platform_exit()                  do { } while(0)
+
+/* PCIe driver update resource in PCI configure space after EP reset */
+#define qdpc_update_hw_bar(pdev, index)       do { } while(0)
+
+#endif /* __QDPC_PFDEP_H__ */
+
diff --git a/drivers/qtn/pcie2/host/st/Makefile b/drivers/qtn/pcie2/host/st/Makefile
new file mode 100644
index 0000000..008ccac
--- /dev/null
+++ b/drivers/qtn/pcie2/host/st/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for ST platform
+#
+
+EXTRA_CFLAGS	+= -Wall		\
+		   -I$(src)		\
+		   -I$(src)/../../include \
+		   -I$(src)/../common
+
+EXTRA_CFLAGS	+= -DRC_TXDONE_TIMER -DQTN_WAKEQ_SUPPORT
+
+default: all
+
+COMMON_DIR	:= ../common
+qdpc-host-objs   := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
+obj-m           :=  qdpc-host.o
+
+qdpc_host.o: $(qdpc-host-objs)
+	ld -r $^ -o $@
+
+all:
+	make -C /lib/modules/$(KVERSION)/build M=$(PWD) modules
+
+clean:
+	make -C /lib/modules/$(KVERSION)/build M=$(PWD) clean
+	rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
+	rm -rf Module.markers  modules.order *~ $(qdpc-host-objs)
+
diff --git a/drivers/qtn/pcie2/host/st/qdpc_platform.c b/drivers/qtn/pcie2/host/st/qdpc_platform.c
new file mode 100644
index 0000000..85fd1b3
--- /dev/null
+++ b/drivers/qtn/pcie2/host/st/qdpc_platform.c
@@ -0,0 +1,72 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customer needs to modify this file.
+ */
+
+#include <linux/interrupt.h>
+
+#include <qdpc_platform.h>
+#include <topaz_vnet.h>
+#include <qdpc_regs.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+/*
+ * Enable MSI interrupt of PCIe.
+ */
+void enable_vmac_ints(struct vmac_priv *vmp)
+{
+	volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
+	writel(vmp->dma_msi_imwr, dma_wrd_imwr);
+}
+
+/*
+ * Disable MSI interrupt of PCIe.
+ */
+void disable_vmac_ints(struct vmac_priv *vmp)
+{
+	volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
+	writel(vmp->dma_msi_dummy, dma_wrd_imwr);
+}
+
+/*
+ * Enable interrupt for detecting EP reset.
+ */
+void enable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Disable interrupt for detecting EP reset.
+ */
+void disable_ep_rst_detection(struct net_device *ndev)
+{
+}
+
+/*
+ * Interrupt context for detecting EP reset.
+ * This function should do:
+ *   1. check interrupt status to see if EP reset.
+ *   2. if EP reset, handle it.
+ */
+void handle_ep_rst_int(struct net_device *ndev)
+{
+}
diff --git a/drivers/qtn/pcie2/host/st/qdpc_platform.h b/drivers/qtn/pcie2/host/st/qdpc_platform.h
new file mode 100644
index 0000000..c0cc77b
--- /dev/null
+++ b/drivers/qtn/pcie2/host/st/qdpc_platform.h
@@ -0,0 +1,101 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * Platform-dependent implementation. Customer needs to modify this file.
+ */
+#ifndef __QDPC_PFDEP_H__
+#define __QDPC_PFDEP_H__
+
+#include <linux/version.h>
+
+#include <topaz_vnet.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#define IOREMAP      ioremap_wc
+#else    /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
+#define IOREMAP      ioremap
+#endif
+
+/* IO functions */
+#ifndef readb
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#endif
+
+#ifndef readw
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#endif
+
+#ifndef readl
+#define readl(addr) (*(volatile unsigned int *) (addr))
+#endif
+
+#ifndef writeb
+#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
+#endif
+
+#ifndef writew
+#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
+#endif
+
+#ifndef writel
+#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
+#endif
+
+#ifndef virt_to_bus
+#define virt_to_bus virt_to_phys
+#endif
+
+/* Bit number and mask of MSI in the interrupt mask and status register */
+#define	QDPC_INTR_MSI_BIT		0
+#define QDPC_INTR_MSI_MASK		(1 << QDPC_INTR_MSI_BIT)
+
+/* Enable MSI interrupt of PCIe */
+extern void enable_vmac_ints(struct vmac_priv *vmp);
+/* Disable MSI interrupt of PCIe */
+extern void disable_vmac_ints(struct vmac_priv *vmp);
+
+/* Enable interrupt for detecting EP reset */
+extern void enable_ep_rst_detection(struct net_device *ndev);
+/* Disable interrupt for detecting EP reset */
+extern void disable_ep_rst_detection(struct net_device *ndev);
+/* Interrupt context for detecting EP reset */
+extern void handle_ep_rst_int(struct net_device *ndev);
+
+/* Allocated buffer size for a packet */
+#define SKB_BUF_SIZE		2048
+
+/* Transmit Queue Length */
+#define QDPC_TX_QUEUE_SIZE	180
+
+/* Receive Queue Length */
+#define QDPC_RX_QUEUE_SIZE	384
+
+/* Customer defined function	*/
+#define qdpc_platform_init()                  0
+#define qdpc_platform_exit()                  do { } while(0)
+
+/* PCIe driver update resource in PCI configure space after EP reset */
+#define qdpc_update_hw_bar(pdev, index)       do { } while(0)
+
+/* TODO: If IRQ-loss issue can be fixed, remove macro below */
+#define QDPC_PLATFORM_IRQ_FIXUP
+
+#endif /* __QDPC_PFDEP_H__ */
+
diff --git a/drivers/qtn/pcie2/include/qdpc_config.h b/drivers/qtn/pcie2/include/qdpc_config.h
new file mode 100644
index 0000000..ed14e77
--- /dev/null
+++ b/drivers/qtn/pcie2/include/qdpc_config.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __QDPC_CONFIG_H__
+#define __QDPC_CONFIG_H__
+
+#include "ruby_pcie_bda.h"
+#define QDPC_MAC_ADDR_SIZE	6
+
+/*
+ * Using Type/Length field for checking if data packet or
+ * netlink packet(call_qcsapi remote interface).
+ * Using 0x0601 as netlink packet type and MAC magic number(Quantenna OUI)
+ * to distinguish netlink packet
+ */
+#define QDPC_APP_NETLINK_TYPE 0x0601
+#define QDPC_NETLINK_DST_MAGIC "\x00\x26\x86\x00\x00\x00"
+#define QDPC_NETLINK_SRC_MAGIC "\x00\x26\x86\x00\x00\x00"
+
+#define QDPC_RPC_TYPE_MASK	0x0f00
+#define QDPC_RPC_TYPE_STRCALL	0x0100
+#define QDPC_RPC_TYPE_LIBCALL	0x0200
+#define QDPC_RPC_TYPE_FRAG_MASK	0x8000
+#define QDPC_RPC_TYPE_FRAG	0x8000
+
+ /* Used on RC side */
+#define QDPC_NETLINK_RPC_PCI_CLNT	31
+#define QDPC_NL_TYPE_CLNT_STR_REG	(QDPC_RPC_TYPE_STRCALL | 0x0010)
+#define QDPC_NL_TYPE_CLNT_STR_REQ	(QDPC_RPC_TYPE_STRCALL | 0x0011)
+#define QDPC_NL_TYPE_CLNT_LIB_REG	(QDPC_RPC_TYPE_LIBCALL | 0x0010)
+#define QDPC_NL_TYPE_CLNT_LIB_REQ	(QDPC_RPC_TYPE_LIBCALL | 0x0011)
+
+ /* Used on EP side */
+#define QDPC_NETLINK_RPC_PCI_SVC	31
+#define QDPC_NL_TYPE_SVC_STR_REG	(QDPC_RPC_TYPE_STRCALL | 0x0010)
+#define QDPC_NL_TYPE_SVC_STR_REQ	(QDPC_RPC_TYPE_STRCALL | 0x0011)
+#define QDPC_NL_TYPE_SVC_LIB_REG	(QDPC_RPC_TYPE_LIBCALL | 0x0010)
+#define QDPC_NL_TYPE_SVC_LIB_REQ	(QDPC_RPC_TYPE_LIBCALL | 0x0011)
+
+typedef struct qdpc_cmd_hdr {
+	uint8_t dst_magic[ETH_ALEN];
+	uint8_t src_magic[ETH_ALEN];
+	__be16 type;
+	__be16 len;
+	__be16 rpc_type;
+	__be16 total_len;
+} qdpc_cmd_hdr_t;
+
+
+#endif /* __QDPC_CONFIG_H__ */
diff --git a/drivers/qtn/pcie2/include/qdpc_debug.h b/drivers/qtn/pcie2/include/qdpc_debug.h
new file mode 100644
index 0000000..dc63a1b
--- /dev/null
+++ b/drivers/qtn/pcie2/include/qdpc_debug.h
@@ -0,0 +1,77 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __QDPC_DEBUG_H__
+#define __QDPC_DEBUG_H__
+
+/* Debug macros */
+#define SUCCESS         0
+#define FAILURE        -1
+
+#ifdef  DEBUG
+#define PRINT_DBG(format, ...)           printk(KERN_DEBUG format, ##__VA_ARGS__)
+#else
+#define PRINT_DBG(format, ...)           do { } while(0);
+#endif
+
+#define PRINT_ERROR(format, ...)         printk(KERN_ERR format, ##__VA_ARGS__)
+#define PRINT_INFO(format, ...)          printk(KERN_INFO format, ##__VA_ARGS__)
+
+#ifdef DBGFMT
+#undef DBGFMT
+#endif
+
+#ifdef DBGARG
+#undef DBGARG
+#endif
+
+#ifdef DBGPRINTF
+#undef DBGPRINTF
+#endif
+
+#define DBGFMT	"%s-%d: "
+#define DBGARG	__func__, __LINE__
+
+#define DBGPRINTF(fmt, ...)					\
+	do {								\
+		if(printk_ratelimit()) {				\
+			printk(DBGFMT fmt, DBGARG, ##__VA_ARGS__);	\
+		}							\
+	} while(0)
+
+
+#ifdef DEBUG
+#define qdpc_print_dump(str_, buf_, len_)	\
+{					\
+	u32 i = 0;			\
+	printk("%s\n", str_);		\
+	printk("0x%04X : ", i*8);	\
+	for (i=0; i < (u32)(len_); i++) {	\
+		if (i && ((i%8) == 0)) {	\
+			printk( "%s", "\n");	\
+			printk("0x%04X : ", (i));\
+		}				\
+		printk("%02x ", (buf_)[i]);	\
+	}					\
+	printk("\n%s\n", str_);			\
+}
+#else
+#define qdpc_print_dump(str_, buf_, len_)
+#endif
+#endif
diff --git a/drivers/qtn/pcie2/include/qdpc_version.h b/drivers/qtn/pcie2/include/qdpc_version.h
new file mode 100644
index 0000000..8928354
--- /dev/null
+++ b/drivers/qtn/pcie2/include/qdpc_version.h
@@ -0,0 +1 @@
+#define DRV_VERSION "v37.4.1.89"
diff --git a/drivers/qtn/pcie2/include/ruby_pcie_bda.h b/drivers/qtn/pcie2/include/ruby_pcie_bda.h
new file mode 100644
index 0000000..89091ae
--- /dev/null
+++ b/drivers/qtn/pcie2/include/ruby_pcie_bda.h
@@ -0,0 +1,136 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Header file which describes Ruby PCI Express Boot Data Area
+ * Has to be used by both kernel and bootloader.
+ */
+
+#ifndef RUBY_PCIE_BDA_H
+#define RUBY_PCIE_BDA_H
+
+/* Area mapped by via the BAR visible to the host */
+#define RUBY_PCIE_BDA_ADDR		CONFIG_ARC_PCIE_BASE
+#define RUBY_PCIE_BDA_SIZE		CONFIG_ARC_PCIE_SIZE
+
+#define RUBY_BDA_VADDR			(RUBY_PCIE_BDA_ADDR + 0x80000000)
+
+
+#define QDPC_PCIE_BDA_VERSION	0x1000
+
+#define QDPC_BDA_PCIE_INIT		0x01
+#define QDPC_BDA_PCIE_RDY		0x02
+#define QDPC_BDA_FW_LOAD_RDY		0x03
+#define QDPC_BDA_FW_LOAD_DONE		0x04
+#define QDPC_BDA_FW_START		0x05
+#define QDPC_BDA_FW_RUN			0x06
+#define QDPC_BDA_FW_HOST_RDY		0x07
+#define QDPC_BDA_FW_TARGET_RDY		0x11
+#define QDPC_BDA_FW_TARGET_BOOT		0x12
+#define QDPC_BDA_FW_FLASH_BOOT		0x13
+#define QDPC_BDA_FW_HOST_LOAD		0x08
+#define QDPC_BDA_FW_BLOCK_DONE		0x09
+#define QDPC_BDA_FW_BLOCK_RDY		0x0A
+#define QDPC_BDA_FW_EP_RDY		0x0B
+#define QDPC_BDA_FW_BLOCK_END		0x0C
+#define QDPC_BDA_FW_CONFIG		0x0D
+#define QDPC_BDA_FW_RUNNING		0x0E
+
+#define QDPC_BDA_PCIE_FAIL		0x82
+#define QDPC_BDA_FW_LOAD_FAIL		0x85
+
+
+#define PCIE_BDA_RCMODE                 BIT(1)
+#define PCIE_BDA_MSI                    BIT(2)
+#define PCIE_BDA_BAR64                  BIT(3)
+#define PCIE_BDA_FLASH_PRESENT          BIT(4)  /* Tell the Host if EP have flash contain firmware */
+#define PCIE_BDA_FLASH_BOOT             BIT(5)  /* Tell TARGET to boot from flash */
+#define PCIE_BDA_XMIT_UBOOT             BIT(6) /* EP ask for u-boot.bin */
+#define PCIE_BDA_TARGET_FBOOT_ERR       BIT(8)  /* TARGET flash boot failed */
+#define PCIE_BDA_TARGET_FWLOAD_ERR      BIT(9)  /* TARGET firmware load failed */
+#define PCIE_BDA_HOST_NOFW_ERR          BIT(12) /* Host not find any firmware */
+#define PCIE_BDA_HOST_MEMALLOC_ERR      BIT(13) /* Host malloc firmware download memory block failed */
+#define PCIE_BDA_HOST_MEMMAP_ERR        BIT(14) /* Host pci map download memory block failed */
+#define PCIE_BDA_VER(x)                 (((x) >> 4) & 0xFF)
+#define PCIE_BDA_ERROR_MASK             0xFF00  /* take the second 8 bits as error flag */
+
+#define PCIE_DMA_OFFSET_ERROR		0xFFFF
+#define PCIE_DMA_OFFSET_ERROR_MASK	0xFFFF
+
+#define PCIE_BDA_NAMELEN		32
+
+#define QDPC_PCI_ENDIAN_DETECT_DATA	0x12345678
+#define QDPC_PCI_ENDIAN_REVERSE_DATA	0x78563412
+
+#define QDPC_PCI_ENDIAN_VALID_STATUS	0x3c3c3c3c
+#define QDPC_PCI_ENDIAN_INVALID_STATUS	0
+
+#define QDPC_PCI_LITTLE_ENDIAN		0
+#define	QDPC_PCI_BIG_ENDIAN		0xffffffff
+
+#define QDPC_SCHED_TIMEOUT		(HZ / 20)
+
+#define PCIE_DMA_ISSUE_LOG_NUM		128
+
+#define PCIE_RC_TX_QUEUE_LEN		256
+#define PCIE_TX_VALID_PKT		0x80000000
+#define PCIE_PKT_LEN_MASK		0xffff
+
+struct vmac_pkt_info {
+	uint32_t addr;
+	uint32_t info;
+};
+
+typedef struct qdpc_pcie_bda {
+	uint16_t	bda_len;			/* Size of BDA block */
+	uint16_t	bda_version;			/* BDA version */
+	uint32_t	bda_bootstate;			/* Boot state of device */
+	uint32_t	bda_dma_mask;			/* Number of addressable DMA bits */
+	uint32_t	bda_dma_offset;			/* HW specific offset for DMA engine */
+	uint32_t	bda_flags;
+	uint32_t	bda_img;			/* Current load image block */
+	uint32_t	bda_img_size;			/* Current load image block size */
+	uint32_t	bda_ep2h_irqstatus;		/* Added here to allow boot loader to use irqs if desired */
+	uint32_t	bda_h2ep_irqstatus;		/* Added here to allow boot loader to use irqs if desired */
+	uint32_t	bda_msi_addr;
+	uint8_t		reserved1[56];			/* Reserve 56 bytes to make it compatible with older version */
+	uint32_t	bda_flashsz;
+	char		bda_boardname[PCIE_BDA_NAMELEN];
+	uint32_t	bda_pci_pre_status;		/* PCI endian check previous status */
+	uint32_t	bda_pci_endian;			/* Check pci memory endian format */
+	uint32_t	bda_pci_post_status;		/* PCI endian check post status */
+	int32_t		bda_h2ep_txd_budget;		/* txdone replenish budget for ep */
+	int32_t		bda_ep2h_txd_budget;		/* txdone replenish budget for host */
+	uint32_t	bda_rc_rx_bd_base;		/* EP rx buffer descriptors base address */
+	uint32_t	bda_rc_rx_bd_num;
+	uint32_t	bda_rc_tx_bd_base;		/* RC rx buffer descriptors base address */
+	uint32_t	bda_rc_tx_bd_num;
+	uint8_t		bda_ep_link_state;
+	uint8_t		bda_rc_link_state;
+	uint8_t		bda_rc_msi_enabled;
+	uint8_t		reserved2;
+        uint32_t        bda_ep_next_pkt;		/* A pointer to RC's memory specifying next packet to be handled by EP */
+	struct vmac_pkt_info request[PCIE_RC_TX_QUEUE_LEN];
+} qdpc_pcie_bda_t;
+
+#endif
+
diff --git a/drivers/qtn/pcie2/include/topaz_netcom.h b/drivers/qtn/pcie2/include/topaz_netcom.h
new file mode 100644
index 0000000..4eb3b56
--- /dev/null
+++ b/drivers/qtn/pcie2/include/topaz_netcom.h
@@ -0,0 +1,51 @@
+/**
+ * Copyright (c) 2012-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __DRIVES_NET_TOPAZ_NETCOM_H
+#define __DRIVES_NET_TOPAZ_NETCOM_H
+
+#define IPC_BIT_EP_RX_PKT	(0)
+#define IPC_BIT_RESET_EP	(1)
+#define IPC_BIT_RC_STOP_TX	(2)
+#define IPC_BIT_RC_RX_DONE	(3)
+#define IPC_BIT_EP_PM_CTRL	(4)
+#define IPC_BIT_OFFLINE_DBG	(5)
+
+#define IPC_EP_RX_PKT		(BIT(IPC_BIT_EP_RX_PKT))
+#define IPC_RESET_EP		(BIT(IPC_BIT_RESET_EP))
+#define IPC_RC_STOP_TX		(BIT(IPC_BIT_RC_STOP_TX))
+#define IPC_RC_RX_DONE		(BIT(IPC_BIT_RC_RX_DONE))
+#define IPC_EP_PM_CTRL		(BIT(IPC_BIT_EP_PM_CTRL))
+#define IPC_OFFLINE_DBG		(BIT(IPC_BIT_OFFLINE_DBG))
+
+#define TQE_NAPI_SCHED		(0x3)
+#define TQE_ENABLE_INTR		(0x1)
+
+struct vmac_bd {
+	uint32_t buff_addr;
+	uint32_t buff_info;
+};
+
+struct vmac_rx_buf {
+	uint32_t baddr;
+	uint16_t offset;
+	uint16_t len;
+};
+
+#endif /* __DRIVES_NET_TOPAZ_NETCOM_H */
diff --git a/drivers/qtn/pm_interval/Makefile b/drivers/qtn/pm_interval/Makefile
new file mode 100644
index 0000000..c4f5452
--- /dev/null
+++ b/drivers/qtn/pm_interval/Makefile
@@ -0,0 +1,25 @@
+#
+# Quantenna Communications Inc. Driver Makefile
+#
+EXTRA_CFLAGS	+= -Wall -Werror -I../drivers -I../include -I../drivers/include/shared -I../drivers/include/kernel
+EXTRA_CFLAGS    += -mlong-calls
+
+ifneq ($(KERNELRELEASE),)
+pm_interval-objs += pm_interval_drv.o
+obj-m   += pm_interval.o
+else
+
+INSTALL	= INSTALL_MOD_PATH=../linux/modules
+PWD	:= $(shell pwd)
+
+default:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+install:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
+
+endif
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions
+
diff --git a/drivers/qtn/pm_interval/pm_interval_drv.c b/drivers/qtn/pm_interval/pm_interval_drv.c
new file mode 100644
index 0000000..2dcf702
--- /dev/null
+++ b/drivers/qtn/pm_interval/pm_interval_drv.c
@@ -0,0 +1,755 @@
+/**
+ * Copyright (c) 2011 - 2012 Quantenna Communications Inc
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+/*
+ * These entry points are accessed from outside this module:
+ *
+ *     pm_interval_report
+ *     pm_interval_configure
+ *     pm_interval_schedule_work
+ *     pm_interval_monitor
+ *     pm_proc_start_interval_report
+ *
+ *     pm_interval_init
+ *     pm_interval_exit
+ */
+
+#include <linux/version.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+
+#include <net/net_namespace.h>
+#include <net80211/ieee80211.h>
+#include <net80211/if_media.h>
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_proto.h>
+
+#include "common/queue.h"
+
+#define PM_INTERVAL_NAME		"pm_interval"
+#define PM_INTERVAL_VERSION		"1.2"
+#define PM_INTERVAL_AUTHOR		"Quantenna Communciations, Inc."
+#define PM_INTERVAL_DESC		"Manage PM Intervals"
+
+#define PM_INTERVAL_PROC_ENTRY_PREFIX	"start_"
+
+#define PM_INTERVAL_MAX_LENGTH_COMMAND	7
+#define PM_INTERVAL_MAX_LENGTH_ARG	11
+
+#define PM_INTERVAL_COMMAND_ADD		"add"
+#define PM_INTERVAL_COMMAND_DUMP	"dump"
+
+#define PM_INTERVAL_INITIAL_RESULT	(-2)
+#define PM_INTERVAL_ERROR_RESULT	(-1)
+#define PM_INTERVAL_SUCCESS_RESULT	0
+
+struct pm_interval_nd_entry {
+	struct net_device_stats	pd_start_interval;
+	const struct net_device	*pd_dev;
+	TAILQ_ENTRY(pm_interval_nd_entry)	pd_next;
+};
+
+struct pm_interval_entry {
+	unsigned long		pe_time_started;
+	unsigned long		pe_time_elapsed;
+	unsigned long		pe_time_length;
+	char			pe_name_interval[PM_INTERVAL_MAX_LENGTH_ARG + 1];
+	struct pm_interval_data	*pe_backptr;
+	TAILQ_HEAD(, pm_interval_nd_entry) pe_devices;
+	TAILQ_ENTRY(pm_interval_entry) pe_next;
+};
+
+struct pm_interval_data {
+	struct timer_list	pm_timer;
+	struct work_struct	pm_monitor_wq;
+	struct proc_dir_entry	*pm_interval_dir;
+	spinlock_t		pm_spinlock;
+	TAILQ_HEAD(, pm_interval_entry)		pm_intervals;
+	int			rc;
+};
+
+static struct device pm_interval_device =
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
+	.bus_id		= PM_INTERVAL_NAME,
+#endif
+};
+
+static int pm_interval_parse_an_arg(const char *buf, char *an_arg, size_t size_limit)
+{
+	int	iter;
+	int	found_end_of_string = 0;
+	int	count_wspace = 0;
+	char	cur_char = *buf;
+
+	while (isspace(cur_char)) {
+		cur_char = *(++buf);
+		count_wspace++;
+	}
+
+	for (iter = 0; iter < size_limit; iter++) {
+		cur_char = *buf;
+
+		if (!isspace(cur_char) && cur_char != '\0') {
+			*(an_arg++) = cur_char;
+			buf++;
+		} else {
+			found_end_of_string = 1;
+			*an_arg = '\0';
+			break;
+		}
+	}
+
+	if (found_end_of_string == 0) {
+		return PM_INTERVAL_ERROR_RESULT;
+	}
+
+	return count_wspace + iter;
+}
+
+/*
+ * directive is expected to address at least PM_INTERVAL_MAX_LENGTH_COMMAND + 1 chars
+ */
+static int pm_interval_parse_command(const char *buf, char *directive)
+{
+	int		ival = pm_interval_parse_an_arg(buf, directive, PM_INTERVAL_MAX_LENGTH_COMMAND);
+
+	if (ival < 0) {
+		printk(KERN_ERR "%s: failed to parse the command\n", PM_INTERVAL_NAME);
+		return -1;
+	}
+
+	return ival;
+}
+
+/*
+ * args (their addresses are in argv) are expected to address
+ * at least PM_INTERVAL_MAX_LENGTH_ARG + 1 chars
+ */
+
+static int pm_interval_parse_args(const char *buf, char **argv, const unsigned int max_argc)
+{
+	unsigned int	arg_index;
+	int		chars_parsed = 0;
+	int		args_parsed = 0;
+
+	if (max_argc < 1) {
+		return 0;
+	}
+
+	for (arg_index = 0;arg_index < max_argc; arg_index++) {
+		char	tmp_char = *buf;
+
+		if (tmp_char == '\0' || tmp_char == '\n') {
+			return args_parsed;
+		}
+
+		chars_parsed = pm_interval_parse_an_arg(buf, argv[args_parsed], PM_INTERVAL_MAX_LENGTH_ARG);
+		if (chars_parsed < 0) {
+			return args_parsed;
+		}
+
+		buf += chars_parsed;
+		args_parsed++;
+	}
+
+	return args_parsed;
+}
+
+static ssize_t pm_interval_report_result(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct pm_interval_data	*p_data = (struct pm_interval_data *) dev_get_drvdata(dev);
+
+	if (p_data->rc == PM_INTERVAL_SUCCESS_RESULT) {
+		strcpy(buf, "ok\n");
+	} else {
+		sprintf(buf, "error %d\n", p_data->rc);
+	}
+
+	return strlen(buf);
+}
+
+static long pm_get_uptime(void)
+{
+	struct timespec tp;
+	long uptime;
+
+	get_monotonic_boottime(&tp);
+	uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
+
+	return uptime;
+}
+
+static int pm_get_start_next_interval(int interval_length)
+{
+	if (interval_length < 1) {
+		return -1;
+	}
+
+	return interval_length - (pm_get_uptime() % interval_length);
+}
+
+static void pm_interval_schedule_work(unsigned long data)
+{
+	struct pm_interval_data		*p_data = (struct pm_interval_data *) data;
+	struct pm_interval_entry	*p_entry = NULL;
+	int				base_interval_length = 0;
+
+	spin_lock_bh(&p_data->pm_spinlock);
+
+	if (TAILQ_EMPTY(&p_data->pm_intervals)) {
+		goto ready_to_return;
+	}
+
+	p_entry = TAILQ_FIRST(&p_data->pm_intervals);
+	base_interval_length = p_entry->pe_time_length;
+	if (base_interval_length < 1) {
+		printk(KERN_ERR "%s: Invalid base interval length of %d\n", PM_INTERVAL_NAME, base_interval_length);
+		goto ready_to_return;
+	}
+
+	schedule_work(&p_data->pm_monitor_wq);
+
+	mod_timer(&p_data->pm_timer, jiffies + pm_get_start_next_interval(base_interval_length) * HZ);
+
+ready_to_return:
+	spin_unlock_bh(&p_data->pm_spinlock);
+}
+
+
+#if LINUX_VERSION_CODE	>= KERNEL_VERSION(4,7,0)
+
+
+static int pm_interval_proc_show(struct seq_file *sfile, void *v)
+{
+	return 0;
+}
+
+static int pm_interval_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pm_interval_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations pm_interval_stats_fops = {
+	.owner		= THIS_MODULE,
+	.open		= pm_interval_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#else
+static int pm_interval_proc_start_interval_report(char *buf,
+						  char **start,
+						  off_t offset,
+						  int count,
+						  int *eof,
+						  void *data)
+{
+	int				len = 0;
+	struct pm_interval_entry	*p_entry = (struct pm_interval_entry *) data;
+	struct pm_interval_data		*p_data = p_entry->pe_backptr;
+	struct pm_interval_nd_entry	*p_nd_entry = NULL;
+	unsigned long elapsed_time;
+
+	elapsed_time = pm_get_uptime();
+
+	spin_lock_bh(&p_data->pm_spinlock);
+
+	if (p_entry->pe_time_started <= elapsed_time) {
+		elapsed_time = elapsed_time - p_entry->pe_time_started;
+	} else {
+		elapsed_time = 0;
+	}
+
+	/*
+	 * Adopted from dev_seq_show, linux/net/core/dev.c
+	 * Lets the API that gets a cumulative counter share source code with the API that gets a PM counter.
+	 */
+	len += sprintf(buf + len, "Inter-|   Receive                            "
+				  "                    |  Transmit\n"
+				  " face |bytes    packets errs drop fifo frame "
+				  "compressed multicast|bytes    packets errs "
+				  "drop fifo colls carrier compressed\n");
+
+
+	TAILQ_FOREACH(p_nd_entry, &p_entry->pe_devices, pd_next) {
+		const struct net_device_stats *stats = &p_nd_entry->pd_start_interval;
+
+		/*
+		 * Adopted from dev_seq_printf_stats, linux/net/core/dev.c
+		 * Lets the API that gets a cumulative counter share source code with the API
+		 * that gets a PM counter.
+		 */
+		len += sprintf(buf + len, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+			   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+			   p_nd_entry->pd_dev->name, stats->rx_bytes, stats->rx_packets,
+			   stats->rx_errors,
+			   stats->rx_dropped + stats->rx_missed_errors,
+			   stats->rx_fifo_errors,
+			   stats->rx_length_errors + stats->rx_over_errors +
+				stats->rx_crc_errors + stats->rx_frame_errors,
+			   stats->rx_compressed, stats->multicast,
+			   stats->tx_bytes, stats->tx_packets,
+			   stats->tx_errors, stats->tx_dropped,
+			   stats->tx_fifo_errors, stats->collisions,
+			   stats->tx_carrier_errors +
+				stats->tx_aborted_errors +
+				stats->tx_window_errors +
+				stats->tx_heartbeat_errors,
+			   stats->tx_compressed);
+
+	}
+
+	spin_unlock_bh(&p_data->pm_spinlock);
+
+	len += sprintf(buf + len, "\n%u seconds since the interval started\n", (unsigned int) elapsed_time);
+
+	*eof = 1;
+
+	return len;
+}
+#endif
+
+static int pm_interval_add_interval(struct pm_interval_data *p_data,
+				    const char *new_interval_name,
+				    const char *interval_length_str)
+{
+	int				new_interval_length = 0;
+	int				arm_pm_interval_timer = 0;
+	int				next_interval_start_time = 0;
+	int				ival = sscanf(interval_length_str, "%d", &new_interval_length);
+	long				time_in_seconds = pm_get_uptime();
+	char				proc_entry_name[PM_INTERVAL_MAX_LENGTH_ARG + 7];
+	struct pm_interval_entry	*p_entry = NULL;
+
+	if (ival != 1) {
+		printk(KERN_ERR "%s: cannot parse the length of time for the new interval from %s\n",
+				 PM_INTERVAL_NAME, interval_length_str);
+		return PM_INTERVAL_ERROR_RESULT;
+	} else if (new_interval_length < 1) {
+		printk(KERN_ERR "%s: invalid length of %d sec for the new interval\n",
+				PM_INTERVAL_NAME, new_interval_length);
+		return PM_INTERVAL_ERROR_RESULT;
+	}
+
+	if (strnlen(new_interval_name, PM_INTERVAL_MAX_LENGTH_ARG + 1) > PM_INTERVAL_MAX_LENGTH_ARG) {
+		printk(KERN_ERR "%s: name of interval is too long in add interval\n", PM_INTERVAL_NAME);
+		return PM_INTERVAL_ERROR_RESULT;
+	}
+
+	spin_lock_bh(&p_data->pm_spinlock);
+
+	if (!TAILQ_EMPTY(&p_data->pm_intervals)) {
+		int				base_interval_length = 0;
+		struct pm_interval_entry	*p_scan = TAILQ_FIRST(&p_data->pm_intervals);
+
+		base_interval_length = p_scan->pe_time_length;
+		if (new_interval_length % base_interval_length != 0) {
+			printk(KERN_ERR "%s: invalid length of %d sec for the new interval\n",
+					PM_INTERVAL_NAME, new_interval_length);
+			spin_unlock_bh(&p_data->pm_spinlock);
+			goto configuration_error;
+		}
+
+		TAILQ_FOREACH(p_scan, &p_data->pm_intervals, pe_next) {
+			if (p_scan->pe_time_length == new_interval_length) {
+				printk(KERN_ERR
+					"%s: interval with length of %d already configured\n",
+					 PM_INTERVAL_NAME, new_interval_length);
+				spin_unlock_bh(&p_data->pm_spinlock);
+				goto configuration_error;
+			} else if (strncmp(new_interval_name,
+				   p_scan->pe_name_interval,
+				   PM_INTERVAL_MAX_LENGTH_ARG) == 0) {
+				printk(KERN_ERR
+					"%s: interval with name of %s already configured\n",
+					 PM_INTERVAL_NAME, new_interval_name);
+				spin_unlock_bh(&p_data->pm_spinlock);
+				goto configuration_error;
+			}
+		}
+	} else {
+		arm_pm_interval_timer = 1;
+	}
+
+	spin_unlock_bh(&p_data->pm_spinlock);
+
+	if ((p_entry = kzalloc(sizeof(*p_entry), GFP_KERNEL)) == NULL) {
+		printk(KERN_ERR "%s: cannot allocate entry for interval of length %d sec\n",
+				PM_INTERVAL_NAME, new_interval_length);
+		return PM_INTERVAL_ERROR_RESULT;
+	}
+
+	TAILQ_INIT(&p_entry->pe_devices);
+	p_entry->pe_time_length = new_interval_length;
+	p_entry->pe_time_elapsed = time_in_seconds % new_interval_length;
+	p_entry->pe_time_started = time_in_seconds - p_entry->pe_time_elapsed;
+	p_entry->pe_backptr = p_data;
+	strncpy(p_entry->pe_name_interval, new_interval_name, sizeof(p_entry->pe_name_interval) - 1);
+
+	if (arm_pm_interval_timer) {
+		next_interval_start_time = pm_get_start_next_interval(new_interval_length);
+
+		if (next_interval_start_time < 1) {
+			goto problem_with_length_interval;
+		}
+	}
+
+	strcpy(&proc_entry_name[0], PM_INTERVAL_PROC_ENTRY_PREFIX);
+	strcat(&proc_entry_name[strlen(PM_INTERVAL_PROC_ENTRY_PREFIX)], p_entry->pe_name_interval);
+
+#if LINUX_VERSION_CODE	>=  KERNEL_VERSION(4,7,0)
+	if (!proc_create_data(&proc_entry_name[0], 0600, p_data->pm_interval_dir,
+				&pm_interval_stats_fops, p_entry)) {	/* p_entry reaches the show routine via PDE_DATA() */
+		printk(KERN_ERR "pm_interval: could not create proc entry\n");
+		goto cant_create_proc_entry;
+	}
+#else
+	if (create_proc_read_entry(&proc_entry_name[0],
+				    0,
+				    p_data->pm_interval_dir,
+				    pm_interval_proc_start_interval_report,
+				    p_entry) == NULL) {
+		goto cant_create_proc_entry;
+	}
+#endif
+	if (arm_pm_interval_timer) {
+		p_data->pm_timer.function = pm_interval_schedule_work;
+		p_data->pm_timer.data = (unsigned long) p_data;
+		p_data->pm_timer.expires = jiffies + next_interval_start_time * HZ;
+		add_timer(&p_data->pm_timer);
+	}
+
+	spin_lock_bh(&p_data->pm_spinlock);
+
+	TAILQ_INSERT_TAIL(&p_data->pm_intervals, p_entry, pe_next);
+
+	spin_unlock_bh(&p_data->pm_spinlock);
+
+	return PM_INTERVAL_SUCCESS_RESULT;
+
+problem_with_length_interval:
+cant_create_proc_entry:
+	kfree(p_entry);
+configuration_error:
+	return PM_INTERVAL_ERROR_RESULT;
+}
+
+static int pm_interval_dump(struct pm_interval_data *p_data)	/* log every configured interval and its tracked devices (debug aid) */
+{
+	struct pm_interval_entry	*p_entry = NULL;
+
+	printk(KERN_ERR "%s: begin dump\n", PM_INTERVAL_NAME);
+	spin_lock_bh(&p_data->pm_spinlock);
+
+	TAILQ_FOREACH(p_entry, &p_data->pm_intervals, pe_next) {
+		struct pm_interval_nd_entry	*p_nd_entry = NULL;
+
+		printk(KERN_ERR "%s: interval %s of length %d started at %d, elapsed %d\n",
+				 PM_INTERVAL_NAME,
+				 p_entry->pe_name_interval,
+				 (int) p_entry->pe_time_length,
+				 (int) p_entry->pe_time_started,
+				 (int) p_entry->pe_time_elapsed);
+
+		TAILQ_FOREACH(p_nd_entry, &p_entry->pe_devices, pd_next) {
+			const struct net_device	*dev = p_nd_entry->pd_dev;
+
+			if (dev != NULL) {
+				printk(KERN_ERR "    net device %s\n", dev->name);
+			} else {
+				printk(KERN_ERR "    net device (NULL)\n");
+			}
+
+		}
+	}
+
+	spin_unlock_bh(&p_data->pm_spinlock);
+	printk(KERN_ERR "%s: dump completes\n", PM_INTERVAL_NAME);
+
+	return PM_INTERVAL_SUCCESS_RESULT;
+}
+
+static ssize_t pm_interval_configure(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	char			command[PM_INTERVAL_MAX_LENGTH_COMMAND + 1] = {'\0'};
+	int			chars_parsed = pm_interval_parse_command(buf, &command[0]);
+	int			rc_val = PM_INTERVAL_SUCCESS_RESULT;
+	struct pm_interval_data	*p_data = (struct pm_interval_data *) dev_get_drvdata(dev);
+
+	if (chars_parsed < 0) {
+		p_data->rc = PM_INTERVAL_ERROR_RESULT;
+		/*
+		 * It's best to return the length of the message, even if an error occurs.
+		 * Otherwise the Linux kernel might keeps calling this entry point until
+		 * it is convinced the complete message has been parsed, based on
+		 * the return value(s) from this routine adding up to count.
+		 *
+		 * Application reads from the entry in the sys FS to get the status of
+		 * the operation; that is the purpose of pm_interval_report_result.
+		 */
+		return count;
+	}
+
+	if (strcmp(&command[0], PM_INTERVAL_COMMAND_ADD) == 0) {
+		char	name_of_interval[PM_INTERVAL_MAX_LENGTH_ARG + 1] = {'\0'};
+		char	interval_length_str[PM_INTERVAL_MAX_LENGTH_ARG + 1] = {'\0'};
+		char	*local_argv[] = {&name_of_interval[0], &interval_length_str[0]};
+		int	args_parsed = pm_interval_parse_args(buf + chars_parsed,
+							    &local_argv[0],
+							     ARRAY_SIZE(local_argv));
+		if (args_parsed == ARRAY_SIZE(local_argv)) {
+			rc_val = pm_interval_add_interval(p_data, &name_of_interval[0], &interval_length_str[0]);
+		} else {
+			printk(KERN_ERR "%s, insufficient params (%d) for %s command\n",
+					 PM_INTERVAL_NAME, args_parsed, PM_INTERVAL_COMMAND_ADD);
+			rc_val = PM_INTERVAL_ERROR_RESULT;
+		}
+	} else if (strcmp(&command[0], PM_INTERVAL_COMMAND_DUMP) == 0) {
+		rc_val = pm_interval_dump(p_data);
+	} else {
+		printk(KERN_ERR "%s, unrecognized command %s\n", PM_INTERVAL_NAME, &command[0]);
+		rc_val = PM_INTERVAL_ERROR_RESULT;
+	}
+
+	p_data->rc = rc_val;	/* reported back through pm_interval_report_result (sysfs read) */
+
+	return count;
+}
+
+static DEVICE_ATTR(configure, 0644,
+	pm_interval_report_result, pm_interval_configure);
+
+static struct pm_interval_data	*p_private = NULL;
+
+static struct pm_interval_nd_entry *pm_interval_get_addr_entry(struct pm_interval_entry *p_entry,
+							    struct net_device *dev)	/* find dev's entry in p_entry's device list, or NULL */
+{
+	struct pm_interval_nd_entry	*p_nd_entry = NULL;
+
+	TAILQ_FOREACH(p_nd_entry, &p_entry->pe_devices, pd_next) {
+		if (dev == p_nd_entry->pd_dev) {
+			return p_nd_entry;
+		}
+	}
+
+	return NULL;
+}
+
+static int pm_interval_update_net_device_table(struct pm_interval_entry *p_entry, struct pm_interval_data *p_data)
+{
+	/* Caller holds p_data->pm_spinlock (see pm_interval_monitor()) */
+	struct net *net;
+	struct net_device *dev;
+	int retval = 0;
+
+	read_lock(&dev_base_lock);
+	for_each_net(net) {
+		for_each_netdev(net, dev) {
+			struct rtnl_link_stats64 storage;
+			const struct rtnl_link_stats64 *p_current_counters = dev_get_stats(dev, &storage);
+			struct pm_interval_nd_entry	*p_nd_entry =
+				pm_interval_get_addr_entry(p_entry, dev);
+
+			if (p_nd_entry == NULL) {
+				/*
+				 * GFP_ATOMIC: dev_base_lock (read-held here)
+				 * and the caller's pm_spinlock both forbid
+				 * sleeping; the old GFP_KERNEL unlock/relock
+				 * dance around kzalloc() could sleep under
+				 * read_lock() and let the list change mid-scan.
+				 */
+				p_nd_entry = kzalloc(sizeof(*p_nd_entry), GFP_ATOMIC);
+				if (p_nd_entry == NULL) {
+					retval = -1;
+					goto cant_allocate_nd_entry;
+				}
+
+				p_nd_entry->pd_dev = dev;
+				TAILQ_INSERT_TAIL(&p_entry->pe_devices, p_nd_entry, pd_next);
+			}
+
+			if (p_current_counters != NULL) {
+				/* Bound the copy by the destination.  NOTE(review): the proc */
+				/* handler reads this as struct net_device_stats -- confirm layouts. */
+				memcpy(&p_nd_entry->pd_start_interval,
+					p_current_counters,
+					sizeof(p_nd_entry->pd_start_interval));
+			}
+		}
+	}
+
+cant_allocate_nd_entry:
+	read_unlock(&dev_base_lock);
+
+	return retval;
+}
+
+static void pm_interval_monitor(struct work_struct *work)	/* work handler: refresh per-device counter snapshots when an interval rolls over */
+{
+	struct pm_interval_data		*p_data = container_of(work, struct pm_interval_data, pm_monitor_wq);
+	long				time_in_seconds = pm_get_uptime();
+	struct pm_interval_entry	*p_entry = NULL;
+	int				interval_index = 0;
+
+	spin_lock_bh(&p_data->pm_spinlock);
+
+	TAILQ_FOREACH(p_entry, &p_data->pm_intervals, pe_next) {
+		int	since_interval_start = 0;
+
+		since_interval_start = time_in_seconds % p_entry->pe_time_length;
+
+		if (since_interval_start < p_entry->pe_time_elapsed || interval_index == 0) {	/* modulo wrapped => new interval; the first (base) interval refreshes every run */
+			if (pm_interval_update_net_device_table(p_entry, p_data) != 0) {
+				goto ready_to_return;
+			}
+
+			p_entry->pe_time_started = time_in_seconds;
+		}
+
+		p_entry->pe_time_elapsed = since_interval_start;
+		interval_index++;
+	}
+
+ready_to_return:
+	spin_unlock_bh(&p_data->pm_spinlock);
+}
+
+/*
+ * Keep Linux from complaining about no release method at module unload time ...
+ */
+static void pm_interval_release(struct device *dev)
+{
+}
+
+static int __init pm_interval_init(void)	/* module init: register device, sysfs "configure" file, /proc dir, timer */
+{
+	if ((p_private = kzalloc(sizeof(*p_private), GFP_KERNEL)) == NULL) {
+		printk(KERN_ERR "%s: cannot allocate private data\n", PM_INTERVAL_NAME);
+		goto cant_alloc_private;
+	}
+
+	pm_interval_device.release = pm_interval_release;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+	dev_set_name(&pm_interval_device, PM_INTERVAL_NAME);
+#endif
+	dev_set_drvdata(&pm_interval_device, p_private);
+
+	if (device_register(&pm_interval_device) != 0) {
+		printk(KERN_ERR "%s: failed to register the device\n", PM_INTERVAL_NAME);
+		goto cant_register_pm_interval;
+	}
+
+	p_private->rc = PM_INTERVAL_INITIAL_RESULT;
+
+	TAILQ_INIT(&p_private->pm_intervals);
+	spin_lock_init(&p_private->pm_spinlock);
+	INIT_WORK(&p_private->pm_monitor_wq, pm_interval_monitor);
+
+	if (device_create_file(&pm_interval_device, &dev_attr_configure) != 0) {
+		printk(KERN_ERR "%s: failed to create configure sysfs file for \"%s\"\n",
+				PM_INTERVAL_NAME, PM_INTERVAL_NAME);
+		goto configure_sysfs_fail;
+	}
+
+	if ((p_private->pm_interval_dir = proc_mkdir(PM_INTERVAL_NAME, NULL)) == NULL) {
+		printk(KERN_ERR "PMI: cannot create /proc/" PM_INTERVAL_NAME " folder\n");
+		goto cant_create_proc;
+	}
+
+	init_timer(&p_private->pm_timer);	/* armed later by pm_interval_add_interval() */
+
+	return 0;
+
+cant_create_proc:
+	device_remove_file(&pm_interval_device, &dev_attr_configure);
+configure_sysfs_fail:
+	device_unregister(&pm_interval_device);
+cant_register_pm_interval:
+	kfree(p_private);
+cant_alloc_private:
+	p_private = NULL;
+
+	return -ENOMEM;	/* NOTE(review): also returned for non-allocation failures above */
+}
+
+static void pm_interval_entry_cleanup(struct pm_interval_entry *p_entry, struct pm_interval_data *p_data)	/* free an interval's device list and remove its proc entry */
+{
+	char	proc_entry_name[PM_INTERVAL_MAX_LENGTH_ARG + 7];
+
+	while (!TAILQ_EMPTY(&p_entry->pe_devices)) {
+		struct pm_interval_nd_entry *p_nd_entry = TAILQ_FIRST(&p_entry->pe_devices);
+
+		TAILQ_REMOVE(&p_entry->pe_devices, p_nd_entry, pd_next);
+		kfree(p_nd_entry);
+	}
+
+	strcpy(&proc_entry_name[0], PM_INTERVAL_PROC_ENTRY_PREFIX);
+	strcat(&proc_entry_name[strlen(PM_INTERVAL_PROC_ENTRY_PREFIX)], p_entry->pe_name_interval);
+	remove_proc_entry(&proc_entry_name[0], p_data->pm_interval_dir);	/* NOTE(review): may sleep; do not call under pm_spinlock */
+}
+
+static void __exit pm_interval_exit(void)
+{
+	printk(KERN_WARNING "%s: unload kernel module\n", PM_INTERVAL_NAME);
+
+	if (p_private == NULL) {
+		return;
+	}
+
+	del_timer_sync(&p_private->pm_timer);	/* _sync: wait out a handler running on another CPU */
+	flush_work(&p_private->pm_monitor_wq);
+
+	/*
+	 * Timer and work item are quiesced, so nothing else can reach the
+	 * interval list; tear it down without pm_spinlock.  This also keeps
+	 * remove_proc_entry() and device_unregister(), which may sleep, from
+	 * being called while a spinlock is held with BHs disabled.
+	 */
+	while (!TAILQ_EMPTY(&p_private->pm_intervals)) {
+		struct pm_interval_entry *p_entry = TAILQ_FIRST(&p_private->pm_intervals);
+
+		pm_interval_entry_cleanup(p_entry, p_private);
+
+		TAILQ_REMOVE(&p_private->pm_intervals, p_entry, pe_next);
+		kfree(p_entry);
+	}
+
+	remove_proc_entry(PM_INTERVAL_NAME, NULL);
+
+	device_remove_file(&pm_interval_device, &dev_attr_configure);
+	device_unregister(&pm_interval_device);
+
+	kfree(p_private);
+	p_private = NULL;
+}
+
+/******************************************************************************
+	Linux driver entries/declarations
+******************************************************************************/
+module_init(pm_interval_init);
+module_exit(pm_interval_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/qdrv/Makefile b/drivers/qtn/qdrv/Makefile
new file mode 100644
index 0000000..c210ae6
--- /dev/null
+++ b/drivers/qtn/qdrv/Makefile
@@ -0,0 +1,87 @@
+#
+# Quantenna Communications Inc. Driver Makefile
+#
+
+QDRV_SLAB_H := qdrv_slab_def.h
+
+CLEAN_FILES+=$(QDRV_SLAB_H)
+
+EXTRA_CFLAGS += -Wall -Werror -Wno-unknown-pragmas -DQDRV \
+		-I../drivers -I../include -I../drivers/include/shared -I../drivers/include/kernel
+EXTRA_CFLAGS += -mlong-calls
+EXTRA_CFLAGS += -DQTN_ENABLE_TRACE_BUFFER=0 -DQSCS_ENABLED -DQTN_BG_SCAN -DQBMPS_ENABLE
+EXTRA_CFLAGS += -DCONFIG_QTN_80211K_SUPPORT -DCONFIG_NAC_MONITOR
+#EXTRA_CFLAGS += -Wframe-larger-than=2048
+EXTRA_CFLAGS += -Wframe-larger-than=3072
+
+ifeq ($(CONFIG_QVSP), y)
+EXTRA_CFLAGS += -DCONFIG_QVSP
+endif
+EXTRA_CFLAGS += -DQTN_DEBUG
+
+ifeq ($(board_config), qtm710_rgmii_config)
+EXTRA_CFLAGS += -DQTM710_RGMII_CONFIG
+endif
+
+ifeq ($(board_config),topaz_msft_config)
+	EXTRA_CFLAGS += -DERICSSON_CONFIG
+else
+	ifeq ($(board_config),topaz_msmr_config)
+		EXTRA_CFLAGS += -DERICSSON_CONFIG
+	endif
+endif
+
+EXTRA_CFLAGS += -g      # needed for pktlogger
+
+SOURCE_MODULES := qdrv_module.o qdrv_control.o qdrv_soc.o \
+			qdrv_hal.o qdrv_muc.o qdrv_mac.o qdrv_mac_reserve.o qdrv_comm.o \
+			qdrv_wlan.o qdrv_tx.o qdrv_rx.o qdrv_scan.o \
+			qdrv_hostlink.o qdrv_vap.o \
+			qdrv_dsp.o qdrv_txbf.o qdrv_muc_stats.o \
+			qdrv_radar.o qdrv_math.o qdrv_bridge.o \
+			qdrv_uc_print.o qdrv_sch.o qdrv_sch_pm.o \
+			qdrv_pktlogger.o qdrv_auc.o qdrv_pcap.o	\
+			qdrv_vlan.o qdrv_show.o \
+			qdrv_mu.o qdrv_fw.o
+
+ifneq ($(KERNELRELEASE),)
+
+obj-m		+=	qdrv.o
+qdrv-objs	+=	$(SOURCE_MODULES)
+
+$(src)/$(QDRV_SLAB_H): $(src)/$(QDRV_SLAB_H).in $(src)/qdrv_slab_watch.h
+	cpp $(src)/$(QDRV_SLAB_H).in > $(src)/$(QDRV_SLAB_H)
+
+$(src)/qdrv_module.c: $(src)/$(QDRV_SLAB_H)
+
+else # $(KERNELRELEASE)
+
+include ../Makefile_defines
+
+DWARF_FILE	?= ./qdrv_dwarf.txt
+
+default:
+	$(MAKE) -C $(KERNELDIR) \
+		ARCH=$(ARCHITECTURE) \
+		CROSS_COMPILE=$(TOOLS_PREFIX) \
+		M=$(PWD) \
+		modules
+
+install:
+	$(MAKE) -C $(KERNELDIR) \
+		ARCH=$(ARCHITECTURE) \
+		CROSS_COMPILE=$(TOOLS_PREFIX) \
+		M=$(PWD) \
+		INSTALL_MOD_PATH=$(INSTALL_PATH) \
+		modules_install
+
+dwarf:
+	$(TOOLS_PREFIX)readelf --debug-dump=info qdrv.o > $(DWARF_FILE)
+	cp headers.txt $(HEADERS_FILE) || :
+
+clean:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) clean
+
+endif # $(KERNELRELEASE)
+
+clean-files+=$(CLEAN_FILES)
diff --git a/drivers/qtn/qdrv/qdrv_auc.c b/drivers/qtn/qdrv/qdrv_auc.c
new file mode 100644
index 0000000..a119142
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_auc.c
@@ -0,0 +1,215 @@
+/**
+ * Copyright (c) 2008 - 2013 Quantenna Communications Inc
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <asm/board/soc.h>
+
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_debug.h"
+#include "qdrv_auc.h"
+#include "qdrv_hal.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include "qdrv_fw.h"
+#include <qtn/topaz_tqe.h>
+
+
+static qtn_shared_node_stats_t *s_per_node_stats_ptr = NULL;
+static qtn_shared_vap_stats_t *s_per_vap_stats_ptr = NULL;
+
+static size_t auc_get_sram_size(void)	/* SRAM reserved for AuC; depends on MU vs non-MU firmware build */
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+	if (sp->fw_no_mu)
+		return CONFIG_ARC_AUC_NOMU_SRAM_SIZE;
+	else
+		return CONFIG_ARC_AUC_MU_SRAM_SIZE;
+}
+
+static void auc_clear_addr_range(unsigned long physaddr, unsigned long size)	/* zero [physaddr, physaddr + size) via a temporary uncached mapping */
+{
+	void *vaddr = ioremap_nocache(physaddr, size);
+
+	if (!vaddr) {
+		DBGPRINTF_E("0x%lx, 0x%lx cannot be mapped\n", physaddr, size);
+	} else {
+		qdrv_fw_auc_memzero(vaddr, size, physaddr);
+		iounmap(vaddr);
+	}
+}
+
+static void auc_clear_mem(void)
+{
+	auc_clear_addr_range(TOPAZ_AUC_IMEM_ADDR, TOPAZ_AUC_IMEM_SIZE);
+	auc_clear_addr_range(TOPAZ_AUC_DMEM_ADDR, TOPAZ_AUC_DMEM_SIZE);
+	auc_clear_addr_range(RUBY_DRAM_BEGIN + CONFIG_ARC_AUC_BASE, CONFIG_ARC_AUC_SIZE);
+	auc_clear_addr_range(RUBY_SRAM_BEGIN + CONFIG_ARC_AUC_SRAM_BASE, auc_get_sram_size());
+}
+
+void qdrv_auc_stats_setup(void)
+{
+	unsigned long phyaddr;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+	if (unlikely(!sp || !sp->auc.node_stats || !sp->auc.vap_stats)) {
+		DBGPRINTF(DBG_LL_ERR, QDRV_LF_TRACE, "Stats setup: failed\n");
+		return;
+	}
+
+	if (!s_per_node_stats_ptr) {
+		phyaddr = qdrv_fw_auc_to_host_addr((unsigned long)sp->auc.node_stats);
+		s_per_node_stats_ptr = ioremap_nocache(phyaddr, QTN_NCIDX_MAX * sizeof(qtn_shared_node_stats_t));
+	}
+
+	if (!s_per_vap_stats_ptr) {
+		phyaddr = qdrv_fw_auc_to_host_addr((unsigned long)sp->auc.vap_stats);
+		s_per_vap_stats_ptr = ioremap_nocache(phyaddr, QTN_MAX_VAPS * sizeof(qtn_shared_vap_stats_t));
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE, "Stats setup: Node : %p - %p\n"
+			"             Vap  : %p - %p\n",
+			sp->auc.node_stats,
+			s_per_node_stats_ptr,
+			sp->auc.vap_stats,
+			s_per_vap_stats_ptr);
+}
+
+void qdrv_auc_stats_unmap(void)
+{
+	if (s_per_node_stats_ptr)
+		iounmap(s_per_node_stats_ptr);
+	if (s_per_vap_stats_ptr)
+		iounmap(s_per_vap_stats_ptr);
+}
+
+qtn_shared_node_stats_t* qdrv_auc_get_node_stats(uint8_t node)
+{
+	return (s_per_node_stats_ptr) ? (s_per_node_stats_ptr + node) : NULL;
+}
+
+qtn_shared_vap_stats_t* qdrv_auc_get_vap_stats(uint8_t vapid)
+{
+	return (s_per_vap_stats_ptr) ? (s_per_vap_stats_ptr + vapid) : NULL;
+}
+
+void qdrv_auc_update_multicast_stats(void *ctx, uint8_t nid)
+{
+	uint8_t vapid;
+	struct ieee80211com *ic = (struct ieee80211com *)ctx;
+	struct ieee80211_node *node;
+	struct ieee80211vap *vap;
+	struct qdrv_vap * qv;
+	qtn_shared_node_stats_t *nstats;
+	qtn_shared_vap_stats_t *vstats;
+
+	if (!ctx)
+		return;
+
+	node = ic->ic_node_idx_ni[nid];
+	if (unlikely(!node))
+		return;
+
+	vap = node->ni_vap;
+	qv = container_of(vap, struct qdrv_vap, iv);
+	vapid = QDRV_WLANID_FROM_DEVID(qv->devid);
+	nstats = qdrv_auc_get_node_stats(nid);
+	vstats = qdrv_auc_get_vap_stats(vapid);
+
+	if (unlikely(!nstats || !vstats))
+		return;
+
+	nstats->qtn_tx_mcast++;
+	vstats->qtn_tx_mcast++;
+}
+
+void qdrv_auc_print_memory_map(void)
+{
+	int bank = 0;
+	u_int32_t auc_sram_start, auc_sram_end, auc_sram_size, auc_sram_bank_end;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+	auc_sram_size = auc_get_sram_size();
+	auc_sram_start = RUBY_SRAM_BEGIN + CONFIG_ARC_AUC_SRAM_BASE;
+	auc_sram_end = auc_sram_start + auc_sram_size;
+
+	printk("AuC SRAM start 0x%08x end 0x%08x size %d\n",
+		  auc_sram_start, auc_sram_end, auc_sram_size);
+
+	if (sp->fw_no_mu)
+		printk("AuC is configured for non-MU SRAM layout\n");
+	else
+		printk("AuC is configured for MU-enabled SRAM layout\n");
+
+	auc_sram_bank_end = auc_sram_start + RUBY_SRAM_BANK_SIZE;
+	while (auc_sram_start < auc_sram_end) {
+		printk("AuC SRAM bank %d start 0x%08x end 0x%08x\n",
+		       bank++, auc_sram_start, auc_sram_bank_end);
+		auc_sram_start = auc_sram_bank_end;
+		auc_sram_bank_end += RUBY_SRAM_BANK_SIZE;
+	}
+}
+
+int qdrv_auc_init(struct qdrv_cb *qcb)
+{
+	u32 auc_start_addr = 0;
+	struct qdrv_wlan *qw = qcb->macs[0].data;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter");
+
+	qtn_mproc_sync_shared_params_get()->auc.auc_config = global_auc_config;
+
+	qdrv_auc_print_memory_map();
+
+	auc_clear_mem();
+
+	if (qdrv_fw_load_auc(qcb->dev, qcb->auc_firmware, &auc_start_addr) < 0) {
+		DBGPRINTF_E("AuC load firmware failed\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_DSP, "Firmware start address is %x\n", auc_start_addr);
+
+	hal_enable_auc();
+
+	tqe_reg_multicast_tx_stats(qdrv_auc_update_multicast_stats, &qw->ic);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+int qdrv_auc_exit(struct qdrv_cb *qcb)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter");
+
+	qdrv_auc_stats_unmap();
+	hal_disable_auc();
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_auc.h b/drivers/qtn/qdrv/qdrv_auc.h
new file mode 100644
index 0000000..139404c
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_auc.h
@@ -0,0 +1,30 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_AUC_H
+#define _QDRV_AUC_H
+
+void qdrv_auc_stats_setup(void);
+qtn_shared_node_stats_t* qdrv_auc_get_node_stats(uint8_t node);
+qtn_shared_vap_stats_t* qdrv_auc_get_vap_stats(uint8_t vapid);
+int qdrv_auc_init(struct qdrv_cb *qcb);
+int qdrv_auc_exit(struct qdrv_cb *qcb);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_bridge.c b/drivers/qtn/qdrv/qdrv_bridge.c
new file mode 100644
index 0000000..659b6f2
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_bridge.c
@@ -0,0 +1,1244 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2010 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Author      : Quantenna Communications Inc                               **
+**  File        : qdrv_bridge.c                                              **
+**  Description : 3-address mode bridging                                    **
+**                                                                           **
+**  This module maintains two tables.                                        **
+**                                                                           **
+**  1. A Unicast table containing the MAC address and IP address of each     **
+**     downstream client. This is used to determine the destination Ethernet **
+**     MAC address for downstream frames.                                    **
+**                                                                           **
+**  2. A Multicast table for keeping track of multicast group registrations  **
+**     made by downstream clients.                                           **
+**     Each multicast table entry contains an multicast IP address and a     **
+**     list of one or more downstream clients that have joined the           **
+**     multicast group.                                                      **
+**     Downstream clients are not visible to the AP in 3-address mode, so    **
+**     the AP 'sees' only that the station is joined to the multicast group. **
+**     This table is used to ensure that an upstream  multicast LEAVE        **
+**     message is only sent when the last downstream client leaves a         **
+**     group.                                                                **
+**                                                                           **
+*******************************************************************************/
+/**
+  Copyright (c) 2010 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/spinlock.h>
+#include <linux/jhash.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <asm/unaligned.h>
+#endif
+#include "linux/udp.h"
+#include "linux/igmp.h"
+#include "qdrv_debug.h"
+#include "qdrv_bridge.h"
+
+#include <qtn/iputil.h>
+#include <qtn/topaz_fwt_sw.h>
+
+#if defined(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <linux/icmpv6.h>
+#endif
+
+struct udp_dhcp_packet {
+	struct iphdr iphdr_p;
+	struct udphdr udphdr_p;
+	struct dhcp_message dhcp_msg;
+}__attribute__ ((packed));
+
+#define BOOTREQUEST	1
+#define DHCPSERVER_PORT	67
+#define DHCPREQUEST	3
+#define ARPHRD_ETHER	1
+#define DHCP_BROADCAST_FLAG	0x8000
+
+#define QDRV_IGMP_OP_NONE	0x00
+#define QDRV_IGMP_OP_JOIN	0x01
+#define QDRV_IGMP_OP_LEAVE	0x02
+#define QDRV_IP_MCAST_PREF	0xE
+
+#define QDRV_BR_PRINT_BUF_SIZE 2048
+#define QDRV_BR_PRINT_LINE_MAX 48
+
+static struct kmem_cache *br_uc_cache __read_mostly = NULL;
+static struct kmem_cache *br_mc_cache __read_mostly = NULL;
+static struct kmem_cache *br_mc_client_cache __read_mostly = NULL;
+#if defined(CONFIG_IPV6)
+static struct kmem_cache *br_ipv6uc_cache __read_mostly = NULL;
+#endif
+
+/*
+ * Create a hash of a MAC address
+ */
+static int
+qdrv_br_mac_hash(unsigned char *mac_addr)
+{
+	return jhash(mac_addr, IEEE80211_ADDR_LEN, 0) & (QDRV_BR_MAC_HASH_SIZE - 1);
+}
+
+/*
+ * Create a hash of an IP address
+ */
+static int
+qdrv_br_ip_hash(__be32 ip_addr)
+{
+	return jhash_1word(ip_addr, 0) & (QDRV_BR_IP_HASH_SIZE - 1);
+}
+
+#if defined(CONFIG_IPV6)
+/*
+ * Create a hash of an IPv6 address
+ */
+static int
+qdrv_br_ipv6_hash(const struct in6_addr *ipv6_addr)
+{
+	uint32_t aligned32[2];
+
+	aligned32[0] = get_unaligned((uint32_t *)(&ipv6_addr->s6_addr32[2]));
+	aligned32[1] = get_unaligned((uint32_t *)(&ipv6_addr->s6_addr32[3]));
+
+	return jhash_2words(aligned32[0], aligned32[1], 0) & (QDRV_BR_IP_HASH_SIZE - 1);
+}
+#endif
+
+/*
+ * Lock the unicast table for write access
+ */
+static void
+qdrv_br_uc_lock(struct qdrv_br *br)
+{
+	spin_lock_irqsave(&br->uc_lock, br->uc_lock_flags);
+}
+
+/*
+ * Unlock the unicast table
+ */
+static void
+qdrv_br_uc_unlock(struct qdrv_br *br)
+{
+	spin_unlock_irqrestore(&br->uc_lock, br->uc_lock_flags);
+}
+
+/*
+ * Lock the multicast table for write access
+ */
+static void
+qdrv_br_lock_mc(struct qdrv_br *br)
+{
+	spin_lock_irqsave(&br->mc_lock, br->mc_lock_flags);
+}
+
+/*
+ * Unlock the multicast table
+ */
+static void
+qdrv_br_unlock_mc(struct qdrv_br *br)
+{
+	spin_unlock_irqrestore(&br->mc_lock, br->mc_lock_flags);
+}
+
+static int
+qdrv_br_ip_is_unicast(u32 ip_addr)
+{
+	if ((ip_addr != INADDR_ANY) &&
+			(ip_addr != INADDR_BROADCAST) &&
+			(!IN_MULTICAST(ntohl(ip_addr)))) {
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Free a unicast entry
+ */
+static void
+qdrv_br_uc_free(struct rcu_head *head)
+{
+	struct qdrv_br_uc *br_uc;
+
+	br_uc = container_of(head, struct qdrv_br_uc, rcu);
+
+	kmem_cache_free(br_uc_cache, br_uc);
+}
+
+#if defined(CONFIG_IPV6)
+static void
+qdrv_br_ipv6uc_free(struct rcu_head *head)
+{
+	struct qdrv_br_ipv6_uc *br_ipv6_uc;
+
+	br_ipv6_uc = container_of(head, struct qdrv_br_ipv6_uc, rcu);
+
+	kmem_cache_free(br_ipv6uc_cache, br_ipv6_uc);
+}
+#endif
+
+/*
+ * Free a multicast entry
+ */
+static void
+qdrv_br_mc_free(struct rcu_head *head)
+{
+	struct qdrv_br_mc *br_mc;
+
+	br_mc = container_of(head, struct qdrv_br_mc, rcu);
+
+	kmem_cache_free(br_mc_cache, br_mc);
+}
+
+/*
+ * Free a multicast client entry
+ */
+static void
+qdrv_br_mc_client_free(struct rcu_head *head)
+{
+	struct qdrv_br_mc_client *br_mc_client;
+
+	br_mc_client = container_of(head, struct qdrv_br_mc_client, rcu);
+
+	kmem_cache_free(br_mc_client_cache, br_mc_client);
+}
+
+/*
+ * Remove a multicast client entry from a multicast entry
+ * Assumes the multicast table is locked for write.
+ */
+static void
+qdrv_br_mc_client_delete(struct qdrv_br *br, struct qdrv_br_mc *br_mc,
+				struct qdrv_br_mc_client *br_mc_client)
+{
+	hlist_del_rcu(&br_mc_client->mc_client_hlist);
+	call_rcu(&br_mc_client->rcu, qdrv_br_mc_client_free);
+	atomic_dec(&br_mc->mc_client_tot);
+	atomic_dec(&br->mc_tot);
+}
+
+/*
+ * Remove a multicast address entry
+ * Assumes the multicast table is locked for write.
+ */
+static void
+qdrv_br_mc_delete(struct qdrv_br *br, struct qdrv_br_mc *br_mc)
+{
+	hlist_del_rcu(&br_mc->mc_hlist);
+	call_rcu(&br_mc->rcu, qdrv_br_mc_free);
+	atomic_dec(&br->mc_tot);
+}
+
+/*
+ * Remove a unicast entry
+ * Assumes the unicast table is locked for write.
+ *
+ * The entry is unlinked from both the MAC and the IP hash chains and
+ * freed after an RCU grace period.  Reading br_uc->ip_addr below is
+ * safe because the free is deferred by call_rcu().
+ */
+static void
+qdrv_br_uc_delete(struct qdrv_br *br, struct qdrv_br_uc *br_uc)
+{
+	hlist_del_rcu(&br_uc->mac_hlist);
+	hlist_del_rcu(&br_uc->ip_hlist);
+	call_rcu(&br_uc->rcu, qdrv_br_uc_free);
+	atomic_dec(&br->uc_tot);
+
+	/* Mirror the removal into the software forwarding table */
+	fwt_sw_remove_uc_ipmac((uint8_t *)&br_uc->ip_addr, __constant_htons(ETH_P_IP));
+}
+
+#if defined(CONFIG_IPV6)
+/*
+ * Remove an IPv6 unicast entry
+ *
+ * Assumes the unicast table is locked for write (same uc_lock as IPv4).
+ * Free is deferred via RCU; the removal is mirrored into the software
+ * forwarding table.
+ */
+static void
+qdrv_br_ipv6uc_delete(struct qdrv_br *br, struct qdrv_br_ipv6_uc *br_ipv6_uc)
+{
+	hlist_del_rcu(&br_ipv6_uc->ipv6_hlist);
+	call_rcu(&br_ipv6_uc->rcu, qdrv_br_ipv6uc_free);
+	atomic_dec(&br->uc_ipv6_tot);
+
+	fwt_sw_remove_uc_ipmac((uint8_t *)&br_ipv6_uc->ipv6_addr, __constant_htons(ETH_P_IPV6));
+}
+#endif
+
+/*
+ * Find a multicast client entry
+ * Assumes the multicast table is locked for read or write.
+ *
+ * Returns the matching entry, or NULL if the MAC is not subscribed.
+ */
+static struct qdrv_br_mc_client *
+qdrv_br_mc_client_find(struct qdrv_br_mc *br_mc, unsigned char *mac_addr)
+{
+	struct hlist_head *head = &br_mc->mc_client_hash[qdrv_br_mac_hash(mac_addr)];
+	struct qdrv_br_mc_client *br_mc_client;
+
+/* Older kernels pass an explicit iterator node to hlist_for_each_entry_rcu() */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct hlist_node *h;
+	hlist_for_each_entry_rcu(br_mc_client, h, head, mc_client_hlist) {
+#else
+	hlist_for_each_entry_rcu(br_mc_client, head, mc_client_hlist) {
+#endif
+		if (IEEE80211_ADDR_EQ(br_mc_client->mac_addr, mac_addr)) {
+			return br_mc_client;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Find a multicast entry
+ * Assumes the multicast table is locked for read or write.
+ *
+ * @mc_ip_addr is in network byte order; returns NULL if not found.
+ */
+static struct qdrv_br_mc *
+qdrv_br_mc_find(struct qdrv_br *br, __be32 mc_ip_addr)
+{
+	struct hlist_head *head = &br->mc_ip_hash[qdrv_br_ip_hash(mc_ip_addr)];
+	struct qdrv_br_mc *br_mc;
+
+/* Older kernels pass an explicit iterator node to hlist_for_each_entry_rcu() */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct hlist_node *h;
+	hlist_for_each_entry_rcu(br_mc, h, head, mc_hlist) {
+#else
+	hlist_for_each_entry_rcu(br_mc, head, mc_hlist) {
+#endif
+		if (br_mc->mc_ip_addr == mc_ip_addr) {
+			return br_mc;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Find a unicast entry by IP address
+ * Assumes the unicast table is locked for read or write.
+ *
+ * Returns the matching entry, or NULL if the IP is unknown.
+ */
+static struct qdrv_br_uc *
+qdrv_br_uc_find_by_ip(struct qdrv_br *br, u32 ip_addr)
+{
+	struct hlist_head *head = &br->uc_ip_hash[qdrv_br_ip_hash(ip_addr)];
+	struct qdrv_br_uc *br_uc;
+
+/* Older kernels pass an explicit iterator node to hlist_for_each_entry_rcu() */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct hlist_node *h;
+	hlist_for_each_entry_rcu(br_uc, h, head, ip_hlist) {
+#else
+	hlist_for_each_entry_rcu(br_uc, head, ip_hlist) {
+#endif
+		if (br_uc->ip_addr == ip_addr) {
+			return br_uc;
+		}
+	}
+
+	return NULL;
+}
+
+/* Currently unused static function qdrv_br_uc_find_by_mac - remove */
+/* (compiled out below; kept only as a reference for the MAC-keyed lookup) */
+#if 0
+/*
+ * Find a unicast entry by MAC address
+ * Assumes the unicast table is locked for read or write.
+ */
+static struct qdrv_br_uc *
+qdrv_br_uc_find_by_mac(struct qdrv_br *br, unsigned char *mac_addr)
+{
+	struct hlist_head *head = &br->uc_mac_hash[qdrv_br_mac_hash(mac_addr)];
+	struct qdrv_br_uc *br_uc;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct hlist_node *h;
+	hlist_for_each_entry_rcu(br_uc, h, head, mac_hlist) {
+#else
+	hlist_for_each_entry_rcu(br_uc, head, mac_hlist) {
+#endif
+		if (IEEE80211_ADDR_EQ(br_uc->mac_addr, mac_addr)) {
+			return br_uc;
+		}
+	}
+
+	return NULL;
+}
+#endif
+
+#if defined(CONFIG_IPV6)
+/*
+ * Find a unicast entry by IPv6 address
+ * Assumes the unicast table is locked for read or write
+ *
+ * Full 128-bit address compare; returns NULL if not found.
+ */
+static struct qdrv_br_ipv6_uc *
+qdrv_br_ipv6uc_find_by_ip(struct qdrv_br *br, const struct in6_addr *ipv6_addr)
+{
+	struct hlist_head *head = &br->uc_ipv6_hash[qdrv_br_ipv6_hash(ipv6_addr)];
+	struct qdrv_br_ipv6_uc *br_ipv6_uc;
+
+/* Older kernels pass an explicit iterator node to hlist_for_each_entry_rcu() */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct hlist_node *h;
+	hlist_for_each_entry_rcu(br_ipv6_uc, h, head, ipv6_hlist) {
+#else
+	hlist_for_each_entry_rcu(br_ipv6_uc, head, ipv6_hlist) {
+#endif
+		if (memcmp(&br_ipv6_uc->ipv6_addr, ipv6_addr, sizeof(struct in6_addr)) == 0) {
+			return br_ipv6_uc;
+		}
+	}
+
+	return NULL;
+}
+#endif
+
+/*
+ * Add a multicast client entry to a multicast entry
+ * Assumes the multicast table is locked for write.
+ *
+ * Silently drops the add (with an error log) if the shared multicast
+ * counter is at its limit or the slab allocation fails; the caller does
+ * not need to distinguish these cases.
+ */
+static void
+qdrv_br_mc_client_add(struct qdrv_br *br, struct qdrv_br_mc *br_mc,
+			unsigned char *mac_addr)
+{
+	struct qdrv_br_mc_client *br_mc_client;
+
+	/* mc_tot is shared between address entries and client entries */
+	if (atomic_read(&br->mc_tot) >= QDRV_BR_MCAST_MAX) {
+		DBGPRINTF_E("multicast table is full, can't add %pM\n",
+			mac_addr);
+		return;
+	}
+
+	/* GFP_ATOMIC: may be called from the packet path */
+	br_mc_client = kmem_cache_alloc(br_mc_client_cache, GFP_ATOMIC);
+	if (br_mc_client == NULL) {
+		DBGPRINTF_E("failed to allocate multicast client entry %pM\n",
+			mac_addr);
+		return;
+	}
+
+	memset(br_mc_client, 0, sizeof(*br_mc_client));
+	atomic_inc(&br->mc_tot);
+	atomic_inc(&br_mc->mc_client_tot);
+	IEEE80211_ADDR_COPY(br_mc_client->mac_addr, mac_addr);
+	hlist_add_head_rcu(&br_mc_client->mc_client_hlist,
+			&br_mc->mc_client_hash[qdrv_br_mac_hash(mac_addr)]);
+}
+
+/*
+ * Add multicast entry
+ * Assumes the multicast table is locked for write.
+ *
+ * @mc_ip_addr: group address in network byte order.
+ * Returns the new entry, or NULL if the table is full or allocation failed.
+ */
+static struct qdrv_br_mc *
+qdrv_br_mc_add(struct qdrv_br *br, __be32 mc_ip_addr)
+{
+	struct qdrv_br_mc *br_mc;
+
+	/* mc_tot is shared between address entries and client entries */
+	if (atomic_read(&br->mc_tot) >= QDRV_BR_MCAST_MAX) {
+		DBGPRINTF_E("multicast table is full, cant add "
+			NIPQUAD_FMT "\n",
+			NIPQUAD(mc_ip_addr));
+		return NULL;
+	}
+
+	/* GFP_ATOMIC: may be called from the packet path */
+	br_mc = kmem_cache_alloc(br_mc_cache, GFP_ATOMIC);
+	if (br_mc == NULL) {
+		DBGPRINTF_E("failed to allocate multicast entry "
+			NIPQUAD_FMT "\n",
+			NIPQUAD(mc_ip_addr));
+		return NULL;
+	}
+
+	memset(br_mc, 0, sizeof(*br_mc));
+
+	atomic_inc(&br->mc_tot);
+	br_mc->mc_ip_addr = mc_ip_addr;
+	hlist_add_head_rcu(&br_mc->mc_hlist, &br->mc_ip_hash[qdrv_br_ip_hash(mc_ip_addr)]);
+
+	return br_mc;
+}
+
+/*
+ * Add a unicast entry
+ * Assumes the unicast table is locked for write.
+ *
+ * An IP address maps to exactly one MAC: a stale entry for the same IP
+ * is deleted first.  The capacity check is only applied when no stale
+ * entry was replaced, so a replacement can never fail on a full table.
+ */
+static void
+qdrv_br_uc_add(struct qdrv_br *br, unsigned char *mac_addr, __be32 ip_addr)
+{
+	struct qdrv_br_uc *br_uc;
+
+	/* IP address must be unique.  Remove any stale entry. */
+	br_uc = qdrv_br_uc_find_by_ip(br, ip_addr);
+	if (br_uc != NULL) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE,
+			"delete old entry mac=%pM ip=" NIPQUAD_FMT "\n",
+			br_uc->mac_addr, NIPQUAD(ip_addr));
+		qdrv_br_uc_delete(br, br_uc);
+	} else {
+		if (atomic_read(&br->uc_tot) >= QDRV_BR_ENT_MAX) {
+			DBGPRINTF_LIMIT_E("unicast table is full, can't add %pM\n",
+				mac_addr);
+			return;
+		}
+	}
+
+	/* GFP_ATOMIC: may be called from the packet path */
+	br_uc = kmem_cache_alloc(br_uc_cache, GFP_ATOMIC);
+	if (br_uc == NULL) {
+		DBGPRINTF_E("failed to allocate unicast entry %pM\n",
+			mac_addr);
+		return;
+	}
+
+	memset(br_uc, 0, sizeof(*br_uc));
+
+	atomic_inc(&br->uc_tot);
+	IEEE80211_ADDR_COPY(br_uc->mac_addr, mac_addr);
+	br_uc->ip_addr = ip_addr;
+	/* Entry is reachable via both the MAC-keyed and IP-keyed hashes */
+	hlist_add_head_rcu(&br_uc->mac_hlist, &br->uc_mac_hash[qdrv_br_mac_hash(mac_addr)]);
+
+	hlist_add_head_rcu(&br_uc->ip_hlist, &br->uc_ip_hash[qdrv_br_ip_hash(ip_addr)]);
+}
+
+#if defined(CONFIG_IPV6)
+/*
+ * Add an IPv6 unicast entry
+ * Assumes the unicast table is locked for write.
+ *
+ * Mirrors qdrv_br_uc_add(): a stale entry for the same IPv6 address is
+ * replaced, and the capacity limit only applies to genuinely new entries.
+ */
+static void
+qdrv_br_ipv6uc_add(struct qdrv_br *br, const unsigned char *mac_addr, const struct in6_addr *ipv6_addr)
+{
+	struct qdrv_br_ipv6_uc *br_ipv6_uc;
+
+	/* IP address must be unique.  Remove any stale entry. */
+	br_ipv6_uc = qdrv_br_ipv6uc_find_by_ip(br, ipv6_addr);
+	if (br_ipv6_uc != NULL) {
+		qdrv_br_ipv6uc_delete(br, br_ipv6_uc);
+	} else {
+		if (atomic_read(&br->uc_ipv6_tot) >= QDRV_BR_ENT_MAX) {
+			DBGPRINTF_LIMIT_E("unicast table is full, can't add %pM\n",
+				mac_addr);
+			return;
+		}
+	}
+
+	/* GFP_ATOMIC: may be called from the packet path */
+	br_ipv6_uc = kmem_cache_alloc(br_ipv6uc_cache, GFP_ATOMIC);
+	if (br_ipv6_uc == NULL) {
+		DBGPRINTF_E("failed to allocate unicast entry %pM\n",
+			mac_addr);
+		return;
+	}
+
+	memset(br_ipv6_uc, 0, sizeof(*br_ipv6_uc));
+
+	atomic_inc(&br->uc_ipv6_tot);
+	IEEE80211_ADDR_COPY(br_ipv6_uc->mac_addr, mac_addr);
+	memcpy(&br_ipv6_uc->ipv6_addr, ipv6_addr, sizeof(br_ipv6_uc->ipv6_addr));
+
+	/* IPv6 entries are only hashed by address (no MAC-keyed chain is used) */
+	hlist_add_head_rcu(&br_ipv6_uc->ipv6_hlist, &br->uc_ipv6_hash[qdrv_br_ipv6_hash(ipv6_addr)]);
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+		"mapping %pI6 to %pM\n", &br_ipv6_uc->ipv6_addr, br_ipv6_uc->mac_addr);
+}
+#endif
+
+/*
+ * Update a multicast entry from an upstream IGMP packet
+ *
+ * For an IGMP JOIN, create an entry for the multicast address if it is not
+ * already present, then add the client to the multicast address entry.
+ *
+ * For an IGMP LEAVE, delete the client from the multicast entry.  If no clients
+ * are left under the multicast entry, delete the multicast entry.  If other clients
+ * remain under the multicast entry, notify the caller so that the LEAVE message
+ * is dropped.
+ *
+ * Returns 1 if leaving and other clients are registered with the multicast address,
+ * otherwise 0.
+ */
+static int
+qdrv_br_mc_update(struct qdrv_br *br, int op, __be32 mc_ip_addr, unsigned char *client_addr)
+{
+	struct qdrv_br_mc *br_mc;
+	struct qdrv_br_mc_client *br_mc_client;
+	int rc = 0;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+		"ip=" NIPQUAD_FMT " client=%pM op=%s\n",
+		NIPQUAD(mc_ip_addr), client_addr,
+		(op == QDRV_IGMP_OP_JOIN) ? "join" : "leave");
+
+	qdrv_br_lock_mc(br);
+
+	/* Create the group entry on demand, but only for a JOIN */
+	br_mc = qdrv_br_mc_find(br, mc_ip_addr);
+	if ((br_mc == NULL) &&
+			(op == QDRV_IGMP_OP_JOIN)) {
+		br_mc = qdrv_br_mc_add(br, mc_ip_addr);
+	}
+	if (br_mc == NULL) {
+		/* Either malloc failed or leaving and there is no mc entry to leave */
+		if (op == QDRV_IGMP_OP_LEAVE) {
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+				"client=%pM leaving mc " NIPQUAD_FMT " but mc not in table\n",
+				client_addr, NIPQUAD(mc_ip_addr));
+		}
+		qdrv_br_unlock_mc(br);
+		return 0;
+	}
+
+	br_mc_client = qdrv_br_mc_client_find(br_mc, client_addr);
+	switch (op) {
+	case QDRV_IGMP_OP_JOIN:
+		if (br_mc_client == NULL) {
+			qdrv_br_mc_client_add(br, br_mc, client_addr);
+		}
+		break;
+	case QDRV_IGMP_OP_LEAVE:
+		if (br_mc_client == NULL) {
+			/* This can happen, for example, if the STA rebooted after the JOIN */
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+				"client=%pM leaving mc " NIPQUAD_FMT " but not in table\n",
+				client_addr, NIPQUAD(mc_ip_addr));
+			/* br_mc is non-NULL here (checked above): group still has clients */
+			if (br_mc != NULL) {
+				rc = 1;
+			}
+		} else {
+			qdrv_br_mc_client_delete(br, br_mc, br_mc_client);
+			if (atomic_read(&br_mc->mc_client_tot) < 1) {
+				qdrv_br_mc_delete(br, br_mc);
+			} else {
+				/* Other subscribers remain: tell caller to drop the LEAVE */
+				rc = 1;
+			}
+		}
+		/* no break needed: last case in the switch */
+	}
+
+	qdrv_br_unlock_mc(br);
+
+	return rc;
+}
+
+/*
+ * Create or update an IPv4 unicast entry
+ *
+ * If the IP is unknown a new entry is added; if it is known but now maps
+ * to a different MAC, the entry is re-hashed under the new MAC.  The
+ * mapping is always propagated to the software forwarding table.
+ */
+static void
+qdrv_br_uc_update(struct qdrv_br *br, __be32 ip_addr, unsigned char *mac_addr)
+{
+	struct qdrv_br_uc *br_uc;
+
+	qdrv_br_uc_lock(br);
+
+	br_uc = qdrv_br_uc_find_by_ip(br, ip_addr);
+	if (br_uc == NULL) {
+		qdrv_br_uc_add(br, mac_addr, ip_addr);
+	} else if (!IEEE80211_ADDR_EQ(br_uc->mac_addr, mac_addr)) {
+		/* Update the entry if its MAC address has changed */
+		hlist_del_rcu(&br_uc->mac_hlist);
+		IEEE80211_ADDR_COPY(br_uc->mac_addr, mac_addr);
+		hlist_add_head_rcu(&br_uc->mac_hlist,
+				   &br->uc_mac_hash[qdrv_br_mac_hash(mac_addr)]);
+	}
+
+	fwt_sw_update_uc_ipmac(mac_addr, (uint8_t *)&ip_addr, __constant_htons(ETH_P_IP));
+
+	qdrv_br_uc_unlock(br);
+}
+
+#if defined(CONFIG_IPV6)
+/*
+ * Create or update an IPv6 unicast entry
+ *
+ * Mirrors qdrv_br_uc_update() for IPv6.  Note the entry is re-added to
+ * the same (address-keyed) chain after a MAC change; the unlink/relink
+ * keeps RCU readers safe while the MAC field is rewritten.
+ */
+static void
+qdrv_br_ipv6uc_update(struct qdrv_br *br, const struct in6_addr *ipv6_addr, const unsigned char *mac_addr)
+{
+	struct qdrv_br_ipv6_uc *br_ipv6_uc;
+
+	qdrv_br_uc_lock(br);
+
+	br_ipv6_uc = qdrv_br_ipv6uc_find_by_ip(br, ipv6_addr);
+	if (!br_ipv6_uc) {
+		qdrv_br_ipv6uc_add(br, mac_addr, ipv6_addr);
+	} else {
+		/* Update the entry if its MAC address has changed */
+/* ether_addr_equal() returns true on match; compare_ether_addr() returned 0 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		if (ether_addr_equal(br_ipv6_uc->mac_addr, mac_addr) == 0) {
+#else
+		if (compare_ether_addr(br_ipv6_uc->mac_addr, mac_addr) != 0) {
+#endif
+			hlist_del_rcu(&br_ipv6_uc->ipv6_hlist);
+			memcpy(br_ipv6_uc->mac_addr, mac_addr, ETH_ALEN);
+			hlist_add_head_rcu(&br_ipv6_uc->ipv6_hlist,
+				&br->uc_ipv6_hash[qdrv_br_ipv6_hash(ipv6_addr)]);
+		}
+	}
+
+	fwt_sw_update_uc_ipmac(mac_addr, (uint8_t *)ipv6_addr, __constant_htons(ETH_P_IPV6));
+
+	qdrv_br_uc_unlock(br);
+}
+#endif
+
+/*
+ * Update a unicast entry from an upstream ARP packet
+ *
+ * Learns the sender's IP->MAC mapping from the ARP sender fields.
+ * Broadcast, multicast and INADDR_ANY sender addresses are ignored.
+ */
+void
+qdrv_br_uc_update_from_arp(struct qdrv_br *br, struct ether_arp *arp)
+{
+	__be32 ip_addr;
+
+	/* arp_spa is a byte array, so the load may be unaligned */
+	ip_addr = get_unaligned((u32 *)&arp->arp_spa);
+
+	if (!qdrv_br_ip_is_unicast(ip_addr)) {
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+		"mac=%pM ip=" NIPQUAD_FMT "\n",
+		arp->arp_sha, NIPQUAD(ip_addr));
+
+	qdrv_br_uc_update(br, ip_addr, arp->arp_sha);
+}
+
+/*
+ * Update a multicast entry from an upstream IGMP packet
+ *
+ * Returns 0 if OK, or 1 if the packet should be dropped.
+ *
+ * A client station in 3-address mode forwards multicast subscriptions to the
+ * AP with source address set to the station's Wifi MAC address, so the AP sees
+ * only one subscription even when multiple clients subscribe.  This
+ * function's sole purpose is to keep track of the downstream clients that
+ * subscribe to a given multicast address, and to only forward a delete
+ * request when the last client leaves.
+ *
+ * Note: A combination of join and leave requests in a single message (probably
+ * never done in practice?) may not be handled correctly, since the decision to
+ * forward or drop the message can only be made once.
+ */
+int
+qdrv_br_mc_update_from_igmp(struct qdrv_br *br, struct sk_buff *skb,
+				struct ether_header *eh, struct iphdr *iphdr_p)
+{
+	/* IGMP header follows the (variable-length) IP header */
+	const struct igmphdr *igmp_p = (struct igmphdr *)
+		((unsigned int *) iphdr_p + iphdr_p->ihl);
+	const struct igmpv3_report *igmpv3_p = (struct igmpv3_report *)
+		((unsigned int *) iphdr_p + iphdr_p->ihl);
+
+	__be32 mc_ip_addr = 0;
+	int num = -1;		/* IGMPv3 group record count; -1 = single-record message */
+	int n = 0;		/* current IGMPv3 group record index */
+	int op;
+	int rc = 0;
+
+	/*
+	 * Reject a packet too short to hold a full IGMP header.
+	 * skb->data + skb->len is one past the last valid byte, so the
+	 * comparison must not include a "+ 1" slack (which would accept a
+	 * packet one byte short); this matches the IP/IPv6 size checks in
+	 * qdrv_br_set_dest_mac().
+	 */
+	if ((skb->data + skb->len) < (unsigned char *)(igmp_p + 1)) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+			"IGMP packet is too small (%p/%p)\n",
+			skb->data + skb->len, igmp_p + 1);
+		return 0;
+	}
+
+	do {
+		op = QDRV_IGMP_OP_NONE;
+
+		switch(igmp_p->type) {
+		case IGMP_HOST_MEMBERSHIP_REPORT:
+			op = QDRV_IGMP_OP_JOIN;
+			mc_ip_addr = get_unaligned((u32 *)&igmp_p->group);
+			break;
+		case IGMPV2_HOST_MEMBERSHIP_REPORT:
+			op = QDRV_IGMP_OP_JOIN;
+			mc_ip_addr = get_unaligned((u32 *)&igmp_p->group);
+			break;
+		case IGMP_HOST_LEAVE_MESSAGE:
+			op = QDRV_IGMP_OP_LEAVE;
+			mc_ip_addr = get_unaligned((u32 *)&igmp_p->group);
+			break;
+		case IGMPV3_HOST_MEMBERSHIP_REPORT:
+			/*
+			 * NOTE(review): grec[n] assumes fixed-size group records.
+			 * Per RFC 3376 a record may carry source addresses and
+			 * aux data, making it variable length — records after a
+			 * non-empty one would be misread.  TODO: walk records by
+			 * their encoded size and bound-check against skb->len.
+			 */
+			mc_ip_addr = get_unaligned((u32 *)&igmpv3_p->grec[n].grec_mca);
+			if (num < 0) {
+				num = ntohs(igmpv3_p->ngrec);
+			}
+			/* EXCLUDE-none == join the group; INCLUDE-none == leave */
+			if ((igmpv3_p->grec[n].grec_type == IGMPV3_CHANGE_TO_EXCLUDE) ||
+					(igmpv3_p->grec[n].grec_type == IGMPV3_MODE_IS_EXCLUDE)) {
+				op = QDRV_IGMP_OP_JOIN;
+			} else if ((igmpv3_p->grec[n].grec_type == IGMPV3_CHANGE_TO_INCLUDE) ||
+					(igmpv3_p->grec[n].grec_type == IGMPV3_MODE_IS_INCLUDE)) {
+				op = QDRV_IGMP_OP_LEAVE;
+			}
+			n++;
+			break;
+		default:
+			break;
+		}
+
+		if (op > QDRV_IGMP_OP_NONE) {
+			/* rc will be 1 if leaving and the multicast entry still has clients */
+			rc = qdrv_br_mc_update(br, op, mc_ip_addr, eh->ether_shost);
+		}
+	} while (--num > 0);
+
+	/* Last operation in packet determines whether it will be dropped */
+	return rc;
+}
+
+/*
+ * Update a unicast entry from an upstream DHCP packet
+ *
+ * Also forces the DHCP broadcast flag on client requests so that the
+ * server's reply reaches us even through APs that do not relay unicast
+ * DHCP responses, recomputing the UDP checksum when one is present.
+ */
+void
+qdrv_br_uc_update_from_dhcp(struct qdrv_br *br, struct sk_buff *skb, struct iphdr *iphdr_p)
+{
+	struct udp_dhcp_packet *dhcpmsg = (struct udp_dhcp_packet *)iphdr_p;
+	struct udphdr *uh = &dhcpmsg->udphdr_p;
+	__be32 ip_addr;
+	__wsum csum;
+
+	/* Is this a DHCP packet? */
+	if ((skb->len < ((u8 *)iphdr_p - skb->data) + sizeof(*dhcpmsg)) ||
+			(uh->dest != __constant_htons(DHCPSERVER_PORT)) ||
+			(dhcpmsg->dhcp_msg.op != BOOTREQUEST) ||
+			(dhcpmsg->dhcp_msg.htype != ARPHRD_ETHER)) {
+		return;
+	}
+
+	/*
+	 * 3rd party APs may not forward unicast DHCP responses to us, so set the
+	 * broadcast flag and recompute the UDP checksum.
+	 */
+	if (!(dhcpmsg->dhcp_msg.flags & __constant_htons(DHCP_BROADCAST_FLAG))) {
+
+		dhcpmsg->dhcp_msg.flags |= __constant_htons(DHCP_BROADCAST_FLAG);
+
+		/* Recalculate the UDP checksum (0 means "no checksum" - skip) */
+		if (uh->check != 0) {
+			uh->check = 0;
+			csum = csum_partial(uh, ntohs(uh->len), 0);
+
+			/* Add pseudo IP header checksum */
+			uh->check = csum_tcpudp_magic(iphdr_p->saddr, iphdr_p->daddr,
+					ntohs(uh->len), iphdr_p->protocol, csum);
+
+			/* 0 is converted to -1 */
+			if (uh->check == 0) {
+				uh->check = CSUM_MANGLED_0;
+			}
+		}
+	}
+
+	/*
+	 * Assume that any record containing a valid client IP address in the bootp structure
+	 * is valid.  Ideally we should parse the DHCP structure that follows
+	 * the BOOTP structure for message type 3, but this should suffice.
+	 */
+	ip_addr = get_unaligned((u32 *)&dhcpmsg->dhcp_msg.ciaddr);
+	if (qdrv_br_ip_is_unicast(ip_addr)) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+			"source=%d dest=%d op=%02x ip=" NIPQUAD_FMT " mac=%pM\n",
+			dhcpmsg->udphdr_p.source, dhcpmsg->udphdr_p.dest,
+			dhcpmsg->dhcp_msg.op, NIPQUAD(ip_addr),
+			dhcpmsg->dhcp_msg.chaddr);
+		qdrv_br_uc_update(br, ip_addr, dhcpmsg->dhcp_msg.chaddr);
+	}
+}
+
+#if defined(CONFIG_IPV6)
+/*
+ * Update an IPv6 unicast entry from an upstream ICMPv6 packet
+ *
+ * Learns the source IPv6 -> MAC mapping from Neighbour Discovery
+ * (solicitation/advertisement) messages only.
+ */
+void qdrv_br_ipv6uc_update_from_icmpv6(struct qdrv_br *br, const struct ethhdr *eth,
+			const struct ipv6hdr *ipv6h, const struct icmp6hdr *icmpv6h)
+{
+	static const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+
+	if (icmpv6h->icmp6_type != NDISC_NEIGHBOUR_SOLICITATION
+			&& icmpv6h->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+		return;
+
+	/*
+	 * RFC4861 section 4.3: source address field of an ICMPv6 neighbor solicitation
+	 * can be "unspecified" if Duplicate Address Detection is in progress
+	 */
+	if (memcmp(&in6addr_any, &ipv6h->saddr, sizeof(struct in6_addr)) == 0)
+		return;
+
+	qdrv_br_ipv6uc_update(br, &ipv6h->saddr, eth->h_source);
+}
+#endif
+
+/*
+ * Rewrite the Ethernet destination MAC from the bridge table, keyed by
+ * destination IPv4 address.
+ *
+ * Returns 0 if the MAC was rewritten, 1 if the address is non-unicast
+ * or unknown (caller leaves the frame unchanged).
+ */
+static int
+qdrv_br_ipv4_set_dest_mac(struct qdrv_br *br, struct ether_header *eh, __be32 ip_addr)
+{
+	struct qdrv_br_uc *br_uc;
+
+	if (!qdrv_br_ip_is_unicast(ip_addr))
+		return 1;
+
+	/* Lockless lookup: entries are only freed after an RCU grace period */
+	rcu_read_lock();
+
+	br_uc = qdrv_br_uc_find_by_ip(br, ip_addr);
+	if (!br_uc) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+			"IP address %pI4 not found in bridge table\n", &ip_addr);
+		rcu_read_unlock();
+		return 1;
+	}
+
+	IEEE80211_ADDR_COPY(eh->ether_dhost, br_uc->mac_addr);
+
+	rcu_read_unlock();
+
+	return 0;
+}
+
+#if defined(CONFIG_IPV6)
+/*
+ * Rewrite the Ethernet destination MAC from the bridge table, keyed by
+ * destination IPv6 address.
+ *
+ * Returns 0 if the MAC was rewritten, 1 if the address is unknown.
+ */
+static int
+qdrv_br_ipv6_set_dest_mac(struct qdrv_br *br, struct ether_header *eh, const struct in6_addr *ipv6_addr)
+{
+	struct qdrv_br_ipv6_uc *br_ipv6_uc;
+
+	/* Lockless lookup: entries are only freed after an RCU grace period */
+	rcu_read_lock();
+
+	br_ipv6_uc = qdrv_br_ipv6uc_find_by_ip(br, ipv6_addr);
+	if (!br_ipv6_uc) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+			"IP address %pI6 not found in bridge table\n", ipv6_addr);
+		rcu_read_unlock();
+		return 1;
+	}
+
+	IEEE80211_ADDR_COPY(eh->ether_dhost, br_ipv6_uc->mac_addr);
+
+	rcu_read_unlock();
+
+	return 0;
+}
+#endif
+
+/*
+ * Replace the destination MAC address in a downstream packet
+ *
+ * For multicast IP (224.0.0.0 to 239.0.0.0), the MAC address may have been
+ * changed to the station's unicast MAC address by the AP.  Convert it back to
+ * an IPv4 Ethernet MAC address as per RFC 1112, section 6.4: place the
+ * low-order 23-bits of the IP address into the low-order 23 bits of the
+ * Ethernet multicast address 01-00-5E-00-00-00.
+
+ * For unicast IP, use the qdrv bridge table.
+ *
+ * Returns 0 if OK, or 1 if the MAC was not updated.
+ */
+int
+qdrv_br_set_dest_mac(struct qdrv_br *br, struct ether_header *eh, const struct sk_buff *skb)
+{
+	struct iphdr *iphdr_p;
+	struct ether_arp *arp_p = NULL;
+	__be32 ip_addr = INADDR_ANY;
+	unsigned char *ip_addr_p = (unsigned char *)&ip_addr;
+	char mc_pref[] = {0x01, 0x00, 0x5e};
+#ifdef CONFIG_IPV6
+	struct ipv6hdr *ip6hdr_p;
+	struct in6_addr *ip6addr_p = NULL;
+	char mc6_pref[] = {0x33, 0x33};
+#endif
+	void *l3hdr;
+	uint16_t ether_type;
+
+	/*
+	 * Step over an 802.1Q tag if present: ether_type is a uint16_t *,
+	 * so "+ 2" skips 4 bytes (TCI + inner EtherType offset) to land on
+	 * the encapsulated EtherType at frame offset 16.
+	 */
+	if (eh->ether_type == __constant_htons(ETH_P_8021Q)) {
+		ether_type = *(&eh->ether_type + 2);
+		l3hdr = &eh->ether_type + 3;
+	} else {
+		ether_type = eh->ether_type;
+		l3hdr = eh + 1;
+	}
+
+	if (ether_type == __constant_htons(ETH_P_IP)) {
+		iphdr_p = (struct iphdr *)l3hdr;
+		if ((skb->data + skb->len) < (unsigned char *)(iphdr_p + 1)) {
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+				"IP packet is too small (%p/%p)\n",
+				skb->data + skb->len, iphdr_p + 1);
+			return 1;
+		}
+		ip_addr = get_unaligned((u32 *)&iphdr_p->daddr);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_BRIDGE,
+			"ip proto=%u smac=%pM sip=" NIPQUAD_FMT " dip=" NIPQUAD_FMT "\n",
+			iphdr_p->protocol, eh->ether_shost,
+			NIPQUAD(iphdr_p->saddr), NIPQUAD(ip_addr));
+
+		/* Class D destination: rebuild the RFC 1112 multicast MAC */
+		if ((ip_addr_p[0] >> 4) == QDRV_IP_MCAST_PREF) {
+			eh->ether_dhost[0] = mc_pref[0];
+			eh->ether_dhost[1] = mc_pref[1];
+			eh->ether_dhost[2] = mc_pref[2];
+			eh->ether_dhost[3] = ip_addr_p[1] & 0x7F;
+			eh->ether_dhost[4] = ip_addr_p[2];
+			eh->ether_dhost[5] = ip_addr_p[3];
+			return 0;
+		}
+
+		return qdrv_br_ipv4_set_dest_mac(br, eh, ip_addr);
+#if defined(CONFIG_IPV6)
+	} else if (ether_type == __constant_htons(ETH_P_IPV6)) {
+		ip6hdr_p = (struct ipv6hdr *)l3hdr;
+		if ((skb->data + skb->len) < (unsigned char *)(ip6hdr_p + 1)) {
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+				"IP packet is too small (%p/%p)\n",
+				skb->data + skb->len, ip6hdr_p + 1);
+			return 1;
+		}
+
+		/*
+		 * IPv6 address map to MAC address
+		 * First two octets are the value 0x3333 and
+		 * last four octets are the last four octets of ip.
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		 * |0 0 1 1 0 0 1 1|0 0 1 1 0 0 1 1|
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		 * |    IP6[12]    |    IP6[13]    |
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		 * |    IP6[14]    |    IP6[15]    |
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		*/
+		ip6addr_p = &(ip6hdr_p->daddr);
+		if (ip6addr_p->s6_addr[0] == 0xFF) {
+			eh->ether_dhost[0] = mc6_pref[0];
+			eh->ether_dhost[1] = mc6_pref[1];
+			eh->ether_dhost[2] = ip6addr_p->s6_addr[12];
+			eh->ether_dhost[3] = ip6addr_p->s6_addr[13];
+			eh->ether_dhost[4] = ip6addr_p->s6_addr[14];
+			eh->ether_dhost[5] = ip6addr_p->s6_addr[15];
+			return 0;
+		}
+
+		return qdrv_br_ipv6_set_dest_mac(br, eh, ip6addr_p);
+#endif
+	} else if (ether_type == __constant_htons(ETH_P_ARP)) {
+		arp_p = (struct ether_arp *)l3hdr;
+		/*
+		 * skb->data + skb->len is one past the last valid byte; the
+		 * former "+ 1" slack here was an off-by-one that accepted an
+		 * ARP packet one byte short (the IP/IPv6 checks above use the
+		 * correct form).
+		 */
+		if ((skb->data + skb->len) < (unsigned char *)(arp_p + 1)) {
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+				"ARP packet is too small (%p/%p)\n",
+				skb->data + skb->len, arp_p + 1);
+			return 1;
+		}
+		ip_addr = get_unaligned((u32 *)&arp_p->arp_tpa);
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE,
+			"ARP proto=%04x op=%04x sha=%pM tha=%pM "
+			"sip=" NIPQUAD_FMT " dip=" NIPQUAD_FMT "\n",
+			arp_p->ea_hdr.ar_pro, arp_p->ea_hdr.ar_op,
+			arp_p->arp_sha, arp_p->arp_tha,
+			NIPQUAD(arp_p->arp_spa), NIPQUAD(ip_addr));
+
+		/* Keep the ARP target hardware address in sync with the frame */
+		if (qdrv_br_ipv4_set_dest_mac(br, eh, ip_addr) == 0) {
+			IEEE80211_ADDR_COPY(arp_p->arp_tha, eh->ether_dhost);
+			return 0;
+		}
+
+		return 1;
+	} else {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE,
+			"Ethertype 0x%04x not supported\n", ntohs(ether_type));
+		return 1;
+	}
+}
+
+/*
+ * Display all entries in a vap's bridge table
+ *
+ * Dumps the IPv4 unicast, IPv6 unicast (when CONFIG_IPV6) and multicast
+ * tables to the kernel log.  Traversal is lockless under rcu_read_lock().
+ *
+ * Fixes vs. the previous revision:
+ *  - the IPv6 dump was only compiled for kernels < 4.7, so on newer
+ *    kernels the IPv6 table was silently never printed;
+ *  - the IPv6 section referenced br->uc_ipv6_hash, which only exists
+ *    when CONFIG_IPV6 is defined, so the whole section is now guarded.
+ */
+void
+qdrv_br_show(struct qdrv_br *br)
+{
+	struct qdrv_br_uc *br_uc;
+#if defined(CONFIG_IPV6)
+	struct qdrv_br_ipv6_uc *br_ipv6_uc;
+#endif
+	struct qdrv_br_mc *br_mc;
+	struct qdrv_br_mc_client *br_mc_client;
+	int i;
+	int j;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct hlist_node *h, *h1;
+#endif
+
+	printk("Client MAC          IPv4 Address\n");
+
+	rcu_read_lock();
+	for (i = 0; i < QDRV_BR_IP_HASH_SIZE; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		hlist_for_each_entry_rcu(br_uc, h, &br->uc_ip_hash[i], ip_hlist) {
+#else
+		hlist_for_each_entry_rcu(br_uc, &br->uc_ip_hash[i], ip_hlist) {
+#endif
+		printk("%pM   %pI4\n",
+			br_uc->mac_addr, &br_uc->ip_addr);
+		}
+	}
+	rcu_read_unlock();
+
+#if defined(CONFIG_IPV6)
+	printk("\n");
+	printk("Client MAC          IPv6 Address\n");
+
+	rcu_read_lock();
+	for (i = 0; i < QDRV_BR_IP_HASH_SIZE; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		hlist_for_each_entry_rcu(br_ipv6_uc, h, &br->uc_ipv6_hash[i], ipv6_hlist) {
+#else
+		hlist_for_each_entry_rcu(br_ipv6_uc, &br->uc_ipv6_hash[i], ipv6_hlist) {
+#endif
+			printk("%pM  %pI6\n",
+				br_ipv6_uc->mac_addr, &br_ipv6_uc->ipv6_addr);
+		}
+	}
+	rcu_read_unlock();
+#endif
+
+	printk("\n");
+	printk("Multicast IP        Client MAC\n");
+
+	rcu_read_lock();
+	for (i = 0; i < QDRV_BR_IP_HASH_SIZE; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		hlist_for_each_entry_rcu(br_mc, h, &br->mc_ip_hash[i], mc_hlist) {
+#else
+		hlist_for_each_entry_rcu(br_mc, &br->mc_ip_hash[i], mc_hlist) {
+#endif
+
+			printk(NIPQUAD_FMT "\n", NIPQUAD(br_mc->mc_ip_addr));
+
+			for (j = 0; j < QDRV_BR_MAC_HASH_SIZE; j++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+				hlist_for_each_entry_rcu(br_mc_client, h1,
+							 &br_mc->mc_client_hash[j],
+							 mc_client_hlist) {
+#else
+				hlist_for_each_entry_rcu(br_mc_client,
+							 &br_mc->mc_client_hash[j],
+							 mc_client_hlist) {
+#endif
+					printk("                    %pM\n",
+						br_mc_client->mac_addr);
+
+				}
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	printk("\n");
+}
+
+/*
+ * Clear the qdrv bridge table for a vap
+ *
+ * Removes every unicast entry, then every multicast entry together with
+ * its client entries, resetting both totals.  Entry memory is released
+ * asynchronously via RCU callbacks scheduled by the delete helpers.
+ */
+void
+qdrv_br_clear(struct qdrv_br *br)
+{
+	struct hlist_node *h;
+	struct hlist_node *h1;
+	struct qdrv_br_uc *br_uc;
+	struct qdrv_br_mc *br_mc;
+	struct qdrv_br_mc_client *br_mc_client;
+	int i;
+	int j;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct hlist_node *h2, *h3;
+#endif
+
+	qdrv_br_uc_lock(br);
+
+	/* _safe variants: entries are unlinked while iterating */
+	for (i = 0; i < QDRV_BR_MAC_HASH_SIZE; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		hlist_for_each_entry_safe(br_uc, h, h1, &br->uc_mac_hash[i], mac_hlist) {
+#else
+		hlist_for_each_entry_safe(br_uc, h, &br->uc_mac_hash[i], mac_hlist) {
+#endif
+			qdrv_br_uc_delete(br, br_uc);
+		}
+	}
+	atomic_set(&br->uc_tot, 0);
+
+	qdrv_br_uc_unlock(br);
+
+	qdrv_br_lock_mc(br);
+
+	for (i = 0; i < QDRV_BR_IP_HASH_SIZE; i++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		hlist_for_each_entry_safe(br_mc, h, h1, &br->mc_ip_hash[i], mc_hlist) {
+#else
+		hlist_for_each_entry_safe(br_mc, h, &br->mc_ip_hash[i], mc_hlist) {
+#endif
+			/* Delete the group's clients before the group entry itself */
+			for (j = 0; j < QDRV_BR_MAC_HASH_SIZE; j++) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+				hlist_for_each_entry_safe(br_mc_client, h2, h3,
+						&br_mc->mc_client_hash[j], mc_client_hlist) {
+#else
+				hlist_for_each_entry_safe(br_mc_client, h1,
+						&br_mc->mc_client_hash[j], mc_client_hlist) {
+#endif
+					qdrv_br_mc_client_delete(br, br_mc, br_mc_client);
+				}
+			}
+			qdrv_br_mc_delete(br, br_mc);
+		}
+	}
+	atomic_set(&br->mc_tot, 0);
+
+	qdrv_br_unlock_mc(br);
+}
+
+/*
+ * Delete the bridge table for a vap
+ *
+ * Simply clears all entries; the hash tables and locks are embedded in
+ * the qdrv_br structure and need no separate teardown.  The global slab
+ * caches are released separately by qdrv_br_exit().
+ */
+void
+qdrv_br_delete(struct qdrv_br *br)
+{
+	qdrv_br_clear(br);
+}
+
+/*
+ * Create the bridge table for a vap
+ *
+ * Initializes the per-vap locks and, on the first call only, creates the
+ * slab caches (which are global, shared by all vaps, and live until
+ * qdrv_br_exit()).
+ */
+void
+qdrv_br_create(struct qdrv_br *br)
+{
+	spin_lock_init(&br->uc_lock);
+	spin_lock_init(&br->mc_lock);
+
+	/* Cache tables are global and are never deleted */
+	if (br_uc_cache != NULL) {
+		return;
+	}
+
+	br_uc_cache = kmem_cache_create("qdrv_br_uc_cache",
+					 sizeof(struct qdrv_br_uc),
+					 0, 0, NULL);
+	br_mc_cache = kmem_cache_create("qdrv_br_mc_cache",
+					sizeof(struct qdrv_br_mc),
+					0, 0, NULL);
+	br_mc_client_cache = kmem_cache_create("qdrv_br_mc_client_cache",
+					sizeof(struct qdrv_br_mc_client),
+					0, 0, NULL);
+#if defined(CONFIG_IPV6)
+	br_ipv6uc_cache = kmem_cache_create("qdrv_br_ipv6_uc_cache",
+					sizeof(struct qdrv_br_ipv6_uc),
+					0, 0, NULL);
+	KASSERT((br_ipv6uc_cache != NULL),
+		(DBGEFMT "Could not allocate qdrv bridge cache ipv6", DBGARG));
+#endif
+
+	/*
+	 * br_ipv6uc_cache exists only under CONFIG_IPV6 and is asserted
+	 * above; referencing it here unconditionally would break the build
+	 * when IPv6 is disabled.
+	 */
+	KASSERT(((br_uc_cache != NULL) && (br_mc_cache != NULL) &&
+			(br_mc_client_cache != NULL)),
+		(DBGEFMT "Could not allocate qdrv bridge cache", DBGARG));
+}
+
+/*
+ * Release the global bridge slab caches.
+ *
+ * NOTE(review): the caches are shared by all vaps and entry frees are
+ * deferred via call_rcu(), so this presumably must only run at module
+ * unload after all vaps are gone and RCU callbacks have drained
+ * (rcu_barrier) — confirm against the caller.
+ */
+void qdrv_br_exit(struct qdrv_br *br)
+{
+	kmem_cache_destroy(br_uc_cache);
+	kmem_cache_destroy(br_mc_cache);
+	kmem_cache_destroy(br_mc_client_cache);
+#if defined(CONFIG_IPV6)
+	kmem_cache_destroy(br_ipv6uc_cache);
+#endif
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_bridge.h b/drivers/qtn/qdrv/qdrv_bridge.h
new file mode 100644
index 0000000..d79ef0a
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_bridge.h
@@ -0,0 +1,126 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_BRIDGE_H_
+#define _QDRV_BRIDGE_H_
+
+#include <linux/inetdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#if defined(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <linux/in6.h>
+#endif
+#include <net80211/if_ethersubr.h>
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+
+#define QDRV_BR_ENT_MAX			128	/* unicast clients */
+#define QDRV_BR_MCAST_MAX		128	/* multicast addresses & clients */
+#define QDRV_BR_MAC_HASH_SIZE		256
+#define QDRV_BR_IP_HASH_SIZE		256
+
+/* IPv4-over-Ethernet ARP payload layout (matches the BSD/glibc struct) */
+struct	ether_arp {
+	struct	arphdr ea_hdr;		/* fixed-size header */
+	u_int8_t arp_sha[ETH_ALEN];	/* sender hardware address */
+	u_int8_t arp_spa[4];		/* sender protocol address */
+	u_int8_t arp_tha[ETH_ALEN];	/* target hardware address */
+	u_int8_t arp_tpa[4];		/* target protocol address */
+};
+
+/*
+ * Each bridge table (one per vap) contains:
+ * - a list of downstream (unicast) clients, keyed by MAC address and IP address hash
+ * - a list of multicast addresses, keyed by IP address, containing a list of
+ *   subscribed downstream client MAC addresses
+ * All keys are implemented as hash tables.
+ */
+/* Unicast IPv4 -> MAC mapping; linked into both the MAC and IP hashes */
+struct qdrv_br_uc {
+	__be32				ip_addr;	/* client IPv4 (network order) */
+	unsigned char			mac_addr[IEEE80211_ADDR_LEN];
+	struct hlist_node		mac_hlist;	/* linkage in uc_mac_hash */
+	struct hlist_node		ip_hlist;	/* linkage in uc_ip_hash */
+	struct rcu_head			rcu;		/* deferred free via call_rcu() */
+};
+
+/* Multicast group entry with its per-group table of subscribed clients */
+struct qdrv_br_mc {
+	__be32				mc_ip_addr;	/* group IPv4 (network order) */
+	atomic_t			mc_client_tot;	/* subscribed client count */
+	struct hlist_node		mc_hlist;	/* linkage in mc_ip_hash */
+	/*
+	 * Client entries are indexed by qdrv_br_mac_hash(), so the table is
+	 * sized by the MAC hash size (was QDRV_BR_IP_HASH_SIZE; the two are
+	 * currently equal, but the size must track the hash actually used).
+	 */
+	struct hlist_head		mc_client_hash[QDRV_BR_MAC_HASH_SIZE];
+	struct rcu_head			rcu;		/* deferred free via call_rcu() */
+};
+
+/* One downstream client subscribed to a multicast group */
+struct qdrv_br_mc_client {
+	unsigned char			mac_addr[IEEE80211_ADDR_LEN];
+	struct hlist_node		mc_client_hlist;	/* linkage in mc_client_hash */
+	struct rcu_head			rcu;			/* deferred free via call_rcu() */
+};
+
+#if defined(CONFIG_IPV6)
+/* Unicast IPv6 -> MAC mapping; linked into the IPv6 address hash only */
+struct qdrv_br_ipv6_uc {
+	struct in6_addr			ipv6_addr;	/* client IPv6 address */
+	unsigned char			mac_addr[ETH_ALEN];
+	struct hlist_node		mac_hlist;	/* NOTE(review): never linked in visible code */
+	struct hlist_node		ipv6_hlist;	/* linkage in uc_ipv6_hash */
+	struct rcu_head			rcu;		/* deferred free via call_rcu() */
+};
+#endif
+
+/* Per-vap bridge table: writers take the spinlocks, readers use RCU */
+struct qdrv_br {
+	spinlock_t			uc_lock;	/* protects unicast tables (IPv4 and IPv6) */
+	spinlock_t			mc_lock;	/* protects the multicast tables */
+	unsigned long			uc_lock_flags;
+	unsigned long			mc_lock_flags;
+	atomic_t			uc_tot;		/* IPv4 unicast entry count */
+	atomic_t			mc_tot;	// Total multicast and client entries
+	struct hlist_head		uc_mac_hash[QDRV_BR_MAC_HASH_SIZE];
+	struct hlist_head		uc_ip_hash[QDRV_BR_IP_HASH_SIZE];
+	struct hlist_head		mc_ip_hash[QDRV_BR_IP_HASH_SIZE];
+#if defined(CONFIG_IPV6)
+	struct hlist_head		uc_ipv6_hash[QDRV_BR_IP_HASH_SIZE];
+	atomic_t			uc_ipv6_tot;	/* IPv6 unicast entry count */
+	spinlock_t			uc_ipv6_lock;	/* NOTE(review): unused in visible code (uc_lock covers IPv6) */
+	unsigned long			uc_ipv6_lock_flags;
+#endif
+};
+
+/* Lifecycle */
+void qdrv_br_create(struct qdrv_br *br);
+void qdrv_br_exit(struct qdrv_br *br);
+void qdrv_br_delete(struct qdrv_br *br);
+void qdrv_br_show(struct qdrv_br *br);
+void qdrv_br_clear(struct qdrv_br *br);
+/* Learning hooks, called from the upstream packet path */
+void qdrv_br_uc_update_from_dhcp(struct qdrv_br *br, struct sk_buff *skb,
+				  struct iphdr *iphdr_p);
+void qdrv_br_uc_update_from_arp(struct qdrv_br *br, struct ether_arp *arp);
+int qdrv_br_mc_update_from_igmp(struct qdrv_br *br, struct sk_buff *skb,
+				struct ether_header *eh, struct iphdr *iphdr_p);
+
+#if defined(CONFIG_IPV6)
+void qdrv_br_ipv6uc_update_from_icmpv6(struct qdrv_br *br,
+				const struct ethhdr *eth,
+				const struct ipv6hdr *ipv6h,
+				const struct icmp6hdr *icmpv6h);
+#endif
+
+/* Downstream path: rewrite the destination MAC from the bridge table */
+int qdrv_br_set_dest_mac(struct qdrv_br *br, struct ether_header *eh, const struct sk_buff *skb);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_comm.c b/drivers/qtn/qdrv/qdrv_comm.c
new file mode 100644
index 0000000..5f2d049
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_comm.c
@@ -0,0 +1,967 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <asm/hardware.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+#include "qdrv_hal.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include "qdrv_txbf.h"
+#include "qdrv_auc.h"
+#include <qtn/registers.h>
+#include <qtn/shared_params.h>
+#include <qtn/muc_phy_stats.h>
+#include <qtn/bootcfg.h>
+#include <qtn/semaphores.h>
+#include <qtn/lhost_muc_comm.h>
+
+/* Forward declarations for the hostlink (MuC-to-LHost) message handlers. */
+static int msg_attach_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_detach_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_logattach_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_temp_attach_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_devchange_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_null_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_fops_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_tkip_mic_error_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_muc_booted_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_drop_ba_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_disassoc_sta_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_rfic_caused_reboot_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_tdls_events(struct qdrv_cb *qcb, void *msg);
+static int msg_ba_add_start_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_peer_rts_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_dyn_wmm_handler(struct qdrv_cb *qcb, void *msg);
+static int msg_rate_train(struct qdrv_cb *qcb, void *msg);
+static int msg_csa_complete(struct qdrv_cb *qcb, void *msg);
+static int msg_ocac_backoff_done(struct qdrv_cb *qcb, void *msg);
+static int msg_cca_stats_handler(struct qdrv_cb *qcb, void *msg);
+
+/* Calibration file paths; indexed by the fd field of MuC fops requests. */
+static const char * const cal_filenames[] = LHOST_CAL_FILES;
+
+/*
+ * Hostlink dispatch table, indexed directly by host_ioctl->ioctl_command.
+ * Entry order must therefore match the IOCTL_HLINK_* numbering exactly —
+ * do not reorder or insert entries without updating the command codes.
+ */
+static int (*s_msg_handler_table[])(struct qdrv_cb *qcb, void *msg) =
+{
+	msg_null_handler,		/*                          */
+	msg_attach_handler,		/* IOCTL_HLINK_DEVATTACH    */
+	msg_detach_handler,		/* IOCTL_HLINK_DEVDETACH    */
+	msg_devchange_handler,		/* IOCTL_HLINK_DEVCHANGE    */
+	msg_logattach_handler,		/* IOCTL_HLINK_LOGATTACH    */
+	msg_temp_attach_handler,	/* IOCTL_HLINK_TEMP_ATTACH   */
+	msg_null_handler,		/* IOCTL_HLINK_SVCERRATTACH */
+	msg_null_handler,		/* IOCTL_HLINK_RTNLEVENT    */
+	msg_null_handler,		/* IOCTL_HLINK_NDP_FRAME    */
+	msg_fops_handler,		/* IOCTL_HLINK_FOPS_REQ  */
+	msg_tkip_mic_error_handler,	/* IOCTL_HLINK_MIC_ERR */
+	msg_muc_booted_handler,		/* IOCTL_HLINK_BOOTED */
+	msg_drop_ba_handler,		/* IOCTL_HLINK_DROP_BA */
+	msg_disassoc_sta_handler,	/* IOCTL_HLINK_DISASSOC_STA */
+	msg_rfic_caused_reboot_handler, /* IOCTL_HLINK_RFIC_CAUSED_REBOOT */
+	msg_ba_add_start_handler,	/* IOCTL_HLINK_BA_ADD_START */
+	msg_peer_rts_handler,		/* IOCTL_HLINK_PEER_RTS */
+	msg_dyn_wmm_handler,		/* IOCTL_HLINK_DYN_WMM */
+	msg_tdls_events,		/* IOCTL_HLINK_TDLS_EVENTS	*/
+	msg_rate_train,			/* IOCTL_HLINK_RATE_TRAIN	*/
+	msg_csa_complete,		/* IOCTL_HLINK_CSA_COMPLETE	*/
+	msg_ocac_backoff_done,		/* IOCTL_HLINK_OCAC_BACKOFF_DONE */
+	msg_cca_stats_handler,		/* IOCTL_HLINK_CCA_STATS	*/
+};
+
+/* Number of entries in the dispatch table; valid commands are < this. */
+#define MSG_HANDLER_TABLE_SIZE	(ARRAY_SIZE(s_msg_handler_table))
+
+/*
+ * Hostlink interrupt entry point: all real work is deferred to
+ * comm_work() via a workqueue. arg1 is the qdrv control block;
+ * arg2 is unused.
+ */
+static void comm_irq_handler(void *arg1, void *arg2)
+{
+	struct qdrv_cb *qcb = arg1;
+
+	/* Prefer the dedicated hlink workqueue; if it was never created,
+	 * fall back to the kernel's shared work queue. */
+	if (!qcb->hlink_work_queue)
+		schedule_work(&qcb->comm_wq);
+	else
+		queue_work(qcb->hlink_work_queue, &qcb->comm_wq);
+}
+
+/*
+ * Placeholder handler for hostlink commands that need no action on the
+ * LHost; logs the command code and reports success.
+ */
+static int msg_null_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE,
+			"-->Enter  cmd %d %p",
+			mp->ioctl_command, msg);
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_CMM,
+			"%u\n", mp->ioctl_command);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_FOPS_REQ: perform a filesystem operation on behalf of the
+ * MuC. arg1 selects the operation (MUC_FOPS_*), arg2 carries the open
+ * flags or transfer length, and argp points to a muc_fops_req descriptor
+ * in MuC memory holding the fd, data buffer pointer and return value.
+ * The per-operation result is reported back through fops_req->ret_val;
+ * the function itself always returns 0.
+ */
+static int msg_fops_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+	int cmd_id = mp->ioctl_arg1;
+	int cmd_arg = mp->ioctl_arg2;
+	struct muc_fops_req* fops_req;
+	const char* filename = NULL;
+	char* fops_data = NULL;
+	mm_segment_t old_fs;
+	struct file *file;
+	loff_t pos = 0;
+	u32 open_flags;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	/* argp is already a virtual address */
+	fops_req = ioremap_nocache((u32)mp->ioctl_argp, sizeof(*fops_req));
+
+	/* The sys_* calls below take kernel buffers; lift the user-pointer
+	 * check for the duration of this request. Restored before return. */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_TRACE,
+			"Entry Cmd %d Arg %d FD %d Ret Val %d\n",
+			cmd_id,cmd_arg,fops_req->fd,fops_req->ret_val);
+	fops_req->ret_val = -1;
+
+
+	switch(cmd_id){
+		case MUC_FOPS_OPEN:
+			/* Validate the MuC-supplied file index. Break rather
+			 * than return on error: the original early return
+			 * leaked the fops_req mapping, left set_fs(KERNEL_DS)
+			 * in force and never marked the request MUC_FOPS_DONE,
+			 * leaving the MuC waiting. ret_val is already -1. */
+			if ((fops_req->fd >= ARRAY_SIZE(cal_filenames)) || (fops_req->fd < 0)) {
+				break;
+			}
+			filename = cal_filenames[fops_req->fd];
+			open_flags = (cmd_arg & MUC_FOPS_RDONLY ? O_RDONLY : 0)
+				| (cmd_arg & MUC_FOPS_WRONLY ? O_WRONLY | O_CREAT : 0)
+				| (cmd_arg & MUC_FOPS_RDWR ? O_RDWR : 0)
+				| (cmd_arg & MUC_FOPS_APPEND ? O_APPEND : 0);
+
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_TRACE,
+					"File name %s\n", filename);
+
+			fops_req->ret_val = sys_open(filename, open_flags, 0);
+			if ((fops_req->ret_val < 0) && (open_flags & O_CREAT)){
+				/* bootcfg entries must be created through the
+				 * bootcfg driver before they can be opened;
+				 * &filename[14] skips "/proc/bootcfg/" */
+				if (strstr(filename, "/proc/bootcfg/") != 0) {
+					bootcfg_create(&filename[14], 0);
+					fops_req->ret_val = sys_open(filename, open_flags, 0);
+				}
+			}
+			break;
+		case MUC_FOPS_READ:
+			if(fops_req->fd >= 0) {
+				/* Map the MuC-side data buffer into our space */
+				fops_data = ioremap_nocache(muc_to_lhost((u32)fops_req->data_buff), cmd_arg);
+				if (fops_data) {
+					fops_req->ret_val = sys_read(fops_req->fd, fops_data, cmd_arg);
+					iounmap(fops_data);
+				} else {
+					DBGPRINTF_E("could not remap MuC ptr 0x%x (linux 0x%x) for file read, size %u bytes\n",
+							(u32)fops_req->data_buff,
+							(u32)muc_to_lhost((u32)fops_req->data_buff), (u32)cmd_arg);
+				}
+			}
+			break;
+		case MUC_FOPS_WRITE:
+			if(fops_req->fd >= 0){
+				file = fget(fops_req->fd);
+				fops_data = ioremap_nocache(muc_to_lhost((u32)fops_req->data_buff), cmd_arg);
+				WARN_ON(!file);
+				if (file && fops_data) {
+					pos = file->f_pos;
+					fops_req->ret_val = vfs_write(file, fops_data, cmd_arg, &pos);
+					/* Update f_pos before dropping our
+					 * reference; the original touched the
+					 * struct file after fput(). */
+					file->f_pos = pos;
+					fput(file);
+				}
+				if (fops_data) {
+					iounmap(fops_data);
+				}
+			}
+			break;
+		case MUC_FOPS_LSEEK:
+			/* Not implemented; ret_val remains -1. */
+			break;
+		case MUC_FOPS_CLOSE:
+			if(fops_req->fd >= 0) {
+				fops_req->ret_val = sys_close(fops_req->fd);
+			}
+			break;
+	}
+
+	set_fs(old_fs);
+
+	/* clear the argp in the ioctl to prevent MuC from cleaning it up*/
+	mp->ioctl_argp = (u32)NULL;
+
+	/* Signal completion to the polling MuC. */
+	fops_req->req_state = MUC_FOPS_DONE;
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_TRACE, "Exit Cmd %d Arg %d FD %d Ret Val %d\n",
+			cmd_id,cmd_arg,fops_req->fd,fops_req->ret_val);
+
+	iounmap(fops_req);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+
+}
+
+/**
+ * Message from MUC to qdriver indicating a number of TKIP MIC errors (as reported
+ * by the MAC hardware).
+ *
+ * Pass this on to the WLAN driver to thence pass up to the higher layer to act on.
+ */
+static int msg_tkip_mic_error_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+	u16 count;
+	/* IOCTL argument 1 is:
+	 * 31      24      16                    0
+	 *  | RESVD | UNIT  |      DEVICE ID     |
+	 */
+	u16 devid = mp->ioctl_arg1 & 0xFFFF;
+	/*
+	 * Extract the UNIT field (bits 16-23). The original expression
+	 * "mp->ioctl_arg1 & 0xFF0000 >> 24" parsed as
+	 * "arg1 & (0xFF0000 >> 24)" == 0 because >> binds tighter than &,
+	 * and the shift count did not match the field position either.
+	 */
+	u8 unit = (mp->ioctl_arg1 & 0xFF0000) >> 16;
+
+	/* Guard the macs[] index against a corrupt unit field */
+	if (unit >= MAC_UNITS)
+		unit = 0;
+	count = mp->ioctl_arg2;
+	DBGPRINTF_E("Report from MUC for %d TKIP MIC errors,"
+			        " unit %d, devid %d\n", count, unit, devid);
+	qdrv_wlan_tkip_mic_error(&qcb->macs[unit], devid, count);
+	return(0);
+}
+
+/*
+ * IOCTL_HLINK_DEVCHANGE: device state-change notification from the MuC.
+ * arg1 carries the device id, arg2 the NETDEV_F_* flags. Only external
+ * (VAP) devices are looked up; for the parent MAC device the message is
+ * purely informational. Returns -1 if the VAP net_device is not found.
+ */
+static int msg_devchange_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+	u16 devid;
+	u16 flags;
+	int unit = 0;
+	struct net_device *dev = NULL;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	devid = mp->ioctl_arg1 & IOCTL_DEVATTACH_DEVID_MASK;
+	flags = mp->ioctl_arg2;
+
+	/* Currently only have a single MAC */
+	unit = 0;
+
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CMM,
+			"devid %d unit %d flags 0x%04x\n", devid, unit, flags);
+
+	if(flags & NETDEV_F_EXTERNAL)
+	{
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CMM,
+				"NETDEV_F_EXTERNAL\n");
+		/* VAP devids start after the MAC units; map to a vnet slot */
+		dev = qcb->macs[unit].vnet[(devid - MAC_UNITS) % QDRV_MAX_VAPS];
+	}
+	else
+	{
+		/* "Parent" device */
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CMM,
+				"MAC unit %d is operational\n", unit);
+
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(0);
+	}
+
+	if(dev == NULL)
+	{
+		DBGPRINTF_E("No device found.\n");
+
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	/* This used to call dev_open() on the parent device - not needed now */
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+/*
+ * IOCTL_HLINK_DEVDETACH: the MuC has finished tearing down a device.
+ * For an external (VAP) device, find the matching net_device by devid,
+ * complete the LHost-side VAP teardown and release its resource bit.
+ * Detach of the internal (parent MAC) device is not implemented.
+ */
+static int msg_detach_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+	struct qdrv_mac *mac = NULL;
+	u16 devid;
+	u16 flags;
+	int ret = 0;
+
+	int unit = 0;	/* only have 1 WMAC */
+	mac = &qcb->macs[unit];
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	devid = mp->ioctl_arg1;
+	flags = mp->ioctl_arg2;
+
+	if (flags & NETDEV_F_EXTERNAL) {
+		int i;
+		struct net_device *vdev;
+		unsigned long resource;
+
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_CMM,
+				"External devid 0x%04x flags 0x%04x\n", devid, flags);
+
+		/* Scan the vnet slots for the VAP whose devid matches */
+		for (i = 0; i < QDRV_MAX_VAPS; i++) {
+			vdev = qcb->macs[unit].vnet[i];
+			if (vdev) {
+				struct qdrv_vap *qv;
+				qv = netdev_priv(vdev);
+				DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CMM, "i %d vdev %p qv %p qv->devid 0x%x\n",
+						i, vdev, qv, qv->devid);
+				if (qv->devid == devid) {
+					break;
+				}
+			}
+			/* ensure vdev is NULL if the loop runs to completion */
+			vdev = NULL;
+		}
+
+		if (vdev == NULL) {
+			DBGPRINTF_E("Could not find net_device for devid: 0x%x\n", devid);
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return -ENODEV;
+		}
+
+		if (qdrv_vap_exit_muc_done(mac, vdev) < 0) {
+			DBGPRINTF_E("Failed to exit VAP\n");
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return -1;
+		}
+
+		/* Release the resource bit claimed at attach time */
+		resource = QDRV_RESOURCE_VAP(QDRV_WLANID_FROM_DEVID(devid));
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CMM,
+				"removing resource 0x%lx\n", resource);
+		qcb->resources &= ~resource;
+	} else {
+		DBGPRINTF_E("Non external dev detach unimplemented\n");
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return ret;
+}
+
+/*
+ * IOCTL_HLINK_DEVATTACH: the MuC announces a new device. argp points at a
+ * host_ioctl_hifinfo block describing it. External devices become VAPs
+ * (qdrv_vap_init); the internal device initializes the WLAN/MAC layer
+ * (qdrv_wlan_init). On success the corresponding resource bit is set and
+ * the MAC is marked enabled. Returns 0 on success, -1 on failure.
+ */
+static int msg_attach_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = msg;
+	struct host_ioctl_hifinfo *hifinfo = NULL;
+	struct qdrv_mac *mac = NULL;
+	u16 devid;
+	u16 flags;
+	u32 version_size;
+	int ret = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+#ifdef DEBUG_DGPIO
+	printk( "Enter msg_attach_handler\n" );
+#endif
+
+	/* Reject argp values outside the valid SRAM window before mapping */
+	if(hal_range_check_sram_addr((void *) mp->ioctl_argp))
+	{
+
+		DBGPRINTF_E("Argument address 0x%08x is invalid\n",
+			mp->ioctl_argp);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	devid = mp->ioctl_arg1 & IOCTL_DEVATTACH_DEVID_MASK;
+	flags = (mp->ioctl_arg1 & IOCTL_DEVATTACH_DEVFLAG_MASK) >>
+		IOCTL_DEVATTACH_DEVFLAG_MASK_S;
+	hifinfo = ioremap_nocache((u32)IO_ADDRESS((u32)mp->ioctl_argp), sizeof(*hifinfo));
+
+	/* Copy the algorithm version string, guaranteeing NUL termination */
+	version_size = sizeof( qcb->algo_version ) - 1;
+	memcpy( qcb->algo_version, hifinfo->hi_algover, version_size );
+	qcb->algo_version[ version_size ] = '\0';
+
+	qcb->dspgpios = hifinfo->hi_dspgpios;
+#ifdef DEBUG_DGPIO
+	printk( "msg_attach_handler, GPIOs: 0x%x\n", qcb->dspgpios );
+#endif
+
+	if(flags & NETDEV_F_EXTERNAL)
+	{
+
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CMM,
+				"External devid 0x%04x flags 0x%04x\n",
+				devid, flags);
+
+		mac = &qcb->macs[0];
+		if(qdrv_vap_init(mac, hifinfo, mp->ioctl_arg1,
+			mp->ioctl_arg2) < 0)
+		{
+			DBGPRINTF_E("Failed to initialize VAP\n");
+			ret = -1;
+			goto done;
+		}
+
+		qcb->resources |= QDRV_RESOURCE_VAP(QDRV_WLANID_FROM_DEVID(devid));
+	}
+	else
+	{
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CMM,
+				"Internal devid 0x%04x flags 0x%04x\n", devid, flags);
+
+		/* For internal devices, devid doubles as the MAC unit index */
+		if(devid > (MAC_UNITS - 1))
+		{
+			DBGPRINTF_E("MAC unit %d is not supported\n", devid);
+			ret = -1;
+			goto done;
+		}
+
+		/* create_qdev() */
+		mac = &qcb->macs[devid];
+		if(qdrv_wlan_init(mac, hifinfo, mp->ioctl_arg1,
+			mp->ioctl_arg2) < 0)
+		{
+			DBGPRINTF_E("Failed to initialize WLAN feature\n");
+			ret = -1;
+			goto done;
+		}
+
+		/* Mark that we have successfully allocated a resource */
+		qcb->resources |= QDRV_RESOURCE_WLAN;
+	}
+
+	if (mac) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE | QDRV_LF_CMM,
+				"IOCTL_HLINK_DEVATTACH device enabled\n");
+		mac->enabled = 1;
+		mac->dead = 0;
+	}
+
+done:
+	/* hifinfo mapping is released on all paths that reach here */
+	iounmap(hifinfo);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return ret;
+}
+
+/*
+ * IOCTL_HLINK_LOGATTACH: (re)map the MuC system statistics block for the
+ * MAC unit named in arg1; arg2 carries the MuC-side address of the block.
+ */
+static int msg_logattach_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+	u16 devid = mp->ioctl_arg1;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Drop any stale mapping before installing the new one */
+	if (qcb->macs[devid].mac_sys_stats != NULL)
+		iounmap(qcb->macs[devid].mac_sys_stats);
+
+	qcb->macs[devid].mac_sys_stats = ioremap_nocache(muc_to_lhost((u32) mp->ioctl_arg2),
+							sizeof(*qcb->macs[devid].mac_sys_stats));
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/* Shared temperature-reading pointer, defined elsewhere in the driver */
+extern struct _temp_info *p_muc_temp_index;
+
+/*
+ * IOCTL_HLINK_TEMP_ATTACH: (re)map the MuC temperature info structure;
+ * arg1 carries its MuC-side address.
+ */
+static int msg_temp_attach_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Map temp index */
+	if (p_muc_temp_index) {
+		iounmap(p_muc_temp_index);
+	}
+
+	p_muc_temp_index = ioremap_nocache(muc_to_lhost((u32) mp->ioctl_arg1),
+					sizeof(*p_muc_temp_index));
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/* IOCTL_HLINK_BOOTED: record that the MuC is up and hook up AuC stats. */
+static int msg_muc_booted_handler(struct qdrv_cb *qcb, void *msg)
+{
+	qcb->resources |= QDRV_RESOURCE_MUC_BOOTED;
+	qdrv_auc_stats_setup();
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_DISASSOC_STA: the MuC requests disconnection of a station.
+ * arg1 is the node cache index, arg2 the MAC unit. In AP mode the node is
+ * looked up on every running VAP and disconnected where found.
+ */
+static int msg_disassoc_sta_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl*)msg;
+	int ncidx = mp->ioctl_arg1;
+	int devid = mp->ioctl_arg2;
+	struct qdrv_mac *mac = &qcb->macs[devid];
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)mac->data;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_node *node = NULL;
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			if (qw->ic.ic_opmode == IEEE80211_M_HOSTAP) {
+				node = ieee80211_find_node_by_node_idx(vap, ncidx);
+				if (node) {
+					ieee80211_disconnect_node(vap, node);
+					/* drop the find_node reference */
+					ieee80211_free_node(node);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_RATE_TRAIN: store the MuC-computed rate-training hash
+ * (arg2) on the node identified by the low 16 bits of arg1; the high
+ * 16 bits of arg1 select the MAC unit.
+ */
+static int msg_rate_train(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl*)msg;
+	uint16_t ncidx = mp->ioctl_arg1 & 0xFFFF;
+	uint16_t devid = mp->ioctl_arg1 >> 16;
+	struct qdrv_mac *mac = &qcb->macs[devid];
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)mac->data;
+	struct ieee80211_node *ni;
+
+	ni = ieee80211_find_node_by_idx(&qw->ic, NULL, ncidx);
+	if (!ni) {
+		DBGPRINTF_E("node with idx %u not found\n", ncidx);
+		return 0;
+	}
+
+	ni->ni_rate_train_hash = mp->ioctl_arg2;
+	/* drop the find_node reference */
+	ieee80211_free_node(ni);
+
+	return 0;
+}
+
+/* IOCTL_HLINK_CSA_COMPLETE: wake waiters on the channel-switch
+ * completion for the MAC unit named in arg1. */
+static int msg_csa_complete(struct qdrv_cb *qcb, void *msg)
+{
+	const struct host_ioctl *mp = (const struct host_ioctl *) msg;
+	uint16_t unit = mp->ioctl_arg1;
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) qcb->macs[unit].data;
+
+	complete(&qw->ic.csa_completion);
+
+	return 0;
+}
+
+/* IOCTL_HLINK_OCAC_BACKOFF_DONE: signal that the off-channel CAC
+ * backoff period finished for the MAC unit named in arg1. */
+static int msg_ocac_backoff_done(struct qdrv_cb *qcb, void *msg)
+{
+	const struct host_ioctl *mp = (const struct host_ioctl *) msg;
+	uint16_t unit = mp->ioctl_arg1;
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) qcb->macs[unit].data;
+
+	complete(&qw->ic.ic_ocac.ocac_backoff_completion);
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_DROP_BA: tear down block-ack state for a node. arg1 is the
+ * node cache index; arg2 packs devid (bits 0-15), tid (16-23) and flag
+ * bits above that. In STA mode the BSS node is used; otherwise the node
+ * is looked up by index. Optionally sends DELBA for every established RX
+ * BA, then marks the TX BA for this tid as not established.
+ *
+ * NOTE(review): relax and send_delba both decode bits 24+ of arg2
+ * (">> 24" vs ">> 24 & 0x0F") — the fields overlap; confirm the intended
+ * layout against the MuC sender before relying on either in isolation.
+ */
+static int msg_drop_ba_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl*)msg;
+	int relax = mp->ioctl_arg2 >> 24;
+	int tid = (mp->ioctl_arg2 >> 16) & 0xFF;
+	int devid = mp->ioctl_arg2 & 0xFFFF;
+	int send_delba = (mp->ioctl_arg2 >> 24) & 0x0F;
+	int ncidx = mp->ioctl_arg1;
+	struct qdrv_mac *mac = &qcb->macs[devid];
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)mac->data;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_node *node = NULL;
+	struct ieee80211vap *vap;
+	int i;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			if (qw->ic.ic_opmode == IEEE80211_M_STA) {
+				/* STA mode: operate on the BSS peer; take a
+				 * reference under the node table lock */
+				IEEE80211_NODE_LOCK_BH(&vap->iv_ic->ic_sta);
+				node = vap->iv_bss;
+				if (node) {
+					ieee80211_ref_node(node);
+				}
+				IEEE80211_NODE_UNLOCK_BH(&vap->iv_ic->ic_sta);
+			} else {
+				node = ieee80211_find_node_by_node_idx(vap, ncidx);
+			}
+			if (node) {
+				if (send_delba) {
+					/* tear down all established RX BA agreements */
+					for (i = 0; i < WME_NUM_TID; i++) {
+						if (node->ni_ba_rx[i].state == IEEE80211_BA_ESTABLISHED) {
+							ieee80211_send_delba(node, i, 0, 39);
+							node->ni_ba_rx[i].state = IEEE80211_BA_NOT_ESTABLISHED;
+						}
+					}
+				}
+				ieee80211_node_tx_ba_set_state(node, tid,
+					IEEE80211_BA_NOT_ESTABLISHED,
+					relax ? IEEE80211_TX_BA_REQUEST_LONG_RELAX_TIMEOUT :
+						IEEE80211_TX_BA_REQUEST_RELAX_TIMEOUT);
+				ieee80211_free_node(node);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_BA_ADD_START: the MuC requests establishment of a TX
+ * block-ack session. arg1 is the (unmapped) node index; arg2 packs
+ * devid (bits 0-15) and tid (bits 16-23). The node is searched on every
+ * running VAP and the BA session is established on the first match.
+ */
+static int msg_ba_add_start_handler(struct qdrv_cb *qcb, void *msg)
+{
+	const struct host_ioctl *mp = (struct host_ioctl*) msg;
+	const int tid = (mp->ioctl_arg2 >> 16) & 0xFF;
+	const int devid = mp->ioctl_arg2 & 0xFFFF;
+	const int node_idx_unmapped = mp->ioctl_arg1;
+	struct qdrv_mac *mac = &qcb->macs[devid];
+	struct qdrv_wlan *qw = mac->data;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_node *ni = NULL;
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			ni = ieee80211_find_node_by_node_idx(vap, node_idx_unmapped);
+			if (ni) {
+				qdrv_tx_ba_establish(qv, ni, tid);
+				/* drop the find_node reference */
+				ieee80211_free_node(ni);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_PEER_RTS: the MuC toggles dynamic peer-RTS (arg2 nonzero =
+ * enable) for the MAC unit in the low 16 bits of arg1. The state is
+ * always cached; it only takes effect (and triggers beacon updates on
+ * running AP VAPs) when the mode is IEEE80211_PEER_RTS_DYN.
+ */
+static int msg_peer_rts_handler(struct qdrv_cb *qcb, void *msg)
+{
+	const struct host_ioctl *mp = (struct host_ioctl*) msg;
+	const int devid = mp->ioctl_arg1 & 0xFFFF;
+	const int enable = !!mp->ioctl_arg2;
+	struct qdrv_mac *mac = &qcb->macs[devid];
+	struct qdrv_wlan *qw = mac->data;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap;
+
+	/* Save the current status, in case the mode is changed */
+	ic->ic_dyn_peer_rts = enable;
+	if (ic->ic_peer_rts_mode != IEEE80211_PEER_RTS_DYN) {
+		return 0;
+	}
+
+	ic->ic_peer_rts = enable;
+	/* Refresh beacons on every running AP VAP so the change is advertised */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+		if (vap->iv_state != IEEE80211_S_RUN)
+			continue;
+		ic->ic_beacon_update(vap);
+	}
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_DYN_WMM: the MuC toggles dynamic WMM parameter deltas
+ * (arg2 nonzero = enable) for the MAC unit in the low 16 bits of arg1;
+ * applied to every running AP VAP.
+ */
+static int msg_dyn_wmm_handler(struct qdrv_cb *qcb, void *msg)
+{
+	const struct host_ioctl *mp = (struct host_ioctl*) msg;
+	const int devid = mp->ioctl_arg1 & 0xFFFF;
+	const int enable = !!mp->ioctl_arg2;
+	struct qdrv_mac *mac = &qcb->macs[devid];
+	struct qdrv_wlan *qw = mac->data;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+		if (vap->iv_state != IEEE80211_S_RUN)
+			continue;
+
+		ieee80211_wme_updateparams_delta(vap, enable);
+	}
+
+	return 0;
+}
+
+/* IOCTL_HLINK_RFIC_CAUSED_REBOOT: the RFIC requires a reset that only a
+ * full system reboot provides, so panic immediately. Never returns. */
+static int msg_rfic_caused_reboot_handler(struct qdrv_cb *qcb, void *msg)
+{
+	panic("RFIC reset required\n");
+
+	return 0;
+}
+
+/*
+ * IOCTL_HLINK_CCA_STATS: (re)map the per-MAC CCA statistics array shared
+ * by the MuC (arg1 = MuC-side address of MAC_UNITS consecutive
+ * qtn_cca_stats blocks) and point each MAC at its slot.
+ */
+static int msg_cca_stats_handler(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl *) msg;
+	int i;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Drop any stale mapping before installing the new one */
+	if (qcb->cca_stats_all != NULL) {
+		iounmap(qcb->cca_stats_all);
+	}
+
+	qcb->cca_stats_all = ioremap_nocache(muc_to_lhost((u32) mp->ioctl_arg1),
+							(sizeof(struct qtn_cca_stats) * MAC_UNITS));
+
+	for (i = 0; i < MAC_UNITS; i++) {
+		qcb->macs[i].cca_stats = &qcb->cca_stats_all[i];
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * Handle a TDLS PTI (peer traffic indication) event from the MuC. In STA
+ * mode, look up the peer named in tdls_args->ni_macaddr on the first
+ * running VAP and, if it has an active TDLS link, forward an
+ * IEEE80211_TDLS_PTI_REQ event to userspace.
+ */
+static void qdrv_tdls_pti_event(struct host_ioctl *mp,
+		struct ieee80211com *ic, struct qtn_tdls_args *tdls_args)
+{
+	struct ieee80211vap *vap = NULL;
+	struct ieee80211_node *ni = NULL;
+	enum ieee80211_tdls_operation operation;
+
+	if (likely(tdls_args->tdls_cmd == IOCTL_TDLS_PTI_EVENT)) {
+		if (ic->ic_opmode == IEEE80211_M_STA) {
+			vap = TAILQ_FIRST(&ic->ic_vaps);
+			if (vap && vap->iv_state == IEEE80211_S_RUN)
+				ni = ieee80211_find_node(&vap->iv_ic->ic_sta, tdls_args->ni_macaddr);
+		}
+
+		if (ni) {
+			if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni)) {
+				operation = IEEE80211_TDLS_PTI_REQ;
+				if (ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation))
+					DBGPRINTF_E("TDLS %s: Send event %d failed\n", __func__, operation);
+			}
+			/* drop the find_node reference */
+			ieee80211_free_node(ni);
+		} else {
+			/* was "Can find node", which inverted the meaning */
+			DBGPRINTF_E("TDLS EVENT: Cannot find node - "
+					"node %pM cmd %d pti 0x%x\n", tdls_args->ni_macaddr,
+					tdls_args->tdls_cmd, mp->ioctl_arg2);
+		}
+	} else {
+		DBGPRINTF_E("TDLS EVENT: parameter error for PTI - "
+			"node %pM cmd %d pti 0x%x\n", tdls_args->ni_macaddr,
+			tdls_args->tdls_cmd, mp->ioctl_arg2);
+	}
+}
+
+/*
+ * IOCTL_HLINK_TDLS_EVENTS: dispatch a TDLS event from the MuC. arg1 is
+ * the event type, arg2 the MAC unit, and argp points to a qtn_tdls_args
+ * block in MuC memory which is mapped for the duration of the call.
+ */
+static int msg_tdls_events(struct qdrv_cb *qcb, void *msg)
+{
+	struct host_ioctl *mp = (struct host_ioctl*)msg;
+	int devid = mp->ioctl_arg2;
+	struct qdrv_mac *mac = &qcb->macs[devid];
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)mac->data;
+	struct ieee80211com *ic = &qw->ic;
+	struct qtn_tdls_args *tdls_args;
+	unsigned long int addr;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	addr = (unsigned long int)IO_ADDRESS((uint32_t)mp->ioctl_argp);
+	tdls_args = ioremap_nocache(addr, sizeof(*tdls_args));
+
+	switch (mp->ioctl_arg1) {
+	case IOCTL_TDLS_PTI_EVENT:
+		qdrv_tdls_pti_event(mp, ic, tdls_args);
+		break;
+	default:
+		/* fixed "Unkown" typo in the log message */
+		DBGPRINTF_E("Unknown tdls events: arg1 %u arg2 0x%x argp 0x%x\n",
+			mp->ioctl_arg1, mp->ioctl_arg2, mp->ioctl_argp);
+		break;
+	}
+
+	iounmap(tdls_args);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return (0);
+}
+
+/*
+ * Workqueue body for hostlink interrupts: fetch the ioctl chain address
+ * from the shared mailbox (under the LHost/MuC hardware semaphore), then
+ * walk the chain, dispatching each message through s_msg_handler_table
+ * and marking it done so the MuC can reclaim it.
+ */
+static void comm_work(struct work_struct *work)
+{
+	struct qdrv_cb *qcb = container_of(work, struct qdrv_cb, comm_wq);
+	volatile u32 physmp;
+	struct host_ioctl *mp;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Initialisation */
+	if (qcb->hlink_mbox == 0) {
+		/* See if the hostlink address is setup */
+		u32 hlink_addr = qdrv_soc_get_hostlink_mbox();
+		if (hlink_addr) {
+			qcb->hlink_mbox = ioremap_nocache(muc_to_lhost(hlink_addr),
+						sizeof(*qcb->hlink_mbox));
+		} else {
+			panic("Hostlink interrupt, no mailbox setup - reboot\n");
+			return;
+		}
+	}
+
+	/* Take the semaphore */
+	while (!sem_take(RUBY_SYS_CTL_L2M_SEM, QTN_SEM_HOST_LINK_SEMNUM));
+
+	/* Get the contents of the message mailbox - physical address */
+	physmp = (u32)(*qcb->hlink_mbox);
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE | QDRV_LF_CMM, "physmp %08X\n", physmp);
+
+	/* Clear the content of the mailbox. */
+	if (physmp) {
+		writel_wmb(0, qcb->hlink_mbox);
+		/* Translate to a local address. Can't do this earlier or
+		 * we'd translate '0' to '0x80000000'
+		 */
+		physmp = muc_to_lhost(physmp);
+	}
+
+	/* Give back the semaphore */
+	sem_give(RUBY_SYS_CTL_L2M_SEM, QTN_SEM_HOST_LINK_SEMNUM);
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE | QDRV_LF_CMM, "remapped physmp %08X\n", physmp);
+
+	while (physmp) {
+		struct host_ioctl loc_ioctl;
+		struct host_ioctl *p_save;
+
+		if (hal_range_check_sram_addr((void *) physmp)) {
+			DBGPRINTF_E("Message address 0x%08x is invalid\n",physmp);
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return;
+		}
+
+		/* Work on a local copy; keep the mapping so we can set the
+		 * done flag on the shared copy afterwards */
+		mp = ioremap_nocache((u32)IO_ADDRESS((u32) physmp), sizeof(*mp));
+		loc_ioctl = *mp;
+		p_save = mp;
+		mp = &loc_ioctl;
+
+		/*
+		 * Call the right message handle function. Valid commands
+		 * index 0..MSG_HANDLER_TABLE_SIZE-1; the original "<=" test
+		 * allowed a one-past-the-end read of the handler table.
+		 */
+		if (mp->ioctl_command < MSG_HANDLER_TABLE_SIZE)
+		{
+			u_int32_t tmp_argp = mp->ioctl_argp;
+
+			/* If we have argp pointer, translate it */
+			if (mp->ioctl_argp) {
+				u32 lhost_addr = muc_to_lhost(mp->ioctl_argp);
+				if (lhost_addr == RUBY_BAD_BUS_ADDR) {
+					panic(KERN_ERR"argp translation is failed: cmd=0x%x\n",
+					    (unsigned)mp->ioctl_command);
+				}
+				mp->ioctl_argp = lhost_addr;
+			}
+			/* The message code indexes directly into the table */
+			if((*s_msg_handler_table[mp->ioctl_command])(qcb, (void *) mp) < 0)
+			{
+				DBGPRINTF_E("Failed to process message %d\n",
+					mp->ioctl_command);
+			}
+			/* Put argp back */
+			mp->ioctl_argp = tmp_argp;
+		}
+		else
+		{
+			/* We don't have a handler for this message */
+			DBGPRINTF_W("Unknown message %d\n", mp->ioctl_command);
+		}
+
+		/* Move to the next one */
+		physmp = (u32)mp->ioctl_next;
+		if (physmp) {
+			DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_HLINK, "Next ioctl %08X\n", physmp);
+			physmp = muc_to_lhost(physmp);
+			DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_HLINK, "Next ioctl remap %08X\n", physmp);
+		}
+		/* Tell the MuC this message is consumed, then unmap it */
+		p_save->ioctl_rc |= QTN_HLINK_RC_DONE;
+		iounmap(p_save);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+/*
+ * Set up the MuC-to-LHost communication path: create the hlink
+ * workqueue (falling back to the shared kernel queue if that fails),
+ * register comm_work as the deferred handler, and install/enable the
+ * hostlink interrupt on MAC unit 0. Returns 0 on success, -1 on failure.
+ */
+int qdrv_comm_init(struct qdrv_cb *qcb)
+{
+	struct int_handler int_handler;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* MBOX will be passed in by the MuC when it starts up */
+	qcb->hlink_mbox = 0;
+
+	qcb->hlink_work_queue = create_workqueue("hlink_work_queue");
+	if (!qcb->hlink_work_queue)
+		DBGPRINTF_E("Failed to create hlink work queue\n");
+	/* Finish the interrupt work in a workqueue */
+	INIT_WORK(&qcb->comm_wq, comm_work);
+
+	int_handler.handler = comm_irq_handler;
+	int_handler.arg1 = qcb;
+	int_handler.arg2 = NULL;
+
+	/* Install handler. Use mac device 0 */
+	if (qdrv_mac_set_handler(&qcb->macs[0], RUBY_M2L_IRQ_LO_HLINK, &int_handler) != 0) {
+		DBGPRINTF_E("Failed to install handler\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	qdrv_mac_enable_irq(&qcb->macs[0], RUBY_M2L_IRQ_LO_HLINK);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * Tear down the hostlink path: drain/destroy the workqueue, disable the
+ * interrupt, and release every ioremap mapping the handlers installed.
+ * NOTE(review): only macs[0].mac_sys_stats is unmapped here while
+ * msg_logattach_handler can map stats for any unit — fine while a single
+ * MAC exists, but revisit if MAC_UNITS grows. Always returns 0.
+ */
+int qdrv_comm_exit(struct qdrv_cb *qcb)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Make sure work queues are done */
+	if (qcb->hlink_work_queue) {
+		flush_workqueue(qcb->hlink_work_queue);
+		destroy_workqueue(qcb->hlink_work_queue);
+		qcb->hlink_work_queue = NULL;
+	} else {
+		flush_scheduled_work();
+	}
+
+	qdrv_mac_disable_irq(&qcb->macs[0], RUBY_M2L_IRQ_LO_HLINK);
+
+	if (qcb->hlink_mbox) {
+		iounmap((void *)qcb->hlink_mbox);
+	}
+
+	if (qcb->macs[0].mac_sys_stats) {
+		iounmap(qcb->macs[0].mac_sys_stats);
+	}
+
+	if (p_muc_temp_index != NULL) {
+		iounmap(p_muc_temp_index);
+	}
+
+	if (qcb->cca_stats_all) {
+		iounmap(qcb->cca_stats_all);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
diff --git a/drivers/qtn/qdrv/qdrv_comm.h b/drivers/qtn/qdrv/qdrv_comm.h
new file mode 100644
index 0000000..d954769
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_comm.h
@@ -0,0 +1,39 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_COMM_H
+#define _QDRV_COMM_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <net80211/if_media.h>
+#include <net80211/ieee80211_var.h>
+
+#include <qtn/lhost_muc_comm.h>
+
+#include "qdrv_soc.h"
+
+/* Maximum network buffer size used by the qdrv communication path */
+#define QNET_MAXBUF_SIZE 4400
+/* Set up / tear down the MuC-to-LHost hostlink (see qdrv_comm.c) */
+int qdrv_comm_init(struct qdrv_cb *qcb);
+int qdrv_comm_exit(struct qdrv_cb *qcb);
+
+#endif
+
+
diff --git a/drivers/qtn/qdrv/qdrv_config.h b/drivers/qtn/qdrv/qdrv_config.h
new file mode 100644
index 0000000..a500532
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_config.h
@@ -0,0 +1,3 @@
+/* Automatically generated file. Do not edit. */
+#define QDRV_CFG_PLATFORM_ID 466
+#define QDRV_CFG_TYPE "topaz_config"
diff --git a/drivers/qtn/qdrv/qdrv_control.c b/drivers/qtn/qdrv/qdrv_control.c
new file mode 100644
index 0000000..dd300d8
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_control.c
@@ -0,0 +1,5869 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <qtn/qdrv_sch.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/stddef.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/sch_generic.h>
+#include <trace/ippkt.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_config.h"
+#include "qdrv_control.h"
+#include "qdrv_pktlogger.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_muc_stats.h"
+#include "qdrv_wlan.h"
+#include <linux/etherdevice.h>
+#include "qdrv_radar.h"
+#include <qtn/qtn_math.h>
+#include "qdrv_vap.h"
+#include "qdrv_bridge.h"
+#include "qtn/qdrv_bld.h"
+#include "qdrv_netdebug_checksum.h"
+#include "qdrv_vlan.h"
+#include "qdrv_mac_reserve.h"
+#include <radar/radar.h>
+#include <asm/board/soc.h>
+#include <qtn/mproc_sync_base.h>
+#include <common/ruby_version.h>
+#include <common/ruby_mem.h>
+#include <qtn/qtn_bb_mutex.h>
+#include <qtn/hardware_revision.h>
+#include <qtn/emac_debug.h>
+#include <qtn/ruby_cpumon.h>
+#include <qtn/qtn_muc_stats_print.h>
+#include <qtn/qtn_vlan.h>
+#include <asm/board/troubleshoot.h>
+#include <linux/net/bridge/br_public.h>
+#include "qdrv_show.h"
+#include <qtn/txbf_mbox.h>
+#include <net/iw_handler.h> /* wireless_send_event(..) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/pm_qos.h>
+#include <linux/gpio.h>
+#else
+#include <linux/pm_qos_params.h>
+#include <asm/gpio.h>
+#endif
+
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_vlan_cpuif.h>
+#include <qtn/topaz_fwt_sw.h>
+#include <ruby/pm.h>
+#include <ruby/gpio.h>
+
+#ifdef MTEST
+#include "../mtest/mtest.h"
+#endif
+
+static const struct qtn_auc_stat_field auc_field_stats_default[] = {
+#if !defined(CONFIG_TOPAZ_PCIE_HOST)
+#include <qtn/qtn_auc_stats_fields.default.h>
+#endif
+};
+static const struct qtn_auc_stat_field auc_field_stats_nomu[] = {
+#if !defined(CONFIG_TOPAZ_PCIE_HOST)
+#include <qtn/qtn_auc_stats_fields.nomu.h>
+#endif
+};
+
+#define STR2L(_str)    (simple_strtol(_str, 0, 0))
+
+static int qdrv_command_start(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_stop(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_get(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_set(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_read(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_write(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_calcmd(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_led(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_gpio(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_pwm(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_memdebug(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_radar(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_rifs(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_bridge(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_clearsram(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_dump(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_muc_memdbg(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_pktlogger(struct device *dev, int argc, char *argv[]);
+static int qdrv_command_rf_reg_dump(struct device *dev, int argc, char *argv[]);
+
+#if defined(QTN_DEBUG)
+static int qdrv_command_dbg(struct device *dev, int argc, char *argv[]);
+#endif
+#ifdef QDRV_TX_DEBUG
+static int qdrv_command_txdbg(struct device *dev, int argc, char *argv[]);
+#endif
+static int qdrv_command_mu(struct device *dev, int argc, char *argv[]);
+unsigned int g_dbg_dump_pkt_len = 0;
+
+unsigned int g_qos_q_merge = 0x00000000;
+unsigned int g_catch_fcs_corruption = 0;
+
+static unsigned int g_qdrv_radar_test_mode = 0;
+int qdrv_radar_is_test_mode(void)
+{
+	return !!g_qdrv_radar_test_mode;
+}
+
+int qdrv_radar_test_mode_csa_en(void)
+{
+	return (g_qdrv_radar_test_mode == 0x3);
+}
+
+#if defined (ERICSSON_CONFIG)
+int qdrv_wbsp_ctrl = 1;
+#else
+int qdrv_wbsp_ctrl = 0;
+#endif
+
+static u8 wifi_macaddr[IEEE80211_ADDR_LEN];
+static unsigned int s_txif_list_max = QNET_TXLIST_ENTRIES_DEFAULT;
+
+uint32_t g_carrier_id = 0;
+EXPORT_SYMBOL(g_carrier_id);
+
+#define LED_FILE "/mnt/jffs2/led.txt"
+//#define LED_FILE "/scripts/led.txt"
+#define QDRV_UC_STATS_DESC_LEN 35
+
+/*
+ * In core.c, linux/arch/arm/mach-ums
+ */
+extern void set_led_gpio(unsigned int gpio_num, int val);
+extern int get_led_gpio(unsigned int gpio_num);
+
+#define ENVY2_REV_A 0x0020
+
+static struct semaphore s_output_sem;
+static struct qdrv_cb *s_output_qcb = NULL;
+
+/* Static buffer for holding kernel crash across reboot */
+static char *qdrv_crash_log = NULL;
+static uint32_t qdrv_crash_log_len = 0;
+
+static struct command_cb
+{
+	char *command;
+	int (*fn)(struct device *dev, int argc, char *argv[]);
+}
+
+s_command_table[] =
+{
+	{ "start",	qdrv_command_start },
+	{ "stop",	qdrv_command_stop },
+	{ "set",	qdrv_command_set },
+	{ "get",	qdrv_command_get },
+	{ "read",	qdrv_command_read },
+	{ "write",	qdrv_command_write },
+	{ "calcmd",	qdrv_command_calcmd },
+	{ "ledcmd",	qdrv_command_led },
+	{ "gpio",	qdrv_command_gpio },
+	{ "pwm",	qdrv_command_pwm },
+	{ "radar",	qdrv_command_radar },
+	{ "bridge",	qdrv_command_bridge },
+	{ "memdebug",	qdrv_command_memdebug },
+	{ "clearsram",	qdrv_command_clearsram },
+	{ "dump",	qdrv_command_dump },
+	{ "muc_memdbg",	qdrv_command_muc_memdbg },
+	{ "rifs",	qdrv_command_rifs },
+#if defined(QTN_DEBUG)
+	{ "dbg",	qdrv_command_dbg },
+#endif
+	{ "pktlogger",	qdrv_command_pktlogger },
+#ifdef QDRV_TX_DEBUG
+	{ "txdbg",	qdrv_command_txdbg },
+#endif
+	{ "mu",         qdrv_command_mu },
+	{ "rf_regdump",  qdrv_command_rf_reg_dump },
+};
+
+#define COMMAND_TABLE_SIZE (sizeof(s_command_table)/sizeof(struct command_cb))
+
+#define membersizeof(type, field) \
+	sizeof(((type *) NULL)->field)
+
+static struct param_cb
+{
+	char *name;
+	unsigned int flags;
+#define P_FL_TYPE_INT		0x00000001
+#define P_FL_TYPE_STRING	0x00000002
+#define P_FL_TYPE_MAC		0x00000004
+	char *address;
+	int offset;
+	int size;
+}
+s_param_table[] =
+{
+	{
+		"mac0addr", P_FL_TYPE_MAC, NULL,
+		offsetof(struct qdrv_cb, mac0), membersizeof(struct qdrv_cb, mac0)
+	},
+	{
+		"mac1addr", P_FL_TYPE_MAC, NULL,
+		offsetof(struct qdrv_cb, mac1), membersizeof(struct qdrv_cb, mac1)
+	},
+	{
+		"wifimacaddr", P_FL_TYPE_MAC, (char *)&wifi_macaddr[ 0 ],
+		0, sizeof(wifi_macaddr)
+	},
+	{
+		"mucfw", P_FL_TYPE_STRING, NULL,
+		offsetof(struct qdrv_cb, muc_firmware), membersizeof(struct qdrv_cb, muc_firmware)
+	},
+	{
+		"dspfw", P_FL_TYPE_STRING, NULL,
+		offsetof(struct qdrv_cb, dsp_firmware), membersizeof(struct qdrv_cb, dsp_firmware)
+	},
+	{
+		"aucfw", P_FL_TYPE_STRING, NULL,
+		offsetof(struct qdrv_cb, auc_firmware), membersizeof(struct qdrv_cb, auc_firmware)
+	},
+	{
+		"dump_pkt_len", P_FL_TYPE_INT, (char *) &g_dbg_dump_pkt_len,
+		0, sizeof(g_dbg_dump_pkt_len)
+	},
+	{
+		"uc_flags", P_FL_TYPE_INT, NULL,
+		0, sizeof(u_int32_t)
+	},
+	{
+		"txif_list_max", P_FL_TYPE_INT, (char *) &s_txif_list_max,
+		0, sizeof(s_txif_list_max)
+	},
+	{
+		"catch_fcs_corruption", P_FL_TYPE_INT, (char *) &g_catch_fcs_corruption,
+		0, sizeof(g_catch_fcs_corruption)
+	},
+	{
+		"muc_qos_q_merge", P_FL_TYPE_INT, (char *) &g_qos_q_merge,
+		0, sizeof(g_qos_q_merge)
+	},
+	{
+		"test1", P_FL_TYPE_INT, (char *) &g_qdrv_radar_test_mode,
+		0, sizeof(g_qdrv_radar_test_mode)
+	},
+	{
+		"vendor_fix", P_FL_TYPE_INT, NULL,
+		0, sizeof(u_int32_t)
+	},
+	{
+		"vap_default_state", P_FL_TYPE_INT, NULL,
+		0, sizeof(u_int32_t)
+	},
+	{
+		"brcm_rxglitch_thrshlds", P_FL_TYPE_INT, NULL,
+		0, sizeof(u_int32_t)
+	},
+	{
+		"vlan_promisc", P_FL_TYPE_INT, NULL,
+		0, sizeof(u_int32_t)
+	},
+	{
+		"pwr_mgmt", P_FL_TYPE_INT, NULL,
+		0, sizeof(u_int32_t)
+	},
+	{
+		"rxgain_params", P_FL_TYPE_INT, NULL,
+		0, sizeof(u_int32_t)
+	},
+	{
+		"wbsp_ctrl", P_FL_TYPE_INT, (char *) &qdrv_wbsp_ctrl,
+		0, sizeof(qdrv_wbsp_ctrl)
+	},
+	{
+		"fw_no_mu", P_FL_TYPE_INT, NULL,
+		offsetof(struct qdrv_cb, fw_no_mu), membersizeof(struct qdrv_cb, fw_no_mu)
+	},
+};
+
+#define PARAM_TABLE_SIZE (sizeof(s_param_table)/sizeof(struct param_cb))
+
+static struct qdrv_event qdrv_event_log_table[QDRV_EVENT_LOG_SIZE];
+static int qdrv_event_ptr = 0;
+static DEFINE_SPINLOCK(qdrv_event_lock);	/* statically initialized; no spin_lock_init() call exists before first use in qdrv_event_log() */
+
+struct qdrv_show_assoc_params g_show_assoc_params;
+
+#define VERIWAVE_TXPOWER_CMD_SIZE	6
+
+void qdrv_event_log(char *str, int arg1, int arg2, int arg3, int arg4, int arg5)
+{
+	spin_lock(&qdrv_event_lock);
+	qdrv_event_log_table[qdrv_event_ptr].jiffies = jiffies;
+	qdrv_event_log_table[qdrv_event_ptr].clk = 0;
+	qdrv_event_log_table[qdrv_event_ptr].str = str;
+	qdrv_event_log_table[qdrv_event_ptr].arg1 = arg1;
+	qdrv_event_log_table[qdrv_event_ptr].arg2 = arg2;
+	qdrv_event_log_table[qdrv_event_ptr].arg3 = arg3;
+	qdrv_event_log_table[qdrv_event_ptr].arg4 = arg4;
+	qdrv_event_log_table[qdrv_event_ptr].arg5 = arg5;
+	qdrv_event_ptr = (qdrv_event_ptr + 1) & QDRV_EVENT_LOG_MASK;
+	spin_unlock(&qdrv_event_lock);
+}
+EXPORT_SYMBOL(qdrv_event_log);
+
+/* used for send formatted string custom event IWEVCUSTOM */
+int qdrv_eventf(struct net_device *dev, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+	union iwreq_data wreq;
+	char buffer[IW_CUSTOM_MAX];
+
+	if (dev == NULL) {
+		return 0;
+	}
+
+	/* Format the custom wireless event */
+	memset(&wreq, 0, sizeof(wreq));
+
+	va_start(args, fmt);
+	i = vsnprintf(buffer, IW_CUSTOM_MAX, fmt, args);
+	va_end(args);
+
+	wreq.data.length = strnlen(buffer, IW_CUSTOM_MAX);
+	wireless_send_event(dev, IWEVCUSTOM, &wreq, buffer);
+	return i;
+}
+EXPORT_SYMBOL(qdrv_eventf);
+
+static int qdrv_command_is_valid_addr(unsigned long addr)
+{
+	if (is_valid_mem_addr(addr)) {
+		return 1;
+	} else if (addr >= RUBY_ARC_CACHE_BYPASS) {
+		/* ARC's no-cache, TLB-bypass region where we have all our registers */
+		return 1;
+	}
+	return 0;
+}
+
+int qdrv_parse_mac(const char *mac_str, uint8_t *mac)
+{
+	unsigned int tmparray[IEEE80211_ADDR_LEN];
+
+	if (mac_str == NULL)
+		return -1;
+
+	if (sscanf(mac_str, "%02x:%02x:%02x:%02x:%02x:%02x",
+			&tmparray[0],
+			&tmparray[1],
+			&tmparray[2],
+			&tmparray[3],
+			&tmparray[4],
+			&tmparray[5]) != IEEE80211_ADDR_LEN) {
+		return -1;
+	}
+
+	mac[0] = tmparray[0];
+	mac[1] = tmparray[1];
+	mac[2] = tmparray[2];
+	mac[3] = tmparray[3];
+	mac[4] = tmparray[4];
+	mac[5] = tmparray[5];
+
+	return 0;
+}
+
+static unsigned long qdrv_read_mem(unsigned long read_addr)
+{
+	unsigned long retval = 0;
+
+	if (!qdrv_command_is_valid_addr(read_addr)) {
+		DBGPRINTF_E("Q driver read mem, address 0x%lx is not valid\n", read_addr);
+		retval = -1;
+	} else {
+		unsigned long *segvaddr = ioremap_nocache(read_addr, sizeof(*segvaddr));
+
+		if (segvaddr == NULL) {
+			DBGPRINTF_E("Q driver read mem, failed to remap address 0x%lx\n", read_addr);
+			retval = -1;
+		} else {
+			retval = *segvaddr;
+			iounmap(segvaddr);
+		}
+	}
+
+	return retval;
+}
+
+static void qdrv_show_memory(struct seq_file *s, void *data, size_t num)
+{
+	struct qdrv_cb *qcb = data;
+
+	if (!qdrv_command_is_valid_addr(qcb->read_addr)) {
+		seq_printf(s, "%08x: invalid addr\n", qcb->read_addr);
+	} else {
+		unsigned long *segvaddr_base = ioremap_nocache(qcb->read_addr,
+				sizeof(*segvaddr_base) * qcb->values_per_line);
+
+		if (!segvaddr_base) {
+			seq_printf(s, "%08x: remapping failed\n", qcb->read_addr);
+		} else {
+			int limit = qcb->values_per_line - 1;
+			unsigned long *segvaddr_moving = segvaddr_base;
+
+			seq_printf(s, "%08x: %08lx", qcb->read_addr, *segvaddr_moving);
+			qcb->read_addr += sizeof(*segvaddr_moving);
+			qcb->read_count--;
+			segvaddr_moving++;
+
+			if (limit > qcb->read_count) {
+				limit = qcb->read_count;
+			}
+
+			if (qcb->values_per_line > 1 && limit > 0) {
+				int i;
+
+				for (i = 0; i < limit; i++) {
+					seq_printf(s, " %08lx", *segvaddr_moving);
+					qcb->read_addr += sizeof(*segvaddr_moving);
+					qcb->read_count--;
+					segvaddr_moving++;
+				}
+			}
+
+			seq_printf(s, "\n");
+			iounmap(segvaddr_base);
+		}
+	}
+}
+
+/* Post command_set processing - propagate the changes in qcb into other structures */
+static void
+qdrv_command_set_post(struct qdrv_cb *qcb)
+{
+	qcb->params.txif_list_max = s_txif_list_max;
+}
+
+static struct qdrv_wlan *qdrv_control_wlan_get(struct qdrv_mac *mac)
+{
+	if (mac->data == NULL) {
+		/* This will happen for PCIe host */
+		DBGPRINTF_N("WLAN not found\n");
+	}
+
+	return (struct qdrv_wlan *)mac->data;
+}
+
+static struct qdrv_mac *qdrv_control_mac_get(struct device *dev, const char *argv)
+{
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+	unsigned int unit = 0;	/* init so the error print below never reads an indeterminate value when sscanf fails */
+
+	if ((sscanf(argv, "%u", &unit) != 1) || (unit > (MAC_UNITS - 1))) {
+		DBGPRINTF_E("Invalid MAC unit %u in control command\n", unit);
+		return NULL;
+	}
+
+	if (&qcb->macs[unit].data == NULL) {	/* NOTE(review): '&' makes this always false, so .data is never actually checked — confirm intent (data may be legitimately NULL on PCIe host) */
+		return NULL;
+	}
+
+	return &qcb->macs[unit];
+}
+
+struct ieee80211com *qdrv_get_ieee80211com(struct device *dev)
+{
+	struct qdrv_wlan *qw;
+	struct qdrv_mac *mac = qdrv_control_mac_get(dev, "0");
+
+	if (!mac) {
+		return NULL;
+	}
+
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw) {
+		return NULL;
+	}
+
+	return &qw->ic;
+}
+
+/*
+ * check that MuC has completed its own initialisation code, and the boot complete
+ * hostlink message has been processed.
+ *
+ * 1 on success, 0 otherwise
+ */
+static int check_muc_boot(struct device *dev, void *token)
+{
+	(void)token;
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+	uint32_t *resources = &qcb->resources;
+	uint32_t mask = QDRV_RESOURCE_WLAN | QDRV_RESOURCE_MUC_BOOTED;
+	char *desc;
+
+	if ((*resources & mask) != mask)
+		return 0;
+
+	desc = qdrv_soc_get_hw_desc(0);
+	if (desc[0] == '\0')
+		panic("QDRV: invalid bond option");
+
+	printk("QDRV: hardware is %s\n", desc);
+
+	return 1;
+}
+
+/*
+ * Check that vap has been created successfully, 1 on success
+ */
+static int check_vap_created(struct device *dev, void *token)
+{
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+	int vap = (int) token;
+
+	if (vap < 0 || vap >= QDRV_MAX_VAPS) {
+		DBGPRINTF_E("invalid VAP %d\n", vap);
+		return 1;
+	}
+
+	return qcb->resources & QDRV_RESOURCE_VAP(vap);
+}
+
+static int check_vap_deleted(struct device *dev, void *token)
+{
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+	int vap = (int) token;
+
+	if (vap < 0 || vap >= QDRV_MAX_VAPS) {
+		DBGPRINTF_E("invalid VAP %d\n", vap);
+		return 1;
+	}
+
+	return (qcb->resources & QDRV_RESOURCE_VAP(vap)) == 0;
+}
+
+/**
+ * block until a condition is met, which is provided by check_func.
+ * check_func returns 1 on successful condition.
+ *
+ * if booting the muc for the first time,
+ * wlan (mac->data) will not be available yet.
+ * assume qcb->macs[0] is the mac of interest here.
+ *
+ * returns 0 on successful condition, -1 on timeout, crash, or
+ * mac/wlan/shared_params not properly initialized
+ */
+static int qdrv_command_start_block(struct device *dev, const char* description,
+		int (*check_func)(struct device *cf_dev, void *cf_token), void *token)
+{
+	const unsigned long warn_threshold_msecs = 5000;
+	unsigned long start_jiff = jiffies;
+	unsigned long deadline = start_jiff + (MUC_BOOT_WAIT_SECS * HZ);
+	int can_block = !in_atomic();
+	int ret = -1;
+	int complete = 0;
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+	struct qdrv_mac *mac = &qcb->macs[0];
+
+	BUG_ON(!can_block);
+	BUG_ON(!mac);
+
+#ifdef MTEST
+	complete = 1;
+	ret = 0;
+#endif
+
+	while (!complete) {
+		if (time_after(jiffies, deadline)) {
+			DBGPRINTF_E("Timeout waiting for %s; waited %u seconds\n",
+						description, MUC_BOOT_WAIT_SECS);
+			complete = 1;
+		} else if (mac && mac->dead) {
+			DBGPRINTF_E("Failure waiting for %s\n",
+						description);
+			complete = 1;
+		} else if (check_func(dev, token)) {
+			unsigned long elapsed_msecs;
+			const char *slow_warn = "";
+
+			/* once alive, qw should be initialised */
+			BUG_ON(qdrv_control_wlan_get(mac) == NULL);
+
+			elapsed_msecs = jiffies_to_msecs(jiffies - start_jiff);
+			if (elapsed_msecs > warn_threshold_msecs) {
+				slow_warn = " (SLOW)";
+			}
+
+			printk(KERN_INFO "%s succeeded %lu.%03lu seconds%s\n",
+					description,
+					elapsed_msecs / MSEC_PER_SEC,
+					elapsed_msecs % MSEC_PER_SEC,
+					slow_warn);
+
+			complete = 1;
+			ret = 0;
+		}
+
+		if (can_block) {
+			msleep(1);
+		}
+	}
+
+	return ret;
+}
+
+static int qdrv_soc_get_next_devid(const struct qdrv_cb *qcb)
+{
+	int maci;
+	int vdevi;
+	int ndevs = QDRV_RESERVED_DEVIDS;
+
+	for (maci = 0; maci < MAC_UNITS; maci++) {
+		for (vdevi = 0; vdevi < QDRV_MAX_VAPS; vdevi++) {
+			if (qcb->macs[maci].vnet[vdevi] == NULL)
+				break;
+
+			ndevs++;
+		}
+	}
+
+	return ndevs;
+}
+
+static int qdrv_control_mimo_mode_set(struct qdrv_mac *mac, struct net_device *vdev, int opmode)
+{
+	struct qdrv_vap *qv = netdev_priv(vdev);
+	int mimo_mode;
+
+	if (qv == NULL)
+		return -1;
+
+	if (opmode == IEEE80211_M_HOSTAP && mac->mac_active_bss > 1)
+		return 0;
+
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_4X4, 0))
+		mimo_mode = 4;
+	else if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0))
+		mimo_mode = 3;
+	else
+		mimo_mode = 2;
+
+	ieee80211_param_to_qdrv(&qv->iv, IEEE80211_PARAM_MIMOMODE, mimo_mode, NULL, 0);
+
+	return 0;
+}
+
+static int qdrv_command_dev_init(struct device *dev, struct qdrv_cb *qcb)
+{
+	char *argv[] = { "test", "31", "0", "4", "0" };
+	int bond;
+
+	if (qcb->resources != 0) {
+		DBGPRINTF_W("Driver is already started\n");
+		return -1;
+	}
+
+	if (qdrv_soc_init(dev) < 0)
+		panic("Restarting due to SoC failure to initialise");
+
+	if (qdrv_command_start_block(dev, "MuC boot", &check_muc_boot, NULL))
+		panic("Restarting due to failed MuC");
+
+	if (get_hardware_revision() >= HARDWARE_REVISION_TOPAZ_A2) {
+#ifdef CONFIG_TOPAZ_PCIE_TARGET
+		/* There is no bond EMAC for PCIe EP board */
+		bond = 0;
+#else
+		bond = topaz_emac_get_bonding();
+#endif
+		topaz_tqe_emac_reflect_to(TOPAZ_TQE_LHOST_PORT,  bond);
+	}
+
+	qdrv_command_calcmd(dev, ARRAY_SIZE(argv), argv);
+
+	return 0;
+}
+
+static int qdrv_command_start(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_cb *qcb;
+	struct qdrv_mac *mac;
+	struct net_device *vdev = NULL;
+	uint8_t mac_addr[IEEE80211_ADDR_LEN] = {0};
+	int opmode = -1;
+	int flags = 0;
+	char *p;
+	int dev_id;
+	int rc;
+
+	qcb = dev_get_drvdata(dev);
+
+	if (argc == 1) {
+		if (qdrv_command_dev_init(dev, qcb) < 0)
+			return -1;
+	} else if ((argc == 4) || (argc == 5)) {
+		for (p = argv[1]; *p != '\0'; p++) {
+			if (!isdigit(*p)) {
+				goto error;
+			}
+		}
+
+		mac = qdrv_control_mac_get(dev, argv[1]);
+		if (mac == NULL) {
+			goto error;
+		}
+
+		if (strncmp(argv[2], "ap", 2) == 0) {
+			DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL, "AP\n");
+			opmode = IEEE80211_M_HOSTAP;
+		} else if (strncmp(argv[2], "sta", 3) == 0) {
+			DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL, "STA\n");
+			opmode = IEEE80211_M_STA;
+		} else if (strncmp(argv[2], "wds", 3) == 0) {
+			DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL, "WDS\n");
+			opmode = IEEE80211_M_WDS;
+		} else {
+			goto error;
+		}
+
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL,
+				"Interface name \"%s\"\n", argv[3]);
+
+		vdev = dev_get_by_name(&init_net, argv[3]);
+		if (vdev != NULL) {
+			DBGPRINTF_E("The device name \"%s\" already exists\n", argv[3]);
+			dev_put(vdev);
+			goto error;
+		}
+
+		if (argc == 5) {
+			if (qdrv_parse_mac(argv[4], mac_addr) < 0) {
+				DBGPRINTF_E("Error mac address for new vap\n");
+				goto error;
+			}
+		}
+
+		if (opmode == IEEE80211_M_HOSTAP &&
+				mac->mac_active_bss == QDRV_MAX_BSS_VAPS) {
+			DBGPRINTF_E("Maximum MBSSID VAPs reached (%d)\n",
+				 QDRV_MAX_BSS_VAPS);
+			goto error;
+		}
+
+		if (opmode == IEEE80211_M_WDS &&
+				mac->mac_active_wds == QDRV_MAX_WDS_VAPS) {
+			DBGPRINTF_E("Maximum WDS peers reached (%d)\n",
+				 QDRV_MAX_WDS_VAPS);
+			goto error;
+		}
+	} else if (argc == 2) {
+		if (strncmp(argv[1], "dsp", sizeof("dsp")) == 0)
+			(void) qdrv_start_dsp_only(dev);
+		else
+			goto error;
+	} else {
+		goto error;
+	}
+
+	/* Act on the opmode if set */
+	if (opmode > 0) {
+		dev_id = qdrv_soc_get_next_devid(qcb);
+
+		if (qdrv_soc_start_vap(qcb, dev_id, mac, argv[3], mac_addr, opmode, flags) < 0) {
+			DBGPRINTF_E("Failed to start VAP \"%s\"\n", argv[3]);
+			return -1;
+		}
+
+		if (qdrv_command_start_block(dev, "VAP create", &check_vap_created,
+				(void *)QDRV_WLANID_FROM_DEVID(dev_id) )) {
+			return -1;
+		}
+
+		if (opmode == IEEE80211_M_HOSTAP) {
+			mac->mac_active_bss++;
+		} else if (opmode == IEEE80211_M_WDS) {
+			mac->mac_active_wds++;
+		}
+
+		vdev = dev_get_by_name(&init_net, argv[3]);
+		if (vdev != NULL) {
+			rc = qdrv_control_mimo_mode_set(mac, vdev, opmode);
+			dev_put(vdev);
+			if (rc != 0) {
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+
+error:
+	DBGPRINTF_E("Invalid arguments to start command\n");
+
+	return -1;
+}
+
+static int qdrv_command_stop_one_vap(struct device *dev, struct qdrv_mac *mac, const char *vapname)
+{
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+	struct net_device *vdev = dev_get_by_name(&init_net, vapname);
+	struct qdrv_vap *qv;
+	enum ieee80211_opmode opmode;
+	unsigned int wlanid;
+
+	if (vdev == NULL) {
+		DBGPRINTF_E("net device \"%s\" doesn't exist\n", vapname);
+		return -ENODEV;
+	}
+	dev_put(vdev);
+
+	qv = netdev_priv(vdev);
+	opmode = qv->iv.iv_opmode;
+	wlanid = QDRV_WLANID_FROM_DEVID(qv->devid);
+
+	if (qdrv_vap_exit(mac, vdev) < 0) {
+		DBGPRINTF_E("Failed to exit VAP \"%s\"\n", vapname);
+		return -1;
+	}
+
+	if (qdrv_soc_stop_vap(qcb, mac, vdev) < 0) {
+		DBGPRINTF_E("Failed to stop VAP \"%s\"\n", vapname);
+		return -1;
+	}
+
+	if (opmode == IEEE80211_M_HOSTAP) {
+		mac->mac_active_bss--;
+	} else if (opmode == IEEE80211_M_WDS) {
+		mac->mac_active_wds--;
+	}
+
+	if (qdrv_command_start_block(dev, "VAP delete", &check_vap_deleted, (void *)wlanid)) {
+		return -1;
+	}
+
+	return 0;
+}
+
+static int qdrv_command_stop(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_mac *mac;
+	char *p;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (argc == 1) {
+		if(qdrv_soc_exit(dev) < 0) {
+			return(-1);
+		}
+	} else if (argc == 3) {
+		const char *macstr = argv[1];
+		const char *vapname = argv[2];
+
+		/* Second argument must be all digits */
+		for (p = argv[1]; *p != '\0'; p++) {
+			if (!isdigit(*p)) {
+				goto error;
+			}
+		}
+
+		mac = qdrv_control_mac_get(dev, macstr);
+		if (mac == NULL) {
+			DBGPRINTF_E("no mac for arg: \"%s\"\n", macstr);
+			return -ENODEV;
+		}
+
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL,
+				"Interface name \"%s\"\n", vapname);
+
+		if (strncmp(vapname, "all", 3) == 0) {
+			int i;
+			int res;
+			struct qdrv_wlan *qw = qdrv_mac_get_wlan(mac);
+
+			for (i = QDRV_MAX_VAPS - 1; i >= 0; --i) {
+				if (mac->vnet[i]) {
+					res = qdrv_command_stop_one_vap(dev, mac, mac->vnet[i]->name);
+					if (res != 0)
+						return res;
+				}
+			}
+
+			/* delete DFS/Radar-related timers */
+			qdrv_radar_unload(mac);
+			qdrv_wlan_cleanup_before_reload(&qw->ic);
+		} else {
+			return qdrv_command_stop_one_vap(dev, mac, vapname);
+		}
+	} else {
+		goto error;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+
+error:
+	DBGPRINTF_E("Invalid arguments to stop command\n");
+
+	return -1;
+}
+
+int _atoi(const char *str)
+{
+	int rVal = 0;
+	int sign = 1;
+
+	if (!str)
+		return 0;
+
+	while (*str && (*str == ' '|| *str == '\t'))
+		str++;
+
+	if (*str == '\0')
+		return 0;
+
+	if (*str == '+' || *str == '-')
+		sign = (*str++ == '+') ? 1 : -1;
+
+	while (*str && *str >= '0' && *str <= '9')
+		rVal = rVal * 10 + *str++ - '0';
+
+	return (rVal * sign);
+}
+
+#if 0
+#define DEFAULT_LENGTH 4
+int qdrv_command_calcmd_from_shell (char* calcmd, char **arg, int arc)
+{
+	int i;
+	char shellInterpret[128];
+	int length = DEFAULT_LENGTH;
+
+	for(i = 0; i < 128; i++)
+	{
+		shellInterpret[i] = 0;
+	}
+
+	if(strcmp("VCO", arg[1])==0)
+	{
+		shellInterpret[0] = 0x1;
+		shellInterpret[1] = 0x0;
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL | QDRV_LF_CALCMD, "VCO\n");
+	}
+	else if(strcmp("IQ_COMP", arg[1])==0)
+	{
+		shellInterpret[0] = 0x2;
+		shellInterpret[1] = 0x0;
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL | QDRV_LF_CALCMD, "IQ_COMP\n");
+	}
+	else if(strcmp("DC_OFFSET", arg[1])==0)
+	{
+		shellInterpret[0] = 0x3;
+		shellInterpret[1] = 0x0;
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL | QDRV_LF_CALCMD, "DC_OFFSET\n");
+	}
+	else
+	{
+		shellInterpret[0] = 0x0;
+		shellInterpret[1] = 0x0;
+	}
+
+	for(i = 2; i < arc; i++)
+	{
+		shellInterpret[i+2] = _atoi(arg[i]);
+		length ++;
+	}
+
+	shellInterpret[2] = (length & 0x00FF);
+	shellInterpret[3] = (length & 0xFF00)>>8;
+
+	memcpy(calcmd, shellInterpret, length);
+	return length;
+}
+#endif
+
+#define QDRV_LED_MAX_READ_SIZE 32
+
+int set_led_data (char *file, int data1, int data2){
+	int fd;
+	int ret_val;
+	int fr = -EINVAL;
+	int data;
+	int i;
+	char buf[QDRV_LED_MAX_READ_SIZE + 1];
+	mm_segment_t OldFs;
+	struct file *pfile;
+	loff_t pos = 0;
+	char p[QDRV_LED_MAX_READ_SIZE];
+
+	memset(buf, 0, sizeof(buf));
+	OldFs = get_fs();
+	set_fs(KERNEL_DS);
+
+	data = data1;
+	fd = sys_open(file, O_RDWR, 0);
+
+	if (fd < 0) {
+		fd = sys_open(file, O_CREAT | O_RDWR, 0666);
+		if (fd < 0) {
+			DBGPRINTF_E("Failed to Create File for LED GPIOs.\n");
+			goto setfs_ret;
+		}
+	}
+	if (fd >= 0) {
+		fr = sys_read(fd, buf, QDRV_LED_MAX_READ_SIZE);
+
+		if (fr == 0) {
+			for (i = 0; i < 2; i++) {
+				sprintf(p, "%d ", data);
+				pfile = fget(fd);
+				if (pfile) {
+					pos = pfile->f_pos;
+					ret_val = vfs_write(pfile, p, strlen(p), &pos);
+					pfile->f_pos = pos;	/* update f_pos before dropping the reference — pfile must not be touched after fput() */
+					fput(pfile);
+				}
+				data = data2;
+			}
+		} else if (fr > 0) {
+		} else {
+			DBGPRINTF_E("Failed to read LED GPIO file\n");
+		}
+		sys_close(fd);
+	}
+
+setfs_ret:
+	set_fs(OldFs);
+	return fr;
+}
+
+/* address range [begin, end], inclusive */
+struct reg_range {
+	u32 begin;
+	u32 end;
+	const char *description;
+};
+
+/*
+ * List below is based on "golden values" registers file
+ * Commented memory ranges contains "holes" which are not
+ * allowed to be read or they're intentionally missed by
+ * qregcheck tool.
+ */
+static struct reg_range qdrv_hw_regs[] = {
+	{0xE0000000, 0xE0000FFF, "System Control regs"},
+	/*{0xE1007FFC, 0xE1038FFF, "switch regs"},*/
+	{0xE1020000, 0xE1020200, "HBM regs"},
+	/*{0xE2000000, 0xE200000F, "SPI0 regs"},*/
+	/*{0xE3100000, 0xE310009F, "AHB regs"},*/
+	{0xE5030000, 0xE50300FF, "Packet memory"},
+	{0xE5040000, 0xE5043FFF, "TCM"},
+	{0xE5044000, 0xE50440FF, "Global control"},
+	{0xE5050000, 0xE5051FFF, "Global control regs(1): TX frame processor regs"},
+	{0xE5052000, 0xE5052FFF, "Global control regs(2): RX frame processor regs"},
+	{0xE5050300, 0xE5053FFF, "Global control regs(3): shared frame processor regs"},
+	{0xE5090000, 0xE50904FF, "Global control regs(4)"},
+	{0xE6000000, 0xE60003FF, "BB Global regs"},
+	{0xE6010000, 0xE60103FF, "BB BP regs"},
+	{0xE6020000, 0xE602002B, "BB TXVEC regs"},
+	{0xE6030000, 0xE6030037, "BB RXVEC regs"},
+	{0xE6040000, 0xE604FFFF, "BB SPI0 regs"},
+	{0xE6050000, 0xE60504C4, "BB MIMO regs"},
+	{0xE6090000, 0xE60906FF, "BB TD (1) regs"},
+	{0xE6091000, 0xE60915FF, "BB TD (2) regs"},
+	{0xE6092000, 0xE60925FF, "BB TD (3) regs"},
+	{0xE60A1000, 0xE60A133F, "BB TD (RFC W mem 0) regs"},
+	{0xE60A2000, 0xE60A233F, "BB TD (RFC W mem 1) regs"},
+	{0xE60A3000, 0xE60A333F, "BB TD (RFC W mem 2) regs"},
+	{0xE60A4000, 0xE60A433F, "BB TD (RFC W mem 3) regs"},
+	{0xE60A5000, 0xE60A53FF, "BB TD (RFC TX mem) regs"},
+	{0xE60A6000, 0xE60A63FF, "BB TD (RX mem) regs"},
+	{0xE60B1000, 0xE60B10FF, "BB TD (gain SG) regs"},
+	{0xE60B2000, 0xE60B24FB, "BB TD (gain AG) regs"},
+	{0xE60B3000, 0xE60B313F, "BB TD (gain AG) regs"},
+	{0xE60B4000, 0xE60B4063, "BB TD (gain AG) regs"},
+	{0xE60F0000, 0xE60F0260, "BB 11B regs"},
+	{0xE6100000, 0xE6107FFF, "BB QMatrix regs"},
+	{0xE6200000, 0xE6200FFB, "BB FFT dump registers rx chain 1"},
+	{0xE6201000, 0xE6201FFB, "BB FFT dump registers rx chain 2"},
+	{0xE6202000, 0xE6202FFB, "BB FFT dump registers rx chain 3"},
+	{0xE6203000, 0xE6203FFB, "BB FFT dump registers rx chain 4"},
+	{0xE6400000, 0xE6400FFB, "BB Radar regs"},
+	{0xE8000000, 0xE8000800, "EMAC1 Control regs"},
+	/*{0xE9000000, 0xE9000bFF, "PCIE regs"},*/
+	/*{0xEA000000, 0xEA0003FF, "DMAC Conrol regs"},*/
+	{0xED000000, 0xED000800, "EMAC0 Control regs"},
+	{0xF0000000, 0xF00000FF, "UART1 Control regs"},
+	{0xF1000000, 0xF100003F, "GPIO regs"},
+	{0xF2000000, 0xF2000020, "SPI1 regs"},
+	{0xF4000000, 0xF40000AF, "Timer Control regs"},
+	{0xF5000000, 0xF50000FF, "UART2 Control regs"},
+	{0xF6000000, 0xF60008FF, "DDR Control regs"},
+	/*{0xF9000000, 0xF90000FC, "I2C regs"},*/
+};
+
+/*
+ * Variables for the register save/comparison logic. Two buffers for caching the values
+ * and an index into the hardware registers structure for the currently measured set
+ * of registers.
+ */
+#define QDRV_MAX_REG_MONITOR ARRAY_SIZE(qdrv_hw_regs)
+#define QDRV_MAX_REG_PER_BUF 3
+static u32 *p_hw_reg_buf[QDRV_MAX_REG_MONITOR][QDRV_MAX_REG_PER_BUF] = {{NULL},{NULL}};
+
+static inline u32 qdrv_control_hwreg_get_range_len(struct reg_range *range)
+{
+	return range->end - range->begin + 1;
+}
+
+static inline u32 qdrv_control_hwreg_get_range_reg_count(struct reg_range *range)
+{
+	return qdrv_control_hwreg_get_range_len(range) / 4;
+}
+
+static void qdrv_control_hwreg_trigger(int set_num, int buf_num)
+{
+	int num_regs;
+	int i;
+	u32 bytes;
+	u32 *ptr;
+	/* Address-> register count, with each register 32-bits */
+	num_regs = qdrv_control_hwreg_get_range_reg_count(&qdrv_hw_regs[set_num]);
+
+	for (i = 0; i < QDRV_MAX_REG_PER_BUF - 1; i++) {	/* NOTE(review): allocates only the first QDRV_MAX_REG_PER_BUF-1 buffers, yet buf_num may address the last — off-by-one? */
+		if (p_hw_reg_buf[set_num][i] == NULL) {
+			p_hw_reg_buf[set_num][i] = kmalloc(num_regs * sizeof(u32), GFP_KERNEL);
+		}
+	}
+
+	bytes = qdrv_control_hwreg_get_range_len(&qdrv_hw_regs[set_num]);
+	ptr = ioremap_nocache(qdrv_hw_regs[set_num].begin, bytes);
+	if (ptr && p_hw_reg_buf[set_num][buf_num]) {
+		memcpy(p_hw_reg_buf[set_num][buf_num], ptr, bytes);
+		iounmap(ptr);
+	}
+}
+
+/* Print usage for the "muc_memdbg" control command (dumpcfg sub-commands
+ * are numbered by the QDRV_CMD_MUC_MEMDBG_* enum values). */
+static void qdrv_command_memdbg_usage(void)
+{
+	printk("Usage:\n"
+		"  muc_memdbg 0 dump\n"
+		"  muc_memdbg 0 status\n"
+		"  muc_memdbg 0 dumpcfg <cmd> <val>\n"
+		"     %u - max file descriptors to print\n"
+		"     %u - max node structures to print\n"
+		"     %u - max bytes per hex dump\n"
+		"     %u - print rate control tables (0 or 1)\n"
+		"     %u - send MuC msgs to netdebug (0 or 1)\n"
+		"     %u - print MuC trace msgs (0 or 1)\n",
+		QDRV_CMD_MUC_MEMDBG_FD_MAX,
+		QDRV_CMD_MUC_MEMDBG_NODE_MAX,
+		QDRV_CMD_MUC_MEMDBG_DUMP_MAX,
+		QDRV_CMD_MUC_MEMDBG_RATETBL,
+		QDRV_CMD_MUC_MEMDBG_MSG_SEND,
+		QDRV_CMD_MUC_MEMDBG_TRACE);
+}
+
+/*
+ * Periodic timer callback: flush buffered MuC system messages and re-arm
+ * the timer with the interval configured for QDRV_NETDEBUG_TYPE_SYSMSG.
+ *
+ * 'data' carries the qdrv_wlan pointer installed when the timer was set
+ * up in qdrv_command_muc_memdbgcfg().
+ */
+void qdrv_control_sysmsg_timer(unsigned long data)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) data;
+	struct qdrv_pktlogger_types_tbl *tbl =
+			qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_SYSMSG);
+
+	/* No table entry means no interval - stop re-arming (and skip the send). */
+	if (!tbl) {
+		return;
+	}
+
+	qdrv_control_sysmsg_send(qw, NULL, 0, 1);
+	mod_timer(&qw->pktlogger.sysmsg_timer, jiffies + (tbl->interval * HZ));
+}
+
+/*
+ * Apply one "muc_memdbg dumpcfg" parameter and forward it to the MuC via
+ * hostlink as (param << 16) | val.
+ *
+ * QDRV_CMD_MUC_MEMDBG_MSG_SEND additionally starts (val != 0) or stops
+ * (val == 0) the local periodic sysmsg flush timer.
+ */
+static void qdrv_command_muc_memdbgcfg(struct qdrv_mac *mac, struct qdrv_wlan *qw,
+					unsigned int param, unsigned int val)
+{
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+
+	if (param == QDRV_CMD_MUC_MEMDBG_MSG_SEND) {
+		mac->params.mucdbg_netdbg = !!val;
+		if (mac->params.mucdbg_netdbg == 0) {
+			/* Flush anything pending, then stop the timer. */
+			qdrv_control_sysmsg_send(qw, NULL, 0, 1);
+			del_timer(&qw->pktlogger.sysmsg_timer);
+		} else {
+			tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_SYSMSG);
+			if (!tbl) {
+				/* No interval available - leave the timer off.
+				 * NOTE(review): this return also skips the
+				 * hostlink message below - confirm intended. */
+				return;
+			}
+			init_timer(&qw->pktlogger.sysmsg_timer);
+			qw->pktlogger.sysmsg_timer.function = qdrv_control_sysmsg_timer;
+			qw->pktlogger.sysmsg_timer.data = (unsigned long)qw;
+			mod_timer(&qw->pktlogger.sysmsg_timer, jiffies +
+				(tbl->interval * HZ));
+		}
+	}
+
+	qdrv_hostlink_msg_cmd(qw, IOCTL_DEV_CMD_MEMDBG_DUMPCFG, (param << 16) | val);
+}
+
+/*
+ * "muc_memdbg <mac> (dump [-v]|status|dumpcfg <cmd> <val>|dumpnodes)"
+ * control command: forward MuC memory-debug requests over hostlink.
+ *
+ * Returns 0 on success or usage error, -1 when the MAC/WLAN context
+ * cannot be resolved.
+ */
+static int qdrv_command_muc_memdbg(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_mac *mac;
+	struct qdrv_wlan *qw;
+	u_int32_t arg = 0;
+	unsigned int param;
+	unsigned int val;
+
+	if (argc < 3) {
+		qdrv_command_memdbg_usage();
+		return 0;
+	}
+
+	mac = qdrv_control_mac_get(dev, argv[1]);
+	if (mac == NULL) {
+		return -1;
+	}
+
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw) {
+		return -1;
+	}
+
+	if (strcmp(argv[2], "dump") == 0) {
+		if (argc > 3) {
+			if (strcmp(argv[3], "-v") == 0) {
+				arg |= 0x1;
+			} else {
+				qdrv_command_memdbg_usage();
+				return 0;
+			}
+		}
+		qdrv_hostlink_msg_cmd(qw, IOCTL_DEV_CMD_MEMDBG_DUMP, arg);
+	} else if (strcmp(argv[2], "dumpcfg") == 0) {
+		if (argc < 5) {
+			qdrv_command_memdbg_usage();
+			return 0;
+		}
+		/*
+		 * Validate both conversions: the original ignored the sscanf
+		 * return values and could forward uninitialized param/val.
+		 */
+		if (sscanf(argv[3], "%u", &param) != 1 ||
+				sscanf(argv[4], "%u", &val) != 1) {
+			qdrv_command_memdbg_usage();
+			return 0;
+		}
+		qdrv_command_muc_memdbgcfg(mac, qw, param, val);
+	} else if (strcmp(argv[2], "status") == 0) {
+		qdrv_command_muc_memdbgcfg(mac, qw, 0, 0);
+	} else if (strcmp(argv[2], "dumpnodes") == 0) {
+		qdrv_hostlink_msg_cmd(qw, IOCTL_DEV_CMD_MEMDBG_DUMPNODES, arg);
+	} else {
+		qdrv_command_memdbg_usage();
+		return 0;
+	}
+
+	return 0;
+}
+
+/* Context handed to the hwreg show/compare iterator callbacks. */
+struct qdrv_hwreg_print_data {
+	struct seq_file *s;	/* non-NULL when output goes to a seq file */
+	unsigned int set_num;	/* index into qdrv_hw_regs */
+	unsigned int buf1_num;	/* first snapshot buffer index */
+	unsigned int buf2_num;	/* second snapshot buffer index (compare path) */
+};
+
+#define QDRV_HWREG_PRINT_BUF_LEN 1024
+/*
+ * Format a message and emit it either to the seq file carried in 'sets'
+ * (when present) or to the console.
+ *
+ * The formatted buffer is emitted through a fixed "%s" format: the
+ * original passed 'buf' as the format string itself, so any '%' in
+ * already-formatted data (e.g. a register description) would have been
+ * re-interpreted - a classic format-string bug.
+ */
+static void qdrv_dump_hwreg_print_func(struct qdrv_hwreg_print_data *sets, const char *f, ...)
+{
+	char buf[QDRV_HWREG_PRINT_BUF_LEN] = {0};
+	va_list args;
+
+	va_start(args, f);
+	vsnprintf(buf, QDRV_HWREG_PRINT_BUF_LEN - 1, f, args);
+	va_end(args);
+	if (sets != NULL && sets->s != NULL) {
+		seq_printf(sets->s, "%s", buf);
+	} else {
+		printk("%s", buf);
+	}
+}
+
+#define QDRV_HWREG_MIN_ARGS_CNT 4
+#define QDRV_HWREG_MAX_ARGS_CNT 6
+/*
+ * seq_file/printk iterator body: print one register of the selected set
+ * from snapshot buffer buf1_num.  'num' arrives counting down from the
+ * iterator and is inverted into a 0-based register index.
+ */
+static void qdrv_dump_hwreg_one_reg_output(struct seq_file *s, void *data, uint32_t num)
+{
+	struct qdrv_hwreg_print_data *sets = (struct qdrv_hwreg_print_data*)data;
+	int len;
+
+	/* Check before use: the original dereferenced sets->set_num in the
+	 * initializer, making the NULL test below it unreachable in time. */
+	if (sets == NULL) {
+		return;
+	}
+
+	len = qdrv_control_hwreg_get_range_reg_count(&qdrv_hw_regs[sets->set_num]);
+	sets->s = s;
+	/* Inverse counter because seq file iterator uses reverse direction */
+	num = len - num;
+	if (num >= len) {
+		printk("%s: seq file iterator: bad register number %u\n", __func__, num);
+		return;
+	}
+
+	/* The snapshot buffer may be missing if its allocation failed. */
+	if (p_hw_reg_buf[sets->set_num][sets->buf1_num] == NULL) {
+		return;
+	}
+
+	if (num == 0) {
+		qdrv_dump_hwreg_print_func(sets, "#  Mac reg set %s:\n",
+				qdrv_hw_regs[sets->set_num].description);
+	}
+
+	/* printk path packs two registers per line; seq path one per line. */
+	qdrv_dump_hwreg_print_func(sets, s == NULL ? "%d: 0x%08X: 0x%08X " : "%d: 0x%08X: 0x%08X\n",
+			num,
+			qdrv_hw_regs[sets->set_num].begin + (num*4),
+			p_hw_reg_buf[sets->set_num][sets->buf1_num][num]);
+
+	if (s == NULL && num && ((num % 2) == 0)) {
+		qdrv_dump_hwreg_print_func(sets, "\n");
+	}
+}
+
+/* Console-only dump of one register set via the shared per-register printer. */
+static void qdrv_dump_hwreg_printk_output(struct qdrv_hwreg_print_data *sets)
+{
+	int total = qdrv_control_hwreg_get_range_reg_count(&qdrv_hw_regs[sets->set_num]);
+	int idx = 1;
+
+	/*
+	 * Indices run 1..total so qdrv_dump_hwreg_one_reg_output (which
+	 * inverts the counter for seq_file compatibility) sees the same
+	 * values it would get from the seq_file iterator.
+	 */
+	while (idx <= total) {
+		qdrv_dump_hwreg_one_reg_output(NULL, (void *)sets, idx);
+		idx++;
+	}
+}
+
+#define QDRV_HWREGPRINT_MIN_ARGS_CNT 2
+#define QDRV_HWREGPRINT_MAX_ARGS_CNT 3
+/*
+ * seq_file/printk iterator body: print the summary line (index, name and
+ * address range) for one monitored register group.
+ *
+ * 'grp_num' arrives counting down from the iterator and is inverted into
+ * a 0-based qdrv_hw_regs index; 'sets' may be NULL on the printk path
+ * (the shared print helper tolerates that).
+ */
+static void qdrv_dump_hwregprint_one_group(struct seq_file *s, void *data, uint32_t grp_num)
+{
+	struct qdrv_hwreg_print_data *sets = (struct qdrv_hwreg_print_data*)data;
+
+	if (sets != NULL) {
+		sets->s = s;
+	}
+
+	/* Inverse counter because seq file iterator uses reverse direction */
+	grp_num = QDRV_MAX_REG_MONITOR - grp_num;
+	if (grp_num >= QDRV_MAX_REG_MONITOR) {
+		printk("%s: seq file iterator: bad group number at %u\n", __func__, grp_num);
+		return;
+	}
+
+	qdrv_dump_hwreg_print_func(sets, "Set %d (%s, 0x%08X->0x%08X)\n",
+		grp_num, qdrv_hw_regs[grp_num].description,
+		qdrv_hw_regs[grp_num].begin,
+		qdrv_hw_regs[grp_num].end);
+}
+
+/* Console-only listing of all monitored register groups. */
+static void qdrv_dump_hwregprint_printk_output(struct qdrv_hwreg_print_data *sets)
+{
+	int idx = QDRV_MAX_REG_MONITOR;
+
+	qdrv_dump_hwreg_print_func(sets, "\n");
+	/* Count down so the shared printer's inversion yields groups 0..N-1. */
+	while (idx > 0) {
+		qdrv_dump_hwregprint_one_group(NULL, (void *)sets, idx);
+		idx--;
+	}
+}
+
+#define QDRV_HWREGCMP_MIN_ARGS_CNT 3
+#define QDRV_HWREGCMP_MAX_ARGS_CNT 6
+/*
+ * seq_file/printk iterator body: compare one register between the two
+ * snapshot buffers of a set.  Changed registers are always printed;
+ * unchanged ones only on the seq_file path.
+ */
+static void qdrv_dump_hwregcmp_one_reg(struct seq_file *s, void *data, uint32_t reg_num)
+{
+	struct qdrv_hwreg_print_data *sets = (struct qdrv_hwreg_print_data*)data;
+	int set_num = sets->set_num,
+	    buf_1_num = sets->buf1_num,
+	    buf_2_num = sets->buf2_num,
+	    reg_count;
+
+	sets->s = s;
+	/* Either snapshot may be missing if its allocation failed. */
+	if (!p_hw_reg_buf[set_num][buf_1_num] || !p_hw_reg_buf[set_num][buf_2_num]) {
+		return;
+	}
+
+	reg_count = qdrv_control_hwreg_get_range_reg_count(&qdrv_hw_regs[set_num]);
+	/* Inverse counter because seq file iterator uses reverse direction */
+	reg_num = reg_count - reg_num;
+	/*
+	 * '>=' (not '>') as in qdrv_dump_hwreg_one_reg_output: an incoming
+	 * reg_num of 0 inverts to reg_count, which the original's '>' test
+	 * let through, indexing one word past the end of the buffers.
+	 */
+	if (reg_num >= reg_count) {
+		printk("%s: seq file iterator: bad register number %u\n", __func__, reg_num);
+		return;
+	}
+
+	if (reg_num == 0) {
+		qdrv_dump_hwreg_print_func(sets, "#  Mac reg set %s:\n",
+				qdrv_hw_regs[sets->set_num].description);
+	}
+
+	/* (The stray trailing NULL varargs in the original calls are dropped.) */
+	if (p_hw_reg_buf[set_num][buf_1_num][reg_num] != p_hw_reg_buf[set_num][buf_2_num][reg_num]) {
+		qdrv_dump_hwreg_print_func(sets, "0x%08X: %08X -> %08X\n",
+			qdrv_hw_regs[set_num].begin + (reg_num * 4),
+			p_hw_reg_buf[set_num][buf_1_num][reg_num],
+			p_hw_reg_buf[set_num][buf_2_num][reg_num]);
+	} else if (sets->s != NULL) {
+		qdrv_dump_hwreg_print_func(sets, "0x%08X: ===== %08X =====\n",
+			qdrv_hw_regs[set_num].begin + (reg_num * 4),
+			p_hw_reg_buf[set_num][buf_2_num][reg_num]);
+	}
+}
+
+/* Console-only comparison of every register in the selected set. */
+static void qdrv_dump_hwregcmp_all(struct qdrv_hwreg_print_data *sets)
+{
+	int total = qdrv_control_hwreg_get_range_reg_count(&qdrv_hw_regs[sets->set_num]);
+	int idx;
+
+	/* 1..total matches the counting convention of the seq_file iterator. */
+	for (idx = 1; idx <= total; idx++) {
+		qdrv_dump_hwregcmp_one_reg(NULL, (void *)sets, idx);
+	}
+}
+
+/*
+ * Final "diff" dump for every register set that has ever been snapshotted
+ * (buffer 0 allocated): take one last snapshot into the reserved final
+ * buffer and print the comparison against buffer 0.
+ *
+ * NOTE(review): output depends on the final buffer (index
+ * QDRV_MAX_REG_PER_BUF - 1) having been allocated by
+ * qdrv_control_hwreg_trigger() - verify its allocation loop covers
+ * that index.
+ */
+void qdrv_control_dump_active_hwreg(void)
+{
+	int i;
+
+	for (i = 0; i < QDRV_MAX_REG_MONITOR; i++) {
+		if (p_hw_reg_buf[i][0]) {
+			/*
+			 * If the buffer is allocated, do one final dump in the final index.
+			 */
+			qdrv_control_hwreg_trigger(i, QDRV_MAX_REG_PER_BUF - 1);
+			printk("\nDump active registers for set %d (%s)\n",
+					i, qdrv_hw_regs[i].description);
+			qdrv_dump_hwregcmp_all(&(struct qdrv_hwreg_print_data){NULL,
+					i, 0, QDRV_MAX_REG_PER_BUF - 1});
+		}
+	}
+}
+
+/*
+ * "dump hwregcmp <set> [<buf1> <buf2>] [--qdrvdata]" handler: compare two
+ * snapshot buffers of a register set, via /proc/qdrvdata with --qdrvdata
+ * or on the console otherwise.
+ */
+static void qdrv_dump_hwregcmp(int argc, char *argv[])
+{
+	/* NOTE(review): positional initializer yields buf1_num=1, buf2_num=0;
+	 * kept as-is to preserve the existing default comparison order. */
+	static struct qdrv_hwreg_print_data sets = {NULL, 0, 1};
+	int qdrvdata_output = 0;
+
+	if (argc < QDRV_HWREGCMP_MIN_ARGS_CNT || argc > QDRV_HWREGCMP_MAX_ARGS_CNT) {
+		printk("Bad arguments count\n");
+		return;
+	}
+
+	sscanf(argv[2], "%u", &sets.set_num);
+	if (sets.set_num > QDRV_MAX_REG_MONITOR - 1) {
+		printk("Buffer set value out of range - should be between 0 and %d\n",
+				QDRV_MAX_REG_MONITOR - 1);
+		return;
+	}
+
+	if (argc > 3 && strcmp("--qdrvdata", argv[argc - 1]) == 0) {
+		qdrvdata_output = 1;
+	}
+
+	/*
+	 * Only parse buffer indices when both positional arguments are
+	 * actually present.  The original read argv[4] whenever argc > 3,
+	 * walking off the argument array for "hwregcmp <set> --qdrvdata"
+	 * (argc == 4).
+	 */
+	if (argc - qdrvdata_output >= 5) {
+		sscanf(argv[3], "%u", &sets.buf1_num);
+		sscanf(argv[4], "%u", &sets.buf2_num);
+	}
+
+	if (sets.buf1_num > QDRV_MAX_REG_PER_BUF - 1 || sets.buf2_num > QDRV_MAX_REG_PER_BUF - 1) {
+		printk("Invalid buffer index\n");
+		return;
+	}
+
+	if (qdrvdata_output == 1) {
+		int reg_count = qdrv_control_hwreg_get_range_reg_count(&qdrv_hw_regs[sets.set_num]);
+		qdrv_control_set_show(qdrv_dump_hwregcmp_one_reg, &sets, reg_count, 1);
+	} else {
+		qdrv_dump_hwregcmp_all(&sets);
+	}
+}
+
+/*
+ * "dump hwreg <set> <buf> [--verbose [--qdrvdata]]" handler: snapshot one
+ * monitored register range into the selected buffer and, with --verbose,
+ * print the captured values (via /proc/qdrvdata when --qdrvdata is also
+ * given).
+ */
+static void qdrv_dump_hwreg(int argc, char *argv[])
+{
+	/* Static: the seq_file show callback reads this after we return. */
+	static struct qdrv_hwreg_print_data sets = {NULL, 0, 0};
+	int qdrvdata_output = 0;
+	int verbose = 0;
+
+	if (argc < QDRV_HWREG_MIN_ARGS_CNT || argc > QDRV_HWREG_MAX_ARGS_CNT) {
+		printk("Bad arguments count\n");
+		return;
+	}
+
+	sscanf(argv[2], "%u", &sets.set_num);
+	if (sets.set_num > QDRV_MAX_REG_MONITOR - 1) {
+		printk("Buffer value out of range - should be between 0 and %d\n",
+				QDRV_MAX_REG_MONITOR - 1);
+		return;
+	}
+
+	sscanf(argv[3], "%u", &sets.buf1_num);
+	if (sets.buf1_num > (QDRV_MAX_REG_PER_BUF - 1)) {
+		printk("Buffer value out of range - should be between 0 and %d\n",
+				QDRV_MAX_REG_PER_BUF - 1);
+		return;
+	}
+
+	printk("[%d]Dump register set \"%s\"\n", sets.set_num, qdrv_hw_regs[sets.set_num].description);
+	if (argc > QDRV_HWREG_MIN_ARGS_CNT) {
+		int i;
+		for (i = QDRV_HWREG_MIN_ARGS_CNT; i < argc; ++i) {
+			if (strcmp("--qdrvdata", argv[i]) == 0) {
+				qdrvdata_output = 1;
+			} else if (strcmp("--verbose", argv[i]) == 0) {
+				verbose = 1;
+			} else {
+				printk("Unknown option \"%s\"\n", argv[i]);
+				return;
+			}
+		}
+	}
+
+	if (qdrvdata_output && !verbose)
+		printk("Option --qdrvdata doesn't make sense without --verbose option\n");
+
+	/* The snapshot is always taken; printing only happens with --verbose. */
+	qdrv_control_hwreg_trigger(sets.set_num, sets.buf1_num);
+	if (verbose) {
+		if (qdrvdata_output) {
+			int len = qdrv_control_hwreg_get_range_reg_count(&qdrv_hw_regs[sets.set_num]);
+			qdrv_control_set_show(qdrv_dump_hwreg_one_reg_output, &sets, len, 1);
+		} else {
+			qdrv_dump_hwreg_printk_output(&sets);
+		}
+	}
+}
+
+/*
+ * "dump hwregprint [--qdrvdata]" handler: list all monitored register
+ * groups, through /proc/qdrvdata with --qdrvdata or via printk otherwise.
+ */
+static void qdrv_dump_hwregprint(int argc, char *argv[])
+{
+	static struct qdrv_hwreg_print_data sets = {NULL, 0, 0};
+	int use_qdrvdata;
+
+	printk("\n");
+	if (argc > QDRV_HWREGPRINT_MAX_ARGS_CNT) {
+		printk("Bad arguments\n");
+		return;
+	}
+
+	/* Short-circuit keeps argv[2] untouched unless it exists. */
+	use_qdrvdata = (argc > QDRV_HWREGPRINT_MIN_ARGS_CNT) &&
+			(strcmp(argv[2], "--qdrvdata") == 0);
+
+	if (use_qdrvdata) {
+		qdrv_control_set_show(qdrv_dump_hwregprint_one_group, &sets,
+				(u32)QDRV_MAX_REG_MONITOR, 1);
+	} else {
+		qdrv_dump_hwregprint_printk_output(&sets);
+	}
+}
+
+/*
+ * Dump and bit-decode the baseband interrupt status registers at
+ * 0xe6000320 and 0xe6000324.
+ *
+ * (printf is used interchangeably with printk elsewhere in this file;
+ * presumably a printk alias - TODO confirm.)
+ */
+static void qdrv_dump_irqstatus(void)
+{
+	uint32_t *ptr = ioremap_nocache(0xe6000320, 16);
+	uint32_t val;
+
+	/* The original dereferenced the mapping without a NULL check. */
+	if (ptr == NULL) {
+		printk("%s: ioremap failed\n", __func__);
+		return;
+	}
+
+	val = *ptr;
+	printf("BB IRQ status\n");
+	printf("0xe6000320:%08x\n",val);
+	printf("td_sigma_hw_noise:%x, radar:%x, rfc:%x, dleaf_overflow:%x, leaf_overflow:%x\n",
+			val & 1,(val>>1)&0x3f,(val>>7)&3,(val>>9)&1,(val>>10)&1);
+	printf("tx_td_overflow:%x, com_mem:%x, tx_td_underflow:%x, rfic:%x\n",
+			(val>>11)&1,(val>>12)&0x1,(val>>13)&1,(val>>14)&1);
+	printf("rx_sm_watchdog:%x, tx_sm_watchdog:%x, main_sm_watchdog:%x, hready_watchdog:%x\n",
+			(val>>15)&1,(val>>16)&0x1,(val>>17)&1,(val>>18)&1);
+
+	val = *(u32 *)((u32)ptr + 4);
+	printf("0xe6000324:%08x\n",val);
+	printf("rx_done:%x, rx_phase:%x, rx_start:%x, tx_done:%x, tx_phase:%x, tx_start:%x \n",
+			(val>>0)&1,(val>>1)&0x1,(val>>2)&1,(val>>3)&1,(val>>4)&1,(val>>5)&1);
+	iounmap(ptr);
+}
+
+/*
+ * "dump txstatus" handler: print per-queue TX FIFO state (read/write
+ * pointers, level/enable bits and the two descriptor words at the
+ * current read pointer) for the four base queues and the two timer
+ * queue banks.
+ *
+ * NOTE(review): both ioremap_nocache() results are used without NULL
+ * checks, and in the two timer loops the read_ptr derived from the
+ * 0x410/0x420 registers is immediately overwritten by the 0x400-based
+ * value - confirm which source is intended.
+ */
+static void qdrv_dump_txstatus(void)
+{
+	uint32_t *ptr  = ioremap_nocache(0xe5050000, 0x800);
+	uint32_t *ptr1 = ioremap_nocache(0xe5040000, 0x200);
+	uint32_t read_ptr;
+	uint32_t temp;
+	uint32_t i;
+
+	/* For read and write pointers, the MSB is ignored */
+	for (i = 0; i < 4; i++) {
+		printk("Queue  %x, %x wr:%03x rd:%03x ",
+				i, (u32)(0xe5040000 +  i * 32),
+				*(u32 *)((u32)ptr + 0x308 + i * 16) + 0xe5030000,
+				*(u32 *)((u32)ptr + 0x30c + i * 16) + 0xe5030000);
+		temp = *(u32 *)((u32)ptr + 0x400 + i * 4);
+		printk("level %d, wrptr:%x, rdptr:%x en:%x ",
+				temp & 0xf, (temp >> 4) & 0xf, (temp >> 9) & 0x3, (temp >> 31) & 1);
+		read_ptr = (temp >> 9) & 0x3;
+		printk("frm:%08x %08x\n",
+				*(u32 *)((u32)ptr1 + i * 32 + read_ptr * 8),
+				*(u32 *)((u32)ptr1 + i * 32 + 4 + read_ptr * 8));
+	}
+	for (i = 0; i < 4; i++) {
+		read_ptr = ((*(u32 *)((u32)ptr + 0x410 + i * 4) & 0xf) -
+				((*(u32 *)((u32)ptr + 0x410 + i * 4) >> 26) & 0xf)) & 0xf;
+		printk("Timer0 %x, %x wr:%03x rd:%03x ",
+				i, (u32)(0xe5040000 + 0x80 + i * 32),
+				*(u32 *)((u32)ptr + 0x348 + i * 16) + 0xe5030000,
+				*(u32 *)((u32)ptr + 0x34c + i * 16) + 0xe5030000);
+		temp = *(u32 *)((u32)ptr + 0x400 + i * 4);
+		printk("level %d, wrptr:%x, rdptr:%x en:%x ",
+				temp & 0xf, (temp >> 4) & 0xf, (temp >> 9) & 0x3, (temp >> 31) & 1);
+		read_ptr = (temp >> 9) & 0x3;
+		printk("frm:%08x %08x\n",
+				*(u32 *)((u32)ptr1 + i * 32 + read_ptr * 8 + 0x80),
+				*(u32 *)((u32)ptr1 + i * 32 + 4 + read_ptr * 8 + 0x80));
+	}
+	for (i = 0; i < 4; i++) {
+		read_ptr =((*(u32 *)((u32)ptr + 0x420 + i * 4) & 0xf) -
+				((*(u32 *)((u32)ptr + 0x420 + i * 4) >> 26) & 0xf)) & 0xf;
+		printk("Timer1 %x, %x wr:%03x rd:%03x ",
+				i, (u32)(0xe5040000 + 0x100 + i * 32),
+				*(u32 *)((u32)ptr + 0x388 + i * 16) + 0xe5030000,
+				*(u32 *)((u32)ptr + 0x38c + i * 16) + 0xe5030000);
+		temp = *(u32 *)((u32)ptr + 0x400 + i * 4);
+		printk("level %d, wrptr:%x, rdptr:%x en:%x ",
+				temp & 0xf, (temp >> 4) & 0xf, (temp >> 9) & 0x3, (temp >> 31) & 1);
+		read_ptr = (temp >> 9) & 0x3;
+		printk("frm:%08x %08x\n",
+				*(u32 *)((u32)ptr1 + i * 32 + read_ptr * 8 + 0x100),
+				*(u32 *)((u32)ptr1 + i * 32 + 4 + read_ptr * 8 + 0x100));
+	}
+	iounmap(ptr);
+	iounmap(ptr1);
+}
+
+/*
+ * Print the driver event log: each entry's printf-style string is
+ * formatted with its five saved arguments, then emitted with its
+ * jiffies/clk timestamps.  Stops at the first entry with a NULL string.
+ *
+ * NOTE(review): sprintf into a 256-byte stack buffer relies on every
+ * logged format expanding short - consider snprintf.
+ */
+static void qdrv_dump_event_log(void)
+{
+	char buffer[256] = {0};
+	int i;
+
+	for (i = 0; i < QDRV_EVENT_LOG_SIZE; i++) {
+		if (qdrv_event_log_table[i].str == NULL) {
+			break;
+		}
+		sprintf(buffer,qdrv_event_log_table[i].str,qdrv_event_log_table[i].arg1,
+			qdrv_event_log_table[i].arg2,
+			qdrv_event_log_table[i].arg3,
+			qdrv_event_log_table[i].arg4,
+			qdrv_event_log_table[i].arg5);
+		printf("%08x:%08x %s\n",qdrv_event_log_table[i].jiffies,qdrv_event_log_table[i].clk,buffer);
+	}
+}
+
+/* "dump muc" handler: forward to the MuC log dumper for the first MAC. */
+static void qdrv_dump_muc_log(struct device *dev)
+{
+	extern int qdrv_dump_log(struct qdrv_wlan *qw);
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+	struct qdrv_wlan *wlan = (struct qdrv_wlan *)qcb->macs[0].data;
+
+	qdrv_dump_log(wlan);
+}
+
+/*
+ * "dump fctl <addr>" handler: map the 32-word TX frame-control
+ * descriptor at <addr> and decode its fields to the console.
+ *
+ * Fix: the original parsed argv[2] without checking argc, so running
+ * "dump fctl" with no address dereferenced a missing argument.
+ */
+static void qdrv_dump_fctl(int argc, char *argv[])
+{
+	uint32_t *ptr = NULL;
+	uint32_t *ptr1 = NULL;
+	uint32_t temp = 0;
+	char frame_type[10][12] = {"0 prestore","1 manage","2 data","3 A-MSDU","4-A-MPDU","5 beacon","6 probe","7 vht bf", "8 ht bf", "unknown"};
+
+	/* The descriptor address argument is mandatory. */
+	if (argc < 3) {
+		printk("usage: dump fctl <addr>\n");
+		return;
+	}
+
+	sscanf(argv[2],"%x",&temp);
+	if (temp == 0) {
+		printk("invalid address!\n");
+		return;
+	}
+
+	ptr = ioremap_nocache(temp, 32 * 4);
+	if (ptr == NULL)
+		return;
+
+	/* Walk the descriptor word by word; ptr1 advances one u32 per field group. */
+	ptr1 = ptr;
+	temp = *ptr1++;
+	printk("\nframe_type    %s",frame_type[min((temp>>4) & 0xf, (u32)9)]);
+	printk("\nnot sounding: %8x",(temp >> 3) & 0x1 );
+	printk("\trate table:   %8x",(temp >> 8) & 0x1 );
+	printk("\tUse RA:       %8x",(temp >> 9) & 0x1 );
+	printk("\tburst OK:     %8x",(temp >> 10) & 0x1 );
+	printk("\nRIFS EN:      %8x",(temp >> 11) & 0x1 );
+	printk("\tDur Upadate:  %8x",(temp >> 12) & 0x1 );
+	printk("\tencrypt en:   %8x",(temp >> 13) & 0x1 );
+	printk("\tignore cca:   %8x",(temp >> 14) & 0x1 );
+	printk("\nignore nav:   %8x",(temp >> 15) & 0x1 );
+	printk("\tmore frag:    %8x",(temp >> 16) & 0x1 );
+	printk("\tnode en:      %8x",(temp >> 17) & 0x1 );
+	printk("\tprefetch en:  %8x",(temp >> 18) & 0x1 );
+	printk("\nint en:       %8x",(temp >> 19) & 0x1 );
+	printk("\tfirst seq sel:%8x",(temp >> 21) & 0x1 );
+	printk("\thdr len:      %8x",(temp >> 24) & 0xff );
+
+	temp = *ptr1++;
+	printk("\tframe Len:    %8x",(temp >> 0) & 0xffff );
+	printk("\nprefectch Len:%8x",(temp >> 16) & 0xffff );
+	temp = *ptr1++;
+	printk("\tprefetch addr:%08x",temp);
+	temp = *ptr1++;
+	printk("\tDMA src addr: %08x",temp);
+	temp = *ptr1++;
+	printk("\tDMA next addr:%08x",temp);
+
+	temp = *ptr1++;
+	printk("\nnDMA Len:     %8x",(temp >> 0) & 0xffff );
+	printk("\tRFTx Pwr0:    %8x",(temp >> 16) & 0xff );
+	printk("\tRFTx Pwr0:    %8x",(temp >> 24) & 0xff );
+
+	temp = *ptr1++;
+	printk("\tAIFSN:        %8x  ",(temp >> 0) & 0xf );
+	printk("\nECWmin:       %8x",(temp >> 4) & 0xf );
+	printk("\tECWmax:       %8x",(temp >> 8) & 0xf );
+	printk("\tNode Cache:   %8x",(temp >> 12) & 0x7f );
+	printk("\tHW DMA:       %8x",(temp >> 19) & 0x1 );
+	printk("\nFrm TimeoutEn:%8x",(temp >> 23) & 0x1 );
+	printk("\tTXOP En:      %8x",(temp >> 24) & 0x1 );
+
+	printk("\tA-MPDU Den:   %8x",(temp >> 25) & 0x7 );
+	printk("\tSM Tone Grp:  %8x",(temp >> 29) & 0x7 );
+
+	temp = *ptr1++;
+	printk("\nFrm timeout:  %8x",temp);
+	temp = *ptr1++;
+	printk("\tTx Status:    %8x",temp);
+	temp = *ptr1++;
+	printk("\tRA[31:0]:     %8x",temp);
+	temp = *ptr1++;
+	printk("\tRA[47:32]:    %8x",(temp >> 0) & 0xffff );
+	printk("\nRateRty 0:    %8x",0xe5040000 + ((temp >> 16) & 0xffff));
+	temp = *ptr1++;
+	printk("\tRateRty 1:    %8x",0xe5040000 + ((temp >> 0) & 0xffff ));
+	printk("\tNxt Frm Len:  %8x",(temp >> 16) & 0xffff );
+
+	temp = *ptr1++;
+	printk("\tEDCA TxOpLim: %8x  ",(temp >> 0) & 0xff );
+	printk("\nTxScale:      %8x",(temp >> 8) & 0x7 );
+	printk("\tSmoothing:    %8x",(temp >> 19) & 0x1 );
+	printk("\tNot Sounding: %8x",(temp >> 20) & 0x1 );
+	printk("\tshort GI:     %8x",(temp >> 24) & 0x1 );
+	temp = *ptr1++;
+	printk("\nAntSelPtr:    %8x",(temp >> 0) & 0xffff );
+	printk("\tFxdRateSeqPtr:%8x",(temp >> 16) & 0xffff );
+	temp = *ptr1++;
+	printk("\tNumSubFrames: %8x",(temp >> 0) & 0xffff );
+	printk("\tTotalDenBytes:%8x",(temp >> 16) & 0xffff );
+
+	temp = *ptr1++;
+	printk("\nPN[31:0]:     %8x",temp);
+	temp = *ptr1++;
+	printk("\tPN[47:0]:     %8x",(temp >> 0) & 0xffff );
+	printk("\tRxTxPwr 2:    %8x",(temp >> 16) & 0xff );
+	printk("\tRxTxPwr 3:    %8x",(temp >> 24) & 0xff );
+
+	temp = *ptr1++;
+	printk("\nTxService:    %8x",(temp >> 0) & 0xffff );
+	printk("\tLSIG Rsvd:    %8x",(temp >> 16) & 1 );
+	printk("\tHTSIG Rsvd:   %8x",(temp >> 17) & 1 );
+
+	printk("\n");
+	iounmap(ptr);
+}
+
+/*
+ * "dump rrt <addr>" handler: decode up to four rate-retry table entries
+ * starting at <addr>, stopping early on an entry with the LastEntry bit
+ * (bit 31 of the second word) set.
+ *
+ * Fix: the original parsed argv[2] without checking argc, so running
+ * "dump rrt" with no address dereferenced a missing argument.
+ */
+static void qdrv_dump_rrt(int argc, char *argv[])
+{
+	uint32_t *ptr = NULL;
+	uint32_t *ptr1 = NULL;
+	uint32_t temp = 0;
+	int not_done = 4;
+
+	/* The table address argument is mandatory. */
+	if (argc < 3) {
+		printk("usage: dump rrt <addr>\n");
+		return;
+	}
+
+	sscanf(argv[2],"%x",&temp);
+	if (temp == 0) {
+		printk("invalid address!\n");
+		return;
+	}
+
+	ptr = ioremap_nocache(temp, 32 * 4);
+	if (ptr == NULL)
+		return;
+
+	ptr1 = ptr;
+	do {
+		temp = *ptr1++;
+		printk("\nRateInd:      %8x",(temp >> 0) & 0x7f );
+		printk("\tLongPre:      %8x",(temp >> 7) & 0x1 );
+		printk("\t11n:          %8x",(temp >> 8) & 0x1 );
+		printk("\tBW:           %8x",(temp >> 9) & 0x3 );
+		printk("\nChOff:        %8x",(temp >> 11) & 0x3 );
+		printk("\tNEss:         %8x",(temp >> 13) & 0x3 );
+		printk("\tShortGI:      %8x",(temp >> 15) & 0x1 );
+		printk("\tCount:        %8x",(temp >> 16) & 0xf );
+		printk("\nAntSet:       %8x",(temp >> 20) & 0xf );
+		printk("\tAntSetOn:     %8x",(temp >> 24) & 0x1 );
+		printk("\tNTx:          %8x",(temp >> 25) & 0x3 );
+		printk("\tSTBC:         %8x",(temp >> 27) & 0x3 );
+		printk("\nExpMatType:   %8x",(temp >> 29) & 0x7 );
+		temp = *ptr1++;
+		printk("\tSeqPtr:       %8x",(temp >> 0) & 0x7fff);
+		printk("\tExpMatPtr:    %8x",(temp >> 16) & 0x3f);
+		printk("\tLDPC:         %8x",(temp >> 26) & 0x1);
+		printk("\nLDPCAdj:      %8x",(temp >> 27) & 0x1);
+		printk("\tShiftVal:     %8x",(temp >> 28) & 0x7);
+		printk("\tLastEntry:    %8x",(temp >> 31) & 0x1);
+		printk("\t11ac:         %8x",(temp >> 15) & 0x1);
+		not_done--;
+	} while (((temp & 0x80000000)==0) && not_done);
+
+	printk("\n");
+	iounmap(ptr);
+}
+
+/*
+ * "dump mem <addr> [num bytes]" handler: hex-dump a physical address
+ * range, four 32-bit words per line.  Both <addr> and the optional byte
+ * count are parsed as hexadecimal.
+ */
+static void qdrv_dump_mem(int argc, char *argv[])
+{
+	uint32_t *ptr;
+	int num_bytes = 256;
+	uint32_t addr;
+	int i;
+
+	if (argc < 3) {
+		printk("dump mem <addr> [num bytes]\n");
+		return;
+	}
+
+	sscanf(argv[2],"%x",&addr);
+
+	// if ddr addr, add the ddr bit otherwise assume addr
+	// is correct (i.e. user has to correct for sram addr
+	addr &= 0xfffffffc;
+	if (addr < 0x80000000) {
+		addr |= 0x80000000;
+	}
+
+	if (!qdrv_command_is_valid_addr(addr)) {
+		printk("invalid address\n");
+		return;
+	}
+
+	if (argc >= 4) {
+		sscanf(argv[3],"%x",&num_bytes);
+	}
+	ptr = ioremap_nocache(addr, num_bytes);
+	if (!ptr) {
+		printk("remapping failed\n");
+	} else {
+		for (i = 0; i < num_bytes; i += 4) {
+			/* New line with the address every 16 bytes. */
+			if ((i%16) == 0) {
+				printk("\n%08x: ",addr + i);
+			}
+			printk("%08x ",*(u32 *)((int)ptr + i));
+		}
+		printk("\n");
+		iounmap(ptr);
+	}
+}
+
+/*
+ * "dump dma <addr>" handler: walk a DMA descriptor chain starting at
+ * <addr>, printing src/dst/next/len of each descriptor plus the first
+ * 8 bytes of its source buffer.  The walk ends at a zero next pointer.
+ *
+ * NOTE(review): 'addr' is a signed int - the "< 0x80000000" tests rely
+ * on the usual arithmetic conversions making the comparison unsigned;
+ * also there is no bound on the walk, so a looping chain never
+ * terminates, and ioremap results are only partially checked - confirm.
+ */
+static void qdrv_dump_dma(int argc, char *argv[])
+{
+	uint32_t *ptr = NULL;
+	int addr;
+
+	if (argc < 3) {
+		printk("dump dma <addr>\n");
+		return;
+	}
+
+	sscanf(argv[2], "%x", &addr);
+	addr &= 0xfffffffc;
+
+	while (addr != 0) {
+		uint8_t *data_ptr;
+		uint32_t src;
+		int i;
+
+		// convert from muc addr
+		if (addr < 0x80000000) {
+			addr |= 0x80000000;
+		} else {
+			addr |= 0x08000000;
+		}
+
+		ptr = ioremap_nocache(addr, 4 * 4);
+		if (ptr == NULL) {
+			printk("invalid address!\n");
+			return;
+		}
+
+		printk("\nsrc:   %08x",ptr[0]);
+		printk(" dst:   %08x",ptr[1]);
+		printk(" next:  %08x",ptr[2]);
+		printk(" len:   %08x",ptr[3]&0xffff);
+		/* Follow the chain via the descriptor's next pointer. */
+		addr = ptr[2];
+		src = ptr[0];
+		iounmap(ptr);
+		if (src < 0x80000000) {
+			src |= 0x80000000;
+		} else {
+			src |= 0x08000000;
+		}
+
+		data_ptr = ioremap_nocache(src, 8);
+		if (data_ptr == NULL) {
+			printk("invalid data_ptr!\n");
+			return;
+		}
+
+		for (i = 0; i < 8; i++) {
+			printf(" %02x", data_ptr[i]);
+		}
+
+		printk("\n");
+		iounmap(data_ptr);
+	}
+}
+
+/*
+ * Print usage for every "dump" subcommand.  fctl and rrt require an
+ * address argument (their handlers parse argv[2]); the original usage
+ * text omitted it.
+ */
+static void qdrv_dump_usage(void)
+{
+	printk("usage: dump irqstatus\n");
+	printk("usage: dump txstatus\n");
+	printk("usage: dump log\n");
+	printk("usage: dump muc\n");
+	printk("usage: dump fctl <addr>\n");
+	printk("usage: dump rrt <addr>\n");
+	printk("usage: dump mem <addr> [num bytes]\n");
+	printk("usage: dump dma <addr>\n");
+	printk("usage: dump hwregprint [--qdrvdata]\n");
+	printk("usage: dump hwreg <reg set: 0-%d> <buf num: 0-%d> [--verbose [--qdrvdata]]\n",
+		QDRV_MAX_REG_MONITOR - 1, QDRV_MAX_REG_PER_BUF - 1);
+	printk("usage: dump hwregcmp <set: 0-%d> [<buf 1: 0-%d> <buf 2: 0-%d>] [--qdrvdata]\n",
+		QDRV_MAX_REG_MONITOR - 1, QDRV_MAX_REG_PER_BUF - 1, QDRV_MAX_REG_PER_BUF - 1);
+#if QTN_SEM_TRACE
+	printk("usage: dump sem\n");
+#endif
+}
+
+/*
+ * Top-level "dump" control command: dispatch to the per-topic dump
+ * helpers based on argv[1].  Always returns 0.
+ */
+static int qdrv_command_dump(struct device *dev, int argc, char *argv[])
+{
+	if (argc < 2) {
+		qdrv_dump_usage();
+		return 0;
+	}
+
+	if (strcmp(argv[1], "hwregprint") == 0) {
+		qdrv_dump_hwregprint(argc, argv);
+	} else if (strcmp(argv[1], "hwreg") == 0) {
+		qdrv_dump_hwreg(argc, argv);
+	} else if (strcmp(argv[1], "hwregcmp") == 0) {
+		qdrv_dump_hwregcmp(argc, argv);
+	} else if (strcmp(argv[1], "irqstatus") == 0) {
+		qdrv_dump_irqstatus();
+	} else if (strcmp(argv[1],"txstatus") == 0) {
+		qdrv_dump_txstatus();
+	} else if (strcmp(argv[1],"log") == 0) {
+		qdrv_dump_event_log();
+	} else if (strcmp(argv[1],"muc") == 0) {
+		qdrv_dump_muc_log(dev);
+	} else if (strcmp(argv[1],"fctl") == 0) {
+		qdrv_dump_fctl(argc, argv);
+	} else if (strcmp(argv[1],"rrt") == 0) {
+		qdrv_dump_rrt(argc, argv);
+	} else if (strcmp(argv[1],"mem") == 0) {
+		qdrv_dump_mem(argc, argv);
+	} else if (strcmp(argv[1],"dma") == 0) {
+		qdrv_dump_dma(argc, argv);
+#if QTN_SEM_TRACE
+	} else if (strcmp(argv[1],"sem") == 0) {
+		qtn_mproc_sync_spin_lock_log_dump();
+#endif
+	} else {
+		printk("%s: invalid dump type %s\n", __func__, argv[1]);
+	}
+
+	return 0;
+}
+
+/* "radar (enable <region>|disable)" control command. */
+static int qdrv_command_radar(struct device *dev, int argc, char *argv[])
+{
+	if ((3 <= argc) && (strcmp(argv[1], "enable") == 0)) {
+		qdrv_radar_enable(argv[2]);
+		return 0;
+	}
+
+	if ((2 <= argc) && (strcmp(argv[1], "disable") == 0)) {
+		qdrv_radar_disable();
+		return 0;
+	}
+
+	printk("usage: %s (enable <region>|disable)\n", argv[0]);
+	return 0;
+}
+
+/* "rifs (enable|disable)" control command: toggle RIFS mode on the
+ * local host SoC CPU. */
+static int qdrv_command_rifs(struct device *dev, int argc, char *argv[])
+{
+	const char *verb = (argc == 2) ? argv[1] : NULL;
+
+	if (verb && strcmp(verb, "enable") == 0) {
+		qtn_rifs_mode_enable(QTN_LHOST_SOC_CPU);
+	} else if (verb && strcmp(verb, "disable") == 0) {
+		qtn_rifs_mode_disable(QTN_LHOST_SOC_CPU);
+	} else {
+		printk("usage: %s (enable|disable)\n", argv[0]);
+	}
+
+	return 0;
+}
+
+/*
+ * "led <data1> <data2>" control command: program the LED GPIO via
+ * set_led_data().  Returns 0 on success, -1 on malformed arguments.
+ */
+static int qdrv_command_led(struct device *dev, int argc, char *argv[])
+{
+	int ret_val;
+	int data1;
+	int data2;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Both values must be present and decimal; short-circuit keeps the
+	 * original check order (argc, then argv[1], then argv[2]). */
+	if (argc != 3 ||
+			sscanf(argv[1], "%d", &data1) != 1 ||
+			sscanf(argv[2], "%d", &data2) != 1) {
+		DBGPRINTF_E("Invalid arguments to led command\n");
+		return (-1);
+	}
+
+	ret_val = set_led_data(LED_FILE, data1, data2);
+	if (ret_val > 1) {
+		printk("Led GPIO already set.\n");
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return (0);
+}
+
+/*
+ * GPIO programs - separate from the LED programs
+ */
+
+/* WPS push-button state, configured through "gpio set wps ...". */
+static u8	gpio_wps_push_button = 255;		/* GPIO pin number (255 = not set) */
+static u8	wps_push_button_active_logic = 0;	/* normalized to 0/1 when reported */
+static u8	wps_push_button_interrupts = 0;		/* 1 = interrupt-driven, 0 = polled */
+static u8	wps_push_button_configured = 0;		/* set once a pin has been accepted */
+static u8	wps_push_button_enabled = 0;		/* latched; blocks reconfiguration */
+
+/*
+ * Report the cached WPS push-button configuration.  Returns 0 and fills
+ * the out-parameters when configured, -1 otherwise.
+ */
+int
+qdrv_get_wps_push_button_config( u8 *p_gpio_pin, u8 *p_use_interrupt, u8 *p_active_logic )
+{
+	if (wps_push_button_configured == 0) {
+		return -1;
+	}
+
+	*p_gpio_pin = gpio_wps_push_button;
+	*p_use_interrupt = wps_push_button_interrupts;
+	/* Active logic is reported normalized to 0/1. */
+	*p_active_logic = wps_push_button_active_logic ? 1 : 0;
+
+	return 0;
+}
+
+/* Latch the WPS push button as enabled; once set, "gpio set wps"
+ * refuses to reconfigure it (see qdrv_command_gpio). */
+void
+set_wps_push_button_enabled( void )
+{
+	wps_push_button_enabled = 1;
+}
+
+/*
+ * "gpio (get|set) wps ..." control command.
+ *
+ * "gpio get wps" reports the cached WPS push-button configuration via
+ * printk.  "gpio set wps <pin> [intr|<active_logic>]" records the pin,
+ * monitoring method (polling by default, interrupts with "intr") and
+ * active logic - unless the push button has already been enabled.
+ * Non-"wps" second arguments must at least parse as a number; they are
+ * otherwise rejected with retval = -1 in the get/set branches.
+ */
+static int
+qdrv_command_gpio(struct device *dev, int argc, char *argv[])
+{
+	int		retval = 0;
+	unsigned int	tmp_uval = 0;
+	int		wps_flag = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (argc < 3)
+	{
+		DBGPRINTF_E("Not enough arguments to gpio command\n");
+		return(-1);
+	}
+	else
+	{
+		if (strcmp( argv[ 2 ], "wps" ) == 0)
+		  wps_flag = 1;
+		else if (sscanf(argv[ 2 ] , "%u", &tmp_uval) != 1)
+		{
+			goto bad_args;
+		}
+		/* The former 'gpio_pin' local copied tmp_uval here but was
+		 * never read anywhere - removed as dead code. */
+	}
+
+	if (strcmp( argv[ 1 ], "get" ) == 0)
+	{
+		if (wps_flag == 0)
+		{
+			retval = -1;
+		}
+	  /*
+	   * For WPS, just report thru printk.
+	   * No reporting thru /proc/qdrvdata (qdrv_control_set_show, etc.)
+	   */
+		else
+		{
+			if (wps_push_button_configured)
+			{
+				printk( "WPS push button accessed using GPIO pin %u\n", gpio_wps_push_button );
+				printk( "monitored using %s\n", wps_push_button_interrupts ? "interrupt" : "polling" );
+			}
+			else
+			{
+				printk( "WPS push button not configured\n" );
+			}
+		}
+	}
+	else if (strcmp( argv[ 1 ], "set" ) == 0)
+	{
+		if (argc < 4)
+		{
+			DBGPRINTF_E("Not enough arguments for gpio set command\n");
+			retval = -1;
+		}
+		else if (wps_flag == 0)
+		{
+			retval = -1;
+		}
+		/*
+		 * For WPS, we have "gpio set wps 4" and "gpio set wps 4 intr".
+		 * Latter selects interrupt-based monitoring.
+		 */
+		else if (wps_push_button_enabled == 0)
+		{
+			unsigned int	tmp_uval_2 = 0;
+
+			if (sscanf(argv[ 3 ] , "%u", &tmp_uval) != 1)
+			{
+				goto bad_args;
+			}
+
+			/* Defaults: polled, active high. */
+			wps_push_button_interrupts = 0;
+			wps_push_button_active_logic = 1;
+
+			if (argc > 4)
+			{
+				if (strcmp( argv[ 4 ], "intr" ) == 0)
+				{
+					wps_push_button_interrupts = 1;
+					wps_push_button_active_logic = 1;
+				}
+				else if (sscanf(argv[ 4 ], "%u", &tmp_uval_2 ) == 1)
+				{
+					wps_push_button_active_logic = (u8) tmp_uval_2;
+				}
+			}
+
+			/* Interrupt-capable pins have a tighter upper bound. */
+			if ((wps_push_button_interrupts && tmp_uval > MAX_GPIO_INTR) ||
+			    (tmp_uval > MAX_GPIO_PIN))
+			{
+				printk( "GPIO pin number %u out of range, maximum is %d\n", tmp_uval,
+					 wps_push_button_interrupts ? MAX_GPIO_INTR : MAX_GPIO_PIN );
+				goto bad_args;
+			}
+			else
+			{
+				gpio_wps_push_button = (u8) tmp_uval;
+				wps_push_button_configured = 1;
+			}
+		}
+		else
+		{
+			DBGPRINTF_E("WPS Push button enabled, cannot (re)configure.\n");
+		}
+	}
+	else
+	{
+		DBGPRINTF_E("Unrecognized gpio subcommand %s\n", argv[1]);
+		retval = -1;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return( retval );
+
+bad_args:
+	DBGPRINTF_E("Invalid argument(s) to gpio command\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(-1);
+}
+
+/*
+ * "pwm (enable|disable) <pin> [<high_count> <low_count>]" control
+ * command.  Counts are converted to the hardware's N-1 convention
+ * before being programmed.
+ */
+static int
+qdrv_command_pwm(struct device *dev, int argc, char *argv[])
+{
+	int retval = 0;
+	int pin = 0;
+	int high_count = 0;
+	int low_count = 0;
+
+	/* The pin argument is common to both verbs. */
+	if (argc < 3 || sscanf(argv[2], "%d", &pin) != 1)
+		goto bad_args;
+
+	if (strcmp(argv[1], "enable") == 0) {
+		if (argc < 5 ||
+				sscanf(argv[3], "%d", &high_count) != 1 ||
+				sscanf(argv[4], "%d", &low_count) != 1)
+			goto bad_args;
+		retval = gpio_enable_pwm(pin, high_count - 1, low_count - 1);
+	} else if (strcmp(argv[1], "disable") == 0) {
+		retval = gpio_disable_pwm(pin);
+	} else {
+		goto bad_args;
+	}
+
+	return ( retval );
+
+bad_args:
+	DBGPRINTF_E("Invalid argument(s) to pwm command\n");
+	DBGPRINTF_E("usage: %s (enable|disable) <pin> <high_count> <low_count>\n", argv[0]);
+
+	return (-1);
+}
+
+/* seq_file show: TX then RX packet counts for both RF chains. */
+static void
+qdrv_calcmd_show_packet_counts( struct seq_file *s, void *data, u32 num )
+{
+	struct qdrv_packet_report	*p_packet_report = (struct qdrv_packet_report *) data;
+
+	seq_printf( s, "RF1_TX = %d, RF2_TX = %d\n", p_packet_report->rf1.num_tx, p_packet_report->rf2.num_tx );
+	seq_printf( s, "RF1_RX = %d, RF2_RX = %d\n", p_packet_report->rf1.num_rx, p_packet_report->rf2.num_rx );
+}
+
+/* seq_file show: four TX power values as unsigned integers on one line. */
+static void
+qdrv_calcmd_show_tx_power( struct seq_file *s, void *data, u32 num )
+{
+	unsigned int *p_data = (unsigned int *) data;
+
+	seq_printf( s, "%u %u %u %u\n", p_data[0], p_data[1], p_data[2], p_data[3] );
+}
+
+/* seq_file show: four RSSI values as signed integers on one line. */
+static void
+qdrv_calcmd_show_rssi( struct seq_file *s, void *data, u32 num )
+{
+	unsigned int *p_data = (unsigned int *) data;
+
+	seq_printf( s, "%d %d %d %d\n", p_data[0], p_data[1], p_data[2], p_data[3] );
+}
+
+/* seq_file show: the six cached calibration test-mode settings. */
+static void
+qdrv_calcmd_show_test_mode_param( struct seq_file *s, void *data, u32 num )
+{
+	struct qdrv_cal_test_setting *cal_test_setting= (struct qdrv_cal_test_setting*) data;
+
+	seq_printf( s, "%d %d %d %d %d %d\n",
+			cal_test_setting->antenna,
+			cal_test_setting->mcs,
+			cal_test_setting->bw_set,
+			cal_test_setting->pkt_len,
+			cal_test_setting->is_eleven_N,
+			cal_test_setting->bf_factor_set
+		);
+}
+
+/* seq_file show: a single VPD value. */
+static void
+qdrv_calcmd_show_vpd( struct seq_file *s, void *data, u32 num )
+{
+	unsigned int *p_data = (unsigned int *) data;
+
+	seq_printf( s, "%d\n", p_data[0] );
+}
+
+/* seq_file show: external then internal RFIC temperature from the
+ * control block. */
+static void
+qdrv_calcmd_show_temperature( struct seq_file *s, void *data, u32 num )
+{
+	struct qdrv_cb *qcb = (struct qdrv_cb *) data;
+	seq_printf( s, "%d %d\n", qcb->temperature_rfic_external, qcb->temperature_rfic_internal);
+}
+
+/* RF register value is now returned from the MuC */
+
+/* seq_file show: the last RF register read - address and value cached
+ * in the control block (qcb->read_addr / qcb->rf_reg_val). */
+static void
+qdrv_show_rfmem(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_cb *qcb = (struct qdrv_cb *) data;
+
+	seq_printf(s, "mem[0x%08x] = 0x%08x\n", qcb->read_addr, qcb->rf_reg_val );
+}
+
+#ifdef POST_RF_LOOP
+/* seq_file show: the POST RF-loopback result as a single integer. */
+static void
+qdrv_calcmd_post_rfloop_show(struct seq_file *s, void *data, u32 num)
+{
+	int *result = (int *)data;
+
+	seq_printf(s, "%d\n", *result);
+}
+#endif
+
+/* seq_file show: four PD voltage level readings on one line. */
+static void
+qdrv_calcmd_show_pd_voltage_level(struct seq_file *s, void *data, u32 num)
+{
+	int *p_data = (int *) data;
+
+	seq_printf(s, "%d %d %d %d\n", p_data[0], p_data[1], p_data[2], p_data[3]);
+}
+
+/* Version strings for the DC/IQ and power calibration files
+ * (presumably filled in when the cal files are loaded - confirm). */
+char dc_iq_calfile_version[VERSION_SIZE];
+char power_calfile_version[VERSION_SIZE];
+
+/*
+ * Calcmd format:
+ *
+ * Each calcmd is a sequence of U8's; thus each element is in the range 0 - 255.
+ *
+ * The first element identifies the calcmd.  Look in macfw/cal/utils/common/calcmd.h for symbolic
+ * enums.  Example values include SET_TEST_MODE and GET_TEST_STATS.  Symbolic values are not used in
+ * the Q driver.
+ *
+ * Second element is required to be 0.
+ *
+ * Third element is the total length of the calcmd sequence.  If no additional arguments are present,
+ * this element will be 4.  For each argument, add 2 to this element.
+ *
+ * Fourth element is also required to be 0.
+ *
+ * Remaining elements are the arguments, organized as pairs.  First element in each pair is the
+ * argument index, numbered starting from 1 - NOT 0.  Second element is the argument value.  Thus
+ * ALL calcmd input arguments (output arguments are also possible) are required to be 8 bit values.
+ * Larger values (e.g. U16 or U32) have to be passed in 8-bit pieces, with Linux taking the value
+ * apart and the MuC reassembling them.
+ *
+ * Output arguments are returned in the same buffer used to send the calcmd to the MuC.  See
+ * GET_TEST_STATS (calcmd = 15) for an example.
+ */
+
+/*
+ * "calcmd" control command handler.
+ *
+ * argv[1..argc-1] are the decimal byte values of a raw calcmd (see the
+ * format description above).  The bytes are copied into a DMA-coherent
+ * buffer, sent to the MuC via hostlink, and the reply (returned in the
+ * same buffer) is decoded for the calcmd ids this driver understands.
+ * Returns 0 on success, -1 if no wlan is attached or the DMA buffer
+ * cannot be allocated.
+ */
+static int qdrv_command_calcmd(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_cb *qcb;
+	char *cmd = NULL;
+	dma_addr_t cmd_dma;
+	struct qdrv_wlan *qw;
+	int cmdlen;
+
+	int temp_calcmd[30] = {0};
+	char calcmd[30] = {0};
+	int i;
+	int evm_int[4] = {0}, evm_frac[4] = {0};
+
+	u32 num_rf1_rx;
+	u32 num_rf1_tx;
+	u32 num_rf2_rx;
+	u32 num_rf2_tx;
+
+	qcb = dev_get_drvdata(dev);
+	qw = qdrv_control_wlan_get(&qcb->macs[0]);
+	if (!qw) {
+		return -1;
+	}
+
+	/* The MuC reads and writes the command through this DMA-coherent buffer */
+	cmd = qdrv_hostlink_alloc_coherent(NULL, sizeof(qcb->command), &cmd_dma, GFP_ATOMIC);
+	if (cmd == NULL) {
+		DBGPRINTF_E("Failed allocate %d bytes for cmd\n", sizeof(qcb->command));
+		return -1;
+	}
+
+	cmdlen = argc - 1;
+
+	/* Convert the decimal string arguments into raw command bytes */
+	for (i = 1; i < argc; i++) {
+		temp_calcmd[i-1] = _atoi(argv[i]);
+		calcmd[i-1] = (char)temp_calcmd[i-1];
+	}
+
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_CALCMD, "cmdlen %d\n", cmdlen);
+	memcpy(cmd, calcmd, cmdlen);
+
+	/* Round trip to the MuC; on return cmd[] holds the reply bytes */
+	qdrv_hostlink_msg_calcmd(qw, cmdlen, cmd_dma);
+
+	/*
+	 * NOTE(review): cmd is plain char; the multi-byte reassembly below
+	 * (cmd[n] << 24 | ...) assumes char is unsigned on this platform,
+	 * otherwise sign extension corrupts the assembled words - confirm.
+	 */
+	if(cmd[0] == 31) {
+		/* calcmd 31: calibration / RFIC / BBIC version report */
+		sprintf(dc_iq_calfile_version, "V%d.%d", cmd[6], cmd[5]);
+		sprintf(power_calfile_version, "V%d.%d", cmd[9], cmd[8]);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"Calibration version %d.%d\n", cmd[12], cmd[11]);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"RFIC version %d.%d\n", cmd[15], cmd[14]);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"BBIC version %d.%d\n", cmd[18], cmd[17]);
+
+	} else if(cmd[0] == 28 || cmd[0] == 29 || cmd[0] == 30) {
+		/* calcmds 28-30: progress indication only */
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD, ".");
+
+	} else if(cmd[0] == 15) {
+		/* calcmd 15 (GET_TEST_STATS): per-RF tx/rx packet counters,
+		 * returned little-endian in 4-byte argument fields */
+		num_rf1_rx = cmd[8] << 24 | cmd[7] << 16 | cmd[6] << 8 | cmd[5];
+		num_rf1_tx = cmd[13] << 24 | cmd[12] << 16 | cmd[11] << 8 | cmd[10];
+		num_rf2_rx = cmd[18] << 24 | cmd[17] << 16 | cmd[16] << 8 | cmd[15];
+		num_rf2_tx = cmd[23] << 24 | cmd[22] << 16 | cmd[21] << 8 | cmd[20];
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"MuC: RF1_TX = %d, RF2_TX = %d\n", num_rf1_tx, num_rf2_tx);
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"MuC: RF1_RX = %d, RF2_RX = %d\n", num_rf1_rx, num_rf2_rx);
+
+		qcb->packet_report.rf1.num_tx = num_rf1_tx;
+		qcb->packet_report.rf2.num_tx = num_rf2_tx;
+
+		/* NOTE(review): on chip id 0x20 the rx counters accumulate
+		 * instead of being overwritten - confirm this is intended */
+		if (chip_id() == 0x20) {
+			qcb->packet_report.rf1.num_rx += num_rf1_rx;
+			qcb->packet_report.rf2.num_rx += num_rf2_rx;
+		} else {
+			qcb->packet_report.rf1.num_rx = num_rf1_rx;
+			qcb->packet_report.rf2.num_rx = num_rf2_rx;
+		}
+
+		qdrv_control_set_show(qdrv_calcmd_show_packet_counts, (void *) &(qcb->packet_report), 1, 1);
+
+	} else if(cmd[0] == 3) {
+		/* calcmd 3: gain / power-compensation report plus external
+		 * (SE95) and internal RFIC temperature readout */
+		int temp_cal_13_W = 0, temp_cal_13_I, temp_cal_13_P;
+		u32 rfic_temp_int;
+		u32 rfic_temp_frac;
+		u32 flag = (cmd[8] << 24 | cmd[7] << 16 | cmd[6] << 8 | cmd[5]);
+		u32 rfic_temp = (cmd[28] << 24 | cmd[27] << 16 | cmd[26] << 8 | cmd[25]);
+
+		/* Split the fixed-point readings into integer/fraction parts */
+		qtn_tsensor_get_temperature(qw->se95_temp_sensor, &temp_cal_13_W);
+		temp_cal_13_I = (int) (temp_cal_13_W / QDRV_TEMPSENS_COEFF);
+		temp_cal_13_P = ABS((temp_cal_13_W - (temp_cal_13_I * QDRV_TEMPSENS_COEFF)));
+		rfic_temp_int = rfic_temp / QDRV_TEMPSENS_COEFF10;
+		rfic_temp_frac = rfic_temp / QDRV_TEMPSENS_COEFF -
+				 (rfic_temp_int * (QDRV_TEMPSENS_COEFF10/QDRV_TEMPSENS_COEFF));
+
+		if (flag == EXT_TEMPERATURE_SENSOR_REPORT_FLAG) {
+			DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+					"Gain = %d, %d, %d, %d\n",
+					(cmd[15] | cmd[16] << 8), (cmd[17] | cmd[18] << 8),
+					(cmd[20] | cmd[21] << 8), (cmd[22] | cmd[23] << 8));
+
+		} else if (flag == DISABLE_REPORT_FLAG) {
+			DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD, "Power compensation is Disabled\n");
+		} else {
+			DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+					"(RF,BB) = (%d, %d), (%d, %d), (%d, %d), (%d, %d)\n",
+					cmd[5], cmd[10], cmd[6], cmd[11],
+					cmd[7], cmd[12], cmd[8], cmd[13]);
+			DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+					" Voltage = %d, %d, %d, %d\n",
+					(cmd[15] | cmd[16] << 8), (cmd[17] | cmd[18] << 8),
+					(cmd[20] | cmd[21] << 8), (cmd[22] | cmd[23] << 8));
+		}
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"TEMPERATURE_RFIC_EXTERNAL= %d.%d\n",
+				temp_cal_13_I, temp_cal_13_P);
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"TEMPERATURE_RFIC_INTERNAL = %d.%d\n",
+				rfic_temp_int, rfic_temp_frac); /* Please do not delete for future use */
+		/* Cache raw readings for the "temperature" show hook */
+		qcb->temperature_rfic_external = temp_cal_13_W;
+		qcb->temperature_rfic_internal = rfic_temp;
+		qdrv_control_set_show(qdrv_calcmd_show_temperature, (void *)qcb, 1, 1);
+
+	} else if (cmd[0] == 12)			/* SET_TEST_MODE */ {
+		/* Entering test mode: restart the packet counters */
+		qcb->packet_report.rf1.num_tx = 0;
+		qcb->packet_report.rf1.num_rx = 0;
+		qcb->packet_report.rf2.num_tx = 0;
+		qcb->packet_report.rf2.num_rx = 0;
+
+	} else if (cmd[0] == 33)		/* GET_RFIC_REG */ {
+		/* Cache the returned address/value pair for the show hook */
+		u32	register_value = cmd[8] << 24 | cmd[7] << 16 | cmd[6] << 8 | cmd[5];
+		u32	register_address = cmd[13] << 24 | cmd[12] << 16 | cmd[11] << 8 | cmd[10];
+
+		qcb->read_addr = register_address;
+		qcb->rf_reg_val = register_value;
+
+		qdrv_control_set_show(qdrv_show_rfmem, (void *) qcb, 1, 1);
+
+	} else if(cmd[0] == 41) {
+		/* calcmd 41: per-chain RX stats - either RSSI (rssi_flag > 0)
+		 * or EVM (rssi_flag == 0), selected by reply byte 29 */
+		u8 mcs;
+		u16 rx_gain;
+		u16 evm[4];
+		u16 num_rx_sym;
+
+		u8 bw, nsts, format, rssi_flag;
+		int16_t rssi[4];
+
+		mcs = cmd[5];
+		rx_gain = cmd[8] << 8 | cmd[7];
+		num_rx_sym = cmd[21] << 8 | cmd[20];
+
+		bw = cmd[23];
+		nsts = cmd[25];
+		format = cmd[27];
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"MCS = %d, RX SYMBOL NUM = %d, NSTS = %d, BW = %d, FORMAT = %d  _\n",
+				mcs, num_rx_sym, nsts, bw, format);
+
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"RX_GAIN = %d  __ (0x%x)\n", rx_gain, rx_gain);
+
+		rssi_flag = cmd[29];
+		if(rssi_flag > 0)	//rssi_flag is 0 for EVM measurement
+		{
+			/* RSSI values are signed 0.1-unit fixed point;
+			 * flag 1 = dBFS, otherwise dBm */
+			rssi[0] = (int16_t)(cmd[11] << 8 | cmd[10]);
+			rssi[1] = (int16_t)(cmd[13] << 8 | cmd[12]);
+			rssi[2] = (int16_t)(cmd[16] << 8 | cmd[15]);
+			rssi[3] = (int16_t)(cmd[18] << 8 | cmd[17]);
+			if(rssi_flag == 1)
+			DBGPRINTF(DBG_LL_HIDDEN, QDRV_LF_CALCMD,
+				"RX_RSSI (dBFS) : %d.%d, %d.%d, %d.%d, %d.%d\n",
+				rssi[0] / 10, ABS(rssi[0]) % 10 , rssi[1] / 10, ABS(rssi[1]) % 10,
+				rssi[2] / 10, ABS(rssi[2]) % 10 , rssi[3] / 10, ABS(rssi[3]) % 10);
+			else
+			DBGPRINTF(DBG_LL_HIDDEN, QDRV_LF_CALCMD,
+				"RX_RSSI (dBm) : %d.%d, %d.%d, %d.%d, %d.%d\n",
+				rssi[0] / 10, ABS(rssi[0]) % 10 , rssi[1] / 10, ABS(rssi[1]) % 10,
+				rssi[2] / 10, ABS(rssi[2]) % 10 , rssi[3] / 10, ABS(rssi[3]) % 10);
+		}
+		else
+		{
+			/* Convert the raw per-chain EVM readings to dB */
+			evm[0] = cmd[11] << 8 | cmd[10];
+			evm[1] = cmd[13] << 8 | cmd[12];
+			evm[2] = cmd[16] << 8 | cmd[15];
+			evm[3] = cmd[18] << 8 | cmd[17];
+			if (evm[0] > 0) convert_evm_db(evm[0], num_rx_sym,  &evm_int[0], &evm_frac[0]);
+			if (evm[1] > 0) convert_evm_db(evm[1], num_rx_sym,  &evm_int[1], &evm_frac[1]);
+			if (evm[2] > 0) convert_evm_db(evm[2], num_rx_sym,  &evm_int[2], &evm_frac[2]);
+			if (evm[3] > 0) convert_evm_db(evm[3], num_rx_sym,  &evm_int[3], &evm_frac[3]);
+
+			DBGPRINTF(DBG_LL_HIDDEN, QDRV_LF_CALCMD,
+				"RX_EVM[0] = %d.%d  RX_EVM[1] = %d.%d  RX_EVM[2] = %d.%d  RX_EVM[3] = %d.%d \n",
+				evm_int[0], evm_frac[0], evm_int[1], evm_frac[1],
+				evm_int[2], evm_frac[2], evm_int[3], evm_frac[3]);
+		}
+	} else if(cmd[0] == 48) {
+		/* calcmd 48: PD voltage levels - 10-bit two's complement
+		 * readings (hence the > 511 -> -1024 adjustment) plus power
+		 * in quarter-dBm (value >> 2 dBm, remainder * 0.25) */
+		s16 pd_vol0_reading = cmd[6] << 8 | cmd[5];
+		if (pd_vol0_reading > 511) pd_vol0_reading -= 1024;
+		s16 pd_vol1_reading = cmd[9] << 8 | cmd[8];
+		if (pd_vol1_reading > 511) pd_vol1_reading -= 1024;
+		s16 pd_vol2_reading = cmd[12] << 8 | cmd[11];
+		if (pd_vol2_reading > 511) pd_vol2_reading -= 1024;
+		s16 pd_vol3_reading = cmd[15] << 8 | cmd[14];
+		if (pd_vol3_reading > 511) pd_vol3_reading -= 1024;
+		s16 rfic_temp_reading = cmd[18] << 8 | cmd[17]; //need to check specs of RFIC4 to find out dynamic range temp sensor
+		u8 pd_dBm0 = cmd[20];
+		u8 pd_dBm1 = cmd[22];
+		u8 pd_dBm2 = cmd[24];
+		u8 pd_dBm3 = cmd[26];
+		printk("OUT PD_LEVEL : (%d, %d, %d, %d) / RFIC_TEMP = %d oC\n",
+				pd_vol0_reading, pd_vol1_reading, pd_vol2_reading,
+				pd_vol3_reading, rfic_temp_reading);
+		printk("OUT PD_POWER : %d.%ddBm, %d.%ddBm, %d.%ddBm, %d.%ddBm\n",
+				pd_dBm0 >> 2, (pd_dBm0 % 4) * 25,
+				pd_dBm1 >> 2, (pd_dBm1 % 4) * 25,
+				pd_dBm2 >> 2, (pd_dBm2 % 4) * 25,
+				pd_dBm3 >> 2, (pd_dBm3 % 4) * 25);
+
+		qcb->qdrv_cal_test_report.pd_voltage_level[0] = pd_vol0_reading;
+		qcb->qdrv_cal_test_report.pd_voltage_level[1] = pd_vol1_reading;
+		qcb->qdrv_cal_test_report.pd_voltage_level[2] = pd_vol2_reading;
+		qcb->qdrv_cal_test_report.pd_voltage_level[3] = pd_vol3_reading;
+		qdrv_control_set_show(qdrv_calcmd_show_pd_voltage_level, (void *) &(qcb->qdrv_cal_test_report.pd_voltage_level[0]), 1, 1);
+
+	} else if (cmd[0] == 51) {
+		/* calcmd 51: base PD power per chain, quarter-dBm encoding */
+		u16 pd_vol0 = cmd[6] << 8 | cmd[5];
+		u16 pd_vol1 = cmd[9] << 8 | cmd[8];
+		u16 pd_vol2 = cmd[12] << 8 | cmd[11];
+		u16 pd_vol3 = cmd[15] << 8 | cmd[14];
+		printk("BASE PD_POWER : %d.%ddBm, %d.%ddBm, %d.%ddBm, %d.%ddBm\n",
+				pd_vol0 >> 2, (pd_vol0 % 4) * 25,
+				pd_vol1 >> 2, (pd_vol1 % 4) * 25,
+				pd_vol2 >> 2, (pd_vol2 % 4) * 25,
+				pd_vol3 >> 2, (pd_vol3 % 4) * 25);
+
+		qcb->qdrv_cal_test_report.tx_power[0] = pd_vol0;
+		qcb->qdrv_cal_test_report.tx_power[1] = pd_vol1;
+		qcb->qdrv_cal_test_report.tx_power[2] = pd_vol2;
+		qcb->qdrv_cal_test_report.tx_power[3] = pd_vol3;
+		qdrv_control_set_show(qdrv_calcmd_show_tx_power, (void *) &(qcb->qdrv_cal_test_report.tx_power[0]), 1, 1);
+	}
+
+	else if(cmd[0] == 54)
+	{
+		/* calcmd 54: per-chain RSSI, clamped to 12 bits, 0.1 dBm units */
+		int rssi[4];
+
+		rssi[0] = cmd[8] << 24 | cmd[7] << 16 | cmd[6] << 8 | cmd[5];
+		rssi[0] = ((rssi[0] > 0xFFF) ? 0xFFF : rssi[0]);
+		rssi[1] = cmd[13] << 24 | cmd[12] << 16 | cmd[11] << 8 | cmd[10];
+		rssi[1] = ((rssi[1] > 0xFFF) ? 0xFFF : rssi[1]);
+		rssi[2] = cmd[18] << 24 | cmd[17] << 16 | cmd[16] << 8 | cmd[15];
+		rssi[2] = ((rssi[2] > 0xFFF) ? 0xFFF : rssi[2]);
+		rssi[3] = cmd[23] << 24 | cmd[22] << 16 | cmd[21] << 8 | cmd[20];
+		rssi[3] = ((rssi[3] > 0xFFF) ? 0xFFF : rssi[3]);
+
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"RSSI (dBm) : %d.%d, %d.%d, %d.%d, %d.%d\n",
+				rssi[0] / 10, ABS(rssi[0]) % 10, rssi[1] / 10, ABS(rssi[1]) % 10,
+				rssi[2] / 10, ABS(rssi[2]) % 10, rssi[3] / 10, ABS(rssi[3]) % 10);
+		qcb->qdrv_cal_test_report.rssi[0] = rssi[0];
+		qcb->qdrv_cal_test_report.rssi[1] = rssi[1];
+		qcb->qdrv_cal_test_report.rssi[2] = rssi[2];
+		qcb->qdrv_cal_test_report.rssi[3] = rssi[3];
+		qdrv_control_set_show(qdrv_calcmd_show_rssi, (void *) &(qcb->qdrv_cal_test_report.rssi[0]), 1, 1);
+	}
+
+	else if (cmd[0] == 56) {
+		/* calcmd 56: read back the active test-mode parameters */
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"Get Test Mode = Ant_Sel: %d, MCS: %d, BW: %d, Pkt_Len: %d, Protocol: %d, BF: %d\n",
+				cmd[5], cmd[7], cmd[9], cmd[11], cmd[13], cmd[15]);
+
+		qcb->qdrv_cal_test_report.setting.antenna = cmd[5];
+		qcb->qdrv_cal_test_report.setting.mcs = cmd[7];
+		qcb->qdrv_cal_test_report.setting.bw_set = cmd[9];
+		qcb->qdrv_cal_test_report.setting.pkt_len = cmd[11];
+		qcb->qdrv_cal_test_report.setting.is_eleven_N = cmd[13];
+		qcb->qdrv_cal_test_report.setting.bf_factor_set = cmd[15];
+		qdrv_control_set_show(qdrv_calcmd_show_test_mode_param, (void *) &qcb->qdrv_cal_test_report.setting, 1, 1);
+	}
+#ifdef POST_RF_LOOP
+	else if (cmd[0] == 60) {
+		/* calcmd 60: POST RF loopback pass/fail status */
+		qcb->qdrv_cal_test_report.post_rfloop_success = cmd[5];
+		qdrv_control_set_show(qdrv_calcmd_post_rfloop_show, (void *)&qcb->qdrv_cal_test_report.post_rfloop_success, 1, 1);
+	}
+#endif
+	else if (cmd[0] == 62) {
+		/* calcmd 62: calstate tx power (VPD) readback */
+		DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD, "Calstate TX Power = %d\n", cmd[5]);
+		qcb->calstate_vpd = cmd[5];
+		qdrv_control_set_show(qdrv_calcmd_show_vpd, (void *) &qcb->calstate_vpd, 1, 1);
+
+	}
+	else if (cmd[0] == 63) {
+		/* Rx IQ cal cmd: print failed status*/
+		if(cmd[5] > 0)
+			DBGPRINTF_RAW(DBG_LL_INFO, QDRV_LF_CALCMD,
+				"qdrv ERROR: cmd id 63 failed: status= %d\n", cmd[5]);
+	}
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(qcb->command), cmd, cmd_dma);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return(0);
+}
+
+/*
+ * Synchronously read one RFIC register via the GET_RFIC_REG (33) calcmd.
+ *
+ * dev    - qdrv device; driver data must be the struct qdrv_cb
+ * offset - RF register offset, passed as calcmd argument 2
+ *
+ * Returns the 32-bit register value, or -1 if the DMA command buffer
+ * cannot be allocated.
+ */
+int qdrv_command_read_rf_reg(struct device *dev, int offset)
+{
+	struct qdrv_cb *qcb;
+	char *cmd = NULL;
+	dma_addr_t cmd_dma;
+	struct qdrv_wlan *qw;
+	int cmdlen;
+	char calcmd[8];
+	int result = 0;
+
+	qcb = dev_get_drvdata(dev);
+	qw = (struct qdrv_wlan *)qcb->macs[0].data;
+
+	cmd = qdrv_hostlink_alloc_coherent(NULL, sizeof(qcb->command), &cmd_dma, GFP_ATOMIC);
+	if (cmd == NULL) {
+		DBGPRINTF_E("Failed allocate %d bytes for cmd\n", sizeof(qcb->command));
+		return(-1);
+	}
+
+	cmdlen = sizeof(calcmd)/sizeof(calcmd[0]);
+
+	/* Calcmd header (id, 0, length, 0) followed by (index, value) pairs */
+	calcmd[0] = 33; //GET_RFIC_REG;
+	calcmd[1] = 0;
+	calcmd[2] = cmdlen;
+	calcmd[3] = 0;
+	calcmd[4] = 1;
+	calcmd[5] = 0;
+	calcmd[6] = 2;
+	calcmd[7] = offset;
+
+	memcpy(cmd, calcmd, cmdlen);
+	qdrv_hostlink_msg_calcmd(qw, cmdlen, cmd_dma);
+
+	/*
+	 * The MuC returns the register value little-endian in bytes 5..8.
+	 * Cast each byte to u8 before shifting: cmd is plain char, and on a
+	 * signed-char ABI any byte >= 0x80 would sign-extend and corrupt the
+	 * assembled word (e.g. 0x80 << 8 -> 0xffff8000).
+	 */
+	result = ((u8)cmd[8] << 24 | (u8)cmd[7] << 16 | (u8)cmd[6] << 8 | (u8)cmd[5]);
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(qcb->command), cmd, cmd_dma);
+
+	return result;
+}
+
+/*
+ * Query the RF chip identifier from the MuC via the GET_CHIP_ID calcmd.
+ * Returns the chip id byte, or -1 if the DMA command buffer allocation fails.
+ */
+int qdrv_command_read_chip_ver(struct device *dev)
+{
+	struct qdrv_cb *qcb;
+	struct qdrv_wlan *qw;
+	dma_addr_t cmd_dma;
+	char *cmd = NULL;
+	char calcmd[6] = {0};
+	int cmdlen;
+	char ret_val;
+
+	/* Get the private device data */
+	qcb = dev_get_drvdata(dev);
+	qw = (struct qdrv_wlan *)qcb->macs[0].data;
+
+	cmd = qdrv_hostlink_alloc_coherent(NULL, sizeof(qcb->command), &cmd_dma, GFP_ATOMIC);
+	if (cmd == NULL) {
+		DBGPRINTF_E("Failed allocate %d bytes for cmd\n", sizeof(qcb->command));
+		return(-1);
+	}
+
+	cmdlen = 4; //Cmd format : 11 0 4 0 //
+
+	/* Header-only calcmd: id, reserved, length, reserved - no arguments */
+	calcmd[0] = GET_CHIP_ID;
+	calcmd[1] = 0;
+	calcmd[2] = cmdlen;
+	calcmd[3] = 0;
+
+	memcpy(cmd, calcmd, cmdlen);
+
+	qdrv_hostlink_msg_calcmd(qw, cmdlen, cmd_dma);
+
+	/* Chip id is returned in the first reply argument byte */
+	ret_val = cmd[5];
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(qcb->command), cmd, cmd_dma);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return ret_val;
+}
+
+/*
+ * Send the Veriwave tx-power calcmd (id 19) with a single argument of
+ * 4 * value (quarter-dB steps, judging by the encoding - TODO confirm).
+ * A zero value is replaced by the default of 11 before scaling.
+ */
+void qdrv_calcmd_set_tx_power(struct device *dev, uint8_t value)
+{
+	struct qdrv_cb *qcb;
+	char *cmd = NULL;
+	dma_addr_t cmd_dma;
+	struct qdrv_wlan *qw;
+
+	qcb = dev_get_drvdata(dev);
+	qw = (struct qdrv_wlan *)qcb->macs[0].data;
+
+	/* The MuC reads the command from this DMA-coherent buffer */
+	cmd = qdrv_hostlink_alloc_coherent(NULL, sizeof(qcb->command), &cmd_dma, GFP_ATOMIC);
+	if (cmd == NULL) {
+		DBGPRINTF_E("Failed allocate %d bytes for cmd\n", sizeof(qcb->command));
+		return;
+	}
+
+	/* Calcmd header: id 19, reserved, total length, reserved, arg index 1 */
+	cmd[0] = 19;
+	cmd[1] = 0;
+	cmd[2] = VERIWAVE_TXPOWER_CMD_SIZE;
+	cmd[3] = 0;
+	cmd[4] = 1;
+	if (!value)
+		value = 11;	/* default power when caller passes 0 */
+	/* NOTE(review): truncates to 8 bits for value > 63 - confirm intended range */
+	cmd[5] = 4 * value;
+
+	qdrv_hostlink_msg_calcmd(qw, VERIWAVE_TXPOWER_CMD_SIZE, cmd_dma);
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(qcb->command), cmd, cmd_dma);
+}
+
+/*
+ * "write" control command: store one 32-bit value at a physical address.
+ * Syntax: write addr <hex-address> <hex-value>
+ * Returns 0 on success, -1 on bad arguments, invalid address or remap failure.
+ */
+static int qdrv_command_write(struct device *dev, int argc, char *argv[])
+{
+	u32 addr;
+	u32 value;
+	u32 *segvaddr;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Exactly "write addr <addr> <value>" is accepted */
+	if (argc != 4 || strcmp(argv[1], "addr") != 0) {
+		goto error;
+	}
+
+	if (sscanf(argv[2], "%x", &addr) != 1 || sscanf(argv[3], "%x", &value) != 1) {
+		goto error;
+	}
+
+	/* Refuse addresses outside the driver's whitelisted ranges */
+	if (!qdrv_command_is_valid_addr(addr)) {
+		DBGPRINTF_E("addr 0x%x is not valid\n", (unsigned)addr);
+		goto error;
+	}
+
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL, "0x%08x = 0x%08x\n", addr, value);
+
+	/* Map just the one word, write it, unmap */
+	segvaddr = ioremap_nocache(addr, 4);
+	if (segvaddr == NULL) {
+		goto error;
+	}
+	*segvaddr = value;
+	iounmap(segvaddr);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+
+error:
+
+	DBGPRINTF_E("Invalid arguments to write command\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(-1);
+}
+
+/*
+ * "read" control command: schedule a memory dump through the show hook.
+ * Syntax: read addr <hex-address> <count> [values-per-line]
+ * values-per-line must be 1, 2 or 4; the show handler then prints the
+ * requested words grouped that many per output line.
+ */
+static int qdrv_command_read(struct device *dev, int argc, char *argv[])
+{
+	u32 addr;
+	unsigned int num;
+	int values_per_line = 1;
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (argc < 4 || strcmp(argv[1], "addr") != 0) {
+		goto error;
+	}
+
+	if (sscanf(argv[2], "%x", &addr) != 1) {
+		goto error;
+	}
+	if (sscanf(argv[3], "%u", &num) != 1) {
+		goto error;
+	}
+
+	qcb->read_count = num;
+
+	if (argc > 4) {
+		if (sscanf(argv[4], "%d", &values_per_line) != 1) {
+			goto error;
+		}
+		if (values_per_line != 1 && values_per_line != 2 && values_per_line != 4) {
+			goto error;
+		}
+		/* Each show line covers values_per_line words - round up */
+		num = (num + values_per_line - 1) / values_per_line;
+	}
+
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_QCTRL, "0x%08x (%d)\n", addr, num);
+
+	qdrv_control_set_show(qdrv_show_memory, (void *) qcb, num, 1);
+
+	/* Save the address for a memory read */
+	qcb->read_addr = addr;
+	qcb->values_per_line = values_per_line;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+
+error:
+	DBGPRINTF_E("Invalid arguments to read command\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(-1);
+}
+
+/*
+ * Push selected sysmsgs for capture and debugging
+ * - currently only MuC messages are sent
+ *
+ * Add a debug message to the output buffer.
+ * If the output buffer is full it is forwarded to the Ethernet driver.
+ * A timer function also calls this function periodically so that data is not left in the
+ * buffer for too long.
+ */
+void qdrv_control_sysmsg_send(void *data, char *sysmsg, u_int32_t text_len, int send_now)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) data;
+	struct qdrv_mac *mac = qw->mac;
+	uint64_t tsf;
+#define QDRV_CMD_SYSMSG_PREF_LEN	25
+	char fmt[] = "[%08x.%08x] MuC: %s";
+	u_int16_t line_len = QDRV_CMD_SYSMSG_PREF_LEN + text_len;
+	static struct qdrv_netdebug_sysmsg *msgbuf = NULL;	/* pending output buffer */
+	static int msg_len = 0;					/* bytes used in msgbuf->msg */
+
+	/* Nothing to do unless MuC netdebug or sysmsg packet logging is enabled */
+	if (mac->params.mucdbg_netdbg == 0 &&
+			(qw->pktlogger.flag & BIT(QDRV_NETDEBUG_TYPE_SYSMSG)) == 0) {
+		return;
+	}
+
+	/* Flush the pending buffer if forced, or if this line would overflow it */
+	if ((msgbuf != NULL) &&
+			((send_now != 0) || ((msg_len + line_len) > (QDRV_NETDEBUG_SYSMSG_LENGTH - 20)))) {
+		int udp_len = sizeof(msgbuf->ndb_hdr) + msg_len + 1;
+		qdrv_pktlogger_hdr_init(qw, &msgbuf->ndb_hdr, QDRV_NETDEBUG_TYPE_SYSMSG, udp_len);
+		qdrv_pktlogger_send(msgbuf, udp_len);
+		msgbuf = NULL;
+		msg_len = 0;
+	}
+
+	/* Periodic timer tick with no new text: flush only */
+	if (text_len == 0 || sysmsg == NULL) {
+		return;
+	}
+
+	if (msgbuf == NULL) {
+		msgbuf = qdrv_pktlogger_alloc_buffer("sysmsg", sizeof(*msgbuf));
+		if (msgbuf == NULL) {
+			return;
+		}
+	}
+
+	if (mac->params.mucdbg_netdbg) {
+		qw->ic.ic_get_tsf(&tsf);
+		/*
+		 * The formatted line is exactly line_len chars (25-char TSF
+		 * prefix plus the message), so pass line_len + 1 to leave room
+		 * for the terminating NUL.  (Was "line_len", which made
+		 * snprintf silently drop the last character of every line.)
+		 * The flush check above keeps 20 bytes of slack, so the extra
+		 * byte cannot overrun the buffer.  Also replaced a stray comma
+		 * operator after the call with a proper semicolon.
+		 */
+		snprintf(msgbuf->msg + msg_len, line_len + 1, fmt, U64_HIGH32(tsf), U64_LOW32(tsf), sysmsg);
+		msg_len += line_len;
+	} else if (qw->pktlogger.flag & BIT(QDRV_NETDEBUG_TYPE_SYSMSG)) {
+		sprintf(msgbuf->msg + msg_len, "%s", sysmsg);
+		msg_len += text_len;
+	}
+}
+
+/*
+ * Push TXBF pkt for capture and debugging
+ */
+void qdrv_control_txbf_pkt_send(void *data, u8 *stvec, u32 bw)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)data;
+	void *databuf = NULL;
+	struct qdrv_netdebug_txbf *stats;
+	int indx;
+
+	/* One chunk for bw == 0, two consecutive chunks otherwise */
+	for (indx = 0; indx <= !!bw; indx++) {
+		databuf = qdrv_pktlogger_alloc_buffer("txbf", sizeof(*stats));
+		if (databuf == NULL) {
+			return;
+		}
+
+		stats = (struct qdrv_netdebug_txbf*)databuf;
+		qdrv_pktlogger_hdr_init(qw, &stats->ndb_hdr,
+				QDRV_NETDEBUG_TYPE_TXBF, sizeof(*stats));
+		/* First chunk of a split vector is flagged as truncated */
+		if (bw && !indx) {
+			stats->ndb_hdr.flags = QDRV_NETDEBUG_FLAGS_TRUNCATED;
+		}
+
+		/*
+		 * NOTE(review): the dma_map_single() handle is discarded and
+		 * never unmapped - presumably only the cache-sync side effect
+		 * is wanted before the memcpy below.  Confirm this is intended.
+		 */
+		dma_map_single(NULL, stvec + (indx * QDRV_NETDEBUG_TXBF_DATALEN),
+			QDRV_NETDEBUG_TXBF_DATALEN, DMA_FROM_DEVICE);
+		memcpy(stats->stvec_data, stvec + (indx * QDRV_NETDEBUG_TXBF_DATALEN),
+				QDRV_NETDEBUG_TXBF_DATALEN);
+
+		qdrv_pktlogger_send(stats, sizeof(*stats));
+		databuf = NULL;
+	}
+}
+
+/* Print the syntax of the "memdebug" control command. */
+static void qdrv_command_memdebug_usage(void)
+{
+	/* Example fixed to include the mandatory mac index, matching the syntax line */
+	printk("Usage: \n"
+		"\tmemdebug 0 add <address> <size>  - e.g. memdebug 0 add 3e01430 16\n");
+}
+
+/*
+ * "memdebug" control command: register a physical-memory watchpoint that
+ * the pktlogger samples into the debug stream.
+ * Syntax: memdebug <mac> add <hex-address> <word-count>
+ * Returns 0 on success, -1 on bad arguments or resource limits.
+ */
+static int qdrv_command_memdebug(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_mac *mac;
+	struct qdrv_wlan *qw;
+	size_t totalsize = 0;
+	int i;
+	struct qdrv_memdebug_watchpt *wp;
+
+	/* Need at least "memdebug <mac> <subcmd>": argv[2] is read below
+	 * (the old "argc < 2" check allowed an out-of-bounds argv[2] access) */
+	if (argc < 3) {
+		qdrv_command_memdebug_usage();
+		return -1;
+	}
+
+	mac = qdrv_control_mac_get(dev, argv[1]);
+	if (mac == NULL) {
+		return -1;
+	}
+
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw) {
+		return -1;
+	}
+
+	if (strncmp(argv[2], "add", 3) == 0) {
+		/* "add" needs both <address> (argv[3]) and <size> (argv[4]);
+		 * the old "argc < 4" check let argv[4] be read out of bounds */
+		if (argc < 5) {
+			qdrv_command_memdebug_usage();
+			return -1;
+		}
+
+		if (qw->pktlogger.mem_wp_index >= MAX_MEMDEBUG_WATCHPTS) {
+			DBGPRINTF_E("memdebug watchpoint limit reached\n");
+			return -1;
+		}
+
+		wp = &qw->pktlogger.mem_wps[qw->pktlogger.mem_wp_index];
+
+		if (sscanf(argv[3], "%lx", (unsigned long *)&wp->addr) != 1) {
+			DBGPRINTF_E("could not parse hex address\n");
+			return -1;
+		}
+		if ((wp->addr & 0x03)) {
+			DBGPRINTF_E("address must be word-aligned\n");
+			return -1;
+		}
+
+		if (sscanf(argv[4], "%lu", (unsigned long *)&wp->size) != 1) {
+			DBGPRINTF_E("could not parse decimal size\n");
+			return -1;
+		}
+
+		/*
+		 * totalsize if the amount of payload:
+		 * [wp struct] [data] * num watchpoints
+		 * check that we're not requesting too much data, e.g. oversizing the debug packet
+		 */
+		for (i = 0; i <= qw->pktlogger.mem_wp_index; i++) {
+			totalsize += (qw->pktlogger.mem_wps[i].size * sizeof(u32));
+			totalsize += sizeof(struct qdrv_memdebug_watchpt);
+		}
+
+		if (totalsize > QDRV_NETDEBUG_MEM_DATALEN) {
+			DBGPRINTF_E("data monitoring packet limit hit\n");
+			return -1;
+		}
+
+		wp->remap_addr = ioremap_nocache(wp->addr, (wp->size * sizeof(u32)));
+		if (wp->remap_addr == NULL) {
+			DBGPRINTF_E("unable to remap address\n");
+			return -1;
+		}
+		printk("%s add %08x %p %u\n", __FUNCTION__, wp->addr, wp->remap_addr, wp->size);
+
+		qw->pktlogger.mem_wp_index++;
+
+	} else {
+		qdrv_command_memdebug_usage();
+		return -1;
+	}
+
+	return 0;
+}
+
+#ifdef QDRV_TX_DEBUG
+__sram_text uint32_t qdrv_tx_ctr[60] = {0};
+__sram_text uint32_t qdrv_dbg_ctr[8] = {0};
+/*
+ * Display or enable Tx debugs
+ *
+ * Syntax:
+ *   txdbg
+ *   - display all qdrv_tx_ctr[] values
+ *
+ *   txdbg [<ctr> <cnt>] ...
+ *   - print the next <cnt> QDRV_TX_DBG(<ctr>, ...) debug messages
+ *   - any number of <ctr>/<cnt> pairs may be specified
+ */
+static int qdrv_command_txdbg(struct device *dev, int argc, char *argv[])
+{
+	int i;
+	int j;
+
+	if (argc > 2) {
+		printk("qdrv_dbg_ctr");
+		for (i = 1; (i + 1) < argc; i += 2) {
+			sscanf(argv[i], "%d", &j);
+			if (j < 0 || j > (ARRAY_SIZE(qdrv_dbg_ctr) - 1)) {
+				printk("%s: ctr %d exceeds array size\n",
+					argv[0], j);
+				/*
+				 * Skip this pair: the original fell through
+				 * and wrote qdrv_dbg_ctr[j] out of bounds.
+				 */
+				continue;
+			}
+			sscanf(argv[i + 1], "%d", &qdrv_dbg_ctr[j]);
+			printk(" [%u]=%u", j, qdrv_dbg_ctr[j]);
+		}
+		printk("\n");
+		return 0;
+	}
+
+	/* No arguments: dump all Tx counters, 12 per output line */
+	for (i = 0; i < ARRAY_SIZE(qdrv_tx_ctr); i++) {
+		printk("%02u:%-8u ", i, qdrv_tx_ctr[i]);
+		if (((i + 1) % 12) == 0) {
+			printk("\n");
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Zero one physical region through an uncached mapping.  Unlike the old
+ * open-coded sequence this checks the ioremap result, so a failed mapping
+ * is reported instead of dereferencing NULL in memset().
+ */
+static void qdrv_clearsram_zero_region(unsigned long start, unsigned long size)
+{
+	u32 *p_uncached = ioremap_nocache(start, size);
+
+	if (p_uncached == NULL) {
+		DBGPRINTF_E("failed to map 0x%lx (%lu bytes) for clearing\n", start, size);
+		return;
+	}
+
+	memset(p_uncached, 0, size);
+	iounmap(p_uncached);
+}
+
+/*
+ * "clearsram" control command: preserve any MuC crash log found at the base
+ * of the MuC stack region, then (if no resources are held) zero the MuC/DSP
+ * memory regions so stale data cannot survive into the next firmware load.
+ */
+static int qdrv_command_clearsram(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_cb *qcb;
+	/* Crash log (if any) sits at the base of the MuC stack region */
+	uint16_t *buf = (uint16_t *) (CONFIG_ARC_MUC_STACK_INIT - CONFIG_ARC_MUC_STACK_SIZE);
+
+	qcb = dev_get_drvdata(dev);
+
+	/* Copy out any crash log, let the parse function deal with validity of the buffer */
+	if (qdrv_crash_log == NULL) {
+
+		/*
+		 * Format of buffer:
+		 *	2 bytes: Header (HEADER_CORE_DUMP)
+		 *	2 bytes: Length of the compressed logs (n)
+		 *	n bytes: Compressed logs
+		 */
+
+		/* Check if the header exists */
+		if (*buf == HEADER_CORE_DUMP) {
+			uint32_t len = *(buf + 1);
+
+			if (len > CONFIG_ARC_MUC_STACK_SIZE) {
+				DBGPRINTF_E("%s: crash log len (%u) out of range\n", __func__, len);
+				return -1;
+			}
+
+			qdrv_crash_log = kmalloc(len, GFP_KERNEL);
+			if (!qdrv_crash_log) {
+				DBGPRINTF_E("%s: Could not allocate %u bytes for qdrv_crash_log\n", __func__,
+					len);
+				return -1;
+			}
+
+			/* Strip the header and length while copying */
+			memcpy(qdrv_crash_log, (char *) (buf + 2), len);
+
+			qdrv_crash_log_len = len;
+		}
+	}
+
+	if (qcb->resources == 0) {
+		/*
+		 * Strictly speaking memory clearing is not necessary as during ELF segments copying
+		 * to memory, it is cleared before placing data.
+		 * Firmware should clear heaps by itself.
+		 * But let's have this function (can be invoked from user-space only by writing
+		 * command to sysfs file) to fill holes between segments, and for safety.
+		 */
+		qdrv_clearsram_zero_region(RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_SRAM_B1_BASE,
+				CONFIG_ARC_MUC_SRAM_B1_SIZE);
+		qdrv_clearsram_zero_region(RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_SRAM_B2_BASE,
+				CONFIG_ARC_MUC_SRAM_B2_SIZE);
+		qdrv_clearsram_zero_region(RUBY_CRUMBS_ADDR, RUBY_CRUMBS_SIZE);
+		qdrv_clearsram_zero_region(RUBY_DRAM_BEGIN + CONFIG_ARC_MUC_BASE, CONFIG_ARC_MUC_SIZE);
+		qdrv_clearsram_zero_region(RUBY_DRAM_BEGIN + CONFIG_ARC_DSP_BASE, CONFIG_ARC_DSP_SIZE);
+	} else {
+		DBGPRINTF_E("Resources are held, not freeing SRAM\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Copy the saved MuC crash log into a caller-supplied buffer.
+ * Returns 0 on success (*len_copied set, possibly to 0 when no log was
+ * captured) or -EINVAL on a NULL pointer / undersized buffer.
+ */
+int qdrv_copy_core_dump(void *buf, uint32_t len, uint32_t *len_copied)
+{
+	if (buf == NULL || len_copied == NULL || len < qdrv_crash_log_len)
+		return -EINVAL;
+
+	if (qdrv_crash_log_len != 0)
+		memcpy(buf, qdrv_crash_log, qdrv_crash_log_len);
+
+	*len_copied = qdrv_crash_log_len;
+
+	return 0;
+}
+
+/*
+ * "bridge" control command: manage 3-address-mode bridging on a station.
+ * Syntax: bridge <mac> {showmacs | enable | disable | clear}
+ * Returns 0 on success (or a harmless no-op), -1 on errors.
+ */
+static int qdrv_command_bridge(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_mac *mac;
+	struct qdrv_wlan *qw;
+	struct ieee80211vap *vap;
+	char *dev_name;
+
+	/* Both the mac index (argv[1]) and the subcommand (argv[2]) are
+	 * required; the old "argc < 2" check let argv[2] be read out of bounds */
+	if (argc < 3) {
+		printk("Usage: bridge 0 {showmacs | enable | disable | clear}\n");
+		return -1;
+	}
+
+	mac = qdrv_control_mac_get(dev, argv[1]);
+	if (mac == NULL) {
+		return -1;
+	}
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw) {
+		return -1;
+	}
+
+	if (mac->vnet[0] != NULL) {
+		dev_name = mac->vnet[0]->name;
+	} else {
+		/* Maybe never come here */
+		DBGPRINTF_E("No primary interface\n");
+		return -1;
+	}
+
+	/* Guard against an empty vap list before dereferencing */
+	vap = TAILQ_FIRST(&qw->ic.ic_vaps);
+	if (vap == NULL) {
+		DBGPRINTF_E("%s: no vap configured\n", dev_name);
+		return -1;
+	}
+	if (vap->iv_opmode != IEEE80211_M_STA) {
+		DBGPRINTF_E("%s: 3-address mode bridging is only supported on stations\n",
+			dev_name);
+		return 0;
+	}
+
+	if (strncmp(argv[2], "showmacs", 8) == 0) {
+		if (!QDRV_FLAG_3ADDR_BRIDGE_ENABLED()) {
+			printk("%s: 3-address mode bridging is disabled\n",
+				dev_name);
+			return 0;
+		}
+		qdrv_br_show(&qw->bridge_table);
+	} else if (strncmp(argv[2], "enable", 6) == 0) {
+		if (QDRV_FLAG_3ADDR_BRIDGE_ENABLED()) {
+			printk("%s: 3-address mode bridging is already enabled\n",
+				dev_name);
+			return 0;
+		}
+		qdrv_br_create(&qw->bridge_table);
+		qw->flags_ext &= ~QDRV_FLAG_3ADDR_BRIDGE_DISABLE;
+		printk("%s: 3-address mode bridging enabled \n",
+			dev_name);
+	} else if (strncmp(argv[2], "disable", 7) == 0) {
+		if (!QDRV_FLAG_3ADDR_BRIDGE_ENABLED()) {
+			printk("%s: 3-address mode bridging is already disabled\n",
+				dev_name);
+			return 0;
+		}
+		qw->flags_ext |= QDRV_FLAG_3ADDR_BRIDGE_DISABLE;
+		qdrv_br_delete(&qw->bridge_table);
+		printk("%s: 3-address mode bridging disabled\n",
+			dev_name);
+	} else if (strncmp(argv[2], "clear", 5) == 0) {
+		if (!QDRV_FLAG_3ADDR_BRIDGE_ENABLED()) {
+			printk("%s: 3-address mode bridging is disabled\n",
+				dev_name);
+			return 0;
+		}
+		qdrv_br_clear(&qw->bridge_table);
+	} else {
+		printk("Usage: bridge 0 {showmacs | enable | disable | clear}\n");
+	}
+
+	return 0;
+}
+
+/* Show handler: list the 802.1p TOS -> WMM access-category mapping. */
+static inline void qdrv_control_show_wmm_ac_map(
+		struct seq_file *s, void *data, u32 num)
+{
+	uint32_t tos;
+
+	seq_printf(s, "TOS/AC:\n");
+	for (tos = 0; tos < IEEE8021P_PRIORITY_NUM; tos++)
+		seq_printf(s, "%d/%s\n", tos, qdrv_sch_tos2ac_str(tos));
+}
+
+/*
+ * Remap one TOS value to WMM access category "aid" on the named device,
+ * pausing its transmit queue around the update.
+ */
+static inline void qdrv_control_set_wmm_ac_map(
+		char *dev_name, int tos, int aid)
+{
+	struct net_device *ndev = dev_get_by_name(&init_net, dev_name);
+
+	if (ndev == NULL) {
+		printk("Fail to set wmm ac map, device can't be found.\n");
+		return;
+	}
+
+	netif_stop_queue(ndev);
+	qdrv_sch_set_ac_map(tos, aid);
+	netif_start_queue(ndev);
+
+	dev_put(ndev);
+}
+
+/*
+ * "br_isolate" control command.
+ *   br_isolate <0|n>               - clear/set normal bridge isolation
+ *   br_isolate vlan <vid|all|none> - configure VLAN-based isolation
+ * Returns 0 on success, -ENODEV without a wlan, -EINVAL on bad arguments.
+ */
+static int qdrv_control_set_br_isolate(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_mac *mac = NULL;
+	struct qdrv_wlan *qw = NULL;
+	int val;
+
+	mac = qdrv_control_mac_get(dev, "0");
+	if (mac)
+		qw = qdrv_control_wlan_get(mac);
+	if (!qw)
+		return -ENODEV;
+
+	if (argc == 1) {
+		val = simple_strtol(argv[0], NULL, 10);
+		if (val < 0)
+			return -EINVAL;
+		if (val)
+			qw->br_isolate |= QDRV_BR_ISOLATE_NORMAL;
+		else
+			qw->br_isolate &= ~QDRV_BR_ISOLATE_NORMAL;
+		return 0;
+	}
+
+	if (argc != 2 || strcmp(argv[0], "vlan") != 0)
+		return -EINVAL;
+
+	if (strcmp(argv[1], "all") == 0) {
+		qw->br_isolate |= QDRV_BR_ISOLATE_VLAN;
+		qw->br_isolate_vid = QVLAN_VID_ALL;
+		return 0;
+	}
+	if (strcmp(argv[1], "none") == 0) {
+		qw->br_isolate &= ~QDRV_BR_ISOLATE_VLAN;
+		return 0;
+	}
+
+	val = simple_strtol(argv[1], NULL, 10);
+	if (val <= 0 || val >= QVLAN_VID_MAX)
+		return -EINVAL;
+
+	qw->br_isolate |= QDRV_BR_ISOLATE_VLAN;
+	qw->br_isolate_vid = val;
+
+	return 0;
+}
+
+/*
+ * Move the station identified by "addr" (colon-separated MAC string) into
+ * VLAN "vid" on the vap it is associated with.
+ * Returns 0 on success, -EINVAL on a malformed address or unknown station,
+ * otherwise the switch_vlan_set_node() error code.
+ */
+static int qdrv_control_set_sta_vlan(struct qdrv_mac *mac, const char *addr, uint16_t vid)
+{
+	struct qdrv_wlan *qw = qdrv_mac_get_wlan(mac);
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+	struct qdrv_vap *qv;
+	int ret;
+	uint8_t sta_addr[ETH_ALEN];
+	struct qtn_vlan_dev *vdev;
+
+	/* Reject malformed MAC strings: the old code ignored the sscanf
+	 * result and looked up a node with uninitialized address bytes */
+	if (sscanf(addr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+			&sta_addr[0], &sta_addr[1], &sta_addr[2],
+			&sta_addr[3], &sta_addr[4], &sta_addr[5]) != ETH_ALEN) {
+		printk(KERN_ERR"invalid station address %s\n", addr);
+		return -EINVAL;
+	}
+
+	ni = ieee80211_find_node(nt, sta_addr);
+	if (unlikely(!ni)) {
+		printk(KERN_ERR"station %s was not found\n", addr);
+		return -EINVAL;
+	}
+	qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	vdev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+
+	ret = switch_vlan_set_node(vdev, IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx), vid);
+	if (ret)
+		printk(KERN_ERR "failed to put station %s into VLAN %u\n", addr, vid);
+	else
+		printk(KERN_INFO"station %s into VLAN %u\n", addr, vid);
+
+	/* ieee80211_find_node() took a reference; drop it */
+	ieee80211_free_node(ni);
+	return ret;
+}
+
+/*
+ * Toggle global VLAN handling.  On an actual state transition this also
+ * reconfigures the EMAC path (non-PCIe builds) and notifies the wlan layer.
+ */
+static void qdrv_control_vlan_enable(struct qdrv_mac *mac, int enable)
+{
+	struct qdrv_wlan *qw;
+
+	if (vlan_enabled == enable)
+		return;
+
+	vlan_enabled = enable;
+#if !defined(CONFIG_TOPAZ_PCIE_HOST) && !defined(CONFIG_TOPAZ_PCIE_TARGET)
+	topaz_emac_to_lhost(enable);
+#endif
+	qw = qdrv_mac_get_wlan(mac);
+	qdrv_wlan_vlan_enable(&qw->ic, enable);
+}
+
+/*
+ * Parse "enable"/"disable" and apply the global VLAN state; disabling also
+ * resets the VLAN switch tables.  Returns 0 or -EINVAL on an unknown word.
+ */
+int qdrv_control_set_vlan_enable(struct qdrv_mac *mac, const char *cmd)
+{
+	if (!strcmp(cmd, "enable")) {
+		qdrv_control_vlan_enable(mac, 1);
+		return 0;
+	}
+
+	if (!strcmp(cmd, "disable")) {
+		qdrv_control_vlan_enable(mac, 0);
+		switch_vlan_reset();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Parse the "vlan-config" argument vector.
+ *
+ * Recognized forms (argv index in parentheses):
+ *   (3) access|trunk|hybrid  (4) <vid>  (5) add|del  (6) tag|untag  (7) default|none
+ *   (3) dynamic | undynamic
+ *   (3) default_priority  (4) <prio>
+ *
+ * Returns 0 and fills the out-parameters on success, -EINVAL on a bad or
+ * truncated command line.  argc is now validated before each argv access;
+ * the old code ignored argc entirely and could read past the vector.
+ */
+static int qdrv_control_vlan_config_cmd_parse(
+			int argc,
+			char *argv[],
+			int *vlanid,
+			int *tagtx,
+			int *pvid,
+			int *mode,
+			int *add)
+{
+	/* Every form needs at least the mode keyword at argv[3] */
+	if (argc < 4)
+		return -EINVAL;
+
+	if (!strcmp(argv[3], "access")) {
+		*mode = QVLAN_MODE_ACCESS;
+	} else if (!strcmp(argv[3], "trunk")) {
+		*mode = QVLAN_MODE_TRUNK;
+	} else if (!strcmp(argv[3], "hybrid")) {
+		*mode = QVLAN_MODE_HYBRID;
+	} else if (!strcmp(argv[3], "dynamic")) {
+		*mode = QVLAN_MODE_DYNAMIC;
+		*add = 1;
+		return 0;
+	} else if (!strcmp(argv[3], "undynamic")) {
+		*mode = QVLAN_MODE_DYNAMIC;
+		*add = 0;
+		return 0;
+	} else if (!strcmp(argv[3], "default_priority")) {
+		*mode = QVLAN_CMD_DEF_PRIORITY;
+	} else {
+		return -EINVAL;
+	}
+
+	/* Remaining forms carry the vlan id / priority at argv[4] */
+	if (argc < 5)
+		return -EINVAL;
+
+	*vlanid = simple_strtol(argv[4], NULL, 10);
+
+	if (*mode == QVLAN_CMD_DEF_PRIORITY)
+		return 0;
+
+	/* access/trunk/hybrid additionally require argv[5..7] */
+	if (argc < 8)
+		return -EINVAL;
+
+	if (!strcmp(argv[5], "add"))
+		*add = 1;
+	else if (!strcmp(argv[5], "del"))
+		*add = 0;
+	else
+		return -EINVAL;
+
+	if (!strcmp(argv[6], "tag"))
+		*tagtx = 1;
+	else if (!strcmp(argv[6], "untag"))
+		*tagtx = 0;
+	else
+		return -EINVAL;
+
+	if (!strcmp(argv[7], "default"))
+		*pvid = 1;
+	else if (!strcmp(argv[7], "none"))
+		*pvid = 0;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Sanity-check a parsed VLAN config command against the current device
+ * state.  Returns 0 if the command may be applied, -EINVAL otherwise.
+ * Side effects: may enable the VLAN feature (dynamic+add) and may reset
+ * the device's VLAN state on a mode change.
+ */
+static int qdrv_control_vlan_config_cmd_validation(
+			struct qtn_vlan_dev *vdev,
+			struct qdrv_mac *mac,
+			struct qdrv_vap *qv,
+			int wifidev,
+			int vlanid,
+			int tagtx,
+			int pvid,
+			int mode,
+			int add)
+{
+	if (mode == QVLAN_MODE_DYNAMIC) {
+		/* qv may be NULL for non-wifi devices; the !wifidev check
+		 * short-circuits before qv is dereferenced. */
+		if (!wifidev || qv->iv.iv_opmode != IEEE80211_M_HOSTAP) {
+			printk(KERN_ERR "Dynamic VLAN applies only to wifi AP interfaces\n");
+			return -EINVAL;
+		}
+		/* Enabling dynamic VLAN implicitly turns the feature on. */
+		if (add == 1)
+			qdrv_control_vlan_enable(mac, 1);
+		return 0;
+	}
+
+	if (!vlan_enabled) {
+		printk(KERN_ERR "VLAN is disabled\n");
+		return -EINVAL;
+	}
+
+	if (mode == QVLAN_CMD_DEF_PRIORITY) {
+		/* For this command "vlanid" carries the priority value. */
+		if (vlanid <= QVLAN_PRIO_MAX)
+			return 0;
+		return -EINVAL;
+	}
+
+	if (vlanid != QVLAN_VID_ALL && !qtn_vlan_is_valid(vlanid))
+		return -EINVAL;
+
+	/* The wildcard VID cannot be the port's default VLAN. */
+	if (pvid == 1 && vlanid == QVLAN_VID_ALL)
+		return -EINVAL;
+
+	/* Changing the port mode resets its VLAN state first. */
+	if (!qtn_vlan_is_mode(vdev, mode) && vlanid != QVLAN_PRIO_VID)
+		switch_vlan_dev_reset(vdev, mode);
+
+	/* Deletions must reference an existing membership. */
+	if (add == 0 && !qtn_vlan_is_member(vdev, vlanid))
+		return -EINVAL;
+
+	/* "del ... default" only makes sense for the current pvid. */
+	if (add == 0 && pvid == 1 && !qtn_vlan_is_pvid(vdev, vlanid))
+		return -EINVAL;
+
+	/* The current pvid cannot be re-added as a non-default member. */
+	if (qtn_vlan_is_pvid(vdev, vlanid) && pvid == 0)
+		return -EINVAL;
+
+	/* The default VLAN cannot be combined with tagged transmit. */
+	if (pvid == 1 && tagtx == 1)
+		return -EINVAL;
+
+	switch (mode) {
+	case  QVLAN_MODE_ACCESS:
+		/* An access port carries exactly its default VLAN. */
+		if (pvid == 0)
+			return -EINVAL;
+		break;
+	case QVLAN_MODE_TRUNK:
+		/* Trunk untagged membership is only valid for the pvid. */
+		if (tagtx == 0 && pvid == 0)
+			return -EINVAL;
+		break;
+	case QVLAN_MODE_HYBRID:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Apply a validated VLAN config command to the given VLAN device.
+ * Returns 0 on success or the error code from the switch_vlan_* call.
+ */
+static int qdrv_control_vlan_config_cmd_execute(
+			struct qtn_vlan_dev *vdev,
+			int vlanid,
+			int tagtx,
+			int pvid,
+			int mode,
+			int add)
+{
+	int ret = -1;
+
+	if (mode == QVLAN_MODE_DYNAMIC) {
+		if (add == 1) {
+			switch_vlan_dyn_enable(vdev);
+			return 0;
+		} else if (add == 0) {
+			/* Disabling is a no-op unless dynamic mode is active. */
+			if (QVLAN_IS_DYNAMIC(vdev))
+				switch_vlan_dyn_disable(vdev);
+			return 0;
+		} else {
+			return -EINVAL;
+		}
+	} else if (mode == QVLAN_CMD_DEF_PRIORITY) {
+		/* For this command "vlanid" carries the priority value. */
+		switch_vlan_set_priority(vdev, vlanid);
+		return 0;
+	}
+
+	/* Deleting the default VLAN reverts the port to QVLAN_DEF_PVID. */
+	if (pvid == 1 && add == 0) {
+		pvid = 1;	/* NOTE(review): redundant, pvid is already 1 here */
+		vlanid = QVLAN_DEF_PVID;
+	}
+
+	if (pvid == 1) {
+		ret = switch_vlan_set_pvid(vdev, vlanid);
+	} else if (add == 1) {
+		ret = switch_vlan_add_member(vdev, vlanid, 1);
+		if (ret == 0 && tagtx == 1) {
+			ret = switch_vlan_tag_member(vdev, vlanid);
+		} else if (ret == 0 && tagtx == 0) {
+			ret = switch_vlan_untag_member(vdev, vlanid);
+		}
+	} else if (add == 0) {
+		ret = switch_vlan_del_member(vdev, vlanid);
+		/* If the pvid (or every VLAN) was removed, fall back to the
+		 * default pvid. */
+		if (ret == 0 && (vlanid == QVLAN_VID_ALL ||
+					qtn_vlan_is_pvid(vdev, vlanid)))
+			ret = switch_vlan_set_pvid(vdev, QVLAN_DEF_PVID);
+	}
+
+	return ret;
+}
+
+/*
+ * Top-level handler for the "set vlan ..." command.  Dispatches between
+ * enable/disable (argc 3), reset (argc 4), default_priority (argc 5) and
+ * the full mode/member forms (argc 8); see the cmd_parse/validation/execute
+ * helpers for the per-form details.
+ *
+ * Fix: the netdevice reference was previously dropped with dev_put() right
+ * after dev_get_by_name(), while ndev (netdev_priv() data and if_port) was
+ * still used afterwards.  The reference is now held until the function is
+ * done with the device.
+ */
+static int qdrv_control_vlan_config(struct qdrv_mac *mac, int argc, char *argv[])
+{
+	int vlanid = -1;
+	int ret;
+	int tagtx = -1;
+	int pvid = -1;
+	int mode = -1;
+	int add = -1;
+	struct net_device *ndev = NULL;
+	struct qdrv_vap *qv = NULL;
+	const char *dev_name = NULL;
+	struct qtn_vlan_dev *vdev;
+	int wifidev;
+
+	if (argc != 3			/* enable/disable */
+			&& argc != 4	/* reset */
+			&& argc != 5	/* priority_tag_tx/default_priority */
+			&& argc != 8)	/* access/trunk/hybrid/dynamic */
+		return -EINVAL;
+
+	if (argc == 3)
+		return qdrv_control_set_vlan_enable(mac, argv[2]);
+
+	dev_name = argv[2];
+
+	ndev = dev_get_by_name(&init_net, dev_name);
+	if (!ndev) {
+		printk(KERN_ERR"%s: netdevice %s does not exist\n", __FUNCTION__, dev_name);
+		return -EINVAL;
+	}
+	wifidev = (ndev->qtn_flags & QTN_FLAG_WIFI_DEVICE);
+
+	if (wifidev) {
+		qv = netdev_priv(ndev);
+		vdev = switch_vlan_dev_get_by_idx(QTN_WLANID_FROM_DEVID(qv->devid));
+	} else {
+		vdev = switch_vlan_dev_get_by_port(ndev->if_port);
+	}
+
+	if (unlikely(vdev == NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (argc == 4) {
+		if (strcmp(argv[3], "reset") != 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		switch_vlan_dev_reset(vdev, QVLAN_MODE_ACCESS);
+		ret = 0;
+		goto out;
+	}
+
+	ret = qdrv_control_vlan_config_cmd_parse(argc, argv, &vlanid, &tagtx, &pvid, &mode, &add);
+	if (ret)
+		goto out;
+
+	ret = qdrv_control_vlan_config_cmd_validation(vdev, mac, qv, wifidev, vlanid, tagtx, pvid, mode, add);
+	if (ret)
+		goto out;
+
+	ret = qdrv_control_vlan_config_cmd_execute(vdev, vlanid, tagtx, pvid, mode, add);
+	if (ret)
+		goto out;
+
+	/* Keep the VAP's VLAN-to-index mapping in sync for wifi devices. */
+	if (wifidev)
+		ret = qdrv_vap_vlan2index_sync(qv, mode, vlanid);
+
+out:
+	dev_put(ndev);
+	return ret;
+}
+
+/*
+ * Create (enable != 0) or remove (enable == 0) the multicast VLAN group
+ * node for VLAN "vid" on the named VAP interface.
+ *
+ * Fix: the netdevice reference was previously released immediately after
+ * netdev_priv(), while the private area (qv) was still used afterwards.
+ * The reference is now held until the function is done with the VAP.
+ */
+static int qdrv_control_set_vlan_group(struct qdrv_mac *mac, const char *dev_name, uint16_t vid, int enable)
+{
+	struct net_device *ndev;
+	struct qdrv_vap *qv;
+	struct qdrv_node *vlan_group;
+	int ret = 0;
+
+	ndev = dev_get_by_name(&init_net, dev_name);
+	if (!ndev) {
+		printk(KERN_ERR"netdevice %s does not exist\n", dev_name);
+		return -EINVAL;
+	}
+	qv = netdev_priv(ndev);
+
+	if (vid >= QVLAN_VID_MAX) {
+		printk(KERN_ERR"%u is not a valid VLAN\n", vid);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (enable) {
+		vlan_group = qdrv_vlan_find_group_noref(qv, vid);
+		if (vlan_group) {
+			/* Already present: creation is idempotent. */
+			printk(KERN_INFO"VLAN group %u is present\n", vid);
+			goto out;
+		}
+
+		vlan_group = qdrv_vlan_alloc_group(qv, vid);
+		if (!vlan_group) {
+			printk(KERN_INFO"VLAN group %u allocation failed\n", vid);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		printk(KERN_INFO"VLAN group %u created\n", vid);
+
+	} else {
+		vlan_group = qdrv_vlan_find_group_noref(qv, vid);
+		if (!vlan_group) {
+			printk(KERN_INFO"VLAN group %u does not exist\n", vid);
+			/* NOTE(review): -ENOENT would describe this better;
+			 * -EEXIST kept for backward compatibility. */
+			ret = -EEXIST;
+			goto out;
+		}
+
+		printk(KERN_INFO"VLAN group %u removed, refcnt %u\n", vid, ieee80211_node_refcnt(&vlan_group->qn_node));
+
+		qdrv_vlan_free_group(vlan_group); /* neutralize VLAN group creation */
+	}
+
+out:
+	dev_put(ndev);
+	return ret;
+}
+
+/*
+ * "set mac_reserve [addr [mask]]": with no arguments clear all
+ * reservations, otherwise reserve the given address with an optional mask
+ * (default: exact match, all-ones).  Returns 0/-1 parse result or the
+ * value of qdrv_mac_reserve_set().
+ */
+static int qdrv_control_mac_reserve(int argc, char *argv[])
+{
+	uint8_t addr[IEEE80211_ADDR_LEN];
+	uint8_t mask[IEEE80211_ADDR_LEN];
+
+	if (argc == 0) {
+		qdrv_mac_reserve_clear();
+		return 0;
+	}
+
+	if (qdrv_parse_mac(argv[0], addr) < 0) {
+		printk("%s: invalid mac address %s\n", __func__, argv[0]);
+		return -1;
+	}
+
+	/* Default mask matches the address exactly; a second argument
+	 * overrides it. */
+	memset(mask, 0xff, ARRAY_SIZE(mask));
+	if (argc > 1 && qdrv_parse_mac(argv[1], mask) < 0) {
+		printk("%s: invalid mask %s\n", __func__, argv[1]);
+		return -1;
+	}
+
+	return qdrv_mac_reserve_set(addr, mask);
+}
+
+/*
+ * Record the expected checksum for a power table file.  Entries live in a
+ * singly-linked list on the control block; duplicates (same filename) are
+ * rejected, and nothing may be added after the list has been locked via
+ * the "lock_checksum_list" command.
+ */
+static void qdrv_control_set_power_table_checksum(struct qdrv_cb *qcb, char *fname,
+							char *checksum)
+{
+	struct qdrv_power_table_checksum_entry *p_entry;	/* list walker, ends at tail */
+	struct qdrv_power_table_checksum_entry *n_entry;	/* newly allocated entry */
+
+	if (qcb->power_table_ctrl.checksum_list_locked) {
+		printk("QDRV: power table checksum list has been locked\n");
+		return;
+	}
+
+	/* Length checks guard the strcpy()s below (assumes the entry buffers
+	 * are sized QDRV_POWER_TABLE_FNAME_MAX_LEN+1 /
+	 * QDRV_POWER_TABLE_CHECKSUM_LEN+1 — TODO confirm in the struct). */
+	if (strlen(fname) > QDRV_POWER_TABLE_FNAME_MAX_LEN) {
+		printk("QDRV: power table filename is too long\n");
+		return;
+	}
+
+	if (strlen(checksum) != QDRV_POWER_TABLE_CHECKSUM_LEN) {
+		printk("QDRV: power table checksum length is invalid\n");
+		return;
+	}
+
+	/* Walk to the tail, rejecting duplicates; on exit p_entry is the
+	 * last node, or NULL if the list is empty. */
+	p_entry = qcb->power_table_ctrl.checksum_list;
+	while (p_entry) {
+		if (strcmp(p_entry->fname, fname) == 0) {
+			printk("QDRV: power table checksum for %s exists\n", fname);
+			return;
+		}
+		if (p_entry->next) {
+			p_entry = p_entry->next;
+		} else {
+			break;
+		}
+	}
+
+	n_entry = kmalloc(sizeof(struct qdrv_power_table_checksum_entry), GFP_KERNEL);
+	if (!n_entry) {
+		printk("QDRV: set power table checksum malloc failed\n");
+		return;
+	}
+
+	n_entry->next = NULL;
+	strcpy(n_entry->fname, fname);
+	strcpy(n_entry->checksum, checksum);
+	/* Append at the tail, or start the list if it was empty. */
+	if (p_entry) {
+		p_entry->next = n_entry;
+	} else {
+		qcb->power_table_ctrl.checksum_list = n_entry;
+	}
+
+	printk("QDRV: power table checksum for %s\n", fname);
+}
+
+/*
+ * "set <name> <value> ..." command dispatcher.  Commands with bespoke
+ * handlers (wmm_ac_map, vlan, dyn-vlan, vlan-group, mac_reserve, wps_intf,
+ * br_isolate, power table controls) are dispatched first; anything else is
+ * looked up in s_param_table and written to its destination, with extra
+ * post-processing for a few special parameters (test1, vendor_fix,
+ * vap_default_state, brcm_rxglitch_thrshlds, vlan_promisc, pwr_mgmt,
+ * rxgain_params).  Returns 0 on success, negative on error.
+ */
+static int qdrv_command_set(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_cb *qcb;
+	struct qdrv_mac *mac = NULL;
+	struct qdrv_wlan *qw = NULL;
+	int i;
+	char *value;
+	char *name;
+	char *dest;		/* where the parsed value is stored */
+	uint32_t vendor_fix;
+	uint32_t vap_default_state;
+	int32_t brcm_rxglitch_thrshlds;
+
+	qcb = dev_get_drvdata(dev);
+
+	/* All commands need at least a name and a value. */
+	name = argv[1];
+	if (name == NULL) {
+		DBGPRINTF_E("set command is NULL\n");
+		return -1;
+	}
+
+	value = argv[2];
+	if (value == NULL) {
+		DBGPRINTF_E("set command value for %s is NULL\n", name);
+		return -1;
+	}
+
+	/* --- commands with dedicated handlers --- */
+	if (strcmp(name, "wmm_ac_map") == 0) {
+		char *dev_name = argv[2];
+		int tos = simple_strtol(argv[3], NULL, 10);
+		int aid = simple_strtol(argv[4], NULL, 10);
+
+		qdrv_control_set_wmm_ac_map(dev_name, tos, aid);
+		return 0;
+	}
+	if (strcmp(name, "power_table_checksum") == 0) {
+		char *fname = argv[2];
+		char *checksum = argv[3];
+
+		qdrv_control_set_power_table_checksum(qcb, fname, checksum);
+		return 0;
+	}
+	if (strcmp(name, "lock_checksum_list") == 0) {
+		qcb->power_table_ctrl.checksum_list_locked = 1;
+		return 0;
+	}
+	if (strcmp(name, "power_selection") == 0) {
+		qcb->power_table_ctrl.power_selection = simple_strtol(argv[2], NULL, 10);
+		printk("set power_selection %u\n", qcb->power_table_ctrl.power_selection);
+		return 0;
+	}
+	if (strcmp(name, "power_recheck") == 0) {
+		qcb->power_table_ctrl.power_recheck = !!simple_strtol(argv[2], NULL, 10);
+		printk("set power_recheck %u\n", qcb->power_table_ctrl.power_recheck);
+		return 0;
+	}
+	if (strcmp(name, "vlan") == 0) {
+		return qdrv_control_vlan_config((struct qdrv_mac *) (&qcb->macs[0]), argc, argv);
+	}
+
+	if (strcmp(name, "dyn-vlan") == 0) {
+		const char *addr;
+		uint16_t vid;
+		int ival;
+		if (argc != 4)
+			return -EINVAL;
+		addr = argv[2];
+		ival = simple_strtol(argv[3], NULL, 10);
+		/* An out-of-range VID silently falls back to the default pvid. */
+		vid = (qtn_vlan_is_valid(ival) ? (uint16_t)ival : QVLAN_DEF_PVID);
+
+		return qdrv_control_set_sta_vlan((struct qdrv_mac *) (&qcb->macs[0]),
+							addr, vid);
+	}
+
+	if (strcmp(name, "vlan-group") == 0) {
+		/* set vlan-group {ifname} {vlan_id} [0|1] */
+		const char *dev_name;
+		uint16_t vid;
+		int enable;
+		if (argc != 5) {
+			printk("vlan-group: invalid argument\n");
+			return -EINVAL;
+		}
+		dev_name = argv[2];
+		vid = simple_strtol(argv[3], NULL, 10);
+		enable = simple_strtol(argv[4], NULL, 10);
+		return qdrv_control_set_vlan_group((struct qdrv_mac *) (&qcb->macs[0]),
+						dev_name, vid, !!enable);
+	}
+
+	if (strcmp(name, "mac_reserve") == 0)
+		return qdrv_control_mac_reserve(argc - 2, &argv[2]);
+
+	/* Re-bind the WPS button handler to the named wifi interface. */
+	if (strcmp(name, "wps_intf") == 0) {
+                if (argc != 3)
+                        return -EINVAL;
+
+                const char *dev_name = argv[2];
+                struct net_device *ndev = dev_get_by_name(&init_net, dev_name);
+                if (!ndev)
+                        return -EINVAL;
+                if (!(ndev->qtn_flags & QTN_FLAG_WIFI_DEVICE)) {
+                        dev_put(ndev);
+                        return -EINVAL;
+                }
+
+                qdrv_wps_button_exit();
+                qdrv_wps_button_init(ndev);
+
+                dev_put(ndev);
+
+		return 0;
+        }
+
+	if (strcmp(name, "br_isolate") == 0)
+		return qdrv_control_set_br_isolate(dev, argc - 2, &argv[2]);
+
+	/* --- generic parameters: look the name up in the table --- */
+	for (i = 0; i < PARAM_TABLE_SIZE; i++) {
+		if (strcmp(name, s_param_table[i].name) == 0) {
+			break;
+		}
+	}
+
+	if (i == PARAM_TABLE_SIZE) {
+		DBGPRINTF_E("Parameter %s is not recognized\n", name);
+		return -1;
+	}
+
+	/* Check if a specific address is given */
+	if (s_param_table[i].address != NULL) {
+		dest = s_param_table[i].address;
+	} else if(strcmp(name, "uc_flags") == 0) {
+		/* setting something in shared parameters */
+		shared_params *sp = qtn_mproc_sync_shared_params_get();
+		if (sp == NULL) {
+			DBGPRINTF_E("shared_params struct not yet published\n");
+			return(-1);
+		}
+		dest = (char *) &sp->uc_flags;
+	} else if(strcmp(name, "vendor_fix") == 0) {
+		/* These three parse into locals and are applied further below. */
+		dest = (char *)&vendor_fix;
+	} else if(strcmp(name, "vap_default_state") == 0) {
+		dest = (char *)&vap_default_state;
+	} else if(strcmp(name, "brcm_rxglitch_thrshlds") == 0) {
+		dest = (char *)&brcm_rxglitch_thrshlds;
+	} else {
+		/* Use an offset into our control structure */
+		dest = (char *) qcb + s_param_table[i].offset;
+	}
+
+	/* Parse the value into dest according to the table entry's type. */
+	if (s_param_table[i].flags & P_FL_TYPE_INT) {
+		if (value[0] == '0' && value[1] == 'x') {
+			if (sscanf(&value[2], "%x", (int *) dest) != 1)
+				goto error;
+		} else {
+			if (sscanf(value, "%d", (int *) dest) != 1)
+				goto error;
+		}
+	} else if (s_param_table[i].flags & P_FL_TYPE_STRING) {
+		strncpy(dest, value, s_param_table[i].size);
+		dest[s_param_table[i].size - 1] = '\0';
+
+	} else if(s_param_table[i].flags & P_FL_TYPE_MAC) {
+		if (qdrv_parse_mac(value, (uint8_t *) dest) < 0) {
+			goto error;
+		}
+
+		/* Setting the wifi MAC propagates to MuC, MAC unit 0 and ic. */
+		if ((uint8_t *)dest == &wifi_macaddr[0]) {
+			qw = (struct qdrv_wlan *)qcb->macs[0].data;
+			mac = (struct qdrv_mac *) (&qcb->macs[0]);
+			qdrv_hostlink_msg_set_wifi_macaddr(qw, &wifi_macaddr[0]);
+			memcpy(mac->mac_addr, wifi_macaddr, IEEE80211_ADDR_LEN);
+			memcpy(qcb->mac0, wifi_macaddr, IEEE80211_ADDR_LEN);
+			memcpy(qw->ic.ic_myaddr, wifi_macaddr, IEEE80211_ADDR_LEN);
+		}
+	}
+
+	/* Propagate any parameters into sub-structures of the qcb. */
+	qdrv_command_set_post(qcb);
+
+	/* --- parameter-specific post-processing --- */
+	if(strcmp(name, "test1") == 0) {
+		/* Radar test mode: pin power management and raise DFS logging. */
+		static int test_mode_pm_overide = 0;
+
+		if ((g_qdrv_radar_test_mode == 0x2) || (g_qdrv_radar_test_mode == 0x3)) {
+			if ((test_mode_pm_overide == 0) &&
+					(pm_qos_add_requirement(PM_QOS_POWER_SAVE, "war_test1",
+							BOARD_PM_LEVEL_FORCE_NO) == 0)) {
+
+				test_mode_pm_overide = 1;
+			}
+			g_dbg_log_module |= DBG_LM;
+			DBG_LOG_FUNC |= QDRV_LF_DFS_TESTMODE;
+			DBG_LOG_LEVEL = DBG_LL_NOTICE;
+		} else if (g_qdrv_radar_test_mode == 0x0) {
+			if (test_mode_pm_overide != 0) {
+				pm_qos_remove_requirement(PM_QOS_POWER_SAVE, "war_test1");
+				test_mode_pm_overide = 0;
+			}
+			DBG_LOG_FUNC &= ~QDRV_LF_DFS_TESTMODE;
+			DBG_LOG_LEVEL = DBG_LL_WARNING;
+		}
+		else
+			goto error;
+	} else if (strcmp(name, "vendor_fix") == 0) {
+		struct ieee80211com *ic;
+		int update_beacon = 0;
+
+		mac = qdrv_control_mac_get(dev, "0");
+		if (mac == NULL) {
+			DBGPRINTF_E("mac NULL\n");
+			goto error;
+                }
+                qw = qdrv_control_wlan_get(mac);
+		if (!qw) {
+			goto error;
+		}
+
+		ic = &qw->ic;
+
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX, "Previous vendor fix flag is 0x%x\n",
+			ic->ic_vendor_fix);
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"DHCP fix is %s\n",
+			(vendor_fix & VENDOR_FIX_BRCM_DHCP) ? "enabled" : "disabled");
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"Replace IGMP src mac is %s\n",
+			(vendor_fix & VENDOR_FIX_BRCM_REPLACE_IGMP_SRCMAC) ? "enabled" : "disabled");
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"Replace IP src mac is %s\n",
+			(vendor_fix & VENDOR_FIX_BRCM_REPLACE_IP_SRCMAC) ? "enabled" : "disabled");
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"Drop STA IGMP query is %s\n",
+			(vendor_fix & VENDOR_FIX_BRCM_DROP_STA_IGMPQUERY) ? "enabled" : "disabled");
+
+		/* A DHCP-fix toggle changes beacon contents for running APs. */
+		if ((ic->ic_vendor_fix & VENDOR_FIX_BRCM_DHCP) != (vendor_fix & VENDOR_FIX_BRCM_DHCP)) {
+			update_beacon = 1;
+		}
+		ic->ic_vendor_fix = vendor_fix;
+		if (update_beacon) {
+			struct ieee80211vap *vap;
+			TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+				if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+					continue;
+				if (vap->iv_state != IEEE80211_S_RUN)
+					continue;
+				ic->ic_beacon_update(vap);
+			}
+		}
+	} else if (strcmp(name, "vap_default_state") == 0) {
+		struct ieee80211com *ic;
+
+		mac = qdrv_control_mac_get(dev, "0");
+		if (mac == NULL) {
+			DBGPRINTF_E("mac NULL\n");
+			goto error;
+                }
+                qw = qdrv_control_wlan_get(mac);
+		if (!qw) {
+			goto error;
+		}
+
+		ic = &qw->ic;
+		ic->ic_vap_default_state = !!vap_default_state;
+	} else if (strcmp(name, "brcm_rxglitch_thrshlds") == 0) {
+		struct ieee80211com *ic;
+		struct brcm_rxglitch_thrshld_pair *pair;
+		int pwr, idx, rssi, pos;
+		uint32_t glitch;
+
+		ic = qdrv_get_ieee80211com(dev);
+		if (ic == NULL) {
+			return -1;
+		}
+		pair = ic->ic_scs.scs_brcm_rxglitch_thrshlds;
+
+		if ((brcm_rxglitch_thrshlds > 0) && (brcm_rxglitch_thrshlds <= BRCM_RXGLITCH_THRSHLD_SCALE_MAX)) {
+			ic->ic_scs.scs_brcm_rxglitch_thrshlds_scale = brcm_rxglitch_thrshlds;
+		} else {
+			printk("brcm rx glitch thresholds scale must be in range (%u, %u]\n", 0, BRCM_RXGLITCH_THRSHLD_SCALE_MAX);
+		}
+		printk("brcm rx glitch thresholds scale = %u\n", ic->ic_scs.scs_brcm_rxglitch_thrshlds_scale);
+		/* With extra args update one table slot, otherwise dump all. */
+		if (argc >= 7) {
+			pwr = STR2L(argv[3]);
+			idx = STR2L(argv[4]);
+			rssi = STR2L(argv[5]);
+			glitch = STR2L(argv[6]);
+			pos = pwr * BRCM_RXGLITH_THRSHLD_STEP + idx;
+			pair[pos].rssi = rssi;
+			pair[pos].rxglitch = glitch;
+			printk("Set pwr=%d, idx=%d to rssi=%d, glitch=%u\n", pwr, idx,
+					pair[pos].rssi, pair[pos].rxglitch);
+		} else {
+			printk("current brcm_rxglitch_thresholds:\n");
+			for (pwr = 0; pwr < BRCM_RXGLITH_THRSHLD_PWR_NUM; pwr++) {
+				for (idx = 0; idx < BRCM_RXGLITH_THRSHLD_STEP; idx++) {
+					pos = pwr * BRCM_RXGLITH_THRSHLD_STEP + idx;
+					printk("pwr=%d, idx=%d, rssi=%d, glitch=%u\n", pwr, idx,
+							pair[pos].rssi, pair[pos].rxglitch);
+				}
+			}
+		}
+	} else if(strcmp(name, "vlan_promisc") == 0) {
+		br_vlan_set_promisc(*((int*)dest));
+	} else if (strcmp(name, "pwr_mgmt") == 0) {
+		uint8_t tdls_peer_mac[IEEE80211_ADDR_LEN];
+		struct ieee80211com *ic;
+
+		if (qdrv_parse_mac(argv[3], (uint8_t *)tdls_peer_mac) < 0) {
+			goto error;
+		}
+
+		ic = qdrv_get_ieee80211com(dev);
+		if (ic == NULL)
+			return -1;
+
+		ieee80211_send_qosnulldata_ext(ic, tdls_peer_mac, *((int*)dest));
+	} else if (strcmp(name, "rxgain_params") == 0) {
+		struct ieee80211com *ic;
+		ic = qdrv_get_ieee80211com(dev);
+		/* With a full parameter set program one index, else reset. */
+		if (argc >= 8) {
+			struct qtn_rf_rxgain_params rx_gain_params = {0};
+			int index = (int)STR2L(argv[2]);
+			rx_gain_params.lna_on_indx = (int8_t)STR2L(argv[3]);;
+			rx_gain_params.max_gain_idx = (int16_t)STR2L(argv[4]);
+			rx_gain_params.cs_thresh_dbm = (int16_t)STR2L(argv[5]);
+			rx_gain_params.cca_prim_dbm = (int16_t)STR2L(argv[6]);
+			rx_gain_params.cca_sec_scs_off_dbm = (int16_t)STR2L(argv[7]);
+			rx_gain_params.cca_sec_scs_on_dbm = (int16_t)STR2L(argv[8]);
+			qdrv_rxgain_params(ic, index, &rx_gain_params);
+		} else {
+			qdrv_rxgain_params(ic, 0, NULL);
+		}
+	}
+
+	return 0;
+
+error:
+	DBGPRINTF_E("Value %s for parameter %s is invalid\n", value, name);
+
+	return -1;
+}
+
+/* Map an RF chip identifier to a human-readable band name. */
+static char *qdrv_show_bands(const int chipid)
+{
+	if (chipid == CHIPID_2_4_GHZ)
+		return "2.4GHz";
+	if (chipid == CHIPID_5_GHZ)
+		return "5GHz";
+	if (chipid == CHIPID_DUAL)
+		return "dual";
+
+	return "unknown";
+}
+
+/*
+ * Print to the seq_file when one is provided, otherwise to the console.
+ * Fixes: the body previously tested the caller's variable "s" instead of
+ * the macro parameter "_s" (it only worked because every caller passes a
+ * variable named s), and the trailing semicolon after while(0) defeated
+ * the do/while(0) idiom in unbraced if/else statements.
+ */
+#define QDRV_SHOW_PRINT(_s, _fmt, ...) do {		\
+	if (_s)						\
+		seq_printf(_s, _fmt, ##__VA_ARGS__);	\
+	else						\
+		printk(_fmt, ##__VA_ARGS__);		\
+} while (0)
+
+/* Human-readable name for a qdrv build type. */
+static char *qdrv_control_bld_type_str(enum qdrv_bld_type bld_type)
+{
+	if (bld_type == QDRV_BLD_TYPE_ENG)
+		return "eng";
+	if (bld_type == QDRV_BLD_TYPE_BENCH)
+		return "bench";
+	if (bld_type == QDRV_BLD_TYPE_BUILDBOT)
+		return "buildbot";
+	if (bld_type == QDRV_BLD_TYPE_REL)
+		return "release";
+	if (bld_type == QDRV_BLD_TYPE_SDK)
+		return "SDK";
+	if (bld_type == QDRV_BLD_TYPE_GPL)
+		return "GPL";
+
+	return "unknown";
+}
+
+/*
+ * "info" show handler: dump build, version, firmware and MAC address
+ * information for this MAC unit, to the seq_file or (when s is NULL) to
+ * the console via QDRV_SHOW_PRINT.
+ */
+static void qdrv_show_info(struct seq_file *s, void *data, uint32_t num)
+{
+	struct qdrv_mac *mac = (struct qdrv_mac *)data;
+	struct qdrv_cb *qcb = container_of(mac, struct qdrv_cb, macs[mac->unit]);
+	struct ieee80211com *ic = NULL;
+	struct qdrv_wlan *qw;
+#define QDRV_SWVER_STR_MAX	20
+	char swver[QDRV_SWVER_STR_MAX] = { 0 };
+
+	/* The wlan may not be attached yet; hw/band lines are skipped then. */
+	qw = qdrv_control_wlan_get(mac);
+	if (qw) {
+		ic = &qw->ic;
+		snprintf(swver, sizeof(swver) - 1, DBGFMT_BYTEFLD4_P,
+			DBGFMT_BYTEFLD4_V(ic->ic_ver_sw));
+	}
+
+	QDRV_SHOW_PRINT(s, "Build name:            %s\n", QDRV_BLD_NAME);
+	QDRV_SHOW_PRINT(s, "Build revision:        %s\n", QDRV_BLD_REV);
+	QDRV_SHOW_PRINT(s, "Build type:            %s\n", qdrv_control_bld_type_str(QDRV_BLD_TYPE));
+	QDRV_SHOW_PRINT(s, "Build timestamp:       %lu\n", QDRV_BUILDDATE);
+	if (strcmp(QDRV_BLD_NAME, QDRV_BLD_LABEL) != 0)
+		QDRV_SHOW_PRINT(s, "Software label:        %s\n", QDRV_BLD_LABEL);
+	QDRV_SHOW_PRINT(s, "Platform ID:           %u\n", QDRV_CFG_PLATFORM_ID);
+	QDRV_SHOW_PRINT(s, "Hardware ID:           %s\n", qdrv_soc_get_hw_id(0));
+	if (ic) {
+		QDRV_SHOW_PRINT(s, "Hardware revision:     %s\n", qdrv_soc_get_hw_rev_desc(ic->ic_ver_hw));
+		/* NOTE(review): the ternary below is redundant — ic is
+		 * non-NULL inside this if. */
+		QDRV_SHOW_PRINT(s, "Band:                  %s\n", qdrv_show_bands(ic ? ic->ic_rf_chipid : -1));
+	}
+	QDRV_SHOW_PRINT(s, "Kernel version:        " DBGFMT_BYTEFLD3_P "\n",
+							DBGFMT_BYTEFLD3_V(LINUX_VERSION_CODE));
+	QDRV_SHOW_PRINT(s, "Calibration version:   %s\n", qcb->algo_version);
+	QDRV_SHOW_PRINT(s, "DC/IQ cal version:     %s\n", dc_iq_calfile_version);
+	QDRV_SHOW_PRINT(s, "Power cal version:     %s\n", power_calfile_version);
+	QDRV_SHOW_PRINT(s, "MuC firmware:          %s\n", qcb->muc_firmware);
+	QDRV_SHOW_PRINT(s, "DSP firmware:          %s\n", qcb->dsp_firmware);
+	QDRV_SHOW_PRINT(s, "AuC firmware:          %s\n", qcb->auc_firmware);
+	QDRV_SHOW_PRINT(s, "MAC address 0:         %pM\n", qcb->mac0);
+	QDRV_SHOW_PRINT(s, "MAC address 1:         %pM\n", qcb->mac1);
+	QDRV_SHOW_PRINT(s, "U-Boot version:        %s\n", RUBY_UBOOT_VERSION);
+}
+
+/* Show handler: print the hardware description string for chip 0. */
+static void
+qdrv_show_hw_desc(struct seq_file *s, void *data, u32 num)
+{
+	const char *desc = qdrv_soc_get_hw_desc(0);
+
+	seq_printf(s, "%s\n", desc);
+}
+
+/* Show handler: print the MuC firmware image name from the control block. */
+static void
+qdrv_show_mucfw(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_cb *qcb = data;
+
+	seq_printf(s, "%s\n", qcb->muc_firmware);
+}
+
+/*
+ * Return 1 for internal (eng/bench/buildbot) build types, 0 for external
+ * (release/SDK/GPL) ones; unknown types count as internal.
+ */
+static int
+qdrv_fw_is_internal(enum qdrv_bld_type bld_type)
+{
+	switch (bld_type) {
+	case QDRV_BLD_TYPE_REL:
+	case QDRV_BLD_TYPE_SDK:
+	case QDRV_BLD_TYPE_GPL:
+		return 0;
+	default:
+		return 1;
+	}
+}
+
+/*
+ * Show handler: internal builds report the build name, external builds the
+ * numeric software version.
+ */
+static void
+qdrv_show_fw_ver(struct seq_file *s, void *data, u32 num)
+{
+	struct ieee80211com *ic = data;
+
+	if (!qdrv_fw_is_internal(QDRV_BLD_TYPE)) {
+		seq_printf(s, DBGFMT_BYTEFLD4_P "\n", DBGFMT_BYTEFLD4_V(ic->ic_ver_sw));
+		return;
+	}
+
+	seq_printf(s, "%s\n", QDRV_BLD_NAME);
+}
+
+/* Show handler: print the configured platform identifier. */
+static void
+qdrv_show_platform_id(struct seq_file *s, void *data, u32 num)
+{
+	seq_printf(s, "%u\n", QDRV_CFG_PLATFORM_ID);
+}
+
+/*
+ * Show handler: print the checksum selected by a preceding read request
+ * ("NA" when none is pending), then clear the request.
+ */
+static void
+qdrv_show_checksum(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_cb *qcb = data;
+
+	if (qcb->power_table_ctrl.reading_checksum)
+		seq_printf(s, "%s\n", qcb->power_table_ctrl.reading_checksum->checksum);
+	else
+		seq_printf(s, "NA\n");
+
+	qcb->power_table_ctrl.reading_checksum = NULL;
+}
+
+/* Show handler: print the last value reported by the MuC. */
+static void
+qdrv_show_muc_value(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_cb *qcb = data;
+
+	seq_printf(s, "%d\n", (int) qcb->value_from_muc);
+}
+
+/*
+ * Show handler: dump the AuC statistics fields, choosing the no-MU or
+ * default stat table based on the firmware's fw_no_mu flag.
+ *
+ * Fix: qtn_mproc_sync_shared_params_get() may return NULL before the
+ * shared params are published (this file already checks for that in
+ * qdrv_command_set); bail out instead of dereferencing NULL.
+ */
+static void
+qdrv_show_auc_stats(struct seq_file *s, void *data, u32 num)
+{
+	const struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	const struct qtn_auc_stat_field *auc_field_stats;
+	size_t nstats;
+	bool fw_no_mu;
+	unsigned int i;
+
+	if (sp == NULL)
+		return;
+
+	fw_no_mu = sp->fw_no_mu;
+	auc_field_stats = fw_no_mu ? auc_field_stats_nomu : auc_field_stats_default;
+	nstats = fw_no_mu ? ARRAY_SIZE(auc_field_stats_nomu) :
+			    ARRAY_SIZE(auc_field_stats_default);
+
+	for (i = 0; i < nstats; i++) {
+		const uintptr_t addr = auc_field_stats[i].addr;
+		const char *const name = auc_field_stats[i].name;
+		uint32_t val = *((const uint32_t *) addr);
+
+		seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, name, val);
+	}
+}
+
+/*
+ * Calculate the average value of a histogram whose bucket i covers
+ * [width*i, width*(i+1)-1]; each bucket is weighted by its midpoint
+ * (the sums are kept doubled and halved at the end to stay integral).
+ *
+ * Fix: returns 0 for an empty (all-zero) histogram instead of dividing
+ * by zero.
+ */
+static uint32_t qdrv_get_hist_avr(uint32_t *histogram, uint32_t size, uint32_t width)
+{
+	uint32_t i;
+	uint32_t sum1, sum2;
+
+	for (i = 0, sum1 = 0, sum2 = 0; i < size; i++) {
+		sum1 += (2*width*i + (width - 1))*histogram[i];
+		sum2 += histogram[i];
+	}
+
+	if (sum2 == 0)
+		return 0;
+
+	return sum1/sum2/2;
+}
+
+/*
+ * Print the two DSP MU QMAT timing histograms (QMEM copy time in us,
+ * install time in ms): for each, a header row of bucket ranges, a row of
+ * bucket counts with the computed average, and the observed maximum.
+ */
+static void
+qdrv_show_dsp_time_histogram(struct seq_file *s,
+	volatile struct qtn_txbf_mbox *txbf_mbox)
+{
+	int i;
+
+/* ------------------------------------------------------------------------ */
+	/* Header row: bucket ranges in microseconds. */
+	seq_printf(s, "%-*s ", QDRV_UC_STATS_DESC_LEN, "dsp_mu_qmat_qmem_copy_time_hist");
+	for (i = 0; i < FIELD_ARRAY_SIZE(struct qtn_dsp_stats, dsp_mu_qmat_qmem_copy_time_hist); i++) {
+		seq_printf(s, "%-2u-%-2uus ", i*DSP_MU_QMAT_COPY_TIME_HIST_WIDTH_US,
+				(i + 1)*DSP_MU_QMAT_COPY_TIME_HIST_WIDTH_US - 1);
+	}
+	seq_printf(s, "avr us");
+
+	/* Data row: per-bucket counts followed by the average. */
+	seq_printf(s, "\n%-*s ", QDRV_UC_STATS_DESC_LEN, "dsp_mu_qmat_qmem_copy_time_hist");
+	for (i = 0; i < FIELD_ARRAY_SIZE(struct qtn_dsp_stats, dsp_mu_qmat_qmem_copy_time_hist); i++) {
+		seq_printf(s, "%-7u ", txbf_mbox->dsp_stats.dsp_mu_qmat_qmem_copy_time_hist[i]);
+	}
+	seq_printf(s, "%-7u\n", qdrv_get_hist_avr((uint32_t*)&txbf_mbox->dsp_stats.dsp_mu_qmat_qmem_copy_time_hist[0],
+			FIELD_ARRAY_SIZE(struct qtn_dsp_stats, dsp_mu_qmat_qmem_copy_time_hist),
+			DSP_MU_QMAT_COPY_TIME_HIST_WIDTH_US));
+	seq_printf(s, "%-*s", QDRV_UC_STATS_DESC_LEN, "dsp_mu_qmat_qmem_copy_time_hist");
+	seq_printf(s, "total maximum is %u us\n", txbf_mbox->dsp_stats.dsp_mu_qmat_qmem_copy_time_max);
+/* ------------------------------------------------------------------------ */
+	/* Same layout for the install-time histogram (milliseconds). */
+	seq_printf(s, "%-*s ", QDRV_UC_STATS_DESC_LEN, "dsp_mu_qmat_inst_time_hist");
+	for (i = 0; i < FIELD_ARRAY_SIZE(struct qtn_dsp_stats, dsp_mu_qmat_inst_time_hist); i++) {
+		seq_printf(s, "%-2u-%-2ums ", i*DSP_MU_QMAT_INST_TIME_HIST_WIDTH_MS,
+				(i + 1)*DSP_MU_QMAT_INST_TIME_HIST_WIDTH_MS - 1);
+	}
+	seq_printf(s, "avr ms");
+
+	seq_printf(s, "\n%-*s ", QDRV_UC_STATS_DESC_LEN, "dsp_mu_qmat_inst_time_hist");
+	for (i = 0; i < FIELD_ARRAY_SIZE(struct qtn_dsp_stats, dsp_mu_qmat_inst_time_hist); i++) {
+		seq_printf(s, "%-7u ", txbf_mbox->dsp_stats.dsp_mu_qmat_inst_time_hist[i]);
+	}
+	seq_printf(s, "%-7u\n", qdrv_get_hist_avr((uint32_t*)&txbf_mbox->dsp_stats.dsp_mu_qmat_inst_time_hist[0],
+			FIELD_ARRAY_SIZE(struct qtn_dsp_stats, dsp_mu_qmat_inst_time_hist),
+			DSP_MU_QMAT_INST_TIME_HIST_WIDTH_MS));
+	seq_printf(s, "%-*s ", QDRV_UC_STATS_DESC_LEN, "dsp_mu_qmat_inst_time_hist");
+	seq_printf(s, "total maximum is %u ms\n", txbf_mbox->dsp_stats.dsp_mu_qmat_inst_time_max);
+/* ------------------------------------------------------------------------ */
+
+}
+
+static void
+qdrv_show_dsp_stats(struct seq_file *s, void *data, u32 num)
+{
+#if DSP_ENABLE_STATS
+	int i;
+	volatile struct qtn_txbf_mbox *txbf_mbox = qtn_txbf_mbox_get();
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ndp_rx", txbf_mbox->dsp_stats.dsp_ndp_rx);
+
+	seq_printf(s, "%-*s %-3s %-8s %-8s %-8s %-8s %-8s %-8s %-8s %-8s %-8s %-8s\n",
+			QDRV_UC_STATS_DESC_LEN,
+			"dsp_act_rx", "aid", "total", "mu_gr_sl", "mu_prec", "su", "bad",
+			"mu_drop", "mu_nexp", "mu_lock", "mu_rl_nu", "inv_len");
+
+	for (i = 0; i < ARRAY_SIZE(txbf_mbox->dsp_stats.dsp_act_rx); i++)
+		seq_printf(s, "%-*s %-3u %-8u %-8u %-8u %-8u %-8u %-8u %-8u %-8u %-8u %-8u\n",
+			QDRV_UC_STATS_DESC_LEN, "dsp_act_rx", i,
+			txbf_mbox->dsp_stats.dsp_act_rx[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_mu_grp_sel[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_mu_prec[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_su[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_bad[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_mu_drop[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_mu_nexp[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_mu_lock_cache[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_mu_rel_nuse[i],
+			txbf_mbox->dsp_stats.dsp_act_rx_inval_len[i]);
+
+	seq_printf(s, "%-*s %-3s %-4s %-4s %-8s %-8s %-8s %-8s\n", QDRV_UC_STATS_DESC_LEN,
+			"dsp_mu_grp", "grp", "aid0", "aid1", "rank", "inst_ok", "upd_ok",
+			"upd_fl");
+	for (i = 0; i < ARRAY_SIZE(txbf_mbox->dsp_stats.dsp_mu_grp_inst_success); i++) {
+		seq_printf(s, "%-*s %-3u %-4u %-4u %-8d %-8u %-8u %-8u\n",
+			QDRV_UC_STATS_DESC_LEN, "dsp_mu_grp", i + 1,
+			txbf_mbox->dsp_stats.dsp_mu_grp_aid0[i],
+			txbf_mbox->dsp_stats.dsp_mu_grp_aid1[i],
+			txbf_mbox->dsp_stats.dsp_mu_grp_rank[i],
+			txbf_mbox->dsp_stats.dsp_mu_grp_inst_success[i],
+			txbf_mbox->dsp_stats.dsp_mu_grp_update_success[i],
+			txbf_mbox->dsp_stats.dsp_mu_grp_update_fail[i]);
+	}
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_del_mu_node_rx", txbf_mbox->dsp_stats.dsp_del_mu_node_rx);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_act_tx", txbf_mbox->dsp_stats.dsp_act_tx);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_act_free_tx", txbf_mbox->dsp_stats.dsp_act_free_tx);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ndp_discarded", txbf_mbox->dsp_stats.dsp_ndp_discarded);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ndp_inv_bw", txbf_mbox->dsp_stats.dsp_ndp_inv_bw);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ndp_inv_len", txbf_mbox->dsp_stats.dsp_ndp_inv_len);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ndp_max_len", txbf_mbox->dsp_stats.dsp_ndp_max_len);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_inst_mu_grp_tx", txbf_mbox->dsp_stats.dsp_inst_mu_grp_tx);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "wr", txbf_mbox->wr);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "muc_to_dsp_action_frame_mbox[0]", txbf_mbox->muc_to_dsp_action_frame_mbox[0]);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "muc_to_dsp_action_frame_mbox[1]", txbf_mbox->muc_to_dsp_action_frame_mbox[1]);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "muc_to_dsp_ndp_mbox", txbf_mbox->muc_to_dsp_ndp_mbox);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "muc_to_dsp_del_grp_node_mbox", txbf_mbox->muc_to_dsp_del_grp_node_mbox);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_to_host_mbox", txbf_mbox->dsp_to_host_mbox);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ipc_in", txbf_mbox->dsp_stats.dsp_ipc_in);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ipc_out", txbf_mbox->dsp_stats.dsp_ipc_out);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_exc", txbf_mbox->dsp_stats.dsp_exc);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_ipc_int", txbf_mbox->dsp_stats.dsp_ipc_int);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_timer_int", txbf_mbox->dsp_stats.dsp_timer_int);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_timer1_int", txbf_mbox->dsp_stats.dsp_timer1_int);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_last_int", txbf_mbox->dsp_stats.dsp_last_int);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_flag", txbf_mbox->dsp_stats.dsp_flag);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_point", txbf_mbox->dsp_stats.dsp_point);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_sleep_in", txbf_mbox->dsp_stats.dsp_sleep_in);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_sleep_out", txbf_mbox->dsp_stats.dsp_sleep_out);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_qmat_invalid", txbf_mbox->dsp_stats.dsp_qmat_invalid);
+
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_sram_qmat_num", txbf_mbox->dsp_stats.dsp_sram_qmat_num);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_err_neg_qmat_num", txbf_mbox->dsp_stats.dsp_err_neg_qmat_num);
+
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_stat_bad_stack", txbf_mbox->dsp_stats.dsp_stat_bad_stack);
+
+	uint32_t reg = qtn_mproc_sync_mem_read(RUBY_SYS_CTL_M2D_INT);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "RUBY_SYS_CTL_M2D_INT", reg);
+
+	reg = qtn_mproc_sync_mem_read(RUBY_SYS_CTL_D2L_INT);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "RUBY_SYS_CTL_D2L_INT", reg);
+
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_status32", txbf_mbox->dsp_stats.dsp_status32);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_status32_l1", txbf_mbox->dsp_stats.dsp_status32_l1);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_status32_l2", txbf_mbox->dsp_stats.dsp_status32_l2);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_ilink1", txbf_mbox->dsp_stats.dsp_ilink1);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_ilink2", txbf_mbox->dsp_stats.dsp_ilink2);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_blink", txbf_mbox->dsp_stats.dsp_blink);
+	seq_printf(s, "%-*s 0x%08x\n", QDRV_UC_STATS_DESC_LEN, "dsp_sp", txbf_mbox->dsp_stats.dsp_sp);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_time", txbf_mbox->dsp_stats.dsp_time);
+	seq_printf(s, "%-*s %d, %d, %d, %d\n", QDRV_UC_STATS_DESC_LEN, "mu_D_user1",
+		txbf_mbox->dsp_stats.dspmu_D_user1[0], txbf_mbox->dsp_stats.dspmu_D_user1[1],
+		txbf_mbox->dsp_stats.dspmu_D_user1[2], txbf_mbox->dsp_stats.dspmu_D_user1[3]);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "mu_intf_user1", txbf_mbox->dsp_stats.dspmu_max_intf_user1);
+	seq_printf(s, "%-*s %d, %d, %d, %d\n", QDRV_UC_STATS_DESC_LEN, "mu_D_user2",
+		txbf_mbox->dsp_stats.dspmu_D_user2[0], txbf_mbox->dsp_stats.dspmu_D_user2[1],
+		txbf_mbox->dsp_stats.dspmu_D_user2[2], txbf_mbox->dsp_stats.dspmu_D_user2[3]);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "mu_intf_user2", txbf_mbox->dsp_stats.dspmu_max_intf_user2);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "mu rank criteria", txbf_mbox->rank_criteria_to_use);
+
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_trig_mu_grp_sel", txbf_mbox->dsp_stats.dsp_trig_mu_grp_sel);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_rank_success", txbf_mbox->dsp_stats.dsp_mu_rank_success);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_rank_fail", txbf_mbox->dsp_stats.dsp_mu_rank_fail);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_grp_inv_act", txbf_mbox->dsp_stats.dsp_mu_grp_inv_act);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_act_cache_expired[grp_sel]", txbf_mbox->dsp_stats.dsp_act_cache_expired[0]);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_act_cache_expired[prec]", txbf_mbox->dsp_stats.dsp_act_cache_expired[1]);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_grp_upd_done", txbf_mbox->dsp_stats.dsp_mu_grp_upd_done);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_node_del", txbf_mbox->dsp_stats.dsp_mu_node_del);
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_grp_inst_fail", txbf_mbox->dsp_stats.dsp_mu_grp_inst_fail);
+
+	seq_printf(s, "%-*s %d\n", QDRV_UC_STATS_DESC_LEN, "dsp_mimo_ctrl_fail", txbf_mbox->dsp_stats.dsp_mimo_ctrl_fail);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_fb_80mhz", txbf_mbox->dsp_stats.dsp_mu_fb_80mhz);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_fb_40mhz", txbf_mbox->dsp_stats.dsp_mu_fb_40mhz);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_fb_20mhz", txbf_mbox->dsp_stats.dsp_mu_fb_20mhz);
+	seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "dsp_mu_drop_20mhz", txbf_mbox->dsp_stats.dsp_mu_drop_20mhz);
+
+	seq_printf(s, "%-*s %-3s %-3s %-3s %-3s %-8s\n", QDRV_UC_STATS_DESC_LEN,
+			"txbf_msg_bufs", "idx", "st", "mt", "aid", "cnt");
+	for (i = 0; i < ARRAY_SIZE(txbf_mbox->txbf_msg_bufs); i++) {
+		seq_printf(s, "%-*s %-3u %-3u %-3u %-3u %-8u\n",
+			QDRV_UC_STATS_DESC_LEN, "txbf_msg_bufs", i,
+			txbf_mbox->txbf_msg_bufs[i].state,
+			txbf_mbox->txbf_msg_bufs[i].msg_type,
+			txbf_mbox->txbf_msg_bufs[i].aid,
+			txbf_mbox->txbf_msg_bufs[i].counter);
+	}
+
+	qdrv_show_dsp_time_histogram(s, txbf_mbox);
+#else
+	seq_printf(s, "%-*s\n", QDRV_UC_STATS_DESC_LEN, "DSP_ENABLE_STATS must be defined to enable DSP stats");
+#endif
+}
+
+/*
+ * Dump the MuC (WiFi microcontroller) TX statistics counters to a seq_file.
+ *
+ * s:    seq_file supplied by the /proc show machinery.
+ * data: struct qdrv_mac pointer registered via qdrv_control_set_show().
+ * num:  iteration count from the seq_file loop (unused here).
+ *
+ * The stats block lives in MuC address space; it is ioremapped lazily on
+ * first use and the mapping is cached in qw->pktlogger.stats_uc_tx_ptr.
+ * Counter names come from MUC_TX_STATS_NAMES_TABLE, which is assumed to be
+ * index-aligned with struct muc_tx_stats — TODO confirm against shared hdr.
+ */
+static void
+qdrv_show_uc_tx_stats(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_mac *mac = (struct qdrv_mac *) data;
+	struct qtn_stats_log *iw_stats_log;
+	struct qdrv_wlan *qw;
+	const u32* tx_stats;
+	int i;
+	const char *tx_stats_names[] = MUC_TX_STATS_NAMES_TABLE;
+
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw) {
+		return;
+	}
+
+	iw_stats_log = qw->mac->mac_sys_stats;
+
+	/* No stats area published by the MuC yet; nothing to show. */
+	if (iw_stats_log == NULL) {
+		return;
+	}
+
+	/* presumably pushes buffered pktlogger data out before sampling —
+	 * confirm against qdrv_pktlogger_flush_data() */
+	qdrv_pktlogger_flush_data(qw);
+
+	/* Map the MuC TX stats block on first access; mapping is kept. */
+	if (qw->pktlogger.stats_uc_tx_ptr == NULL) {
+		qw->pktlogger.stats_uc_tx_ptr = ioremap_nocache(muc_to_lhost((u32)iw_stats_log->tx_muc_stats),
+				sizeof(struct muc_tx_stats));
+		if (qw->pktlogger.stats_uc_tx_ptr == NULL)
+			return;
+	}
+
+	tx_stats = qw->pktlogger.stats_uc_tx_ptr;
+
+	/* One "name value" line per counter. */
+	for (i = 0; i < ARRAY_SIZE(tx_stats_names); i++) {
+		seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, tx_stats_names[i], tx_stats[i]);
+	}
+}
+
+/*
+ * Dump the MuC RX statistics: raw counters, per-slot beamforming state,
+ * and the 11n/11ac per-MCS receive rate histograms.
+ *
+ * s:    seq_file supplied by the /proc show machinery.
+ * data: struct qdrv_mac pointer registered via qdrv_control_set_show().
+ * num:  iteration count from the seq_file loop (unused here).
+ *
+ * Three MuC-resident blocks (stats, rates, BF stats) are ioremapped
+ * lazily on first use; the mappings are cached in qw->pktlogger.
+ */
+static void
+qdrv_show_uc_rx_stats(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_mac *mac = (struct qdrv_mac *) data;
+	struct qtn_stats_log *iw_stats_log;
+	struct qdrv_wlan *qw;
+	struct muc_rx_rates *uc_rx_rates;
+	struct muc_rx_bf_stats *uc_rx_bf_stats;
+	const u32* rx_stats;
+	int i;
+	const char *rx_stats_names[] = MUC_RX_STATS_NAMES_TABLE;
+
+	qw = qdrv_control_wlan_get(mac);
+
+	if (!qw) {
+		return;
+	}
+
+	iw_stats_log = qw->mac->mac_sys_stats;
+
+	/* No stats area published by the MuC yet; nothing to show. */
+	if (iw_stats_log == NULL) {
+		return;
+	}
+
+	qdrv_pktlogger_flush_data(qw);
+
+	/* Lazily map each of the three MuC stats regions; each mapping is
+	 * created once and reused on subsequent reads. */
+	if (qw->pktlogger.stats_uc_rx_ptr == NULL) {
+		qw->pktlogger.stats_uc_rx_ptr = ioremap_nocache(muc_to_lhost((u32)iw_stats_log->rx_muc_stats),
+				sizeof(struct muc_rx_stats));
+		if (qw->pktlogger.stats_uc_rx_ptr == NULL)
+			return;
+	}
+	if (qw->pktlogger.stats_uc_rx_rate_ptr == NULL) {
+		qw->pktlogger.stats_uc_rx_rate_ptr =  ioremap_nocache(muc_to_lhost((u32)iw_stats_log->rx_muc_rates),
+				sizeof(struct muc_rx_rates));
+		if (qw->pktlogger.stats_uc_rx_rate_ptr == NULL)
+			return;
+	}
+	if (qw->pktlogger.stats_uc_rx_bf_ptr == NULL) {
+		qw->pktlogger.stats_uc_rx_bf_ptr =  ioremap_nocache(muc_to_lhost((u32)iw_stats_log->rx_muc_bf_stats),
+				sizeof(struct muc_rx_bf_stats));
+		if (qw->pktlogger.stats_uc_rx_bf_ptr == NULL)
+			return;
+	}
+
+	/* Stats */
+	rx_stats = qw->pktlogger.stats_uc_rx_ptr;
+	uc_rx_rates = (struct muc_rx_rates *)qw->pktlogger.stats_uc_rx_rate_ptr;
+
+	for (i = 0; i < ARRAY_SIZE(rx_stats_names); i++) {
+		seq_printf(s, "%-*s %u\n", QDRV_UC_STATS_DESC_LEN, rx_stats_names[i], rx_stats[i]);
+	}
+
+	/* Beamforming */
+	uc_rx_bf_stats = (struct muc_rx_bf_stats *)qw->pktlogger.stats_uc_rx_bf_ptr;
+	for (i = 0; i < QTN_STATS_NUM_BF_SLOTS; i++) {
+		seq_printf(s, "\nBF slot=%u aid=%u ng=%u rx:", i, uc_rx_bf_stats->rx_bf_aid[i],
+			uc_rx_bf_stats->rx_bf_ng[i]);
+		if (uc_rx_bf_stats->rx_bf_valid[i] == 0) {
+			seq_printf(s, " free");
+		/* Two adjacent slots carrying the same AID are treated as one
+		 * station occupying a dual slot. */
+		} else if ((i > 0) && (uc_rx_bf_stats->rx_bf_aid[i - 1] == uc_rx_bf_stats->rx_bf_aid[i])) {
+			seq_printf(s, " dual slot");
+		}
+		/* 11ac feedback counters, printed only if the slot saw any. */
+		if (uc_rx_bf_stats->rx_bf_11ac_ndp[i] || uc_rx_bf_stats->rx_bf_11ac_act[i] || uc_rx_bf_stats->rx_bf_11ac_grp_sel[i]) {
+			seq_printf(s, " (11ac ndp=%u act=%u grp sel=%u prec=%u "
+				"su=%u bad=%u fail=%u gradd=%u grdel=%u)",
+				uc_rx_bf_stats->rx_bf_11ac_ndp[i],
+				uc_rx_bf_stats->rx_bf_11ac_act[i],
+				uc_rx_bf_stats->rx_bf_11ac_grp_sel[i],
+				uc_rx_bf_stats->rx_bf_11ac_prec[i],
+				uc_rx_bf_stats->rx_bf_11ac_su[i],
+				uc_rx_bf_stats->rx_bf_11ac_bad[i],
+				uc_rx_bf_stats->rx_bf_11ac_dsp_fail[i],
+				uc_rx_bf_stats->mu_grp_add[i],
+				uc_rx_bf_stats->mu_grp_del[i]);
+		}
+		/* 11n feedback counters, likewise only when non-zero. */
+		if (uc_rx_bf_stats->rx_bf_11n_ndp[i] || uc_rx_bf_stats->rx_bf_11n_act[i]) {
+			seq_printf(s, " (11n ndp=%u act=%u)",
+				uc_rx_bf_stats->rx_bf_11n_ndp[i], uc_rx_bf_stats->rx_bf_11n_act[i]);
+		}
+	}
+	seq_printf(s, "\n%-*s %u\n", QDRV_UC_STATS_DESC_LEN, "BF msg_buf_alloc_fail",
+		uc_rx_bf_stats->msg_buf_alloc_fail);
+
+	/* 11n rates */
+	/* Histogram rows wrap every 10 MCS entries. */
+	for (i = 0; i < ARRAY_SIZE(uc_rx_rates->rx_mcs); i++) {
+		if ((i % 10) == 0) {
+			seq_printf(s, "\nmcs_11n ");
+		}
+		seq_printf(s, " %2d:%-9u", i, uc_rx_rates->rx_mcs[i]);
+	}
+	/* 11ac rates */
+	for (i = 0; i < ARRAY_SIZE(uc_rx_rates->rx_mcs_11ac); i++) {
+		if ((i % 10) == 0) {
+			seq_printf(s, "\nmcs_11ac");
+		}
+		seq_printf(s, " %2d:%-9u", i, uc_rx_rates->rx_mcs_11ac[i]);
+	}
+	seq_printf(s, "\n");
+}
+
+/*
+ * Print which frame-dump options are currently enabled for the given
+ * packet direction ("RX pkt" / "TX pkt").  Output is built as one
+ * logical log line: header, optional ", dump ..." fragments, final ".".
+ */
+static void
+qdrv_show_debug_level( char *type )
+{
+	printk("%s dump enabled, dump length %d", type, g_dbg_dump_pkt_len);
+
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DUMP_BEACON)) {
+		printk(", dump beacon frame");
+	}
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DUMP_ACTION)) {
+		printk(", dump action frame");
+	}
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DUMP_MGT)) {
+		printk(", dump management frame (exclude action frame)");
+	}
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DUMP_DATA)) {
+		printk(", dump data frame");
+	}
+	printk(".\n");
+}
+
+/*
+ * Emit the current vendor-interop fix bitmask (ic_vendor_fix) in hex.
+ * 'data' is the struct qdrv_wlan registered by the "get vendor_fix"
+ * command; 'num' is the seq_file iteration count (unused).
+ */
+static void
+qdrv_show_vendor_fix( struct seq_file *s, void *data, u32 num )
+{
+	struct ieee80211com *ic = &((struct qdrv_wlan *) data)->ic;
+
+	seq_printf(s, "0x%x\n", ic->ic_vendor_fix);
+}
+
+/*
+ * Emit the configured default VAP state as a decimal integer.
+ * 'data' is the struct qdrv_wlan registered by the command handler;
+ * 'num' is the seq_file iteration count (unused).
+ */
+static void
+qdrv_show_vap_default_state( struct seq_file *s, void *data, u32 num )
+{
+	struct ieee80211com *ic = &((struct qdrv_wlan *) data)->ic;
+
+	seq_printf(s, "%d\n", ic->ic_vap_default_state);
+}
+
+/*
+ * Serialize one VLAN device's configuration (pvid + mode, priority,
+ * member/tag bitmaps) into the seq_file as a raw struct qtn_vlan_config
+ * record for the userspace "get vlan_config <ifname>" reader.  When
+ * VLAN is globally disabled, a zeroed record with QVLAN_MODE_DISABLED
+ * is emitted instead.
+ */
+static void
+qdrv_control_show_vlan_dev(struct seq_file *s, struct qtn_vlan_dev *vdev)
+{
+	struct qtn_vlan_config *vcfg;
+	int ret;
+
+	/* kzalloc() zero-fills, so the disabled path below only needs to
+	 * set vlan_cfg (the former memset() there was redundant). */
+	vcfg = kzalloc(sizeof(struct qtn_vlan_config), GFP_KERNEL);
+	if (!vcfg) {
+		printk(KERN_ERR"Not enough memory to print QVLAN configuration\n");
+		return;
+	}
+
+	if (vlan_enabled) {
+		vcfg->vlan_cfg = vdev->pvid & QVLAN_MASK_VID;
+		vcfg->vlan_cfg |= ((struct qtn_vlan_user_interface *)(vdev->user_data))->mode << QVLAN_SHIFT_MODE;
+		vcfg->priority = vdev->priority;
+		memcpy(vcfg->u.dev_config.member_bitmap, vdev->u.member_bitmap, sizeof(vcfg->u.dev_config.member_bitmap));
+		memcpy(vcfg->u.dev_config.tag_bitmap, vdev->tag_bitmap, sizeof(vcfg->u.dev_config.tag_bitmap));
+	} else {
+		vcfg->vlan_cfg = (QVLAN_MODE_DISABLED << QVLAN_SHIFT_MODE);
+	}
+
+	/* seq_write() returns non-zero when the record did not fit. */
+	ret = seq_write(s, vcfg, sizeof(struct qtn_vlan_config));
+	if (ret)
+		printk(KERN_ERR"VLAN info could not be written to seq file\n");
+
+	kfree(vcfg);
+}
+
+/*
+ * Show callback for "get vlan_config <arg>".
+ *
+ * arg == "tagrx": emit the global tag-RX bitmap as a raw
+ * struct qtn_vlan_config record.  Otherwise 'arg' is an interface name:
+ * find the matching VLAN device and emit its per-device config via
+ * qdrv_control_show_vlan_dev().  With VLAN globally disabled, a zeroed
+ * record carrying QVLAN_MODE_DISABLED is emitted for the tagrx case.
+ */
+static void
+qdrv_control_show_vlan_config(struct seq_file *s, void *data, u32 num)
+{
+	int dev;
+	int ret;
+	struct qtn_vlan_dev *vdev;
+	struct qtn_vlan_config *vcfg;
+	struct net_device *ndev;
+
+	if (!strcmp(data, "tagrx")) {
+		/* kzalloc() zero-fills: the former "vlan_cfg = 0" and the
+		 * memset() in the else branch were both redundant. */
+		vcfg = kzalloc(sizeof(struct qtn_vlan_config), GFP_KERNEL);
+		if (!vcfg) {
+			printk(KERN_ERR"Not enough memory to print QVLAN configuration\n");
+			return;
+		}
+
+		if (vlan_enabled) {
+			memcpy(vcfg->u.tagrx_config, qtn_vlan_info.vlan_tagrx_bitmap, sizeof(vcfg->u.tagrx_config));
+		} else {
+			vcfg->vlan_cfg = (QVLAN_MODE_DISABLED << QVLAN_SHIFT_MODE);
+		}
+
+		ret = seq_write(s, vcfg, sizeof(struct qtn_vlan_config));
+		if (ret)
+			printk(KERN_ERR"VLAN info could not be written to seq file\n");
+		kfree(vcfg);
+	} else {
+		/* Linear search of the VLAN device table for the named
+		 * interface; dev_get_by_index/dev_put bracket each lookup. */
+		for (dev = 0; dev < VLAN_INTERFACE_MAX; dev++) {
+			vdev = vdev_tbl_lhost[dev];
+			if (vdev) {
+				ndev = dev_get_by_index(&init_net, vdev->ifindex);
+				if (unlikely(!ndev))
+					break;
+				if (!strncmp(ndev->name, data, IFNAMSIZ)) {
+					dev_put(ndev);
+					qdrv_control_show_vlan_dev(s, vdev);
+					break;
+				} else {
+					dev_put(ndev);
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Handler for "get <mac> show_assoc [options]": parse the filter
+ * options into a local params struct, copy it into the global
+ * g_show_assoc_params under s_output_sem, then register the stats
+ * printer as the show callback.  On a parse failure the usage printer
+ * is registered instead.
+ */
+static void qdrv_control_show_assoc(struct qdrv_mac *mac, int argc, char *argv[])
+{
+	struct qdrv_show_assoc_params params;
+
+	qdrv_show_assoc_init_params(&params, mac);
+
+	if (qdrv_show_assoc_parse_params(&params, argc, argv) != 0) {
+		/* Bad arguments: the next /proc read prints usage. */
+		qdrv_control_set_show(qdrv_show_assoc_print_usage, &g_show_assoc_params, 1, 1);
+		return;
+	}
+
+	/* Publish the parsed params under the output lock so a concurrent
+	 * reader never sees a half-copied struct. */
+	if (down_interruptible(&s_output_sem)) {
+		return;
+	}
+
+	g_show_assoc_params = params;
+
+	up(&s_output_sem);
+
+	qdrv_control_set_show(qdrv_show_assoc_print_stats, &g_show_assoc_params, 1, 1);
+}
+
+/*
+ * Show callback for "get core_dump_size": print the saved crash-log
+ * length (in bytes) pointed to by 'data'.  'num' is unused.
+ */
+static void qdrv_show_core_dump_size( struct seq_file *s, void *data, uint32_t num )
+{
+	const uint32_t *dump_len = (const uint32_t *) data;
+
+	seq_printf(s, "%u\n", *dump_len);
+}
+
+/*
+ * Show callback for "get core_dump": stream 'num' bytes of the saved
+ * crash log at 'data' into the seq_file.  seq_putc() is used per byte
+ * so embedded NUL and non-printable bytes survive intact.
+ *
+ * The previous "#if 0" debug branch (which replaced the real payload
+ * with an A..Z test pattern) was dead code and has been removed.
+ */
+static void qdrv_show_core_dump( struct seq_file *s, void *data, uint32_t num )
+{
+	const char *log = (const char *) data;
+	uint32_t i;
+
+	for (i = 0; i < num; ++i) {
+		seq_putc(s, log[i]);
+	}
+}
+
+/*
+ * Show callback for "get wps_intf": print the name of the network
+ * device bound to the WPS push button.
+ *
+ * Fix: guard against qdrv_wps_button_get_dev() returning NULL (no WPS
+ * button configured) — the previous code dereferenced it blindly.
+ */
+static void qdrv_show_wps_intf(struct seq_file *s, void *data, uint32_t num)
+{
+	struct net_device *ndev = qdrv_wps_button_get_dev();
+
+	if (ndev)
+		seq_printf(s, "%s", ndev->name);
+}
+
+/*
+ * Show callback for "get br_isolate": emit the bridge-isolation state
+ * as one binary u32 — upper 16 bits carry br_isolate_vid, lower 16
+ * bits carry the br_isolate flags.  Written raw with seq_write() for
+ * the userspace reader.
+ */
+static void
+qdrv_control_get_br_isolate(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_mac *mac = (struct qdrv_mac *)data;
+	struct qdrv_wlan *qw;
+	uint32_t val;
+
+	qw = qdrv_control_wlan_get(mac);
+	if (unlikely(!qw))
+		return;
+
+	/* Pack: vid in the high half, flags in the low half. */
+	val = ((qw->br_isolate_vid << 16) | qw->br_isolate);
+
+	seq_write(s, &val, sizeof(val));
+}
+
+/*
+ * Handler for the "get <mac> <item> [args]" control command.
+ *
+ * Resolves the qdrv_cb / qdrv_mac / qdrv_wlan context, then dispatches
+ * on argv[2].  Most items register a show callback through
+ * qdrv_control_set_show(), which the subsequent /proc read renders;
+ * a few items print directly with printk.
+ *
+ * Returns 0 on success, -1 on missing context, unknown item, or bad
+ * arguments.
+ */
+static int qdrv_command_get(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_cb *qcb;
+	struct qdrv_mac *mac;
+	struct qdrv_wlan *qw;
+	struct ieee80211com *ic = NULL;
+
+	if (!dev) {
+		return -1;
+	}
+
+	qcb = dev_get_drvdata(dev);
+	if (!qcb) {
+		return -1;
+	}
+
+	/* Guarantees argv[1] (mac unit) and argv[2] (item) are valid;
+	 * anything beyond argv[2] must be checked per-item. */
+	if (argc < 3) {
+		DBGPRINTF_E("Invalid number of arguments\n");
+		return -1;
+	}
+
+	mac = qdrv_control_mac_get(dev, argv[1]);
+	if (!mac) {
+		DBGPRINTF_E("mac not found\n");
+		return -1;
+	}
+
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw) {
+		return -1;
+	}
+
+	ic = &qw->ic;
+
+	if(strcmp(argv[2], "stats") == 0) {
+		qdrv_soc_stats(qcb, mac);
+
+	} else if(strcmp(argv[2], "info") == 0) {
+		qdrv_control_set_show(qdrv_show_info, mac, 1, 1);
+
+	} else if(strcmp(argv[2], "info_log") == 0) {
+		/* Same renderer, but dumped to the log (NULL seq_file). */
+		qdrv_show_info(NULL, mac, 1);
+
+	} else if(strcmp(argv[2], "muc_stats") == 0) {
+		qdrv_muc_stats_printlog(qcb, mac, &(qw->ic), argc - 3, &argv[3]);
+
+	} else if(strcmp(argv[2], "assoc_info") == 0) {
+		qdrv_wlan_get_assoc_info(qw);
+
+	} else if(strcmp(argv[2], "show_assoc") == 0) {
+		qdrv_control_show_assoc(mac, argc - 3, &argv[3]);
+
+	} else if(strcmp(argv[2], "assoc_q") == 0) {
+		qdrv_wlan_get_assoc_queue_info(qw);
+
+	} else if(strcmp(argv[2], "chip_id") == 0) {
+		u32 local_chip_id = chip_id();
+		printk("Chip ID: %u (0x%x)\n", local_chip_id, local_chip_id );
+
+	} else if (strcmp(argv[2], "mucfw") == 0) {
+		qdrv_control_set_show(qdrv_show_mucfw, (void *) qcb, 1, 1);
+
+	} else if (strcmp(argv[2], "fwver") == 0) {
+		qdrv_control_set_show(qdrv_show_fw_ver, ic, 1, 1);
+
+	} else if (strcmp(argv[2], "platform_id") == 0) {
+		qdrv_control_set_show(qdrv_show_platform_id, (void *) qcb, 1, 1);
+
+	} else if (strcmp(argv[2], "noise") == 0) {
+		/* Scalar items go through value_from_muc + qdrv_show_muc_value. */
+		qcb->value_from_muc = qdrv_muc_get_noise(mac, ic);
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+
+	} else if (strcmp(argv[2], "rssi") == 0) {
+		/* Optional argv[3] selects the RF chain; defaults to 0. */
+		unsigned int rf_chain = 0;
+
+		if (argc > 3) {
+			if(sscanf(argv[3], "%u", &rf_chain) != 1) {
+				rf_chain = 0;
+			}
+		}
+
+		qcb->value_from_muc = qdrv_muc_get_rssi_by_chain(mac, ic, rf_chain);
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+
+	} else if (strcmp(argv[2], "rx_gain") == 0) {
+		qcb->value_from_muc = qdrv_muc_get_rx_gain_fields(mac, ic);
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+
+	} else if (strcmp(argv[2], "max_gain") == 0) {
+#if TOPAZ_FPGA_PLATFORM
+		/* FPGA build: "0 && ..." short-circuits, so the register is
+		 * never read and the reported value is always 0. */
+		qcb->value_from_muc = (0 && qdrv_read_mem(RUBY_QT3_BB_TD_MAX_GAIN));
+#else
+		qcb->value_from_muc = qdrv_read_mem(RUBY_QT3_BB_TD_MAX_GAIN);
+#endif
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "ext_lna_gain") == 0) {
+		shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+		if (sp == NULL) {
+			DBGPRINTF_E("shared_params struct not yet published\n");
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return(-1);
+		}
+
+		/* Values at or above QTN_EXT_LNA_GAIN_MAX are out of range. */
+		if (sp->ext_lna_gain < QTN_EXT_LNA_GAIN_MAX) {
+			printk("ext_lna_gain : %d.", sp->ext_lna_gain);
+		} else {
+			printk("ext_lna_gain(%d) is invalid.", sp->ext_lna_gain);
+		}
+
+		if (sp->ext_lna_bypass_gain < QTN_EXT_LNA_GAIN_MAX) {
+			printk("        ext_lna_bypass_gain : %d.\n", sp->ext_lna_bypass_gain);
+		} else {
+			printk("        ext_lna_bypass_gain(%d) is invalid.\n", sp->ext_lna_bypass_gain);
+		}
+
+	} else if (strcmp(argv[2], "node_info") == 0) {
+		ieee80211_dump_nodes(&ic->ic_sta);
+
+	} else if (strcmp(argv[2], "phy_stat") == 0) {
+		/* argv[3] = stat name (required), argv[4] = optional index. */
+		unsigned int stat_index = 0;
+		int stat_value;
+
+		if (argc < 4) {
+			return -1;
+		}
+		if (argc > 4) {
+			if (sscanf(argv[4], "%u", &stat_index) != 1) {
+				return -1;
+			}
+
+		}
+
+		if (qdrv_muc_get_phy_stat(mac, ic, argv[3], stat_index, &stat_value)) {
+			return -1;
+		}
+		qcb->value_from_muc = stat_value;
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+
+	} else if (strcmp(argv[2], "debug_flag") == 0) {
+		if (DBG_LOG_FUNC_TEST(QDRV_LF_DUMP_RX_PKT)) {
+			qdrv_show_debug_level("RX pkt");
+		} else {
+			printk("RX pkt dump disabled.\n");
+		}
+		if (DBG_LOG_FUNC_TEST(QDRV_LF_DUMP_TX_PKT)) {
+			qdrv_show_debug_level("TX pkt");
+		} else {
+			printk("TX pkt dump disabled.\n");
+		}
+	} else if (strcmp(argv[2], "vendor_fix") == 0) {
+                uint32_t vendor_fix = ic->ic_vendor_fix;
+                qdrv_control_set_show(qdrv_show_vendor_fix, (void *) qw, 1, 1);
+
+                DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX, "Current vendor fix flag is 0x%x\n",
+			  ic->ic_vendor_fix);
+                DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"DHCP fix is %s\n",
+			  (vendor_fix & VENDOR_FIX_BRCM_DHCP) ? "enabled" : "disabled");
+                DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"Replace IGMP src mac is %s\n",
+			  (vendor_fix & VENDOR_FIX_BRCM_REPLACE_IGMP_SRCMAC) ? "enabled" : "disabled");
+                DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"Replace IP src mac is %s\n",
+			  (vendor_fix & VENDOR_FIX_BRCM_REPLACE_IP_SRCMAC) ? "enabled" : "disabled");
+                DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,"Drop STA IGMP query is %s\n",
+			  (vendor_fix & VENDOR_FIX_BRCM_DROP_STA_IGMPQUERY) ? "enabled" : "disabled");
+	} else if (strcmp(argv[2], "hw_options") == 0) {
+		qcb->value_from_muc = qdrv_soc_get_hw_options();
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "hw_desc") == 0) {
+		qdrv_control_set_show(qdrv_show_hw_desc, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "rf_chipid") == 0) {
+		qcb->value_from_muc = qw->rf_chipid;
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "rf_chip_verid") == 0) {
+		qcb->value_from_muc = qw->rf_chip_verid;
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "auc_stats") == 0) {
+		qdrv_control_set_show(qdrv_show_auc_stats, (void *)mac, 1, 1);
+	} else if (strcmp(argv[2], "dsp_stats") == 0) {
+		qdrv_control_set_show(qdrv_show_dsp_stats, (void *)mac, 1, 1);
+	} else if (strcmp(argv[2], "uc_tx_stats") == 0) {
+		qdrv_control_set_show(qdrv_show_uc_tx_stats, (void *)mac, 1, 1);
+	} else if (strcmp(argv[2], "uc_rx_stats") == 0) {
+		qdrv_control_set_show(qdrv_show_uc_rx_stats, (void *)mac, 1, 1);
+	} else if (strcmp(argv[2], "wmm_ac_map") == 0) {
+		qdrv_control_set_show(qdrv_control_show_wmm_ac_map, (void *)mac, 1, 1);
+	} else if (strcmp(argv[2], "br_isolate") == 0) {
+		qdrv_control_set_show(qdrv_control_get_br_isolate, (void *)mac, 1, 1);
+	} else if (strcmp(argv[2], "power_table_checksum") == 0) {
+		/* NOTE(review): argv[3] is dereferenced here but only
+		 * argc >= 3 is guaranteed above; "get <mac>
+		 * power_table_checksum" with no filename reads past the
+		 * argument array — confirm and add an argc check. */
+		struct qdrv_power_table_checksum_entry *p_entry = qcb->power_table_ctrl.checksum_list;
+		while (p_entry) {
+			if (strcmp(argv[3], p_entry->fname) == 0) {
+				break;
+			}
+			p_entry = p_entry->next;
+		}
+		/* NULL when not found; qdrv_show_checksum handles that. */
+		qcb->power_table_ctrl.reading_checksum = p_entry;
+		qdrv_control_set_show(qdrv_show_checksum, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "checksum_list") == 0) {
+		struct qdrv_power_table_checksum_entry *p_entry = qcb->power_table_ctrl.checksum_list;
+		while (p_entry) {
+			printk("%s  %s\n", p_entry->checksum, p_entry->fname);
+			p_entry = p_entry->next;
+		}
+	} else if (strcmp(argv[2], "power_selection") == 0) {
+		qcb->value_from_muc = qcb->power_table_ctrl.power_selection;
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "power_recheck") == 0) {
+		qcb->value_from_muc = qcb->power_table_ctrl.power_recheck;
+		qdrv_control_set_show(qdrv_show_muc_value, (void *) qcb, 1, 1);
+	} else if (strcmp(argv[2], "vlan_config") == 0) {
+		if (argc != 4)
+			return -1;
+		qdrv_control_set_show(qdrv_control_show_vlan_config, (void *)argv[3], 1, 1);
+	} else if(strcmp(argv[2], "mac_reserve") == 0) {
+		if (argc != 4)
+			return -1;
+		qdrv_control_set_show(qdrv_mac_reserve_show, argv[3], 1, 1);
+	} else if (strcmp(argv[2], "wbsp_ctrl") == 0) {
+		printk("qdrv_wbsp_ctrl = %d\n", qdrv_wbsp_ctrl);
+	} else if (strcmp(argv[2], "vap_default_state") == 0) {
+                qdrv_control_set_show(qdrv_show_vap_default_state, (void *) qw, 1, 1);
+	} else if (strcmp(argv[2], "core_dump_size") == 0) {
+		qdrv_control_set_show(qdrv_show_core_dump_size, &qdrv_crash_log_len, 1, 1);
+	} else if (strcmp(argv[2], "core_dump") == 0) {
+		if (!qdrv_crash_log) {
+			DBGPRINTF_E("QDRV: core dump not saved\n");
+		} else {
+			qdrv_control_set_show(qdrv_show_core_dump, qdrv_crash_log, qdrv_crash_log_len,
+				qdrv_crash_log_len);
+		}
+	} else if (strcmp(argv[2], "wps_intf") == 0) {
+		qdrv_control_set_show(qdrv_show_wps_intf, (void *)qw, 1, 1);
+#ifdef CONFIG_NAC_MONITOR
+	} else if(strcmp(argv[2], "nac_info") == 0) {
+		qdrv_wlan_get_nac_info(qw);
+#endif
+	} else {
+		DBGPRINTF_E("The get request \"%s\" is unknown.\n", argv[2]);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/* /proc reader for the carrier id — seq_file flavour (kernels >= 4.7). */
+static int read_proc_carrier_id(struct seq_file *s)
+{
+	seq_printf(s, "%d\n",g_carrier_id);
+
+	return (0);
+}
+#else
+/* Legacy read_proc-style /proc reader: emit the carrier id once, then
+ * signal EOF on any subsequent (offset > 0) read. */
+static int read_proc_carrier_id(char *page, char **start, off_t offset, int count, int *eof, void *data)
+{
+	int len;
+	char *p = page;
+	/* Everything fits in the first page; non-zero offset means done. */
+	if (offset > 0) {
+		*eof = 1;
+		return 0;
+	}
+	p += sprintf(p, "%d\n",g_carrier_id);
+
+	len = p - page;
+	return len;
+}
+#endif
+
+#if defined(QTN_DEBUG)
+/*
+ * Handler for the "dbg" control command:
+ *
+ *   dbg set <module name> <function mask(hex)> <debug level>
+ *   dbg get <module name>
+ *
+ * For local modules this updates/prints the g_dbg_log_* tables; for the
+ * MuC firmware module (DBG_LM_QMACFW) the request is forwarded over
+ * hostlink instead.  Always returns 0; bad input prints usage.
+ *
+ * Fix: the "get" branch previously read argv[2] without verifying that
+ * the module-name argument was supplied (only argc >= 2 was checked).
+ */
+static int qdrv_command_dbg(struct device *dev, int argc, char *argv[])
+{
+	u32 module_id = 0;
+	u32 dbg_func_mask = 0;
+	u32 dbg_log_level = 0;
+	u32 arg = 0;
+	int i = 0;
+
+	if (argc < 2) {
+		goto error;
+	}
+	if (strcmp(argv[1], "set") == 0) {
+		if (argc < 5) {
+			goto error;
+		}
+		/* Map the module name to its numeric id. */
+		for (i = 0; i < (DBG_LM_MAX - 1); i++) {
+			if (!strcmp(argv[2], dbg_module_name_entry[i].dbg_module_name)){
+				module_id = dbg_module_name_entry[i].dbg_module_id;
+				break;
+			}
+		}
+		if (module_id == 0) {
+			goto error;
+		}
+
+		if (sscanf(argv[3], "%x", &dbg_func_mask) != 1) {
+			goto error;
+		}
+
+		if (sscanf(argv[4], "%u", &dbg_log_level) != 1) {
+			goto error;
+		}
+
+		if (module_id != DBG_LM_QMACFW) {
+			g_dbg_log_module |= BIT(module_id - 1);
+			g_dbg_log_func[module_id - 1] = dbg_func_mask;
+			g_dbg_log_level[module_id - 1] = dbg_log_level;
+		} else {
+			/* MuC firmware: pack mask+level and forward via hostlink. */
+			struct qdrv_cb *qcb;
+			qcb = dev_get_drvdata(dev);
+			struct qdrv_mac *mac = &qcb->macs[0];
+			struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+			arg = dbg_func_mask << 4 | dbg_log_level;
+			qdrv_hostlink_msg_cmd(qw, IOCTL_DEV_CMD_SET_DRV_DBG, arg);
+		}
+	} else if (strcmp(argv[1], "get") == 0) {
+			/* Require the module-name argument before using argv[2]. */
+			if (argc < 3) {
+				goto error;
+			}
+			for (i = 0; i < (DBG_LM_MAX - 1); i++) {
+				if (!strcmp(argv[2], dbg_module_name_entry[i].dbg_module_name)) {
+					module_id = dbg_module_name_entry[i].dbg_module_id;
+					break;
+				}
+			}
+			if (module_id == 0){
+				goto error;
+			}
+			if (module_id != DBG_LM_QMACFW) {
+				printk("module name: %s\n", argv[2]);
+				printk("function mask: 0x%08x\n", g_dbg_log_func[module_id - 1]);
+				printk("debug level: %u\n", g_dbg_log_level[module_id - 1]);
+			} else {
+				struct qdrv_cb *qcb;
+				qcb = dev_get_drvdata(dev);
+				struct qdrv_mac *mac = &qcb->macs[0];
+				struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+				qdrv_hostlink_msg_cmd(qw, IOCTL_DEV_CMD_GET_DRV_DBG, 0);
+			}
+	}
+	return(0);
+
+error:
+	printk("Usage\n");
+	printk("    dbg set <module name> <function mask> <debug level>\n");
+	printk("    dbg get <module name>\n");
+	printk("Module names:\n");
+	for (i = 0; i < (DBG_LM_MAX - 1); i++) {
+		printk("    %s\n", dbg_module_name_entry[i].dbg_module_name);
+	}
+	return(0);
+}
+
+#endif
+
+/* Print the usage text for the "pktlogger" control command.  Kept as a
+ * single printk so the block is emitted as one atomic log write. */
+static void qdrv_command_pktlogger_usage (void) {
+	printk("Usage:\n"
+		"    pktlogger 0 show\n"
+		"    pktlogger 0 start <log type> [<interval>]\n"
+		"    pktlogger 0 stop <log type>\n"
+		"    pktlogger 0 set <parameter> <value>\n"
+		"\n"
+		"Parameters:\n"
+		"    show                 display current settings\n"
+		"    start                start logging for the specified log type\n"
+		"    <log type>           stats, radar, txbf, iwevent, sysmsg, mem, vsp, phy_stats, dsp_stats, core_dump\n"
+		"    <interval>           logging frequency\n"
+		"    stop                 stop logging for the specified log type\n"
+		"    set                  set a parameter value\n"
+		"    <parameter>          dstmac, dstip, dstport, srcip, wifimac, interface\n");
+}
+
+/*
+ * Handler for the "pktlogger" control command (see
+ * qdrv_command_pktlogger_usage() for the syntax).  Dispatches to the
+ * pktlogger set/show/start/stop helpers.  Returns the helper's result,
+ * 0 on usage errors, -1 on missing mac/wlan context.
+ *
+ * Fixes:
+ *  - argv[2] (the sub-command) is read unconditionally, so require
+ *    argc >= 3 up front (the old "argc < 2" check let "pktlogger 0"
+ *    read past the argument array).
+ *  - 'interval' is unsigned; scan it with %u instead of %d.
+ */
+static int qdrv_command_pktlogger(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_mac *mac;
+	struct qdrv_wlan *qw;
+	int ret = 0;
+
+	if (argc < 3) {
+		qdrv_command_pktlogger_usage();
+		return 0;
+	}
+
+	mac = qdrv_control_mac_get(dev, argv[1]);
+	if (mac == NULL) {
+		DBGPRINTF_E("mac NULL\n");
+		return -1;
+	}
+
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw) {
+		return -1;
+	}
+
+	if (strcmp(argv[2], "set") == 0) {
+		if (argc < 5) {
+			qdrv_command_pktlogger_usage();
+			return 0;
+		}
+		ret = qdrv_pktlogger_set(qw, argv[3], argv[4]);
+	} else if (strcmp(argv[2], "show") == 0) {
+		qdrv_pktlogger_show(qw);
+	} else if (strcmp(argv[2], "start") == 0) {
+		unsigned interval = 0;
+		if (argc < 4) {
+			qdrv_command_pktlogger_usage();
+			return 0;
+		}
+		if (argc >= 5) {
+			if (sscanf(argv[4], "%u", &interval) != 1) {
+				return 0;
+			}
+		}
+		ret = qdrv_pktlogger_start_or_stop(qw, argv[3], 1, interval);
+	} else if (strcmp(argv[2], "stop") == 0) {
+		if (argc < 4) {
+			qdrv_command_pktlogger_usage();
+			return 0;
+		}
+		ret = qdrv_pktlogger_start_or_stop(qw, argv[3], 0, 0);
+	} else {
+		qdrv_command_pktlogger_usage();
+	}
+	return ret;
+}
+
+/*
+ * Handler for the "rf_reg_dump" control command: ask the MuC (via
+ * hostlink) to dump the RF chip registers.  Returns 0 on success or
+ * after printing usage, -1 when the mac/wlan context is missing.
+ *
+ * Re-indented with tabs to match the rest of this file.
+ */
+static int qdrv_command_rf_reg_dump(struct device *dev, int argc, char *argv[])
+{
+	struct qdrv_mac *mac;
+	struct qdrv_wlan *qw;
+	u_int32_t arg = 0;
+
+	if (argc < 2) {
+		/* NOTE(review): prints the memdbg usage text — looks
+		 * copy-pasted; confirm no dedicated usage helper exists. */
+		qdrv_command_memdbg_usage();
+		return 0;
+	}
+
+	mac = qdrv_control_mac_get(dev, argv[1]);
+	if (mac == NULL)
+		return -1;
+
+	qw = qdrv_control_wlan_get(mac);
+	if (!qw)
+		return -1;
+
+	qdrv_hostlink_msg_cmd(qw, IOCTL_DEV_CMD_RF_REG_DUMP, arg);
+
+	return 0;
+}
+
+
+/*
+ * Report the status of the most recent control command into 'buf':
+ * "ok" when the stored return code is zero, "error <rc>" otherwise.
+ * Returns the number of bytes written to buf.
+ */
+int qdrv_control_output(struct device *dev, char *buf)
+{
+	struct qdrv_cb *qcb = dev_get_drvdata(dev);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (qcb->rc == 0) {
+		strcpy(buf, "ok\n");
+	} else {
+		sprintf(buf, "error %d\n", qcb->rc);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return strlen(buf);
+}
+
+/*
+ * Split 'string' in place into whitespace-separated tokens.
+ *
+ * string: mutable command buffer; token separators are overwritten
+ *         with '\0'.
+ * argc:   capacity of argv[] — at most this many tokens are stored,
+ *         so the caller MUST size argv[] to at least 'argc' entries.
+ * argv:   receives pointers into 'string', one per token.
+ *
+ * Returns the number of tokens found, 0 for a blank/empty string, or
+ * -1 if more than 'argc' tokens are present (argv is still filled with
+ * the first 'argc' of them).
+ *
+ * The "while (cond && p++);" form relies on && short-circuit
+ * evaluation: p only advances while the condition holds.
+ */
+static int qdrv_parse_args(char *string, int argc, char *argv[])
+{
+	char *p = string;
+	int n = 0;
+	char *end;
+	int i;
+
+	/* Skip leading white space up to first argument */
+	while((*p != '\0') && isspace(*p) && p++);
+
+	/* Check for empty string */
+	if (*p == '\0') {
+		/* Empty string!! */
+		return(0);
+	}
+
+	for (i = 0; i < argc; i++) {
+		/* Save argument */
+		argv[i] = p;
+
+		/* Increment the number of arguments we have found */
+		n++;
+
+		/* Skip argument */
+		while ((*p != '\0') && !isspace(*p) && p++);
+
+		/* Remember the end of argument */
+		end = p;
+
+		/* Skip leading white spaces up to next argument(s) */
+		while ((*p != '\0') && isspace(*p) && p++);
+
+		/* Terminate argument (done after the skip above so the scan
+		 * is not cut short by the inserted '\0') */
+		*end = '\0';
+
+		/* Check for end of arguments */
+		if (*p == '\0') {
+			break;
+		}
+	}
+
+	/* Check if there are arguments left */
+	if (*p != '\0') {
+		/* Too many arguments */
+		return(-1);
+	}
+
+	return(n);
+}
+
+/*
+ * Entry point for control commands written to the qdrv device.
+ * Copies 'buf' into the control block's command buffer, tokenizes it,
+ * looks the first token up in s_command_table and invokes the handler.
+ * The result is stored in qcb->rc for qdrv_control_output() to report.
+ * Returns 0 on success, -1 on any failure.
+ *
+ * Fixes:
+ *  - qdrv_parse_args() was called with a hard-coded limit of 23 while
+ *    argv[] has only 19 slots — a stack buffer overflow for commands
+ *    with 20-23 tokens.  Pass ARRAY_SIZE(argv) so the limit can never
+ *    drift from the array size again.
+ *  - The "too large" diagnostic printed a size_t with %d; use %u with
+ *    an explicit cast.
+ */
+int qdrv_control_input(struct device *dev, char *buf, unsigned int count)
+{
+	int n;
+	int found = -1;
+	struct qdrv_cb *qcb;
+	char *argv[19];
+	int argc;
+	char *p;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Get the private device data */
+	qcb = dev_get_drvdata(dev);
+	if (!qcb) {
+		return -1;
+	}
+
+	/* Make sure it fits into our command buffer as a '\0' */
+	/* terminated string.                                  */
+	if (count >= (sizeof(qcb->command) - 1)) {
+		DBGPRINTF_E("Command is too large (%u >= %u)\n",
+			count, (unsigned int)(sizeof(qcb->command) - 1));
+		qcb->rc = -1;
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Copy to a buffer to make a proper C string */
+	memcpy(qcb->command, buf, count);
+	qcb->command[count] = '\0';
+
+	/* Kill '\n' if there is one */
+	p = strrchr(qcb->command, '\n');
+	if (p) {
+		*p = '\0';
+	}
+
+	/* Parse the arguments, bounded by the real capacity of argv[] */
+	argc = qdrv_parse_args(qcb->command, ARRAY_SIZE(argv), argv);
+
+	/* Make sure we got at least a command */
+	if (argc == 0) {
+		DBGPRINTF_E("No command specified\n");
+		qcb->rc = -2;
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	if (argc < 0) {
+		DBGPRINTF_E("Too many arguments specified.\n");
+		qcb->rc = -3;
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	/* Try to match the commands to the input */
+	for (n = 0; found < 0 && n < COMMAND_TABLE_SIZE; n++) {
+		if (strcmp(argv[0], s_command_table[n].command) == 0) {
+			found = n;
+			break;
+		}
+	}
+
+	/* Call the function if we found a match */
+	if (found >= 0) {
+		if ((*s_command_table[found].fn)(dev, argc, argv) < 0) {
+			DBGPRINTF_E("Failed to execute command \"%s\" (%d)\n",
+				s_command_table[found].command, found);
+
+			qcb->rc = -4;
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return(-1);
+		}
+	} else {
+		DBGPRINTF_E("Command \"%s\" is not recognized\n",qcb->command);
+
+		qcb->rc = -5;
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	qcb->rc = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * Register the show callback and iteration parameters that the
+ * seq_file handlers (qdrv_seq_start/next/stop/show) use on the next
+ * /proc read.
+ *
+ * fn:        callback invoked once per iteration with 'data' and the
+ *            current countdown value.
+ * data:      opaque pointer handed back to fn; must be non-NULL.
+ * start_num: initial countdown value (number of items to emit).
+ * decr:      amount the countdown decreases per iteration.
+ *
+ * The state lives in the global s_output_qcb, protected by
+ * s_output_sem; a NULL s_output_qcb means no reader is attached.
+ */
+void qdrv_control_set_show(void (*fn)(struct seq_file *s, void *data, u32 num),
+	void *data, int start_num, int decr)
+{
+	if (data == NULL) {
+		DBGPRINTF_E("qdrv_control_set_show called with NULL address\n" );
+		return;
+	}
+
+	if (down_interruptible(&s_output_sem)) {
+		return;
+	}
+
+	if (s_output_qcb == NULL) {
+		/* Nothing to output */
+		up(&s_output_sem);
+		return;
+	}
+
+	/* Get the number of items to read */
+	s_output_qcb->read_start_num = start_num;
+	s_output_qcb->read_num = start_num;
+	s_output_qcb->read_decr = decr;
+	s_output_qcb->read_data = data;
+	s_output_qcb->read_show = fn;
+
+	up(&s_output_sem);
+
+	return;
+}
+
+/*
+ * seq_file start op: compute the countdown value for position *pos and
+ * return it smuggled through the iterator pointer (NULL ends the
+ * sequence).  The countdown is read_start_num - pos * read_decr.
+ *
+ * NOTE(review): s_output_qcb is dereferenced *after* s_output_sem has
+ * been released — the NULL check above is racy against whoever clears
+ * s_output_qcb; confirm whether the final deref should also be inside
+ * the critical section.
+ */
+static void *qdrv_seq_start(struct seq_file *s, loff_t *pos)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (down_interruptible(&s_output_sem)) {
+		return NULL;
+	}
+
+	if (s_output_qcb == NULL) {
+		/* Nothing to output */
+		up(&s_output_sem);
+		return NULL;
+	}
+
+	up(&s_output_sem);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	/* Return the number of items to read */
+	if (*pos >= 0 && *pos <= s_output_qcb->read_start_num / s_output_qcb->read_decr) {
+		s_output_qcb->read_num = s_output_qcb->read_start_num -
+					*pos * s_output_qcb->read_decr;
+		return (void*)s_output_qcb->read_num;
+	} else {
+		return NULL;
+	}
+}
+
+/*
+ * seq_file next hook: advance *pos and decrement the countdown iterator.
+ * Returns the new count as an opaque cookie, or NULL when exhausted.
+ */
+static void *qdrv_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	void *ret;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (down_interruptible(&s_output_sem)) {
+		return NULL;
+	}
+
+	if (s_output_qcb == NULL) {
+		/* Nothing to output */
+		up(&s_output_sem);
+		return NULL;
+	}
+
+	(*pos)++;
+	s_output_qcb->read_num -= s_output_qcb->read_decr;
+
+	if (s_output_qcb->read_num <= 0) {
+		/* The iterator is done */
+		up(&s_output_sem);
+		return NULL;
+	}
+
+	/*
+	 * Capture the iterator value before releasing the semaphore.
+	 * The original read s_output_qcb->read_num after up(), racing
+	 * with qdrv_control_exit() clearing s_output_qcb.
+	 */
+	ret = (void*)s_output_qcb->read_num;
+
+	up(&s_output_sem);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return ret;
+}
+
+/*
+ * seq_file stop hook.  When iteration finished naturally (v == NULL)
+ * the registered show callback is cleared so a stale callback is not
+ * invoked on the next read pass.
+ */
+static void qdrv_seq_stop(struct seq_file *s, void *v)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (down_interruptible(&s_output_sem)) {
+		return;
+	}
+
+	if (s_output_qcb == NULL) {
+		/* Not valid any more */
+		up(&s_output_sem);
+		return;
+	}
+
+	if (v == NULL)
+		s_output_qcb->read_show = NULL;
+
+	up(&s_output_sem);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+/*
+ * seq_file show hook: invoke the callback registered via
+ * qdrv_control_set_show() with the iterator cookie (the countdown
+ * value) as the item number.
+ * NOTE(review): returns -1 (not a -E* code) when no control block is
+ * active — confirm callers of the seq interface tolerate this.
+ */
+static int qdrv_seq_show(struct seq_file *s, void *v)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (down_interruptible(&s_output_sem)) {
+		return -ERESTARTSYS;
+	}
+
+	if (s_output_qcb == NULL) {
+		/* Nothing to output */
+		up(&s_output_sem);
+		return -1;
+	}
+
+	if (s_output_qcb->read_show != NULL) {
+		(*s_output_qcb->read_show)(s, s_output_qcb->read_data, (u32) v);
+	}
+
+	up(&s_output_sem);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * MU related commands:
+ *    syntax: mu {set | get | dbg | clr} [sub-cmd] [mac_addr] [options]
+ *
+ *    1. set station group id and user_position
+ *       mu set grp {mac_addr} {grp_id} {position}
+ *    2. clear station group id
+ *       mu clr grp {mac_addr} {grp_id} {position}
+ *
+ * Always returns 0; on bad input prints a usage line instead.
+ */
+static int qdrv_command_mu(struct device *dev, int argc, char *argv[])
+{
+	int i;
+	qdrv_mu_cmd mu_cmd;
+	uint8_t mac[IEEE80211_ADDR_LEN];
+	uint8_t grp = 0, delete = 0;
+	struct ieee80211_node *ni = NULL, *ni1 = NULL;
+	struct ieee80211com *ic = qdrv_get_ieee80211com(dev);
+	int res = -1;
+
+	if (!ieee80211_swfeat_is_supported(SWFEAT_ID_MU_MIMO, 1))
+		return -1;
+
+	for (i = 0; i < argc; i++) {
+		printk("arg[%d] %s\n", i, argv[i]);
+	}
+
+	/*
+	 * Need at least the command token and a sub-command; the original
+	 * read argv[1] unconditionally, indexing past the end of argv for
+	 * a bare "mu" invocation.
+	 */
+	if (argc < 2) {
+		goto mu_exit;
+	}
+
+	/* parse the cmd chains */
+	if (strcmp(argv[1], "set") == 0) {
+		mu_cmd = QDRV_MU_CMD_SET;
+	} else if (strcmp(argv[1], "get") == 0) {
+		mu_cmd = QDRV_MU_CMD_GET;
+	} else if (strcmp(argv[1], "dbg") == 0) {
+		mu_cmd = QDRV_MU_CMD_DBG;
+	} else if (strcmp(argv[1], "clr") == 0) {
+		mu_cmd = QDRV_MU_CMD_CLR;
+		delete = 1;
+	} else if (strcmp(argv[1], "sta0") == 0) {
+		mu_cmd = QDRV_MU_CMD_FIRST_IN_GROUP_SELECTION;
+	} else {
+		goto mu_exit;
+	}
+
+	if (mu_cmd == QDRV_MU_CMD_SET || mu_cmd == QDRV_MU_CMD_GET) {
+		/* argv[3] is the station MAC; guard before dereferencing */
+		if (argc < 4) {
+			goto mu_exit;
+		}
+
+		if (qdrv_parse_mac(argv[3], mac) < 0) {
+			printk("Error mac address\n");
+			goto mu_exit;
+		}
+
+		ni = ieee80211_find_node(&ic->ic_sta, mac);
+		if (!ni) {
+			printk("Can't find node\n");
+			goto mu_exit;
+		}
+	} else if (mu_cmd == QDRV_MU_CMD_FIRST_IN_GROUP_SELECTION) {
+		/* argv[2] is the station MAC; guard before dereferencing */
+		if (argc < 3) {
+			goto mu_exit;
+		}
+
+		if (qdrv_parse_mac(argv[2], mac) < 0) {
+			printk("Error mac address\n");
+			goto mu_exit;
+		}
+	}
+
+	/* parse subcmd & the parameters */
+	switch (mu_cmd) {
+	case QDRV_MU_CMD_CLR:
+	{
+		if (argc < 4) {
+			goto mu_exit;
+		}
+
+		volatile struct qtn_txbf_mbox *txbf_mbox = qtn_txbf_mbox_get();
+		grp = _atoi(argv[3]);
+		if (!(grp > 0 && grp < ARRAY_SIZE(txbf_mbox->mu_grp_man_rank) + 1)) {
+			printk("Group %u is out of range\n", grp);
+			goto mu_exit;
+		}
+		grp--;
+		txbf_mbox->mu_grp_man_rank[grp].u0_aid = 0;
+		txbf_mbox->mu_grp_man_rank[grp].u1_aid = 0;
+		txbf_mbox->mu_grp_man_rank[grp].rank = 0;
+		break;
+	}
+	case QDRV_MU_CMD_GET:
+		printk("MU grp: "
+			"%02x%02x%02x%02x%02x%02x%02x%02x\n"
+			"MU pos: %02x%02x%02x%02x%02x%02x%02x%02x"
+			"%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			ni->ni_mu_grp.member[7],
+			ni->ni_mu_grp.member[6],
+			ni->ni_mu_grp.member[5],
+			ni->ni_mu_grp.member[4],
+			ni->ni_mu_grp.member[3],
+			ni->ni_mu_grp.member[2],
+			ni->ni_mu_grp.member[1],
+			ni->ni_mu_grp.member[0],
+			ni->ni_mu_grp.pos[15],
+			ni->ni_mu_grp.pos[14],
+			ni->ni_mu_grp.pos[13],
+			ni->ni_mu_grp.pos[12],
+			ni->ni_mu_grp.pos[11],
+			ni->ni_mu_grp.pos[10],
+			ni->ni_mu_grp.pos[9],
+			ni->ni_mu_grp.pos[8],
+			ni->ni_mu_grp.pos[7],
+			ni->ni_mu_grp.pos[6],
+			ni->ni_mu_grp.pos[5],
+			ni->ni_mu_grp.pos[4],
+			ni->ni_mu_grp.pos[3],
+			ni->ni_mu_grp.pos[2],
+			ni->ni_mu_grp.pos[1],
+			ni->ni_mu_grp.pos[0]);
+			break;
+	case QDRV_MU_CMD_SET:
+	{
+		if (argc < 7) {
+			goto mu_exit;
+		}
+		if (qdrv_parse_mac(argv[4], mac) < 0) {
+			printk("Error mac address\n");
+			goto mu_exit;
+		}
+
+		ni1 = ieee80211_find_node(&ic->ic_sta, mac);
+		if (!ni1) {
+			printk("Can't find node\n");
+			goto mu_exit;
+		}
+		int32_t rank = _atoi(argv[6]);
+
+		volatile struct qtn_txbf_mbox *txbf_mbox = qtn_txbf_mbox_get();
+		grp = _atoi(argv[5]);
+		if (!(grp > 0 && grp < ARRAY_SIZE(txbf_mbox->mu_grp_man_rank) + 1)) {
+			printk("Group %u is out of range\n", grp);
+			goto mu_exit;
+		}
+		grp--;
+		txbf_mbox->mu_grp_man_rank[grp].u0_aid= IEEE80211_AID(ni->ni_associd);
+		txbf_mbox->mu_grp_man_rank[grp].u1_aid= IEEE80211_AID(ni1->ni_associd);
+		txbf_mbox->mu_grp_man_rank[grp].rank = rank;
+		break;
+	}
+	case QDRV_MU_CMD_FIRST_IN_GROUP_SELECTION:
+	{
+		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+		if (vap) {
+			ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_FIRST_STA_IN_MU_SOUNDING, 0,
+						mac, sizeof(mac));
+		}
+
+		break;
+	}
+	default:
+		goto mu_exit;
+		break;
+	}
+
+	res = 0;
+mu_exit:
+	if (res == -1) {
+		printk("Usage: mu {set | get | clr | dbg | sta0} [sub-cmd] [mac_addr] [options]\n");
+	}
+
+	/* Drop node references taken by ieee80211_find_node() above */
+	if (ni) {
+		ieee80211_free_node(ni);
+	}
+
+	if (ni1) {
+		ieee80211_free_node(ni1);
+	}
+
+	return(0);
+
+}
+
+/* seq_file iterator hooks backing /proc/qdrvdata */
+static struct seq_operations s_qdrv_seq_ops =
+{
+	.start = qdrv_seq_start,
+	.next = qdrv_seq_next,
+	.stop = qdrv_seq_stop,
+	.show = qdrv_seq_show,
+};
+
+/* open() handler for /proc/qdrvdata: attach the seq_file iterator */
+static int qdrv_proc_open(struct inode *inode, struct file *file)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(seq_open(file, &s_qdrv_seq_ops));
+}
+
+/* file_operations for /proc/qdrvdata; seq_release pairs with seq_open above */
+static struct file_operations s_qdrv_proc_ops =
+{
+	.owner   = THIS_MODULE,
+	.open    = qdrv_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/* seq_file show hook for /proc/carrier_id: delegate to the legacy reader */
+static int carrier_id_seq_show(struct seq_file *file, void *v)
+{
+	return read_proc_carrier_id(file);
+}
+
+/* open() handler for /proc/carrier_id */
+static int carrier_id_open(struct inode *inode, struct file *file)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return single_open(file, carrier_id_seq_show, NULL);
+}
+
+static struct file_operations s_carrier_id_ops = {
+	.owner	= THIS_MODULE,
+	.open	= carrier_id_open,
+	.read	= seq_read,
+	.llseek	= seq_lseek,
+	/*
+	 * single_open() allocates per-open seq_file state that must be
+	 * torn down with single_release(); the original used seq_release,
+	 * leaking the single_open allocation on every close.
+	 */
+	.release= single_release,
+};
+#endif
+
+/*
+ * Create the /proc/qdrvdata and /proc/carrier_id entries and initialize
+ * the output semaphore/control block.  Two code paths exist per proc
+ * entry, selected by kernel version (proc_create_data vs the removed
+ * create_proc_entry/create_proc_read_entry APIs).
+ * Returns 0 on success, -1 on failure.
+ */
+int qdrv_control_init(struct device *dev)
+{
+	struct qdrv_cb *qcb;
+	struct proc_dir_entry *entry, *carrier_entry;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF_N("qdrv wbsp: %d\n", qdrv_wbsp_ctrl);
+
+	/* Get the private device data */
+	qcb = dev_get_drvdata(dev);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if ((entry = proc_create_data("qdrvdata", S_IFREG | 0666, NULL, &s_qdrv_proc_ops, NULL)) == NULL) {
+		DBGPRINTF_E("Failed to create \"/proc/qdrvdata\"\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		goto err1;
+	}
+#else
+	if ((entry = create_proc_entry("qdrvdata", S_IFREG | 0666, NULL)) == NULL) {
+		DBGPRINTF_E("Failed to create \"/proc/qdrvdata\"\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+#endif
+
+	/* init_MUTEX was removed in 2.6.37; sema_init(sem, 1) is equivalent */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	sema_init(&s_output_sem, 1);
+#else
+	init_MUTEX(&s_output_sem);
+#endif
+	s_output_qcb = qcb;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	entry->proc_fops = &s_qdrv_proc_ops;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	carrier_entry = proc_create_data("carrier_id", 0, NULL, &s_carrier_id_ops, NULL);
+	if (carrier_entry == NULL) {
+		DBGPRINTF_E("Failed to create \"/proc/carrier_id\"\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		goto err2;
+	}
+#else
+	/*
+	 * NOTE(review): unlike the >= 4.7 path (err2 label), this failure
+	 * path returns without removing the qdrvdata proc entry created
+	 * above — confirm whether the caller cleans it up.
+	 */
+	carrier_entry = create_proc_read_entry("carrier_id", 0, NULL, read_proc_carrier_id, NULL);
+	if (carrier_entry == NULL) {
+		DBGPRINTF_E("Failed to create \"/proc/carrier_id\"\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+#endif
+	spin_lock_init(&qdrv_event_lock);
+	memset(qdrv_event_log_table,0,sizeof(qdrv_event_log_table));
+	QDRV_EVENT("Qdrv Log Init");
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return(0);
+
+    err2:
+	remove_proc_entry("qdrvdata", NULL);
+    err1:
+	return -1;
+}
+
+/*
+ * Tear down the control interface: invalidate the output control block
+ * (under the output semaphore, so in-flight seq_file readers see NULL)
+ * and remove the proc entries.
+ * NOTE(review): also removes "hw_revision", which is not created in
+ * this file's visible init path — presumably created elsewhere; verify.
+ */
+int qdrv_control_exit(struct device *dev)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (down_interruptible(&s_output_sem)) {
+		return(-ERESTARTSYS);
+	}
+
+	s_output_qcb = NULL;
+
+	up(&s_output_sem);
+
+	remove_proc_entry("qdrvdata", NULL);
+	remove_proc_entry("hw_revision", NULL);
+	remove_proc_entry("carrier_id", NULL);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return(0);
+}
diff --git a/drivers/qtn/qdrv/qdrv_control.h b/drivers/qtn/qdrv/qdrv_control.h
new file mode 100644
index 0000000..f29b180
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_control.h
@@ -0,0 +1,112 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_CONTROL_H
+#define _QDRV_CONTROL_H
+
+#include <linux/seq_file.h>
+#include "qdrv_mac.h"
+#include "qdrv_wlan.h"
+
+int qdrv_radar_is_test_mode(void);
+int qdrv_radar_test_mode_csa_en(void);
+
+int qdrv_get_wps_push_button_config( u8 *p_gpio_pin, u8 *p_use_interrupt, u8 *p_active_logic );
+void set_wps_push_button_enabled( void );
+
+void qdrv_control_txbf_pkt_send(void *data, u8 *stvec, u32 bw);
+void qdrv_control_sysmsg_send(void *data, char *sysmsg, u_int32_t len, int send_now);
+void qdrv_control_sysmsg_timer(unsigned long data);
+
+int qdrv_control_init(struct device *dev);
+int qdrv_control_exit(struct device *dev);
+int qdrv_control_output(struct device *dev, char *buf);
+int qdrv_control_input(struct device *dev, char *buf, unsigned int count);
+void qdrv_control_set_show(void (*fn)(struct seq_file *s, void *data, u32 num),
+	void *data, int start_num, int decr);
+int qdrv_command_read_rf_reg(struct device *dev, int offset);
+int qdrv_parse_mac(const char *mac_str, u8 *mac);
+void qdrv_pktlogger_flush_data(struct qdrv_wlan *qw);
+void qdrv_calcmd_set_tx_power(struct device *dev, uint8_t value);
+
+/* Calibration / production-test command codes */
+enum
+{
+	GET_MAC_ADDRESS1 = 0,
+	VCO_CALIBRATION,
+	IQ_COMP_CALIBRATION,
+	DC_OFFSET_CALIBRATION,
+	GET_AP_INFO,
+	GET_PHY_INFO,
+	SET_MAC_ADDRESS,
+	GET_VERSION,
+	SET_TXONLY_MODE,
+	SET_AFE_PATTERN = 9,
+	GET_MAC_ADDRESS,
+	GET_CHIP_ID,
+	MAX_CMD
+};
+
+/* One entry in the qdrv event log ring (see qdrv_event_log) */
+typedef struct qdrv_event {
+	char *str;
+	u32 jiffies;
+	u32 clk;
+	int arg1;
+	int arg2;
+	int arg3;
+	int arg4;
+	int arg5;
+} qdrv_event_t;
+
+#define QDRV_EVENT_LOG_SIZE	1024
+#define QDRV_EVENT_LOG_MASK	(QDRV_EVENT_LOG_SIZE - 1)
+
+enum {
+
+	EXT_TEMPERATURE_SENSOR_REPORT_FLAG = 0x11111111,
+	DISABLE_REPORT_FLAG  = 0x22222222
+};
+
+void qdrv_event_log(char *str, int arg1, int arg2, int arg3, int arg4, int arg5);
+int qdrv_eventf(struct net_device *dev, const char *fmt, ...);
+void qdrv_control_dump_active_hwreg(void);
+
+// convenience functions
+#define QDRV_EVENT(x)			(qdrv_event_log(x,0,0,0,0,0))
+#define QDRV_EVENT_1(x,a)		(qdrv_event_log(x,a,0,0,0,0))
+#define QDRV_EVENT_2(x,a,b)		(qdrv_event_log(x,a,b,0,0,0))
+#define QDRV_EVENT_3(x,a,b,c)		(qdrv_event_log(x,a,b,c,0,0))
+#define QDRV_EVENT_4(x,a,b,c,d)		(qdrv_event_log(x,a,b,c,d,0))
+#define QDRV_EVENT_5(x,a,b,c,d,e)	(qdrv_event_log(x,a,b,c,d,e))
+
+/* MU related cmds */
+typedef enum {
+	QDRV_MU_CMD_SET = 0,
+	QDRV_MU_CMD_GET = 1,
+	QDRV_MU_CMD_DBG = 2,
+	QDRV_MU_CMD_CLR = 3,
+	QDRV_MU_CMD_FIRST_IN_GROUP_SELECTION = 4,
+} qdrv_mu_cmd;
+
+typedef enum {
+	QDRV_MU_SUBMD_GRP = 0,	/* NOTE(review): "SUBMD" looks like a typo for "SUBCMD"; renaming would touch callers */
+} qdrv_mu_subcmd;
+
+/*
+ * Moved inside the include guard: the original placed this declaration
+ * after #endif, leaving it outside the guard.
+ */
+extern int qdrv_copy_core_dump(void *buf, uint32_t len, uint32_t *len_copied);
+
+#endif /* _QDRV_CONTROL_H */
diff --git a/drivers/qtn/qdrv/qdrv_debug.h b/drivers/qtn/qdrv/qdrv_debug.h
new file mode 100644
index 0000000..a8337f5
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_debug.h
@@ -0,0 +1,93 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_DEBUG_H
+#define _QDRV_DEBUG_H
+#include <qtn/qtn_debug.h>
+/* Global debug log messsage function mask definition of the moudle qdrv */
+#define QDRV_LF_TRACE				DBG_LF_00
+#define QDRV_LF_BRIDGE				DBG_LF_01
+#define QDRV_LF_RADAR				DBG_LF_02
+#define QDRV_LF_IGMP				DBG_LF_03
+#define QDRV_LF_PKT_TX				DBG_LF_04
+#define QDRV_LF_PKT_RX				DBG_LF_05
+#define QDRV_LF_CALCMD				DBG_LF_06
+#define QDRV_LF_HLINK				DBG_LF_07
+#define QDRV_LF_TXBF				DBG_LF_08
+#define QDRV_LF_WLAN				DBG_LF_09
+#define QDRV_LF_VAP				DBG_LF_10
+#define QDRV_LF_DUMP_RX_PKT			DBG_LF_11
+#define QDRV_LF_DUMP_TX_PKT			DBG_LF_12
+#define	QDRV_LF_DUMP_MGT			DBG_LF_13 /* management frames except for beacon and action */
+#define	QDRV_LF_DUMP_BEACON			DBG_LF_14 /* beacon frame */
+#define	QDRV_LF_DUMP_ACTION			DBG_LF_15 /* action frame */
+#define	QDRV_LF_DUMP_DATA			DBG_LF_16 /* data frame */
+#define QDRV_LF_DFS_QUICKTIMER			DBG_LF_17
+#define QDRV_LF_DFS_DONTCAREDOTH		DBG_LF_18
+#define QDRV_LF_DFS_TESTMODE			DBG_LF_19
+#define QDRV_LF_DFS_DISALLOWRADARDETECT		DBG_LF_20
+#define QDRV_LF_QCTRL				DBG_LF_21 /* qdrv control */
+#define QDRV_LF_CMM				DBG_LF_22
+#define QDRV_LF_DSP				DBG_LF_23
+#define QDRV_LF_AUC				DBG_LF_24
+/* NOTE(review): QDRV_LF_VSP shares DBG_LF_24 with QDRV_LF_AUC — the sequence suggests DBG_LF_25 was intended; confirm before changing */
+#define QDRV_LF_VSP				DBG_LF_24
+#define QDRV_LF_ALL				DBG_LF_ALL
+#define DBG_LM					DBG_LM_QDRV
+
+extern unsigned int g_dbg_dump_pkt_len;
+
+/* Frame-type tests on the 802.11 frame-control byte */
+#define	IS_BEACON(wh) \
+    (((wh)->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
+	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
+
+#define	IS_ACTION(wh) \
+    (((wh)->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
+	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_ACTION))
+
+#define	IS_MGT(wh) \
+    (((wh)->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT))
+
+#define	IS_DATA(wh) \
+    (((wh)->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == (IEEE80211_FC0_TYPE_DATA))
+
+/* Should a received frame be dumped, given frame header wh and flag mask f? */
+#define	IFF_DUMPPKTS_RECV(wh, f)								\
+	(((DBG_LOG_FUNC_TEST(QDRV_LF_PKT_RX)) &&						\
+	((DBG_LOG_LEVEL >= (DBG_LL_TRIAL)) ||							\
+	((!IS_BEACON(wh)) && (IS_MGT(wh)) && DBG_LOG_LEVEL >= (DBG_LL_INFO)))) ||		\
+	((f & QDRV_LF_DUMP_RX_PKT) &&								\
+	(((IS_MGT(wh)) &&									\
+	(((f & QDRV_LF_DUMP_MGT) && !IS_BEACON(wh) && !IS_ACTION(wh))				\
+	|| ((f & QDRV_LF_DUMP_BEACON) && (IS_BEACON(wh)))					\
+	|| ((f & QDRV_LF_DUMP_ACTION) && (IS_ACTION(wh)))))					\
+	|| ((f & QDRV_LF_DUMP_DATA) && IS_DATA(wh)))))
+
+/* Should a transmitted management frame be dumped? */
+#define	IFF_DUMPPKTS_XMIT_MGT(wh, f)								\
+	(((DBG_LOG_FUNC_TEST(QDRV_LF_PKT_TX)) &&						\
+	(DBG_LOG_LEVEL >= (DBG_LL_INFO))) ||							\
+	((f & QDRV_LF_DUMP_TX_PKT) &&								\
+	(((f & QDRV_LF_DUMP_MGT) && !(IS_ACTION(wh)))						\
+	|| ((f & QDRV_LF_DUMP_ACTION) && (IS_ACTION(wh))))))
+
+/* Should a transmitted data frame be dumped? */
+#define	IFF_DUMPPKTS_XMIT_DATA(f)								\
+	(((DBG_LOG_FUNC_TEST(QDRV_LF_PKT_TX)) &&						\
+	(DBG_LOG_LEVEL >= (DBG_LL_INFO))) ||							\
+	((f & QDRV_LF_DUMP_TX_PKT) &&								\
+	(f & QDRV_LF_DUMP_DATA)))
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_dsp.c b/drivers/qtn/qdrv/qdrv_dsp.c
new file mode 100644
index 0000000..9d4dac4
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_dsp.c
@@ -0,0 +1,69 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <asm/io.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_dsp.h"
+#include "qdrv_hal.h"
+#include "qdrv_fw.h"
+#include <qtn/registers.h>
+
+/*
+ * Load the DSP firmware image and start the DSP core.
+ * Returns 0 on success, -1 if the firmware could not be loaded.
+ */
+int qdrv_dsp_init(struct qdrv_cb *qcb)
+{
+	u32 dsp_start_addr = 0;
+
+	/* Fixed: trace message was missing its newline, unlike every other trace */
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (qdrv_fw_load_dsp(qcb->dev, qcb->dsp_firmware, &dsp_start_addr) < 0) {
+		DBGPRINTF_E("dsp load firmware failed\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_DSP, "Firmware start address is %x\n", dsp_start_addr);
+
+	hal_dsp_start(dsp_start_addr);
+	hal_enable_dsp();
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+/* Stop the DSP core.  Always returns 0. */
+int qdrv_dsp_exit(struct qdrv_cb *qcb)
+{
+	/* Fixed: trace message was missing its newline, unlike every other trace */
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	hal_disable_dsp();
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
diff --git a/drivers/qtn/qdrv/qdrv_dsp.h b/drivers/qtn/qdrv/qdrv_dsp.h
new file mode 100644
index 0000000..319f26e
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_dsp.h
@@ -0,0 +1,27 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_DSP_H
+#define _QDRV_DSP_H
+
+int qdrv_dsp_init(struct qdrv_cb *qcb);
+int qdrv_dsp_exit(struct qdrv_cb *qcb);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_features.h b/drivers/qtn/qdrv/qdrv_features.h
new file mode 100644
index 0000000..dc2b6cc
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_features.h
@@ -0,0 +1,37 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_FEATURES_H
+#define _QDRV_FEATURES_H
+
+#define QDRV_FEATURE_HT
+#define QDRV_FEATURE_VHT
+#define QDRV_FEATURE_DEMO
+#undef QDRV_FEATURE_IGMP_SNOOP
+
+/* Stop the MuC on detecting bad stat condition */
+#if 0
+#define QDRV_FEATURE_KILL_MUC
+#endif
+
+/* For external use to query flash size */
+size_t get_flash_size(void);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_fw.c b/drivers/qtn/qdrv/qdrv_fw.c
new file mode 100644
index 0000000..db6f6b9
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_fw.c
@@ -0,0 +1,278 @@
+/**
+  Copyright (c) 2015 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <qtn/qtn_fw_info.h>
+#include "qdrv_debug.h"
+#include "qdrv_fw.h"
+
+/* True if addr falls in the AuC closely-coupled instruction or data memory */
+static int
+auc_is_ccm_addr(unsigned long addr)
+{
+	return
+		__in_mem_range(addr, TOPAZ_AUC_IMEM_ADDR, TOPAZ_AUC_IMEM_SIZE) ||
+		__in_mem_range(addr, TOPAZ_AUC_DMEM_ADDR, TOPAZ_AUC_DMEM_SIZE);
+}
+
+/*
+ * Write one byte into AuC CCM via a 32-bit read-modify-write: CCM only
+ * supports word-sized accesses, so the byte is merged into the aligned
+ * word at the correct shift.
+ */
+static void
+auc_write_ccm_uint8(void *dst, uint8_t val)
+{
+	unsigned long addr = (unsigned long)dst;
+	unsigned long addr_align = addr & ~0x3;
+	unsigned long val_shift = (addr & 0x3) * BITS_PER_BYTE;
+	unsigned mem_val = readl(addr_align);
+
+	mem_val = mem_val & ~(0xFF << val_shift);
+	mem_val = mem_val | (val << val_shift);
+
+	writel(mem_val, addr_align);
+}
+
+/* memset(dst, 0, size) for CCM, one byte-merge at a time */
+static void auc_memzero_ccm(void *dst, unsigned long size)
+{
+	char *dst_c = (char *)dst;
+
+	while (size > 0) {
+		auc_write_ccm_uint8(dst_c, 0);
+		--size;
+		++dst_c;
+	}
+}
+
+/* memcpy(dst, src, size) for CCM destinations, one byte-merge at a time */
+static void auc_memcpy_ccm(void *dst, const void *src, unsigned long size)
+{
+	char *dst_c = (char *)dst;
+	const char *src_c = (const char *)src;
+
+	while (size > 0) {
+		auc_write_ccm_uint8(dst_c, readb(src_c));
+		--size;
+		++dst_c;
+		++src_c;
+	}
+}
+
+/*
+ * Zero an AuC destination: use the word-safe CCM path when the physical
+ * address is in CCM, plain memset otherwise.
+ */
+void qdrv_fw_auc_memzero(void *dst, unsigned long size, unsigned long dst_phys_addr)
+{
+	if (auc_is_ccm_addr(dst_phys_addr)) {
+		auc_memzero_ccm(dst, size);
+	} else {
+		memset(dst, 0, size);
+	}
+}
+
+/* Copy to an AuC destination, dispatching on CCM vs normal memory */
+static
+void qdrv_fw_auc_memcpy(void *dst, const void *src, unsigned long size, unsigned long dst_phys_addr)
+{
+	if (auc_is_ccm_addr(dst_phys_addr)) {
+		auc_memcpy_ccm(dst, src, size);
+	} else {
+		memcpy(dst, src, size);
+	}
+}
+
+/*
+ * Translate a firmware-image AuC bus address to a host physical address.
+ * Panics on an address outside the mappable range (firmware image is
+ * trusted; a bad address means a corrupt/incompatible image).
+ */
+unsigned long
+qdrv_fw_auc_to_host_addr(unsigned long auc_addr)
+{
+	void *ret = bus_to_virt(auc_addr);
+	if (RUBY_BAD_VIRT_ADDR == ret) {
+		panic("Converting out of range AuC address 0x%lx to host address\n", auc_addr);
+	}
+	return virt_to_phys(ret);
+}
+
+/* As above, for MuC addresses (via the muc_to_lhost window) */
+static unsigned long
+qdrv_fw_muc_to_host_addr(unsigned long muc_addr)
+{
+	void *ret = (void*)muc_to_lhost(muc_addr);
+	if (RUBY_BAD_VIRT_ADDR == ret) {
+		panic("Converting out of range MuC address 0x%lx to host address\n", muc_addr);
+	}
+	return virt_to_phys(ret);
+}
+
+/* As above, for DSP bus addresses */
+static unsigned long
+qdrv_fw_dsp_to_host_addr(unsigned long dsp_addr)
+{
+	void *ret = bus_to_virt(dsp_addr);
+	if (RUBY_BAD_VIRT_ADDR == ret) {
+		panic("Converting out of range DSP address 0x%lx to host address\n", dsp_addr);
+	}
+	return virt_to_phys(ret);
+}
+
+/* Target CPU for a firmware load; indexes qdrv_fw_cpu_str */
+enum qdrv_fw_cpu {
+	qdrv_fw_muc,
+	qdrv_fw_auc,
+	qdrv_fw_dsp
+};
+
+/* Human-readable CPU names for log messages */
+static const char *qdrv_fw_cpu_str[] = {
+		[qdrv_fw_muc] = "MuC",
+		[qdrv_fw_auc] = "AuC",
+		[qdrv_fw_dsp] = "DSP"
+};
+
+/*
+ * Copy one ELF program segment into the target CPU's memory and clear
+ * its BSS tail.  The segment's load address is translated to a host
+ * physical address per target CPU, temporarily mapped, written, and
+ * unmapped.  DSP X/Y-memory segments are skipped (not host-loadable).
+ */
+static void qdrv_fw_install_segment(const Elf32_Phdr *phdr,
+		const char *data,
+		enum qdrv_fw_cpu cpu)
+{
+	uint8_t *vaddr;
+	unsigned long paddr;
+
+	if (cpu == qdrv_fw_dsp) {
+		/* Skip blocks for DSP X/Y memory */
+		if ((phdr->p_vaddr >= RUBY_DSP_XYMEM_BEGIN) &&
+				(phdr->p_vaddr <= RUBY_DSP_XYMEM_END)) {
+			return;
+		}
+		paddr = phdr->p_vaddr;
+	} else {
+		paddr = phdr->p_paddr;
+		if (!paddr) {
+			paddr = phdr->p_vaddr;
+		}
+	}
+
+	switch (cpu) {
+	case qdrv_fw_muc:
+		paddr = qdrv_fw_muc_to_host_addr(paddr);
+		break;
+	case qdrv_fw_auc:
+		paddr = qdrv_fw_auc_to_host_addr(paddr);
+		break;
+	case qdrv_fw_dsp:
+		paddr = qdrv_fw_dsp_to_host_addr(paddr);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE, "ELF header p_vaddr=%p p_paddr=%p, "
+				 "remapping to 0x%lx filesz %d memsz %d\n",
+			(void*)phdr->p_vaddr, (void*)phdr->p_paddr,
+			paddr, phdr->p_filesz, phdr->p_memsz);
+
+	vaddr = ioremap_nocache(paddr, phdr->p_memsz);
+	/*
+	 * The original dereferenced the mapping unconditionally; a failed
+	 * ioremap would oops.  Skip the segment with an error instead.
+	 */
+	if (vaddr == NULL) {
+		DBGPRINTF_E("Failed to map 0x%lx (%d bytes) for firmware segment\n",
+				paddr, phdr->p_memsz);
+		return;
+	}
+
+	/* Copy data and clear BSS */
+	if (cpu == qdrv_fw_auc) {
+		qdrv_fw_auc_memcpy(vaddr, data, phdr->p_filesz, paddr);
+		qdrv_fw_auc_memzero(vaddr + phdr->p_filesz, phdr->p_memsz - phdr->p_filesz, paddr);
+	} else {
+		memcpy(vaddr, data, phdr->p_filesz);
+		memset(vaddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+	}
+
+	iounmap(vaddr);
+}
+
+/*
+ * Parse an in-memory ELF firmware image and install each program
+ * segment, returning the entry point via *start_addr.
+ * Returns the number of program headers processed, or -1 when the
+ * embedded version-info segment is absent or fails the check.
+ * NOTE(review): segment data is consumed sequentially after the program
+ * header table (data += p_filesz per segment) rather than via p_offset —
+ * assumes the firmware image is laid out contiguously in phdr order;
+ * confirm against the firmware packing tool.
+ */
+static int qdrv_fw_install(char *data, uint32_t *start_addr, enum qdrv_fw_cpu cpu)
+{
+	Elf32_Ehdr *ehdr;
+	Elf32_Phdr *phdr;
+	int match = 0;
+	int i;
+
+	ehdr = (Elf32_Ehdr *) data;
+	data += sizeof(Elf32_Ehdr);
+
+	phdr = (Elf32_Phdr *) data;
+	data += ehdr->e_phnum * sizeof(Elf32_Phdr);
+
+	*start_addr = (uint32_t)ehdr->e_entry;
+
+	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+		if (FW_INFO_SEGMENT_FOUND(phdr->p_vaddr, phdr->p_filesz, data)) {
+			FW_INFO_CHECK_DATA((struct qtn_fw_info *)data, match, DBGPRINTF_E);
+		} else {
+			qdrv_fw_install_segment(phdr, data, cpu);
+		}
+		data += phdr->p_filesz;
+	}
+
+	if (!match) {
+		DBGPRINTF_E("\"%s\" firmware version check failed\n", \
+				qdrv_fw_cpu_str[cpu]);
+		return -1;
+	}
+
+	return i;
+}
+
+/*
+ * Request a firmware blob from userspace and install it for the given
+ * CPU.  Returns the segment count from qdrv_fw_install (> 0) on
+ * success, -1 or 0 on failure.
+ */
+static
+int qdrv_fw_load(struct device *dev,
+		char *firmware,
+		uint32_t *start_addr,
+		enum qdrv_fw_cpu cpu)
+{
+	const struct firmware *fw;
+	int ret;
+
+	if (request_firmware(&fw, firmware, dev) < 0) {
+		DBGPRINTF_E("Failed to load %s firmware \"%s\"\n",
+				qdrv_fw_cpu_str[cpu], firmware);
+		return -1;
+	}
+
+	/* NOTE(review): fw->size is size_t; %d works on 32-bit ARC but %zu is the portable form */
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE, "Firmware size is %d\n", fw->size);
+
+	ret = qdrv_fw_install((char *)fw->data, start_addr, cpu);
+	if (ret <= 0) {
+		DBGPRINTF_E("Failed to install %s firmware \"%s\"\n",
+				qdrv_fw_cpu_str[cpu], firmware);
+	}
+
+	release_firmware(fw);
+
+	return ret;
+}
+
+/* Load and install MuC firmware; see qdrv_fw_load for return values */
+int qdrv_fw_load_muc(struct device *dev,
+		char *firmware,
+		uint32_t *start_addr)
+{
+	return qdrv_fw_load(dev, firmware, start_addr, qdrv_fw_muc);
+}
+
+/* Load and install AuC firmware; see qdrv_fw_load for return values */
+int qdrv_fw_load_auc(struct device *dev,
+		char *firmware,
+		uint32_t *start_addr)
+{
+	return qdrv_fw_load(dev, firmware, start_addr, qdrv_fw_auc);
+}
+
+/* Load and install DSP firmware; see qdrv_fw_load for return values */
+int qdrv_fw_load_dsp(struct device *dev,
+		char *firmware,
+		uint32_t *start_addr)
+{
+	return qdrv_fw_load(dev, firmware, start_addr, qdrv_fw_dsp);
+}
diff --git a/drivers/qtn/qdrv/qdrv_fw.h b/drivers/qtn/qdrv/qdrv_fw.h
new file mode 100644
index 0000000..55c1efd
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_fw.h
@@ -0,0 +1,41 @@
+/**
+  Copyright (c) 2015 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_FW_H
+#define _QDRV_FW_H
+
+unsigned long
+qdrv_fw_auc_to_host_addr(unsigned long auc_addr);
+
+void qdrv_fw_auc_memzero(void *dst, unsigned long size, unsigned long dst_phys_addr);
+
+int qdrv_fw_load_muc(struct device *dev,
+		char *firmware,
+		uint32_t *start_addr);
+
+int qdrv_fw_load_auc(struct device *dev,
+		char *firmware,
+		uint32_t *start_addr);
+
+int qdrv_fw_load_dsp(struct device *dev,
+		char *firmware,
+		uint32_t *start_addr);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_hal.c b/drivers/qtn/qdrv/qdrv_hal.c
new file mode 100644
index 0000000..1ecfc3c
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_hal.c
@@ -0,0 +1,304 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <asm/hardware.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/gpio.h>
+#else
+#include <asm/gpio.h>
+#endif
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_hal.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include <qtn/registers.h>
+#include <qtn/shared_params.h>
+#include <qtn/txbf_mbox.h>
+#include <qtn/qtn_bb_mutex.h>
+#include <common/topaz_reset.h>
+
+/* unaligned little endian access */
+#define LE_READ_4(p)	((u_int32_t)						\
+				((((const u_int8_t *)(p))[0]) |			\
+					(((const u_int8_t *)(p))[1] <<  8) |	\
+					(((const u_int8_t *)(p))[2] << 16) |	\
+					(((const u_int8_t *)(p))[3] << 24)))
+
+/*
+ * Read the 64-bit TSF counter from the MAC registers.
+ * @ret: output; receives the combined 64-bit TSF value.
+ *
+ * The HI word is sampled before and after the LO word; if the two HI
+ * samples differ, a carry occurred between the reads and LO is re-sampled.
+ * The sequence runs under the baseband mutex to serialise register access
+ * against the other SoC CPUs.
+ * NOTE(review): the tsf[0]=LO / tsf[1]=HI overlay onto the uint64_t assumes
+ * little-endian word order — confirm for this platform.
+ */
+void hal_get_tsf(uint64_t *ret)
+{
+	uint64_t tsf64;
+	uint32_t *tsf = (uint32_t *) &tsf64;
+	uint32_t temp_tsf1;
+
+	qtn_bb_mutex_enter(QTN_LHOST_SOC_CPU);
+
+	temp_tsf1 = readl(HAL_REG(HAL_REG_TSF_HI));
+	tsf[0] = readl(HAL_REG(HAL_REG_TSF_LO));
+	tsf[1] = readl(HAL_REG(HAL_REG_TSF_HI));
+	if (temp_tsf1 != tsf[1]) /* handling wrap-around case. */
+		tsf[0] = readl(HAL_REG(HAL_REG_TSF_LO));
+
+	qtn_bb_mutex_leave(QTN_LHOST_SOC_CPU);
+
+	*ret = tsf64;
+}
+
+/*
+ * Reset the wireless MAC/baseband hardware.
+ *
+ * ARC (Ruby/Topaz) path: pulses the NETSS/MAC/BB reset lines through the
+ * system reset vector, disables the Rx path (which reset leaves enabled),
+ * turns the power amplifiers off via a soft-reset window, and cycles the
+ * RFIC reset GPIO.  Non-ARC (BBIC2) path: pulses a wider reset set and
+ * applies a soft-reset polarity workaround.
+ */
+void hal_reset(void)
+{
+	u32 reset;
+
+#ifdef CONFIG_ARC
+	reset = RUBY_SYS_CTL_RESET_NETSS | RUBY_SYS_CTL_RESET_MAC | RUBY_SYS_CTL_RESET_BB;
+
+	/* Reset MAC HW */
+	writel(reset, SYS_RESET_VECTOR_MASK);
+	writel(reset, SYS_RESET_VECTOR);
+	udelay(50);
+	writel(0, SYS_RESET_VECTOR_MASK);
+	/*
+	 * After MAC reset the Rx path is enabled, disabling it here.
+	 * Note: This may need to be revisited for Mu-MIMO, or QAC3.
+	 */
+	writel(0, HAL_REG(HAL_REG_RX_CSR));
+# if !TOPAZ_FPGA_PLATFORM
+	/*
+	 * Special programming to turn off the power amplifiers
+	 * immediately after bringing the baseband out of reset.
+	 *
+	 * Baseband has to be put into Soft Reset for this operation to work.
+	 */
+	writel(0x04, QT3_BB_GLBL_SOFT_RST);
+	writel(0x0bb5, QT3_BB_TD_PA_CONF);
+	writel(0x00, QT3_BB_GLBL_SOFT_RST);
+	/*
+	 * Bring the RFIC out of reset by first driving GPIO 15 (RFIC RESET) to be low
+	 * for 10 usec, and then driving it high.
+	 */
+	/* Failure to claim the GPIO is logged but the reset pulse is still attempted. */
+	if (gpio_request(RUBY_GPIO_RFIC_RESET, "rfic_reset") < 0)
+		printk(KERN_ERR "Failed to request GPIO%d for GPIO rfic_reset\n",
+				RUBY_GPIO_RFIC_RESET);
+
+	gpio_direction_output(RUBY_GPIO_RFIC_RESET, 0);
+	udelay(10);
+	gpio_set_value(RUBY_GPIO_RFIC_RESET, 1);
+	gpio_free(RUBY_GPIO_RFIC_RESET);
+# endif /* !TOPAZ_FPGA_PLATFORM */
+#else
+	/* BBIC2 - non-ARC processor */
+	/* NOTE(review): BB_RESET appears twice in this mask — harmless but redundant. */
+	reset = DSPSS_RESET | NETSS_RESET | MMC_RESET | BB_RESET | MB_RESET | SRAM_RESET | BB_RESET;
+	/* Reset MAC HW */
+	writel(reset, SYS_RESET_VECTOR_MASK);
+	writel(reset, SYS_RESET_VECTOR);
+	udelay(50);
+	/* fix soft-reset polarity issue BBIC2 */
+	writel(0x2030, BB_PREG_SPARE_0(0));
+#endif /* CONFIG_ARC */
+}
+
+/*
+ * Bring the MuC (MAC microcontroller) out of reset and point it at its
+ * boot address.
+ * @muc_start_addr: entry point; must be aligned to the MuC remap
+ *                  granularity (panics otherwise — boot would be undefined).
+ */
+void hal_enable_muc(u32 muc_start_addr)
+{
+	const unsigned long reset = RUBY_SYS_CTL_RESET_MUC_ALL;
+#ifdef FIXME_NOW
+	volatile u32 value;
+#endif
+
+#ifdef CONFIG_ARC
+	/* Check that we can start this address */
+	if (muc_start_addr & ((1 << RUBY_SYS_CTL_MUC_REMAP_SHIFT) - 1)) {
+		panic("MuC address 0x%x cannot be used as entry point\n", (unsigned)muc_start_addr);
+	}
+	/* Tells MuC its boot address */
+	writel(RUBY_SYS_CTL_MUC_REMAP_VAL(muc_start_addr), RUBY_SYS_CTL_MUC_REMAP);
+	/* Take MUC out of reset */
+	topaz_set_reset_vec(1, reset);
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE, "Reset MuC and enabled MuC boot remap %08X\n", readl(RUBY_SYS_CTL_MUC_REMAP));
+#else
+	/* Take MUC out of reset */
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = MUC_RESET;
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = MUC_RESET;
+#endif //CONFIG_ARC
+
+#ifdef FIXME_NOW
+	/* Set bit 15 for DGPIO enable */
+	value = *(volatile u32 *)IO_ADDRESS(SYS_CONTROL_MASK);
+	*(volatile u32 *)IO_ADDRESS(SYS_CONTROL_MASK) = value | DSP_MASTER_GPIO_ENABLE;
+
+	value = *(volatile u32 *) IO_ADDRESS(SYS_CONTROL_REG);
+	*(volatile u32 *)IO_ADDRESS(SYS_CONTROL_REG) = value | DSP_MASTER_GPIO_ENABLE;
+#endif
+}
+
+/* Hold the MuC in reset. */
+void hal_disable_muc(void)
+{
+#ifdef CONFIG_ARC
+	topaz_set_reset_vec(0, RUBY_SYS_CTL_RESET_MUC_ALL);
+#else
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = MUC_RESET;
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = 0;
+#endif
+}
+EXPORT_SYMBOL(hal_disable_muc);
+
+/* Bring the mailbox block out of reset. */
+void hal_enable_mbx(void)
+{
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = MB_RESET;
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = MB_RESET;
+}
+
+/* Put the mailbox block into reset (half of a reset cycle that clears it). */
+void hal_disable_mbx(void)
+{
+	/* Clear the mailboxes by cycling them */
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = MB_RESET;
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = 0;
+}
+
+/*
+ * Hold the DSP in reset.  On ARC the reset write is disabled (see the
+ * #if 0 block: writing the DSP reset bits hangs the bus), so this is a
+ * no-op there.
+ */
+void hal_disable_dsp(void)
+{
+#ifdef CONFIG_ARC
+# if 0
+# error writing to this hangs the bus
+	const unsigned long reset = RUBY_SYS_CTL_RESET_DSP_ALL;
+
+	topaz_set_reset_vec(0, reset);
+# endif
+#else
+	/* Hold the DSP in reset */
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = DSP_RESET;
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = 0;
+#endif
+}
+
+/* Bring the DSP out of reset; on ARC also initialise the TxBF mailbox first. */
+void hal_enable_dsp(void)
+{
+#ifdef CONFIG_ARC
+	const unsigned long reset = RUBY_SYS_CTL_RESET_DSP_ALL;
+
+	qtn_txbf_lhost_init();
+
+	topaz_set_reset_vec(1, reset);
+#else
+	/* Bring the DSP out of reset */
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = DSP_RESET;
+	*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = DSP_RESET;
+#endif
+}
+
+/* Enable DSP master GPIO by setting DSP_MASTER_GPIO_ENABLE in both the
+ * system control mask and the system control register (read-modify-write).
+ */
+void hal_enable_gpio(void)
+{
+	u32 value;
+
+	/* Set bit 15 for DGPIO enable */
+	value = *(volatile u32 *)IO_ADDRESS(SYS_CONTROL_MASK);
+	*(volatile u32 *)IO_ADDRESS(SYS_CONTROL_MASK) =
+		value | DSP_MASTER_GPIO_ENABLE;
+
+	value = *(volatile u32 *)IO_ADDRESS(SYS_CONTROL_REG);
+	*(volatile u32 *)IO_ADDRESS(SYS_CONTROL_REG) =
+		value | DSP_MASTER_GPIO_ENABLE;
+}
+
+/* DSP jump opcode with its half-words pre-swapped to match the DSP's
+ * instruction byte order (see the swap applied to the address below). */
+#define DSP_JUMP_INSTR_SWAP	0x0F802020
+
+/*
+ * Start DSP execution at @dsp_start_addr.
+ * ARC: program the DSP remap register (panics if the address is not
+ * aligned to the remap granularity).  Non-ARC: push a jump instruction
+ * plus the (half-word-swapped) target address through the DSP mailbox.
+ */
+void hal_dsp_start(u32 dsp_start_addr)
+{
+#ifdef CONFIG_ARC
+	/* Check that we can start this address */
+	if (dsp_start_addr & ((1 << RUBY_SYS_CTL_DSP_REMAP_SHIFT) - 1)) {
+		panic("DSP address 0x%x cannot be used as entry point\n", (unsigned)dsp_start_addr);
+	}
+	/* Tells DSP from which address start execution */
+	writel(RUBY_SYS_CTL_DSP_REMAP_VAL(dsp_start_addr), RUBY_SYS_CTL_DSP_REMAP);
+#else
+	/* Swap upper and lower half words for DSP instruction */
+	dsp_start_addr = ((dsp_start_addr >> 16) & 0xFFFF) | (dsp_start_addr << 16);
+
+	/* Push the jump instr and location into the mbx */
+	*(volatile u32*)IO_ADDRESS(UMS_REGS_MB + UMS_MBX_DSP_PUSH)
+		= DSP_JUMP_INSTR_SWAP;
+	*(volatile u32*)IO_ADDRESS(UMS_REGS_MB + UMS_MBX_DSP_PUSH)
+		= dsp_start_addr;
+#endif
+}
+
+/* Intentionally a no-op: the AuC is never put back into reset here. */
+void hal_disable_auc(void)
+{
+}
+
+/* Bring the AuC out of reset. */
+void hal_enable_auc(void)
+{
+	const unsigned long reset = TOPAZ_SYS_CTL_RESET_AUC;
+
+	topaz_set_reset_vec(1, reset);
+}
+
+/*
+ * Validate a MuC load address.  Always returns 0 (success) — see the
+ * FIXME below for why no real range check is possible on Ruby.
+ */
+int hal_range_check_sram_addr(void *addr)
+{
+	/*
+	* FIXME!!!
+	*
+	* On Ruby platform MuC firmware can use several sections mapped to different
+	* (not joined) address ranges. So there are no simple way to detect whether address
+	* is valid or not (check should determine whether address belongs to any of these
+	* sections or not).
+	* And for sure this function should not have "sram" in its name.
+	* For Ruby not only SRAM is used for MuC!
+	* For now let's always return success.
+	*/
+	return(0);
+}
+
+
+
+/*
+ * Enable the RF front end: turn on DSP master GPIO, configure GPIO pins
+ * 15/25 as outputs driven high, and select an alternate pin function.
+ * All accesses are read-modify-write on memory-mapped GPIO registers.
+ * NOTE(review): declared as hal_rf_enable(void) in qdrv_hal.h; the empty
+ * parameter list here is old-style C — equivalent in practice.
+ */
+void hal_rf_enable()
+{
+	volatile u32 *addr;
+
+	/* set bit 15 for DGPIO enable */
+	addr = (volatile u32 *)IO_ADDRESS(SYS_CONTROL_MASK);
+	*addr |= DSP_MASTER_GPIO_ENABLE;
+
+	addr = (volatile u32 *)IO_ADDRESS(SYS_CONTROL_REG);
+	*addr |= DSP_MASTER_GPIO_ENABLE;
+
+	/* GPIO 15 as output (two mode bits per pin in MODE1) */
+	addr = (volatile u32 *)IO_ADDRESS(RUBY_GPIO_REGS_ADDR + GPIO_MODE1);
+	*addr |= GPIO_MODE_OUTPUT << (15 << 1);
+
+	/* GPIOs 25 and 29 (MODE2 pins 9 and 13) as outputs */
+	addr = (volatile u32 *)IO_ADDRESS(RUBY_GPIO_REGS_ADDR + GPIO_MODE2);
+	*addr |= (GPIO_MODE_OUTPUT << (9 << 1)) | (GPIO_MODE_OUTPUT << (13 << 1));
+
+	/* Unmask and drive GPIOs 15 and 25 high */
+	addr = (volatile u32 *)IO_ADDRESS(RUBY_GPIO_REGS_ADDR + GPIO_OUTPUT_MASK);
+	*addr |= (1 << 15) | (1 << 25);
+
+	addr = (volatile u32 *)IO_ADDRESS(RUBY_GPIO_REGS_ADDR + GPIO_OUTPUT);
+	*addr |= (1 << 15) | (1 << 25);
+
+	addr = (volatile u32 *)IO_ADDRESS(RUBY_GPIO_REGS_ADDR + GPIO_ALTFN);
+	*addr |= (1 << 29);
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_hal.h b/drivers/qtn/qdrv/qdrv_hal.h
new file mode 100644
index 0000000..9dd2511
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_hal.h
@@ -0,0 +1,47 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_HAL_H
+#define _QDRV_HAL_H
+
+/* FIXME copied from qh_reg.h - hw dependent */
+/* Base of the memory-mapped MAC register window; _i selects a 64KB bank. */
+#define HAL_REG_OFFSET(_i)              0xE5050000+(_i)*0x10000
+#define HAL_REG(_reg)                   (HAL_REG_OFFSET(0) + (_reg))
+
+/* TSF counter register offsets (low/high 32-bit words). */
+#define HAL_REG_TSF_LO			0x3014
+#define HAL_REG_TSF_HI			0x3018
+#define HAL_REG_RX_CSR                  0x2000          /* Rx CSR and Rx filter */
+
+/* HAL entry points implemented in qdrv_hal.c; see that file for details. */
+void hal_reset(void);
+void hal_enable_muc(u32 muc_start_addr);
+void hal_disable_muc(void);
+void hal_disable_mbx(void);
+void hal_enable_mbx(void);
+void hal_disable_dsp(void);
+void hal_enable_dsp(void);
+void hal_enable_gpio(void);
+void hal_dsp_start(u32 dsp_start_addr);
+void hal_disable_auc(void);
+void hal_enable_auc(void);
+int hal_range_check_sram_addr(void *addr);
+void hal_rf_enable(void);
+void hal_get_tsf(uint64_t *tsf);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_hostlink.c b/drivers/qtn/qdrv/qdrv_hostlink.c
new file mode 100644
index 0000000..decb073
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_hostlink.c
@@ -0,0 +1,1203 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include <qtn/qtn_global.h>
+
+#ifdef MTEST
+#include "../mtest/mtest.h"
+#endif
+
+/*
+ * Debug dump of the hostlink ioctl ring: ring indices, first/last
+ * pointers, the Tx mailbox address, and every ring entry (virtual and
+ * DMA address, command, status, return code).  Diagnostic only.
+ */
+static void dump_hring(struct qdrv_wlan *qw)
+{
+	int i = 0;
+	struct host_ioctl *ioctl;
+	struct host_ioctl *ioctl_phys;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_HLINK,
+			"HLINK State Write %d Tosend %d Read %d First %p Last %p Mbx %p\n",
+			qw->tx_if.hl_write, qw->tx_if.hl_tosend, qw->tx_if.hl_read,
+			qw->tx_if.hl_first, qw->tx_if.hl_last,&qw->tx_if.tx_mbox[0]);
+	for (i = 0; i < QNET_HLRING_ENTRIES; i++) {
+		ioctl = &qw->tx_if.hl_ring[i];
+		ioctl_phys = &(((struct host_ioctl *) qw->tx_if.hl_ring_dma)[qw->tx_if.hl_tosend]);
+
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_HLINK,
+				"I[%d] IO(V):%p IO(P):%p ARGP:%p COMM:%d STAT:%08X RC:%08X\n", i,
+				ioctl, ioctl_phys,(void *)ioctl->ioctl_argp,
+				ioctl->ioctl_command,
+				ioctl->ioctl_status,
+				ioctl->ioctl_rc);
+	}
+}
+
+/*
+ * Claim a free slot in the hostlink ioctl ring.
+ *
+ * Scans for an entry whose status is QTN_HLINK_STATUS_AVAIL under the
+ * flow lock, marks it in-use (status 0), then zeroes it and tags it with
+ * the wlan unit as the target device.  Returns NULL if the MuC has been
+ * killed or the ring is full (the ring state is dumped when the HLINK
+ * debug flag is set).
+ */
+static struct host_ioctl *qdrv_alloc_ioctl(struct qdrv_wlan *qw)
+{
+	int indx;
+	struct host_ioctl *ioctl;
+	unsigned long flags;
+
+#ifdef QDRV_FEATURE_KILL_MUC
+	if (qw->flags_ext & QDRV_WLAN_MUC_KILLED) {
+		return NULL;
+	}
+#endif
+	spin_lock_irqsave(&qw->tx_if.hl_flowlock, flags);
+
+	/* Search for an empty IOCTL */
+	for (indx=0; indx < QNET_HLRING_ENTRIES; indx++) {
+		ioctl = &qw->tx_if.hl_ring[indx];
+		if (ioctl->ioctl_status == QTN_HLINK_STATUS_AVAIL)
+			break;
+	}
+
+	if (indx == QNET_HLRING_ENTRIES) {
+		DBGPRINTF_E("Hostlink buffer not available\n");
+
+		if (DBG_LOG_FUNC_TEST(QDRV_LF_HLINK)) {
+			dump_hring(qw);
+		}
+
+		spin_unlock_irqrestore(&qw->tx_if.hl_flowlock, flags);
+		return (NULL);
+	}
+
+	ioctl->ioctl_status = 0;
+
+	spin_unlock_irqrestore(&qw->tx_if.hl_flowlock, flags);
+
+	/* memset outside the lock; it re-clears ioctl_status, which stays 0 (in-use) */
+	memset(ioctl, 0, sizeof(*ioctl));
+	ioctl->ioctl_dev = qw->unit;
+
+	return (ioctl);
+}
+
+/* Return an ioctl slot to the ring by marking it available again. */
+static void qdrv_free_ioctl(struct host_ioctl *ioctl)
+{
+	if (ioctl == NULL) {
+		return;
+	}
+
+	ioctl->ioctl_status = QTN_HLINK_STATUS_AVAIL;
+}
+
+/*
+ * Busy-wait until @cond becomes false or @wait_time (jiffies) expires.
+ * @proc_context non-zero allows msleep(1) between polls; in hard-IRQ
+ * context it falls back to udelay(10) with a scaled iteration budget.
+ * @status is set non-zero iff the wait timed out with @cond still true.
+ * NOTE(review): the trailing ';' after "while (0)" makes call sites expand
+ * to two statements — fragile inside an unbraced if/else.
+ */
+#define QDRV_IOCTL_EVNT_TIMEOUT(cond, wait_time, msg, status, proc_context)	\
+do {										\
+	unsigned long start_time = jiffies;					\
+	u32 timeout = 0;							\
+	u32 dly_cnt = 0;							\
+	u32 irq_to = wait_time*100000/HZ;					\
+	for (; !timeout && cond;) {						\
+		if (proc_context) {						\
+			msleep(1);						\
+		}								\
+		if (in_irq()) {							\
+			udelay(10);						\
+			timeout = (++dly_cnt) > irq_to;				\
+		} else {							\
+			timeout = time_after(jiffies, start_time + wait_time);	\
+		}								\
+	}									\
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_HLINK,					\
+		"HLINK MSG: %s %s after %lu jiffies\n",				\
+		msg, timeout ? "timed out" : "accepted", jiffies - start_time);	\
+	status = timeout && cond;						\
+} while (0);
+
+
+/* Sentinel return for a timed-out hostlink ioctl, and per-context waits. */
+#define QDRV_IOCTL_RET_TIMEOUT		(-1)
+#define QDRV_IOCTL_HARD_IRQ_PM_WAIT_TIME	30
+#define QDRV_IOCTL_HARD_IRQ_WAIT_TIME		(HZ / 20)
+#define QDRV_IOCTL_ISR_WAIT_TIME		(HZ / 2)
+#define QDRV_IOCTL_PROC_WAIT_TIME		(HZ * 10)
+
+/*
+ * Send a hostlink ioctl to the MuC and busy-wait for completion.
+ *
+ * Sequence: wait for the Tx mailbox to empty, write the ioctl's DMA
+ * address into the mailbox (under the flow lock), interrupt the MuC,
+ * wait for dequeue, then wait for the MuC to set DONE or ERR in
+ * ioctl_rc.  Wait times depend on context (hard IRQ / softirq /
+ * process); the completion wait is stretched 100x during calibration.
+ *
+ * The slot is always freed here on both success and timeout.  Returns
+ * the MuC's ioctl_rc, 1 if the MAC is already dead, or
+ * QDRV_IOCTL_RET_TIMEOUT (-1) on timeout.  After
+ * QDRV_MAX_IOCTL_FAIL_DIE consecutive timeouts the MAC is declared
+ * dead via qdrv_mac_die_action().
+ */
+static int qdrv_send_ioctl(struct qdrv_wlan *qw, struct host_ioctl *ioctl)
+{
+
+#ifdef MTEST
+	return 0;
+#else
+	volatile u32 *mbox = &qw->tx_if.tx_mbox[0];
+	struct qdrv_mac *mac = qw->mac;
+	struct ieee80211com *ic = &qw->ic;
+	int rc;
+	int in_isr = 0;
+	int wait_time;
+	unsigned long flags;
+	char *desc;
+
+
+	/* This is a purely blocking IOCTL. No real ring. */
+
+	KASSERT(ioctl, (DBGEFMT "PASSED NULL IOCTL IN HOSTLINK SEND", DBGARG));
+
+	in_isr = in_interrupt();
+	if (in_irq()) {
+		wait_time = QDRV_IOCTL_HARD_IRQ_WAIT_TIME;
+		if (ic->ic_pm_enabled)
+			wait_time = QDRV_IOCTL_HARD_IRQ_PM_WAIT_TIME;
+	} else if (in_isr) {
+		wait_time = QDRV_IOCTL_ISR_WAIT_TIME;
+	} else {
+		wait_time = QDRV_IOCTL_PROC_WAIT_TIME;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_HLINK,
+			"HLINK MSG: %d Dev %d args %08X %08X called in %s context\n",
+			ioctl->ioctl_command, ioctl->ioctl_dev, ioctl->ioctl_arg1, ioctl->ioctl_arg2,
+			in_isr ? "interrupt" : "process");
+
+	if (mac->dead) {
+		static int count_msg = 0;
+		#define QDRV_MAX_DEAD_MSG 25
+		if ((count_msg++) <= QDRV_MAX_DEAD_MSG) {
+			DBGPRINTF_E("(%d)Dropping IOCTL %p due to dead MAC: %d dev %d args %08X %08X called in %s context\n",
+					count_msg, ioctl, ioctl->ioctl_command, ioctl->ioctl_dev, ioctl->ioctl_arg1,
+					ioctl->ioctl_arg2, in_isr ? "interrupt" : "process");
+			if (count_msg == QDRV_MAX_DEAD_MSG) {
+				DBGPRINTF_E("Restricting dead IOCTL messages\n");
+			}
+		}
+		rc = 1;
+		qdrv_free_ioctl(ioctl);
+		return (rc);
+	}
+
+	/* IOCTLs can be sent from non-sleep context. So we are forced to busy
+	 * wait. MuC treats IOCTL msgs as highest prio task
+	 */
+	desc = "waiting for empty mbox";
+	QDRV_IOCTL_EVNT_TIMEOUT(*mbox, wait_time, desc, rc, !in_isr);
+	if (rc) {
+		goto hlink_timeout;
+	}
+
+	/* We should check again if mbx empty. NON preempt kernel we should be ok */
+	spin_lock_irqsave(&qw->flowlock, flags);
+
+	/*
+	 * Push msg into mbx
+	 * - the current msg is offset into the dma region by ioctl index
+	 */
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE | QDRV_LF_HLINK,
+			"MBOX %p\n", mbox);
+	*mbox = (u32)((ioctl - qw->tx_if.hl_ring) + (struct host_ioctl *)qw->tx_if.hl_ring_dma);
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE | QDRV_LF_HLINK,
+			"set MBOX %p\n", mbox);
+
+	spin_unlock_irqrestore(&qw->flowlock, flags);
+	/* Interrupt Muc */
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE | QDRV_LF_HLINK,
+			"Interrupting MuC %p\n", qw->mac);
+	qdrv_mac_interrupt_muc(qw->mac);
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE | QDRV_LF_HLINK,
+			"Interrupted MuC %p\n", qw->mac);
+
+	desc = "waiting for MuC dequeue";
+	QDRV_IOCTL_EVNT_TIMEOUT(*mbox, wait_time, desc, rc, !in_isr);
+	if (rc) {
+		goto hlink_timeout;
+	}
+
+	desc = "waiting for ioctl completion";
+	// when in calibration mode, it takes long time. So, increase time-out period
+	if (soc_shared_params->calstate == QTN_CALSTATE_DEFAULT) {
+		QDRV_IOCTL_EVNT_TIMEOUT(
+				!(ioctl->ioctl_rc & (QTN_HLINK_RC_DONE | QTN_HLINK_RC_ERR)),
+				wait_time, desc, rc, !in_isr);
+	} else {
+		QDRV_IOCTL_EVNT_TIMEOUT(
+				!(ioctl->ioctl_rc & (QTN_HLINK_RC_DONE | QTN_HLINK_RC_ERR)),
+				wait_time*100, desc, rc, !in_isr);
+	}
+	if (rc) {
+		goto hlink_timeout;
+	}
+
+	rc = ioctl->ioctl_rc;
+
+	qdrv_free_ioctl(ioctl);
+	mac->ioctl_fail_count = 0;
+
+	return rc;
+
+hlink_timeout:
+	DBGPRINTF_E("HLINK MSG timed out while %s: cmd=%d dev=%d args=%08x %08x status=%u rc=%u ctxt=%s\n",
+		desc,
+		ioctl->ioctl_command, ioctl->ioctl_dev, ioctl->ioctl_arg1, ioctl->ioctl_arg2,
+		ioctl->ioctl_status, ioctl->ioctl_rc,
+		in_isr ? "interrupt" : "process");
+	rc = QDRV_IOCTL_RET_TIMEOUT;
+	qdrv_free_ioctl(ioctl);
+
+	/* If too many failed IOCTLs, perform some system action (eg, panic, gather logs, whatever). */
+	mac->ioctl_fail_count++;
+	#define QDRV_MAX_IOCTL_FAIL_DIE 16
+	if (mac->ioctl_fail_count > QDRV_MAX_IOCTL_FAIL_DIE) {
+		DBGPRINTF_E("Too many failed IOCTLs (%d) - MAC is dead\n", mac->ioctl_fail_count);
+		qdrv_mac_die_action(mac);
+	}
+
+	return rc;
+#endif /* #ifdef MTEST */
+}
+
+/*
+ * dma_alloc_coherent() wrapper that rewrites the returned DMA handle into
+ * the MuC's non-cached address view (muc_to_nocache) so it can be handed
+ * to the MuC directly.
+ */
+void* qdrv_hostlink_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
+	if(dma_handle && *dma_handle) {
+		*dma_handle = (dma_addr_t)muc_to_nocache((void*)(*dma_handle));
+	}
+	return ret;
+}
+
+/*
+ * Free memory obtained from qdrv_hostlink_alloc_coherent(); converts the
+ * handle back from the MuC non-cached view before freeing.
+ */
+void qdrv_hostlink_free_coherent(struct device *dev, size_t size, void *kvaddr, dma_addr_t dma_handle)
+{
+
+/* TODO: Since we are freeing DMA memory while in softirq context, this
+ * operation results into warn_on from kernel.
+ */
+
+#if 1
+	dma_free_coherent(dev, size, kvaddr, (dma_addr_t)nocache_to_muc((void*)dma_handle));
+#else
+	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+		return;
+
+	unmap_kernel_range((unsigned long)kvaddr, PAGE_ALIGN(size));
+	free_pages_exact((void *)plat_dma_to_phys(dev, dma_handle),
+				size);
+
+#endif
+}
+
+/* These are the per-VAP IOCTLs that finally go over the device ioctl path */
+
+/* Per-VAP wrapper around qdrv_free_ioctl(); tolerates NULL. */
+void vnet_free_ioctl(struct host_ioctl *ioctl)
+{
+	if (ioctl) {
+		qdrv_free_ioctl(ioctl);
+	}
+}
+
+/* Allocate an ioctl slot from the parent wlan and tag it with the VAP's devid. */
+struct host_ioctl *vnet_alloc_ioctl(struct qdrv_vap *qv)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)qv->parent;
+	struct host_ioctl *ioctl;
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl != NULL) {
+		ioctl->ioctl_dev = qv->devid;
+	}
+
+	return (ioctl);
+}
+
+/* Send a per-VAP ioctl through the parent wlan's hostlink. */
+int vnet_send_ioctl(struct qdrv_vap *qv, struct host_ioctl *ioctl)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)qv->parent;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/*
+ * Send a calibration command blob to the MuC.
+ * @cmdlen:  length of the command buffer.
+ * @cmd_dma: DMA address of the command buffer (passed via ioctl_argp).
+ * Returns the hostlink result, or -ENOMEM if no ioctl slot is free.
+ */
+int qdrv_hostlink_msg_calcmd(struct qdrv_wlan *qw, int cmdlen, dma_addr_t cmd_dma)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_CALCMD;
+	ioctl->ioctl_arg1 = 0; /*sys_rev_num*/;
+	ioctl->ioctl_arg2 = cmdlen;
+	ioctl->ioctl_argp = cmd_dma;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/*
+ * Send a generic device command (IOCTL_DEV_CMD) with two scalar args.
+ * Returns the hostlink result, or -ENOMEM if no ioctl slot is free.
+ */
+int qdrv_hostlink_msg_cmd(struct qdrv_wlan *qw, u_int32_t cmd, u_int32_t arg)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_CMD;
+	ioctl->ioctl_arg1 = cmd;
+	ioctl->ioctl_arg2 = arg;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/*
+ * Parse the VAP index from an interface name of the form "wifiN".
+ * @vap_id: output; set to N on success.
+ * Returns 0 on success, -EINVAL if the name does not match or N is out
+ * of range (>= QDRV_MAX_BSS_VAPS).
+ */
+static int qdrv_get_vap_id(const char *ifname, uint8_t *vap_id)
+{
+	if (vap_id == NULL || sscanf(ifname, "wifi%hhu", vap_id) != 1)
+		return -EINVAL;
+	if (*vap_id >= QDRV_MAX_BSS_VAPS)
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * Ask the MuC to create a VAP.
+ *
+ * Packs unit/flags/opmode into arg1, devid into arg2, and passes the VAP
+ * name, MAC address and id through a DMA-coherent qtn_vap_args buffer.
+ * WDS VAPs always use vap_id 0; otherwise the id is parsed from the
+ * interface name ("wifiN").  Returns the hostlink result or a negative
+ * errno on allocation/parse failure.
+ * NOTE(review): vap_id is initialised to QTN_MAX_BSS_VAPS while
+ * qdrv_get_vap_id() range-checks against QDRV_MAX_BSS_VAPS — confirm the
+ * two constants agree.
+ */
+int qdrv_hostlink_msg_create_vap(struct qdrv_wlan *qw,
+	const char *name_lhost, const uint8_t *mac_addr, int devid, int opmode, int flags)
+{
+	struct host_ioctl *ioctl;
+	struct qtn_vap_args *vap_args = NULL;
+	dma_addr_t args_dma;
+	int alloc_len;
+	int unit;
+	int ret;
+	uint8_t vap_id = QTN_MAX_BSS_VAPS;
+
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		ret = -ENOMEM;
+		goto out_no_free;
+	}
+
+	if (opmode == IEEE80211_M_WDS)
+		vap_id = 0;
+	else {
+		ret = qdrv_get_vap_id(name_lhost, &vap_id);
+		if (ret != 0)
+			goto out_free_ioctl;
+	}
+
+	alloc_len = sizeof(*vap_args) + 1;
+	vap_args = qdrv_hostlink_alloc_coherent(NULL, alloc_len,
+			&args_dma, GFP_ATOMIC);
+	if (vap_args == NULL) {
+		DBGPRINTF_E("Failed allocate %d bytes for name\n", alloc_len);
+		ret = -ENOMEM;
+		goto out_free_ioctl;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_VAPCREATE;
+	ioctl->ioctl_arg1 = unit | (flags << 8) | (opmode << 16);
+	ioctl->ioctl_arg2 = devid;
+	ioctl->ioctl_argp = args_dma;
+
+	memset(vap_args, 0, sizeof(*vap_args));
+	strncpy(vap_args->vap_name, name_lhost, sizeof(vap_args->vap_name)-1);
+	vap_args->vap_name[sizeof(vap_args->vap_name)-1] = '\0';
+	memcpy(vap_args->vap_macaddr, mac_addr, IEEE80211_ADDR_LEN);
+	vap_args->vap_id = vap_id;
+
+	ret = qdrv_send_ioctl(qw, ioctl);
+	qdrv_hostlink_free_coherent(NULL, alloc_len, vap_args, args_dma);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return ret;
+
+out_free_ioctl:
+	qdrv_free_ioctl(ioctl);
+out_no_free:
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return ret;
+}
+
+/*
+ * Ask the MuC to delete the VAP backing @vdev (identified by unit and
+ * the VAP's devid).  Returns the hostlink result or -ENOMEM.
+ */
+int qdrv_hostlink_msg_delete_vap(struct qdrv_wlan *qw, struct net_device *vdev)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	int devid;
+	struct qdrv_vap *qv = netdev_priv(vdev);
+
+	unit = qw->unit;
+	devid = qv->devid;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_VAPDELETE;
+	ioctl->ioctl_arg1 = unit;
+	ioctl->ioctl_arg2 = devid;
+	ioctl->ioctl_argp = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Cancel an in-progress off-channel sampling request (bus address in arg1).
+ * Returns the hostlink result or -1 if no ioctl slot is free. */
+int qdrv_hostlink_sample_chan_cancel(struct qdrv_wlan *qw, struct qtn_samp_chan_info *samp_chan_bus)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SAMPLE_CHANNEL_CANCEL;
+	ioctl->ioctl_arg1 = (u32)samp_chan_bus;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Start off-channel sampling described by @samp_chan_bus (bus address). */
+int qdrv_hostlink_sample_chan(struct qdrv_wlan *qw, struct qtn_samp_chan_info *samp_chan_bus)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SAMPLE_CHANNEL;
+	ioctl->ioctl_arg1 = (u32)samp_chan_bus;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Start a remain-on-channel operation described by @remain_chan_bus. */
+int qdrv_hostlink_remain_chan(struct qdrv_wlan *qw, struct qtn_remain_chan_info *remain_chan_bus)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_REMAIN_CHANNEL;
+	ioctl->ioctl_arg1 = (uint32_t)remain_chan_bus;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Suspend (non-zero) or resume (0) off-channel operations on the MuC. */
+int qdrv_hostlink_suspend_off_chan(struct qdrv_wlan *qw, uint32_t suspend)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SUSPEND_OFF_CHANNEL;
+	ioctl->ioctl_arg1 = suspend;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Pass an off-channel CAC (OCAC) configuration block to the MuC. */
+int qdrv_hostlink_set_ocac(struct qdrv_wlan *qw, struct qtn_ocac_info *ocac_bus)
+{
+	struct host_ioctl *ioctl;
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SET_OCAC;
+	ioctl->ioctl_arg1 = (u32)ocac_bus;
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Start a channel measurement described by @meas_chan_bus (bus address). */
+int qdrv_hostlink_meas_chan(struct qdrv_wlan *qw, struct qtn_meas_chan_info *meas_chan_bus)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_MEAS_CHANNEL;
+	ioctl->ioctl_arg1 = (u32)meas_chan_bus;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/*
+ * Push an Rx gain parameter table to the MuC.
+ * @index:          gain table index (arg1).
+ * @rx_gain_params: optional parameter struct; copied into a DMA-coherent
+ *                  buffer whose handle goes in arg2.  NULL sends arg2=0.
+ * Returns the hostlink result or -ENOMEM on allocation failure.
+ */
+int qdrv_hostlink_rxgain_params(struct qdrv_wlan *qw, uint32_t index, struct qtn_rf_rxgain_params *rx_gain_params)
+{
+	struct host_ioctl *ioctl;
+	dma_addr_t args_dma = 0;
+	struct qtn_rf_rxgain_params *sp_rxgain_params=NULL;
+	int alloc_len = sizeof(*sp_rxgain_params) + 1;
+	int ret = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	if (rx_gain_params != NULL) {
+		sp_rxgain_params = qdrv_hostlink_alloc_coherent(NULL, alloc_len,
+				&args_dma, GFP_ATOMIC);
+
+		if (sp_rxgain_params == NULL) {
+			qdrv_free_ioctl(ioctl);
+			DBGPRINTF_E("Failed allocate %d bytes for name\n", alloc_len);
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return -ENOMEM;
+		}
+
+		*sp_rxgain_params = *rx_gain_params;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SET_RX_GAIN_PARAMS;
+	ioctl->ioctl_arg1 = index;
+	ioctl->ioctl_arg2 = args_dma;
+
+	ret = qdrv_send_ioctl(qw, ioctl);
+
+	if (sp_rxgain_params != NULL) {
+		qdrv_hostlink_free_coherent(NULL, alloc_len, sp_rxgain_params, args_dma);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return ret;
+}
+
+#ifdef QTN_BG_SCAN
+/* Start a background-scan channel visit described by @scan_chan_bus. */
+int qdrv_hostlink_bgscan_chan(struct qdrv_wlan *qw, struct qtn_scan_chan_info *scan_chan_bus)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_BGSCAN_CHANNEL;
+	ioctl->ioctl_arg1 = (u32)scan_chan_bus;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+#endif /* QTN_BG_SCAN */
+
+/* Store a Tx power value for the current RF chip (chip id in arg1). */
+int qdrv_hostlink_store_txpow(struct qdrv_wlan *qw, u_int32_t txpow)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+#if 0
+		DPRINTF(LL_1, LF_ERROR, (DBGEFMT "ioctl NULL\n", DBGARG));
+		DPRINTF(LL_1, LF_TRACE, (DBGFMT "<--Exit\n", DBGARG));
+#endif
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_STORE_TXPOW;
+	ioctl->ioctl_arg1 = qw->rf_chipid;
+	ioctl->ioctl_arg2 = txpow;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Request an immediate channel change (band in arg1, channel in arg2). */
+int qdrv_hostlink_setchan(struct qdrv_wlan *qw, uint32_t freq_band, uint32_t qtn_chan)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_CHANGE_CHANNEL;
+	ioctl->ioctl_arg1 = freq_band;
+	ioctl->ioctl_arg2 = qtn_chan;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Request a deferred (CSA-driven) channel change; the CSA descriptor's
+ * physical address is passed in arg1. */
+int qdrv_hostlink_setchan_deferred(struct qdrv_wlan *qw, struct qtn_csa_info *csa_phyaddr_info)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_CHANGE_CHAN_DEFERRED;
+	ioctl->ioctl_arg1 = (u32)csa_phyaddr_info;
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_HLINK,
+			"sending to %p muc\n", csa_phyaddr_info);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Enable (true) or disable (false) transmission on the MuC. */
+int qdrv_hostlink_xmitctl(struct qdrv_wlan *qw, bool enable_xmit)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_XMITCTL;
+	ioctl->ioctl_arg1 = (enable_xmit)? 1 : 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+/* Tell the MuC whether RTS/CTS protection is required (1) or not (0). */
+int qdrv_hostlink_use_rtscts(struct qdrv_wlan *qw, int rtscts_required)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_USE_RTS_CTS;
+	ioctl->ioctl_arg1 = (rtscts_required)? 1 : 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+#ifdef QDRV_FEATURE_KILL_MUC
+int qdrv_hostlink_killmuc(struct qdrv_wlan *qw)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_KILL_MUC;
+	ioctl->ioctl_arg1 = unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+#endif
+
+#ifdef CONFIG_QVSP
+int qdrv_hostlink_qvsp(struct qdrv_wlan *qw, uint32_t param, uint32_t value)
+{
+	struct host_ioctl *ioctl;
+	int rc;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_VSP;
+	ioctl->ioctl_arg1 = param;
+	ioctl->ioctl_arg2 = value;
+
+	rc = qdrv_send_ioctl(qw, ioctl);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return rc;
+}
+#endif
+
+int qdrv_dump_log(struct qdrv_wlan *qw)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_DUMP_LOG;
+	ioctl->ioctl_arg1 = unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+int qdrv_hostlink_msg_set_wifi_macaddr( struct qdrv_wlan *qw, u8 *new_macaddr )
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SET_MACADDR;
+	ioctl->ioctl_arg1 = ((new_macaddr[ 0 ] << 24) | (new_macaddr[ 1 ] << 16) | (new_macaddr[ 2 ] << 8) | new_macaddr[ 3 ]);
+	ioctl->ioctl_arg2 = ((new_macaddr[ 4 ] << 8) | new_macaddr[ 5 ]);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+int qdrv_hostlink_setscanmode(struct qdrv_wlan *qw, u_int32_t scanmode)
+{
+	struct host_ioctl *ioctl;
+	struct ieee80211com *ic = &qw->ic;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	if ((TAILQ_FIRST(&ic->ic_vaps))->iv_opmode == IEEE80211_M_STA) {
+		ioctl->ioctl_command = IOCTL_DEV_SET_SCANMODE_STA;
+	} else {
+		ioctl->ioctl_command = IOCTL_DEV_SET_SCANMODE;
+	}
+
+	ioctl->ioctl_arg1 = scanmode;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+int qdrv_hostlink_set_hrflags(struct qdrv_wlan *qw, u_int32_t hrflags)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SET_HRFLAGS;
+	ioctl->ioctl_arg1 = hrflags;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+int qdrv_hostlink_power_save(struct qdrv_wlan *qw, int param, int val)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E( "ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SET_POWER_SAVE;
+	ioctl->ioctl_arg1 = param;
+	ioctl->ioctl_arg2 = val;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+int qdrv_hostlink_tx_airtime_control(struct qdrv_wlan *qw, uint32_t value)
+{
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (!ioctl) {
+		DBGPRINTF_E("ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_AIRTIME_CONTROL;
+	ioctl->ioctl_arg1 = value;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+int qdrv_hostlink_mu_group_update(struct qdrv_wlan *qw, struct qtn_mu_group_update_args *args)
+{
+	struct host_ioctl *ioctl;
+	struct qtn_mu_group_update_args *sp_args = NULL;
+	dma_addr_t args_dma = 0;
+	int alloc_len = sizeof(struct qtn_mu_group_update_args);
+	int ret = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (!ioctl) {
+		DBGPRINTF_E("ioctl NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	sp_args = qdrv_hostlink_alloc_coherent(NULL, alloc_len,
+		&args_dma, GFP_ATOMIC);
+	if (sp_args == NULL) {
+		qdrv_free_ioctl(ioctl);
+		DBGPRINTF_E("Failed to allocate %d bytes for name\n", alloc_len);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+	*sp_args = *args;
+
+	ioctl->ioctl_command = IOCTL_DEV_MU_GROUP_UPDATE;
+	ioctl->ioctl_arg1 = 0;
+	ioctl->ioctl_arg2 = 0;
+	ioctl->ioctl_argp = args_dma;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	ret = qdrv_send_ioctl(qw, ioctl);
+
+	qdrv_hostlink_free_coherent(NULL, alloc_len, sp_args, args_dma);
+	return ret;
+}
+
+int qdrv_hostlink_send_ioctl_args(struct qdrv_wlan *qw, uint32_t command,
+		uint32_t arg1, uint32_t arg2)
+{
+	struct host_ioctl *ioctl;
+	int unit;
+	unit = qw->unit;
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E("ioctl %u qdrv_alloc_ioctl return NULL\n", command);
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = command;
+	ioctl->ioctl_arg1 = arg1;
+	ioctl->ioctl_arg2 = arg2;
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+int qdrv_hostlink_start(struct qdrv_mac *mac)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_hostlink_stop(struct qdrv_mac *mac)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_hostlink_init(struct qdrv_wlan *qw, struct host_ioctl_hifinfo *hifinfo)
+{
+	int indx;
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Allocate the Hostlink circular buffer */
+	qw->tx_if.hl_ring = qdrv_hostlink_alloc_coherent(NULL, QNET_HLRING_SIZE,
+		(dma_addr_t *) &qw->tx_if.hl_ring_dma, GFP_ATOMIC);
+
+	if(!qw->tx_if.hl_ring)
+	{
+		DBGPRINTF_E("Failed to allocate DMA memory\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-ENOMEM);
+	}
+
+	memset(qw->tx_if.hl_ring, 0, QNET_HLRING_SIZE);
+	for(indx=0;indx<QNET_HLRING_ENTRIES;indx++){
+		qw->tx_if.hl_ring[indx].ioctl_status = QTN_HLINK_STATUS_AVAIL;
+	}
+	qw->tx_if.hl_read = qw->tx_if.hl_write = qw->tx_if.hl_tosend = 0;
+	qw->tx_if.hl_first = qw->tx_if.hl_last = NULL;
+
+	spin_lock_init(&qw->tx_if.hl_flowlock);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_hostlink_exit(struct qdrv_wlan *qw)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Make sure work queues are done */
+	flush_scheduled_work();
+
+	dma_free_coherent(NULL, QNET_HLRING_SIZE, qw->tx_if.hl_ring,
+		qw->tx_if.hl_ring_dma);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_hostlink_vlan_enable(struct qdrv_wlan *qw, int enable)
+{
+	struct host_ioctl *ioctl;
+	int ret = 0;
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E("ioctl cmd [%d] NULL\n", IOCTL_DEV_ENABLE_VLAN);
+		return -ENOMEM;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_ENABLE_VLAN;
+	ioctl->ioctl_arg1 = enable;
+	ioctl->ioctl_arg2 = 0;
+	ioctl->ioctl_argp = 0;
+
+	ret = qdrv_send_ioctl(qw, ioctl);
+	return ret;
+}
+
+int qdrv_hostlink_enable_flush_data(struct qdrv_wlan *qw, int enable)
+{
+        struct host_ioctl *ioctl;
+        int ret = 0;
+
+        ioctl = qdrv_alloc_ioctl(qw);
+        if (ioctl == NULL) {
+                DBGPRINTF_E("ioctl cmd[%d] NULL\n", IOCTL_DEV_FLUSH_DATA);
+                return -ENOMEM;
+        }
+
+        if (enable) {
+
+		ioctl->ioctl_command = IOCTL_DEV_FLUSH_DATA;
+		ioctl->ioctl_arg1 = enable;
+		ioctl->ioctl_arg2 = 0;
+		ioctl->ioctl_argp = 0;
+
+		ret = qdrv_send_ioctl(qw, ioctl);
+        }
+        return ret;
+}
+
+int qdrv_hostlink_update_ocac_state_ie(struct qdrv_wlan *qw, uint8_t state, uint8_t param)
+{
+	struct host_ioctl *ioctl;
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E("%s: ioctl NULL\n", __func__);
+		return -1;
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_UPDATE_OCAC_STATE_IE;
+	ioctl->ioctl_arg1 = state;
+	ioctl->ioctl_arg2 = param;
+
+	return qdrv_send_ioctl(qw, ioctl);
+}
+
+int qdrv_hostlink_change_bcn_scheme(struct qdrv_vap *qv, int param, int value)
+{
+	struct host_ioctl *ioctl;
+	struct qtn_setparams_args *args = NULL;
+	dma_addr_t args_dma = 0;
+	int ret = 0;
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)qv->parent;
+
+	ioctl = qdrv_alloc_ioctl(qw);
+	if (ioctl == NULL) {
+		DBGPRINTF_E("ioctl cmd[%d] NULL\n", IOCTL_DEV_SETPARAMS);
+		return -1;
+	}
+
+	args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC);
+	if (args == NULL) {
+		qdrv_free_ioctl(ioctl);
+		DBGPRINTF_E("Failed allocate memory for bytes for args\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	args->ni_param = param;
+	args->ni_value = value;
+	args->ni_len = 0;
+
+	ioctl->ioctl_command = IOCTL_DEV_SETPARAMS;
+	ioctl->ioctl_dev = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	ret = qdrv_send_ioctl(qw, ioctl);
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+	return ret;
+}
diff --git a/drivers/qtn/qdrv/qdrv_mac.c b/drivers/qtn/qdrv/qdrv_mac.c
new file mode 100644
index 0000000..83a9c14
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_mac.c
@@ -0,0 +1,281 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+
+#include <asm/hardware.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_uc_print.h"
+#include "qdrv_soc.h"
+#include "qdrv_control.h"
+#include <qtn/registers.h>
+#include <qtn/mproc_sync_base.h>
+#include <qtn/txbf_mbox.h>
+
+#ifdef TOPAZ_AMBER_IP
+#include <qtn/amber.h>
+#endif
+
+/* Default irq handler for unclaimed interrupts */
+static void no_irq_handler(void *arg1, void *arg2)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+int qdrv_mac_set_handler(struct qdrv_mac *mac, int irq, struct int_handler *handler)
+{
+	if(irq < 0 || irq >= HOST_INTERRUPTS)
+	{
+		return(-1);
+	}
+
+	/* Copy the handler structure */
+	mac->int_handlers[irq] = *handler;
+
+	return(0);
+}
+
+int qdrv_mac_set_host_dsp_handler(struct qdrv_mac *mac, int irq, struct int_handler *handler)
+{
+	if(irq < 0 || irq >= HOST_INTERRUPTS)
+	{
+		return(-1);
+	}
+
+	/* Copy the handler structure */
+	mac->mac_host_dsp_int_handlers[irq] = *handler;
+
+	return(0);
+}
+
+int qdrv_mac_clear_handler(struct qdrv_mac *mac, int irq)
+{
+	struct int_handler int_handler;
+
+	/* Install an empty handler so we can avoid checking         */
+	/* if a handler is installed every time we take an interrupt */
+	int_handler.handler = no_irq_handler;
+	int_handler.arg1 = NULL;
+	int_handler.arg2 = NULL;
+
+	return(qdrv_mac_set_handler(mac, irq, &int_handler));
+}
+
+/* Called by the high priority input to kill off the host and
+ * prevent it locking up.
+ */
+static irqreturn_t qdrv_mac_die(int irq, void *dev_id)
+{
+	struct qdrv_mac *mac = (struct qdrv_mac *)dev_id;
+	u_int32_t status = qtn_mproc_sync_irq_ack_nolock(
+		TOPAZ_SYS_CTL_M2L_HI_INT,
+		0xFFFF << RUBY_M2L_IPC_HI_IRQ(0) /* high IPC*/);
+
+	if (status & (1 << RUBY_M2L_IRQ_HI_DIE)) {
+		DBGPRINTF_E( "IRQ from MAC: dead\n");
+		qdrv_mac_die_action(mac);
+	}
+
+	if (status & (1 << RUBY_M2L_IRQ_HI_REBOOT)) {
+		DBGPRINTF_E( "IRQ from MAC: reboot\n");
+
+		/* MuC ask to restart system.
+		 */
+#ifdef TOPAZ_AMBER_IP
+		amber_set_shutdown_code(AMBER_SD_CODE_EMERGENCY);
+#endif
+
+		kernel_restart("MUC restart");
+	}
+
+
+	/* touch uc print buffer work queue to flush shared message buf */
+	uc_print_schedule_work();
+
+	return(IRQ_HANDLED);
+}
+
+static irqreturn_t __sram_text qdrv_mac_interrupt(int irq, void *dev_id)
+{
+	int i;
+	struct qdrv_mac *mac = (struct qdrv_mac *)dev_id;
+	u_int32_t status = qtn_mproc_sync_irq_ack_nolock(
+		(u_int32_t)mac->mac_host_int_status,
+		0xFFFF /* low IPC*/);
+
+	/* Call the handlers */
+	for(i = 0; i < HOST_INTERRUPTS; i++)
+	{
+		if ((status & (1 << i)) && mac->int_handlers[i].handler)
+		{
+			(*mac->int_handlers[i].handler)(mac->int_handlers[i].arg1,
+				mac->int_handlers[i].arg2);
+		}
+	}
+
+	/* We are done - See you next time */
+	return(IRQ_HANDLED);
+}
+
+static irqreturn_t __sram_text qdrv_dsp_interrupt(int irq, void *dev_id)
+{
+	int i;
+	struct qdrv_mac *mac = (struct qdrv_mac *)dev_id;
+	u_int32_t status = qtn_txbf_lhost_irq_ack(mac);
+
+	/* Call the handlers */
+	for (i = 0; i < HOST_DSP_INTERRUPTS; i++) {
+		if ((status & (1 << i)) && mac->mac_host_dsp_int_handlers[i].handler)
+		{
+//			printk("Interrupt for irq %d dev %p\n", i, dev_id);
+			(*mac->mac_host_dsp_int_handlers[i].handler)(
+				mac->mac_host_dsp_int_handlers[i].arg1,
+				mac->mac_host_dsp_int_handlers[i].arg2);
+		}
+	}
+
+	/* We are done - See you next time */
+	return(IRQ_HANDLED);
+}
+
+void __sram_text qdrv_mac_interrupt_muc(struct qdrv_mac *mac)
+{
+	qtn_mproc_sync_irq_trigger((u_int32_t)mac->mac_uc_intgen, RUBY_L2M_IRQ_HLINK);
+}
+
+void __sram_text qdrv_mac_interrupt_muc_high(struct qdrv_mac *mac)
+{
+	qtn_mproc_sync_irq_trigger((u_int32_t)mac->mac_uc_intgen, RUBY_L2M_IRQ_HIGH);
+}
+
+int qdrv_mac_init(struct qdrv_mac *mac, u8 *mac_addr, int unit, int irq, struct qdrv_mac_params *params)
+{
+	int i;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Reset the MAC data structure */
+	memset(mac, 0, sizeof(struct qdrv_mac));
+
+	/* Initialize */
+	mac->unit = unit;
+	mac->irq = irq;
+	mac->reg = (struct muc_ctrl_reg *)
+		IO_ADDRESS(MUC_BASE_ADDR + MUC_OFFSET_CTRL_REG);
+	mac->ruby_sysctrl = (struct ruby_sys_ctrl_reg *)IO_ADDRESS(RUBY_SYS_CTL_BASE_ADDR);
+	memcpy(mac->mac_addr, mac_addr, IEEE80211_ADDR_LEN);
+	memcpy(&mac->params, params, sizeof(mac->params));
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_QCTRL | QDRV_LF_TRACE, "Copied MAC addr etc.\n");
+
+	/* Clear all the interrupt handlers */
+	for (i = 0; i < HOST_INTERRUPTS; i++) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_QCTRL | QDRV_LF_TRACE, "Cleared handler %d\n", i);
+		qdrv_mac_clear_handler(mac, i);
+	}
+
+	/* Set interrupts to low (IRQ) priority */
+	mac->reg->mac0_host_int_pri = 0;
+
+	/* Set up pointer to int status register for generic int handler */
+	mac->mac_host_int_mask = &mac->ruby_sysctrl->m2l_int_mask;
+	mac->mac_host_int_status = &mac->ruby_sysctrl->m2l_int;
+	mac->mac_host_sem = &mac->ruby_sysctrl->l2m_sem;
+	mac->mac_uc_intgen = &mac->ruby_sysctrl->l2m_int;
+
+	/* Set up pointers for LHost to DSP communication */
+	mac->mac_host_dsp_int_mask = &mac->ruby_sysctrl->d2l_int_mask;
+	mac->mac_host_dsp_int_status = &mac->ruby_sysctrl->d2l_int;
+	mac->mac_host_dsp_sem = &mac->ruby_sysctrl->l2d_sem;
+	mac->mac_host_dsp_intgen = &mac->ruby_sysctrl->l2d_int;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_QCTRL | QDRV_LF_TRACE, "Requesting IRQs\n");
+	/* Register handler with linux for MuC interrupt line. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (request_irq(mac->irq, qdrv_mac_interrupt, 0, "QMAC0low", mac) != 0) {
+#else
+	if (request_irq(mac->irq, qdrv_mac_interrupt, IRQF_SAMPLE_RANDOM, "QMAC0low", mac) != 0) {
+#endif
+		DBGPRINTF_E("Can't get MAC0 irq %d\n", mac->irq);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-ENODEV);
+	}
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_QCTRL | QDRV_LF_TRACE, "Requested IRQ QMAC0low %p\n", mac);
+	if (request_irq(RUBY_IRQ_IPC_HI, qdrv_mac_die, 0, "QMACDie", mac) != 0) {
+		DBGPRINTF_E("Can't get MACDIE irq %d\n", RUBY_IRQ_IPC_HI);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-ENODEV);
+	}
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_QCTRL | QDRV_LF_TRACE, "Requested IRQ DIE %p\n", mac);
+	if (request_irq(QTN_TXBF_D2L_IRQ, qdrv_dsp_interrupt, 0, QTN_TXBF_D2L_IRQ_NAME, mac) != 0) {
+		DBGPRINTF_E("Can't get DSP irq %d\n", QTN_TXBF_D2L_IRQ);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-ENODEV);
+	}
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_QCTRL | QDRV_LF_TRACE, "Requested IRQ DSP %p\n", mac);
+
+	/* Enable the high priority interrupts */
+	for (i = 16; i < 32; i++) {
+		qdrv_mac_enable_irq(mac, i);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_mac_exit(struct qdrv_mac *mac)
+{
+	int i;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (!mac->enabled) {
+		DBGPRINTF_E("MAC unit %d is not enabled\n", mac->unit);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	free_irq(mac->irq, mac);
+	free_irq(RUBY_IRQ_IPC_HI, mac);
+	free_irq(QTN_TXBF_D2L_IRQ, mac);
+
+	for (i = 0; i < HOST_INTERRUPTS; i++) {
+		qdrv_mac_disable_irq(mac, i);
+		qdrv_mac_clear_handler(mac, i);
+	}
+
+	mac->enabled = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
diff --git a/drivers/qtn/qdrv/qdrv_mac.h b/drivers/qtn/qdrv/qdrv_mac.h
new file mode 100644
index 0000000..d7d3656
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_mac.h
@@ -0,0 +1,133 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_MAC_H
+#define _QDRV_MAC_H
+
+#include <linux/workqueue.h>
+#include <qtn/lhost_muc_comm.h>
+
+#define IEEE80211_ADDR_LEN	6	/* size of 802.11 address */
+
+#define MAC_UNITS		1
+#define HOST_INTERRUPTS		16
+#define HOST_DSP_INTERRUPTS	16
+
+#define QDRV_RESERVED_DEVIDS		QTN_RESERVED_DEVIDS
+#define QDRV_WLANID_FROM_DEVID(devid)	QTN_WLANID_FROM_DEVID(devid)
+#define QDRV_MAX_BSS_VAPS 8
+#define QDRV_MAX_WDS_VAPS 8
+#define QDRV_MAX_VAPS (QDRV_MAX_BSS_VAPS + QDRV_MAX_WDS_VAPS)
+#define QDRV_MAX_DEVID (QDRV_MAX_VAPS + QDRV_RESERVED_DEVIDS)
+
+static __always_inline int qdrv_devid_valid(uint8_t devid)
+{
+	return (devid >= QDRV_RESERVED_DEVIDS) && (devid < QDRV_MAX_DEVID);
+}
+
+struct int_handler
+{
+	void (*handler)(void *arg1, void *arg2);
+	void *arg1;
+	void *arg2;
+};
+
+struct qdrv_mac_params
+{
+	unsigned int txif_list_max;
+	uint8_t mucdbg_netdbg;
+};
+
+struct qdrv_mac
+{
+	int unit;
+	struct net_device *vnet[QDRV_MAX_VAPS];
+	uint8_t vnet_last;
+	uint8_t mac_addr[IEEE80211_ADDR_LEN];
+	unsigned int irq;
+	uint8_t enabled;
+	struct int_handler int_handlers[HOST_INTERRUPTS];
+	volatile struct muc_ctrl_reg *reg;
+	volatile u32 *mac_host_int_status;
+	volatile u32 *mac_host_int_mask;
+	volatile u32 *mac_host_sem;
+	volatile u32 *mac_uc_intgen;
+	struct int_handler mac_host_dsp_int_handlers[HOST_DSP_INTERRUPTS];
+	volatile u32 *mac_host_dsp_int_status;
+	volatile u32 *mac_host_dsp_int_mask;
+	volatile u32 *mac_host_dsp_sem;
+	volatile u32 *mac_host_dsp_intgen;
+	struct qtn_stats_log *mac_sys_stats;
+	void *data;
+	struct qdrv_mac_params params; /* Configurable parameters per MAC */
+	volatile struct ruby_sys_ctrl_reg *ruby_sysctrl;
+	int dead;
+	int mgmt_dead;
+	int ioctl_fail_count;
+	int mac_active_bss;
+	int mac_active_wds;
+	struct qtn_cca_stats *cca_stats;
+};
+
+int qdrv_mac_init(struct qdrv_mac *mac, u8 *mac_addr, int unit, int irq, struct qdrv_mac_params *params);
+int qdrv_mac_exit(struct qdrv_mac *mac);
+int qdrv_mac_set_handler(struct qdrv_mac *mac, int irq,
+	struct int_handler *handler);
+int qdrv_mac_set_host_dsp_handler(struct qdrv_mac *mac, int irq,
+	struct int_handler *handler);
+int qdrv_mac_clear_handler(struct qdrv_mac *mac, int irq);
+void qdrv_mac_interrupt_muc(struct qdrv_mac *mac);
+void qdrv_mac_interrupt_muc_high(struct qdrv_mac *mac);
+void qdrv_muc_died_sysfs(void);
+
+static __always_inline void
+qdrv_mac_enable_irq(struct qdrv_mac *mac, int irq)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+	set_bit(irq, (volatile unsigned long *)mac->mac_host_int_mask);
+#else
+	set_bit(irq, mac->mac_host_int_mask);
+#endif
+}
+
+static __always_inline void
+qdrv_mac_disable_irq(struct qdrv_mac *mac, int irq)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+	clear_bit(irq, (volatile unsigned long *)mac->mac_host_int_mask);
+#else
+	clear_bit(irq, mac->mac_host_int_mask);
+#endif
+
+}
+
+static __always_inline void
+qdrv_mac_die_action(struct qdrv_mac *mac)
+{
+	/*
+	 * Mark the mac as dead so ioctls and transmissions are
+	 * dropped.
+	 */
+	mac->dead = 1;
+	qdrv_muc_died_sysfs();
+}
+
+#endif
+
diff --git a/drivers/qtn/qdrv/qdrv_mac_reserve.c b/drivers/qtn/qdrv/qdrv_mac_reserve.c
new file mode 100644
index 0000000..37b330c
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_mac_reserve.c
@@ -0,0 +1,157 @@
+/**
+  Copyright (c) 2008 - 2015 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+
+#include "qdrv_mac.h"
+#include "qdrv_wlan.h"
+#include "qdrv_mac_reserve.h"
+#include "qtn/topaz_fwt_sw.h"
+
+#include <qtn/topaz_tqe.h>
+
+#define QDRV_MAC_RESERVE_MAX		6
+
+struct qdrv_mac_reserve_ent_s {
+	uint8_t addr[ETH_ALEN];
+	uint8_t mask[ETH_ALEN];
+};
+
+struct qdrv_mac_reserve_s {
+	struct qdrv_wlan *qw;
+	uint32_t max;
+	struct qdrv_mac_reserve_ent_s entry[QDRV_MAC_RESERVE_MAX];
+};
+
+static struct qdrv_mac_reserve_s qdrv_mac_reserve;
+
+/* specialised version of compare_ether_addr() */
+static inline unsigned qdrv_mac_reserve_compare_ether_addr_masked(const void *addr1,
+						const void *mask, const void *addr2)
+{
+	const uint16_t *a = addr1;
+	const uint16_t *m = mask;
+	const uint16_t *b = addr2;
+
+	return ((a[0] ^ (b[0] & m[0])) | (a[1] ^ (b[1] & m[1])) | (a[2] ^ (b[2] & m[2]))) != 0;
+}
+
+int __sram_text qdrv_mac_reserved(const uint8_t *addr)
+{
+	struct qdrv_mac_reserve_ent_s *res;
+	int i;
+
+	for (i = 0; i < qdrv_mac_reserve.max; i++) {
+		res = &qdrv_mac_reserve.entry[i];
+		if (!qdrv_mac_reserve_compare_ether_addr_masked(res->addr, res->mask, addr)) {
+			RXSTAT(qdrv_mac_reserve.qw, rx_mac_reserved);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qdrv_mac_reserved);
+
+void qdrv_mac_reserve_clear(void)
+{
+	local_bh_disable();
+
+	tqe_register_mac_reserved_cbk(NULL);
+	qdrv_mac_reserve.max = 0;
+	memset(&qdrv_mac_reserve.entry, 0, sizeof(qdrv_mac_reserve.entry));
+
+	local_bh_enable();
+
+	printk("%s: mac reservation table cleared\n", __func__);
+}
+
+/*
+ * Reserve a MAC address for use by non-WiFi interfaces or clear all reserved MAC addresses.
+ */
+int qdrv_mac_reserve_set(const uint8_t *addr, const uint8_t *mask)
+{
+	int i;
+
+	if (qdrv_mac_reserve.max > (ARRAY_SIZE(qdrv_mac_reserve.entry) - 1)) {
+		printk("%s: mac address reservation for %pM failed - table is full\n", __func__,
+			addr);
+		return -1;
+	}
+
+	if (IEEE80211_ADDR_NULL(addr) || IEEE80211_IS_MULTICAST(addr)) {
+		printk("%s: invalid mac address %pM\n", __func__, addr);
+		return -1;
+	}
+
+	if (IEEE80211_ADDR_NULL(mask)) {
+		printk("%s: invalid mask address %pM\n", __func__, mask);
+		return -1;
+	}
+
+	local_bh_disable();
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		qdrv_mac_reserve.entry[qdrv_mac_reserve.max].addr[i] = (addr[i] & mask[i]);
+		qdrv_mac_reserve.entry[qdrv_mac_reserve.max].mask[i] = mask[i];
+	}
+
+	qdrv_mac_reserve.max++;
+	if (qdrv_mac_reserve.max == 1)
+		tqe_register_mac_reserved_cbk(qdrv_mac_reserved);
+
+	/* clear the FWT table */
+	fwt_sw_reset();
+
+	local_bh_enable();
+
+	printk("%s: mac address %pM/%pM reserved\n", __func__, addr, mask);
+
+	return 0;
+}
+
+/*
+ * Get the list of reserved MAC addresses.
+ */
+void qdrv_mac_reserve_show(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_mac_reserve_ent_s *res;
+	int i;
+
+	if (strcmp(data, "full") == 0) {
+		seq_printf(s, "%u\n",
+			(qdrv_mac_reserve.max >= ARRAY_SIZE(qdrv_mac_reserve.entry)));
+		return;
+	}
+
+	seq_printf(s, "MAC address       Mask\n");
+	for (i = 0; i < qdrv_mac_reserve.max; i++) {
+		res = &qdrv_mac_reserve.entry[i];
+		seq_printf(s, "%pM %pM\n", res->addr, res->mask);
+	}
+}
+
+void qdrv_mac_reserve_init(struct qdrv_wlan *qw)
+{
+	qdrv_mac_reserve_clear();
+	qdrv_mac_reserve.qw = qw;
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_mac_reserve.h b/drivers/qtn/qdrv/qdrv_mac_reserve.h
new file mode 100644
index 0000000..303adff
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_mac_reserve.h
@@ -0,0 +1,32 @@
+/**
+  Copyright (c) 2008 - 2015 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_MAC_RESERVE_H
+#define _QDRV_MAC_RESERVE_H
+
+#include "qdrv_wlan.h"
+
+int __sram_text qdrv_mac_reserved(const uint8_t *addr);
+void qdrv_mac_reserve_clear(void);
+int qdrv_mac_reserve_set(const uint8_t *addr, const uint8_t *mask);
+void qdrv_mac_reserve_show(struct seq_file *s, void *data, u32 num);
+void qdrv_mac_reserve_init(struct qdrv_wlan *qw);
+
+#endif /* _QDRV_MAC_RESERVE_H */
diff --git a/drivers/qtn/qdrv/qdrv_math.c b/drivers/qtn/qdrv/qdrv_math.c
new file mode 100644
index 0000000..3217fef
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_math.c
@@ -0,0 +1,24 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#include <linux/string.h>
+#include <qtn/qtn_math.h>
+#include <qtn/qtn_math.inl>
+
diff --git a/drivers/qtn/qdrv/qdrv_module.c b/drivers/qtn/qdrv/qdrv_module.c
new file mode 100644
index 0000000..5dd0158
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_module.c
@@ -0,0 +1,360 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_control.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_muc_stats.h"
+
+#include <qtn/bootcfg.h>
+
+#include <asm/board/troubleshoot.h>
+
+/*
+ * Define boiler plate module stuff
+ */
+MODULE_DESCRIPTION("802.11 Wireless Driver");
+MODULE_AUTHOR("Quantenna Communications Inc., Mats Aretun");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+#define QDRV_DEV_NAME "qdrv"
+
+/* 3.1 times seems safe for MuC (or AuC) crash - found by trial and error */
+#define QDRV_CORE_DUMP_COMPRESS_RATIO	(310)
+
+struct qdrv_mac *qdrv_device_to_qdrv_mac(struct device *dev)
+{
+	struct qdrv_cb *qcb;
+	const char *name_of_dev = NULL;
+
+	if (dev == NULL) {
+		return NULL;
+	}
+
+	name_of_dev = dev_name(dev);
+
+	if (strcmp(name_of_dev, QDRV_DEV_NAME) != 0) {
+		return NULL;
+	}
+
+	qcb = (struct qdrv_cb *) dev_get_drvdata(dev);
+
+	return &(qcb->macs[0]);
+}
+
+static ssize_t qdrv_attr_control_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int count;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if((count = qdrv_control_output(dev, buf)) < 0)
+	{
+		DBGPRINTF_E("Failed to generate output\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(0);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(count);
+}
+
+static ssize_t qdrv_attr_control_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	if (count < 1) {
+		goto out;
+	}
+
+	qdrv_control_input(dev, (char *) buf, (unsigned int) count);
+
+out:
+	/*
+	* FIXME: the return value should reflect failure if qdrv_control_input
+	* above said so. However, busybox echo retries on failure, which
+	* is not what we expect, so report success here; the real fix
+	* belongs in busybox echo.
+	*/
+	return (ssize_t)count;
+}
+
+static DEVICE_ATTR(control, 0644,
+	qdrv_attr_control_show, qdrv_attr_control_store);
+
+/* flag to trigger kernel panic on MuC halt isr */
+static int panic_on_muc_halt = 1;
+
+static void qdrv_panic_on_muc_halt_init(void)
+{
+	char buf[32] = {0};
+	int dev_mode;
+
+	if (bootcfg_get_var("dev_mode", buf)) {
+		if (sscanf(buf, "=%d", &dev_mode) == 1) {
+			if (dev_mode) {
+				panic_on_muc_halt = 0;
+			}
+		}
+	}
+}
+
+static void muc_death_work(struct work_struct *work)
+{
+	printk(KERN_ERR"Dumping register differences\n");
+	qdrv_control_dump_active_hwreg();
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+
+	arc_save_to_sram_safe_area(QDRV_CORE_DUMP_COMPRESS_RATIO);
+#endif
+	/* panic if appropriate - will eventually restart the system by calling machine_restart() */
+	if (panic_on_muc_halt) {
+		panic("MuC has halted; panic_on_muc_halt is %d", panic_on_muc_halt);
+	}
+}
+
+static DECLARE_DELAYED_WORK(muc_death_wq, &muc_death_work);
+
+#define QTN_MUC_DEAD_TIMER	(QTN_MPROC_TIMEOUT - HZ) /* Must be less than the mproc timeout */
+
+void qdrv_muc_died_sysfs(void)
+{
+	const unsigned long delay_jiff = QTN_MUC_DEAD_TIMER;
+
+	schedule_delayed_work(&muc_death_wq, delay_jiff);
+}
+
+static ssize_t show_panic_on_muc_halt(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", panic_on_muc_halt);
+}
+
+static ssize_t store_panic_on_muc_halt(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	if (count >= 1)
+		panic_on_muc_halt = (buf[0] == '1');
+	return count;
+}
+
+static DEVICE_ATTR(panic_on_muc_halt, 0644, show_panic_on_muc_halt, store_panic_on_muc_halt);
+
+/* service entry points and corresponding attributes for PHY stats entries in sysfs */
+
+BIN_ATTR_ACCESS_DECL(qdrv_attr_rssi_phy_stats, filp, p_kobj, p_bin_attr, buf, offset, size)
+{
+	ssize_t	size_phy_stats = qdrv_muc_get_size_rssi_phy_stats();
+	struct qdrv_mac *mac =  qdrv_device_to_qdrv_mac((struct device *) p_bin_attr->private);
+	struct ieee80211com *ic = NULL;
+	struct qtn_stats *addr_rssi_phy_stats = NULL;
+
+	if (mac != NULL) {
+		struct qdrv_wlan *qw = mac->data;
+
+		if (qw != NULL) {
+			ic = &(qw->ic);
+		}
+	}
+
+	if (ic == NULL) {
+		return -1;
+	}
+
+	addr_rssi_phy_stats = qtn_muc_stats_get_addr_latest_stats(mac, ic, MUC_PHY_STATS_RSSI_RCPI_ONLY);
+	if (addr_rssi_phy_stats == NULL) {
+		return -1;
+	}
+
+	/*
+	 * avoid buffer overruns ...
+	 * i.e. for command "hexdump /sys/devices/qdrv/rssi_phy_stats",
+	 * size is only 16.
+	 */
+	if (size < size_phy_stats) {
+		size_phy_stats = size;
+	}
+
+	if (offset >= (loff_t) size_phy_stats) {
+		return 0;
+	}
+
+	memcpy(buf, addr_rssi_phy_stats, size_phy_stats);
+
+	return size_phy_stats;
+}
+
+static void qdrv_module_release(struct device *dev)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+static struct device qdrv_device =
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
+	.bus_id		= QDRV_DEV_NAME,
+#endif
+	.release	= qdrv_module_release,
+};
+
+struct device *qdrv_soc_get_addr_dev(void)
+{
+	return &qdrv_device;
+}
+
+static struct bin_attribute qdrv_show_rssi_phy_stats =
+{
+	.attr	 = { .name = "rssi_phy_stats", .mode = 0444 },
+	.private = (void *) &qdrv_device,
+	.read	 = qdrv_attr_rssi_phy_stats,
+	.write	 = NULL,
+	.mmap	 = NULL,
+};
+
+static int qdrv_module_create_sysfs_phy_stats(void)
+{
+	if (device_create_bin_file(&qdrv_device, &qdrv_show_rssi_phy_stats) != 0) {
+		DBGPRINTF_E("Failed to create rssi_phy_stats sysfs file \"%s\"\n",
+			qdrv_show_rssi_phy_stats.attr.name);
+		goto rssi_phy_stats_fail;
+	}
+
+	return 0;
+
+rssi_phy_stats_fail:
+	return -1;
+}
+
+static int __init qdrv_module_init(void)
+{
+	void *data;
+	int size;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Get the size of our private data structure */
+	size = qdrv_soc_cb_size();
+
+	/* Allocate (zero filled) private memory for our device context */
+	if ((data = kzalloc(size, GFP_KERNEL)) ==  NULL) {
+		DBGPRINTF_E("Failed to allocate %d bytes for private data\n", size);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-ENOMEM);
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+	dev_set_name(&qdrv_device, QDRV_DEV_NAME);
+#endif
+
+	/* Attach the private memory to the device */
+	dev_set_drvdata(&qdrv_device, data);
+
+	qdrv_panic_on_muc_halt_init();
+
+	if (device_register(&qdrv_device) != 0) {
+		DBGPRINTF_E("Failed to register \"%s\"\n", QDRV_DEV_NAME);
+		goto device_reg_fail;
+	}
+
+	if (device_create_file(&qdrv_device, &dev_attr_control) != 0) {
+		DBGPRINTF_E("Failed to create control sysfs file \"%s\"\n", QDRV_DEV_NAME);
+		goto control_sysfs_fail;
+	}
+
+	if (device_create_file(&qdrv_device, &dev_attr_panic_on_muc_halt) != 0)	{
+		DBGPRINTF_E("Failed to create panic_on_muc_halt sysfs file \"%s\"\n", QDRV_DEV_NAME);
+		goto muc_halt_sysfs_fail;
+	}
+
+	/* Initialize control interface */
+	if (qdrv_control_init(&qdrv_device) < 0) {
+		DBGPRINTF_E("Failed to initialize driver control interface\n");
+		goto control_init_fail;
+	}
+
+	if (qdrv_module_create_sysfs_phy_stats() < 0) {
+		/* no cleanup and no error reporting required on failure */
+		goto control_init_fail;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+
+control_init_fail:
+	device_remove_file(&qdrv_device, &dev_attr_panic_on_muc_halt);
+muc_halt_sysfs_fail:
+	device_remove_file(&qdrv_device, &dev_attr_control);
+control_sysfs_fail:
+	device_unregister(&qdrv_device);
+device_reg_fail:
+	kfree(data);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return -1;
+}
+
+static void __exit qdrv_module_exit(void)
+{
+	void *data;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Cleanup in reverse order */
+
+	/* Start with executing a stop command */
+	if (qdrv_control_input(&qdrv_device, "stop", 4) < 0) {
+		DBGPRINTF_E("Failed to execute stop command\n");
+	}
+
+	/* Exit the control interface */
+	if (qdrv_control_exit(&qdrv_device) < 0) {
+		DBGPRINTF_E("Failed to exit driver control interface\n");
+	}
+
+	/* Get the private device data */
+	data = dev_get_drvdata(&qdrv_device);
+
+	device_remove_bin_file(&qdrv_device, &qdrv_show_rssi_phy_stats);
+
+	device_remove_file(&qdrv_device, &dev_attr_panic_on_muc_halt);
+	device_remove_file(&qdrv_device, &dev_attr_control);
+
+	device_unregister(&qdrv_device);
+
+	/* Free the private memory */
+	kfree(data);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+module_init(qdrv_module_init);
+module_exit(qdrv_module_exit);
diff --git a/drivers/qtn/qdrv/qdrv_mu.c b/drivers/qtn/qdrv/qdrv_mu.c
new file mode 100644
index 0000000..dd95d5d
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_mu.c
@@ -0,0 +1,298 @@
+/**
+ * Copyright (c) 2012-2013 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/proc_fs.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_muc.h"
+#include "qdrv_hal.h"
+#include "qdrv_wlan.h"
+
+#include <qtn/txbf_mbox.h>
+
+
+#define QDRV_MU_PROC_FILENAME	"qdrv_mu"
+
+static struct qdrv_cb *qdrv_mu_qcb = NULL;
+
+static const char* status_str[] = {"Enabled", "Disabled", "Freezed", "Not used"};
+
+enum mu_grp_status {
+	MU_GRP_STR_EN	= 0,
+	MU_GRP_STR_DIS	= 1,
+	MU_GRP_STR_FRZ	= 2,
+	MU_GRP_STR_NUS	= 3,
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static int proc_qdrv_mu_stat_show(struct seq_file *sfile, void *v)
+{
+	struct qdrv_wlan *qw = qdrv_mu_qcb->macs[0].data;
+	struct ieee80211com *ic;
+	struct ieee80211vap *vap;
+	struct qtn_mu_grp_args mu_grp_tbl[IEEE80211_MU_GRP_NUM_MAX];
+	bool is_station = false;
+	int i;
+	int is_mu = 0;
+	int j;
+
+	if (qw == NULL)
+		return -EFAULT;
+
+	ic = &qw->ic;
+	if (ic == NULL)
+		return -EFAULT;
+
+	vap = TAILQ_FIRST(&qw->ic.ic_vaps);
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		is_station = true;
+
+	ieee80211_get_mu_grp(ic, &mu_grp_tbl[0]);
+
+	for (i = IEEE80211_VHT_GRP_1ST_BIT_OFFSET; i <= IEEE80211_VHT_GRP_MAX_BIT_OFFSET; i++) {
+		if (mu_grp_tbl[i].grp_id == i) {
+			is_mu = 1;
+
+
+			if (!is_station) {
+				enum mu_grp_status idx = MU_GRP_STR_DIS;
+
+				if (DSP_PARAM_GET(debug_flag) & MU_QMAT_FREEZE) {
+					idx = MU_GRP_STR_FRZ;
+				} else if (mu_grp_tbl[i].qmat_installed == MU_QMAT_ENABLED) {
+					idx = MU_GRP_STR_EN;
+				} else if (mu_grp_tbl[i].qmat_installed == MU_QMAT_NOT_USED) {
+					idx = MU_GRP_STR_NUS;
+				}
+				seq_printf(sfile, "GRP ID: %d update cnt %d", i, mu_grp_tbl[i].upd_cnt);
+				seq_printf(sfile, " %s\n", status_str[idx]);
+				seq_printf(sfile, "Rank: %d\n", mu_grp_tbl[i].rank);
+
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].aid); j++) {
+					if ( mu_grp_tbl[i].aid[j] != 0) {
+						seq_printf(sfile, "AID%d: 0x%04x ", j, mu_grp_tbl[i].aid[j]);
+					}
+				}
+				seq_printf(sfile, "\n");
+
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].ncidx); j++) {
+					if ( mu_grp_tbl[i].ncidx[j] != 0) {
+						seq_printf(sfile, "IDX%d:   %4d ", j, mu_grp_tbl[i].ncidx[j]);
+					}
+				}
+				seq_printf(sfile, "\n");
+
+				if(mu_grp_tbl[i].qmat_installed == MU_QMAT_ENABLED ||
+					mu_grp_tbl[i].qmat_installed == MU_QMAT_FREEZED) {
+					seq_printf(sfile, "u0_1ss_u1_1ss: 0x%x\n", mu_grp_tbl[i].u0_1ss_u1_1ss);
+					seq_printf(sfile, "u0_2ss_u1_1ss: 0x%x\n", mu_grp_tbl[i].u0_2ss_u1_1ss);
+					seq_printf(sfile, "u0_3ss_u1_1ss: 0x%x\n", mu_grp_tbl[i].u0_3ss_u1_1ss);
+					seq_printf(sfile, "u0_1ss_u1_2ss: 0x%x\n", mu_grp_tbl[i].u0_1ss_u1_2ss);
+					seq_printf(sfile, "u0_1ss_u1_3ss: 0x%x\n", mu_grp_tbl[i].u0_1ss_u1_3ss);
+					seq_printf(sfile, "u0_2ss_u1_2ss: 0x%x\n", mu_grp_tbl[i].u0_2ss_u1_2ss);
+#ifdef PEARL_PLATFORM
+					seq_printf(sfile, "u0_1ss_u1_1ss_u2_1ss_u3_1ss: 0x%x\n", mu_grp_tbl[i].u0_1ss_u1_1ss_u2_1ss_u3_1ss);
+#endif
+				}
+			} else {
+				seq_printf(sfile, "AP GRP ID: %d update cnt %d\n", i, mu_grp_tbl[i].upd_cnt);
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].aid); j++) {
+					if ( mu_grp_tbl[i].aid[j] != 0) {
+						seq_printf(sfile, "User pos = %d with AID = 0x%04x\n", j, mu_grp_tbl[i].aid[j]);
+					}
+				}
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].ncidx); j++) {
+					if ( mu_grp_tbl[i].ncidx[j] != 0) {
+						seq_printf(sfile, "Local node index (Idx) = %d\n", mu_grp_tbl[i].ncidx[j]);
+					}
+				}
+			}
+		}
+	}
+
+	if (!is_mu) {
+		seq_printf(sfile, "No MU groups found\n");
+	}
+
+	return 0;
+}
+
+static int proc_qdrv_mu_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_qdrv_mu_stat_show, NULL);
+}
+
+static const struct file_operations proc_qdrv_mu_stat_ops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_qdrv_mu_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#else
+static int qdrv_mu_stat_rd(char *page, char **start, off_t offset,
+		int count, int *eof, void *data)
+{
+	struct qdrv_wlan *qw = qdrv_mu_qcb->macs[0].data;
+	bool is_station = false;
+
+	if (qw == NULL) {
+		return -EFAULT;
+	}
+
+	struct ieee80211vap* vap = TAILQ_FIRST(&qw->ic.ic_vaps);
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		is_station = true;
+	}
+	struct ieee80211com* ic = &qw->ic;
+
+	if (ic == NULL) {
+		return -EFAULT;
+	}
+
+	char *p = page;
+	struct qtn_mu_grp_args mu_grp_tbl[IEEE80211_MU_GRP_NUM_MAX];
+
+	ieee80211_get_mu_grp(ic, &mu_grp_tbl[0]);
+
+	int i, is_mu = 0;
+	for (i = IEEE80211_VHT_GRP_1ST_BIT_OFFSET; i <= IEEE80211_VHT_GRP_MAX_BIT_OFFSET; i++) {
+		if (mu_grp_tbl[i].grp_id == i) {
+			is_mu = 1;
+			int j;
+
+			if (!is_station)
+			{
+				enum mu_grp_status idx = MU_GRP_STR_DIS;
+
+				if (DSP_PARAM_GET(debug_flag) & MU_QMAT_FREEZE) {
+					idx = MU_GRP_STR_FRZ;
+				} else if (mu_grp_tbl[i].qmat_installed == MU_QMAT_ENABLED) {
+					idx = MU_GRP_STR_EN;
+				} else if (mu_grp_tbl[i].qmat_installed == MU_QMAT_NOT_USED) {
+					idx = MU_GRP_STR_NUS;
+				}
+
+				p += sprintf(p, "GRP ID: %d update cnt %d", i, mu_grp_tbl[i].upd_cnt);
+				p += sprintf(p, " %s\n", status_str[idx]);
+				p += sprintf(p, "Rank: %d\n", mu_grp_tbl[i].rank);
+
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].aid); j++) {
+					if ( mu_grp_tbl[i].aid[j] != 0) {
+						p += sprintf(p, "AID%d: 0x%04x ", j, mu_grp_tbl[i].aid[j]);
+					}
+				}
+
+				p += sprintf(p, "\n");
+
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].ncidx); j++) {
+					if ( mu_grp_tbl[i].ncidx[j] != 0) {
+						p += sprintf(p, "IDX%d:   %4d ", j, mu_grp_tbl[i].ncidx[j]);
+					}
+				}
+
+				p += sprintf(p, "\n");
+
+				if(mu_grp_tbl[i].qmat_installed == MU_QMAT_ENABLED ||
+					mu_grp_tbl[i].qmat_installed == MU_QMAT_FREEZED) {
+					p += sprintf(p, "u0_1ss_u1_1ss: 0x%x\n", mu_grp_tbl[i].u0_1ss_u1_1ss);
+					p += sprintf(p, "u0_2ss_u1_1ss: 0x%x\n", mu_grp_tbl[i].u0_2ss_u1_1ss);
+					p += sprintf(p, "u0_3ss_u1_1ss: 0x%x\n", mu_grp_tbl[i].u0_3ss_u1_1ss);
+					p += sprintf(p, "u0_1ss_u1_2ss: 0x%x\n", mu_grp_tbl[i].u0_1ss_u1_2ss);
+					p += sprintf(p, "u0_1ss_u1_3ss: 0x%x\n", mu_grp_tbl[i].u0_1ss_u1_3ss);
+					p += sprintf(p, "u0_2ss_u1_2ss: 0x%x\n", mu_grp_tbl[i].u0_2ss_u1_2ss);
+				}
+			}
+			else
+			{
+				p += sprintf(p, "AP GRP ID: %d update cnt %d\n", i, mu_grp_tbl[i].upd_cnt);
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].aid); j++) {
+					if ( mu_grp_tbl[i].aid[j] != 0) {
+						p += sprintf(p, "User pos = %d with AID = 0x%04x\n", j, mu_grp_tbl[i].aid[j]);
+					}
+				}
+				for (j = 0; j < ARRAY_SIZE(mu_grp_tbl[i].ncidx); j++) {
+					if ( mu_grp_tbl[i].ncidx[j] != 0) {
+						p += sprintf(p, "Local node index (Idx) = %d\n", mu_grp_tbl[i].ncidx[j]);
+					}
+				}
+			}
+		}
+	}
+
+	if (!is_mu) {
+		p += sprintf(p, "No MU groups found\n");
+	}
+
+	return p - page;
+}
+#endif
+
+int qdrv_mu_stat_init(struct qdrv_cb *qcb)
+{
+	printk("qdrv_mu_stat_init\n");
+
+	if (qcb == NULL) {
+		printk("qdrv_mu_stat_init: NULL qcb\n");
+		return -EFAULT;
+	}
+
+	qdrv_mu_qcb = qcb;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (!proc_create_data(QDRV_MU_PROC_FILENAME, 0, NULL,
+			      &proc_qdrv_mu_stat_ops, NULL)) {
+		return -EEXIST;
+	}
+#else
+	if (!create_proc_read_entry(QDRV_MU_PROC_FILENAME, 0,
+				NULL, qdrv_mu_stat_rd, NULL)) {
+		return -EEXIST;
+	}
+
+#endif
+
+	return 0;
+}
+
+int qdrv_mu_stat_exit(struct qdrv_cb *qcb)
+{
+	if (qdrv_mu_qcb != NULL) {
+		remove_proc_entry(QDRV_MU_PROC_FILENAME, 0);
+	}
+
+	return 0;
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_mu.h b/drivers/qtn/qdrv/qdrv_mu.h
new file mode 100644
index 0000000..10813cd
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_mu.h
@@ -0,0 +1,27 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_MU_H
+#define _QDRV_MU_H
+
+int qdrv_mu_stat_init(struct qdrv_cb *qcb);
+int qdrv_mu_stat_exit(struct qdrv_cb *qcb);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_muc.c b/drivers/qtn/qdrv/qdrv_muc.c
new file mode 100644
index 0000000..c6059f4
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_muc.c
@@ -0,0 +1,146 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_muc.h"
+#include "qdrv_hal.h"
+#include "qdrv_wlan.h"
+#include "qdrv_fw.h"
+#include <asm/board/board_config.h>
+#include <asm/board/gpio.h>
+
+static int
+adjust_muc_firmware_path( char *muc_firmware, size_t max_firmware_len )
+{
+	int      retval = 0;
+	enum {
+		SPI_shift_count = 7,
+		RFIC_prj_shift_count = 5, 
+           	RFIC_prj_mask = 0x07,
+		RFIC3_value = 0,
+		RFIC4_value = 1
+	};
+
+	const char      *default_suffix =".bin";
+	const char      *RFIC3_suffix =".bin";
+	const char      *RFIC4_suffix =".RFIC4.bin";
+
+#ifdef FIXME_NOW
+	u32		*RFIC_ver_addr = ioremap_nocache(RFIC_VERSION, 4);
+	u32		 RFIC_ver_val = *RFIC_ver_addr;
+	u32		 RFIC_prj_val = *RFIC_ver_addr;
+#else
+	u32 RFIC_prj_val = RFIC3_value;
+#endif
+	char		*tmpaddr = strstr( muc_firmware, default_suffix );
+	unsigned int	 cur_firmware_len  = 0;
+
+	if (tmpaddr != NULL)
+	  *tmpaddr = '\0';
+
+	cur_firmware_len = strnlen( muc_firmware, max_firmware_len );
+	if (cur_firmware_len + strlen( RFIC4_suffix ) >= max_firmware_len)
+	  return( -1 );
+
+#ifdef FIXME_NOW
+	RFIC_ver_val = RFIC_ver_val >> SPI_shift_count;
+	RFIC_prj_val = ((RFIC_ver_val >> RFIC_prj_shift_count) & RFIC_prj_mask);
+#endif
+
+	switch (RFIC_prj_val) {
+	case RFIC3_value:
+		strcat( muc_firmware, RFIC3_suffix );
+		break;
+#ifdef FIXME_NOW
+	case RFIC4_value:
+		strcat( muc_firmware, RFIC4_suffix );
+		break;
+
+	default:
+		retval = -1;
+		break;
+#endif
+	}
+
+	return( retval );
+}
+
+int qdrv_muc_init(struct qdrv_cb *qcb)
+{
+	u32 muc_start_addr = 0;
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (get_bootcfg_scancnt() == 0) {
+#ifndef TOPAZ_AMBER_IP
+		gpio_config(RUBY_GPIO_LNA_TOGGLE, RUBY_GPIO_ALT_OUTPUT);
+#else
+		/*
+		 * In Amber GPIO pins are not shared. No need to set up alternate function.
+		 */
+#endif
+	}
+
+	if (adjust_muc_firmware_path( qcb->muc_firmware, sizeof( qcb->muc_firmware )) < 0) {
+		DBGPRINTF_E( "Adjusting firmware path failed\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return( -1 );
+	}
+
+	if (qdrv_fw_load_muc(qcb->dev, qcb->muc_firmware, &muc_start_addr) < 0) {
+		DBGPRINTF_E( "Failed to load firmware\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+#ifdef FIXME_NOW
+	hal_rf_enable();
+#endif
+
+	hal_enable_muc(muc_start_addr);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_muc_exit(struct qdrv_cb *qcb)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	hal_disable_muc();
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
diff --git a/drivers/qtn/qdrv/qdrv_muc.h b/drivers/qtn/qdrv/qdrv_muc.h
new file mode 100644
index 0000000..c770331
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_muc.h
@@ -0,0 +1,27 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_MUC_H
+#define _QDRV_MUC_H
+
+int qdrv_muc_init(struct qdrv_cb *qcb);
+int qdrv_muc_exit(struct qdrv_cb *qcb);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_muc_stats.c b/drivers/qtn/qdrv/qdrv_muc_stats.c
new file mode 100644
index 0000000..da4443f
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_muc_stats.c
@@ -0,0 +1,1363 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include "qdrv_txbf.h"
+#include <qtn/qtn_math.h>
+#include "qdrv_muc_stats.h"
+#include <qtn/txbf_common.h>
+#include <qtn/registers.h>
+#include <qtn/muc_phy_stats.h>
+
+#define PHY_STATS_SUM_EVM  1
+#define TIME_AVERAGE_PHY_STATS   1
+#define PHY_STATS_PHY_DATE_RATE  1
+
+/* valid only when Y is a power of 2, redefine more generally */
+#define MOD(X,Y) ((X)&(Y-1))
+
+static struct qtn_stats_log host_log;
+static unsigned int last_tstamp = 0;
+#ifdef TIME_AVERAGE_PHY_STATS
+
+#define QDRV_MUC_STATS_RATE_TABLE_TO_MBPS	10
+
+#endif
+
+#ifdef PHY_STATS_PHY_DATE_RATE
+#define UNSUPPORTED_RATE	0
+#define MAX_BW_SIZE 2
+#define MAX_GI_SIZE 2
+#define MAX_MCS_SIZE 77 // index : 0~76
+
+enum muc_stats_opt {
+	MSO_DEF		= 0,
+	MSO_BOTH	= 1,
+	MSO_SU		= 2,
+	MSO_MU		= 3,
+};
+
+/*
+ * HT (11n) MCS -> PHY rate lookup, in units of 100 kbps (e.g. 65 == 6.5 Mbps).
+ * Flattened 3-D table indexed as [bw][gi][mcs]: see qdrv_muc_stats_rate_lut(),
+ * which computes mcs + gi*MAX_MCS_SIZE + bw*MAX_GI_SIZE*MAX_MCS_SIZE.
+ * MCS 32 is 0 (unsupported) at 20 MHz but valid (60/67) at 40 MHz.
+ */
+const uint16_t mcs_rate_table[MAX_BW_SIZE * MAX_GI_SIZE * MAX_MCS_SIZE] = {
+
+  //=========20Mhz with long GI from MCS 0 ~ 15===============
+   65,  130,  195,  260,  390,  520,  585,  650,
+  130,  260,  390,  520,  780, 1040, 1170, 1300,
+
+  //20Mhz with long GI from MCS 16 ~ 31
+  195,  390,  585,  780, 1170, 1560, 1755, 1950,
+  260,  520,  780, 1040, 1560, 2080, 2340, 2600,
+
+  //20Mhz with long GI from MCS 32 ~ 47
+    0,  390,  520,  650,  585,  780,  975,  520,
+  650,  650,  780,  910,  910, 1040,  780,  975,
+
+  //20Mhz with long GI from MCS 48 ~ 63
+  975,  1170,  1365,  1365,  1560,  650,  780,  910,
+  780,   910,  1040,  1170,  1040, 1170, 1300, 1300,
+
+  //20Mhz with long GI from MCS 64 ~ 76
+  1430,  975,  1170,  1365,  1170, 1365, 1560, 1755,  1560,  1755,  1950,  1950,  2145,
+
+
+  //==========20Mhz with short GI from MCS 0 ~ 15==============
+   72,  144,  217,   289,  433,   578,   650,   722,
+  144,  289,  433,   578,  867,  1156,  1300,  1444,
+
+  //20Mhz with short GI from MCS 16 ~ 31
+  217,  433,  650,   867,  1300,  1733,  1950,  2167,
+  289,  578,  867,  1156,  1733,  2311,  2600,  2889,
+
+  //20Mhz with short GI from MCS 32 ~ 47
+    0,  433,  578,   722,   650,   867,  1083,   578,
+  722,  722,  867,  1011,  1011,  1156,   867,  1083,
+
+  //20Mhz with short GI from MCS 48 ~ 63
+  1083,  1300,  1517,  1517,  1733,  722,  867,  1011,
+   867,  1011,  1156,  1300,  1156, 1300, 1444,  1444,
+
+  //20Mhz with short GI from MCS 64 ~ 76
+  1589,  1083,  1300,  1517,  1300, 1517, 1733, 1950,  1733,  1950,  2167,  2167,  2383,
+
+
+  //======40Mhz with long GI from MCS 0 ~ 15==================
+  135,  270,   405,   540,   810,  1080,  1215,  1350,
+  270,  540,   810,  1080,  1620,  2160,  2430,  2700,
+
+  //40Mhz with long GI from MCS 16 ~ 31
+  405,  810,  1215,  1620,  2430,  3240,  3645,  4050,
+  540, 1080,  1620,  2160,  3240,  4320,  4860,  5400,
+
+  //40Mhz with long GI from MCS 32 ~ 47
+    60,  810,  1080,  1350, 1215,  1620,  2025,  1080,
+  1350, 1350,  1620,  1890, 1890,  2160,  1620,  2025,
+
+  //40Mhz with long GI from MCS 48 ~ 63
+  2025,  2430,  2835,  2835,  3240,  1350,  1620,  1890,
+  1620,  1890,  2160,  2430,  2160,  2430,  2700,  2700,
+
+  //40Mhz with long GI from MCS 64 ~ 76
+  2970,  2025,  2430,  2835,  2430, 2835, 3240, 3645,  3240,  3645,  4050,  4050,  4455,
+
+
+  //========40Mhz with short GI from MCS 0 ~ 15================
+  150,  300,   450,   600,   900,  1200,  1350,  1500,
+  300,  600,   900,  1200,  1800,  2400,  2700,  3000,
+
+  //40Mhz with short GI from MCS 16 ~ 31
+  450,  900,  1350,  1800,  2700,  3600,  4050,  4500,
+  600, 1200,  1800,  2400,  3600,  4800,  5400,  6000,
+
+  //40Mhz with short GI from MCS 32 ~ 47
+    67,  900,  1200,  1500,  1350,  1800,  2250,  1200,
+  1500, 1500,  1800,  2100,  2100,  2400,  1800,  2250,
+
+  //40Mhz with short GI from MCS 48 ~ 63
+  2250,  2700,  3150,  3150,  3600,  1500,  1800,  2100,
+  1800,  2100,  2400,  2700,  2400,  2700,  3000,  3000,
+
+  //40Mhz with short GI from MCS 64 ~ 76
+  3300,  2250,  2700,  3150,  2700, 3150, 3600, 4050,  3600,  4050,  4500,  4500,  4950
+};
+
+#ifdef QDRV_FEATURE_VHT
+#define VHT_MAX_BW_SIZE	4
+#define VHT_MAX_GI_SIZE	2
+#define VHT_MAX_NSS_SIZE	4
+#define VHT_MAX_MCS_SIZE	10
+/* rate in uint 100kbps */
+/*
+ * VHT (11ac) MCS -> PHY rate lookup. Flattened 4-D table indexed as
+ * [bw][gi][nss][mcs]; see qdrv_muc_stats_vhtrate_lut() for the index math.
+ * UNSUPPORTED_RATE (0) marks BW/Nss/MCS combinations the standard excludes.
+ */
+const uint16_t vht_rate_table[VHT_MAX_BW_SIZE * VHT_MAX_GI_SIZE *
+                              VHT_MAX_NSS_SIZE * VHT_MAX_MCS_SIZE] = {
+	/* 20MHz, Long GI, Nss = 1, MCS 0 ~ 9 */
+	65, 130, 195, 260, 390, 520, 585, 650, 780, UNSUPPORTED_RATE,
+	/* 20MHz, Long GI, Nss = 2, MCS 0 ~ 9 */
+	130, 260, 390, 520, 780, 1040, 1170, 1300, 1560, UNSUPPORTED_RATE,
+	/* 20MHz, Long GI, Nss = 3, MCS 0 ~ 9 */
+	195, 390, 585, 780, 1170, 1560, 1755, 1950, 2340, 2600,
+	/* 20MHz, Long GI, Nss = 4, MCS 0 ~ 9 */
+	260, 520, 780, 1040, 1560, 2080, 2340, 2600, 3120, UNSUPPORTED_RATE,
+
+	/* 20MHz, Short GI, Nss = 1, MCS 0 ~ 9 */
+	72, 144, 217, 289, 433, 578, 650, 722, 867, UNSUPPORTED_RATE,
+	/* 20MHz, Short GI, Nss = 2, MCS 0 ~ 9 */
+	144, 289, 433, 578, 867, 1156, 1300, 1444, 1733, UNSUPPORTED_RATE,
+	/* 20MHz, Short GI, Nss = 3, MCS 0 ~ 9 */
+	217, 433, 650, 867, 1300, 1733, 1950, 2167, 2600, 2889,
+	/* 20MHz, Short GI, Nss = 4, MCS 0 ~ 9 */
+	289, 578, 867, 1156, 1733, 2311, 2600, 2889, 3467, UNSUPPORTED_RATE,
+
+	/* 40MHz, Long GI, Nss = 1, MCS 0 ~ 9 */
+	135, 270, 405, 540, 810, 1080, 1215, 1350, 1620, 1800,
+	/* 40MHz, Long GI, Nss = 2, MCS 0 ~ 9 */
+	270, 540, 810, 1080, 1620, 2160, 2430, 2700, 3240, 3600,
+	/* 40MHz, Long GI, Nss = 3, MCS 0 ~ 9 */
+	405, 810, 1215, 1620, 2430, 3240, 3645, 4050, 4860, 5400,
+	/* 40MHz, Long GI, Nss = 4, MCS 0 ~ 9 */
+	540, 1080, 1620, 2160, 3240, 4320, 4860, 5400, 6480, 7200,
+
+	/* 40MHz, Short GI, Nss = 1, MCS 0 ~ 9 */
+	150, 300, 450, 600, 900, 1200, 1350, 1500, 1800, 2000,
+	/* 40MHz, Short GI, Nss = 2, MCS 0 ~ 9 */
+	300, 600, 900, 1200, 1800, 2400, 2700, 3000, 3600, 4000,
+	/* 40MHz, Short GI, Nss = 3, MCS 0 ~ 9 */
+	450, 900, 1350, 1800, 2700, 3600, 4050, 4500, 5400, 6000,
+	/* 40MHz, Short GI, Nss = 4, MCS 0 ~ 9 */
+	600, 1200, 1800, 2400, 3600, 4800, 5400, 6000, 7200, 8000,
+
+	/* 80MHz, Long GI, Nss = 1, MCS 0 ~ 9 */
+	293, 585, 878, 1170, 1755, 2340, 2633, 2925, 3510, 3900,
+	/* 80MHz, Long GI, Nss = 2, MCS 0 ~ 9 */
+	585, 1170, 1755, 2340, 3510, 4680, 5265, 5850, 7020, 7800,
+	/* 80MHz, Long GI, Nss = 3, MCS 0 ~ 9 */
+	878, 1755, 2633, 3510, 5265, 7020, UNSUPPORTED_RATE, 8775, 10530, 11700,
+	/* 80MHz, Long GI, Nss = 4, MCS 0 ~ 9 */
+	1170, 2340, 3510, 4680, 7020, 9360, 10530, 11700, 14040, 15600,
+
+	/* 80MHz, Short GI, Nss = 1, MCS 0 ~ 9 */
+	325, 650, 975, 1300, 1950, 2600, 2925, 3250, 3900, 4333,
+	/* 80MHz, Short GI, Nss = 2, MCS 0 ~ 9 */
+	650, 1300, 1950, 2600, 3900, 5200, 5850, 6500, 7800, 8667,
+	/* 80MHz, Short GI, Nss = 3, MCS 0 ~ 9 */
+	975, 1950, 2925, 3900, 5850, 7800, UNSUPPORTED_RATE, 9750, 11700, 13000,
+	/* 80MHz, Short GI, Nss = 4, MCS 0 ~ 9 */
+	1300, 2600, 3900, 5200, 7800, 10400, 11700, 13000, 15600, 17333,
+
+	/* 160MHz, Long GI, Nss = 1, MCS 0 ~ 9 */
+	585, 1170, 1755, 2340, 3510, 4680, 5265, 5850, 7020, 7800,
+	/* 160MHz, Long GI, Nss = 2, MCS 0 ~ 9 */
+	1170, 2340, 3510, 4680, 7020, 9360, 10530, 11700, 14040, 15600,
+	/* 160MHz, Long GI, Nss = 3, MCS 0 ~ 9 */
+	1755, 3510, 5265, 7020, 10530, 14040, 15795, 17550, 21060, UNSUPPORTED_RATE,
+	/* 160MHz, Long GI, Nss = 4, MCS 0 ~ 9 */
+	2340, 4680, 7020, 9360, 14040, 18720, 21060, 23400, 28080, 31200,
+
+	/* 160MHz, Short GI, Nss = 1, MCS 0 ~ 9 */
+	650, 1300, 1950, 2600, 3900, 5200, 5850, 6500, 7800, 8667,
+	/* 160MHz, Short GI, Nss = 2, MCS 0 ~ 9 */
+	1300, 2600, 3900, 5200, 7800, 10400, 11700, 13000, 15600, 17333,
+	/* 160MHz, Short GI, Nss = 3, MCS 0 ~ 9 */
+	1950, 3900, 5850, 7800, 11700, 15600, 17550, 19500, 23400, UNSUPPORTED_RATE,
+	/* 160MHz, Short GI, Nss = 4, MCS 0 ~ 9 */
+	2600, 5200, 7800, 10400, 15600, 20800, 23400, 26000, 31200, 34667
+};
+#endif
+
+#endif
+
+
+#define  QDRV_MUC_STATS_DB_VALUE_LENGTH		8
+#define  QDRV_MUC_STATS_DAGC_SHIFT_UP_M		0x00380000
+#define  QDRV_MUC_STATS_DAGC_SHIFT_UP_S		19
+#define  QDRV_MUC_STATS_DAGC_SHIFT_DOWN_M	0x00070000
+#define  QDRV_MUC_STATS_DAGC_SHIFT_DOWN_S	16
+
+#define  QDRV_MUC_STATS_DAGC_SHIFT_FIELD_M	0x00ff0000
+#define  QDRV_MUC_STATS_DAGC_SHIFT_FIELD_S	16
+
+#define  MIN_RCPI_VALUE				-1000
+
+/* Tag to filter(grep) statistics in csv format */
+#define STAT_CSV_TAG	"*CSV_STAT*"
+
+/*
+ * Look up the HT PHY rate (in 100 kbps units) for a bw/GI/MCS triple.
+ * Returns 0 when any index is out of range or the combination is
+ * unsupported (table entry 0).
+ */
+static uint32_t qdrv_muc_stats_rate_lut(uint8_t bw, uint8_t sgi, uint8_t mcs)
+{
+	int index;
+	uint32_t rate = 0;
+
+	/* Bounds-check all three indices before flattening into the 1-D table */
+	if (bw < MAX_BW_SIZE && sgi < MAX_GI_SIZE && mcs < MAX_MCS_SIZE) {
+		index = mcs + (sgi * MAX_MCS_SIZE) + (bw * MAX_GI_SIZE * MAX_MCS_SIZE);
+		rate = (uint32_t)mcs_rate_table[index];
+	}
+
+	return rate;
+}
+
+/*
+ * Look up the VHT PHY rate (in 100 kbps units) for a bw/GI/MCS/Nss tuple.
+ * Returns UNSUPPORTED_RATE (0) for out-of-range indices, for combinations
+ * the table marks unsupported, or always when QDRV_FEATURE_VHT is not built.
+ */
+static uint32_t qdrv_muc_stats_vhtrate_lut(uint8_t bw, uint8_t sgi, uint8_t mcs, uint8_t nss)
+{
+#ifdef QDRV_FEATURE_VHT
+	int index;
+	uint32_t rate = UNSUPPORTED_RATE;
+
+	if (bw < VHT_MAX_BW_SIZE && sgi < VHT_MAX_GI_SIZE && nss < VHT_MAX_NSS_SIZE
+			&& mcs < VHT_MAX_MCS_SIZE) {
+		/* Flatten [bw][sgi][nss][mcs] into the 1-D vht_rate_table */
+		index = mcs + (nss * VHT_MAX_MCS_SIZE) + (sgi * VHT_MAX_NSS_SIZE * VHT_MAX_MCS_SIZE) +
+				(bw * VHT_MAX_GI_SIZE * VHT_MAX_NSS_SIZE * VHT_MAX_MCS_SIZE);
+		rate = (uint32_t)vht_rate_table[index];
+	}
+
+	return rate;
+#else
+	return UNSUPPORTED_RATE;
+#endif
+}
+
+/* The returned rate has unit kbps */
+/*
+ * Convert MCS parameters to a PHY rate in kbps: dispatches to the VHT or HT
+ * lookup table (both store 100 kbps units, hence the * 100). For HT, nss is
+ * ignored — it is encoded in the HT MCS index itself.
+ */
+uint32_t qdrv_muc_stats_mcs_to_phyrate(uint8_t bw, uint8_t sgi, uint8_t mcs,
+			uint8_t nss, uint8_t vht)
+{
+	if (vht)
+		return qdrv_muc_stats_vhtrate_lut(bw, sgi, mcs, nss) * 100;
+	else
+		return qdrv_muc_stats_rate_lut(bw, sgi, mcs) * 100;
+}
+
+/*
+ * Decide which metric class to display for this stats block.
+ * In RSSI/RCPI-only mode, alternate RSSI/RCPI on odd/even timestamps;
+ * otherwise let qtn_select_rssi_over_error_sums() pick RSSI vs. EVM
+ * based on the timestamp and the current phy-stats mode.
+ */
+enum qdrv_muc_stats_display_choice qdrv_muc_stats_get_display_choice(const struct qtn_stats *stats,
+		const struct ieee80211com *ic)
+{
+	enum qdrv_muc_stats_display_choice display_choice = QDRV_MUC_STATS_SHOW_RSSI;
+
+	if (ic->ic_mode_get_phy_stats == MUC_PHY_STATS_RSSI_RCPI_ONLY) {
+		/* Odd tstamp -> RSSI, even tstamp -> RCPI */
+		display_choice = (stats->tstamp & 0x01) ? QDRV_MUC_STATS_SHOW_RSSI : QDRV_MUC_STATS_SHOW_RCPI;
+	} else {
+		if (qtn_select_rssi_over_error_sums(stats->tstamp, ic->ic_mode_get_phy_stats)) {
+			display_choice = QDRV_MUC_STATS_SHOW_RSSI;
+		} else {
+			display_choice = QDRV_MUC_STATS_SHOW_EVM;
+		}
+	}
+
+	return(display_choice);
+}
+
+/*
+ * Extract the net DAGC shift (shift-up minus shift-down) from the packed
+ * rx_gain_fields word: bits 21:19 hold shift-up, bits 18:16 hold shift-down.
+ */
+static int qtn_muc_stats_get_dagc_shift_count(u_int32_t rx_gain_fields)
+{
+	int dagc_shift_up =
+			(rx_gain_fields & QDRV_MUC_STATS_DAGC_SHIFT_UP_M) >> QDRV_MUC_STATS_DAGC_SHIFT_UP_S;
+	int dagc_shift_down =
+			(rx_gain_fields & QDRV_MUC_STATS_DAGC_SHIFT_DOWN_M) >> QDRV_MUC_STATS_DAGC_SHIFT_DOWN_S;
+
+	return(dagc_shift_up - dagc_shift_down);
+}
+
+/*
+ * Display RSSI/EVM stats of the node with the most packets
+ */
+/*
+ * Pick the busiest node (largest rx+tx packet count in the SU/MU bucket
+ * selected by 'mu') from the node table. Returns a referenced node (caller
+ * must ieee80211_free_node()) or NULL if the table is empty. The
+ * 'found == NULL' clause guarantees a node is chosen even when all counts
+ * are zero.
+ */
+static struct ieee80211_node *qtn_muc_stats_get_node(struct ieee80211com *ic, int mu)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+	struct ieee80211_node *found = NULL;
+	unsigned long max_pkts = 0;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		unsigned long pkts = ni->ni_shared_stats->rx[mu].pkts + ni->ni_shared_stats->tx[mu].pkts;
+		if (pkts > max_pkts || found == NULL) {
+			found = ni;
+			max_pkts = pkts;
+		}
+	}
+
+	/* Take a reference while still under the node-table lock */
+	if (found)
+		ieee80211_ref_node(found);
+
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return found;
+}
+
+/*
+ * Format hw_noise (stored in 0.1 dBm units) as "ddd.d" into buf, or
+ * "  -inf" when it equals the MIN_RCPI_VALUE sentinel. buf must hold at
+ * least QDRV_MUC_STATS_DB_VALUE_LENGTH bytes (callers pass such buffers).
+ */
+static void qtn_muc_stats_fmt_hw_noise(char *buf, const struct qtn_rx_stats *rxstats)
+{
+	int noise_dbm;
+	int int_val;
+	int fract_val;
+
+	if (rxstats->hw_noise != MIN_RCPI_VALUE) {
+		noise_dbm = rxstats->hw_noise;
+		int_val =  noise_dbm / 10;
+		/* ABS keeps the fractional digit positive for negative readings */
+		fract_val = ABS(noise_dbm) % 10;
+
+		sprintf(buf, "%4d.%d", int_val, fract_val);
+	} else {
+		strcpy(buf, "  -inf");
+	}
+}
+
+/* Column header plus matching human-readable and CSV row formats; the three
+ * strings must stay field-for-field aligned with the printk() call below. */
+static const char g_header_line[]       = KERN_DEBUG "Tstamp"" SU/MU"" RxMPDU"" AMSDU"" RxG""   CRC""  Noise"" TxFrame"" Defers"" Touts"" Retries"" ShPmbl""  LgPmbl"" Scale" " " " MCS-(TX/RX)""  RSSI / RCPI / EVM\n";
+static const char g_stat_comm_fmt[]     = KERN_DEBUG    "%6d"   "%6s"   "%7d"   "%6d" "%4d"   "%6d"    "%7s"   "%8d"    "%7d"   "%6d"     "%8d"    "%7d"     "%8d"   "%6d"    " ";
+static const char g_stat_comm_csv_fmt[] =               "%d,"   "%s,"   "%d,"   "%d," "%d,"   "%d,"    "%s,"   "%d,"    "%d,"   "%d,"     "%d,"    "%d,"     "%d,"   "%d,";
+
+/*
+ * Print the per-interval counters common to all display choices (rx/tx
+ * packet counts, gain, CRC errors, noise, retries, preamble failures, tx
+ * scale). 'mu' selects the SU (0) or MU (1) stats set; csv_format selects
+ * the CSV row format. No trailing newline — a display_*_only() call
+ * completes the line.
+ */
+static void qtn_muc_stats_display_common(const struct qtn_stats *stats,
+		int mu, int csv_format)
+{
+	/* Index 0 = SU stats, index 1 = MU stats */
+	const struct qtn_rx_stats *rxstats[] = {&stats->rx_phy_stats, &stats->mu_rx_phy_stats};
+	const struct qtn_tx_stats *txstats[] = {&stats->tx_phy_stats, &stats->mu_tx_phy_stats};
+
+	char noise_strs[QDRV_MUC_STATS_DB_VALUE_LENGTH];
+
+	qtn_muc_stats_fmt_hw_noise(noise_strs, rxstats[mu]);
+
+	printk(csv_format ? g_stat_comm_csv_fmt : g_stat_comm_fmt,
+		stats->tstamp,
+		(mu) ? "MU" : "SU",
+		rxstats[mu]->num_pkts,
+		rxstats[mu]->num_amsdu,
+		rxstats[mu]->avg_rxgain,
+		rxstats[mu]->cnt_mac_crc,
+		noise_strs,
+		txstats[mu]->num_pkts,
+		txstats[mu]->num_defers,
+		txstats[mu]->num_timeouts,
+		txstats[mu]->num_retries,
+		rxstats[mu]->cnt_sp_fail,
+		rxstats[mu]->cnt_lp_fail,
+		txstats[mu]->last_tx_scale);
+}
+
+/*
+ * Print per-chain RSSI for node ni plus last tx/rx MCS/phy-rate, finishing
+ * the stats line started by qtn_muc_stats_display_common(). Indices
+ * 0..NUM_ANT-1 are individual chains; index NUM_ANT is the average — hence
+ * the inclusive '<=' loop bound. A zero reading prints as "-inf".
+ */
+static void qtn_muc_stats_display_rssi_only(struct ieee80211_node *ni, int mu, int csv_format)
+{
+	int iter;
+	char db_strs[NUM_ANT + 1][QDRV_MUC_STATS_DB_VALUE_LENGTH];
+	const struct qtn_node_shared_stats_rx *node_rxstats = &ni->ni_shared_stats->rx[mu];
+	const struct qtn_node_shared_stats_tx *node_txstats= &ni->ni_shared_stats->tx[mu];
+
+	/* RSSIs in dBM (units are actually 0.1 dBM) ... */
+	for (iter = 0; iter <= NUM_ANT; iter++) {
+		int rssi_dbm = node_rxstats->last_rssi_dbm[iter];
+		if (rssi_dbm) {
+			int int_val = rssi_dbm / 10;
+			int fract_val = ABS(rssi_dbm) % 10;
+			snprintf(&db_strs[iter][0],
+					sizeof( db_strs[iter] ),
+					"%d.%d",
+					int_val,
+					fract_val);
+		} else {
+			strcpy(&db_strs[iter][0], "-inf");
+		}
+	}
+
+#ifdef PHY_STATS_PHY_DATE_RATE
+#define STAT_RSSI_FMT "%4dM %4dM     %4s %4s %4s %4s dBm %4s avg RSSI\n"
+#define STAT_RSSI_CSV_FMT "%d,%d,%s,%s,%s,%s,%s,"
+	/* Rate-enabled build: show the PHY rate field extracted from last_mcs */
+	printk(csv_format ? STAT_RSSI_CSV_FMT : STAT_RSSI_FMT,
+		MS(node_txstats->last_mcs, QTN_PHY_STATS_MCS_PHYRATE),
+		MS(node_rxstats->last_mcs, QTN_PHY_STATS_MCS_PHYRATE),
+		db_strs[0],
+		db_strs[1],
+		db_strs[2],
+		db_strs[3],
+		db_strs[4]);
+#undef STAT_RSSI_FMT
+#undef STAT_RSSI_CSV_FMT
+#else
+	/* Fallback build: show MCS index + 100*Nss instead of the PHY rate */
+	unsigned int tx_mcs;
+	unsigned int rx_mcs;
+	rx_mcs = (node_rxstats->last_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(node_rxstats->last_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+	tx_mcs = (node_txstats->last_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(node_txstats->last_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+
+	printk("%5d %5d     %4s %4s %4s %4s dBm %4s avg RSSI\n",
+		tx_mcs,
+		rx_mcs,
+		db_strs[0],
+		db_strs[1],
+		db_strs[2],
+		db_strs[3],
+		db_strs[4]);
+
+#endif
+}
+
+/*
+ * Print per-chain RCPI (0.1 dBm units) plus last tx/rx MCS for node ni.
+ * Index NUM_ANT carries the max value (printed as "max RCPI"). Unlike the
+ * RSSI/EVM variants there is no csv_format parameter — the CSV path never
+ * selects RCPI (see display_log_node_info callers).
+ */
+static void qtn_muc_stats_display_rcpi_only(struct ieee80211_node *ni, int mu)
+{
+	int i;
+	char db_strs[NUM_ANT + 1][QDRV_MUC_STATS_DB_VALUE_LENGTH];
+
+	const struct qtn_node_shared_stats_rx *node_rxstats = &ni->ni_shared_stats->rx[mu];
+	const struct qtn_node_shared_stats_tx *node_txstats= &ni->ni_shared_stats->tx[mu];
+	unsigned int tx_mcs;
+	unsigned int rx_mcs;
+
+	/* RSSIs in dBM (units are actually 0.1 dBM) ... */
+	for (i = 0; i <= NUM_ANT; i++) {
+		int rcpi_dbm = node_rxstats->last_rcpi_dbm[i];
+		if (rcpi_dbm) {
+			int int_val = rcpi_dbm / 10;
+			int fract_val = ABS(rcpi_dbm) % 10;
+
+			snprintf(db_strs[i],
+					sizeof(db_strs[i]),
+					"%d.%d",
+					int_val,
+					fract_val);
+		} else {
+			strcpy(db_strs[i], "-inf");
+		}
+	}
+
+	/* MCS index plus 100*Nss, matching the non-rate RSSI display */
+	rx_mcs = (node_rxstats->last_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(node_rxstats->last_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+	tx_mcs = (node_txstats->last_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(node_txstats->last_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+
+	printk("%5d %5d     %4s %4s %4s %4s dBm %4s max RCPI\n",
+		tx_mcs,
+		rx_mcs,
+		db_strs[0],
+		db_strs[1],
+		db_strs[2],
+		db_strs[3],
+		db_strs[4]);
+}
+
+/*
+ * Print per-chain EVM (0.1 dB units), rx symbol count and last tx/rx MCS
+ * for node ni. With PHY_STATS_SUM_EVM defined (it is, above), the summary
+ * column is the dB sum over chains, overriding the per-chain-average value
+ * computed just before the #ifdef.
+ */
+static void qtn_muc_stats_display_evm_only(struct ieee80211_node *ni, int mu, int csv_format)
+{
+	char evm_strs[NUM_ANT+1][QDRV_MUC_STATS_DB_VALUE_LENGTH];
+	int i;
+	int evm_int;
+	int evm_fract;
+	unsigned int tx_mcs;
+	unsigned int rx_mcs;
+
+	const struct qtn_node_shared_stats_rx *node_rxstats = &ni->ni_shared_stats->rx[mu];
+	const struct qtn_node_shared_stats_tx *node_txstats = &ni->ni_shared_stats->tx[mu];
+
+	for (i = 0; i <= NUM_ANT; i++) {
+		int v;
+
+		v = node_rxstats->last_evm_dbm[i];
+
+		snprintf(evm_strs[i],
+				sizeof(evm_strs[i]),
+				"%d.%d",
+				v / 10,
+				ABS(v) % 10);
+	}
+
+	/* Default summary: the stored per-chain-average slot (index NUM_ANT) */
+	evm_int = node_rxstats->last_evm_dbm[NUM_ANT] / 10;
+	evm_fract = ABS(node_rxstats->last_evm_dbm[NUM_ANT]) % 10;
+
+#ifdef  PHY_STATS_SUM_EVM // doing the dB summation
+	/* NOTE(review): declaration after statements — C99/GNU89 extension,
+	 * not strict C90; the stray space-indent two lines below is also
+	 * inconsistent with the file's tab style. */
+	int evm_sum=0;
+
+	for (i = 0; i < NUM_ANT; i++) {
+		evm_sum += node_rxstats->last_evm_dbm[i];
+	}
+
+	evm_int = (int) evm_sum / 10;
+        evm_fract = (int) (ABS(evm_sum) % 10);
+#endif
+
+	/* Overwrite the summary slot string with whichever summary was chosen */
+	snprintf(&evm_strs[NUM_ANT][0],
+		  sizeof(evm_strs[NUM_ANT]),
+		  "%d.%d",
+		  evm_int,
+		  evm_fract);
+
+	rx_mcs = (node_rxstats->last_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(node_rxstats->last_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+	tx_mcs = (node_txstats->last_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(node_txstats->last_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+
+#define STAT_EVM_FMT "%5d %5d %3d %4s %4s %4s %4s dB  %4s avg EVM\n"
+#define STAT_EVM_CSV_FMT "%d,%d,%d,%s,%s,%s,%s,%s,"
+	printk(csv_format ? STAT_EVM_CSV_FMT : STAT_EVM_FMT,
+		tx_mcs,
+		rx_mcs,
+		node_rxstats->last_rxsym,
+		&evm_strs[0][0],
+		&evm_strs[1][0],
+		&evm_strs[2][0],
+		&evm_strs[3][0],
+		&evm_strs[4][0]);
+#undef STAT_EVM_FMT
+#undef STAT_EVM_CSV_FMT
+}
+
+/*
+ * Dispatch the per-node portion of a stats line to the RSSI, RCPI or EVM
+ * printer according to display_choice; unknown choices fall back to RSSI.
+ * Note the RCPI path has no CSV variant and ignores csv_format.
+ */
+static void display_log_node_info(struct ieee80211_node *ni,
+		enum qdrv_muc_stats_display_choice display_choice, int mu, int csv_format)
+{
+	switch (display_choice) {
+	case QDRV_MUC_STATS_SHOW_RCPI:
+		qtn_muc_stats_display_rcpi_only(ni, mu);
+		break;
+	case QDRV_MUC_STATS_SHOW_EVM:
+		qtn_muc_stats_display_evm_only(ni, mu, csv_format);
+		break;
+	case QDRV_MUC_STATS_SHOW_RSSI:
+	default:
+		qtn_muc_stats_display_rssi_only(ni, mu, csv_format);
+		break;
+	}
+}
+
+/*
+ * Emit one stats interval. 'opt' selects which SU/MU buckets to walk via
+ * the start[]/stop[] tables (indexed by enum muc_stats_opt: DEF/BOTH span
+ * STATS_MIN..STATS_MAX, SU spans only STATS_SU, MU only STATS_MU). For each
+ * bucket the busiest node's line is printed; in CSV mode both RSSI and EVM
+ * sections are always emitted so columns stay fixed. If no node was found
+ * in any bucket ('ret' stays 1), the all-nodes dump is skipped too.
+ */
+static void display_log_info(const struct qtn_stats *stats, struct ieee80211com *ic, enum muc_stats_opt opt,
+				int show_all_nodes, int csv_format)
+{
+	struct ieee80211_node *main_ni;
+	enum qdrv_muc_stats_display_choice display_choice;
+
+	static int start[] = {STATS_MIN, STATS_MIN, STATS_SU, STATS_MU };
+	static int stop[] =  {STATS_MAX, STATS_MAX, STATS_MU, STATS_MAX};
+	int mu;
+	int ret = 1;
+
+	display_choice = qdrv_muc_stats_get_display_choice(stats, ic);
+
+	for (mu = start[opt]; mu < stop[opt]; mu++) {
+		main_ni = qtn_muc_stats_get_node(ic, mu);
+
+		if (unlikely(main_ni == NULL)) {
+			continue;
+		}
+		ret = 0;
+		qtn_muc_stats_display_common(stats, mu, csv_format);
+		if (csv_format) {
+			/* CSV rows carry both sections regardless of display_choice */
+			display_log_node_info(main_ni, QDRV_MUC_STATS_SHOW_RSSI, mu, csv_format);
+			display_log_node_info(main_ni, QDRV_MUC_STATS_SHOW_EVM, mu, csv_format);
+		} else {
+			display_log_node_info(main_ni, display_choice, mu, csv_format);
+		}
+		/* Drop the reference taken by qtn_muc_stats_get_node() */
+		ieee80211_free_node(main_ni);
+	}
+
+	if (ret) goto exit;
+
+	if (show_all_nodes) {
+		struct ieee80211_node_table *nt = &ic->ic_sta;
+		struct ieee80211_node *ni;
+
+		IEEE80211_NODE_LOCK_IRQ(nt);
+		TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+			for (mu = start[opt]; mu < stop[opt]; mu++) {
+#define STAT_ALL_FMT "\t\t%s\tNode " DBGMACVAR "\trx_mpdu %u tx_frame %u\t"
+#define STAT_ALL_CSV_FMT "%s," DBGMACVAR ",%u,%u,"
+				printk(csv_format ? STAT_ALL_CSV_FMT : KERN_DEBUG STAT_ALL_FMT,
+					(mu == STATS_SU) ? "SU" : "MU",
+					DBGMACFMT(ni->ni_macaddr),
+					ni->ni_shared_stats->rx[mu].pkts,
+					ni->ni_shared_stats->tx[mu].pkts);
+				if (csv_format) {
+					display_log_node_info(ni, QDRV_MUC_STATS_SHOW_RSSI, mu, csv_format);
+					display_log_node_info(ni, QDRV_MUC_STATS_SHOW_EVM, mu, csv_format);
+				} else {
+					display_log_node_info(ni, display_choice, mu, csv_format);
+				}
+#undef STAT_ALL_FMT
+#undef STAT_ALL_CSV_FMT
+			}
+		}
+		IEEE80211_NODE_UNLOCK_IRQ(nt);
+	}
+exit:
+	return;
+}
+
+/* CSV column-name macros; #x stringizes the SU/MU tag so columns match the
+ * order emitted by qtn_muc_stats_display_common() + the RSSI and EVM rows.
+ * NOTE(review): "RSSV_AVR" looks like a typo for "RSSI_AVG", but it is part
+ * of the emitted CSV header, so downstream parsers may depend on it. */
+#define COMMON_CSV_HEADER(x) "Tstamp_"#x","#x",RxPkt_"#x",AMSDU_"#x",RxG_"#x",CRC_"#x",Noise_"#x",TxPkt_"#x",Defers_"#x",Touts_"#x",Retries_"#x",ShPmbl_"#x",LgPmbl_"#x",Scale_"#x","
+
+#define RSSI_AND_EVM_CSV_HEADER(x) "PHY_RATE_TX_"#x"(M),PHY_RATE_RX_"#x"(M),"\
+	"RSSI0_"#x"(dBm),RSSI1_"#x"(dBm),RSSI2_"#x"(dBm),RSSI3_"#x"(dBm),RSSV_AVR_"#x"(dBm),"\
+	"MCS-TX_"#x",MCS-RX_"#x",RxSym_"#x","\
+	"EVM0_"#x"(dB),EVM1_"#x"(dB),EVM2_"#x"(dB),EVM3_"#x"(dB),EVM_SUM_"#x"(dB),"
+
+#define FULL_CSV_HEADER(x) COMMON_CSV_HEADER(x)RSSI_AND_EVM_CSV_HEADER(x)
+
+/*
+ * Print the table header: the fixed human-readable header line, or in CSV
+ * mode a full column-name row covering the selected SU/MU sets and, when
+ * show_all_nodes is set, one column group per node currently in the table.
+ */
+static void display_log_hdr(struct ieee80211com *ic, enum muc_stats_opt opt,
+			int show_all_nodes, int csv_format)
+{
+	if (!csv_format) {
+		printk(g_header_line);
+	} else {
+		/* New line */
+		printk(KERN_DEBUG "");
+		/* Comma to separate linux time stamp */
+		printk(","STAT_CSV_TAG",");
+
+		if (opt == MSO_BOTH || opt == MSO_SU) {
+			printk(FULL_CSV_HEADER(SU));
+		}
+		if (opt == MSO_BOTH || opt == MSO_MU) {
+			printk(FULL_CSV_HEADER(MU));
+		}
+		if (show_all_nodes) {
+			struct ieee80211_node_table *nt = &ic->ic_sta;
+			struct ieee80211_node *ni;
+			int i = 0;
+
+			IEEE80211_NODE_LOCK_IRQ(nt);
+			TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+				if (opt == MSO_BOTH || opt == MSO_SU) {
+					printk("SU%u,MAC_SU%u,rxpkts_SU%u,txpkts_SU%u,", i, i, i, i);
+					printk("PHY_RATE_TX_SU%u(M),PHY_RATE_RX_SU%u(M),RSSI0_SU%u(dBm),RSSI1_SU%u(dBm),RSSI2_SU%u(dBm),RSSI3_SU%u(dBm),"
+						"RSSV_AVR_SU%u(dBm),MCS-TX_SU%u,MCS-RX_SU%u,RxSym_SU%u,EVM0_SU%u(dB),EVM1_SU%u(dB),EVM2_SU%u(dB),"
+						"EVM3_SU%u(dB),EVM_SUM_SU%u(dB),", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
+				}
+				if (opt == MSO_BOTH || opt == MSO_MU) {
+					printk("MU%u,MAC_MU%u,rxpkts_MU%u,txpkts_MU%u,", i, i, i, i);
+					printk("PHY_RATE_TX_MU%u(M),PHY_RATE_RX_MU%u(M),RSSI0_MU%u(dBm),RSSI1_MU%u(dBm),RSSI2_MU%u(dBm),RSSI3_MU%u(dBm),"
+						"RSSV_AVR_MU%u(dBm),MCS-TX_MU%u,MCS-RX_MU%u,RxSym_MU%u,EVM0_MU%u(dB),EVM1_MU%u(dB),EVM2_MU%u(dB),"
+						"EVM3_MU%u(dB),EVM_SUM_MU%u(dB),", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
+				}
+			i++;
+			}
+			IEEE80211_NODE_UNLOCK_IRQ(nt);
+		}
+	}
+}
+
+/*
+ * Possible values for required_phy_stat_mode are defined in include/qtn/muc_phy_stats.h
+ * They define the possible phy stats mode in the ieee80211com.  As the parameter
+ * required_phy_stat_mode the meaning of the values is:
+ *
+ *      MUC_PHY_STATS_ALTERNATE		- any block will do, so return the latest
+ *      MUC_PHY_STATS_RSSI_RCPI_ONLY	- block must have RSSIs and RCPI, not errored sums
+ *      MUC_PHY_STATS_ERROR_SUM_ONLY	- block must have errored sums, not RSSIs or RCPIs.
+ */
+/*
+ * Return non-zero when the stats block at curr_log_ptr contains the data
+ * class the caller requires. MUC_PHY_STATS_ALTERNATE accepts any block;
+ * otherwise qtn_select_rssi_over_error_sums() classifies the block by its
+ * timestamp and the current mode, and the result must match the request
+ * (RSSI/RCPI vs. errored sums).
+ */
+static int qtn_muc_stats_does_stat_have_required(struct qtn_stats *curr_log_ptr,
+				   int cur_phy_stats_mode,
+				   int required_phy_stat_mode)
+{
+	int retval = 0;
+
+	if (required_phy_stat_mode == MUC_PHY_STATS_ALTERNATE) {
+		retval = 1;
+	} else {
+		int block_has_rssi = qtn_select_rssi_over_error_sums(curr_log_ptr->tstamp,
+								     cur_phy_stats_mode);
+
+		if (block_has_rssi != 0) {
+			retval = (required_phy_stat_mode == MUC_PHY_STATS_RSSI_RCPI_ONLY);
+		} else {
+			retval = (required_phy_stat_mode == MUC_PHY_STATS_ERROR_SUM_ONLY);
+		}
+	}
+
+	return(retval);
+}
+
+/*
+ * If required_phy_stat_mode is MUC_PHY_STATS_RSSI_RCPI_ONLY or MUC_PHY_STATS_ERROR_SUM_ONLY,
+ * none of the blocks may have what is required (based on the current phy stats mode in the
+ * ieee80211com).  If so, return NULL.
+ */
+/*
+ * Scan the MuC's circular stats log and return a pointer to the newest
+ * block (highest tstamp) that satisfies required_phy_stat_mode, or NULL
+ * when mac/ic/log is absent or the mode makes the request unsatisfiable.
+ * The returned pointer aliases MuC-shared memory — the MuC may overwrite
+ * it; callers treat it as a snapshot hint only.
+ */
+struct qtn_stats *qtn_muc_stats_get_addr_latest_stats(struct qdrv_mac *mac,
+						  const struct ieee80211com *ic,
+						  int required_phy_stat_mode)
+{
+	struct qtn_stats_log *log = NULL;
+	struct qtn_stats *curr_log_ptr = NULL;
+	struct qtn_stats *retaddr = NULL;
+	int cr_indx;
+	unsigned int latest_tstamp = 0;
+	int cur_phy_stats_mode;
+
+	if (mac == NULL || ic == NULL) {
+		return NULL;
+	}
+
+	cur_phy_stats_mode = ic->ic_mode_get_phy_stats;
+
+	log = (struct qtn_stats_log *)mac->mac_sys_stats;
+	if (log == NULL) {
+		return(NULL);
+	} else {
+		int	required_phy_stats_available = 1;
+
+		/* An exclusive current mode can never supply the opposite class */
+		if (required_phy_stat_mode == MUC_PHY_STATS_ERROR_SUM_ONLY &&
+		    cur_phy_stats_mode == MUC_PHY_STATS_RSSI_RCPI_ONLY) {
+			required_phy_stats_available = 0;
+		} else if (required_phy_stat_mode == MUC_PHY_STATS_RSSI_RCPI_ONLY &&
+			   cur_phy_stats_mode == MUC_PHY_STATS_ERROR_SUM_ONLY) {
+			required_phy_stats_available = 0;
+		}
+
+		if (required_phy_stats_available == 0) {
+			return(NULL);
+		}
+	}
+
+	/* Start 2 slots past the write index (the oldest safely-readable slot;
+	 * MOD requires NUM_LOG_BUFFS to be a power of 2) and stop at the slot
+	 * the MuC is currently writing. */
+	cr_indx = MOD(log->curr_buff + 2, NUM_LOG_BUFFS);
+	curr_log_ptr = &log->stat_buffs[cr_indx];
+
+	while (cr_indx != log->curr_buff) {
+		if (curr_log_ptr->tstamp > latest_tstamp &&
+		    qtn_muc_stats_does_stat_have_required(curr_log_ptr,
+							  cur_phy_stats_mode,
+							  required_phy_stat_mode)) {
+			latest_tstamp = curr_log_ptr->tstamp;
+			retaddr = curr_log_ptr;
+		}
+
+		cr_indx = MOD(cr_indx + 1, NUM_LOG_BUFFS);
+		curr_log_ptr = &log->stat_buffs[cr_indx];
+	}
+
+	return(retaddr);
+}
+
+/*
+ * Parse log is subtly different from get address latest stats.
+ *
+ *     Parse Log reports ALL stats that have not been previously reported;
+ *     thus it uses a file-scope variable "last_tstamp" to track which
+ *     stats have not been reported.  If called repeatedly between MUC
+ *     updates it returns without reporting anything.
+ *
+ *     Get Address Latest Stats returns the address of the stats with the
+ *     latest time stamp.  It can thus be repeatedly called between updates
+ *     from the MUC, with each call in this situation returning the address
+ *     of the same qtn_stats.
+ */
+
+/*
+ * Walk the (host-local copy of the) circular log and display every block
+ * newer than the file-scope last_tstamp watermark, advancing the watermark
+ * as blocks are reported. Repeated calls between MuC updates therefore
+ * print nothing (see the comment above qtn_muc_stats_get_addr_latest_stats).
+ */
+static void parse_log(struct qtn_stats_log *log, struct ieee80211com *ic, enum muc_stats_opt opt,
+			int show_all_nodes, int csv_format)
+{
+	struct qtn_stats *curr_log_ptr;
+	int cr_indx;
+
+	/* Same +2 starting offset as qtn_muc_stats_get_addr_latest_stats() */
+	cr_indx = MOD(log->curr_buff + 2, NUM_LOG_BUFFS);
+
+	while(cr_indx != log->curr_buff) {
+		curr_log_ptr = &log->stat_buffs[cr_indx];
+		cr_indx = MOD(cr_indx+1, NUM_LOG_BUFFS);
+
+		if(curr_log_ptr->tstamp <= last_tstamp) continue;
+
+		if (csv_format) printk("\n,"STAT_CSV_TAG",");
+
+		last_tstamp = curr_log_ptr->tstamp;
+		display_log_info(curr_log_ptr, ic, opt, show_all_nodes, csv_format);
+	}
+}
+
+#if 0
+static void dump_log(struct qtn_stats_log *log)
+{
+
+	int cr_indx = 0;
+
+	printk("Current Log indx %d\n", log->curr_buff);
+
+	for(cr_indx=0;cr_indx < NUM_LOG_BUFFS; cr_indx++){
+		printk("Indx %d, Tstamp %d\n",cr_indx,log->stat_buffs[cr_indx].tstamp);
+	}
+}
+#endif
+
+/* Snapshot the MuC-shared stats log into host memory so parsing is not
+ * racing the MuC's writes. */
+static void copy_log_to_local(u32 *muc_log_addr,struct qtn_stats_log *log)
+{
+
+	memcpy(log,muc_log_addr,sizeof(struct qtn_stats_log));
+}
+
+/*
+ * Command entry point: parse argv options and print any new MuC stats.
+ * Recognized options (any order): "both"/"su"/"mu" select the stats set
+ * ("mu" is rejected unless MU is enabled), "all" dumps every node, "csv"
+ * switches to CSV rows, "title" prints only the header. In non-CSV mode
+ * the header is re-printed every 4th call (cnt & 0x3). 'data' is unused.
+ * Always returns 0.
+ */
+int qdrv_muc_stats_printlog(const struct qdrv_cb *data,
+			    struct qdrv_mac *mac,
+			    struct ieee80211com *ic,
+			    int argc,
+			    char **argv)
+{
+	static int cnt = 0;
+	enum muc_stats_opt opt = MSO_BOTH;
+	int show_all_nodes = 0;
+	int csv_format = 0;
+	int title_only = 0;
+	int i;
+
+	if (mac->mac_sys_stats == NULL) {
+		printk(KERN_DEBUG "No MuC stats available\n");
+		return 0;
+	}
+
+	for (i = 0; i < argc; i++) {
+		if (strcmp(argv[i], "both") == 0) {
+			opt = MSO_BOTH;
+		} else if (strcmp(argv[i], "su") == 0) {
+			opt = MSO_SU;
+		} else if (strcmp(argv[i], "mu") == 0) {
+			if (!ic->ic_mu_enable) {
+				printk("MU not enabled\n");
+				return 0;
+			}
+			opt = MSO_MU;
+		} else if (strcmp(argv[i], "all") == 0) {
+			show_all_nodes = 1;
+		} else if (strcmp(argv[i], "csv") == 0) {
+			csv_format = 1;
+		} else if (strcmp(argv[i], "title") == 0) {
+			title_only = 1;
+		}
+	}
+
+	/* Without MU enabled, "both" degenerates to SU-only */
+	if (opt == MSO_BOTH && !ic->ic_mu_enable) {
+		opt = MSO_SU;
+	}
+
+	if((!csv_format && !(cnt++ & 0x3)) || (csv_format && title_only)) {
+		/* Display header periodically */
+		display_log_hdr(ic, opt, show_all_nodes, csv_format);
+	}
+
+	if (title_only) goto exit;
+
+	copy_log_to_local((u32*)mac->mac_sys_stats, &host_log);
+	parse_log(&host_log, ic, opt, show_all_nodes, csv_format);
+
+exit:
+	return(0);
+}
+
+/*
+ * These functions need to be prepared for qtn_muc_stats_get_addr_latest_stats returning NULL.
+ *
+ * Currently they all return -1 to signal Failed to Get Requested Value.
+ */
+/*
+ * Return the latest hardware noise reading, or -1 when no stats block is
+ * available. A non-positive reading is ignored and the last positive value
+ * (static prev_hw_noise, initially 0) is returned instead — so the very
+ * first call can legitimately return 0.
+ */
+int qdrv_muc_get_noise(struct qdrv_mac *mac, const struct ieee80211com *ic)
+{
+	int retval = -1;
+	struct qtn_stats *address_current_log = qtn_muc_stats_get_addr_latest_stats(
+							mac, ic, MUC_PHY_STATS_ALTERNATE);
+	static uint32_t prev_hw_noise = 0;
+
+	if (address_current_log != NULL) {
+		if (address_current_log->rx_phy_stats.hw_noise > 0)
+			prev_hw_noise = address_current_log->rx_phy_stats.hw_noise;
+		retval = prev_hw_noise;
+	}
+
+	return(retval);
+}
+
+/*
+ * Return the latest per-chain RSSI/EVM reading for rf_chain, or -1 when no
+ * RSSI/RCPI stats block is available. An out-of-range chain index is
+ * silently clamped to the last chain (NUM_ANT - 1).
+ */
+int qdrv_muc_get_rssi_by_chain(struct qdrv_mac *mac, const struct ieee80211com *ic, unsigned int rf_chain)
+{
+	int retval = -1;
+	struct qtn_stats *address_current_log = qtn_muc_stats_get_addr_latest_stats(
+							mac, ic, MUC_PHY_STATS_RSSI_RCPI_ONLY);
+
+	if (rf_chain >= NUM_ANT) {
+		rf_chain = NUM_ANT - 1;
+	}
+
+	if (address_current_log != NULL) {
+		retval = address_current_log->rx_phy_stats.last_rssi_evm[rf_chain];
+	}
+
+	return(retval);
+}
+
+/*
+ * Return the latest rx_gain_fields word with its DAGC shift bits (23:16)
+ * replaced by the signed net shift (up - down) computed by
+ * qtn_muc_stats_get_dagc_shift_count(). Returns (u_int32_t)-1 when no
+ * RSSI/RCPI stats block is available.
+ */
+u_int32_t qdrv_muc_get_rx_gain_fields(struct qdrv_mac *mac, const struct ieee80211com *ic)
+{
+	u_int32_t rx_gain_fields = (u_int32_t) -1;
+	struct qtn_stats *address_current_log = qtn_muc_stats_get_addr_latest_stats(mac,
+										    ic,
+										    MUC_PHY_STATS_RSSI_RCPI_ONLY);
+
+	if (address_current_log != NULL) {
+		int8_t dagc_shift = 0;
+
+		rx_gain_fields = address_current_log->rx_phy_stats.rx_gain_fields;
+		dagc_shift = (int8_t) qtn_muc_stats_get_dagc_shift_count(rx_gain_fields);
+
+		/* Clear the raw up/down fields, then insert the net shift */
+		rx_gain_fields = (rx_gain_fields & ~QDRV_MUC_STATS_DAGC_SHIFT_FIELD_M);
+		rx_gain_fields = (rx_gain_fields |
+			((dagc_shift << QDRV_MUC_STATS_DAGC_SHIFT_FIELD_S) & QDRV_MUC_STATS_DAGC_SHIFT_FIELD_M));
+	}
+
+	return(rx_gain_fields);
+}
+
+/*
+ * Fetch a named phy statistic into *stat_value. name_of_stat is matched
+ * against stat_param_table (currently only the average-error-sum entry,
+ * which requires an errored-sums stats block); array_index is accepted but
+ * unused by the supported field. Returns 0 on success, -1 on an unknown
+ * name or when no qualifying stats block is available.
+ */
+int qdrv_muc_get_phy_stat(struct qdrv_mac *mac,
+			  const struct ieee80211com *ic,
+			  const char *name_of_stat,
+			  const unsigned int array_index,
+			  int *stat_value)
+{
+	/* Name -> (field enum, required stats mode) lookup; extend here to
+	 * support additional fields. */
+	const struct {
+		char			*stat_param_name;
+		enum qtn_phy_stat_field stat_param_field;
+		int			stat_phy_stat_mode;
+	} stat_param_table[] = {
+		{ QTN_PHY_AVG_ERROR_SUM_NSYM_NAME,
+		  QTN_PHY_AVG_ERROR_SUM_NSYM_FIELD,
+		  MUC_PHY_STATS_ERROR_SUM_ONLY },
+	};
+
+	int iter;
+	enum qtn_phy_stat_field	param_field = QTN_PHY_NOSUCH_FIELD;
+	int required_phy_stat_mode = MUC_PHY_STATS_ALTERNATE;
+	struct qtn_stats *address_current_log = NULL;
+	int retval = 0;
+
+	for (iter = 0; iter < ARRAY_SIZE(stat_param_table); iter++) {
+		if (strcmp(name_of_stat, stat_param_table[iter].stat_param_name) == 0) {
+			param_field = stat_param_table[iter].stat_param_field;
+			required_phy_stat_mode = stat_param_table[iter].stat_phy_stat_mode;
+			break;
+		}
+	}
+
+	if (param_field == QTN_PHY_NOSUCH_FIELD) {
+#if 0
+		DPRINTF(LL_1, LF_ERROR,
+			(DBGEFMT "Unknown field %s in get phy_stat.\n", DBGARG, name_of_stat));
+#endif
+		return -1;
+	}
+
+	address_current_log = qtn_muc_stats_get_addr_latest_stats(mac,
+								  ic,
+								  required_phy_stat_mode);
+	if (address_current_log == NULL) {
+#if 0
+		DPRINTF(LL_1, LF_ERROR,
+			(DBGEFMT "Incorrect phy stat mode in get phy_stat for %s.\n", DBGARG, name_of_stat));
+#endif
+		return -1;
+	}
+
+	switch (param_field) {
+	case QTN_PHY_AVG_ERROR_SUM_NSYM_FIELD:
+		{
+			int	sum_snr = 0;
+			int i;
+
+			/* Sum the per-chain error readings (0.1 dB units), then
+			 * round-to-nearest into whole dB and negate. */
+			for (i = 0; i < NUM_ANT; i++) {
+				sum_snr += address_current_log->rx_phy_stats.last_rssi_evm[i];
+			}
+
+			if (sum_snr < 0) {
+				sum_snr = (sum_snr - 5) / 10;
+			} else {
+				sum_snr = (sum_snr + 5) / 10;
+			}
+
+			*stat_value = (0 - sum_snr);
+		}
+		break;
+
+	default:
+#if 0
+		DPRINTF(LL_1, LF_ERROR,
+			(DBGEFMT "No support for %s in get phy_stat.\n", DBGARG, name_of_stat));
+#endif
+		retval = -1;
+		break;
+	}
+
+	return retval;
+}
+
+int qdrv_muc_stats_rssi(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->rx[mu].last_rssi_dbm[NUM_ANT];
+}
+
+int qdrv_muc_stats_smoothed_rssi(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->rx[mu].rssi_dbm_smoothed[NUM_ANT];
+}
+
+int qdrv_muc_stats_hw_noise(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->rx[mu].last_hw_noise[NUM_ANT];
+}
+
+int qdrv_muc_stats_rxtx_phy_rate(const struct ieee80211_node *ni, const int is_rx,
+		uint8_t *nss, uint8_t *mcs, u_int32_t *phy_rate)
+{
+	unsigned int last_mcs;
+	int mu = STATS_SU;
+
+	last_mcs = (is_rx) ? ni->ni_shared_stats->rx[mu].last_mcs :
+			ni->ni_shared_stats->tx[mu].last_mcs;
+
+	if (nss)
+		*nss = MS(last_mcs, QTN_PHY_STATS_MCS_NSS);
+	if (mcs)
+		*mcs = MS(last_mcs, QTN_STATS_MCS_RATE_MASK);
+	if (phy_rate)
+		*phy_rate = MS(last_mcs, QTN_PHY_STATS_MCS_PHYRATE);
+
+	return 0;
+}
+
+int qdrv_muc_stats_snr(const struct ieee80211_node *ni)
+{
+	int	sum_snr = 0;
+	int i;
+	const struct qtn_node_shared_stats_rx *rxstats;
+	int mu = STATS_SU;
+
+	rxstats = &ni->ni_shared_stats->rx[mu];
+
+	for (i = 0; i < NUM_ANT; i++) {
+		sum_snr += rxstats->last_evm_dbm[i];
+	}
+
+	return sum_snr;
+}
+
+int qdrv_muc_stats_max_queue(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->tx[mu].max_queue;
+}
+
+ssize_t qdrv_muc_get_size_rssi_phy_stats(void)
+{
+	return sizeof(struct qtn_stats);
+}
+
+u_int32_t qdrv_muc_stats_tx_failed(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->tx[mu].txdone_failed_cum;
+}
+
+static void qdrv_muc_stats_get_evm(struct ieee80211_phy_stats *ps,
+				const struct qtn_node_shared_stats_rx *node_rxstats)
+{
+	int iter;
+	int v;
+
+	for (iter = 0; iter <= NUM_ANT; iter++) {
+		v = node_rxstats->last_evm_dbm[iter];
+
+		if (iter < NUM_ANT) {
+			ps->last_evm_array[iter] = v;
+		} else {
+			ps->last_evm = v;
+		}
+	}
+
+#ifdef  PHY_STATS_SUM_EVM
+	ps->last_evm = 0;
+	for (iter = 0; iter < NUM_ANT; iter++) {
+		ps->last_evm += node_rxstats->last_evm_dbm[iter];
+	}
+#endif
+}
+
+static void qdrv_muc_stats_get_rssi(struct ieee80211_phy_stats *ps,
+				const struct qtn_node_shared_stats_rx *node_rxstats)
+{
+	int iter;
+	int rssi_dbm;
+
+	for (iter = 0; iter <= NUM_ANT; iter++) {
+		rssi_dbm = node_rxstats->last_rssi_dbm[iter];
+		if (rssi_dbm) {
+			if (iter == NUM_ANT) {
+				ps->last_rssi = rssi_dbm;
+			} else {
+				ps->last_rssi_array[iter] = rssi_dbm;
+			}
+		} else {
+			if (iter == NUM_ANT) {
+				ps->last_rssi = 0;
+			} else {
+				ps->last_rssi_array[iter] = 0;
+			}
+		}
+	}
+}
+
+void qdrv_muc_update_missing_stats(struct qdrv_mac *mac,
+				struct ieee80211com *ic,
+				struct ieee80211_phy_stats *ps,
+				enum qdrv_muc_stats_display_choice missing_type)
+{
+	struct qtn_stats *prev_stats = NULL;
+	struct qtn_rx_stats *prev_rx_stats = NULL;
+	int iter;
+
+	if (missing_type == QDRV_MUC_STATS_SHOW_RSSI) {
+		prev_stats = qtn_muc_stats_get_addr_latest_stats(mac,
+					ic, MUC_PHY_STATS_RSSI_RCPI_ONLY);
+		if (prev_stats == NULL) {
+			return;
+		}
+		prev_rx_stats = &prev_stats->rx_phy_stats;
+
+		memcpy(ps->last_rssi_array, prev_rx_stats->last_rssi_evm,
+					sizeof(prev_rx_stats->last_rssi_evm));
+	} else {
+		prev_stats = qtn_muc_stats_get_addr_latest_stats(mac,
+					ic, MUC_PHY_STATS_ERROR_SUM_ONLY);
+		if (prev_stats == NULL) {
+			return;
+		}
+		prev_rx_stats = &prev_stats->rx_phy_stats;
+
+		memcpy(ps->last_evm_array, prev_rx_stats->last_rssi_evm,
+					sizeof(prev_rx_stats->last_rssi_evm));
+
+#ifdef  PHY_STATS_SUM_EVM
+		ps->last_evm = 0;
+		for (iter = 0; iter < NUM_ANT; iter++) {
+			ps->last_evm += ps->last_evm_array[iter];
+		}
+#endif
+	}
+}
+
+int qdrv_muc_get_last_phy_stats(struct qdrv_mac *mac,
+				struct ieee80211com *ic,
+				struct ieee80211_phy_stats *ps,
+				uint8_t all_stats)
+{
+	struct qtn_stats *stats;
+	struct qtn_rx_stats *rx_stats = NULL;
+	struct qtn_tx_stats *tx_stats = NULL;
+	enum qdrv_muc_stats_display_choice display_choice = QDRV_MUC_STATS_SHOW_RSSI;
+	enum qdrv_muc_stats_display_choice missing_type = QDRV_MUC_STATS_SHOW_EVM;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scs_info_set *scs_info_lh = sp->scs_info_lhost;
+	int txpower;
+	int rssi = SCS_RSSI_UNINITED;
+	int mu = STATS_SU;
+	uint32_t cca_try = scs_info_lh->scs_info[scs_info_lh->valid_index].cca_try;
+	uint32_t cca_tx = scs_info_lh->scs_info[scs_info_lh->valid_index].cca_tx;
+	uint32_t rx_usecs = scs_info_lh->scs_info[scs_info_lh->valid_index].rx_usecs;
+	uint32_t cca_interference = scs_info_lh->scs_info[scs_info_lh->valid_index].cca_interference;
+	uint32_t cca_idle = scs_info_lh->scs_info[scs_info_lh->valid_index].cca_idle;
+
+	struct ieee80211_node *ni;
+	const struct qtn_node_shared_stats_rx *node_rxstats;
+	const struct qtn_node_shared_stats_tx *node_txstats;
+
+	DBGPRINTF(DBG_LL_ERR, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (ps == NULL)
+		return -1;
+
+	memset(ps, 0, sizeof(struct ieee80211_phy_stats));
+
+	if (unlikely(mac == NULL || mac->data ==NULL || ic == NULL || scs_info_lh == NULL)) {
+		return -1;
+	}
+
+	ni = qtn_muc_stats_get_node(ic, mu);
+
+	if (unlikely(ni == NULL)) {
+		return -1;
+	}
+
+	stats = qtn_muc_stats_get_addr_latest_stats(mac, ic, MUC_PHY_STATS_ALTERNATE);
+	if (unlikely(stats == NULL)) {
+		ieee80211_free_node(ni);
+		return -1;
+	}
+
+	rx_stats = &stats->rx_phy_stats;
+	tx_stats = &stats->tx_phy_stats;
+
+	display_choice = qdrv_muc_stats_get_display_choice(stats, ic);
+
+	if (ic->ic_opmode & IEEE80211_M_STA) {
+		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+		if (unlikely(vap == NULL)) {
+			ieee80211_free_node(ni);
+			return -1;
+		}
+		if (vap->iv_state & IEEE80211_S_RUN)
+			ps->assoc = 1;
+		else
+			ps->assoc = 0;
+	} else
+		ps->assoc = ic->ic_sta_assoc;
+
+	node_rxstats = &ni->ni_shared_stats->rx[mu];
+	node_txstats = &ni->ni_shared_stats->tx[mu];
+
+	ps->tstamp	= stats->tstamp;
+
+	if (cca_try) {
+		ps->cca_tx	= cca_tx * 1000 / cca_try;
+		ps->cca_rx	= rx_usecs / cca_try;
+		ps->cca_int	= cca_interference * 1000 / cca_try;
+		ps->cca_idle	= cca_idle * 1000 / cca_try;
+		ps->cca_total	= ps->cca_tx + ps->cca_rx + ps->cca_int + ps->cca_idle;
+	}
+
+	ps->rx_pkts	= rx_stats->num_pkts;
+	ps->rx_gain	= rx_stats->avg_rxgain;
+	ps->rx_cnt_crc	= rx_stats->cnt_mac_crc;
+	ps->rx_noise	= rx_stats->hw_noise;
+	ps->tx_pkts	= tx_stats->num_pkts;
+	ps->tx_defers	= tx_stats->num_defers;
+	ps->tx_touts	= tx_stats->num_timeouts;
+	ps->tx_retries	= tx_stats->num_retries;
+	ps->cnt_sp_fail = rx_stats->cnt_sp_fail;
+	ps->cnt_lp_fail = rx_stats->cnt_lp_fail;
+
+	ps->last_rx_mcs = node_rxstats->last_mcs & QTN_STATS_MCS_RATE_MASK;
+	ps->last_tx_mcs = node_txstats->last_mcs & QTN_STATS_MCS_RATE_MASK;
+	ps->last_tx_scale = node_txstats->last_tx_scale;
+
+	txpower = ic->ic_curchan->ic_maxpower;
+
+	if (ic->ic_rssi)
+		rssi = ic->ic_rssi(ni);
+
+	if (SCS_RSSI_VALID(rssi)) {
+		ps->atten = txpower - rssi / SCS_RSSI_PRECISION_RECIP;
+	} else
+		ps->atten = 0;
+
+	switch (display_choice) {
+	case QDRV_MUC_STATS_SHOW_EVM:
+		qdrv_muc_stats_get_evm(ps, node_rxstats);
+		missing_type = QDRV_MUC_STATS_SHOW_RSSI;
+		break;
+	case QDRV_MUC_STATS_SHOW_RCPI:
+		ps->last_rcpi = node_rxstats->last_rcpi_dbm[NUM_ANT];
+		break;
+	case QDRV_MUC_STATS_SHOW_RSSI:
+	default:
+		/* RSSIs in dBM (units are actually 0.1 dBM) ... */
+		qdrv_muc_stats_get_rssi(ps, node_rxstats);
+		missing_type = QDRV_MUC_STATS_SHOW_EVM;
+	}
+
+	if (all_stats) {
+		qdrv_muc_update_missing_stats(mac, ic, ps, missing_type);
+	}
+
+	ieee80211_free_node(ni);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+u_int32_t qdrv_muc_stats_tx_airtime(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->tx[mu].tx_airtime;
+}
+
+u_int32_t qdrv_muc_stats_tx_accum_airtime(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->tx[mu].tx_accum_airtime;
+}
+
+
+u_int32_t qdrv_muc_stats_rx_airtime(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->rx[mu].rx_airtime;
+}
+
+u_int32_t qdrv_muc_stats_rx_accum_airtime(const struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	return ni->ni_shared_stats->rx[mu].rx_accum_airtime;
+}
+
+int qdrv_muc_get_last_cca_stats(struct qdrv_mac *mac,
+				struct ieee80211com *ic,
+				struct qtn_exp_cca_stats *cs)
+{
+	DBGPRINTF(DBG_LL_ERR, QDRV_LF_TRACE, "-->Enter\n");
+
+	if((cs == NULL) || (mac == NULL))
+		return -1;
+
+	memset(cs, 0, sizeof(struct qtn_exp_cca_stats));
+	if(unlikely(mac->cca_stats == NULL))
+		return -1;
+
+	cs->cca_fat = mac->cca_stats->cca_fat;
+	cs->cca_intf = mac->cca_stats->cca_intf;
+	cs->cca_trfc = mac->cca_stats->cca_trfc;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+
+}
diff --git a/drivers/qtn/qdrv/qdrv_muc_stats.h b/drivers/qtn/qdrv/qdrv_muc_stats.h
new file mode 100644
index 0000000..348766b
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_muc_stats.h
@@ -0,0 +1,72 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef _QDRV_MUC_STATS_H
+#define _QDRV_MUC_STATS_H
+
+#include <net80211/ieee80211_var.h>
+#include "qdrv_mac.h"
+
+/* enum disconnects the actual display for a particular line of phy stats from the phy stats mode */
+enum qdrv_muc_stats_display_choice {
+	QDRV_MUC_STATS_SHOW_RSSI = 0,
+	QDRV_MUC_STATS_SHOW_RCPI,
+	QDRV_MUC_STATS_SHOW_EVM,
+};
+
+enum qdrv_muc_stats_display_choice qdrv_muc_stats_get_display_choice(const struct qtn_stats *stats,
+								     const struct ieee80211com *ic);
+struct qtn_stats *qtn_muc_stats_get_addr_latest_stats(struct qdrv_mac *mac,
+						      const struct ieee80211com *ic,
+						      int required_phy_stat_mode);
+int qdrv_muc_stats_printlog(const struct qdrv_cb *data,
+			    struct qdrv_mac *mac,
+			    struct ieee80211com *ic,
+			    int argc,
+			    char **argv);
+int qdrv_muc_get_noise(struct qdrv_mac *mac, const struct ieee80211com *ic);
+int qdrv_muc_get_rssi_by_chain(struct qdrv_mac *mac,
+			       const struct ieee80211com *ic,
+			       unsigned int rf_chain);
+u_int32_t qdrv_muc_get_rx_gain_fields(struct qdrv_mac *mac, const struct ieee80211com *ic);
+int qdrv_muc_get_phy_stat(struct qdrv_mac *mac,
+			  const struct ieee80211com *ic,
+			  const char *name_of_stat,
+			  const unsigned int array_index,
+			  int *stat_value);
+int qdrv_muc_stats_rxtx_phy_rate(const struct ieee80211_node *, const int is_rx,
+		uint8_t *nss, uint8_t *mcs, uint32_t * phy_rate);
+int qdrv_muc_stats_rssi(const struct ieee80211_node *);
+int qdrv_muc_stats_smoothed_rssi(const struct ieee80211_node *ni);
+int qdrv_muc_stats_snr(const struct ieee80211_node *);
+int qdrv_muc_stats_max_queue(const struct ieee80211_node *);
+u_int32_t qdrv_muc_stats_mcs_to_phyrate(u_int8_t bw, u_int8_t sgi, u_int8_t mcs,
+		uint8_t nss, uint8_t vht);
+ssize_t qdrv_muc_get_size_rssi_phy_stats(void);
+u_int32_t qdrv_muc_stats_tx_failed(const struct ieee80211_node *);
+int qdrv_muc_get_last_phy_stats(struct qdrv_mac *mac, struct ieee80211com *ic,
+				struct ieee80211_phy_stats *ps, uint8_t all_stats);
+int qdrv_muc_stats_hw_noise(const struct ieee80211_node *ni);
+
+u_int32_t qdrv_muc_stats_tx_airtime(const struct ieee80211_node *);
+u_int32_t qdrv_muc_stats_tx_accum_airtime(const struct ieee80211_node *);
+u_int32_t qdrv_muc_stats_rx_airtime(const struct ieee80211_node *);
+u_int32_t qdrv_muc_stats_rx_accum_airtime(const struct ieee80211_node *);
+int qdrv_muc_get_last_cca_stats(struct qdrv_mac *mac, struct ieee80211com *ic, struct qtn_exp_cca_stats *cs);
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_netdebug_binary.h b/drivers/qtn/qdrv/qdrv_netdebug_binary.h
new file mode 100644
index 0000000..c61bf5a
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_netdebug_binary.h
@@ -0,0 +1,370 @@
+/* Automatically generated on 2016-11-15 09:25:27.101207 - do not edit */
+const char pktlogger_structs[] = {
+0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00, 0x00, 0x04, 0xE6, 0xD6, 0xB4, 0x46, 0x02, 0x00, 0x21, 0x01,
+0x16, 0x00, 0x00, 0x00, 0x74, 0x2F, 0xE5, 0xA3, 0xE0, 0x74, 0x5C, 0x16, 0xB2, 0x5D, 0x00, 0x39,
+0x9D, 0x0A, 0x8D, 0xE8, 0x21, 0xA4, 0x10, 0x81, 0x11, 0x2D, 0xCF, 0x41, 0x5B, 0x94, 0xFF, 0xA9,
+0xAF, 0x4F, 0xE4, 0xD8, 0x13, 0x51, 0xCB, 0xF4, 0xD7, 0xD0, 0xAA, 0xCC, 0x0F, 0x26, 0xB6, 0x20,
+0xEA, 0x26, 0xFE, 0x50, 0xE1, 0x98, 0xD0, 0xD0, 0x64, 0x3F, 0x94, 0x21, 0x67, 0x35, 0xFA, 0x97,
+0x19, 0xB3, 0x4A, 0xFD, 0x76, 0x27, 0x03, 0xE0, 0x80, 0x33, 0x8F, 0x73, 0x79, 0x6A, 0x8E, 0x7D,
+0xBC, 0x7D, 0x77, 0xB8, 0x2E, 0xC0, 0x05, 0xC9, 0x68, 0x18, 0x53, 0x0A, 0x3B, 0xEF, 0xE1, 0xF5,
+0x59, 0x18, 0x2A, 0x31, 0x31, 0x03, 0x6A, 0x3A, 0xFE, 0x6D, 0x61, 0x89, 0x0D, 0xB9, 0x55, 0x77,
+0x19, 0x95, 0x0C, 0xB0, 0xEC, 0xDB, 0x1C, 0x58, 0x4D, 0x6C, 0x68, 0x5C, 0x4E, 0x0E, 0xEE, 0x6E,
+0x89, 0xA9, 0xE3, 0x95, 0xFC, 0xA6, 0x60, 0xB3, 0xAE, 0x3D, 0x33, 0xB0, 0x48, 0x5C, 0x46, 0x3E,
+0x2F, 0xB3, 0xDF, 0x5E, 0x02, 0x03, 0xA5, 0xE3, 0x1E, 0x9E, 0x41, 0x83, 0x28, 0x91, 0x17, 0x58,
+0x2A, 0x4F, 0x07, 0x4A, 0xCD, 0x95, 0xAA, 0x19, 0x47, 0xAC, 0x7A, 0x3A, 0x65, 0xD0, 0xA8, 0x41,
+0xF7, 0x06, 0x8B, 0xB0, 0xEC, 0xDA, 0x29, 0xE4, 0x7E, 0x7D, 0xEA, 0x11, 0x3F, 0x95, 0x16, 0x79,
+0x60, 0xA9, 0x39, 0xEC, 0xC2, 0x2F, 0x35, 0x7F, 0x76, 0x44, 0x6D, 0x0D, 0x33, 0xB0, 0x66, 0x11,
+0x9F, 0x00, 0x2C, 0x5E, 0xC2, 0x0C, 0x92, 0x20, 0x34, 0x6B, 0x08, 0xBC, 0xC9, 0xAA, 0x4F, 0x0A,
+0x6D, 0xFC, 0xB6, 0x69, 0xEF, 0x47, 0xD9, 0xDD, 0xF9, 0x9F, 0xE4, 0x13, 0xC5, 0x0E, 0x11, 0x94,
+0x2F, 0x24, 0x55, 0x91, 0x40, 0x3C, 0x58, 0x0B, 0x65, 0xD6, 0x15, 0xE0, 0xF3, 0x99, 0x4C, 0x4F,
+0x79, 0xB4, 0x54, 0x6F, 0x5D, 0x3D, 0x3A, 0xB1, 0xC0, 0x80, 0x92, 0x35, 0x5B, 0x63, 0x4E, 0x2F,
+0x4C, 0xD0, 0x2B, 0xE6, 0x4C, 0x3C, 0x18, 0x66, 0x5E, 0x86, 0xC5, 0x71, 0x9B, 0x1E, 0xD2, 0x0C,
+0x28, 0xDD, 0xDE, 0xF7, 0xA6, 0xE4, 0xDF, 0x6D, 0x87, 0xA0, 0xD9, 0x47, 0xA3, 0xB9, 0xA2, 0xD3,
+0x5F, 0xB1, 0xA5, 0x40, 0xE1, 0xBB, 0xE1, 0x9E, 0x46, 0xD2, 0x2F, 0xD4, 0x29, 0xC6, 0x89, 0x40,
+0xBF, 0xF0, 0x6D, 0x56, 0x5B, 0xBC, 0x9B, 0xAE, 0x79, 0x6F, 0xAC, 0x21, 0x79, 0x6A, 0x28, 0xEF,
+0x9E, 0xD9, 0x54, 0x85, 0x17, 0x65, 0x63, 0x6F, 0xED, 0xC1, 0x46, 0x02, 0x65, 0x09, 0x3D, 0x24,
+0x4B, 0xEF, 0x5E, 0xE8, 0xAB, 0x86, 0xDD, 0xEB, 0x0B, 0xF2, 0x74, 0x93, 0xCD, 0xBF, 0xDF, 0xAC,
+0x92, 0x67, 0x8F, 0xC5, 0xD9, 0x38, 0x6A, 0xBB, 0xDC, 0xED, 0x1F, 0x03, 0xCB, 0xC2, 0x38, 0xED,
+0xFD, 0xE3, 0x65, 0xCA, 0x88, 0x86, 0xBC, 0x68, 0x47, 0x4B, 0x78, 0x37, 0xF9, 0xA0, 0xDC, 0x03,
+0x05, 0xB8, 0x57, 0x61, 0xC2, 0xBC, 0x2D, 0xB8, 0xDF, 0xDB, 0xAB, 0x9C, 0xBC, 0x0F, 0x16, 0x55,
+0xE3, 0x02, 0xBC, 0x1C, 0x47, 0x2F, 0x61, 0x88, 0xDC, 0xB8, 0x03, 0x83, 0xA9, 0x81, 0xC3, 0x1C,
+0x71, 0xEF, 0x07, 0xFE, 0xBB, 0xB9, 0x1A, 0xC0, 0xA1, 0x20, 0x1F, 0xC7, 0x3B, 0x55, 0x4A, 0x43,
+0x74, 0xD5, 0xC7, 0x37, 0x1C, 0xF3, 0xB7, 0x5E, 0xFB, 0x1F, 0x4F, 0xCC, 0x0C, 0x16, 0x3C, 0xAF,
+0x95, 0x37, 0xE1, 0x5D, 0x10, 0xF4, 0x76, 0x9C, 0x38, 0x3A, 0x2C, 0x89, 0xC6, 0xF1, 0x9C, 0x22,
+0xD8, 0x79, 0xD0, 0xD2, 0xB2, 0x63, 0x90, 0xAB, 0x48, 0xE8, 0x2F, 0xED, 0x06, 0x2B, 0x22, 0xF5,
+0x02, 0x4E, 0x3F, 0xE5, 0xBF, 0x7C, 0x6E, 0x00, 0xC5, 0x06, 0xBF, 0x5F, 0x60, 0x87, 0xC6, 0x79,
+0xF1, 0xEE, 0x99, 0x78, 0xD5, 0x51, 0x3C, 0x7E, 0x1A, 0xC1, 0xBF, 0x8B, 0x12, 0xAB, 0x26, 0x0A,
+0x8D, 0xCE, 0x28, 0x6A, 0x18, 0x1D, 0xA8, 0xD9, 0x82, 0x8F, 0xC1, 0x05, 0x78, 0x36, 0xE9, 0xFC,
+0xA5, 0xDE, 0xAB, 0x0E, 0x10, 0x84, 0x46, 0x10, 0xCF, 0xC5, 0xEB, 0x3B, 0x01, 0x48, 0xB3, 0xA3,
+0x1B, 0x13, 0x08, 0x9E, 0x1F, 0x01, 0xA6, 0xB7, 0x52, 0x3C, 0xFC, 0x84, 0xF5, 0x18, 0x04, 0x2D,
+0x43, 0xC8, 0xF3, 0xDE, 0x92, 0x62, 0x58, 0xE0, 0x0A, 0x68, 0x20, 0x66, 0xB5, 0xF5, 0x1C, 0x5B,
+0xF8, 0xE3, 0xD9, 0x4A, 0xB8, 0xF9, 0x86, 0x5B, 0xB5, 0x81, 0x02, 0x08, 0x4C, 0x20, 0x87, 0x6B,
+0xFE, 0xF5, 0xA8, 0xE7, 0xCA, 0x7B, 0x65, 0x38, 0x47, 0xB0, 0x4C, 0xFE, 0xDA, 0x8D, 0x33, 0xB8,
+0x49, 0x36, 0xDE, 0x3B, 0xC0, 0x9F, 0x3F, 0x90, 0x4A, 0x95, 0x7B, 0x50, 0x45, 0x74, 0x11, 0xD3,
+0x86, 0x6E, 0x9A, 0xCF, 0x15, 0x0A, 0x4F, 0xC9, 0x10, 0x91, 0x3D, 0xB5, 0xA8, 0x68, 0x70, 0x9A,
+0x28, 0x81, 0x54, 0x91, 0x26, 0xAC, 0x55, 0x0D, 0x1B, 0x5F, 0xCC, 0x22, 0x07, 0x68, 0xAF, 0x1A,
+0x57, 0x44, 0xB4, 0x02, 0xBA, 0x79, 0xD4, 0x36, 0x67, 0x40, 0xC4, 0x7C, 0x83, 0xBE, 0x57, 0xA5,
+0xF7, 0x12, 0x7F, 0x2C, 0x31, 0x9F, 0x3E, 0x39, 0xAB, 0xDA, 0xB4, 0x5B, 0x0B, 0xD6, 0xF2, 0x3D,
+0x9F, 0x7E, 0xB6, 0x8C, 0xD5, 0x66, 0x00, 0x3F, 0x6A, 0x13, 0xCA, 0xF7, 0xD2, 0x99, 0x51, 0x53,
+0xCD, 0x65, 0xC1, 0xD4, 0xE5, 0x4B, 0x46, 0x82, 0x2C, 0x32, 0x1A, 0xE8, 0x67, 0x94, 0x15, 0xC6,
+0xC6, 0x47, 0x8B, 0xD2, 0xC9, 0xE2, 0x9C, 0xB6, 0xF6, 0x35, 0xDE, 0x60, 0x70, 0x70, 0xD4, 0x42,
+0x2B, 0x2C, 0x6E, 0x43, 0x8E, 0x2F, 0x3F, 0x8C, 0x8B, 0x6A, 0xF1, 0x4E, 0x73, 0x92, 0xF0, 0x15,
+0x3A, 0x54, 0xFC, 0xD7, 0x1E, 0x7A, 0x16, 0xC2, 0x25, 0xB8, 0x70, 0x85, 0x43, 0xAE, 0x8B, 0xC9,
+0x36, 0xA9, 0x5F, 0xC9, 0x17, 0x4B, 0x2F, 0x30, 0x43, 0xFF, 0x47, 0x6B, 0xA5, 0x5E, 0x2F, 0xAD,
+0x3A, 0x9F, 0x10, 0x3D, 0x54, 0x72, 0x31, 0x34, 0x50, 0xC7, 0xA4, 0xAF, 0x4F, 0x62, 0x5E, 0xD8,
+0x33, 0x54, 0x0D, 0xCF, 0x84, 0xB1, 0x21, 0xDA, 0x51, 0x44, 0xE5, 0xCB, 0x6A, 0xA0, 0xD6, 0x20,
+0x30, 0xD3, 0x24, 0x0A, 0x2E, 0x44, 0xA7, 0xBC, 0x68, 0x4B, 0x9F, 0x50, 0x59, 0x82, 0x45, 0x02,
+0xF9, 0x85, 0x7B, 0xB0, 0xF9, 0xC9, 0x9F, 0x7A, 0x71, 0x86, 0xE4, 0xE9, 0xA8, 0x93, 0x4B, 0x24,
+0xBD, 0x96, 0xB2, 0x2C, 0x01, 0x47, 0x81, 0x6B, 0x5B, 0xB5, 0xF5, 0xC9, 0x00, 0x1D, 0xB3, 0x6C,
+0xE5, 0xE8, 0x2F, 0xF6, 0x97, 0xD1, 0xDA, 0x20, 0x99, 0xC5, 0x9C, 0xF1, 0x49, 0x55, 0x2C, 0x19,
+0x90, 0x51, 0x2C, 0x4B, 0x9A, 0x98, 0x64, 0x02, 0xB0, 0x8D, 0xD2, 0x45, 0xBA, 0xAF, 0xEA, 0xD9,
+0x93, 0xEE, 0xDC, 0x2D, 0x46, 0x3E, 0x3C, 0xA8, 0xBD, 0xC4, 0x25, 0x85, 0x35, 0x5A, 0x3B, 0x50,
+0x0E, 0x1D, 0xE4, 0x97, 0x04, 0x63, 0x23, 0xB1, 0xDB, 0x15, 0x6A, 0x26, 0x76, 0xF7, 0xEE, 0xDA,
+0x68, 0xD4, 0xDA, 0x6C, 0x1F, 0x21, 0x91, 0xA5, 0xDC, 0xFC, 0xF2, 0xBB, 0x18, 0xE1, 0x8D, 0xC2,
+0x54, 0x0B, 0x62, 0x73, 0xBA, 0xA8, 0xF8, 0x32, 0x09, 0x1F, 0x29, 0x15, 0xA1, 0xA5, 0xA4, 0x39,
+0xBE, 0xBD, 0xA3, 0xF3, 0x2C, 0x90, 0x79, 0x0E, 0x49, 0x62, 0xD8, 0x28, 0x71, 0x1D, 0xF5, 0x9E,
+0x5E, 0x84, 0x45, 0x59, 0x2C, 0x93, 0xE3, 0x57, 0x79, 0x63, 0x8D, 0xCF, 0x6F, 0x17, 0x8D, 0x47,
+0xE1, 0xA8, 0x9F, 0xB1, 0xCF, 0x58, 0x00, 0xBD, 0xB1, 0x68, 0x01, 0x4A, 0x30, 0xA6, 0x58, 0xCD,
+0x0C, 0x11, 0x67, 0x25, 0x5A, 0x27, 0x5A, 0xCE, 0x62, 0xCA, 0x44, 0xAF, 0x85, 0x6C, 0x5E, 0xFF,
+0x25, 0x73, 0x0A, 0x7B, 0xC4, 0xB3, 0x79, 0xDB, 0x03, 0xC9, 0x02, 0xA4, 0x78, 0xBD, 0x39, 0x4D,
+0x06, 0xA7, 0x5D, 0xE6, 0x41, 0xBB, 0x76, 0x31, 0x9E, 0x9D, 0x68, 0xAA, 0xD5, 0x19, 0x1C, 0xAE,
+0x79, 0x1E, 0xAD, 0x14, 0x23, 0x10, 0x3E, 0x99, 0xE3, 0xA0, 0xEA, 0x49, 0x23, 0x8C, 0x22, 0x67,
+0x18, 0xD1, 0x36, 0xD9, 0x4A, 0x16, 0xA7, 0xF3, 0x23, 0xE3, 0x82, 0x0B, 0x90, 0x2F, 0x93, 0x1E,
+0x49, 0x9C, 0x5F, 0xA9, 0x38, 0x43, 0x16, 0x3E, 0xB1, 0x69, 0x93, 0xE5, 0xF6, 0x77, 0x7E, 0x1D,
+0x43, 0x77, 0x34, 0xBA, 0x39, 0x88, 0xDE, 0xEC, 0x68, 0x15, 0xCC, 0xAE, 0xB7, 0x47, 0xB6, 0x55,
+0x99, 0x45, 0xD4, 0x2B, 0x70, 0x98, 0x9F, 0x0D, 0xD0, 0xA1, 0x4A, 0x06, 0x7E, 0x60, 0x68, 0x9C,
+0x16, 0x0C, 0xF2, 0xA9, 0x2A, 0x64, 0x39, 0x3B, 0x9E, 0xFD, 0x4C, 0x31, 0x21, 0x51, 0x9B, 0xE4,
+0x40, 0x18, 0x02, 0xC6, 0x44, 0xF1, 0x3B, 0x8E, 0x5A, 0x26, 0x57, 0xA6, 0x88, 0x63, 0xA3, 0x9C,
+0x8E, 0x11, 0x6E, 0x31, 0xC3, 0x43, 0x08, 0x25, 0x04, 0xE5, 0x1C, 0x28, 0x52, 0x74, 0x75, 0x69,
+0x27, 0x02, 0xA3, 0x13, 0x46, 0x46, 0x20, 0x3C, 0x4B, 0xA5, 0x83, 0x80, 0x5A, 0xB6, 0x15, 0x18,
+0xC8, 0x10, 0x49, 0xA6, 0x7A, 0xDE, 0xC3, 0xA4, 0x5D, 0x59, 0xCF, 0x74, 0xF1, 0x09, 0x79, 0xFF,
+0x84, 0x98, 0xAD, 0x56, 0x2B, 0xDE, 0x0F, 0xD6, 0xCD, 0x32, 0x55, 0x59, 0xA2, 0xE8, 0x2C, 0x92,
+0xF9, 0xD4, 0x4A, 0x42, 0x04, 0x35, 0x63, 0x06, 0xEE, 0xA6, 0x1A, 0xBD, 0x1A, 0xFD, 0xC4, 0x70,
+0x38, 0xDC, 0xCE, 0xAA, 0x83, 0x57, 0x99, 0x78, 0x20, 0x0C, 0xD5, 0x8E, 0xA1, 0x29, 0x29, 0x28,
+0x16, 0x6F, 0x27, 0x12, 0xFB, 0x7D, 0xBC, 0x27, 0x76, 0x89, 0x46, 0x3A, 0xF4, 0xDD, 0xFF, 0xF1,
+0x13, 0x59, 0x9D, 0xE2, 0x95, 0xC9, 0xDC, 0x88, 0xBE, 0xD8, 0xBE, 0xB9, 0xFD, 0xCC, 0xEE, 0xC5,
+0x7F, 0x72, 0x1B, 0x6B, 0xA2, 0x5F, 0x5D, 0xE1, 0xCB, 0x29, 0x80, 0xB0, 0x28, 0xA9, 0x31, 0x34,
+0xCC, 0x8D, 0x2F, 0x94, 0xE8, 0x7F, 0xBC, 0x80, 0x33, 0x2C, 0x0B, 0xE4, 0x87, 0x3D, 0xF3, 0x31,
+0xC5, 0x5D, 0x1F, 0xA9, 0x0F, 0x7D, 0xCF, 0x45, 0x22, 0x07, 0xEF, 0xA7, 0xB4, 0xDE, 0xC5, 0x29,
+0xE1, 0x15, 0x74, 0x8E, 0xE7, 0xA7, 0xAC, 0xB9, 0xC2, 0x84, 0xFB, 0x76, 0x7D, 0x98, 0xEF, 0x1C,
+0x81, 0xA5, 0x28, 0x57, 0x62, 0x33, 0xD7, 0x1B, 0xF2, 0xFF, 0x14, 0x3C, 0xD5, 0x22, 0xC0, 0x8D,
+0x1C, 0x9C, 0x2F, 0x4C, 0x63, 0x30, 0xA4, 0x13, 0x96, 0xED, 0xC4, 0x31, 0x8B, 0x8F, 0x26, 0x19,
+0x80, 0x33, 0xE5, 0x6C, 0x85, 0xD2, 0xE7, 0xCE, 0x58, 0xB7, 0xB5, 0xDE, 0x90, 0x46, 0xB2, 0xF1,
+0x9D, 0x31, 0xBE, 0xC7, 0x61, 0x57, 0x5D, 0xEE, 0x34, 0xFD, 0x1D, 0xCB, 0x4E, 0x22, 0x20, 0x6F,
+0x97, 0x50, 0xBE, 0x10, 0x3B, 0x13, 0x63, 0xB2, 0xC0, 0xE7, 0x22, 0x10, 0xD5, 0x2F, 0x4C, 0xE0,
+0xB3, 0x5A, 0xD0, 0x8C, 0xA9, 0xAF, 0xDB, 0x76, 0xBA, 0xBB, 0xF6, 0xE2, 0xA2, 0x4D, 0x91, 0xEA,
+0x78, 0xAA, 0x86, 0xD0, 0xC3, 0x0D, 0xF5, 0x07, 0xCD, 0xDD, 0xB0, 0x80, 0xF5, 0xEF, 0x0F, 0x0B,
+0xFA, 0xE2, 0xC9, 0x59, 0x58, 0x20, 0x92, 0x35, 0x52, 0x1F, 0x02, 0xE6, 0x7B, 0xAA, 0xE6, 0x26,
+0xD3, 0x00, 0xE5, 0x97, 0x24, 0x75, 0x95, 0xC9, 0xA7, 0x1D, 0xE2, 0x52, 0xC5, 0x24, 0xB0, 0xF5,
+0x12, 0xB0, 0xC6, 0x06, 0x63, 0x96, 0x52, 0x16, 0x98, 0xEE, 0x01, 0xC7, 0xC8, 0x2D, 0xCE, 0x07,
+0x76, 0xC8, 0x37, 0x0D, 0x2A, 0x30, 0x74, 0xC6, 0x1D, 0xDD, 0x90, 0x64, 0x6E, 0x3C, 0x81, 0x09,
+0xEB, 0xEE, 0x0A, 0x09, 0x18, 0x3D, 0xCE, 0xF5, 0xE1, 0x81, 0xA9, 0xB1, 0x49, 0x56, 0xB0, 0x60,
+0x78, 0xCF, 0x94, 0xCA, 0x04, 0xBA, 0xA9, 0x03, 0xCE, 0x9A, 0xB0, 0xE0, 0x74, 0xCF, 0x82, 0x06,
+0x04, 0xBE, 0x5A, 0x5F, 0xF9, 0x4D, 0x06, 0xD1, 0x04, 0xE2, 0x2D, 0xBF, 0x54, 0x03, 0x60, 0x08,
+0x20, 0x9B, 0xBA, 0xFB, 0xEB, 0x1F, 0x00, 0xFF, 0xE3, 0x1A, 0xC1, 0x47, 0xE3, 0x24, 0xF1, 0xB7,
+0x31, 0xFD, 0x6A, 0xDF, 0x0C, 0x3F, 0xB9, 0xA9, 0x6E, 0x67, 0x54, 0x00, 0x45, 0x8E, 0x58, 0x0D,
+0x5A, 0x79, 0x1C, 0xB3, 0xBD, 0xB2, 0xD0, 0x75, 0xA4, 0x75, 0x20, 0xA4, 0x44, 0xFC, 0x27, 0x90,
+0x28, 0xFE, 0xF7, 0x85, 0xF7, 0x5E, 0x77, 0x18, 0xB8, 0x98, 0x44, 0x52, 0x36, 0x4D, 0xA0, 0x3E,
+0x4E, 0xE4, 0x9E, 0x65, 0x12, 0x01, 0xB0, 0xC7, 0xF6, 0x86, 0x9C, 0x92, 0x4F, 0x69, 0xD1, 0xF8,
+0x0D, 0xED, 0x0C, 0xB0, 0x37, 0x02, 0x9A, 0xFC, 0x2B, 0x51, 0xAB, 0xFD, 0x7A, 0x81, 0xCE, 0xEB,
+0xBD, 0x9F, 0x90, 0x47, 0xEB, 0x2A, 0x1B, 0x04, 0xEA, 0xA4, 0x15, 0xD0, 0xC5, 0x1D, 0x0E, 0x28,
+0x31, 0x96, 0x95, 0xC5, 0xAB, 0x92, 0x25, 0xBD, 0x5A, 0x3D, 0x44, 0x5C, 0xB6, 0x35, 0x39, 0x2C,
+0x5F, 0xDC, 0xC1, 0x5E, 0xAD, 0x93, 0x94, 0xC0, 0x9F, 0xF6, 0xC1, 0xA4, 0x63, 0x6C, 0x1A, 0x3A,
+0xA9, 0x15, 0x77, 0xBE, 0x6C, 0xEC, 0x59, 0x2B, 0x79, 0xA8, 0xF7, 0xC8, 0x6C, 0x74, 0xE9, 0xD9,
+0xCD, 0x6B, 0xD0, 0x44, 0x1A, 0x14, 0xBA, 0x11, 0xC4, 0xEB, 0xF9, 0xB2, 0x75, 0x3B, 0x45, 0x7B,
+0x4A, 0xD0, 0x53, 0x2A, 0xD2, 0x93, 0x83, 0x0F, 0x08, 0x04, 0xD5, 0x42, 0x13, 0x61, 0xA2, 0x06,
+0x75, 0x3F, 0x97, 0xA1, 0x20, 0xC9, 0xE2, 0xC1, 0x47, 0x3E, 0x4A, 0xF2, 0x73, 0xB8, 0x09, 0x44,
+0x18, 0xE1, 0x3F, 0x02, 0x5C, 0x7B, 0xF9, 0x5D, 0x20, 0x8E, 0xCC, 0xC1, 0x9E, 0x8D, 0x64, 0xB3,
+0x97, 0x91, 0x7E, 0x9C, 0x9F, 0xF0, 0xA8, 0x7E, 0xAA, 0x68, 0xB1, 0x9E, 0xBB, 0xBE, 0xDA, 0x0A,
+0xBD, 0xEB, 0xE6, 0x3E, 0x8B, 0xAA, 0x77, 0xBC, 0x6A, 0x71, 0x97, 0x38, 0xA9, 0x6A, 0xDF, 0xC4,
+0xF7, 0xAE, 0xF4, 0x2E, 0xFD, 0x4A, 0x4C, 0x09, 0xB9, 0x36, 0xCF, 0xB2, 0xC4, 0x02, 0xD2, 0x0C,
+0x08, 0x44, 0xB3, 0x3F, 0x4B, 0x6F, 0x05, 0x4A, 0xA8, 0x34, 0xF9, 0x5C, 0x94, 0x40, 0x2E, 0x29,
+0x65, 0x31, 0x3F, 0x82, 0xF5, 0x11, 0xBC, 0x45, 0x00, 0xB2, 0x97, 0xC6, 0xD9, 0x12, 0x79, 0x0C,
+0x97, 0x65, 0xD8, 0x9C, 0xD0, 0xA8, 0xBB, 0xC7, 0x7B, 0x70, 0xD8, 0xC4, 0x23, 0x22, 0xC7, 0xD4,
+0xC0, 0x55, 0x12, 0xBE, 0x24, 0xF6, 0x37, 0xD5, 0x13, 0x71, 0x41, 0x5C, 0xEE, 0x0A, 0xB7, 0xED,
+0xA4, 0x68, 0xB8, 0xED, 0xA0, 0x35, 0x5A, 0xE8, 0x91, 0x42, 0xD9, 0x4D, 0x67, 0x31, 0xB1, 0x7B,
+0xB5, 0x80, 0xAA, 0x70, 0xF2, 0x73, 0x0E, 0x38, 0xCC, 0x03, 0x90, 0xE2, 0x04, 0xEE, 0x13, 0x28,
+0xB6, 0xFD, 0x89, 0xDC, 0x6F, 0x93, 0x18, 0xA4, 0x98, 0x7A, 0x4E, 0x00, 0xC4, 0x51, 0x0D, 0xF7,
+0x82, 0x58, 0x10, 0x1C, 0xA3, 0x3E, 0xB4, 0x74, 0x7A, 0x1D, 0x15, 0x42, 0xE7, 0x2C, 0x14, 0x81,
+0xF9, 0x83, 0xF5, 0xE2, 0xAF, 0x9D, 0xF4, 0xD8, 0x32, 0x55, 0x9A, 0xC5, 0xA4, 0xB6, 0x70, 0x8E,
+0x85, 0xCB, 0xC7, 0x66, 0x14, 0x77, 0xE8, 0x2A, 0x50, 0xA6, 0xAD, 0xEF, 0x60, 0x30, 0x61, 0x61,
+0x50, 0x15, 0xFF, 0xFD, 0xB4, 0xF7, 0x4A, 0x62, 0x4B, 0xA6, 0x4F, 0x8A, 0xB4, 0x73, 0x1A, 0xE5,
+0xAB, 0x9A, 0x7C, 0xD1, 0x85, 0x1D, 0x9B, 0xEC, 0x11, 0x4F, 0xF2, 0xFA, 0x5D, 0x4B, 0x3A, 0x9E,
+0x73, 0xFD, 0x0F, 0x1C, 0xC0, 0x08, 0xA5, 0x22, 0x07, 0xBA, 0xB6, 0x2F, 0xA4, 0xC2, 0xC5, 0x3F,
+0xE4, 0x31, 0x01, 0x14, 0x7A, 0x99, 0xFC, 0x17, 0xD3, 0x76, 0x8A, 0x0E, 0xF4, 0x52, 0x46, 0x4B,
+0xB7, 0x0B, 0x3F, 0xD9, 0x46, 0xDE, 0xF9, 0xCC, 0xD7, 0x76, 0x0A, 0xD7, 0x93, 0xE2, 0x78, 0x03,
+0x46, 0x6D, 0x0C, 0xAE, 0x86, 0x86, 0x7E, 0x7D, 0xF3, 0xC7, 0xC5, 0x54, 0x4F, 0x7F, 0xDF, 0x7A,
+0x3A, 0x41, 0xFA, 0xD7, 0x79, 0x0D, 0x32, 0x07, 0x2F, 0xB0, 0xF0, 0x55, 0x7C, 0x40, 0xCA, 0x04,
+0x7F, 0x0D, 0xEA, 0x82, 0x86, 0xFC, 0x0C, 0xAF, 0x3F, 0xA5, 0xB0, 0xD2, 0xA6, 0xF8, 0x5C, 0x41,
+0x78, 0xE6, 0x47, 0x5F, 0x37, 0xE0, 0x1E, 0xCC, 0x90, 0x20, 0xCD, 0xC3, 0xFB, 0x93, 0x44, 0x22,
+0xAB, 0xE2, 0x47, 0xBC, 0x1D, 0x19, 0x41, 0x84, 0x03, 0x8C, 0x03, 0xB8, 0xBA, 0x5E, 0xDC, 0x15,
+0xFD, 0x56, 0xFC, 0x51, 0xE9, 0xF0, 0xD0, 0x41, 0xA2, 0x44, 0xC5, 0xDE, 0xFF, 0xDC, 0xD9, 0x85,
+0xBA, 0x39, 0xFF, 0xEA, 0xC9, 0xD9, 0x8F, 0xA9, 0xC0, 0xC8, 0x4B, 0x07, 0x08, 0xF6, 0xC6, 0x33,
+0xFE, 0x81, 0xC1, 0x81, 0xF2, 0x8B, 0xCC, 0x22, 0x99, 0xB5, 0x48, 0x7E, 0x94, 0x07, 0xEB, 0x85,
+0x83, 0xD0, 0xF7, 0x8F, 0xD7, 0x82, 0xFC, 0x57, 0xE0, 0x20, 0x12, 0x18, 0x1F, 0xF5, 0x60, 0x97,
+0xDE, 0x10, 0x6B, 0x6D, 0xDD, 0x35, 0x0F, 0xE8, 0xF6, 0x9E, 0x21, 0xE0, 0x06, 0x10, 0xDA, 0x94,
+0xDB, 0x85, 0x5E, 0x23, 0x36, 0x5D, 0x7B, 0x3B, 0x51, 0x68, 0xC8, 0xD2, 0xF1, 0x9A, 0xE1, 0xC4,
+0x58, 0xCF, 0xFA, 0x2B, 0x6F, 0x8E, 0xAE, 0xE3, 0x99, 0x4D, 0x80, 0x07, 0x7A, 0x5D, 0x0A, 0x7F,
+0x39, 0xFB, 0x7B, 0xBB, 0xA5, 0x81, 0x15, 0xFD, 0x26, 0xC8, 0x85, 0x78, 0x71, 0xD3, 0x47, 0x9C,
+0xE0, 0x70, 0x6D, 0xD5, 0x8A, 0x04, 0x84, 0xDD, 0x3D, 0xA6, 0x7F, 0xC0, 0xE5, 0xAD, 0x98, 0x45,
+0x86, 0xDD, 0xBC, 0x2D, 0x79, 0x30, 0xD7, 0x8E, 0x88, 0xC7, 0xC3, 0x97, 0x61, 0x91, 0x26, 0x8D,
+0xBE, 0xB2, 0x93, 0xD5, 0x5C, 0x49, 0x59, 0x04, 0x0C, 0xA6, 0x5E, 0x29, 0xC8, 0xFE, 0x39, 0x37,
+0xD7, 0xF4, 0xF0, 0x7D, 0x2E, 0xE2, 0x33, 0xF8, 0x67, 0xE6, 0xFD, 0xBC, 0xB0, 0xC2, 0xC2, 0x65,
+0x86, 0x67, 0x6A, 0x0F, 0xA7, 0x6C, 0xD7, 0x52, 0xF5, 0x28, 0xCD, 0x63, 0xBA, 0x4A, 0xA7, 0x76,
+0x6D, 0x27, 0xE9, 0x82, 0xDB, 0xBD, 0xD7, 0x60, 0x39, 0x82, 0x75, 0x91, 0x7E, 0x29, 0x5E, 0xA3,
+0x46, 0x8F, 0xDD, 0x2F, 0x96, 0xC5, 0xA6, 0xEC, 0x8B, 0xCC, 0x21, 0x23, 0x6E, 0x22, 0xDC, 0x8D,
+0x74, 0xA7, 0x9B, 0x6A, 0x2A, 0x8D, 0x22, 0x2D, 0xD2, 0x79, 0x45, 0x0C, 0x31, 0xFA, 0x6B, 0x88,
+0x9B, 0x21, 0x3D, 0x41, 0x59, 0x64, 0x80, 0xDD, 0x8E, 0x0B, 0x4A, 0xF3, 0x36, 0x9B, 0xFB, 0x58,
+0xAB, 0xA3, 0x72, 0x7E, 0x99, 0x35, 0xC9, 0x0C, 0xB1, 0x9B, 0xCC, 0xD7, 0xE9, 0x61, 0xB8, 0x42,
+0xA5, 0xCD, 0x9D, 0xE8, 0x0E, 0xE2, 0x23, 0xD0, 0x66, 0xC4, 0xBB, 0x85, 0x0F, 0xA1, 0x13, 0x14,
+0x2E, 0x8C, 0x68, 0x59, 0x94, 0x09, 0xB7, 0xE0, 0xEE, 0x51, 0x39, 0xFF, 0xBC, 0x64, 0xF2, 0x9A,
+0x3D, 0x72, 0x57, 0x6B, 0xD5, 0x43, 0x26, 0xC0, 0xBC, 0x6C, 0x5E, 0xAE, 0x11, 0x6C, 0xF4, 0xD0,
+0xD3, 0xEC, 0xE3, 0x04, 0x2E, 0xD5, 0xA1, 0x86, 0xE4, 0x3A, 0x8F, 0xC6, 0x54, 0x13, 0x3A, 0x1D,
+0x17, 0xB7, 0x31, 0x31, 0xF4, 0xA8, 0xA8, 0x58, 0xF6, 0x94, 0x74, 0x4A, 0xF3, 0x67, 0x00, 0xB3,
+0x9D, 0x6F, 0x64, 0x5E, 0xC9, 0x49, 0x34, 0xF3, 0x71, 0x5A, 0x3A, 0x22, 0x73, 0x44, 0x6F, 0xEC,
+0xF9, 0xB9, 0xAF, 0x3F, 0xFD, 0x43, 0x95, 0x68, 0x0D, 0x86, 0xD5, 0xCB, 0x50, 0x9B, 0x25, 0x7B,
+0x26, 0x0A, 0xD8, 0xCC, 0x4D, 0x14, 0x46, 0x3A, 0xB5, 0xBA, 0xFE, 0xDB, 0xFD, 0xEB, 0xBC, 0x15,
+0x70, 0xA7, 0x29, 0x77, 0xA4, 0x55, 0x44, 0x17, 0x85, 0xED, 0x90, 0x12, 0x9F, 0x85, 0x9D, 0xE2,
+0xA5, 0x79, 0x20, 0x30, 0xE6, 0xA2, 0x23, 0xAA, 0xD8, 0xAC, 0xA3, 0xEE, 0xAC, 0xA4, 0x8B, 0x0C,
+0xE2, 0x45, 0x98, 0x33, 0x81, 0x5A, 0x93, 0xB1, 0xD6, 0xD2, 0xA7, 0x4A, 0x09, 0x26, 0xDE, 0x2A,
+0xB7, 0xBA, 0x43, 0xAE, 0x1B, 0x4E, 0xEC, 0xC6, 0xD3, 0x02, 0x86, 0x9C, 0x4F, 0x92, 0x28, 0xA8,
+0xE1, 0x76, 0xA4, 0x69, 0x9D, 0xE5, 0x6B, 0x58, 0xE0, 0x7B, 0x6D, 0x5C, 0xCD, 0x84, 0x89, 0x53,
+0x46, 0x08, 0xE1, 0xE9, 0xE8, 0xE9, 0x89, 0xF9, 0x0F, 0x4C, 0x70, 0x87, 0x86, 0xE4, 0xD4, 0xCE,
+0xEA, 0xF2, 0xD2, 0xA9, 0xE6, 0x49, 0xC2, 0x10, 0x19, 0x18, 0x9A, 0x71, 0xF5, 0xDD, 0x75, 0xF4,
+0xE1, 0xA1, 0xAA, 0x44, 0xBD, 0x11, 0x85, 0x19, 0xA5, 0x7F, 0x6C, 0xC7, 0xC4, 0x2C, 0xE5, 0x59,
+0xC3, 0xA5, 0xA3, 0x51, 0x95, 0xB6, 0x91, 0x99, 0x90, 0x24, 0xF3, 0xDF, 0x67, 0xF4, 0x60, 0x5B,
+0x6F, 0x66, 0x0D, 0xC2, 0x2C, 0xFC, 0xDF, 0xED, 0x6A, 0xB8, 0xCD, 0x7C, 0x1E, 0x41, 0xCB, 0xA8,
+0xF1, 0x7B, 0x30, 0xBA, 0x4D, 0xAF, 0xEB, 0xBB, 0x7B, 0xBB, 0x1F, 0x54, 0x49, 0x71, 0x64, 0x6A,
+0xEB, 0xD9, 0x11, 0xA6, 0xBE, 0x47, 0x4B, 0x41, 0x64, 0x6D, 0x1C, 0xAE, 0x06, 0x93, 0x05, 0x34,
+0x1E, 0x6C, 0x55, 0x49, 0xAF, 0x6E, 0x1A, 0x8A, 0x4D, 0xC1, 0x56, 0x98, 0xE2, 0x78, 0x41, 0x0A,
+0xE7, 0x18, 0x7A, 0xD9, 0xBA, 0x08, 0x7B, 0x9C, 0xEF, 0xBB, 0xA9, 0x29, 0x05, 0x31, 0x3B, 0xFB,
+0x70, 0xD7, 0x1C, 0x18, 0x05, 0x25, 0x30, 0x26, 0x4C, 0xD7, 0xE4, 0xFF, 0x6F, 0xB6, 0x28, 0xFD,
+0x20, 0xF4, 0xC2, 0x35, 0x81, 0xE7, 0xA8, 0x15, 0x64, 0x7D, 0xDC, 0x4E, 0xB1, 0xE5, 0xCF, 0x31,
+0xD7, 0x61, 0xA9, 0x5A, 0xC4, 0x82, 0x34, 0xA8, 0x25, 0x35, 0xC2, 0xCD, 0x54, 0x2B, 0x00, 0x5D,
+0x16, 0xAB, 0x97, 0xDD, 0x2D, 0x43, 0x2A, 0x3D, 0xC7, 0x91, 0xE2, 0x70, 0xDA, 0x45, 0x2D, 0x1F,
+0x7C, 0x4A, 0xF7, 0xC1, 0xAB, 0xBF, 0x83, 0xDA, 0x2D, 0xCC, 0x44, 0xEF, 0xD9, 0x7A, 0x98, 0x1C,
+0xF6, 0xB9, 0x74, 0x07, 0x81, 0x54, 0xEA, 0x00, 0x1E, 0x15, 0x6C, 0x77, 0xBE, 0x66, 0x30, 0x85,
+0xF8, 0x67, 0x9B, 0x00, 0xB0, 0xEF, 0xC8, 0xB5, 0x66, 0x8D, 0xAC, 0xA3, 0xE1, 0x93, 0x82, 0xBC,
+0xA3, 0x3E, 0x54, 0xDA, 0x11, 0x13, 0x85, 0x87, 0x46, 0x57, 0xC1, 0x35, 0x4A, 0x1F, 0x79, 0x15,
+0xE1, 0x3A, 0x7F, 0x96, 0x87, 0x27, 0xA9, 0x39, 0x60, 0x55, 0xC8, 0x21, 0x20, 0x18, 0x4B, 0x59,
+0xB6, 0x23, 0xFB, 0xF1, 0xAF, 0xE8, 0x27, 0x6B, 0x3D, 0x2B, 0xD2, 0xB1, 0xD6, 0xAE, 0x10, 0x81,
+0x48, 0x13, 0x72, 0x65, 0xD3, 0x60, 0x2B, 0x77, 0x93, 0xFD, 0xFF, 0xBD, 0xE4, 0x4E, 0x7B, 0x04,
+0x69, 0x79, 0x1B, 0xEE, 0xFC, 0x6B, 0x3A, 0x5C, 0x52, 0x1B, 0x2D, 0x23, 0x7A, 0xDF, 0xCD, 0x28,
+0x59, 0x0B, 0x7C, 0x9A, 0x6B, 0x35, 0xF2, 0xDA, 0x92, 0x6D, 0x80, 0x75, 0xDA, 0xAA, 0xAE, 0x3E,
+0xAE, 0xDB, 0xCC, 0x3C, 0x34, 0x7C, 0x0C, 0x78, 0x5B, 0x16, 0xD0, 0xB5, 0x89, 0xA4, 0x9C, 0x37,
+0xEC, 0xC9, 0xB3, 0xA8, 0xB9, 0x26, 0xF2, 0xA7, 0xE3, 0x51, 0xEA, 0xC8, 0x53, 0x66, 0x60, 0xA5,
+0xC1, 0xB2, 0xC8, 0x9C, 0x8D, 0xD5, 0xCC, 0xC3, 0x79, 0x17, 0xC7, 0x69, 0x9A, 0xAF, 0x26, 0x39,
+0xC9, 0xF8, 0x9B, 0xAE, 0xFE, 0x17, 0x6D, 0x22, 0x52, 0x1A, 0x80, 0xCD, 0xA3, 0x0A, 0x26, 0x84,
+0x64, 0x29, 0xA2, 0xBE, 0xDB, 0xFA, 0x33, 0x64, 0x21, 0xD1, 0xB6, 0xBF, 0x0F, 0xF0, 0x6B, 0x07,
+0x9B, 0xF1, 0x3B, 0x15, 0x05, 0x33, 0xDC, 0x79, 0xB6, 0x3D, 0x38, 0xFD, 0xE9, 0x7C, 0x4B, 0x84,
+0x48, 0x71, 0x2A, 0xF1, 0x0B, 0xEB, 0xEF, 0x5E, 0x24, 0xE8, 0x0B, 0xD6, 0xEE, 0x2B, 0x72, 0xE3,
+0xC2, 0xF9, 0x8F, 0xD7, 0x3B, 0x07, 0x76, 0x40, 0xAB, 0xB0, 0xA9, 0xA9, 0xEC, 0x46, 0xEB, 0xA8,
+0x1C, 0x17, 0xB9, 0xD8, 0x55, 0x20, 0x9F, 0x52, 0x2C, 0xC7, 0x35, 0x16, 0x33, 0x8F, 0x36, 0xEA,
+0x67, 0x0C, 0x84, 0x84, 0x36, 0x28, 0x5F, 0xC2, 0xBA, 0x6C, 0x71, 0x9C, 0x5A, 0x98, 0xA9, 0x5A,
+0x07, 0x78, 0xA1, 0x4E, 0x87, 0x23, 0xA8, 0xA1, 0xF8, 0x78, 0xBF, 0x12, 0x8D, 0x98, 0x1A, 0x5D,
+0x6D, 0x72, 0x77, 0xD0, 0x87, 0x90, 0x8C, 0xB4, 0xD1, 0x95, 0x28, 0xE3, 0xF0, 0x0E, 0x46, 0x73,
+0xBC, 0x71, 0x2E, 0x86, 0x97, 0xC7, 0x20, 0x45, 0xD0, 0x37, 0xC5, 0x98, 0x49, 0x60, 0xF4, 0xA6,
+0x92, 0x5C, 0x82, 0x91, 0x21, 0xAE, 0xD0, 0xD1, 0xBA, 0xA7, 0x0E, 0x03, 0x42, 0x4F, 0x07, 0xF5,
+0xAA, 0xE2, 0xD8, 0x24, 0x17, 0x2A, 0x28, 0xD6, 0x39, 0x9D, 0x6B, 0xCE, 0x7A, 0x8B, 0x42, 0xC6,
+0x5B, 0xBA, 0xAB, 0x6F, 0xB0, 0xCE, 0x63, 0x88, 0xB1, 0xEC, 0x07, 0x44, 0x98, 0x42, 0x72, 0x05,
+0xBC, 0xBA, 0x55, 0xC4, 0x97, 0xA1, 0x0C, 0x91, 0x49, 0x4D, 0x25, 0x67, 0xA2, 0xEE, 0xA1, 0x74,
+0xE8, 0x9F, 0x2A, 0x8A, 0x75, 0x03, 0x99, 0x21, 0x98, 0x52, 0x0E, 0xAE, 0xF3, 0xFE, 0xD7, 0x68,
+0x65, 0x9D, 0xD4, 0x17, 0x95, 0x00, 0x35, 0x22, 0xA4, 0xBE, 0x12, 0xCF, 0x9F, 0xE8, 0x03, 0x10,
+0x62, 0x3B, 0xEE, 0xED, 0x0A, 0xC0, 0x53, 0x56, 0x59, 0x57, 0x23, 0xC2, 0x03, 0xFB, 0x88, 0xA6,
+0xEE, 0xD0, 0x58, 0x79, 0x66, 0x5E, 0x10, 0x33, 0x84, 0x59, 0x12, 0x81, 0x96, 0x1E, 0xBE, 0x44,
+0x69, 0x0B, 0xDD, 0xEF, 0x6C, 0xCE, 0xBF, 0x71, 0x3D, 0x3A, 0x6F, 0x12, 0x54, 0x22, 0x81, 0x29,
+0x9B, 0x37, 0x1F, 0x83, 0x74, 0xA7, 0x49, 0xF2, 0xE2, 0xBF, 0x8E, 0xC4, 0x07, 0x5C, 0x0D, 0xE6,
+0x1A, 0x05, 0x3C, 0x35, 0x77, 0xC9, 0xC9, 0x3C, 0x7D, 0xE5, 0x85, 0xD1, 0x62, 0x9C, 0x6F, 0xBF,
+0x21, 0xE8, 0x92, 0x42, 0x13, 0xA9, 0x8F, 0xAA, 0xD6, 0x54, 0x52, 0x03, 0xCC, 0x18, 0x0F, 0x0D,
+0xB5, 0x9F, 0x36, 0x3B, 0x1A, 0x85, 0x3C, 0xA6, 0x58, 0xC0, 0x6C, 0x44, 0x3D, 0xC9, 0x68, 0x47,
+0x14, 0x70, 0xA2, 0xD6, 0x89, 0x97, 0x64, 0x15, 0x67, 0x33, 0xAE, 0x9D, 0xD9, 0xB5, 0x91, 0x5C,
+0xD7, 0xBD, 0xCF, 0xD3, 0xF2, 0xBC, 0x53, 0x3A, 0xFD, 0xD5, 0x95, 0xC6, 0xE0, 0xD3, 0x33, 0x6B,
+0x35, 0xE7, 0x27, 0x03, 0x01, 0xA4, 0xEF, 0x3D, 0xC9, 0x23, 0xB4, 0x06, 0x45, 0xF7, 0x8D, 0x79,
+0xA8, 0xA6, 0x5A, 0x8D, 0x7D, 0xC4, 0xA1, 0x65, 0xC7, 0xDC, 0x5F, 0xAB, 0x0C, 0xAD, 0x40, 0x80,
+0x12, 0xB5, 0xE6, 0x55, 0xD9, 0xE6, 0x02, 0x53, 0x15, 0x51, 0x05, 0xA3, 0x95, 0x6C, 0x93, 0x60,
+0x91, 0xFE, 0xF6, 0x30, 0x62, 0x3F, 0x87, 0x3C, 0x5B, 0x5F, 0x31, 0x9A, 0xC7, 0x1D, 0x91, 0xF2,
+0x55, 0xCB, 0xE1, 0x23, 0xAD, 0xBF, 0x92, 0x9F, 0x15, 0x0E, 0xC9, 0xB8, 0xD7, 0x9D, 0xA6, 0x4A,
+0xA1, 0x61, 0x37, 0x09, 0x38, 0x66, 0xDC, 0x6B, 0x4F, 0x88, 0x16, 0xF0, 0xD7, 0x9D, 0xFB, 0xE0,
+0x8D, 0x93, 0xDD, 0xBA, 0x96, 0xA1, 0x69, 0xFE, 0x0C, 0x06, 0xC5, 0xF9, 0x50, 0xF2, 0x43, 0xA7,
+0xFE, 0x95, 0xA1, 0x8D, 0xC5, 0xB0, 0xA7, 0xBE, 0xAE, 0xD0, 0x94, 0xFC, 0x99, 0xD5, 0x7F, 0xFB,
+0x7D, 0xFD, 0xC8, 0x95, 0xCF, 0xB3, 0xB8, 0x25, 0x74, 0x18, 0xDC, 0x47, 0x87, 0xF1, 0x96, 0x8C,
+0xEF, 0x61, 0x96, 0x80, 0x38, 0x06, 0xA7, 0xE6, 0xD4, 0x75, 0x44, 0xFE, 0xD4, 0x47, 0x01, 0xE7,
+0x3C, 0x1A, 0xE2, 0xEB, 0x41, 0xD5, 0x2B, 0xF0, 0x77, 0x7A, 0x54, 0x5D, 0xE6, 0xEC, 0x46, 0x39,
+0x8F, 0x9F, 0x6E, 0x01, 0xBF, 0x39, 0x59, 0x5D, 0xF2, 0x41, 0x82, 0x88, 0x4B, 0xE9, 0x74, 0x8A,
+0x3A, 0x16, 0xD1, 0x7C, 0xF2, 0x9D, 0xE2, 0x54, 0x50, 0x2A, 0xD9, 0x5A, 0x98, 0xC9, 0x9B, 0x0C,
+0xB3, 0x3A, 0x88, 0x7A, 0x3E, 0xC4, 0xFA, 0xB4, 0x9A, 0xD4, 0xAC, 0x31, 0x42, 0x52, 0x49, 0x3C,
+0x5A, 0x59, 0x98, 0xAB, 0x21, 0x6E, 0xC5, 0x88, 0xFC, 0x2E, 0x48, 0x1F, 0x32, 0x48, 0x84, 0xF3,
+0xD5, 0xCB, 0x8F, 0x4D, 0x12, 0xB0, 0xEB, 0xA0, 0x95, 0xCB, 0x14, 0xA1, 0x0B, 0xDC, 0x58, 0xE2,
+0xAE, 0x95, 0x8A, 0x1C, 0x84, 0xD6, 0x44, 0x68, 0xB6, 0x3C, 0x35, 0xAC, 0x84, 0xEB, 0x4E, 0x0B,
+0xC4, 0x0E, 0x61, 0x81, 0x0D, 0xAA, 0x37, 0xB9, 0x5A, 0xD2, 0x07, 0xF3, 0x60, 0xBA, 0xB0, 0xF1,
+0x94, 0xED, 0x45, 0x27, 0x39, 0x02, 0x32, 0x81, 0xAB, 0x8E, 0xCC, 0xE0, 0x2D, 0x55, 0xDC, 0x6F,
+0x0A, 0xA4, 0x0D, 0xFB, 0x07, 0x32, 0x08, 0x23, 0x20, 0x8F, 0x41, 0x2E, 0xF6, 0xAC, 0x7C, 0xF3,
+0x8B, 0x72, 0x03, 0x81, 0x69, 0xFB, 0xEC, 0x28, 0x57, 0x21, 0x05, 0xC4, 0xE6, 0x50, 0xBD, 0x44,
+0xC2, 0x0B, 0x9D, 0x3C, 0x13, 0x4F, 0x47, 0xDA, 0x8D, 0xBE, 0x85, 0x47, 0x98, 0xA7, 0xEC, 0x8F,
+0x1B, 0x4E, 0x99, 0x90, 0x42, 0x04, 0x62, 0x65, 0xBA, 0x1E, 0x91, 0x40, 0x41, 0x41, 0x60, 0x83,
+0x49, 0x24, 0xD8, 0xD1, 0x56, 0x73, 0x5B, 0xA6, 0xDB, 0xDA, 0x8E, 0x4F, 0x83, 0x59, 0x79, 0x78,
+0x53, 0x89, 0x88, 0x72, 0x12, 0x0D, 0x20, 0xA6, 0x28, 0x43, 0x94, 0x71, 0x5F, 0xE3, 0x97, 0x63,
+0x93, 0x33, 0x65, 0xCB, 0x7E, 0x02, 0xD2, 0x18, 0xD9, 0x82, 0x69, 0x0A, 0xA0, 0x1D, 0x1A, 0x90,
+0x37, 0x3D, 0xBC, 0x72, 0xAF, 0xCF, 0xF1, 0xB3, 0x3D, 0x45, 0x2E, 0x65, 0xD4, 0x47, 0x35, 0x90,
+0x32, 0xC4, 0xBA, 0x6F, 0x76, 0x8D, 0x32, 0x8A, 0x42, 0x2E, 0x04, 0xE2, 0x7A, 0x3D, 0x1C, 0x7C,
+0xD2, 0x23, 0x81, 0xE3, 0xFE, 0xF1, 0x0C, 0x01, 0xE1, 0x4D, 0x47, 0x08, 0x64, 0xB6, 0x77, 0x78,
+0x9E, 0xDF, 0x23, 0xA4, 0x02, 0xE4, 0x78, 0x19, 0x4C, 0x0F, 0xCF, 0xE4, 0x85, 0x71, 0x17, 0x13,
+0xFF, 0x71, 0x34, 0x93, 0x90, 0xF0, 0x42, 0x06, 0xE0, 0x26, 0x3F, 0xE3, 0x83, 0x21, 0x1A, 0x9B,
+0x43, 0x34, 0xCB, 0x41, 0x22, 0xA9, 0x20, 0x45, 0xA3, 0xB4, 0x0E, 0xEF, 0x96, 0x45, 0x22, 0x0F,
+0x96, 0x0E, 0xAD, 0xAD, 0x05, 0x31, 0x7B, 0x75, 0xF4, 0xA9, 0xB3, 0x38, 0x8C, 0xB7, 0x95, 0x32,
+0x6E, 0x08, 0x0A, 0xBD, 0xB7, 0xBE, 0x5F, 0xF9, 0x81, 0x01, 0x1F, 0x27, 0xDE, 0xCF, 0xC6, 0x7F,
+0x76, 0x6D, 0x7D, 0xAD, 0xCC, 0x13, 0x25, 0xA6, 0x53, 0x23, 0x6B, 0xF1, 0x39, 0xC7, 0xBF, 0x24,
+0x1A, 0x78, 0xE9, 0x33, 0xE7, 0x57, 0xEB, 0xBF, 0x81, 0x04, 0xB9, 0xB4, 0x6F, 0x5D, 0x22, 0xCC,
+0xF7, 0x60, 0xF0, 0x60, 0xCF, 0x1B, 0x52, 0xDD, 0xD3, 0xFB, 0x38, 0xD4, 0xEC, 0x5C, 0xBA, 0x3C,
+0x9F, 0xB3, 0x9A, 0xE1, 0x57, 0xAF, 0xDA, 0xD8, 0x96, 0x6B, 0xD7, 0xA7, 0x68, 0xD6, 0x8C, 0x66,
+0x96, 0x75, 0x18, 0xEE, 0x11, 0x05, 0x03, 0xA2, 0xE9, 0x65, 0x6E, 0x67, 0xA1, 0xAA, 0xEE, 0xAA,
+0x81, 0x11, 0x5A, 0x46, 0x0E, 0xE2, 0x58, 0x12, 0xBA, 0x51, 0xBF, 0x47, 0x49, 0x72, 0x88, 0x09,
+0xF5, 0xE1, 0x45, 0x8D, 0xB5, 0x7F, 0x4F, 0x25, 0x2E, 0x3E, 0x62, 0x8A, 0x78, 0xC4, 0x8F, 0x76,
+0x97, 0x97, 0x0A, 0x40, 0xBF, 0x14, 0x9E, 0x63, 0xF4, 0xB8, 0xA0, 0x39, 0x4F, 0xA1, 0x88, 0x17,
+0x35, 0x1A, 0x31, 0x10, 0x24, 0xA5, 0xDD, 0x86, 0x12, 0xF7, 0x93, 0x90, 0x55, 0x0E, 0xD9, 0xA9,
+0xD1, 0xE5, 0x26, 0x1C, 0xEF, 0xED, 0x51, 0xDF, 0x51, 0x5F, 0x59, 0xFE, 0x87, 0x52, 0x26, 0x38,
+0xF8, 0xFB, 0x4D, 0xC7, 0xA4, 0x73, 0xC6, 0x08, 0x10, 0xB2, 0xC1, 0x4C, 0xB1, 0xC9, 0x0E, 0x09,
+0xDC, 0x48, 0x7F, 0x7C, 0xBC, 0x47, 0x5E, 0x87, 0xDB, 0x1F, 0xF8, 0x2F, 0x09, 0x3D, 0xC9, 0xCD,
+0x61, 0x1A, 0x69, 0x79, 0xC2, 0x31, 0xE5, 0x21, 0xE8, 0xEC, 0x49, 0x30, 0xC3, 0xC2, 0xDC, 0x89,
+0xF2, 0x9B, 0x0C, 0xE3, 0x13, 0xAF, 0xCC, 0x52, 0x58, 0x42, 0xBE, 0x95, 0x40, 0xD1, 0xC6, 0x7C,
+0xFC, 0x12, 0x8F, 0x14, 0x1E, 0x95, 0xD4, 0x77, 0x0D, 0x7E, 0x3D, 0xF8, 0xB0, 0x91, 0xAE, 0xCE,
+0x75, 0x69, 0xF0, 0x31, 0x4B, 0xA4, 0x60, 0x83, 0xE9, 0x1F, 0x49, 0x19, 0x21, 0x75, 0x27, 0x69,
+0x1C, 0x20, 0x10, 0xF5, 0x9E, 0x23, 0xB9, 0x56, 0xFF, 0x20, 0x0F, 0x8B, 0xAC, 0xC3, 0x21, 0x09,
+0x05, 0x60, 0xDB, 0x80, 0x7A, 0xFB, 0x65, 0x9C, 0x38, 0xA4, 0xF4, 0x50, 0xA2, 0xB7, 0x03, 0x6C,
+0xF5, 0x17, 0x6B, 0xF1, 0xD1, 0xB1, 0xF0, 0x1D, 0xB7, 0xA4, 0x17, 0x46, 0x3C, 0xFF, 0x6C, 0x7C,
+0x57, 0xFC, 0x43, 0x70, 0x55, 0x1B, 0x7A, 0x7D, 0x48, 0x00, 0xD1, 0xCF, 0x02, 0x74, 0xD1, 0xED,
+0x69, 0x7B, 0x81, 0xB2, 0x48, 0x4B, 0x75, 0x73, 0xF8, 0xE6, 0xA1, 0xA3, 0x84, 0x71, 0xFA, 0xAF,
+0xEE, 0x02, 0xC0, 0xFA, 0x3B, 0x23, 0x6D, 0xA9, 0xF9, 0x36, 0x5E, 0x44, 0xBB, 0x6D, 0xB3, 0x52,
+0x4F, 0x14, 0xCA, 0xEA, 0x4D, 0xA7, 0x57, 0x56, 0x48, 0xE0, 0xCE, 0xB7, 0xF1, 0x4B, 0x7B, 0x00,
+0xB4, 0xE4, 0xCA, 0xD6, 0xA9, 0x7B, 0x75, 0x64, 0xEC, 0x1E, 0x15, 0xCE, 0x8D, 0xE9, 0xB7, 0x83,
+0x3A, 0x8E, 0xB8, 0x85, 0x92, 0xC5, 0xCC, 0xC4, 0x25, 0x25, 0x4D, 0x8F, 0x97, 0x32, 0x21, 0x96,
+0x3C, 0xDA, 0x50, 0xB8, 0xE5, 0x76, 0x33, 0x7C, 0x8D, 0x98, 0x16, 0xE1, 0xE9, 0x3C, 0xA5, 0x13,
+0x2B, 0x1F, 0x39, 0x25, 0xFF, 0x7C, 0x9E, 0xE6, 0x14, 0x3C, 0x15, 0x6A, 0x40, 0x72, 0x22, 0x56,
+0xDF, 0xAA, 0x15, 0xED, 0x09, 0xEF, 0x80, 0x02, 0x7D, 0x50, 0x3D, 0x6C, 0xDC, 0x0F, 0x5D, 0x5B,
+0x12, 0xBD, 0x8F, 0x8F, 0xF8, 0xD0, 0xDA, 0xEE, 0xAB, 0x2A, 0x66, 0x4E, 0xDA, 0x86, 0x10, 0x0D,
+0xAB, 0xCB, 0x28, 0x5E, 0x86, 0x08, 0x1B, 0x8E, 0x0B, 0x02, 0xD3, 0xB8, 0x6D, 0xBA, 0x8A, 0x6B,
+0x1B, 0x41, 0xEA, 0x12, 0xEA, 0x2B, 0xC5, 0xCC, 0x12, 0x55, 0xC7, 0x4A, 0xB3, 0x73, 0x08, 0xFA,
+0x4F, 0x82, 0xAA, 0x51, 0x47, 0xCC, 0x12, 0x9F, 0x6D, 0x71, 0xBA, 0x71, 0xD5, 0x7A, 0x38, 0x02,
+0x4C, 0x13, 0xC3, 0xF0, 0x89, 0x82, 0x17, 0x3E, 0xF7, 0x87, 0xB9, 0xE5, 0x01, 0x37, 0x9B, 0x30,
+0xFD, 0x5B, 0x05, 0x0B, 0xAE, 0xF6, 0x16, 0xA3, 0x94, 0x2B, 0x12, 0xE0, 0x1F, 0xAC, 0x3C, 0xBF,
+0xD2, 0x13, 0x11, 0xB8, 0x52, 0x88, 0x98, 0x05, 0xF3, 0x48, 0x51, 0x76, 0xB4, 0x72, 0x1C, 0x07,
+0xA0, 0xF5, 0x65, 0x54, 0xEA, 0x4F, 0xF3, 0x86, 0x87, 0xBC, 0x47, 0xC6, 0x6C, 0x18, 0x93, 0xA3,
+0x92, 0xCA, 0xB6, 0x54, 0xD1, 0x17, 0x0C, 0x8B, 0x73, 0x80, 0xC3, 0xD6, 0x2B, 0x15, 0x26, 0x92,
+0x48, 0x92, 0x32, 0xF7, 0x37, 0x21, 0x16, 0x6C, 0xFB, 0x74, 0x4F, 0x30, 0xD1, 0x21, 0xEA, 0xC7,
+0x8F, 0x0D, 0x1E, 0x3E, 0xA3, 0x49, 0x1C, 0x8D, 0x74, 0xC8, 0x1B, 0x10, 0x89, 0x43, 0x62, 0x2A,
+0x3F, 0x7C, 0xD4, 0x73, 0x46, 0x1D, 0x8C, 0x47, 0x63, 0x3F, 0xA9, 0x59, 0x8C, 0x54, 0x32, 0xD6,
+0x71, 0x73, 0x55, 0x00, 0x03, 0x16, 0x77, 0xAA, 0xA2, 0x41, 0xB8, 0x61, 0x0B, 0x20, 0xB5, 0x43,
+0x5C, 0x89, 0x59, 0x14, 0x85, 0x74, 0xA7, 0x5F, 0x68, 0xFF, 0x72, 0x98, 0xCE, 0x7E, 0xAF, 0x79,
+0x7A, 0xFF, 0x12, 0x63, 0x83, 0x57, 0x57, 0xB9, 0xF1, 0x95, 0xC1, 0x85, 0x71, 0x7D, 0x99, 0x2B,
+0x72, 0xEE, 0x08, 0x2F, 0x70, 0xFA, 0xE9, 0x13, 0x71, 0x9D, 0x7D, 0x5D, 0xD5, 0xFA, 0xCC, 0xBF,
+0x08, 0xA2, 0x0D, 0xB9, 0x97, 0xAC, 0xB9, 0xEC, 0xF5, 0x94, 0x2E, 0x44, 0x4D, 0xAE, 0xF6, 0x5A,
+0xEB, 0x2E, 0xC2, 0x79, 0x43, 0x00, 0x4C, 0x95, 0x1A, 0x0F, 0x54, 0x4E, 0x36, 0xB6, 0x3A, 0x60,
+0xEB, 0xC2, 0x6C, 0xDA, 0x2C, 0xF3, 0x04, 0x80, 0xDD, 0xBC, 0xE1, 0xC0, 0xED, 0x61, 0x7A, 0x18,
+0x2A, 0x9D, 0x13, 0x2F, 0xA4, 0x38, 0x33, 0x3B, 0xB1, 0x5D, 0x92, 0xBF, 0xA7, 0x22, 0x7F, 0xEA,
+0xF5, 0x18, 0xF2, 0xE9, 0x31, 0x33, 0x73, 0xCB, 0x5E, 0xB6, 0x14, 0xB6, 0x66, 0xD7, 0xD5, 0xFF,
+0xBF, 0x4D, 0xB4, 0x36, 0x5E, 0xDA, 0xF3, 0xFE, 0x76, 0x44, 0x3B, 0x53, 0xA4, 0xA2, 0xC3, 0xA7,
+0xF8, 0x55, 0x92, 0x3F, 0x86, 0xB7, 0x82, 0x1D, 0xF6, 0x15, 0xFB, 0xD9, 0x5C, 0x2D, 0x58, 0xA5,
+0x72, 0x25, 0x02, 0x03, 0x0E, 0x8D, 0xCA, 0x28, 0xB0, 0xAA, 0x55, 0xF2, 0x03, 0xC5, 0x51, 0x6E,
+0x04, 0x69, 0xD6, 0x9E, 0x90, 0x76, 0xA6, 0xFD, 0x58, 0xA8, 0x98, 0x56, 0x08, 0xE8, 0x05, 0x04,
+0xF4, 0x7C, 0xCA, 0x34, 0xAD, 0x2F, 0x98, 0xF9, 0x5C, 0xBD, 0x66, 0x01, 0xB7, 0xD2, 0xD6, 0x1C,
+0xDC, 0x19, 0x8B, 0xC5, 0xEF, 0xE8, 0x03, 0x59, 0xC7, 0xCB, 0xB9, 0x0F, 0x90, 0xA3, 0x9B, 0x0D,
+0xD2, 0x77, 0xBD, 0x57, 0xAD, 0xE3, 0x4B, 0x9B, 0x98, 0x65, 0x25, 0x5C, 0xBC, 0xFE, 0x00, 0x5A,
+0x42, 0x68, 0x80, 0x76, 0x00, 0xDA, 0x08, 0x2E, 0x65, 0xCD, 0x2E, 0x59, 0x68, 0xC6, 0x4B, 0x0B,
+0x80, 0x50, 0xEA, 0x60, 0x60, 0xD2, 0xFD, 0xEF, 0x63, 0x40, 0x3D, 0xA0, 0x9A, 0x08, 0x06, 0x06,
+0xA0, 0x4C, 0x5B, 0x42, 0xF3, 0xE6, 0x43, 0x02, 0x96, 0xC7, 0x6C, 0x60, 0xF2, 0xA8, 0x4B, 0xA4,
+0x11, 0xDA, 0x5B, 0x65, 0x21, 0x96, 0xF9, 0xE5, 0xC0, 0x38, 0x42, 0x91, 0x02, 0x4F, 0x59, 0xE0,
+0xD0, 0xF6, 0xC2, 0x59, 0x81, 0xCC, 0x53, 0x18, 0xBA, 0xB2, 0x73, 0x87, 0x92, 0x89, 0x95, 0xC6,
+0xFF, 0x8C, 0xC8, 0x70, 0x28, 0x05, 0x2E, 0x9E, 0x9B, 0x33, 0xF0, 0x2D, 0xE6, 0x73, 0x17, 0xD5,
+0x65, 0x9D, 0xFA, 0xE2, 0x3A, 0x97, 0x1A, 0x99, 0x7B, 0xED, 0xEF, 0xCE, 0x96, 0x63, 0x14, 0x8A,
+0xDA, 0x1A, 0x4E, 0xEC, 0x2A, 0x5F, 0x45, 0x5A, 0x72, 0x60, 0x8D, 0xFF, 0x79, 0xFE, 0xE0, 0x76,
+0x9C, 0x1A, 0x88, 0xFB, 0xA4, 0x89, 0xBD, 0x8A, 0xAE, 0x6F, 0x3A, 0xB3, 0x2B, 0xAD, 0x2F, 0x70,
+0x09, 0xBF, 0x2D, 0xA1, 0xC8, 0x02, 0x3C, 0xE6, 0xD1, 0x38, 0xF0, 0xE1, 0xDE, 0x0C, 0xFF, 0xCC,
+0x0E, 0x60, 0x5A, 0x0C, 0xD5, 0x30, 0x52, 0x88, 0x85, 0xA7, 0xBF, 0x6A, 0xD3, 0x2E, 0x41, 0x48,
+0xFC, 0xA0, 0xC9, 0x3B, 0x93, 0x8C, 0x07, 0x1C, 0xC8, 0xEA, 0x42, 0xA9, 0xFD, 0x82, 0x05, 0xE1,
+0x6F, 0x06, 0xB1, 0x53, 0x70, 0x28, 0x7A, 0x96, 0x0C, 0x4F, 0xDD, 0x2D, 0x72, 0x78, 0x03, 0x01,
+0x68, 0x52, 0xDD, 0x1D, 0x3F, 0x38, 0x96, 0x68, 0x68, 0x72, 0x16, 0x25, 0x5F, 0x18, 0x47, 0xEB,
+0xBD, 0x8E, 0x4D, 0x39, 0xD3, 0x91, 0x72, 0x4E, 0x46, 0xE0, 0x79, 0xC6, 0xB7, 0xA5, 0x75, 0x65,
+0x44, 0x44, 0x1C, 0x8E, 0xB2, 0xE1, 0x3E, 0x94, 0x31, 0xC7, 0xE5, 0xEF, 0xC3, 0x49, 0x84, 0x7B,
+0x90, 0x26, 0x5E, 0xFF, 0x8B, 0x7D, 0xD6, 0xAF, 0xD0, 0x8B, 0x9F, 0xA4, 0x6C, 0xEA, 0xFF, 0x83,
+0xCF, 0xB2, 0x46, 0xF4, 0x04, 0xC3, 0xE4, 0xFA, 0x44, 0xF4, 0x46, 0xB1, 0x5F, 0xEC, 0x70, 0x4D,
+0x17, 0xEE, 0x3A, 0x53, 0x75, 0x9E, 0x67, 0xF4, 0x78, 0x8C, 0x64, 0xDF, 0xBA, 0x6E, 0xDF, 0x46,
+0xC1, 0xF2, 0x20, 0x85, 0x79, 0xB6, 0x20, 0x51, 0xD6, 0xFE, 0xE8, 0x98, 0x6B, 0x43, 0xE9, 0x10,
+0xD2, 0x51, 0x29, 0xB3, 0xF0, 0x63, 0xA0, 0xB2, 0x60, 0x21, 0x98, 0x96, 0xFE, 0x73, 0x14, 0xEC,
+0x9A, 0x6B, 0x35, 0x8E, 0xB5, 0xAA, 0x33, 0x30, 0x3F, 0x09, 0x28, 0x14, 0x00, 0x32, 0x86, 0xBC,
+0xB6, 0xA0, 0x2B, 0x80, 0x92, 0x9C, 0xCD, 0xAE, 0x5A, 0x3B, 0x6B, 0x78, 0xF0, 0x9F, 0x7D, 0x97,
+0x77, 0xAA, 0x1D, 0xC0, 0xCB, 0x2A, 0x2D, 0x86, 0x5B, 0x70, 0xC2, 0x65, 0x29, 0x6E, 0xD5, 0xE4,
+0xBA, 0x54, 0x24, 0x3F, 0xAA, 0x4A, 0x7C, 0x15, 0x90, 0xDA, 0xB4, 0x12, 0xF5, 0x24, 0x2B, 0x74,
+0xA5, 0x76, 0x71, 0x87, 0x85, 0x94, 0x9E, 0x8C, 0x02, 0x68, 0xB3, 0x44, 0xFE, 0xC5, 0xAF, 0xC2,
+0x13, 0x83, 0x89, 0x7C, 0x79, 0xBE, 0xAB, 0x56, 0x54, 0xA4, 0x65, 0xAA, 0xFA, 0x89, 0x3C, 0xCB,
+0x41, 0x69, 0x40, 0x3A, 0x91, 0xC4, 0x37, 0x9B, 0x34, 0xDD, 0xFE, 0xCC, 0x66, 0x15, 0x87, 0x74,
+0xAA, 0xE5, 0x34, 0xF1, 0xB9, 0x78, 0xBA, 0x4E, 0x38, 0xC7, 0xDB, 0x9A, 0xFE, 0x54, 0x32, 0xF5,
+0x08, 0x18, 0x55, 0x0A, 0x22, 0xF3, 0x1B, 0xC0, 0xAC, 0x07, 0x38, 0x93, 0x2E, 0x55, 0x58, 0x62,
+0x20, 0xAA, 0x18, 0x0B, 0x1C, 0x26, 0x56, 0xCE, 0x0B, 0x8A, 0x69, 0x1A, 0xB8, 0x85, 0xCB, 0x8F,
+0xBD, 0x8E, 0x22, 0xED, 0x4E, 0x8A, 0xA1, 0xFB, 0xA7, 0x8B, 0x08, 0xC0, 0x16, 0x31, 0x9F, 0x90,
+0xCA, 0x77, 0x74, 0x6D, 0xCF, 0xB4, 0xB4, 0x62, 0xC6, 0x33, 0xB9, 0xD8, 0xFF, 0x94, 0x22, 0x5F,
+0x3B, 0x46, 0x70, 0xD4, 0x92, 0x4B, 0x25, 0x01, 0x64, 0x3A, 0x07, 0x46, 0x65, 0xAD, 0x71, 0x36,
+0x52, 0x90, 0xFA, 0xBA, 0xE4, 0x88, 0xDA, 0x44, 0xC7, 0xF8, 0x8B, 0x9D, 0xED, 0x6B, 0x85, 0x6F,
+0xBD, 0x23, 0xD8, 0xFF, 0x9B, 0x47, 0x58, 0xDA, 0x5B, 0x1E, 0xC3, 0xAE, 0xF8, 0xF6, 0x96, 0x47,
+0x8F, 0x9E, 0x12, 0x04, 0xA0, 0xE0, 0x71, 0x55, 0xA1, 0xFE, 0xED, 0xDC, 0x66, 0x3B, 0x75, 0x68,
+0xC0, 0xA1, 0x67, 0x10, 0x0D, 0x7B, 0x2B, 0x51, 0x78, 0x05, 0xC6, 0x06, 0x16, 0xF6, 0x51, 0x9C,
+0xD2, 0xA8, 0xD0, 0x28, 0xD9, 0xFC, 0x99, 0x0F, 0x89, 0xDE, 0x3A, 0x74, 0xAA, 0x89, 0xFF, 0xA0,
+0x89, 0x3E, 0xC1, 0x8C, 0x78, 0x28, 0xA8, 0x6A, 0xB1, 0x62, 0x92, 0x38, 0x19, 0xBA, 0x3D, 0x04,
+0x0D, 0x0D, 0xF0, 0xA7, 0xD7, 0x58, 0x01, 0x4A, 0xFB, 0x2A, 0x15, 0x85, 0xBD, 0xDC, 0x2E, 0xEB,
+0x90, 0x0E, 0xC7, 0x13, 0x8F, 0x5E, 0x1A, 0xEC, 0x01, 0xD1, 0x20, 0x06, 0x35, 0x20, 0x49, 0x4F,
+0x60, 0x6D, 0xD6, 0x45, 0x78, 0x74, 0x39, 0x39, 0x8C, 0x2C, 0xBB, 0x9D, 0x96, 0xFA, 0xA3, 0x1A,
+0x3A, 0x45, 0x3E, 0x9A, 0xE1, 0x93, 0x09, 0xFE, 0x3F, 0x7D, 0x0F, 0x9A, 0x02, 0xE3, 0x87, 0x62,
+0xEC, 0xE1, 0xF7, 0x8C, 0x88, 0x50, 0x83, 0xBE, 0xBB, 0xC1, 0xB5, 0x93, 0xAC, 0xDE, 0x88, 0x89,
+0x34, 0x6F, 0xF0, 0x98, 0x5C, 0x7B, 0x1E, 0x2D, 0xDE, 0xBA, 0x1C, 0x09, 0x1D, 0xD0, 0x14, 0xAD,
+0x48, 0x2A, 0x12, 0xC8, 0x54, 0x95, 0x8D, 0x13, 0x74, 0x1F, 0x6B, 0x53, 0x38, 0xC6, 0x97, 0x48,
+0x56, 0x28, 0xFE, 0x62, 0x41, 0x9E, 0xAB, 0x38, 0xAE, 0xEC, 0xE7, 0x53, 0x0D, 0xD3, 0x2A, 0xAF,
+0x9A, 0x07, 0xBE, 0x2D, 0xBC, 0xE6, 0x88, 0x96, 0x72, 0x3F, 0x18, 0x16, 0xCB, 0x83, 0xCE, 0x24,
+0x25, 0x9A, 0x42, 0x72, 0xED, 0x32, 0x78, 0x3F, 0xC0, 0xF0, 0x59, 0x8C, 0x14, 0x47, 0xF3, 0x31,
+0xE1, 0x61, 0x90, 0xFD, 0x2E, 0xFB, 0xCE, 0xAC, 0xA2, 0x11, 0x04, 0xCC, 0x55, 0x57, 0x29, 0xF7,
+0x57, 0x40, 0x1C, 0xAC, 0xF4, 0x12, 0xA9, 0xA6, 0x00, 0x13, 0xAE, 0xA5, 0x3E, 0x67, 0x35, 0x30,
+0x8B, 0x9E, 0xFD, 0xFC, 0x69, 0xB1, 0x58, 0xAB, 0x65, 0x98, 0x87, 0x04, 0xCD, 0x39, 0x10, 0x88,
+0x69, 0xFF, 0x50, 0x78, 0xAE, 0x7D, 0x6F, 0xE8, 0x1C, 0x10, 0x67, 0xCB, 0x21, 0x91, 0xF9, 0x26,
+0xFE, 0x44, 0x69, 0xB8, 0x1D, 0xA2, 0x38, 0xBB, 0x3F, 0xF1, 0x98, 0x28, 0x27, 0x3C, 0x7C, 0xA5,
+0x22, 0x56, 0xC3, 0x3A, 0xEB, 0x3D, 0x65, 0x3B, 0x83, 0x9A, 0x03, 0xCA, 0x64, 0x8D, 0x1E, 0xA8,
+0x00, 0x00, 0x00, 0x00, 0x7D, 0x50, 0xA6, 0x7D, 0xD3, 0xA4, 0x50, 0x12, 0x00, 0x01, 0xCE, 0x2D,
+0xDD, 0xE8, 0x01, 0x00, 0xB1, 0x91, 0x32, 0xBB, 0xB1, 0xC4, 0x67, 0xFB, 0x02, 0x00, 0x00, 0x00,
+0x00, 0x04, 0x59, 0x5A};
diff --git a/drivers/qtn/qdrv/qdrv_netdebug_checksum.h b/drivers/qtn/qdrv/qdrv_netdebug_checksum.h
new file mode 100644
index 0000000..6ed2cdf
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_netdebug_checksum.h
@@ -0,0 +1,3 @@
+/* Automatically generated file.  Do not edit. */
+#define QDRV_NETDEBUG_CHECKSUM 0x72e35823
+#define QDRV_BUILDDATE 1479230732UL
diff --git a/drivers/qtn/qdrv/qdrv_pcap.c b/drivers/qtn/qdrv/qdrv_pcap.c
new file mode 100644
index 0000000..8ee6044
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_pcap.c
@@ -0,0 +1,218 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_wlan.h"
+#include "qdrv_pcap.h"
+
+#if QTN_GENPCAP
+
+#define PROC_NAME	"pcap"
+static struct qtn_genpcap *pcap_state = NULL;
+
+static int qdrv_pcap_seq_finished(const unsigned long *counter)
+{
+	if (pcap_state == NULL) {
+		return 1;
+	}
+
+	return *counter >= (1 << pcap_state->payloads_count_s);
+}
+
+static void* qdrv_pcap_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+	unsigned long *counter;
+
+	if (pcap_state == NULL || pcap_state->active) {
+		printk(KERN_ERR "%s: only take pcap when inactive\n", __FUNCTION__);
+		return NULL;
+	}
+
+	counter = kmalloc(sizeof(*counter), GFP_KERNEL);
+	if (counter == NULL) {
+		return NULL;
+	}
+
+	*counter = *pos;
+
+	if (qdrv_pcap_seq_finished(counter)) {
+		kfree(counter);
+		return NULL;
+	}
+
+	return counter;
+}
+
+static void* qdrv_pcap_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	unsigned long *counter = v;
+	(*counter)++;
+	(*pos)++;
+
+	if (qdrv_pcap_seq_finished(counter)) {
+		return NULL;
+	}
+
+	return counter;
+}
+
+static void qdrv_pcap_seq_stop(struct seq_file *sfile, void *v)
+{
+	kfree(v);
+}
+
+static int qdrv_pcap_seq_show(struct seq_file *sfile, void *v)
+{
+	unsigned long *counter = v;
+	unsigned long pkt_index;
+	struct qtn_pcap_hdr *qtnhdr;
+	struct pcaprec_hdr rechdr;
+
+	if (*counter == 0) {
+		struct pcap_hdr file_hdr = qtn_pcap_mkhdr(qtn_pcap_max_payload(pcap_state));
+		seq_write(sfile, &file_hdr, sizeof(file_hdr));
+	}
+
+	pkt_index = (*counter + pcap_state->payloads_written) %
+		(1 << pcap_state->payloads_count_s);
+	qtnhdr = (void *) (pcap_state->payloads_vaddr + ((1 << pcap_state->payload_size_s) * pkt_index));
+	if (qtnhdr->incl) {
+		rechdr.incl_len = qtnhdr->incl;
+		rechdr.orig_len = qtnhdr->orig;
+		rechdr.ts_sec = ((uint32_t) qtnhdr->tsf) / 1000000;
+		rechdr.ts_usec = ((uint32_t) qtnhdr->tsf) % 1000000;
+		seq_write(sfile, &rechdr, sizeof(rechdr));
+		seq_write(sfile, (qtnhdr + 1), qtnhdr->incl);
+	}
+
+	return 0;
+}
+
+
+static struct seq_operations qdrv_pcap_seq_ops = {
+	.start = qdrv_pcap_seq_start,
+	.next  = qdrv_pcap_seq_next,
+	.stop  = qdrv_pcap_seq_stop,
+	.show  = qdrv_pcap_seq_show
+};
+
+static int qdrv_pcap_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &qdrv_pcap_seq_ops);
+}
+
+static struct file_operations qdrv_pcap_proc_ops = {
+	.owner   = THIS_MODULE,
+	.open    = qdrv_pcap_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+
+void qdrv_genpcap_exit(struct qdrv_wlan *qw)
+{
+	struct qtn_genpcap_args *gpa = &qw->genpcap_args;
+	size_t alloc_sz;
+
+	if (pcap_state && gpa->vaddr) {
+		remove_proc_entry(PROC_NAME, NULL);
+
+		alloc_sz = sizeof(*pcap_state) +
+			(1 << (pcap_state->payloads_count_s + pcap_state->payload_size_s));
+		dma_free_coherent(NULL, alloc_sz, gpa->vaddr, gpa->paddr);
+		gpa->vaddr = NULL;
+		gpa->paddr = 0;
+		pcap_state = NULL;
+	}
+}
+
+int qdrv_genpcap_set(struct qdrv_wlan *qw, int cfg, dma_addr_t *ctrl_dma)
+{
+	const uint8_t cfg_op = (cfg >> 16) & 0xff;
+	const uint8_t cfg_pktsz_s = (cfg >> 8) & 0xff;
+	const uint8_t cfg_pktcnt_s = (cfg >> 0) & 0xff;
+
+	*ctrl_dma = 0;
+
+	if (pcap_state && ((cfg_op == QTN_GENPCAP_OP_START) || (cfg_op == QTN_GENPCAP_OP_FREE))) {
+		qdrv_genpcap_exit(qw);
+	}
+
+	if ((pcap_state == NULL) && (cfg_op == QTN_GENPCAP_OP_START)) {
+		/* currently uninitialized, start requested */
+		struct qtn_genpcap_args *gpa = &qw->genpcap_args;
+		struct qtn_genpcap *ctrl;
+		uint8_t *payloads_start;
+		size_t payloads_total_size;
+		size_t alloc_sz;
+
+		if (cfg_pktsz_s < 5 || cfg_pktcnt_s < 1) {
+			printk(KERN_ERR "%s: invalid settings\n", __FUNCTION__);
+			return -EINVAL;
+		}
+
+		payloads_total_size = 1 << (cfg_pktsz_s + cfg_pktcnt_s);
+		alloc_sz = payloads_total_size + sizeof(*ctrl);
+		if ((gpa->vaddr = dma_alloc_coherent(NULL,
+						alloc_sz, &gpa->paddr, GFP_KERNEL)) == NULL) {
+			printk(KERN_ERR "%s: could not allocate %u bytes\n",
+					__FUNCTION__, alloc_sz);
+			return -ENOMEM;
+		}
+
+		memset(gpa->vaddr, 0, alloc_sz);
+
+		payloads_start = gpa->vaddr;
+		ctrl = (void *) (payloads_start + payloads_total_size);
+		*ctrl_dma = gpa->paddr + payloads_total_size;
+		pcap_state = ctrl;
+
+		ctrl->active = 0;
+		ctrl->payloads_count_s = cfg_pktcnt_s;
+		ctrl->payload_size_s = cfg_pktsz_s;
+		ctrl->payloads_vaddr = gpa->vaddr;
+		ctrl->payloads_paddr = (void *) gpa->paddr;
+		ctrl->payloads_written = 0;
+
+		if (proc_create_data(PROC_NAME, 0, NULL, &qdrv_pcap_proc_ops, qw) == NULL) {
+			printk(KERN_ERR "%s: could not create procfile %s\n",
+					__FUNCTION__, PROC_NAME);
+			return -1;
+		}
+
+		printk(KERN_INFO "%s: activated\n", __FUNCTION__);
+		pcap_state->active = 1;
+	}
+
+	if (pcap_state && (cfg_op == QTN_GENPCAP_OP_STOP)) {
+		printk(KERN_INFO "%s deactivated, %lu buffers captured (%u max)\n",
+				__FUNCTION__,
+				pcap_state->payloads_written,
+				1 << pcap_state->payloads_count_s);
+		pcap_state->active = 0;
+	}
+
+	return 0;
+}
+
+#endif	/* QTN_GENPCAP */
diff --git a/drivers/qtn/qdrv/qdrv_pcap.h b/drivers/qtn/qdrv/qdrv_pcap.h
new file mode 100644
index 0000000..3be685c
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_pcap.h
@@ -0,0 +1,47 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_PCAP_H
+#define _QDRV_PCAP_H
+#include <qtn/qtn_pcap_public.h>
+#if defined(__KERNEL__)
+struct qdrv_wlan;
+#define QTN_GENPCAP_OP_START	0x1
+#define QTN_GENPCAP_OP_STOP	0x2
+#define QTN_GENPCAP_OP_FREE	0x3
+
+#if QTN_GENPCAP
+int qdrv_genpcap_set(struct qdrv_wlan *qw, int set, dma_addr_t *ctrl_dma);
+void qdrv_genpcap_exit(struct qdrv_wlan *qw);
+#else
+static inline int qdrv_genpcap_set(struct qdrv_wlan *qw, int set, dma_addr_t *ctrl_dma)
+{
+	printk("%s: set QTN_GENPCAP to 1 and recompile\n", __FUNCTION__);
+	*ctrl_dma = 0;
+	return -1;
+}
+static inline void qdrv_genpcap_exit(struct qdrv_wlan *qw)
+{
+}
+#endif	/* QTN_GENPCAP */
+#endif	/* defined(__KERNEL__) */
+
+#endif
+
diff --git a/drivers/qtn/qdrv/qdrv_pktlogger.c b/drivers/qtn/qdrv/qdrv_pktlogger.c
new file mode 100644
index 0000000..24f3ceb
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_pktlogger.c
@@ -0,0 +1,2320 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#include <linux/version.h>
+#ifndef AUTOCONF_INCLUDED
+# include <linux/config.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
+# include <linux/kconfig.h>
+#else
+# include <generated/autoconf.h>
+#endif
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/stddef.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/sch_generic.h>
+#include <linux/netlink.h>
+#include <trace/ippkt.h>
+
+#include "qdrv_pktlogger.h"
+#include "qdrv_control.h"
+#include "qdrv_debug.h"
+#include "qtn/qdrv_bld.h"
+#include "qdrv_netdebug_checksum.h"
+#include "qdrv_netdebug_binary.h"
+#include "qdrv_muc_stats.h"
+#include <radar/radar.h>
+#include <qtn/qtn_math.h>
+
+#include <asm/board/soc.h>
+#include <qtn/mproc_sync_base.h>
+#include <common/ruby_mem.h>
+#include <common/pktlogger/pktlogger_nl_common.h>
+#include <qtn/qtn_bb_mutex.h>
+#include <qtn/hardware_revision.h>
+#include <qtn/emac_debug.h>
+#include <qtn/ruby_cpumon.h>
+#include <qtn/qtn_muc_stats_print.h>
+#include <qtn/txbf_mbox.h>
+
+struct qdrv_pktlogger *g_pktlogger_p = NULL;
+
+void ruby_cpumon_get_cycles(uint64_t *sleep, uint64_t *awake){}
+
+struct kmem_cache *
+kmem_cache_find(const char *name)
+{
+	return NULL;
+}
+
+void
+kmem_cache_calc_usage(struct kmem_cache *cachep, unsigned int *p_tot,
+		unsigned int *p_cur, unsigned int *p_act, unsigned int *p_hwm){}
+
+void *qdrv_pktlogger_alloc_buffer(char *description, int data_len)
+{
+	void *data_p;
+
+	data_p = kzalloc(data_len, GFP_ATOMIC);
+	if (data_p == NULL) {
+		DBGPRINTF_LIMIT_E("%s():Failed for %s", __FUNCTION__, description);
+		return NULL;
+	}
+
+	return data_p;
+}
+
+void qdrv_pktlogger_free_buffer(void *data_buffer)
+{
+	kfree(data_buffer);
+}
+
+void qdrv_pktlogger_hdr_init(struct qdrv_wlan *qw,
+		struct qdrv_pktlogger_hdr *hdr,
+		int rec_type,
+		int stats_len)
+{
+	uint64_t tsf;
+
+	hdr->udpheader.dest = qw->pktlogger.dst_port;
+	hdr->udpheader.source = qw->pktlogger.src_port;
+	hdr->udpheader.len = htons(stats_len);
+	hdr->udpheader.check = 0;
+
+	hdr->type = rec_type;
+
+	IEEE80211_ADDR_COPY(hdr->src, qw->mac->mac_addr);
+	hdr->version = QDRV_NETDEBUG_CHECKSUM;
+	hdr->builddate = QDRV_BUILDDATE;
+	strncpy(hdr->buildstring, QDRV_BLD_NAME, QDRV_NETDEBUG_BUILDSTRING_SIZE - 1);
+	hdr->buildstring[QDRV_NETDEBUG_BUILDSTRING_SIZE - 1] = '\0';
+
+	hdr->timestamp = jiffies;
+	qw->ic.ic_get_tsf(&tsf);
+	hdr->tsf_lo = U64_LOW32(tsf);
+	hdr->tsf_hi = U64_HIGH32(tsf);
+	hdr->stats_len = stats_len - sizeof(*hdr);
+	hdr->opmode = (u_int8_t)qw->ic.ic_opmode;
+	hdr->platform = get_hardware_revision();
+	memset(hdr->padding, 0, sizeof(hdr->padding));
+}
+
+/*
+ * Remap the statistics structures if not already done.
+ * These are used by netdebug and ratedebug, and are never unmapped.
+ */
+static int qdrv_pktlogger_map(struct qdrv_wlan *qw)
+{
+	struct qtn_stats_log *iw_stats_log;
+	iw_stats_log = (struct qtn_stats_log *)qw->mac->mac_sys_stats;
+	if (iw_stats_log == NULL) {
+		return -1;
+	}
+
+	if (qw->pktlogger.stats_uc_rx_ptr == NULL) {
+		qw->pktlogger.stats_uc_rx_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->rx_muc_stats),
+					sizeof(struct muc_rx_stats));
+	}
+
+	if (qw->pktlogger.stats_uc_rx_bf_ptr == NULL) {
+		qw->pktlogger.stats_uc_rx_bf_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->rx_muc_bf_stats),
+					sizeof(struct muc_rx_bf_stats));
+	}
+
+	if (qw->pktlogger.stats_uc_rx_rate_ptr == NULL) {
+		qw->pktlogger.stats_uc_rx_rate_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->rx_muc_rates),
+					sizeof(struct muc_rx_rates));
+	}
+
+	if (qw->pktlogger.stats_uc_tx_ptr == NULL) {
+		qw->pktlogger.stats_uc_tx_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->tx_muc_stats),
+					sizeof(struct muc_tx_stats));
+	}
+
+	if (qw->pktlogger.stats_uc_tx_rate_ptr == NULL) {
+		qw->pktlogger.stats_uc_tx_rate_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->tx_muc_rates),
+					sizeof(struct qtn_rate_tx_stats_per_sec));
+	}
+
+	if (qw->pktlogger.stats_uc_su_rates_read_ptr == NULL) {
+		qw->pktlogger.stats_uc_su_rates_read_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->muc_su_rate_stats_read),
+					sizeof(uint32_t));
+	}
+
+	if (qw->pktlogger.stats_uc_mu_rates_read_ptr == NULL) {
+		qw->pktlogger.stats_uc_mu_rates_read_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->muc_mu_rate_stats_read),
+					sizeof(uint32_t));
+	}
+
+	if (qw->pktlogger.stats_uc_scs_cnt == NULL) {
+		qw->pktlogger.stats_uc_scs_cnt =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->scs_cnt),
+					sizeof(struct qdrv_scs_cnt));
+	}
+
+	if (qw->pktlogger.netdev_q_ptr_w == NULL) {
+		struct ieee80211vap *vap = TAILQ_FIRST(&qw->ic.ic_vaps);
+		if (vap) {
+			if (vap->iv_dev != NULL) {
+				struct netdev_queue *ndq = netdev_get_tx_queue(vap->iv_dev, 0);
+				if (ndq != NULL) {
+					qw->pktlogger.netdev_q_ptr_w = ndq;
+				}
+			}
+		}
+	}
+	if (qw->pktlogger.netdev_q_ptr_e == NULL) {
+		if (qw->pktlogger.dev != NULL) {
+			struct netdev_queue *ndq = netdev_get_tx_queue(qw->pktlogger.dev, 0);
+			if (ndq != NULL) {
+				qw->pktlogger.netdev_q_ptr_e = ndq;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* record Auc status pointer */
+/*
+ * Scan the AuC stats field table and cache pointers to the "sleep",
+ * "jiffies", "IRQ_0" and "task_alive_counters[0]" counters in the
+ * pktlogger state, restricted to addresses inside AuC DMEM.
+ *
+ * NOTE(review): the entire body is compiled out by "#if 0", so this
+ * function is currently a no-op; the pktlogger.stats_auc_* pointers
+ * stay NULL and the consumers in the stats-send path guard for that.
+ */
+static void qdrv_pktlogger_set_auc_status_ptr(struct qdrv_wlan *qw)
+{
+#if 0
+#if !defined(CONFIG_TOPAZ_PCIE_HOST)
+	/* firmware without MU-MIMO uses a different stats field layout */
+	const bool fw_no_mu = qw->sp->fw_no_mu;
+	const struct qtn_auc_stat_field auc_field_default[] = {
+	#include <qtn/qtn_auc_stats_fields.default.h>
+	};
+	const struct qtn_auc_stat_field auc_field_nomu[] = {
+	#include <qtn/qtn_auc_stats_fields.nomu.h>
+	};
+	const struct qtn_auc_stat_field *auc_field = fw_no_mu ? auc_field_nomu : auc_field_default;
+	const size_t nstats = fw_no_mu ? ARRAY_SIZE(auc_field_nomu) : ARRAY_SIZE(auc_field_default);
+	unsigned int i;
+
+
+	for (i = 0; i < nstats; i++) {
+		const uintptr_t addr = auc_field[i].addr;
+		const char *const name = auc_field[i].name;
+		/* only accept counters that live in AuC data memory */
+		if (__in_mem_range(addr, TOPAZ_AUC_DMEM_ADDR, TOPAZ_AUC_DMEM_SIZE)) {
+			if (strcmp(name, "sleep") == 0) {
+				if (qw->pktlogger.stats_auc_sleep_p == NULL)
+					qw->pktlogger.stats_auc_sleep_p = (uint32_t *)addr;
+			} else if (strcmp(name, "jiffies") == 0) {
+				if (qw->pktlogger.stats_auc_jiffies_p == NULL)
+					qw->pktlogger.stats_auc_jiffies_p = (uint32_t *)addr;
+			} else if (strcmp(name, "IRQ_0") == 0) {
+				if (qw->pktlogger.stats_auc_intr_p == NULL)
+					qw->pktlogger.stats_auc_intr_p = (uint32_t *)addr;
+			} else if (strcmp(name, "task_alive_counters[0]") == 0) {
+				if (qw->pktlogger.stats_auc_dbg_p == NULL)
+					qw->pktlogger.stats_auc_dbg_p = (struct auc_dbg_counters *)addr;
+			}
+		}
+	}
+#endif
+#endif
+}
+
+/*
+ * Set one pktlogger configuration parameter from a name/value string pair
+ * (dstmac, dstip, srcip, dstport, wifimac, interface).
+ * Returns 0 on success, negative on parse failure or unknown parameter.
+ */
+int qdrv_pktlogger_set(struct qdrv_wlan *qw, const char *param, const char *value)
+{
+	int ret = 0;
+	struct net_device *ndev;
+
+	if (strcmp(param, "dstmac") == 0) {
+		ret = qdrv_parse_mac(value, qw->pktlogger.dst_addr);
+	} else if (strcmp(param, "dstip") == 0) {
+		ret = iputil_v4_pton(value, &qw->pktlogger.dst_ip);
+	} else if (strcmp(param, "srcip") == 0) {
+		ret = iputil_v4_pton(value, &qw->pktlogger.src_ip);
+	} else if (strcmp(param, "dstport") == 0) {
+		unsigned portnum;
+		if ((sscanf(value, "%u", &portnum) != 1)) {
+			printk("invalid portnum %s\n", value);
+			ret = -1;
+		} else {
+			/* stored in network byte order, ready for the UDP header */
+			qw->pktlogger.dst_port = htons(portnum);
+		}
+	} else if (strcmp(param, "wifimac") == 0) {
+		ret = qdrv_parse_mac(value, qw->pktlogger.recv_addr);
+	} else if (strcmp(param, "interface") == 0) {
+		ndev = dev_get_by_name(&init_net, value);
+		if (ndev) {
+			qw->pktlogger.dev = ndev;
+			/*
+			 * NOTE(review): dev_put() releases the reference taken
+			 * by dev_get_by_name() while pktlogger.dev keeps the
+			 * raw pointer — confirm the device cannot be
+			 * unregistered while the pktlogger still uses it.
+			 */
+			dev_put(ndev);
+		}
+	} else {
+		printk("%s is not a valid parameter\n", param);
+		return -1;
+	}
+
+	return ret;
+}
+
+/*
+ * Print the current pktlogger configuration and queue statistics to the
+ * kernel log, followed by the enable state and interval of each log type.
+ */
+void qdrv_pktlogger_show(struct qdrv_wlan *qw)
+{
+	uint8_t *addr_p;
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	int i;
+
+	addr_p = (uint8_t*)&qw->pktlogger.dst_ip;
+	printk("Dst IP:          %pI4:%u\n", addr_p, ntohs(qw->pktlogger.dst_port));
+
+	addr_p = (uint8_t*)&qw->pktlogger.src_ip;
+	printk("Src IP:          %pI4:%u\n", addr_p, ntohs(qw->pktlogger.src_port));
+
+	addr_p = (uint8_t*)qw->pktlogger.dst_addr;
+	printk("Dst MAC:         %pM\n", addr_p);
+	addr_p = (uint8_t*)qw->pktlogger.src_addr;
+	printk("Src MAC:         %pM\n", addr_p);
+	addr_p = (uint8_t*)qw->pktlogger.recv_addr;
+
+	printk("Wifi MAC:        %pM\n", addr_p);
+	printk("Max IP frag len: %u\n", qw->pktlogger.maxfraglen);
+	/* NOTE(review): dev is dereferenced without a NULL check here,
+	 * unlike other users of pktlogger.dev — confirm it is always set
+	 * before this can be called. */
+	printk("Device name:     %s\n", qw->pktlogger.dev->name);
+	printk("Packet id:       %u\n", qw->pktlogger.ip_id);
+	printk("Queue len:       %u\n", qw->pktlogger.queue_len);
+	printk("Queued:          %u\n", qw->pktlogger.stats.pkt_queued);
+	printk("Requeued:        %u\n", qw->pktlogger.stats.pkt_requeued);
+	printk("Dropped:         %u\n", qw->pktlogger.stats.pkt_dropped);
+	printk("Failed:          %u\n", qw->pktlogger.stats.pkt_failed);
+	printk("Queue send:      %u\n", qw->pktlogger.stats.queue_send);
+
+	printk("\nType      Enabled    Interval\n");
+	for (i = 0; i < QDRV_NETDEBUG_TYPE_MAX; i++) {
+		tbl = qdrv_pktlogger_get_tbls_by_id(i);
+		if (tbl) {
+			printk("%-10s%5d%9d\n", tbl->name,
+					!!(qw->pktlogger.flag & BIT(i)), tbl->interval);
+		}
+	}
+}
+
+/* RSSI monitor and MuC kill signal gen */
+/*
+ * Monitor per-antenna average RSSI; if any antenna reports zero RSSI for
+ * 30 consecutive samples (while packets are being counted), kill the MuC
+ * via hostlink and latch QDRV_WLAN_MUC_KILLED so it only fires once.
+ * Compiled out entirely unless QDRV_FEATURE_KILL_MUC is defined.
+ */
+static void qdrv_pktlogger_gen_muc_kill(struct qdrv_wlan *qw,
+			struct qtn_rx_stats *rx_stats)
+{
+#ifdef QDRV_FEATURE_KILL_MUC
+	int ant;
+	/* per-antenna count of consecutive zero-RSSI samples; static so it
+	 * persists across timer invocations */
+	static int rssi_zero_cnt[4] = {0, 0, 0, 0};
+
+	if ( qw->flags_ext & QDRV_WLAN_MUC_KILLED) {
+		return;
+	}
+
+	for (ant=0; ant < 4; ant++) {
+
+		if (rx_stats->num_pkts >= 0 &&
+				rx_stats->avg_rssi[ant] == 0) {
+			rssi_zero_cnt[ant]++;
+		} else {
+			rssi_zero_cnt[ant] = 0;
+		}
+
+	    if (rssi_zero_cnt[ant] >= 30) {
+			printk("Killing MuC based on low RSSI\n");
+			qdrv_hostlink_killmuc(qw);
+			qw->flags_ext |= QDRV_WLAN_MUC_KILLED;
+		}
+	}
+
+#if 0
+	/* Test feature */
+	if (rx_stats->sys_temp > 6000000) {
+			qdrv_hostlink_killmuc(qw);
+			qw->flags_ext |= QDRV_WLAN_MUC_KILLED;
+	}
+#endif
+
+	return;
+
+#endif
+}
+
+/*
+ * Fill the slab-watch output structure with usage figures (total/current/
+ * active/high-water allocations) for each watched kmem cache.  The CACHE()
+ * and ZACHE() macros are expanded once per entry by including
+ * qdrv_slab_watch.h, which lists size-N caches and named caches
+ * respectively; caches that were not found at init time are skipped.
+ */
+static void
+qdrv_pktlogger_slab_prepare(struct qdrv_wlan *qw, struct qdrv_slab_watch *p_out)
+{
+	struct qdrv_pktlogger *p_pktlogger = &qw->pktlogger;
+	struct kmem_cache *p_cache = NULL;
+
+#define CACHE(x)\
+	p_cache = p_pktlogger->qmeminfo.caches[QDRV_SLAB_IDX_SIZE_##x]; \
+	if (p_cache) { \
+		kmem_cache_calc_usage(p_cache, &p_out->stat_size_tot_alloc_##x, \
+				&p_out->stat_size_cur_alloc_##x, &p_out->stat_size_act_alloc_##x, \
+				&p_out->stat_size_hwm_alloc_##x); \
+	}
+#define ZACHE(y)\
+	p_cache = p_pktlogger->qmeminfo.caches[QDRV_SLAB_IDX_##y]; \
+	if (p_cache) { \
+		kmem_cache_calc_usage(p_cache, &p_out->stat_tot_alloc_##y, \
+				&p_out->stat_cur_alloc_##y, &p_out->stat_act_alloc_##y, \
+				&p_out->stat_hwm_alloc_##y); \
+	}
+#include "qdrv_slab_watch.h"
+#undef CACHE
+#undef ZACHE
+}
+
+/*
+ * Fill memory stats structure
+ *
+ * Snapshot system memory usage (free RAM plus slab/anon/mapped/cached page
+ * counters) into the netdebug memory stats record.  Values are in pages,
+ * as reported by si_meminfo()/global_page_state().
+ */
+static void
+qdrv_pktlogger_netdebug_mem_stats_prepare(struct qdrv_mem_stats *stats)
+{
+	struct sysinfo si;
+	si_meminfo(&si);
+
+	memset(stats, 0, sizeof(*stats));
+	stats->mem_free = si.freeram;
+	stats->mem_slab_reclaimable = global_page_state(NR_SLAB_RECLAIMABLE);
+	stats->mem_slab_unreclaimable = global_page_state(NR_SLAB_UNRECLAIMABLE);
+	stats->mem_anon = global_page_state(NR_ANON_PAGES);
+	stats->mem_mapped = global_page_state(NR_FILE_MAPPED);
+	stats->mem_cached = global_page_state(NR_FILE_PAGES);
+}
+
+/*
+ * Usual Evm value range is from -5.x to -28.x
+ * Need to decode it into signed integer in pktlogger
+ *
+ * Copy the per-stream last RX EVM values from the PHY RX stats into the
+ * EVM stats record, substituting 0 for streams where the MuC reported
+ * "not available".
+ */
+static void
+qdrv_pktlogger_insert_evms( struct qdrv_netdebug_stats *stats )
+{
+	int iter;
+
+	for (iter = 0; iter < QDRV_NUM_RF_STREAMS; iter++) {
+		if (stats->stats_phy_rx.last_rssi_evm[iter] != MUC_PHY_ERR_SUM_NOT_AVAIL) {
+			stats->stats_evm.rx_evm_val[iter] = stats->stats_phy_rx.last_rssi_evm[iter];
+		} else {
+			/* stream EVM not available this interval */
+			stats->stats_evm.rx_evm_val[iter] = 0;
+		}
+	}
+}
+
+/*
+ * Query the four TX power-detector voltages via a hostlink calcmd and
+ * store them in the stats record.  A DMA-coherent buffer is allocated for
+ * the command, the calcmd reply is read back as little-endian 16-bit
+ * values at fixed offsets, and the buffer is freed before returning.
+ * On allocation failure the stats fields are left untouched.
+ *
+ * NOTE(review): the calcmd opcode bytes {51, 0, 4, 0} and the reply
+ * offsets 5/6, 8/9, 11/12, 14/15 are a firmware protocol detail not
+ * visible here — confirm against the calcmd definition.
+ */
+static void
+qdrv_pktlogger_insert_pd_vol(struct qdrv_wlan *qw, struct qdrv_netdebug_stats *stats)
+{
+	char *cmd = NULL;
+	dma_addr_t cmd_dma;
+	char calcmd[4] = {51, 0, 4, 0};
+
+	cmd = qdrv_hostlink_alloc_coherent(NULL, QDRV_CMD_LENGTH, &cmd_dma, GFP_ATOMIC);
+	if (cmd == NULL) {
+		DBGPRINTF_E("Failed allocate %d bytes for cmd\n", QDRV_CMD_LENGTH);
+		return;
+	}
+
+	memcpy(cmd, calcmd, sizeof(calcmd));
+
+	/* synchronous command; reply is written back into the same buffer */
+	qdrv_hostlink_msg_calcmd(qw, sizeof(calcmd), cmd_dma);
+
+	stats->stats_pd_vol.tx_pd_vol[0] = cmd[6] << 8 | cmd[5];
+	stats->stats_pd_vol.tx_pd_vol[1] = cmd[9] << 8 | cmd[8];
+	stats->stats_pd_vol.tx_pd_vol[2] = cmd[12] << 8 | cmd[11];
+	stats->stats_pd_vol.tx_pd_vol[3] = cmd[15] << 8 | cmd[14];
+
+	qdrv_hostlink_free_coherent(NULL, QDRV_CMD_LENGTH, cmd, cmd_dma);
+}
+
+/*
+ * Record the number of CPU awake cycles elapsed since the previous call.
+ * The previous sample is kept in a function-static variable, so the first
+ * call reports the total awake cycles since boot.
+ */
+static void qdrv_pktlogger_netdebug_misc_stats_prepare(struct qdrv_wlan *qw, struct qdrv_misc_stats *stats)
+{
+	static uint64_t last_awake = 0;
+	uint64_t this_awake = 0;
+	/* 32-bit delta; wrap-around of the difference is tolerated */
+	uint32_t diff_awake = 0;
+
+	ruby_cpumon_get_cycles(NULL, &this_awake);
+	diff_awake = this_awake - last_awake;
+	last_awake = this_awake;
+
+	stats->cpuawake = diff_awake;
+}
+
+/*
+ * Read the TQE hardware counters: per-output-port packet counts and the
+ * global plus per-port drop counts, via MMIO register reads.
+ */
+static void qdrv_pktlogger_get_tqe_stats(struct qdrv_tqe_stats *tqe_stats)
+{
+	tqe_stats->emac0_outc = readl(TOPAZ_TQE_OUTPORT_EMAC0_CNT);
+	tqe_stats->emac1_outc = readl(TOPAZ_TQE_OUTPORT_EMAC1_CNT);
+	tqe_stats->wmac_outc = readl(TOPAZ_TQE_OUTPORT_WMAC_CNT);
+	tqe_stats->lhost_outc = readl(TOPAZ_TQE_OUTPORT_LHOST_CNT);
+	tqe_stats->muc_outc = readl(TOPAZ_TQE_OUTPORT_MUC_CNT);
+	tqe_stats->dsp_outc = readl(TOPAZ_TQE_OUTPORT_DSP_CNT);
+	tqe_stats->auc_outc = readl(TOPAZ_TQE_OUTPORT_AUC_CNT);
+	tqe_stats->pcie_outc = readl(TOPAZ_TQE_OUTPORT_PCIE_CNT);
+
+	tqe_stats->drop = readl(TOPAZ_TQE_DROP_CNT);
+	tqe_stats->emac0_drop= readl(TOPAZ_TQE_DROP_EMAC0_CNT);
+	tqe_stats->emac1_drop = readl(TOPAZ_TQE_DROP_EMAC1_CNT);
+	tqe_stats->wmac_drop = readl(TOPAZ_TQE_DROP_WMAC_CNT);
+	tqe_stats->lhost_drop = readl(TOPAZ_TQE_DROP_LHOST_CNT);
+	tqe_stats->muc_drop = readl(TOPAZ_TQE_DROP_MUC_CNT);
+	tqe_stats->dsp_drop = readl(TOPAZ_TQE_DROP_DSP_CNT);
+	tqe_stats->auc_drop = readl(TOPAZ_TQE_DROP_AUC_CNT);
+	tqe_stats->pcie_drop = readl(TOPAZ_TQE_DROP_PCIE_CNT);
+}
+
+/*
+ * Read the HBM (hardware buffer manager) per-master, per-pool request and
+ * release counters into hbm_stats (treated as a flat uint32_t array in
+ * request-then-release order), and derive aggregate request/release/
+ * outstanding/overflow/underflow figures into hbm_stats_oth.  The request
+ * total is seeded with the initial EMAC+WMAC RX buffer counts so that
+ * req - rel reflects buffers currently in flight.
+ */
+static void qdrv_pktlogger_get_hbm_stats(struct qdrv_hbm_stats *hbm_stats,
+						struct qdrv_hbm_stats_oth *hbm_stats_oth)
+{
+	int pool;
+	int master;
+	uint32_t *statp = (uint32_t *)hbm_stats;
+	int req = TOPAZ_HBM_BUF_EMAC_RX_COUNT + TOPAZ_HBM_BUF_WMAC_RX_COUNT;
+	int rel = 0;
+
+	/* layout assumption checked at compile time: request counters then
+	 * release counters, one uint32_t per (master, pool) pair */
+	COMPILE_TIME_ASSERT(sizeof(*hbm_stats) ==
+			TOPAZ_HBM_POOL_COUNT * TOPAZ_HBM_MASTER_COUNT * 2 * sizeof(uint32_t));
+
+	for (master = 0; master < TOPAZ_HBM_MASTER_COUNT; ++master) {
+		for (pool = 0; pool < TOPAZ_HBM_POOL_COUNT; ++pool) {
+			*statp = readl(TOPAZ_HBM_POOL_REQUEST_CNT(master, pool));
+			req += *statp;
+			statp++;
+		}
+	}
+	for (master = 0; master < TOPAZ_HBM_MASTER_COUNT; ++master) {
+		for (pool = 0; pool < TOPAZ_HBM_POOL_COUNT; ++pool) {
+			*statp = readl(TOPAZ_HBM_POOL_RELEASE_CNT(master, pool));
+			rel += *statp;
+			statp++;
+		}
+	}
+
+	hbm_stats_oth->hbm_req = req;
+	hbm_stats_oth->hbm_rel = rel;
+	hbm_stats_oth->hbm_diff = req - rel;
+	hbm_stats_oth->hbm_overflow = readl(TOPAZ_HBM_OVERFLOW_CNT);
+	hbm_stats_oth->hbm_underflow = readl(TOPAZ_HBM_UNDERFLOW_CNT);
+}
+
+
+/*
+ * Snapshot the DSP MU-MIMO group table from the TxBF mailbox: for each
+ * slot with a non-zero group id, record the two user AIDs and the rank.
+ * Slots with grp_id == 0 are left zeroed by the memset.
+ */
+static void qdrv_pktlogger_netdebug_dsp_mu_stats_prepare(
+	struct dsp_mu_stats* stats_dsp_mu)
+{
+	/* volatile: the mailbox is shared with the DSP and may change under us */
+	volatile struct qtn_txbf_mbox* txbf_mbox = qtn_txbf_mbox_get();
+	int i;
+
+	memset(stats_dsp_mu, 0, sizeof(*stats_dsp_mu));
+
+	for (i = 0; i < ARRAY_SIZE(txbf_mbox->mu_grp_qmat); i++) {
+		if (txbf_mbox->mu_grp_qmat[i].grp_id != 0) {
+			stats_dsp_mu->mu_u0_aid[i] = txbf_mbox->mu_grp_qmat[i].u0_aid;
+			stats_dsp_mu->mu_u1_aid[i] = txbf_mbox->mu_grp_qmat[i].u1_aid;
+			stats_dsp_mu->mu_rank[i]   = txbf_mbox->mu_grp_qmat[i].rank;
+		}
+	}
+}
+
+/*
+ * Gather statistics and send to the configured target
+ *
+ * Timer callback for the "stats" pktlogger type.  Collects statistics
+ * from all subsystems (WLAN rx/tx/sm, MuC rx/tx, PHY, AuC, TQE, HBM,
+ * qdisc, EMAC, memory, slab, misc, channel-switch, DSP MU) into one
+ * qdrv_netdebug_stats record, sends it, and re-arms the timer.
+ * 'data' is the qdrv_wlan pointer cast to unsigned long (timer API).
+ */
+void qdrv_pktlogger_netdebug_stats_send(unsigned long data)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)data;
+	struct qdrv_mac *mac = qw->mac;
+	struct qdrv_netdebug_stats *stats;
+	struct qtn_stats_log *iw_stats_log;
+	struct muc_rx_rates *muc_rx_rates_p;
+	struct qdrv_mem_stats mem_stats;
+	struct qdrv_misc_stats misc_stats;
+	int i;
+	int curr_index;
+	void *data_buff;
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	/* double-buffered rx rate snapshots, swapped at the end of each run */
+	struct muc_rx_rates *rx_rates_prev = qw->pktlogger.rx_rate_pre;
+	struct muc_rx_rates *rx_rates_curr = qw->pktlogger.rx_rate_cur;
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_STATS);
+	if (!tbl) {
+		return;
+	}
+/* NOTE(review): declaration after statements — relies on the compiler
+ * accepting this (gcc extension / C99) when the option is enabled */
+#if QDRV_NETDEBUG_ETH_DEV_STATS_ENABLED
+	struct net_device_stats *dev_stats;
+#endif
+	qdrv_pktlogger_flush_data(qw);
+
+	muc_rx_rates_p = (struct muc_rx_rates *)qw->pktlogger.stats_uc_rx_rate_ptr;
+	data_buff = qdrv_pktlogger_alloc_buffer("net", sizeof(*stats));
+	if (data_buff == NULL) {
+		/* NOTE(review): early return without re-arming the timer —
+		 * confirm stats logging is meant to stop permanently on a
+		 * single allocation failure */
+		return;
+	}
+	stats = (struct qdrv_netdebug_stats*) data_buff;
+	qdrv_pktlogger_hdr_init(qw, &stats->ndb_hdr, QDRV_NETDEBUG_TYPE_STATS,
+			sizeof(*stats));
+
+	/* Gather statistics */
+	memcpy(&stats->stats_wlan_rx, &qw->rx_stats, sizeof(stats->stats_wlan_rx));
+	memcpy(&stats->stats_wlan_tx, &qw->tx_stats, sizeof(stats->stats_wlan_tx));
+	memcpy(&stats->stats_wlan_sm, &qw->sm_stats, sizeof(stats->stats_wlan_sm));
+	memcpy(&stats->stats_scs_cnt, qw->pktlogger.stats_uc_scs_cnt, sizeof(stats->stats_scs_cnt));
+
+	iw_stats_log = (struct qtn_stats_log *)mac->mac_sys_stats;
+
+	/* Use the current entry en to the log, making sure it has not already been sent */
+	curr_index = (iw_stats_log->curr_buff - 1 + NUM_LOG_BUFFS) % NUM_LOG_BUFFS;
+
+	memcpy(&stats->stats_phy_rx, &iw_stats_log->stat_buffs[curr_index].rx_phy_stats,
+			sizeof(stats->stats_phy_rx));
+	memcpy(&stats->stats_phy_tx, &iw_stats_log->stat_buffs[curr_index].tx_phy_stats,
+			sizeof(stats->stats_phy_tx));
+
+	/*
+	 * Show the nss and mcs in a field with using XYY format.
+	 * X means nss (1 ~ 4)
+	 * YY means mcs (0~ 76)
+	 */
+	stats->stats_phy_rx.last_rx_mcs =
+		(stats->stats_phy_rx.last_rx_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(stats->stats_phy_rx.last_rx_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+	stats->stats_phy_tx.last_tx_mcs =
+		(stats->stats_phy_tx.last_tx_mcs & QTN_STATS_MCS_RATE_MASK) +
+		MS(stats->stats_phy_tx.last_tx_mcs, QTN_PHY_STATS_MCS_NSS) * 100;
+
+	/* AuC pointers may be NULL (see qdrv_pktlogger_set_auc_status_ptr) */
+	if (qw->pktlogger.stats_auc_sleep_p)
+		stats->stats_auc_intr_count.sleep = *qw->pktlogger.stats_auc_sleep_p;
+	if (qw->pktlogger.stats_auc_jiffies_p)
+		stats->stats_auc_intr_count.jiffies = *qw->pktlogger.stats_auc_jiffies_p;
+
+	if (qw->pktlogger.stats_auc_intr_p)
+		memcpy(stats->stats_auc_intr_count.aucirq, qw->pktlogger.stats_auc_intr_p,
+			sizeof(stats->stats_auc_intr_count.aucirq));
+	if (qw->pktlogger.stats_auc_dbg_p)
+		memcpy(&stats->stats_auc_debug_counts, qw->pktlogger.stats_auc_dbg_p,
+			sizeof(stats->stats_auc_debug_counts));
+
+	qdrv_pktlogger_get_tqe_stats(&stats->stats_tqe);
+	memcpy(&stats->stats_cgq, &qw->cgq_stats, sizeof(stats->stats_cgq));
+	qdrv_pktlogger_get_hbm_stats(&stats->stats_hbm, &stats->stats_hbm_oth);
+
+	if (qdrv_muc_stats_get_display_choice(&iw_stats_log->stat_buffs[curr_index],
+			&qw->ic) == QDRV_MUC_STATS_SHOW_EVM) {
+		qdrv_pktlogger_insert_evms(stats);
+	} else {
+		memset(stats->stats_evm.rx_evm_val, 0, sizeof(stats->stats_evm.rx_evm_val));
+	}
+
+	qdrv_pktlogger_insert_pd_vol(qw, stats);
+
+	/* uc_rx_stats are in DMEM, IO doesnt work */
+	memcpy(&stats->stats_muc_rx, qw->pktlogger.stats_uc_rx_ptr, sizeof(stats->stats_muc_rx));
+	memcpy(&stats->stats_muc_rx_bf, qw->pktlogger.stats_uc_rx_bf_ptr, sizeof(stats->stats_muc_rx_bf));
+	memcpy(&stats->stats_muc_tx, qw->pktlogger.stats_uc_tx_ptr, sizeof(stats->stats_muc_tx));
+
+	trace_ippkt_dropped(TRACE_IPPKT_DROP_RSN_MUC_RX_AGG_TIMEOUT,
+				stats->stats_muc_rx.agg_timeout, 1);
+	trace_ippkt_dropped(TRACE_IPPKT_DROP_RSN_MUC_RX_AGG_EMPTY,
+				stats->stats_muc_rx.agg_evict_empty, 1);
+
+	/* Queueing stats on the LHost - within QDisc struct (one per tx queue). */
+	if (qw->pktlogger.netdev_q_ptr_e != NULL) {
+		struct Qdisc *qd = qw->pktlogger.netdev_q_ptr_e->qdisc;
+		if (qd != NULL) {
+			stats->stats_qdisc.eth_sent = qd->bstats.packets;
+			stats->stats_qdisc.eth_dropped = qd->qstats.drops;
+		}
+	}
+	if (qw->pktlogger.netdev_q_ptr_w != NULL) {
+		struct Qdisc *qd = qw->pktlogger.netdev_q_ptr_w->qdisc;
+		if (qd != NULL) {
+			stats->stats_qdisc.wifi_sent = qd->bstats.packets;
+			stats->stats_qdisc.wifi_dropped = qd->qstats.drops;
+		}
+	}
+
+	/* EMAC DMA-miss counters, only meaningful when logging over ethernet */
+	if (qw->pktlogger.dev && (strncmp(qw->pktlogger.dev->name, "eth", 3) == 0)) {
+		if (qw->pktlogger.dev_emac0)
+			stats->stats_emac.rx_emac0_dma_missed =
+				qtn_eth_rx_lost_get(qw->pktlogger.dev_emac0);
+		if (qw->pktlogger.dev_emac1)
+			stats->stats_emac.rx_emac1_dma_missed =
+				qtn_eth_rx_lost_get(qw->pktlogger.dev_emac1);
+	}
+
+	/*
+	 * There are too many 32 bit rate fields to fit in the debug packet so use 16 bit
+	 * fields and take the difference from the previous value (otherwise it could wrap
+	 * every few seconds).
+	 */
+	memcpy(rx_rates_curr, muc_rx_rates_p, sizeof(*rx_rates_curr));
+	for (i = 0; i < ARRAY_SIZE(stats->rates_muc_rx.rx_mcs); i++) {
+		stats->rates_muc_rx.rx_mcs[i] =
+			(uint16_t) (rx_rates_curr->rx_mcs[i] - rx_rates_prev->rx_mcs[i]);
+	}
+	for (i = 0; i < ARRAY_SIZE(stats->rates_muc_rx_11ac.rx_11ac_mcs); i++) {
+		stats->rates_muc_rx_11ac.rx_11ac_mcs[i] =
+			(uint16_t) (rx_rates_curr->rx_mcs_11ac[i] - rx_rates_prev->rx_mcs_11ac[i]);
+	}
+	/* swap the prev/cur snapshot buffers for the next interval */
+	qw->pktlogger.rx_rate_pre = rx_rates_curr;
+	qw->pktlogger.rx_rate_cur = rx_rates_prev;
+
+#if QDRV_NETDEBUG_ETH_DEV_STATS_ENABLED
+	if (qw->pktlogger.dev &&
+		qw->pktlogger.dev->netdev_ops->ndo_get_stats) {
+
+		dev_stats = qw->pktlogger.dev->netdev_ops->ndo_get_stats(qw->pktlogger.dev);
+		memcpy(&stats->stats_eth, dev_stats, sizeof(stats->stats_eth));
+	}
+#endif
+
+	/* Prepare memory statistics */
+	qdrv_pktlogger_netdebug_mem_stats_prepare(&mem_stats);
+	memcpy(&stats->stats_mem, &mem_stats, sizeof(stats->stats_mem));
+
+	qdrv_pktlogger_slab_prepare(qw, &stats->stats_slab);
+
+	qdrv_pktlogger_netdebug_misc_stats_prepare(qw, &misc_stats);
+	memcpy(&stats->stats_misc, &misc_stats, sizeof(stats->stats_misc));
+
+	memcpy(&stats->stats_csw, &qw->csw_stats, sizeof(stats->stats_csw));
+
+	qdrv_pktlogger_netdebug_dsp_mu_stats_prepare(&stats->stats_dsp_mu);
+
+	/* Invoke stat monitor to kill MuC possibly */
+	qdrv_pktlogger_gen_muc_kill(qw, &stats->stats_phy_rx);
+
+	qdrv_pktlogger_send(stats, sizeof(*stats));
+
+	/* refresh timer */
+	mod_timer(&qw->pktlogger.stats_timer, jiffies + (tbl->interval * HZ));
+}
+
+/*
+ * Look up each watched kmem cache by name and cache the pointers for
+ * later use by qdrv_pktlogger_slab_prepare().  CACHE(x) maps to the
+ * "size-x" generic caches; ZACHE(y) maps to named caches.  The entries
+ * come from including qdrv_slab_watch.h.
+ */
+static void
+qdrv_pktlogger_slab_init(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger *p_pktlogger = &qw->pktlogger;
+#define CACHE(x)	p_pktlogger->qmeminfo.caches[QDRV_SLAB_IDX_SIZE_##x] = kmem_cache_find("size-"#x);
+#define ZACHE(y)	p_pktlogger->qmeminfo.caches[QDRV_SLAB_IDX_##y] = kmem_cache_find(#y);
+#include "qdrv_slab_watch.h"
+#undef CACHE
+#undef ZACHE
+}
+
+/*
+ * Enable the periodic "stats" logger: map the shared MuC/AuC stats
+ * regions, resolve AuC/slab pointers, and (re)arm the stats timer to
+ * fire qdrv_pktlogger_netdebug_stats_send() every tbl->interval seconds.
+ * Returns 0 on success, -1 if the type table or mapping is unavailable.
+ */
+static int qdrv_pktlogger_start_stats(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_STATS);
+
+	if (!tbl) {
+		return -1;
+	}
+	if (qdrv_pktlogger_map(qw) < 0) {
+		return -1;
+	}
+
+	qdrv_pktlogger_set_auc_status_ptr(qw);
+	qdrv_pktlogger_slab_init(qw);
+	/* stop any previous instance before re-initialising the timer */
+	del_timer(&qw->pktlogger.stats_timer);
+	init_timer(&qw->pktlogger.stats_timer);
+	qw->pktlogger.stats_timer.function = qdrv_pktlogger_netdebug_stats_send;
+	qw->pktlogger.stats_timer.data = (unsigned long)qw;
+	qw->pktlogger.stats_timer.expires = jiffies + (tbl->interval * HZ);
+	add_timer(&qw->pktlogger.stats_timer);
+
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_STATS);
+	printk("Netdebug is enabled\n");
+
+	return 0;
+}
+
+/*
+ * Disable the periodic "stats" logger: drop the cached netdev queue
+ * pointers, cancel the timer and clear the enable bit.
+ */
+static void qdrv_pktlogger_stop_stats(struct qdrv_wlan *qw)
+{
+	/*
+	 * Clear the netdev queue pointers so we can uninstall the qdisc and
+	 * resinstall a new one without crashing.
+	 */
+	qw->pktlogger.netdev_q_ptr_w = NULL;
+	qw->pktlogger.netdev_q_ptr_e = NULL;
+
+	del_timer(&qw->pktlogger.stats_timer);
+	qw->pktlogger.flag &= (~BIT(QDRV_NETDEBUG_TYPE_STATS));
+	printk("Netdebug is disabled\n");
+}
+
+/*
+ * Forward an iwevent payload (received over netlink from userspace) out
+ * through the pktlogger as a QDRV_NETDEBUG_TYPE_IWEVENT record.  The
+ * payload is truncated to the fixed iwevent_data buffer size.
+ */
+static void qdrv_pktlogger_send_iwevent(void *data, int len)
+{
+	struct qdrv_wlan *qw;
+	struct qdrv_netdebug_iwevent* hdr;
+	int payloadlen;
+
+	if (g_pktlogger_p) {
+		qw = g_pktlogger_p->qw;
+	} else {
+		printk("Pktlogger is not ready\n");
+		return;
+	}
+
+	hdr = qdrv_pktlogger_alloc_buffer("iwevent", sizeof(*hdr));
+	if (hdr == NULL) {
+		return;
+	}
+
+	/* clamp to the record's fixed-size data area */
+	if (len > sizeof(hdr->iwevent_data))
+		len = sizeof(hdr->iwevent_data);
+
+	payloadlen = sizeof(struct qdrv_pktlogger_hdr) + len;
+
+	qdrv_pktlogger_hdr_init(qw, &hdr->ndb_hdr, QDRV_NETDEBUG_TYPE_IWEVENT, payloadlen);
+	memcpy(hdr->iwevent_data, data, len);
+	qdrv_pktlogger_send(hdr, payloadlen);
+}
+
+/* Netlink query for the pktlogger compressed structures. */
+/*
+ * Wrap an arbitrary buffer in a netlink message and unicast it to the
+ * userspace listener.  Returns the nlmsg_unicast() result, or -1 if the
+ * skb allocation failed.
+ *
+ * NOTE(review): the destination portid 12345 is hard-coded — confirm the
+ * userspace consumer binds with this fixed portid.
+ */
+static int
+qdrv_pktlogger_create_pktlogger_data(struct qdrv_wlan *qw, char *p_buf, int buf_len)
+{
+	struct sk_buff *skb_out;
+	struct nlmsghdr *nlho;
+	void *p_pkt;
+	int res = -1;
+	skb_out = nlmsg_new(buf_len, GFP_KERNEL);
+	if (skb_out) {
+		nlho = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, buf_len, 0);
+		NETLINK_CB(skb_out).dst_group = 0;
+		p_pkt = nlmsg_data(nlho);
+		memcpy(p_pkt, p_buf, buf_len);
+		res = nlmsg_unicast(g_pktlogger_p->netlink_socket, skb_out, 12345);
+	}
+	return res;
+}
+
+/* This driver instance manages a single radio. */
+static int
+qdrv_pktlogger_max_radio_supported(void)
+{
+	return 1;
+}
+
+/* Test the "enabled" bit (bit 0) in a netlink pktlog config's flags. */
+static int
+qdrv_pktlogger_config_flag_enabled(struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig)
+{
+	return p_cur_pktconfig->flags & 0x1;
+}
+
+/* Set the "enabled" bit (bit 0) in a netlink pktlog config's flags. */
+static void
+qdrv_pktlogger_config_flag_enable(struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig)
+{
+	p_cur_pktconfig->flags |= 0x1;
+}
+
+/* Test whether the given log type is currently enabled at runtime. */
+static int
+qdrv_pktlogger_runtime_flag_enabled(struct qdrv_wlan *qw, int type)
+{
+	return qw->pktlogger.flag & BIT(type);
+}
+
+/* Clamp the requested logging interval to the [1, 180] second range. */
+static int
+qdrv_pktlogger_config_interval_sanitise(struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig)
+{
+	if (p_cur_pktconfig->rate > 180) {
+		return 180;
+	}
+	if (p_cur_pktconfig->rate < 1) {
+		return 1;
+	}
+	return p_cur_pktconfig->rate;
+}
+
+/*
+ * Apply one netlink-supplied pktlog configuration entry: update the
+ * interval and history depth in the type table, then start or stop the
+ * logger if the requested enable state differs from the current one.
+ * Unknown types (no table entry) are ignored.  radio_index is currently
+ * unused here (single-radio driver).
+ */
+static void
+qdrv_pktlogger_set_single(struct qdrv_wlan *qw, uint32_t radio_index, struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig)
+{
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	uint32_t this_type = p_cur_pktconfig->type;
+	tbl = qdrv_pktlogger_get_tbls_by_id(this_type);
+	if (tbl) {
+		int to_enable = qdrv_pktlogger_config_flag_enabled(p_cur_pktconfig);
+		int is_enabled = qdrv_pktlogger_runtime_flag_enabled(qw, this_type);
+		tbl->interval = qdrv_pktlogger_config_interval_sanitise(p_cur_pktconfig);
+		tbl->history = p_cur_pktconfig->history;
+		if (to_enable && !is_enabled) {
+			/* Enable - currently disabled */
+			if (tbl->start) {
+				printk("Enabling %s logger, period %d\n", tbl->name, tbl->interval);
+				tbl->start(qw);
+			}
+		} else if (!to_enable && is_enabled) {
+			/* Disable - currently enabled */
+			if (tbl->stop) {
+				printk("Disabling %s logger\n", tbl->name);
+				tbl->stop(qw);
+			}
+		}
+	}
+}
+
+/*
+ * Apply a full netlink configuration message.  rcontrol is a bitmask of
+ * radios to configure; for each selected radio, apply every pktlog
+ * config entry it carries (count in the low byte of pktlog_ver_cnt,
+ * bounded by the array size).  Iteration stops once the mask is
+ * exhausted or the supported radio count is reached.
+ */
+static void
+qdrv_pktlogger_set_config(struct qdrv_wlan *qw, struct pktlogger_nl_config_set_t *p_s_conf)
+{
+	int this_radio_idx = 0;
+	struct pktlogger_nl_config_t *p_conf = &p_s_conf->config;
+	uint32_t rcontrol = p_conf->rcontrol;
+
+	while (rcontrol) {
+		if (rcontrol & 0x1) {
+			uint32_t radio_config_count;
+			struct pktlogger_nl_radio_config_t *p_cur_radio = &p_conf->per_radio[this_radio_idx];
+			int i = 0;
+			radio_config_count = p_cur_radio->pktlog_ver_cnt & 0xFF;
+			/* Configure the radio appropriately */
+			for (i = 0; i < radio_config_count && i < ARRAY_SIZE(p_cur_radio->pktlog_configs); i++) {
+				struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig = &p_cur_radio->pktlog_configs[i];
+				qdrv_pktlogger_set_single(qw, this_radio_idx, p_cur_pktconfig);
+			}
+		}
+		/* Next radio */
+		rcontrol >>= 1;
+		this_radio_idx++;
+		if (this_radio_idx >= qdrv_pktlogger_max_radio_supported()) {
+			break;
+		}
+	}
+}
+
+/*
+ * Fill one netlink pktlog config entry from a type-table row: type id,
+ * enabled flag, versioned/basic struct sizes (flag 0x4 marks the 0xFFFF
+ * "special" vsize, 0x2 a normal versioned size), history, name and rate.
+ * NOTE(review): strncpy does not guarantee NUL termination if tbl->name
+ * fills the destination — confirm the consumer tolerates that.
+ */
+static void
+qdrv_pktlogger_get_one_config(struct qdrv_wlan *qw, struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig, struct qdrv_pktlogger_types_tbl *tbl)
+{
+	p_cur_pktconfig->type = tbl->id;
+
+	if (qw->pktlogger.flag & BIT(tbl->id)) {
+		qdrv_pktlogger_config_flag_enable(p_cur_pktconfig);
+	}
+	if (tbl->struct_vsize) {
+		if (tbl->struct_vsize == 0xFFFF) {
+			p_cur_pktconfig->flags |= 0x4;
+		} else {
+			p_cur_pktconfig->flags |= 0x2;
+			p_cur_pktconfig->struct_vsize = tbl->struct_vsize;
+		}
+	}
+	p_cur_pktconfig->struct_bsize = tbl->struct_bsize;
+	p_cur_pktconfig->history = tbl->history;
+	strncpy(&p_cur_pktconfig->name[0], &tbl->name[0], sizeof(p_cur_pktconfig->name));
+	p_cur_pktconfig->rate = tbl->interval;
+}
+
+/*
+ * Fill a single-config reply for (radio_index, ptype).  If the type is
+ * unknown, flags is set to the 0x8000 "invalid" marker instead.
+ */
+static void
+qdrv_pktlogger_get_config_one(struct qdrv_wlan *qw, struct pktlogger_nl_config_one_t *p_conf, uint32_t radio_index, uint32_t ptype)
+{
+	struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig = &p_conf->config;
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+
+	memset(p_conf, 0, sizeof(*p_conf));
+	p_conf->radio_index = radio_index;
+	tbl = qdrv_pktlogger_get_tbls_by_id(ptype);
+
+	if (tbl) {
+		qdrv_pktlogger_get_one_config(qw, p_cur_pktconfig, tbl);
+	} else {
+		p_cur_pktconfig->flags = 0x8000;
+	}
+}
+
+/*
+ * Fill the full configuration reply for the (single) radio: addressing
+ * (IPs, ports, MACs, radio name) plus one config entry per known log
+ * type.  pktlog_ver_cnt is set to the number of entries actually filled.
+ */
+static void
+qdrv_pktlogger_get_config(struct qdrv_wlan *qw, struct pktlogger_nl_config_t *p_conf)
+{
+	struct qdrv_pktlogger *p_pktlogger = &qw->pktlogger;
+	struct pktlogger_nl_radio_config_t *p_cur_radio;
+	struct pktlogger_nl_pktlog_config_t *p_cur_pktconfig;
+	int i = 0;
+	int pktcount = 0;
+	memset(p_conf, 0, sizeof(*p_conf));
+	p_conf->rev = 0;
+	/* single radio: only bit 0 of the radio-control mask is set */
+	p_conf->rcontrol = 0x1;
+	p_cur_radio = &p_conf->per_radio[0];
+	p_cur_radio->destip = p_pktlogger->dst_ip;
+	p_cur_radio->srcip = p_pktlogger->src_ip;
+	p_cur_radio->destport = p_pktlogger->dst_port;
+	p_cur_radio->srcport = p_pktlogger->src_port;
+	memcpy(&p_cur_radio->destmac[0], &p_pktlogger->dst_addr[0], sizeof(p_cur_radio->destmac));
+	memcpy(&p_cur_radio->srcmac[0], &p_pktlogger->src_addr[0], sizeof(p_cur_radio->srcmac));
+	strncpy(&p_cur_radio->radioname[0], qw->mac->vnet[0]->name, sizeof(p_cur_radio->radioname));
+	p_cur_pktconfig = &p_cur_radio->pktlog_configs[0];
+	/* type ids are sparse: skip ids with no table entry */
+	for (i = 0; i < ARRAY_SIZE(p_cur_radio->pktlog_configs); i++) {
+		struct qdrv_pktlogger_types_tbl *tbl = NULL;
+		tbl = qdrv_pktlogger_get_tbls_by_id(i);
+		if (tbl) {
+			qdrv_pktlogger_get_one_config(qw, p_cur_pktconfig, tbl);
+			p_cur_pktconfig++;
+			pktcount++;
+		}
+	}
+	p_cur_radio->pktlog_ver_cnt = pktcount;
+}
+
+/* Netlink query for a single pktlogger configuration structure */
+/*
+ * Build the reply skb for a single-config query: echo the query header,
+ * set mlen to the config size and append the config for radio arg1 /
+ * type arg2.  Returns NULL on bad radio index or allocation failure;
+ * the caller sends the skb.
+ */
+static struct sk_buff *
+qdrv_pktlogger_create_config_query_one(struct qdrv_wlan *qw, struct pktlogger_nl_query_t *p_query)
+{
+	struct sk_buff *skb_out;
+	struct nlmsghdr *nlho;
+	struct pktlogger_nl_query_t *p_outq;
+	struct pktlogger_nl_config_one_t *p_outconf;
+	int msg_size = sizeof(*p_outq) + sizeof(*p_outconf);
+
+	/* Ensure sanity of input */
+	if (p_query->arg1 >= qdrv_pktlogger_max_radio_supported()) {
+		return NULL;
+	}
+	//printk("Create config\n");
+	skb_out = nlmsg_new(msg_size, GFP_KERNEL);
+	if (skb_out) {
+		nlho = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
+		NETLINK_CB(skb_out).dst_group = 0;
+		p_outq = nlmsg_data(nlho);
+		p_outconf = (struct pktlogger_nl_config_one_t *)&p_outq->data[0];
+		memcpy(p_outq, p_query, sizeof(*p_outq));
+		p_outq->hdr.mlen = sizeof(*p_outconf);
+		qdrv_pktlogger_get_config_one(qw, p_outconf, p_query->arg1, p_query->arg2);
+	}
+	return skb_out;
+}
+
+/* Netlink query for the pktlogger config for all radios */
+/*
+ * Build the reply skb for a full-config query: echo the query header,
+ * set mlen to the config size and append the complete configuration.
+ * Returns NULL on allocation failure; the caller sends the skb.
+ */
+static struct sk_buff *
+qdrv_pktlogger_create_config_query(struct qdrv_wlan *qw, struct pktlogger_nl_query_t *p_query)
+{
+	struct sk_buff *skb_out;
+	struct nlmsghdr *nlho;
+	struct pktlogger_nl_query_t *p_outq;
+	struct pktlogger_nl_config_t *p_outconf;
+	int msg_size = sizeof(*p_outq) + sizeof(*p_outconf);
+	skb_out = nlmsg_new(msg_size, GFP_KERNEL);
+	if (skb_out) {
+		nlho = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
+		NETLINK_CB(skb_out).dst_group = 0;
+		p_outq = nlmsg_data(nlho);
+		p_outconf = (struct pktlogger_nl_config_t *)&p_outq->data[0];
+		memcpy(p_outq, p_query, sizeof(*p_outq));
+		p_outq->hdr.mlen = sizeof(*p_outconf);
+		qdrv_pktlogger_get_config(qw, p_outconf);
+	}
+	return skb_out;
+}
+
+/* Netlink query for the pktlogger compressed structures. */
+/*
+ * Build the reply skb for a struct-layout query: echo the query header,
+ * then append the static pktlogger_structs blob describing the record
+ * layouts.  Returns NULL on allocation failure; the caller sends it.
+ */
+static struct sk_buff *
+qdrv_pktlogger_create_struct_query(struct qdrv_wlan *qw, struct pktlogger_nl_query_t *p_query)
+{
+	struct sk_buff *skb_out;
+	struct nlmsghdr *nlho;
+	int msg_size = sizeof(pktlogger_structs) + sizeof(*p_query);
+	struct pktlogger_nl_query_t *p_outq;
+	void *p_structstart;
+	skb_out = nlmsg_new(msg_size, GFP_KERNEL);
+	if (skb_out) {
+		nlho = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
+		NETLINK_CB(skb_out).dst_group = 0;
+		p_outq = nlmsg_data(nlho);
+		p_structstart = &p_outq->data[0];
+		memcpy(p_outq, p_query, sizeof(*p_outq));
+		p_outq->hdr.mlen = sizeof(pktlogger_structs);
+		memcpy(p_structstart, pktlogger_structs, sizeof(pktlogger_structs));
+	}
+	return skb_out;
+}
+
+/* Generic query handler */
+/*
+ * Dispatch an incoming netlink query by query_num, build the matching
+ * reply skb and unicast it back to the sender's portid.  A failed
+ * unicast is only logged; builder failures (skb_out == NULL) are silent.
+ */
+static void qdrv_pktlogger_netlink_query(struct qdrv_wlan *qw, struct nlmsghdr *nlh)
+{
+	int pid;
+	int res = 0;
+	struct sk_buff *skb_out = NULL;
+	struct pktlogger_nl_query_t *p_query = (struct pktlogger_nl_query_t *)nlmsg_data(nlh);
+	/* reply goes back to the requesting socket's portid */
+	pid = nlh->nlmsg_pid;
+
+	switch (p_query->query_num) {
+
+	case PKTLOGGER_QUERY_STRUCT:
+		skb_out = qdrv_pktlogger_create_struct_query(qw, p_query);
+		break;
+	case PKTLOGGER_QUERY_CONFIG:
+		skb_out = qdrv_pktlogger_create_config_query(qw, p_query);
+		break;
+	case PKTLOGGER_QUERY_CONFIG_ONE:
+		skb_out = qdrv_pktlogger_create_config_query_one(qw, p_query);
+		break;
+	default:
+		printk("Unknown query (%d)\n", p_query->query_num);
+		break;
+	}
+
+	if (skb_out) {
+		res = nlmsg_unicast(g_pktlogger_p->netlink_socket, skb_out, pid);
+	}
+	if (res < 0)
+		printk("Error sending msg to uspace\n");
+}
+
+/* Apply a single-entry configuration message received over netlink. */
+static void
+qdrv_pktlogger_netlink_config_one(struct qdrv_wlan *qw, struct nlmsghdr *nlh)
+{
+	struct pktlogger_nl_config_oneset_t *p_oconfig = (struct pktlogger_nl_config_oneset_t *)nlmsg_data(nlh);
+	struct pktlogger_nl_pktlog_config_t *p_config = &p_oconfig->config.config;
+
+	qdrv_pktlogger_set_single(qw, p_oconfig->config.radio_index, p_config);
+}
+
+/* Apply a full configuration message received over netlink. */
+static void
+qdrv_pktlogger_netlink_config(struct qdrv_wlan *qw, struct nlmsghdr *nlh)
+{
+	struct pktlogger_nl_config_set_t *p_config = (struct pktlogger_nl_config_set_t *)nlmsg_data(nlh);
+
+	qdrv_pktlogger_set_config(qw, p_config);
+}
+
+/* Pktlogger netlink message incoming */
+/*
+ * Validate the pktlogger message magic, then dispatch by message type
+ * (query / full config / single config).  Bad magic and unknown types
+ * are logged and dropped.
+ */
+static void qdrv_pktlogger_netlink_msg(struct qdrv_wlan *qw, struct nlmsghdr *nlh)
+{
+	struct pktlogger_nl_hdr_t *p_hdr = (struct pktlogger_nl_hdr_t *)nlmsg_data(nlh);
+
+	if (p_hdr->magic != PKTLOGGER_MSG_MAGIC) {
+		printk("Invalid magic in pktlogger netlink msg\n");
+		return;
+	}
+	switch(p_hdr->mtype) {
+
+	case PKTLOGGER_NETLINK_MTYPE_QUERY:
+		qdrv_pktlogger_netlink_query(qw, nlh);
+		break;
+	case PKTLOGGER_NETLINK_MTYPE_CONFIG:
+		qdrv_pktlogger_netlink_config(qw, nlh);
+		break;
+	case PKTLOGGER_NETLINK_MTYPE_CONFIG_ONE:
+		qdrv_pktlogger_netlink_config_one(qw, nlh);
+		break;
+	default:
+		printk("Unknown msg type %d\n", p_hdr->mtype);
+		break;
+	}
+}
+
+/*
+ * Netlink socket input callback: route the message by nlmsg_type to the
+ * iwevent forwarder, the sysmsg sender, or the pktlogger control
+ * handler.
+ *
+ * NOTE(review): g_pktlogger_p is dereferenced without a NULL check here
+ * (unlike qdrv_pktlogger_send_iwevent) — confirm the socket cannot
+ * receive before g_pktlogger_p is initialised.  Also nlh->nlmsg_len
+ * includes the netlink header, yet it is passed as the payload length —
+ * verify the consumers account for that.
+ */
+static void qdrv_pktlogger_recv_msg(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlh  = (struct nlmsghdr*)skb->data;
+
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_ALL,
+			"%s line %d Netlink received pid:%d, size:%d, type:%d\n",
+			__FUNCTION__, __LINE__, nlh->nlmsg_pid, nlh->nlmsg_len, nlh->nlmsg_type);
+
+	switch (nlh->nlmsg_type) {
+		case QDRV_NETDEBUG_TYPE_IWEVENT:
+			if (g_pktlogger_p->flag & BIT(QDRV_NETDEBUG_TYPE_IWEVENT)) {
+				qdrv_pktlogger_send_iwevent(skb->data + sizeof(struct nlmsghdr),
+					nlh->nlmsg_len);
+			}
+			break;
+
+		case QDRV_NETDEBUG_TYPE_SYSMSG:
+			if (g_pktlogger_p->flag & BIT(QDRV_NETDEBUG_TYPE_SYSMSG)) {
+				qdrv_control_sysmsg_send(g_pktlogger_p->qw,
+					(char *)(skb->data + sizeof(struct nlmsghdr)),
+					nlh->nlmsg_len, 0);
+			}
+			break;
+		case QDRV_NETDEBUG_TYPE_PKTLOGGER:
+			qdrv_pktlogger_netlink_msg(g_pktlogger_p->qw, nlh);
+			break;
+
+		default:
+			printk("%s line %d Netlink Invalid type %d\n",
+				__FUNCTION__, __LINE__, nlh->nlmsg_type);
+			break;
+	}
+}
+
+/*
+ * Take a reference on the shared pktlogger netlink socket, creating it
+ * on first use (with the API matching the running kernel version).
+ * Returns 0 on success, -1 if socket creation failed.
+ *
+ * NOTE(review): the refcount is incremented before the create attempt
+ * and not rolled back on failure — confirm callers never call
+ * stop_netlink after a failed start, or the count will be skewed.
+ */
+static int qdrv_pktlogger_start_netlink(struct qdrv_wlan *qw)
+{
+	qw->pktlogger.netlink_ref++;
+	if (qw->pktlogger.netlink_socket == NULL) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0))
+		struct netlink_kernel_cfg cfg = {
+			.input = qdrv_pktlogger_recv_msg
+		};
+		qw->pktlogger.netlink_socket = netlink_kernel_create(&init_net,
+				QDRV_NETLINK_PKTLOGGER, &cfg);
+#else
+		qw->pktlogger.netlink_socket = netlink_kernel_create(&init_net,
+				QDRV_NETLINK_PKTLOGGER, 0, qdrv_pktlogger_recv_msg, NULL, THIS_MODULE);
+#endif
+
+		if (qw->pktlogger.netlink_socket == NULL) {
+			DBGPRINTF_E("Error creating netlink socket.\n");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Drop a reference on the shared netlink socket and release it when the
+ * last user (iwevent/sysmsg) is gone.
+ */
+static void qdrv_pktlogger_stop_netlink(struct qdrv_wlan *qw)
+{
+	qw->pktlogger.netlink_ref--;
+	if ((qw->pktlogger.netlink_ref == 0) && qw->pktlogger.netlink_socket) {
+			netlink_kernel_release(qw->pktlogger.netlink_socket);
+			qw->pktlogger.netlink_socket = NULL;
+	}
+}
+
+/*
+ * Enable forwarding of iwevent messages.  Idempotent: returns 0 if
+ * already enabled.  The enable flag is only set after the netlink
+ * transport is up, so a netlink failure leaves state consistent.
+ */
+static int qdrv_pktlogger_start_iwevent(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+
+	if (qw->pktlogger.flag & BIT(QDRV_NETDEBUG_TYPE_IWEVENT))
+		return 0;	/* already running */
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_IWEVENT);
+	if (!tbl) {
+		return -1;
+	}
+	if (qdrv_pktlogger_start_netlink(qw) != 0) {
+		/* do not claim iwevent is enabled if the transport failed */
+		return -1;
+	}
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_IWEVENT);
+	return 0;
+}
+
+/* Disable iwevent forwarding and release the shared netlink reference. */
+static void qdrv_pktlogger_stop_iwevent(struct qdrv_wlan *qw)
+{
+	if ((qw->pktlogger.flag & BIT(QDRV_NETDEBUG_TYPE_IWEVENT)) == 0)
+		return;
+
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_IWEVENT);
+	qdrv_pktlogger_stop_netlink(qw);
+}
+
+/*
+ * Enable periodic sysmsg forwarding.  Idempotent: returns 0 if already
+ * enabled.  The netlink transport is brought up first so a failure
+ * leaves no timer armed and the enable flag clear.
+ */
+static int qdrv_pktlogger_start_sysmsg(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+
+	if (qw->pktlogger.flag & BIT(QDRV_NETDEBUG_TYPE_SYSMSG))
+		return 0;	/* already running */
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_SYSMSG);
+	if (!tbl) {
+		return -1;
+	}
+	if (qdrv_pktlogger_start_netlink(qw) != 0) {
+		return -1;
+	}
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_SYSMSG);
+	del_timer(&qw->pktlogger.sysmsg_timer);
+	init_timer(&qw->pktlogger.sysmsg_timer);
+	qw->pktlogger.sysmsg_timer.function = qdrv_control_sysmsg_timer;
+	qw->pktlogger.sysmsg_timer.data = (unsigned long)qw;
+	mod_timer(&qw->pktlogger.sysmsg_timer, jiffies +
+		(tbl->interval * HZ));
+	return 0;
+}
+
+/*
+ * Disable sysmsg forwarding: flush any buffered text, cancel the
+ * periodic timer and drop the shared netlink reference.
+ */
+static void qdrv_pktlogger_stop_sysmsg(struct qdrv_wlan *qw)
+{
+	if ((qw->pktlogger.flag & BIT(QDRV_NETDEBUG_TYPE_SYSMSG)) == 0)
+		return;
+
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_SYSMSG);
+	/* final call with the flush argument set pushes out buffered data */
+	qdrv_control_sysmsg_send(qw, NULL, 0, 1);
+	del_timer(&qw->pktlogger.sysmsg_timer);
+	qdrv_pktlogger_stop_netlink(qw);
+}
+
+/*
+ * Timer handler: collect one second of rate statistics and queue them.
+ *
+ * TX rate stats are copied straight from MuC shared memory, after which
+ * the read flags are raised to let the MuC overwrite them.  RX rate
+ * counters are cumulative, so each report carries the delta against the
+ * previous snapshot (double-buffered via rx_ratelog_pre/cur).  EVM and
+ * CRC counters are taken from the most recent complete phy-stats buffer.
+ * @data is the struct qdrv_wlan pointer (timer callback convention).
+ */
+static void qdrv_pktlogger_ratedebug_send(unsigned long data)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)data;
+	void *databuf;
+	struct qdrv_netdebug_rate *rate_stats;
+	struct qtn_stats_log *iw_stats_log = (struct qtn_stats_log *)qw->mac->mac_sys_stats;
+	struct muc_rx_rates *muc_rx_rates_p =
+			(struct muc_rx_rates *)qw->pktlogger.stats_uc_rx_rate_ptr;
+	struct qtn_rate_tx_stats_per_sec *tx_rate_stats =
+			(struct qtn_rate_tx_stats_per_sec *)qw->pktlogger.stats_uc_tx_rate_ptr;
+	uint32_t *su_rates_read_ptr = qw->pktlogger.stats_uc_su_rates_read_ptr;
+	uint32_t *mu_rates_read_ptr = qw->pktlogger.stats_uc_mu_rates_read_ptr;
+	struct muc_rx_rates *rx_rates_prev = qw->pktlogger.rx_ratelog_pre;
+	struct muc_rx_rates *rx_rates_curr = qw->pktlogger.rx_ratelog_cur;
+	uint32_t curr_index;
+	int i;
+	int rate_entry = 0;
+	struct qdrv_pktlogger_types_tbl *tbl =
+			qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_RATE);
+
+	if (!tbl) {
+		return;
+	}
+
+	databuf = qdrv_pktlogger_alloc_buffer("rate", sizeof(*rate_stats));
+	if (databuf == NULL) {
+		return;
+	}
+	rate_stats = (struct qdrv_netdebug_rate *) databuf;
+	qdrv_pktlogger_hdr_init(qw, &rate_stats->ndb_hdr, QDRV_NETDEBUG_TYPE_RATE,
+			sizeof(*rate_stats));
+
+	/*
+	 * Copy assuming all rate adaptions for this second collected. The
+	 * parser will need to check the sequence numbers to detect missed data.
+	 */
+	memcpy(&rate_stats->rate_su_tx_stats, tx_rate_stats->stats_su,
+		sizeof(rate_stats->rate_su_tx_stats));
+	memcpy(&rate_stats->rate_mu_tx_stats, tx_rate_stats->stats_mu,
+		sizeof(rate_stats->rate_mu_tx_stats));
+	/* Tell MUC the rate tx stats can be re-written */
+	*su_rates_read_ptr = 1;
+	*mu_rates_read_ptr = 1;
+
+	/*
+	 * Get rates from the MuC and diff from previous values. Copy as many rates as
+	 * possible, starting from the last rate to prefer higher rates.
+	 */
+	memcpy(rx_rates_curr, muc_rx_rates_p, sizeof(*rx_rates_curr));
+
+	/* 11ac MCS deltas first (tagged with the 11ac mode in the high byte) */
+	for (i = ARRAY_SIZE(rx_rates_curr->rx_mcs_11ac) - 1; i >= 0; i--) {
+		if (rate_entry >= (ARRAY_SIZE(rate_stats->rate_gen_stats.rx_mcs)))
+			break;
+
+		if (rx_rates_curr->rx_mcs_11ac[i] == rx_rates_prev->rx_mcs_11ac[i])
+			continue;
+
+		rate_stats->rate_gen_stats.rx_mcs_rates[rate_entry] =
+			(uint16_t)((QTN_PHY_STATS_MODE_11AC << 8) | i);
+
+		rate_stats->rate_gen_stats.rx_mcs[rate_entry] =
+			rx_rates_curr->rx_mcs_11ac[i] - rx_rates_prev->rx_mcs_11ac[i];
+		rate_entry++;
+	}
+
+	/* then legacy/11n MCS deltas, while entries remain */
+	for (i = ARRAY_SIZE(rx_rates_curr->rx_mcs) - 1; i >= 0; i--) {
+		if (rate_entry >= (ARRAY_SIZE(rate_stats->rate_gen_stats.rx_mcs)))
+			break;
+
+		if (rx_rates_curr->rx_mcs[i] == rx_rates_prev->rx_mcs[i])
+			continue;
+
+		rate_stats->rate_gen_stats.rx_mcs_rates[rate_entry] = i;
+
+		rate_stats->rate_gen_stats.rx_mcs[rate_entry] =
+			rx_rates_curr->rx_mcs[i] - rx_rates_prev->rx_mcs[i];
+		rate_entry++;
+	}
+	/* swap double buffers: this snapshot becomes next round's baseline */
+	qw->pktlogger.rx_ratelog_pre = rx_rates_curr;
+	qw->pktlogger.rx_ratelog_cur = rx_rates_prev;
+
+	/* Find the current index into the phy stats */
+	curr_index = (iw_stats_log->curr_buff - 1 + NUM_LOG_BUFFS) % NUM_LOG_BUFFS;
+	/* NOTE(review): tstamp LSB appears to flag an invalid/in-progress
+	 * buffer, in which case EVM is zeroed - confirm the convention */
+	if ((iw_stats_log->stat_buffs[curr_index].tstamp & 0x01) == 0) {
+		u_int32_t *p_evms = &rate_stats->rate_gen_stats.rx_evm[0];
+		int32_t evm_int, evm_frac;
+
+		for (i = 0; i < QDRV_NUM_RF_STREAMS; i++) {
+			convert_evm_db(iw_stats_log->stat_buffs[curr_index].rx_phy_stats.last_rssi_evm[i],
+				       iw_stats_log->stat_buffs[curr_index].rx_phy_stats.last_rxsym,
+				       &evm_int,
+				       &evm_frac);
+
+			/* pack as 16.16: integer dB in high half, fraction low */
+			*p_evms++ = (evm_frac & 0xffff) | (evm_int << 16);
+		}
+	} else {
+		memset(rate_stats->rate_gen_stats.rx_evm, 0,
+			sizeof(rate_stats->rate_gen_stats.rx_evm));
+	}
+
+	rate_stats->rate_gen_stats.rx_crc =
+			iw_stats_log->stat_buffs[curr_index].rx_phy_stats.cnt_mac_crc;
+	rate_stats->rate_gen_stats.rx_sp_errors =
+			iw_stats_log->stat_buffs[curr_index].rx_phy_stats.cnt_sp_fail;
+	rate_stats->rate_gen_stats.rx_lp_errors =
+			iw_stats_log->stat_buffs[curr_index].rx_phy_stats.cnt_lp_fail;
+
+	qdrv_pktlogger_send(rate_stats, sizeof(*rate_stats));
+
+	/* refresh timer */
+	mod_timer(&qw->pktlogger.rate_timer, jiffies + (tbl->interval * HZ));
+}
+
+/*
+ * Enable periodic rate logging: map the MuC shared stats areas and arm
+ * the rate timer.  Returns 0 on success, -1 if the table entry is
+ * missing or the shared stats could not be mapped.
+ */
+static int qdrv_pktlogger_start_rate(struct qdrv_wlan *qw)
+{
+	struct timer_list *timer = &qw->pktlogger.rate_timer;
+	struct qdrv_pktlogger_types_tbl *tbl =
+				qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_RATE);
+	del_timer(&qw->pktlogger.rate_timer);
+
+	if (!tbl) {
+		return -1;
+	}
+	if (qdrv_pktlogger_map(qw) < 0) {
+		return -1;
+	}
+
+	init_timer(timer);
+	timer->function = qdrv_pktlogger_ratedebug_send;
+	timer->data = (unsigned long)qw;
+	timer->expires = jiffies + (tbl->interval * HZ);
+	add_timer(timer);
+
+	/* Set the flag to clear old stats */
+	*qw->pktlogger.stats_uc_su_rates_read_ptr = 1;
+	*qw->pktlogger.stats_uc_mu_rates_read_ptr = 1;
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_RATE);
+	printk("Ratedebug is enabled\n");
+	return 0;
+}
+
+/* Cancel periodic rate logging and clear its enable flag. */
+static void qdrv_pktlogger_stop_rate(struct qdrv_wlan *qw)
+{
+	del_timer(&qw->pktlogger.rate_timer);
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_RATE);
+	printk("Ratedebug is disabled\n");
+}
+
+/*
+ * Timer handler: dump every configured memory watchpoint into one
+ * record and queue it, then re-arm the timer.  Each watchpoint is
+ * serialised as {addr, size, size x 32-bit words} read through its
+ * ioremap'd address.  The BB mutex is held while reading because BB
+ * registers may be among the watchpoints.
+ * @data is the struct qdrv_wlan pointer (timer callback convention).
+ */
+static void qdrv_pktlogger_mem_send(unsigned long data)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)data;
+	struct qdrv_netdebug_mem *stats;
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	u8 *p;
+	int i;
+	int j;
+	u32 *remap_addr;
+	u32 val;
+	void *data_buf;
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_MEM);
+	if (!tbl) {
+		return;
+	}
+	data_buf = qdrv_pktlogger_alloc_buffer("mem", sizeof(*stats));
+	if (data_buf == NULL) {
+		return;
+	}
+	stats = (struct qdrv_netdebug_mem *) data_buf;
+	qdrv_pktlogger_hdr_init(qw, &stats->ndb_hdr, QDRV_NETDEBUG_TYPE_MEM,
+			sizeof(*stats));
+
+	/* copy from each location into the packet */
+	p = &stats->stvec_data[0];
+
+	/* BB registers can be registered into memdebug */
+	qtn_bb_mutex_enter(QTN_LHOST_SOC_CPU);
+
+	for (i = 0; i < qw->pktlogger.mem_wp_index; i++) {
+		memcpy(p, &qw->pktlogger.mem_wps[i].addr, sizeof(qw->pktlogger.mem_wps[i].addr));
+		p += sizeof(qw->pktlogger.mem_wps[i].addr);
+
+		memcpy(p, &qw->pktlogger.mem_wps[i].size, sizeof(qw->pktlogger.mem_wps[i].size));
+		p += sizeof(qw->pktlogger.mem_wps[i].size);
+
+		remap_addr = qw->pktlogger.mem_wps[i].remap_addr;
+		for (j = 0; j < qw->pktlogger.mem_wps[i].size; j++) {
+			val = *remap_addr;
+			memcpy(p, &val, sizeof(u32));
+			p += sizeof(u32);
+			remap_addr++;
+		}
+	}
+
+	qtn_bb_mutex_leave(QTN_LHOST_SOC_CPU);
+
+	/* send completed packet */
+	qdrv_pktlogger_send(stats, sizeof(*stats));
+
+	/* refresh timer */
+	mod_timer(&qw->pktlogger.mem_timer, jiffies + (tbl->interval * HZ));
+}
+
+/*
+ * Arm the periodic memory-watchpoint dump.  Requires at least one
+ * watchpoint to have been configured beforehand.
+ * Returns 0 on success, -1 on error.
+ */
+static int qdrv_pktlogger_start_mem(struct qdrv_wlan *qw)
+{
+	struct timer_list *timer = &qw->pktlogger.mem_timer;
+	struct qdrv_pktlogger_types_tbl *tbl;
+
+	if (qw->pktlogger.mem_wp_index == 0) {
+		DBGPRINTF_E("no watchpoints defined! not starting\n");
+		return -1;
+	}
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_MEM);
+	if (tbl == NULL)
+		return -1;
+
+	del_timer(timer);
+	init_timer(timer);
+	timer->function = qdrv_pktlogger_mem_send;
+	timer->data = (unsigned long) qw;
+	timer->expires = jiffies + (tbl->interval * HZ);
+
+	add_timer(timer);
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_MEM);
+	return 0;
+}
+
+/* Cancel the mem dump timer and unmap every configured watchpoint. */
+static void qdrv_pktlogger_stop_mem(struct qdrv_wlan *qw)
+{
+	int wp;
+
+	del_timer(&qw->pktlogger.mem_timer);
+	for (wp = 0; wp < qw->pktlogger.mem_wp_index; wp++)
+		iounmap(qw->pktlogger.mem_wps[wp].remap_addr);
+	qw->pktlogger.mem_wp_index = 0;
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_MEM);
+}
+
+#ifdef CONFIG_QVSP
+/*
+ * Forward a block of VSP statistics to the netdebug target: the
+ * payload is copied behind a freshly initialised pktlogger header.
+ */
+static void qdrv_pktlogger_vspdebug_send(void *data, void *vsp_data, uint32_t size)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)data;
+	struct qdrv_pktlogger_hdr *hdr;
+	uint32_t total = size + sizeof(struct qdrv_pktlogger_hdr);
+
+	hdr = qdrv_pktlogger_alloc_buffer("vsp", total);
+	if (hdr == NULL)
+		return;
+
+	qdrv_pktlogger_hdr_init(qw, hdr, QDRV_NETDEBUG_TYPE_VSP, total);
+	memcpy(hdr + 1, vsp_data, size);
+
+	qdrv_pktlogger_send(hdr, total);
+}
+
+/*
+ * Enable periodic VSP stats forwarding (driven by the VSP module's own
+ * scheduling via the callback registered here).
+ * Returns 0 on success, -1 if VSP is absent or the table entry is missing.
+ */
+static int qdrv_pktlogger_start_vsp(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	if (qw->qvsp == NULL) {
+		/* fixed double negative in the original message */
+		DBGPRINTF_E("VSP is not initialised\n");
+		return -1;
+	}
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_VSP);
+	if (!tbl) {
+		return -1;
+	}
+	qvsp_netdbg_init(qw->qvsp, &qdrv_pktlogger_vspdebug_send, tbl->interval);
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_VSP);
+	printk("VSP netdebug is enabled\n");
+	return 0;
+}
+
+/* Disable VSP stats forwarding and clear the enable flag. */
+static void qdrv_pktlogger_stop_vsp(struct qdrv_wlan *qw)
+{
+	if (qw->qvsp == NULL) {
+		/* fixed double negative in the original message */
+		DBGPRINTF_E("VSP is not initialised\n");
+		return;
+	}
+	qvsp_netdbg_exit(qw->qvsp);
+	qw->pktlogger.flag &= (~BIT(QDRV_NETDEBUG_TYPE_VSP));
+	printk("VSP netdebug is disabled\n");
+}
+#endif
+
+/*
+ * Push radar pulse statistics out on every call from the radar module.
+ * @pulse_copy_iter copies one pulse record into the destination buffer
+ * and returns the number of bytes written.  On allocation failure the
+ * callback deregisters itself to stop further reports.
+ */
+static void qdrv_control_radar_stats_send(void *data,
+	int (*pulse_copy_iter)(void *dest, void *src, int pulse_indx),
+	void *pulse_buf, int num_pulses)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)data;
+	struct qdrv_radar_stats *stats;
+	int indx;
+	int num_bytes = 0;
+	int trimlen;	/* declared up front: avoid C90 mixed declarations */
+
+	stats = qdrv_pktlogger_alloc_buffer("radar", sizeof(*stats));
+	if (stats == NULL) {
+		radar_register_statcb(NULL, NULL);
+		return;
+	}
+
+	qdrv_pktlogger_hdr_init(qw, &stats->ndb_hdr, QDRV_NETDEBUG_TYPE_RADAR,
+			sizeof(*stats));
+
+	/* Max num of pulses is 175 based on 8 bytes per pulse */
+	if (num_pulses > QDRV_NETDEBUG_RADAR_MAXPULSE) {
+		stats->ndb_hdr.flags = QDRV_NETDEBUG_FLAGS_TRUNCATED;
+		num_pulses = QDRV_NETDEBUG_RADAR_MAXPULSE;
+	}
+
+	stats->numpulses = num_pulses;
+
+	/* Gather statistics */
+	for (indx = 0; indx < num_pulses; indx++) {
+		num_bytes += pulse_copy_iter(
+				stats->pulseinfo + num_bytes, pulse_buf, indx);
+	}
+
+	/* Trim the unused tail of the pulse area from the wire packet */
+	trimlen = QDRV_NETDEBUG_RADAR_PULSESIZE * QDRV_NETDEBUG_RADAR_MAXPULSE - num_bytes;
+
+	qdrv_pktlogger_send(stats, sizeof(*stats) - trimlen);
+}
+
+/* Register the radar stats callback and mark radar logging enabled. */
+static int qdrv_pktlogger_start_radar(struct qdrv_wlan *qw)
+{
+	int rc = radar_register_statcb(qdrv_control_radar_stats_send, (void *)qw);
+
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_RADAR);
+	return rc;
+}
+
+/* Deregister the radar stats callback and clear the enable flag. */
+static void qdrv_pktlogger_stop_radar(struct qdrv_wlan *qw)
+{
+	radar_register_statcb(NULL, NULL);
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_RADAR);
+}
+
+/* Copy one node's MAC address and shared phy stats into slot @index. */
+void add_per_node_phystat(struct qdrv_netdebug_phystats* stats,
+	int index, struct ieee80211_node *ni)
+{
+	struct qdrv_netdebug_per_node_phystats* slot =
+		&stats->per_node_stats[index];
+
+	memcpy(&slot->node_macaddr, &ni->ni_macaddr, sizeof(slot->node_macaddr));
+	memcpy(&slot->per_node_phystats, ni->ni_shared_stats,
+		sizeof(slot->per_node_phystats));
+}
+
+/*
+ * Timer handler: walk the MuC phy-stats circular buffer and queue one
+ * record per stats slot not yet reported (tracked via last_tstamp).
+ * Each record also carries a per-associated-node stats snapshot taken
+ * under the node-table lock.
+ * NOTE(review): host_log/last_tstamp are function-static, which assumes
+ * a single wlan instance uses this path - confirm for multi-MAC builds.
+ * @data is the struct qdrv_wlan pointer (timer callback convention).
+ */
+void qdrv_pktlogger_phystats_send(unsigned long data)
+{
+	static struct qtn_stats_log host_log;
+	static int last_tstamp = 0;
+	struct qdrv_wlan* qw = (struct qdrv_wlan *)data;
+	struct qdrv_mac* mac             = qw ? qw->mac : NULL;
+	struct ieee80211_node_table* nt  = qw ? &qw->ic.ic_sta : NULL;
+	struct ieee80211_node* ni;
+
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	int i;
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_PHY_STATS);
+
+	if (!(tbl && mac && mac->mac_sys_stats && nt)) {
+		return;
+	}
+
+	/* snapshot the shared log so the MuC can keep writing behind us */
+	memcpy(&host_log, mac->mac_sys_stats, sizeof(host_log));
+	/* Gather statistics */
+	/* Why +2:
+	 * the "current" stat is the most recently _complete_ stat written
+	 * item at current+1 is being written right now and probably not safe to read
+	 * item at +2 is the oldest available stat in this circular buffer
+	*/
+	for ( i = (host_log.curr_buff + 2) % NUM_LOG_BUFFS;
+		i != host_log.curr_buff;
+		i = (i+1) % NUM_LOG_BUFFS) {
+
+		struct qtn_stats* curr_log_ptr = &host_log.stat_buffs[i];
+		if(curr_log_ptr->tstamp > last_tstamp) {
+			struct qdrv_netdebug_phystats* phystats;
+			/*
+			 * we do not know how much nodes will be acquired so
+			 * allocate room enough for the worst case
+			 */
+			int phystats_len = sizeof(*phystats) +
+				(QTN_NCIDX_MAX-1) * sizeof(phystats->per_node_stats[0]);
+
+			phystats = qdrv_pktlogger_alloc_buffer("phystats", phystats_len);
+
+			if (phystats != NULL) {
+				int node_index = 0;
+				IEEE80211_NODE_LOCK_IRQ(nt);
+				TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+					add_per_node_phystat(phystats, node_index, ni);
+					node_index++;
+				}
+				IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+				phystats->per_node_stats_count = node_index;
+
+				/* redefine phystats_len as now we know how much nodes do we have*/
+				phystats_len = sizeof(*phystats) +
+					(node_index-1) * sizeof(phystats->per_node_stats[0]);
+
+				qdrv_pktlogger_hdr_init(qw, &phystats->ndb_hdr,
+					QDRV_NETDEBUG_TYPE_PHY_STATS, phystats_len);
+
+				memcpy(&phystats->stats, curr_log_ptr, sizeof(phystats->stats));
+
+				qdrv_pktlogger_send(phystats, phystats_len);
+			}
+
+			last_tstamp = curr_log_ptr->tstamp;
+		}
+	}
+
+	/* refresh timer */
+	mod_timer(&qw->pktlogger.phy_stats_timer, jiffies + (tbl->interval * HZ));
+}
+
+/*
+ * Arm the periodic phy-stats sender.
+ * Returns 0 on success, -1 if the table entry is missing.
+ */
+static int qdrv_pktlogger_start_phy_stats(struct qdrv_wlan *qw)
+{
+	struct timer_list *timer = &qw->pktlogger.phy_stats_timer;
+	struct qdrv_pktlogger_types_tbl *tbl =
+				qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_PHY_STATS);
+	del_timer(&qw->pktlogger.phy_stats_timer);
+
+	if (!tbl) {
+		printk("unable to find item at QDRV_NETDEBUG_TYPE_PHY_STATS\n");
+		return -1;
+	}
+
+	init_timer(timer);
+	timer->function = qdrv_pktlogger_phystats_send;
+	timer->data = (unsigned long)qw;
+	timer->expires = jiffies + (tbl->interval * HZ);
+	add_timer(timer);
+
+	/* Set the flag to clear old stats */
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_PHY_STATS);
+	printk("phy_stats sending enabled\n");
+	return 0;
+}
+
+/* Cancel periodic phy-stats sending and clear its enable flag. */
+static void qdrv_pktlogger_stop_phy_stats(struct qdrv_wlan *qw)
+{
+	del_timer(&qw->pktlogger.phy_stats_timer);
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_PHY_STATS);
+	printk("phy_stats sending disabled\n");
+}
+
+/*
+ * Timer handler: snapshot the DSP statistics from the TxBF mailbox and
+ * queue them as one pktlogger record, then re-arm the timer.
+ * @data is the struct qdrv_wlan pointer (timer callback convention).
+ */
+void qdrv_pktlogger_dspstats_send(unsigned long data)
+{
+	struct qdrv_wlan* qw = (struct qdrv_wlan *)data;
+	struct qdrv_mac* mac             = qw ? qw->mac : NULL;
+
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	volatile struct qtn_txbf_mbox *txbf_mbox = qtn_txbf_mbox_get();
+	struct qdrv_netdebug_dspstats* dspstats;
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_DSP_STATS);
+
+	if (!(tbl && mac)) {
+		return;
+	}
+
+	dspstats = qdrv_pktlogger_alloc_buffer("dspstats", sizeof(*dspstats));
+
+	if (dspstats != NULL) {
+		qdrv_pktlogger_hdr_init(qw, &dspstats->ndb_hdr,
+			QDRV_NETDEBUG_TYPE_DSP_STATS, sizeof(*dspstats));
+		/* cast away volatile for the bulk copy of the mailbox stats */
+		memcpy(&dspstats->stats, (void *)&txbf_mbox->dsp_stats, sizeof(dspstats->stats));
+		qdrv_pktlogger_send(dspstats, sizeof(*dspstats));
+	}
+
+	/* refresh timer */
+	mod_timer(&qw->pktlogger.dsp_stats_timer, jiffies + (tbl->interval * HZ));
+}
+
+/*
+ * Arm the periodic DSP-stats sender.
+ * Returns 0 on success, -1 if the table entry is missing.
+ */
+static int qdrv_pktlogger_start_dsp_stats(struct qdrv_wlan *qw)
+{
+	struct timer_list *timer = &qw->pktlogger.dsp_stats_timer;
+	struct qdrv_pktlogger_types_tbl *tbl =
+				qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_DSP_STATS);
+	del_timer(&qw->pktlogger.dsp_stats_timer);
+
+	if (!tbl) {
+		printk("unable to find item at QDRV_NETDEBUG_TYPE_DSP_STATS\n");
+		return -1;
+	}
+
+	init_timer(timer);
+	timer->function = qdrv_pktlogger_dspstats_send;
+	timer->data = (unsigned long)qw;
+	timer->expires = jiffies + (tbl->interval * HZ);
+	add_timer(timer);
+
+	/* Set the flag to clear old stats */
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_DSP_STATS);
+	printk("dsp_stats sending enabled\n");
+	return 0;
+}
+
+/* Cancel periodic DSP-stats sending and clear its enable flag. */
+static void qdrv_pktlogger_stop_dsp_stats(struct qdrv_wlan *qw)
+{
+	del_timer(&qw->pktlogger.dsp_stats_timer);
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_DSP_STATS);
+	printk("dsp_stats sending disabled\n");
+}
+
+/*
+ * One-shot sender for the MuC core dump (invoked from
+ * qdrv_pktlogger_start_core_dump(), not from a periodic timer).
+ * Silently returns if no dump data is available.
+ * NOTE(review): the header length reflects only len_copied bytes, but
+ * the full struct is sent - confirm the receiver trusts the header.
+ * @data is the struct qdrv_wlan pointer (timer-callback-style signature).
+ */
+void qdrv_pktlogger_core_dump_send(unsigned long data)
+{
+	struct qdrv_wlan* qw = (struct qdrv_wlan *)data;
+	struct qdrv_mac* mac = qw ? qw->mac : NULL;
+	struct qdrv_pktlogger_types_tbl *tbl = NULL;
+	struct qdrv_netdebug_core_dump *core_dump;
+	uint32_t len_copied = 0;
+
+	tbl = qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_CORE_DUMP);
+	if (!(tbl && mac)) {
+		return;
+	}
+
+	core_dump = qdrv_pktlogger_alloc_buffer(tbl->name, tbl->struct_bsize + tbl->struct_vsize);
+	if (core_dump) {
+		qdrv_copy_core_dump(core_dump->data, sizeof(core_dump->data), &len_copied);
+		if (!len_copied) {
+			/* nothing to report - release the unused buffer */
+			qdrv_pktlogger_free_buffer(core_dump);
+			return;
+		}
+
+		qdrv_pktlogger_hdr_init(qw, &core_dump->ndb_hdr,
+			QDRV_NETDEBUG_TYPE_CORE_DUMP, sizeof(core_dump->ndb_hdr) + len_copied);
+
+		qdrv_pktlogger_send(core_dump, sizeof(*core_dump));
+	}
+}
+
+/*
+ * Enable core-dump reporting and immediately send the dump if one
+ * exists; it is sent only once per enable ('interval' is ignored).
+ * Returns 0 on success, -1 if the table entry is missing.
+ */
+static int qdrv_pktlogger_start_core_dump(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger_types_tbl *entry =
+				qdrv_pktlogger_get_tbls_by_id(QDRV_NETDEBUG_TYPE_CORE_DUMP);
+
+	if (entry == NULL) {
+		printk("Unable to find item at QDRV_NETDEBUG_TYPE_CORE_DUMP\n");
+		return -1;
+	}
+
+	/* Set the flag to clear old stats */
+	qw->pktlogger.flag |= BIT(QDRV_NETDEBUG_TYPE_CORE_DUMP);
+	printk("core_dump sending enabled\n");
+
+	qdrv_pktlogger_core_dump_send((unsigned long) qw);
+
+	return 0;
+}
+
+/* Disable core-dump reporting. */
+static void qdrv_pktlogger_stop_core_dump(struct qdrv_wlan *qw)
+{
+	qw->pktlogger.flag &= ~BIT(QDRV_NETDEBUG_TYPE_CORE_DUMP);
+	printk("core_dump sending disabled\n");
+}
+
+/* Ask the MuC (via hostlink) to enable flushing of buffered data. */
+void qdrv_pktlogger_flush_data(struct qdrv_wlan *qw)
+{
+	qdrv_hostlink_enable_flush_data(qw, 1);
+}
+
+/*
+ * Registry of pktlogger record types: per entry {id, name, start
+ * handler, stop handler, default interval (seconds), base record size,
+ * extra size, ...}.  Terminated by the sentinel with id == -1 / NULL
+ * name.  NOTE(review): field meanings inferred from usage elsewhere in
+ * this file (tbl->interval / struct_bsize / struct_vsize) - confirm
+ * against the struct definition in the header.
+ */
+struct qdrv_pktlogger_types_tbl qdrv_pktlogger_types_tbl_ent[] =
+{
+		{QDRV_NETDEBUG_TYPE_STATS, "stats",
+				qdrv_pktlogger_start_stats, qdrv_pktlogger_stop_stats,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_STATS,
+				sizeof(struct qdrv_netdebug_stats), 0, 0},
+		{QDRV_NETDEBUG_TYPE_RADAR, "radar",
+				qdrv_pktlogger_start_radar, qdrv_pktlogger_stop_radar,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_NONE,
+				sizeof(struct qdrv_radar_stats), 0, 0},
+		{QDRV_NETDEBUG_TYPE_TXBF, "txbf", NULL, NULL,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_NONE,
+				sizeof(struct qdrv_netdebug_txbf), 0, 0},
+		{QDRV_NETDEBUG_TYPE_IWEVENT, "iwevent",
+				qdrv_pktlogger_start_iwevent, qdrv_pktlogger_stop_iwevent,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_NONE,
+				sizeof(struct qdrv_netdebug_iwevent), 0xFFFF, 0},
+		{QDRV_NETDEBUG_TYPE_SYSMSG, "sysmsg",
+				qdrv_pktlogger_start_sysmsg, qdrv_pktlogger_stop_sysmsg,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_SYSMSG,
+				sizeof(struct qdrv_netdebug_sysmsg), 0xFFFF, 0},
+		{QDRV_NETDEBUG_TYPE_MEM, "mem",
+				qdrv_pktlogger_start_mem, qdrv_pktlogger_stop_mem,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_MEM,
+				sizeof(struct qdrv_netdebug_mem), 0, 0},
+		{QDRV_NETDEBUG_TYPE_RATE, "rate",
+				qdrv_pktlogger_start_rate, qdrv_pktlogger_stop_rate,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_RATE,
+				sizeof(struct qdrv_netdebug_rate), 0, 0},
+#ifdef CONFIG_QVSP
+		{QDRV_NETDEBUG_TYPE_VSP, "vsp",
+				qdrv_pktlogger_start_vsp, qdrv_pktlogger_stop_vsp,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_VSP,
+				0, 0, 0},
+#endif
+		{QDRV_NETDEBUG_TYPE_PHY_STATS, "phy_stats",
+				qdrv_pktlogger_start_phy_stats, qdrv_pktlogger_stop_phy_stats,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_PHY_STATS,
+				sizeof(struct qdrv_netdebug_phystats),
+				sizeof(struct qtn_node_shared_stats), 0},
+		{QDRV_NETDEBUG_TYPE_DSP_STATS, "dsp_stats",
+				qdrv_pktlogger_start_dsp_stats, qdrv_pktlogger_stop_dsp_stats,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_DSP_STATS,
+				sizeof(struct qdrv_netdebug_dspstats), 0, 0},
+		{QDRV_NETDEBUG_TYPE_CORE_DUMP, "core_dump",
+				qdrv_pktlogger_start_core_dump, qdrv_pktlogger_stop_core_dump,
+				QDRV_PKTLOGGER_INTERVAL_DFLT_NONE,
+				sizeof(struct qdrv_netdebug_core_dump), 0, 0},
+		/* sentinel - remaining fields zero-initialised */
+		{-1, NULL, NULL, NULL, QDRV_PKTLOGGER_INTERVAL_DFLT_NONE, 0, 0}
+};
+
+/*
+ * Look up a pktlogger type table entry by numeric id.
+ * Returns NULL when the id is not present.
+ */
+struct qdrv_pktlogger_types_tbl *qdrv_pktlogger_get_tbls_by_id(int id)
+{
+	struct qdrv_pktlogger_types_tbl *ent;
+
+	for (ent = &qdrv_pktlogger_types_tbl_ent[0]; ent->id > 0; ent++) {
+		if (ent->id == id)
+			return ent;
+	}
+
+	return NULL;
+}
+
+/* Invoke the stop handler of every registered pktlogger type. */
+static void qdrv_pktlogger_stop_all(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger_types_tbl *ent;
+
+	for (ent = &qdrv_pktlogger_types_tbl_ent[0]; ent->name != NULL; ent++) {
+		if (ent->stop)
+			ent->stop(qw);
+	}
+}
+
+/*
+ * Enable or disable one pktlogger type by name.
+ * @type: type name ("stats", "radar", ...) or "all" (stop only).
+ *        NOTE(review): matching is strncmp over strlen(type), i.e. a
+ *        prefix match - an abbreviation selects the first entry it
+ *        matches; confirm this is intended.
+ * @start: non-zero to start, zero to stop.
+ * @interval: if > 0, overrides the table's default interval (seconds).
+ * Returns 0 on success, -1 on unknown type or missing handler.
+ */
+int qdrv_pktlogger_start_or_stop(struct qdrv_wlan *qw, const char *type,
+		int start, uint32_t interval)
+{
+	int ret = 0;
+	int index;
+
+	if (!start && (strncmp(type, "all", strlen(type)) == 0)) {
+		qdrv_pktlogger_stop_all(qw);
+		return 0;
+	}
+
+	for (index = 0; qdrv_pktlogger_types_tbl_ent[index].name != NULL; index++) {
+		if (strncmp(type, qdrv_pktlogger_types_tbl_ent[index].name, strlen(type)) == 0) {
+			if (start) {
+				if (qdrv_pktlogger_types_tbl_ent[index].start != NULL) {
+					if (interval > 0) {
+						qdrv_pktlogger_types_tbl_ent[index].interval = interval;
+					}
+					ret = qdrv_pktlogger_types_tbl_ent[index].start(qw);
+				} else {
+					printk("No start command for log type %s\n", type);
+					ret = -1;
+				}
+			} else {
+				if (qdrv_pktlogger_types_tbl_ent[index].stop != NULL) {
+					qdrv_pktlogger_types_tbl_ent[index].stop(qw);
+					ret = 0;
+				} else {
+					printk("No stop command for log type %s\n", type);
+					ret = -1;
+				}
+			}
+
+			break;
+		}
+	}
+
+	/* loop ran off the end of the table: name was never matched */
+	if (qdrv_pktlogger_types_tbl_ent[index].name == NULL) {
+		printk("Log type %s is invalid\n", type);
+		ret = -1;
+	}
+
+	return ret;
+}
+
+/*
+ * Fill in the IPv4 header of one pktlogger fragment.
+ * @id: IP identification (shared by all fragments of a datagram);
+ * @frag_off: fragment flags/offset field (host order, converted here);
+ * @len: payload length in bytes, excluding the IP header.
+ * NOTE(review): protocol is IPPROTO_UDP but no UDP header is built in
+ * this path - presumably the payload carries it; confirm at the caller.
+ */
+static inline void qdrv_pktlogger_set_iphdr(struct iphdr *iphdr, u_int16_t id,
+		u_int16_t frag_off, int len)
+{
+	iphdr->version = 4;
+	iphdr->ihl = 5;
+	iphdr->tos = 0;
+	iphdr->tot_len = htons(PKTLOGGER_IP_HEADER_LEN + len);
+	iphdr->id = htons(id);
+	iphdr->frag_off = htons(frag_off);
+	iphdr->ttl = PKTLOGGER_IP_TTL;
+	iphdr->protocol = IPPROTO_UDP;
+	iphdr->saddr = g_pktlogger_p->src_ip;
+	iphdr->daddr = g_pktlogger_p->dst_ip;
+	iphdr->check = ip_fast_csum((unsigned char *)iphdr, iphdr->ihl);
+}
+
+/* Fill in the Ethernet header from the globally configured addresses. */
+static inline void qdrv_pktlogger_set_etherhdr(struct ether_header *hdr)
+{
+	hdr->ether_type = htons(ETH_P_IP);
+	IEEE80211_ADDR_COPY(hdr->ether_dhost, g_pktlogger_p->dst_addr);
+	IEEE80211_ADDR_COPY(hdr->ether_shost, g_pktlogger_p->src_addr);
+}
+
+/*
+ * Allocate an skb for one pktlogger frame, align its data pointer to
+ * the DMA cache-line size and zero @len bytes of payload space.
+ * Returns NULL (and logs) on allocation failure.
+ * NOTE(review): the buffer is sized by qtn_rx_buf_size(); @len plus the
+ * alignment reserve is assumed to fit - confirm for maximum-size frames.
+ */
+static struct sk_buff *qdrv_pktlogger_alloc_skb(int len)
+{
+	struct sk_buff *skb;
+	int cache_alignment = dma_get_cache_alignment();
+	int alignment;
+
+	skb = dev_alloc_skb(qtn_rx_buf_size());
+	if (skb == NULL) {
+		DBGPRINTF_E("Stopping pktlogger debug - no buffers available\n");
+		return NULL;
+	}
+	/* advance data to the next cache-line boundary */
+	alignment = (unsigned int)(skb->data) & (cache_alignment - 1);
+	if (alignment) {
+		skb_reserve(skb, cache_alignment - alignment);
+	}
+
+	skb_put(skb, len);
+	memset(skb->data, 0, len);
+
+	return skb;
+}
+
+/*
+ * Wrap @data in Ethernet + IP headers and push it straight out of the
+ * configured device's transmit routine.  @flag_off carries the IP
+ * fragment flags/offset for multi-fragment datagrams.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int
+qdrv_pktlogger_ip_send(void *data, int len, u_int16_t flag_off)
+{
+	int length = 0;
+	struct ether_header *etherhdr_p;
+	struct iphdr *iphdr_p = NULL;
+	void *ippayload_p = NULL;
+	struct sk_buff *skb;
+	struct qdrv_wlan *qw = g_pktlogger_p->qw;
+
+	/*
+	 * Validate the output device up front: the original code
+	 * dereferenced dev->name before its NULL check.
+	 */
+	if ((g_pktlogger_p->dev == NULL) ||
+	    (g_pktlogger_p->dev->netdev_ops->ndo_start_xmit == NULL)) {
+		DBGPRINTF_LIMIT_E("Ethernet interface not found\n");
+		return -1;
+	}
+
+	length = len + sizeof(struct iphdr) + sizeof(struct ether_header);
+	skb = qdrv_pktlogger_alloc_skb(length);
+	if (skb == NULL) {
+		DBGPRINTF_LIMIT_E("Failed to allocate SKB\n");
+		return -1;
+	}
+
+	etherhdr_p = (struct ether_header *)skb->data;
+	iphdr_p = (struct iphdr *)((char *)etherhdr_p + sizeof(struct ether_header));
+	ippayload_p = (void *)((char *)iphdr_p + sizeof(struct iphdr));
+
+	memcpy(ippayload_p, data, len);
+	qdrv_pktlogger_set_iphdr(iphdr_p, g_pktlogger_p->ip_id, flag_off, len);
+	qdrv_pktlogger_set_etherhdr(etherhdr_p);
+
+	/* Over a wifi interface, resolve the unicast peer's AID first */
+	if ((strncmp(g_pktlogger_p->dev->name, "wifi", 4) == 0) &&
+			(!IEEE80211_IS_MULTICAST(g_pktlogger_p->dst_addr)) &&
+			(!IEEE80211_IS_MULTICAST(g_pktlogger_p->recv_addr))) {
+		skb->dest_port = IEEE80211_AID(ieee80211_find_aid_by_mac_addr(&qw->ic.ic_sta,
+				g_pktlogger_p->recv_addr));
+		if (skb->dest_port == 0) {
+			DBGPRINTF_LIMIT_E("Could not send netdebug packet - wifi peer not found\n");
+			dev_kfree_skb(skb);
+			return -1;
+		}
+	}
+
+	local_bh_disable();
+
+	if (netif_queue_stopped(g_pktlogger_p->dev))
+		goto qdrv_pktlogger_ip_send_error;
+
+	if (unlikely(strncmp(g_pktlogger_p->dev->name, "wifi", 4) == 0)) {
+		skb->dev = g_pktlogger_p->dev;
+		/* NOTE(review): dev_queue_xmit() consumes the skb even on
+		 * error, so the kfree on the error path may double free -
+		 * confirm against this kernel version. */
+		if (dev_queue_xmit(skb) < 0)
+			goto qdrv_pktlogger_ip_send_error;
+	} else if (g_pktlogger_p->dev->netdev_ops->ndo_start_xmit(skb, g_pktlogger_p->dev) != 0) {
+		goto qdrv_pktlogger_ip_send_error;
+	}
+
+	local_bh_enable();
+	return 0;
+
+qdrv_pktlogger_ip_send_error:
+	local_bh_enable();
+	dev_kfree_skb(skb);
+	return -1;
+}
+
+/*
+ * Send @len bytes as one datagram, splitting it into IP fragments of
+ * at most maxfraglen bytes.  ip_id is bumped only after the final
+ * fragment so all fragments of one datagram share an IP id.
+ * Returns 0 on success, a negative errno-style value on argument
+ * errors, and a positive value when a middle fragment fails.
+ */
+static int
+qdrv_pktlogger_udp_send(void *data, uint32_t len)
+{
+	int ret = 0;
+	uint16_t flag_off = 0;
+	char *data_s = (char *)data;
+	int len_s = 0;
+
+	if (g_pktlogger_p == NULL) {
+		DBGPRINTF_LIMIT_E("Pktlogger is not initialised\n");
+		return -EINVAL;
+	}
+
+	if (len == 0) {
+		return -EINVAL;
+	}
+
+	while ((len - len_s) > g_pktlogger_p->maxfraglen) {
+		flag_off |= BIT(PKTLOGGER_IP_MORE_FRAG_BIT);
+		ret = qdrv_pktlogger_ip_send(data_s, g_pktlogger_p->maxfraglen, flag_off);
+		if (ret < 0) {
+			/* Do not send rest of fragments. */
+			/* NOTE(review): returns -ret (positive) so the caller
+			 * drops rather than requeues a partially sent
+			 * datagram - confirm this is the intended contract */
+			return -ret;
+		}
+		len_s += g_pktlogger_p->maxfraglen;
+		/* fragment offset field is expressed in 8-byte units */
+		flag_off = (len_s >> 3);
+		data_s += g_pktlogger_p->maxfraglen;
+	}
+
+	/* Send one IP packet or the last one */
+	flag_off &= (~BIT(PKTLOGGER_IP_MORE_FRAG_BIT));
+	ret = qdrv_pktlogger_ip_send(data_s, len - len_s, flag_off);
+	if (ret >= 0) {
+		g_pktlogger_p->ip_id++;
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Work-queue handler that drains the pktlogger send queue.
+ * The queue lock is dropped around the actual network send; on a send
+ * failure the item is pushed back to the head and draining stops, to
+ * be retried on the next schedule.
+ */
+void
+qdrv_pktlogger_sendq_work(struct work_struct *work)
+{
+	struct qdrv_pktlogger_data *first;
+	int rc;
+
+	g_pktlogger_p->stats.queue_send++;
+
+	spin_lock_bh(&g_pktlogger_p->sendq_lock);
+
+	while ((first = STAILQ_FIRST(&g_pktlogger_p->sendq_head))) {
+		STAILQ_REMOVE_HEAD(&g_pktlogger_p->sendq_head, entries);
+
+		/* sends may sleep/take time - do them outside the lock */
+		spin_unlock_bh(&g_pktlogger_p->sendq_lock);
+
+		/* Send data on the network */
+		rc = qdrv_pktlogger_udp_send(first->data, first->len);
+
+		/* Send the data to pktlogger_d too */
+		qdrv_pktlogger_create_pktlogger_data(g_pktlogger_p->qw, first->data, first->len);
+
+		spin_lock_bh(&g_pktlogger_p->sendq_lock);
+
+		if (rc < 0) {
+			/* transient failure: requeue at the head and retry later */
+			STAILQ_INSERT_HEAD(&g_pktlogger_p->sendq_head, first, entries);
+			g_pktlogger_p->stats.pkt_requeued++;
+			break;
+		}
+
+		qdrv_pktlogger_free_buffer(first->data);
+		kfree(first);
+		g_pktlogger_p->queue_len--;
+	}
+
+	g_pktlogger_p->sendq_scheduled = 0;
+
+	spin_unlock_bh(&g_pktlogger_p->sendq_lock);
+}
+
+/*
+ * Queue one pktlogger record for transmission and kick the work queue.
+ * Takes ownership of @data (freed via qdrv_pktlogger_free_buffer on
+ * every non-sent path).  When the queue is full the oldest record is
+ * dropped to make room for the new one.
+ */
+void
+qdrv_pktlogger_send(void *data, uint32_t len)
+{
+	struct qdrv_pktlogger_data *tmp;
+	struct qdrv_pktlogger_data *first;
+
+	tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+	if (!tmp) {
+		g_pktlogger_p->stats.pkt_failed++;
+		qdrv_pktlogger_free_buffer(data);
+		return;
+	}
+
+	tmp->data = data;
+	tmp->len = len;
+
+	spin_lock_bh(&g_pktlogger_p->sendq_lock);
+
+	/* queue full: drop the oldest entry */
+	if (g_pktlogger_p->queue_len >= QDRV_PKTLOGGER_QUEUE_LEN_MAX) {
+		first = STAILQ_FIRST(&g_pktlogger_p->sendq_head);
+		STAILQ_REMOVE_HEAD(&g_pktlogger_p->sendq_head, entries);
+		qdrv_pktlogger_free_buffer(first->data);
+		kfree(first);
+		g_pktlogger_p->queue_len--;
+		g_pktlogger_p->stats.pkt_dropped++;
+	}
+
+	STAILQ_INSERT_TAIL(&g_pktlogger_p->sendq_head, tmp, entries);
+	g_pktlogger_p->queue_len++;
+	g_pktlogger_p->stats.pkt_queued++;
+
+	/* schedule the drain work at most once at a time */
+	if (!g_pktlogger_p->sendq_scheduled) {
+		g_pktlogger_p->sendq_scheduled = 1;
+		schedule_work(&g_pktlogger_p->sendq_work);
+	}
+
+	spin_unlock_bh(&g_pktlogger_p->sendq_lock);
+}
+
+/*
+ * inetaddr notifier callback: when the bridge device acquires an IPv4
+ * address, adopt it as the pktlogger source IP.
+ */
+static int qdrv_pktlogger_ip_event(struct notifier_block *this,
+			    unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+	struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
+	struct qdrv_wlan *qw = g_pktlogger_p->qw;
+
+	if ((event == NETDEV_UP) && (dev == qw->br_dev)) {
+		uint8_t *addr_p;
+
+		g_pktlogger_p->src_ip = ifa->ifa_address;
+		addr_p = (uint8_t *)&g_pktlogger_p->src_ip;
+		printk("QDRV: src ip addr %pI4\n", addr_p);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* inetaddr notifier: tracks the bridge IP to use as source address. */
+static struct notifier_block qdrv_pktlogger_ip_notifier = {
+	.notifier_call = qdrv_pktlogger_ip_event,
+};
+
+/*
+ * Initialise the pktlogger: locate the output Ethernet/PCIe device,
+ * set default addressing, initialise the send queue and register for
+ * IP address change notifications.
+ * Returns 0 on success, negative on failure.
+ */
+int
+qdrv_pktlogger_init(struct qdrv_wlan *qw)
+{
+	int i = 0;
+	unsigned int maxfraglen;
+	unsigned int fragheaderlen;
+	uint8_t default_dst_addr[IEEE80211_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	__be32 srcip = 0;
+
+	g_pktlogger_p = &qw->pktlogger;
+	memset(g_pktlogger_p, 0, sizeof(struct qdrv_pktlogger));
+
+	/*
+	 * Initialise the send queue and its lock before any error path
+	 * below calls qdrv_pktlogger_exit(), which takes the lock.
+	 */
+	spin_lock_init(&g_pktlogger_p->sendq_lock);
+	INIT_WORK(&g_pktlogger_p->sendq_work, qdrv_pktlogger_sendq_work);
+	STAILQ_INIT(&g_pktlogger_p->sendq_head);
+
+	g_pktlogger_p->rx_rate_cur = &g_pktlogger_p->rx_rates[0];
+	g_pktlogger_p->rx_rate_pre = &g_pktlogger_p->rx_rates[1];
+	g_pktlogger_p->rx_ratelog_cur = &g_pktlogger_p->rx_ratelog[0];
+	g_pktlogger_p->rx_ratelog_pre = &g_pktlogger_p->rx_ratelog[1];
+
+	/* Walk the interface index space for the first eth/pcie device */
+	for (i = 1; ; i++) {
+		g_pktlogger_p->dev = dev_get_by_index(&init_net, i);
+		if (g_pktlogger_p->dev == NULL) {
+			DBGPRINTF_E("Ethernet interface not found\n");
+			qdrv_pktlogger_exit(qw);
+			return -1;
+		}
+
+		if ((strncmp(g_pktlogger_p->dev->name, "eth", 3) == 0) ||
+				(strncmp(g_pktlogger_p->dev->name, "pcie", 4) == 0)) {
+			break;
+		}
+		dev_put(g_pktlogger_p->dev);
+	}
+
+	if (strncmp(g_pktlogger_p->dev->name, "eth", 3) == 0) {
+		g_pktlogger_p->dev_emac0 = dev_get_by_name(&init_net, "eth1_0");
+		g_pktlogger_p->dev_emac1 = dev_get_by_name(&init_net, "eth1_1");
+	}
+
+	if (g_pktlogger_p->dev->netdev_ops->ndo_start_xmit == NULL) {
+		DBGPRINTF_E("Ethernet transmit function not found\n");
+		/* Drop every reference taken above, not just the main device */
+		if (g_pktlogger_p->dev_emac0) {
+			dev_put(g_pktlogger_p->dev_emac0);
+			g_pktlogger_p->dev_emac0 = NULL;
+		}
+		if (g_pktlogger_p->dev_emac1) {
+			dev_put(g_pktlogger_p->dev_emac1);
+			g_pktlogger_p->dev_emac1 = NULL;
+		}
+		dev_put(g_pktlogger_p->dev);
+		g_pktlogger_p->dev = NULL;
+		return -1;
+	}
+
+	if (qw->br_dev) {
+		srcip = qdrv_dev_ipaddr_get(qw->br_dev);
+	}
+
+	if (!srcip) {
+		srcip = PKTLOGGER_DEFAULT_SRC_IP;
+	}
+
+	g_pktlogger_p->dst_ip = PKTLOGGER_DEFAULT_DST_IP;
+	g_pktlogger_p->src_ip = srcip;
+	g_pktlogger_p->dst_port = htons(PKTLOGGER_UDP_DST_PORT);
+	g_pktlogger_p->src_port = htons(PKTLOGGER_UDP_SRC_PORT);
+
+	IEEE80211_ADDR_COPY(g_pktlogger_p->dst_addr, default_dst_addr);
+	IEEE80211_ADDR_COPY(g_pktlogger_p->recv_addr, default_dst_addr);
+	IEEE80211_ADDR_COPY(g_pktlogger_p->src_addr, qw->mac->mac_addr);
+
+	/* Largest 8-byte-aligned IP payload that fits in an Ethernet MTU */
+	fragheaderlen = sizeof(struct iphdr);
+	maxfraglen = ETHERMTU - fragheaderlen;
+	maxfraglen -= (maxfraglen % 8);
+	g_pktlogger_p->maxfraglen = maxfraglen;
+
+	g_pktlogger_p->qw = qw;
+	g_pktlogger_p->queue_len = 0;
+
+	qdrv_pktlogger_start_netlink(qw);
+
+	return register_inetaddr_notifier(&qdrv_pktlogger_ip_notifier);
+}
+
+/*
+ * Tear down the pktlogger: stop all log types, drain the send queue
+ * and release the device references taken in qdrv_pktlogger_init().
+ */
+void qdrv_pktlogger_exit(struct qdrv_wlan *qw)
+{
+	struct qdrv_pktlogger_data *first;
+
+	unregister_inetaddr_notifier(&qdrv_pktlogger_ip_notifier);
+
+	/* Stop all debug packets */
+	if (qw->pktlogger.flag) {
+		qdrv_pktlogger_stop_all(qw);
+	}
+
+	/*
+	 * cancel_work_sync() may sleep and waits for a work item that
+	 * itself takes sendq_lock, so it must run OUTSIDE the lock
+	 * (the original called it under spin_lock_bh - deadlock risk).
+	 */
+	cancel_work_sync(&g_pktlogger_p->sendq_work);
+
+	spin_lock_bh(&g_pktlogger_p->sendq_lock);
+	g_pktlogger_p->sendq_scheduled = 0;
+	while (!STAILQ_EMPTY(&g_pktlogger_p->sendq_head)) {
+		first = STAILQ_FIRST(&g_pktlogger_p->sendq_head);
+		STAILQ_REMOVE_HEAD(&g_pktlogger_p->sendq_head, entries);
+		qdrv_pktlogger_free_buffer(first->data);
+		kfree(first);
+	}
+	g_pktlogger_p->queue_len = 0;
+	spin_unlock_bh(&g_pktlogger_p->sendq_lock);
+
+	/*
+	 * Release device references BEFORE clearing the global pointer.
+	 * The original cleared g_pktlogger_p first and then dereferenced
+	 * it (NULL deref), so the emac references were never dropped.
+	 */
+	if (qw->pktlogger.dev_emac0) {
+		dev_put(qw->pktlogger.dev_emac0);
+		qw->pktlogger.dev_emac0 = NULL;
+	}
+	if (qw->pktlogger.dev_emac1) {
+		dev_put(qw->pktlogger.dev_emac1);
+		qw->pktlogger.dev_emac1 = NULL;
+	}
+	if (qw->pktlogger.dev) {
+		dev_put(qw->pktlogger.dev);
+		qw->pktlogger.dev = NULL;
+	}
+
+	g_pktlogger_p = NULL;
+}
diff --git a/drivers/qtn/qdrv/qdrv_pktlogger.h b/drivers/qtn/qdrv/qdrv_pktlogger.h
new file mode 100644
index 0000000..3af6dc1
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_pktlogger.h
@@ -0,0 +1,106 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef _QDRV_PKTLOGGER_H
+#define _QDRV_PKTLOGGER_H
+
+#include "qdrv_mac.h"
+#include "qdrv_wlan.h"
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+#define QDRV_NETLINK_PKTLOGGER 30
+#define PKTLOGGER_DEFAULT_DST_IP 0xFFFFFFFF
+#define PKTLOGGER_DEFAULT_SRC_IP 0x01010102
+/* Arbitrary unused UDP port numbers */
+#define PKTLOGGER_UDP_SRC_PORT 6601
+#define PKTLOGGER_UDP_DST_PORT 6602
+#define PKTLOGGER_IP_MORE_FRAG_BIT 13
+#define PKTLOGGER_IP_TTL 64
+#define PKTLOGGER_IP_HEADER_LEN 20
+
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_NONE	0
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_STATS	1
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_SYSMSG	5
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_MEM	1
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_RATE	1
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_VSP	1000
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_PHY_STATS	2
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_DSP_STATS	1
+#define QDRV_PKTLOGGER_INTERVAL_DFLT_FLUSH_DATA 1
+
+#define QDRV_PKTLOGGER_QUEUE_LEN_MAX		32
+
+/*
+ * Collection of Ethernet device statistics causes severe performance degradation.
+ */
+#define QDRV_NETDEBUG_ETH_DEV_STATS_ENABLED	0
+
+enum qdrv_netdbg_rectype_e {
+	QDRV_NETDEBUG_TYPE_STATS = 1,
+	QDRV_NETDEBUG_TYPE_EVENT = 2,
+	QDRV_NETDEBUG_TYPE_RADAR = 3,
+	QDRV_NETDEBUG_TYPE_TXBF = 4,
+	QDRV_NETDEBUG_TYPE_IWEVENT = 5,
+	QDRV_NETDEBUG_TYPE_SYSMSG = 6,
+	QDRV_NETDEBUG_TYPE_MEM = 7,
+	QDRV_NETDEBUG_TYPE_RATE = 8,
+	QDRV_NETDEBUG_TYPE_VSP = 9,
+	QDRV_NETDEBUG_TYPE_PHY_STATS = 10,
+	QDRV_NETDEBUG_TYPE_DSP_STATS = 11,
+	QDRV_NETDEBUG_TYPE_CORE_DUMP = 12,
+	QDRV_NETDEBUG_TYPE_FLUSH_DATA = 20,
+	QDRV_NETDEBUG_TYPE_MAX,
+	QDRV_NETDEBUG_TYPE_PKTLOGGER = 100,
+};
+
+struct qdrv_pktlogger_types_tbl {
+	int id;
+	char *name;
+	int (*start)(struct qdrv_wlan *qw);
+	void (*stop)(struct qdrv_wlan *qw);
+	uint32_t interval;
+	uint16_t struct_bsize;
+	uint16_t struct_vsize;
+	uint32_t history;
+};
+
+struct qdrv_pktlogger_data {
+	void *data;
+	uint32_t len;
+	STAILQ_ENTRY(qdrv_pktlogger_data) entries;
+};
+
+__be32 qdrv_dev_ipaddr_get(struct net_device *dev);
+struct qdrv_pktlogger_types_tbl *qdrv_pktlogger_get_tbls_by_id(int id);
+
+int qdrv_pktlogger_init(struct qdrv_wlan *qw);
+void qdrv_pktlogger_exit(struct qdrv_wlan *qw);
+int qdrv_pktlogger_set(struct qdrv_wlan *qw, const char *param, const char *value);
+void qdrv_pktlogger_show(struct qdrv_wlan *qw);
+int qdrv_pktlogger_start_or_stop(struct qdrv_wlan *qw, const char *type, int start,
+		uint32_t interval);
+
+void qdrv_pktlogger_send(void *data, uint32_t len);
+void *qdrv_pktlogger_alloc_buffer(char *description, int data_len);
+void qdrv_pktlogger_free_buffer(void *data_buffer);
+void qdrv_pktlogger_hdr_init(struct qdrv_wlan *qw, struct qdrv_pktlogger_hdr *hdr, int rec_type, int stats_len);
+
+#endif /* _QDRV_PKTLOGGER_H */
diff --git a/drivers/qtn/qdrv/qdrv_radar.c b/drivers/qtn/qdrv/qdrv_radar.c
new file mode 100644
index 0000000..14e595f
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_radar.c
@@ -0,0 +1,2238 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/pm_qos.h>
+#else
+#include <linux/pm_qos_params.h>
+#endif
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_hal.h"
+#include "qdrv_muc.h"
+#include "qdrv_dsp.h"
+#include "qtn/registers.h"
+#include "qtn/muc_phy_stats.h"
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+#include "qdrv_radar.h"
+#include "radar/radar.h"
+#include "radar/detect.h"
+#include <net/iw_handler.h> /* wireless_send_event(..) */
+#include "qdrv_debug.h"
+#include <net80211/ieee80211_var.h>
+#include "qdrv_control.h"
+#include <asm/board/pm.h>
+
+
+/* Will move this to a configuration later.  */
+#define CONFIG_QHOP 1
+
+#define CAC_PERIOD		(70 * HZ)
+#define CAC_WEATHER_PERIOD_EU	(600 * HZ)
+#define CAC_PERIOD_QUICK	(30 * HZ)
+#define NONOCCUPY_PERIOD_QUICK	(60 * HZ)
+#define STA_SILENCE_PERIOD	(CAC_PERIOD + 10 * HZ)
+#define STA_WEATHER_CHAN_SILENCE_PERIOD	(CAC_WEATHER_PERIOD_EU + 10 * HZ)
+
+#define DFS_CS_TIMER_VAL	(HZ / 10)
+
+#define QDRV_RADAR_SAMPLE_RATE	1	/* sampling rate (seconds) */
+#define QDRV_RADAR_SAMPLE_DELAY	10	/* Give MuC time to update stats (jiffies) */
+
+static void qdrv_radar_sample_work(struct work_struct *unused);
+
+static bool qdrv_radar_configured = false;
+static bool qdrv_radar_first_call = true;
+static bool qdrv_radar_sta_dfs = false;
+
+/*
+ * Control block for qdrv_radar
+ */
+struct qdrv_radar_sample {
+	struct delayed_work		sample_work;
+	struct detect_drv_sample_t	*sample;
+};
+
+static struct {
+	bool				enabled;
+	bool				xmit_stopped;
+	struct qdrv_mac			*mac;
+	struct ieee80211com		*ic;
+	struct ieee80211_channel	*cac_chan;
+	struct timer_list		cac_timer; /* a timer for CAC */
+	struct timer_list		nonoccupy_timer[IEEE80211_CHAN_MAX+1];
+	struct ieee80211_channel	*dfs_des_chan;
+	struct timer_list		dfs_cs_timer; /* a timer for a channel switch */
+	struct qdrv_radar_sample	muc_sampling;
+	struct muc_tx_stats		*stats_uc_tx_ptr;
+	struct notifier_block		pm_notifier;
+	struct tasklet_struct		ocac_tasklet;
+	uint32_t			region;
+} qdrv_radar_cb;
+
+/*
+ * Utility macros
+ */
+
+/*
+ * True if this mode must behave like a DFS master, ie do Channel
+ * Check Availability and In Service Monitoring. We need to make sure
+ * that all modes cannot send data without being authorized. Such
+ * enforcement is not done in monitor mode however.
+ */
+static inline int ieee80211_is_dfs_master(struct ieee80211com *ic)
+{
+	KASSERT(ic->ic_opmode != IEEE80211_M_WDS,
+		(DBGEFMT "Incorrect ic opmode %d\n", DBGARG, ic->ic_opmode));
+
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
+		return 1;
+
+	if (ieee80211_is_repeater(ic))
+		return 1;
+
+	if (ic->ic_opmode == IEEE80211_M_IBSS)
+		return 1;
+
+	if (ic->ic_opmode == IEEE80211_M_AHDEMO)
+		return 1;
+
+	return 0;
+}
+
+static inline int qdrv_is_dfs_master(void)
+{
+	return ieee80211_is_dfs_master(qdrv_radar_cb.ic);
+}
+
+static inline int qdrv_is_dfs_slave(void)
+{
+	return !ieee80211_is_dfs_master(qdrv_radar_cb.ic);
+}
+
+#define GET_CHANIDX(chan)	((chan) - ic->ic_channels)
+
+static void mark_radar(void);
+static void stop_cac(void);
+static void stop_dfs_cs(void);
+static void qdrv_ocac_irqhandler(void *arg1, void *arg2);
+static int qdrv_init_ocac_irqhandler(struct qdrv_wlan *qw);
+static bool qdrv_radar_is_dfs_chan(uint8_t wifi_chan);
+static bool qdrv_radar_is_dfs_weather_chan(uint8_t wifi_chan);
+
+#ifndef SYSTEM_BUILD
+#define ic2dev(ic)	((struct ieee80211vap *)(TAILQ_FIRST(&(ic)->ic_vaps)) ? \
+			((struct ieee80211vap *)(TAILQ_FIRST(&(ic)->ic_vaps)))->iv_dev : NULL)
+#else
+#define ic2dev(ic)	NULL
+#endif
+
+/* used to report RADAR: messages to event server */
+#define radar_event_report(...)			qdrv_eventf(__VA_ARGS__)
+
+#define DBGPRINTF_N_QEVT(qevtdev, ...)		do {\
+							DBGPRINTF_N(__VA_ARGS__);\
+							radar_event_report(qevtdev, __VA_ARGS__);\
+						} while (0)
+
+#ifdef CONFIG_QHOP
+/*
+ * RBS reports a detected channel change to the MBS over the WDS link via a vendor QHOP action frame.
+ */
+static void
+qdrv_qhop_send_rbs_report_frame(struct ieee80211vap *vap, u_int8_t new_chan)
+{
+	struct ieee80211_node *ni = ieee80211_get_wds_peer_node_ref(vap); /* ref'd node; released on the no-buffer path */
+	struct sk_buff *skb;
+	int frm_len = sizeof(struct qdrv_vendor_action_header) + sizeof(struct qdrv_vendor_action_qhop_dfs_data);
+	u_int8_t *frm;
+
+	if (!ni) {
+		DBGPRINTF_E("WDS peer is NULL!\n");
+		return;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+	                "%s: Sending action frame with RBS report IE: %u\n", __func__, new_chan);
+
+	skb = ieee80211_getmgtframe(&frm, frm_len);
+	if (skb == NULL) {
+	        IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni, "%s: cannot get buf; size %u", __func__, frm_len);
+	        vap->iv_stats.is_tx_nobuf++;
+		ieee80211_free_node(ni);
+	        return;
+	}
+
+	/* Fill in QHOP action header and data */
+	*frm++ = IEEE80211_ACTION_CAT_VENDOR;
+	frm += 3; /* NOTE(review): 3-byte vendor OUI skipped without being written - assumes the buffer is pre-zeroed; confirm */
+	*frm++ = QDRV_ACTION_TYPE_QHOP;
+	*frm++ = QDRV_ACTION_QHOP_DFS_REPORT;
+	*frm++ = new_chan;
+
+	ieee80211_mgmt_output(ni, skb, IEEE80211_FC0_SUBTYPE_ACTION, ni->ni_macaddr);
+}
+#endif
+
+/*
+ * Periodically copy MuC TX statistics into the radar module's sample buffer.
+ */
+static void qdrv_radar_sample_work(struct work_struct *unused)
+{
+	struct muc_tx_stats stats_muc_tx;
+	unsigned long lock_flags;
+
+	if (qdrv_radar_cb.stats_uc_tx_ptr == NULL) { /* stats not mapped yet - sampling inactive */
+		return;
+	}
+
+	memcpy(&stats_muc_tx, qdrv_radar_cb.stats_uc_tx_ptr, sizeof(stats_muc_tx)); /* local snapshot of the shared MuC struct */
+
+	/* Update the structure owned by the radar module */
+	spin_lock_irqsave(&qdrv_radar_cb.muc_sampling.sample->lock, lock_flags);
+
+	/* Divide sample values by sample rate to get rate per second */
+	qdrv_radar_cb.muc_sampling.sample->tx_pkts =
+		stats_muc_tx.tx_sample_pkts / qdrv_radar_cb.ic->ic_sample_rate;
+	qdrv_radar_cb.muc_sampling.sample->tx_bytes =
+		stats_muc_tx.tx_sample_bytes / qdrv_radar_cb.ic->ic_sample_rate;
+
+	spin_unlock_irqrestore(&qdrv_radar_cb.muc_sampling.sample->lock, lock_flags);
+
+	schedule_delayed_work(&qdrv_radar_cb.muc_sampling.sample_work,
+		qdrv_radar_cb.ic->ic_sample_rate * HZ); /* re-arm: runs every ic_sample_rate seconds */
+}
+
+/*
+ * Status-checking inline functions
+ */
+inline static bool is_cac_started(void)
+{
+	return (qdrv_radar_cb.cac_chan != NULL);
+}
+
+inline static bool is_dfs_cs_started(void)
+{
+	return (qdrv_radar_cb.dfs_des_chan != NULL);
+}
+
+/*
+ * Enable radar detection on channel
+ */
+inline static void sys_enable_rdetection(void)
+{
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DFS_DISALLOWRADARDETECT)) {
+		DBGPRINTF_N("RADAR: test mode - radar not enabled\n");
+		return;
+	}
+	if (qdrv_is_dfs_slave() && !qdrv_radar_sta_dfs)
+		return;
+	radar_enable();
+}
+
+/*
+ * Disable radar detection on channel
+ */
+inline static void sys_disable_rdetection(void)
+{
+	radar_disable();
+}
+
+/*
+ * Start the radar module
+ */
+inline static bool sys_start_radarmod(const char *region)
+{
+	bool region_enabled;
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+
+	region_enabled = radar_start(region);
+	if (region_enabled) {
+		radar_set_bw(ic->ic_radar_bw);
+		sys_disable_rdetection();
+		radar_register(mark_radar);
+		radar_register_is_dfs_chan(qdrv_radar_is_dfs_chan);
+		radar_register_is_dfs_weather_chan(qdrv_radar_is_dfs_weather_chan);
+	}
+
+	return region_enabled;
+}
+
+/*
+ * Stop the radar module
+ */
+inline static void sys_stop_radarmod(void)
+{
+	radar_stop();
+}
+
+static inline void sys_raw_enable_xmit(void)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	qdrv_hostlink_xmitctl(qw, true);
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "transmission enabled\n");
+}
+
+static inline void sys_raw_disable_xmit(void)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	qdrv_hostlink_xmitctl(qw, false);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "transmission disabled\n");
+}
+/*
+ * Instruct MuC to enable transmission
+ */
+void sys_enable_xmit(void)
+{
+	if (qdrv_radar_cb.xmit_stopped == true) {
+		sys_raw_enable_xmit();
+		qdrv_radar_cb.xmit_stopped = false;
+	} else {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "Xmit already enabled\n");
+	}
+}
+
+/*
+ * Instruct MuC to disable transmission
+ */
+void sys_disable_xmit(void)
+{
+	if (qdrv_radar_cb.xmit_stopped == true) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "Xmit already disabled\n");
+		return;
+	}
+
+	sys_raw_disable_xmit();
+	qdrv_radar_cb.xmit_stopped = true;
+}
+
+/*
+ * Instruct MuC to enable/disable transmission for STA mode
+ */
+void qdrv_sta_set_xmit(int enable)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	if (!qdrv_radar_cb.ic) /* radar control block not initialized yet */
+		return;
+
+	if (qdrv_is_dfs_master()) /* no-op for DFS masters */
+		return;
+
+	if (enable) {
+		/* strict STA-DFS: refuse to transmit on a channel that still needs CAC or is in non-occupancy */
+		if (ic->sta_dfs_info.sta_dfs_strict_mode &&
+			((ieee80211_is_chan_radar_detected(ic->ic_curchan)) ||
+			(ieee80211_is_chan_cac_required(ic->ic_curchan)))) {
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+				"\n%s: xmit cannot be enabled on channel %d [%s]\n",
+				__func__, ic->ic_curchan ? ic->ic_curchan->ic_ieee : 0,
+				ieee80211_is_chan_radar_detected(ic->ic_curchan) ?
+				"CAC required" : "in Non-Occupancy list"); /* NOTE(review): these two strings look swapped vs their conditions - confirm */
+			return;
+		}
+		sys_enable_xmit();
+	} else if (qdrv_radar_cb.xmit_stopped == false) {
+		sys_raw_disable_xmit();
+		qdrv_radar_cb.xmit_stopped = true;
+	} else {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "Xmit already disabled\n");
+	}
+}
+
+/*
+ * Start or restart the non-occupy period
+ */
+static void start_nonoccupy(unsigned chan_idx)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct timer_list *nonoccupy_timer;
+	struct ieee80211_channel *chan;
+	unsigned long expires;
+	unsigned long sta_dfs_timer_expires;
+
+	KASSERT(chan_idx < ic->ic_nchans,
+		(DBGEFMT "out-of-range channel idx %u\n", DBGARG, chan_idx));
+
+	chan = &ic->ic_channels[chan_idx];
+
+	if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+		/* Check IEEE80211_CHAN_RADAR flag to avoid repeated actions */
+		if (chan->ic_flags & IEEE80211_CHAN_RADAR)
+			return;
+		/*
+		 * Mark channel with NOT_AVAILABLE_RADAR_DETECTED flag after a delay
+		 * to allow the transmission of the measurement report to the AP
+		 * by the STA.
+		 */
+		ic->sta_dfs_info.sta_radar_timer.data = chan->ic_ieee;
+		sta_dfs_timer_expires = jiffies + IEEE80211_MS_TO_JIFFIES(ic->sta_dfs_info.sta_dfs_tx_chan_close_time);
+		ic->sta_dfs_info.sta_dfs_radar_detected_timer = true;
+		ic->sta_dfs_info.sta_dfs_radar_detected_channel = chan->ic_ieee;
+		mod_timer(&ic->sta_dfs_info.sta_radar_timer, sta_dfs_timer_expires);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "%s: Start sta_radar_timer [expiry:CSA/%lums]\n",
+			__func__, ic->sta_dfs_info.sta_dfs_tx_chan_close_time);
+	} else if (qdrv_is_dfs_slave()) {
+		/* DFS slave depends on a master for this period */
+		return;
+	}
+
+	if (ieee80211_is_repeater_associated(ic) && !(ic->sta_dfs_info.sta_dfs_strict_mode))
+		return;
+
+	chan->ic_flags |= IEEE80211_CHAN_RADAR;
+	chan->ic_radardetected++;
+
+	nonoccupy_timer = &qdrv_radar_cb.nonoccupy_timer[chan_idx];
+
+	expires = jiffies + ic->ic_non_occupancy_period;
+
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DFS_QUICKTIMER)) {
+		DBGPRINTF_N("RADAR: test mode - non-occupancy period will expire quickly\n");
+		expires = jiffies + NONOCCUPY_PERIOD_QUICK;
+	}
+
+	mod_timer(nonoccupy_timer, expires);
+
+	DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: non-occupancy period started for channel %3d "
+			"(%4d MHz)\n", chan->ic_ieee, chan->ic_freq);
+
+	if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+		return;
+	}
+
+	if (ic->ic_mark_channel_availability_status) {
+		ic->ic_mark_channel_availability_status(ic, chan, IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED);
+	}
+}
+
+/*
+ * Stop active or inactive nonoccupy period
+ */
+static void raw_stop_nonoccupy(unsigned chan_idx)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct timer_list *nonoccupy_timer;
+	struct ieee80211_channel *chan = &ic->ic_channels[chan_idx];
+
+	KASSERT(chan_idx < ic->ic_nchans,
+		(DBGFMT "out-of-range channel idx %u\n", DBGARG, chan_idx));
+
+	if (!(chan->ic_flags & IEEE80211_CHAN_RADAR)) {
+		return;
+	}
+	chan->ic_flags &= ~IEEE80211_CHAN_RADAR;
+
+	nonoccupy_timer = &qdrv_radar_cb.nonoccupy_timer[chan_idx];
+	del_timer(nonoccupy_timer);
+
+	DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: non-occupancy period stopped for channel %3d "
+			 "(%4d MHz)\n", chan->ic_ieee, chan->ic_freq);
+}
+
+static void qdrv_radar_enable_action(void)
+{
+	struct ieee80211com *ic;
+	struct qdrv_wlan *qw;
+	struct qtn_stats_log *iw_stats_log;
+
+	if (qdrv_radar_first_call == true || !qdrv_radar_configured) {
+		DBGPRINTF_E("radar unconfigured\n");
+		return;
+	}
+
+	if (qdrv_radar_cb.enabled) {
+		DBGPRINTF_E("radar already enabled\n");
+		return;
+	}
+
+	ic = qdrv_radar_cb.ic;
+	qw = container_of(ic, struct qdrv_wlan, ic);
+
+	qdrv_mac_enable_irq(qw->mac, RUBY_M2L_IRQ_LO_OCAC);
+
+	/* start sampling */
+	iw_stats_log = qdrv_radar_cb.mac->mac_sys_stats;
+	if (qdrv_radar_cb.stats_uc_tx_ptr == NULL && iw_stats_log != NULL) {
+		qdrv_radar_cb.stats_uc_tx_ptr = ioremap_nocache(
+				muc_to_lhost((u32)iw_stats_log->tx_muc_stats),
+				sizeof(struct muc_tx_stats));
+	}
+	schedule_delayed_work(&qdrv_radar_cb.muc_sampling.sample_work,
+		(ic->ic_sample_rate * HZ) + QDRV_RADAR_SAMPLE_DELAY);
+
+	qdrv_radar_cb.enabled = true;
+
+	if (ic->ic_curchan != IEEE80211_CHAN_ANYC) {
+		qdrv_radar_before_newchan();
+		qdrv_radar_on_newchan();
+	}
+
+	/* For external stats */
+	QDRV_SET_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_RADAR_EN);
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "Radar enabled\n");
+}
+
+/*
+ * Disable DFS feature
+ */
+void qdrv_radar_disable(void)
+{
+	struct ieee80211com *ic;
+	struct qdrv_wlan *qw;
+	struct qdrv_mac *mac;
+	unsigned chan_idx;
+	struct ieee80211_channel *chan;
+
+	if (qdrv_radar_first_call == true || !qdrv_radar_configured) {
+		DBGPRINTF_E("radar unconfigured\n");
+		return;
+	}
+
+	if (!qdrv_radar_cb.enabled) {
+		DBGPRINTF_E("radar already disabled\n");
+		return;
+	}
+
+	sys_disable_rdetection();
+
+	mac = qdrv_radar_cb.mac;
+	qdrv_mac_disable_irq(mac, RUBY_M2L_IRQ_LO_OCAC);
+
+	/* stop CAC if any */
+	stop_cac();
+
+	/* stop CS if any */
+	stop_dfs_cs();
+
+	/* stop sampling */
+	cancel_delayed_work(&qdrv_radar_cb.muc_sampling.sample_work);
+	if (qdrv_radar_cb.stats_uc_tx_ptr != NULL) {
+		iounmap(qdrv_radar_cb.stats_uc_tx_ptr);
+		qdrv_radar_cb.stats_uc_tx_ptr = NULL;
+	}
+
+	ic = qdrv_radar_cb.ic;
+	/* delete all nonoccupy timers and clear CAC done flag */
+	for (chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++) {
+		chan = &ic->ic_channels[chan_idx];
+		chan->ic_flags &= ~(IEEE80211_CHAN_DFS_CAC_DONE |
+				IEEE80211_CHAN_DFS_CAC_IN_PROGRESS);
+		raw_stop_nonoccupy(chan_idx);
+		if (ic->sta_dfs_info.sta_dfs_strict_mode && (chan->ic_flags & IEEE80211_CHAN_DFS)) {
+			ic->ic_chan_availability_status[chan->ic_ieee]
+					= IEEE80211_CHANNEL_STATUS_NON_AVAILABLE;
+			if (ic->ic_mark_channel_dfs_cac_status) {
+				ic->ic_mark_channel_dfs_cac_status(ic, chan,
+					IEEE80211_CHAN_DFS_CAC_DONE, false);
+				ic->ic_mark_channel_dfs_cac_status(ic, chan,
+					IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, false);
+			}
+		}
+	}
+
+	if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+		del_timer(&ic->sta_dfs_info.sta_radar_timer);
+		ic->sta_dfs_info.sta_dfs_radar_detected_timer = false;
+		ic->sta_dfs_info.sta_dfs_radar_detected_channel = 0;
+	}
+
+#ifdef CONFIG_QHOP
+	del_timer(&ic->rbs_mbs_dfs_info.rbs_dfs_radar_timer);
+#endif
+
+	/* always enable transmission */
+	sys_enable_xmit();
+
+	qdrv_radar_cb.enabled = false;
+	/* For external stats */
+	qw = container_of(ic, struct qdrv_wlan, ic);
+	QDRV_CLEAR_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_RADAR_EN);
+
+	/* success */
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "radar disabled\n");
+}
+
+void qdrv_set_radar(int enable)
+{
+	if (qdrv_radar_first_call == true || !qdrv_radar_configured) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+				"radar already unconfigured\n");
+		return;
+	}
+
+	enable = !!enable;
+	if (enable == qdrv_radar_cb.enabled) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "Radar already %s\n",
+				enable ? "enabled" : "disabled");
+		return;
+	}
+
+	if (enable)
+		qdrv_radar_enable_action();
+	else
+		qdrv_radar_disable();
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "Radar configured manually\n");
+}
+
+int qdrv_radar_detections_num(uint32_t chan)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	uint32_t chan_idx;
+
+	if (!qdrv_radar_cb.enabled)
+		return -1;
+
+	for (chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++) {
+		if (ic->ic_channels[chan_idx].ic_ieee == chan)
+			break;
+	}
+
+	/* unknown channel (loop fell through) or non-DFS channel: no count */
+	if ((chan_idx >= ic->ic_nchans) ||
+			!(ic->ic_channels[chan_idx].ic_flags & IEEE80211_CHAN_DFS))
+		return -1;
+	return ic->ic_channels[chan_idx].ic_radardetected;
+}
+
+static void qdrv_ocac_tasklet(unsigned long data)
+{
+	struct qtn_ocac_info *ocac_info = (struct qtn_ocac_info *)data;
+        struct radar_ocac_info_s *radar_ocac_info = radar_ocac_info_addr_get();
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	uint8_t array_ps = radar_ocac_info->array_ps;
+	struct ieee80211_ocac_tsflog *p_tsflog;
+	bool radar_enabled = radar_get_status();
+
+	if (!qdrv_radar_cb.enabled)
+		return;
+
+	if (!ic->ic_ocac.ocac_chan)
+		return;
+
+	/* Only do off channel CAC on non-DFS channel */
+	if (qdrv_radar_is_rdetection_required(ic->ic_bsschan))
+		return;
+
+	/* enable radar if it is non-DFS channel and radar is disabled */
+	if (!radar_enabled) {
+		sys_enable_rdetection();
+	}
+
+	spin_lock(&radar_ocac_info->lock);
+	radar_ocac_info->ocac_radar_pts[array_ps].ocac_status = ocac_info->chan_status;
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_RADAR, "status %d\n",
+			radar_ocac_info->ocac_radar_pts[array_ps].ocac_status);
+	radar_record_buffer_pt(&radar_ocac_info->ocac_radar_pts[array_ps].fifo_pt);
+	radar_ocac_info->array_ps++;
+	radar_ocac_info->ocac_scan_chan = ic->ic_ocac.ocac_chan->ic_ieee;
+	spin_unlock(&radar_ocac_info->lock);
+
+	if (ocac_info->chan_status == QTN_OCAC_ON_DATA_CHAN) {
+		ic->ic_ocac.ocac_counts.tasklet_data_chan++;
+		ic->ic_ocac.ocac_accum_cac_time_ms += ocac_info->actual_dwell_time;
+		p_tsflog = &ic->ic_ocac.ocac_tsflog;
+		memcpy(p_tsflog->tsf_log[p_tsflog->log_index], ocac_info->tsf_log,
+				sizeof(p_tsflog->tsf_log[p_tsflog->log_index]));
+		p_tsflog->log_index = (p_tsflog->log_index + 1) % QTN_OCAC_TSF_LOG_DEPTH;
+		ic->ic_chan_switch_reason_record(ic, IEEE80211_CSW_REASON_OCAC_RUN);
+	} else {
+		ic->ic_ocac.ocac_counts.tasklet_off_chan++;
+	}
+}
+
+/*
+ * Send CSA frame to MuC
+ */
+#ifndef CONFIG_QHOP
+static void sys_send_csa(struct ieee80211vap *vap, struct ieee80211_channel* new_chan, u_int64_t tsf)
+{
+	struct ieee80211com *ic;
+
+	if ((vap == NULL) || (new_chan == NULL)) {
+		DBGPRINTF_E("vap 0x%p, new_chan 0x%p\n", vap, new_chan);
+		return;
+	}
+	ic = vap->iv_ic;
+	ic->ic_send_csa_frame(vap, IEEE80211_CSA_MUST_STOP_TX,
+				 new_chan->ic_ieee, IEEE80211_RADAR_11HCOUNT, tsf);
+}
+#endif
+
+static void send_channel_related_event(struct net_device *dev, char *event_string)
+{
+	if (event_string == NULL || dev == NULL) {
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+		"send event to userspace, dev=%s msg=%s\n", dev->name, event_string);
+
+	radar_event_report(dev, "%s", event_string);
+}
+
+
+/* notify the dfs reentry demon of the channel switch info */
+void dfs_reentry_chan_switch_notify(struct net_device *dev, struct ieee80211_channel *new_chan)
+{
+	char *dfs_chan_sw = "dfs_csa";
+	char *nondfs_chan_sw = "non_dfs_csa";
+	char *no_chan_valid = "csa_fail";
+	char *notify_string;
+
+	if (NULL == new_chan) {
+		notify_string = no_chan_valid;
+	} else if (new_chan->ic_flags & IEEE80211_CHAN_DFS){
+		notify_string = dfs_chan_sw;
+	} else {
+		notify_string = nondfs_chan_sw;
+	}
+
+	send_channel_related_event(dev, notify_string);
+}
+EXPORT_SYMBOL(dfs_reentry_chan_switch_notify);
+
+
+
+/*
+ * Initiate a channel switch
+ * - 'new_chan' should not be NULL
+ */
+static void sys_change_chan(struct ieee80211_channel *new_chan)
+{
+#define IS_UP(_dev)	(((_dev)->flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
+#define IEEE80211_VAPS_LOCK_BH(_ic)	spin_lock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_UNLOCK_BH(_ic)	spin_unlock_bh(&(_ic)->ic_vapslock);
+
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	if (!new_chan || !vap) {
+		DBGPRINTF_E("null channel or vap\n");
+		return;
+	}
+	/* if dfs channel the notify will be send after cac */
+	if (!(new_chan->ic_flags & IEEE80211_CHAN_DFS))
+		dfs_reentry_chan_switch_notify(vap->iv_dev, new_chan);
+
+
+	if (IS_UP(vap->iv_dev)) {
+		ic->ic_prevchan = ic->ic_curchan;
+		ic->ic_curchan = ic->ic_des_chan = new_chan;
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_DFS;
+		IEEE80211_VAPS_LOCK_BH(ic);
+		vap->iv_newstate(vap, IEEE80211_S_SCAN, 0);
+		IEEE80211_VAPS_UNLOCK_BH(ic);
+		ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
+	} else if (vap->iv_state == IEEE80211_S_RUN) {
+		/* Normally, we don't get to here */
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if ((vap->iv_opmode == IEEE80211_M_WDS) && (vap->iv_state == IEEE80211_S_RUN)) {
+				IEEE80211_VAPS_LOCK_BH(ic);
+				vap->iv_newstate(vap, IEEE80211_S_INIT, 0);
+				IEEE80211_VAPS_UNLOCK_BH(ic);
+			}
+		}
+
+		ic->ic_prevchan = ic->ic_curchan;
+		ic->ic_curchan = new_chan;
+		ic->ic_bsschan = new_chan;
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_DFS;
+		ic->ic_set_channel(ic);
+		ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
+
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if ((vap->iv_opmode == IEEE80211_M_WDS) && (vap->iv_state == IEEE80211_S_INIT)) {
+				IEEE80211_VAPS_LOCK_BH(ic);
+				vap->iv_newstate(vap, IEEE80211_S_RUN, 0);
+				IEEE80211_VAPS_UNLOCK_BH(ic);
+			}
+
+			if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+				continue;
+
+			if ((vap->iv_state != IEEE80211_S_RUN) && (vap->iv_state != IEEE80211_S_SCAN))
+				continue;
+
+			ic->ic_beacon_update(vap);
+		}
+	} else {
+		ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
+		DBGPRINTF_E("channel change failed\n");
+	}
+}
+
+/*
+ * CAC has successfully passed
+ */
+static void cac_completed_action(unsigned long data)
+{
+	struct ieee80211com *ic;
+	struct qdrv_wlan *qw;
+	struct ieee80211_channel *chan;
+	struct ieee80211vap *vap;
+	int chan_status = 0;
+
+	ic = qdrv_radar_cb.ic;
+	if (ic == NULL || !is_cac_started()) {
+		DBGPRINTF_E("CAC not in progress\n");
+		return;
+	}
+
+	vap = TAILQ_FIRST(&ic->ic_vaps);
+	if (vap == NULL || vap->iv_dev == NULL) {
+		return;
+	}
+	qw = container_of(ic, struct qdrv_wlan, ic);
+	chan = qdrv_radar_cb.cac_chan;
+	/* resume normal operation on channel */
+	sys_enable_xmit();
+	chan->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+	chan->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+
+	if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+		if (!(vap->iv_bss && IEEE80211_NODE_AID(vap->iv_bss))) {
+			chan_status = IEEE80211_CHANNEL_STATUS_NON_AVAILABLE;
+		} else {
+			chan_status = IEEE80211_CHANNEL_STATUS_AVAILABLE;
+		}
+	} else {
+		chan_status = IEEE80211_CHANNEL_STATUS_AVAILABLE;
+	}
+
+	if (ic->ic_mark_channel_availability_status) {
+		ic->ic_mark_channel_availability_status(ic, chan, chan_status);
+	}
+
+	if (ic->ic_mark_channel_dfs_cac_status) {
+		if (ic->sta_dfs_info.sta_dfs_strict_mode && (chan_status == IEEE80211_CHANNEL_STATUS_NON_AVAILABLE)) {
+			ic->ic_mark_channel_dfs_cac_status(ic, chan, IEEE80211_CHAN_DFS_CAC_DONE, false);
+		} else {
+			ic->ic_mark_channel_dfs_cac_status(ic, chan, IEEE80211_CHAN_DFS_CAC_DONE, true);
+		}
+		ic->ic_mark_channel_dfs_cac_status(ic, chan, IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, false);
+	}
+
+	DBGPRINTF_N_QEVT(vap->iv_dev, "RADAR: CAC completed for channel %3d (%4d MHz)\n",
+		chan->ic_ieee, chan->ic_freq);
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "\n%s: chan_status=%d\n", __func__, chan_status);
+
+	QDRV_CLEAR_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_CAC_ACTIVE);
+	/* CAC has ended, meaning the switch to a DFS channel succeeded */
+	dfs_reentry_chan_switch_notify(vap->iv_dev, qdrv_radar_cb.cac_chan);
+	qdrv_radar_cb.cac_chan = NULL;
+
+	ic->ic_pm_reason = IEEE80211_PM_LEVEL_CAC_COMPLETED;
+	ieee80211_pm_queue_work_custom(ic, BOARD_PM_WLAN_IDLE_TIMEOUT);
+
+	if (ic->ic_ap_next_cac) {
+		(void) ic->ic_ap_next_cac(ic, vap, CAC_PERIOD, &qdrv_radar_cb.cac_chan,
+					IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY);
+	}
+}
+
+void qdrv_cac_instant_completed(void)
+{
+	struct timer_list *cac_timer;
+
+	if (!is_cac_started())
+		return;
+
+	KASSERT((qdrv_radar_cb.cac_chan->ic_flags & IEEE80211_CHAN_DFS) != 0,
+			(DBGEFMT "CAC started on non-DFS channel: %3d (%4d MHz)\n",
+			DBGARG, qdrv_radar_cb.cac_chan->ic_ieee,
+			qdrv_radar_cb.cac_chan->ic_freq));
+	KASSERT(!(qdrv_radar_cb.cac_chan->ic_flags & IEEE80211_CHAN_DFS_CAC_DONE),
+			(DBGEFMT "CAC_DONE marked prior to CAC completed\n", DBGARG));
+
+	cac_timer = &qdrv_radar_cb.cac_timer;
+	mod_timer(cac_timer, jiffies);
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "RADAR: CAC period will expire instantly\n");
+}
+
+/*
+ * Start or restart the CAC procedure
+ * - precondition: transmission is already disabled
+ */
+static void start_cac(void)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *cur_chan = ic->ic_curchan;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct timer_list *cac_timer = &qdrv_radar_cb.cac_timer;
+	unsigned long expires;
+
+	/* CAC not required for DFS slave */
+	if (qdrv_is_dfs_slave() && !(ic->sta_dfs_info.sta_dfs_strict_mode))
+		return;
+
+	/* stop cac if any */
+	stop_cac();
+
+	KASSERT(qdrv_radar_cb.cac_chan == NULL,
+		(DBGEFMT "CAC channel is not null\n", DBGARG));
+
+	if (cur_chan == IEEE80211_CHAN_ANYC) {
+		DBGPRINTF_E("operational channel not yet selected\n");
+		return;
+	}
+
+	/* save the operational channel into the control block */
+	qdrv_radar_cb.cac_chan = cur_chan;
+	cur_chan->ic_flags |= IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+
+	if (ic->ic_mark_channel_dfs_cac_status) {
+		ic->ic_mark_channel_dfs_cac_status(ic, cur_chan, IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, true);
+	}
+
+	if (ieee80211_is_on_weather_channel(ic, cur_chan))
+		expires = jiffies + CAC_WEATHER_PERIOD_EU;
+	else
+		expires = jiffies + CAC_PERIOD;
+
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DFS_QUICKTIMER)) {
+		DBGPRINTF_N("RADAR: test mode - CAC period will expire quickly\n");
+		expires = jiffies + CAC_PERIOD_QUICK;
+	}
+	mod_timer(cac_timer, expires);
+
+	QDRV_SET_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_CAC_ACTIVE);
+	DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: CAC started for channel %3d (%4d MHz)\n",
+			 cur_chan->ic_ieee, cur_chan->ic_freq);
+}
+
+/*
+ * Unconditionally tear down the running CAC (Channel Availability Check):
+ * cancel the CAC timer, clear the CAC channel flags, update the channel
+ * CAC status via the ic callback, and notify the DFS re-entry logic.
+ * Callers must ensure a CAC is in progress (stop_cac() is the guarded
+ * wrapper).
+ */
+static void raw_stop_cac(void)
+{
+	struct ieee80211_channel *chan = qdrv_radar_cb.cac_chan;
+	struct timer_list *cac_timer = &qdrv_radar_cb.cac_timer;
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	if (!is_cac_started()) { /* no cac to stop */
+		DBGPRINTF_E("CAC is not started\n");
+		return;
+	}
+
+	del_timer(cac_timer);
+	/* clear both DONE and IN_PROGRESS: an aborted CAC invalidates any result */
+	chan->ic_flags &= ~(IEEE80211_CHAN_DFS_CAC_DONE |
+			IEEE80211_CHAN_DFS_CAC_IN_PROGRESS);
+
+	if (ic->ic_mark_channel_dfs_cac_status) {
+		ic->ic_mark_channel_dfs_cac_status(ic, chan, IEEE80211_CHAN_DFS_CAC_DONE, false);
+		ic->ic_mark_channel_dfs_cac_status(ic, chan, IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, false);
+	}
+
+	QDRV_CLEAR_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_CAC_ACTIVE);
+	DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: CAC stopped for channel %3d (%4d MHz)\n",
+			 chan->ic_ieee, chan->ic_freq);
+
+	/* no cac now */
+	qdrv_radar_cb.cac_chan = NULL;
+	/* take it as an channel switch failed event
+	 * to satisfy the dfs reentry demon when it's waiting for the dfs reentry result */
+	if (vap && vap->iv_dev)
+		dfs_reentry_chan_switch_notify(vap->iv_dev, NULL);
+}
+
+/* Stop the CAC procedure if one is currently running; no-op otherwise. */
+static void stop_cac(void)
+{
+	if (!is_cac_started())
+		return;
+
+	raw_stop_cac();
+}
+
+/*
+ * Abort any CAC currently in progress, provided the radar subsystem is
+ * enabled.  Safe to call when no CAC is running.
+ */
+void qdrv_radar_stop_active_cac(void)
+{
+	if (!qdrv_radar_cb.enabled || !is_cac_started())
+		return;
+
+	raw_stop_cac();
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "%s: stop CAC\n", __func__);
+}
+
+/*
+ * Strict STA-DFS hook for a channel's CAC state: when the channel still
+ * requires a CAC, mute transmit and (re)start CAC; when the channel is
+ * already classified available or not-available, re-enable transmit.
+ * No-op unless strict STA DFS mode and the radar subsystem are both on.
+ */
+void sta_dfs_strict_cac_action(struct ieee80211_channel *chan)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+
+	if (!ic->sta_dfs_info.sta_dfs_strict_mode) {
+		return;
+	}
+
+	if (!qdrv_radar_cb.enabled)
+		return;
+
+	if (ieee80211_is_chan_cac_required(chan)) {
+		/* tx must stay muted for the whole CAC on a strict-mode STA */
+		sys_disable_xmit();
+		start_cac();
+	} else if (ieee80211_is_chan_not_available(chan) || ieee80211_is_chan_available(chan)) {
+		sys_enable_xmit();
+	}
+}
+
+/*
+ * Timer callback: the non-occupancy period (NOP) for a channel expired.
+ * The channel may be used again: clear the radar mark, optionally kick
+ * off a scan, and mark the channel as requiring a fresh CAC.  The timer
+ * data carries the index into ic->ic_channels[].
+ */
+static void nonoccupy_expire_action(unsigned long data)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	unsigned chan_idx = data;
+	struct ieee80211_channel *chan;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	KASSERT(chan_idx < ic->ic_nchans,
+		(DBGEFMT "out-of-range channel idx %u\n", DBGARG, chan_idx));
+
+	chan = &ic->ic_channels[chan_idx];
+	/* channel is no longer radar-blocked */
+	chan->ic_flags &= ~IEEE80211_CHAN_RADAR;
+
+	if (ic->ic_flags_qtn & IEEE80211_QTN_RADAR_SCAN_START) {
+		if (ic->ic_initiate_scan) {
+			ic->ic_initiate_scan(vap);
+		}
+	}
+
+	/* Mark the channel as not_available and ready for cac */
+	if (ic->ic_mark_channel_availability_status) {
+		ic->ic_mark_channel_availability_status(ic, chan, IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED);
+	}
+
+	/* strict STA-DFS on the current channel: push running vaps to SCAN */
+	if ((ic->sta_dfs_info.sta_dfs_strict_mode)
+		&& (ic->ic_curchan == chan)) {
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if ((vap->iv_state != IEEE80211_S_RUN)
+				&& (vap->iv_state != IEEE80211_S_SCAN)) {
+				continue;
+			}
+			DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "%s: Trigger scan\n", __func__);
+			vap->iv_newstate(vap, IEEE80211_S_SCAN, 0);
+		}
+	}
+
+	DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: non-occupancy period expired for channel %3d "
+			 "(%4d MHz)\n", chan->ic_ieee, chan->ic_freq);
+}
+
+#ifdef CONFIG_QHOP
+/*
+ * Timer callback (RBS node): the post-radar grace window has elapsed.
+ * If we are still on the channel where the radar was seen, mute transmit.
+ * The timer data carries the IEEE channel number.
+ */
+static void rbs_radar_detected_timer_action(unsigned long chan_ic_ieee)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *chan = ieee80211_find_channel_by_ieee(ic, chan_ic_ieee);
+
+	/* NOTE(review): chan may be NULL if the lookup fails - confirm
+	 * ic_chan_compare_equality tolerates a NULL channel argument */
+	if (ic->ic_chan_compare_equality(ic, ic->ic_curchan, chan)) {
+		sys_disable_xmit();
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "%s expired\n", __func__);
+}
+#endif
+
+/*
+ * Timer callback (STA): the post-radar grace window has elapsed.  Mark
+ * the channel as not-available due to radar and, if we are still on it,
+ * mute transmit.  The timer data carries the IEEE channel number.
+ */
+static void sta_radar_detected_timer_action(unsigned long chan_ic_ieee)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *chan = ieee80211_find_channel_by_ieee(ic, chan_ic_ieee);
+	ic->sta_dfs_info.sta_dfs_radar_detected_timer = false;
+
+	/* NOTE(review): chan may be NULL if the lookup fails - confirm the
+	 * two ic callbacks below tolerate a NULL channel argument */
+	if (ic->ic_mark_channel_availability_status) {
+		ic->ic_mark_channel_availability_status(ic, chan,
+				IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED);
+	}
+	if (ic->ic_chan_compare_equality(ic, ic->ic_curchan, chan)) {
+		sys_disable_xmit();
+	}
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "%s: sta_radar_timer expired\n", __func__);
+}
+
+/*
+ * Timer callback: the STA silence period has elapsed.  Push the vap
+ * (carried in the timer's data field) back to SCAN state and re-enable
+ * transmission.
+ */
+static void sta_silence_timer_action(unsigned long data)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *)data;
+
+	if (vap != NULL)
+		ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+
+	sys_enable_xmit();
+}
+
+/*
+ * DFS channel-switch timer callback: the announcement delay is over,
+ * perform the actual switch.  If a designated channel was chosen,
+ * change to it; otherwise mute transmit and start an AP scan to find
+ * one.  The timer data argument is unused.
+ */
+static void dfs_cs_timer_expire_action(unsigned long data)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	if (is_dfs_cs_started()) {
+		struct ieee80211_channel *chan = qdrv_radar_cb.dfs_des_chan;
+
+		if (qdrv_radar_cb.dfs_des_chan != IEEE80211_CHAN_ANYC){
+			DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: DFS channel switch to %3d (%4d MHz)\n",
+					 chan->ic_ieee, chan->ic_freq);
+			sys_change_chan(chan);
+		} else {
+			/* disable the transmission before starting the AP scan */
+			sys_disable_xmit();
+
+			/* no channel selected by radar module. Call Scanner */
+			DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: starting AP scan due to radar "
+					 "detection\n");
+			(void) ieee80211_start_scan(vap, IEEE80211_SCAN_NO_DFS,
+				IEEE80211_SCAN_FOREVER, 0, NULL);
+		}
+
+		/* the pending DFS switch is now consumed */
+		qdrv_radar_cb.dfs_des_chan = NULL;
+	}
+}
+
+/*
+ * Arm a DFS-triggered channel switch to new_chan: any pending DFS
+ * switch is cancelled first, then the switch timer is (re)started.
+ */
+#ifndef CONFIG_QHOP
+static void start_dfs_cs(struct ieee80211_channel *new_chan)
+{
+	/* a newer radar event supersedes any pending DFS switch */
+	if (is_dfs_cs_started())
+		stop_dfs_cs();
+
+	qdrv_radar_cb.dfs_des_chan = new_chan;
+	mod_timer(&qdrv_radar_cb.dfs_cs_timer, jiffies + DFS_CS_TIMER_VAL);
+}
+#endif
+
+/*
+ * Cancel a pending DFS-triggered channel switch, if one is armed: stop
+ * the switch timer and forget the designated channel.  Safe to call
+ * when no switch is pending.
+ *
+ * Fix: declare with (void) instead of the old-style unprototyped ().
+ */
+static void stop_dfs_cs(void)
+{
+	struct timer_list *dfs_cs_timer = &qdrv_radar_cb.dfs_cs_timer;
+
+	if (is_dfs_cs_started()) {
+		del_timer(dfs_cs_timer);
+		qdrv_radar_cb.dfs_des_chan = NULL;
+	}
+}
+
+/*
+ * Look up the channel with IEEE number new_ieee and validate that it is
+ * usable as a (fast-)switch target.
+ *
+ * Returns the channel pointer, or NULL when new_ieee is 0, the channel
+ * is not present in ic_channels[], or ic_check_channel() rejects it.
+ */
+static struct ieee80211_channel *qdrv_validate_fs_chan(int fast_switch, u_int8_t new_ieee)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *chan = NULL;
+	struct ieee80211_channel *new_chan = NULL;
+	unsigned chan_idx;
+
+	if (new_ieee == 0) {
+		return NULL;
+	}
+
+	chan = ic->ic_channels;
+	for (chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++, chan++) {
+		if (chan->ic_ieee == new_ieee) {
+			new_chan = chan;
+			break;
+		}
+	}
+
+	if (new_chan == NULL) {
+		DBGPRINTF_E("channel %d not found\n", new_ieee);
+	} else if (!ic->ic_check_channel(ic, new_chan, fast_switch, 0)) {
+		/* fix: check new_chan explicitly - the old code passed the loop
+		 * cursor "chan", correct only because the loop broke at the match */
+		DBGPRINTF_E("channel %d is not usable\n", new_ieee);
+		new_chan = NULL;
+	}
+
+	return new_chan;
+}
+
+/*
+ * Select a new channel to use
+ * - according to FCC/ETSI rules on uniform spreading, we shall select a
+ * channel out of the list of usable channels so that the probability
+ * of selecting a given channel shall be the same for all channels
+ * (reference: ETSI 301 893 v1.5.1 $4.7.2.6)
+ * - possible for this function to return NULL
+ * - a random channel can be returned if the specified channel is neither
+ *	 found nor usable
+ * Order of preference: new_ieee, then ic_ieee_best_alt_chan, then a
+ * pseudo-random pick among all channels passing ic_check_channel().
+ */
+struct ieee80211_channel *qdrv_radar_select_newchan(u_int8_t new_ieee)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *chan;
+	struct ieee80211_channel *new_chan = NULL;
+	unsigned chan_idx;
+	int fast_switch = (ic->ic_flags_ext & IEEE80211_FEXT_DFS_FAST_SWITCH) != 0;
+
+
+	/* check if we can switch to the user configured channel */
+	new_chan = qdrv_validate_fs_chan(fast_switch, new_ieee);
+
+	/* fall back to the pre-computed best alternate channel */
+	if ((new_chan == NULL) && (new_ieee != ic->ic_ieee_best_alt_chan)) {
+		new_chan = qdrv_validate_fs_chan(fast_switch, ic->ic_ieee_best_alt_chan);
+	}
+
+	/* select a random channel */
+	if (new_chan == NULL) {
+		unsigned count;
+		chan = ic->ic_channels;
+		/* first pass: count the usable channels */
+		for (count = 0, chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++, chan++) {
+			if (ic->ic_check_channel(ic, chan, fast_switch, 0)) {
+				count++;
+			}
+		}
+
+		if (count != 0) {
+			/* jiffies serves as a cheap pseudo-random source here */
+			unsigned rand = jiffies % count;
+
+			/* second pass: take the rand-th usable channel */
+			chan = ic->ic_channels;
+			for (count = 0, chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++, chan++) {
+				if (ic->ic_check_channel(ic, chan, fast_switch, 0)) {
+					if (count++ == rand) {
+						new_chan = &ic->ic_channels[chan_idx];
+						break;
+					}
+				}
+			}
+		}
+	}
+
+	/* let SCS pick a better primary sub-channel, keep ours if it cannot */
+	if (new_chan) {
+		chan = new_chan;
+		new_chan = ieee80211_scs_switch_pri_chan(ic->ic_scan, new_chan);
+		if (!new_chan) {
+			DBGPRINTF_W("All subchannels are crowded with BSS, selected anyway\n");
+			new_chan = chan;
+		}
+	}
+
+	if (new_chan) {
+		DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: new channel selected %d (%d MHz)\n",
+				 new_chan->ic_ieee, new_chan->ic_freq);
+	} else {
+		DBGPRINTF_E("no valid channel found\n");
+	}
+
+	return new_chan;
+}
+EXPORT_SYMBOL(qdrv_radar_select_newchan);
+
+/*
+ * Perform the DFS follow-up actions once a new channel has been
+ * selected (new_chan may be NULL when no usable channel was found):
+ * - NULL: notify the DFS re-entry logic of the failure, mute transmit
+ *   and start an AP scan to find a channel;
+ * - CONFIG_QHOP: if this node is the MBS, announce the switch with CSA
+ *   frames (transmit may be re-opened when the radar hit during CAC);
+ * - otherwise: send a CSA action frame on each running vap and arm the
+ *   deferred DFS channel-switch timer.
+ * Finally record the previous/current channel of the DFS change.
+ *
+ * Fix: guard the vap/iv_dev dereferences in the NULL-channel path (the
+ * vap list may be empty), matching the guard used in raw_stop_cac().
+ */
+static void dfs_action_after_newchan_select(struct ieee80211_channel *new_chan, bool radar_detected_during_cac)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *cur_chan = ic->ic_curchan;
+	struct ieee80211vap *vap;
+	bool vap_found = false;
+#ifndef CONFIG_QHOP
+	struct ieee80211_channel *csa_chan;
+	uint64_t tsf = 0;
+#endif
+
+	if (new_chan == NULL) {
+		vap = TAILQ_FIRST(&ic->ic_vaps);
+		if (vap && vap->iv_dev)
+			dfs_reentry_chan_switch_notify(vap->iv_dev, new_chan);
+		/* disable the transmission before starting the AP scan */
+		sys_disable_xmit();
+		/* no channel selected by radar module. Call Scanner */
+		if (vap)
+			(void) ieee80211_start_scan(vap, IEEE80211_SCAN_NO_DFS,
+				IEEE80211_SCAN_FOREVER, 0, NULL);
+
+		DBGPRINTF_E("new channel not found or usable\n");
+		return;
+	}
+
+#ifdef CONFIG_QHOP
+	/* If the node is MBS send CSA frames */
+	if (!ieee80211_scs_is_wds_rbs_node(ic)) {
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+				continue;
+
+			if (vap->iv_state != IEEE80211_S_RUN)
+				continue;
+
+			/* radar during CAC: briefly re-open tx so the CSA can go out */
+			if (radar_detected_during_cac && ic->rbs_mbs_dfs_info.rbs_mbs_allow_tx_frms_in_cac) {
+				ic->rbs_mbs_dfs_info.mbs_allow_csa = true;
+				sys_enable_xmit();
+			}
+			ieee80211_dfs_send_csa(vap, new_chan->ic_ieee);
+		}
+	}
+#else
+	/*
+	 * Just use CSA action frame, so set ic_csa_count to zero and
+	 * avoid CSA ie included in beacon.
+	 */
+	ic->ic_flags |= IEEE80211_F_CHANSWITCH;
+	ic->ic_csa_count = 0;
+
+	/* send CSA action frame for each vap */
+	csa_chan = new_chan;
+
+	/* schedule the switch QDRV_RADAR_DFLT_CHANSW_MS ahead of the current TSF */
+	ic->ic_get_tsf(&tsf);
+	tsf += IEEE80211_MS_TO_USEC(QDRV_RADAR_DFLT_CHANSW_MS);
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		/* Just skip WDS mode, because not sure if we support DFS on STA later */
+		if (vap->iv_opmode == IEEE80211_M_WDS)
+			continue;
+
+		if ((vap->iv_state != IEEE80211_S_RUN) && (vap->iv_state != IEEE80211_S_SCAN))
+			continue;
+
+		vap_found = true;
+		sys_send_csa(vap, csa_chan, tsf);
+	}
+
+	start_dfs_cs(new_chan);
+#endif
+
+	/* record the DFS-driven channel change */
+	ic->ic_dfs_cce.cce_previous = cur_chan->ic_ieee;
+	ic->ic_dfs_cce.cce_current = new_chan->ic_ieee;
+
+	if (vap_found != true)
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_RADAR, "no vap running\n");
+}
+
+/*
+ * Send an autonomous (unsolicited) 802.11h basic measurement report to
+ * the AP with the radar bit set for the current channel - this is how a
+ * DFS slave reports a radar hit to its master.  No-op when vap is NULL;
+ * the vap is expected to be in RUN state.
+ *
+ * Fix: typo "reprot" -> "report" in the assertion message; dropped the
+ * redundant trailing return.
+ */
+static void
+dfs_send_report_frame(struct ieee80211com *ic, struct ieee80211vap *vap) {
+	struct ieee80211_node *ni;
+	struct ieee80211_channel *cur_chan;
+	struct ieee80211_meas_report_ctrl mreport_ctrl;
+	struct ieee80211_action_data action_data;
+
+	/* DFS enabled STA sends Autonomous Measurement Report Action Frame to AP*/
+	if (vap == NULL)
+		return;
+
+	KASSERT(vap->iv_state == IEEE80211_S_RUN, (DBGEFMT "Radar send report "
+			"frame, vap state incorrect: %d\n", DBGARG, vap->iv_state));
+
+	memset(&mreport_ctrl, 0, sizeof(mreport_ctrl));
+	memset(&action_data, 0, sizeof(action_data));
+	ni = vap->iv_bss;
+	cur_chan = ic->ic_curchan;
+
+	mreport_ctrl.meas_type = IEEE80211_CCA_MEASTYPE_BASIC;
+	mreport_ctrl.report_mode = 0;
+	mreport_ctrl.autonomous = 1;	/* unsolicited report */
+	mreport_ctrl.u.basic.channel = ieee80211_chan2ieee(ic, cur_chan);
+	mreport_ctrl.u.basic.basic_report |= IEEE80211_MEASURE_BASIC_REPORT_RADAR;
+	action_data.cat		= IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action	= IEEE80211_ACTION_S_MEASUREMENT_REPORT;
+	action_data.params	= &mreport_ctrl;
+	/* pointer passed through the int argument of ic_send_mgmt (32-bit target) */
+	ic->ic_send_mgmt(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+
+/*
+ * DFS-slave fallback after radar detection: push every running or
+ * scanning vap into SCAN state and record DFS as the channel-switch
+ * reason.
+ */
+static void
+dfs_slave_push_state_machine(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if ((vap->iv_state == IEEE80211_S_RUN) ||
+		    (vap->iv_state == IEEE80211_S_SCAN))
+			vap->iv_newstate(vap, IEEE80211_S_SCAN, 0);
+	}
+
+	ic->ic_chan_switch_reason_record(ic, IEEE80211_CSW_REASON_DFS);
+}
+
+#ifdef CONFIG_QHOP
+/*
+ * On an RBS node, forward the radar report towards the MBS over the
+ * first RBS WDS link found.  A hub-and-spoke topology is assumed; a
+ * general tree or mesh would require a real routing algorithm here.
+ */
+static void
+dfs_send_qhop_report_frame(struct ieee80211com *ic, u_int8_t new_ieee)
+{
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (!IEEE80211_VAP_WDS_IS_RBS(vap))
+			continue;
+
+		qdrv_qhop_send_rbs_report_frame(vap, new_ieee);
+		break;
+	}
+}
+#endif
+
+/*
+ * Perform the dfs action including channel switch.
+ * Role-dependent behaviour:
+ * - QHOP RBS: forward the radar report to the MBS (optionally keeping
+ *   tx open briefly when the hit occurred during CAC);
+ * - DFS slave / repeater: report the radar to the master via an
+ *   autonomous measurement report, else push the vaps to SCAN;
+ * - AP (master): continue initial CAC on the next channel if possible,
+ *   else fast-switch (fixed/random channel) or scan for a non-DFS one.
+ */
+static void dfs_action(uint8_t new_ieee, bool radar_detected_during_cac)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *new_chan = NULL;
+	struct ieee80211vap *vap;
+	uint32_t repeater_mode;
+
+#ifdef CONFIG_QHOP
+	if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS) {
+		/* keep tx open just long enough to deliver the QHOP report */
+		if (radar_detected_during_cac && ic->rbs_mbs_dfs_info.rbs_mbs_allow_tx_frms_in_cac) {
+			ic->rbs_mbs_dfs_info.rbs_allow_qhop_report = true;
+			sys_enable_xmit();
+			ic->rbs_mbs_dfs_info.rbs_dfs_radar_timer.data = ic->ic_curchan->ic_ieee;
+			mod_timer(&ic->rbs_mbs_dfs_info.rbs_dfs_radar_timer,
+				(jiffies + IEEE80211_MS_TO_JIFFIES(ic->rbs_mbs_dfs_info.rbs_dfs_tx_chan_close_time)));
+		}
+
+		dfs_send_qhop_report_frame(ic, new_ieee);
+		return;
+	}
+#endif
+	vap = TAILQ_FIRST(&ic->ic_vaps);
+	repeater_mode = ieee80211_is_repeater(ic);
+	if (qdrv_is_dfs_slave() || repeater_mode) {
+
+		/* strict STA-DFS may send the report even while tx is muted in CAC */
+		bool send_measurement_in_sta_dfs_strict = ic->sta_dfs_info.sta_dfs_strict_mode &&
+							ic->sta_dfs_info.sta_dfs_strict_msr_cac &&
+							radar_detected_during_cac;
+
+		/* DFS slave just initiates scan as DFS action */
+		if (vap && vap->iv_state == IEEE80211_S_RUN) {
+			if ((qdrv_radar_cb.xmit_stopped == true)
+					&& (!send_measurement_in_sta_dfs_strict)) {
+				DBGPRINTF(DBG_LL_WARNING, QDRV_LF_RADAR,
+						"%s report radar failed\n",
+						repeater_mode ? "Repeater" : "STA");
+			} else {
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+						"%s report radar to master\n",
+						repeater_mode ? "Repeater" : "STA");
+
+				if (send_measurement_in_sta_dfs_strict) {
+					DBGPRINTF_N_QEVT(vap->iv_dev, "STA-DFS: Sending measurement frame during CAC\n");
+					sys_enable_xmit();
+					ic->sta_dfs_info.allow_measurement_report = true;
+				}
+
+				dfs_send_report_frame(ic, vap);
+				/* with STA-DFS (or repeater) the master drives the switch */
+				if (qdrv_radar_sta_dfs || repeater_mode)
+					return;
+			}
+		}
+
+		if (!repeater_mode) {
+			dfs_slave_push_state_machine(ic);
+			return;
+		}
+		/*
+		 * If STA interface of a repeater do not associated with
+		 * any root-AP,the repeater should act as if it's an AP.
+		 */
+	}
+
+	/*
+	 * Default behavior for AP:
+	 * If radar detected during ICAC, continue ICAC on next NOT_AVAILABLE_DFS channel
+	 * If DFS fast switch configured, do random channel selection or fixed channel
+	 * based on customer's configuration;
+	 * If DFS fast switch not configured, use channel scan to pick up a best non-DFS channel
+	 */
+	if (vap && ic->ic_ap_next_cac
+			&& (ic->ic_ap_next_cac(ic, vap, CAC_PERIOD, &qdrv_radar_cb.cac_chan,
+			IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY)) < 0) {
+		if (ic->ic_flags_ext & IEEE80211_FEXT_DFS_FAST_SWITCH) {
+			/*
+			 * select one channel at random
+			 */
+			new_chan = qdrv_radar_select_newchan(new_ieee);
+			dfs_action_after_newchan_select(new_chan, radar_detected_during_cac);
+		} else {
+			/*
+			 * Using channel scan to pick up a best non-DFS channel to switch
+			 * Channel switch and DFS action will be done after scanning is done
+			 */
+			vap = TAILQ_FIRST(&ic->ic_vaps);
+			ieee80211_start_scan(vap, IEEE80211_SCAN_FLUSH | IEEE80211_SCAN_NO_DFS
+					| IEEE80211_SCAN_DFS_ACTION, IEEE80211_SCAN_FOREVER,
+					vap->iv_des_nssid, vap->iv_des_ssid);
+		}
+	}
+
+	ic->ic_chan_switch_reason_record(ic, IEEE80211_CSW_REASON_DFS);
+}
+
+/*
+ * DFS action deferred until a channel scan completes: pick the best
+ * non-DFS channel from the scan results and run the post-selection DFS
+ * actions on it (a NULL pick is handled downstream).
+ */
+void qdrv_dfs_action_scan_done(void)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *picked;
+
+	IEEE80211_LOCK_IRQ(ic);
+	picked = ieee80211_scan_pickchannel(ic, IEEE80211_SCAN_NO_DFS);
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	dfs_action_after_newchan_select(picked, false);
+}
+
+/*
+ * Decide whether radar detection must run on chan: true only when 11h
+ * is in effect (or the "don't care doth" test mode is set) and the
+ * channel carries the DFS flag.  Returns false for an unset channel.
+ */
+bool qdrv_radar_is_rdetection_required(const struct ieee80211_channel *chan)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	bool doth = ic->ic_flags & IEEE80211_F_DOTH;
+
+	if (DBG_LOG_FUNC_TEST(QDRV_LF_DFS_DONTCAREDOTH)) {
+		DBGPRINTF_N("RADAR: test mode - detection enabled\n");
+		doth = true;
+	}
+
+	if (!doth)
+		return false;
+
+	if (chan == IEEE80211_CHAN_ANYC) {
+		DBGPRINTF_E("channel not yet set\n");
+		return false;
+	}
+
+	return (chan->ic_flags & IEEE80211_CHAN_DFS) != 0;
+}
+
+/*
+ * Handle a radar hit seen during off-channel CAC (OCAC).  Only a DFS
+ * master that is not a repeater acts on it.  Returns the IEEE number of
+ * the off-channel that was being scanned, or -EINVAL for slaves and
+ * repeaters.
+ */
+static int32_t qdrv_radar_off_chan_cac_action(struct ieee80211com *ic)
+{
+	struct radar_ocac_info_s *qdrv_ocac_info;
+	uint8_t ocac_scan_chan;
+
+	if (qdrv_is_dfs_slave())
+		return -EINVAL;
+
+	if (ieee80211_is_repeater(ic))
+		return -EINVAL;
+
+	qdrv_ocac_info = radar_ocac_info_addr_get();
+	ocac_scan_chan = qdrv_ocac_info->ocac_scan_chan;
+	/* US wording differs from the generic off-channel message */
+	if (qdrv_radar_cb.region == DFS_RQMT_US) {
+		DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: radar found on channel %u during CAC\n",
+				 ocac_scan_chan);
+	} else {
+		DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: radar found on off channel %u, current "
+				 "chan %u\n", ocac_scan_chan, ic->ic_curchan->ic_ieee);
+	}
+
+	return ocac_scan_chan;
+}
+
+/*
+ * Invoked when a radar is detected.
+ * Called directly from iwpriv wifi0 doth_radar <new channel>.
+ * Called when AP receives a radar detection report from an associated STA.
+ * Validates subsystem state, identifies the affected channel (current
+ * channel or the OCAC off-channel), marks it radar-hit, starts the
+ * non-occupancy timer, and triggers the role-specific DFS action.
+ * new_ieee optionally names the preferred switch target (0 = none).
+ */
+void qdrv_radar_detected(struct ieee80211com *ic, u_int8_t new_ieee)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	int retval = -1;
+	uint8_t local_new_ieee = new_ieee;
+	uint32_t chan_idx;
+	struct ieee80211_channel *chan = NULL;
+	struct ieee80211vap *vap;
+	bool rdetect;
+	int32_t radar_chan;
+	bool radar_detected_during_cac = is_cac_started();
+
+	if (!qdrv_radar_configured) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "radar not initialized\n");
+		return;
+	}
+
+	if (!qdrv_radar_cb.enabled) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "radar not enabled\n");
+		return;
+	}
+
+	if (ic != qdrv_radar_cb.ic) {
+		DBGPRINTF_E("ic 0x%p not matching the configured ic 0x%p\n",
+			ic, qdrv_radar_cb.ic);
+		return;
+	}
+
+	/* stop cac if any */
+	stop_cac();
+
+	if (qdrv_is_dfs_slave() && !qdrv_radar_sta_dfs) {
+		DBGPRINTF_E("ic mode is %d and sta dfs is %s\n", ic->ic_opmode,
+				qdrv_radar_sta_dfs ? "enabled" : "disabled");
+		return;
+	}
+
+	/* detection not required on-channel => the hit came from OCAC */
+	rdetect = qdrv_radar_is_rdetection_required(ic->ic_curchan);
+	if (!rdetect) {
+		/* detect radar during off channel CAC */
+		if (!ic->ic_ocac.ocac_chan) {
+			DBGPRINTF_E("radar operating channel invalid\n");
+			return;
+		}
+		radar_chan = qdrv_radar_off_chan_cac_action(ic);
+	} else {
+		radar_chan = ic->ic_curchan->ic_ieee;
+	}
+
+	if (radar_chan < 0) {
+		DBGPRINTF_E("radar operating channel invalid\n");
+		return;
+	}
+
+	/* get an in-service channel */
+	for (chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++) {
+		if (ic->ic_channels[chan_idx].ic_ieee == radar_chan) {
+			chan = &ic->ic_channels[chan_idx];
+			break;
+		}
+	}
+	if (!chan) {
+		DBGPRINTF_E("no matching in-service channel for freq=%d\n",
+				ic->ic_curchan->ic_freq);
+		return;
+	}
+	KASSERT((chan->ic_flags & IEEE80211_CHAN_DFS), (DBGEFMT "Radar"
+				" detected on non-DFS channel\n", DBGARG));
+	DBGPRINTF_N_QEVT(ic2dev(ic), "RADAR: radar found on channel %3d (%4d MHz)\n",
+			 chan->ic_ieee, chan->ic_freq);
+
+	/*
+	 * To avoid repeated dfs actions when AP and STAs detected
+	 * same radar, test flag here. (only for AP side)
+	 */
+	if ((chan->ic_flags & IEEE80211_CHAN_RADAR) && !(ic->sta_dfs_info.sta_dfs_strict_mode)) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+				"DFS marked on channel %3d (%4d MHz) already\n",
+				chan->ic_ieee, chan->ic_freq);
+		return;
+	}
+
+	/* give the registered callback a chance to consume the event */
+	if (qw->radar_detect_callback) {
+		retval = qw->radar_detect_callback(chan);
+		if (retval == 0)
+			return;
+	}
+
+	/* check if dfs marking is allowed */
+	if (!(ic->ic_flags_ext & IEEE80211_FEXT_MARKDFS)) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "DFS marking disabled\n");
+		return;
+	}
+
+	/* return immediately if we are in the dfs test mode */
+	if (qdrv_radar_is_test_mode()) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_RADAR | QDRV_LF_DFS_TESTMODE,
+				"test mode - no DFS action taken\n");
+		if (qdrv_radar_test_mode_csa_en() && qdrv_is_dfs_master()) {
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_RADAR | QDRV_LF_DFS_TESTMODE,
+					"send CSA action\n");
+			vap = TAILQ_FIRST(&ic->ic_vaps);
+			ieee80211_dfs_send_csa(vap, ic->ic_curchan->ic_ieee);
+		}
+		return;
+	}
+
+	/* set radar_found_flag eariler to avoid function reentry issue */
+	start_nonoccupy(chan_idx);
+
+	/* stop cac if any */
+	stop_cac();
+
+	/* OCAC do not required for DFS actions */
+	if (!rdetect) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+				"OCAC DFS: no DFS action taken\n");
+		return;
+	}
+
+	/* disable radar detection to avoid redundant detection */
+	sys_disable_rdetection();
+
+	/* no explicit target: fall back to the configured alternate channel */
+	if (local_new_ieee == 0 && ic->ic_ieee_alt_chan != 0) {
+		local_new_ieee = ic->ic_ieee_alt_chan;
+        }
+	/* take a dfs action */
+	dfs_action(local_new_ieee, radar_detected_during_cac);
+
+}
+
+/*
+ * Invoked when radar is detected
+ * - a callback function registered to the radar module
+ * Delegates to qdrv_radar_detected() with no preselected target channel.
+ */
+static void mark_radar(void)
+{
+	qdrv_radar_detected(qdrv_radar_cb.ic, 0);
+}
+
+/* Return 1 when radar is enabled and running in DFS test mode, else 0. */
+int qdrv_radar_test_mode_enabled(void)
+{
+	return (qdrv_radar_cb.enabled == true && qdrv_radar_is_test_mode()) ? 1 : 0;
+}
+
+/*
+ * Check if safe to perform channel sampling.
+ * Not safe while a CAC is running (with radar enabled) or while in DFS
+ * test mode.  Returns 1 if OK, else 0.
+ */
+int qdrv_radar_can_sample_chan(void)
+{
+	int cac_running = (qdrv_radar_cb.enabled != 0) && is_cac_started();
+
+	if (cac_running || qdrv_radar_test_mode_enabled())
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Take appropriate action(s) right before channel switch:
+ * - radar disabled (slave): arm the STA silence timer and mute tx when
+ *   the target channel still needs a CAC;
+ * - scan in flight: invalidate any ongoing CAC and mute tx as needed;
+ * - otherwise: stop CAC, cancel a superseded DFS switch, and gate
+ *   tx/detection according to whether the new channel is a DFS one.
+ */
+void qdrv_radar_before_newchan(void)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct ieee80211_channel *new_chan = NULL;
+	int silence_period;
+	bool rdetect;
+
+	/* now safe to set 'new_chan' */
+	new_chan = ic->ic_curchan;
+
+	/* check if the new channel requires radar detection */
+	rdetect = qdrv_radar_is_rdetection_required(new_chan);
+
+	if (!qdrv_radar_cb.enabled) {
+		if (rdetect && qdrv_is_dfs_slave() &&
+				!IEEE80211_IS_CHAN_CACDONE(new_chan)) {
+			/* weather channels require a longer silence period */
+			if (ieee80211_is_on_weather_channel(ic, new_chan))
+				silence_period = STA_WEATHER_CHAN_SILENCE_PERIOD;
+			else
+				silence_period = STA_SILENCE_PERIOD;
+
+			ic->sta_dfs_info.sta_silence_timer.data =
+				(unsigned long)ieee80211_get_sta_vap(ic);
+			mod_timer(&ic->sta_dfs_info.sta_silence_timer,
+				jiffies + silence_period);
+			sys_disable_xmit();
+		}
+
+		return;
+	}
+
+	if (ic->ic_flags & IEEE80211_F_SCAN) {
+		if (is_cac_started()) {
+			/* The ongoing CAC is invalid since channel scan is running */
+			qdrv_radar_cb.cac_chan->ic_flags &=
+				~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+
+			if (ic->ic_mark_channel_dfs_cac_status) {
+				ic->ic_mark_channel_dfs_cac_status(ic, qdrv_radar_cb.cac_chan,
+							IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, false);
+			}
+		}
+		if (rdetect && !IEEE80211_IS_CHAN_CACDONE(new_chan))
+			sys_disable_xmit();
+
+		return;
+	}
+
+	/* stop cac if any */
+	stop_cac();
+
+	/* other channel switches override the DFS-triggered one */
+	if (is_dfs_cs_started() && (qdrv_radar_cb.dfs_des_chan != new_chan)) {
+		stop_dfs_cs();
+	}
+
+	if (rdetect) {
+		QDRV_SET_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_RADAR_ACT);
+		/* masters (and strict-mode STAs) must stay silent until CAC */
+		if (qdrv_is_dfs_master() || ic->sta_dfs_info.sta_dfs_strict_mode)
+			sys_disable_xmit();
+		sys_disable_rdetection();
+	} else {
+		QDRV_CLEAR_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_RADAR_ACT);
+	}
+}
+
+/*
+ * (Re-)arm radar detection for the current channel when the radar
+ * subsystem is enabled, no scan is in flight, and the channel needs
+ * detection.  Detection is only switched on while radar_get_status()
+ * reports false and the power-management level is below the
+ * duty-cycling threshold.
+ */
+void qdrv_radar_enable_radar_detection(void)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+
+	if (!qdrv_radar_cb.enabled || (ic->ic_flags & IEEE80211_F_SCAN))
+		return;
+
+	if (!qdrv_radar_is_rdetection_required(ic->ic_curchan))
+		return;
+
+	radar_set_chan(ic->ic_curchan->ic_ieee);
+	if (!radar_get_status() &&
+			(ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] < BOARD_PM_LEVEL_DUTY))
+		sys_enable_rdetection();
+}
+
+/*
+ * Decide what to do on the new channel:
+ * report it to the radar module, then gate transmit and detection
+ * based on DFS requirements, prior (seamless) CAC results, the node
+ * role (master starts a CAC; strict-mode STA runs its own action),
+ * and the power-management level.
+ */
+void qdrv_radar_on_newchan(void)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *new_chan = NULL;
+	bool rdetect;
+        int handle_cac = 0;
+
+	if (!ic->ic_curchan)
+		return;
+
+	/* now safe to set 'new_chan' */
+	new_chan = ic->ic_curchan;
+
+	/* check if radar detection on the channel is requried */
+	rdetect = qdrv_radar_is_rdetection_required(new_chan);
+
+	if (!qdrv_radar_cb.enabled) {
+		if (!rdetect)
+			sys_enable_xmit();
+
+		return;
+	}
+
+	if (ic->ic_flags & IEEE80211_F_SCAN) {
+		if (!rdetect || IEEE80211_IS_CHAN_CACDONE(new_chan))
+			sys_enable_xmit();
+
+		return;
+	}
+
+	/* a CAC is needed only if the channel has neither done nor begun one */
+	handle_cac = !(IEEE80211_IS_CHAN_CACDONE(ic->ic_curchan)) &&
+                        !(IEEE80211_IS_CHAN_CAC_IN_PROGRESS(ic->ic_curchan));
+
+	/* report new channel to the radar module */
+	radar_set_chan(new_chan->ic_ieee);
+
+	/* log a new channel info */
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+			"now on channel %3d (%4d MHz) with DFS %s (F_DOTH %d, CHAN_DFS %d)\n",
+			new_chan->ic_ieee, new_chan->ic_freq,
+			(rdetect) ? "enabled" : "disabled",
+			(ic->ic_flags & IEEE80211_F_DOTH) ? 1 : 0 ,
+			(new_chan->ic_flags & IEEE80211_CHAN_DFS) ? 1 : 0);
+
+	if (rdetect) {
+		/* an earlier off-channel (seamless) CAC already cleared this channel */
+		if (new_chan->ic_flags & IEEE80211_CHAN_DFS_OCAC_DONE) {
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,  "Seamless CAC completed "
+					"and no action needed\n");
+			sys_enable_xmit();
+			new_chan->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+			if (ic->ic_mark_channel_availability_status) {
+				ic->ic_mark_channel_availability_status(ic, new_chan, IEEE80211_CHANNEL_STATUS_AVAILABLE);
+			}
+
+			if (ic->ic_mark_channel_dfs_cac_status) {
+				ic->ic_mark_channel_dfs_cac_status(ic, new_chan, IEEE80211_CHAN_DFS_CAC_DONE, true);
+			}
+
+		} else if (qdrv_is_dfs_master()) {
+			if (handle_cac) {
+				start_cac();
+			} else if (ieee80211_is_chan_available(new_chan)) {
+				sys_enable_xmit();
+			}
+		} else if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+			sta_dfs_strict_cac_action(new_chan);
+		}
+
+		if (!radar_get_status()) {
+			if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] < BOARD_PM_LEVEL_DUTY) {
+				sys_enable_rdetection();
+			}
+		}
+	} else {
+		sys_enable_xmit();
+		sys_disable_rdetection();
+	}
+}
+
+/*
+ * Enable or disable station-side DFS handling at runtime.  Only
+ * meaningful on a DFS slave with STA-DFS support, after the radar
+ * module has been configured and started at least once.
+ */
+void qdrv_sta_dfs_enable(int sta_dfs_enable)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	bool rdetect;
+
+	if (!qdrv_radar_configured)
+		return;
+
+	/* radar module not started yet - nothing to toggle */
+	if (qdrv_radar_first_call)
+		return;
+
+	if (qdrv_is_dfs_master())
+		return;
+
+	if (qdrv_is_dfs_slave() && !qdrv_radar_sta_dfs)
+		return;
+
+	if (sta_dfs_enable) {
+		/* point the radar module at the BSS channel before enabling */
+		rdetect = qdrv_radar_is_rdetection_required(ic->ic_bsschan);
+		if (rdetect)
+			radar_set_chan(ic->ic_bsschan->ic_ieee);
+
+		qdrv_radar_enable_action();
+
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_RADAR, "Station DFS enable\n");
+	} else {
+		qdrv_radar_disable();
+
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_RADAR, "Station DFS disable\n");
+	}
+}
+
+/*
+ * Enable DFS feature for the given regulatory region.  On the first
+ * call this also starts the radar module, latches the STA-DFS and
+ * region settings, and initialises MuC sampling; subsequent calls only
+ * re-run the enable action.  Russia ("ru") has no DFS requirement.
+ */
+void qdrv_radar_enable(const char *region)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211vap *vap;
+
+	if (!qdrv_radar_configured) {
+		DBGPRINTF_E("radar unconfigured\n");
+		return;
+	}
+
+	if (qdrv_radar_cb.enabled) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_RADAR, "radar already enabled\n");
+		return;
+	} else if (strcmp(region, "ru") == 0) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+			"no DFS / radar requirement for regulatory region Russia\n");
+		return;
+	}
+
+	ieee80211_ocac_update_params(ic, region);
+
+	if (qdrv_radar_first_call) {
+		if (false == sys_start_radarmod(region)) {
+			DBGPRINTF_E("Fail to start radar module\n");
+			return;
+		}
+		qdrv_radar_first_call = false;
+		qdrv_radar_sta_dfs = sta_dfs_is_region_required();
+		qdrv_radar_cb.region = dfs_rqmt_code(region);
+
+		/* initialise MuC sampling */
+		qdrv_radar_cb.muc_sampling.sample = detect_drv_sample_loc_get();
+		ic->ic_sample_rate = QDRV_RADAR_SAMPLE_RATE;
+		vap = TAILQ_FIRST(&ic->ic_vaps);
+		if (vap != NULL && qdrv_radar_cb.muc_sampling.sample != NULL) {
+			ic->ic_setparam(vap->iv_bss, IEEE80211_PARAM_SAMPLE_RATE,
+					ic->ic_sample_rate, NULL, 0);
+			INIT_DELAYED_WORK(&qdrv_radar_cb.muc_sampling.sample_work,
+				qdrv_radar_sample_work);
+		} else {
+			DBGPRINTF_E("failed to start MuC sampling. vap %p sample %p\n",
+					vap, qdrv_radar_cb.muc_sampling.sample);
+			return;  /* abort further radar initialization if failure */
+		}
+	}
+
+	/* NOTE(review): enabled was already checked (and returned on) above, so
+	 * this branch looks unreachable unless sys_start_radarmod() sets
+	 * qdrv_radar_cb.enabled as a side effect - confirm before removing */
+	if (qdrv_radar_cb.enabled == true) {
+		DBGPRINTF_E("re-enabling radar is not supported - reboot\n");
+		/* for future work of re-enabling radar
+		sys_stop_radarmod();
+		sys_start_radarmod(region);
+		 */
+	} else {
+		qdrv_radar_enable_action();
+	}
+}
+
+/* Return true if the given IEEE channel number maps to a DFS channel. */
+static bool qdrv_radar_is_dfs_chan(uint8_t wifi_chan)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	const struct ieee80211_channel *chan = ic->ic_channels;
+	uint32_t i;
+
+	for (i = 0; i < ic->ic_nchans; i++, chan++) {
+		if ((chan->ic_ieee == wifi_chan) &&
+				(chan->ic_flags & IEEE80211_CHAN_DFS))
+			return true;
+	}
+
+	return false;
+}
+
+/* Return true if the given IEEE channel number maps to a weather-radar channel. */
+static bool qdrv_radar_is_dfs_weather_chan(uint8_t wifi_chan)
+{
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	const struct ieee80211_channel *chan = ic->ic_channels;
+	uint32_t i;
+
+	for (i = 0; i < ic->ic_nchans; i++, chan++) {
+		if ((chan->ic_ieee == wifi_chan) &&
+				(chan->ic_flags & IEEE80211_CHAN_WEATHER))
+			return true;
+	}
+
+	return false;
+}
+
+/* Return the channel currently under CAC, or NULL when no CAC is running. */
+struct ieee80211_channel * qdrv_radar_get_current_cac_chan(void)
+{
+	return  qdrv_radar_cb.cac_chan;
+}
+EXPORT_SYMBOL(qdrv_radar_get_current_cac_chan);
+
+/*
+ * Return true when the configured DFS regulatory region is ETSI/EU.
+ * Fix: declare with (void) instead of the old-style unprototyped ().
+ */
+bool qdrv_dfs_is_eu_region(void)
+{
+	return qdrv_radar_cb.region == DFS_RQMT_EU;
+}
+
+/*
+ * Power-management notifier: radar detection is switched off when the
+ * PM level crosses up through BOARD_PM_LEVEL_DUTY (duty-cycled modes)
+ * and re-armed when it drops back below.  The previous level is kept in
+ * a function-static so only threshold crossings trigger action.
+ */
+int radar_pm_notify(struct notifier_block *b, unsigned long level, void *v)
+{
+	static int pm_prev_level = BOARD_PM_LEVEL_NO;
+	const int switch_level = BOARD_PM_LEVEL_DUTY;
+	struct ieee80211com *ic = qdrv_radar_cb.ic;
+	struct ieee80211_channel *operate_chan;
+	bool rdetect;
+
+	if (!qdrv_radar_cb.enabled)
+		goto out;
+
+	operate_chan = ic->ic_bsschan;
+	rdetect = qdrv_radar_is_rdetection_required(operate_chan);
+
+	if (rdetect) {
+		if ((pm_prev_level < switch_level) && (level >= switch_level)) {
+			/* entering duty-cycled PM: detection cannot run */
+			sys_disable_rdetection();
+		} else if ((pm_prev_level >= switch_level) && (level < switch_level)) {
+			/* leaving duty-cycled PM: re-point and re-arm detection */
+			radar_set_chan(ic->ic_bsschan->ic_ieee);
+			sys_enable_rdetection();
+		}
+	}
+
+out:
+	pm_prev_level = level;
+        return NOTIFY_OK;
+}
+
+/*
+ * MuC-to-LHost OCAC interrupt handler: account the interrupt against
+ * the off-channel or data-channel counter depending on the shared
+ * channel status, then defer the real work to the OCAC tasklet.
+ */
+static void qdrv_ocac_irqhandler(void *arg1, void *arg2)
+{
+	struct qdrv_wlan *qw = arg1;
+	struct ieee80211com *ic = &qw->ic;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_ocac_info *ocac_info = sp->ocac_lhost;
+
+	if (ocac_info->chan_status != QTN_OCAC_ON_OFF_CHAN)
+		ic->ic_ocac.ocac_counts.intr_data_chan++;
+	else
+		ic->ic_ocac.ocac_counts.intr_off_chan++;
+
+	tasklet_schedule(&qdrv_radar_cb.ocac_tasklet);
+}
+
+/*
+ * Register qdrv_ocac_irqhandler() for the MuC-to-LHost OCAC interrupt.
+ * Returns 0 on success, -1 when the handler could not be installed.
+ */
+static int qdrv_init_ocac_irqhandler(struct qdrv_wlan *qw)
+{
+	struct int_handler handler = {
+		.handler = qdrv_ocac_irqhandler,
+		.arg1 = qw,
+		.arg2 = NULL,
+	};
+
+	if (qdrv_mac_set_handler(qw->mac, RUBY_M2L_IRQ_LO_OCAC, &handler) != 0) {
+		DBGPRINTF_E("Could not set ocac irq handler\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * initialize qdrv_radar.
+ * - Has to be invoked inside or after qdrv_wlan_init()
+ */
+int qdrv_radar_init(struct qdrv_mac *mac)
+{
+	struct ieee80211com *ic = &(((struct qdrv_wlan*)mac->data)->ic);
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_ocac_info *ocac_info = sp->ocac_lhost;
+	unsigned chan_idx;
+	struct timer_list *cac_timer;
+	struct timer_list *dfs_cs_timer;
+
+	if (mac->unit != 0) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+			"init radar request for mac%d ignored\n", mac->unit);
+		return 0; /* yes, it is success by design */
+	}
+
+	if (qdrv_radar_configured) {
+		DBGPRINTF_E("radar already configured\n");
+		return -1;
+	}
+
+	/* clear the control block */
+	memset(&qdrv_radar_cb, 0, sizeof(qdrv_radar_cb));
+
+	qdrv_radar_cb.mac = mac;
+	qdrv_radar_cb.ic = ic;
+
+	/* initialize the cac_timer */
+	cac_timer = &qdrv_radar_cb.cac_timer;
+	init_timer(cac_timer);
+	cac_timer->function = cac_completed_action;
+	cac_timer->data = (unsigned long) NULL; /* not used */
+
+	/* initialize all nonoccupy timers */
+	ic->ic_non_occupancy_period = QDRV_RADAR_DFLT_NONOCCUPY_PERIOD * HZ;
+	for (chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++) {
+		struct timer_list *nonoccupy_timer = &qdrv_radar_cb.nonoccupy_timer[chan_idx];
+
+		init_timer(nonoccupy_timer);
+		nonoccupy_timer->function = nonoccupy_expire_action;
+		nonoccupy_timer->data = chan_idx;
+	}
+
+	/* initialize the dfs_cs_timer */
+	dfs_cs_timer = &qdrv_radar_cb.dfs_cs_timer;
+	dfs_cs_timer->function = dfs_cs_timer_expire_action;
+	init_timer(dfs_cs_timer);
+
+	ic->sta_dfs_info.sta_radar_timer.function = sta_radar_detected_timer_action;
+	init_timer(&ic->sta_dfs_info.sta_radar_timer);
+
+	ic->sta_dfs_info.sta_silence_timer.function = sta_silence_timer_action;
+	init_timer(&ic->sta_dfs_info.sta_silence_timer);
+
+#ifdef CONFIG_QHOP
+	ic->rbs_mbs_dfs_info.rbs_dfs_radar_timer.function = rbs_radar_detected_timer_action;
+	init_timer(&ic->rbs_mbs_dfs_info.rbs_dfs_radar_timer);
+#endif
+
+	qdrv_radar_cb.pm_notifier.notifier_call = radar_pm_notify;
+	pm_qos_add_notifier(PM_QOS_POWER_SAVE, &qdrv_radar_cb.pm_notifier);
+
+	/* For off channel CAC */
+	tasklet_init(&qdrv_radar_cb.ocac_tasklet, &qdrv_ocac_tasklet, (unsigned long)ocac_info);
+	qdrv_init_ocac_irqhandler(qw);
+
+	qdrv_radar_configured = true;
+
+	/* success */
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "radar initialized\n");
+
+	return 0;
+}
+
+/*
+ * deinitialize qdrv_radar
+ */
+int qdrv_radar_exit(struct qdrv_mac *mac)
+{
+	if (mac->unit != 0) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+			"exit request for mac%d ignored\n", mac->unit);
+		return 0; /* yes, it is success by design */
+	}
+
+	if (qdrv_radar_first_call == true || !qdrv_radar_configured) {
+		DBGPRINTF_E("radar already unconfigured\n");
+		return -1;
+	}
+
+	qdrv_radar_disable();
+
+	del_timer_sync(&(qdrv_radar_cb.ic->sta_dfs_info.sta_silence_timer));
+
+	tasklet_kill(&qdrv_radar_cb.ocac_tasklet);
+	pm_qos_remove_notifier(PM_QOS_POWER_SAVE, &qdrv_radar_cb.pm_notifier);
+
+	/* disable radar detection */
+	sys_stop_radarmod();
+
+	/* clear the control block */
+	memset(&qdrv_radar_cb, 0, sizeof(qdrv_radar_cb));
+
+	qdrv_radar_configured = false;
+	qdrv_radar_sta_dfs = false;
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "radar exited\n");
+
+	return 0;
+}
+
+int qdrv_radar_unload(struct qdrv_mac *mac)
+{
+	if (mac->unit != 0) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR,
+			"exit request for mac%d ignored\n", mac->unit);
+		return 0; /* yes, it is success by design */
+	}
+
+	if (qdrv_radar_first_call == true || !qdrv_radar_configured) {
+		DBGPRINTF_E("radar already unconfigured\n");
+		return -1;
+	}
+
+	qdrv_radar_disable();
+
+	/* success */
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_RADAR, "radar unloaded\n");
+
+	return 0;
+}
diff --git a/drivers/qtn/qdrv/qdrv_radar.h b/drivers/qtn/qdrv/qdrv_radar.h
new file mode 100644
index 0000000..7498909
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_radar.h
@@ -0,0 +1,57 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_RADAR_H_
+#define _QDRV_RADAR_H_
+
+#define QDRV_RADAR_DFLT_CHANSW_MS 50 /* msecs */
+#define QDRV_RADAR_DFLT_NONOCCUPY_PERIOD	1800 /* secs */
+
+
+int qdrv_radar_init(struct qdrv_mac* mac);
+int qdrv_radar_exit(struct qdrv_mac* mac);
+int qdrv_radar_unload(struct qdrv_mac *mac);
+
+void qdrv_radar_enable(const char* region);
+void qdrv_radar_disable(void);
+void qdrv_sta_set_xmit(int enable);
+void qdrv_set_radar(int enable);
+
+void qdrv_radar_detected(struct ieee80211com* ic, u_int8_t new_ieee);
+int qdrv_radar_can_sample_chan(void);
+int qdrv_radar_test_mode_enabled(void);
+void qdrv_radar_before_newchan(void);
+void qdrv_radar_on_newchan(void);
+void qdrv_radar_stop_active_cac(void);
+void sta_dfs_cac_action(struct ieee80211_channel *chan);
+int qdrv_radar_detections_num(uint32_t chan);
+
+bool qdrv_radar_is_rdetection_required(const struct ieee80211_channel *chan);
+bool qdrv_dfs_is_eu_region(void);
+
+void qdrv_dfs_action_scan_done(void);
+
+struct ieee80211_channel * qdrv_radar_get_current_cac_chan(void);
+void qdrv_radar_enable_radar_detection(void);
+
+void sys_enable_xmit(void);
+void sys_disable_xmit(void);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_rx.c b/drivers/qtn/qdrv/qdrv_rx.c
new file mode 100644
index 0000000..3781aea
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_rx.c
@@ -0,0 +1,2170 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#ifdef CONFIG_IPV6
+#include <net/ipv6.h>
+#endif
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <trace/skb.h>
+#include <trace/ippkt.h>
+#include <asm/hardware.h>
+#include <asm/board/dma_cache_ops.h>
+#include <asm/board/gpio.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include "qdrv_bridge.h"
+#include "qdrv_mac_reserve.h"
+#include <qtn/registers.h>
+#include <net80211/if_llc.h>
+#include <net80211/if_ethersubr.h>
+#include <qtn/skb_recycle.h>
+#include <net80211/ieee80211_proto.h>
+#include <qtn/qtn_global.h>
+#include <qtn/iputil.h>
+#ifdef CONFIG_QVSP
+#include "qtn/qvsp.h"
+#endif
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#include "qtn/qdrv_sch.h"
+#include <linux/if_bridge.h>
+#include <linux/net/bridge/br_public.h>
+#endif
+
+#include <qtn/qtn_decap.h>
+#include <qtn/qtn_vlan.h>
+#include <qtn/topaz_hbm.h>
+#include <qtn/topaz_tqe.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_fwt_db.h>
+#include <qtn/topaz_fwt_sw.h>
+#include <qtn/qtn_wowlan.h>
+
+#ifdef TOPAZ_AMBER_IP
+#include <qtn/topaz_amber.h>
+#endif
+
+#define QDRV_SKIP_ETH_HDR(_eh) ((_eh) + 1)
+
+typedef enum {
+	REPLACE_IP_MAC = 0,
+	SAVE_IP_MAC = 1
+} ip_mac_flag;
+
+struct arp_message {
+	uint16_t hw_type;
+	uint16_t pro_type;
+	uint8_t hw_size;
+	uint8_t pro_size;
+	uint16_t opcode;
+	uint8_t shost[ETHER_ADDR_LEN];
+	uint32_t sipaddr;
+	uint8_t thost[ETHER_ADDR_LEN];
+	uint32_t tipaddr;
+	uint8_t others[0];
+}__attribute__ ((packed));
+
+static struct host_rxdesc *prev_rxdesc = NULL;
+
+#if !(TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT)
+static struct sk_buff* __sram_text rx_alloc_skb(struct qdrv_wlan *qw, u8** skb_phy_addr, int trace)
+{
+	struct sk_buff *skb;
+	int alignment;
+	int cache_alignment = dma_get_cache_alignment();
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+
+	if (!skb_phy_addr) {
+		return NULL;
+	}
+
+	skb = qtn_skb_recycle_list_pop(recycle_list, &recycle_list->stats_qdrv);
+	if (skb) {
+		qw->tx_stats.tx_min_cl_cnt = skb_queue_len(&recycle_list->list);
+	} else {
+		skb = dev_alloc_skb(qtn_rx_buf_size());
+	}
+
+	if (skb == NULL) {
+		*skb_phy_addr = NULL;
+		return NULL;
+	}
+
+	skb->recycle_list = recycle_list;
+
+	/* skb->data should be cache aligned - do calculation here to be sure. */
+	alignment = (unsigned int)(skb->data) & (cache_alignment - 1);
+	if (alignment) {
+		skb_reserve(skb, cache_alignment - alignment);
+	}
+
+	*skb_phy_addr = (u8 *) cache_op_before_rx(skb->data,
+					rx_buf_map_size(), skb->cache_is_cleaned);
+	skb->cache_is_cleaned = 0;
+	if (!*skb_phy_addr) {
+		dev_kfree_skb(skb);
+		return NULL;
+	}
+
+	trace_skb_perf_start(skb, trace);
+	trace_skb_perf_stamp_call(skb);
+
+	return skb;
+}
+#endif
+
+static int rxdesc_alloc_buffer(struct qdrv_wlan *qw, struct host_rxdesc *rxdesc)
+{
+#if TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT
+	rxdesc->rd_buffer = NULL;	/* buffer is set by MuC or AuC from hbm pool */
+	return 0;
+#else
+	void *skb = rx_alloc_skb(qw, &rxdesc->rd_buffer, 1);
+	rxdesc->skbuff = skb;
+	return (skb == NULL);
+#endif
+}
+
+static struct host_rxdesc *rx_alloc_desc(struct qdrv_wlan *qw,
+	struct dma_pool *rxdesc_cache, struct host_rxdesc **rd_phys)
+{
+	struct host_rxdesc *rd = (struct host_rxdesc *)
+		dma_pool_alloc(rxdesc_cache, GFP_KERNEL | GFP_DMA, (dma_addr_t*)rd_phys);
+
+	if (!rd) {
+		return NULL;
+	}
+
+	memset(rd, 0, sizeof(*rd));
+
+	rd->rd_va = rd;
+	rd->rd_pa = *rd_phys;
+
+	if (rxdesc_alloc_buffer(qw, rd) != 0) {
+		dma_pool_free(rxdesc_cache, rd, (u32)(*rd_phys));
+		rd = NULL;
+	}
+
+	return rd;
+}
+
+static void rx_fifo_destroy(struct host_fifo_if *hfif)
+{
+	int i;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	prev_rxdesc = NULL;
+
+	for(i = 0; i < hfif->ring_size; i++ ) {
+
+		struct host_rxdesc *rxdesc = hfif->descp[i];
+		if (rxdesc == NULL) {
+			continue;
+		}
+
+		DBGPRINTF(DBG_LL_TRIAL, QDRV_LF_PKT_RX,
+			"i=%d rdesc=%p, skb=%p, %p, stat=%x\n",
+			i, rxdesc, rxdesc->rd_buffer, rxdesc->skbuff,
+			rxdesc->rs_statword);
+
+		dev_kfree_skb((struct sk_buff *)rxdesc->skbuff);
+		dma_pool_free(hfif->df_rxdesc_cache, rxdesc,
+			(dma_addr_t)rxdesc->rd_pa);
+	}
+	dma_pool_destroy(hfif->df_rxdesc_cache);
+	kfree(hfif->descp);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+static void destroy_rx_ring(struct host_rxif *rxif)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	rx_fifo_destroy(&rxif->rx);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+static __sram_text void qdrv_rx_irq(void *arg1, void *arg2)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) arg1;
+	struct qdrv_mac *mac = (struct qdrv_mac *) arg2;
+	struct qdrv_vap *qv;
+	struct net_device *active_dev = NULL;
+	int i;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	RXSTAT(qw, rx_irq);
+
+	/* FIXME Figure out which VAP device has the traffic */
+	/* Use the first active vap for now */
+
+	for (i = 0; i < QDRV_MAX_VAPS; ++i) {
+		if (mac->vnet[i] && (mac->vnet[i]->flags & IFF_UP)) {
+			active_dev = mac->vnet[i];
+			break;
+		}
+	}
+
+	if (active_dev != NULL) {
+		qdrv_mac_disable_irq(mac, qw->rxirq);
+		DBGPRINTF(DBG_LL_TRIAL, QDRV_LF_TRACE,
+			"Schedule \"%s\" for RX\n", active_dev->name);
+		RXSTAT(qw, rx_irq_schedule);
+		RXSTAT_SET(qw, rx_poll_stopped, 0);
+
+		qv = netdev_priv(active_dev);
+		napi_schedule(&qv->napi);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+static __sram_text int qdrv_rx_decap_data_ratelimit(struct qdrv_wlan *qw)
+{
+	/*
+	 * Rate limit checks - ensure we don't blindly process all of the
+	 * possibly malicious data frames
+	 */
+	if (!qw->unknown_dp_jiffies) {
+		qw->unknown_dp_jiffies = jiffies;
+	}
+	qw->unknown_dp_count++;
+	if (qw->unknown_dp_count > MAX_UNKNOWN_DP_PER_SECOND) {
+		if (time_after(jiffies, qw->unknown_dp_jiffies) &&
+				time_before(jiffies, (qw->unknown_dp_jiffies + HZ))) {
+			return 0;
+		}
+		qw->unknown_dp_jiffies = jiffies;
+		qw->unknown_dp_count = 1;
+	}
+
+	return 1;
+}
+
+static int qdrv_rx_is_br_isolate(struct qdrv_wlan *qw, struct sk_buff *skb)
+{
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+	uint16_t vlanid;
+
+	if ((qw->br_isolate & QDRV_BR_ISOLATE_VLAN)
+			&& (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q))) {
+		if (qw->br_isolate_vid == QVLAN_VID_ALL)
+			return 1;
+
+		vlanid = ntohs(veth->h_vlan_TCI) & VLAN_VID_MASK;
+		if (qw->br_isolate_vid == vlanid)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Determines whether a frame should be accepted, based on information
+ * about the frame's origin and encryption, and policy for this vap.
+ */
+static __sram_text int qdrv_accept_data_frame(struct qdrv_wlan *qw, struct ieee80211vap *vap,
+	struct ieee80211_node *ni, const struct ieee80211_qosframe_addr4 *wh,
+	struct sk_buff *skb, __be16 ether_type)
+{
+	char *err = NULL;
+	struct ether_header *eh = (struct ether_header *)skb->data;
+	int key = (wh != NULL) ? (wh->i_fc[1] & IEEE80211_FC1_PROT) : 0;
+
+	/*
+	 * Data frames from unknown nodes should not make it here, but just in case...
+	 */
+	if (!ni || ((vap->iv_opmode == IEEE80211_M_HOSTAP) && (skb->src_port == 0))) {
+		vap->iv_stats.is_rx_unauth++;
+		vap->iv_devstats.rx_errors++;
+		err = "unknown node";
+	} else if (qdrv_mac_reserved(eh->ether_shost)) {
+		err = "reserved mac";
+	} else if (ether_type == __constant_htons(ETH_P_PAE)) {
+		/* encrypted eapol is always OK */
+		if (key)
+			return 1;
+		/* cleartext eapol is OK if we don't have pairwise keys yet */
+		if (vap->iv_nw_keys[0].wk_cipher == &ieee80211_cipher_none)
+			return 1;
+		/* cleartext eapol is OK if configured to allow it */
+		if (!IEEE80211_VAP_DROPUNENC_EAPOL(vap))
+			return 1;
+		/* cleartext eapol is OK if other unencrypted is OK */
+		if (!(vap->iv_flags & IEEE80211_F_DROPUNENC))
+			return 1;
+
+		/* not OK */
+		vap->iv_stats.is_rx_unauth++;
+		vap->iv_devstats.rx_errors++;
+		IEEE80211_NODE_STAT(ni, rx_unauth);
+		IEEE80211_NODE_STAT(ni, rx_errors);
+		err = "invalid EAP message";
+	} else if (!ieee80211_node_is_authorized(ni)) {
+		/*
+		 * Deny non-PAE frames received prior to authorization.  For
+		 * open/shared-key authentication the port is marked authorized after
+		 * authentication completes.  For 802.1X the port is not marked
+		 * authorized by the authenticator until the handshake has completed.
+		 */
+		vap->iv_stats.is_rx_unauth++;
+		vap->iv_devstats.rx_errors++;
+		IEEE80211_NODE_STAT(ni, rx_unauth);
+		IEEE80211_NODE_STAT(ni, rx_errors);
+		err = "node not authorized";
+	} else if (!key &&
+		(vap->iv_flags & IEEE80211_F_PRIVACY) &&
+		(vap->iv_flags & IEEE80211_F_DROPUNENC)) {
+
+		/*
+		 * Frame received from external L2 filter will not have
+		 * MAC header. So protection bit will be zero.
+		 */
+		if (g_l2_ext_filter && skb->ext_l2_filter)
+			return 1;
+
+		/* Deny non-PAE frames received without encryption */
+		IEEE80211_NODE_STAT(ni, rx_unencrypted);
+		err = "not encrypted";
+	} else if (qdrv_rx_is_br_isolate(qw, skb)) {
+		err = "br isolate";
+	}
+
+	if (err) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+			"dropping frame - %s n=%u s=%pM d=%pM t=%04x\n",
+			err, IEEE80211_NODE_IDX_UNMAP(skb->src_port),
+			eh->ether_shost, eh->ether_dhost, ether_type);
+		return 0;
+	}
+
+	return 1;
+}
+
+static inline int __sram_text qdrv_rx_lncb_should_drop(struct net_device *dev,
+		struct ieee80211vap *vap, const struct ether_header *eh, void *p_iphdr)
+{
+	/*
+	 * AP will send mc/bc in 4 addr format packets, so drop this one if
+	 * it is to be sent reliably.
+	 */
+	if (vap->iv_bss && vap->iv_bss->ni_lncb_4addr &&
+			qdrv_wlan_is_4addr_mc(eh, p_iphdr, vap, 1) ) {
+		return 1;
+	}
+	return 0;
+}
+
+/*
+* This function returns:
+* 0 if DA remains untouched
+* 1 if DA is changed by the qdrv bridge
+*/
+static int __sram_text qdrv_rx_set_dest_mac(struct qdrv_wlan *qw, struct qdrv_vap *qv,
+		struct ether_header *eh, const struct sk_buff *skb)
+{
+	struct net_bridge_port *br_port = get_br_port(skb->dev);
+
+	if ((qv->iv.iv_flags_ext & IEEE80211_FEXT_WDS) ||
+			(IEEE80211_IS_MULTICAST(eh->ether_dhost))) {
+		return 0;
+	} else if (QDRV_FLAG_3ADDR_BRIDGE_ENABLED()) {
+		return !qdrv_br_set_dest_mac(&qw->bridge_table, eh, skb);
+	} else if (!br_fdb_get_attached_hook ||
+			!br_port ||
+			!br_fdb_get_attached_hook(br_port->br, eh->ether_dhost)) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE,
+			"destination mac %pM not updated from bridge\n",
+			eh->ether_dhost);
+		return 0;
+	}
+
+	return 0;
+}
+
+static void qdrv_bridge_set_dest_addr(struct sk_buff *skb, void *eh1)
+{
+	struct net_device *dev = skb->dev;
+	struct qdrv_vap *qv = netdev_priv(dev);
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) qv->parent;
+
+	qdrv_rx_set_dest_mac(qw, qv, (struct ether_header *)eh1, skb);
+}
+
+#ifdef CONFIG_QVSP
+static __always_inline int
+qdrv_rx_strm_check(struct sk_buff *skb, struct qdrv_vap *qv, struct ieee80211_node *ni,
+	struct ether_header *eh, u8 *data_start, int ac, int32_t tid)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) qv->parent;
+	struct iphdr *p_iphdr = (struct iphdr *)data_start;
+	uint32_t l2_header_len = data_start - skb->data;
+
+	if (qvsp_is_active(qw->qvsp) && ni &&
+			iputil_eth_is_ipv4or6(eh->ether_type) &&
+			(skb->len >= (l2_header_len + sizeof(struct udphdr)
+				+ iputil_hdrlen(p_iphdr, skb->len - l2_header_len))) &&
+			(!IEEE80211_IS_MULTICAST(eh->ether_dhost) ||
+				iputil_is_mc_data(eh, p_iphdr))) {
+		if (qvsp_strm_check_add(qw->qvsp, QVSP_IF_QDRV_RX, ni, skb, eh, p_iphdr,
+				skb->len - (data_start - skb->data), ac, tid)) {
+			return 1;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static void qdrv_rx_data_unauthed(struct ieee80211vap *vap, struct ieee80211_node *ni, unsigned char *mac)
+{
+	uint8_t reason;
+
+	if (vap->iv_state <= IEEE80211_S_AUTH) {
+		reason = IEEE80211_REASON_NOT_AUTHED;
+	} else {
+		reason = IEEE80211_REASON_NOT_ASSOCED;
+	}
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+			"send deauth to node "MACSTR" for rxing data when state=%d\n",
+			MAC2STR(mac), vap->iv_state);
+	IEEE80211_SEND_MGMT(ni,	IEEE80211_FC0_SUBTYPE_DEAUTH, reason);
+
+	/*
+	 * In case the current AP we are trying to associate with didn't clean up after the last session,
+	 * don't wait for wpa_supplicant's next trying command which is 10s later.
+	 * Give AP and STA some time to clean up and then try again quickly.
+	 */
+	if ((vap->iv_state >= IEEE80211_S_AUTH) &&
+		IEEE80211_ADDR_EQ(vap->iv_bss->ni_bssid, mac)) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+				"schedule fast rejoin bssid "MACSTR"\n",
+				MAC2STR(vap->iv_bss->ni_bssid));
+		IEEE80211_ADDR_COPY(vap->iv_sta_fast_rejoin_bssid, vap->iv_bss->ni_bssid);
+		mod_timer(&vap->iv_sta_fast_rejoin, jiffies + HZ);
+		ieee80211_new_state(vap, IEEE80211_S_INIT, IEEE80211_FC0_SUBTYPE_DEAUTH);
+	}
+}
+
+static  int __sram_text qdrv_rx_is_tdls_action_frame(struct sk_buff *skb, int hdrlen)
+{
+	const uint8_t snap_e_header_pref[] = {LLC_SNAP_LSAP, LLC_SNAP_LSAP, LLC_UI, 0x00, 0x00};
+	uint8_t *data = &skb->data[hdrlen];
+	uint16_t ether_type = get_unaligned((uint16_t*)&data[6]);
+	int32_t snap_encap_pref = !memcmp(data, snap_e_header_pref, sizeof(snap_e_header_pref));
+
+	return (snap_encap_pref && (ether_type == htons(ETHERTYPE_80211MGT)));
+}
+
+struct qdrv_rx_skb_list
+{
+	struct sk_buff_head skb_list;
+	struct sk_buff *skb_prev;
+	int budget;
+};
+
+static inline __sram_text void qdrv_rx_skb_list_init(struct qdrv_rx_skb_list *skb_rcv_list, int budget)
+{
+	__skb_queue_head_init(&skb_rcv_list->skb_list);
+	skb_rcv_list->skb_prev = NULL;
+	skb_rcv_list->budget = budget;
+}
+
+static inline __sram_text void qdrv_rx_skb_list_indicate(struct qdrv_rx_skb_list *skb_rcv_list, int last)
+{
+	if (last || (skb_queue_len(&skb_rcv_list->skb_list) >=
+			skb_rcv_list->budget)) {
+		struct sk_buff *skb, *skb_tmp;
+		skb_queue_walk_safe(&skb_rcv_list->skb_list, skb, skb_tmp) {
+			skb->next = NULL;
+			skb = switch_vlan_to_proto_stack(skb, 0);
+			if (skb)
+				netif_receive_skb(skb);
+		}
+		if (!last) {
+			qdrv_rx_skb_list_init(skb_rcv_list, skb_rcv_list->budget);
+		}
+	}
+}
+
+static inline __sram_text void qdrv_rx_skb_list_append(struct qdrv_rx_skb_list *skb_rcv_list, struct sk_buff *skb)
+{
+	if (unlikely(!skb_rcv_list->skb_prev)) {
+		__skb_queue_head(&skb_rcv_list->skb_list, skb);
+	} else {
+		__skb_append(skb_rcv_list->skb_prev, skb, &skb_rcv_list->skb_list);
+	}
+	skb_rcv_list->skb_prev = skb;
+}
+
+static int __sram_text handle_rx_msdu(struct qdrv_wlan *qw,
+		struct qdrv_vap *qv,
+		struct ieee80211_node *ni,
+		const struct ieee80211_qosframe_addr4 *wh,
+		struct sk_buff *skb,
+		bool check_3addr_br);
+
+struct qdrv_rx_decap_context {
+	struct qdrv_vap *qv;
+	struct ieee80211_node *ni;
+	struct sk_buff *skb;
+	uint32_t pseudo_rssi;
+	struct qdrv_rx_skb_list *skb_rcv_list;
+	struct ieee80211_qosframe_addr4 *wh_copy;
+	uint32_t skb_done;
+};
+
+#define STD_MAGIC_PAYLOAD_REPETITION	16
+#define STD_MAGIC_PAYLOAD_LEN		102
+void wowlan_encap_std_magic_pattern(uint8_t *match_pattern, uint8_t *addr)
+{
+	uint8_t br_addr[IEEE80211_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	uint8_t i;
+	uint8_t pos = 0;
+
+	IEEE80211_ADDR_COPY(match_pattern, br_addr);
+	pos += IEEE80211_ADDR_LEN;
+
+	for (i = 0; i < STD_MAGIC_PAYLOAD_REPETITION; i++) {
+		IEEE80211_ADDR_COPY(&match_pattern[pos], addr);
+		pos += IEEE80211_ADDR_LEN;
+	}
+}
+
+static int wowlan_magic_match(const void *data, const uint16_t length,
+		const void *magic_pattern, const uint8_t magic_len)
+{
+	uint8_t *recv;
+	const uint8_t *match = magic_pattern;
+	const struct ether_header *eh = data;
+	const uint16_t *ether_type = &eh->ether_type;
+	int i = 0;
+	uint16_t len = length;
+
+	if (len < sizeof(struct ether_header)) {
+		return 0;
+	}
+
+	while(qtn_ether_type_is_vlan(*ether_type)) {
+		if (len < sizeof(struct ether_header) + VLAN_HLEN) {
+			return 0;
+		}
+		ether_type += VLAN_HLEN / sizeof(*ether_type);
+		len -= VLAN_HLEN;
+	}
+
+	recv = (void *)(ether_type + 1);
+	len -= sizeof(struct ether_header);
+
+	while (len >= magic_len) {
+		while (recv[i] == match[i]) {
+			if (++i == magic_len)
+				break;
+		}
+
+		if (i == magic_len) {
+			return 1;
+		}
+
+		i = 0;
+		len--;
+		recv++;
+	}
+
+	return 0;
+}
+
+void wowlan_wakeup_host(void)
+{
+#ifndef TOPAZ_AMBER_IP
+	gpio_wowlan_output(WOWLAN_GPIO_OUTPUT_PIN, 1);
+	udelay(10000);
+	gpio_wowlan_output(WOWLAN_GPIO_OUTPUT_PIN, 0);
+#else
+	/*
+	 * In Amber WOWLAN is handled by WIFI2SOC interrupt.
+	 */
+	amber_trigger_wifi2soc_interrupt(TOPAZ_AMBER_WIFI2SOC_WAKE_ON_WLAN);
+#endif
+}
+
+int wowlan_magic_process(struct sk_buff *skb, struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	void *l3hdr = (eh + 1);
+	uint16_t ether_type = QTN_SKB_CB_ETHERTYPE(skb);
+	uint8_t match_pattern[MAX_USER_DEFINED_MAGIC_LEN];
+	uint8_t wake_flag = 0;
+
+	if (!wowlan_is_magic_packet(ether_type, eh, l3hdr,
+		ic->ic_wowlan.wowlan_match,
+		ic->ic_wowlan.L2_ether_type,
+		ic->ic_wowlan.L3_udp_port)) {
+		return 0;
+	}
+
+	memset(match_pattern, 0, sizeof(match_pattern));
+
+	if (ic->ic_wowlan.pattern.len != 0)
+		wake_flag = wowlan_magic_match(skb->data, skb->len, ic->ic_wowlan.pattern.magic_pattern, ic->ic_wowlan.pattern.len);
+	else {
+		wowlan_encap_std_magic_pattern(match_pattern, vap->iv_ic->soc_addr);
+		wake_flag = wowlan_magic_match(skb->data, skb->len, match_pattern, STD_MAGIC_PAYLOAD_LEN);
+		wowlan_encap_std_magic_pattern(match_pattern, vap->iv_bss->ni_bssid);
+		wake_flag |= wowlan_magic_match(skb->data, skb->len, match_pattern, STD_MAGIC_PAYLOAD_LEN);
+	}
+
+	if (wake_flag) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,
+				"%s WoWLAN: Wake up host\n", __func__);
+		wowlan_wakeup_host();
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int qdrv_rx_should_check_3addr_br(struct qdrv_rx_decap_context *ctx)
+{
+	struct ieee80211_node *ni = ctx->ni;
+	struct ieee80211vap *vap = &ctx->qv->iv;
+
+	return (vap->iv_opmode == IEEE80211_M_STA &&
+			(!ieee80211_node_is_qtn(ni) ||
+			!(vap->iv_flags_ext & IEEE80211_FEXT_WDS)));
+}
+
+static inline int qdrv_rx_vlan_ingress(struct qdrv_vap *qv, struct qdrv_node *qn, struct qtn_rx_decap_info *di,
+		struct sk_buff *skb)
+{
+	struct qtn_vlan_dev *vdev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+
+	if (!vlan_enabled)
+		return 1;
+
+	M_FLAG_SET(skb, M_VLAN_TAGGED);
+
+	return qtn_vlan_ingress(vdev, IEEE80211_NODE_IDX_UNMAP(qn->qn_node.ni_node_idx),
+		di->start, 1, di->vlan_tci, 1);
+}
+
+static int __sram_text qdrv_rx_decap_callback(struct qtn_rx_decap_info *di, void *_ctx)
+{
+	struct qdrv_rx_decap_context *ctx = _ctx;
+	struct qdrv_vap *qv = ctx->qv;
+	struct qdrv_wlan *qw = qv->parent;
+	struct ieee80211_node *ni = ctx->ni;
+	struct qdrv_node *qn = container_of(ni, struct qdrv_node, qn_node);
+	struct sk_buff *skb;
+
+	if (unlikely(!di->decapped)) {
+		printk(KERN_ERR "%s: not decapped\n", __FUNCTION__);
+		return -1;
+	}
+
+	if (di->l3_ether_type == htons(ETHERTYPE_8021Q))
+		ni->ni_stats.ns_rx_vlan_pkts++;
+
+	di->check_3addr_br = qdrv_rx_should_check_3addr_br(ctx);
+
+	memcpy(di->start, &di->eh, qtn_rx_decap_newhdr_size(di));
+
+	if (di->last_msdu) {
+		skb = ctx->skb;
+		ctx->skb_done = 1;
+	} else {
+		skb = skb_clone(ctx->skb, GFP_ATOMIC);
+		if (unlikely(skb == NULL)) {
+			printk(KERN_ERR "%s: null skb\n", __FUNCTION__);
+			return 0;
+		}
+	}
+
+	skb->data = di->start;
+	skb->len = 0;
+	skb_reset_network_header(skb);
+	skb_reset_tail_pointer(skb);
+	skb_put(skb, di->len);
+	skb->dev = qv->ndev;
+	skb->is_recyclable = 1;
+	skb->vlan_tci = di->vlan_tci;
+	QTN_SKB_CB_ETHERTYPE(skb) = di->l3_ether_type;
+
+	if (!qdrv_rx_vlan_ingress(qv, qn, di, skb)) {
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	if (handle_rx_msdu(qw, qv, ni, ctx->wh_copy, skb, di->check_3addr_br) == 0) {
+		qdrv_rx_skb_list_append(ctx->skb_rcv_list, skb);
+		qdrv_rx_skb_list_indicate(ctx->skb_rcv_list, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * check the data frame is valid or not.
+ */
+static int __sram_text qdrv_rx_data_frm_check(struct ieee80211vap *vap, uint8_t dir,
+	struct ieee80211_qosframe_addr4 *pwh)
+{
+	int ret = 1;
+	uint8_t *bssid;
+
+	switch (vap->iv_opmode) {
+	case IEEE80211_M_HOSTAP:
+		if (unlikely((dir == IEEE80211_FC1_DIR_FROMDS) ||
+			(dir == IEEE80211_FC1_DIR_NODS))) {
+			ret = 0;
+		} else if (dir == IEEE80211_FC1_DIR_TODS) {
+			bssid = pwh->i_addr1;
+
+			if (!IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) &&
+					!ieee80211_is_bcst(bssid))
+				ret = 0;
+		}
+		break;
+	case IEEE80211_M_STA:
+		if (unlikely(dir == IEEE80211_FC1_DIR_TODS))
+			ret = 0;
+		break;
+	case IEEE80211_M_WDS:
+		if (unlikely(dir != IEEE80211_FC1_DIR_DSTODS))
+			ret = 0;
+		break;
+	case IEEE80211_M_IBSS:
+	case IEEE80211_M_AHDEMO:
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static void qdrv_rx_reject(struct qdrv_wlan *qw, struct ieee80211vap *vap,
+				struct ieee80211_qosframe_addr4 *wh_copy)
+{
+	struct ieee80211_node *ni;
+	uint8_t dir = wh_copy->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+
+	if ((dir != IEEE80211_FC1_DIR_NODS) && qdrv_rx_decap_data_ratelimit(qw)) {
+		ni = _ieee80211_tmp_node(vap, wh_copy->i_addr2, wh_copy->i_addr2);
+		if (ni) {
+			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+						IEEE80211_REASON_NOT_AUTHED);
+			ieee80211_free_node(ni);
+		}
+	}
+
+	RXSTAT(qw, rx_poll_vap_err);
+	vap->iv_stats.is_rx_unauth++;
+	vap->iv_devstats.rx_errors++;
+}
+
+/*
+ * Decap the incoming frame.
+ * Returns 0 if the frame has been consumed, else 1.
+ */
+static int __sram_text qdrv_rx_decap(struct qdrv_vap *qv, struct sk_buff *skb,
+		struct qdrv_rx_skb_list *skb_rcv_list, uint32_t pseudo_rssi)
+{
+	uint8_t dir;
+	uint8_t type;
+	uint8_t subtype;
+	uint8_t frag;
+	struct qdrv_wlan *qw = qv->parent;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap = &qv->iv;
+	struct ieee80211_node *ni = NULL;
+	int node_reference_held = 0;
+	int more_data = 0;
+	struct qdrv_rx_decap_context ctx;
+	struct ieee80211_qosframe_addr4 wh_copy;
+#if !TOPAZ_RX_ACCELERATE
+	int hdrlen;
+#endif
+	struct qtn_vlan_dev *vdev;
+	uint16_t def_vlan_tci;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	memcpy(&wh_copy, skb->data, sizeof(wh_copy));
+	dir = wh_copy.i_fc[1] & IEEE80211_FC1_DIR_MASK;
+	type = wh_copy.i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+	subtype = wh_copy.i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+	    (ic->ic_flags & IEEE80211_F_SCAN) &&
+#ifdef QTN_BG_SCAN
+	    !(ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) &&
+#endif /* QTN_BG_SCAN */
+	    (subtype != IEEE80211_FC0_SUBTYPE_BEACON)) {
+		dev_kfree_skb(skb);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return 0;
+	}
+
+	if ((type != IEEE80211_FC0_TYPE_MGT) ||
+		(subtype != IEEE80211_FC0_SUBTYPE_BEACON)) {
+		RXSTAT(qw, rx_non_beacon);
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_PKT_RX,
+			"a1=%pM a2=%pM a3=%pM"
+			" dir=0x%01x type=0x%01x subtype=0x%01x\n",
+			wh_copy.i_addr1, wh_copy.i_addr2, wh_copy.i_addr3,
+			dir, type >> 2, subtype >> 4);
+	} else {
+		RXSTAT(qw, rx_beacon);
+	}
+
+	if (IFF_DUMPPKTS_RECV(&wh_copy, DBG_LOG_FUNC)) {
+		ieee80211_dump_pkt(ic, skb->data,
+			(skb->len > g_dbg_dump_pkt_len) ? g_dbg_dump_pkt_len : skb->len,
+			-1, pseudo_rssi);
+	}
+
+	if (skb->src_port > 0) {
+		ni = ieee80211_find_node_by_node_idx(vap, skb->src_port);
+		if (ni) {
+			node_reference_held = 1;
+			/*
+			 * FIXME
+			 * skb->src_port may be wrong.
+			 * For WLAN_OVER_WDS_P2P test, AP may think that Beacon frames sent by its
+			 * WDS peers to be from associated STAs by mistake.
+			 * Dig into this issue deeply later.
+			 */
+			if (!IEEE80211_ADDR_EQ(ni->ni_macaddr, wh_copy.i_addr2) &&
+					(type != IEEE80211_FC0_TYPE_CTL)) {
+				ieee80211_check_free_node(node_reference_held, ni);
+				node_reference_held = 0;
+				ni = NULL;
+			}
+		}
+	}
+
+	if (unlikely(ni && (type == IEEE80211_FC0_TYPE_DATA) &&
+			!qdrv_rx_data_frm_check(vap, dir, &wh_copy))) {
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_PKT_RX, "ignoring a1=%pM "
+			"a2=%pM a3=%pM opmode=0x%01x dir=0x%01x type=0x%01x"
+			"subtype=0x%01x\n", wh_copy.i_addr1, wh_copy.i_addr2,
+			wh_copy.i_addr3,vap->iv_opmode, dir, type >> 2, subtype >> 4);
+		dev_kfree_skb(skb);
+		ieee80211_check_free_node(node_reference_held, ni);
+		return 0;
+	}
+
+	if (!ni) {
+		ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *) &wh_copy);
+		if (ni) {
+			node_reference_held = 1;
+		} else if (vap->iv_opmode == IEEE80211_M_STA) {
+			if (type == IEEE80211_FC0_TYPE_DATA) {
+				qdrv_rx_reject(qw, vap, &wh_copy);
+				dev_kfree_skb(skb);
+				return 0;
+			} else if (type == IEEE80211_FC0_TYPE_MGT) {
+				if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR)
+					ni = vap->iv_bss;
+			}
+		}
+	}
+
+	/* Pass up some data packets from unknown stations, so a deauth can be sent */
+	if (!ni && (type == IEEE80211_FC0_TYPE_DATA) &&
+	    (qv->iv.iv_opmode == IEEE80211_M_HOSTAP)) {
+		RXSTAT(qw, rx_data_no_node);
+
+		if (qdrv_rx_decap_data_ratelimit(qw)) {
+			RXSTAT(qw, rx_input_all);
+			type = ieee80211_input_all(ic, skb, 0, 0);
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return 0;
+		}
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	if (ni) {
+		ni->ni_last_rx = jiffies;
+
+		/* Silently drop frames from blacklisted stations */
+		if (ni->ni_blacklist_timeout > 0) {
+			vap->iv_devstats.rx_dropped++;
+			IEEE80211_NODE_STAT(ni, rx_dropped);
+			dev_kfree_skb(skb);
+			RXSTAT(qw, rx_blacklist);
+			ieee80211_check_free_node(node_reference_held, ni);
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return 0;
+		}
+
+		/*
+		 * Store the RSSI value previously calculated.
+		 */
+		if (!((vap->iv_opmode == IEEE80211_M_HOSTAP)
+			&& (type == IEEE80211_FC0_TYPE_MGT)
+			&& ((subtype == IEEE80211_FC0_SUBTYPE_BEACON) || (subtype == IEEE80211_FC0_SUBTYPE_PROBE_REQ)))) {
+			ni->ni_rssi = (u_int8_t)pseudo_rssi;
+		}
+
+		if (type == IEEE80211_FC0_TYPE_DATA) {
+			/* if the node is unauthorized, a deauth should be sent by the sta*/
+			if (unlikely(vap->iv_state < IEEE80211_S_RUN) &&
+			    vap->iv_opmode == IEEE80211_M_STA) {
+
+				if (qdrv_rx_decap_data_ratelimit(qw)) {
+					qdrv_rx_data_unauthed(vap, ni, wh_copy.i_addr2);
+				}
+				dev_kfree_skb(skb);
+				RXSTAT(qw, rx_poll_vap_err);
+				IEEE80211_NODE_STAT(ni, rx_unauth);
+				IEEE80211_NODE_STAT(ni, rx_errors);
+				vap->iv_stats.is_rx_unauth++;
+				vap->iv_devstats.rx_errors++;
+				ieee80211_check_free_node(node_reference_held, ni);
+				return 0;
+			}
+
+			ni->ni_stats.ns_rx_data++;
+			ni->ni_stats.ns_rx_bytes += skb->len;
+		}
+	}
+
+	frag = ((type != IEEE80211_FC0_TYPE_CTL) &&
+		((wh_copy.i_fc[1] & IEEE80211_FC1_MORE_FRAG) ||
+		(le16_to_cpu(*(__le16 *) wh_copy.i_seq) & IEEE80211_SEQ_FRAG_MASK)));
+
+	/* Pass up non-data, fragmented data for reassembly, or data from unknown nodes */
+	if ((type != IEEE80211_FC0_TYPE_DATA) || frag ||
+			((type == IEEE80211_FC0_TYPE_DATA) && !ni)) {
+		if (frag) {
+			RXSTAT(qw, rx_frag);
+		}
+		if ((type == IEEE80211_FC0_TYPE_DATA) && !ni) {
+			/* Rate limit these */
+			if (!qdrv_rx_decap_data_ratelimit(qw)) {
+				ieee80211_check_free_node(node_reference_held, ni);
+				dev_kfree_skb(skb);
+				return 0;
+			}
+		}
+		if ((ni == NULL) ||
+				((vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+				(type == IEEE80211_FC0_TYPE_MGT) &&
+				(subtype == IEEE80211_FC0_SUBTYPE_PROBE_REQ))) {
+			RXSTAT(qw, rx_input_all);
+			type = ieee80211_input_all(ic, skb, pseudo_rssi, 0);
+			if (ni)
+				ieee80211_check_free_node(node_reference_held, ni);
+		} else {
+			RXSTAT(qw, rx_input_node);
+			type = ieee80211_input(ni, skb, pseudo_rssi, 0);
+			ieee80211_check_free_node(node_reference_held, ni);
+		}
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return 0;
+	}
+
+	/* check tdls */
+	if ((vap->iv_opmode == IEEE80211_M_STA) &&
+			qdrv_rx_is_tdls_action_frame(skb, ieee80211_hdrspace(ic, &wh_copy))) {
+		RXSTAT(qw, rx_input_node);
+		ieee80211_input(ni, skb, pseudo_rssi, 0);
+		ieee80211_check_free_node(node_reference_held, ni);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return 0;
+	}
+
+	KASSERT(ni != NULL, ("Node must exist here"));
+
+	trace_ippkt_check(skb->data, skb->len, TRACE_IPPKT_LOC_WLAN_RX);
+	RXSTAT(qw, rx_packets);
+
+	if ((vap->iv_opmode == IEEE80211_M_STA) &&
+			(vap->iv_state == IEEE80211_S_RUN) && (ni == vap->iv_bss)) {
+		more_data = wh_copy.i_fc[1] & IEEE80211_FC1_MORE_DATA;
+		if (unlikely((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
+				vap->iv_ap_buffered && more_data)) {
+			ni->ni_flags |= IEEE80211_NODE_PS_DELIVERING;
+			ieee80211_send_pspoll(ni);
+		} else {
+			ni->ni_flags &= ~IEEE80211_NODE_PS_DELIVERING;
+		}
+	}
+
+	vdev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+	if (QVLAN_IS_DYNAMIC(vdev)) {
+		def_vlan_tci = vdev->u.node_vlan[IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx)];
+	} else {
+		def_vlan_tci = vdev->pvid;
+	}
+	def_vlan_tci |= (vdev->priority << QVLAN_PKT_PRIORITY_SHIFT);
+
+	ctx.qv = qv;
+	ctx.ni = ni;
+	ctx.skb = skb;
+	ctx.pseudo_rssi = pseudo_rssi;
+	ctx.skb_rcv_list = skb_rcv_list;
+	ctx.wh_copy = &wh_copy;
+	ctx.skb_done = 0;
+
+	if (qtn_rx_decap(&wh_copy, skb->data, skb->len, def_vlan_tci, &qtn_vlan_info, vlan_enabled,
+				&qdrv_rx_decap_callback, &ctx, NULL) == QTN_RX_DECAP_TRAINING) {
+		RXSTAT(qw, rx_rate_train_invalid);
+	}
+	if (ctx.skb_done == 0) {
+		dev_kfree_skb(skb);
+	}
+
+	ieee80211_check_free_node(node_reference_held, ni);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 1;
+}
+
+/*
+ * Initialise the host RX descriptor FIFO shared with the MuC.
+ *
+ * Allocates the host-side array of descriptor pointers (hfif->descp), a DMA
+ * pool for the descriptors themselves, then fills the ring: each descriptor
+ * is chained to the previous one via its DMA address (rd_next), and the DMA
+ * address of the first descriptor is published in fifo_desc->df_fifo.
+ *
+ * NOTE: prev_rxdesc is not declared in this function — it appears to be
+ * file-scope state shared with the poll loop, so this chains onto whatever
+ * descriptor was linked last (TODO confirm intended across multiple calls).
+ *
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ * NOTE(review): if rx_alloc_desc() fails mid-loop, hfif->descp and the DMA
+ * pool (plus already-allocated descriptors) are not released here — confirm
+ * the caller tears them down on error.
+ */
+static int rx_fifo_init(struct qdrv_wlan *qw, struct host_fifo_if *hfif)
+{
+	struct host_descfifo *fifo_desc;
+	int i;
+
+	if (hfif == NULL) {
+		DBGPRINTF_E("FIFO input is empty\n");
+		return -ENOMEM;
+	}
+
+	if (hfif->fifo == NULL) {
+		DBGPRINTF_E("FIFO information is empty\n");
+		return -ENOMEM;
+	}
+
+	fifo_desc = hfif->fifo;
+	/* Tell the MuC every element of the ring is available */
+	fifo_desc->df_numelems = fifo_desc->df_size;
+
+	hfif->ring_size = fifo_desc->df_size;
+	hfif->descp = kzalloc(sizeof(hfif->descp[0]) * hfif->ring_size, GFP_KERNEL);
+	if (hfif->descp == NULL) {
+		return -ENOMEM;
+	}
+	hfif->pending = NULL;
+	/* 8-byte aligned pool of host_rxdesc, no boundary restriction */
+	hfif->df_rxdesc_cache = dma_pool_create("rxdesc", NULL,
+					sizeof(struct host_rxdesc), 8, 0);
+	if (hfif->df_rxdesc_cache == NULL) {
+		kfree(hfif->descp);
+		printk("create rxdesc pool error!\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Set pointers in pointer array to the descriptors in the
+	 * descriptor array
+	 */
+	for (i = 0; i < hfif->ring_size; i++ ) {
+		/* rxdesc: host virtual address; rd_dma: device (DMA) address */
+		struct host_rxdesc *rxdesc, *rd_dma;
+
+		rxdesc = rx_alloc_desc(qw, hfif->df_rxdesc_cache, &rd_dma);
+		if (rxdesc == NULL) {
+			DBGPRINTF_E("Unable to allocate descriptor\n");
+			return -ENOMEM;
+		}
+
+		/* Link by DMA address; the first descriptor starts the FIFO */
+		if (prev_rxdesc) {
+			prev_rxdesc->rd_next = rd_dma;
+		} else {
+			fifo_desc->df_fifo = rd_dma;
+		}
+		prev_rxdesc = rxdesc;
+
+		hfif->descp[i] = rxdesc;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_PKT_RX,
+		"unit %d hfif %p ringsize %d\n",
+		qw->unit, hfif, hfif->ring_size);
+
+	return 0;
+}
+
+/*
+ * Decide whether a multicast/broadcast frame must be flooded back to the
+ * BSS based on the bridge's IGMP snooping state.
+ *
+ * Returns 1 when the frame is broadcast, or when it is multicast and IGMP
+ * snooping is disabled (or the device is not attached to a bridge);
+ * returns 0 otherwise.
+ */
+static inline int qdrv_rx_check_bridge(struct net_device *dev, struct sk_buff *skb,
+					uint8_t *addr, struct iphdr *p_iphdr)
+{
+	struct net_bridge_port *port = get_br_port(dev);
+	unsigned char snoop_state = BR_IGMP_SNOOP_DISABLED;
+
+	/* Broadcast always goes back to the BSS */
+	if (IEEE80211_ADDR_BCAST(addr))
+		return 1;
+
+	if (likely(port && port->br))
+		snoop_state = port->br->igmp_snoop_enabled;
+
+	/* Without snooping, multicast must be flooded as well */
+	return (snoop_state == BR_IGMP_SNOOP_DISABLED &&
+			IEEE80211_IS_MULTICAST(addr)) ? 1 : 0;
+}
+
+/*
+ * Decide whether an AP should retransmit a received multicast frame back
+ * into its own BSS.
+ *
+ * Returns 1 to send back to the BSS, 0 to suppress. When bridge support is
+ * compiled out, the function unconditionally returns 1. IGMP/MLD membership
+ * reports are kept for the bridge (return 0) so snooping can learn from
+ * them; other group-management traffic is echoed to the BSS.
+ */
+static inline int qdrv_rx_should_send_to_bss(struct net_device *dev, struct ieee80211vap *vap,
+		struct sk_buff *skb)
+{
+#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE))
+	u16 ether_type;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	u8 *data_start = qdrv_sch_find_data_start(skb, eh, &ether_type);
+	struct iphdr *p_iphdr = (struct iphdr *)data_start;
+	struct igmphdr *igmp_p;
+	struct qdrv_vap *qv = netdev_priv(dev);
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)qv->parent;
+	uint8_t *addr = eh->ether_dhost;
+#ifdef CONFIG_IPV6
+	uint32_t data_len;
+	struct ipv6hdr *ip6hdr_p;
+	uint8_t nexthdr;
+	struct icmp6hdr *icmp6hdr;
+	int nhdr_off;
+#endif
+
+	/* drop if Intra BSS isolation enabled */
+	if (br_get_ap_isolate() || QTN_FLAG_IS_INTRA_BSS(skb->dev->qtn_flags))
+		return 0;
+
+	/* If enabled, always send back LNCB and SSDP packets to the BSS */
+	if (vap->iv_ap_fwd_lncb) {
+		if (iputil_is_lncb((uint8_t *)eh, p_iphdr) ||
+				iputil_is_ssdp(eh->ether_dhost, p_iphdr)) {
+			return 1;
+		}
+	}
+
+	/*
+	 * Deliver the IGMP frames to linux bridge module
+	 * Send non-snooped multicast back to the BSS
+	 */
+	if (iputil_eth_is_v4_multicast(eh)) {
+		if (p_iphdr->protocol == IPPROTO_IGMP) {
+			RXSTAT(qw, rx_igmp);
+			igmp_p = iputil_igmp_hdr(p_iphdr);
+
+			DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_PKT_RX,
+					"RX IGMP: type 0x%x src=%pM dst=%pM\n",
+					igmp_p->type, eh->ether_shost, eh->ether_dhost);
+			/* Membership reports stay with the bridge (snooping) */
+			if (igmp_p->type == IGMP_HOST_MEMBERSHIP_REPORT ||
+					igmp_p->type == IGMPV2_HOST_MEMBERSHIP_REPORT) {
+				return 0;
+			}
+
+			return 1;
+		}
+
+		return qdrv_rx_check_bridge(dev, skb, addr, p_iphdr);
+#ifdef CONFIG_IPV6
+	} else if (iputil_eth_is_v6_multicast(eh)) {
+		/* NOTE(review): data_len is computed but never used below */
+		data_len = skb->len - (data_start - skb->data);
+		ip6hdr_p = (struct ipv6hdr *)data_start;
+		/*
+		 * Skip IPv6 extension headers to find the transport protocol.
+		 * NOTE(review): nexthdr is filled in by iputil_v6_skip_exthdr();
+		 * confirm it is always assigned on its error paths.
+		 */
+		nhdr_off = iputil_v6_skip_exthdr(ip6hdr_p, sizeof(struct ipv6hdr),
+			&nexthdr, (skb->len - ((uint8_t *)ip6hdr_p - skb->data)), NULL, NULL);
+		 if (nexthdr == IPPROTO_ICMPV6) {
+			 icmp6hdr = (struct icmp6hdr*)(data_start + nhdr_off);
+			 if (icmp6hdr->icmp6_type == ICMPV6_MGM_REPORT) {
+				 RXSTAT(qw, rx_igmp);
+				 DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_PKT_RX,
+						 "RX MLD: type 0x%x src=%pM dst=%pM\n",
+						 icmp6hdr->icmp6_type, eh->ether_shost, eh->ether_dhost);
+				 return 0;
+			 } else if (icmp6hdr->icmp6_type == ICMPV6_MGM_QUERY ||
+					 icmp6hdr->icmp6_type == ICMPV6_MLD2_REPORT ||
+					 icmp6hdr->icmp6_type == ICMPV6_MGM_REDUCTION) {
+				 DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_PKT_RX,
+						 "RX MLD: type 0x%x src=%pM dst=%pM\n",
+						 icmp6hdr->icmp6_type, eh->ether_shost, eh->ether_dhost);
+				 RXSTAT(qw, rx_igmp);
+			 }
+
+			 return 1;
+		}
+
+		return qdrv_rx_check_bridge(dev, skb, addr, p_iphdr);
+#endif
+	}
+
+#endif
+	return 1;
+}
+
+/*
+ * Decide whether a received multicast frame should be dropped.
+ *
+ * Drops multicast from TDLS peers in STA mode. With bridge support, also
+ * drops frames whose source MAC is already known to the bridge on another
+ * forwarding port with a fresh (< 5 s) entry, or is a local bridge address —
+ * both indicate the frame is (probably) a multicast echo of our own traffic.
+ *
+ * Returns 1 to drop, 0 to keep.
+ */
+static inline int __sram_text qdrv_rx_mcast_should_drop(struct net_device *dev,
+		struct qdrv_vap *qv, struct sk_buff *skb, struct ieee80211_node *ni)
+{
+#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE))
+	struct net_bridge_fdb_entry *f = NULL;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	struct net_bridge_port *br_port = get_br_port(dev);
+#endif
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if ((vap->iv_opmode == IEEE80211_M_STA) && !IEEE80211_NODE_IS_NONE_TDLS(ni)) {
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_PKT_RX,
+			"drop multicast frame from tdls peer [%pM]\n", ni->ni_macaddr);
+		return 1;
+	}
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	/* Look the source MAC up in the bridge forwarding database */
+	if (br_port) {
+		f = br_fdb_get_hook(br_port->br, skb, eh->ether_shost);
+	}
+
+	/* The fdb timestamp field was renamed ageing_timer -> updated in 3.14.24 */
+	if ((f != NULL) && (f->dst->dev != dev) &&
+			(f->dst->state == BR_STATE_FORWARDING) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,24)
+			time_after(f->updated, jiffies - (5 * HZ))) {
+#else
+			time_after(f->ageing_timer, jiffies - (5 * HZ))) {
+#endif
+		/* hit from bridge table, drop it since it is probably a multicast echo */
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_BRIDGE,
+			"src=%pM dst=%pM multicast echo\n",
+			eh->ether_shost, eh->ether_dhost);
+		br_fdb_put_hook(f);
+		return 1;
+	}
+	if ((f != NULL) && (f->is_local)) {
+		/* hit from bridge table, drop it since it is probably a multicast echo */
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_BRIDGE,
+			"src=%pM dst=%pM multicast echo\n",
+			eh->ether_shost, eh->ether_dhost);
+		br_fdb_put_hook(f);
+		return 1;
+	}
+	/* br_fdb_get_hook() takes a reference; release it on the keep path too */
+	if (f) {
+		br_fdb_put_hook(f);
+	}
+#endif //CONFIG_BRIDGE...
+	return 0;
+}
+
+/*
+ * 1. Save IP corresponding MAC Address to database
+ * 2. Replace MAC for corresponding IP in packets
+ * 3. Replace MAC for existing IP in database
+ *
+ * Fixme: when should we remove these nodes?
+ */
+
+/* Print every IP -> MAC entry of the replacement database to the kernel log. */
+void qdrv_dump_replace_db(struct qdrv_wlan *qw)
+{
+	struct ip_mac_mapping *entry;
+
+	printk("Current replace db:\n");
+	for (entry = qw->ic.ic_ip_mac_mapping; entry != NULL; entry = entry->next) {
+		printk("entry: IP " NIPQUAD_FMT ": %pM\n",
+			NIPQUAD(entry->ip_addr), entry->mac);
+	}
+	printk("End of replace db\n");
+}
+
+/*
+ * Maintain the IP -> MAC replacement database used by the Broadcom client
+ * workarounds.
+ *
+ * @client_mac: MAC to record for client_ip (may be NULL in REPLACE mode)
+ * @client_ip:  IPv4 address key; 0 is ignored
+ * @save_replace_flag:
+ *   SAVE_IP_MAC    - insert or update the (client_ip, client_mac) entry
+ *   REPLACE_IP_MAC - rewrite the frame's source MAC in-place with the MAC
+ *                    stored for client_ip
+ *
+ * Allocations use GFP_ATOMIC (not GFP_KERNEL as before): this is reached
+ * from handle_rx_msdu() on the NAPI poll path (softirq context), where a
+ * sleeping allocation is not allowed. kzalloc() replaces kmalloc()+memset().
+ */
+static void
+qdrv_replace_handle_ip(struct qdrv_wlan *qw, struct sk_buff *skb, u_int8_t *client_mac,
+		u_int32_t client_ip, ip_mac_flag save_replace_flag)
+{
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	struct ieee80211com *ic = &qw->ic;
+	struct ip_mac_mapping *p;
+	struct ip_mac_mapping *q = ic->ic_ip_mac_mapping;
+
+	if (client_ip == 0)
+		return;
+
+	/* First IP to store in database */
+	if (save_replace_flag == SAVE_IP_MAC && !ic->ic_ip_mac_mapping) {
+		/* GFP_ATOMIC: called in softirq (RX poll) context */
+		ic->ic_ip_mac_mapping = kzalloc(sizeof(struct ip_mac_mapping), GFP_ATOMIC);
+		if (!ic->ic_ip_mac_mapping) {
+			printk("***CRITICAL*** Cannot allocate memory for head in %s\n",
+				__FUNCTION__);
+			return;
+		}
+		ic->ic_ip_mac_mapping->ip_addr = client_ip;
+		memcpy(ic->ic_ip_mac_mapping->mac, client_mac, ETHER_ADDR_LEN);
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+			"(head) insert to database for IP " NIPQUAD_FMT ": %pM\n",
+			NIPQUAD(ic->ic_ip_mac_mapping->ip_addr),
+			ic->ic_ip_mac_mapping->mac);
+		return;
+	}
+
+	/* Replace current packets */
+	if (save_replace_flag == REPLACE_IP_MAC) {
+		while (q) {
+			if (q->ip_addr == client_ip) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+				if (!ether_addr_equal(eh->ether_shost, q->mac)) {
+#else
+				if (compare_ether_addr(eh->ether_shost, q->mac)) {
+#endif
+					DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+						"replacing packets for IP " NIPQUAD_FMT ": %pM->%pM\n",
+						NIPQUAD(client_ip), eh->ether_shost, q->mac);
+					memcpy(eh->ether_shost, q->mac, ETHER_ADDR_LEN);
+				}
+				return;
+			}
+			q = q->next;
+		}
+
+		return;
+	}
+
+	/* SAVE mode with a non-empty list: walk to the matching or last entry */
+	while (q) {
+		/* IP Already here but MAC may be changed */
+		if (q->ip_addr == client_ip) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			if (!ether_addr_equal(q->mac, client_mac)) {
+#else
+			if (compare_ether_addr(q->mac, client_mac)) {
+#endif
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+					"database change for IP " NIPQUAD_FMT ": %pM->%pM\n",
+					NIPQUAD(client_ip), q->mac, client_mac);
+				memcpy(q->mac, client_mac, ETHER_ADDR_LEN);
+			}
+			return;
+		}
+		if (q->next == NULL)
+			break;
+		q = q->next;
+	}
+
+	/* Save corresponding IP and MAC to database as a new entry */
+	p = kzalloc(sizeof(struct ip_mac_mapping), GFP_ATOMIC);
+	if (p == NULL) {
+		printk("****CRITICAL**** memory allocation failed in %s\n", __FUNCTION__);
+		return;
+	}
+
+	p->ip_addr = client_ip;
+	memcpy(p->mac, client_mac, ETHER_ADDR_LEN);
+
+	/* q points at the tail of the list here */
+	q->next = p;
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+			"insert to database for IP " NIPQUAD_FMT ": %pM\n",
+			NIPQUAD(p->ip_addr), p->mac);
+
+	return;
+}
+
+/*
+ * Return a pointer to the outer EtherType of a frame, stepping over any
+ * stacked VLAN tags. Returns NULL if the frame is too short to contain the
+ * Ethernet header plus one VLAN tag per hop taken.
+ */
+static const uint16_t *
+get_ether_type_skip_vlan(const struct ether_header *eh, uint32_t len)
+{
+	const uint16_t *type_p;
+	uint32_t remain = len;
+
+	if (remain < sizeof(struct ether_header))
+		return NULL;
+
+	for (type_p = &eh->ether_type; qtn_ether_type_is_vlan(*type_p);
+			type_p += VLAN_HLEN / sizeof(*type_p)) {
+		if (remain < sizeof(struct ether_header) + VLAN_HLEN)
+			return NULL;
+		remain -= VLAN_HLEN;
+	}
+
+	return type_p;
+}
+
+/*
+ * Broadcom workaround for ARP frames: record the sender's (IP, MAC) pair in
+ * the replacement database, then force the Ethernet source address to match
+ * the MAC inside the ARP payload if they differ.
+ */
+static void qdrv_replace_handle_arp(struct qdrv_wlan *qw, struct sk_buff *skb)
+{
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	struct arp_message *arp_p = NULL;
+	const uint16_t *ether_type_p = NULL;
+
+	/* Locate the real EtherType (frame may carry VLAN tags) */
+	ether_type_p = get_ether_type_skip_vlan(eh, skb->len);
+	if (unlikely(!ether_type_p))
+		return;
+	/* ARP payload starts right after the EtherType */
+	arp_p = (void *)(ether_type_p + 1);
+
+	qdrv_replace_handle_ip(qw, skb, arp_p->shost, arp_p->sipaddr, SAVE_IP_MAC);
+	/* compare_ether_addr() != 0 and !ether_addr_equal() both mean "differ" */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (unlikely(!ether_addr_equal(eh->ether_shost, arp_p->shost))) {
+#else
+	if (unlikely(compare_ether_addr(eh->ether_shost, arp_p->shost))) {
+#endif
+		memcpy(eh->ether_shost, arp_p->shost, ETHER_ADDR_LEN);
+	}
+
+	return;
+}
+
+/*
+ * Look up the transmitting node via skb->src_port and report its vendor
+ * quirk flags (currently only Broadcom) as a PEER_VENDOR_* bitmask.
+ * Returns 0 when the node cannot be found or has no known quirks.
+ */
+static uint32_t qdrv_find_src_vendor(struct qdrv_vap *qv, struct sk_buff *skb)
+{
+	uint32_t flags = 0;
+	struct ieee80211_node *node;
+
+	node = ieee80211_find_node_by_node_idx(&qv->iv, skb->src_port);
+	if (node == NULL)
+		return 0;
+
+	if (node->ni_brcm_flags)
+		flags |= PEER_VENDOR_BRCM;
+
+	/* find_node took a node reference; release it */
+	ieee80211_free_node(node);
+
+	return flags;
+}
+
+/*
+ * Replace Source MAC Address and BOOTP Client Address with Client Identifier
+ */
+/*
+ * Replace Source MAC Address and BOOTP Client Address with the MAC carried
+ * in the DHCP Client Identifier option (0x3d), then recompute the UDP
+ * checksum. Broadcom client workaround applied to BOOTP requests.
+ *
+ * The option walk is now bounds-checked against the end of the packet: the
+ * previous version could read past the buffer on truncated or malformed
+ * packets, and misparsed single-byte pad (0x00) options by treating the
+ * following byte as a length field (RFC 2132: pad and end have no length).
+ */
+static inline void __sram_text qdrv_replace_dhcp_packets_header(struct sk_buff *skb, struct iphdr *iphdr_p)
+{
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	struct udphdr *uh = (struct udphdr*)(iphdr_p + 1);
+	struct dhcp_message *dhcp_msg = (struct dhcp_message *)((u8 *)uh + sizeof(struct udphdr));
+
+	u8 *frm;
+	u8 *efrm;
+	int chsum = 0;
+	__wsum csum;
+
+	efrm = skb->data + skb->len;
+	frm = (u8 *)(dhcp_msg->options);
+
+	/* The fixed BOOTP header must fit entirely inside the packet */
+	if (frm > efrm)
+		return;
+
+	if (dhcp_msg->op != BOOTREQUEST || dhcp_msg->htype != ARPHRD_ETHER)
+		return;
+
+	while (frm < efrm) {
+		if (*frm == 0x00) {
+			/* pad option: single byte, no length field */
+			frm++;
+			continue;
+		}
+		if (*frm == 0xff) {
+			/* end option: stop parsing */
+			break;
+		}
+		if (frm + 2 > efrm) {
+			/* truncated option header (need type + length) */
+			break;
+		}
+		/* Client Identifier (0x3d), length 7, hardware type Ethernet */
+		if (*frm == 0x3d && frm + 3 + ETHER_ADDR_LEN <= efrm &&
+				*(frm + 1) == 0x07 && *(frm + 2) == 0x01) {
+			if (memcmp(dhcp_msg->chaddr, frm + 3, ETHER_ADDR_LEN)) {
+				memcpy(dhcp_msg->chaddr, frm + 3, ETHER_ADDR_LEN);
+			}
+			if (memcmp(eh->ether_shost, frm + 3, ETHER_ADDR_LEN)) {
+				memcpy(eh->ether_shost, frm + 3, ETHER_ADDR_LEN);
+			}
+			chsum = 1;
+			break;
+		}
+		frm += *(frm + 1) + 2;
+	}
+
+	/* Recalculate the UDP checksum */
+	if (chsum && uh->check != 0) {
+		uh->check = 0;
+		csum = csum_partial(uh, ntohs(uh->len), 0);
+
+		/* Add pseudo IP header checksum */
+		uh->check = csum_tcpudp_magic(iphdr_p->saddr, iphdr_p->daddr,
+					      ntohs(uh->len), iphdr_p->protocol, csum);
+
+		/* 0 is converted to -1 */
+		if (uh->check == 0) {
+			uh->check = CSUM_MANGLED_0;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Forward a frame to the external L2 filter port through the TQE.
+ *
+ * Builds per-packet control for the configured g_l2_ext_filter_port and
+ * hands the skb to tqe_tx(); on NETDEV_TX_BUSY the skb is freed and counted
+ * as a drop, otherwise counted as sent.
+ * NOTE(review): the numeric arguments to topaz_tqe_cpuif_ppctl_init() are
+ * opaque here — confirm their meaning against the ppctl definition.
+ */
+void qdrv_tqe_send_l2_ext_filter(struct qdrv_wlan *qw, struct sk_buff *skb)
+{
+	union topaz_tqe_cpuif_ppctl ppctl;
+	uint8_t port = g_l2_ext_filter_port;
+	uint8_t node = 0;
+
+	topaz_tqe_cpuif_ppctl_init(&ppctl, port, &node, 1,
+			0, 1, 1, 0, 1, 0);
+
+	if (unlikely(tqe_tx(&ppctl, skb) == NETDEV_TX_BUSY)) {
+		dev_kfree_skb(skb);
+		TXSTAT(qw, tx_drop_l2_ext_filter);
+	} else {
+		TXSTAT(qw, tx_l2_ext_filter);
+	}
+}
+
+/*
+ * Return 1 when WLAN IP restriction is enabled and the packet is destined
+ * for one of the bridge's IP addresses, 0 otherwise.
+ */
+static inline int qdrv_restrict_wlan_ip(struct qdrv_wlan *qw, struct iphdr *iphdr_p)
+{
+	return (qw->restrict_wlan_ip &&
+			qdrv_is_bridge_ipaddr(qw, iphdr_p->daddr)) ? 1 : 0;
+}
+
+/*
+ * Process one decapsulated Ethernet MSDU received on a VAP: WoWLAN magic
+ * check, 3-address bridge handling, authorization, broadcast rate limiting,
+ * L2 external filtering, proxy ARP, Broadcom vendor workarounds, BSS
+ * reflection of multicast, bridge echo suppression, and RX statistics.
+ *
+ * Returns 1 if the skb was consumed (dropped/redirected), 0 if the caller
+ * should deliver it up the stack.
+ *
+ * Fixes two inverted address comparisons in the KERNEL_VERSION(4,7,0)
+ * branches: compare_ether_addr() returns 0 on match, so the legacy
+ * !compare_ether_addr() tests mean "addresses equal" and must convert to
+ * ether_addr_equal() WITHOUT negation.
+ */
+static int __sram_text handle_rx_msdu(struct qdrv_wlan *qw,
+		struct qdrv_vap *qv,
+		struct ieee80211_node *ni,
+		const struct ieee80211_qosframe_addr4 *wh,
+		struct sk_buff *skb,
+		bool check_3addr_br)
+{
+/* Lazily resolve the sender's vendor quirks, at most once per frame */
+#define CHECK_VENDOR	do {						\
+		if (!vendor_checked) {					\
+			vendor = qdrv_find_src_vendor(qv, skb);		\
+			vendor_checked = 1;				\
+		}							\
+	} while (0)
+
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap = &qv->iv;
+	struct net_device *dev = vap->iv_dev;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	void *l3hdr = NULL;
+	struct iphdr *iphdr_p = NULL;
+	const uint16_t *ether_type_p = NULL;
+	uint8_t ip_proto = 0;
+	uint32_t vendor = 0;
+	int vendor_checked = 0;
+	int rc;
+	uint16_t ether_type = QTN_SKB_CB_ETHERTYPE(skb);
+	int mu = STATS_SU;
+	void *proto_data = NULL;
+	struct net_bridge_port *br_port = get_br_port(dev);
+
+	/* WoWLAN: consume magic packets while the host is asleep */
+	if (unlikely(ic->ic_wowlan.host_state)) {
+		if (wowlan_magic_process(skb, vap)) {
+			dev_kfree_skb(skb);
+			return 1;
+		}
+	}
+
+	ether_type_p = get_ether_type_skip_vlan(eh, skb->len);
+	if (unlikely(!ether_type_p)) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,
+			"Fail to get Ether type %pM\n", eh->ether_shost);
+		dev_kfree_skb(skb);
+		return 1;
+	}
+
+	l3hdr = (void *)(ether_type_p + 1);
+
+	if (check_3addr_br) {
+		/*
+		 * Set the destination MAC address
+		 * - if in 4-address mode or the frame is multicast, use the supplied DA
+		 * - if 3-address bridge mode is enabled, get the DA from the qdrv bridge
+		 * - in standard 3-address mode, use the bridge device's MAC
+		 *   address as the destination (if found), otherwise use the supplied DA
+		 */
+		qdrv_rx_set_dest_mac(qw, qv, eh, skb);
+		if (qdrv_rx_lncb_should_drop(dev, vap, eh, l3hdr)) {
+			int igmp_type = qdrv_igmp_type(l3hdr, skb->len - sizeof(*eh));
+			if (igmp_type != 0) {
+				RXSTAT(qw, rx_igmp_3_drop);
+			}
+			dev_kfree_skb(skb);
+			RXSTAT(qw, rx_mc_3_drop);
+			return 1;
+		}
+	}
+
+	/* Check if node is authorized to receive */
+	if (!qdrv_accept_data_frame(qw, vap, ni, wh, skb, ether_type)) {
+		dev_kfree_skb(skb);
+		RXSTAT(qw, rx_poll_vap_err);
+		return 1;
+	}
+
+	if ((ether_type == __constant_htons(ETH_P_IP)) ||
+			(ether_type == __constant_htons(ETH_P_IPV6))) {
+		iphdr_p = l3hdr;
+		ip_proto = iputil_proto_info(iphdr_p, skb, &proto_data, NULL, NULL);
+	}
+
+	/* Per-VAP broadcast packets-per-second rate limiting */
+	if (bcast_pps_should_drop(eh->ether_dhost, &vap->bcast_pps, *ether_type_p,
+				ip_proto, proto_data, 1)) {
+		dev_kfree_skb(skb);
+		return 1;
+	}
+
+#if TOPAZ_RX_ACCELERATE
+	if (ieee80211_tdls_tqe_path_check(ni, skb,
+				ni->ni_shared_stats->tx[mu].avg_rssi_dbm,
+				ether_type)) {
+		dev_kfree_skb(skb);
+		return 1;
+	}
+#endif
+
+	/* FIXME If EAPOL need passing externally, this needs fixing up */
+	if (unlikely(g_l2_ext_filter)) {
+		if (!skb->ext_l2_filter &&
+				vap->iv_opmode == IEEE80211_M_HOSTAP &&
+				!(ether_type == __constant_htons(ETH_P_PAE) &&
+				IEEE80211_ADDR_EQ(eh->ether_dhost, vap->iv_myaddr))) {
+			qdrv_tqe_send_l2_ext_filter(qw, skb);
+			return 1;
+		}
+	}
+
+	qdrv_wlan_stats_prot(qw, 0, ether_type, ip_proto);
+
+	if (vap->proxy_arp) {
+		if (ether_type == __constant_htons(ETH_P_ARP)) {
+			if (qdrv_proxy_arp(vap, qw, ni, (uint8_t*)l3hdr)) {
+				dev_kfree_skb(skb);
+				return 1;
+			}
+#ifdef CONFIG_IPV6
+		} else if (ether_type == __constant_htons(ETH_P_IPV6)) {
+			if (qdrv_wlan_handle_neigh_msg(vap, qw, (uint8_t*)l3hdr, 0,
+					skb, ip_proto, proto_data)) {
+				dev_kfree_skb(skb);
+				return 1;
+			}
+#endif
+		}
+	}
+
+	/* Broadcom client interoperability workarounds (AP mode only) */
+	if (unlikely((ic->ic_vendor_fix & (VENDOR_FIX_BRCM_DHCP |
+						VENDOR_FIX_BRCM_DROP_STA_IGMPQUERY |
+						VENDOR_FIX_BRCM_REPLACE_IGMP_SRCMAC)) &&
+			(qv->iv.iv_opmode == IEEE80211_M_HOSTAP))) {
+
+		if (unlikely(ether_type == __constant_htons(ETH_P_ARP))) {
+			CHECK_VENDOR;
+			if (vendor & PEER_VENDOR_BRCM) {
+				qdrv_replace_handle_arp(qw, skb);
+			}
+		}
+
+		if (unlikely(iphdr_p)) {
+			struct igmphdr *igmp_p = (struct igmphdr *)((unsigned int*)iphdr_p + iphdr_p->ihl);
+
+			if ((ic->ic_vendor_fix & VENDOR_FIX_BRCM_DROP_STA_IGMPQUERY) &&
+					(ip_proto == IPPROTO_IGMP) &&
+					(igmp_p->type == IGMP_HOST_MEMBERSHIP_QUERY)) {
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+					"suspicious IGMP query received from %pM " NIPQUAD_FMT "\n",
+					eh->ether_shost, NIPQUAD(iphdr_p->saddr));
+				CHECK_VENDOR;
+				if (vendor & PEER_VENDOR_BRCM) {
+					DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+						"drop IGMP query from Broadcom STA\n");
+					dev_kfree_skb(skb);
+					return 1;
+				}
+			}
+
+			if ((ic->ic_vendor_fix & VENDOR_FIX_BRCM_REPLACE_IGMP_SRCMAC) &&
+					(ip_proto == IPPROTO_IGMP)) {
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+					"IGMP msg received from %pM " NIPQUAD_FMT ", type=0x%x\n",
+					eh->ether_shost, NIPQUAD(iphdr_p->saddr), igmp_p->type);
+				switch (igmp_p->type) {
+				case IGMP_HOST_MEMBERSHIP_REPORT:
+				case IGMPV2_HOST_MEMBERSHIP_REPORT:
+				case IGMPV3_HOST_MEMBERSHIP_REPORT:
+				case IGMP_HOST_LEAVE_MESSAGE:
+					break;
+				default:
+					DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+						"suspicious IGMP type 0x%x for group "NIPQUAD_FMT"\n",
+						igmp_p->type, NIPQUAD(igmp_p->group));
+					break;
+				}
+				CHECK_VENDOR;
+				if (vendor & PEER_VENDOR_BRCM) {
+					qdrv_replace_handle_ip(qw, skb, NULL, iphdr_p->saddr, REPLACE_IP_MAC);
+				}
+			}
+
+			if ((ic->ic_vendor_fix & VENDOR_FIX_BRCM_DHCP) &&
+					IEEE80211_IS_MULTICAST(eh->ether_dhost) &&
+					(ip_proto == IPPROTO_UDP)) {
+				CHECK_VENDOR;
+				if (vendor & PEER_VENDOR_BRCM) {
+					qdrv_replace_dhcp_packets_header(skb, iphdr_p);
+				}
+			}
+
+			if ((ic->ic_vendor_fix & VENDOR_FIX_BRCM_REPLACE_IP_SRCMAC) &&
+					((ip_proto != IPPROTO_IGMP))) {
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+					"pkt received from %pM " NIPQUAD_FMT ", ip_protocol=0x%x\n",
+					eh->ether_shost, NIPQUAD(iphdr_p->saddr), ip_proto);
+				CHECK_VENDOR;
+				if (vendor & PEER_VENDOR_BRCM) {
+					qdrv_replace_handle_ip(qw, skb, NULL, iphdr_p->saddr, REPLACE_IP_MAC);
+				}
+			}
+		}
+	}
+
+	/*
+	 * If the destination is multicast, LNCB, or another node on the BSS, send the packet back
+	 * to the BSS.
+	 */
+	if ((qv->iv.iv_opmode == IEEE80211_M_HOSTAP) &&
+			IEEE80211_IS_MULTICAST(eh->ether_dhost) &&
+			qdrv_rx_should_send_to_bss(dev, vap, skb)) {
+		struct sk_buff *skb2;
+		skb->is_recyclable = 0;
+		skb2 = skb_copy(skb, GFP_ATOMIC);
+		if (skb2 != NULL) {
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX, "send Rx pkt back to BSS\n");
+			skb2->is_recyclable = 0;
+			M_FLAG_SET(skb2, M_ORIG_OUTSIDE);
+			QDRV_TX_DBG(2, NULL, "back to BSS - skb=%p\n", skb);
+			rc = dev_queue_xmit(skb2);
+			if (rc != NET_XMIT_SUCCESS) {
+				QDRV_TX_DBG(2, NULL, "back to BSS failed rc=%u\n", rc);
+			}
+		}
+	}
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	if (br_fdb_get_hook != NULL && br_port != NULL) {
+
+		/*
+		 * Mark the skb non-recyclable when it is addressed to this
+		 * device or to the bridge port device. BUGFIX: the 4.7+
+		 * conversion previously negated ether_addr_equal(), inverting
+		 * the legacy !compare_ether_addr() ("equal") test below.
+		 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		if (unlikely( (ether_addr_equal(eh->ether_dhost, skb->dev->dev_addr)) ||
+				(ether_addr_equal(eh->ether_dhost, br_port->dev->dev_addr)))) {
+#else
+		if (unlikely( (!compare_ether_addr(eh->ether_dhost, skb->dev->dev_addr)) ||
+				(!compare_ether_addr(eh->ether_dhost, dev->br_port->dev->dev_addr)))) {
+#endif
+			skb->is_recyclable = 0;
+		}
+
+		if ((qv->iv.iv_opmode != IEEE80211_M_HOSTAP) &&
+				IEEE80211_IS_MULTICAST(eh->ether_dhost) &&
+				qdrv_rx_mcast_should_drop(dev, qv, skb, ni)) {
+			dev_kfree_skb(skb);
+			return 1;
+		}
+	}
+#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
+
+	if (*ether_type_p == __constant_htons(ETH_P_IP)) {
+		if (qdrv_restrict_wlan_ip(qw, (struct iphdr *)l3hdr)) {
+			dev_kfree_skb(skb);
+			return 1;
+		}
+	}
+
+	/* Per-node / per-VAP RX counters by destination class */
+	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+		/*
+		 * BUGFIX: count as broadcast when the DA EQUALS the broadcast
+		 * address (legacy !compare_ether_addr() semantics); the 4.7+
+		 * conversion previously had the test negated, swapping the
+		 * broadcast and multicast counters.
+		 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		if (ether_addr_equal(eh->ether_dhost, vap->iv_dev->broadcast)) {
+#else
+		if (!compare_ether_addr(eh->ether_dhost, vap->iv_dev->broadcast)) {
+#endif
+			vap->iv_devstats.rx_broadcast_packets++;
+			ni->ni_stats.ns_rx_bcast++;
+		} else {
+			vap->iv_devstats.multicast++;
+			ni->ni_stats.ns_rx_mcast++;
+		}
+	} else {
+		vap->iv_devstats.rx_unicast_packets++;
+		ni->ni_stats.ns_rx_ucast++;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_RX,
+		"send to stack src=%pM dst=%pM\n",
+		 eh->ether_shost, eh->ether_dhost);
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	trace_skb_perf_stamp_call(skb);
+
+	return 0;
+}
+
+/*
+ * Obtain an skb for a completed RX descriptor.
+ *
+ * With the default HBM allocator, attach an skb to the descriptor's DMA
+ * buffer (optionally through a quarantine copy when
+ * TOPAZ_HBM_BUF_WMAC_RX_QUARANTINE is set); the original HBM buffer is
+ * always released/returned on those paths. Otherwise the descriptor already
+ * carries a pre-attached skbuff. Returns NULL if no buffer/skb is available.
+ */
+static struct sk_buff *qdrv_rx_rxdesc_get_skb(struct host_rxdesc *rxdesc, int pktlen)
+{
+#if TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT
+	void *buf_bus = rxdesc->rd_buffer;
+	struct sk_buff *skb = NULL;
+
+	if (likely(buf_bus)) {
+#if TOPAZ_HBM_BUF_WMAC_RX_QUARANTINE
+		/* Copy into a quarantine buffer before handing to the stack */
+		skb = topaz_hbm_attach_skb_quarantine(bus_to_virt((unsigned int)buf_bus),
+				TOPAZ_HBM_BUF_WMAC_RX_POOL, pktlen, NULL);
+		/* no matter if new buf is used, no need for original one */
+		topaz_hbm_release_buf_safe(buf_bus);
+#else
+		skb = topaz_hbm_attach_skb_bus(buf_bus,	TOPAZ_HBM_BUF_WMAC_RX_POOL);
+		if (unlikely(skb == NULL)) {
+			/* Attach failed: return the buffer to the pool */
+			topaz_hbm_put_payload_aligned_bus(buf_bus, TOPAZ_HBM_BUF_WMAC_RX_POOL);
+		}
+#endif
+	}
+	return skb;
+#else
+	return (struct sk_buff *) rxdesc->skbuff;
+#endif
+}
+
+/*
+ * NAPI poll handler for the qdrv RX path.
+ *
+ * Drains up to @budget frames from the descriptor FIFO shared with the MuC:
+ * pulls descriptors from the FIFO head (under a hardware semaphore),
+ * validates each, attaches an skb, decapsulates it via qdrv_rx_decap(), then
+ * refills the descriptor buffer and returns the slot to the MuC. A
+ * descriptor whose buffer refill fails is parked in rx_if.rx.pending and
+ * retried on the next poll. Calls napi_complete() and re-enables the RX IRQ
+ * when fewer than @budget frames were processed.
+ *
+ * Returns the number of frames processed (capped at @budget).
+ */
+int __sram_text qdrv_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct qdrv_vap *qv = container_of(napi, struct qdrv_vap, napi);
+	/* qv is retargeted per-frame below; qv1 keeps the NAPI owner VAP */
+	struct qdrv_vap *qv1 = qv;
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) qv->parent;
+	struct host_rxdesc *rxdesc = NULL;
+	int processed = 0;
+	u_int32_t df_numelems;
+	struct qdrv_rx_skb_list skb_rcv_list;
+
+	DBGPRINTF_LIMIT(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	RXSTAT(qw, rx_poll);
+
+	/* Init pending list of skb to be indicated */
+	qdrv_rx_skb_list_init(&skb_rcv_list, budget);
+
+	/* Process left over from last poll */
+	if (qw->rx_if.rx.pending != NULL) {
+		rxdesc = qw->rx_if.rx.pending;
+		qw->rx_if.rx.pending = NULL;
+		RXSTAT(qw, rx_poll_pending);
+	}
+
+	DBGPRINTF(DBG_LL_TRIAL, QDRV_LF_PKT_RX,
+			"Limit %d sem @ 0x%08x bit %d\n",
+			budget, (unsigned int) qw->host_sem, qw->rx_if.rx_sem_bit);
+
+	while (processed < budget) {
+		struct sk_buff *skb;
+		struct ieee80211_node *ni;
+		int pktlen;
+		int node_idx_unmapped;
+		uint32_t pseudo_rssi;
+
+		if (rxdesc == NULL) {
+			/* No previous data. */
+			/*
+			 * Detach the FIFO head under the host semaphore.
+			 * NOTE(review): this takes scan_if.scan_sem_bit, while
+			 * the debug print above reports rx_if.rx_sem_bit —
+			 * confirm which bit actually guards this FIFO.
+			 */
+			while (!sem_take(qw->host_sem, qw->scan_if.scan_sem_bit));
+			rxdesc = (struct host_rxdesc *)arc_read_uncached_32(&qw->rx_if.rx.fifo->hrdstart);
+			if (rxdesc) {
+				writel_wmb(NULL, &qw->rx_if.rx.fifo->hrdstart);
+			}
+			sem_give(qw->host_sem, qw->scan_if.scan_sem_bit);
+
+			if (rxdesc == NULL) {
+				/* Nothing more to process */
+				RXSTAT(qw, rx_poll_empty);
+				break;
+			}
+
+			DBGPRINTF(DBG_LL_TRIAL, QDRV_LF_PKT_RX, "Retrieving\n");
+			RXSTAT(qw, rx_poll_retrieving);
+		}
+
+		/* Switch to the descriptor's host virtual address view */
+		rxdesc = rxdesc->rd_va;
+		DBGPRINTF(DBG_LL_TRIAL, QDRV_LF_PKT_RX,
+				"rxdesc 0x%08x buf 0x%08x stat 0x%08x\n",
+				(unsigned int) rxdesc,
+				(unsigned int) rxdesc->rd_buffer,
+				rxdesc->rs_statword);
+
+		/* Status word layout: [31:16] length, [11:2] node index */
+		pktlen = (rxdesc->rs_statword >> 16) & 0xffff;
+		node_idx_unmapped = (rxdesc->rs_statword >> 2) & 0x3ff;
+		/* Pseudo-RSSI from RX gain; 70 is an offset — TODO confirm units */
+		pseudo_rssi = 70 - (rxdesc->gain_db & 0xFF);
+
+		/* Check descriptor */
+		if (pktlen == 0xFFFF) {
+			skb = NULL; /* This skb has been given to DSP for action frame */
+			goto alloc_desc_buff;
+		} else if (!pktlen) {
+			/* Zero-length frame: return the buffer to the HBM pool */
+			if (likely(rxdesc->rd_buffer)) {
+				topaz_hbm_put_payload_aligned_bus((void *)rxdesc->rd_buffer,
+						TOPAZ_HBM_BUF_WMAC_RX_POOL);
+			}
+			goto alloc_desc_buff;
+		} else if (!(rxdesc->rs_statword & MUC_RXSTATUS_DONE)) {
+			DBGPRINTF_E("Done bit not set for descriptor 0x%08x\n",
+					(unsigned int) rxdesc);
+			if (likely(rxdesc->rd_buffer)) {
+				topaz_hbm_put_payload_aligned_bus((void *)rxdesc->rd_buffer,
+						TOPAZ_HBM_BUF_WMAC_RX_POOL);
+			}
+			goto alloc_desc_buff;
+		} else if ((skb = qdrv_rx_rxdesc_get_skb(rxdesc, pktlen)) == NULL) {
+			/*
+			DBGPRINTF_E("No buffer for descriptor 0x%08x\n",
+					(unsigned int) rxdesc);
+			*/
+			RXSTAT(qw, rx_poll_buffer_err);
+			goto alloc_desc_buff;
+		}
+
+		trace_skb_perf_stamp_call(skb);
+
+		/* Route the frame to the VAP of the sending node, if known */
+		ni = qw->ic.ic_node_idx_ni[node_idx_unmapped];
+		if (ni) {
+			qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+		} else {
+			qv = netdev_priv(qw->mac->vnet[0]);
+		}
+
+		DBGPRINTF(DBG_LL_TRIAL, QDRV_LF_PKT_RX,
+				"Processed %d pktlen %d node_idx 0x%x ni %p\n",
+				processed, pktlen, node_idx_unmapped, ni);
+
+		if (unlikely(!qv)) {
+			RXSTAT(qw, rx_poll_vap_err);
+			kfree_skb(skb);
+			goto alloc_desc_buff;
+		}
+
+		/* Prepare the skb */
+		skb->dev = qv->ndev;
+		skb->src_port = IEEE80211_NODE_IDX_MAP(node_idx_unmapped);
+		/*
+		 * CCMP MIC error frames may be decapsulated into garbage packets
+		 * with invalid MAC DA, messing up the forwarding table.
+		 */
+		if (unlikely(MS(rxdesc->rs_statword, MUC_RXSTATUS_MIC_ERR))) {
+			M_FLAG_SET(skb, M_NO_L2_LRN);
+		}
+		skb_put(skb, pktlen);
+		topaz_hbm_debug_stamp(skb->head, TOPAZ_HBM_OWNER_LH_RX_MBOX, pktlen);
+		/* Decap the 802.11 frame into one or more (A-MSDU) Ethernet frames */
+		processed += qdrv_rx_decap(qv, skb, &skb_rcv_list, pseudo_rssi);
+
+alloc_desc_buff:
+		if (rxdesc_alloc_buffer(qw, rxdesc)) {
+			/*
+			 * Let's break loop, current descriptor will be added to pending.
+			 * Next time we will try to allocate again.
+			 */
+			RXSTAT(qw, rx_poll_skballoc_err);
+			DBGPRINTF_E("Failed to allocate skb for descriptor 0x%08x\n",
+				(unsigned int) rxdesc);
+			break;
+		}
+		/*
+		 * Get the physical ring pointer we are going to process now
+		 * and pass the previous pointer to the MuC. There is 1 descriptor
+		 * as buffer zone to prevent overwrites.
+		 * NOTE: prev_rxdesc is file-scope state shared with rx_fifo_init().
+		 */
+		if (prev_rxdesc) {
+			writel_wmb(rxdesc->rd_pa, &prev_rxdesc->rd_next);
+		}
+		prev_rxdesc = rxdesc;
+		/* Poison the status word with DONE cleared until the MuC rewrites it */
+		prev_rxdesc->rs_statword = (0xABBAABBA & ~MUC_RXSTATUS_DONE);
+
+		/*
+		 * Get next descriptor.
+		 */
+		rxdesc = rxdesc->rd_next;
+		if (rxdesc) {
+			/* NOTE(review): rd_va is applied here AND at loop top — confirm not double-converted */
+			rxdesc = rxdesc->rd_va;
+			prev_rxdesc->rd_next = NULL;
+			RXSTAT(qw, rx_poll_next);
+		}
+
+		/* Tell MuC that descriptor is returned */
+		df_numelems = qw->rx_if.rx.fifo->df_numelems + 1;
+		writel_wmb(df_numelems, &qw->rx_if.rx.fifo->df_numelems);
+		RXSTAT_SET(qw, rx_df_numelems, df_numelems);
+	}
+
+	qw->rx_if.rx.pending = rxdesc;
+
+	/* Deliver the batch of decapsulated skbs to the stack */
+	qdrv_rx_skb_list_indicate(&skb_rcv_list, 1);
+
+	if (processed < budget) {
+		DBGPRINTF(DBG_LL_TRIAL, QDRV_LF_PKT_RX, "Complete\n");
+		RXSTAT(qw, rx_poll_complete);
+		RXSTAT_SET(qw, rx_poll_stopped, 1);
+
+		/* MBSS - Napi is scheduled for vnet[0] so napi_complete should map to correct VAP */
+		/* qv1 = qv at start of this function stores correct VAP for this purpose */
+		napi_complete(&qv1->napi);
+
+		qdrv_mac_enable_irq(qw->mac, qw->rxirq);
+	} else {
+		RXSTAT(qw, rx_poll_continue);
+	}
+
+	DBGPRINTF_LIMIT(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return min(processed, budget);
+}
+
+int qdrv_rx_start(struct qdrv_mac *mac)
+{
+	struct int_handler int_handler;
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	int_handler.handler = qdrv_rx_irq;
+	int_handler.arg1 = (void *) qw;
+	int_handler.arg2 = (void *) mac;
+
+	if (qdrv_mac_set_handler(mac, qw->rxirq, &int_handler) != 0) {
+		DBGPRINTF_E("Failed to register IRQ handler for %d\n", qw->rxirq);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	qdrv_mac_enable_irq(mac, qw->rxirq);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+int qdrv_rx_stop(struct qdrv_mac *mac)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	qdrv_mac_disable_irq(mac, qw->rxirq);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
/*
 * There is no way to learn through the TQE interface how many VLAN
 * layers the MuC has applied to a packet.  The current VLAN code only
 * ever adds a single tag, so strip exactly one layer here.
 */
static void qdrv_rx_remove_eapol_vlan(struct ieee80211vap *vap, struct sk_buff *skb, uint16_t ether_type)
{
	struct ether_header eth, *peth;

	peth = (struct ether_header *)skb->data;
	/* Only rewrite EAPOL frames that arrived wrapped in an 802.1Q tag */
	if (ether_type == __constant_htons(ETH_P_PAE) &&
			peth->ether_type == __constant_htons(ETH_P_8021Q)) {
		/* Save the MAC header, drop the tagged header, then re-push the
		 * saved header with the ethertype forced back to EAPOL. */
		memcpy(&eth, peth, sizeof(eth));
		eth.ether_type = __constant_htons(ETH_P_PAE);
		skb_pull(skb, sizeof(struct vlan_ethhdr));
		memcpy(skb_push(skb, sizeof(eth)), &eth, sizeof(eth));
	}
}
+
/*
 * TQE receive handler: deliver a MuC-forwarded data frame to the VAP
 * that owns the node the TQE descriptor points at.
 *
 * Frames received from the external L2 filter do not carry a MAC
 * header, so whole_frm_hdr is NULL for those frames.
 */
static void qdrv_rx_tqe_rx_handler(void *token,
		const union topaz_tqe_cpuif_descr *descr,
		struct sk_buff *skb, uint8_t *whole_frm_hdr)
{
	struct qdrv_wlan *qw = token;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;
	struct qdrv_vap *qv;
	/* Node index and 3-address-bridge flag are packed into misc_user */
	const uint16_t misc_user = descr->data.misc_user;
	const uint16_t node_idx_unmapped = MS(misc_user, TQE_MISCUSER_M2L_DATA_NODE_IDX);
	const bool check_3addr_br = MS(misc_user, TQE_MISCUSER_M2L_DATA_3ADDR_BR);
	const struct ieee80211_qosframe_addr4 *wh = (void *) whole_frm_hdr;
	uint16_t ether_type;

	ni = qw->ic.ic_node_idx_ni[node_idx_unmapped];
	if (likely(ni)) {
		/* Hold a node reference for the duration of delivery */
		ieee80211_ref_node(ni);
		vap = ni->ni_vap;
		qv = container_of(vap, struct qdrv_vap, iv);

		qdrv_sch_find_data_start(skb, (struct ether_header *)skb->data, &ether_type);
		skb->src_port = IEEE80211_NODE_IDX_MAP(node_idx_unmapped);
		skb->dev = vap->iv_dev;
		QTN_SKB_CB_ETHERTYPE(skb) = ether_type;
		/* Strip any MuC-inserted 802.1Q tag from EAPOL frames */
		qdrv_rx_remove_eapol_vlan(vap, skb, ether_type);

		/* NOTE(review): non-zero return presumably means handle_rx_msdu()
		 * consumed the skb - verify against its definition */
		if (handle_rx_msdu(qw, qv, ni, wh, skb, check_3addr_br) == 0) {
			skb = switch_vlan_to_proto_stack(skb, 0);
			if (skb)
				netif_receive_skb(skb);
		}
		ieee80211_free_node(ni);
	} else {
		/* No node bound to this index: count and drop */
		RXSTAT(qw, rx_data_no_node);
		kfree_skb(skb);
	}
}
+
+int qdrv_rx_init(struct qdrv_wlan *qw, struct host_ioctl_hifinfo *hifinfo)
+{
+	struct host_rxfifo *fifos;
+
+	fifos = ioremap_nocache((u32) muc_to_lhost(hifinfo->hi_rxfifo),
+			sizeof(*fifos));
+
+	qw->rxirq = hifinfo->hi_rxdoneirq & IOCTL_DEVATTACH_IRQNUM;
+
+	qw->rx_if.rx_sem_bit = fifos->rf_sem;
+
+	qw->ic.ic_bridge_set_dest_addr = qdrv_bridge_set_dest_addr;
+
+	qw->rx_if.rx.fifo = ioremap_nocache(
+			muc_to_lhost((u32)fifos->rf_fifo),
+			sizeof(*qw->rx_if.rx.fifo));
+
+	iounmap(fifos);
+
+	if (rx_fifo_init(qw, &qw->rx_if.rx) < 0) {
+		DBGPRINTF_E("Failed to setup RX FIFO buffers\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_PKT_RX,
+			"hi_rxfifo %p %p (virt)\n",
+			(void *)hifinfo->hi_rxfifo,
+			(void *)IO_ADDRESS((u32)hifinfo->hi_rxfifo));
+
+	tqe_port_add_handler(TOPAZ_TQE_MUC_PORT, &qdrv_rx_tqe_rx_handler, qw);
+	tqe_port_register(TOPAZ_TQE_WMAC_PORT);
+
+	qdrv_mac_reserve_init(qw);
+
+	return 0;
+}
+
/*
 * Tear down the RX path, reversing qdrv_rx_init() registration order:
 * MAC reservations, TQE port/handler, then the RX ring.
 */
int qdrv_rx_exit(struct qdrv_wlan *qw)
{
	qdrv_mac_reserve_clear();

	tqe_port_unregister(TOPAZ_TQE_WMAC_PORT);
	tqe_port_remove_handler(TOPAZ_TQE_MUC_PORT);

	destroy_rx_ring(&qw->rx_if);

	return 0;
}
diff --git a/drivers/qtn/qdrv/qdrv_scan.c b/drivers/qtn/qdrv/qdrv_scan.c
new file mode 100644
index 0000000..cb955c5
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_scan.c
@@ -0,0 +1,164 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <asm/hardware.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+
/*
 * Scan IRQ handler: all mailbox work is deferred to the workqueue.
 */
static void qdrv_scan_irq(void *arg1, void *arg2)
{
	struct qdrv_wlan *qw = (struct qdrv_wlan *) arg1;
	unsigned long flags;

	/* NOTE(review): the lock only serializes this scheduling against
	 * other qw->lock holders; schedule_work() itself is IRQ-safe. */
	spin_lock_irqsave(&qw->lock, flags);
	schedule_work(&qw->scan_task);
	spin_unlock_irqrestore(&qw->lock, flags);
}
+
+static void qdrv_scan_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, scan_task);
+	struct host_scandesc *scan_desc;
+	u32* sd;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Protect the scan fifo contention between MuC & Linux using hw sem */
+	if(!sem_take(qw->host_sem, qw->scan_if.scan_sem_bit))
+	{
+		DBGPRINTF_E("Unable to get semaphore - rescedule.\n");
+		schedule_work(&qw->scan_task);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return;
+	}
+
+	scan_desc = (struct host_scandesc *)
+		IO_ADDRESS((u32)*(qw->scan_if.sc_req_mbox));
+
+	sd = (u32 *) (*qw->scan_if.sc_req_mbox);
+	if (sd == NULL) {
+		sem_give(qw->host_sem, qw->scan_if.scan_sem_bit);
+		DBGPRINTF_E("sd NULL\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return;
+	}
+	writel_wmb(0, qw->scan_if.sc_req_mbox);
+	sem_give(qw->host_sem, qw->scan_if.scan_sem_bit);
+
+	/* MATS FIX this. What is it supposed to do */
+	/* Call Baseband driver with dev, chan and req_type */
+	/* Fake for now. Old wlan_host_scan_process() */
+	scan_desc->status = 1;
+
+	if (sem_take(qw->host_sem, qw->scan_if.scan_sem_bit))
+	{
+		writel_wmb(sd, qw->scan_if.sc_res_mbox);
+		sem_give(qw->host_sem, qw->scan_if.scan_sem_bit);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+int qdrv_scan_start(struct qdrv_mac *mac)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+	struct int_handler int_handler;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	INIT_WORK(&qw->scan_task, qdrv_scan_work);
+
+	int_handler.handler = qdrv_scan_irq;
+	int_handler.arg1 = (void *) qw;
+	int_handler.arg2 = NULL;
+
+	if(qdrv_mac_set_handler(mac, qw->scanirq, &int_handler) != 0)
+	{
+		DBGPRINTF_E("Failed to register IRQ handler for %d\n",
+			qw->scanirq);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	qdrv_mac_enable_irq(mac, qw->scanirq);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_scan_stop(struct qdrv_mac *mac)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	qdrv_mac_disable_irq(mac, qw->scanirq);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_scan_init(struct qdrv_wlan *qw, struct host_ioctl_hifinfo *hifinfo)
+{
+	struct host_scanfifo *scan;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/*
+	hal_range_check_sram_addr(hifinfo->hi_scanfifo)
+	*/
+
+	scan = ioremap_nocache(muc_to_lhost(hifinfo->hi_scanfifo), sizeof(*scan));
+	KASSERT(scan != NULL, (DBGEFMT "Unable to ioremap tx done memory area - reboot\n", DBGARG));
+	qw->scan_fifo = scan;
+	qw->scan_if.sc_res_mbox = (volatile u32 *) &(scan->sf_res);
+	qw->scan_if.sc_req_mbox = (volatile u32 *) &(scan->sf_req);
+	qw->scanirq = hifinfo->hi_scanirq & IOCTL_DEVATTACH_IRQNUM;
+	qw->scan_if.scan_sem_bit = scan->sf_sem;
+	qw->scan_if.tx_sem_bit = scan->tx_sem;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+int qdrv_scan_exit(struct qdrv_wlan *qw)
+{
+	iounmap(qw->scan_fifo);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
diff --git a/drivers/qtn/qdrv/qdrv_sch.c b/drivers/qtn/qdrv/qdrv_sch.c
new file mode 100644
index 0000000..8a21334
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_sch.c
@@ -0,0 +1,1223 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/moduleloader.h>
+
+#include <asm/board/board_config.h>
+
+#include <net/pkt_sched.h>
+#include <trace/ippkt.h>
+
+#include <qtn/lhost_muc_comm.h>
+#include <qtn/qtn_global.h>
+#include <qtn/qtn_trace.h>
+#include <qtn/qdrv_sch.h>
+
+#include "qdrv_mac.h"
+#include "qdrv_vap.h"
+#include "qdrv_sch_pm.h"
+#include "qdrv_sch_wmm.h"
+#include "qdrv_debug.h"
+#include "qdrv_wlan.h"
+
/* Qdisc registration names for the three scheduler flavours */
#define QDRV_SCH_NAME_NORMAL	"qdrv_sch"
#define QDRV_SCH_NAME_RED	"qdrv_sch_red"
#define QDRV_SCH_NAME_JOIN	"qdrv_sch_join"

/* Set when a non-Quantenna peer is associated; selects the alternate
 * band table qdrv_sch_band_chg_prio below. */
extern int g_qdrv_non_qtn_assoc;

/* Scheduler flavour: plain priority, random-early-drop, or one that
 * joins an existing device group's shared data. */
enum qdrv_sch_type
{
	QDRV_SCH_TYPE_NORMAL,
	QDRV_SCH_TYPE_RED,
	QDRV_SCH_TYPE_JOIN
};


#ifdef CONFIG_IPV6
/* Prototype changed across kernel versions (frag offset out-param added) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
extern int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
				__be16 *frag_offp);
#else
extern int ipv6_skip_exthdr(const struct sk_buff *skb, int start, uint8_t *nexthdrp);
#endif
#endif

/* Registry of all shared scheduler data records, guarded by its lock */
struct qdrv_sch_shared_data_list
{
	struct list_head head;
	spinlock_t lock;
};

/* Band walk order: control traffic first, then AC_VO .. AC_BK */
const int qdrv_sch_band_prio[] = {
	QDRV_BAND_CTRL,
	QDRV_BAND_AC_VO,
	QDRV_BAND_AC_VI,
	QDRV_BAND_AC_BE,
	QDRV_BAND_AC_BK
};

/* Access-category display names, indexed by AC number */
static const char *ac_name[] = {"BE", "BK", "VI", "VO"};

/* Alternate band/AIFSN table used when a non-Quantenna peer is associated */
struct qdrv_sch_band_aifsn qdrv_sch_band_chg_prio[] = {
	{QDRV_BAND_CTRL, 1},
	{QDRV_BAND_AC_VO, 1},
	{QDRV_BAND_AC_VI, 1},
	{QDRV_BAND_AC_BE, 3},
	{QDRV_BAND_AC_BK, 7}
};

/* The single module-wide shared-data registry instance */
static struct qdrv_sch_shared_data_list qdrv_sch_shared_data;
+
+inline const char *qdrv_sch_tos2ac_str(int tos)
+{
+	if ((tos < 0) || (tos >= IEEE8021P_PRIORITY_NUM))
+		return NULL;
+
+	return ac_name[qdrv_sch_tos2ac[tos]];
+}
+
+inline void qdrv_sch_set_ac_map(int tos, int aid)
+{
+	if ((tos >= 0) && (tos < IEEE8021P_PRIORITY_NUM) &&
+			(aid >= 0) && (aid < QDRV_SCH_PRIORITIES)) {
+		qdrv_sch_tos2ac[tos] = aid;
+	}
+}
+
+uint32_t qdrv_sch_get_emac_in_use(void)
+{
+	uint32_t emac_in_use = 0;
+	int emac_cfg = 0;
+
+	if (get_board_config(BOARD_CFG_EMAC0, &emac_cfg) == 0) {
+		if (emac_cfg & EMAC_IN_USE) {
+			emac_in_use |= QDRV_SCH_EMAC0_IN_USE;
+		}
+	}
+	if (get_board_config(BOARD_CFG_EMAC1, &emac_cfg) == 0) {
+		if (emac_cfg & EMAC_IN_USE) {
+			emac_in_use |= QDRV_SCH_EMAC1_IN_USE;
+		}
+	}
+
+	return emac_in_use;
+}
+
+int qdrv_sch_set_dscp2ac_map(const uint8_t vapid, uint8_t *ip_dscp, uint8_t listlen, uint8_t ac)
+{
+	uint8_t i;
+	const uint32_t emac_in_use = qdrv_sch_get_emac_in_use();
+
+	for (i = 0; i < listlen; i++) {
+		qdrv_sch_mask_settid(vapid, ip_dscp[i], WME_AC_TO_TID(ac), emac_in_use);
+	}
+
+	return 0;
+}
+
+void qdrv_sch_set_dscp2tid_map(const uint8_t vapid, const uint8_t *dscp2tid)
+{
+	uint8_t dscp;
+	uint8_t tid;
+	const uint32_t emac_in_use = qdrv_sch_get_emac_in_use();
+
+	for (dscp = 0; dscp < IP_DSCP_NUM; dscp++) {
+		tid = dscp2tid[dscp];
+		if (tid >= IEEE8021P_PRIORITY_NUM)
+			tid = qdrv_dscp2tid_default(dscp);
+		tid = QTN_TID_MAP_UNUSED(tid);
+		qdrv_sch_mask_settid(vapid, dscp, tid, emac_in_use);
+	}
+}
+
+void qdrv_sch_get_dscp2tid_map(const uint8_t vapid, uint8_t *dscp2tid)
+{
+	uint8_t dscp;
+
+	for (dscp = 0; dscp < IP_DSCP_NUM; dscp++) {
+		dscp2tid[dscp] = qdrv_sch_mask_gettid(vapid, dscp);
+	}
+}
+
+int qdrv_sch_get_dscp2ac_map(const uint8_t vapid, uint8_t *dscp2ac)
+{
+	uint8_t i;
+
+	if (!dscp2ac)
+		return -1;
+
+	for (i = 0; i < IP_DSCP_NUM; i++){
+		dscp2ac[i] = TID_TO_WME_AC(qdrv_sch_mask_gettid(vapid, i));
+	}
+
+	return 0;
+}
+
/* Initialize the (list, lock) pair that tracks all shared scheduler data. */
static void qdrv_sch_init_shared_data_list(struct qdrv_sch_shared_data_list *list)
{
	INIT_LIST_HEAD(&list->head);
	spin_lock_init(&list->lock);
}
+
+static void *qdrv_sch_alloc_fast_data(size_t sz)
+{
+	void *ret = NULL;
+
+#ifdef CONFIG_ARCH_RUBY_NUMA
+	ret = heap_sram_alloc(sz);
+#endif
+	if (!ret) {
+		ret = kmalloc(sz, GFP_KERNEL);
+	}
+	if (ret) {
+		memset(ret, 0, sz);
+	}
+
+	return ret;
+}
+
+static void qdrv_sch_free_fast_data(void *ptr)
+{
+#ifdef CONFIG_ARCH_RUBY_NUMA
+	if (heap_sram_ptr(ptr)) {
+		heap_sram_free(ptr);
+		ptr = NULL;
+	}
+#endif
+
+	if (ptr) {
+		kfree(ptr);
+	}
+}
+
+static void qdrv_sch_exit_shared_data_list(struct qdrv_sch_shared_data_list *list)
+{
+	struct list_head *pos, *temp;
+
+	list_for_each_safe(pos, temp, &list->head) {
+		struct qdrv_sch_shared_data *qsh =
+			container_of(pos, struct qdrv_sch_shared_data, entry);
+		list_del(pos);
+		qdrv_sch_free_fast_data(qsh);
+	}
+}
+
/*
 * Token budget for a new shared-data block: bounded by the device's tx
 * queue length, but never fewer than one token per band.
 */
static int qdrv_sch_tokens_param(struct Qdisc *sch)
{
	/* Maximum combined number of packets we can hold in all queues. */
	return max((int)qdisc_dev(sch)->tx_queue_len, QDRV_SCH_BANDS);
}
+
/*
 * Compute the random-early-drop threshold for a RED scheduler; 0 (RED
 * disabled) for the other flavours.  The result is always of the form
 * 2^k - 1 so it can be used directly as a bitmask.
 */
static int qdrv_sch_random_threshold_param(int tokens, enum qdrv_sch_type sch_type)
{
	/*
	 * Random drop threshold is calculated the way all low bits are set to 1.
	 * So for example threshold is 511, then if we apply 511 mask to random number we get [0;511] random value,
	 * which is used as probability.
	 * If probability value is higher than number of remaining tokens - packet is to be dropped.
	 * So at the beginning we do not drop anything, but after reaching threshold value we start dropping.
	 * And as more full queue become - more chances for packet to be dropped.
	 */

	int random_drop_threshold = 0;

	if (sch_type == QDRV_SCH_TYPE_RED) {
		int i;
		/* Count the bits needed to represent tokens/3 ... */
		random_drop_threshold = tokens / 3;
		for (i = 0; random_drop_threshold; ++i) {
			random_drop_threshold = (random_drop_threshold >> 1);
		}
		/* ... then use the largest 2^(i-1)-1 mask not exceeding it.
		 * Budgets with i < 4 (tokens/3 < 8) disable random drop. */
		if (i >= 4) {
			random_drop_threshold = (1 << (i - 1)) - 1;
		} else {
			random_drop_threshold = 0;
		}
	}

	return random_drop_threshold;
}
+
/*
 * Decide whether two net-device names belong to the same group.
 *
 * Devices group together when their names share the same first letter
 * (so 'wds0' and 'wifi1' match) and, after skipping each alphabetic
 * prefix, the remainders are equal or first differ at a digit (so
 * 'eth0' and 'eth1_0' match, 'eth0' and 'wifi0' do not).
 * Returns 1 when the names group together, 0 otherwise.
 */
static int qdrv_sch_cmp_netdevice_name(const char *name1, const char *name2)
{
	const char *p = name1;
	const char *q = name2;

	/* Must start with the same alphabetic character */
	if (!isalpha(*p) || *p != *q)
		return 0;

	/* Skip the alphabetic prefix of each name */
	while (isalpha(*p))
		p++;
	while (isalpha(*q))
		q++;

	/* Compare the remainders; a digit-vs-digit mismatch still groups */
	for (; *p && *q; p++, q++) {
		if (*p != *q)
			return (isdigit(*p) && isdigit(*q));
	}

	return 1;
}
+
/*
 * Record the number of nodes sharing this data block and recompute the
 * per-user reserved token quota.  users is clamped to at least 1 so the
 * division is always safe.  Callers hold the shared-data lock.
 */
static void qdrv_tx_sch_node_data_users(struct qdrv_sch_shared_data *sd, uint32_t users)
{
	sd->users = MAX(users, 1);
	sd->reserved_tokens_per_user =
		(sd->total_tokens - sd->random_drop_threshold) / sd->users;
}
+
/*
 * Bind a node's scheduler data to a qdisc and its shared data block.
 * No-op when sch is NULL or the node is already bound to this qdisc.
 * The user count / per-user quota update happens under the shared
 * lock; the per-band queues are then initialized.
 */
void qdrv_tx_sch_node_data_init(struct Qdisc *sch, struct qdrv_sch_shared_data *sd,
				struct qdrv_sch_node_data *nd, const uint32_t users)
{
	int i;
	unsigned long flags;

	if (sch == NULL || sch == nd->qdisc) {
		return;
	}

	memset(nd, 0, sizeof(*nd));

	qdrv_sch_shared_data_lock(sd, flags);
	nd->shared_data = sd;
	nd->qdisc = sch;
	qdrv_tx_sch_node_data_users(sd, users);
	qdrv_sch_shared_data_unlock(sd, flags);

	for (i = 0; i < ARRAY_SIZE(nd->bands); i++) {
		skb_queue_head_init(&nd->bands[i].queue);
	}
}
+
/*
 * Unbind a node's scheduler data: update the shared user count under
 * the shared lock, then wipe the node data.
 */
void qdrv_tx_sch_node_data_exit(struct qdrv_sch_node_data *nd, const uint32_t users)
{
	struct qdrv_sch_shared_data *sd = nd->shared_data;
	unsigned long flags;

	qdrv_sch_shared_data_lock(sd, flags);
	qdrv_tx_sch_node_data_users(sd, users);
	qdrv_sch_shared_data_unlock(sd, flags);

	memset(nd, 0, sizeof(*nd));
}
+
+int qdrv_sch_node_is_active(const struct qdrv_sch_node_band_data *nbd,
+				const struct qdrv_sch_node_data *nd, uint8_t band)
+{
+	struct qdrv_sch_shared_data *sd = nd->shared_data;
+	struct qdrv_sch_shared_band_data *sbd = &sd->bands[band];
+	struct qdrv_sch_node_band_data *nbd_tmp;
+	unsigned long flags;
+
+	qdrv_sch_shared_data_lock(sd, flags);
+
+	nbd_tmp = TAILQ_FIRST(&sbd->active_nodes);
+	while (nbd_tmp) {
+		if (nbd_tmp == nbd) {
+			qdrv_sch_shared_data_unlock(sd, flags);
+			return 1;
+		}
+		nbd_tmp = TAILQ_NEXT(nbd_tmp, nbd_next);
+	}
+
+	qdrv_sch_shared_data_unlock(sd, flags);
+
+	return 0;
+}
+
/*
 * Allocate and initialize a shared scheduler data block with the given
 * token budget and random-drop threshold.  The default drop callback
 * simply consumes the skb.  Returns NULL on allocation failure.
 */
struct qdrv_sch_shared_data *qdrv_sch_shared_data_init(int16_t tokens, uint16_t rdt)
{
	struct qdrv_sch_shared_data *qsh;
	int i;

	qsh = qdrv_sch_alloc_fast_data(sizeof(*qsh));
	if (!qsh) {
		return NULL;
	}

	/* alloc_fast_data already zeroes; kept as cheap belt-and-braces */
	memset(qsh, 0, sizeof(*qsh));

	spin_lock_init(&(qsh->lock));
	qsh->drop_callback = &consume_skb;
	qsh->total_tokens = tokens;
	qsh->available_tokens = tokens;
	qsh->random_drop_threshold = rdt;
	for (i = 0; i < ARRAY_SIZE(qsh->bands); i++) {
		TAILQ_INIT(&qsh->bands[i].active_nodes);
	}
	qsh->queuing_alg = QTN_GLOBAL_INIT_TX_QUEUING_ALG;

	return qsh;
}
+
/*
 * Free a shared data block.  Panics if users is non-zero, since a live
 * node would be left pointing at freed memory.
 */
void qdrv_sch_shared_data_exit(struct qdrv_sch_shared_data *qsh)
{
	if (qsh->users != 0) {
		panic(KERN_ERR "%s: users is not zero\n", __FUNCTION__);
	}

	qdrv_sch_free_fast_data(qsh);
}
+
/*
 * Drop callback installed on shared data blocks: return the skb's
 * token to its node's scheduler, then recycle or free the buffer.
 */
static __sram_text void qdrv_sch_drop_callback(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct Qdisc *q;
	struct qdrv_sch_node_data *nd;
	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();

	if (likely(ndev)) {
		/* Token accounting lives in the qdisc attached to tx queue 0 */
		q = netdev_get_tx_queue(ndev, 0)->qdisc;
		nd = qdisc_priv(q);
		qdrv_sch_complete(nd, skb, 0);
	} else {
		if (printk_ratelimit()) {
			printk(KERN_ERR "%s: skb with NULL device set\n", __FUNCTION__);
		}
	}

	/* Prefer recycling; fall back to a context-safe free */
	if (!qtn_skb_recycle_list_push(recycle_list, &recycle_list->stats_eth, skb)) {
		dev_kfree_skb_any(skb);
	}
}
+
/*
 * Create a new shared data block for this qdisc's device and register
 * it on the global list.  Returns NULL on allocation failure or if the
 * device name does not fit the record.
 */
static struct qdrv_sch_shared_data *qdrv_sch_alloc_shared_data(struct Qdisc *sch,
	struct qdrv_sch_shared_data_list *list, enum qdrv_sch_type sch_type)
{
	struct qdrv_sch_shared_data *qsh;
	/* NOTE(review): tokens_param() returns int; confirm tx_queue_len can
	 * never exceed int16_t range on this platform */
	int16_t tokens = qdrv_sch_tokens_param(sch);
	uint16_t rdt = qdrv_sch_random_threshold_param(tokens, sch_type);
	size_t len;

	qsh = qdrv_sch_shared_data_init(tokens, rdt);
	if (!qsh) {
		return NULL;
	}

	/* Length was checked above, so strncpy always NUL-terminates */
	len = strlen(qdisc_dev(sch)->name);
	if (len >= sizeof(qsh->dev_name)) {
		qdrv_sch_shared_data_exit(qsh);
		return NULL;
	}
	strncpy(qsh->dev_name, qdisc_dev(sch)->name, sizeof(qsh->dev_name));

	qsh->users = 1;
	qsh->drop_callback = &qdrv_sch_drop_callback;
	qsh->reserved_tokens_per_user = QDRV_SCH_RESERVED_TOKEN_PER_USER;

	spin_lock(&list->lock);
	list_add(&qsh->entry, &list->head);
	spin_unlock(&list->lock);

	return qsh;
}
+
+static struct qdrv_sch_shared_data* qdrv_sch_find_shared_data(struct Qdisc *sch,
+	struct qdrv_sch_shared_data_list *list)
+{
+	struct qdrv_sch_shared_data *qsh = NULL;
+	struct list_head *pos;
+
+	spin_lock(&list->lock);
+	list_for_each(pos, &list->head) {
+		qsh = container_of(pos, struct qdrv_sch_shared_data, entry);
+		if (qdrv_sch_cmp_netdevice_name(qsh->dev_name, qdisc_dev(sch)->name)) {
+			printk(KERN_INFO"%s: %s join %s\n", sch->ops->id,
+				qdisc_dev(sch)->name, qsh->dev_name);
+			break;
+		}
+		qsh = NULL;
+	}
+	spin_unlock(&list->lock);
+
+	return qsh;
+}
+
/*
 * Obtain shared data for a new qdisc.  JOIN schedulers attach to an
 * existing group (growing its token budget and user count); NORMAL and
 * RED schedulers allocate a fresh block.  Returns NULL on failure, or
 * for JOIN when no matching group exists.
 */
static struct qdrv_sch_shared_data* qdrv_sch_acquire_shared_data(struct Qdisc *sch,
	struct qdrv_sch_shared_data_list *list, enum qdrv_sch_type sch_type)
{
	struct qdrv_sch_shared_data *qsh = NULL;
	unsigned long flags;

	switch (sch_type) {
	case QDRV_SCH_TYPE_JOIN:
		qsh = qdrv_sch_find_shared_data(sch, list);
		if (qsh) {
			/* Each joiner contributes its reserved quota to the pool */
			qdrv_sch_shared_data_lock(qsh, flags);
			qsh->total_tokens += QDRV_SCH_RESERVED_TOKEN_PER_USER;
			qsh->available_tokens += QDRV_SCH_RESERVED_TOKEN_PER_USER;
			++qsh->users;
			qdrv_sch_shared_data_unlock(qsh, flags);
		}
		break;

	case QDRV_SCH_TYPE_NORMAL:
	case QDRV_SCH_TYPE_RED:
		qsh = qdrv_sch_alloc_shared_data(sch, list, sch_type);
		break;
	}

	return qsh;
}
+
/*
 * Drop one user's reference on a shared data block.  The last user
 * unlinks and frees the block; otherwise the user's token contribution
 * is withdrawn from the pool.  Lock order: list lock outside, shared
 * lock inside (matches the allocation path).
 */
static void qdrv_sch_release_shared_data(struct qdrv_sch_shared_data_list *list, struct qdrv_sch_shared_data *qsh)
{
	unsigned long flags;

	if (qsh) {
		spin_lock(&list->lock);

		qdrv_sch_shared_data_lock(qsh, flags);
		if (--qsh->users == 0) {
			list_del(&qsh->entry);
			qdrv_sch_shared_data_unlock(qsh, flags);
			qdrv_sch_shared_data_exit(qsh);
		} else {
			qsh->total_tokens -= QDRV_SCH_RESERVED_TOKEN_PER_USER;
			qsh->available_tokens -= QDRV_SCH_RESERVED_TOKEN_PER_USER;
			KASSERT((qsh->available_tokens >= 0),
					("%s available tokens becomes a negative value\n", __FUNCTION__));
			qdrv_sch_shared_data_unlock(qsh, flags);
		}

		spin_unlock(&list->lock);
	}
}
+
/*
 * Format scheduler statistics (TOS/AC map, token pool state and
 * per-band queue counters) into buf.  ndev may be NULL, in which case
 * the qdisc queue length is reported as 0.  Returns bytes written.
 */
static ssize_t qdrv_sch_stats_snprintf(struct net_device *ndev, char *buf, ssize_t limit,
		const struct qdrv_sch_node_data *q)
{
	const struct Qdisc *sch = NULL;
	const struct qdrv_sch_shared_data *qsh = q->shared_data;
	ssize_t k = 0;
	int i;

	if (ndev) {
		sch = netdev_get_tx_queue(ndev, 0)->qdisc;
	}

	k += snprintf(buf + k, limit - k, "TOS/AC:");

	for (i = 0; i < IEEE8021P_PRIORITY_NUM; i++) {
		k += snprintf(buf + k, limit - k, " %d/%s", i, qdrv_sch_tos2ac_str(i));
	}

	k += snprintf(buf + k, limit - k,
		"\nqlen=%d\navailable_tokens=%d\nreserved_tokens_per_user=%d\nrandom_drop_threshold=%d\n",
		sch ? sch->q.qlen : 0,
		qsh->available_tokens,
		qsh->reserved_tokens_per_user,
		qsh->random_drop_threshold);

	for (i = 0; i < QDRV_SCH_BANDS; ++i) {
		const struct qdrv_sch_node_band_data *nbd = &q->bands[i];
		k += snprintf(buf + k, limit - k,
			"%d: queue_len=%d dropped=%u dropped_victim=%d sent=%u\n",
			i, skb_queue_len(&nbd->queue),
			nbd->dropped, nbd->dropped_victim, nbd->sent);
	}

	return k;
}
+
+static ssize_t qdrv_sch_sysfs_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	const struct qdrv_sch_node_data *q = container_of(attr, struct qdrv_sch_node_data, sysfs_attr);
+	struct net_device *ndev = dev_get_by_name(&init_net, q->shared_data->dev_name);
+	int rc;
+
+	rc = qdrv_sch_stats_snprintf(ndev, buf, PAGE_SIZE, q);
+
+	if (ndev)
+		dev_put(ndev);
+
+	return rc;
+}
+
/*
 * sysfs write handler: "<tos> <aid>" updates the TOS -> AC map, with
 * the device queue paused around the table update.  Malformed input is
 * silently ignored; the full count is always consumed.
 */
static ssize_t qdrv_sch_sysfs_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qdrv_sch_node_data *q = container_of(attr, struct qdrv_sch_node_data, sysfs_attr);
	struct net_device *ndev = dev_get_by_name(&init_net, q->shared_data->dev_name);
	int tos = -1;
	int aid = -1;

	/* Both values are required */
	if (sscanf(buf, "%d %d", &tos, &aid) < 2)
		goto ready_to_return;

	if (ndev) {
		netif_stop_queue(ndev);
		qdrv_sch_set_ac_map(tos, aid);	/* validates tos/aid ranges itself */
		netif_start_queue(ndev);
	}

ready_to_return:
	if (ndev)
		dev_put(ndev);

	return count;
}
+
/*
 * Create the per-qdisc "qdrv_sch" sysfs attribute on the device.
 * DEVICE_ATTR is expanded at function scope so the attribute template
 * can be copied into the node data before registration.
 */
static int qdrv_sch_sysfs_init(struct Qdisc *sch)
{
	struct qdrv_sch_node_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
	/* Newer kernels reject world-writable sysfs files */
	DEVICE_ATTR(qdrv_sch, S_IRUSR | S_IWUSR, qdrv_sch_sysfs_show, qdrv_sch_sysfs_store);
#else
	DEVICE_ATTR(qdrv_sch, S_IRUGO | S_IWUGO, qdrv_sch_sysfs_show, qdrv_sch_sysfs_store);
#endif
	q->sysfs_attr = dev_attr_qdrv_sch;

	return sysfs_create_file(&dev->dev.kobj, &q->sysfs_attr.attr);
}
+
+static void qdrv_sch_sysfs_destroy(struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct qdrv_sch_node_data *q = qdisc_priv(sch);
+	sysfs_remove_file(&dev->dev.kobj, &q->sysfs_attr.attr);
+}
+
+static inline uint32_t qdrv_sch_band(struct sk_buff *skb)
+{
+	if (unlikely(!QTN_SKB_ENCAP_IS_80211_MGMT(skb) &&
+			qdrv_sch_classify_ctrl(skb))) {
+		return QDRV_BAND_CTRL;
+	}
+
+	if (unlikely(skb->priority >= QDRV_SCH_BANDS)) {
+		return QDRV_BAND_AC_BK;
+	}
+
+	return skb->priority;
+}
+
/*
 * Check if a packet should be dropped
 *
 * Returns:
 *     0  if the packet should be queued
 *     1  a packet should be dropped from any queue
 *     -1 a packet should be dropped from the current node's queues
 */
static inline int qdrv_sch_enqueue_drop_cond(const struct qdrv_sch_shared_data *sd,
		struct qdrv_sch_node_data *nd, bool is_low_rate)
{
	/* prevent a station that stops responding from hogging */
	if (sd->users > 1) {
		/* No single node may hold more than half the total budget */
		if (nd->used_tokens > (sd->total_tokens / 2)) {
			return -1;
		}
		/* Low-rate stations get a much tighter cap */
		if (is_low_rate && (nd->used_tokens > QDRV_TX_LOW_RATE_TOKENS_MAX)) {
			++nd->low_rate;
			return -1;
		}
	}

	/* Pool exhausted: something must be dropped */
	if (unlikely(sd->available_tokens == 0)) {
		return 1;
	}

	/* Within the node's reserved quota: always accept */
	if (nd->used_tokens < sd->reserved_tokens_per_user) {
		return 0;
	}

	/* Random early drop: probability grows as the pool drains below
	 * the threshold (threshold is a 2^k-1 mask, see its computation) */
	if (unlikely(sd->available_tokens <= sd->random_drop_threshold)) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
		uint32_t drop_chance = prandom_u32() & sd->random_drop_threshold;
#else
		uint32_t drop_chance = net_random() & sd->random_drop_threshold;
#endif
		if (unlikely((drop_chance >= sd->available_tokens))) {
			return 1;
		}
	}

	return 0;
}
+
+static inline uint8_t qdrv_sch_get_band(uint8_t i)
+{
+	if (g_qdrv_non_qtn_assoc) {
+		return qdrv_sch_band_chg_prio[i].band_prio;
+	} else {
+		return qdrv_sch_band_prio[i];
+	}
+}
+
+static struct sk_buff *
+qdrv_sch_peek_prio(struct qdrv_sch_shared_data *sd, uint8_t prio)
+{
+	struct qdrv_sch_shared_band_data *sbd = &sd->bands[prio];
+	struct qdrv_sch_node_band_data *nbd = TAILQ_FIRST(&sbd->active_nodes);
+
+	if (!nbd) {
+		return NULL;
+	}
+
+	return skb_peek(&nbd->queue);
+}
+
+static __sram_text struct sk_buff *qdrv_sch_peek(struct Qdisc *sch)
+{
+	struct qdrv_sch_node_data *nd = qdisc_priv(sch);
+	struct qdrv_sch_shared_data *sd = nd->shared_data;
+	struct sk_buff *skb;
+	int p;
+
+	for (p = 0; p < ARRAY_SIZE(sd->bands); p++) {
+		skb = qdrv_sch_peek_prio(sd, qdrv_sch_get_band(p));
+		if (skb) {
+			return skb;
+		}
+	}
+
+	return NULL;
+}
+
/* A node/band entry is on its band's active TAILQ iff its back-pointer is set. */
static inline bool qdrv_sch_node_band_is_queued(const struct qdrv_sch_node_band_data *nbd)
{
	return nbd->nbd_next.tqe_prev != NULL;
}
+
/*
 * Append a node/band entry to the band's active list unless it is
 * already queued.  Callers hold the shared-data lock.
 */
static inline void qdrv_sch_band_active_enqueue(struct qdrv_sch_shared_band_data *sbd,
		struct qdrv_sch_node_band_data *nbd)
{
	if (!qdrv_sch_node_band_is_queued(nbd)) {
		TAILQ_INSERT_TAIL(&sbd->active_nodes, nbd, nbd_next);
	}
}
+
/*
 * Remove a node/band entry from the band's active list if queued,
 * clearing the back-pointer that marks membership.  Callers hold the
 * shared-data lock.
 */
static inline void qdrv_sch_band_active_dequeue(struct qdrv_sch_shared_band_data *sbd,
		struct qdrv_sch_node_band_data *nbd)
{
	if (qdrv_sch_node_band_is_queued(nbd)) {
		TAILQ_REMOVE(&sbd->active_nodes, nbd, nbd_next);
		nbd->nbd_next.tqe_prev = NULL;
	}
}
+
/*
 * Put every non-empty band queue of this node back on its band's
 * active list.  Called (under the shared lock) when a node drops back
 * below its threshold, see qdrv_sch_complete().
 */
static void __sram_text
qdrv_sch_node_reactivate(struct qdrv_sch_node_data *nd)
{
	struct qdrv_sch_shared_data *sd = nd->shared_data;
	struct qdrv_sch_node_band_data *nbd;
	struct qdrv_sch_shared_band_data *sbd;
	int prio;
	uint8_t band;

	for (prio = 0; prio < ARRAY_SIZE(sd->bands); prio++) {
		band = qdrv_sch_get_band(prio);
		nbd = &nd->bands[band];
		sbd = &sd->bands[band];
		if (skb_queue_len(&nbd->queue) > 0) {
			qdrv_sch_band_active_enqueue(sbd, nbd);
		}
	}
}
+
/*
 * Completion hook for a transmitted or dropped skb: return its token
 * to the shared pool (only for skbs that were accounted for at enqueue,
 * flagged M_ENQUEUED_SCH) and reactivate the node's queues when it has
 * just fallen back under its threshold.
 */
void __sram_text
qdrv_sch_complete(struct qdrv_sch_node_data *nd, struct sk_buff *skb,
			uint8_t under_thresh)
{
	struct qdrv_sch_shared_data *sd = nd->shared_data;
	unsigned long flags;

	if (unlikely(!sd)) {
		printk(KERN_ERR "%s: qdrv_sch_node_data 0x%p invalid\n",
				__FUNCTION__, nd);
		return;
	}

	qdrv_sch_shared_data_lock(sd, flags);

	if (likely(M_FLAG_ISSET(skb, M_ENQUEUED_SCH))) {
		++sd->available_tokens;
		--nd->used_tokens;
	}

	/* Falling edge of the over-threshold condition */
	if (nd->over_thresh && under_thresh) {
		qdrv_sch_node_reactivate(nd);
		nd->over_thresh = 0;
	}

	qdrv_sch_shared_data_unlock(sd, flags);
}
+
/*
 * Dequeue the head skb of one node's band queue, updating statistics
 * (sent or dropped_victim) and the qdisc length, and rotating the node
 * off the active list when its queue empties or it has consumed its
 * consecutive-dequeue allowance.  Returns NULL if the queue was empty.
 */
static __sram_text struct sk_buff *
qdrv_sch_dequeue_node_band(struct qdrv_sch_shared_data *sd, struct qdrv_sch_node_band_data *nbd,
		uint8_t band, bool dropped_victim)
{
	struct qdrv_sch_node_data *nd = qdrv_sch_get_node_data(nbd, band);
	struct qdrv_sch_shared_band_data *sbd = &sd->bands[band];
	int empty;
	int limit_hit;
	struct sk_buff *skb;
	unsigned long flags;

	qdrv_sch_shared_data_lock(sd, flags);

	if (unlikely(skb_queue_len(&nbd->queue) == 0)) {
		qdrv_sch_shared_data_unlock(sd, flags);
		return NULL;
	}

	skb = __skb_dequeue(&nbd->queue);
	++sbd->consec_dequeues;
	if (dropped_victim) {
		++nbd->dropped_victim;
	} else {
		++nbd->sent;
	}
	--nd->qdisc->q.qlen;

	/* Fairness: cap back-to-back dequeues from one node per band */
	limit_hit = (sbd->consec_dequeues %
			QDRV_SCH_SHARED_AC_DATA_DEQUEUE_LIMIT) == 0;
	empty = skb_queue_len(&nbd->queue) == 0;

	/* remove or rotate this node for this AC */
	if (empty || limit_hit) {
		sbd->consec_dequeues = 0;
		qdrv_sch_band_active_dequeue(sbd, nbd);
	}

	/* Re-queue at the tail (rotation) unless throttled */
	if (!empty && !nd->over_thresh) {
		qdrv_sch_band_active_enqueue(sbd, nbd);
	}

	qdrv_sch_shared_data_unlock(sd, flags);

	return skb;
}
+
/*
 * Dequeue the next skb from one band: skip (and deactivate) any nodes
 * that are over threshold, then pull from the first eligible node.
 * Returns NULL when the band has no eligible traffic.
 */
static __sram_text struct sk_buff *
qdrv_sch_dequeue_band(struct qdrv_sch_shared_data *sd, uint8_t band, bool dropped_victim)
{
	struct qdrv_sch_shared_band_data *sbd = &sd->bands[band];
	struct qdrv_sch_node_band_data *nbd;
	struct qdrv_sch_node_data *nd;
	unsigned long flags;

	qdrv_sch_shared_data_lock(sd, flags);

	/* Skip any node that is over threshold and remove it from the active list */
	while ((nbd = TAILQ_FIRST(&sbd->active_nodes)) != NULL) {
		nd = qdrv_sch_get_node_data(nbd, band);
		if (!nd->over_thresh) {
			break;
		}
		qdrv_sch_band_active_dequeue(sbd, nbd);
	}

	qdrv_sch_shared_data_unlock(sd, flags);

	if (!nbd) {
		return NULL;
	}

	/* NOTE(review): nbd is used after dropping the lock; dequeue_node_band
	 * re-locks and re-checks queue length, but confirm nbd cannot be freed
	 * in the window between these two critical sections. */
	return qdrv_sch_dequeue_node_band(sd, nbd, band, dropped_victim);
}
+
+/*
+ * Dequeue the next frame across all bands without updating statistics.
+ *
+ * A previously requeued ("held") skb, if any, takes priority over the
+ * band queues; the held slot is cleared and the holding qdisc's length
+ * decremented.  Otherwise bands are polled in priority order.
+ *
+ * Returns the next skb, or NULL if nothing is queued.
+ */
+__sram_text struct sk_buff *qdrv_sch_dequeue_nostat(struct qdrv_sch_shared_data *sd,
+							struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	int prio;
+
+	/* cheap unlocked peek first; confirm under the lock before consuming */
+	if (unlikely(sd->held_skb)) {
+		qdrv_sch_shared_data_lock(sd, flags);
+		/* recheck state while locked */
+		if (unlikely(sd->held_skb)) {
+			skb = sd->held_skb;
+			--sd->held_skb_sch->q.qlen;
+			sd->held_skb = NULL;
+			sd->held_skb_sch = NULL;
+			qdrv_sch_shared_data_unlock(sd, flags);
+			return skb;
+		}
+		qdrv_sch_shared_data_unlock(sd, flags);
+	}
+
+	/* walk bands from highest to lowest priority */
+	for (prio = 0; prio < ARRAY_SIZE(sd->bands); prio++) {
+		skb = qdrv_sch_dequeue_band(sd, qdrv_sch_get_band(prio), 0);
+		if (skb) {
+			return skb;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Park an skb in the single "held" slot so it is returned first by the
+ * next qdrv_sch_dequeue_nostat() call.
+ *
+ * Only one skb can be held at a time.  If the slot is already occupied
+ * (stated below as "should never happen") the old occupant is dropped
+ * via the drop callback and -1 is returned; note the qdisc length is
+ * only incremented on the normal path, and the new skb replaces the old
+ * one in the slot either way.
+ *
+ * Returns 0 on success, -1 if an already-held skb had to be dropped.
+ */
+__sram_text int qdrv_sch_requeue(struct qdrv_sch_shared_data *sd, struct sk_buff *skb,
+					struct Qdisc *sch)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	qdrv_sch_shared_data_lock(sd, flags);
+
+	if (sd->held_skb) {
+		/* this should never happen */
+		sd->drop_callback(sd->held_skb);
+		rc = -1;
+	} else {
+		++sch->q.qlen;
+	}
+
+	sd->held_skb = skb;
+	sd->held_skb_sch = sch;
+
+	qdrv_sch_shared_data_unlock(sd, flags);
+
+	return rc;
+}
+
+/*
+ * Dequeue the next frame from a single node, scanning its bands in
+ * priority order.  Returns the skb, or NULL if all bands are empty.
+ */
+static __sram_text struct sk_buff *
+qdrv_sch_dequeue_node(struct qdrv_sch_node_data *nd, bool dropped_victim)
+{
+	struct qdrv_sch_shared_data *sd = nd->shared_data;
+	uint8_t prio;
+	struct sk_buff *skb;
+
+	for (prio = 0; prio < ARRAY_SIZE(sd->bands); prio++) {
+		uint8_t band = qdrv_sch_get_band(prio);
+
+		skb = qdrv_sch_dequeue_node_band(sd, &nd->bands[band], band, dropped_victim);
+		if (skb) {
+			return skb;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Drain every queued frame for a node through the drop callback,
+ * counting each as a dropped victim.  Returns the number of frames
+ * flushed.
+ */
+int qdrv_sch_flush_node(struct qdrv_sch_node_data *nd)
+{
+	struct sk_buff *skb;
+	struct qdrv_sch_shared_data *sd = nd->shared_data;
+	int flushed = 0;
+
+	while ((skb = qdrv_sch_dequeue_node(nd, 1)) != NULL) {
+		sd->drop_callback(skb);
+		++flushed;
+	}
+
+	return flushed;
+}
+
+/*
+ * Qdisc .dequeue callback: fetch the next frame for this node and run
+ * completion accounting (token return / reactivation) on it.
+ */
+static __sram_text struct sk_buff *qdrv_sch_dequeue(struct Qdisc* sch)
+{
+	struct sk_buff *skb;
+	struct qdrv_sch_node_data *nd = qdisc_priv(sch);
+
+	skb = qdrv_sch_dequeue_node(nd, 0);
+	if (skb) {
+		qdrv_sch_complete(nd, skb, 0);
+		return skb;
+	}
+
+	return NULL;
+}
+
+/*
+ * Try to drop a frame from a lower priority ac, preferring to drop
+ * from the enqueuing node over others.
+ *
+ * Bands are scanned from lowest priority upwards and the search stops
+ * once it reaches the enqueuing frame's own band; when any_node is set,
+ * other nodes' queues in each lower band are fair game too.
+ *
+ * Returns 0 if a victim was successfully dropped, 1 otherwise.
+ */
+static int qdrv_sch_enqueue_drop_victim(struct qdrv_sch_shared_data *sd,
+		struct qdrv_sch_node_data *preferred_victim, uint8_t band, uint8_t any_node)
+{
+	struct sk_buff *victim = NULL;
+	int prio;
+
+	/* lowest priority band first */
+	for (prio = ARRAY_SIZE(sd->bands) - 1; prio >= 0; prio--) {
+		int victim_band = qdrv_sch_get_band(prio);
+
+		/* never victimize the same or a higher priority band */
+		if (victim_band == band) {
+			break;
+		}
+
+		/* prefer to victimize the enqueuing node */
+		victim = qdrv_sch_dequeue_node_band(sd,
+				&preferred_victim->bands[victim_band], victim_band, 1);
+		if (victim) {
+			break;
+		}
+
+		/* otherwise drop from any node with lower priority data queued */
+		if (any_node) {
+			victim = qdrv_sch_dequeue_band(sd, victim_band, 1);
+			if (victim) {
+				break;
+			}
+		}
+	}
+
+	if (victim) {
+		sd->drop_callback(victim);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Enqueue a classified frame onto a node's per-band queue.
+ *
+ * If the drop-condition check fires, a lower-priority victim is dropped
+ * to make room (preferring this node); when no victim can be found the
+ * new frame itself is dropped and NET_XMIT_DROP returned.  On success
+ * token accounting is updated and, unless the node is over quota, the
+ * node is (re)activated on the band's active list.
+ *
+ * @nd:            target node
+ * @skb:           frame to enqueue (band already set by classifier)
+ * @is_over_quota: node exceeded its quota; mark it over-threshold so it
+ *                 is skipped by the dequeue path until it recovers
+ * @is_low_rate:   passed through to the drop-condition check
+ *
+ * Returns NET_XMIT_SUCCESS or NET_XMIT_DROP.
+ */
+int __sram_text qdrv_sch_enqueue_node(struct qdrv_sch_node_data *nd, struct sk_buff *skb,
+					bool is_over_quota, bool is_low_rate)
+{
+	struct qdrv_sch_node_band_data *nbd;
+	struct qdrv_sch_shared_data *sd;
+	struct qdrv_sch_shared_band_data *sbd;
+	uint8_t band;
+	unsigned long flags;
+	int rc;
+
+	band = qdrv_sch_band(skb);
+	nbd = &nd->bands[band];
+	sd = nd->shared_data;
+	sbd = &sd->bands[band];
+
+	qdrv_sch_shared_data_lock(sd, flags);
+
+	/* rc > 0 means a victim may be taken from any node, not just this one */
+	rc = qdrv_sch_enqueue_drop_cond(sd, nd, is_low_rate);
+	if (rc != 0) {
+		if (qdrv_sch_enqueue_drop_victim(sd, nd, band, (rc > 0))) {
+			sd->drop_callback(skb);
+			++nbd->dropped;
+			qdrv_sch_shared_data_unlock(sd, flags);
+			return NET_XMIT_DROP;
+		}
+	}
+
+	/* enqueue the new frame */
+	__skb_queue_tail(&nbd->queue, skb);
+	M_FLAG_SET(skb, M_ENQUEUED_SCH);
+	++nd->used_tokens;
+	--sd->available_tokens;
+	++nd->qdisc->q.qlen;
+
+	/* prevent dequeuing while over quota */
+	if (is_over_quota) {
+		if (!nd->over_thresh) {
+			nd->over_thresh = 1;
+			++nd->over_thresh_cnt;
+		}
+	} else {
+		qdrv_sch_band_active_enqueue(sbd, nbd);
+	}
+
+	qdrv_sch_shared_data_unlock(sd, flags);
+
+	return NET_XMIT_SUCCESS;
+}
+
+/*
+ * Qdisc .enqueue callback: classify the frame (sets its band/priority
+ * from the ethertype payload) and enqueue it on this node.
+ */
+static __sram_text int qdrv_sch_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct qdrv_sch_node_data *nd = qdisc_priv(sch);
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	uint16_t ether_type;
+	uint8_t *data_start = qdrv_sch_find_data_start(skb, eh, &ether_type);
+
+	qdrv_sch_classify(skb, ether_type, data_start);
+
+	return qdrv_sch_enqueue_node(nd, skb, 0, 0);
+}
+
+/*
+ * Common qdisc .init path for all scheduler types: zero the private
+ * data, attach to the shared scheduler data, init the per-band queues
+ * and create the sysfs entries.
+ *
+ * Returns 0 on success or a negative errno; on failure the shared-data
+ * reference is released (NOTE(review): also reached with a NULL
+ * shared_data when acquisition itself failed — presumably
+ * qdrv_sch_release_shared_data() tolerates NULL; confirm).
+ */
+static int qdrv_sch_common_init(struct Qdisc *sch, enum qdrv_sch_type sch_type)
+{
+	int error = 0;
+	struct qdrv_sch_node_data *q = qdisc_priv(sch);
+	int i;
+
+	/* Initialize private data */
+	memset(q, 0, sizeof(*q));
+
+	q->qdisc = sch;
+
+	/* Initialize shared data */
+	q->shared_data = qdrv_sch_acquire_shared_data(sch, &qdrv_sch_shared_data, sch_type);
+	if (!q->shared_data) {
+		printk(KERN_ERR"%s: cannot assign shared data\n", sch->ops->id);
+		error = -EINVAL;
+		goto error_quit;
+	}
+
+	q->used_tokens = 0;
+	/* Initialize all queues */
+	for (i = 0; i < QDRV_SCH_BANDS; ++i) {
+		skb_queue_head_init(&q->bands[i].queue);
+	}
+
+	/* Initialize sysfs */
+	error = qdrv_sch_sysfs_init(sch);
+	if (error) {
+		printk(KERN_ERR"%s: sysfs init failed %d\n", sch->ops->id, error);
+		goto error_quit;
+	}
+
+#if QDRV_SCH_PM
+	/* initialize power management */
+	qdrv_sch_pm_init();
+#endif
+
+	return 0;
+
+error_quit:
+	qdrv_sch_release_shared_data(&qdrv_sch_shared_data, q->shared_data);
+
+	printk(KERN_ERR"%s: failed to attach to %s: %d\n",
+		sch->ops->id, qdisc_dev(sch)->name, error);
+
+	return error;
+}
+
+/* .init for the "normal" scheduler variant; opt is unused */
+static int qdrv_sch_normal_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	return qdrv_sch_common_init(sch, QDRV_SCH_TYPE_NORMAL);
+}
+
+/* .init for the "red" scheduler variant; opt is unused */
+static int qdrv_sch_red_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	return qdrv_sch_common_init(sch, QDRV_SCH_TYPE_RED);
+}
+
+/* .init for the "join" scheduler variant; opt is unused */
+static int qdrv_sch_join_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	return qdrv_sch_common_init(sch, QDRV_SCH_TYPE_JOIN);
+}
+
+/*
+ * Qdisc .destroy callback: tear down PM/sysfs, drop any frame still
+ * held for this qdisc, give this node's tokens back to the shared pool,
+ * purge the per-band queues and release the shared-data reference.
+ */
+static void qdrv_sch_destroy(struct Qdisc *sch)
+{
+	struct qdrv_sch_node_data *q = qdisc_priv(sch);
+	int i;
+	unsigned long flags;
+	struct sk_buff *skb;
+
+#if QDRV_SCH_PM
+	qdrv_sch_pm_exit();
+#endif
+
+	qdrv_sch_sysfs_destroy(sch);
+
+	if (q->shared_data) {
+		/* drain the held slot (and next pending frame) via the drop callback */
+		skb = qdrv_sch_dequeue_nostat(q->shared_data, sch);
+		if (skb != NULL) {
+			q->shared_data->drop_callback(skb);
+		}
+		/* return this node's outstanding tokens to the shared pool */
+		qdrv_sch_shared_data_lock(q->shared_data, flags);
+		q->shared_data->available_tokens += q->used_tokens;
+		qdrv_sch_shared_data_unlock(q->shared_data, flags);
+	}
+
+	for (i = 0; i < QDRV_SCH_BANDS; ++i) {
+		__qdisc_reset_queue(sch, &q->bands[i].queue);
+	}
+
+	qdrv_sch_release_shared_data(&qdrv_sch_shared_data, q->shared_data);
+}
+
+/*
+ * Qdisc operation tables for the three scheduler variants.  They share
+ * every callback and differ only in their registered id and the .init
+ * hook (which selects the qdrv_sch_type).
+ */
+struct Qdisc_ops qdrv_sch_normal_qdisc_ops __read_mostly = {
+	.id		=	QDRV_SCH_NAME_NORMAL,
+	.priv_size	=	sizeof(struct qdrv_sch_node_data),
+	.enqueue	=	qdrv_sch_enqueue,
+	.dequeue	=	qdrv_sch_dequeue,
+	.peek		=	qdrv_sch_peek,
+	.init		=	qdrv_sch_normal_init,
+	.destroy	=	qdrv_sch_destroy,
+	.owner		=	THIS_MODULE,
+};
+
+struct Qdisc_ops qdrv_sch_red_qdisc_ops __read_mostly = {
+	.id		=	QDRV_SCH_NAME_RED,
+	.priv_size	=	sizeof(struct qdrv_sch_node_data),
+	.enqueue	=	qdrv_sch_enqueue,
+	.dequeue	=	qdrv_sch_dequeue,
+	.peek		=	qdrv_sch_peek,
+	.init		=	qdrv_sch_red_init,
+	.destroy	=	qdrv_sch_destroy,
+	.owner		=	THIS_MODULE,
+};
+
+struct Qdisc_ops qdrv_sch_join_qdisc_ops __read_mostly = {
+	.id		=	QDRV_SCH_NAME_JOIN,
+	.priv_size	=	sizeof(struct qdrv_sch_node_data),
+	.enqueue	=	qdrv_sch_enqueue,
+	.dequeue	=	qdrv_sch_dequeue,
+	.peek		=	qdrv_sch_peek,
+	.init		=	qdrv_sch_join_init,
+	.destroy	=	qdrv_sch_destroy,
+	.owner		=	THIS_MODULE,
+};
+
+/*
+ * Module init: set up the shared-data list and register the three
+ * scheduler qdiscs, unwinding earlier registrations on failure.
+ * Returns 0 or the failing register_qdisc() error.
+ */
+int qdrv_sch_module_init(void)
+{
+	int ret;
+
+	qdrv_sch_init_shared_data_list(&qdrv_sch_shared_data);
+
+	ret = register_qdisc(&qdrv_sch_normal_qdisc_ops);
+	if (ret) {
+		goto sch_normal_fail;
+	}
+
+	ret = register_qdisc(&qdrv_sch_red_qdisc_ops);
+	if (ret) {
+		goto sch_red_fail;
+	}
+
+	ret = register_qdisc(&qdrv_sch_join_qdisc_ops);
+	if (ret) {
+		goto sch_join_fail;
+	}
+
+	return 0;
+
+/* unwind in reverse registration order */
+sch_join_fail:
+	unregister_qdisc(&qdrv_sch_red_qdisc_ops);
+sch_red_fail:
+	unregister_qdisc(&qdrv_sch_normal_qdisc_ops);
+sch_normal_fail:
+	return ret;
+}
+
+/* Module exit: unregister the qdiscs and tear down the shared-data list */
+void qdrv_sch_module_exit(void)
+{
+	unregister_qdisc(&qdrv_sch_join_qdisc_ops);
+	unregister_qdisc(&qdrv_sch_red_qdisc_ops);
+	unregister_qdisc(&qdrv_sch_normal_qdisc_ops);
+	qdrv_sch_exit_shared_data_list(&qdrv_sch_shared_data);
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_sch_pm.c b/drivers/qtn/qdrv/qdrv_sch_pm.c
new file mode 100644
index 0000000..dc7b5f9
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_sch_pm.c
@@ -0,0 +1,112 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#include <linux/module.h>
+#include <asm/board/pm.h>
+#include "qdrv_sch_pm.h"
+#include <qtn/qdrv_sch.h>
+
+#if QDRV_SCH_PM
+
+/* packets enqueued since the last timer tick; sampled-and-reset by the timer */
+atomic_t qdrv_sch_pm_enqueued_counter = ATOMIC_INIT(0);
+
+static unsigned int pm_slow_counter = 0;	/* consecutive low-traffic timer ticks */
+static unsigned int pm_avg_enqueued = 0;	/* running average of enqueued packets per tick */
+static atomic_t pm_slow_state = ATOMIC_INIT(0);	/* 1 = slow-down power level requested */
+static atomic_t pm_init_counter = ATOMIC_INIT(0);	/* init/exit refcount */
+static struct timer_list pm_timer;		/* periodic traffic-sampling timer */
+static struct delayed_work pm_work;		/* deferred pm_qos level update */
+
+/*
+ * Reset the traffic-averaging state and schedule the deferred pm_qos
+ * update after 'delay'.
+ */
+static inline void qdrv_sch_pm_queue_work(unsigned long delay)
+{
+	pm_slow_counter = 0;
+	pm_avg_enqueued = 0;
+	pm_queue_work(&pm_work, delay);
+}
+
+/*
+ * Deferred work: apply the current pm_slow_state to the qdisc pm_qos
+ * requirement (slow-down level vs. the default level).
+ */
+static void qdrv_sch_pm_wq_func(struct work_struct *work)
+{
+	pm_qos_update_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_QDISC,
+		atomic_read(&pm_slow_state) ? BOARD_PM_LEVEL_SLOW_DOWN : PM_QOS_DEFAULT_VALUE);
+}
+
+/*
+ * Periodic timer: sample (and reset) the enqueue counter, maintain a
+ * running average and a consecutive-low-traffic counter, and flip
+ * pm_slow_state accordingly — speed up immediately when a burst crosses
+ * the speedup threshold, slow down only after sustained low traffic.
+ * State transitions are applied asynchronously via the work queue.
+ */
+static void qdrv_sch_pm_timer_func(unsigned long data)
+{
+	int enqueued = atomic_xchg(&qdrv_sch_pm_enqueued_counter, 0);
+
+	/* calculate average enqueued packets number */
+	if (pm_avg_enqueued == 0) {
+		pm_avg_enqueued = (enqueued << 1);
+	} else {
+		pm_avg_enqueued = ((enqueued + pm_avg_enqueued) >> 1);
+	}
+
+	/* update counter of how long low level of traffic observed */
+	if ((enqueued >= BOARD_PM_QDISC_SPEEDUP_THRESHOLD) ||
+			(pm_avg_enqueued >= BOARD_PM_QDISC_SLOWDOWN_THRESHOLD)) {
+		pm_slow_counter = 0;
+	} else if (pm_slow_counter < BOARD_PM_QDISC_SLOWDOWN_COUNT) {
+		++pm_slow_counter;
+	}
+
+	/* handle state transition; atomic_xchg makes each edge fire only once */
+	if (enqueued >= BOARD_PM_QDISC_SPEEDUP_THRESHOLD) {
+		if (atomic_xchg(&pm_slow_state, 0)) {
+			qdrv_sch_pm_queue_work(BOARD_PM_QDISC_DEFAULT_TIMEOUT);
+		}
+	} else if (pm_slow_counter >= BOARD_PM_QDISC_SLOWDOWN_COUNT) {
+		if (!atomic_xchg(&pm_slow_state, 1)) {
+			qdrv_sch_pm_queue_work(BOARD_PM_QDISC_SLOWDOWN_TIMEOUT);
+		}
+	}
+
+	/* restart timer */
+	mod_timer(&pm_timer, jiffies + BOARD_PM_QDISC_TIMER_TIMEOUT);
+}
+
+/*
+ * Refcounted PM setup: only the first caller registers the pm_qos
+ * requirement (starting at the slow-down level, matching the initial
+ * pm_slow_state of 1) and starts the sampling timer.
+ */
+void qdrv_sch_pm_init(void)
+{
+	/* subsequent callers only bump the refcount */
+	if (atomic_add_return(1, &pm_init_counter) != 1) {
+		return;
+	}
+
+	pm_qos_add_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_QDISC, BOARD_PM_LEVEL_SLOW_DOWN);
+
+	INIT_DELAYED_WORK(&pm_work, qdrv_sch_pm_wq_func);
+
+	init_timer(&pm_timer);
+	pm_timer.function = qdrv_sch_pm_timer_func;
+	pm_timer.expires = jiffies + BOARD_PM_QDISC_TIMER_TIMEOUT;
+	add_timer(&pm_timer);
+
+	/* start in the slow state; the timer speeds up on traffic */
+	atomic_set(&pm_slow_state, 1);
+}
+
+/*
+ * Refcounted PM teardown: only the last caller stops the timer,
+ * flushes pending work and removes the pm_qos requirement.
+ */
+void qdrv_sch_pm_exit(void)
+{
+	if (atomic_sub_return(1, &pm_init_counter) != 0) {
+		return;
+	}
+
+	del_timer(&pm_timer);
+	pm_flush_work(&pm_work);
+	pm_qos_remove_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_QDISC);
+}
+
+#endif	/* QDRV_SCH_PM */
diff --git a/drivers/qtn/qdrv/qdrv_sch_pm.h b/drivers/qtn/qdrv/qdrv_sch_pm.h
new file mode 100644
index 0000000..02f3f16
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_sch_pm.h
@@ -0,0 +1,44 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef __QDRV_SCH_PM_H
+#define __QDRV_SCH_PM_H
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include <asm/atomic.h>
+
+#include "qdrv_sch_const.h"
+
+#define	QDRV_SCH_PM	0
+
+#if QDRV_SCH_PM
+void qdrv_sch_pm_init(void);
+void qdrv_sch_pm_exit(void);
+
+/* Count one enqueued packet; the PM timer samples and resets this counter */
+static inline __sram_text void qdrv_sch_enqueue_pm(void)
+{
+	extern atomic_t qdrv_sch_pm_enqueued_counter;
+	atomic_inc(&qdrv_sch_pm_enqueued_counter);
+}
+#endif	// QDRV_SCH_PM
+
+#endif // __QDRV_SCH_PM_H
+
diff --git a/drivers/qtn/qdrv/qdrv_sch_wmm.h b/drivers/qtn/qdrv/qdrv_sch_wmm.h
new file mode 100644
index 0000000..f8d3f51
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_sch_wmm.h
@@ -0,0 +1,32 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef __QDRV_SCH_WMM_H
+#define __QDRV_SCH_WMM_H
+
+/* pairs a band priority with its AIFSN (WMM arbitration inter-frame space number) */
+struct qdrv_sch_band_aifsn {
+	int band_prio;
+	int aifsn;
+};
+
+void qdrv_sch_set_remap_qos(u32 value);
+u32 qdrv_sch_get_remap_qos(void);
+
+#endif // __QDRV_SCH_WMM_H
diff --git a/drivers/qtn/qdrv/qdrv_show.c b/drivers/qtn/qdrv/qdrv_show.c
new file mode 100644
index 0000000..e8813e5
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_show.c
@@ -0,0 +1,494 @@
+/**
+  Copyright (c) 2014 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <qtn/shared_defs.h>
+#include <qtn/muc_phy_stats.h>
+#include <net80211/if_media.h>
+#include <net80211/ieee80211_var.h>
+
+#include "qdrv_show.h"
+#include "qdrv_control.h"
+
+#define QDRV_INTBASE_10			10
+
+#define QDRV_EMPTY_FILTER_IDX		0xFFFF
+
+#define QDRV_MIN_DBMVAL_BUFSIZE		6
+#define QDRV_SHORT_TEXT_BUFSIZE		8
+
+/* pre-formatted MCS/rate/bandwidth strings for one table row ("-" when unknown) */
+struct qdrv_mcs_info {
+	char mcs[QDRV_SHORT_TEXT_BUFSIZE];
+	char rate[QDRV_SHORT_TEXT_BUFSIZE];
+	char bw[QDRV_SHORT_TEXT_BUFSIZE];
+};
+
+/*
+ * Map a "show_assoc" group keyword to its enum value.
+ * Returns 0 on a match, -EINVAL for an unknown keyword.
+ */
+static int qdrv_show_assoc_parse_group(const char *text, enum qdrv_show_assoc_group *group)
+{
+	int retcode = 0;
+
+	if (strcmp(text, "state") == 0)
+		*group = QDRV_SHOW_ASSOC_STATE;
+	else if (strcmp(text, "ver") == 0)
+		*group = QDRV_SHOW_ASSOC_VER;
+	else if (strcmp(text, "phy") == 0)
+		*group = QDRV_SHOW_ASSOC_PHY;
+	else if (strcmp(text, "tx") == 0)
+		*group = QDRV_SHOW_ASSOC_TX;
+	else if (strcmp(text, "rx") == 0)
+		*group = QDRV_SHOW_ASSOC_RX;
+	else if (strcmp(text, "all") == 0)
+		*group = QDRV_SHOW_ASSOC_ALL;
+	else
+		retcode = -EINVAL;
+
+	return retcode;
+}
+
+/*
+ * Initialize show_assoc parameters to their defaults: state group,
+ * no MAC filter and an empty index filter.
+ */
+void qdrv_show_assoc_init_params(struct qdrv_show_assoc_params *params, struct qdrv_mac *mac)
+{
+	params->mac = mac;
+	params->show_group = QDRV_SHOW_ASSOC_STATE;
+	IEEE80211_ADDR_SET_NULL(params->filter_macaddr);
+	params->filter_idx = QDRV_EMPTY_FILTER_IDX;
+}
+
+/*
+ * Parse "show_assoc" command arguments into params.
+ *
+ * Each non-empty argument must be either a group keyword, a MAC
+ * address, or a decimal node index.  At most one group is accepted
+ * (with "phy all" as the only two-keyword combination) and at most one
+ * filter (MAC or index, not both).  Empty argv entries are skipped.
+ *
+ * Returns 0 on success, -EINVAL on any duplicate or unparsable token.
+ */
+int qdrv_show_assoc_parse_params(struct qdrv_show_assoc_params *params, int argc, char *argv[])
+{
+	int i;
+	const char* text;
+	unsigned long num;
+	enum qdrv_show_assoc_group curr_group = QDRV_SHOW_ASSOC_STATE;
+	unsigned char found_group = 0;
+	unsigned char found_filter = 0;
+	int retcode = 0;
+
+	if (!params)
+		return -EINVAL;
+
+	for (i = 0; i < argc; ++i) {
+		text = argv[i];
+
+		if (text && *text) {
+			if (qdrv_show_assoc_parse_group(text, &params->show_group) == 0) {
+				if (found_group) {
+					/* second keyword only valid as 'phy all' */
+					if ((curr_group == QDRV_SHOW_ASSOC_PHY)
+						&& (params->show_group == QDRV_SHOW_ASSOC_ALL)) {
+						/* detected 'phy all' */
+						params->show_group = QDRV_SHOW_ASSOC_PHY_ALL;
+					} else {
+						retcode = -EINVAL;
+						break;
+					}
+				}
+
+				curr_group = params->show_group;
+				found_group = 1;
+
+			} else if (qdrv_parse_mac(text, &params->filter_macaddr[0]) == 0) {
+				if (found_filter) {
+					retcode = -EINVAL;
+					break;
+				}
+
+				found_filter = 1;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			} else if (kstrtoul(text, QDRV_INTBASE_10, &num) == 0) {
+#else
+			} else if (strict_strtoul(text, QDRV_INTBASE_10, &num) == 0) {
+#endif
+				if (found_filter) {
+					retcode = -EINVAL;
+					break;
+				}
+
+				params->filter_idx = (uint16_t)num;
+
+				/* 0xFFFF is reserved as the "no filter" sentinel */
+				if (params->filter_idx == QDRV_EMPTY_FILTER_IDX) {
+					retcode = -EINVAL;
+					break;
+				}
+
+				found_filter = 1;
+			} else {
+				retcode = -EINVAL;
+				break;
+			}
+		}
+	}
+
+	return retcode;
+}
+
+/* Print the show_assoc usage line to the seq_file; data/num are unused */
+void qdrv_show_assoc_print_usage(struct seq_file *s, void *data, uint32_t num)
+{
+	if (!s)
+		return;
+
+	seq_printf(s, "Invalid parameter. show_assoc {state | ver | phy {all}| tx | rx | all}"
+		      " {macaddr | index}\n");
+}
+
+/*
+ * Print the association-state table: header, then one row for the
+ * given node, or all nodes when ni is NULL.
+ */
+static void qdrv_show_assoc_state(struct seq_file *s, struct ieee80211com *ic,
+		struct ieee80211_node *ni)
+{
+	seq_printf(s, "%-17s %4s %4s %6s %4s %8s %4s %7s %6s   %8s %12s %10s %16s\n",
+			"MAC", "Idx", "AID", "Type", "Mode", "Vendor", "BW", "Assoc", "Auth",
+			"BA State", "TDLS State", "VAP", "PowerSaveSchemes");
+
+	if (ni)
+		get_node_assoc_state(s, ni);
+	else
+		ic->ic_iterate_nodes(&ic->ic_sta, get_node_assoc_state, (void *)s, 1);
+}
+
+/*
+ * Print the version table: header, then one row for the given node,
+ * or all nodes when ni is NULL.
+ */
+static void qdrv_show_assoc_ver(struct seq_file *s, struct ieee80211com *ic,
+		struct ieee80211_node *ni)
+{
+	seq_printf(s, "%-17s %4s %-15s %-8s %-6s %-10s %-s\n",
+			"MAC", "Idx", "SW Version", "Platform", "HW Rev", "Timestamp", "Flags");
+
+	if (ni)
+		get_node_ver(s, ni);
+	else
+		ic->ic_iterate_nodes(&ic->ic_sta, get_node_ver, (void *)s, 1);
+}
+
+/*
+ * Decode a packed MCS stats word into display strings.
+ *
+ * The MCS column is printed as nss*100 + mcs; rate is the PHY rate in
+ * Mbit/s; bw is the bandwidth string suffixed with "h" (11n) or "v"
+ * (11ac).  A zero value, zero rate or unknown bandwidth renders as "-".
+ */
+static void qdrv_show_assoc_parse_mcs(uint32_t val, struct qdrv_mcs_info* m)
+{
+	unsigned char mcs = (unsigned char)(val  & QTN_STATS_MCS_RATE_MASK);
+	unsigned char nss = (unsigned char)MS(val, QTN_PHY_STATS_MCS_NSS);
+	unsigned phy_rate = MS(val, QTN_PHY_STATS_MCS_PHYRATE);
+	const char *bw;
+	const char *ht_mode;
+
+	if (val) {
+		snprintf(m->mcs, sizeof(m->mcs), "%3u", (unsigned)nss * 100 + mcs);
+
+		if (phy_rate) {
+			snprintf(m->rate, sizeof(m->rate), "%4uM", phy_rate);
+		} else {
+			strcpy(m->rate, "-");
+		}
+
+		switch(MS(val, QTN_PHY_STATS_MCS_BW)) {
+		case QTN_BW_20M:
+			bw = IEEE80211_BWSTR_20;
+			break;
+		case QTN_BW_40M:
+			bw = IEEE80211_BWSTR_40;
+			break;
+		case QTN_BW_80M:
+			bw = IEEE80211_BWSTR_80;
+			break;
+		default:
+			bw = NULL;
+			break;
+		}
+
+		switch(MS(val, QTN_PHY_STATS_MCS_MODE)) {
+		case QTN_PHY_STATS_MODE_11N:
+			ht_mode = "h";
+			break;
+		case QTN_PHY_STATS_MODE_11AC:
+			ht_mode = "v";
+			break;
+		default:
+			ht_mode = "";
+			break;
+		}
+
+		if (bw) {
+			snprintf(m->bw, sizeof(m->bw), "%s%s", bw, ht_mode);
+		} else {
+			strcpy(m->bw, "-");
+		}
+	} else {
+		/* undefined */
+		strcpy(m->mcs, "-");
+		strcpy(m->rate, "-");
+		strcpy(m->bw, "-");
+	}
+}
+
+/*
+ * Format a tenths-scaled dBm value as "intpart.tenth" into buf.
+ * A zero value is printed as "    - " unless zero_allowed is set.
+ * Returns 0, or -EINVAL when buf is NULL or too small.
+ */
+static int qdrv_show_assoc_conv_dbm2str(unsigned int val, unsigned char zero_allowed,
+		char* buf, unsigned int len)
+{
+	if (buf && (len > QDRV_MIN_DBMVAL_BUFSIZE)) {
+		if (val || zero_allowed) {
+			/* value is stored in tenths; ABS keeps the digit positive for negatives */
+			snprintf(buf, len, "%4d.%d", (int)val / 10, ABS((int)val) % 10);
+		} else {
+			strcpy(buf, "    - ");
+		}
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Print one labelled row of per-chain indications (e.g. RSSI/EVM)
+ * followed by a named summary value, all formatted via
+ * qdrv_show_assoc_conv_dbm2str().  Prints "error" and bails on a
+ * formatting failure.
+ */
+static void qdrv_show_assoc_print_indication(struct seq_file *s, const char *name,
+		const int32_t *values, unsigned int num,
+		int32_t sum_val, const char* sum_name, unsigned char zero_allowed)
+{
+	unsigned int i;
+	char tmpbuf[QDRV_SHORT_TEXT_BUFSIZE];
+
+	seq_printf(s, "  %-8s", name);
+
+	for (i = 0; i < num; ++i) {
+		if (qdrv_show_assoc_conv_dbm2str(values[i], zero_allowed,
+				tmpbuf, sizeof(tmpbuf)) != 0) {
+			seq_printf(s, "error\n");
+			return;
+		}
+
+		seq_printf(s, " %6s", tmpbuf);
+	}
+
+	/* summary value */
+	if (qdrv_show_assoc_conv_dbm2str(sum_val, zero_allowed, tmpbuf, sizeof(tmpbuf)) != 0) {
+		seq_printf(s, "error\n");
+		return;
+	}
+
+	seq_printf(s, "  %-3s %6s\n", sum_name, tmpbuf);
+}
+
+/* Print the PHY-stats column header; numbered comments index the columns */
+static void qdrv_show_assoc_print_phy_header(struct seq_file *s)
+{
+	/*                   3    4   5    6    7     8   9     10   11   12    13      14   15 */
+	const char* htx = "Pkts Qual  SNR MCS  Rate   BW Sca   RSSI Cost Fail AvgPER   Acks TxPwr";
+	/*                  16   17   18    19  20  21 */
+	const char* hrx = "Pkts MCS  Rate   BW Sym Cost";
+
+	/* header */
+	seq_printf(s, "%-17s %4s (TX) %s (RX) %s\n", "MAC", "Idx", htx, hrx);
+}
+
+/*
+ * Print one node's PHY-stats row (TX columns 1-15, RX columns 16-21).
+ * Signature matches ieee80211_iter_func so it can be used as a node
+ * iterator callback.
+ */
+static void qdrv_show_assoc_print_phy_detail(void *data, struct ieee80211_node *ni)
+{
+	struct seq_file *s = (struct seq_file *)data;
+	struct qtn_node_shared_stats_tx *tx = &ni->ni_shared_stats->tx[STATS_SU];
+	struct qtn_node_shared_stats_rx *rx = &ni->ni_shared_stats->rx[STATS_SU];
+	struct qdrv_mcs_info mi;
+	char rssi[QDRV_SHORT_TEXT_BUFSIZE];
+
+	ieee80211_update_node_assoc_qual(ni);
+
+	/* TX */
+	qdrv_show_assoc_parse_mcs(tx->last_mcs, &mi);
+
+	if (qdrv_show_assoc_conv_dbm2str(tx->avg_rssi_dbm, 0, rssi, sizeof(rssi)) != 0) {
+		seq_printf(s, "error\n");
+		return;
+	}
+
+	/*              1   2   3   4   5   6   7   8   9  10  11  12  13  14  15 */
+	seq_printf(s, "%pM %4u %9u %4u %4d %3s %5s %4s %3u %6s %4u %4u %6u %6u %5u",
+			ni->ni_macaddr,
+			IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+			tx->pkts,
+			ni->ni_linkqual,
+			ni->ni_snr,
+			mi.mcs,
+			mi.rate,
+			mi.bw,
+			tx->last_tx_scale,
+			rssi,
+			tx->cost,
+			tx->txdone_failed_cum,
+			tx->avg_per,
+			tx->acks,
+			ni->ni_txpower);
+
+	/* RX */
+	qdrv_show_assoc_parse_mcs(rx->last_mcs, &mi);
+
+	/* NOTE(review): the RX "Pkts" column (16) prints tx->pkts; this looks
+	 * like a copy/paste slip — confirm whether struct
+	 * qtn_node_shared_stats_rx has an rx packet counter that was intended
+	 * here before changing it. */
+	/*              16  17  18  19  20  21 */
+	seq_printf(s, " %9u %3s %5s %4s %3u %4u\n",
+			tx->pkts,
+			mi.mcs,
+			mi.rate,
+			mi.bw,
+			rx->last_rxsym,
+			rx->cost);
+}
+
+/*
+ * Print one node's PHY-stats row plus per-chain RSSI/RCPI/EVM/noise
+ * indication lines.  Signature matches ieee80211_iter_func.
+ *
+ * NOTE(review): the summary value is read at index NUM_ANT — this
+ * assumes the last_* arrays hold NUM_ANT per-chain entries plus one
+ * extra summary slot; confirm against the array declarations in
+ * qtn/muc_phy_stats.h.
+ */
+static void qdrv_show_assoc_print_phy_full(void *data, struct ieee80211_node *ni)
+{
+	struct seq_file *s = (struct seq_file *)data;
+	struct qtn_node_shared_stats_rx *rx = &ni->ni_shared_stats->rx[STATS_SU];
+	int i;
+	int evm_sum = 0;
+
+	qdrv_show_assoc_print_phy_detail(s, ni);
+
+	qdrv_show_assoc_print_indication(s, "RSSI:", rx->last_rssi_dbm, NUM_ANT,
+			rx->last_rssi_dbm[NUM_ANT], "avg", 0);
+
+	qdrv_show_assoc_print_indication(s, "RCPI:", rx->last_rcpi_dbm, NUM_ANT,
+			rx->last_rcpi_dbm[NUM_ANT], "max", 0);
+
+	/* EVM summary is computed here rather than read from the extra slot */
+	for (i = 0; i < NUM_ANT; i++) {
+		evm_sum += (int)rx->last_evm_dbm[i];
+	}
+
+	qdrv_show_assoc_print_indication(s, "EVM:", rx->last_evm_dbm, NUM_ANT,
+			(uint32_t)evm_sum, "sum", 1);
+
+	qdrv_show_assoc_print_indication(s, "HWNOISE:", rx->last_hw_noise, NUM_ANT,
+			rx->last_hw_noise[NUM_ANT], "avg", 0);
+}
+
+/*
+ * Print the PHY-stats table (detail rows, or the full per-chain form
+ * when 'full' is set) for one node or, when ni is NULL, for all nodes.
+ */
+static void qdrv_show_assoc_phy_stats(struct seq_file *s, struct ieee80211com *ic,
+		struct ieee80211_node *ni, unsigned char full)
+{
+	ieee80211_iter_func *print_phy_stats =
+			full ? qdrv_show_assoc_print_phy_full : qdrv_show_assoc_print_phy_detail;
+
+	qdrv_show_assoc_print_phy_header(s);
+
+	if (ni)
+		print_phy_stats(s, ni);
+	else
+		ic->ic_iterate_nodes(&ic->ic_sta, print_phy_stats, (void *)s, 1);
+}
+
+/*
+ * Print the TX-stats table: header, then one row for the given node,
+ * or all nodes when ni is NULL.
+ */
+static void qdrv_show_assoc_tx_stats(struct seq_file *s, struct ieee80211com *ic,
+		struct ieee80211_node *ni)
+{
+	seq_printf(s, "%-17s %4s %8s %10s %12s %8s %8s %10s %10s %10s %5s %7s\n",
+			"MAC (TX)", "Idx",  "Max PR", "Frames", "Bytes", "Errors",
+			"Drops", "Unicast", "Multicast", "Broadcast", "MaxQ", "Fails");
+
+	if (ni)
+		get_node_tx_stats(s, ni);
+	else
+		ic->ic_iterate_nodes(&ic->ic_sta, get_node_tx_stats, (void *)s, 1);
+}
+
+/*
+ * Print the RX-stats table: header, then one row for the given node,
+ * or all nodes when ni is NULL.
+ */
+static void qdrv_show_assoc_rx_stats(struct seq_file *s, struct ieee80211com *ic,
+		struct ieee80211_node *ni)
+{
+	seq_printf(s, "%-17s %4s %8s %8s %10s %12s %8s %8s %10s %10s %10s\n",
+			"MAC (RX)", "Idx", "Max PR", "PhyRate", "Frames", "Bytes", "Errors",
+			"Drops", "Unicast", "Multicast", "Broadcast");
+
+	if (ni)
+		get_node_rx_stats(s, ni);
+	else
+		ic->ic_iterate_nodes(&ic->ic_sta, get_node_rx_stats, (void *)s, 1);
+}
+
+/* Print every show_assoc section in turn (the "all" group) */
+static void qdrv_show_assoc_all(struct seq_file *s, struct ieee80211com *ic,
+		struct ieee80211_node *ni)
+{
+	seq_printf(s, "Assoc State:\n");
+	qdrv_show_assoc_state(s, ic, ni);
+
+	seq_printf(s, "\nVersion:\n");
+	qdrv_show_assoc_ver(s, ic, ni);
+
+	seq_printf(s, "\nTx Stats:\n");
+	qdrv_show_assoc_tx_stats(s, ic, ni);
+
+	seq_printf(s, "\nRx Stats:\n");
+	qdrv_show_assoc_rx_stats(s, ic, ni);
+
+	seq_printf(s, "\nPHY Stats:\n");
+	qdrv_show_assoc_phy_stats(s, ic, ni, 1);
+}
+
+/*
+ * Top-level show_assoc printer: resolve the optional node filter
+ * (MAC address or node index) and dispatch to the printer for the
+ * selected group.
+ *
+ * data is a struct qdrv_show_assoc_params; num is unused.  A node
+ * reference taken by the lookup is released at the end via
+ * ieee80211_free_node().
+ */
+void qdrv_show_assoc_print_stats(struct seq_file *s, void *data, uint32_t num)
+{
+	struct qdrv_show_assoc_params *params = (struct qdrv_show_assoc_params *)data;
+	struct qdrv_wlan *qw;
+	struct ieee80211com *ic;
+	struct ieee80211_node_table *nt;
+	struct ieee80211_node *ni = NULL;
+
+	if (!s || !params || !params->mac)
+		return;
+
+	qw = (struct qdrv_wlan*)params->mac->data;
+
+	if (!qw)
+		return;
+
+	ic = &qw->ic;
+	nt = &ic->ic_sta;
+
+	/* resolve the filter: MAC address takes precedence over index */
+	if (!IEEE80211_ADDR_NULL(params->filter_macaddr)) {
+		ni = ieee80211_find_node(nt, params->filter_macaddr);
+
+		if (!ni) {
+			seq_printf(s, "node %pM not found\n", params->filter_macaddr);
+			return;
+		}
+
+	} else if (params->filter_idx != QDRV_EMPTY_FILTER_IDX) {
+		ni = ieee80211_find_node_by_idx(ic, NULL, params->filter_idx);
+
+		if (!ni) {
+			seq_printf(s, "node index %u not found\n", (unsigned)params->filter_idx);
+			return;
+		}
+	}
+
+	/* ni == NULL means "all nodes" for every group printer */
+	switch(params->show_group) {
+	case QDRV_SHOW_ASSOC_VER:
+		qdrv_show_assoc_ver(s, ic, ni);
+		break;
+	case QDRV_SHOW_ASSOC_PHY:
+		qdrv_show_assoc_phy_stats(s, ic, ni, 0);
+		break;
+	case QDRV_SHOW_ASSOC_PHY_ALL:
+		qdrv_show_assoc_phy_stats(s, ic, ni, 1);
+		break;
+	case QDRV_SHOW_ASSOC_TX:
+		qdrv_show_assoc_tx_stats(s, ic, ni);
+		break;
+	case QDRV_SHOW_ASSOC_RX:
+		qdrv_show_assoc_rx_stats(s, ic, ni);
+		break;
+	case QDRV_SHOW_ASSOC_ALL:
+		qdrv_show_assoc_all(s, ic, ni);
+		break;
+	case QDRV_SHOW_ASSOC_STATE:
+	default:
+		qdrv_show_assoc_state(s, ic, ni);
+		break;
+	}
+
+	if (ni)
+		ieee80211_free_node(ni);
+}
diff --git a/drivers/qtn/qdrv/qdrv_show.h b/drivers/qtn/qdrv/qdrv_show.h
new file mode 100644
index 0000000..b31d468
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_show.h
@@ -0,0 +1,51 @@
+/**
+  Copyright (c) 2014 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_SHOW_H
+#define _QDRV_SHOW_H
+
+#include <linux/seq_file.h>
+#include "qdrv_mac.h"
+
+/* which section(s) of show_assoc output to print */
+enum qdrv_show_assoc_group {
+	QDRV_SHOW_ASSOC_STATE,
+	QDRV_SHOW_ASSOC_VER,
+	QDRV_SHOW_ASSOC_PHY,
+	QDRV_SHOW_ASSOC_PHY_ALL,	/* "phy all": PHY detail plus per-chain lines */
+	QDRV_SHOW_ASSOC_TX,
+	QDRV_SHOW_ASSOC_RX,
+	QDRV_SHOW_ASSOC_ALL
+};
+
+/* arguments for "show_assoc" command */
+struct qdrv_show_assoc_params {
+	struct qdrv_mac *mac;				/* MAC unit to report on */
+	enum qdrv_show_assoc_group show_group;		/* selected output group */
+	uint8_t filter_macaddr[IEEE80211_ADDR_LEN];	/* node filter by MAC (all-zero = unset) */
+	uint16_t filter_idx;				/* node filter by index (0xFFFF = unset) */
+};
+
+void qdrv_show_assoc_init_params(struct qdrv_show_assoc_params *params, struct qdrv_mac *mac);
+int qdrv_show_assoc_parse_params(struct qdrv_show_assoc_params *params, int argc, char *argv[]);
+void qdrv_show_assoc_print_usage(struct seq_file *s, void *data, uint32_t num);
+void qdrv_show_assoc_print_stats(struct seq_file *s, void *data, uint32_t num);
+
+
+#endif /* _QDRV_SHOW_H */
diff --git a/drivers/qtn/qdrv/qdrv_slab_def.h.in b/drivers/qtn/qdrv/qdrv_slab_def.h.in
new file mode 100644
index 0000000..7ebb7b1
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_slab_def.h.in
@@ -0,0 +1,75 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2013 Quantenna Communications Inc                   **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  Date        : 2013-02-19                                                 **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef _QDRV_SLAB_WATCH_H
+#define _QDRV_SLAB_WATCH_H
+
+/**
+ * For each field we want to monitor, add another line into
+ * this structure.
+ */
+struct qdrv_slab_watch {
+#define CACHE(x)	unsigned int stat_size_tot_alloc_##x; \
+	unsigned int stat_size_cur_alloc_##x; \
+	unsigned int stat_size_act_alloc_##x; \
+	unsigned int stat_size_hwm_alloc_##x;
+#define ZACHE(y)	unsigned int stat_tot_alloc_##y; \
+	unsigned int stat_cur_alloc_##y; \
+	unsigned int stat_act_alloc_##y; \
+	unsigned int stat_hwm_alloc_##y;
+#include "qdrv_slab_watch.h"
+#undef CACHE
+#undef ZACHE
+} __packed;
+
+enum qdrv_slab_index {
+#define CACHE(x)	QDRV_SLAB_IDX_SIZE_##x,
+#define ZACHE(x)	QDRV_SLAB_IDX_##x,
+#include "qdrv_slab_watch.h"
+#undef CACHE
+#undef ZACHE
+	QDRV_SLAB_IDX_MAX
+};
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_slab_watch.h b/drivers/qtn/qdrv/qdrv_slab_watch.h
new file mode 100644
index 0000000..23a9420
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_slab_watch.h
@@ -0,0 +1,11 @@
+CACHE(64)
+CACHE(96)
+CACHE(128)
+CACHE(192)
+CACHE(256)
+CACHE(512)
+CACHE(1024)
+CACHE(2048)
+CACHE(4096)
+CACHE(RX_BUF_SIZE_KMALLOC)
+ZACHE(skbuff_head_cache)
diff --git a/drivers/qtn/qdrv/qdrv_soc.c b/drivers/qtn/qdrv/qdrv_soc.c
new file mode 100644
index 0000000..f9f46f2
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_soc.c
@@ -0,0 +1,882 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/if_vlan.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_hal.h"
+#include "qdrv_muc.h"
+#include "qdrv_uc_print.h"
+#include "qdrv_dsp.h"
+#include "qdrv_auc.h"
+#include <qtn/registers.h>
+#include <qtn/shared_params.h>
+#include <qtn/txbf_mbox.h>
+#include <qtn/qtn_bb_mutex.h>
+#include <qtn/bootcfg.h>
+#include <net80211/if_ethersubr.h>
+#include <qtn/qtn_vlan.h>
+#include <asm/board/board_config.h>
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+#include "qdrv_radar.h"
+#include "qdrv_vap.h"
+#include "qdrv_config.h"
+#include "qtn/qdrv_bld.h"
+#include "qdrv_mu.h"
+#include "qtn/qdrv_sch.h"
+#include <qtn/lhost_muc_comm.h>
+#include <common/ruby_config.h>
+#include <qtn/qtn_global.h>
+#include <qtn/hardware_revision.h>
+#include <qtn/topaz_fwt_sw.h>
+
+extern unsigned int g_catch_fcs_corruption;
+extern unsigned int g_qos_q_merge;
+
+static int tqe_sem_en = 0;
+module_param(tqe_sem_en, int, 0644);
+
+static int env_wifi_hw = 0;
+module_param(env_wifi_hw, int, 0644);
+
+extern void qtn_show_info_register(void *fn);
+extern void qtn_show_info_unregister(void);
+extern int qtn_get_hw_config_id(void);
+
+struct _shared_params_alloc
+{
+	struct shared_params	params;
+	struct qtn_txbf_mbox	txbf_mbox;
+	struct qtn_muc_dsp_mbox	muc_dsp_mbox;
+	struct qtn_bb_mutex	bb_mutex;
+	struct qtn_csa_info	csa;
+	struct qtn_samp_chan_info	chan_sample;
+	struct qtn_scan_chan_info	chan_scan;
+	struct qtn_scs_info_set		scs_info_set;
+	struct qtn_remain_chan_info	remain_chan;
+	struct qtn_ocac_info	ocac;
+	struct qtn_meas_chan_info	chan_meas;
+#if QTN_SEM_TRACE
+	struct qtn_sem_trace_log        sem_trace_log;
+#endif
+#if defined(QBMPS_ENABLE)
+	struct qtn_bmps_info	bmps;
+#endif
+#ifdef CONFIG_NAC_MONITOR
+	struct nac_mon_info nac_mon;
+#endif
+};
+
+int qdrv_soc_cb_size(void)
+{
+	return(sizeof(struct qdrv_cb));
+}
+
+int qdrv_soc_start_vap(struct qdrv_cb *qcb, int devid, struct qdrv_mac *mac,
+	char *name, uint8_t *mac_addr, int opmode, int flags)
+{
+	if ((mac->enabled != 1) || (mac->data == NULL)) {
+		DBGPRINTF_E("MAC unit not enabled\n");
+		return(-1);
+	}
+
+	if (qdrv_wlan_start_vap((struct qdrv_wlan*)mac->data, name, mac_addr,
+				devid, opmode, flags) < 0) {
+		DBGPRINTF_E("Failed to start VAP\n");
+		return(-1);
+	}
+
+	return(0);
+}
+
+int qdrv_soc_stop_vap(struct qdrv_cb *qcb, struct qdrv_mac *mac, struct net_device *vdev)
+{
+	if ((mac->enabled != 1) || (mac->data == NULL)) {
+		DBGPRINTF_E("MAC unit not enabled\n");
+		return -1;
+	}
+
+	if (qdrv_wlan_stop_vap(mac, vdev) < 0) {
+		DBGPRINTF_E("qdrv_wlan_stop_vap failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+int qdrv_soc_stats(void *data, struct qdrv_mac *mac)
+{
+	if ((mac->enabled != 1) || (mac->data == NULL)) {
+		DBGPRINTF_E("MAC unit not enabled\n");
+		return(-1);
+	}
+
+	if (qdrv_wlan_stats(mac) < 0) {
+		DBGPRINTF_E("Failed to get statistics for VAP\n");
+		return(-1);
+	}
+
+	return(0);
+}
+
+static struct shared_params *params_bus = NULL;
+
+u_int32_t qdrv_soc_get_hostlink_mbox(void)
+{
+	return soc_shared_params->m2l_hostlink_mbox;
+}
+
+char *qdrv_soc_get_hw_desc(enum hw_opt_t bond_opt)
+{
+	char *desc = "unknown";
+	uint8_t rf_chipid = soc_shared_params->rf_chip_id;
+
+	if (bond_opt == 0)
+		bond_opt = soc_shared_params->hardware_options;
+
+	/*
+	 * These strings are in use by customers and should not be changed.
+	 * The platform ID must appear at the end of the string.
+	 */
+	switch (bond_opt) {
+	case HW_OPTION_BONDING_TOPAZ_QD840:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "4x4 11n/ac Data Only QD842";
+		else
+			desc = "4x4 11n/ac Data Only QD840";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV840:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "4x4 11ac FO RGMII/PCIe QV842";
+		else
+			desc = "4x4 11ac FO RGMII/PCIe QV840";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV840_2X4:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "2x4 11ac FO RGMII/PCIe QV842";
+		else
+			desc = "2x4 11ac FO RGMII/PCIe QV840";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV840C:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "4x4 11ac FO RGMII/PCIe QV842C";
+		else
+			desc = "4x4 11ac FO RGMII/PCIe QV840C";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV860:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "4x4 11ac FO RGMII DBDC QV862";
+		else
+			desc = "4x4 11ac FO RGMII DBDC QV860";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV860_2X2:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "2x2 11ac FO RGMII DBDC QV862";
+		else
+			desc = "2x2 11ac FO RGMII DBDC QV860";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV860_2X4:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "2x4 11ac FO RGMII DBDC QV862";
+		else
+			desc = "2x4 11ac FO RGMII DBDC QV860";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV860_3X3:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "3x3 11ac FO RGMII DBDC QV862";
+		else
+			desc = "3x3 11ac FO RGMII DBDC QV860";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV880:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "4x4 11ac FO RGMII DBDC QV882";
+		else
+			desc = "4x4 11ac FO RGMII DBDC QV880";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV880_2X2:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "2x2 11ac FO RGMII DBDC QV882";
+		else
+			desc = "2x2 11ac FO RGMII DBDC QV880";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV880_2X4:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "2x4 11ac FO RGMII DBDC QV882";
+		else
+			desc = "2x4 11ac FO RGMII DBDC QV880";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV880_3X3:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "3x3 11ac FO RGMII DBDC QV882";
+		else
+			desc = "3x3 11ac FO RGMII DBDC QV880";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV920:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "4x4 11ac PCIe Memoryless QV922";
+		else
+			desc = "4x4 11ac PCIe Memoryless QV920";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV920_2X4:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "2x4 11ac PCIe Memoryless QV922";
+		else
+			desc = "2x4 11ac PCIe Memoryless QV920";
+		break;
+	case HW_OPTION_BONDING_TOPAZ_QV940:
+		if (rf_chipid == CHIPID_DUAL)
+			desc = "2x4 11ac FO RGMII/PCIe QV942";
+		else
+			desc = "2x4 11ac FO RGMII/PCIe QV940";
+		break;
+	}
+
+	return desc;
+}
+
+char *qdrv_soc_get_hw_id(enum hw_opt_t bond_opt)
+{
+	char *hw_desc = qdrv_soc_get_hw_desc(bond_opt);
+
+	/* Last string in the hardware desc is the ID */
+	return strrchr(hw_desc, ' ') + 1;
+}
+
+uint32_t qdrv_soc_get_hw_options(void)
+{
+	if (!soc_shared_params)
+		return 0;
+
+	return soc_shared_params->hardware_options;
+}
+
+static const char *qdrv_hw_ver_descs[] = {
+	[HARDWARE_REVISION_UNKNOWN] = "unknown",
+	[HARDWARE_REVISION_RUBY_A] = "bbic3_rev_a",
+	[HARDWARE_REVISION_RUBY_B] = "bbic3_rev_b_c",
+	[HARDWARE_REVISION_RUBY_D] = "bbic3_rev_d",
+	[HARDWARE_REVISION_TOPAZ_A] = "bbic4_rev_a0",
+	[HARDWARE_REVISION_TOPAZ_B] = "bbic4_rev_a1",
+	[HARDWARE_REVISION_TOPAZ_A2] = "bbic4_rev_a2"
+};
+
+const char *qdrv_soc_get_hw_rev_desc(uint16_t hw_rev)
+{
+	if (hw_rev >= ARRAY_SIZE(qdrv_hw_ver_descs))
+		hw_rev = HARDWARE_REVISION_UNKNOWN;
+
+	return qdrv_hw_ver_descs[hw_rev];
+}
+EXPORT_SYMBOL(qdrv_soc_get_hw_rev_desc);
+
+static void qdrv_soc_revoke_params(void)
+{
+	if (soc_shared_params) {
+		dma_free_coherent(
+			NULL,
+			sizeof(struct _shared_params_alloc),
+			container_of(soc_shared_params, struct _shared_params_alloc, params),
+			(dma_addr_t)container_of(params_bus, struct _shared_params_alloc, params));
+		soc_shared_params = params_bus = NULL;
+	}
+
+	qtn_mproc_sync_shared_params_set(0);
+}
+
+static int read_hardware_revision(void)
+{
+	/*
+	 * BB should be active and out of reset. ok to query version register
+	 *
+	 * check that soft reset is low, and global enable is high
+	 */
+	int ret = HARDWARE_REVISION_UNKNOWN;
+
+	qtn_bb_mutex_enter(QTN_LHOST_SOC_CPU);
+
+	if (readl(RUBY_QT3_BB_GLBL_SOFT_RST) == 0x0) {
+		ret = _read_hardware_revision();
+	} else {
+		printk(KERN_ERR "%s called when BB in soft reset\n", __FUNCTION__);
+	}
+
+	qtn_bb_mutex_leave(QTN_LHOST_SOC_CPU);
+
+	return ret;
+}
+
+static uint8_t
+get_bootcfg_calstate(void)
+{
+	char tmpbuf[256];
+	char *varstart;
+	int calstate = QTN_CALSTATE_DEFAULT;
+
+	varstart = bootcfg_get_var("calstate", tmpbuf);
+	if (varstart != NULL) {
+		if (sscanf(varstart, "=%d", &calstate) != 1) {
+			calstate = QTN_CALSTATE_DEFAULT;
+		}
+	}
+
+	return calstate;
+}
+
+static uint8_t get_bootcfg_power_recheck(void)
+{
+	char tmpbuf[256];
+	char *varstart;
+	int recheck = 1;
+
+	varstart = bootcfg_get_var("power_recheck", tmpbuf);
+	if (varstart != NULL) {
+		sscanf(varstart, "=%d", &recheck);
+	}
+
+	return recheck;
+}
+
+static uint8_t
+get_bootcfg_post_mask(void)
+{
+	char tmpbuf[256];
+	char *varstart;
+	int post_mask = 0;
+
+	varstart = bootcfg_get_var("post_mask", tmpbuf);
+	if (varstart != NULL) {
+		if (sscanf(varstart, "=%d", &post_mask) != 1) {
+			post_mask = 0;
+		}
+	}
+
+	return post_mask;
+}
+
+static int
+get_ext_lna_gain_from_bootcfg(const char *var)
+{
+	char tmpbuf[256];
+	char *varstart;
+	int  value;
+	int  lna_gain = QTN_EXT_LNA_GAIN_MAX;
+
+	varstart = bootcfg_get_var(var, tmpbuf);
+	if (varstart != NULL) {
+		if (sscanf(varstart, "=%d", &value) == 1) {
+			if ((-127 < value) && (value < QTN_EXT_LNA_GAIN_MAX)) {
+				lna_gain = value;
+			}
+		}
+	}
+
+	return lna_gain;
+}
+
+static int
+get_bootcfg_tx_power_cal(void)
+{
+	char tmpbuf[256];
+	char *varstart;
+	int tx_power_cal = 0;
+
+	varstart = bootcfg_get_var("tx_power_cal", tmpbuf);
+	if (varstart != NULL) {
+		sscanf(varstart, "=%d", &tx_power_cal);
+	}
+
+	return tx_power_cal;
+}
+
+static int
+get_bootcfg_min_tx_power(void)
+{
+        char tmpbuf[256];
+        char *varstart;
+        int min_tx_power = 0;
+
+        varstart = bootcfg_get_var("min_tx_power", tmpbuf);
+        if (varstart != NULL) {
+                sscanf(varstart, "=%d", &min_tx_power);
+        }
+
+        return min_tx_power;
+}
+
+static int
+get_bootcfg_max_tx_power(void)
+{
+        char tmpbuf[256];
+        char *varstart;
+        int max_tx_power = 0;
+        
+        varstart = bootcfg_get_var("max_tx_power", tmpbuf);
+        if (varstart != NULL) {
+                sscanf(varstart, "=%d", &max_tx_power);
+        }
+
+        return max_tx_power;
+}
+
+static int
+qdrv_soc_publish_params(struct qdrv_cb *qcb)
+{
+	int ret = 0;
+	int current_wifi_hw = 0;
+	int current_rf_chip_id;
+	struct _shared_params_alloc *params_alloc = NULL, *params_alloc_bus = NULL;
+
+	/* Guard against a second call to this function */
+	qdrv_soc_revoke_params();
+
+	/* Allocate what we are going to publish.
+	 * The pointer can be used by any processor in the system,
+	 * so the published pointer must be a "bus" pointer.
+	 * If other processors want to convert the pointer, for example
+	 * to make it non-cacheable, they must remap it themselves.
+	 * Structure must be allocated using dma_alloc_coherent().
+	 */
+	if((params_alloc = (struct _shared_params_alloc*)dma_alloc_coherent(NULL,
+		sizeof(struct _shared_params_alloc), (dma_addr_t*)(&params_alloc_bus), GFP_KERNEL)) == NULL)
+	{
+		DBGPRINTF_E("%s: failed to alloc soc_shared_params\n", __FUNCTION__);
+		ret = -1;
+		goto bad;
+	}
+	memset(params_alloc, 0, sizeof(*params_alloc));
+
+	/* Initialize shared soc_shared_params structure */
+	soc_shared_params = &params_alloc->params;
+	params_bus = &params_alloc_bus->params;
+	soc_shared_params->hardware_id = qtn_get_hw_config_id();
+	memcpy(soc_shared_params->fw_version, QDRV_BLD_NAME,
+			MIN(QTN_FW_VERSION_LENGTH, strlen(QDRV_BLD_NAME)));
+
+	/* Initialize beamforming message box structure */
+	soc_shared_params->txbf_mbox_lhost = &params_alloc->txbf_mbox;
+	soc_shared_params->txbf_mbox_bus = &params_alloc_bus->txbf_mbox;
+	soc_shared_params->muc_dsp_mbox_lhost = &params_alloc->muc_dsp_mbox;
+	soc_shared_params->muc_dsp_mbox_bus = &params_alloc_bus->muc_dsp_mbox;
+	soc_shared_params->muc_dsp_mbox_lhost->muc_to_dsp_mbox = QTN_TXBF_MBOX_BAD_IDX;
+	soc_shared_params->muc_dsp_mbox_lhost->dsp_to_muc_mbox = QTN_TXBF_MBOX_BAD_IDX;
+	soc_shared_params->txbf_mbox_lhost->muc_to_dsp_ndp_mbox = QTN_TXBF_MBOX_BAD_IDX;
+	int i;
+	for (i = 0; i < ARRAY_SIZE(soc_shared_params->txbf_mbox_lhost->muc_to_dsp_action_frame_mbox); i++) {
+		soc_shared_params->txbf_mbox_lhost->muc_to_dsp_action_frame_mbox[i] = QTN_TXBF_MBOX_BAD_IDX;
+	}
+
+	soc_shared_params->txbf_mbox_lhost->dsp_to_host_mbox = QTN_TXBF_MBOX_BAD_IDX;
+
+	/* Initialize RIFS mode structure */
+	soc_shared_params->bb_mutex_lhost = &params_alloc->bb_mutex;
+	soc_shared_params->bb_mutex_bus = &params_alloc_bus->bb_mutex;
+
+	/* deferred channel switch */
+	soc_shared_params->csa_lhost = &params_alloc->csa;
+	soc_shared_params->csa_bus = &params_alloc_bus->csa;
+
+	/* cca scan */
+	soc_shared_params->chan_sample_lhost = &params_alloc->chan_sample;
+	soc_shared_params->chan_sample_bus = &params_alloc_bus->chan_sample;
+
+	soc_shared_params->chan_scan_lhost = &params_alloc->chan_scan;
+	soc_shared_params->chan_scan_bus = &params_alloc_bus->chan_scan;
+
+	/* SCS info */
+	soc_shared_params->scs_info_lhost = &params_alloc->scs_info_set;
+	soc_shared_params->scs_info_bus = &params_alloc_bus->scs_info_set;
+
+	/* remain channel info */
+	soc_shared_params->remain_chan_lhost = &params_alloc->remain_chan;
+	soc_shared_params->remain_chan_bus = &params_alloc_bus->remain_chan;
+	/* ocac */
+	soc_shared_params->ocac_lhost = &params_alloc->ocac;
+	soc_shared_params->ocac_bus = &params_alloc_bus->ocac;
+
+	/* Measurement info */
+	soc_shared_params->chan_meas_lhost = &params_alloc->chan_meas;
+	soc_shared_params->chan_meas_bus = &params_alloc_bus->chan_meas;
+
+	/* remain channel info */
+	soc_shared_params->remain_chan_lhost = &params_alloc->remain_chan;
+	soc_shared_params->remain_chan_bus = &params_alloc_bus->remain_chan;
+
+#if QTN_SEM_TRACE
+	/* semaphore calltrace log */
+	soc_shared_params->sem_trace_log_lhost = &params_alloc->sem_trace_log;
+	soc_shared_params->sem_trace_log_bus = &params_alloc_bus->sem_trace_log;
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE,
+			"semaphore calltrace log buffer: %p %p\n",
+			soc_shared_params->sem_trace_log_lhost, soc_shared_params->sem_trace_log_bus);
+#endif
+#ifdef CONFIG_NAC_MONITOR
+	soc_shared_params->nac_mon_info = &params_alloc->nac_mon;
+	soc_shared_params->nac_mon_info_bus = &params_alloc_bus->nac_mon;
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE, "nac_mon_info %p bus %x\n",
+			soc_shared_params->nac_mon_info,
+			(uint32_t)soc_shared_params->nac_mon_info_bus);
+#endif
+
+	/* FIXME: to be replaced */
+	soc_shared_params->vdev_lhost = vdev_tbl_lhost;
+	soc_shared_params->vdev_bus = (struct qtn_vlan_dev **)virt_to_bus(vdev_tbl_bus);
+	soc_shared_params->vport_lhost = vport_tbl_lhost;
+	soc_shared_params->vport_bus = (struct qtn_vlan_dev **)virt_to_bus(vport_tbl_bus);
+	soc_shared_params->vlan_info = (struct qtn_vlan_info *)virt_to_bus(&qtn_vlan_info);
+	soc_shared_params->auc.vdev_bus = soc_shared_params->vdev_bus;
+	soc_shared_params->auc.vport_bus = soc_shared_params->vport_bus;
+
+#if defined(QBMPS_ENABLE)
+	/* bmps info */
+	soc_shared_params->bmps_lhost = &params_alloc->bmps;
+	soc_shared_params->bmps_bus = &params_alloc_bus->bmps;
+#endif
+
+	soc_shared_params->ipmac_table_bus = (struct topaz_ipmac_uc_table *)ipmac_hash_bus;
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE,
+			"txbf_mbox %p %p shared soc_shared_params %p %p bb_mutex %p %p\n",
+		soc_shared_params->txbf_mbox_bus, soc_shared_params->txbf_mbox_lhost, soc_shared_params, params_bus,
+			soc_shared_params->bb_mutex_bus, soc_shared_params->bb_mutex_lhost);
+
+	/* Fill shared parameters structure */
+	if (get_board_config( BOARD_CFG_WIFI_HW, &current_wifi_hw ) != 0) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE,
+				"%s: get board config returned error status\n", __FUNCTION__);
+		/* This error is relatively harmless, so carry on. */
+	}
+
+	/* Initialise flag for TQE hang WAR */
+#ifdef CONFIG_TOPAZ_PCIE_TARGET
+	soc_shared_params->tqe_sem_en = tqe_sem_en;
+	soc_shared_params->auc.auc_tqe_sem_en = tqe_sem_en;
+#else
+	soc_shared_params->tqe_sem_en = 0;
+	soc_shared_params->auc.auc_tqe_sem_en = 0;
+#endif
+
+	printk("%s: parames->tqe_sem_en %d, auc_tqe_sem_en %d\n", __FUNCTION__, soc_shared_params->tqe_sem_en,
+			soc_shared_params->auc.auc_tqe_sem_en);
+
+	soc_shared_params->lh_wifi_hw = current_wifi_hw;
+	if (get_board_config( BOARD_CFG_RFIC, &current_rf_chip_id ) != 0) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE,
+				"%s: get board config returned error status\n", __FUNCTION__);
+		/* This error is relatively harmless, so carry on. */
+	}
+	soc_shared_params->rf_chip_id = current_rf_chip_id;
+	printk("..... Current RFIC Chip ID -- %d\n", soc_shared_params->rf_chip_id );
+
+
+	memcpy(soc_shared_params->lh_mac_0, qcb->mac0, sizeof(soc_shared_params->lh_mac_0));
+	memcpy(soc_shared_params->lh_mac_1, qcb->mac1, sizeof(soc_shared_params->lh_mac_1));
+	soc_shared_params->lh_chip_id = (u_int16_t) readl( RUBY_SYS_CTL_CSR );
+	soc_shared_params->lh_num_devices = 1;
+
+	soc_shared_params->uc_flags = g_catch_fcs_corruption;
+	soc_shared_params->uc_flags |= g_qos_q_merge;
+	soc_shared_params->fw_no_mu = qcb->fw_no_mu;
+
+	soc_shared_params->hardware_revision = read_hardware_revision();
+	soc_shared_params->hardware_options = get_bootcfg_bond_opt();
+
+	soc_shared_params->shortrange_scancnt = get_bootcfg_scancnt();
+	soc_shared_params->ext_lna_gain = get_ext_lna_gain_from_bootcfg("ext_lna_gain");
+	soc_shared_params->ext_lna_bypass_gain = get_ext_lna_gain_from_bootcfg("ext_lna_bypass_gain");
+	soc_shared_params->tx_power_cal = get_bootcfg_tx_power_cal();
+	soc_shared_params->min_tx_power = get_bootcfg_min_tx_power();
+	soc_shared_params->max_tx_power = get_bootcfg_max_tx_power();
+
+	/* This slow ethernet check is done twice in qdrv as sc is initialized at this point */
+	soc_shared_params->slow_ethernet_war = board_slow_ethernet();
+	soc_shared_params->iot_tweaks = QTN_IOT_DEFAULT_TWEAK;
+	soc_shared_params->calstate = get_bootcfg_calstate();
+	soc_shared_params->post_rfloop = (get_bootcfg_post_mask() & 0x4) ? 1 : 0;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE,
+			"System rev: %08X\n",
+			soc_shared_params->lh_chip_id);
+	/*
+	 * CPUs will access the shared parameters.
+	 */
+	qtn_mproc_sync_shared_params_set((struct shared_params*)params_bus);
+
+	return ret;
+
+bad:
+	qdrv_soc_revoke_params();
+	return ret;
+}
+
+static void qtn_show_info (void) {
+	printk("\nFirmware build version: %s", QDRV_BLD_NAME);
+	printk("\nFirmware configuration: %s", QDRV_CFG_TYPE);
+	printk("\nHardware ID           : %d\n", qtn_get_hw_config_id());
+}
+
+int qdrv_start_dsp_only(struct device *dev)
+{
+	struct qdrv_cb *qcb;
+	int retval = 0;
+
+#ifdef QTN_RC_ENABLE_HDP
+	/* for RC only: if not PCIE_TQE_INTR_WORKAROUND ignore the dsp fw download */
+	if (!((readl(RUBY_SYS_CTL_CSR) & 0xff) == TOPAZ_BOARD_REVB))
+		return retval;
+#endif
+
+	qcb = (struct qdrv_cb *) dev_get_drvdata(dev);
+	qcb->dev = dev;
+
+	/* Bring up the DSP */
+	retval = qdrv_dsp_init(qcb);
+	if(retval == 0)
+		qcb->resources |= QDRV_RESOURCE_DSP;
+
+	return retval;
+}
+
+int qdrv_soc_init(struct device *dev)
+{
+	struct qdrv_cb *qcb;
+	int retval = 0;
+	int error_code = 0;
+
+	/* Get the private device data */
+	qcb = (struct qdrv_cb *) dev_get_drvdata(dev);
+
+	/* MATS FIX This should be initialized better somewhere else */
+	qcb->instances = 1;
+
+	/* Make sure we have firmware image specified for the MuC */
+	if(qcb->muc_firmware[0] == '\0')
+	{
+		error_code = 0x00000001;
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* Set the device in the control block */
+	qcb->dev = dev;
+
+	/* initiate the power control block */
+	qcb->power_table_ctrl.power_recheck = get_bootcfg_power_recheck();
+
+	/*
+	 * Reset the SoC.
+	 *
+	 * Must be called *before* qdrv_soc_publish_params, so that BB is out of reset
+	 * when reading the version register
+	 */
+	hal_reset();
+
+	/* Publish SoC parameters */
+	if (qdrv_soc_publish_params(qcb) < 0)
+	{
+		error_code = 0x00000002;
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* Register scheduler */
+	if(qdrv_sch_module_init() != 0)
+	{
+		error_code = 0x00000004;
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* Initialize the MAC 0 device */
+	if(qdrv_mac_init(&qcb->macs[0], qcb->mac0, 0, IRQ_MAC0_0, &qcb->params) < 0)
+	{
+		error_code = 0x00000010;
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* Mark that we have successfully allocated a resource */
+	qcb->resources |= QDRV_RESOURCE_MAC0;
+
+	/* Ruby (ARC host) does not support MAC 1 */
+#ifndef CONFIG_ARC
+	/* Initialize the MAC 1 device */
+	if(qdrv_mac_init(&qcb->macs[1], qcb->mac1, 1, IRQ_MAC0_1, &qcb->params) < 0)
+	{
+		error_code = 0x00000020;
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* Mark that we have successfully allocated a resource */
+	qcb->resources |= QDRV_RESOURCE_MAC1;
+#endif
+
+	/* Initialize the message module */
+	if(qdrv_comm_init(qcb) < 0)
+	{
+		error_code = 0x00000040;
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* Mark that we have successfully allocated a resource */
+	qcb->resources |= QDRV_RESOURCE_COMM;
+
+	/* Bring up the DSP */
+	if(qdrv_dsp_init(qcb) != 0)
+	{
+		error_code = 0x00000100;
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* Mark that we have successfully allocated a resource */
+	qcb->resources |= QDRV_RESOURCE_DSP;
+
+	/* Initialise MuC print buf */
+	if(qdrv_uc_print_init(qcb) != 0) {
+		DBGPRINTF_E("Could not initialise MuC shared print buffer!\n");
+	} else {
+		qcb->resources |= QDRV_RESOURCE_UC_PRINT;
+	}
+
+	/* Bring up the MuC  */
+	if(qdrv_muc_init(qcb) != 0)
+	{
+		error_code = 0x00000080;
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* Mark that we have successfully allocated a resource */
+	qcb->resources |= QDRV_RESOURCE_MUC;
+
+	/* Bring up the AuC  */
+	if(qdrv_auc_init(qcb) != 0)
+	{
+		error_code = 0x00000100;
+		retval = -ENODEV;
+		goto error;
+	}
+	if (qdrv_mu_stat_init(qcb) != 0)
+	{
+		error_code = 0x00000200;
+		retval = -ENODEV;
+		goto error;
+	}
+	/* Mark that we have successfully allocated a resource */
+	qcb->resources |= QDRV_RESOURCE_AUC;
+
+	qtn_show_info_register(qtn_show_info);
+
+	/* That went well .... */
+	return(0);
+
+error:
+
+	DBGPRINTF_E("Failed with error code 0x%08x\n", error_code);
+
+	/* Clean up as much as we can */
+	(void) qdrv_soc_exit(dev);
+
+	/* Not so good .... */
+	return(retval);
+}
+
+int qdrv_soc_exit(struct device *dev)
+{
+	struct qdrv_cb *qcb;
+
+	/* Get the private device data */
+	qcb = dev_get_drvdata(dev);
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE, "Begin resources 0x%08x\n", qcb->resources);
+
+	qtn_show_info_unregister();
+
+	(void)qdrv_mu_stat_exit(qcb);
+	/* Release resources in reverse order */
+	if(qcb->resources & QDRV_RESOURCE_AUC)
+	{
+		(void) qdrv_auc_exit(qcb);
+		qcb->resources &= ~QDRV_RESOURCE_AUC;
+	}
+
+	/* Release resources in reverse order */
+	if(qcb->resources & QDRV_RESOURCE_DSP)
+	{
+		(void) qdrv_dsp_exit(qcb);
+		qcb->resources &= ~QDRV_RESOURCE_DSP;
+	}
+
+	if(qcb->resources & QDRV_RESOURCE_MUC)
+	{
+		(void) qdrv_muc_exit(qcb);
+		qcb->resources &= ~QDRV_RESOURCE_MUC;
+	}
+
+	if(qcb->resources & QDRV_RESOURCE_UC_PRINT)
+	{
+		(void) qdrv_uc_print_exit(qcb);
+		qcb->resources &= ~QDRV_RESOURCE_UC_PRINT;
+	}
+
+	if(qcb->resources & QDRV_RESOURCE_MAC0)
+	{
+		(void) qdrv_mac_exit(&qcb->macs[0]);
+		qcb->resources &= ~QDRV_RESOURCE_MAC0;
+	}
+
+	if(qcb->resources & QDRV_RESOURCE_MAC1)
+	{
+		(void) qdrv_mac_exit(&qcb->macs[1]);
+		qcb->resources &= ~QDRV_RESOURCE_MAC1;
+	}
+
+	if(qcb->resources & QDRV_RESOURCE_COMM)
+	{
+		(void) qdrv_comm_exit(qcb);
+		qcb->resources &= ~QDRV_RESOURCE_COMM;
+	}
+
+	if(qcb->resources & QDRV_RESOURCE_WLAN)
+	{
+		(void) qdrv_wlan_exit(&qcb->macs[0]);
+		qcb->resources &= ~QDRV_RESOURCE_WLAN;
+	}
+
+
+	qdrv_sch_module_exit();
+
+	qdrv_soc_revoke_params();
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE, "End resources 0x%08x\n", qcb->resources);
+
+	return(0);
+}
diff --git a/drivers/qtn/qdrv/qdrv_soc.h b/drivers/qtn/qdrv/qdrv_soc.h
new file mode 100644
index 0000000..34e56fe
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_soc.h
@@ -0,0 +1,203 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_SOC_H
+#define _QDRV_SOC_H
+
+#include <linux/workqueue.h>
+#include <linux/seq_file.h>
+#include <asm/io.h>
+#include <compat.h>
+
+#include <qtn/mproc_sync.h>
+
+#define  MAX_GPIO_PIN	31
+#define  MAX_GPIO_INTR	23
+
+
+#define HAL_REGISTER_TSF_LOW 0xE5053014 /* FIXME ADM: Variable based on HW version.*/
+#define HAL_REGISTER_TSF_HIGH 0xE5053018 /* FIXME ADM: Variable based on HW version.*/
+
+#define VERSION_SIZE	16
+
+struct qdrv_packet_counters	/* simple tx/rx packet tallies */
+{
+	u32	num_tx;	/* packets transmitted */
+	u32	num_rx;	/* packets received */
+}; 
+
+struct qdrv_packet_report	/* per-RF-chain packet counters */
+{
+	struct qdrv_packet_counters	rf1;	/* RF chain 1 */
+	struct qdrv_packet_counters	rf2;	/* RF chain 2 */
+};
+
+struct qdrv_cal_test_setting	/* calibration-test parameters reported via qdrv_cal_test_report */
+{
+	u8 antenna;
+	u8 mcs;
+	u8 bw_set;	/* bandwidth selection -- encoding defined by MuC; TODO confirm */
+	u8 pkt_len;
+	u8 is_eleven_N;	/* non-zero when testing in 802.11n mode */
+	u8 bf_factor_set;	/* beamforming factor -- semantics defined elsewhere */
+};
+
+#define QDRV_POWER_TABLE_FNAME_MAX_LEN		63
+#define	QDRV_POWER_TABLE_CHECKSUM_LEN		32	/* MD5 Hex */
+
+struct qdrv_power_table_checksum_entry	/* singly-linked list node: power-table file name + MD5 hex digest */
+{
+	struct qdrv_power_table_checksum_entry *next;
+	char fname[QDRV_POWER_TABLE_FNAME_MAX_LEN + 1];	/* NUL-terminated file name */
+	char checksum[QDRV_POWER_TABLE_CHECKSUM_LEN + 1];	/* NUL-terminated MD5 hex string */
+};
+
+struct qdrv_power_table_control	/* state for validating/selecting tx power tables */
+{
+	/* the checksum list of the power tables built into image */
+	struct qdrv_power_table_checksum_entry *checksum_list;
+	struct qdrv_power_table_checksum_entry *reading_checksum;	/* entry currently being read/filled */
+	uint8_t checksum_list_locked;	/* non-zero once the list may no longer change */
+	uint8_t power_selection;
+	uint8_t power_recheck;
+};
+
+struct qdrv_cb	/* top-level driver control block, stored as the device's drvdata */
+{
+	struct device *dev;
+	unsigned int resources;	/* bitmask of QDRV_RESOURCE_* flags below; drives ordered teardown in qdrv_soc_exit() */
+#define QDRV_RESOURCE_MAC0		0x00000008
+#define QDRV_RESOURCE_MAC1		0x00000010
+#define QDRV_RESOURCE_COMM		0x00000020
+#define QDRV_RESOURCE_MUC		0x00000040
+#define QDRV_RESOURCE_WLAN		0x00000080
+#define QDRV_RESOURCE_DSP		0x00000100
+#define QDRV_RESOURCE_AUC		0x00000200
+#define QDRV_RESOURCE_MUC_BOOTED	0x00000400
+#define QDRV_RESOURCE_UC_PRINT		0x00000800
+#define QDRV_RESOURCE_VAP_0		0x00001000
+#define QDRV_RESOURCE_VAP(unit)		(QDRV_RESOURCE_VAP_0 << (unit))
+
+	struct qdrv_mac macs[MAC_UNITS];	/* per-MAC-unit state */
+	u8 mac0[IEEE80211_ADDR_LEN];	/* MAC address for unit 0 */
+	u8 mac1[IEEE80211_ADDR_LEN];	/* MAC address for unit 1 */
+	u8 instances;
+	char muc_firmware[64];	/* firmware image paths for the three co-processors */
+	char dsp_firmware[64];
+	char auc_firmware[64];
+	char algo_version[VERSION_SIZE];
+#define QDRV_CMD_LENGTH	128
+	char command[QDRV_CMD_LENGTH];	/* scratch buffer for driver command strings */
+	struct workqueue_struct *hlink_work_queue;
+	struct work_struct comm_wq;
+	int rc;
+	int fw_no_mu;	/* non-zero when firmware lacks MU-MIMO support -- TODO confirm */
+
+	/* Generic driver read support (sequence file in /proc) */
+	void (*read_show)(struct seq_file *s, void *data, u32 num);
+	int read_start_num;
+	int read_num;
+	int read_decr;
+	void *read_data;
+
+	u32 value_from_muc;
+
+	/* Memory read specific */
+	u32 read_addr;
+	u32 read_count;
+	int values_per_line;
+
+	/* RF register value is now returned from the MuC */
+	u32 rf_reg_val;
+
+	/*
+	 * DSP GPIO pin levels.  All are reported,
+	 * but currently only pins 0 and 8 have any significance
+	 */
+	u32 dspgpios;
+
+	struct qdrv_packet_report	packet_report;
+
+	struct qdrv_power_table_control power_table_ctrl;
+
+	u8 current_gpio_pin;
+	u8 current_gpio_setting;
+	struct qdrv_mac_params params; /* MAC parameters configured prior to bringup of the device */
+	volatile u32 *hlink_mbox;	/* host-link mailbox register/shared word */
+
+	int temperature_rfic_external;
+	int temperature_rfic_internal;
+
+	int calstate_vpd;
+
+	union _qdrv_cal_test_report_u_{	/* one report format at a time, depending on the test run */
+		u32 tx_power[4];
+		int rssi[4];
+		struct qdrv_cal_test_setting setting;
+		int post_rfloop_success;
+		int pd_voltage_level[4];
+	}qdrv_cal_test_report;
+
+	struct qtn_cca_stats *cca_stats_all; /* pointing to continuous stats for all MAC units */
+};
+
+static __always_inline int sem_take(u32 sem, u32 bit)	/* try to acquire one bit of a hardware semaphore */
+{
+	return qtn_mproc_sync_set_hw_sem(sem, bit);	/* returns the helper's result directly */
+}
+
+static __always_inline void sem_give(u32 sem, u32 bit)	/* release a hardware semaphore bit taken via sem_take() */
+{
+	qtn_mproc_sync_clear_hw_sem(sem , bit);
+}
+
+static __always_inline void _writel_wmb(u32 val, u32 addr, int arc_bypass_cache)	/* 32-bit write followed by a write barrier */
+{
+	if (arc_bypass_cache)
+		arc_write_uncached_32((uint32_t *)addr, val);	/* force uncached store for memory shared with the MuC/AuC */
+	else
+		writel(val, addr);
+	wmb();
+	qtn_addr_wmb(addr);	/* additional platform per-address barrier */
+}
+/*
+ * FIXME: writel_wmb is not safe for updating memory shared with mainline,
+ * because arc-gcc (4.2.1) does not always bypass the cache for writel;
+ * writel_wmb therefore forces the uncached write path.
+ */
+#define writel_wmb_old(v, a) _writel_wmb((__force __u32)(v), (__force __u32)(a), 0)
+#define writel_wmb(v, a) _writel_wmb((__force __u32)(v), (__force __u32)(a), 1)
+
+
+int qdrv_soc_cb_size(void);
+int qdrv_soc_start_vap(struct qdrv_cb *qcb, int devid, struct qdrv_mac *mac,
+	char *name, uint8_t *mac_addr, int opmode, int flags);
+int qdrv_soc_stop_vap(struct qdrv_cb *qcb, struct qdrv_mac *mac, struct net_device *vdev);
+int qdrv_soc_stats(void *data, struct qdrv_mac *mac);
+uint32_t qdrv_soc_get_hostlink_mbox(void);
+uint32_t qdrv_soc_get_hw_options(void);
+char *qdrv_soc_get_hw_desc(enum hw_opt_t bond_opt);
+char *qdrv_soc_get_hw_id(enum hw_opt_t bond_opt);
+const char *qdrv_soc_get_hw_rev_desc(uint16_t hw_rev);
+int qdrv_soc_init(struct device *dev);
+int qdrv_soc_exit(struct device *dev);
+struct device *qdrv_soc_get_addr_dev(void);
+int qdrv_start_dsp_only(struct device *dev);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_tx.c b/drivers/qtn/qdrv/qdrv_tx.c
new file mode 100644
index 0000000..c3c99ed
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_tx.c
@@ -0,0 +1,2634 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <net/sch_generic.h>
+#include <net/ip6_checksum.h>
+#include <asm/hardware.h>
+#include <ruby/plat_dma_addr.h>
+#include <asm/board/dma_cache_ops.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include "qtn/qdrv_sch.h"
+#include "qdrv_bridge.h"
+#include "qdrv_muc_stats.h"
+#include "qdrv_pktlogger.h"
+#include "qdrv_vlan.h"
+#include <qtn/qtn_buffers.h>
+#include <qtn/qtn_global.h>
+#include <qtn/registers.h>
+#include <qtn/lhost_muc_comm.h>
+#include <qtn/qtn_vlan.h>
+#include <qtn/iputil.h>
+#include <net80211/if_llc.h>
+#include <net80211/if_ethersubr.h>
+#include <common/queue.h>
+#include <asm/cacheflush.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/inetdevice.h>
+#include <trace/skb.h>
+#include <trace/ippkt.h>
+#include "qtn/shared_defs.h"
+#ifdef CONFIG_IPV6
+#include <net/addrconf.h>
+#endif
+
+#include <qtn/topaz_tqe.h>
+#include <qtn/topaz_hbm.h>
+#include <qtn/topaz_fwt_sw.h>
+
+#define QDRV_WBSP_CTRL_DISABLED	0
+#define QDRV_WBSP_CTRL_ENABLED	1
+extern int qdrv_wbsp_ctrl;	/* defined elsewhere in the driver */
+
+#define HTTPS_PORT 443
+#define ETHER_TYPE_UNKNOWN 0XFFFF
+#define QTN_RSSI_FOR_AMPDU_91DBM (-910)	/* RSSI thresholds; names imply tenths of a dBm (-91.0 dBm etc.) */
+#define QTN_RSSI_FOR_AMPDU_88DBM (-880)
+#define QTN_RSSI_FOR_AMPDU_80DBM (-800)
+struct qdrv_tx_sch_priv {	/* private data attached to the tx scheduler */
+	struct qdrv_sch_shared_data *shared_data;
+	struct qdrv_wlan *qw;
+};
+
+static int qos_acm_remap[4] = {	/* per-AC fallback table used by qdrv_acm_bit_qos_remap() */
+	WMM_AC_BK,
+	WMM_AC_BK,
+	WMM_AC_BE,
+	WMM_AC_VI
+};
+
+static inline uint32_t qdrv_tx_data_max_count(const struct qdrv_wlan *qw)	/* max data tx descriptors (one slot reserved) */
+{
+	return qw->tx_if.list_max_size - 1;
+}
+
+static inline uint32_t qdrv_tx_80211_max_count(const struct qdrv_wlan *qw)	/* max queued 802.11 (mgmt) frames; qw unused */
+{
+	return QDRV_MAX_QUEUED_MGMT_FRAMES;
+}
+
+static inline bool is_qtn_oui_packet(unsigned char *pkt_header)	/* true for Quantenna OUI control frames */
+{
+	if ((pkt_header[0] == (QTN_OUI & 0xFF)) &&	/* bytes 0-2: QTN OUI, least-significant byte first */
+		(pkt_header[1] == ((QTN_OUI >> 8) & 0xFF)) &&
+		(pkt_header[2] == ((QTN_OUI >> 16) & 0xFF)) &&
+		(pkt_header[3] >= QTN_OUIE_WIFI_CONTROL_MIN) &&	/* byte 3: subtype within control range */
+		(pkt_header[3] <= QTN_OUIE_WIFI_CONTROL_MAX))
+		return true;
+	else
+		return false;
+}
+
+/*
+ * Find the first IP address for a device.
+ * Returns 0 when the device has no IPv4 configuration.
+ */
+__be32 qdrv_dev_ipaddr_get(struct net_device *dev)
+{
+	struct in_device *in_dev;
+	__be32 addr = 0;
+
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(dev);
+	if (in_dev && in_dev->ifa_list) {
+		addr = in_dev->ifa_list->ifa_address;	/* first entry on the interface address list */
+	}
+	rcu_read_unlock();
+
+	return addr;
+}
+
+int qdrv_get_br_ipaddr(struct qdrv_wlan *qw, __be32 *ipaddr) {	/* fetch bridge device's first IPv4 addr; -1 if no bridge */
+	if (qw->br_dev) {
+		*ipaddr = qdrv_dev_ipaddr_get(qw->br_dev);
+	} else {
+		return -1;	/* *ipaddr untouched in this case */
+	}
+
+	return 0;
+}
+
+int qdrv_is_bridge_ipaddr(struct qdrv_wlan *qw, __be32 ipaddr) {	/* 1 if ipaddr is any IPv4 addr of the bridge device */
+	struct in_device *in_dev;
+	struct in_ifaddr *addr;
+	int match = 0;
+
+	if (!qw->br_dev)
+		return 0;
+
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(qw->br_dev);
+	if (in_dev) {
+		for (addr = in_dev->ifa_list; addr; addr = addr->ifa_next) {	/* walk all addresses, not just the first */
+			if (addr->ifa_address == ipaddr) {
+				match = 1;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	return match;
+}
+
+#ifdef CONFIG_QVSP
+static __always_inline int
+qdrv_tx_strm_check(struct sk_buff *skb, struct qdrv_wlan *qw, struct ieee80211_node *ni,
+	struct ether_header *eh, uint16_t ether_type, uint8_t *data_start, uint8_t ac)	/* QVSP stream admission check for tx */
+{
+	struct iphdr *p_iphdr = (struct iphdr *)data_start;
+	uint32_t l2_header_len = data_start - skb->data;
+
+	if (qvsp_is_active(qw->qvsp) && ni &&
+			iputil_eth_is_ipv4or6(ether_type) &&
+			(skb->len >= (l2_header_len + sizeof(struct udphdr)	/* frame long enough to hold IP + UDP headers */
+				+ iputil_hdrlen(p_iphdr, skb->len - l2_header_len))) &&
+			(!IEEE80211_IS_MULTICAST(eh->ether_dhost) ||
+				iputil_is_mc_data(eh, p_iphdr))) {	/* unicast, or genuine multicast data */
+		return qvsp_strm_check_add(qw->qvsp, QVSP_IF_QDRV_TX, ni, skb, eh, p_iphdr,
+				skb->len - (data_start - skb->data), ac, WME_TID_UNKNOWN);
+	}
+
+	return 0;	/* not subject to stream control */
+}
+#endif
+
+static __sram_text void qdrv_tx_skb_return(struct qdrv_wlan *qw, struct sk_buff *skb)	/* give an skb back: update sched/node accounting, then recycle or free */
+{
+	struct ieee80211_node *ni = QTN_SKB_CB_NI(skb);
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+	struct qdrv_vap *qv;
+	unsigned long flags;
+
+	if (likely(ni)) {
+		qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+
+		local_irq_save(flags);	/* counters also touched from irq context */
+		QTN_SKB_CB_NI(skb) = NULL;
+		if (M_FLAG_ISSET(skb, M_ENQUEUED_MUC)) {
+			--ni->ni_tx_sch.muc_queued;	/* skb was counted as queued on the MuC */
+			--qv->muc_queued;
+		}
+
+		qdrv_sch_complete(&ni->ni_tx_sch, skb,
+			(ni->ni_tx_sch.muc_queued <= qw->tx_if.muc_thresh_low));	/* third arg: below-low-watermark hint */
+		local_irq_restore(flags);
+
+		ieee80211_free_node(ni);	/* drop the reference the skb held */
+	}
+
+	if (qtn_skb_recycle_list_push(recycle_list, &recycle_list->stats_qdrv, skb)) {
+		if (likely(qw)) {
+			qw->tx_stats.tx_min_cl_cnt = skb_queue_len(&recycle_list->list);
+		}
+	} else {
+		dev_kfree_skb_any(skb);	/* recycle list full/unwilling: free normally */
+	}
+}
+
+/*
+ * Special handling for ARP packets when in 3-address mode.
+ * Returns 0 if OK, or 1 if the frame should be dropped.
+ * The original skb may be copied and modified.
+ */
+static int __sram_text qdrv_tx_3addr_check_arp(struct sk_buff **skb, struct qdrv_wlan *qw,
+	uint8_t *data_start)
+{
+	struct ether_arp *arp = (struct ether_arp *)data_start;
+	struct sk_buff *skb1 = *skb;
+	struct sk_buff *skb2;
+	struct ieee80211_node *ni;
+	__be32 ipaddr = 0;
+
+	if ((skb1->len < (data_start - skb1->data) + sizeof(*arp)) ||	/* too short, or not a request/reply: pass through */
+		 (!(arp->ea_hdr.ar_op == __constant_htons(ARPOP_REQUEST)) &&
+			 !(arp->ea_hdr.ar_op == __constant_htons(ARPOP_REPLY)))) {
+		return 0;
+	}
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+		"ARP hrd=%04x pro=%04x ln=%02x/%02x op=%04x sha=%pM tha=%pM\n",
+		arp->ea_hdr.ar_hrd, arp->ea_hdr.ar_pro, arp->ea_hdr.ar_hln,
+		arp->ea_hdr.ar_pln, arp->ea_hdr.ar_op,
+		arp->arp_sha, arp->arp_tha);
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_BRIDGE,
+		"    sip=" NIPQUAD_FMT " tip=" NIPQUAD_FMT " new sha=%pM\n",
+		NIPQUAD(arp->arp_spa), NIPQUAD(arp->arp_tpa), qw->ic.ic_myaddr);
+
+	if (QDRV_FLAG_3ADDR_BRIDGE_ENABLED()) {
+		/* update the qdrv bridge table if doing 3-address mode bridging */
+		qdrv_br_uc_update_from_arp(&qw->bridge_table, arp);
+	} else {
+		/*
+		 * In basic 3-address mode, don't send ARP requests for our own
+		 * bridge IP address to the wireless network because the hack
+		 * below will associate it with the wireless MAC, making the
+		 * bridge IP address unreachable.  The bridge module will
+		 * respond to the request.
+		 */
+		if (arp->ea_hdr.ar_op == __constant_htons(ARPOP_REQUEST)) {
+			ipaddr = get_unaligned((uint32_t *)&arp->arp_tpa);	/* arp_tpa may not be 4-byte aligned */
+			if (qdrv_is_bridge_ipaddr(qw, ipaddr)) {
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE,
+						"Not forwarding ARP request for bridge IP ("
+						NIPQUAD_FMT ")\n",
+						NIPQUAD(ipaddr));
+				return 1;	/* caller drops the frame */
+			}
+		}
+	}
+
+	/*
+	 * ### Hack alert ###
+	 * In 3-addr mode, the source host address in upstream ARP packets from
+	 * the STA must be changed to the local wireless address.  Use a new
+	 * skb to avoid modifying the bridge's copy of the frame.
+	 */
+	skb2 = skb_copy(skb1, GFP_ATOMIC);
+	if (!skb2) {
+		DBGPRINTF_E("ARP buffer copy failed\n");
+		return 1;
+	}
+	ni = QTN_SKB_CB_NI(skb1);
+	if (ni) {
+		ieee80211_ref_node(ni);	/* skb2 inherits the node pointer; take an extra ref before skb1 releases its own */
+	}
+
+	/* The offset of the arp structure in the new skb is the same as in the old skb */
+	arp = (struct ether_arp *)(skb2->data + ((unsigned char *)arp - skb1->data));
+	IEEE80211_ADDR_COPY(&arp->arp_sha[0], qw->ic.ic_myaddr);	/* masquerade sender hardware address */
+
+	qdrv_tx_skb_return(qw, skb1);
+	*skb = skb2;	/* caller continues with the modified copy */
+
+	return 0;
+}
+
+#if defined(CONFIG_IPV6)
+static inline
+int qdrv_tx_icmpv6_should_masq(const struct icmp6hdr *icmpv6h,
+		const struct nd_opt_hdr *opt)	/* true for NS+source-LL-addr or NA+target-LL-addr, i.e. ND options that carry a MAC to rewrite */
+{
+	return ((icmpv6h->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION &&
+			opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR) ||
+		(icmpv6h->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT &&
+			opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR));
+}
+
+/*
+ * Special handling for IPv6 packets when in 3-address mode.
+ * Returns 0 if OK, or 1 if the frame should be dropped.
+ * The original skb may be copied and modified.
+ */
+static int __sram_text qdrv_tx_3addr_check_ipv6(struct sk_buff **skb, struct qdrv_wlan *qw,
+			uint8_t *data_start)
+{
+	struct sk_buff *skb1 = *skb;
+	struct ethhdr *eth = (struct ethhdr *)skb1->data;
+	struct ipv6hdr *ipv6h;
+	struct sk_buff *skb2;
+	struct ieee80211_node *ni;
+	struct icmp6hdr *icmpv6h;
+	struct nd_opt_hdr *opt;
+	int l3hdr_off = data_start - skb1->data;
+	int l4hdr_off;
+	int icmpv6_len;
+	uint8_t nexthdr;
+	int srcll_opt_ofst = 0;
+
+	if (skb1->len < l3hdr_off + sizeof(struct ipv6hdr))	/* not a full IPv6 header: leave untouched */
+		return 0;
+
+	skb2 = skb_copy(skb1, GFP_ATOMIC);	/* modify a private copy, not the bridge's frame */
+	if (!skb2) {
+		DBGPRINTF_E("SKB buffer copy failed\n");
+		return 1;
+	}
+
+	ni = QTN_SKB_CB_NI(skb1);
+	if (ni) {
+		ieee80211_ref_node(ni);	/* extra ref for skb2 before skb1's ref is released below */
+	}
+
+	data_start = skb2->data + (data_start - skb1->data);	/* same L3 offset inside the copy */
+
+	qdrv_tx_skb_return(qw, skb1);
+
+	eth = (struct ethhdr *)(skb2->data);
+	ipv6h = (struct ipv6hdr *)data_start;
+
+	l4hdr_off = iputil_v6_skip_exthdr(ipv6h, sizeof(struct ipv6hdr),
+			&nexthdr, skb2->len - l3hdr_off, NULL, NULL);	/* skip extension headers to find L4 */
+
+	if (nexthdr == IPPROTO_ICMPV6) {
+		icmpv6h = (struct icmp6hdr *)(data_start + l4hdr_off);
+
+		qdrv_br_ipv6uc_update_from_icmpv6(&qw->bridge_table, eth, ipv6h, icmpv6h);
+
+		srcll_opt_ofst = l3hdr_off + l4hdr_off
+			+ sizeof(struct icmp6hdr) + sizeof(struct in6_addr);	/* ND option follows ICMPv6 hdr + target addr */
+		if (skb2->len >= srcll_opt_ofst + sizeof(*opt) + ETH_ALEN) {
+			opt = (struct nd_opt_hdr *)(skb2->data + srcll_opt_ofst);
+			if (qdrv_tx_icmpv6_should_masq(icmpv6h, opt)) {
+				IEEE80211_ADDR_COPY((uint8_t *)(opt + 1), qw->ic.ic_myaddr);	/* rewrite link-layer addr option */
+				icmpv6_len = skb2->len - l3hdr_off - l4hdr_off;
+				/* re-calculate chksum */
+				icmpv6h->icmp6_cksum = 0;
+				icmpv6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+						icmpv6_len, IPPROTO_ICMPV6,
+						csum_partial(icmpv6h, icmpv6_len, 0));
+			}
+		}
+	}
+
+	IEEE80211_ADDR_COPY(eth->h_source, qw->ic.ic_myaddr);	/* masquerade ethernet source for 3-addr mode */
+	*skb = skb2;
+
+	return 0;
+}
+#endif
+
+/*
+ * Special handling for IP packets when in 3-address mode.
+ * Returns 0 if OK, or 1 if the frame should be dropped.
+ */
+static int __sram_text qdrv_tx_3addr_check_ip(struct sk_buff *skb,
+	struct qdrv_wlan *qw, struct ether_header *eh, uint8_t *data_start)
+{
+	struct iphdr *p_iphdr = (struct iphdr *)data_start;
+
+	if (skb->len < (data_start - skb->data) + sizeof(*p_iphdr)) {	/* truncated IP header: nothing to inspect */
+		return 0;
+	}
+
+	switch (p_iphdr->protocol) {
+	case IPPROTO_UDP:
+		qdrv_br_uc_update_from_dhcp(&qw->bridge_table, skb, p_iphdr);	/* learn from DHCP traffic */
+		break;
+	case IPPROTO_IGMP:
+		if (qdrv_br_mc_update_from_igmp(&qw->bridge_table,
+						skb, eh, p_iphdr) != 0) {
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE|QDRV_LF_PKT_TX,
+					"Dropping IGMP packet - "
+					"not last downstream client to unsubscribe\n");
+			return 1;	/* suppress leave while other clients remain subscribed */
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void __sram_text qdrv_tx_stop_queue(struct qdrv_wlan *qw)	/* stop the netif queue of every VAP netdev on this MAC */
+{
+	struct qdrv_mac *mac = qw->mac;
+	int i;
+
+	QDRV_TX_CTR_INC(8);
+
+	for (i = 0; i <= mac->vnet_last; ++i) {
+		if (mac->vnet[i]) {	/* slots may be sparse */
+			netif_stop_queue(mac->vnet[i]);
+		}
+	}
+}
+
+static void __sram_text qdrv_tx_wake_queue(struct qdrv_wlan *qw)	/* wake the netif queue of every VAP netdev on this MAC */
+{
+	struct qdrv_mac *mac = qw->mac;
+	int i;
+
+	QDRV_TX_CTR_INC(9);
+
+	for (i = 0; i <= mac->vnet_last; ++i) {
+		if (mac->vnet[i]) {	/* slots may be sparse */
+			netif_wake_queue(mac->vnet[i]);
+		}
+	}
+}
+
+static void __sram_text qdrv_tx_disable_queues(struct qdrv_wlan *qw)	/* stop all tx queues; detect a dead MuC when mgmt descriptors are exhausted */
+{
+	struct host_txif *txif = &qw->tx_if;
+	unsigned long flags;
+
+	if (qw->queue_enabled == 0) {	/* already disabled: nothing to do */
+		return;
+	}
+
+	TXSTAT(qw, tx_queue_stop);
+
+	QDRV_TX_CTR_INC(10);
+	spin_lock_irqsave(&qw->lock, flags);
+
+	qdrv_tx_stop_queue(qw);
+
+	qw->queue_enabled = 0;
+
+	if (unlikely(txif->txdesc_cnt[QDRV_TXDESC_MGMT] == 0)) {	/* no mgmt descriptors coming back: MuC presumed hung */
+		printk_once(DBGEFMT "MuC is dead!\n", DBGARG);
+		qw->mac->mgmt_dead = 1;
+		qdrv_mac_disable_irq(qw->mac, qw->txdoneirq);
+		qdrv_mac_die_action(qw->mac);
+	}
+
+	spin_unlock_irqrestore(&qw->lock, flags);
+}
+
+static void __sram_text qdrv_tx_enable_queues(struct qdrv_wlan *qw)	/* re-enable tx after descriptors became available */
+{
+	qw->queue_enabled = 1;
+	qdrv_tx_wake_queue(qw);
+
+	QDRV_TX_CTR_INC(11);
+	TXSTAT(qw, tx_done_enable_queues);
+}
+
+static __always_inline struct lhost_txdesc *qdrv_tx_done_mbox(struct qdrv_wlan *qw)	/* atomically fetch-and-clear the tx-done descriptor chain from the mailbox */
+{
+	uint32_t ret = readl(qw->scan_if.sc_req_mbox);	/* NOTE(review): mailbox lives in scan_if.sc_req_mbox -- field name suggests scan; confirm it is shared for tx-done */
+
+	if (likely(ret)) {
+		writel_wmb(0, qw->scan_if.sc_req_mbox);	/* clear so the MuC can post the next chain */
+	}
+
+	return (struct lhost_txdesc*)ret;	/* NULL when mailbox was empty */
+}
+
+static __sram_text void qdrv_tx_free_txdesc(struct qdrv_wlan *qw, struct lhost_txdesc *txdesc,
+						int is_80211_encap)	/* return a descriptor: data descs go on the free list, 802.11 descs back to the DMA pool */
+{
+	struct host_txif *txif = &qw->tx_if;
+
+	++qw->tx_if.txdesc_cnt[is_80211_encap];	/* count of available descriptors per type */
+
+	if (likely(!is_80211_encap)) {
+		txdesc->next = NULL;	/* append to the tail of the data free list */
+		if (txif->df_txdesc_list_tail) {
+			txif->df_txdesc_list_tail->next = txdesc;
+			txif->df_txdesc_list_tail = txdesc;
+		} else {
+			txif->df_txdesc_list_head = txdesc;	/* list was empty */
+			txif->df_txdesc_list_tail = txdesc;
+		}
+	} else {
+		dma_pool_free(txif->df_txdesc_cache,
+			txdesc->hw_desc.hd_va, txdesc->hw_desc.hd_pa);
+	}
+}
+
+static __sram_text struct lhost_txdesc *qdrv_tx_alloc_txdesc(struct qdrv_wlan *qw, int is_80211_encap)	/* take a descriptor from the free list, or allocate from the DMA pool */
+{
+	struct host_txif *txif = &qw->tx_if;
+	struct lhost_txdesc *ret = NULL;
+	unsigned long flags;
+
+	local_irq_save(flags);	/* free list is also manipulated from irq context */
+
+	if (likely(txif->df_txdesc_list_head)) {
+		ret = txif->df_txdesc_list_head;	/* pop from head */
+		txif->df_txdesc_list_head = txif->df_txdesc_list_head->next;
+		if (!txif->df_txdesc_list_head) {
+			txif->df_txdesc_list_tail = NULL;	/* list became empty */
+		}
+	} else {
+		dma_addr_t phys;
+		ret = (struct lhost_txdesc*)
+			dma_pool_alloc(txif->df_txdesc_cache, GFP_ATOMIC | GFP_DMA, &phys);
+		if (ret) {
+			ret->hw_desc.hd_pa = phys;	/* record both addresses for the eventual dma_pool_free() */
+			ret->hw_desc.hd_va = ret;
+		}
+	}
+
+	if (likely(ret)) {
+		--qw->tx_if.txdesc_cnt[is_80211_encap];	/* one fewer available descriptor of this type */
+	}
+
+	local_irq_restore(flags);
+
+	return ret;	/* NULL on pool exhaustion */
+}
+
+void __sram_text qdrv_tx_release_txdesc(struct qdrv_wlan *qw, struct lhost_txdesc* txdesc)	/* complete a descriptor: account, return its skb, then free the descriptor */
+{
+	int is_80211_encap = 1;	/* default when there is no skb to inspect */
+	struct sk_buff *skb = txdesc->skb;
+	struct ieee80211_node *ni;
+
+	skb = txdesc->skb;	/* NOTE(review): redundant re-read; initializer above already did this */
+	QDRV_TX_CTR_INC(38);
+
+	if (likely(skb)) {
+		QDRV_TX_CTR_INC(39);
+		is_80211_encap = QTN_SKB_ENCAP_IS_80211(skb);
+
+		trace_skb_perf_stamp_call(skb);
+		trace_skb_perf_finish(skb);
+
+		ni = QTN_SKB_CB_NI(skb);
+		if (likely(ni)) {
+			if (txdesc->hw_desc.hd_txstatus == QTN_TXSTATUS_TX_SUCCESS) {
+				TXSTAT(qw, tx_done_success);
+			}
+		}
+
+		qdrv_tx_skb_return(qw, skb);
+	}
+
+	qdrv_tx_free_txdesc(qw, txdesc, is_80211_encap);
+}
+
+static __always_inline void qdrv_tx_done(struct qdrv_wlan *qw)	/* drain the tx-done mailbox chain and release each completed descriptor */
+{
+	struct lhost_txdesc *iter;
+	struct lhost_txdesc *txdesc;
+	uint32_t cnt = 0;
+
+	QDRV_TX_CTR_INC(36);
+
+	iter = qdrv_tx_done_mbox(qw);	/* fetch-and-clear; NULL when nothing pending */
+
+	while (iter) {
+		QDRV_TX_CTR_INC(37);
+		txdesc = iter;
+		iter = (struct lhost_txdesc*)txdesc->hw_desc.hd_nextva_rev;	/* advance before txdesc is recycled */
+		cnt++;
+
+		if (txdesc->hw_desc.hd_status == MUC_TXSTATUS_DONE) {
+			qdrv_tx_release_txdesc(qw, txdesc);
+			TXSTAT(qw, tx_complete);
+		} else {
+			TXSTAT(qw, tx_done_muc_ready_err);	/* unexpected status: counted but descriptor NOT released */
+		}
+	}
+
+	if (!qw->queue_enabled && (cnt > 0)) {	/* descriptors came back: safe to restart tx */
+		QDRV_TX_CTR_INC(17);
+		qdrv_tx_enable_queues(qw);
+	}
+}
+
+static void __sram_text qdrv_tx_done_irq(void *arg1, void *arg2)	/* tx-done interrupt handler; arg1 is the qdrv_mac, arg2 unused */
+{
+	struct qdrv_mac *mac = (struct qdrv_mac *)arg1;
+	struct qdrv_wlan *qw = mac->data;
+
+	QDRV_TX_CTR_INC(13);
+	qdrv_tx_done(qw);
+}
+
+static __always_inline int qdrv_txdesc_queue_is_empty(struct qdrv_wlan *qw)	/* 1 when either the data or the mgmt descriptor pool is exhausted */
+{
+	struct host_txif *txif = &qw->tx_if;
+
+	if ((txif->txdesc_cnt[QDRV_TXDESC_DATA] == 0) ||
+			(txif->txdesc_cnt[QDRV_TXDESC_MGMT] == 0)) {
+		return 1;
+	}
+
+	return 0;
+}
+
+static int __sram_text qdrv_tx_done_chk(struct qdrv_wlan *qw)	/* disable queues when descriptors are exhausted; -1 if disabled, 0 otherwise */
+{
+	if (unlikely(qdrv_txdesc_queue_is_empty(qw))) {
+		QDRV_TX_CTR_INC(42);
+		qdrv_tx_disable_queues(qw);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void qdrv_tx_done_flush_vap_fail(struct qdrv_wlan *qw,
+		struct qdrv_vap *qv, unsigned long time_start)	/* deliberate panic: MuC failed to return packets within the flush timeout */
+{
+	struct host_txif *txif = &qw->tx_if;
+
+	panic(KERN_ERR
+		"%s MuC packets not returned: data %d/%d mgmt %d/%d muc_queued vap %u %u msecs\n",
+		__FUNCTION__,
+		txif->txdesc_cnt[QDRV_TXDESC_DATA],
+		qdrv_tx_data_max_count(qw),
+		txif->txdesc_cnt[QDRV_TXDESC_MGMT],
+		qdrv_tx_80211_max_count(qw),
+		qv->muc_queued,
+		jiffies_to_msecs((long)jiffies - (long)time_start));
+}
+
+void qdrv_tx_done_flush_vap(struct qdrv_vap *qv)	/* block until all of this VAP's packets return from the MuC; panics on 5s of no progress */
+{
+	struct qdrv_wlan *qw = qv->parent;
+	unsigned long time_start = jiffies;
+	unsigned long time_limit = time_start + HZ * 5;	/* deadline, pushed out on any progress */
+	int muc_queued_start = qv->muc_queued;
+	int muc_queued_last = muc_queued_start;
+	int delta;
+	unsigned long flags;
+
+	while (1) {
+		local_irq_save(flags);	/* qdrv_tx_done expects irq-off context */
+		qdrv_tx_done(qw);
+		local_irq_restore(flags);
+
+		delta = muc_queued_last - qv->muc_queued;
+		if (delta) {
+			time_limit = jiffies + HZ * 5;	/* progress: extend the deadline */
+		}
+		muc_queued_last = qv->muc_queued;
+
+		if (qv->muc_queued == 0) {
+			break;
+		} else if (time_after(jiffies, time_limit)) {
+			qdrv_tx_done_flush_vap_fail(qw, qv, time_start);	/* does not return (panics) */
+		}
+
+		msleep(1000 / HZ);	/* one tick between polls */
+	}
+
+	printk(KERN_INFO "%s: %d bufs retrieved in %u msecs\n",
+			__FUNCTION__, muc_queued_start,
+			jiffies_to_msecs(jiffies - time_start));
+}
+
+static __always_inline struct host_txdesc *qdrv_tx_prepare_hostdesc(
+	struct qdrv_wlan *qw, struct sk_buff *skb,
+	uint8_t devid, uint8_t tid, uint8_t ac, uint16_t node_idx_mapped, int is_80211_encap)	/* allocate and fill a hw descriptor for skb; NULL if pool is empty; devid currently unused here */
+{
+	struct lhost_txdesc *lhost_txdesc = qdrv_tx_alloc_txdesc(qw, is_80211_encap);
+	struct host_txdesc *txdesc;
+
+	QDRV_TX_CTR_INC(27);
+	if (!lhost_txdesc) {
+		QDRV_TX_CTR_INC(28);
+		return NULL;
+	}
+
+	lhost_txdesc->skb = skb;	/* descriptor keeps the skb until tx-done */
+
+	txdesc = &lhost_txdesc->hw_desc;
+	txdesc->hd_tid = tid;
+	txdesc->hd_node_idx = IEEE80211_NODE_IDX_UNMAP(node_idx_mapped);
+	txdesc->hd_txstatus = QTN_TXSTATUS_TX_ON_MUC;
+	txdesc->hd_wmmac = ac;
+	txdesc->hd_pktlen = skb->len;
+	txdesc->hd_ts = jiffies;	/* enqueue timestamp */
+	txdesc->hd_nextpa = 0;
+	txdesc->hd_seglen[0] = skb->len;	/* single segment covering the whole frame */
+	txdesc->hd_flags = 0;
+	txdesc->hd_muc_txdone_cb = NULL;
+	txdesc->hd_status = MUC_TXSTATUS_READY;	/* hand-off marker for the MuC */
+
+	return txdesc;
+}
+
+static int qdrv_is_old_intel(struct ieee80211_node *ni)	/* Intel peer without RX-STBC capability -- used for BA workarounds */
+{
+	u_int16_t peer_cap = IEEE80211_HTCAP_CAPABILITIES(&ni->ni_ie_htcap);
+
+	return (ieee80211_node_is_intel(ni) &&
+		!(peer_cap & IEEE80211_HTCAP_C_RXSTBC));
+}
+
+static int qdrv_tx_rssi_is_good_for_ba(struct ieee80211_node *ni, uint8_t tid)	/* gate BA setup on RSSI; blocks/unblocks the tid's BA state accordingly */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	int setup = 1;
+
+	if ((ni->rssi_avg_dbm <= QTN_RSSI_FOR_AMPDU_91DBM) ||	/* very weak, or weak with an old Intel peer */
+		(ni->rssi_avg_dbm <= QTN_RSSI_FOR_AMPDU_80DBM &&
+			qdrv_is_old_intel(ni))) {
+		setup = 0;
+	} else {
+		if (ni->ni_ba_tx[tid].state == IEEE80211_BA_BLOCKED) {
+			ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_NOT_ESTABLISHED, 0);	/* signal recovered: unblock */
+		}
+	}
+
+	if (!setup) {
+		ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_BLOCKED, 0);
+		if (ic->ic_htaddba)
+			(*ic->ic_htaddba)(ni, tid, 1);	/* notify lower layer of the blocked BA */
+	}
+
+	return setup;	/* 1 = RSSI acceptable for BA negotiation */
+}
+
+static __always_inline int
+qdrv_tx_ba_should_establish(struct ieee80211_node *ni, uint8_t tid, enum ieee80211_ba_state state)	/* decide whether tx BA negotiation should be (re)attempted for this tid */
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (vap->tx_ba_disable) {	/* administratively disabled */
+		return 0;
+	}
+
+	if ((ni->ni_qtn_flags & QTN_IS_INTEL_NODE)
+				&& !IEEE80211_NODE_IS_VHT(ni)) {	/* non-VHT Intel peers: additionally gate on RSSI */
+		if (!qdrv_tx_rssi_is_good_for_ba(ni, tid))
+			return 0;
+	}
+
+	return ((state == IEEE80211_BA_NOT_ESTABLISHED) ||
+		(state == IEEE80211_BA_REQUESTED) ||
+		(state == IEEE80211_BA_FAILED));
+}
+
+/*
+ * Start block ack negotiation if needed.
+ * Re-checks the BA state under the node's seqlock before scheduling the
+ * ADDBA task, so concurrent updates do not trigger duplicate requests.
+ */
+void qdrv_tx_ba_establish(struct qdrv_vap *qv,
+		struct ieee80211_node *ni, uint8_t tid)
+{
+	struct ieee80211_ba_tid *ba = &ni->ni_ba_tx[tid];
+	struct ieee80211vap *vap = &qv->iv;
+
+	if (unlikely(qdrv_tx_ba_should_establish(ni, tid, ba->state) &&	/* cheap unlocked pre-check */
+			ieee80211_node_is_authorized(ni) &&
+			((ni->ni_flags & IEEE80211_NODE_HT) ||
+			(ni->ni_flags & IEEE80211_NODE_VHT)))) {
+
+		enum ieee80211_ba_state state;
+		unsigned long state_deadline;
+		unsigned int seq;
+
+		do {	/* consistent snapshot of state + deadline via seqlock */
+			seq = read_seqbegin(&ba->state_lock);
+			state = ba->state;
+			state_deadline = ba->state_deadline;
+		} while (read_seqretry(&ba->state_lock, seq));
+
+		if (unlikely(qdrv_tx_ba_should_establish(ni, tid, state) &&
+				((state_deadline == 0) ||
+				 time_after_eq(jiffies, state_deadline)))) {	/* respect retry back-off */
+			IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni,
+					"start block ack negotiation on aid %d node_idx %d tid %d",
+					IEEE80211_NODE_AID(ni),
+					IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+					tid);
+			ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_REQUESTED,
+					IEEE80211_TX_BA_REQUEST_RETRY_TIMEOUT);
+			ieee80211_ref_node(ni);	/* ref held by the scheduled work */
+			if (!schedule_work(&ni->ni_tx_addba_task)) {
+				/* Already scheduled */
+				ieee80211_free_node(ni);
+			}
+		}
+	}
+}
+
+static __sram_text void qdrv_tx_dropped(struct ieee80211vap *vap, struct qdrv_wlan *qw,
+					struct ieee80211_node *ni)	/* bump drop counters on vap, node, and wlan; all args may be NULL */
+{
+	QDRV_TX_CTR_INC(58);
+	if (vap) {
+		vap->iv_devstats.tx_dropped++;
+	}
+
+	if (ni) {
+		QDRV_TX_CTR_INC(59);
+		IEEE80211_NODE_STAT(ni, tx_dropped);
+	}
+
+	if (qw) {
+		TXSTAT(qw, tx_drop_total);
+	}
+}
+
+/* If ACM bit is set for this AC, then this AC can't be  used. Lower the priority */
+static __always_inline void qdrv_acm_bit_qos_remap(struct sk_buff *skb, struct ieee80211com *ic)
+{
+	while (ic->ic_wme.wme_chanParams.cap_wmeParams[skb->priority].wmm_acm &&
+		(skb->priority != WMM_AC_BK)) {	/* BK is the floor; loop terminates there since remap[BK] == BK */
+		skb->priority = qos_acm_remap[skb->priority];
+	}
+}
+
+static void qdrv_tx_store_soc_ipaddr(struct ether_arp *arp, struct ieee80211_node *ni)	/* learn the SoC peer's IPv4 address from its ARP traffic */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	if (arp->ea_hdr.ar_op == __constant_htons(ARPOP_REQUEST) ||
+			arp->ea_hdr.ar_op == __constant_htons(ARPOP_REPLY)) {
+		if (IEEE80211_ADDR_EQ(&arp->arp_sha[0], &ic->soc_addr[0])) {	/* sender is the known SoC MAC */
+			ic->ic_soc_ipaddr = (arp->arp_spa[3] << 24) |	/* assemble host-order u32 from the byte array */
+					(arp->arp_spa[2] << 16) |
+					(arp->arp_spa[1] << 8) |
+					arp->arp_spa[0];
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX,
+				"client soc mac=%pM ip=" NIPQUAD_FMT "\n",
+				arp->arp_sha, NIPQUAD(arp->arp_spa));
+		}
+	}
+}
+
+static __sram_text void qdrv_tx_skb_drop(struct ieee80211vap *vap, struct qdrv_wlan *qw,
+					struct ieee80211_node *ni, struct sk_buff *skb,
+					enum trace_ippkt_drop_rsn drop_rsn)	/* account a drop (unless SHOULD_DROP) and return the skb */
+{
+	QDRV_TX_DBG(0, ni, "drop_rsn=%u\n", drop_rsn);
+	if (drop_rsn != TRACE_IPPKT_DROP_RSN_SHOULD_DROP) {	/* SHOULD_DROP means intentional: do not count as error */
+		qdrv_tx_dropped(vap, qw, ni);
+		trace_ippkt_dropped(drop_rsn, 1, 0);
+	}
+
+	if (skb) {
+		qdrv_tx_skb_return(qw, skb);
+	}
+}
+
+static __sram_text void qdrv_tx_sch_drop_callback(struct sk_buff *skb)	/* scheduler drop hook: resolve the owning vap/wlan, then drop via qdrv_tx_skb_drop() */
+{
+	struct ieee80211_node *ni = QTN_SKB_CB_NI(skb);
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct net_device *vdev = vap->iv_dev;
+	struct qdrv_vap *qv = netdev_priv(vdev);
+	struct qdrv_wlan *qw = qv->parent;
+
+	QDRV_TX_CTR_INC(46);
+	qdrv_tx_skb_drop(&qv->iv, qw, ni, skb, TRACE_IPPKT_DROP_RSN_SCH);
+}
+
+static uint8_t qdrv_tx_peer_use_4addr(const struct ieee80211_node *ni, const uint8_t *mac_be)	/* 1 if frames to mac_be via ni should use 4-address encapsulation */
+{
+	if (ni->ni_vap->iv_opmode == IEEE80211_M_WDS) {	/* WDS links always use 4-addr */
+		return 1;
+	}
+
+	/*
+	 * TODO FIXME check if peer supports 4 address frames
+	 * For the moment, assume Quantenna peers supports it,
+	 * and third party doesn't
+	 */
+	if (ni->ni_qtn_assoc_ie == NULL) {	/* no Quantenna assoc IE => third-party peer */
+		return 0;
+	}
+
+	if (memcmp(ni->ni_macaddr, mac_be, IEEE80211_ADDR_LEN)) {	/* destination behind the peer, not the peer itself */
+		return 1;
+	}
+
+	return 0;
+}
+static int qdrv_tx_fwt_use_4addr(void *_mac,
+		const uint8_t *mac_be, uint8_t port, uint8_t node_num)	/* FWT callback: 0/1 for 4-addr decision, -EINVAL if node unknown */
+{
+	struct qdrv_mac *mac = _mac;
+	struct qdrv_wlan *qw = mac->data;
+	struct ieee80211_node *ni;
+	uint8_t use_4addr;
+
+	if (port != TOPAZ_TQE_WMAC_PORT) {	/* only wireless-bound entries need 4-addr logic */
+		return 0;
+	}
+
+	ni = ieee80211_find_node_by_idx(&qw->ic, NULL, node_num);
+	if (ni) {
+		use_4addr = qdrv_tx_peer_use_4addr(ni, mac_be);
+		ieee80211_free_node(ni);	/* release the lookup reference */
+		return use_4addr;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * This path is used in station mode.  In AP mode, EAPOL packets are sent directly from hostapd to
+ * ieee80211_ioctl_txeapol() in the WLAN driver.
+ * Returns the skb to continue transmitting, or NULL when it was consumed here.
+ */
+static struct sk_buff *qdrv_tx_encap_eapol(struct qdrv_vap *const qv,
+		struct ieee80211_node *const ni, struct sk_buff *const skb)
+{
+	struct qdrv_wlan *qw = qv->parent;
+	const struct ether_header *const eh = (const struct ether_header *) skb->data;
+
+	/* Ignore EAPOLs to WDS peers and EAPOLs not originating from the BSS node */
+	if (qv->iv.iv_opmode == IEEE80211_M_WDS ||
+			memcmp(eh->ether_shost, qv->iv.iv_myaddr, ETH_ALEN) != 0) {
+		return skb;	/* not ours to handle: send as-is */
+	}
+
+	ieee80211_eap_output(qv->iv.iv_dev, skb->data, skb->len);	/* re-route through the EAP output path */
+	qdrv_tx_skb_drop(&qv->iv, qw, ni, skb, TRACE_IPPKT_DROP_RSN_AUTH);
+
+	return NULL;	/* original skb consumed */
+}
+
+static __sram_text int qdrv_tx_to_auc(struct qdrv_vap *qv, struct sk_buff *skb)	/* hand a frame to the TQE (AuC path); always reports NET_XMIT_SUCCESS */
+{
+	union topaz_tqe_cpuif_ppctl ctl;
+	struct ieee80211_node *ni = QTN_SKB_CB_NI(skb);
+	uint8_t port;
+	uint8_t node;
+	uint8_t tid;
+	uint8_t use_4addr = 0;
+	uint16_t misc_user = 0;
+	int tqe_queued;
+
+	if (likely(IEEE80211_NODE_IDX_VALID(skb->dest_port))) {
+		node = IEEE80211_NODE_IDX_UNMAP(skb->dest_port);
+		if (unlikely(node >= QTN_NCIDX_MAX)) {	/* defensive: fall back to the VAP's own node index */
+			DBGPRINTF_LIMIT_E("%s: invalid idx %u\n", __func__,
+				skb->dest_port);
+			node = IEEE80211_NODE_IDX_UNMAP(qv->iv.iv_vapnode_idx);
+		}
+	} else {
+		node = IEEE80211_NODE_IDX_UNMAP(qv->iv.iv_vapnode_idx);
+	}
+
+	if (QTN_SKB_ENCAP_IS_80211(skb)) {	/* pre-encapsulated 802.11 frames go to the MuC port */
+		port = TOPAZ_TQE_MUC_PORT;
+		misc_user = node;
+		if (QTN_SKB_ENCAP_IS_80211_MGMT(skb) || !(ni->ni_flags & IEEE80211_NODE_QOS))
+			tid = QTN_TID_MGMT;
+		else
+			tid = QTN_TID_WLAN;
+	} else {
+		const uint8_t *dstmac = skb->data;	/* ethernet frame: destination MAC leads */
+
+		port = TOPAZ_TQE_WMAC_PORT;
+                if (!(ni->ni_flags & IEEE80211_NODE_QOS)) {
+                        skb->priority = 0;
+                }
+                tid = WME_AC_TO_TID(skb->priority);
+		use_4addr = qdrv_tx_peer_use_4addr(ni, dstmac);
+		/* No AMSDU aggregation during training */
+		if (unlikely(M_FLAG_ISSET(skb, M_NO_AMSDU))) {
+			misc_user |= TQE_MISCUSER_L2A_NO_AMSDU;
+		}
+
+		if (unlikely(M_FLAG_ISSET(skb, M_RATE_TRAINING))) {
+			misc_user |= TQE_MISCUSER_L2A_RATE_TRAINING;
+		}
+	}
+
+	if (likely(ni)) {
+		/* always free ref when passing to TQE */
+		ieee80211_free_node(ni);
+	}
+
+	topaz_tqe_cpuif_ppctl_init(&ctl,
+			port, &node, 1, tid,
+			use_4addr, 1, 0, 1, misc_user);
+	tqe_queued = tqe_tx(&ctl, skb);
+	if (tqe_queued == NETDEV_TX_BUSY) {	/* TQE refused: frame is dropped, not retried */
+		kfree_skb(skb);
+	}
+
+	return NET_XMIT_SUCCESS;
+}
+
+/*
+ * If the node can only receive at a low rate, the qdisc queue size will be reduced to prevent
+ * large latencies.
+ */
+#define QDRV_TX_NODE_LOW_RATE	15
+#define QDRV_TX_NODE_LOW_RSSI	-900
+/*
+ * Return non-zero when the node's average tx RSSI (compared against
+ * QDRV_TX_NODE_LOW_RSSI, i.e. a negative scaled-dBm value) indicates a
+ * weak link.  Nodes without shared stats are treated as normal rate.
+ */
+static inline uint8_t qdrv_tx_nd_is_low_rate(struct ieee80211_node *ni)
+{
+	int mu = STATS_SU;
+	uint8_t low_rate;
+
+	/* Evaluate once instead of duplicating the condition in the return */
+	low_rate = (ni->ni_shared_stats &&
+		(ni->ni_shared_stats->tx[mu].avg_rssi_dbm < QDRV_TX_NODE_LOW_RSSI));
+
+	if (low_rate) {
+		/* avg_rssi_dbm is negative whenever this branch runs, so print it signed (was %u) */
+		QDRV_TX_DBG(3, ni, "avg_rssi_dbm=%d\n",
+			ni->ni_shared_stats->tx[mu].avg_rssi_dbm);
+	}
+
+	return low_rate;
+}
+
+/*
+ * Tell whether the node has reached its MuC queuing quota.  Frames may
+ * still be added to the queue while over quota, but they are not dequeued
+ * until the node's MuC queue drains below the threshold.
+ */
+static inline uint8_t qdrv_tx_nd_is_over_quota(struct qdrv_wlan *qw, struct ieee80211_node *ni)
+{
+	return (qw->tx_if.muc_thresh_high <= ni->ni_tx_sch.muc_queued);
+}
+
+/* Account one transmitted frame against its ethertype/IP-protocol counters */
+static __sram_text void qdrv_tx_stats_prot(struct net_device *vdev, struct sk_buff *skb)
+{
+	struct qdrv_vap *vap_priv = netdev_priv(vdev);
+
+	qdrv_wlan_stats_prot(vap_priv->parent, 1,
+			QTN_SKB_CB_ETHERTYPE(skb), QTN_SKB_CB_IPPROTO(skb));
+}
+
+/*
+ * Queue a data frame towards a node, either straight to the AuC or via the
+ * per-node scheduler.  A node reference must be held before calling this
+ * function; it is released during tx_done processing.
+ * Returns the enqueue status (NET_XMIT_*).
+ */
+static __sram_text int qdrv_tx_sch_enqueue_to_node(struct qdrv_wlan *qw,
+		struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+
+	QDRV_TX_CTR_INC(53);
+	skb->dev = qv->iv.iv_dev;
+
+	if (QDRV_WLAN_TX_USE_AUC(qw)) {
+		/* Pre-encapsulated 802.11 frames skip protocol statistics */
+		if (!QTN_SKB_ENCAP_IS_80211(skb))
+			qdrv_tx_stats_prot(skb->dev, skb);
+		return qdrv_tx_to_auc(qv, skb);
+	}
+
+	return qdrv_sch_enqueue_node(&ni->ni_tx_sch, skb,
+			qdrv_tx_nd_is_over_quota(qw, ni),
+			qdrv_tx_nd_is_low_rate(ni));
+}
+
+/*
+ * Traffic addressed to the BSS node may reach unauthorised peers in every
+ * mode except station mode.
+ */
+static __always_inline bool
+qdrv_tx_is_unauth_bcast_allowed(const struct ieee80211vap *vap, const struct ieee80211_node *ni)
+{
+	if (ni != vap->iv_bss)
+		return false;
+
+	return vap->iv_opmode != IEEE80211_M_STA;
+}
+
+/*
+ * Drop data destined for a node that is not yet authorised.  EAPOL frames
+ * and permitted BSS broadcasts are exempt.
+ * Returns 0 when the frame may proceed, -1 when it was dropped (skb freed,
+ * error counters updated).
+ */
+static __sram_text int
+qdrv_tx_unauth_node_data_drop(struct qdrv_wlan *qw, struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (likely(ieee80211_node_is_authorized(ni)))
+		return 0;
+	if (qdrv_tx_is_unauth_bcast_allowed(vap, ni))
+		return 0;
+	if (QTN_SKB_CB_ETHERTYPE(skb) == __constant_htons(ETH_P_PAE))
+		return 0;
+
+	vap->iv_stats.is_tx_unauth++;
+	vap->iv_devstats.tx_errors++;
+	IEEE80211_NODE_STAT(ni, tx_unauth);
+	IEEE80211_NODE_STAT(ni, tx_errors);
+	qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_AUTH);
+	TXSTAT(qw, tx_drop_auth);
+
+	return -1;
+}
+
+/*
+ * Bridge-isolation filter for outgoing frames.
+ * Returns 1 if the frame must not be forwarded to the wireless interface,
+ * 0 otherwise.
+ */
+static int qdrv_tx_is_br_isolate(struct qdrv_wlan *qw, struct sk_buff *skb,
+		uint16_t ether_type, uint8_t *l3_data_start)
+{
+	/*
+	 * The RGMII bridge interface is only used for RPC communication with the host
+	 * device, so do not forward ARP requests or replies to the wireless interface.
+	 */
+	if ((qw->br_isolate & QDRV_BR_ISOLATE_NORMAL)
+			&& (ether_type == __constant_htons(ETH_P_ARP))) {
+		/* assumes l3_data_start points at a complete ARP payload - TODO confirm caller validates length */
+		struct ether_arp *arp = (struct ether_arp *)l3_data_start;
+		__be32 ipaddr = 0;
+
+		/* Requests identify the bridge by target IP, replies by sender IP */
+		if (arp->ea_hdr.ar_op == __constant_htons(ARPOP_REQUEST)) {
+			ipaddr = get_unaligned((uint32_t *)&arp->arp_tpa);
+		} else if (arp->ea_hdr.ar_op == __constant_htons(ARPOP_REPLY)) {
+			ipaddr = get_unaligned((uint32_t *)&arp->arp_spa);
+		}
+
+		if (qdrv_is_bridge_ipaddr(qw, ipaddr))
+			return 1;
+	}
+
+
+	if (qw->br_isolate & QDRV_BR_ISOLATE_VLAN) {
+		uint16_t vlanid;
+
+		/* Isolating all VLANs: any tagged frame is blocked */
+		if (skb->vlan_tci && qw->br_isolate_vid == QVLAN_VID_ALL)
+			return 1;
+
+		/*
+		 * NOTE(review): an untagged frame yields vlanid 0 here and so
+		 * matches br_isolate_vid == 0 - confirm that is intended.
+		 */
+		vlanid = skb->vlan_tci & VLAN_VID_MASK;
+		if (qw->br_isolate_vid == vlanid)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Prepare and enqueue a data frame for transmission.
+ * If a node pointer is passed in, a node reference must have been acquired.
+ * Returns NET_XMIT_SUCCESS if the frame is enqueued.
+ * Returns NET_XMIT_DROP if the frame is dropped, in which case the skb and noderef have been freed.
+ *
+ * NOTE(review): ni is dereferenced unconditionally below (ni->ni_qtn_assoc_ie,
+ * ni->ni_flags, ...), so despite the wording above callers must always pass a
+ * valid node - confirm.
+ */
+static __sram_text int qdrv_tx_prepare_data_frame(struct qdrv_wlan *qw, struct qdrv_vap *qv,
+		struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = &qv->iv;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+	uint16_t ether_type;
+	uint8_t *data_start = qdrv_sch_find_data_start(skb, eh, &ether_type);
+	struct iphdr *iphdr_p = (struct iphdr *) data_start;
+	void *proto_data = NULL;
+	uint8_t ip_proto = iputil_proto_info(iphdr_p, skb, &proto_data, NULL, NULL);
+	/*
+	 * Speculative overlays on the payload; only dereferenced after the
+	 * matching protocol checks below.
+	 * NOTE(review): dhcp_msg is derived from udph even when proto_data is
+	 * NULL - harmless while only used under the UDP checks, but confirm
+	 * iputil_proto_info() always sets proto_data for UDP frames.
+	 */
+	struct udphdr *udph = proto_data;
+	struct dhcp_message *dhcp_msg = (struct dhcp_message*)((uint8_t*)udph +
+					sizeof(struct udphdr));
+	struct ether_arp *arp = (struct ether_arp *)data_start;
+	uint8_t tid = 0;
+	uint8_t ac = 0;
+#ifdef CONFIG_IPV6
+	uint32_t data_len;
+#endif
+	uint32_t ipaddr;
+	int drop = 0;
+
+	/* Derive skb->priority from the frame's QoS markings */
+	qdrv_sch_classify(skb, ether_type, data_start);
+
+	if (qv->iv.iv_opmode == IEEE80211_M_STA) {
+		qdrv_acm_bit_qos_remap(skb, vap->iv_ic);
+	}
+	/* Count IGMP (IPv4) and MLD (IPv6) frames under the same statistic */
+	if ((ether_type == __constant_htons(ETH_P_IP)) &&
+			(skb->len >= (data_start - skb->data) + sizeof(*iphdr_p)) &&
+			(ip_proto == IPPROTO_IGMP)) {
+		TXSTAT(qw, tx_igmp);
+#ifdef CONFIG_IPV6
+	} else if (ether_type == __constant_htons(ETH_P_IPV6)) {
+		data_len = skb->len - (data_start - skb->data);
+		if (iputil_eth_is_v6_mld(data_start, data_len)) {
+			TXSTAT(qw, tx_igmp);
+		}
+#endif
+	}
+
+#ifdef CONFIG_QVSP
+	/* Stream admission control (VSP) may veto the frame */
+	if (qdrv_tx_strm_check(skb, qw, ni, eh, ether_type, data_start, skb->priority) != 0) {
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_VSP);
+		TXSTAT(qw, tx_drop_vsp);
+		return NET_XMIT_DROP;
+	}
+#endif
+
+	/* drop Wi-Fi control messages (Quantenna OUI-encapsulated 802a frames) */
+	if (unlikely((qv->iv.iv_opmode == IEEE80211_M_STA) &&
+		     (ether_type == __constant_htons(ETHERTYPE_802A)) &&
+		     (data_start[0] == (QTN_OUI & 0xFF)) &&
+		     (data_start[1] == ((QTN_OUI >> 8) & 0xFF)) &&
+		     (data_start[2] == ((QTN_OUI >> 16) & 0xFF)) &&
+		     (data_start[3] >= QTN_OUIE_WIFI_CONTROL_MIN) &&
+		     (data_start[3] <= QTN_OUIE_WIFI_CONTROL_MAX))) {
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_CTRL);
+		return NET_XMIT_DROP;
+	}
+
+	if (qdrv_tx_is_br_isolate(qw, skb, ether_type, data_start)) {
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_RGMII);
+		return NET_XMIT_DROP;
+	}
+
+	/* 3-address bridge translation checks: non-WDS station mode only */
+	if ((qv->iv.iv_opmode == IEEE80211_M_STA) &&
+			!(qv->iv.iv_flags_ext & IEEE80211_FEXT_WDS) &&
+			!IEEE80211_IS_MULTICAST(eh->ether_shost)) {
+
+		if (QDRV_FLAG_3ADDR_BRIDGE_ENABLED()) {
+			if (ether_type == __constant_htons(ETH_P_IP)) {
+				drop = qdrv_tx_3addr_check_ip(skb, qw, eh, data_start);
+			}
+#if defined(CONFIG_IPV6)
+			else if (ether_type == __constant_htons(ETH_P_IPV6)) {
+				drop = qdrv_tx_3addr_check_ipv6(&skb, qw, data_start);
+			}
+#endif
+
+			if (drop) {
+				qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_3ADDR);
+				TXSTAT(qw, tx_drop_3addr);
+				return NET_XMIT_DROP;
+			}
+		}
+
+		if (ether_type == __constant_htons(ETH_P_ARP)) {
+			if (qdrv_tx_3addr_check_arp(&skb, qw, data_start) != 0) {
+				qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_3ADDR);
+				TXSTAT(qw, tx_drop_3addr);
+				return NET_XMIT_DROP;
+			}
+		}
+
+		/* skb may have been modified - adjust pointers */
+		eh = (struct ether_header *)skb->data;
+
+	} else if (qv->iv.iv_opmode == IEEE80211_M_HOSTAP && ni) {
+		/*
+		 * AP mode: snoop downstream DHCP and ARP replies to learn the
+		 * station's IP address for the per-node IP address filter.
+		 */
+		if (ether_type == __constant_htons(ETH_P_IP) &&
+				ip_proto == IPPROTO_UDP &&
+				udph->dest == __constant_htons(DHCPCLIENT_PORT) &&
+				!memcmp(&ni->ni_macaddr[0], &dhcp_msg->chaddr[0], ETHER_ADDR_LEN)) {
+
+			ni->ni_ip_addr = dhcp_msg->yiaddr;
+			ni->ni_ip_addr_filter = IEEE80211_IP_ADDR_FILTER_DHCP_RSP;
+		}
+
+		if (ether_type == __constant_htons(ETH_P_ARP) &&
+				arp->ea_hdr.ar_op == __constant_htons(ARPOP_REPLY) &&
+				!memcmp(&ni->ni_macaddr[0], &arp->arp_tha[0], ETHER_ADDR_LEN)) {
+
+			/* Rebuild the target IP from its unaligned byte array */
+			ipaddr = (arp->arp_tpa[3] << 24) |
+				(arp->arp_tpa[2] << 16) |
+				(arp->arp_tpa[1] << 8) |
+				arp->arp_tpa[0];
+
+			if (ipaddr && ni->ni_ip_addr != ipaddr) {
+				ni->ni_ip_addr = ipaddr;
+				ni->ni_ip_addr_filter = IEEE80211_IP_ADDR_FILTER_ARP_RSP;
+			}
+		}
+	} else if (unlikely(ether_type == __constant_htons(ETH_P_ARP)) && (qv->iv.iv_opmode == IEEE80211_M_STA)) {
+		/* presumably records the SoC IP address from outgoing ARP - see qdrv_tx_store_soc_ipaddr() */
+		qdrv_tx_store_soc_ipaddr(arp, ni);
+	}
+
+	if (unlikely(qdrv_tx_unauth_node_data_drop(qw, ni, skb) != 0)) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE | QDRV_LF_PKT_TX,
+			"drop Tx to unauth node S=%pM D=%pM type=0x%04x\n",
+			eh->ether_shost, eh->ether_dhost, ether_type);
+		return NET_XMIT_DROP;
+	}
+
+	/* disable_dgaf: drop group-addressed frames unless the node carries the Quantenna IE */
+	if (vap->disable_dgaf &&
+			IEEE80211_IS_MULTICAST(eh->ether_dhost) &&
+			!ni->ni_qtn_assoc_ie) {
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_SHOULD_DROP);
+		return NET_XMIT_DROP;
+	}
+
+	/* Workaround: ARP to Intel clients in AP mode is sent at best effort */
+	if ((ni->ni_qtn_flags & QTN_IS_INTEL_NODE) &&
+			(ether_type == __constant_htons(ETH_P_ARP)) &&
+		(qv->iv.iv_opmode == IEEE80211_M_HOSTAP)) {
+			skb->priority = WMM_AC_BE;
+	}
+
+	ac = skb->priority;
+	tid = (ni->ni_flags & IEEE80211_NODE_QOS) ? WMM_AC_TO_TID(ac) : 0;
+
+	ni->ni_stats.ns_tx_data++;
+	ni->ni_stats.ns_tx_bytes += skb->len;
+
+	if (ether_type != __constant_htons(ETH_P_PAE)) {
+		/* Block-ack setup for this TID - see qdrv_tx_ba_establish() */
+		qdrv_tx_ba_establish(qv, ni, tid);
+	} else {
+		/* EAPOL may be consumed by the encap helper; NULL means it was */
+		skb = qdrv_tx_encap_eapol(qv, ni, skb);
+		if (skb == NULL) {
+			return NET_XMIT_DROP;
+		}
+	}
+
+	/* Per-category device and node tx counters */
+	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		if (ether_addr_equal(eh->ether_dhost, vap->iv_dev->broadcast)) {
+#else
+		if (!compare_ether_addr(eh->ether_dhost, vap->iv_dev->broadcast)) {
+#endif
+			vap->iv_devstats.tx_broadcast_packets++;
+			ni->ni_stats.ns_tx_bcast++;
+		} else {
+			vap->iv_devstats.tx_multicast_packets++;
+			ni->ni_stats.ns_tx_mcast++;
+		}
+	} else {
+		vap->iv_devstats.tx_unicast_packets++;
+		ni->ni_stats.ns_tx_ucast++;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_BRIDGE | QDRV_LF_PKT_TX,
+			"src=%pM dst=%pM to Node_idx %d: %pM TID=%d AC=%d type=%04x\n",
+			eh->ether_shost, eh->ether_dhost,
+			IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx), ni->ni_macaddr, tid, ac,
+			ntohs(ether_type));
+
+	return qdrv_tx_sch_enqueue_to_node(qw, ni, skb);
+}
+
+/*
+ * Mark the tx descriptor with AMSDU source/destination capability flags
+ * for this skb, unless the sender forbade aggregation outright.
+ */
+static __always_inline void qdrv_tx_amsdu(struct host_txdesc *txdesc,
+		struct sk_buff *skb)
+{
+	uint32_t htxd_flags = 0;
+
+	/* Sender indicated AMSDU must not be applied to this frame */
+	if (unlikely(M_FLAG_ISSET(skb, M_NO_AMSDU)))
+		return;
+
+	/*
+	 * Only recyclable buffers may serve as aggregation destinations: the
+	 * recycle flag guarantees no stale data lingers in cache beyond what
+	 * cache_op_before_tx() just flushed, which matters because later
+	 * frames may be appended to this one.  The buffer also needs enough
+	 * free space for a second frame to be appended.
+	 */
+	if (likely(skb->is_recyclable) &&
+			(skb_end_pointer(skb) - skb->data >= QTN_AMSDU_DEST_CAPABLE_SIZE +
+				QTN_AMSDU_DEST_CAPABLE_GUARD_SIZE) &&
+			(skb->len < QTN_AMSDU_DEST_CAPABLE_OCCUPY_SIZE))
+		htxd_flags |= HTXD_FLAG_AMSDU_DEST_CAPABLE;
+
+	/* Only small frames are worth copying into another buffer as sources */
+	if (skb->len <= QTN_AMSDU_SRC_FRAME_SIZE)
+		htxd_flags |= HTXD_FLAG_AMSDU_SRC_CAPABLE;
+
+	if (htxd_flags)
+		HTXD_FLAG_SET(txdesc, htxd_flags);
+}
+
+/*
+ * Post a pre-populated host tx descriptor to the MuC.
+ *
+ * Prerequisite is that the host descriptor is filled in with a
+ * call to 'qdrv_tx_prepare_hostdesc'.
+ *
+ * Data frames use a per-node mailbox (HOST_DATA_INDEX_BASE + node index);
+ * 802.11-encapsulated frames share the management mailbox.
+ */
+static __always_inline void qdrv_tx_muc_post_hostdesc(struct qdrv_wlan *qw,
+		struct qdrv_vap *qv, struct host_txdesc *txdesc,
+		struct sk_buff *skb, uint16_t node_idx_unmapped, int is_80211_encap)
+{
+	/*
+	 * Last descriptor posted per hostifq; used to chain follow-up
+	 * descriptors while the MuC is still draining the mailbox.
+	 * NOTE(review): accesses appear to be serialised by the host
+	 * semaphore taken below - confirm no other path touches this.
+	 */
+	static struct host_txdesc *prev_txdesc[HOST_NUM_HOSTIFQ];
+	volatile uint32_t *mbox;
+	unsigned int mbox_indx;
+
+	if (likely(!is_80211_encap)) {
+		mbox_indx = HOST_DATA_INDEX_BASE + node_idx_unmapped;
+	} else {
+		mbox_indx = HOST_MGMT_INDEX_BASE;
+	}
+
+	mbox = &qw->tx_if.tx_mbox[mbox_indx];
+
+	/* Flush the frame from cache and record its bus address for the MuC */
+	txdesc->hd_segaddr[0] = cache_op_before_tx(skb->head,
+		skb_headroom(skb) + skb->len) + skb_headroom(skb);
+	skb->cache_is_cleaned = 1;
+
+	qdrv_tx_amsdu(txdesc, skb);
+
+	/* Take the semaphore before reading the mailbox - the MuC will modify it. */
+	while(!sem_take(qw->host_sem, qw->semmap[mbox_indx]));
+
+	trace_skb_perf_stamp_call(skb);
+
+	if (*mbox == QTN_MAILBOX_INVALID) {
+		/* Mailbox invalidated by the MuC: release the descriptor unsent */
+		qdrv_tx_release_txdesc(qw, (struct lhost_txdesc *)txdesc);
+		txdesc = NULL;
+	} else if (*mbox && prev_txdesc[mbox_indx]) {
+		/* Mailbox still busy: chain behind the previously posted descriptor */
+		prev_txdesc[mbox_indx]->hd_nextpa = txdesc->hd_pa;
+		TXSTAT(qw, tx_muc_enqueue);
+	} else {
+		/* Mailbox idle: hand over the descriptor's physical address */
+		writel_wmb(txdesc->hd_pa, mbox);
+		TXSTAT(qw, tx_muc_enqueue_mbox);
+	}
+
+	sem_give(qw->host_sem, qw->semmap[mbox_indx]);
+
+	prev_txdesc[mbox_indx] = txdesc;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	netif_trans_update(qv->iv.iv_dev);
+#else
+	qv->iv.iv_dev->trans_start = jiffies;
+#endif
+}
+
+/*
+ * Account a frame just handed to the MuC against the node and VAP
+ * queue-depth counters.  Runs with local interrupts disabled.
+ */
+static __sram_text void qdrv_tx_muc_post_stats(struct qdrv_wlan *qw, struct qdrv_vap *qv,
+					struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	unsigned long irq_state;
+
+	local_irq_save(irq_state);
+
+	QDRV_TX_CTR_INC(30);
+
+	if (likely(ni == QTN_SKB_CB_NI(skb))) {
+		M_FLAG_SET(skb, M_ENQUEUED_MUC);
+		ni->ni_tx_sch.muc_queued++;
+		qv->muc_queued++;
+	} else {
+		/* The skb control block no longer refers to this node */
+		DBGPRINTF_LIMIT_E("skb recycled prematurely (%p/%p)\n",
+			ni, QTN_SKB_CB_NI(skb));
+	}
+
+	local_irq_restore(irq_state);
+}
+
+/*
+ * Duplicate a packet and queue the copy to a single node as a directed
+ * reliable frame.  With convert_to_uc set, the copy's destination MAC is
+ * rewritten to the node address for non-Quantenna peers.
+ */
+static __always_inline void qdrv_copy_to_node(struct ieee80211_node *ni,
+			struct sk_buff *skb_orig, int convert_to_uc)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct qdrv_wlan *qw = qv->parent;
+	struct sk_buff *copy;
+
+	copy = skb_copy(skb_orig, GFP_ATOMIC);
+	if (copy == NULL) {
+		TXSTAT(qw, tx_copy_fail);
+		qdrv_tx_dropped(&qv->iv, qw, ni);
+		return;
+	}
+
+	copy->dest_port = ni->ni_node_idx;
+	copy->is_recyclable = 1;
+
+	if (convert_to_uc && !ni->ni_qtn_assoc_ie) {
+		/* 3rd-party peer: rewrite the DA so the copy is true unicast */
+		struct ether_header *ethh = (struct ether_header *)copy->data;
+
+		IEEE80211_ADDR_COPY(ethh->ether_dhost, ni->ni_macaddr);
+		TXSTAT(qw, tx_copy_uc);
+	} else {
+		TXSTAT(qw, tx_copy4);
+	}
+
+	/* The reference travels with the skb and is released on completion */
+	ieee80211_ref_node(ni);
+	QTN_SKB_CB_NI(copy) = ni;
+
+	qdrv_tx_prepare_data_frame(qw, qv, ni, copy);
+}
+
+/* Hex-dump up to g_dbg_dump_pkt_len bytes of an outgoing packet */
+static void qdrv_dump_tx_pkt(struct sk_buff *skb)
+{
+	uint32_t limit = g_dbg_dump_pkt_len;
+	uint32_t i;
+
+	if (skb->len < limit)
+		limit = skb->len;
+
+	if (limit > 0) {
+		for (i = 0; i < limit; i++) {
+			/* Group the dump: extra space every 8 bytes, new row every 16 */
+			if ((i % 8) == 0)
+				printk(" ");
+			if ((i % 16) == 0)
+				printk("\n");
+			printk("%02x ", skb->data[i]);
+		}
+		printk("\n");
+	}
+	printk("\n");
+}
+
+/*
+ * Periodically attempt to discover the location of unknown destination MAC addresses.
+ * Issues an ARP request (IPv4) or a neighbour solicitation (IPv6) on the
+ * bridge device for the packet's destination IP.  'p_data' points at the
+ * start of the L3 header.
+ */
+static void __sram_text qdrv_tx_mac_discover(struct qdrv_wlan *qw, void *p_data)
+{
+	struct iphdr *p_iphdr = p_data;
+	uint32_t ipaddr;
+	__be32 addr;
+#ifdef CONFIG_IPV6
+	struct ipv6hdr *ip6hdr_p = p_data;
+	struct in6_addr mcaddr;
+	struct in6_addr *target;
+#endif
+
+	/* No bridge device - nowhere to send discovery frames from */
+	if (!qw->br_dev)
+		return;
+
+	if (likely(p_iphdr->version == 4)) {
+		/* The bridge's own address is used as the ARP source */
+		addr = qdrv_dev_ipaddr_get(qw->br_dev);
+		if (!addr)
+			return;
+		ipaddr = get_unaligned((uint32_t *)&p_iphdr->daddr);
+
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE,
+				"sending ARP from " NIPQUAD_FMT " for " NIPQUAD_FMT " (%08x)\n",
+				NIPQUAD(addr), NIPQUAD(ipaddr), ipaddr);
+
+		TXSTAT(qw, tx_arp_req);
+
+		arp_send(ARPOP_REQUEST, ETH_P_ARP,
+				ipaddr,
+				qw->br_dev,
+				addr,
+				NULL, qw->br_dev->dev_addr, NULL);
+#ifdef CONFIG_IPV6
+	} else if (p_iphdr->version == 6) {
+		/* Solicit via the target's solicited-node multicast address */
+		target = &ip6hdr_p->daddr;
+
+		addrconf_addr_solict_mult(target, &mcaddr);
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE,
+				"sending neighbour solicitation from dev %s to IP "
+				NIPV6OCTA_FMT "\n", qw->br_dev->name, NIPV6OCTA(target));
+		TXSTAT(qw, tx_arp_req);
+		/* ndisc_send_ns() lost its 'neigh' argument in newer kernels */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		ndisc_send_ns(qw->br_dev, target, &mcaddr, NULL);
+#else
+		ndisc_send_ns(qw->br_dev, NULL, target, &mcaddr, NULL);
+#endif
+#endif
+	}
+}
+
+/*
+ * Node-iterator callback: unicast a copy of the skb passed in 'data' to
+ * each eligible station.
+ * Excluded: nodes not in auth state 1 (per the original note this also
+ * covers the BSS, WDS and MBSS nodes - confirm) and the frame's own
+ * source node.
+ */
+static void __sram_text qdrv_tx_copy_to_node_cb(void *data, struct ieee80211_node *ni)
+{
+	struct sk_buff *skb = data;
+
+	if (unlikely(!ni))
+		return;
+
+	if ((ni->ni_in_auth_state != 1) || (skb->src_port == ni->ni_node_idx)) {
+		return;
+	}
+
+	/*
+	 * ni_macaddr is a raw 6-byte MAC, not a NUL-terminated string, so it
+	 * must be printed with %pM as elsewhere in this file (was %s, which
+	 * read past the address).
+	 */
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX,
+		" to %pM\n",
+		ni->ni_macaddr);
+
+	qdrv_copy_to_node(ni, skb, 1);
+}
+
+/*
+ * Unicast selected SSDP broadcast packets to all nodes.
+ * Returns 1 when unicast copies were generated (IPv4 only), 0 otherwise.
+ */
+static int __sram_text qdrv_tx_ssdp_copy_to_nodes(struct qdrv_vap *qv, struct sk_buff *skb,
+					struct iphdr *p_iphdr)
+{
+	struct ieee80211vap *vap = &qv->iv;
+
+	/* SSDP-to-unicast conversion applies to IPv4 traffic only */
+	if (p_iphdr->version != 4)
+		return 0;
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX, "SSDP ucast\n");
+
+	vap->iv_ic->ic_iterate_dev_nodes(vap->iv_dev, &vap->iv_ic->ic_sta,
+					qdrv_tx_copy_to_node_cb, skb, 1);
+
+	return 1;
+}
+
+/*
+ * Copy a packet to multiple nodes.
+ *
+ * If LNCB multicast or if we're forwarding all unknown multicasts,
+ * or if this is a broadcast, unicast to Quantenna nodes that are configured to receive
+ * these frames in 4-address mode.  These stations will silently drop the 3-address
+ * copy of the frame.  This only applies to packets that are destined for the BSS - ie
+ * genuine broadcasts and multicasts, not IGMP snooped multicasts.
+ *
+ * Packets destined to unknown endpoints are sent to every bridge station.  They are not
+ * sent in 3-addr mode, as 3-addr clients do not support multiple endpoints.
+ *
+ * Always returns 0; copies, if any, are queued via qdrv_copy_to_node().
+ */
+static int __sram_text qdrv_tx_copy_to_nodes(struct qdrv_vap *qv, struct qdrv_wlan *qw,
+		struct sk_buff *skb, struct net_device *dev,
+		uint8_t lncb, int igmp_type)
+{
+#define QDRV_MAX_COPY_NODES QTN_ASSOC_LIMIT	/* Max nodes we can transmit to */
+	struct ieee80211_node *ni_lst[QDRV_MAX_COPY_NODES];
+	struct ieee80211_node *ni;
+	unsigned long flags;
+	uint8_t copy_nodes = 0;
+
+	/*
+	 * Snapshot the target nodes (taking references) under the list lock;
+	 * the copies themselves are made after the lock is dropped.
+	 */
+	spin_lock_irqsave(&qv->ni_lst_lock, flags);
+	if (lncb) {
+		ni = TAILQ_FIRST(&qv->ni_lncb_lst);
+	} else {
+		ni = TAILQ_FIRST(&qv->ni_bridge_lst);
+	}
+	while (ni && (copy_nodes < ARRAY_SIZE(ni_lst))) {
+
+		/* Never copy the frame back to its source node */
+		if (ni->ni_node_idx != skb->src_port) {
+			ieee80211_ref_node(ni);
+			ni_lst[copy_nodes] = ni;
+			copy_nodes++;
+		}
+
+		/* One-shot debug hook: stop after the first node and clear the flag */
+		if (unlikely(qw->flags_ext & QDRV_WLAN_DEBUG_TEST_LNCB)) {
+			TXSTAT(qw, tx_copy4_busy);
+			qw->flags_ext &= ~QDRV_WLAN_DEBUG_TEST_LNCB;
+			break;
+		}
+
+		if (lncb) {
+			ni = TAILQ_NEXT(ni, ni_lncb_lst);
+		} else {
+			ni = TAILQ_NEXT(ni, ni_bridge_lst);
+		}
+	}
+	spin_unlock_irqrestore(&qv->ni_lst_lock, flags);
+
+	/* Send the frame to each station */
+	while (copy_nodes--) {
+		if (!lncb) {
+			TXSTAT(qw, tx_copy4_unknown);
+		} else if (igmp_type != 0) {
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_IGMP,
+					"IGMP %s -> %pM\n",
+					qdrv_igmp_type_to_string(igmp_type),
+					ni_lst[copy_nodes]->ni_macaddr);
+			TXSTAT(qw, tx_copy4_igmp);
+		} else {
+			TXSTAT(qw, tx_copy4_mc);
+		}
+
+		qdrv_copy_to_node(ni_lst[copy_nodes], skb, 0);
+		ieee80211_free_node(ni_lst[copy_nodes]);
+	}
+
+	return 0;
+}
+
+/*
+ * Copy a unicast packet with unknown destination to all bridge nodes, and
+ * optionally trigger rate-limited ARP/NS discovery for the destination.
+ * Returns 0 when copies were queued, non-zero when the caller should drop
+ * the frame.
+ */
+static __always_inline int qdrv_tx_copy_unknown_uc_to_nodes(struct qdrv_vap *qv,
+		struct qdrv_wlan *qw, struct sk_buff *skb, struct net_device *dev,
+		uint16_t ether_type, uint8_t *data_start)
+{
+#define QDRV_TX_ARP_FREQ_MS 1000	/* Max frequency for ARP requests */
+	if ((qw->flags_ext & QDRV_WLAN_FLAG_UNKNOWN_ARP) &&
+			time_after_eq(jiffies,
+				qw->arp_last_sent + msecs_to_jiffies(QDRV_TX_ARP_FREQ_MS)) &&
+			iputil_eth_is_ipv4or6(ether_type)) {
+		/* Rate-limited discovery of the unknown destination */
+		qdrv_tx_mac_discover(qw, data_start);
+		qw->arp_last_sent = jiffies;
+	}
+
+	if ((qw->flags_ext & QDRV_WLAN_FLAG_UNKNOWN_FWD) && qv->ni_bridge_cnt)
+		return qdrv_tx_copy_to_nodes(qv, qw, skb, dev, 0, 0);
+
+	return 1;
+}
+
+#ifdef CONFIG_IPV6
+/*
+ * Deliver a multicast router advertisement as unicast copies to every
+ * associated station in the current BSS.
+ */
+static void qdrv_router_adv_mc_to_unicast(struct ieee80211vap *vap,
+							struct sk_buff *skb)
+{
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX, "Router Advertisement ucast\n");
+
+	/* Iterate every node on the device; the callback makes the copies */
+	vap->iv_ic->ic_iterate_dev_nodes(vap->iv_dev, &vap->iv_ic->ic_sta,
+					qdrv_tx_copy_to_node_cb, skb, 1);
+}
+#endif
+
+/*
+ * Convert a broadcast DHCP server-to-client message into a directed copy
+ * for the client whose hardware address it carries.
+ * Returns 1 when a unicast copy was queued (caller drops the broadcast),
+ * 0 otherwise.
+ */
+static int qdrv_tx_uc_dhcp(struct ieee80211vap *vap, struct iphdr *p_iphdr,
+			struct sk_buff *skb, uint8_t ip_proto, void *proto_data,
+			uint8_t *p_dest_mac, uint16_t ether_type)
+{
+	struct udphdr *udph = proto_data;
+	struct dhcp_message *dhcp_msg;
+	struct ieee80211_node *ni;
+	int sent = 0;
+
+	/* Only broadcast IPv4/UDP frames to the DHCP client port qualify */
+	if (!udph || !IEEE80211_ADDR_BCAST(p_dest_mac) ||
+			ether_type != __constant_htons(ETH_P_IP) ||
+			ip_proto != IPPROTO_UDP ||
+			udph->dest != __constant_htons(DHCPCLIENT_PORT))
+		return 0;
+
+	dhcp_msg = (struct dhcp_message*)((uint8_t*)udph +
+					sizeof(struct udphdr));
+
+	/* Look the client up by the DHCP chaddr field */
+	ni = ieee80211_find_node(&vap->iv_ic->ic_sta, &dhcp_msg->chaddr[0]);
+	if (ni) {
+		if ((IEEE80211_AID(ni->ni_associd) != 0) &&
+				IEEE80211_ADDR_EQ(vap->iv_myaddr, ni->ni_bssid)) {
+			qdrv_copy_to_node(ni, skb, 1);
+			sent = 1;
+		}
+		ieee80211_free_node(ni);
+	}
+
+	return sent;
+}
+
+/*
+ * Disable-DGAF handling: convert certain downstream group-addressed frames
+ * to unicast copies.
+ * Returns 1 when the frame was handled here (do not send the original as
+ * multicast), 0 otherwise.
+ */
+static int qdrv_handle_dgaf(struct ieee80211vap *vap, struct iphdr *p_iphdr,
+		struct sk_buff *skb, uint8_t *p_dest_mac, uint16_t ether_type,
+		uint8_t ip_proto, void *proto_data)
+{
+	/*
+	 * Assume the DHCP server always runs in the back end or on the AP:
+	 * a broadcast server-to-client DHCP reply is converted to unicast.
+	 */
+	if (qdrv_tx_uc_dhcp(vap, p_iphdr, skb, ip_proto,
+				proto_data, p_dest_mac, ether_type))
+		return 1;
+
+#ifdef CONFIG_IPV6
+	/* Router advertisements to the all-nodes group go out as unicast */
+	if (ip_proto == IPPROTO_ICMPV6 &&
+			iputil_ipv6_is_ll_all_nodes_mc(p_dest_mac, p_iphdr)) {
+		struct icmp6hdr *icmp6 = (struct icmp6hdr*)proto_data;
+
+		if (icmp6->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
+			qdrv_router_adv_mc_to_unicast(vap, skb);
+			return 1;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+/* Trace and optionally hex-dump an outgoing frame, per debug settings */
+static __sram_text void qdrv_tx_pkt_debug(struct qdrv_wlan *qw, struct sk_buff *skb, int is_80211_encap)
+{
+	if (unlikely(is_80211_encap)) {
+		struct ieee80211_frame *pwh = (struct ieee80211_frame *)skb->data;
+
+		/* Pre-encapsulated frame: use the 802.11 dumper when enabled */
+		if (IFF_DUMPPKTS_XMIT_MGT(pwh, DBG_LOG_FUNC)) {
+			ieee80211_dump_pkt(&qw->ic, skb->data,
+				skb->len > g_dbg_dump_pkt_len ? g_dbg_dump_pkt_len : skb->len,
+				-1, -1);
+		}
+		return;
+	}
+
+	trace_ippkt_check(skb->data, skb->len, TRACE_IPPKT_LOC_WLAN_TX);
+
+	if (IFF_DUMPPKTS_XMIT_DATA(DBG_LOG_FUNC)) {
+		printk("%pM->%pM:proto 0x%x\n",
+			&skb->data[6], skb->data, ntohs(*((unsigned short *)(&skb->data[12]))));
+		qdrv_dump_tx_pkt(skb);
+	}
+}
+
+/*
+ * Build a host tx descriptor for the frame and post it to the MuC.
+ * On the drop paths the skb is consumed; otherwise queue accounting and
+ * the mailbox post are delegated to the helpers above.
+ */
+static __sram_text void qdrv_tx_muc_post(struct qdrv_wlan *qw,
+		struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_txdesc *txdesc = NULL;
+	int is_80211_encap = QTN_SKB_ENCAP_IS_80211(skb);
+	uint8_t ac = skb->priority;
+	uint8_t tid;
+	uint16_t node_idx = ni->ni_node_idx;
+
+	if (likely(!is_80211_encap)) {
+		if (unlikely(qdrv_tx_unauth_node_data_drop(qw, ni, skb))) {
+			/* node has deauthed since queuing */
+			QDRV_TX_CTR_INC(43);
+			return;
+		}
+		tid = (ni->ni_flags & IEEE80211_NODE_QOS) ? WMM_AC_TO_TID(ac) : 0;
+	} else {
+		/* Raw 802.11: management and non-QoS frames use the mgmt TID */
+		if (QTN_SKB_ENCAP_IS_80211_MGMT(skb) || !(ni->ni_flags & IEEE80211_NODE_QOS))
+			tid = QTN_TID_MGMT;
+		else
+			tid = QTN_TID_WLAN;
+
+		/* Unassigned node index: fall back to the VAP's own node */
+		if (node_idx == 0) {
+			node_idx = ni->ni_vap->iv_vapnode_idx;
+		}
+	}
+
+	txdesc = qdrv_tx_prepare_hostdesc(qw, skb, qv->iv.iv_unit, tid, ac, node_idx, is_80211_encap);
+
+	if (unlikely(!txdesc)) {
+		/* Out of tx descriptors: drop and account */
+		qdrv_tx_skb_drop(ni->ni_vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_NO_DESC);
+		TXSTAT(qw, tx_drop_nodesc);
+		QDRV_TX_CTR_INC(29);
+		return;
+	}
+
+	/* Caller asked for an immediate tx-done interrupt for this frame */
+	if (M_FLAG_ISSET(skb, M_TX_DONE_IMM_INT))
+		txdesc->hd_flags |= HTXD_FLAG_IMM_RETURN;
+	qdrv_tx_pkt_debug(qw, skb, is_80211_encap);
+
+	qdrv_tx_muc_post_stats(qw, qv, ni, skb);
+
+	qdrv_tx_muc_post_hostdesc(qw, qv, txdesc, skb,
+		IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx), is_80211_encap);
+}
+
+/*
+ * Detect frames whose source or destination sub-port conflicts with the
+ * resolved node: either the frame came from the very node it is being sent
+ * to, or it is addressed to a different node than the one resolved
+ * (the VAP's own port is exempt in both cases).
+ * Returns non-zero when the frame should be dropped.
+ */
+static inline int qdrv_tx_sub_port_check(struct ieee80211vap *vap,
+		struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	int src_port = IEEE80211_NODE_IDX_UNMAP(skb->src_port);
+	int dst_port = IEEE80211_NODE_IDX_UNMAP(skb->dest_port);
+	int ni_port = IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx);
+	int vap_port = IEEE80211_NODE_IDX_UNMAP(vap->iv_vapnode_idx);
+
+	if (src_port && (src_port == ni_port) && (src_port != vap_port))
+		return 1;
+
+	if (dst_port && (dst_port != ni_port) && (dst_port != vap_port))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Resolve and reference the destination node for an outgoing frame.
+ * Returns a referenced node, or NULL when the frame was dropped (every
+ * NULL return path frees the skb).  *vlan_group is set to 1 when the
+ * frame was matched to a dynamic-VLAN group node rather than a real peer.
+ */
+static inline struct ieee80211_node *qdrv_tx_node_get_and_ref(struct ieee80211vap *vap,
+			struct qdrv_wlan *qw, struct sk_buff *skb, uint8_t *p_dest_mac, uint8_t *vlan_group)
+{
+	struct ieee80211_node *ni = NULL;
+	struct ieee80211_node *src_ni = NULL;
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct qtn_vlan_pkt *pkt;
+	uint8_t vlan_group_mac[ETH_ALEN];
+
+	*vlan_group = 0;
+
+	switch (vap->iv_opmode) {
+	case IEEE80211_M_STA:
+		if (!IEEE80211_IS_MULTICAST(p_dest_mac)) {
+			/* Prefer an explicit destination index over a MAC lookup */
+			if (skb->dest_port) {
+				QDRV_TX_CTR_INC(50);
+				ni = ieee80211_find_node_by_node_idx(vap, skb->dest_port);
+			/* NOTE(review): this re-test is always true inside the outer unicast branch */
+			} else if (!IEEE80211_IS_MULTICAST(p_dest_mac)) {
+				QDRV_TX_CTR_INC(51);
+				ni = ieee80211_find_node(&vap->iv_ic->ic_sta, p_dest_mac);
+			}
+
+			if (ni && !ieee80211_node_is_running(ni)) {
+				ieee80211_free_node(ni);
+				ni = NULL;
+			}
+		}
+
+		/* Anything unresolved goes to the BSS node (the AP) */
+		if (!ni) {
+			QDRV_TX_CTR_INC(52);
+			ni = vap->iv_bss;
+			if (unlikely(!ni)) {
+				TXSTAT(qw, tx_dropped_config);
+				qdrv_tx_skb_drop(vap, qw, NULL, skb, TRACE_IPPKT_DROP_RSN_RECONFIG);
+				return NULL;
+			}
+			ieee80211_ref_node(ni);
+		}
+
+		/* Drop packets from a TDLS peer that are destined for the AP or other TDLS peers. */
+		if (unlikely(IEEE80211_NODE_IDX_VALID(skb->src_port))) {
+			src_ni = ieee80211_find_node_by_node_idx(vap, skb->src_port);
+			if (src_ni) {
+				if (unlikely(!IEEE80211_NODE_IS_NONE_TDLS(src_ni))) {
+					ieee80211_free_node(ni);
+					ieee80211_free_node(src_ni);
+					return NULL;
+				}
+				ieee80211_free_node(src_ni);
+			}
+		}
+
+		/*
+		 * NOTE(review): qdrv_tx_sub_port_check() runs again for all
+		 * modes after the switch, so STA frames are checked twice;
+		 * redundant but harmless.
+		 */
+		if (qdrv_tx_sub_port_check(vap, ni, skb)) {
+			TXSTAT(qw, tx_dropped_config);
+			qdrv_tx_skb_drop(vap, qw, NULL, skb, TRACE_IPPKT_DROP_RSN_RECONFIG);
+			ieee80211_free_node(ni);
+			return NULL;
+		}
+		break;
+	case IEEE80211_M_WDS:
+		/* WDS frames always go to the single WDS peer */
+		ni = ieee80211_get_wds_peer_node_ref(vap);
+		if (unlikely(!ni)) {
+			TXSTAT(qw, tx_drop_wds);
+			qdrv_tx_skb_drop(vap, qw, NULL, skb, TRACE_IPPKT_DROP_RSN_NO_WDS);
+			return NULL;
+		}
+		break;
+	case IEEE80211_M_HOSTAP:
+	default:
+		if (skb->dest_port) {
+			QDRV_TX_CTR_INC(50);
+			ni = ieee80211_find_node_by_node_idx(vap, skb->dest_port);
+			if (!ni) {
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_BRIDGE | QDRV_LF_PKT_TX,
+					"dropping pkt to %pM - node_idx 0x%x is stale\n",
+					p_dest_mac, skb->dest_port);
+				TXSTAT(qw, tx_drop_aid);
+				qdrv_tx_skb_drop(vap, qw, NULL, skb, TRACE_IPPKT_DROP_RSN_AID_STALE);
+				return NULL;
+			}
+		} else if (!IEEE80211_IS_MULTICAST(p_dest_mac)) {
+			QDRV_TX_CTR_INC(51);
+			ni = ieee80211_find_node(&vap->iv_ic->ic_sta, p_dest_mac);
+		}
+
+		if (!ni) {
+			QDRV_TX_CTR_INC(52);
+
+			/* NOTE(review): declaration after a statement - relies on GNU C/C99 */
+			struct qtn_vlan_dev *vlandev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+
+			/* Dynamic VLAN: map tagged frames to the per-VLAN group node */
+			if (QVLAN_IS_DYNAMIC(vlandev) && M_FLAG_ISSET(skb, M_VLAN_TAGGED)) {
+				pkt = qtn_vlan_get_info(skb->data);
+				BUG_ON(pkt->magic != QVLAN_PKT_MAGIC);
+
+				qtn_vlan_gen_group_addr(vlan_group_mac,
+					pkt->vlan_info & QVLAN_MASK_VID, vap->iv_dev->dev_id);
+
+				ni = ieee80211_find_node(&vap->iv_ic->ic_sta, vlan_group_mac);
+				if (ni) {
+					*vlan_group = 1;
+				} else {
+					qdrv_tx_skb_drop(vap, qw, NULL, skb, TRACE_IPPKT_DROP_RSN_INVALID);
+					return NULL;
+				}
+			} else {
+				ni = vap->iv_bss;
+				if (unlikely(!ni)) {
+					TXSTAT(qw, tx_dropped_config);
+					qdrv_tx_skb_drop(vap, qw, NULL, skb, TRACE_IPPKT_DROP_RSN_RECONFIG);
+					return NULL;
+				}
+				ieee80211_ref_node(ni);
+			}
+		}
+		break;
+	}
+	/* Common sub-port sanity check for every mode (see STA note above) */
+	if (qdrv_tx_sub_port_check(vap, ni, skb)) {
+		TXSTAT(qw, tx_dropped_config);
+		qdrv_tx_skb_drop(vap, qw, NULL, skb, TRACE_IPPKT_DROP_RSN_RECONFIG);
+		ieee80211_free_node(ni);
+		return NULL;
+	}
+	return ni;
+}
+
+/*
+ * Unicast a layer 2 multicast frame to selected stations or to every station on the vap.
+ *
+ *     if multicast-to-unicast is disabled
+ *         send a single group-addressed frame
+ *     else if SSDP (239.255.255.250 and MAC address 01:00:5e:74:ff:fa)
+ *         unicast to every station and drop the multicast packet
+ *     else if LNCB (224.0.0.0/24)
+ *             or non-snooped multicast (224.0.0.0/4) and 'forward unknown multicast' flag is set
+ *                 - e.g. IGMP
+ *             or L2 broadcast (ff:ff:ff:ff:ff:ff) and the 'reliable broadcast' flag is set
+ *                 - e.g. ARP, DHCP
+ *         if the 'multicast to unicast' flag is set
+ *             unicast to every station and drop the multicast packet
+ *         else
+ *             unicast to each QSTA and send a group-addressed frame for 3rd party stations
+ *                 (these group-addressed frames are always ignored by QSTAs)
+ *     else
+ *         send a single group-addressed frame
+ *
+ *     Snooped multicast and IP flood-forwarding are handled in switch_tqe and do not come through
+ *     here.
+ *
+ * Returns 0 if a group-addressed frame should be transmitted.
+ * Returns 1 if a group-addressed frame should not be transmitted.
+ */
+static int qdrv_tx_multicast_to_unicast(struct qdrv_vap *qv, struct qdrv_wlan *qw,
+					struct sk_buff *skb, struct ieee80211vap *vap,
+					struct ieee80211_node *ni, int is_4addr_mc,
+					uint8_t *data_start)
+{
+	int igmp_type;
+	struct ether_header *eh = (struct ether_header*)skb->data;
+	struct iphdr *p_iphdr = (struct iphdr *)data_start;
+
+	/* Conversion disabled: always send the plain group-addressed frame */
+	if (vap->iv_mc_to_uc == IEEE80211_QTN_MC_TO_UC_NEVER)
+		return 0;
+
+	/* SSDP is converted regardless of is_4addr_mc */
+	if (iputil_is_ssdp(eh->ether_dhost, p_iphdr)) {
+		qdrv_tx_ssdp_copy_to_nodes(qv, skb, p_iphdr);
+		TXSTAT(qw, tx_copy_ssdp);
+		return 1;
+	}
+
+	if (is_4addr_mc) {
+		if (vap->iv_mc_to_uc == IEEE80211_QTN_MC_TO_UC_ALWAYS) {
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX, "mc to uc\n");
+			/* unicast to all stations */
+			vap->iv_ic->ic_iterate_dev_nodes(vap->iv_dev, &vap->iv_ic->ic_sta,
+							qdrv_tx_copy_to_node_cb, skb, 1);
+			TXSTAT(qw, tx_copy_mc_to_uc);
+			return 1;
+		}
+		/* Legacy multicast - unicast to QSTAs and broadcast for 3rd party stations */
+		if (qv->ni_lncb_cnt > 0) {
+			/* unicast to each QSTA */
+			igmp_type = qdrv_igmp_type(p_iphdr, skb->len - sizeof(*eh));
+			qdrv_tx_copy_to_nodes(qv, qw, skb, vap->iv_dev, 1, igmp_type);
+			TXSTAT(qw, tx_copy_mc_to_uc);
+			/* 3-addr stations still get the group-addressed copy; count it */
+			if (qv->iv_3addr_count > 0) {
+				if (igmp_type != 0)
+					TXSTAT(qw, tx_copy3_igmp);
+				else
+					TXSTAT(qw, tx_copy3_mc);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void qdrv_tx_unicast_to_unknown(struct qdrv_vap *qv, struct qdrv_wlan *qw,
+					struct sk_buff *skb, struct ieee80211vap *vap,
+					struct ieee80211_node *ni, uint16_t ether_type,
+					uint8_t *data_start)
+{
+	int copied;
+
+	TXSTAT(qw, tx_unknown);
+
+	/*
+	 * Destination endpoint is unknown: replicate the frame to bridge
+	 * stations only, then drop the original with the matching reason code.
+	 */
+	copied = qdrv_tx_copy_unknown_uc_to_nodes(qv, qw, skb, vap->iv_dev, ether_type, data_start);
+	if (copied == 0)
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_SHOULD_DROP);
+	else
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_NO_DEST);
+}
+
+/*
+ * Enqueue a data frame
+ * Returns NET_XMIT_SUCCESS if the frame is enqueued.
+ * Returns NET_XMIT_DROP if the frame is dropped, in which case the skb and noderef have been freed.
+ *
+ * If the skb contains a node pointer, the caller must have already incremented
+ * the node reference count.
+ * If the skb does not contain a node pointer, this function will find the destination
+ * node, and increment the node reference count
+ */
+static inline int qdrv_tx_sch_enqueue_data(struct qdrv_wlan *qw, struct qdrv_vap *qv_tx,
+		struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct qdrv_vap *qv;
+	struct ieee80211vap *vap;
+	uint16_t ether_type;
+	struct ether_header *eh;
+	uint8_t *data_start;	/* start of the payload after the L2 headers */
+	uint8_t *p_dest_mac;
+	int is_4addr_mc;
+	struct iphdr *p_iphdr;
+	uint32_t node_idx_mapped;
+	bool is_vap_node;
+	uint8_t vlan_group = 0;
+	uint8_t ip_proto;
+	void *proto_data = NULL;
+	struct qtn_vlan_dev *vlandev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv_tx->devid)];
+
+	TXSTAT(qw, tx_enqueue_data);
+
+	eh = (struct ether_header*)skb->data;
+	p_dest_mac = eh->ether_dhost;
+	data_start = qdrv_sch_find_data_start(skb, eh, &ether_type);
+	p_iphdr = (struct iphdr *)data_start;
+
+	/* No node supplied: look up the destination and take a node reference here */
+	if (likely(!ni)) {
+		ni = qdrv_tx_node_get_and_ref(&qv_tx->iv, qw, skb, p_dest_mac, &vlan_group);
+		if (!ni)
+			return NET_XMIT_DROP;
+		QTN_SKB_CB_NI(skb) = ni;
+	} else {
+		QDRV_TX_CTR_INC(49);
+	}
+
+	/* VLAN processing; returns NULL if the skb was consumed, so drop the noderef */
+	skb = switch_vlan_from_proto_stack(skb, vlandev, IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx), 1);
+	if (!skb) {
+		ieee80211_free_node(ni);
+		return NET_XMIT_DROP;
+	}
+
+	node_idx_mapped = ni->ni_node_idx;
+	skb->dest_port = node_idx_mapped;
+	vap = ni->ni_vap;
+	qv = container_of(vap, struct qdrv_vap, iv);
+
+	is_vap_node = (vlan_group || (ni == vap->iv_bss));
+	ip_proto = iputil_proto_info(p_iphdr, skb, &proto_data, NULL, NULL);
+
+	/* Broadcast packets-per-second rate limiting */
+	if (bcast_pps_should_drop(p_dest_mac, &vap->bcast_pps, ether_type,
+				ip_proto, proto_data, 0)) {
+		qdrv_tx_skb_return(qw, skb);
+		return NET_XMIT_DROP;
+	}
+
+	/* Drop group-addressed frames flagged by the DGAF (disabled) handler */
+	if (vap->disable_dgaf) {
+		if (qdrv_handle_dgaf(vap, p_iphdr, skb, p_dest_mac, ether_type,
+					ip_proto, proto_data)) {
+			qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_SHOULD_DROP);
+			return NET_XMIT_DROP;
+		}
+	}
+
+	/* Proxy ARP: answer/absorb ARP requests and IPv6 neighbour messages locally */
+	if (vap->proxy_arp) {
+		if (ether_type == __constant_htons(ETH_P_ARP)) {
+			struct ether_arp *arp = (struct ether_arp *)data_start;
+			if (qdrv_proxy_arp(vap, qw, NULL, data_start) ||
+					arp->ea_hdr.ar_op == __constant_htons(ARPOP_REQUEST)) {
+				qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_PROXY_ARP);
+				return NET_XMIT_DROP;
+			}
+#ifdef CONFIG_IPV6
+		} else if (ether_type == __constant_htons(ETH_P_IPV6)) {
+			if (qdrv_wlan_handle_neigh_msg(vap, qw, data_start, 1, skb,
+							ip_proto, proto_data)) {
+				qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_PROXY_ARP);
+				return NET_XMIT_DROP;
+			}
+#endif
+		}
+	}
+
+	if (qdrv_wlan_mc_should_drop(eh, p_iphdr, &qv->iv, is_vap_node, ip_proto)) {
+		/* Flood-forwarding of unknown IP multicast is disabled */
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_NO_DEST);
+		return NET_XMIT_DROP;
+	}
+
+	is_4addr_mc = qdrv_wlan_is_4addr_mc(eh, data_start, &qv->iv, is_vap_node);
+
+	/* AP with WDS: handle unknown-unicast and multicast-to-unicast replication */
+	if (qv->iv.iv_opmode == IEEE80211_M_HOSTAP && is_vap_node &&
+			(vap->iv_flags_ext & IEEE80211_FEXT_WDS)) {
+
+		if (unlikely(!IEEE80211_IS_MULTICAST(p_dest_mac))) {
+			qdrv_tx_unicast_to_unknown(qv, qw, skb, vap, ni, ether_type, data_start);
+			return NET_XMIT_DROP;
+		}
+
+		/* Non-zero return: frame was fully replicated, drop the original */
+		if (qdrv_tx_multicast_to_unicast(qv, qw, skb, vap, ni, is_4addr_mc, data_start)) {
+			qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_COPY);
+			return NET_XMIT_DROP;
+		}
+
+		/* Legacy mode: only send the group frame if 3-addr stations exist */
+		if (unlikely(is_4addr_mc && (vap->iv_mc_to_uc == IEEE80211_QTN_MC_TO_UC_LEGACY))) {
+			if (qv->iv_3addr_count > 0) {
+				TXSTAT(qw, tx_copy3);
+			} else {
+				qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_SHOULD_DROP);
+				return NET_XMIT_DROP;
+			}
+		}
+
+		if (vap->iv_bss->ni_rsn.rsn_mcastcipher ==
+					IEEE80211_CIPHER_AES_CCM) {
+			TXSTAT(qw, tx_copy_mc_enc);
+		}
+
+		/* drop through to send as L2 multicast */
+		TXSTAT(qw, tx_copy_mc);
+	}
+
+	return qdrv_tx_prepare_data_frame(qw, qv, ni, skb);
+}
+
+/*
+ * Enqueue a locally generated 802.11-encapped management or data frame.
+ * A node reference must have been acquired.
+ * Returns NET_XMIT_SUCCESS if the frame is enqueued.
+ * Returns NET_XMIT_DROP if the frame is dropped, in which case the skb has been freed.
+ */
+static inline int qdrv_tx_sch_enqueue_80211(struct qdrv_wlan *qw, struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	/* A node reference must already be held; without one the frame is dropped */
+	if (unlikely(ni == NULL)) {
+		qdrv_tx_skb_return(qw, skb);
+		return NET_XMIT_DROP;
+	}
+
+	if (QTN_SKB_ENCAP_IS_80211_MGMT(skb)) {
+		TXSTAT(qw, tx_enqueue_mgmt);
+	} else {
+		TXSTAT(qw, tx_enqueue_80211_data);
+	}
+
+	/* Verbose per-frame trace, only evaluated at INFO log level or above */
+	if (DBG_LOG_LEVEL >= DBG_LL_INFO) {
+		struct ieee80211_frame *wh = (struct ieee80211_frame *)skb->data;
+		uint32_t ftype = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+		uint32_t fsubtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+		uint8_t tid_ac = skb->priority;
+
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_PKT_TX,
+			"a1=%pM a2=%pM a3=%pM aid=%u ac=%u encap=%u type=0x%x subtype=0x%x\n",
+			wh->i_addr1, wh->i_addr2, wh->i_addr3,
+			IEEE80211_NODE_AID(ni), tid_ac, QTN_SKB_ENCAP(skb), ftype, fsubtype);
+	}
+
+	return qdrv_tx_sch_enqueue_to_node(qw, ni, skb);
+}
+
+/*
+ * This is the entry point for wireless transmission.
+ * If the mgmt flag is set, the frame is not necessarily an 802.11 management frame, but the 802.11
+ *   header must already be present and a node reference must have been acquired.
+ * If the mgmt flag is not set, the packet is still Ethernet-encapsulated.  If the SKB node pointer
+ *   is set, a node reference must have been acquired.
+ *
+ * Returns NET_XMIT_SUCCESS if the frame is enqueued.
+ * Returns NET_XMIT_DROP if the frame is dropped, in which case the skb and noderef have been freed.
+ */
+static __sram_text int qdrv_tx_sch_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct qdrv_vap *qv;
+	struct qdrv_wlan *qw;
+	struct ieee80211vap *vap;
+	int is_80211_encap = QTN_SKB_ENCAP_IS_80211(skb);
+	struct ieee80211_node *ni = QTN_SKB_CB_NI(skb);
+
+	QDRV_TX_CTR_INC(2);
+	trace_skb_perf_stamp_call(skb);
+
+	/*
+	 * NOTE(review): an extra reference is taken for 802.11-encapped frames;
+	 * presumably released on the drop/completion paths - confirm the pairing.
+	 */
+	if (unlikely(ni && is_80211_encap)) {
+		ieee80211_ref_node(ni);
+	}
+
+	if (unlikely(!dev)) {
+		DBGPRINTF_LIMIT_E("[%s] missing dev\n",
+			ni ? ether_sprintf(ni->ni_macaddr) : "");
+		QDRV_TX_CTR_INC(3);
+		qdrv_tx_skb_drop(NULL, NULL, QTN_SKB_CB_NI(skb), skb, TRACE_IPPKT_DROP_RSN_RECONFIG);
+		return NET_XMIT_DROP;
+	}
+
+	qv = netdev_priv(dev);
+	qw = qv->parent;
+	vap = &qv->iv;
+
+	QDRV_TX_CTR_INC(4);
+
+	/* MAC has died - drop everything (qdrv_tx_skb_drop releases the noderef) */
+	if (unlikely(qw->mac->dead || qw->mac->mgmt_dead)) {
+		TXSTAT(qw, tx_dropped_mac_dead);
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_MAC_DEAD);
+		return NET_XMIT_DROP;
+	}
+
+	/* drop any WBSP control packet (Ethernet type 88b7)
+	Quantenna OUI (00 26 86) is located at data[14-16] followed by 1-byte type field [17] */
+	if (qdrv_wbsp_ctrl == QDRV_WBSP_CTRL_ENABLED &&
+		skb->protocol == __constant_htons(ETHERTYPE_802A) &&
+		skb->len > 17 && is_qtn_oui_packet(&skb->data[14])) {
+		qdrv_tx_skb_drop(vap, qw, ni, skb, TRACE_IPPKT_DROP_RSN_SHOULD_DROP);
+		return NET_XMIT_DROP;
+	}
+
+	/* Ethernet-encapsulated data takes the full data path; 802.11 goes direct */
+	if (likely(!is_80211_encap)) {
+		return qdrv_tx_sch_enqueue_data(qw, qv, ni, skb);
+	}
+
+	return qdrv_tx_sch_enqueue_80211(qw, ni, skb);
+}
+
+static __sram_text struct sk_buff *qdrv_tx_sch_dequeue(struct Qdisc* sch)
+{
+	struct qdrv_tx_sch_priv *priv = qdisc_priv(sch);
+	struct sk_buff *skb = NULL;
+
+	/* Nothing can be dequeued while the device queue is stopped */
+	if (netif_queue_stopped(qdisc_dev(sch)))
+		return NULL;
+
+	/* Reclaim completed descriptors first; bail out if that fails */
+	if (qdrv_tx_done_chk(priv->qw) != 0)
+		return NULL;
+
+	skb = qdrv_sch_dequeue_nostat(priv->shared_data, sch);
+	if (skb != NULL) {
+		if (!QTN_SKB_ENCAP_IS_80211(skb))
+			qdrv_tx_stats_prot(skb->dev, skb);
+		return skb;
+	}
+
+	if (sch->q.qlen > 0) {
+		/*
+		 * Packets are queued but none could be dequeued, which means every
+		 * node with queued data is over its MuC quota.  Stop the queue
+		 * until some descriptors are returned to prevent thrashing.
+		 */
+		QDRV_TX_CTR_INC(16);
+		qdrv_tx_disable_queues(priv->qw);
+	}
+
+	return skb;
+}
+
+static int qdrv_tx_sch_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	/* 'opt' is unused; netlink options are not supported by this qdisc */
+	struct qdrv_vap *qv = netdev_priv(qdisc_dev(sch));
+	struct qdrv_tx_sch_priv *priv = qdisc_priv(sch);
+
+	/* Cache the wlan and its shared scheduler data for the fast path */
+	priv->qw = qv->parent;
+	priv->shared_data = qv->parent->tx_sch_shared_data;
+
+	return 0;
+}
+
+/* Flush all frames queued for this qdisc's vap (qdisc reset callback) */
+static void qdrv_tx_sch_reset(struct Qdisc *sch)
+{
+	struct net_device *vdev = qdisc_dev(sch);
+	struct qdrv_vap *qv = netdev_priv(vdev);
+	struct qdrv_wlan *qw = qv->parent;
+	struct qdrv_node *qn;
+	struct qdrv_node *qn_tmp;
+	struct ieee80211_node *ni;
+
+	/* Flush with bottom halves disabled so the tx path cannot race us */
+	local_bh_disable();
+
+	TAILQ_FOREACH_SAFE(qn, &qv->allnodes, qn_next, qn_tmp) {
+		ni = &qn->qn_node;
+		qdrv_sch_flush_node(&ni->ni_tx_sch);
+	}
+
+	/* Also release any held (requeued) skb if it belongs to this vap */
+	if (sch->gso_skb) {
+		ni = QTN_SKB_CB_NI(sch->gso_skb);
+		if (ni->ni_vap == &qv->iv) {
+			qdrv_tx_skb_return(qw, sch->gso_skb);
+			sch->gso_skb = NULL;
+		}
+	}
+
+	local_bh_enable();
+}
+
+static void qdrv_tx_sch_destroy(struct Qdisc *sch)
+{
+	struct qdrv_vap *qv = netdev_priv(qdisc_dev(sch));
+
+	/* Flush all queued frames, then release this vap's completed descriptors */
+	qdrv_tx_sch_reset(sch);
+	qdrv_tx_done_flush_vap(qv);
+}
+
+/* Root qdisc operations installed on every tx queue of a qdrv vap device */
+static struct Qdisc_ops qdrv_tx_sch_qdisc_ops __read_mostly = {
+	.id		=	"qdrv_tx_sch",
+	.priv_size	=	sizeof(struct qdrv_tx_sch_priv),
+	.enqueue	=	qdrv_tx_sch_enqueue,
+	.dequeue	=	qdrv_tx_sch_dequeue,
+	.init		=	qdrv_tx_sch_init,
+	.reset		=	qdrv_tx_sch_reset,
+	.destroy	=	qdrv_tx_sch_destroy,
+	.owner		=	THIS_MODULE,
+};
+
+/*
+ * Network device transmit entry point: hand a dequeued frame to the MuC.
+ * Always returns NETDEV_TX_OK; undeliverable frames are dropped or requeued
+ * locally rather than reported busy (avoids out-of-order packets).
+ */
+int __sram_text qdrv_tx_hardstart(struct sk_buff *skb, struct net_device *dev)
+{
+	struct qdrv_vap *qv = netdev_priv(dev);
+	struct qdrv_wlan *qw = qv->parent;
+	struct ieee80211_node *ni = QTN_SKB_CB_NI(skb);
+
+	TXSTAT(qw, tx_hardstart);
+	QDRV_TX_CTR_INC(24);
+
+	/* No destination node - nothing to post; return the skb */
+	if (unlikely(!ni)) {
+		QDRV_TX_CTR_INC(26);
+		qdrv_tx_skb_return(qw, skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(QDRV_WLAN_TX_USE_AUC(qw))) {
+		/* Data in the qdisc which is dequeued after enabling auc tx. Drop */
+		qdrv_tx_skb_return(qw, skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(qdrv_txdesc_queue_is_empty(qw))) {
+		/*
+		 * Requeue locally instead of returning NETDEV_TX_BUSY, which can cause out-of-order
+		 * pkts.
+		 * This condition should never occur because dequeuing is disabled when there are no
+		 * descriptors. This check is for safety only.
+		 */
+		DBGPRINTF_LIMIT_E("no descriptors for dequeue\n");
+		if (qdrv_sch_requeue(qw->tx_sch_shared_data, skb, ni->ni_tx_sch.qdisc) != 0) {
+			DBGPRINTF_LIMIT_E("held skb was not empty\n");
+			TXSTAT(qw, tx_requeue_err);
+		}
+		TXSTAT(qw, tx_requeue);
+		return NETDEV_TX_OK;
+	}
+
+	/* Post the frame to the MuC mailbox for transmission */
+	qdrv_tx_muc_post(qw, ni, skb);
+
+	return NETDEV_TX_OK;
+}
+
+/* Create the qdrv root qdisc for one tx queue and install it on the device */
+static void qdrv_tx_sch_attach_queue(struct net_device *dev,
+		struct netdev_queue *dev_queue,	void *_unused)
+{
+	struct Qdisc *sch;
+
+	/* qdisc_create_dflt() lost its net_device argument in kernel 4.7 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	sch = qdisc_create_dflt(dev_queue, &qdrv_tx_sch_qdisc_ops, TC_H_ROOT);
+#else
+	sch = qdisc_create_dflt(dev, dev_queue,
+			&qdrv_tx_sch_qdisc_ops, TC_H_ROOT);
+#endif
+	if (!sch) {
+		/* The device cannot transmit without its qdisc - unrecoverable */
+		panic("%s: could not create qdisc\n", __func__);
+	}
+
+	dev_queue->qdisc_sleeping = sch;
+	dev_queue->qdisc = sch;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)
+	dev->qdisc = sch;
+#endif
+}
+
+/* Install the qdrv tx qdisc on every tx queue of the vap's net device */
+void qdrv_tx_sch_attach(struct qdrv_vap *qv)
+{
+	netdev_for_each_tx_queue(qv->iv.iv_dev, &qdrv_tx_sch_attach_queue, NULL);
+}
+
+/*
+ * Get a txdesc for this management frame, but don't add this
+ * txdesc to tx mbox, just return it
+ */
+struct host_txdesc *qdrv_tx_get_mgt_txdesc(struct sk_buff *skb, struct net_device *dev)
+{
+	struct qdrv_vap *qv = netdev_priv(dev);
+	struct qdrv_wlan *qw = qv->parent;
+	struct host_txdesc *txdesc = NULL;
+	/* 802.11 header of the frame; skb->data is not modified below */
+	struct ieee80211_frame *pwh = (struct ieee80211_frame *)skb->data;
+	uint8_t ac = skb->priority;
+	uint16_t node_idx_mapped = skb->dest_port;
+
+	trace_skb_perf_stamp_call(skb);
+	QDRV_TX_CTR_INC(47);
+
+	/* MAC is dead. Drop packets. */
+	if (unlikely(qw->mac->dead || qw->mac->mgmt_dead)) {
+		TXSTAT(qw, tx_dropped_mac_dead);
+		trace_ippkt_dropped(TRACE_IPPKT_DROP_RSN_MAC_DEAD, 1, 0);
+		return NULL;
+	}
+
+	/* Reclaim completed descriptors; fail if none can be made available */
+	if (qdrv_tx_done_chk(qw) != 0) {
+		return NULL;
+	}
+
+	/* Make sure the device is set on the skb (for flow control) */
+	skb->dev = dev;
+	/* (removed a redundant re-assignment of pwh from the same skb->data here) */
+
+	txdesc = qdrv_tx_prepare_hostdesc(qw, skb, qv->iv.iv_unit, QTN_TID_MGMT, ac,
+						node_idx_mapped, 1);
+	if (unlikely(!txdesc)) {
+		trace_ippkt_dropped(TRACE_IPPKT_DROP_RSN_NO_DESC, 1, 0);
+		TXSTAT(qw, tx_drop_nodesc);
+		return NULL;
+	}
+
+	if (IFF_DUMPPKTS_XMIT_MGT(pwh, DBG_LOG_FUNC)) {
+		ieee80211_dump_pkt(&qw->ic, skb->data,
+			skb->len>g_dbg_dump_pkt_len ? g_dbg_dump_pkt_len : skb->len, -1, -1);
+	}
+
+	/* Flush the frame from the cache and record the payload's DMA address */
+	txdesc->hd_segaddr[0] = cache_op_before_tx(skb->head,
+		skb_headroom(skb) + skb->len) + skb_headroom(skb);
+	skb->cache_is_cleaned = 1;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	netif_trans_update(dev);
+#else
+	dev->trans_start = jiffies;
+#endif
+	return txdesc;
+}
+
+int qdrv_tx_start(struct qdrv_mac *mac)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+	int irq = qw->txdoneirq;
+	struct int_handler handler;
+
+	/* Register the "tx done" interrupt handler for this MAC, then enable it */
+	memset(&handler, 0, sizeof(handler));
+	handler.handler = qdrv_tx_done_irq;
+	handler.arg1 = mac;
+	handler.arg2 = NULL;
+
+	if (qdrv_mac_set_handler(mac, irq, &handler) != 0) {
+		DBGPRINTF_E("Failed to register IRQ handler for %d\n", irq);
+		return -1;
+	}
+
+	qdrv_mac_enable_irq(mac, irq);
+
+	return 0;
+}
+
+int qdrv_tx_stop(struct qdrv_mac *mac)
+{
+	struct qdrv_wlan *qw = mac->data;
+
+	/* Mask the "tx done" interrupt; the handler registration is left in place */
+	qdrv_mac_disable_irq(mac, qw->txdoneirq);
+
+	return 0;
+}
+
+/*
+ * Initialise the tx interface: size the MuC tx list, map the tx mailbox,
+ * record semaphore and IRQ assignments from hifinfo, and create the DMA
+ * pool for tx descriptors.  Returns 0 on success.
+ */
+int qdrv_tx_init(struct qdrv_mac *mac, struct host_ioctl_hifinfo *hifinfo, uint32_t arg2)
+{
+	int i;
+	struct qdrv_wlan *qw = mac->data;
+	struct host_txif *txif = &qw->tx_if;
+	unsigned int txif_list_size;
+	int nmbox = arg2 & IOCTL_DEVATTACH_NMBOX_MASK;
+
+	/* Read in the tx list max length */
+	txif_list_size = mac->params.txif_list_max;
+
+	/* Clamp the configured size to sane bounds, else fall back to the default */
+	if ((txif_list_size >= QNET_TXLIST_ENTRIES_MIN) &&
+			(txif_list_size <= QNET_TXLIST_ENTRIES_MAX)) {
+		txif->list_max_size = txif_list_size;
+	} else {
+		txif->list_max_size = QNET_TXLIST_ENTRIES_DEFAULT;
+	}
+
+	if (txif->list_max_size != QNET_TXLIST_ENTRIES_DEFAULT) {
+		DBGPRINTF_E("Non default MuC tx list size: %d\n", txif->list_max_size);
+	} else {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX,
+				"MuC tx list max len: %d\n", txif->list_max_size);
+	}
+
+	/* Flow-control watermarks derived from the list size */
+	txif->muc_thresh_high = txif->list_max_size;
+	txif->muc_thresh_low = txif->list_max_size - QDRV_TXDESC_THRESH_MIN_DIFF;
+
+	/* Initialize TX mailbox */
+	txif->tx_mbox = ioremap_nocache(muc_to_lhost(hifinfo->hi_mboxstart), HOST_MBOX_SIZE);
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_TRACE,
+			"qw 0x%p txif 0x%p nmbox %d, mbox 0x%p hifinfo->hi_mboxstart 0x%x\n",
+			qw, txif, nmbox, txif->tx_mbox, hifinfo->hi_mboxstart);
+
+	memset((void *) txif->tx_mbox, 0, HOST_MBOX_SIZE);
+
+	/* Record the per-mailbox semaphore addresses supplied by the MuC */
+	for (i = 0; i < nmbox; i++) {
+		qw->semmap[i] = hifinfo->hi_semmap[i];
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX,
+				"%d sem 0x%08x\n", i,
+				(unsigned int) hifinfo->hi_semmap[i]);
+	}
+	qw->txdoneirq = hifinfo->hi_txdoneirq;
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_PKT_TX,
+			"txdoneirq %u\n", qw->txdoneirq);
+
+	/* hw fix: make sure that the packet header does not go over a 1K boundary */
+	txif->df_txdesc_cache = dma_pool_create("txdesc", NULL,
+						sizeof(struct lhost_txdesc), 8, 1024);
+
+	txif->df_txdesc_list_tail = NULL;
+	txif->df_txdesc_list_head = NULL;
+
+	txif->txdesc_cnt[QDRV_TXDESC_DATA] = qdrv_tx_data_max_count(qw);
+	txif->txdesc_cnt[QDRV_TXDESC_MGMT] = qdrv_tx_80211_max_count(qw);
+	qw->tx_stats.tx_min_cl_cnt = qw->tx_if.list_max_size;
+
+	qw->tx_sch_shared_data->drop_callback = &qdrv_tx_sch_drop_callback;
+
+	fwt_sw_4addr_callback_set(&qdrv_tx_fwt_use_4addr, mac);
+
+	return 0;
+}
+
+int qdrv_tx_exit(struct qdrv_wlan *qw)
+{
+	struct host_txif *txif = &qw->tx_if;
+	struct lhost_txdesc *desc;
+
+	/* Detach the 4-address forwarding-table callback first */
+	fwt_sw_4addr_callback_set(NULL, NULL);
+
+	/* Return every free tx descriptor on the list to the DMA pool */
+	for (desc = txif->df_txdesc_list_head; desc != NULL; desc = txif->df_txdesc_list_head) {
+		txif->df_txdesc_list_head = desc->next;
+		dma_pool_free(txif->df_txdesc_cache, desc->hw_desc.hd_va, desc->hw_desc.hd_pa);
+	}
+	txif->df_txdesc_list_tail = NULL;
+
+	dma_pool_destroy(txif->df_txdesc_cache);
+
+	/* Unmap the tx mailbox if it was mapped during init */
+	if (txif->tx_mbox) {
+		iounmap((void *) txif->tx_mbox);
+		txif->tx_mbox = 0;
+	}
+
+	return 0;
+}
diff --git a/drivers/qtn/qdrv/qdrv_txbf.c b/drivers/qtn/qdrv/qdrv_txbf.c
new file mode 100644
index 0000000..7338685
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_txbf.c
@@ -0,0 +1,656 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <asm/hardware.h>
+
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include "qdrv_txbf.h"
+#include "qdrv_hal.h"
+#include "qdrv_control.h"
+#include "qdrv_soc.h"
+#include <qtn/qtn_global.h>
+#include <qtn/txbf_mbox.h>
+#include <qtn/topaz_hbm.h>
+
+/*
+ * Record the AP-side identity (group id, device id, MAC address) for one MU
+ * group slot in the update args sent to the MuC (qmat install/delete event).
+ * NOTE(review): the 'delete' and 'feedback' parameters are unused in this
+ * function - confirm with callers whether they are still required.
+ */
+static void qdrv_txbf_mu_grp_qmat_update(struct qtn_mu_group_update_args *args, int slot,
+					struct ieee80211_node *ni, uint8_t grp_id,
+					int delete, int feedback)
+{
+	/* The vap owning this node identifies the AP device to the MuC */
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+
+	args->groups[slot].grp_id = grp_id;
+	args->groups[slot].ap_devid = qv->devid;
+	memcpy(&args->groups[slot].ap_macaddr[0], &ni->ni_macaddr[0], sizeof(uint8_t)*IEEE80211_ADDR_LEN);
+}
+
+/*
+ * Update one node's MU group membership and copy the result into the MuC
+ * update args at slot [grp_i][node_i].
+ * Returns 1 if the node entry was filled in, 0 if grp/pos was invalid.
+ */
+static int qdrv_txbf_node_mu_grp_update(struct qtn_mu_group_update_args *args,
+					int grp_i, int node_i,
+					struct ieee80211_node *ni,
+					uint8_t grp, uint8_t pos, uint8_t delete)
+{
+	KASSERT(grp_i >= 0 && grp_i < QTN_MU_QMAT_MAX_SLOTS,
+		(DBGEFMT "group index exceeds the limits: %d\n", DBGARG, grp_i));
+	/* Fixed: this assertion checks the node index, so report node_i (was grp_i) */
+	KASSERT(node_i >= 0 && node_i < QTN_MU_QMAT_MAX_SLOTS,
+		(DBGEFMT "node index exceeds the limits: %d\n", DBGARG, node_i));
+
+	/* Fixed: both formats were missing the %s for the node MAC address string */
+	if (!IEEE80211_MU_GRP_VALID(grp)) {
+		IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_VHT,
+				  "%s: %s MU grp id %u invalid\n",
+				  __func__, ether_sprintf(ni->ni_macaddr), grp);
+		return 0;
+	}
+	if (!IEEE80211_MU_POS_VALID(pos)) {
+		IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_VHT,
+				  "%s: %s MU pos %u invalid\n",
+				  __func__, ether_sprintf(ni->ni_macaddr), pos);
+		return 0;
+	}
+
+	/* Apply the membership change to the node's own group bitmaps */
+	if (delete) {
+		IEEE80211_NODE_MU_DEL_GRP(ni, grp);
+	} else {
+		IEEE80211_NODE_MU_ADD_GRP(ni, grp, pos);
+	}
+
+	/* Copy the node identity and its resulting group state into the args */
+	args->groups[grp_i].nodes[node_i].as_sta = ni->ni_vap->iv_opmode == IEEE80211_M_STA;
+	memcpy(&args->groups[grp_i].nodes[node_i].macaddr[0],&ni->ni_macaddr[0], sizeof(uint8_t)*IEEE80211_ADDR_LEN);
+	memcpy(&args->groups[grp_i].nodes[node_i].grp,       &ni->ni_mu_grp,     sizeof(struct ieee80211_vht_mu_grp));
+	return 1;
+}
+
+/*
+ * Look up the tx node matching the NDP sender's MAC address on this MAC's
+ * primary VAP.  Returns NULL if the primary VAP device does not exist.
+ * NOTE(review): presumably returns a referenced node - confirm who releases it.
+ */
+static struct ieee80211_node * qdrv_txbf_find_txnode(struct qdrv_mac *mac,
+		volatile struct txbf_ndp_info *ndp_info)
+{
+	struct net_device *vdev;
+	struct qdrv_vap *qv;
+
+	/* Transmit frame out the primary VAP */
+	vdev = mac->vnet[0];
+	if (unlikely(vdev == NULL)) {
+		return NULL;
+	}
+
+	qv = netdev_priv(vdev);
+
+	return ieee80211_find_txnode(&qv->iv, (uint8_t*)ndp_info->macaddr);
+}
+
+static size_t qdrv_txbf_act_frm_allheaders_len(void)
+{
+	/* Combined size of the 802.11 header, action header and HT MIMO control field */
+	size_t len = sizeof(struct ieee80211_frame);
+
+	len += sizeof(struct ieee80211_action);
+	len += sizeof(struct ht_mimo_ctrl);
+
+	return len;
+}
+
+/* Recover the skb carrying the DSP message payload described by pkt_info */
+static struct sk_buff *qdrv_txbf_get_skb(volatile struct txbf_pkts *pkt_info)
+{
+#if TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT
+	const int8_t pool = TOPAZ_HBM_BUF_WMAC_RX_POOL;
+	void *buf_bus = (void *) pkt_info->buffer_start;
+	struct sk_buff *skb = NULL;
+
+	if (likely(buf_bus)) {
+		/* Wrap the HBM payload in an skb; on failure return it to its pool */
+		skb = topaz_hbm_attach_skb_bus(buf_bus, pool);
+		if (skb == NULL) {
+			topaz_hbm_put_payload_aligned_bus(buf_bus, pool);
+		}
+	}
+	return skb;
+#else
+	/* Non-HBM build: the DSP message carries the skb pointer directly */
+	return (struct sk_buff *) pkt_info->skb;
+#endif
+}
+
+static inline void qdrv_txbf_free_nodes(struct ieee80211_node **node1,
+						struct ieee80211_node **node2)
+{
+	struct ieee80211_node **refs[2] = { node1, node2 };
+	int i;
+
+	/* Release and clear both node references, skipping NULL entries */
+	for (i = 0; i < 2; i++) {
+		if (*refs[i] != NULL) {
+			ieee80211_free_node(*refs[i]);
+			*refs[i] = NULL;
+		}
+	}
+}
+
+/* Debug-only dump of a node's MU group membership and position bitmap bytes */
+static inline void qdrv_txbf_dbg_printout(struct ieee80211_node *ni)
+{
+	if (ni == NULL) {
+		return;
+	}
+
+	/* Bytes are printed most-significant first (index [7]/[15] down to [0]) */
+	IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_VHT,
+			"%s: %s MU grp: "
+			"%02x%02x%02x%02x%02x%02x%02x%02x\n"
+			"MU pos: %02x%02x%02x%02x%02x%02x%02x%02x"
+			"%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			__func__, ether_sprintf(ni->ni_macaddr),
+			ni->ni_mu_grp.member[7],
+			ni->ni_mu_grp.member[6],
+			ni->ni_mu_grp.member[5],
+			ni->ni_mu_grp.member[4],
+			ni->ni_mu_grp.member[3],
+			ni->ni_mu_grp.member[2],
+			ni->ni_mu_grp.member[1],
+			ni->ni_mu_grp.member[0],
+			ni->ni_mu_grp.pos[15],
+			ni->ni_mu_grp.pos[14],
+			ni->ni_mu_grp.pos[13],
+			ni->ni_mu_grp.pos[12],
+			ni->ni_mu_grp.pos[11],
+			ni->ni_mu_grp.pos[10],
+			ni->ni_mu_grp.pos[9],
+			ni->ni_mu_grp.pos[8],
+			ni->ni_mu_grp.pos[7],
+			ni->ni_mu_grp.pos[6],
+			ni->ni_mu_grp.pos[5],
+			ni->ni_mu_grp.pos[4],
+			ni->ni_mu_grp.pos[3],
+			ni->ni_mu_grp.pos[2],
+			ni->ni_mu_grp.pos[1],
+			ni->ni_mu_grp.pos[0]);
+}
+
+static void qdrv_txbf_send_vht_grp_id_act_frm(struct ieee80211_node *ni)
+{
+	if (ni == NULL)
+		return;
+
+	/* A STA-mode vap does not announce group id changes */
+	if (ni->ni_vap->iv_opmode == IEEE80211_M_STA)
+		return;
+
+	ni->ni_ic->ic_send_vht_grp_id_act(ni->ni_vap, ni);
+}
+
+/*
+ * Handle a DSP-to-LHost MU group install/delete notification and forward the
+ * collected group state to the MuC via ic_mu_group_update().
+ * Opcodes appear to be biased by QTN_TXBF_MUC_DSP_MSG_RING_SIZE; anything that
+ * does not decode to an MU group opcode is returned as NOT_PROCESSED so the
+ * caller can treat it as a regular txbf message.
+ */
+static int qdrv_txbf_process_mu_grp_mbox(struct qdrv_wlan *qw, volatile struct qtn_txbf_mbox *txbf_mbox, uint32_t opcode)
+{
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_node *u0 = NULL;
+	struct ieee80211_node *u1 = NULL;
+	struct ieee80211_node *ap = NULL;
+	volatile struct qtn_sram_qmat *mu_qmat = &txbf_mbox->mu_grp_qmat[0];
+	struct qtn_mu_group_update_args grp_update_args = {0};
+	int grp_i, node_i;
+
+	opcode -= QTN_TXBF_MUC_DSP_MSG_RING_SIZE;
+
+	switch (opcode) {
+	case QTN_TXBF_DSP_TO_HOST_INST_MU_GRP:
+		grp_update_args.op = MU_GRP_INST;
+		break;
+	case QTN_TXBF_DSP_TO_HOST_DELE_MU_GRP:
+		grp_update_args.op = MU_GRP_DELE;
+		break;
+	case QTN_TXBF_MBOX_BAD_IDX:
+		return QTN_TXBF_MBOX_NOT_PROCESSED;
+	default:
+		return QTN_TXBF_MBOX_NOT_PROCESSED;
+	}
+
+	/* MU MIMO not licensed/supported: consume the message without acting on it */
+	if (!ieee80211_swfeat_is_supported(SWFEAT_ID_MU_MIMO, 0)) {
+		return QTN_TXBF_MBOX_PROCESSED;
+	}
+
+	/* Collect all the necessary data needed to update groups and qmats at MuC */
+	/* The loop increment releases the u0/u1 node references taken below */
+	for (grp_i = 0; grp_i < QTN_MU_QMAT_MAX_SLOTS; qdrv_txbf_free_nodes(&u0, &u1), grp_i++) {
+		node_i = 0;
+		if (!mu_qmat[grp_i].valid) {
+			continue;
+		}
+
+		if (qw->ic.ic_mu_debug_level) {
+			printk("dsp to lhost(%u): %s mu grp %d node0 %d node1 %d rank %d tk: 0x%02x\n",
+				grp_i, (opcode == QTN_TXBF_DSP_TO_HOST_INST_MU_GRP) ? "install" : "delete",
+				mu_qmat[grp_i].grp_id, mu_qmat[grp_i].u0_aid, mu_qmat[grp_i].u1_aid,
+				mu_qmat[grp_i].rank, mu_qmat[grp_i].tk);
+		}
+
+		u0 = ieee80211_find_node_by_aid(ic, mu_qmat[grp_i].u0_aid);
+		u1 = ieee80211_find_node_by_aid(ic, mu_qmat[grp_i].u1_aid);
+
+		if (opcode == QTN_TXBF_DSP_TO_HOST_INST_MU_GRP) {
+			/* Install needs both group members present */
+			if (u0 == NULL || u1 == NULL) {
+				continue;
+			}
+			ap = u0->ni_vap->iv_bss;
+			if (qdrv_txbf_node_mu_grp_update(&grp_update_args, grp_i, node_i,
+							u0, mu_qmat[grp_i].grp_id, 0, 0)) {
+				node_i++;
+			}
+			if (qdrv_txbf_node_mu_grp_update(&grp_update_args, grp_i, node_i,
+							u1, mu_qmat[grp_i].grp_id, 1, 0)) {
+				node_i++;
+			}
+		} else { /* (opcode == QTN_TXBF_DSP_TO_HOST_DELE_MU_GRP) */
+			/* Delete proceeds if at least one member is still present */
+			if (u0 == NULL && u1 == NULL) {
+				continue;
+			}
+
+			if (u0 != NULL) {
+				ap = u0->ni_vap->iv_bss;
+				if (qdrv_txbf_node_mu_grp_update(&grp_update_args, grp_i,
+						node_i, u0, mu_qmat[grp_i].grp_id, 0, 1)) {
+					node_i++;
+				}
+			}
+
+			if (u1 != NULL) {
+				ap = u1->ni_vap->iv_bss;
+				if (qdrv_txbf_node_mu_grp_update(&grp_update_args, grp_i,
+						node_i, u1, mu_qmat[grp_i].grp_id, 1, 1)) {
+					node_i++;
+				}
+			}
+
+		}
+
+		qdrv_txbf_mu_grp_qmat_update(&grp_update_args, grp_i, ap, mu_qmat[grp_i].grp_id, 1, 0);
+
+		qdrv_txbf_send_vht_grp_id_act_frm(u0);
+		qdrv_txbf_send_vht_grp_id_act_frm(u1);
+		qdrv_txbf_dbg_printout(u0);
+		qdrv_txbf_dbg_printout(u1);
+	}
+
+	ic->ic_mu_group_update(ic, &grp_update_args);
+
+	return QTN_TXBF_MBOX_PROCESSED;
+}
+
+/* Clear a DSP message buffer's references, return the slot, and free the skb */
+static void qdrv_txbf_pkt_info_clear(volatile struct txbf_pkts *pkt_info, struct sk_buff *skb)
+{
+	pkt_info->skb = 0;
+	pkt_info->act_frame_phys = 0;
+	pkt_info->buffer_start = 0;
+	qtn_txbf_mbox_free_msg_buf(pkt_info);
+	/* _irq variant: this runs from tasklet/interrupt context */
+	dev_kfree_skb_irq(skb);
+}
+
+/*
+ * Handle a txbf message from the DSP: either the completion of a received
+ * action frame (QTN_TXBF_ACT_FRM_FREE_MSG) or a request to transmit an
+ * action frame (QTN_TXBF_ACT_FRM_TX_MSG).
+ * Returns QTN_TXBF_MBOX_PROCESSED if the message was consumed.
+ */
+static int qdrv_txbf_process_txbf_mbox(struct qdrv_wlan *qw, volatile struct qtn_txbf_mbox *txbf_mbox, uint32_t pkt_offset)
+{
+	volatile struct txbf_state *txbf_state = qw->txbf_state;
+	volatile struct txbf_pkts *pkt_info;
+	struct ieee80211_node *ni;
+	struct sk_buff *skb;
+	uint32_t bf_compressed;
+	uint32_t bw_mode;
+	uint32_t bf_tone_grp;
+
+	if (QTN_TXBF_MBOX_BAD_IDX == pkt_offset || pkt_offset >= QTN_TXBF_MUC_DSP_MSG_RING_SIZE) {
+		DBGPRINTF_E("%s: bad txbf mbox pkt_offset value %d\n", __func__, pkt_offset);
+		return QTN_TXBF_MBOX_NOT_PROCESSED;
+	}
+
+	pkt_info = txbf_mbox->txbf_msg_bufs + pkt_offset;
+
+	/* Attach SKB so we can free up the buffer properly. */
+	skb = qdrv_txbf_get_skb(pkt_info);
+	if (skb == NULL) {
+		qtn_txbf_mbox_free_msg_buf(pkt_info);
+		return QTN_TXBF_MBOX_NOT_PROCESSED;
+	}
+
+	/*
+	 * Fixed: snapshot the fields that are needed after the message buffer
+	 * has been returned to the DSP.  Previously they were read from
+	 * pkt_info after qtn_txbf_mbox_free_msg_buf(), i.e. after the slot
+	 * could have been reused (read-after-free).
+	 */
+	bf_compressed = pkt_info->bf_compressed;
+	bw_mode = pkt_info->ndp_info.bw_mode;
+	bf_tone_grp = pkt_info->bf_tone_grp;
+
+	/* DSP is done with a received action frame */
+	if (pkt_info->msg_type == QTN_TXBF_ACT_FRM_FREE_MSG) {
+		uint8_t slot = pkt_info->slot;
+
+		if (pkt_info->success) {
+			txbf_state->stvec_install_success++;
+			if (slot < QTN_STATS_NUM_BF_SLOTS) {
+				RXSTAT(qw, rx_bf_success[slot]);
+			}
+			TXSTAT_SET(qw, txbf_qmat_wait, pkt_info->txbf_qmat_install_wait);
+		} else {
+			txbf_state->stvec_install_fail++;
+			if (slot < QTN_STATS_NUM_BF_SLOTS) {
+				RXSTAT(qw, rx_bf_rejected[slot]);
+			}
+		}
+		if (bf_compressed) {
+			txbf_state->cmp_act_frms_rxd++;
+		} else {
+			txbf_state->uncmp_act_frms_rxd++;
+		}
+		txbf_state->qmat_offset = pkt_info->qmat_offset;
+		txbf_state->bf_ver = pkt_info->bf_ver;
+
+		if (bw_mode == QTN_BW_80M) {
+			txbf_state->qmat_bandwidth = BW_HT80;
+		} else if (bw_mode == QTN_BW_40M) {
+			txbf_state->qmat_bandwidth = BW_HT40;
+		} else {
+			txbf_state->qmat_bandwidth = BW_HT20;
+		}
+		txbf_state->bf_tone_grp = bf_tone_grp;
+
+		qdrv_txbf_pkt_info_clear(pkt_info, skb);
+
+		return QTN_TXBF_MBOX_PROCESSED;
+	} else if (pkt_info->msg_type != QTN_TXBF_ACT_FRM_TX_MSG) {
+		/* Print an error message for unexpected messages */
+		if (pkt_info->msg_type != QTN_TXBF_NDP_DISCARD_MSG) {
+			DBGPRINTF_E("Received message not for me: %x\n", pkt_info->msg_type);
+		}
+		qdrv_txbf_pkt_info_clear(pkt_info, skb);
+
+		return QTN_TXBF_MBOX_PROCESSED;
+	}
+
+	/* Process a transmit action frame from the DSP */
+	skb_put(skb, pkt_info->act_frame_len);
+
+	if (txbf_state->send_txbf_netdebug) {
+		txbf_state->send_txbf_netdebug = 0;
+		qdrv_control_txbf_pkt_send(qw,
+				skb->data +
+				qdrv_txbf_act_frm_allheaders_len() + 2,
+				bw_mode);
+	}
+
+	ni = qdrv_txbf_find_txnode(qw->mac, &pkt_info->ndp_info);
+
+	/* Clear the packet info ready for the next NDP */
+	pkt_info->act_frame_phys = 0;
+	pkt_info->buffer_start = 0;
+	pkt_info->skb = 0;
+	qtn_txbf_mbox_free_msg_buf(pkt_info);
+
+	if (ni == NULL) {
+		dev_kfree_skb_irq(skb);
+	} else {
+		ni->ni_ic->ic_send_80211(ni->ni_ic, ni, skb, WME_AC_VO, 0);
+		/* Only the snapshotted values may be used from here on */
+		if (bf_compressed) {
+			txbf_state->cmp_act_frms_sent++;
+		} else {
+			txbf_state->uncmp_act_frms_sent++;
+		}
+		if (bw_mode == QTN_BW_80M) {
+			txbf_state->qmat_bandwidth = BW_HT80;
+		} else if (bw_mode == QTN_BW_40M) {
+			txbf_state->qmat_bandwidth = BW_HT40;
+		} else {
+			txbf_state->qmat_bandwidth = BW_HT20;
+		}
+		txbf_state->bf_tone_grp = bf_tone_grp;
+	}
+
+	return QTN_TXBF_MBOX_PROCESSED;
+}
+
+/*
+ * Mailbox tasklet
+ * The node structure must be locked before scheduling this process.
+ * The interrupt handler disables the mbox interrupt before scheduling this
+ * tasklet, so every exit path must re-enable it.
+ */
+static void qdrv_txbf_mbox_tasklet(unsigned long data)
+{
+	volatile struct qtn_txbf_mbox *txbf_mbox = qtn_txbf_mbox_get();
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)data;
+	uint32_t pkt_offset;
+	uint32_t res;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	pkt_offset = qtn_txbf_mbox_recv(qtn_mproc_sync_addr(&(txbf_mbox->dsp_to_host_mbox)));
+	if (QTN_TXBF_MBOX_BAD_IDX == pkt_offset) {
+		/*
+		 * Fixed: this path previously logged "-->Enter" and returned
+		 * without re-enabling the mbox interrupt, which left txbf
+		 * processing stalled after a spurious/empty mailbox read.
+		 */
+		qtn_txbf_lhost_irq_enable(qw->mac);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return;
+	}
+
+	/* Try MU group opcodes first; fall back to regular txbf messages */
+	res = qdrv_txbf_process_mu_grp_mbox(qw, txbf_mbox, pkt_offset);
+	if (res == QTN_TXBF_MBOX_NOT_PROCESSED) {
+		qdrv_txbf_process_txbf_mbox(qw, txbf_mbox, pkt_offset);
+	}
+
+	/* Enable the mbx interrupts */
+	qtn_txbf_lhost_irq_enable(qw->mac);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+/* Handler for the DSP interrupt for action frame */
+static void qdrv_txbf_mbox_interrupt(void *arg1, void *dev_id)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)dev_id;
+	struct txbf_state *txbf_state = qw->txbf_state;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Disable mbx interrupts; the tasklet re-enables them when done */
+	qtn_txbf_lhost_irq_disable(qw->mac);
+
+	/* Defer the actual mailbox processing to tasklet context */
+	if (txbf_state != NULL) {
+		tasklet_schedule(&txbf_state->txbf_dsp_mbox_task);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+/*
+ * Dump the current TXBF configuration and counters to the console and pack
+ * the SVD mode flags into *value, one flag per nibble:
+ *   bit16 CalcChanInv, bit12 CalcTwoStreams, bit8 ApplyPerAntScaling,
+ *   bit4 ApplyStreamMixing, bit0 SVD bypass.
+ * Always returns 0.  If the shared mailbox is unavailable, *value is 0.
+ *
+ * NOTE(review): txbf_state is dereferenced without a NULL check; this
+ * appears to assume qdrv_txbf_init() has already attached it - confirm.
+ */
+int qdrv_txbf_config_get(struct qdrv_wlan *qw, u32 *value)
+{
+	struct txbf_state *txbf_state = (struct txbf_state *) qw->txbf_state;
+	volatile struct qtn_txbf_mbox *txbf_mbox = qtn_txbf_mbox_get();
+	volatile struct txbf_ctrl *bf_ctrl = NULL;
+
+	printk("Current TXBF Config values are:\n");
+	if (txbf_mbox != NULL) {
+		bf_ctrl = &txbf_mbox->bfctrl_params;
+
+		printk("    CalcChanInv               = %d\n",
+			!!(bf_ctrl->svd_mode & BIT(SVD_MODE_CHANNEL_INV)));
+		printk("    CalcTwoStreams            = %d\n",
+			!!(bf_ctrl->svd_mode & BIT(SVD_MODE_TWO_STREAM)));
+		printk("    ApplyPerAntScaling        = %d\n",
+			!!(bf_ctrl->svd_mode & BIT(SVD_MODE_PER_ANT_SCALE)));
+		printk("    ApplyStreamMixing         = %d\n",
+			!!(bf_ctrl->svd_mode & BIT(SVD_MODE_STREAM_MIXING)));
+		printk("    SVD Bypass                = %d\n",
+			!!(bf_ctrl->svd_mode & BIT(SVD_MODE_BYPASS)));
+	} else {
+		printk("    SVD settings not available\n");
+	}
+	printk("    Stvec install bypass      = %d\n", txbf_state->stmat_install_bypass);
+	printk("    Reg Scale fac             = %d\n", txbf_state->st_mat_reg_scale_fac);
+	printk("    Stvec install success     = %d\n", txbf_state->stvec_install_success);
+	printk("    Stvec install failed      = %d\n", txbf_state->stvec_install_fail);
+	printk("    Stvec overwrite           = %d\n", txbf_state->stvec_overwrite);
+	printk("    Comp Action Frames Sent   = %d\n", txbf_state->cmp_act_frms_sent);
+	printk("    Uncomp Action Frames Sent = %d\n", txbf_state->uncmp_act_frms_sent);
+	printk("    Comp Action Frames Recv   = %d\n", txbf_state->cmp_act_frms_rxd);
+	printk("    Uncomp Action Frames Recv = %d\n", txbf_state->uncmp_act_frms_rxd);
+	printk("    Bandwidth                 = %d\n", txbf_state->qmat_bandwidth);
+	/* Q-matrix offsets depend on bandwidth and the feedback tone grouping */
+	if (((txbf_state->qmat_bandwidth == 0) || (txbf_state->qmat_bandwidth == BW_HT80)) &&
+			(txbf_state->bf_tone_grp == QTN_TXBF_DEFAULT_QMAT_NG)) {
+		/* Assume 80 MHz 11ac node if bw is 0, as hw is providing feedback */
+		printk("    1 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_1STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_2STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    3 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_3STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    4 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_4STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    1 Stream 40M Stvec offset = %u\n", QTN_TXBF_QMAT80_1STRM_40M_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream 40M Stvec offset = %u\n", QTN_TXBF_QMAT80_2STRM_40M_OFFSET(txbf_state->qmat_offset));
+		printk("    1 Stream 20M Stvec offset = %u\n", QTN_TXBF_QMAT80_1STRM_20M_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream 20M Stvec offset = %u\n", QTN_TXBF_QMAT80_2STRM_20M_OFFSET(txbf_state->qmat_offset));
+	} else if ((txbf_state->qmat_bandwidth == 0) || (txbf_state->qmat_bandwidth == BW_HT80)) {
+		/* Assume 80 MHz 11ac node if bw is 0, as hw is providing feedback */
+		printk("    1 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_NG2_1STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_NG2_2STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    3 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_NG2_3STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    4 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT80_NG2_4STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    1 Stream 40M Stvec offset = %u\n", QTN_TXBF_QMAT80_NG2_1STRM_40M_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream 40M Stvec offset = %u\n", QTN_TXBF_QMAT80_NG2_2STRM_40M_OFFSET(txbf_state->qmat_offset));
+		printk("    1 Stream 20M Stvec offset = %u\n", QTN_TXBF_QMAT80_NG2_1STRM_20M_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream 20M Stvec offset = %u\n", QTN_TXBF_QMAT80_NG2_2STRM_20M_OFFSET(txbf_state->qmat_offset));
+	} else
+	{
+		/* 40 MHz (or narrower) layout */
+		printk("    1 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT40_1STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT40_2STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    3 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT40_3STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    4 Stream Stvec offset     = %u\n", QTN_TXBF_QMAT40_4STRM_OFFSET(txbf_state->qmat_offset));
+		printk("    1 Stream 40M Stvec offset = %u\n", QTN_TXBF_QMAT40_1STRM_40M_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream 40M Stvec offset = %u\n", QTN_TXBF_QMAT40_2STRM_40M_OFFSET(txbf_state->qmat_offset));
+		printk("    1 Stream 20M Stvec offset = %u\n", QTN_TXBF_QMAT40_1STRM_20M_OFFSET(txbf_state->qmat_offset));
+		printk("    2 Stream 20M Stvec offset = %u\n", QTN_TXBF_QMAT40_2STRM_20M_OFFSET(txbf_state->qmat_offset));
+	}
+	printk("    BF version                = %u\n", txbf_state->bf_ver);
+
+	*value = 0;
+	if (bf_ctrl == NULL) {
+		return(0);
+	}
+	/* Pack one SVD flag per nibble so config_get/config_set are symmetric */
+	*value |= !!(bf_ctrl->svd_mode & BIT(SVD_MODE_CHANNEL_INV)) << 16;
+	*value |= !!(bf_ctrl->svd_mode & BIT(SVD_MODE_TWO_STREAM)) << 12;
+	*value |= !!(bf_ctrl->svd_mode & BIT(SVD_MODE_PER_ANT_SCALE)) << 8;
+	*value |= !!(bf_ctrl->svd_mode & BIT(SVD_MODE_STREAM_MIXING)) << 4;
+	*value |= !!(bf_ctrl->svd_mode & BIT(SVD_MODE_BYPASS));
+
+	return(0);
+}
+
+/*
+ * Apply a packed TXBF configuration word.
+ * If any of bits 31:24 are set, the top byte is interpreted as a signed
+ * stmat regularization scale factor and nothing else is changed.
+ * Otherwise the nibbles at bits 20/16/12/8/4 enable the SVD mode flags
+ * (channel-inv, two-stream, per-ant scale, stream mixing, bypass) in the
+ * shared DSP mailbox.  Returns 0 on success, -1 if the mailbox is absent.
+ */
+int qdrv_txbf_config_set(struct qdrv_wlan *qw, u32 value)
+{
+	struct txbf_state *txbf_state = (struct txbf_state *) qw->txbf_state;
+	int par0,par1, par2, par3, par4, par5;
+        volatile struct qtn_txbf_mbox *txbf_mbox = qtn_txbf_mbox_get();
+
+    DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	txbf_state->send_txbf_netdebug = 1;
+
+	/* Top byte non-zero: set only the regularization scale factor */
+	if(value & (0xFF << 24)){
+		txbf_state->st_mat_reg_scale_fac = (signed char)((int)value >>24);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(0);
+	}
+
+	/* Unpack one parameter per nibble (par5 is currently unused below) */
+	par5 = (value >> 0) & 0xf;
+	par4 = (value >> 4) & 0xf;
+	par3 = (value >> 8) & 0xf;
+	par2 = (value >> 12) & 0xf;
+	par1 = (value >> 16) & 0xf;
+	par0 = (value >> 20) & 0xf;
+
+	if (txbf_mbox != NULL) {
+		volatile struct txbf_ctrl *bf_ctrl = &txbf_mbox->bfctrl_params;
+
+		/* First assignment (not |=) clears any previous mode bits */
+		bf_ctrl->svd_mode = par0 ? BIT(SVD_MODE_CHANNEL_INV) : 0;
+		bf_ctrl->svd_mode |= par1 ? BIT(SVD_MODE_TWO_STREAM) : 0;
+		bf_ctrl->svd_mode |= par2 ? BIT(SVD_MODE_PER_ANT_SCALE) : 0;
+		bf_ctrl->svd_mode |= par3 ? BIT(SVD_MODE_STREAM_MIXING) : 0;
+		bf_ctrl->svd_mode |= par4 ? BIT(SVD_MODE_BYPASS) : 0;
+		printk("Beamforming svd mode set to 0x%x\n", bf_ctrl->svd_mode);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(0);
+	}
+	printk("Beamforming svd mode not set\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return (-1);
+}
+
+/*
+ * Allocate and initialise TXBF state for a wlan device.
+ * Sets default SVD/stmat parameters, initialises the mailbox tasklet,
+ * registers the DSP-to-LHOST mailbox interrupt handler and enables the
+ * mailbox IRQ.  On success attaches the state to qw->txbf_state.
+ * Returns 0, -ENOMEM on allocation failure, -ENODEV if the interrupt
+ * handler cannot be registered.
+ */
+int qdrv_txbf_init(struct qdrv_wlan *qw)
+{
+	struct txbf_state *txbf_state;
+	struct int_handler dsp_intr_handler;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if((txbf_state = (struct txbf_state *)
+		kmalloc(sizeof(struct txbf_state), GFP_KERNEL)) == NULL)
+	{
+		DBGPRINTF_E("Unable to allocate memory for TXBF state\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-ENOMEM);
+	}
+
+	/* Clean the state */
+	memset(txbf_state, 0, sizeof(struct txbf_state));
+
+	/* Default SVD processing options: everything enabled */
+	txbf_state->st_mat_calc_chan_inv = 1;
+	txbf_state->st_mat_calc_two_streams = 1;
+	txbf_state->st_mat_apply_per_ant_scaling = 1;
+	txbf_state->st_mat_apply_stream_mixing = 1;
+	txbf_state->st_mat_reg_scale_fac = 10;
+
+	/* Tasklet data is the wlan pointer, matching qdrv_txbf_mbox_tasklet */
+	tasklet_init(&txbf_state->txbf_dsp_mbox_task, qdrv_txbf_mbox_tasklet, (unsigned long)qw);
+
+	/*
+	 * Register the interrupt handler to be called when DSP pushes
+	 * completion message into DSP-to-LHOST mbox
+	 */
+	dsp_intr_handler.handler = qdrv_txbf_mbox_interrupt;
+	dsp_intr_handler.arg1 = NULL;
+	dsp_intr_handler.arg2 = qw;
+	if (qdrv_mac_set_host_dsp_handler(qw->mac, QTN_TXBF_DSP_TO_HOST_MBOX_INT,
+			&dsp_intr_handler) != 0) {
+		/* Handle error case */
+		DBGPRINTF_E("Set handler error\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+		kfree(txbf_state);
+
+		return(-ENODEV);
+	}
+
+	/* Enable the mbx interrupts */
+	qtn_txbf_lhost_irq_enable(qw->mac);
+
+	/* We need a back pointer */
+	txbf_state->owner = (void *)qw;
+
+	/* Attach the state to the wlan once we are done with everything */
+	qw->txbf_state = (void *)txbf_state;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+/*
+ * Tear down TXBF state: disable the mailbox IRQ, kill the tasklet and
+ * free the state attached by qdrv_txbf_init().  Safe to call when
+ * txbf_state is NULL (kfree(NULL) is a no-op).  Always returns 0.
+ */
+int qdrv_txbf_exit(struct qdrv_wlan *qw)
+{
+	struct txbf_state *txbf_state = (struct txbf_state *) qw->txbf_state;
+
+	/* Disable the mbox interrupts */
+	qtn_txbf_lhost_irq_disable(qw->mac);
+
+	/* Ensure the tasklet cannot run again before freeing its state */
+	if (txbf_state)
+		tasklet_kill(&txbf_state->txbf_dsp_mbox_task);
+
+	/* Free the memory for maintaining state */
+	kfree(txbf_state);
+
+	qw->txbf_state = NULL;
+
+	return(0);
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_txbf.h b/drivers/qtn/qdrv/qdrv_txbf.h
new file mode 100644
index 0000000..4c90ae0
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_txbf.h
@@ -0,0 +1,63 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+#ifndef _QDRV_TXBF_H_
+#define _QDRV_TXBF_H_
+
+#include <qtn/txbf_common.h>
+#include <common/queue.h>
+
+/* Number of NDPs that can be in process at a given time
+ * DSP and ARM inclusive
+ */
+#define NUM_TXBF_PKTS (1)
+
+#define TXBF_BUFF_SIZE (sizeof(u32) * 4 * 4 * 64 * 2)
+
+/* Per-wlan TXBF (transmit beamforming) driver state, attached to
+ * qdrv_wlan::txbf_state by qdrv_txbf_init(). */
+struct txbf_state
+{
+	/* Tasklet that drains the DSP-to-LHOST mailbox */
+	struct tasklet_struct txbf_dsp_mbox_task;
+	volatile u8 send_txbf_netdebug; 
+	/* SVD processing options (defaults set in qdrv_txbf_init) */
+	volatile u8 st_mat_calc_chan_inv ; 
+	volatile u8 st_mat_calc_two_streams; 
+	volatile u8 st_mat_apply_per_ant_scaling;
+	volatile u8 st_mat_apply_stream_mixing;
+	/* Signed regularization scale factor (see qdrv_txbf_config_set) */
+	volatile s8 st_mat_reg_scale_fac;
+	/* Steering-vector install counters */
+	unsigned stvec_install_success;
+	unsigned stvec_install_fail;	
+	unsigned stvec_overwrite;	
+	unsigned svd_comp_bypass;
+	unsigned stmat_install_bypass;
+	/* Action frame TX/RX counters (compressed / uncompressed feedback) */
+	unsigned cmp_act_frms_sent;
+	unsigned uncmp_act_frms_sent;
+	unsigned cmp_act_frms_rxd;
+	unsigned uncmp_act_frms_rxd;
+	/* Q-matrix bandwidth/offset bookkeeping reported by config_get */
+	unsigned qmat_bandwidth;
+	unsigned qmat_offset;
+	unsigned bf_ver;
+	uint8_t	bf_tone_grp;
+	/* Back pointer to the owning struct qdrv_wlan */
+	void *owner;
+};
+
+int qdrv_txbf_init(struct qdrv_wlan *qw);
+int qdrv_txbf_exit(struct qdrv_wlan *qw);
+int qdrv_txbf_config_set(struct qdrv_wlan *qw, u32 value);
+int qdrv_txbf_config_get(struct qdrv_wlan *qw, u32 *value);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_uc_print.c b/drivers/qtn/qdrv/qdrv_uc_print.c
new file mode 100644
index 0000000..97054fe
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_uc_print.c
@@ -0,0 +1,222 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#include <linux/version.h>
+#include <linux/syscalls.h>
+#include <asm/hardware.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_muc.h"
+#include "qdrv_uc_print.h"
+#include "qdrv_comm.h"
+#include "qdrv_control.h"
+#include <qtn/shared_params.h>
+#include <qtn/shared_print_buf.h>
+#include <qtn/mproc_sync_base.h>
+
+#if AUC_LHOST_PRINT_FORMAT
+#include <auc/auc_print.h>
+#endif
+
+#define	AUC_PREFIX	"AuC"
+#define	MUC_PREFIX	"MuC"
+
+static struct shared_print_consumer muc_printbuf = {0};
+static struct shared_print_consumer auc_printbuf = {0};
+static struct work_struct uc_print_wq;
+static struct qdrv_cb *uc_print_qcb = NULL;
+
+/*
+ * the uC acts as a producer, writing data into the buffer and updating a count of
+ * bytes written. This function acts as a sole consumer, reading data from the
+ * uC's buffer line by line.
+ *
+ * The producer is not aware of the consumer(s), so failure to consume bytes quickly enough
+ * result in lost printouts
+ */
+/*
+ * Drain the uC's shared ring buffer one line at a time, forwarding each
+ * line to qdrv_control_sysmsg_send() (when a qcb is attached) and printk.
+ * A line ends at '\0', '\n', or when LINE_MAX - 1 chars have been copied;
+ * the loop exits once no complete line was taken.
+ */
+static void uc_print(struct shared_print_consumer* shared_buf, const char *prefix)
+{
+#define LINE_MAX	128
+	int took_line = 1;
+
+	while(took_line) {
+		u32 chars_to_consume = shared_buf->producer->produced - shared_buf->consumed;
+		u32 i;
+		char stackbuf[LINE_MAX];
+
+		took_line = 0;
+		for (i = 0; i < chars_to_consume && i < LINE_MAX - 1; i++) {
+			/* Modular index implements ring-buffer wrap-around */
+			char c = shared_buf->buf[(shared_buf->consumed + i) % shared_buf->producer->bufsize];
+			stackbuf[i] = c;
+			if (!c || i == LINE_MAX - 2 || c == '\n') {
+				took_line = 1;
+				/* Strip the newline; printk adds its own */
+				if (c == '\n')
+					stackbuf[i] = '\0';
+				stackbuf[i + 1] = '\0';
+				shared_buf->consumed += i + 1;
+				if ((uc_print_qcb != NULL) &&
+					(uc_print_qcb->macs[0].data != NULL)) {
+					qdrv_control_sysmsg_send(uc_print_qcb->macs[0].data,
+								stackbuf, i + 1, 0);
+				}
+				printk(KERN_INFO "%s: %s\n", prefix, stackbuf);
+				break;
+			}
+		}
+	}
+}
+
+/*
+ * Map the uC's shared print-buffer metadata and character buffer into
+ * the LHOST address space.  Returns 1 when the consumer is ready,
+ * 0 when the producer address has not been published yet (addr == 0).
+ * Panics if either ioremap fails, since printing can never recover.
+ *
+ * NOTE(review): muc_to_lhost() is used for both MuC and AuC addresses -
+ * presumably both cores share the same address translation; confirm.
+ */
+static int uc_print_initialise_shared_data(const char *uc_name, uint32_t addr,
+						struct shared_print_consumer *printbuf)
+{
+	struct shared_print_producer *prod;
+
+	if (!addr) {
+		return 0;
+	}
+
+	prod = ioremap_nocache(muc_to_lhost(addr), sizeof(*prod));
+	if (!prod) {
+		panic("%s to lhost printbuf could not translate metadata\n", uc_name);
+	}
+
+	printbuf->producer = prod;
+	printbuf->buf = ioremap_nocache(muc_to_lhost((u32)prod->buf), prod->bufsize);
+	if (!printbuf->buf) {
+		panic("%s to lhost printbuf could not translate char buffer %p : %u\n",
+			uc_name, prod->buf, prod->bufsize);
+	}
+
+	return 1;
+}
+
+/* Lazily attach the MuC print consumer; returns 1 when ready, 0 if the
+ * MuC has not published its producer address yet. */
+static int muc_print_initialise_shared_data(void)
+{
+	return uc_print_initialise_shared_data(MUC_PREFIX,
+		qtn_mproc_sync_shared_params_get()->m2l_printbuf_producer,
+		&muc_printbuf);
+}
+
+/* Lazily attach the AuC print consumer; returns 1 when ready, 0 if the
+ * AuC has not published its producer address yet. */
+static int auc_print_initialise_shared_data(void)
+{
+	return uc_print_initialise_shared_data(AUC_PREFIX,
+		qtn_mproc_sync_shared_params_get()->auc.a2l_printbuf_producer,
+		&auc_printbuf);
+}
+
+/* Queue the uC print drain on the shared kernel workqueue; callable from
+ * any context (including IRQ handlers). */
+void uc_print_schedule_work(void)
+{
+	schedule_work(&uc_print_wq);
+}
+
+/* MuC print IRQ: defer all work to process context via the workqueue. */
+static void muc_print_irq_handler(void *arg1, void *arg2)
+{
+	uc_print_schedule_work();
+}
+
+/*
+ * Workqueue body: drain the MuC buffer, then the AuC buffer.  Each
+ * consumer is initialised lazily on first use (short-circuit ||), since
+ * the uCs publish their producer addresses asynchronously at boot.
+ */
+static void uc_print_work(struct work_struct *work)
+{
+	if (muc_printbuf.producer || muc_print_initialise_shared_data()) {
+		uc_print(&muc_printbuf, MUC_PREFIX);
+	}
+	if (auc_printbuf.producer || auc_print_initialise_shared_data()) {
+#if AUC_LHOST_PRINT_FORMAT
+		/* Optional formatted-print callback takes over AuC output */
+		if (uc_print_auc_cb)
+			uc_print_auc_cb(&auc_printbuf);
+#else
+			uc_print(&auc_printbuf, AUC_PREFIX);
+#endif
+	}
+}
+
+/*
+ * Register 'handler' for interrupt 'num' on mac 0 and enable the IRQ.
+ * Returns 0 on success, -1 if the handler could not be installed.
+ */
+static int qdrv_uc_set_irq_handler(struct qdrv_cb *qcb, void(*handler)(void*, void*), uint32_t num)
+{
+	struct int_handler int_handler;
+
+	int_handler.handler = handler;
+	int_handler.arg1 = qcb;
+	int_handler.arg2 = NULL;
+
+	if(qdrv_mac_set_handler(&qcb->macs[0], num, &int_handler) != 0) {
+		DBGPRINTF_E( "Set handler failed\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	qdrv_mac_enable_irq(&qcb->macs[0], num);
+
+	return 0;
+}
+
+/*
+ * Initialise uC console forwarding: record the control block, set up the
+ * drain work item and install the MuC print IRQ handler.
+ * Returns 0 on success, -1 on IRQ handler registration failure.
+ */
+int qdrv_uc_print_init(struct qdrv_cb *qcb)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	uc_print_qcb = qcb;
+
+	INIT_WORK(&uc_print_wq, uc_print_work);
+
+	if (qdrv_uc_set_irq_handler(qcb, muc_print_irq_handler, RUBY_M2L_IRQ_LO_PRINT) != 0) {
+		return -1;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * Tear down uC console forwarding: disable the MuC print IRQ, flush any
+ * pending drain work, then unmap both MuC and AuC shared buffers.
+ * Always returns 0.
+ */
+int qdrv_uc_print_exit(struct qdrv_cb *qcb)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Clear first so in-flight uc_print() stops forwarding to the qcb */
+	uc_print_qcb = NULL;
+
+	qdrv_mac_disable_irq(&qcb->macs[0], RUBY_M2L_IRQ_LO_PRINT);
+
+	/* TODO: AuC need to use own IRQ handler, similar to RUBY_M2L_IRQ_LO_PRINT. Need  to unregister it here. */
+
+	flush_scheduled_work();
+
+	if (muc_printbuf.buf) {
+		iounmap(muc_printbuf.buf);
+		muc_printbuf.buf = NULL;
+	}
+
+	if (muc_printbuf.producer) {
+		iounmap((void *)muc_printbuf.producer);
+		muc_printbuf.producer = NULL;
+	}
+
+	if (auc_printbuf.buf) {
+		iounmap(auc_printbuf.buf);
+		auc_printbuf.buf = NULL;
+	}
+
+	if (auc_printbuf.producer) {
+		iounmap((void *)auc_printbuf.producer);
+		auc_printbuf.producer = NULL;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
diff --git a/drivers/qtn/qdrv/qdrv_uc_print.h b/drivers/qtn/qdrv/qdrv_uc_print.h
new file mode 100644
index 0000000..6015f00
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_uc_print.h
@@ -0,0 +1,31 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef __QDRV_UC_PRINT_H
+#define __QDRV_UC_PRINT_H
+
+#include "qdrv_soc.h"
+
+void uc_print_schedule_work(void);
+int qdrv_uc_print_init(struct qdrv_cb *qcb);
+int qdrv_uc_print_exit(struct qdrv_cb *qcb);
+
+#endif // __QDRV_UC_PRINT_H
+
diff --git a/drivers/qtn/qdrv/qdrv_vap.c b/drivers/qtn/qdrv/qdrv_vap.c
new file mode 100644
index 0000000..76add03a
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_vap.c
@@ -0,0 +1,778 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/rtnetlink.h>
+#include <asm/hardware.h>
+#include <asm/board/board_config.h>
+
+#include <qtn/qdrv_sch.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_fwt_db.h>
+
+#ifdef CONFIG_QVSP
+#include "qtn/qvsp.h"
+#endif
+
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_muc.h"
+#include "qdrv_hal.h"
+#include "qdrv_comm.h"
+#include "qdrv_vap.h"
+#include "qdrv_wlan.h"
+
+extern void indicate_association(void);
+extern void indicate_disassociation(void);
+extern unsigned int g_led_assoc_indicate;
+
+static bool wps_button_not_initd = 1;
+static bool igmp_query_timer_not_initd = 1;
+// TBD - Need to move this to a more suitable place
+/*
+ * net_device open handler for a qdrv VAP.
+ * Sends IOCTL_DEV_DEVOPEN to the MuC, enables NAPI, starts the TX queue,
+ * opens the 802.11 layer, and performs one-time WPS button / IGMP query
+ * timer setup.  Returns 0 on success, -1 if the ioctl message cannot be
+ * allocated.
+ */
+static int vnet_init(struct net_device *dev)
+{
+	struct qdrv_vap *qv = netdev_priv(dev);
+	struct qdrv_wlan *qw = qv->parent;
+	struct host_ioctl *ioctl;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ioctl = vnet_alloc_ioctl(qv);
+	if (!ioctl) {
+		DBGPRINTF_E("Failed to allocate message\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Tell the MuC this device id is opening */
+	ioctl->ioctl_command = IOCTL_DEV_DEVOPEN;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = 0;
+	ioctl->ioctl_argp = (u32) NULL;
+	ioctl->ioctl_next = 0;
+	ioctl->ioctl_status = 0;
+
+	vnet_send_ioctl(qv, ioctl);
+
+	napi_enable(&qv->napi);
+	napi_schedule(&qv->napi);
+
+	netif_start_queue(dev);
+
+	/* Open the device for the 802.11 layer */
+	ieee80211_open(dev);
+
+	/* Set up the WPS button IRQ handler (once, on the first VAP open) */
+	// TBD - Need to move this to a more suitable place
+	if (wps_button_not_initd) {
+		wps_button_not_initd = 0;
+		qdrv_wps_button_init(dev);
+	}
+
+	/* Start the IGMP query timer once, only for AP-mode VAPs */
+	if (igmp_query_timer_not_initd &&
+			qv->iv.iv_opmode == IEEE80211_M_HOSTAP) {
+		qdrv_wlan_igmp_query_timer_start(qw);
+		igmp_query_timer_not_initd = 0;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return(0);
+}
+
+/*
+ * net_device stop handler: close the 802.11 layer, disable NAPI and stop
+ * the TX queue.  Always returns 0.
+ */
+static int vnet_stop(struct net_device *dev)
+{
+	struct qdrv_vap *qv = netdev_priv(dev);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* close the device for the 802.11 layer */
+	ieee80211_stop(dev);
+
+	napi_disable(&qv->napi);
+	netif_stop_queue(dev);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+static __sram_data struct net_device_ops vnet_device_ops;
+
+/* alloc_netdev() setup callback: apply Ethernet defaults, install the
+ * shared netdev ops and set the TX queue length. */
+static void vnet_start(struct net_device * dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &vnet_device_ops;
+	dev->tx_queue_len = 500;
+}
+
+/*
+ * Intercept 802.11 VAP state transitions before handing off to the saved
+ * iv_newstate handler (qv->qv_newstate).  Performs qdrv-side work per
+ * target state: beacon start/stop, association LED indication and
+ * state-machine statistics.  May return 0 early (deferring the
+ * transition) for TDLS base-channel / disassociation pending cases.
+ */
+static int qdrv_vap_80211_newstate_callback(struct ieee80211vap *vap,
+	enum ieee80211_state nstate, int arg)
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)qv->parent;
+	int error;
+	int stamode;
+	struct ieee80211com *ic = vap->iv_ic;
+	enum ieee80211_state ostate;
+
+	ostate = vap->iv_state;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter, caller %p\n", __builtin_return_address(0));
+
+	if(vap->iv_state != nstate)
+	{
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_VAP,
+				"New state for \"%s\" %s -> %s\n",
+				vap->iv_dev->name, ieee80211_state_name[vap->iv_state],
+				ieee80211_state_name[nstate]);
+	}
+
+	/* Client-side operating modes share the STA statistics paths */
+	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
+				vap->iv_opmode == IEEE80211_M_IBSS ||
+				vap->iv_opmode == IEEE80211_M_AHDEMO);
+
+	switch (nstate) {
+	case IEEE80211_S_RUN:
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+				"IEEE80211_S_RUN\n");
+		switch (vap->iv_opmode) {
+
+		case IEEE80211_M_HOSTAP:
+			DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+					"IEEE80211_M_HOSTAP - Send Beacon\n");
+			ic->ic_beacon_update(vap);
+
+			break;
+		case IEEE80211_M_STA:
+		case IEEE80211_M_IBSS:
+		case IEEE80211_M_AHDEMO:
+			DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+				"IEEE80211_M_STA\n");
+			/* LED association indication, then join the BSS */
+			indicate_association();
+			ic->ic_join_bss(vap);
+			SMSTAT(qw, sm_sta_associated);
+			break;
+		default:
+			break;
+		}
+		break;
+	case IEEE80211_S_INIT://    = 0,    /* default state */
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"IEEE80211_S_INIT\n");
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			ic->ic_beacon_stop(vap);
+		} else if (vap->iv_opmode == IEEE80211_M_STA) {
+			/* Pend disassociation with AP before TDLS link return to base channel */
+			if (!ieee80211_tdls_return_to_base_channel(vap, 1))
+				return 0;
+		}
+		indicate_disassociation();
+		break;
+	case IEEE80211_S_SCAN://    = 1,    /* scanning */
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+				"IEEE80211_S_SCAN\n");
+		indicate_disassociation();
+		/* Count scan entries by cause: auth/assoc timeout vs. fresh scan */
+		if (arg == IEEE80211_SCAN_FAIL_TIMEOUT) {
+			if (ostate == IEEE80211_S_AUTH) {
+				SMSTAT(qw, sm_scan_auth_fail_scan_pend);
+			} else if (ostate == IEEE80211_S_ASSOC) {
+				SMSTAT(qw, sm_scan_assoc_fail_scan_pend);
+			}
+		}
+		else if (arg == 0) {
+			SMSTAT(qw, sm_scan_pend);
+		}
+		break;
+
+	case IEEE80211_S_AUTH://    = 2,    /* try to authenticate */
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+				"IEEE80211_S_AUTH\n");
+		indicate_disassociation();
+		if (stamode) {
+			if (ostate == IEEE80211_S_SCAN) {
+				SMSTAT(qw, sm_auth_pend);
+			} else {
+				SMSTAT(qw, sm_run_deauth_auth_pend);
+			}
+		}
+		break;
+	case IEEE80211_S_ASSOC://   = 3,    /* try to assoc */
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+				"IEEE80211_S_ASSOC\n");
+		indicate_disassociation();
+		if (stamode) {
+			if (ostate == IEEE80211_S_AUTH) {
+				SMSTAT(qw, sm_assoc_pend);
+			} else {
+				SMSTAT(qw, sm_run_disassoc_assoc_pend);
+			}
+		}
+		break;
+	default:
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+				"<unknown state %d>\n", nstate);
+		indicate_disassociation();
+		break;
+	}
+
+	/* Pend disassociation before tearing down all of TDLS links */
+	if (ieee80211_tdls_pend_disassociation(vap, nstate, arg))
+		return 0;
+
+	/* Invoke the parent method to complete the work.*/
+	error = (*qv->qv_newstate)(vap, nstate, arg);
+
+	return error;
+}
+
+/* Return 1 if the VAP has the WDS extended flag set, else 0. */
+int qdrv_vap_wds_mode(struct qdrv_vap *qv)
+{
+	return(qv->iv.iv_flags_ext & IEEE80211_FEXT_WDS ? 1 : 0);
+}
+
+#ifdef CONFIG_QVSP
+/*
+ * QVSP ioctl shim: on Topaz QTM builds, mirror selected parameters into
+ * the qdrv_wlan state (check interval in seconds; warm-up counter when
+ * VSP is being enabled) before forwarding to qdrv_hostlink_qvsp().
+ * Returns the hostlink call's result.
+ */
+static int qdrv_qvsp_ioctl(void *qw_, uint32_t param, uint32_t value)
+{
+#if TOPAZ_QTM
+	struct qdrv_wlan *qw = qw_;
+
+	switch (param) {
+	case QVSP_CFG_FAT_MIN_CHECK_INTV:
+		/* value arrives in milliseconds; cache it in seconds */
+		qw->vsp_check_intvl = value / MSEC_PER_SEC;
+		break;
+	case QVSP_CFG_ENABLED:
+		if (value) {
+			/* Give stats part 2 check interval to warm up */
+			qw->vsp_enabling = 2;
+		}
+		break;
+	default:
+		break;
+	}
+#endif
+
+	return qdrv_hostlink_qvsp(qw_, param, value);
+}
+#endif
+
+/*
+ * Recompute mac->vnet_last as the index of the highest occupied VAP slot.
+ * When no slot is occupied the loop runs i down to -1, which becomes the
+ * stored value.
+ */
+static void qdrv_vap_set_last(struct qdrv_mac *mac)
+{
+	int i = QDRV_MAX_VAPS;
+
+	/* Scan from the top slot downward for the first non-NULL entry */
+	while (i--) {
+		if (mac->vnet[i]) {
+			break;
+		}
+	}
+
+	mac->vnet_last = i;
+}
+
+/*
+ * Sync the per-VAP VLAN-to-index table after a VLAN mode change.
+ * Access mode records 'vid' for this VAP; any other (non-dynamic) mode
+ * resets the slot.  Then recompute qdrv_vap_vlan_max as one past the
+ * last VAP bound to a VLAN, and refresh the scheduler VLAN path on
+ * non-PCIe builds.  Returns 0 always (dynamic mode is a no-op).
+ */
+int qdrv_vap_vlan2index_sync(struct qdrv_vap *qv, uint16_t mode, uint16_t vid)
+{
+	uint8_t vap_id = qv->qv_vap_idx;
+	uint8_t last = 0;
+
+	if (mode == QVLAN_MODE_DYNAMIC)
+		return 0;
+
+	if (mode != QVLAN_MODE_ACCESS)
+		vid = VLANID_INDEX_INITVAL;
+
+	qdrv_sch_vlan2index[vap_id] = vid;
+
+	/* find the last vap bound to a vlan */
+	for (vap_id = 0; vap_id < ARRAY_SIZE(qdrv_sch_vlan2index); vap_id++) {
+		if (qdrv_sch_vlan2index[vap_id] != VLANID_INDEX_INITVAL) {
+			last = vap_id + 1;
+		}
+	}
+	qdrv_vap_vlan_max = last;
+
+#if !defined(CONFIG_TOPAZ_PCIE_HOST) && !defined(CONFIG_TOPAZ_PCIE_TARGET)
+	qdrv_sch_set_vlanpath();
+#endif
+	return 0;
+}
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+/*
+ * Fill sub_port_bitmap with the active node bitmap for the bridge port's
+ * VAP.  Starts from the FWT ipff multicast entry (all active nodes) and
+ * clears bits for nodes that do not belong to this VAP or are in a
+ * transient (ni_node_idx == 0) state.
+ * Returns 1 if any sub port remains set, 0 otherwise (including the
+ * size-mismatch and netdev-mismatch error cases).
+ */
+int qdrv_get_active_sub_port(const struct net_bridge_port *p,
+		uint32_t *sub_port_bitmap, int size)
+{
+	struct topaz_fwt_sw_mcast_entry *mcast_entry;
+	int i;
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+	struct ieee80211_node *ni;
+	int skip;
+
+	/* Caller must supply a buffer matching the FWT node bitmap exactly */
+	if (size != sizeof(mcast_entry->node_bitmap)) {
+		DBGPRINTF_LIMIT_E("bitmap length is invalid - %d/%d\n",
+			size, sizeof(mcast_entry->node_bitmap));
+		return 0;
+	}
+
+	/* Use the ipff entry, which has the bit set for each active node */
+	mcast_entry = fwt_db_get_sw_mcast_ff();
+	memcpy(sub_port_bitmap, mcast_entry->node_bitmap, size);
+
+	vap = (struct ieee80211vap *)netdev_priv(p->dev);
+	/* something unexpected */
+	if (vap->iv_dev != p->dev) {
+		DBGPRINTF_LIMIT_E("%s: net_device mismatch - %p/%p\n", __FUNCTION__,
+			vap->iv_dev, p->dev);
+		return 0;
+	}
+
+	ic = vap->iv_ic;
+
+	/* some nodes(sub ports) in 'sub_port_bitmap' may not belong to 'vap' */
+	for (i = 0; i < QTN_NCIDX_MAX; i++) {
+		if ((sub_port_bitmap[BR_SUBPORT_IDX(i)] & BR_SUBPORT_BITMAP(i)) == 0)
+			continue;
+
+		ni = ieee80211_find_node_by_idx(ic, vap, i);
+		if (ni == NULL) {
+			/* node not found or doesn't belong to 'vap' */
+			skip = 1;
+		} else if (ni->ni_node_idx == 0) {
+			/* race condition  */
+			skip = 1;
+		} else {
+			skip = 0;
+		}
+
+		/* find_node takes a reference; drop it before possibly clearing */
+		if (ni != NULL)
+			ieee80211_free_node(ni);
+
+		if (skip)
+			sub_port_bitmap[BR_SUBPORT_IDX(i)] &= ~BR_SUBPORT_BITMAP(i);
+	}
+
+	/* Report whether any active sub port survived the filtering */
+	for (i = 0; i < ARRAY_SIZE(mcast_entry->node_bitmap); i++) {
+		if (sub_port_bitmap[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Check whether a single sub port (node index) is active on the bridge
+ * port's VAP.  Returns 1 if the node exists and is running, 0 if not,
+ * -1 if the bridge port's net_device does not match the VAP.
+ *
+ * NOTE(review): ni is read under the node-table lock but dereferenced by
+ * ieee80211_node_is_running() after the unlock, without taking a node
+ * reference - confirm this is safe against concurrent node removal.
+ */
+int qdrv_check_active_sub_port(const struct net_bridge_port *p,
+		const uint32_t sub_port)
+{
+	struct net_device *dev;
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+	struct ieee80211_node_table *nt;
+	struct ieee80211_node *ni;
+
+	dev = p->dev;
+	vap = netdev_priv(dev);
+
+	if (vap->iv_dev != dev)
+		return -1;
+
+	BUG_ON(QTN_NCIDX_MAX < IEEE80211_NODE_IDX_UNMAP(sub_port));
+
+	ic = vap->iv_ic;
+	nt = &ic->ic_sta;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	ni = ic->ic_node_idx_ni[IEEE80211_NODE_IDX_UNMAP(sub_port)];
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return (ni && ieee80211_node_is_running(ni));
+}
+#endif
+
+int qdrv_vap_init(struct qdrv_mac *mac, struct host_ioctl_hifinfo *hifinfo,
+	u32 arg1, u32 arg2)
+{
+	struct qdrv_vap *qv;
+	struct net_device *vdev;
+	int opmode = 0;
+	int vap_idx = 0;
+	int i;
+	struct ieee80211com *ic = NULL;
+	struct ieee80211vap *vap = NULL;
+	struct qdrv_wlan *qw;
+	int stamode;
+	int repeater_mode;
+	unsigned int qv_devid = arg1 & IOCTL_DEVATTACH_DEVID_MASK;
+	unsigned int dev_devid = QDRV_WLANID_FROM_DEVID(qv_devid);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"name         : %s\n", hifinfo->hi_name);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"semmap       : %d\n", hifinfo->hi_semmap[0]);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"mbox         : 0x%08x\n", hifinfo->hi_mboxstart);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"rxfifo       : 0x%08x\n", hifinfo->hi_rxfifo);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"scanirq      : 0x%08x\n", hifinfo->hi_scanirq);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"scanfifo     : 0x%08x\n", hifinfo->hi_scanfifo);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"qv_devid     : 0x%08x\n", qv_devid);
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"dev_devid    : 0x%08x\n", dev_devid);
+
+	/* Search for an empty VAP slot */
+	for (i = 0; i < QDRV_MAX_VAPS; i++) {
+		if (mac->vnet[i] == NULL) {
+			/* Found one */
+			vap_idx = i;
+			break;
+		}
+	}
+
+	/* Check if we found one */
+	if (i == QDRV_MAX_VAPS) {
+		DBGPRINTF_E("No empty VAP slot available for \"%s\"\n",
+			hifinfo->hi_name);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	vdev = dev_get_by_name(&init_net, hifinfo->hi_name);
+	if (vdev != NULL) {
+		DBGPRINTF_E("The device name \"%s\" already exists\n",
+			hifinfo->hi_name);
+		dev_put(vdev);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	/* Allocate our device */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	vdev = alloc_netdev(sizeof(struct qdrv_vap), hifinfo->hi_name, NET_NAME_UNKNOWN, vnet_start);
+#else
+	vdev = alloc_netdev(sizeof(struct qdrv_vap), hifinfo->hi_name, vnet_start);
+#endif
+	if (vdev == NULL) {
+		DBGPRINTF_E("Unable to allocate device \"%s\"\n",
+			hifinfo->hi_name);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return(-1);
+	}
+
+	opmode = (arg1  >> 24) & 0xF;
+
+	qv = netdev_priv(vdev);
+	memset(qv, 0, sizeof(struct qdrv_vap));
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"qv 0x%08x iv 0x%08x dev 0x%08x\n",
+			(unsigned int) qv, (unsigned int) &qv->iv, (unsigned int) vdev);
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"vap_idx %d opmode %d\n", vap_idx, opmode);
+
+	qv->ndev = vdev;
+	qv->parent = mac->data;
+	qw = (struct qdrv_wlan *)qv->parent;
+	qv->devid = qv_devid;
+	qv->qv_vap_idx = hifinfo->hi_vapid;
+	if (qv->qv_vap_idx >= QTN_MAX_BSS_VAPS)
+		panic("vapid: %u is out of range!\n", qv->qv_vap_idx);
+	vdev->dev_id = dev_devid;
+
+	vdev->qtn_flags |= QTN_FLAG_WIFI_DEVICE;
+	TAILQ_INIT(&qv->allnodes);
+
+	vdev->if_port = TOPAZ_TQE_WMAC_PORT;
+
+	netif_napi_add(vdev, &qv->napi, qdrv_rx_poll, board_napi_budget());
+
+	memcpy(vdev->dev_addr, hifinfo->hi_macaddr, IEEE80211_ADDR_LEN);
+
+	spin_lock_init(&qv->lock);
+	spin_lock_init(&qv->bc_lock);
+	spin_lock_init(&qv->ni_lst_lock);
+
+	/* Initiate a VAP setup */
+	if (ieee80211_vap_setup(&((struct qdrv_wlan *) mac->data)->ic, vdev,
+		hifinfo->hi_name, qv->devid, opmode, IEEE80211_NO_STABEACONS) < 0) {
+		DBGPRINTF_E("The 802.11 layer failed to setup the VAP\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Replace newstate function with our own */
+	qv->qv_newstate = qv->iv.iv_newstate;
+	qv->iv.iv_newstate = qdrv_vap_80211_newstate_callback;
+
+	/* Take the RTNL lock since register_netdevice() is used instead of */
+	/* register_netdev() in ieee80211_vap_attach()                      */
+	rtnl_lock();
+
+	/* Complete the VAP setup */
+	if (ieee80211_vap_attach(&qv->iv,
+		ieee80211_media_change, ieee80211_media_status) < 0) {
+		DBGPRINTF_E("The 802.11 layer failed to attach the VAP\n");
+		rtnl_unlock();
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	ic = &((struct qdrv_wlan *) mac->data)->ic;
+	vap = &qv->iv;
+
+	vap->iv_sta_assoc_limit = QTN_ASSOC_LIMIT;
+	vap->iv_ssid_group = 0;
+
+	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
+				vap->iv_opmode == IEEE80211_M_IBSS ||
+				vap->iv_opmode == IEEE80211_M_AHDEMO);
+
+	repeater_mode = (ic->ic_flags_ext & IEEE80211_FEXT_REPEATER);
+
+	if (stamode || repeater_mode) {
+		ic->ic_mindwell_active = QDRV_WLAN_STA_MIN_DWELLTIME_ACTIVE;
+		ic->ic_mindwell_passive = QDRV_WLAN_STA_MIN_DWELLTIME_PASSIVE;
+		ic->ic_maxdwell_active = QDRV_WLAN_STA_MAX_DWELLTIME_ACTIVE;
+		ic->ic_maxdwell_passive = QDRV_WLAN_STA_MAX_DWELLTIME_PASSIVE;
+		ic->ic_opmode = IEEE80211_M_STA;
+		QDRV_SET_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_STA);
+
+		if (repeater_mode)
+			ic->ic_roaming = IEEE80211_ROAMING_AUTO;
+	} else {
+		ic->ic_mindwell_active = QDRV_WLAN_AP_MIN_DWELLTIME_ACTIVE;
+		ic->ic_mindwell_passive = QDRV_WLAN_AP_MIN_DWELLTIME_PASSIVE;
+		ic->ic_maxdwell_active = QDRV_WLAN_AP_MAX_DWELLTIME_ACTIVE;
+		ic->ic_maxdwell_passive = QDRV_WLAN_AP_MAX_DWELLTIME_PASSIVE;
+		ic->ic_opmode = IEEE80211_M_HOSTAP;
+		QDRV_SET_SM_FLAG(qw->sm_stats, QDRV_WLAN_SM_STATE_AP);
+
+		/* Force auto roaming for AP in case it was set to manual when in STA mode */
+		ic->ic_roaming = IEEE80211_ROAMING_AUTO;
+	}
+
+#ifdef QTN_BG_SCAN
+	ic->ic_qtn_bgscan.dwell_msecs_active = QDRV_WLAN_QTN_BGSCAN_DWELLTIME_ACTIVE;
+	ic->ic_qtn_bgscan.dwell_msecs_passive = QDRV_WLAN_QTN_BGSCAN_DWELLTIME_PASSIVE;
+	ic->ic_qtn_bgscan.duration_msecs_active = QDRV_WLAN_QTN_BGSCAN_DURATION_ACTIVE;
+	ic->ic_qtn_bgscan.duration_msecs_passive_fast = QDRV_WLAN_QTN_BGSCAN_DURATION_PASSIVE_FAST;
+	ic->ic_qtn_bgscan.duration_msecs_passive_normal = QDRV_WLAN_QTN_BGSCAN_DURATION_PASSIVE_NORMAL;
+	ic->ic_qtn_bgscan.duration_msecs_passive_slow = QDRV_WLAN_QTN_BGSCAN_DURATION_PASSIVE_SLOW;
+	ic->ic_qtn_bgscan.thrshld_fat_passive_fast = QDRV_WLAN_QTN_BGSCAN_THRESHLD_PASSIVE_FAST;
+	ic->ic_qtn_bgscan.thrshld_fat_passive_normal = QDRV_WLAN_QTN_BGSCAN_THRESHLD_PASSIVE_NORMAL;
+	ic->ic_qtn_bgscan.debug_flags = 0;
+#endif /* QTN_BG_SCAN */
+
+	rtnl_unlock();
+
+	/* vlan configuration structure associated with the device */
+	if (switch_alloc_vlan_dev(TOPAZ_TQE_WMAC_PORT, dev_devid, vdev->ifindex) == NULL) {
+		DBGPRINTF_E("failed to bind vlan dev to VAP\n");
+		ieee80211_vap_detach(&qv->iv);
+		return -1;
+	}
+
+	/* Set some debug stuff */
+	qv->iv.iv_debug |= IEEE80211_MSG_DEBUG |
+				IEEE80211_MSG_INPUT |
+				IEEE80211_MSG_ASSOC |
+				IEEE80211_MSG_AUTH |
+				IEEE80211_MSG_OUTPUT;
+
+	/* Disable some ... */
+	qv->iv.iv_debug &= ~IEEE80211_MSG_DEBUG;
+	qv->iv.iv_debug  = 0;
+	((struct net_device_ops *)(vdev->netdev_ops))->ndo_start_xmit = qdrv_tx_hardstart;
+	((struct net_device_ops *)(vdev->netdev_ops))->ndo_open = vnet_init;
+	((struct net_device_ops *)(vdev->netdev_ops))->ndo_stop = vnet_stop;
+
+	TAILQ_INIT(&qv->ni_lncb_lst);
+
+#ifdef CONFIG_QVSP
+	if (qw->qvsp == NULL) {
+		qw->qvsp = qvsp_init(&qdrv_qvsp_ioctl, qw, vdev, stamode,
+			ic->ic_vsp_cb_cfg, ic->ic_vsp_cb_strm_ctrl, ic->ic_vsp_cb_strm_ext_throttler,
+			sizeof(struct ieee80211_node), sizeof(struct ieee80211vap));
+		if (qw->qvsp && qdrv_wlan_vsp_3rdpt_init(qw)) {
+			printk("Could not initialize VSP 3rd party client control\n");
+		}
+	} else if (vap->iv_opmode == IEEE80211_M_WDS) {
+		qvsp_inactive_flag_set(qw->qvsp, QVSP_INACTIVE_WDS);
+	}
+	if (repeater_mode && vap->iv_opmode == IEEE80211_M_HOSTAP)
+		ic->ic_vsp_change_stamode(ic, 0);
+#endif
+
+	qv->iv.iv_vapnode_idx = IEEE80211_NODE_IDX_MAP(hifinfo->hi_vapnode_idx);
+	qdrv_tx_sch_attach(qv);
+	if (ic->ic_rf_chipid == CHIPID_DUAL && vap->iv_opmode != IEEE80211_M_WDS) {
+		/* Disable one-bit dynamic auto-correlation on RFIC5 */
+		ic->ic_setparam(vap->iv_bss, IEEE80211_PARAM_DYNAMIC_AC, 0, NULL, 0);
+	}
+	/* initial bss node will be created before the qdisc; reinitialize */
+	if (vap->iv_bss) {
+		qdrv_tx_sch_node_data_init(qdrv_tx_sch_vap_get_qdisc(vdev),
+				qw->tx_sch_shared_data, &vap->iv_bss->ni_tx_sch, 1);
+	}
+
+	/*
+	 * Finally, set vnet pointer. Needs to be done after all init is
+	 * complete, or there will be synchronization problem with
+	 * qdrv_tx_wake_queue or others.
+	 */
+	mac->vnet[vap_idx] = vdev;
+	qdrv_vap_set_last(mac);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * Release per-VAP resources that are not freed by the 802.11 detach path.
+ *
+ * Currently this only drops the cached skb held in qv->bc_skb; the pointer
+ * is cleared afterwards so calling this twice is harmless.
+ * NOTE(review): no lock (e.g. qv->bc_lock) is taken here - the caller must
+ * guarantee no concurrent users of bc_skb remain; confirm against callers.
+ */
+void qdrv_vap_resource_cleanup(struct qdrv_vap *qv)
+{
+	if (qv->bc_skb != NULL) {
+		/* dev_kfree_skb_any() is safe from both irq and process context */
+		dev_kfree_skb_any(qv->bc_skb);
+		qv->bc_skb = NULL;
+	}
+}
+
+/*
+ * Count the VAPs on this ieee80211com that operate in AP mode
+ * (IEEE80211_M_HOSTAP).
+ *
+ * The original code reused the 'vap' argument as the TAILQ_FOREACH
+ * iterator, silently clobbering the caller's pointer; a local iterator is
+ * used instead.  The 'vap' parameter is kept (unused) so the signature and
+ * all call sites remain unchanged.
+ */
+static int qdrv_get_hostap_count(struct ieee80211com *ic, struct ieee80211vap *vap)
+{
+	struct ieee80211vap *each;
+	int count = 0;
+
+	TAILQ_FOREACH(each, &ic->ic_vaps, iv_next) {
+		if (each->iv_opmode == IEEE80211_M_HOSTAP)
+			count++;
+	}
+
+	return count;
+}
+
+/*
+ * First-stage VAP teardown: update VSP state and detach the VAP from the
+ * 802.11 layer.  Final destruction happens later in
+ * qdrv_vap_exit_muc_done() once the MuC has acknowledged removal.
+ *
+ * Returns 0 (always); 'mac' is currently unused here.
+ */
+int qdrv_vap_exit(struct qdrv_mac *mac, struct net_device *vdev)
+{
+	struct qdrv_vap *qv = netdev_priv(vdev);
+#ifdef CONFIG_QVSP
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)qv->parent;
+	struct ieee80211com *ic = qv->iv.iv_ic;
+	struct ieee80211vap *vap = &qv->iv;
+	int repeater_mode = ic->ic_flags_ext & IEEE80211_FEXT_REPEATER;
+
+	/* Last AP-mode VAP of a repeater going away: switch VSP to STA mode */
+	if (repeater_mode &&
+			vap->iv_opmode == IEEE80211_M_HOSTAP &&
+			qdrv_get_hostap_count(ic, vap) == 1)
+		ic->ic_vsp_change_stamode(ic, 1);
+
+	qvsp_exit(&qw->qvsp, vdev);
+
+	/* qvsp_exit() may have released the VSP context entirely */
+	if (qw->qvsp == NULL) {
+		qdrv_wlan_vsp_3rdpt_exit(qw);
+	}
+#endif
+
+	/* Detach requires RTNL, matching the register_netdevice() attach path
+	 * (see qdrv_vap_init) */
+	rtnl_lock();
+	ieee80211_vap_detach(&qv->iv);
+	rtnl_unlock();
+
+#ifdef CONFIG_QVSP
+	/* If no WDS VAPs remain, VSP no longer needs to be WDS-inactive */
+	if (qw->qvsp && (qdrv_wlan_query_wds(ic) == 0) ) {
+		qvsp_inactive_flag_clear(qw->qvsp, QVSP_INACTIVE_WDS);
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * Second-stage VAP teardown, run after the MuC confirms removal.
+ *
+ * Removes the netdev from the MAC's vnet[] table, stops the IGMP query
+ * timer and WPS button handling once the primary slot (vnet[0]) is empty,
+ * frees per-VAP resources, flushes pending tx-done work, releases the
+ * switch VLAN binding, and finally completes the 802.11 detach under RTNL.
+ *
+ * Returns 0 on success, -ENODEV if 'vdev' was not found in the table.
+ */
+int qdrv_vap_exit_muc_done(struct qdrv_mac *mac, struct net_device *vdev)
+{
+	struct qdrv_vap *qv = netdev_priv(vdev);
+	int vnet_found = 0;
+	int vnet_idx = -1;
+	int i;
+
+	for (i = 0; i < QDRV_MAX_VAPS; i++) {
+		if (mac->vnet[i] == vdev) {
+			mac->vnet[i] = NULL;
+			qdrv_vap_set_last(mac);
+			vnet_found = 1;
+			vnet_idx = i;
+		}
+	}
+
+	if (!vnet_found) {
+		DBGPRINTF_E("vap %s not found in mac\n", vdev->name);
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Bug fix: the loop above never breaks, so 'i' always equalled
+	 * QDRV_MAX_VAPS here and the message never showed the real slot.
+	 * Log the index that was actually cleared instead.
+	 */
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VAP,
+			"Delete VAP qv 0x%p \"%s\" (%d)\n", qv, vdev->name, vnet_idx);
+
+	/* Primary VAP slot empty: stop the IGMP query timer if it was running */
+	if (mac->vnet[0] == NULL && igmp_query_timer_not_initd == 0) {
+		struct qdrv_wlan *qw = (struct qdrv_wlan *)qv->parent;
+		qdrv_wlan_igmp_timer_stop(qw);
+		igmp_query_timer_not_initd = 1;
+	}
+
+	/* Likewise tear down WPS button handling with the last primary VAP */
+	if (mac->vnet[0] == NULL && wps_button_not_initd == 0) {
+		qdrv_wps_button_exit();
+		wps_button_not_initd = 1;
+	}
+
+	qdrv_vap_resource_cleanup(qv);
+
+	qdrv_tx_done_flush_vap(qv);
+
+	/* release switch vlan */
+	switch_free_vlan_dev_by_idx(vdev->dev_id);
+
+	/* Destroy it ... */
+	rtnl_lock();
+	ieee80211_vap_detach_late(&qv->iv);
+	rtnl_unlock();
+
+	return 0;
+}
+
+/*
+ * Walk the MAC's vnet table and run first-stage teardown (qdrv_vap_exit)
+ * on every VAP that is still present.  Always returns 0.
+ */
+int qdrv_exit_all_vaps(struct qdrv_mac *mac)
+{
+	int slot;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	for (slot = 0; slot < QDRV_MAX_VAPS; slot++) {
+		if (mac->vnet[slot] == NULL)
+			continue;
+		qdrv_vap_exit(mac, mac->vnet[slot]);
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
diff --git a/drivers/qtn/qdrv/qdrv_vap.h b/drivers/qtn/qdrv/qdrv_vap.h
new file mode 100644
index 0000000..12a05bc
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_vap.h
@@ -0,0 +1,154 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_VAP_H
+#define _QDRV_VAP_H
+
+/* Include the WLAN 802.11 layer here */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net80211/if_media.h>
+#include <net80211/ieee80211_var.h>
+#include "qdrv_mac.h"
+#include <qtn/qtn_vlan.h>
+/*
+ * Default dwell times for scanning channels.
+ * These are the original values from ieee80211_scan_sta.c
+ * and ieee80211_scan_ap.c
+ * Units are milliseconds.
+ *
+ * Minimum dwell time (MIN_DWELLTIME) must be larger than 1/2 of
+ * the maximum dwell time (MAX_DWELLTIME) to allow the delayed
+ * probe request timer to work correctly.
+ * This is necessary for active scans as the probe requests are
+ * sent out at maxdwell/2 and we need to allow time for the probe
+ * requests to be sent and the response to come back before mindwell.
+ *
+ * Note that on the STA passive channels have a larger min and max
+ * dwell to increase the probability that we'll 'hear' a beacon on
+ * passive channel.
+ */
+#define QDRV_WLAN_STA_MIN_DWELLTIME_ACTIVE	100
+#define QDRV_WLAN_STA_MIN_DWELLTIME_PASSIVE	450
+#define QDRV_WLAN_STA_MAX_DWELLTIME_ACTIVE	150
+#define QDRV_WLAN_STA_MAX_DWELLTIME_PASSIVE	600
+
+#define QDRV_WLAN_AP_MIN_DWELLTIME_ACTIVE	200
+#define QDRV_WLAN_AP_MIN_DWELLTIME_PASSIVE	200
+#define QDRV_WLAN_AP_MAX_DWELLTIME_ACTIVE	300
+#define QDRV_WLAN_AP_MAX_DWELLTIME_PASSIVE	300
+
+#define QDRV_WLAN_QTN_BGSCAN_DWELLTIME_ACTIVE	20 /* milliseconds */
+#define QDRV_WLAN_QTN_BGSCAN_DWELLTIME_PASSIVE	20 /* milliseconds */
+
+#define QDRV_WLAN_QTN_BGSCAN_DURATION_ACTIVE		200 /* milliseconds */
+#define QDRV_WLAN_QTN_BGSCAN_DURATION_PASSIVE_FAST	450 /* milliseconds */
+#define QDRV_WLAN_QTN_BGSCAN_DURATION_PASSIVE_NORMAL	750 /* milliseconds */
+#define QDRV_WLAN_QTN_BGSCAN_DURATION_PASSIVE_SLOW	1400 /* milliseconds */
+
+#define QDRV_WLAN_QTN_BGSCAN_THRESHLD_PASSIVE_FAST	600 /* FAT 600/1000 */
+#define QDRV_WLAN_QTN_BGSCAN_THRESHLD_PASSIVE_NORMAL	300 /* FAT 300/1000 */
+
+/**
+ * Structure to contain a mapping of DMA mapped addresses to host addresses.
+ * This structure lives as a ring, same size as the vap IOCTL ring, with a 
+ * 1-1 mapping of IOCTL ring entry to dma allocation entry.
+ *
+ * When an ioctl ptr is passed back via vnet_alloc_ioctl, the corresponding
+ * entry in the dma ring will be filled in based on the input parameters to
+ * the alloc call.
+ *
+ * When the ioctl call completes (interrupt from the MuC), the corresponding
+ * dma ring pointer will be freed (if non-NULL), ensuring we don't leak the
+ * dma buffers.
+ *
+ * Synchronisation between the send of the IOCTL and the IOCTL complete
+ * should be done for all structures involved or we could get into a bad situation.
+ */
+struct dma_allocation
+{
+	void *p_host_addr;	/* host (kernel virtual) address of the buffer */
+	u_int32_t dma_addr;	/* DMA-mapped address of the same buffer */
+	size_t size;		/* allocation size in bytes */
+};
+
+/* Per-VAP transmit statistics. */
+struct qdrv_vap_tx_stats
+{
+	unsigned int tx_;	/* NOTE(review): placeholder counter - no users visible in this chunk */
+};
+
+/* Per-VAP receive statistics. */
+struct qdrv_vap_rx_stats
+{
+	unsigned int rx_;	/* NOTE(review): placeholder counter - no users visible in this chunk */
+};
+
+/*
+ * Driver-private per-VAP state, stored as the netdev_priv() area of the
+ * VAP's net_device (allocated in qdrv_vap_init()).
+ */
+struct qdrv_vap
+{
+	struct ieee80211vap iv;	/* Must be first for 802.11 layer */
+	struct net_device *ndev;	/* backing net_device for this VAP */
+	uint32_t muc_queued;	/* NOTE(review): presumably buffers outstanding on the MuC - confirm */
+	TAILQ_HEAD(, qdrv_node) allnodes;	/* all qdrv_node entries on this VAP */
+	void *parent;	/* owning struct qdrv_wlan (stored untyped; set from mac->data) */
+	spinlock_t bc_lock;	/* protects bc_skb */
+	struct sk_buff *bc_skb;	/* cached skb, freed in qdrv_vap_resource_cleanup() */
+	unsigned int devid;	/* device id assigned at VAP creation */
+
+	/* Synchronisation */
+	spinlock_t lock;
+
+	struct net_device_stats stats;		/* generic netdev counters */
+	struct qdrv_vap_tx_stats tx_stats;	/* driver tx counters */
+	struct qdrv_vap_rx_stats rx_stats;	/* driver rx counters */
+
+	/* 802.11 layer callbacks and interface */
+	/* saved original iv_newstate handler, chained from the driver's own
+	 * qdrv_vap_80211_newstate_callback */
+	int (*qv_newstate)(struct ieee80211vap *, enum ieee80211_state, int);
+	struct ieee80211_beacon_offsets qv_boff; /* dynamic update state */
+	struct napi_struct napi;	/* NAPI context registered for qdrv_rx_poll */
+
+	TAILQ_HEAD(, ieee80211_node) ni_lncb_lst;	/* STAs supporting 4-addr LNCB reception */
+	int ni_lncb_cnt;				/* Total entries in LNCB list */
+	TAILQ_HEAD(, ieee80211_node) ni_bridge_lst;	/* Associated bridge stations */
+	int ni_bridge_cnt;				/* Total entries in bridge STA list */
+	spinlock_t ni_lst_lock;				/* Lock for the above fields */
+	int iv_3addr_count;
+	uint8_t         qv_vap_idx;	/* VAP id from MuC; must be < QTN_MAX_BSS_VAPS */
+	uint32_t	qv_bmps_mode;	/* NOTE(review): power-save (BMPS) mode - semantics not visible here */
+};
+
+int qdrv_vap_init(struct qdrv_mac *mac, struct host_ioctl_hifinfo *hifinfo,
+	u32 arg1, u32 arg2);
+int qdrv_vap_exit(struct qdrv_mac *mac, struct net_device *vnet);
+int qdrv_vap_exit_muc_done(struct qdrv_mac *mac, struct net_device *vnet);
+int qdrv_exit_all_vaps(struct qdrv_mac *mac);
+int qdrv_vap_wds_mode(struct qdrv_vap *qv);
+char *qdrv_vap_wds_peer(struct qdrv_vap *qv);
+int qdrv_vap_vlan2index_sync(struct qdrv_vap *qv, uint16_t command, uint16_t vlanid);
+struct host_ioctl *vnet_alloc_ioctl(struct qdrv_vap *qv);
+void vnet_free_ioctl(struct host_ioctl *ioctl);
+int vnet_send_ioctl(struct qdrv_vap *qv, struct host_ioctl *block);
+void qdrv_tx_sch_attach(struct qdrv_vap *qv);
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+int qdrv_get_active_sub_port(const struct net_bridge_port *p,
+		uint32_t *sub_port_bitmap, int size);
+int qdrv_check_active_sub_port(const struct net_bridge_port *p,
+		const uint32_t sub_port);
+#endif
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_vlan.c b/drivers/qtn/qdrv/qdrv_vlan.c
new file mode 100644
index 0000000..2e860eb
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_vlan.c
@@ -0,0 +1,95 @@
+/**
+  Copyright (c) 2008 - 2014 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+**/
+
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+
+#include <net80211/if_llc.h>
+#include <net80211/if_ethersubr.h>
+
+#include <qtn/qtn_uc_comm.h>
+#include <qtn/qtn_vlan.h>
+
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_wlan.h"
+#include "qdrv_vap.h"
+#include "qdrv_vlan.h"
+
+/*
+ * Create a pseudo 802.11 node used to address a VLAN group on this VAP.
+ *
+ * A group MAC address is derived from the VLAN id and the VAP's wlan id,
+ * an ieee80211 node is allocated for it and initialised from the current
+ * BSS node, then registered in the VLAN switch table so traffic for the
+ * VLAN can be mapped to this node.
+ *
+ * Returns the enclosing qdrv_node, or NULL if node allocation fails.
+ */
+struct qdrv_node *qdrv_vlan_alloc_group(struct qdrv_vap *qv, uint16_t vid)
+{
+	struct ieee80211com *ic = qv->iv.iv_ic;
+	struct ieee80211_node *ni, *bss_ni;
+	struct qtn_vlan_dev *vdev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+	uint8_t mac[ETH_ALEN];
+
+	qtn_vlan_gen_group_addr(mac, vid, QDRV_WLANID_FROM_DEVID(qv->devid));
+
+	ni = ieee80211_alloc_node(&ic->ic_sta, &qv->iv, mac, "VLAN group");
+	if (!ni)
+		return NULL;
+
+	/* Inherit capabilities, tx power and flags from the current BSS node */
+	bss_ni = qv->iv.iv_bss;
+	ni->ni_capinfo = bss_ni->ni_capinfo;
+	ni->ni_txpower = bss_ni->ni_txpower;
+	ni->ni_ath_flags = qv->iv.iv_ath_cap;
+	ni->ni_flags = bss_ni->ni_flags;
+	ni->ni_start_time_assoc = get_jiffies_64();
+	/* Mark the node authenticated and HT-capable */
+	ni->ni_flags |= (IEEE80211_NODE_AUTH | IEEE80211_NODE_HT);
+
+	/* Announce the new "association" to the lower layers, if hooked */
+	if (ic->ic_newassoc)
+		ic->ic_newassoc(ni, 1);
+
+	/* Record the node's VLAN membership in the switch table */
+	switch_vlan_set_node(vdev, IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx), vid);
+
+	return container_of(ni, struct qdrv_node, qn_node);
+}
+
+/*
+ * Release a VLAN group node created by qdrv_vlan_alloc_group(): clear its
+ * entry in the VLAN switch table, then drop the 802.11 node reference.
+ */
+void qdrv_vlan_free_group(struct qdrv_node *qn)
+{
+	struct qdrv_vap *qv = container_of(qn->qn_node.ni_vap, struct qdrv_vap, iv);
+	struct qtn_vlan_dev *vdev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+
+	switch_vlan_clr_node(vdev, qn->qn_node_idx);
+	ieee80211_free_node(&qn->qn_node);
+}
+
+/*
+ * Look up the VLAN group node for 'vid' on this VAP without taking a node
+ * reference ("noref"): the caller must not hold the result past a point
+ * where the node could be freed.
+ *
+ * The expected group MAC is regenerated from (vid, wlan id) and the VAP's
+ * allnodes list is scanned for a matching address.
+ * NOTE(review): the walk is protected only by disabling local interrupts;
+ * confirm this matches how other code paths serialise allnodes.
+ */
+struct qdrv_node *qdrv_vlan_find_group_noref(struct qdrv_vap *qv, uint16_t vid)
+{
+	struct qdrv_node *ret = NULL, *qn;
+	uint8_t	mac[ETH_ALEN];
+	unsigned long flags;
+
+	qtn_vlan_gen_group_addr(mac, vid, QDRV_WLANID_FROM_DEVID(qv->devid));
+
+	local_irq_save(flags);
+
+	TAILQ_FOREACH(qn, &qv->allnodes, qn_next) {
+		if (memcmp(qn->qn_node.ni_macaddr, mac, ETH_ALEN) == 0) {
+			ret = qn;
+			break;
+		}
+	}
+
+	local_irq_restore(flags);
+
+	return ret;
+}
diff --git a/drivers/qtn/qdrv/qdrv_vlan.h b/drivers/qtn/qdrv/qdrv_vlan.h
new file mode 100644
index 0000000..cb2ff58
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_vlan.h
@@ -0,0 +1,29 @@
+/**
+  Copyright (c) 2008 - 2014 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_VLAN_H
+#define _QDRV_VLAN_H
+
+struct qdrv_node *qdrv_vlan_alloc_group(struct qdrv_vap *qv, uint16_t vid);
+void qdrv_vlan_free_group(struct qdrv_node *);
+
+struct qdrv_node *qdrv_vlan_find_group_noref(struct qdrv_vap *qv, uint16_t vid);
+
+#endif
diff --git a/drivers/qtn/qdrv/qdrv_wlan.c b/drivers/qtn/qdrv/qdrv_wlan.c
new file mode 100644
index 0000000..b628e53
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_wlan.c
@@ -0,0 +1,9875 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+**/
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/igmp.h>
+#include <net/iw_handler.h> /* wireless_send_event(..) */
+#include <net/sch_generic.h>
+#include <asm/hardware.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/net/bridge/br_public.h>
+#include <linux/gpio.h>
+#else
+#include <asm/gpio.h>
+#endif
+#include <qtn/qdrv_sch.h>
+#include "qdrv_features.h"
+#include "qdrv_debug.h"
+#include "qdrv_mac.h"
+#include "qdrv_soc.h"
+#include "qdrv_comm.h"
+#include "qtn/qdrv_bld.h"
+#include "qdrv_wlan.h"
+#include "qdrv_hal.h"
+#include "qdrv_vap.h"	/* For vnet_send_ioctl() etc ... */
+#include "qdrv_control.h"
+#include "qdrv_txbf.h"
+#include "qdrv_radar.h"
+#include "qdrv_pktlogger.h"
+#include "qdrv_config.h"
+#include "qdrv_pcap.h"
+#include "qdrv_auc.h"
+#include "qdrv_mac_reserve.h"
+
+#include "qdrv_netdebug_checksum.h"
+#include <qtn/qtn_buffers.h>
+#include <qtn/qtn_global.h>
+#include <qtn/registers.h> /* To get to mac->reg-> .... */
+#include <qtn/muc_phy_stats.h> /* To get to qtn_stats_log .... */
+#include <qtn/shared_params.h>
+#include <qtn/hardware_revision.h>
+#include <qtn/bootcfg.h>
+#include <qtn/qtn_trace.h>
+#include <qtn/qtn_global.h>
+#include "qdrv_muc_stats.h"
+#include "qdrv_sch_const.h"
+#include "qdrv_sch_wmm.h"
+#include "net80211/ieee80211_beacon_desc.h"
+#ifdef CONFIG_QVSP
+#include "qtn/qvsp.h"
+#endif
+
+#include <radar/radar.h>
+#include <qtn/muc_phy_stats.h>
+
+#include <linux/file.h>
+#include <linux/syscalls.h>
+#include <linux/ctype.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/pm_qos.h>
+#else
+#include <linux/pm_qos_params.h>
+#endif
+#include <common/ruby_pm.h>
+#include <ruby/gpio.h>
+#include <ruby/pm.h>
+#include <ruby/plat_dma_addr.h>
+
+#include <asm/board/board_config.h>
+#include <asm/board/troubleshoot.h>
+#include <asm/cacheflush.h>
+#include <qtn/topaz_hbm.h>
+#include <qtn/topaz_fwt_sw.h>
+#include <qtn/topaz_congest_queue.h>
+#include "soc.h"
+#include "qtn_logging.h"
+#include <net/arp.h>
+#ifdef CONFIG_IPV6
+#include <net/ip6_checksum.h>
+#endif
+
+/* Delay prior to enabling hang detection */
+#define QDRV_WLAN_HR_DELAY_SECS 3
+#define QDRV_DFLT_MIN_TXPOW	((int8_t) -20)
+#define QDRV_DFLT_MAX_TXPOW	19
+
+#define QDRV_WLAN_IGMP_QUERY_INTERVAL 125
+#define NET_IP_ALIGN 2
+
+#define SE95_DEVICE_ADDR	0x49
+
+#define RSSI_OFFSET_FROM_10THS_DBM    900
+
+u_int8_t g_bb_enabled = 0;
+
+extern uint32_t g_carrier_id;
+
+extern __sram_data const int qdrv_sch_band_prio[5];
+extern struct qdrv_sch_band_aifsn qdrv_sch_band_chg_prio[5];
+#ifdef CONFIG_QVSP
+static void qdrv_wlan_manual_ba_throt(struct qdrv_wlan *qw, struct qdrv_vap *qv, unsigned int value);
+static void qdrv_wlan_manual_wme_throt(struct qdrv_wlan *qw, struct qdrv_vap *qv, unsigned int value);
+#endif
+
+int g_qdrv_non_qtn_assoc = 0;
+
+void enable_bb(int index, u32 channel);
+void bb_rf_drv_set_channel(u32 bb_index, u32 freq_band, u32 channel);
+
+struct timer_list qdrv_wps_button_timer;
+
+int g_triggers_on = 0;
+
+static void qdrv_wlan_set_11g_erp(struct ieee80211vap *vap, int on);
+static struct qtn_rateentry rate_table_11a[] =
+{
+/*	ieee	rate	ctl	short	basic	phy			*/
+/*	rate	100kbps	indx	pre	rate	type			*/
+	{12,	 60,	0,	0,	1,	QTN_RATE_PHY_OFDM},
+	{18,	 90,	0,	0,	0,	QTN_RATE_PHY_OFDM},
+	{24,	120,	2,	0,	1,	QTN_RATE_PHY_OFDM},
+	{36,	180,	2,	0,	0,	QTN_RATE_PHY_OFDM},
+	{48,	240,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{72,	360,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{96,	480,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{108,	540,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+};
+
+static struct qtn_rateentry rate_table_11b[] =
+{
+/*	ieee	rate	ctl	short	basic	phy			*/
+/*	rate	100kbps	indx	pre	rate	type			*/
+	{2,	 10,	0,	0,	1,	QTN_RATE_PHY_CCK},
+	{4,	 20,	1,	1,	1,	QTN_RATE_PHY_CCK},
+	{11,	 55,	1,	1,	0,	QTN_RATE_PHY_CCK},
+	{22,	110,	1,	1,	0,	QTN_RATE_PHY_CCK},
+};
+
+static struct qtn_rateentry rate_table_11g[] =
+{
+/*	ieee	rate	ctl		short	basic	phy			*/
+/*	rate	100kbps	indx	pre		rate	type			*/
+	{2,	10,	0,		0,		1,		QTN_RATE_PHY_CCK},
+	{4,	20,	1,		1,		1,		QTN_RATE_PHY_CCK},
+	{11,	55,	2,		1,		1,		QTN_RATE_PHY_CCK},
+	{22,	110,	3,		1,		1,		QTN_RATE_PHY_CCK},
+	{12,	60,	4,		0,		1,		QTN_RATE_PHY_OFDM},
+	{18,	90,	4,		0,		0,		QTN_RATE_PHY_OFDM},
+	{24,	120,	6,		0,		1,		QTN_RATE_PHY_OFDM},
+	{36,	180,	6,		0,		0,		QTN_RATE_PHY_OFDM},
+	{48,	240,	8,		0,		1,		QTN_RATE_PHY_OFDM},
+	{72,	360,	8,		0,		0,		QTN_RATE_PHY_OFDM},
+	{96,	480,	8,		0,		0,		QTN_RATE_PHY_OFDM},
+	{108,	540,	8,		0,		0,		QTN_RATE_PHY_OFDM},
+};
+
+struct qtn_rateentry rate_table_11na[] = {
+/*	ieee	rate	ctl	short	basic	phy			*/
+/*	rate	100kbps	indx	pre	rate	type			*/
+	{12,	 60,	0,	0,	1,	QTN_RATE_PHY_OFDM},
+	{18,	 90,	0,	0,	0,	QTN_RATE_PHY_OFDM},
+	{24,	120,	2,	0,	1,	QTN_RATE_PHY_OFDM},
+	{36,	180,	2,	0,	0,	QTN_RATE_PHY_OFDM},
+	{48,	240,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{72,	360,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{96,	480,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{108,	540,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{QTN_RATE_11N | 0,	 65,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 1,	130,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 2,	195,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 3,	260,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 4,	390,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 5,	520,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 6,	585,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 7,	650,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 8,	130,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 9,	260,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 10,	390,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 11,	520,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 12,	780,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 13,   1040,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 14,   1170,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 15,   1300,	4,	0,	0,	QTN_RATE_PHY_HT},
+
+};
+
+/*
+ * Only legacy rate are been added to it. Other rate flags will be updated
+ * when we MCS changes are made
+ */
+struct qtn_rateentry rate_table_11ac[] = {
+/*	ieee	rate	ctl	short	basic	phy			*/
+/*	rate	100kbps	indx	pre	rate	type			*/
+	{12,	 60,	0,	0,	1,	QTN_RATE_PHY_OFDM},
+	{18,	 90,	0,	0,	0,	QTN_RATE_PHY_OFDM},
+	{24,	120,	2,	0,	1,	QTN_RATE_PHY_OFDM},
+	{36,	180,	2,	0,	0,	QTN_RATE_PHY_OFDM},
+	{48,	240,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{72,	360,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{96,	480,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{108,	540,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+};
+
+static struct qtn_rateentry rate_table_11ng[] =
+{
+/*	ieee			rate	ctl	short	basic	phy		*/
+/*	rate			100kbps	indx	pre	rate	type		*/
+	{2,			10,	0,	0,	1,	QTN_RATE_PHY_CCK},
+	{4,			20,	1,	1,	1,	QTN_RATE_PHY_CCK},
+	{11,			55,	2,	1,	1,	QTN_RATE_PHY_CCK},
+	{22,			110,	3,	1,	1,	QTN_RATE_PHY_CCK},
+	{12,			60,	0,	0,	1,	QTN_RATE_PHY_OFDM},
+	{18,			90,	0,	0,	0,	QTN_RATE_PHY_OFDM},
+	{24,			120,	2,	0,	1,	QTN_RATE_PHY_OFDM},
+	{36,			180,	2,	0,	0,	QTN_RATE_PHY_OFDM},
+	{48,			240,	4,	0,	1,	QTN_RATE_PHY_OFDM},
+	{72,			360,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{96,			480,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{108,			540,	4,	0,	0,	QTN_RATE_PHY_OFDM},
+	{QTN_RATE_11N | 0,	 65,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 1,	130,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 2,	195,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 3,	260,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 4,	390,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 5,	520,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 6,	585,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 7,	650,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 8,	130,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 9,	260,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 10,	390,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 11,	520,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 12,	780,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 13,	1040,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 14,	1170,	4,	0,	0,	QTN_RATE_PHY_HT},
+	{QTN_RATE_11N | 15,	1300,	4,	0,	0,	QTN_RATE_PHY_HT},
+};
+
+static struct qtn_channel qtn_channels_2ghz[] =
+{
+	/* FIXME: Not assigning the correct pri chan for 2G mode */
+/*	channel number		frequency 40M upper / lower*/
+	{1,		2412,		IEEE80211_CHAN_HT40U,	3,	0,	0,	0},
+	{2,		2417,		IEEE80211_CHAN_HT40U,	4,	0,	0,	0},
+	{3,		2422,		IEEE80211_CHAN_HT40U,	5,	0,	0,	0},
+	{4,		2427,		IEEE80211_CHAN_HT40U,	6,	0,	0,	0},
+	{5,		2432,		IEEE80211_CHAN_HT40U,	7,	0,	0,	0},
+	{6,		2437,		IEEE80211_CHAN_HT40U,	8,	0,	0,	0},
+	{7,		2442,		IEEE80211_CHAN_HT40U,	9,	0,	0,	0},
+	{8,		2447,		IEEE80211_CHAN_HT40D,	6,	0,	0,	0},
+	{9,		2452,		IEEE80211_CHAN_HT40D,	7,	0,	0,	0},
+	{10,		2457,		IEEE80211_CHAN_HT40D,	8,	0,	0,	0},
+	{11,		2462,		IEEE80211_CHAN_HT40D,	9,	0,	0,	0},
+	{12,		2467,		IEEE80211_CHAN_HT40D,	10,	0,	0,	0},
+	{13,		2472,		IEEE80211_CHAN_HT40D,	11,	0,	0,	0},
+};
+
+static struct qtn_channel qtn_channels_5ghz[] =
+{
+/* channel number	frequency       40MHz mode		CFreq40	CFreq80	Cfreq160	80MHZ mode*/
+	{36,		5180,		IEEE80211_CHAN_HT40U,	38,	42,	50,		IEEE80211_CHAN_VHT80_LL},
+	{40,		5200,		IEEE80211_CHAN_HT40D,	38,	42,	50,		IEEE80211_CHAN_VHT80_LU},
+	{44,		5220,		IEEE80211_CHAN_HT40U,	46,	42,	50,		IEEE80211_CHAN_VHT80_UL},
+	{48,		5240,		IEEE80211_CHAN_HT40D,	46,	42,	50,		IEEE80211_CHAN_VHT80_UU},
+	{52,		5260,		IEEE80211_CHAN_HT40U,	54,	58,	50,		IEEE80211_CHAN_VHT80_LL},
+	{56,		5280,		IEEE80211_CHAN_HT40D,	54,	58,	50,		IEEE80211_CHAN_VHT80_LU},
+	{60,		5300,		IEEE80211_CHAN_HT40U,	62,	58,	50,		IEEE80211_CHAN_VHT80_UL},
+	{64,		5320,		IEEE80211_CHAN_HT40D,	62,	58,	50,		IEEE80211_CHAN_VHT80_UU},
+	{100,		5500,		IEEE80211_CHAN_HT40U,	102,	106,	114,		IEEE80211_CHAN_VHT80_LL},
+	{104,		5520,		IEEE80211_CHAN_HT40D,	102,	106,	114,		IEEE80211_CHAN_VHT80_LU},
+	{108,		5540,		IEEE80211_CHAN_HT40U,	110,	106,	114,		IEEE80211_CHAN_VHT80_UL},
+	{112,		5560,		IEEE80211_CHAN_HT40D,	110,	106,	114,		IEEE80211_CHAN_VHT80_UU},
+	{116,		5580,		IEEE80211_CHAN_HT40U,	118,	122,	114,		IEEE80211_CHAN_VHT80_LL},
+	{120,		5600,		IEEE80211_CHAN_HT40D,	118,	122,	114,		IEEE80211_CHAN_VHT80_LU},
+	{124,		5620,		IEEE80211_CHAN_HT40U,	126,	122,	114,		IEEE80211_CHAN_VHT80_UL},
+	{128,		5640,		IEEE80211_CHAN_HT40D,	126,	122,	114,		IEEE80211_CHAN_VHT80_UU},
+	{132,		5660,		IEEE80211_CHAN_HT40U,	134,	138,	0,		IEEE80211_CHAN_VHT80_LL},
+	{136,		5680,		IEEE80211_CHAN_HT40D,	134,	138,	0,		IEEE80211_CHAN_VHT80_LU},
+	{140,		5700,		IEEE80211_CHAN_HT40U,	142,	138,	0,		IEEE80211_CHAN_VHT80_UL},
+	{144,		5720,		IEEE80211_CHAN_HT40D,	142,	138,	0,		IEEE80211_CHAN_VHT80_UU},
+	{149,		5745,		IEEE80211_CHAN_HT40U,	151,	155,	0,		IEEE80211_CHAN_VHT80_LL},
+	{153,		5765,		IEEE80211_CHAN_HT40D,	151,	155,	0,		IEEE80211_CHAN_VHT80_LU},
+	{157,		5785,		IEEE80211_CHAN_HT40U,	159,	155,	0,		IEEE80211_CHAN_VHT80_UL},
+	{161,		5805,		IEEE80211_CHAN_HT40D,	159,	155,	0,		IEEE80211_CHAN_VHT80_UU},
+	{165,		5825,		IEEE80211_CHAN_HT40U,	0,	0,	0,		0},
+	{169,		5845,		IEEE80211_CHAN_HT40D,	0,	0,	0,		0},
+	{184,		4920,		IEEE80211_CHAN_HT40U,	0,	190,	0,		IEEE80211_CHAN_VHT80_LL},
+	{188,		4940,		IEEE80211_CHAN_HT40D,	0,	190,	0,		IEEE80211_CHAN_VHT80_LU},
+	{192,		4960,		IEEE80211_CHAN_HT40U,	0,	190,	0,		IEEE80211_CHAN_VHT80_UL},
+	{196,		4980,		IEEE80211_CHAN_HT40D,	0,	190,	0,		IEEE80211_CHAN_VHT80_UU},
+};
+
+/*
+ * Install the channel list into the ieee80211com and rebuild the channel
+ * availability bitmaps.
+ *
+ * The input array is copied into ic->ic_channels (clamped to
+ * IEEE80211_CHAN_MAX + 1 entries).  Only channels inside the valid 2.4G or
+ * 5G operating ranges are marked available; HT40- and VHT80-capable
+ * channels are additionally flagged in the per-bandwidth active maps.
+ * Finally ic_chan_active is initialised from ic_chan_avail.
+ */
+static void set_channels(struct ieee80211com *ic, int nchans,
+	struct ieee80211_channel *inchans)
+{
+	int i;
+
+	/* Clamp to the capacity of ic_channels[] */
+	if (nchans > IEEE80211_CHAN_MAX + 1) {
+		nchans = IEEE80211_CHAN_MAX + 1;
+	}
+
+	ic->ic_nchans = nchans;
+
+	memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail));
+	for(i = 0; i < nchans; i++) {
+		ic->ic_channels[i] = inchans[i];
+		/* make sure only valid 2.4G or 5G channels are set as available */
+		if (((inchans[i].ic_ieee >= QTN_2G_FIRST_OPERATING_CHAN) && (inchans[i].ic_ieee <= QTN_2G_LAST_OPERATING_CHAN)) ||
+		    ((inchans[i].ic_ieee >= QTN_5G_FIRST_OPERATING_CHAN) && (inchans[i].ic_ieee <= QTN_5G_LAST_OPERATING_CHAN))) {
+			setbit(ic->ic_chan_avail, inchans[i].ic_ieee);
+
+			/* Flag wider-bandwidth capability in the per-BW maps */
+			if (IEEE80211_IS_CHAN_HT40(&inchans[i])) {
+				setbit(ic->ic_chan_active_40, inchans[i].ic_ieee);
+			}
+			if (IEEE80211_IS_CHAN_VHT80(&inchans[i])) {
+				setbit(ic->ic_chan_active_80, inchans[i].ic_ieee);
+			}
+			setbit(ic->ic_chan_active_20, inchans[i].ic_ieee);
+		}
+	}
+	memcpy(ic->ic_chan_active, ic->ic_chan_avail, sizeof(ic->ic_chan_avail));
+}
+
+/*
+ * Populate the supported rate set (ic_sup_rates[mode]) for the given PHY mode
+ * from the matching static rate table, marking basic rates with
+ * IEEE80211_RATE_BASIC.  The table is truncated if it exceeds
+ * IEEE80211_RATE_MAXSIZE entries.
+ *
+ * Returns 0 on success, -EINVAL if the mode is not recognised.
+ */
+static int set_rates(struct qdrv_wlan *qw, enum ieee80211_phymode mode)
+{
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_rateset *rs;
+	struct qtn_ratetable *rt;
+	int maxrates, i;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+#define N(a)	(sizeof(a)/sizeof(a[0]))
+	switch (mode) {
+	case IEEE80211_MODE_11A:
+		qw->qw_rates[mode].rt_entries = rate_table_11a;
+		qw->qw_rates[mode].rt_num = N(rate_table_11a);
+		qw->qw_rates[mode].rt_legacy_num = N(rate_table_11a);
+		break;
+	case IEEE80211_MODE_11B:
+		qw->qw_rates[mode].rt_entries = rate_table_11b;
+		qw->qw_rates[mode].rt_num = N(rate_table_11b);
+		qw->qw_rates[mode].rt_legacy_num = N(rate_table_11b);
+		break;
+	case IEEE80211_MODE_11G:
+		qw->qw_rates[mode].rt_entries = rate_table_11g;
+		qw->qw_rates[mode].rt_num = N(rate_table_11g);
+		qw->qw_rates[mode].rt_legacy_num = N(rate_table_11g);
+		break;
+	case IEEE80211_MODE_11NA:
+	case IEEE80211_MODE_11NA_HT40PM:
+		/* 11n on 5G: HT rates, legacy portion is the 11a subset */
+		qw->qw_rates[mode].rt_entries = rate_table_11na;
+		qw->qw_rates[mode].rt_num = N(rate_table_11na);
+		qw->qw_rates[mode].rt_legacy_num = N(rate_table_11a);
+		break;
+	case IEEE80211_MODE_11NG:
+	case IEEE80211_MODE_11NG_HT40PM:
+		/* 11n on 2.4G: HT rates, legacy portion is the 11g subset */
+		qw->qw_rates[mode].rt_entries = rate_table_11ng;
+		qw->qw_rates[mode].rt_num = N(rate_table_11ng);
+		qw->qw_rates[mode].rt_legacy_num = N(rate_table_11g);
+		break;
+	case IEEE80211_MODE_11AC_VHT20PM:
+	case IEEE80211_MODE_11AC_VHT40PM:
+	case IEEE80211_MODE_11AC_VHT80PM:
+		qw->qw_rates[mode].rt_entries = rate_table_11ac;
+		qw->qw_rates[mode].rt_num = N(rate_table_11ac);
+		qw->qw_rates[mode].rt_legacy_num = N(rate_table_11a);
+		break;
+	default:
+		DBGPRINTF_E("mode unknown\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -EINVAL;
+	}
+#undef N
+
+	/*
+	 * The address of an array element can never be NULL, so the previous
+	 * "(rt = &qw->qw_rates[mode]) == NULL" check was dead code.
+	 */
+	rt = &qw->qw_rates[mode];
+
+	if (rt->rt_num > IEEE80211_RATE_MAXSIZE) {
+		/* the rate table has more entries than the rate set can hold */
+		DBGPRINTF_E("Rate table is too big; truncating (%u > %u)\n",
+			rt->rt_num, IEEE80211_RATE_MAXSIZE);
+		maxrates = IEEE80211_RATE_MAXSIZE;
+	} else {
+		maxrates = rt->rt_num;
+	}
+
+	rs = &ic->ic_sup_rates[mode];
+	memset(rs, 0, sizeof(struct ieee80211_rateset));
+
+	for (i = 0; i < maxrates; i++) {
+		/* basic (mandatory) rates carry the IEEE80211_RATE_BASIC bit */
+		rs->rs_rates[i] = (rt->rt_entries[i].re_basicrate) ?
+			(rt->rt_entries[i].re_ieeerate | IEEE80211_RATE_BASIC) :
+			rt->rt_entries[i].re_ieeerate;
+	}
+
+	rs->rs_legacy_nrates = rt->rt_legacy_num;
+	rs->rs_nrates = maxrates;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * Select the current rate table and remember the current PHY mode for this
+ * WLAN instance.  Always returns 0.
+ */
+static int set_mode(struct qdrv_wlan *qw, enum ieee80211_phymode mode)
+{
+	qw->qw_currt = &qw->qw_rates[mode];
+	qw->qw_curmode = mode;
+#if 0
+	qw->qw_minrateix = 0;
+#endif
+
+	return 0;
+}
+
+/*
+ * Dump the tx-scheduler state for one node into a seq_file (procfs-style
+ * output): overall token/queue counters, then one line per traffic band.
+ * 's' is a struct seq_file* passed as void* to fit the iterator callback
+ * signature; band_id indexes must match the layout of nd->bands[].
+ */
+static void qdrv_wlan_tx_sch_node_info(void *s, struct ieee80211_node *ni)
+{
+	const struct Qdisc *sch = netdev_get_tx_queue(ni->ni_vap->iv_dev, 0)->qdisc;
+	struct seq_file *sq = (struct seq_file *)s;
+	const struct qdrv_sch_node_data *nd = &ni->ni_tx_sch;
+	int i;
+	static const char *band_id[] = {"BE", "BK", "VI", "VO", "CT"};
+
+	if (!sq) {
+		return;
+	}
+
+	seq_printf(sq, "%s AID=%u ref=%u qdisc=%p tokens=%u muc=%d over_thresh=%u/%u low_rate=%u\n",
+			ether_sprintf(ni->ni_macaddr),
+			IEEE80211_AID(ni->ni_associd),
+			ieee80211_node_refcnt(ni),
+			sch,
+			ni->ni_tx_sch.used_tokens,	/* enqueued or sent to MuC */
+			ni->ni_tx_sch.muc_queued,	/* dequeued, sent to MuC, not yet tx done */
+			nd->over_thresh,
+			nd->over_thresh_cnt,
+			nd->low_rate);
+
+	seq_printf(sq, "    Queue Depth Sent       Dropped    Victim     Active\n");
+	for (i = 0; i < ARRAY_SIZE(nd->bands); i++) {
+		const struct qdrv_sch_node_band_data *nbd;
+		nbd = &nd->bands[i];
+		seq_printf(sq, "    %i-%s  %-5i %-10u %-10u %-10u %u\n",
+				i,
+				band_id[i],
+				skb_queue_len(&nbd->queue),
+				nbd->sent,
+				nbd->dropped,
+				nbd->dropped_victim,
+				qdrv_sch_node_is_active(nbd, nd, i));
+	}
+}
+
+/*
+ * Allocate the tx-scheduler shared data for this WLAN instance.  This is
+ * boot-time-critical state, so allocation failure is treated as fatal
+ * (panic) rather than returned to the caller.
+ */
+static void qdrv_wlan_tx_sch_init(struct qdrv_wlan *qw)
+{
+	struct qdrv_sch_shared_data *sd;
+
+	sd = qdrv_sch_shared_data_init(QTN_BUFS_WMAC_TX_QDISC, QDRV_TX_SCH_RED_MASK);
+	if (sd == NULL) {
+		panic("%s: could not allocate tx_sch shared data\n", __FUNCTION__);
+	}
+
+	qw->tx_sch_shared_data = sd;
+}
+
+/*
+ * Set a per-node threshold for packets queued to the MuC, based on the number of associated nodes,
+ * including WDS nodes.
+ * PERNODE_TBD - all WDS nodes should count as one under the current scheme
+ *
+ * The high threshold is the tx descriptor pool shared evenly between nodes,
+ * floored at QDRV_TXDESC_THRESH_MAX_MIN; the low threshold trails it by
+ * QDRV_TXDESC_THRESH_MIN_DIFF.  assoc_cnt is clamped to at least 1 to avoid
+ * a divide-by-zero when no stations are associated.
+ */
+static void qdrv_wlan_muc_node_thresh_set(struct qdrv_wlan *qw, struct ieee80211com *ic,
+						uint8_t assoc_cnt)
+{
+	if (assoc_cnt == 0) {
+		assoc_cnt = 1;
+	}
+
+	qw->tx_if.muc_thresh_high = MAX(
+		(qw->tx_if.list_max_size / assoc_cnt), QDRV_TXDESC_THRESH_MAX_MIN);
+	qw->tx_if.muc_thresh_low = qw->tx_if.muc_thresh_high - QDRV_TXDESC_THRESH_MIN_DIFF;
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN,
+		"per-node thresholds changed - high=%u low=%u nodes=%u\n",
+		qw->tx_if.muc_thresh_high, qw->tx_if.muc_thresh_low, ic->ic_sta_assoc);
+}
+
+extern int g_wlan_tot_node_alloc;
+extern int g_wlan_tot_node_alloc_tmp;
+extern int g_wlan_tot_node_free;
+extern int g_wlan_tot_node_free_tmp;
+
+/*
+ * 802.11 layer callback: allocate the driver-private node structure for a new
+ * peer.  For permanent nodes (tmp_node == 0) a per-node shared-stats slot is
+ * taken from the free list and the per-node MuC queue thresholds are
+ * recomputed; temporary nodes skip both.  Holds a reference on the VAP
+ * netdev for the lifetime of the node (released in qdrv_node_free()).
+ * Returns NULL on allocation failure.  'nt' and 'macaddr' are not used here.
+ */
+static struct ieee80211_node *qdrv_node_alloc(struct ieee80211_node_table *nt,
+		struct ieee80211vap *vap, const uint8_t *macaddr, const uint8_t tmp_node)
+{
+	struct qdrv_node *qn;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct net_device *vdev = vap->iv_dev;
+	struct qtn_node_shared_stats_list *shared_stats;
+	unsigned long flags;
+
+	/* GFP_ATOMIC: may be called from non-sleepable context */
+	qn = kmalloc(sizeof(struct qdrv_node), GFP_ATOMIC);
+
+	if (qn == NULL) {
+		DBGPRINTF_E("kmalloc failed\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return NULL;
+	}
+	memset(qn, 0, sizeof(struct qdrv_node));
+
+	dev_hold(vdev); /* Increase the reference count of the VAP netdev */
+	ic->ic_node_count++;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE | QDRV_LF_WLAN,
+			"Allocated node %p (total %d/%d)\n",
+			qn, ic->ic_node_count, netdev_refcnt_read(vdev));
+#else
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_TRACE | QDRV_LF_WLAN,
+			"Allocated node %p (total %d/%d)\n",
+			qn, ic->ic_node_count, atomic_read(&vdev->refcnt));
+#endif
+
+	qn->qn_node.ni_vap = vap;
+	TAILQ_INSERT_TAIL(&qv->allnodes, qn, qn_next);
+
+	qdrv_tx_sch_node_data_init(qdrv_tx_sch_vap_get_qdisc(vdev), qw->tx_sch_shared_data,
+				&qn->qn_node.ni_tx_sch, ic->ic_sta_assoc + 1);
+
+	if (!tmp_node) {
+		/* IRQs off: the stats free list is presumably also touched from
+		 * interrupt context - see the same locking in qdrv_node_free() */
+		local_irq_save(flags);
+		shared_stats = TAILQ_FIRST(&qw->shared_pernode_stats_head);
+		if (shared_stats == NULL) {
+			/* roll back everything done above before failing */
+			DBGPRINTF_E("Failed to obtain shared_stats for new node\n");
+			local_irq_restore(flags);
+			dev_put(vdev);
+			ic->ic_node_count--;
+			kfree(qn);
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+			return NULL;
+		}
+
+		TAILQ_REMOVE(&qw->shared_pernode_stats_head, shared_stats, next);
+		local_irq_restore(flags);
+
+		memset(shared_stats, 0, sizeof(*shared_stats));
+
+		qn->qn_node.ni_shared_stats = (struct qtn_node_shared_stats *) shared_stats;
+		/* translate the pool-relative offset to the physical address seen by the MuC */
+		qn->qn_node.ni_shared_stats_phys = (void*)((unsigned long)shared_stats -
+				(unsigned long)qw->shared_pernode_stats_pool +
+				(unsigned long)qw->shared_pernode_stats_phys);
+
+#ifdef CONFIG_QVSP
+		qvsp_node_init(&qn->qn_node);
+#endif
+		g_wlan_tot_node_alloc++;
+
+		qdrv_wlan_muc_node_thresh_set(qw, ic, ic->ic_sta_assoc + 1);
+	} else {
+		g_wlan_tot_node_alloc_tmp++;
+	}
+
+	return &qn->qn_node;
+}
+
+/*
+ * 802.11 layer callback: release driver-private node state.  Counterpart of
+ * qdrv_node_alloc(): returns the shared-stats slot to the free list (permanent
+ * nodes only), recomputes the per-node MuC thresholds, unlinks the node from
+ * the VAP list and drops the netdev reference taken at allocation.
+ * Note: the qdrv_node memory itself is not freed here; only the driver-side
+ * bookkeeping is undone.
+ */
+static void qdrv_node_free(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qdrv_node *qn = container_of(ni, struct qdrv_node, qn_node);
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct net_device *vdev = vap->iv_dev;
+	unsigned long flags;
+
+	/* ni_shared_stats is only set for permanent (non-temporary) nodes */
+	if (ni->ni_shared_stats) {
+		local_irq_save(flags);
+		TAILQ_INSERT_TAIL(&qw->shared_pernode_stats_head,
+				(struct qtn_node_shared_stats_list *) ni->ni_shared_stats, next);
+		local_irq_restore(flags);
+		g_wlan_tot_node_free++;
+		qdrv_wlan_muc_node_thresh_set(qw, ic, ic->ic_sta_assoc);
+	} else {
+		g_wlan_tot_node_free_tmp++;
+	}
+	qdrv_tx_sch_node_data_exit(&ni->ni_tx_sch, ic->ic_sta_assoc);
+
+	TAILQ_REMOVE(&qv->allnodes, qn, qn_next);
+	dev_put(vdev);
+}
+
+/*
+ * Build the packed channel descriptor word sent to the MuC over hostlink:
+ * IEEE channel number, bandwidth flags (HT40 primary hi/lo, VHT80), max
+ * transmit power and the DFS flag.  Bandwidth bits are suppressed when a
+ * 20MHz-only scan is forced or when the STA's BSS limits the usable width.
+ */
+static u_int32_t qdrv_set_channel_setup(const struct ieee80211com *ic,
+		const struct ieee80211_channel *chan)
+{
+	u_int32_t ieee_chan;
+	uint32_t qtn_chan = 0;
+	int32_t pwr;
+	int force_bw_20 = 0;
+	int force_bw_40 = 0;
+	int max_bw = BW_HT80;
+	int tdls_offchan = !!(chan->ic_ext_flags & IEEE80211_CHAN_TDLS_OFF_CHAN);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ieee_chan = (u_int32_t)chan->ic_ieee;
+	pwr = chan->ic_maxpower;
+
+	/* 20MHz-only scan requested while 40MHz would otherwise be possible */
+	force_bw_20 = (ic->ic_flags_ext & IEEE80211_FEXT_SCAN_20) &&
+			((ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) || tdls_offchan);
+
+	force_bw_40 = ic->ic_flags_ext & IEEE80211_FEXT_SCAN_40;
+
+	/* as a STA, never exceed the bandwidth of the BSS we are joined to */
+	if ((ic->ic_opmode == IEEE80211_M_STA) && ic->ic_bss_bw && !tdls_offchan) {
+		max_bw = ic->ic_bss_bw;
+	}
+
+	qtn_chan = SM(ieee_chan, QTN_CHAN_IEEE);
+
+	if (!force_bw_20 && ((ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) || tdls_offchan)) {
+		/* HT40D: this channel is the upper (primary high) half */
+		if ((chan->ic_flags & IEEE80211_CHAN_HT40D) && (max_bw >= BW_HT40)) {
+			qtn_chan |= QTN_CHAN_FLG_PRI_HI | QTN_CHAN_FLG_HT40;
+		} else if ((chan->ic_flags & IEEE80211_CHAN_HT40U) && (max_bw >= BW_HT40)) {
+			qtn_chan |= QTN_CHAN_FLG_HT40;
+		}
+		if (!force_bw_40 && IS_IEEE80211_VHT_ENABLED(ic) &&
+				IEEE80211_IS_VHT_80(ic) &&
+				(chan->ic_flags & IEEE80211_CHAN_VHT80) &&
+				(max_bw >= BW_HT80)) {
+			qtn_chan |= QTN_CHAN_FLG_VHT80;
+		}
+	}
+	qtn_chan |= SM(pwr, QTN_CHAN_PWR);
+	if (chan->ic_flags & IEEE80211_CHAN_DFS) {
+		qtn_chan |= QTN_CHAN_FLG_DFS;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN,
+				"Hlink setting channel %08X Chan %d Pwr %d\n",
+			qtn_chan, ieee_chan, pwr);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return qtn_chan;
+}
+
+/*
+ * Build the frequency-band word for hostlink from the RF chip id.
+ * 'chan' is currently unused but kept for callback-signature symmetry with
+ * qdrv_set_channel_setup().
+ */
+static uint32_t qdrv_set_channel_freqband_setup(const struct ieee80211com *ic, const struct ieee80211_channel *chan)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	uint32_t freqband = 0;
+
+	freqband |= SM(qw->rf_chipid, QTN_BAND_FREQ);
+
+	return freqband;
+}
+
+/*
+ * Map the current PHY mode to its capability bandwidth (BW_HT20/40/80/160).
+ * Returns BW_INVALID for modes not listed here - callers must handle it
+ * (see qdrv_check_channel()).
+ */
+static int qdrv_wlan_80211_get_cap_bw(struct ieee80211com *ic)
+{
+	int bw;
+
+	switch (ic->ic_phymode) {
+	case IEEE80211_MODE_11A:
+	case IEEE80211_MODE_11B:
+	case IEEE80211_MODE_11G:
+	case IEEE80211_MODE_11NA:
+	case IEEE80211_MODE_11NG:
+	case IEEE80211_MODE_11AC_VHT20PM:
+		bw = BW_HT20;
+		break;
+	case IEEE80211_MODE_11NA_HT40PM:
+	case IEEE80211_MODE_11NG_HT40PM:
+	case IEEE80211_MODE_11AC_VHT40PM:
+		bw = BW_HT40;
+		break;
+	case IEEE80211_MODE_11AC_VHT80PM:
+		bw = BW_HT80;
+		break;
+	case IEEE80211_MODE_11AC_VHT160PM:
+		bw = BW_HT160;
+		break;
+	default:
+		bw = BW_INVALID;
+		break;
+	}
+
+	return bw;
+}
+
+/*
+ * Check whether a channel is unusable: outside the configured channel table,
+ * or with radar currently detected on it.
+ *
+ * base_chan is the primary channel of the candidate set and chan the member
+ * being checked (equal when checking the primary itself); is_req_chan enables
+ * user-visible diagnostics for an explicitly requested channel.
+ *
+ * Returns 1 if the channel cannot be used, 0 otherwise.
+ */
+static int qdrv_chan_has_radar(struct ieee80211com *ic, struct ieee80211_channel *base_chan,
+				struct ieee80211_channel *chan, int is_req_chan)
+{
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN,
+		"%s: check chan=%u (%u MHz)\n",
+	       __func__, chan->ic_ieee, chan->ic_freq);
+
+	/*
+	 * Valid entries are ic_channels[0 .. ic_nchans - 1], so the first
+	 * invalid pointer is ic_channels + ic_nchans.  The previous '>'
+	 * comparison was an off-by-one that accepted the one-past-the-end
+	 * entry.
+	 */
+	if ((chan < ic->ic_channels) || (chan >= (ic->ic_channels + ic->ic_nchans))) {
+		DBGPRINTF_E("%schannel %u (%u MHz) is invalid\n",
+			chan == base_chan ? "" : "secondary ",
+			chan->ic_ieee, chan->ic_freq);
+		return 1;
+	}
+
+	if (chan->ic_flags & IEEE80211_CHAN_RADAR) {
+		if (is_req_chan) {
+			if (chan == base_chan)
+				printk("selected channel %u cannot be used - has radar\n",
+					base_chan->ic_ieee);
+			else
+				printk("selected channel %u cannot be used - "
+					"secondary channel %u has radar\n",
+					base_chan->ic_ieee, chan->ic_ieee);
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate a candidate channel against the current operating constraints:
+ * active-channel bitmap, band, required bandwidth, and radar status of every
+ * 20MHz member of the 40/80MHz block.  Sets ic_chan_is_set when the channel
+ * is accepted.  The VHT80 member lookup relies on the channel table laying
+ * out the four 20MHz members of an 80MHz block contiguously (LL/LU/UL/UU).
+ *
+ * Returns 1 if the channel is usable, 0 if it must be rejected.
+ */
+static int qdrv_check_channel(struct ieee80211com *ic, struct ieee80211_channel *chan,
+	int fast_switch, int is_req_chan)
+{
+	uint16_t band_flags;
+	struct ieee80211_channel *low_chan = NULL;
+	int check_curchan = !(ic->ic_flags & IEEE80211_F_SCAN);
+	int bw = qdrv_wlan_80211_get_cap_bw(ic);
+
+	if (ic->ic_curchan == NULL) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s: rejected - ic_curchan NULL\n", __func__);
+		return 0;
+	}
+
+	if (chan == NULL || chan == IEEE80211_CHAN_ANYC) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s: rejected - channel invalid\n", __func__);
+		return 0;
+	}
+
+	/* the candidate must be on the same band as the current channel */
+	band_flags = ic->ic_curchan->ic_flags &
+			(IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
+
+	/* fast switch: only immediately-available, non-inactive primaries qualify */
+	if (fast_switch && (!(ieee80211_is_chan_available(chan)) ||
+			isset(ic->ic_chan_pri_inactive, chan->ic_ieee))) {
+		return 0;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN,
+		"%s: chan=%u isset=%u bw=%u flags=0x%08x/0x%04x cur=%u set=%u\n",
+	       __func__,
+	       chan->ic_ieee, !!isset(ic->ic_chan_active, chan->ic_ieee), bw,
+	       chan->ic_flags, band_flags, ic->ic_curchan->ic_ieee, ic->ic_chan_is_set);
+
+	/* already on this frequency and the channel has been programmed */
+	if (check_curchan && (chan->ic_freq == ic->ic_curchan->ic_freq) && ic->ic_chan_is_set) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s: rejected - current\n", __func__);
+		return 0;
+	};
+
+	if (!isset(ic->ic_chan_active, chan->ic_ieee)) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s: rejected - inactive\n", __func__);
+		return 0;
+	};
+
+	if (!(chan->ic_flags & band_flags)) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s: rejected - off band\n", __func__);
+		return 0;
+	};
+
+	/* ignore channels that do not match the required bandwidth */
+	if (IEEE80211_IS_VHT_80(ic)) {
+		if (!(chan->ic_flags & IEEE80211_CHAN_VHT80)) {
+			DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s: rejected - bw\n", __func__);
+			return 0;
+		}
+	}
+
+	/* ignore channels where radar has been detected */
+	switch (bw) {
+	case BW_HT160:
+	case BW_INVALID:
+		DBGPRINTF_E("invalid phy mode %u (needs adding to qdrv_wlan_80211_get_cap_bw?)\n",
+				ic->ic_phymode);
+		return 0;
+	case BW_HT80:
+		/* find the lowest 20MHz member of this 80MHz block, then check all four */
+		if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) {
+			low_chan = chan;
+		} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) {
+			low_chan = chan - 1;
+		} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) {
+			low_chan = chan - 2;
+		} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) {
+			low_chan = chan - 3;
+		}
+		if (low_chan == NULL) {
+			DBGPRINTF_E("invalid ext flags %08x\n", chan->ic_ext_flags);
+			return 0;
+		}
+		if (qdrv_chan_has_radar(ic, chan, low_chan, is_req_chan) ||
+				qdrv_chan_has_radar(ic, chan, low_chan + 1, is_req_chan) ||
+				qdrv_chan_has_radar(ic, chan, low_chan + 2, is_req_chan) ||
+				qdrv_chan_has_radar(ic, chan, low_chan + 3, is_req_chan)) {
+			return 0;
+		}
+		break;
+	case BW_HT40:
+		/* HT40D means this is the upper member; check both halves */
+		if (chan->ic_flags & IEEE80211_CHAN_HT40D) {
+			low_chan = chan - 1;
+		} else {
+			low_chan = chan;
+		}
+		if (qdrv_chan_has_radar(ic, chan, low_chan, is_req_chan) ||
+				qdrv_chan_has_radar(ic, chan, low_chan + 1, is_req_chan)) {
+			return 0;
+		}
+		break;
+	case BW_HT20:
+		if (qdrv_chan_has_radar(ic, chan, chan, is_req_chan)) {
+			return 0;
+		}
+		break;
+	}
+
+	ic->ic_chan_is_set = 1;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+		"%s: channel %u (%u MHz) selected\n",
+	       __func__, chan->ic_ieee, chan->ic_freq);
+
+
+	return 1;
+}
+
+/*
+ * Close out the occupancy record for the channel being left: accumulate the
+ * elapsed seconds into duration[] and remember it as prev_chan.  No-op during
+ * a NOPICK scan, or when staying on the same channel.
+ */
+static void qdrv_chan_occupy_record_finish(struct ieee80211com *ic, uint8_t new_chan)
+{
+	struct ieee80211_chan_occupy_record *occupy_record = &ic->ic_chan_occupy_record;
+	uint8_t cur_chan = occupy_record->cur_chan;
+
+	if ((ic->ic_flags & IEEE80211_F_SCAN) &&
+			(ic->ic_scan->ss_flags & IEEE80211_SCAN_NOPICK)) {
+		return;
+	}
+
+	if (cur_chan && (new_chan != cur_chan)) {
+		occupy_record->cur_chan = 0;
+		occupy_record->prev_chan = cur_chan;
+		/* seconds since boot (INITIAL_JIFFIES-adjusted) minus the start mark */
+		occupy_record->duration[cur_chan] += (jiffies - INITIAL_JIFFIES) / HZ -
+				occupy_record->occupy_start;
+	}
+}
+
+/*
+ * Start an occupancy record for new_chan: bump its visit count (unless we are
+ * returning to the channel we just left) and note the start time in seconds.
+ * Only acts if no record is currently open (cur_chan == 0).
+ */
+static void qdrv_chan_occupy_record_start(struct ieee80211com *ic, uint8_t new_chan)
+{
+	struct ieee80211_chan_occupy_record *occupy_record = &ic->ic_chan_occupy_record;
+
+	if (occupy_record->cur_chan == 0) {
+		occupy_record->cur_chan = new_chan;
+		if (occupy_record->prev_chan != new_chan) {
+			occupy_record->times[new_chan]++;
+		}
+		occupy_record->occupy_start = (jiffies - INITIAL_JIFFIES) / HZ;
+	}
+}
+
+/*
+ * Return true if new_chan lies within the 20/40/80 MHz block that prev_chan
+ * belongs to, under the current capability bandwidth.  Used e.g. to decide
+ * whether an in-progress CAC must be stopped on a channel change.
+ * Returns false if either channel pointer is NULL or the bandwidth is
+ * unrecognised.
+ */
+static bool qdrv_chan_compare_equality(struct ieee80211com *ic,
+		struct ieee80211_channel *prev_chan, struct ieee80211_channel *new_chan)
+{
+	int bw = qdrv_wlan_80211_get_cap_bw(ic);
+	struct ieee80211_channel *low_chan = NULL;
+	int ret = false;
+
+	if ((!prev_chan) || (!new_chan)) {
+		return ret;
+	}
+
+	switch (bw) {
+		case BW_HT80:
+			/* locate the lowest 20MHz member of prev_chan's 80MHz block */
+			if (prev_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) {
+				low_chan = prev_chan;
+			} else if (prev_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) {
+				low_chan = prev_chan - 1;
+			} else if (prev_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) {
+				low_chan = prev_chan - 2;
+			} else if (prev_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) {
+				low_chan = prev_chan - 3;
+			}
+			/*
+			 * Guard against prev_chan carrying none of the VHT80
+			 * position flags (mirrors qdrv_check_channel()): the
+			 * old code fell through and did pointer arithmetic on
+			 * a NULL low_chan.
+			 */
+			if (low_chan == NULL) {
+				DBGPRINTF_E("invalid ext flags %08x\n", prev_chan->ic_ext_flags);
+				break;
+			}
+			if ((low_chan == new_chan) || ((low_chan + 1) == new_chan) ||
+					((low_chan + 2) == new_chan) || ((low_chan + 3) == new_chan)) {
+				ret = true;
+			}
+			break;
+		case BW_HT40:
+			/* HT40D means prev_chan is the upper member of its pair */
+			if (prev_chan->ic_flags & IEEE80211_CHAN_HT40D) {
+				low_chan = prev_chan - 1;
+			} else {
+				low_chan = prev_chan;
+			}
+			if ((low_chan == new_chan) || ((low_chan + 1) == new_chan)) {
+				ret = true;
+			}
+			break;
+		case BW_HT20:
+			if (prev_chan == new_chan) {
+				ret = true;
+			}
+			break;
+		default:
+			DBGPRINTF_N("%s: Invalid bandwidth\n", __func__);
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Program ic_curchan into the hardware via hostlink.  Handles the DFS/radar
+ * bracketing around the switch, stops any active CAC that the new channel
+ * does not overlap, finalises the channel-occupancy record, and (outside of
+ * scans) emits the channel-change event and resets the OCAC rx state.
+ */
+static void qdrv_set_channel(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	uint32_t freq_band;
+	uint32_t qtn_chan;
+	/* radar handling is skipped when CAC on this channel is done or running */
+	int handle_radar = !(IEEE80211_IS_CHAN_CACDONE(ic->ic_curchan)) &&
+			!(IEEE80211_IS_CHAN_CAC_IN_PROGRESS(ic->ic_curchan));
+
+	ic->sta_dfs_info.allow_measurement_report = false;
+
+	qtn_chan = qdrv_set_channel_setup(ic, ic->ic_curchan);
+	freq_band = qdrv_set_channel_freqband_setup(ic, ic->ic_curchan);
+	qw->tx_stats.tx_channel = ic->ic_curchan->ic_ieee;
+
+	/* store normal txpower for short range workaround */
+	qdrv_hostlink_store_txpow(qw, ic->ic_curchan->ic_maxpower_normal);
+
+	/* stop CAC if the channel under CAC does not overlap the new channel */
+	if (ic->ic_chan_compare_equality(ic, qdrv_radar_get_current_cac_chan(), ic->ic_curchan) == false) {
+		qdrv_radar_stop_active_cac();
+	}
+
+	if (handle_radar) {
+		qdrv_radar_before_newchan();
+	}
+
+	qdrv_hostlink_setchan(qw, freq_band, qtn_chan);
+
+	qdrv_radar_on_newchan();
+
+	qdrv_chan_occupy_record_finish(ic, ic->ic_curchan->ic_ieee);
+
+	if (!(ic->ic_flags & IEEE80211_F_SCAN)) {
+		ic->ic_chan_switch_record(ic, ic->ic_curchan, ic->ic_csw_reason);
+		qdrv_eventf((TAILQ_FIRST(&ic->ic_vaps))->iv_dev,
+				QEVT_COMMON_PREFIX" Channel Changed to %d",
+				ic->ic_curchan->ic_ieee);
+
+		/* Reset ocac_rx_state as we have moved to another channel and should start gathering afresh */
+		spin_lock(&ic->ic_ocac.ocac_lock);
+		memset(&ic->ic_ocac.ocac_rx_state, 0, sizeof(ic->ic_ocac.ocac_rx_state));
+		spin_unlock(&ic->ic_ocac.ocac_lock);
+	}
+}
+
+/*
+ * Scan-start callback: notify the MuC that scan mode begins, for the STA
+ * case on the first VAP.  The HOSTAP branch below is intentionally dead
+ * (guarded by "if (1) return") per the FIXME - mode changes are disabled.
+ */
+static void qtn_scan_start(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	if ((TAILQ_FIRST(&ic->ic_vaps))->iv_opmode == IEEE80211_M_STA) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "Sending SCAN START to MuC\n");
+		qdrv_hostlink_setscanmode(qw,1);
+	}
+
+	/* FIXME: Disabling MODE changes for now */
+	if (1)
+		return;
+	/* Fixme : Do proper support for STA scan on MuC */
+	if ((TAILQ_FIRST(&ic->ic_vaps))->iv_opmode == IEEE80211_M_HOSTAP) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "Sending SCAN START to MuC\n");
+		qdrv_hostlink_setscanmode(qw,1);
+	}
+}
+
+/*
+ * Scan-end callback: mirror of qtn_scan_start() - tell the MuC to leave scan
+ * mode for the STA case on the first VAP.  The HOSTAP branch is intentionally
+ * dead (guarded by "if (1) return") per the FIXME.
+ */
+static void qtn_scan_end(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if ((TAILQ_FIRST(&ic->ic_vaps))->iv_opmode == IEEE80211_M_STA) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "Sending SCAN STOP to MuC\n");
+		qdrv_hostlink_setscanmode(qw,0);
+	}
+
+	/* FIXME: Disabling MODE changes for now */
+	if (1) {
+		return;
+	}
+	/* Fixme : Do proper support for STA scan on MuC */
+	if ((TAILQ_FIRST(&ic->ic_vaps))->iv_opmode == IEEE80211_M_HOSTAP) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "Sending SCAN STOP to MuC\n");
+		qdrv_hostlink_setscanmode(qw,0);
+	}
+}
+
+/*
+ * Prepare an skb as an 802.11 management-path frame for node ni and obtain a
+ * management tx descriptor for it.  Tags the skb (encap type, destination
+ * node index, no-AMSDU) before handing it to qdrv_tx_get_mgt_txdesc().
+ * Returns the descriptor, or NULL per that helper.  'ic' is unused here.
+ */
+static struct host_txdesc * qdrv_wlan_get_mgt_txdesc(struct ieee80211com *ic,
+	struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct net_device *vdev;
+	struct qdrv_vap *qv;
+	struct host_txdesc *txdesc;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	vdev = qv->ndev;
+
+	skb->dev = vdev;
+	QTN_SKB_ENCAP(skb) = QTN_SKB_ENCAP_80211_DATA;
+	skb->dest_port = ni->ni_node_idx;
+
+	M_FLAG_SET(skb, M_NO_AMSDU);
+
+	/*
+	 * These frames are inserted into the tx datapath on the MuC, not in qdrv.
+	 * The node ID is unset in order to avoid multiple node free operations during tx_done.
+	 */
+	QTN_SKB_CB_NI(skb) = NULL;
+
+	/* descriptor allocation must not race with the softirq tx path */
+	local_bh_disable();
+	txdesc = qdrv_tx_get_mgt_txdesc(skb, vdev);
+	local_bh_enable();
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return txdesc;
+}
+
+/*
+ * Convert an skb into an off-channel-sampling (OCS) frame buffer that the
+ * MuC/AuC can transmit, returning its virtual/bus addresses, length and node
+ * index through the out parameters.
+ *
+ * AuC tx path: the payload is copied into an HBM pool buffer (cache-flushed
+ * for DMA) and the skb is freed here.  Otherwise a management tx descriptor
+ * is used and the skb's ownership passes to the descriptor.
+ * Returns 0 on success, -1 if no buffer/descriptor could be obtained
+ * (caller keeps ownership of the skb in the descriptor case).
+ */
+static int qdrv_wlan_get_ocs_frame(struct ieee80211com *ic, struct ieee80211_node *ni,
+		struct sk_buff *skb, uint32_t *frame_host, uint32_t *frame_bus,
+		uint16_t *frame_len, uint16_t *node_idx)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	if (QDRV_WLAN_TX_USE_AUC(qw))
+	{
+		void *buf_virt;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		uintptr_t flush_start;
+		size_t flush_size;
+#endif
+
+		buf_virt = topaz_hbm_get_payload_virt(TOPAZ_HBM_BUF_EMAC_RX_POOL);
+		if (unlikely(!buf_virt)) {
+			return -1;
+		}
+
+		memcpy(buf_virt, skb->data, skb->len);
+		/* write back + invalidate so the DMA engine sees the copied payload */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		dma_cache_wback_inv((unsigned long) buf_virt, skb->len);
+#else
+		flush_start = (uintptr_t) align_buf_cache(buf_virt);
+		flush_size = align_buf_cache_size(buf_virt, skb->len);
+		flush_and_inv_dcache_range(flush_start, flush_start + flush_size);
+#endif
+
+		*frame_host = (uint32_t)buf_virt;
+		*frame_bus = (uint32_t)virt_to_bus(buf_virt);
+		*frame_len = skb->len;
+		*node_idx = IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx);
+		dev_kfree_skb(skb);
+	}
+	else
+	{
+		struct host_txdesc *txdesc = qdrv_wlan_get_mgt_txdesc(ic, ni, skb);
+		if (!txdesc) {
+			return -1;
+		}
+
+		*frame_host = (uint32_t)txdesc->hd_va;
+		*frame_bus = (uint32_t)txdesc->hd_pa;
+		*frame_len = txdesc->hd_pktlen;
+		*node_idx = txdesc->hd_node_idx;
+	}
+
+	return 0;
+}
+
+/*
+ * Release a frame buffer previously obtained via qdrv_wlan_get_ocs_frame():
+ * return the HBM payload to its pool on the AuC path, or release the tx
+ * descriptor otherwise.  frame_host is the host-side address handed out by
+ * the get function.
+ */
+static void qdrv_wlan_release_ocs_frame(struct ieee80211com *ic, uint32_t frame_host)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	if (QDRV_WLAN_TX_USE_AUC(qw))
+	{
+		void *buf_bus = (void *)virt_to_bus((void *)frame_host);
+		const int8_t pool = topaz_hbm_payload_get_pool_bus(buf_bus);
+
+		if (unlikely(!topaz_hbm_pool_valid(pool))) {
+			printk("%s: buf %x is not from hbm pool!\n", __FUNCTION__, frame_host);
+		} else {
+			topaz_hbm_put_payload_realign_bus(buf_bus, pool);
+		}
+	}
+	else
+	{
+		local_bh_disable();
+		qdrv_tx_release_txdesc(qw,
+			(struct lhost_txdesc *)frame_host);
+		local_bh_enable();
+	}
+}
+
+#if defined(QBMPS_ENABLE)
+/*
+ * qdrv_bmps_release_frame: release the null frame queued in sp->bmps_lhost
+ * return 0: succeed; -1: failed
+ *
+ * Only legal while BMPS is off; clears both the host and bus descriptor
+ * references after freeing the frame.
+ */
+static int qdrv_bmps_release_frame(struct ieee80211com *ic)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_bmps_info *bmps = sp->bmps_lhost;
+
+	if (bmps->state != BMPS_STATE_OFF) {
+		printk("%s: release frame error - status: %d!\n", __FUNCTION__, bmps->state);
+		return -1;
+	}
+
+	if (bmps->null_txdesc_host) {
+		qdrv_wlan_release_ocs_frame(ic, bmps->null_txdesc_host);
+		bmps->null_txdesc_host = 0;
+		bmps->null_txdesc_bus = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * qdrv_bmps_set_frame: set the frame to sp->bmps_lhost
+ * NOTE: MUST be called with a reference to the node entry within
+ * the SKB CB structure, and free the reference to the node entry
+ * after this calling.
+ * return 0: succeed; -1: failed, need free skb by the caller.
+ */
+static int qdrv_bmps_set_frame(struct ieee80211com *ic,
+		struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_bmps_info *bmps = sp->bmps_lhost;
+
+	/* the null-frame descriptor may only be replaced while BMPS is off */
+	if (bmps->state != BMPS_STATE_OFF) {
+		printk("%s: set frame error - status: %d!\n", __FUNCTION__, bmps->state);
+		return -1;
+	}
+
+	if (!skb) {
+		printk("%s: set frame error - skb is null\n", __FUNCTION__);
+		return -1;
+	}
+
+	/*
+	 * Release the previous null frame if it has not been released,
+	 * and then get one new tx descriptor
+	 */
+	if (qdrv_bmps_release_frame(ic) != 0) {
+		printk("%s: set frame error - release previous frame fail!\n", __FUNCTION__);
+		return -1;
+	}
+
+	if (qdrv_wlan_get_ocs_frame(ic, ni, skb,
+			&bmps->null_txdesc_host, &bmps->null_txdesc_bus,
+			&bmps->null_frame_len, &bmps->tx_node_idx)) {
+		printk("%s: set frame error - no ocs frame\n", __FUNCTION__);
+		return -1;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef QSCS_ENABLED
+/*
+ * qdrv_scs_release_frame: release the qosnull frame queued in sp->chan_sample_lhost
+ * return 0: succeed; -1: failed
+ *
+ * Refuses to release while a sample is in flight unless 'force' is set.
+ */
+static int qdrv_scs_release_frame(struct ieee80211com *ic, int force)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_samp_chan_info *sample = sp->chan_sample_lhost;
+	struct qtn_off_chan_info *off_chan_info = &sample->base;
+
+	if (!force && off_chan_info->muc_status != QTN_CCA_STATUS_IDLE) {
+		SCSDBG(SCSLOG_INFO, "release frame error - status: %u!\n",
+				off_chan_info->muc_status);
+		return -1;
+	}
+
+	if (sample->qosnull_txdesc_host) {
+		qdrv_wlan_release_ocs_frame(ic, sample->qosnull_txdesc_host);
+
+		sample->qosnull_txdesc_host = 0;
+		sample->qosnull_txdesc_bus = 0;
+	}
+
+	SCSDBG(SCSLOG_VERBOSE, "qosnull frame released.\n");
+
+	return 0;
+}
+
+/*
+ * qdrv_scs_set_frame: set the frame to sp->chan_sample_lhost
+ * return 0: succeed; -1: failed.
+ *
+ * Builds a QoS-null (VO) frame for the BSS node and installs its descriptor
+ * into the sample-channel shared structure.  A node reference is held around
+ * the skb handling; on descriptor failure the skb is freed here.
+ */
+static int
+qdrv_scs_set_frame(struct ieee80211vap *vap, struct qtn_samp_chan_info *sample)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct qtn_off_chan_info *off_chan_info = &sample->base;
+	struct sk_buff *skb = NULL;
+	int ret = -1;
+
+	if (off_chan_info->muc_status != QTN_CCA_STATUS_IDLE) {
+		SCSDBG(SCSLOG_INFO, "set frame error - status=%u\n",
+				off_chan_info->muc_status);
+		return -1;
+	}
+
+	/* already configured - nothing to do */
+	if (sample->qosnull_txdesc_host) {
+		SCSDBG(SCSLOG_NOTICE, "Qos Null frame already configured -ignore\n");
+		return 0;
+	}
+
+	ieee80211_ref_node(ni);
+
+	/* set qosnull frame */
+	skb = ieee80211_get_qosnulldata(ni, WME_AC_VO);
+	if (!skb) {
+		SCSDBG(SCSLOG_NOTICE, "get qosnulldata skb error\n");
+		goto done;
+	}
+	if (qdrv_wlan_get_ocs_frame(ic, ni, skb,
+			&sample->qosnull_txdesc_host, &sample->qosnull_txdesc_bus,
+			&sample->qosnull_frame_len, &sample->tx_node_idx)) {
+		dev_kfree_skb_irq(skb);
+		SCSDBG(SCSLOG_NOTICE, "set frame error - no ocs frame\n");
+		goto done;
+	}
+
+	SCSDBG(SCSLOG_VERBOSE, "set qosnull frame successfully.\n");
+	ret = 0;
+
+done:
+	ieee80211_free_node(ni);
+	return ret;
+}
+
+/*
+ * Ask the MuC (via hostlink ioctl) to refresh the SCS scan statistics.
+ * Failure is only logged; there is no error propagation to the caller.
+ */
+static void qdrv_scs_update_scan_stats(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	if (qdrv_hostlink_send_ioctl_args(qw, IOCTL_DEV_SCS_UPDATE_SCAN_STATS, 0, 0)) {
+		SCSDBG(SCSLOG_INFO, "IOCTL_DEV_SCS_UPDATE_SCAN_STATS failed\n");
+	}
+}
+
+/*
+ * Decide whether current traffic is too heavy to go off-channel for SCS
+ * sampling.  Two criteria: smoothed tx/rx airtime of the BSS above the
+ * configured threshold, or total packets queued across all up VAP qdiscs
+ * above the packet-count threshold.
+ * Returns 1 when sampling should be skipped, 0 otherwise.
+ */
+static int qtn_is_traffic_heavy_for_sampling(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qdrv_mac *mac = qw->mac;
+	struct net_device *dev;
+	struct netdev_queue *txq;
+	struct Qdisc *sch;
+	u_int32_t i, total_queued_pkts = 0;
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+
+	/* check the airtime of this bss */
+	if (as->as_tx_ms_smth > ic->ic_scs.scs_thrshld_smpl_airtime ||
+			as->as_rx_ms_smth > ic->ic_scs.scs_thrshld_smpl_airtime) {
+		SCSDBG(SCSLOG_INFO, "not sampling - tx_ms_smth: %u, rx_ms_smth: %u\n",
+				as->as_tx_ms_smth, as->as_rx_ms_smth);
+		return 1;
+	}
+
+	/* check the packet number in tx queue */
+	for (i = 0; i <= mac->vnet_last; ++i) {
+		dev = mac->vnet[i];
+		if (dev && (dev->flags & IFF_UP)) {
+			txq = netdev_get_tx_queue(dev, 0);
+			sch = txq->qdisc;
+			if (sch) {
+				total_queued_pkts += sch->q.qlen;
+			}
+		}
+	}
+	if (total_queued_pkts > ic->ic_scs.scs_thrshld_smpl_pktnum) {
+		SCSDBG(SCSLOG_INFO, "not sampling - queued packet number: %u\n",
+				total_queued_pkts);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * disable radar detection and tx queue,
+ * trigger a cca read to start.
+ * returns 0 if a cca read ioctl was sent to the MuC successfully,
+ * < 0 if we are currently performing a cca read or other error
+ */
+int qdrv_async_cca_read(struct ieee80211com *ic, const struct ieee80211_channel *cca_channel,
+		u_int64_t start_tsf, u_int32_t duration)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_samp_chan_info *sample = sp->chan_sample_lhost;
+	struct qtn_off_chan_info *off_chan_info = &sample->base;
+
+	if (ic == NULL || cca_channel == NULL || duration == 0) {
+		return -EINVAL;
+	}
+
+	/* sample channel not allowed in power-saving mode */
+	if (((ic->ic_opmode == IEEE80211_M_HOSTAP)
+#if defined(QBMPS_ENABLE)
+	     || (ic->ic_opmode == IEEE80211_M_STA)
+#endif
+	    ) && (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_DUTY)) {
+		SCSDBG(SCSLOG_INFO, "not sampling - CoC idle\n");
+		return -EWOULDBLOCK;
+	}
+
+	/* a read is already in flight */
+	if (off_chan_info->muc_status != QTN_CCA_STATUS_IDLE) {
+		return -EWOULDBLOCK;
+	}
+	sample->start_tsf = start_tsf;
+	off_chan_info->dwell_msecs = duration;
+	off_chan_info->freq_band = qdrv_set_channel_freqband_setup(ic, cca_channel);
+	off_chan_info->channel = qdrv_set_channel_setup(ic, cca_channel);
+	off_chan_info->flags = QTN_OFF_CHAN_FLAG_PASSIVE_ONESHOT;
+	sample->type = QTN_CCA_TYPE_DIRECTLY;
+
+	/*
+	 * TODO SCS: radar.
+	 */
+
+	/*
+	 * Mirror qdrv_sample_channel(): roll the status back to IDLE and
+	 * report failure only when the hostlink ioctl could not be sent.
+	 * (The previous code inverted this test and reset the status on
+	 * success, defeating the in-progress guard above and silently
+	 * reporting hostlink failures as success.)
+	 */
+	off_chan_info->muc_status = QTN_CCA_STATUS_HOST_IOCTL_SENT;
+	if (qdrv_hostlink_sample_chan(qw, sp->chan_sample_bus) < 0) {
+		off_chan_info->muc_status = QTN_CCA_STATUS_IDLE;
+		SCSDBG(SCSLOG_INFO, "hostlink sample channel error!\n");
+		return -EIO;
+	}
+
+	/* indicate sample channel is on-going */
+	ic->ic_flags_qtn |= IEEE80211_QTN_SAMP_CHAN;
+
+	return 0;
+}
+EXPORT_SYMBOL(qdrv_async_cca_read);
+
+/*
+ * Ask the MuC to abort an in-progress off-channel sample.  If the cancel
+ * hostlink call itself fails, the shared status is forced back to IDLE so a
+ * later sample is not blocked.  Returns 0 on success, -1 on failure.
+ */
+static int qdrv_sample_channel_cancel(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_samp_chan_info *sample = sp->chan_sample_lhost;
+	struct qtn_off_chan_info *off_chan_info = &sample->base;
+
+	if (qdrv_hostlink_sample_chan_cancel(qw, sp->chan_sample_bus) < 0) {
+		off_chan_info->muc_status = QTN_CCA_STATUS_IDLE;
+		SCSDBG(SCSLOG_INFO, "hostlink cancel off channel sampling error!\n");
+		return -1;
+	}
+
+	SCSDBG(SCSLOG_INFO, "cancel off channel sampling\n");
+
+	return 0;
+}
+
+/*
+ * Kick off a background SCS sample of 'sampled_channel'.  The sample is
+ * refused while scanning (incl. bg-scan), while radar forbids it, under
+ * heavy traffic, in power-saving mode, or while another sample is in flight.
+ * Ensures a QoS-null keep-alive frame is installed first, fills in the
+ * shared off-channel request, and sends it to the MuC over hostlink.
+ * Returns 0 on success, -1 if the sample was not started.
+ */
+static int qdrv_sample_channel(struct ieee80211vap *vap, struct ieee80211_channel *sampled_channel)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_samp_chan_info *sample = sp->chan_sample_lhost;
+	struct qtn_off_chan_info *off_chan_info = &sample->base;
+
+	QDRV_SCS_LOG_TSF(sample, SCS_LOG_TSF_POS_LHOST_TASK_KICKOFF);
+
+	if (ic == NULL || sampled_channel == NULL) {
+		return -1;
+	}
+
+	if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+		|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+	) {
+		SCSDBG(SCSLOG_INFO, "not sampling - scan in progress\n");
+		return -1;
+	}
+
+	if (!qdrv_radar_can_sample_chan()) {
+		IEEE80211_SCS_CNT_INC(&ic->ic_scs, IEEE80211_SCS_CNT_RADAR);
+		SCSDBG(SCSLOG_INFO, "not sampling - radar\n");
+		return -1;
+	}
+
+	if (qtn_is_traffic_heavy_for_sampling(ic)) {
+		IEEE80211_SCS_CNT_INC(&ic->ic_scs, IEEE80211_SCS_CNT_TRAFFIC_HEAVY);
+		SCSDBG(SCSLOG_INFO, "not sampling - traffic too heavy\n");
+		return -1;
+	}
+
+	/* sample channel not allowed in power-saving mode */
+	if (((ic->ic_opmode == IEEE80211_M_HOSTAP)
+#if defined(QBMPS_ENABLE)
+	     || (ic->ic_opmode == IEEE80211_M_STA)
+#endif
+	    ) && (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_DUTY)) {
+		SCSDBG(SCSLOG_INFO, "not sampling - CoC idle\n");
+		return -1;
+	}
+
+	if (off_chan_info->muc_status != QTN_CCA_STATUS_IDLE) {
+		SCSDBG(SCSLOG_INFO, "not sampling - sampling in progress!\n");
+		return -1;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "sampling channel %u\n", sampled_channel->ic_ieee);
+
+	/* lazily install the QoS-null frame used while off channel */
+	if (!sample->qosnull_txdesc_host && qdrv_scs_set_frame(vap, sample)) {
+		IEEE80211_SCS_CNT_INC(&ic->ic_scs, IEEE80211_SCS_CNT_QOSNULL_NOTREADY);
+		SCSDBG(SCSLOG_INFO, "not sampling - set qosnull frame error\n");
+		return -1;
+	}
+	sample->start_tsf = 0;
+	off_chan_info->dwell_msecs = ic->ic_scs.scs_smpl_dwell_time;
+	off_chan_info->freq_band = qdrv_set_channel_freqband_setup(ic, sampled_channel);
+	off_chan_info->channel = qdrv_set_channel_setup(ic, sampled_channel);
+	off_chan_info->flags = ic->ic_scs.scs_sample_type;
+	sample->type = QTN_CCA_TYPE_BACKGROUND;
+
+	/* AP passive sampling: turn off RF while away from the home channel */
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
+			!(off_chan_info->flags & QTN_OFF_CHAN_FLAG_ACTIVE)) {
+		off_chan_info->flags |= QTN_OFF_CHAN_TURNOFF_RF;
+	}
+
+	QDRV_SCS_LOG_TSF(sample, SCS_LOG_TSF_POS_LHOST_IOCTL2MUC);
+	IEEE80211_SCS_CNT_INC(&ic->ic_scs, IEEE80211_SCS_CNT_IOCTL);
+
+	/* roll the status back to IDLE if the hostlink ioctl could not be sent */
+	off_chan_info->muc_status = QTN_CCA_STATUS_HOST_IOCTL_SENT;
+	if (qdrv_hostlink_sample_chan(qw, sp->chan_sample_bus) < 0) {
+		off_chan_info->muc_status = QTN_CCA_STATUS_IDLE;
+		SCSDBG(SCSLOG_INFO, "hostlink sample channel error!\n");
+		return -1;
+	}
+
+	/* indicate sample channel is on-going */
+	ic->ic_flags_qtn |= IEEE80211_QTN_SAMP_CHAN;
+	ic->ic_chan_switch_reason_record(ic, IEEE80211_CSW_REASON_SAMPLING);
+
+	return 0;
+}
+#endif /* QSCS_ENABLED */
+
+#ifdef QTN_BG_SCAN
+/*
+ * qdrv_bgscan_start: reset bgscan status if it is not idle or completed
+ * Always returns 0 (an unexpected status is forcibly reset, not treated as an error).
+ */
+static int qdrv_bgscan_start(struct ieee80211com *ic)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scan_chan_info *scan_host = sp->chan_scan_lhost;
+	struct qtn_off_chan_info *off_chan_info = &scan_host->base;
+
+	if (off_chan_info->muc_status != QTN_SCAN_CHAN_MUC_IDLE
+			&& off_chan_info->muc_status != QTN_SCAN_CHAN_MUC_COMPLETED) {
+		printk("BG_SCAN: status (%u) is not idle or completed!\n",
+				off_chan_info->muc_status);
+		/* recover from a stuck/unexpected MuC state by forcing idle */
+		off_chan_info->muc_status = QTN_SCAN_CHAN_MUC_IDLE;
+	}
+
+	return 0;
+}
+
+/*
+ * qdrv_bgscan_release_frame: delete the frame queued in sp->chan_scan_lhost
+ * return 0: succeed; -1: failed
+ */
+static int qdrv_bgscan_release_frame(struct ieee80211com *ic, int frm_flag, int force)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scan_chan_info *scan_host = sp->chan_scan_lhost;
+	struct qtn_off_chan_info *off_chan_info = &scan_host->base;
+
+	/* unless forced, refuse to free frames the MuC may still be using */
+	if (!force && off_chan_info->muc_status != QTN_SCAN_CHAN_MUC_IDLE
+			&& off_chan_info->muc_status != QTN_SCAN_CHAN_MUC_COMPLETED) {
+		if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+			printk("BG_SCAN: release frame error for current muc_status: %u!\n",
+					off_chan_info->muc_status);
+		}
+		return -1;
+	}
+
+	/* release the "start" frame (frm_flag selects one frame or ALL) */
+	if (frm_flag == IEEE80211_SCAN_FRAME_START
+			|| frm_flag == IEEE80211_SCAN_FRAME_ALL) {
+		if (scan_host->start_txdesc_host) {
+			qdrv_wlan_release_ocs_frame(ic, scan_host->start_txdesc_host);
+			scan_host->start_txdesc_host = 0;
+			scan_host->start_txdesc_bus = 0;
+			if (ic->ic_qtn_bgscan.debug_flags >= 3) {
+				printk("BG_SCAN: delete start frame!\n");
+			}
+		}
+	}
+
+	/* release the probe-request frame */
+	if (frm_flag == IEEE80211_SCAN_FRAME_PRBREQ
+			|| frm_flag == IEEE80211_SCAN_FRAME_ALL) {
+		if (scan_host->prbreq_txdesc_host) {
+			qdrv_wlan_release_ocs_frame(ic, scan_host->prbreq_txdesc_host);
+			scan_host->prbreq_txdesc_host = 0;
+			scan_host->prbreq_txdesc_bus = 0;
+			if (ic->ic_qtn_bgscan.debug_flags >= 3) {
+				printk("BG_SCAN: delete prbreq frame!\n");
+			}
+		}
+	}
+
+	/* release the "finish" frame */
+	if (frm_flag == IEEE80211_SCAN_FRAME_FINISH
+			|| frm_flag == IEEE80211_SCAN_FRAME_ALL) {
+		if (scan_host->finish_txdesc_host) {
+			qdrv_wlan_release_ocs_frame(ic, scan_host->finish_txdesc_host);
+			scan_host->finish_txdesc_host = 0;
+			scan_host->finish_txdesc_bus = 0;
+			if (ic->ic_qtn_bgscan.debug_flags >= 3) {
+				printk("BG_SCAN: delete finish frame!\n");
+			}
+		}
+	}
+
+	return 0;
+}
+/*
+ * qdrv_bgscan_set_frame: set the frame to sp->chan_scan_lhost
+ * NOTE: MUST be called with a reference to the node entry within
+ * the SKB CB structure, and free the reference to the node entry
+ * after this calling.
+ * return 0: succeed; -1: failed, need free skb by the caller.
+ */
+static int qdrv_bgscan_set_frame(struct ieee80211com *ic,
+		struct ieee80211_node *ni, struct sk_buff *skb, int frm_flag)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scan_chan_info *scan_host = sp->chan_scan_lhost;
+	uint32_t frame_host;	/* lhost-visible address of the tx descriptor */
+	uint32_t frame_bus;	/* bus address handed to the MuC */
+	uint16_t frame_len;
+	uint16_t node_idx;
+
+	/* convert skb into an off-channel-service frame descriptor */
+	if (qdrv_wlan_get_ocs_frame(ic, ni, skb,
+			&frame_host, &frame_bus, &frame_len, &node_idx)) {
+		if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+			printk("BG_SCAN: set frame error - no ocs frame\n");
+		}
+		return -1;
+	}
+
+	/* store the descriptor in the slot selected by frm_flag */
+	switch (frm_flag) {
+	case IEEE80211_SCAN_FRAME_START:
+		scan_host->start_txdesc_host = frame_host;
+		scan_host->start_txdesc_bus = frame_bus;
+		scan_host->start_frame_len = frame_len;
+		scan_host->start_node_idx = node_idx;
+		break;
+	case IEEE80211_SCAN_FRAME_PRBREQ:
+		scan_host->prbreq_txdesc_host = frame_host;
+		scan_host->prbreq_txdesc_bus = frame_bus;
+		scan_host->prbreq_frame_len = frame_len;
+		scan_host->prbreq_node_idx = node_idx;
+		break;
+	case IEEE80211_SCAN_FRAME_FINISH:
+		scan_host->finish_txdesc_host = frame_host;
+		scan_host->finish_txdesc_bus = frame_bus;
+		scan_host->finish_frame_len = frame_len;
+		scan_host->finish_node_idx = node_idx;
+		break;
+	}
+
+	if (ic->ic_qtn_bgscan.debug_flags >= 3) {
+		printk("BG_SCAN: set frame flag %u\n", frm_flag);
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and register the bgscan "start" (QoS-null) and probe-request
+ * frames with the shared scan info, if not already present.
+ * On the error paths the freshly-built skb is freed here; on success the
+ * skb ownership appears to pass to the ocs frame machinery (see
+ * qdrv_bgscan_set_frame) -- TODO confirm.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+qdrv_bgscan_init_frames(struct ieee80211vap *vap, struct qtn_scan_chan_info *scan_host)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct net_device *dev = vap->iv_dev;
+	struct sk_buff *skb = NULL;
+	int ret = -1;
+
+	/* hold a node reference for the duration of frame construction */
+	ieee80211_ref_node(ni);
+
+	if (!scan_host->start_txdesc_host) {
+		/* set start frame */
+		skb = ieee80211_get_qosnulldata(ni, WME_AC_VO);
+		if (!skb) {
+			if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+				printk("BG_SCAN: get qosnulldata skb error\n");
+			}
+			goto done;
+		}
+		if (qdrv_bgscan_set_frame(ic, ni, skb, IEEE80211_SCAN_FRAME_START)) {
+			dev_kfree_skb_irq(skb);
+			goto done;
+		}
+	}
+
+	if (!scan_host->prbreq_txdesc_host) {
+		/* set probe request frame (broadcast, wildcard SSID) */
+		skb = ieee80211_get_probereq(vap->iv_bss,
+			vap->iv_myaddr, dev->broadcast,
+			dev->broadcast, (u_int8_t *)"", 0,
+			vap->iv_opt_ie, vap->iv_opt_ie_len);
+		if (!skb) {
+			if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+				printk("BG_SCAN: get probereq skb error\n");
+			}
+			goto done;
+		}
+		if (qdrv_bgscan_set_frame(ic, ni, skb, IEEE80211_SCAN_FRAME_PRBREQ)) {
+			dev_kfree_skb_irq(skb);
+			goto done;
+		}
+	}
+
+	ret = 0;
+
+done:
+	ieee80211_free_node(ni);
+	return ret;
+}
+
+/* Index into scan_mode_str[]; keep in step with the string table below */
+enum scan_mode_index {
+	SCAN_MODE_INDEX_ACTIVE = 0,
+	SCAN_MODE_INDEX_PASSIVE_FAST,
+	SCAN_MODE_INDEX_PASSIVE_NORMAL,
+	SCAN_MODE_INDEX_PASSIVE_SLOW
+};
+
+/* Human-readable scan-mode names, indexed by enum scan_mode_index */
+static char *scan_mode_str[] = {
+		"active",
+		"passive_fast",
+		"passive_normal",
+		"passive_slow"
+};
+
+/*
+ * Trigger a background scan of a single channel via the MuC.
+ * Validates VAP state, scan/radar/MuC-status preconditions, lazily builds
+ * the scan frames, programs the shared scan info, sets IEEE80211_F_SCAN and
+ * issues the hostlink ioctl (clearing the flag again on failure).
+ * Returns 0 on success, -1 on any refusal or error.
+ */
+static int qdrv_bgscan_channel(struct ieee80211vap *vap,
+		struct ieee80211_channel *scanned_channel, int scan_mode, int dwelltime)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scan_chan_info *scan_host = sp->chan_scan_lhost;
+	struct qtn_off_chan_info *off_chan_host = &scan_host->base;
+	int mode_index;
+
+	/* only scan while associated/running, except in repeater mode */
+	if ((vap->iv_state != IEEE80211_S_RUN) && !(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER))
+		return -1;
+
+	if (scanned_channel == NULL) {
+		if (ic->ic_qtn_bgscan.debug_flags >= 1)
+			printk("BG_SCAN: stop - wrong parameters\n");
+		return -1;
+	}
+
+	if (ic->ic_flags & IEEE80211_F_SCAN) {
+		if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+			printk("BG_SCAN: stop - scan in progress\n");
+		}
+		return -1;
+	}
+
+	if (!qdrv_radar_can_sample_chan()) {
+		if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+			printk("BG_SCAN: stop scan - radar\n");
+		}
+		return -1;
+	}
+
+	/* the MuC must have finished (or never started) the previous scan */
+	if (off_chan_host->muc_status != QTN_SCAN_CHAN_MUC_IDLE
+			&& off_chan_host->muc_status != QTN_SCAN_CHAN_MUC_COMPLETED) {
+		if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+			printk("BG_SCAN: stop scan - status=%u\n",
+					off_chan_host->muc_status);
+		}
+		return -1;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "scanning channel %u\n", scanned_channel->ic_ieee);
+
+	/* build the start/probe-request frames on first use */
+	if (!scan_host->start_txdesc_host || !scan_host->prbreq_txdesc_host) {
+		if (qdrv_bgscan_init_frames(vap, scan_host)) {
+			if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+				printk("BG_SCAN: Initiate scan frames error\n");
+			}
+			return -1;
+		}
+	}
+
+	off_chan_host->freq_band = qdrv_set_channel_freqband_setup(ic, scanned_channel);
+	off_chan_host->channel = qdrv_set_channel_setup(ic, scanned_channel);
+	off_chan_host->dwell_msecs = dwelltime;
+	/* fall back to the VAP node index when the BSS node is unmapped */
+	scan_host->start_node_idx = IEEE80211_NODE_IDX_UNMAP(vap->iv_bss->ni_node_idx) ? IEEE80211_NODE_IDX_UNMAP(vap->iv_bss->ni_node_idx) :
+			IEEE80211_NODE_IDX_UNMAP(vap->iv_vapnode_idx);
+	scan_host->prbreq_node_idx = scan_host->start_node_idx;
+	scan_host->finish_node_idx = scan_host->start_node_idx;
+	off_chan_host->flags = 0;
+
+	/* translate the requested scan mode into off-channel flags */
+	if (scan_mode & IEEE80211_PICK_BG_ACTIVE) {
+		off_chan_host->flags |= QTN_OFF_CHAN_FLAG_ACTIVE;
+		mode_index = SCAN_MODE_INDEX_ACTIVE;
+	} else if (scan_mode & IEEE80211_PICK_BG_PASSIVE_FAST) {
+		off_chan_host->flags |= QTN_OFF_CHAN_FLAG_PASSIVE_FAST;
+		mode_index = SCAN_MODE_INDEX_PASSIVE_FAST;
+	} else if (scan_mode & IEEE80211_PICK_BG_PASSIVE_NORMAL) {
+		off_chan_host->flags |= QTN_OFF_CHAN_FLAG_PASSIVE_NORMAL;
+		mode_index = SCAN_MODE_INDEX_PASSIVE_NORMAL;
+	} else {
+		off_chan_host->flags |= QTN_OFF_CHAN_FLAG_PASSIVE_SLOW;
+		mode_index = SCAN_MODE_INDEX_PASSIVE_SLOW;
+	}
+
+	/* AP doing a passive scan turns the RF off on the off-channel */
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
+			!(off_chan_host->flags & QTN_OFF_CHAN_FLAG_ACTIVE)) {
+		off_chan_host->flags |= QTN_OFF_CHAN_TURNOFF_RF;
+	}
+
+	if (ic->ic_qtn_bgscan.debug_flags >= 3) {
+		printk("scan channel %u, scan mode: %s, cca_idle: %u\n",
+				scanned_channel->ic_ieee, scan_mode_str[mode_index],
+				ic->ic_scs.scs_cca_idle_smthed);
+	}
+
+	/* mark the scan active before the ioctl; undo on hostlink failure */
+	IEEE80211_LOCK_IRQ(ic);
+	ic->ic_flags |= IEEE80211_F_SCAN;
+	IEEE80211_UNLOCK_IRQ(ic);
+	QDRV_SCAN_LOG_TSF(scan_host, SCAN_CHAN_TSF_LHOST_HOSTLINK_IOCTL);
+	if (qdrv_hostlink_bgscan_chan(qw, sp->chan_scan_bus) < 0) {
+		IEEE80211_LOCK_IRQ(ic);
+		ic->ic_flags &= ~IEEE80211_F_SCAN;
+		IEEE80211_UNLOCK_IRQ(ic);
+		if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+			printk("BG_SCAN: hostlink error\n");
+		}
+		return -1;
+	}
+	ic->ic_chan_switch_reason_record(ic, IEEE80211_CSW_REASON_BGSCAN);
+
+	return 0;
+}
+#endif /* QTN_BG_SCAN */
+
+/*
+ * STA strict-DFS bookkeeping performed around a deferred channel change:
+ * cancel a pending radar-detect timer (marking the detected channel
+ * NOT_AVAILABLE), and update per-channel availability status for the
+ * old/new channel pair. Clears the measurement-report permission flag.
+ */
+static void qdrv_update_sta_dfs_strict_flags(struct ieee80211com *ic)
+{
+	if (ic->sta_dfs_info.sta_dfs_radar_detected_timer) {
+		ic->sta_dfs_info.sta_dfs_radar_detected_timer = false;
+		del_timer(&ic->sta_dfs_info.sta_radar_timer);
+		if (ic->ic_mark_channel_availability_status) {
+			struct ieee80211_channel *chan = ieee80211_find_channel_by_ieee(ic,
+					ic->sta_dfs_info.sta_dfs_radar_detected_channel);
+			ic->ic_mark_channel_availability_status(ic, chan,
+					IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED);
+			ic->sta_dfs_info.sta_dfs_radar_detected_channel = 0;
+		}
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "\n%s: sta_radar_timer: deleted\n", __func__);
+	}
+
+	if (ic->ic_chan_compare_equality(ic, ic->ic_curchan, ic->ic_prevchan) == false) {
+		if (ic->ic_mark_channel_availability_status) {
+			/* NOTE(review): on a real channel change the new channel is
+			 * promoted to AVAILABLE and the old one demoted to
+			 * NON_AVAILABLE; this swap looks intentional for STA strict
+			 * DFS but should be confirmed against the status machine */
+			if (ieee80211_is_chan_not_available(ic->ic_curchan)) {
+				ic->ic_mark_channel_availability_status(ic,
+						ic->ic_curchan,
+						IEEE80211_CHANNEL_STATUS_AVAILABLE);
+			}
+			if (ieee80211_is_chan_available(ic->ic_prevchan)) {
+				ic->ic_mark_channel_availability_status(ic,
+						ic->ic_prevchan,
+						IEEE80211_CHANNEL_STATUS_NON_AVAILABLE);
+			}
+		}
+	}
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s: qdrv_set_channel_deferred: "
+		"ic->ic_prevchan = %d, ic->ic_curchan = %d\n",
+		__func__, ic->ic_prevchan->ic_ieee, ic->ic_curchan->ic_ieee);
+	ic->sta_dfs_info.allow_measurement_report = false;
+}
+
+
+/*
+ * deferred channel change, where the MuC handles the channel change,
+ * aiming to change at a particular tsf, and notifies the lhost when it occurs.
+ */
+static void qdrv_set_channel_deferred(struct ieee80211com *ic, u64 tsf, int csaflags)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_csa_info *csa = sp->csa_lhost;
+	unsigned long irqflags;
+	/* radar detection requirement on the target channel decides whether
+	 * tx queues may be restarted immediately after the switch */
+	int newchan_radar = qdrv_radar_is_rdetection_required(ic->ic_csa_chan);
+	uint8_t prev_ieee = ic->ic_curchan->ic_ieee;
+	uint8_t cur_ieee = ic->ic_csa_chan->ic_ieee;
+
+	spin_lock_irqsave(&qw->csa_lock, irqflags);
+
+	if (csaflags & IEEE80211_SET_CHANNEL_DEFERRED_CANCEL) {
+		/*
+		 * the muc may pick this up, but if it doesn't it will
+		 * complete the entire channel change
+		 */
+		csa->lhost_status |= QTN_CSA_CANCEL;
+		spin_unlock_irqrestore(&qw->csa_lock, irqflags);
+		return;
+	}
+
+	if (csa->lhost_status & QTN_CSA_STATUS_LHOST_ACTIVE) {
+		/* csa in progress */
+		spin_unlock_irqrestore(&qw->csa_lock, irqflags);
+		DBGPRINTF_E("CSA already active\n");
+		return;
+	}
+
+	/* claim the CSA machinery; status bits are consumed by the irq handler */
+	csa->lhost_status = QTN_CSA_STATUS_LHOST_ACTIVE;
+	if (csaflags & IEEE80211_SET_CHANNEL_TSF_OFFSET) {
+		csa->lhost_status |= QTN_CSA_STATUS_LHOST_UNITS_OFFSET;
+	}
+	if (!newchan_radar) {
+		csa->lhost_status |= QTN_CSA_RESTART_QUEUE;
+	}
+	spin_unlock_irqrestore(&qw->csa_lock, irqflags);
+
+	/* commit the channel change on the lhost side and program the MuC */
+	ic->ic_prevchan = ic->ic_curchan;
+	ic->ic_curchan = ic->ic_des_chan = ic->ic_csa_chan;
+	csa->channel = qdrv_set_channel_setup(ic, ic->ic_curchan);
+	csa->freq_band = qdrv_set_channel_freqband_setup(ic, ic->ic_curchan);
+	csa->req_tsf = tsf;
+	csa->pre_notification_tu = 10; /* Time Units */
+	csa->post_notification_tu = 10;
+
+	if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+		qdrv_update_sta_dfs_strict_flags(ic);
+	}
+
+	qdrv_hostlink_setchan_deferred(qw, sp->csa_bus);
+
+	/* record the transition for ACI/CCI channel-change events */
+	ic->ic_aci_cci_cce.cce_previous = prev_ieee;
+	ic->ic_aci_cci_cce.cce_current = cur_ieee;
+}
+
+/*
+ * Workqueue handler run after the MuC reports the CSA complete: clears the
+ * CSA state under the csa_lock, invokes the registered callback with the
+ * new channel and switch TSF, and (with 802.11k) sends a DFS report.
+ */
+static void csa_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, csa_wq);
+	struct ieee80211com *ic = &qw->ic;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_csa_info *csa = sp->csa_lhost;
+	struct ieee80211_channel *chan = qw->ic.ic_curchan;
+	unsigned long irqflags;
+	u64 tsf = csa->switch_tsf;
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+#endif
+
+	/* mark the entire process as done */
+	spin_lock_irqsave(&qw->csa_lock, irqflags);
+	csa->lhost_status = 0;
+	/* clear csa count at last to avoid any possibility of dual CS */
+	ic->ic_csa_count = 0;
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "total rx CSA frame: beacon=%d, action=%d\n",
+			ic->ic_csa_frame[IEEE80211_CSA_FRM_BEACON],
+			ic->ic_csa_frame[IEEE80211_CSA_FRM_ACTION]);
+	memset(ic->ic_csa_frame, 0x0, sizeof(ic->ic_csa_frame));
+	spin_unlock_irqrestore(&qw->csa_lock, irqflags);
+
+	if (qw->csa_callback) {
+		qw->csa_callback(chan, tsf);
+	}
+
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	if (vap && vap->iv_state == IEEE80211_S_RUN)
+		ieee80211_send_action_dfs_report(vap->iv_bss);
+#endif
+}
+
+/*
+ * Workqueue handler for post-channel-switch housekeeping: run the 802.11
+ * layer's post-switch hook and restart radar handling on the new channel.
+ */
+static void channel_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, channel_work_wq);
+	struct ieee80211com *ic = &qw->ic;
+
+	ieee80211_channel_switch_post(ic);
+	qdrv_radar_on_newchan();
+}
+
+/*
+ * Workqueue handler run when a TDLS remain-on-channel period ends:
+ * leaves power-save, clears the TDLS channel-switch state and, if a
+ * disassociation was deferred during the switch, drops to INIT state.
+ * NOTE(review): uses the first VAP without a NULL check -- presumably at
+ * least one VAP always exists when this work is scheduled; confirm.
+ */
+static void remain_channel_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, remain_chan_wq);
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	ieee80211_sta_pwrsave(vap, 0);
+	vap->tdls_chan_switching = 0;
+	vap->tdls_cs_node = NULL;
+
+	if (vap->tdls_cs_disassoc_pending == 1) {
+		vap->tdls_cs_disassoc_pending = 0;
+		vap->iv_newstate(vap, IEEE80211_S_INIT, 0);
+	}
+
+#if defined(QBMPS_ENABLE)
+	/* indicate sample channel is done */
+	/* start power-saving if allowed */
+	ic->ic_flags_qtn &= ~IEEE80211_QTN_SAMP_CHAN;
+	ic->ic_pm_reason = IEEE80211_PM_LEVEL_REMAIN_CHANNEL_WORK;
+	ieee80211_pm_queue_work(ic);
+#endif
+}
+
+/*
+ * MuC-to-lhost CSA interrupt handler. Handles two independent machines:
+ * remain-on-channel (TDLS) status transitions, and the multi-phase CSA
+ * handshake (PRE -> SWITCHED -> POST, or CANCELLED+COMPLETE), acknowledging
+ * each MuC phase exactly once via lhost_status bits.
+ */
+static void qdrv_csa_irqhandler(void *arg1, void *arg2)
+{
+	struct qdrv_wlan *qw = arg1;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_remain_chan_info *rc = sp->remain_chan_lhost;
+	struct qtn_csa_info *csa = sp->csa_lhost;
+	struct ieee80211_channel *chan = NULL;
+	u32 muc_status = csa->muc_status;
+	u32 lhost_status = csa->lhost_status;
+
+	/* remain-on-channel transitions take priority over CSA handling */
+	if (rc->status == QTN_REM_CHAN_STATUS_MUC_STARTED) {
+		chan = ic->ic_findchannel(ic,
+			QTNCHAN_TO_IEEENUM(rc->off_channel), ic->ic_des_mode);
+		if (chan) {
+			ic->ic_curchan = chan;
+			vap->tdls_chan_switching = 1;
+		}
+		return;
+	} else if ((rc->status == QTN_REM_CHAN_STATUS_MUC_COMPLETE) ||
+			(rc->status == QTN_REM_CHAN_STATUS_MUC_CANCELLED)) {
+		rc->status = QTN_REM_CHAN_STATUS_IDLE;
+		ic->ic_curchan = ic->ic_bsschan;
+		schedule_work(&qw->remain_chan_wq);
+		return;
+	}
+
+	/* phase 1: MuC is about to switch - prepare radar state once */
+	if (muc_status & QTN_CSA_STATUS_MUC_PRE &&
+			!(lhost_status & QTN_CSA_STATUS_LHOST_PRE_DONE)) {
+		qdrv_radar_before_newchan();
+		lhost_status |= QTN_CSA_STATUS_LHOST_PRE_DONE;
+	}
+
+	/* phase 2: channel switched - schedule post-switch work once */
+	if (muc_status & QTN_CSA_STATUS_MUC_SWITCHED &&
+			!(lhost_status & QTN_CSA_STATUS_LHOST_SWITCH_DONE)) {
+		/* just switched channel, restart radar */
+
+		qw->tx_stats.tx_channel = ic->ic_curchan->ic_ieee;
+		schedule_work(&qw->channel_work_wq);
+
+		lhost_status |= QTN_CSA_STATUS_LHOST_SWITCH_DONE;
+	}
+
+	/* phase 3: MuC finished - run the CSA completion workqueue once */
+	if (muc_status & QTN_CSA_STATUS_MUC_POST &&
+			!(lhost_status & QTN_CSA_STATUS_LHOST_POST_DONE)) {
+		/* all done! */
+		lhost_status |= QTN_CSA_STATUS_LHOST_POST_DONE;
+		/* call workqueue to handle callback */
+		schedule_work(&qw->csa_wq);
+	}
+
+	/* successfully cancelled */
+	if ((muc_status & QTN_CSA_STATUS_MUC_CANCELLED) &&
+			(muc_status & QTN_CSA_STATUS_MUC_COMPLETE)) {
+		lhost_status = 0;
+		ic->ic_csa_count = 0;
+		memset(ic->ic_csa_frame, 0x0, sizeof(ic->ic_csa_frame));
+	}
+
+	csa->lhost_status = lhost_status;
+}
+
+/*
+ * Register and enable the MuC->lhost CSA interrupt.
+ * Returns 0 on success, -EINVAL if the handler could not be installed.
+ */
+static int qdrv_init_csa_irqhandler(struct qdrv_wlan *qw)
+{
+	struct int_handler int_handler;
+
+	int_handler.handler = qdrv_csa_irqhandler;
+	int_handler.arg1 = qw;
+	int_handler.arg2 = NULL;
+
+	if (qdrv_mac_set_handler(qw->mac, RUBY_M2L_IRQ_LO_CSA, &int_handler) != 0) {
+		DBGPRINTF_E("Could not set csa irq handler\n");
+		return -EINVAL;
+	}
+
+	qdrv_mac_enable_irq(qw->mac, RUBY_M2L_IRQ_LO_CSA);
+
+	return 0;
+}
+
+/* TSF-log checkpoint names for SCS sampling debug prints (cca_work);
+ * terminated by a NULL log_name sentinel */
+static struct off_chan_tsf_dbg scs_tsf_index_name[] = {
+	{SCS_LOG_TSF_POS_LHOST_TASK_KICKOFF,			"host_task_start"},
+	{SCS_LOG_TSF_POS_LHOST_IOCTL2MUC,			"host_send_ioctl"},
+	{SCS_LOG_TSF_POS_MUC_POLL_IOCTL_FROM_LHOST,		"muc_ioctl_proc"},
+	{SCS_LOG_TSF_POS_MUC_QOSNULL_SENT,			"muc_qosnull_sent"},
+	{SCS_LOG_TSF_POS_MUC_SMPL_START_BEFORE_CHAN_CHG,	"muc_goto_offchan"},
+	{SCS_LOG_TSF_POS_MUC_SMPL_START_AFTER_CHAN_CHG,		"muc_offchan_done"},
+	{SCS_LOG_TSF_POS_MUC_SMPL_FINISH_BEFORE_CHAN_CHG,	"muc_goto_datachan"},
+	{SCS_LOG_TSF_POS_MUC_SMPL_FINISH_AFTER_CHAN_CHG,	"muc_datachan_done"},
+	{SCS_LOG_TSF_POS_LHOST_CCA_INTR,			"host_interrupt"},
+	{SCS_LOG_TSF_POS_LHOST_CCA_WORK,			"host_scswork"},
+	{0,NULL}
+};
+
+/*
+ * Workqueue handler run after an SCS sample completes or is cancelled:
+ * updates counters, restores beacons suppressed by the CCA measurement,
+ * resets the shared status to idle, optionally dumps per-checkpoint TSF
+ * deltas, and clears the sample-in-progress flag (re-enabling STA
+ * power-save when QBMPS is built in).
+ */
+static void cca_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, cca_wq);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_samp_chan_info *sample = sp->chan_sample_lhost;
+	struct qtn_off_chan_info *off_chan_info = &sample->base;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap;
+	uint32_t tsf_hi;
+	uint32_t tsf_lo;
+	uint32_t delta;
+	uint32_t cur_index;
+	uint32_t pre_index = 0;
+	int i;
+
+	if (off_chan_info->muc_status == QTN_CCA_STATUS_MUC_COMPLETE) {
+		QDRV_SCS_LOG_TSF(sample, SCS_LOG_TSF_POS_LHOST_CCA_WORK);
+		IEEE80211_SCS_CNT_INC(&ic->ic_scs, IEEE80211_SCS_CNT_COMPLETE);
+		ic->ic_scs.scs_last_smpl_chan = ic->ic_scs.scs_des_smpl_chan;
+
+		/* CCA flag was set for the measurement: refresh beacons on
+		 * every running/scanning AP VAP after clearing it */
+		if (ic->ic_flags & IEEE80211_F_CCA) {
+			ic->ic_flags &= ~IEEE80211_F_CCA;
+			TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+				if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+					continue;
+
+				if ((vap->iv_state != IEEE80211_S_RUN) && (vap->iv_state != IEEE80211_S_SCAN))
+					continue;
+
+				ic->ic_beacon_update(vap);
+			}
+		}
+
+		SCSDBG(SCSLOG_INFO, "Sample channel %u completed\n",
+				QTNCHAN_TO_IEEENUM(off_chan_info->channel));
+		off_chan_info->muc_status = QTN_CCA_STATUS_IDLE;
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "Sampling finished\n");
+
+		/* debug level 3: print each TSF checkpoint and the delta to
+		 * the previous one */
+		if (ic->ic_scs.scs_debug_enable == 3) {
+			for (i = 0; scs_tsf_index_name[i].log_name; i++) {
+				cur_index = scs_tsf_index_name[i].pos_index;
+				tsf_hi = U64_HIGH32(sample->tsf[cur_index]);
+				tsf_lo = U64_LOW32(sample->tsf[cur_index]);
+				if (i) {
+					delta = (uint32_t)(sample->tsf[cur_index] -
+								sample->tsf[pre_index]);
+				} else {
+					delta = 0;
+					printk("\n\nSCS_SAMPLE_tsf:\n");
+				}
+				pre_index = cur_index;
+				printk("  %s:    %08x_%08x (+%u)\n",
+					scs_tsf_index_name[i].log_name, tsf_hi, tsf_lo, delta);
+			}
+		}
+	} else if (off_chan_info->muc_status == QTN_CCA_STATUS_MUC_CANCELLED) {
+		SCSDBG(SCSLOG_INFO, "Sample channel %u cancelled\n",
+				QTNCHAN_TO_IEEENUM(off_chan_info->channel));
+		off_chan_info->muc_status = QTN_CCA_STATUS_IDLE;
+	}
+
+	ic->ic_flags_qtn &= ~IEEE80211_QTN_SAMP_CHAN;
+#if defined(QBMPS_ENABLE)
+	/* indicate sample channel is done */
+	/* start power-saving if allowed */
+	if (ic->ic_opmode == IEEE80211_M_STA) {
+		ic->ic_pm_reason = IEEE80211_PM_LEVEL_CCA_WORK;
+		ieee80211_pm_queue_work(ic);
+	}
+#endif
+}
+
+/*
+ * MuC->lhost SCS/CCA interrupt: when the MuC reports the sample complete
+ * or cancelled, defer the heavy lifting to cca_work via the workqueue.
+ */
+static void qdrv_cca_irqhandler(void *arg1, void *arg2)
+{
+	struct qdrv_wlan *qw = arg1;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_samp_chan_info *cca = sp->chan_sample_lhost;
+	struct qtn_off_chan_info *off_chan_info = &cca->base;
+
+	/*
+	 * TODO SCS: radar disable during the cca read
+	 */
+	if (off_chan_info->muc_status == QTN_CCA_STATUS_MUC_COMPLETE ||
+			off_chan_info->muc_status == QTN_CCA_STATUS_MUC_CANCELLED) {
+		QDRV_SCS_LOG_TSF(cca, SCS_LOG_TSF_POS_LHOST_CCA_INTR);
+		schedule_work(&qw->cca_wq);
+	}
+}
+
+/*
+ * Register and enable the MuC->lhost SCS/CCA interrupt.
+ * Returns 0 on success, -1 if the handler could not be installed.
+ */
+static int qdrv_init_cca_irqhandler(struct qdrv_wlan *qw)
+{
+	struct int_handler int_handler;
+
+	int_handler.handler = qdrv_cca_irqhandler;
+	int_handler.arg1 = qw;
+	int_handler.arg2 = NULL;
+
+	if (qdrv_mac_set_handler(qw->mac, RUBY_M2L_IRQ_LO_SCS, &int_handler) != 0) {
+		DBGPRINTF_E("Could not set cca irq handler\n");
+		return -1;
+	}
+
+	qdrv_mac_enable_irq(qw->mac, RUBY_M2L_IRQ_LO_SCS);
+
+	return 0;
+}
+
+/* Failure strings indexed by (meas_reason - QTN_MEAS_REASON_OFF_CHANNEL_UNSUPPORT);
+ * see the error path in meas_work() */
+static char *meas_err_msg[] = {
+	"off-channel not supported",
+	"duration too short for measurement",
+	"macfw timer scheduled fail",
+	"measurement type unsupport"
+};
+
+/*
+ * Workqueue handler that post-processes a completed 802.11h/k measurement
+ * from the MuC: converts the raw per-type intermediate data (CCA, RPI,
+ * basic, channel load, noise histogram) into the report fields of
+ * ic_measure_info and finishes the measurement. On failure the reason is
+ * printed (via meas_err_msg) and the measurement is refused.
+ */
+static void meas_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, meas_wq);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_meas_chan_info *meas_info = sp->chan_meas_lhost;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_global_measure_info *ic_meas_info = &ic->ic_measure_info;
+
+	/* a discarded measurement is silently dropped */
+	if (ic_meas_info->status == MEAS_STATUS_DISCRAD) {
+		ic_meas_info->status = MEAS_STATUS_IDLE;
+		return;
+	}
+
+	if (meas_info->meas_reason == QTN_MEAS_REASON_SUCC) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "measurement success\n");
+		switch (meas_info->meas_type) {
+		case QTN_MEAS_TYPE_CCA:
+		{
+			u_int16_t cca_result;
+
+			/* scale busy/try ratio into the 0..255 report range */
+			cca_result = (uint16_t)meas_info->inter_data.cca_and_chanload.cca_busy_ms;
+			cca_result = cca_result * 1000 / meas_info->inter_data.cca_and_chanload.cca_try_ms;
+			cca_result = cca_result * 255 / 1000;
+			ic_meas_info->results.cca = (u_int8_t)cca_result;
+
+			break;
+		}
+		case QTN_MEAS_TYPE_RPI:
+		{
+			u_int32_t rpi_sum;
+			u_int32_t i;
+
+			/* normalize the 8 RPI bins so they sum to ~255 */
+			rpi_sum = 0;
+			for (i = 0; i < 8; i++)
+				rpi_sum += meas_info->inter_data.rpi_counts[i];
+			if (rpi_sum == 0) {
+				memset(ic_meas_info->results.rpi, 0, sizeof(ic_meas_info->results.rpi));
+			} else {
+				for (i = 0; i < 8; i++)
+					ic_meas_info->results.rpi[i] = (u_int8_t)((meas_info->inter_data.rpi_counts[i] * 255) / rpi_sum);
+			}
+
+			break;
+		}
+		case QTN_MEAS_TYPE_BASIC:
+		{
+			int radar_num;
+
+			/* flag radar if detections increased during the measurement */
+			radar_num = ic->ic_radar_detections_num(ic_meas_info->param.basic.channel);
+			if ((radar_num >= 0) && ((radar_num - meas_info->inter_data.basic_radar_num) > 0))
+				ic_meas_info->results.basic |=  IEEE80211_MEASURE_BASIC_REPORT_RADAR;
+			ic_meas_info->results.basic |= meas_info->inter_data.basic;
+			break;
+		}
+		case QTN_MEAS_TYPE_CHAN_LOAD:
+		{
+			u_int16_t cca_result;
+
+			/* same 0..255 scaling as the CCA case */
+			cca_result = (uint16_t)meas_info->inter_data.cca_and_chanload.cca_busy_ms;
+			cca_result = cca_result * 1000 / meas_info->inter_data.cca_and_chanload.cca_try_ms;
+			cca_result = cca_result * 255 / 1000;
+			ic_meas_info->results.chan_load = (u_int8_t)cca_result;
+
+			break;
+		}
+		case QTN_MEAS_TYPE_NOISE_HIS:
+		{
+			int32_t local_noise = 0;
+			struct ieee80211_phy_stats phy_stats;
+
+			if (ic->ic_get_phy_stats
+					&& !ic->ic_get_phy_stats(ic->ic_dev, ic, &phy_stats, 0)) {
+				local_noise = (int32_t)phy_stats.rx_noise;
+			}
+
+			/* ANPI from driver noise (tenths of dBm, magnitude only);
+			 * IPI densities are not collected and reported as zero */
+			ic_meas_info->results.noise_his.anpi = ABS(local_noise) / 10;
+			memset(&ic_meas_info->results.noise_his.ipi, 0,
+					sizeof(ic_meas_info->results.noise_his.ipi));
+
+			break;
+		}
+		default:
+			break;
+		}
+
+		ic->ic_finish_measurement(ic, 0);
+	} else {
+		/* NOTE(review): assumes meas_reason is within the meas_err_msg range */
+		printk("measurement fail:%s\n",meas_err_msg[meas_info->meas_reason - QTN_MEAS_REASON_OFF_CHANNEL_UNSUPPORT]);
+		ic->ic_finish_measurement(ic, IEEE80211_CCA_REPMODE_REFUSE);
+	}
+	ic_meas_info->status = MEAS_STATUS_IDLE;
+}
+
+/* MuC->lhost measurement interrupt: defer all processing to meas_work */
+static void qdrv_meas_irqhandler(void *arg1, void *arg2)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)arg1;
+
+	schedule_work(&qw->meas_wq);
+}
+
+/*
+ * Register and enable the MuC->lhost measurement interrupt.
+ * Returns 0 on success, -1 if the handler could not be installed.
+ */
+static int qdrv_init_meas_irqhandler(struct qdrv_wlan *qw)
+{
+	struct int_handler int_handler;
+
+	int_handler.handler = qdrv_meas_irqhandler;
+	int_handler.arg1 = qw;
+	int_handler.arg2 = NULL;
+
+	if (qdrv_mac_set_handler(qw->mac, RUBY_M2L_IRQ_LO_MEAS, &int_handler) != 0) {
+		DBGPRINTF_E("Could not set measurement irq handler\n");
+		return -1;
+	}
+
+	qdrv_mac_enable_irq(qw->mac, RUBY_M2L_IRQ_LO_MEAS);
+
+	return 0;
+}
+
+#ifdef QTN_BG_SCAN
+/* TSF-log checkpoint names for bgscan debug prints (scan_work);
+ * terminated by a NULL log_name sentinel */
+static struct off_chan_tsf_dbg scan_tsf_index_name[] = {
+		{SCAN_CHAN_TSF_LHOST_HOSTLINK_IOCTL,		"host_send_ioctl"},
+		{SCAN_CHAN_TSF_MUC_IOCTL_PROCESS,		"muc_ioctl_proc"},
+		{SCAN_CHAN_TSF_MUC_SEND_START_FRM,		"muc_send_start"},
+		{SCAN_CHAN_TSF_MUC_SEND_START_FRM_DONE,		"muc_start_done"},
+		{SCAN_CHAN_TSF_MUC_GOTO_OFF_CHAN,		"muc_goto_offchan"},
+		{SCAN_CHAN_TSF_MUC_GOTO_OFF_CHAN_DONE,		"muc_offchan_done"},
+		{SCAN_CHAN_TSF_MUC_SEND_PRBREQ_FRM,		"muc_send_prbreq"},
+		{SCAN_CHAN_TSF_MUC_SEND_PRBREQ_FRM_DONE,	"muc_prbreq_done"},
+		{SCAN_CHAN_TSF_MUC_GOTO_DATA_CHAN,		"muc_goto_datachan"},
+		{SCAN_CHAN_TSF_MUC_GOTO_DATA_CHAN_DONE,		"muc_datachan_done"},
+		{SCAN_CHAN_TSF_LHOST_INTERRUPT,			"host_interrupt"},
+		{SCAN_CHAN_TSF_LHOST_SCANWORK,			"host_scanwork"},
+		{0,NULL}
+};
+/*
+ * Workqueue handler for bgscan completion debug: resets the shared status
+ * on MuC failure, or (debug level 2) dumps per-checkpoint TSF deltas.
+ * Only scheduled by qdrv_scan_irqhandler in those two cases.
+ */
+static void scan_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, scan_wq);
+	struct ieee80211com *ic = &qw->ic;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scan_chan_info *scan_host = sp->chan_scan_lhost;
+	struct qtn_off_chan_info *off_chan_info = &scan_host->base;
+
+	QDRV_SCAN_LOG_TSF(scan_host, SCAN_CHAN_TSF_LHOST_SCANWORK);
+
+	if (off_chan_info->muc_status == QTN_SCAN_CHAN_MUC_FAILED) {
+		if (ic->ic_qtn_bgscan.debug_flags >= 1) {
+			printk("BG_SCAN: scan channel %u failed!\n",
+				QTNCHAN_TO_IEEENUM(off_chan_info->channel));
+		}
+		/* recover so the next bgscan attempt can proceed */
+		off_chan_info->muc_status = QTN_SCAN_CHAN_MUC_IDLE;
+	} else if (ic->ic_qtn_bgscan.debug_flags == 2) {
+		u32 tsf_hi;
+		u32 tsf_lo;
+		u32 delta;
+		u32 cur_index;
+		u32 pre_index = 0;
+		int i;
+
+		/* print each TSF checkpoint and the delta to the previous one */
+		for (i = 0; scan_tsf_index_name[i].log_name; i++) {
+			cur_index = scan_tsf_index_name[i].pos_index;
+			tsf_hi = U64_HIGH32(scan_host->tsf[cur_index]);
+			tsf_lo = U64_LOW32(scan_host->tsf[cur_index]);
+			if (i) {
+				delta = (u32)(scan_host->tsf[cur_index] -
+						scan_host->tsf[pre_index]);
+			} else {
+				delta = 0;
+				printk("\n\nBGSCAN_tsf:\n");
+			}
+			pre_index = cur_index;
+			printk("  %s:    %08x_%08x (+%u)\n",
+					scan_tsf_index_name[i].log_name, tsf_hi, tsf_lo, delta);
+		}
+	}
+}
+
+/*
+ * MuC->lhost bgscan interrupt: on completion or failure clear the scan
+ * flag, and schedule scan_work only for the failure / debug-dump cases.
+ * NOTE(review): IEEE80211_F_SCAN is cleared here without IEEE80211_LOCK_IRQ
+ * (it is set under the lock in qdrv_bgscan_channel) -- presumably safe in
+ * this IRQ context; confirm.
+ */
+static void qdrv_scan_irqhandler(void *arg1, void *arg2)
+{
+	struct qdrv_wlan *qw = arg1;
+	struct ieee80211com *ic = &qw->ic;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scan_chan_info *scan_host = sp->chan_scan_lhost;
+	u32 muc_status = scan_host->base.muc_status;
+
+	if (muc_status == QTN_SCAN_CHAN_MUC_COMPLETED
+			|| muc_status == QTN_SCAN_CHAN_MUC_FAILED) {
+		QDRV_SCAN_LOG_TSF(scan_host, SCAN_CHAN_TSF_LHOST_INTERRUPT);
+		ic->ic_flags &= ~IEEE80211_F_SCAN;
+		if (muc_status == QTN_SCAN_CHAN_MUC_FAILED ||
+				(ic->ic_qtn_bgscan.debug_flags == 2)) {
+			schedule_work(&qw->scan_wq);
+		}
+	}
+}
+
+/*
+ * Register and enable the MuC->lhost bgscan interrupt.
+ * Returns 0 on success, -1 if the handler could not be installed.
+ */
+static int qdrv_init_scan_irqhandler(struct qdrv_wlan *qw)
+{
+	struct int_handler int_handler;
+
+	int_handler.handler = qdrv_scan_irqhandler;
+	int_handler.arg1 = qw;
+	int_handler.arg2 = NULL;
+
+	if (qdrv_mac_set_handler(qw->mac, RUBY_M2L_IRQ_LO_SCAN, &int_handler) != 0) {
+		DBGPRINTF_E("BG_SCAN: could not set irq handler\n");
+		return -1;
+	}
+
+	qdrv_mac_enable_irq(qw->mac, RUBY_M2L_IRQ_LO_SCAN);
+
+	return 0;
+}
+#endif /* QTN_BG_SCAN */
+
+/*
+ * Set the qosnull frame to sp->ocac_lhost
+ * NOTE: MUST be called with a reference to the node entry within
+ * the SKB CB structure, and free the reference to the node entry
+ * after this call.
+ * Return 0 for success or -1 on failure.
+ * If failed, skb must be freed by the caller.
+ */
+static int qdrv_ocac_set_frame(struct ieee80211com *ic,
+		struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_ocac_info *ocac_info = sp->ocac_lhost;
+
+	/* idempotent: a previously installed qosnull frame is kept as-is */
+	if (ocac_info->qosnull_txdesc_host) {
+		OCACDBG(OCACLOG_NOTICE, "qosnull frame exists\n");
+		return 0;
+	}
+
+	if (!skb) {
+		OCACDBG(OCACLOG_WARNING, "set frame error - skb is null\n");
+		return -1;
+	}
+
+	/* convert skb into an OCS frame and store its descriptors directly
+	 * in the shared OCAC info */
+	if (qdrv_wlan_get_ocs_frame(ic, ni, skb,
+			&ocac_info->qosnull_txdesc_host, &ocac_info->qosnull_txdesc_bus,
+			&ocac_info->qosnull_frame_len, &ocac_info->tx_node_idx)) {
+		OCACDBG(OCACLOG_WARNING, "set frame error - no ocs frame\n");
+		return -1;
+	}
+
+	OCACDBG(OCACLOG_VERBOSE, "set qosnull frame successfully\n");
+
+	return 0;
+}
+
+/*
+ * Release the OCAC qosnull frame. Unless force is set, refuses while an
+ * off-channel CAC is running. Returns 0 on success, -1 when refused.
+ */
+static int qdrv_ocac_release_frame(struct ieee80211com *ic, int force)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_ocac_info *ocac_info = sp->ocac_lhost;
+
+	if (!force && ic->ic_ocac.ocac_running) {
+		OCACDBG(OCACLOG_WARNING, "release frame error - CAC in progress!\n");
+		return -1;
+	}
+
+	if (ocac_info->qosnull_txdesc_host) {
+		qdrv_wlan_release_ocs_frame(ic, ocac_info->qosnull_txdesc_host);
+		ocac_info->qosnull_txdesc_host = 0;
+		ocac_info->qosnull_txdesc_bus = 0;
+	}
+
+	OCACDBG(OCACLOG_VERBOSE, "qosnull frame released.\n");
+
+	return 0;
+}
+
+/* Forward an OCAC state-IE update (state/param) to the MuC via hostlink. */
+static void qdrv_update_ocac_state_ie(struct ieee80211com *ic, uint8_t state, uint8_t param)
+{
+	qdrv_hostlink_update_ocac_state_ie(container_of(ic, struct qdrv_wlan, ic),
+			state, param);
+}
+
+/*
+ * Start or stop off-channel CAC (seamless DFS) on the MuC.
+ * chan != NULL: fill the shared ocac_lhost block with the configured OCAC
+ * parameters for that channel and start; chan == NULL: clear the
+ * off-channel and stop. Either way the new state is pushed to the MuC
+ * with qdrv_hostlink_set_ocac().
+ * Returns 0 on success, -1 on a duplicate request, skb/frame setup
+ * failure, or hostlink error.
+ */
+static int qdrv_set_ocac(struct ieee80211vap *vap, struct ieee80211_channel *chan)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_ocac_info *ocac_info = sp->ocac_lhost;
+	struct sk_buff *skb = NULL;
+	uint32_t off_channel;
+	struct ieee80211_ocac_params *ocac_params = &ic->ic_ocac.ocac_cfg.ocac_params;
+
+	if (chan) {
+		/* start ocac in MuC */
+		off_channel = qdrv_set_channel_setup(ic, chan);
+		/* Same encoded channel as already programmed: count and skip. */
+		if (off_channel == ocac_info->off_channel) {
+			ic->ic_ocac.ocac_counts.skip_set_run++;
+			OCACDBG(OCACLOG_VERBOSE, "duplicate setting, skip it!\n");
+			return -1;
+		}
+		/* Traffic control needs a qosnull frame; build it once. */
+		if (ocac_params->traffic_ctrl && !ocac_info->qosnull_txdesc_host) {
+			/* Set qosnull frame */
+			ieee80211_ref_node(ni);
+			skb = ieee80211_get_qosnulldata(ni, WMM_AC_VO);
+			if (!skb) {
+				ieee80211_free_node(ni);
+				OCACDBG(OCACLOG_WARNING, "alloc skb error!\n");
+				ic->ic_ocac.ocac_counts.alloc_skb_error++;
+				return -1;
+			}
+			if (qdrv_ocac_set_frame(ic, ni, skb)) {
+				/* set_frame failed, so the skb is still ours to free */
+				dev_kfree_skb_irq(skb);
+				ieee80211_free_node(ni);
+				OCACDBG(OCACLOG_WARNING, "set ocs frame error!\n");
+				ic->ic_ocac.ocac_counts.set_frame_error++;
+				return -1;
+			}
+			ieee80211_free_node(ni);
+		}
+		/* Copy the configured OCAC parameters into the MuC-shared block. */
+		ocac_info->freq_band = qdrv_set_channel_freqband_setup(ic, chan);
+		ocac_info->off_channel = off_channel;
+		ocac_info->secure_dwell =  ocac_params->secure_dwell_ms;
+		ocac_info->threshold_fat =  ocac_params->thresh_fat;
+		ocac_info->threshold_traffic =  ocac_params->thresh_traffic;
+		ocac_info->threshold_fat_dec =  ocac_params->thresh_fat_dec;
+		ocac_info->traffic_ctrl =  ocac_params->traffic_ctrl;
+		ocac_info->offset_txhalt =  ocac_params->offset_txhalt;
+		ocac_info->offset_offchan =  ocac_params->offset_offchan;
+		/* Weather-radar channels use a (longer) dedicated dwell time. */
+		if (ieee80211_is_on_weather_channel(ic, chan))
+			ocac_info->dwell_time = ocac_params->wea_dwell_time_ms;
+		else
+			ocac_info->dwell_time = ocac_params->dwell_time_ms;
+		ic->ic_ocac.ocac_counts.set_run++;
+
+		if (ocac_params->traffic_ctrl)
+			ic->ic_update_ocac_state_ie(ic, OCAC_STATE_ONGOING, 0);
+	} else {
+		/* stop ocac in MuC */
+		if (ocac_info->off_channel == 0) {
+			OCACDBG(OCACLOG_VERBOSE, "duplicate setting, skip it!\n");
+			ic->ic_ocac.ocac_counts.skip_set_pend++;
+			return -1;
+		}
+		ocac_info->off_channel = 0;
+		ic->ic_ocac.ocac_counts.set_pend++;
+
+		if (ocac_params->traffic_ctrl)
+			ic->ic_update_ocac_state_ie(ic, OCAC_STATE_NONE, 0);
+	}
+
+	/* Tell the MuC about the new ocac_lhost contents (bus address). */
+	if (qdrv_hostlink_set_ocac(qw, sp->ocac_bus) < 0) {
+		ic->ic_ocac.ocac_counts.hostlink_err++;
+		OCACDBG(OCACLOG_WARNING, "hostlink set seamless dfs error!\n");
+		return -1;
+	}
+	ic->ic_ocac.ocac_counts.hostlink_ok++;
+
+	OCACDBG(OCACLOG_VERBOSE, "hostlink set seamless dfs succeed. off-chan: %u\n",
+			chan ? chan->ic_ieee : 0);
+
+	return 0;
+}
+
+/*
+ * Start the measurement described by ic->ic_measure_info on the MuC.
+ * The request (type, channel, duration, start TSF) is marshalled into the
+ * shared chan_meas_lhost block and triggered with a hostlink call.
+ * Returns 0 on success, -EINVAL for an unknown type / bad channel /
+ * zero duration, -EFAULT when the MuC reports an error.
+ */
+int qtn_do_measurement(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_meas_chan_info *meas_info = sp->chan_meas_lhost;
+	struct ieee80211_global_measure_info *ic_meas_info = &ic->ic_measure_info;
+	int ret;
+
+	meas_info->meas_reason = 0;
+	meas_info->work_channel = qdrv_set_channel_setup(ic, ic->ic_curchan);
+
+	switch (ic_meas_info->type) {
+	case IEEE80211_CCA_MEASTYPE_BASIC:
+	{
+		struct ieee80211_channel *meas_channel;
+		u_int8_t ieee_ch;
+
+		/* channel 0 means current channel */
+		if (ic_meas_info->param.basic.channel == 0)
+			ieee_ch = ic->ic_curchan->ic_ieee;
+		else
+			ieee_ch = ic_meas_info->param.basic.channel;
+
+		meas_channel = findchannel(ic, ieee_ch, ic->ic_des_mode);
+		if (NULL == meas_channel) {
+			return -EINVAL;
+		}
+
+		if (ic_meas_info->param.basic.duration_tu == 0) {
+			return -EINVAL;
+		}
+
+		meas_info->meas_type = QTN_MEAS_TYPE_BASIC;
+		meas_info->meas_channel = qdrv_set_channel_setup(ic, meas_channel);
+		meas_info->meas_dur_ms = IEEE80211_TU_TO_MS(ic_meas_info->param.basic.duration_tu);
+		meas_info->meas_start_tsf = ic_meas_info->param.basic.tsf;
+		/* Snapshot the radar count so the MuC can report the delta. */
+		meas_info->inter_data.basic_radar_num = ic->ic_radar_detections_num(ieee_ch);
+
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_CCA:
+	{
+		struct ieee80211_channel *meas_channel;
+		int ieee_ch;
+
+		/* channel 0 means current channel */
+		if (ic_meas_info->param.cca.channel == 0)
+			ieee_ch = ic->ic_curchan->ic_ieee;
+		else
+			ieee_ch = ic_meas_info->param.cca.channel;
+		meas_channel = findchannel(ic, ieee_ch, 0);
+		if (NULL == meas_channel)
+			return -EINVAL;
+
+		if (ic_meas_info->param.cca.duration_tu == 0)
+			return -EINVAL;
+
+		meas_info->meas_type = QTN_MEAS_TYPE_CCA;
+		meas_info->meas_channel = qdrv_set_channel_setup(ic, meas_channel);
+		meas_info->meas_dur_ms = IEEE80211_TU_TO_MS(ic_meas_info->param.cca.duration_tu);
+		meas_info->meas_start_tsf = ic_meas_info->param.cca.tsf;
+
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_RPI:
+	{
+		struct ieee80211_channel *meas_channel;
+		int ieee_ch;
+
+		/* channel 0 means current channel */
+		if (ic_meas_info->param.rpi.channel == 0)
+			ieee_ch = ic->ic_curchan->ic_ieee;
+		else
+			ieee_ch = ic_meas_info->param.rpi.channel;
+		meas_channel = findchannel(ic, ieee_ch, 0);
+		if (NULL == meas_channel)
+			return -EINVAL;
+
+		if (ic_meas_info->param.rpi.duration_tu == 0)
+			return -EINVAL;
+
+		meas_info->meas_type = QTN_MEAS_TYPE_RPI;
+		meas_info->meas_channel = qdrv_set_channel_setup(ic, meas_channel);
+		meas_info->meas_dur_ms = IEEE80211_TU_TO_MS(ic_meas_info->param.rpi.duration_tu);
+		meas_info->meas_start_tsf = ic_meas_info->param.rpi.tsf;
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CH_LOAD:
+	{
+		struct ieee80211_channel *meas_channel;
+		int ieee_ch;
+
+		/* op_class 0 + channel 0 means current channel */
+		if (ic_meas_info->param.chan_load.op_class == 0 &&
+				ic_meas_info->param.chan_load.channel == 0)
+			ieee_ch = ic->ic_curchan->ic_ieee;
+		else
+			ieee_ch = ic_meas_info->param.chan_load.channel;
+
+		meas_channel = findchannel(ic, ieee_ch, 0);
+		if (NULL == meas_channel)
+			return -EINVAL;
+
+		if (ic_meas_info->param.chan_load.duration_tu == 0)
+			return -EINVAL;
+
+		meas_info->meas_type = QTN_MEAS_TYPE_CHAN_LOAD;
+		meas_info->meas_channel = qdrv_set_channel_setup(ic, meas_channel);
+		meas_info->meas_dur_ms = IEEE80211_TU_TO_MS(ic_meas_info->param.chan_load.duration_tu);
+		meas_info->meas_start_tsf = 0;
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_NOISE:
+	{
+		struct ieee80211_channel *meas_channel;
+		int ieee_ch;
+
+		/* op_class 0 + channel 0 means current channel */
+		if (ic_meas_info->param.noise_his.op_class == 0 &&
+				ic_meas_info->param.noise_his.channel == 0)
+			ieee_ch = ic->ic_curchan->ic_ieee;
+		else
+			ieee_ch = ic_meas_info->param.noise_his.channel;
+
+		meas_channel = findchannel(ic, ieee_ch, 0);
+		if (NULL == meas_channel)
+			return -EINVAL;
+
+		if (ic_meas_info->param.noise_his.duration_tu == 0)
+			return -EINVAL;
+
+		meas_info->meas_type = QTN_MEAS_TYPE_NOISE_HIS;
+		meas_info->meas_channel = qdrv_set_channel_setup(ic, meas_channel);
+		meas_info->meas_dur_ms = IEEE80211_TU_TO_MS(ic_meas_info->param.noise_his.duration_tu);
+		meas_info->meas_start_tsf = 0;
+
+		break;
+	}
+	default:
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_WLAN,
+			"[%s]unsupported measurement type\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ic_meas_info->status = MEAS_STATUS_RUNNING;
+	ret = qdrv_hostlink_meas_chan(qw, sp->chan_meas_bus);
+	if (ret & QTN_HLINK_RC_ERR) {
+		/*
+		 * NOTE(review): assumes the MuC sets meas_reason to at least
+		 * QTN_MEAS_REASON_OFF_CHANNEL_UNSUPPORT on error; a smaller
+		 * value would index meas_err_msg[] out of bounds - confirm.
+		 */
+		printk("measurement fail:%s\n",
+				meas_err_msg[meas_info->meas_reason - QTN_MEAS_REASON_OFF_CHANNEL_UNSUPPORT]);
+		ic_meas_info->status = MEAS_STATUS_IDLE;
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qtn_do_measurement);
+
+/*
+ * Program the MuC to remain on 'off_chan' (e.g. for a TDLS off-channel
+ * operation) for 'duration' microseconds, starting at 'start_tsf' with the
+ * given timeout. Rejected (-1) while a scan is in progress, while radar
+ * sampling is not allowed, or in power-save duty mode.
+ * Returns 0 on success, -1 on any rejection or hostlink failure.
+ * NOTE(review): 'ni' is dereferenced below (peer MAC copy) but only 'ic'
+ * and 'off_chan' are NULL-checked - confirm callers always pass a node.
+ */
+static int qdrv_remain_on_channel(struct ieee80211com *ic,
+		struct ieee80211_node *ni, struct ieee80211_channel *off_chan,
+		int bandwidth, uint64_t start_tsf, uint32_t timeout, uint32_t duration, int flags)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_remain_chan_info *remain_info = sp->remain_chan_lhost;
+
+	if (!ic || !off_chan)
+		goto error;
+
+	if ((bandwidth < BW_HT20) || (bandwidth > BW_HT160))
+		goto error;
+
+	/* Off-channel work conflicts with an active (background) scan. */
+	if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+		|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+	) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "REMAIN CHANNEL %s:"
+			" Don't switch to off channel - scan in progress\n", __func__);
+		goto error;
+	}
+
+	if (!qdrv_radar_can_sample_chan()) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "REMAIN CHANNEL %s:"
+			" Don't switch to off channel - radar\n", __func__);
+		goto error;
+	}
+
+	/* sample channel not allowed in power-saving mode */
+	if (((ic->ic_opmode == IEEE80211_M_HOSTAP)
+#if defined(QBMPS_ENABLE)
+	     || (ic->ic_opmode == IEEE80211_M_STA)
+#endif
+	    ) && (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_DUTY)) {
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "REMAIN CHANNEL %s:"
+			" Don't switch to off channel - CoC idle\n", __func__);
+		goto error;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "remain on channel %u %dus\n",
+			off_chan->ic_ieee, duration);
+
+	/* Fill the MuC-shared request block. */
+	remain_info->start_tsf = start_tsf;
+	remain_info->timeout_usecs = timeout;
+	remain_info->duration_usecs = duration;
+	remain_info->freq_band = qdrv_set_channel_freqband_setup(ic, ic->ic_bsschan);
+	remain_info->data_channel = qdrv_set_channel_setup(ic, ic->ic_bsschan);
+	memcpy(remain_info->peer_mac, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+
+	/*
+	 * Encode the off channel with temporary flags: the TDLS off-channel
+	 * marker, plus a forced 20/40MHz scan width. The flags are restored
+	 * immediately after qdrv_set_channel_setup() reads them.
+	 */
+	off_chan->ic_ext_flags |= IEEE80211_CHAN_TDLS_OFF_CHAN;
+	if (bandwidth == BW_HT20) {
+		ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_20;
+		remain_info->off_channel = qdrv_set_channel_setup(ic, off_chan);
+		ic->ic_flags_ext &= ~IEEE80211_FEXT_SCAN_20;
+	} else if (bandwidth == BW_HT40) {
+		ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_40;
+		remain_info->off_channel = qdrv_set_channel_setup(ic, off_chan);
+		ic->ic_flags_ext &= ~IEEE80211_FEXT_SCAN_40;
+	} else {
+		remain_info->off_channel = qdrv_set_channel_setup(ic, off_chan);
+	}
+	off_chan->ic_ext_flags &= ~IEEE80211_CHAN_TDLS_OFF_CHAN;
+
+	remain_info->status = QTN_REM_CHAN_STATUS_HOST_IOCTL_SENT;
+	if (qdrv_hostlink_remain_chan(qw, sp->remain_chan_bus) < 0) {
+		remain_info->status = QTN_REM_CHAN_STATUS_IDLE;
+		DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "REMAIN CHANNEL %s:"
+			" hostlink set remain channel error!\n", __func__);
+		goto error;
+	}
+#if defined(QBMPS_ENABLE)
+	/* indicate sample channel is on-going */
+	ic->ic_flags_qtn |= IEEE80211_QTN_SAMP_CHAN;
+#endif
+	ic->ic_chan_switch_reason_record(ic, IEEE80211_CSW_REASON_TDLS_CS);
+
+	return 0;
+
+error:
+	return -1;
+}
+
+/* Push the local RTS/CTS protection setting down to the MuC via hostlink. */
+static void
+qdrv_use_rts_cts(struct ieee80211com *ic)
+{
+	qdrv_hostlink_use_rtscts(container_of(ic, struct qdrv_wlan, ic),
+			ic->ic_local_rts);
+}
+
+#if QTN_ENABLE_TRACE_BUFFER
+/*
+ * Print the MuC's circular trace buffer (oldest record first).
+ * sp->p_debug_1 points at the record array, sp->p_debug_2 at the current
+ * write index, sp->debug_1_arg holds the record count. When possible the
+ * buffer is snapshotted into a kmalloc'd copy so the MuC can keep writing
+ * while we print. The static snapshot buffer is allocated once and kept
+ * (the record count must not change - see NOTE below).
+ */
+static void
+qdrv_dump_trace_buffer(struct shared_params *sp)
+{
+	if (sp->p_debug_1 && sp->p_debug_2) {
+		int i;
+		/* NOTE: the number of records cannot dynamically change or buffer overflow may happen */
+		int records = sp->debug_1_arg;
+		static struct qtn_trace_record *p_local = NULL;
+		u_int32_t index = *((u_int32_t *)(muc_to_lhost((int)sp->p_debug_2))) + 1;
+		struct qtn_trace_record *p_first = (struct qtn_trace_record *)(muc_to_lhost)((int)sp->p_debug_1);
+		struct qtn_trace_record *p_entry;
+		struct qtn_trace_record *p_past = p_first + records;
+		if (records) {
+			size_t alloc_size = records * sizeof(struct qtn_trace_record);
+			if (p_local == NULL) {
+				p_local = kmalloc(alloc_size, GFP_KERNEL);
+			}
+			if (p_local) {
+				memcpy((void *)p_local, (void *)muc_to_lhost((int)sp->p_debug_1), alloc_size);
+				/*
+				 * Re-cache the index - it may have changed by this point, if alloc above
+				 * takes a while.
+				 */
+				index = *((u_int32_t *)(muc_to_lhost((int)sp->p_debug_2))) + 1;
+				p_first = p_local;
+				p_past = p_first + records;
+			}
+		}
+		/*
+		 * Bug fix: clamp the start index BEFORE deriving p_entry. The
+		 * stored index + 1 can equal 'records'; the old code computed
+		 * p_entry first, so the first record printed was read one
+		 * element past the end of the buffer. The cast also avoids a
+		 * signed/unsigned comparison.
+		 */
+		if (index >= (u_int32_t)records) {
+			index = 0;
+		}
+		p_entry = p_first + index;
+		printk("Trace buffer for %d records %p %p %d:\n", records, p_first,
+				(u_int32_t *)(muc_to_lhost((int)sp->p_debug_2)), index);
+		for (i = 0; i < records; i++) {
+			printk(" T:0x%08X,E:0x%08X,D:0x%08X", p_entry->tsf, p_entry->event, p_entry->data);
+			/* Two records per output line. */
+			if (i && (((i + 1) % 2) == 0)) {
+				printk("\n");
+			}
+			p_entry++;
+			if (p_entry >= p_past) {
+				p_entry = p_first;
+			}
+		}
+	}
+}
+
+/*
+ * Busy-wait for 'ms' milliseconds (mdelay spins; not for long waits).
+ * NOTE(review): qdrv_halt_muc() below calls qtn_sleep_ms(), not this
+ * helper - confirm whether that is intentional; this function appears
+ * unused within this file.
+ */
+static void qdrv_sleep_ms(int ms)
+{
+	mdelay(ms);
+}
+#endif
+
+#if QTN_ENABLE_TRACE_BUFFER
+/*
+ * Walk ifindexes to find the first net_device named "wifi*".
+ * Panics if the walk runs out of devices before a match is found (same
+ * behaviour as the duplicated loops this helper replaces). Returns the
+ * device with a reference held; the caller must dev_put() it.
+ */
+static struct net_device *qdrv_get_wifi_dev(void)
+{
+	struct net_device *dev;
+	int i;
+
+	for (i = 1; ; i++) {
+		dev = dev_get_by_index(&init_net, i);
+		if (dev == NULL) {
+			panic("Can't find a network device\n");
+		}
+		if (strncmp(dev->name, "wifi", 4) == 0) {
+			return dev;
+		}
+		dev_put(dev);
+	}
+}
+#endif
+
+/*
+ * Dump the MuC trace buffer for the first "wifi" device.
+ * Skipped when the MAC is already marked dead, unless 'force' is non-zero.
+ * No-op when QTN_ENABLE_TRACE_BUFFER is disabled.
+ */
+void qdrv_muc_traceback(int force)
+{
+#if QTN_ENABLE_TRACE_BUFFER
+	struct net_device *dev = qdrv_get_wifi_dev();
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qdrv_mac *mac = qw->mac;
+
+	if (!mac->dead || force) {
+		qdrv_dump_trace_buffer(sp);
+	}
+	dev_put(dev);
+#endif
+}
+EXPORT_SYMBOL(qdrv_muc_traceback);
+
+/*
+ * Mark the MuC dead, interrupt it so it halts, then dump its trace buffer.
+ * Safe to call repeatedly: a MAC already marked dead is not interrupted
+ * again. No-op when QTN_ENABLE_TRACE_BUFFER is disabled.
+ */
+void qdrv_halt_muc(void)
+{
+#if QTN_ENABLE_TRACE_BUFFER
+	struct net_device *dev = qdrv_get_wifi_dev();
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qdrv_mac *mac = qw->mac;
+
+	if (!mac->dead) {
+		printk("Interrupting MuC to halt (MAC:%p)\n",  mac);
+		mac->dead = 1;
+		qdrv_mac_interrupt_muc_high(mac);
+		/* Give the MuC time to record its final state before dumping. */
+		qtn_sleep_ms(100);
+		qdrv_dump_trace_buffer(sp);
+	} else {
+		printk("MAC already dead, not triggering another trace\n");
+	}
+	dev_put(dev);
+#endif
+}
+EXPORT_SYMBOL(qdrv_halt_muc);
+
+/* Placeholder callback: emits trace prints only, no functional body. */
+static void qtn_set_coverageclass(struct ieee80211com *ic)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+}
+
+/* Placeholder callback: trace prints only; always returns 0 (no MHz-to-IEEE
+ * channel conversion is performed here). */
+static u_int qtn_mhz2ieee(struct ieee80211com *ic, u_int freq, u_int flags)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/* Placeholder callback: trace prints only; always returns NULL (VAPs are
+ * created elsewhere in this driver, not through this hook). */
+static struct ieee80211vap *qtn_vap_create(struct ieee80211com *ic,
+	const char *name, int unit, int opmode, int flags, struct net_device *dev)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return NULL;
+}
+
+/* Placeholder callback: trace prints only, no functional body. */
+static void qtn_vap_delete(struct ieee80211vap *iv)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+/* Return the qdrv VAP index stored in the enclosing qdrv_vap. */
+static uint8_t qdrv_get_vap_idx(struct ieee80211vap *vap)
+{
+	return container_of(vap, struct qdrv_vap, iv)->qv_vap_idx;
+}
+
+/* Placeholder slot-time update callback: trace prints only. */
+static void qdrv_wlan_80211_updateslot(struct ieee80211com *ic)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+/*
+ * Start callback: re-enable the MAC rx interrupt in case it was masked.
+ * Always returns 0.
+ */
+static int qdrv_wlan_80211_start(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	/* Just in case re-enable rx interrupts */
+	qdrv_mac_enable_irq(qw->mac, qw->rxirq);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/* Placeholder reset callback: trace prints only; always returns 0. */
+static int qdrv_wlan_80211_reset(struct ieee80211com *ic)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+ * Decide whether a report frame may be sent while CAC/radar rules apply.
+ * Returns 1 when transmission may proceed. Returns 0 - after dropping the
+ * caller's node reference - when the flag pointer is missing or the frame
+ * must be suppressed. A set *allow_rpt_frm acts as a one-shot permission:
+ * it is consumed (cleared) and the frame is allowed through.
+ */
+static int qdrv_allow_report_frames_in_cac(struct ieee80211com *ic, struct ieee80211_node *ni,
+						const int user_conf_switch, bool *allow_rpt_frm)
+{
+	if (allow_rpt_frm == NULL) {
+		ieee80211_free_node(ni);
+		return 0;
+	}
+
+	if (!user_conf_switch)
+		return 1;
+
+	if (*allow_rpt_frm) {
+		/* Consume the one-shot permission and let this frame through. */
+		*allow_rpt_frm = false;
+		return 1;
+	}
+
+	if (ieee80211_is_chan_radar_detected(ic->ic_curchan) ||
+			ieee80211_is_chan_cac_required(ic->ic_curchan)) {
+		ieee80211_free_node(ni);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Transmit an 802.11 encapped management or data frame via the management path.
+ * This function must be called with a reference to the node structure.
+ * The reference is released here on every path: the CAC-suppression helper
+ * frees it when it returns 0, and the normal path frees it after queueing.
+ * Always returns 0.
+ */
+static int qdrv_wlan_80211_send(struct ieee80211com *ic, struct ieee80211_node *ni,
+				struct sk_buff *skb, uint32_t priority, uint8_t is_mgmt)
+{
+	struct net_device *vdev;
+	struct qdrv_vap *qv;
+
+
+	/* STA strict DFS mode: may suppress measurement reports during CAC. */
+	if (!qdrv_allow_report_frames_in_cac(ic, ni, ic->sta_dfs_info.sta_dfs_strict_mode,
+						&ic->sta_dfs_info.allow_measurement_report)) {
+		return 0;
+	}
+
+#ifdef CONFIG_QHOP
+	/* QHop repeater roles apply the same suppression to their own flags. */
+	if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS) {
+		if (!qdrv_allow_report_frames_in_cac(ic, ni, ic->rbs_mbs_dfs_info.rbs_mbs_allow_tx_frms_in_cac,
+					&ic->rbs_mbs_dfs_info.rbs_allow_qhop_report)) {
+			return 0;
+		}
+	}
+
+	if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS) {
+		if (!qdrv_allow_report_frames_in_cac(ic, ni, ic->rbs_mbs_dfs_info.rbs_mbs_allow_tx_frms_in_cac,
+					&ic->rbs_mbs_dfs_info.mbs_allow_csa)) {
+			return 0;
+		}
+	}
+#endif
+	qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	vdev = qv->ndev;
+
+	/* Tag the skb for the VAP's netdev and the destination node. */
+	skb->dev = vdev;
+	skb->dest_port = ni->ni_node_idx;
+	skb->priority = priority;
+
+	QTN_SKB_CB_NI(skb) = ni;
+
+	if (is_mgmt)
+		QTN_SKB_ENCAP(skb) = QTN_SKB_ENCAP_80211_MGMT;
+	else
+		QTN_SKB_ENCAP(skb) = QTN_SKB_ENCAP_80211_DATA;
+
+	/* Pre-encapped 802.11 frames must not be AMSDU-aggregated. */
+	M_FLAG_SET(skb, M_NO_AMSDU);
+
+	dev_queue_xmit(skb);
+
+	ieee80211_free_node(ni);
+
+	return 0;
+}
+
+/*
+ * Notify the MuC (via an IOCTL_DEV_DISASSOC hostlink ioctl) that a node has
+ * disassociated, and clear the node's dynamic-VLAN binding. Also wakes the
+ * host when WoWLAN is armed, and in STA strict DFS mode downgrades the BSS
+ * channel's availability status.
+ */
+static void qdrv_wlan_80211_disassoc(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct qdrv_node *qn = container_of(ni, struct qdrv_node, qn_node);
+	struct qtn_vlan_dev *vdev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+	struct host_ioctl *ioctl;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE,
+		"Node %02x:%02x:%02x:%02x:%02x:%02x %s for BSSID %02x:%02x:%02x:%02x:%02x:%02x\n",
+		DBGMACFMT(ni->ni_macaddr),
+		"disassociated",	/* bug fix: log word was misspelled "dissociated" */
+		DBGMACFMT(ni->ni_bssid));
+
+	if (ic->ic_wowlan.host_state) {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_PKT_RX,
+				"%s WoWLAN: Wake up host\n", __func__);
+		wowlan_wakeup_host();
+	}
+
+	/* Allocate the ioctl slot and a DMA-able args block for the MuC. */
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate DISASSOC message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+		if (ieee80211_is_chan_available(ic->ic_bsschan)) {
+			ic->ic_mark_channel_availability_status(ic, ic->ic_bsschan,
+					IEEE80211_CHANNEL_STATUS_NON_AVAILABLE);
+		}
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(1)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+	memset(args, 0, sizeof(*args));
+
+	/* Identify the departing node by BSSID, MAC and association ID. */
+	memcpy(args->ni_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	memcpy(args->ni_macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	args->ni_associd = IEEE80211_NODE_AID(ni);
+
+	ioctl->ioctl_command = IOCTL_DEV_DISASSOC;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+
+	if (qn->qn_node_idx)
+		switch_vlan_clr_node(vdev, qn->qn_node_idx);
+}
+
+/*
+ * Copy capability info from the peer's Quantenna assoc IE into the node args.
+ * Envy-era IEs carry the flags in qtn_ie_flags; newer IEs in qtn_ie_my_flags.
+ * V5+ IEs additionally carry the software version and rate-training words.
+ */
+static void qdrv_wlan_qtnie_parse(struct ieee80211_node *ni, struct ieee80211com *ic,
+					struct qtn_node_args *args)
+{
+	struct ieee80211_ie_qtn *qtnie = (struct ieee80211_ie_qtn *)ni->ni_qtn_assoc_ie;
+
+	args->ni_qtn_ie_flags = IEEE80211_QTN_TYPE_ENVY(qtnie) ?
+			qtnie->qtn_ie_flags : qtnie->qtn_ie_my_flags;
+
+	if (IEEE80211_QTN_IE_GE_V5(qtnie)) {
+		/* Negotiate down to the lower of the two software versions. */
+		args->ni_ver_sw = min(ni->ni_ver_sw, ic->ic_ver_sw);
+		args->ni_rate_train = ni->ni_rate_train;
+		args->ni_rate_train_peer = ntohl(get_unaligned(&qtnie->qtn_ie_rate_train));
+	}
+}
+
+/*
+ * Notify the MuC of a new (or re-) association: marshal the node's rates,
+ * HT/VHT capabilities, BA state and Quantenna IE data into a DMA block and
+ * send an IOCTL_DEV_NEWASSOC hostlink ioctl. On success the MuC writes the
+ * allocated node index back into args->ni_node_idx, which is then bound to
+ * the node and, for dynamic VLAN, to the VLAN switch.
+ */
+static void qdrv_wlan_80211_newassoc(struct ieee80211_node *ni, int isnew)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct qdrv_node *qn = container_of(ni, struct qdrv_node, qn_node);
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	struct host_ioctl *ioctl;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+	struct qtn_vlan_dev *vdev = vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)];
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE,
+		"Node %02x:%02x:%02x:%02x:%02x:%02x %s for BSSID %02x:%02x:%02x:%02x:%02x:%02x\n",
+		DBGMACFMT(ni->ni_macaddr),
+		isnew ? "associated" : "reassociated",
+		DBGMACFMT(ni->ni_bssid));
+
+	/* Allocate the ioctl slot and a DMA-able args block for the MuC. */
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate NEWASSOC message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+	memset(args, 0, sizeof(*args));
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(2)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	/* Fresh association: reset the host/MuC shared per-node statistics. */
+	memset(ni->ni_shared_stats, 0, sizeof(*ni->ni_shared_stats));
+	args->ni_node_idx = 0;
+	args->ni_shared_stats = ni->ni_shared_stats_phys;
+	memcpy(args->ni_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	memcpy(args->ni_macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	args->ni_raw_bintval = ni->ni_raw_bintval;
+	args->ni_associd = IEEE80211_NODE_AID(ni);
+	args->ni_flags = ni->ni_flags;
+	args->ni_qtn_flags = ni->ni_qtn_flags;
+	args->ni_tdls_status = ni->tdls_status;
+	args->ni_vendor = ni->ni_vendor;
+	args->ni_bbf_disallowed = ni->ni_bbf_disallowed;
+	args->ni_std_bf_disallowed = ni->ni_std_bf_disallowed;
+	args->ni_uapsd = ni->ni_uapsd;
+	/* Legacy and HT rate sets, plus HT capability/operation elements. */
+	memcpy(args->ni_rates, ni->ni_rates.rs_rates, IEEE80211_RATE_MAXSIZE);
+	memcpy(args->ni_htrates, ni->ni_htrates.rs_rates, IEEE80211_HT_RATE_MAXSIZE);
+	args->ni_nrates = ni->ni_rates.rs_nrates;
+	args->ni_htnrates = ni->ni_htrates.rs_nrates;
+	memcpy(args->ni_htcap, &ni->ni_htcap,
+		sizeof(args->ni_htcap));
+	memcpy(args->ni_htinfo, &ni->ni_htinfo,
+		sizeof(args->ni_htinfo));
+	/* VHT data only when both sides are VHT-capable. */
+	if (IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+		memcpy(args->ni_vhtcap, &ni->ni_vhtcap,
+			sizeof(args->ni_vhtcap));
+		memcpy(args->ni_vhtop, &ni->ni_vhtop,
+			sizeof(args->ni_vhtop));
+		memcpy(args->ni_mu_grp, &ni->ni_mu_grp,
+			sizeof(args->ni_mu_grp));
+	}
+	args->ni_rsn_caps = ni->ni_rsn.rsn_caps;
+	args->rsn_ucastcipher = ni->ni_rsn.rsn_ucastcipher;
+	args->tdls_peer_associd = ni->tdls_peer_associd;
+	/* Automatic install of BA */
+	if (ni->ni_implicit_ba_valid) {
+		args->ni_implicit_ba_rx = ni->ni_implicit_ba;
+		args->ni_implicit_ba_tx = ni->ni_vap->iv_implicit_ba;
+		args->ni_implicit_ba_size = ni->ni_implicit_ba_size;
+	}
+
+	if (ni->ni_qtn_assoc_ie)
+		qdrv_wlan_qtnie_parse(ni, ic, args);
+
+	ioctl->ioctl_command = IOCTL_DEV_NEWASSOC;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = isnew;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+
+	/* The MuC fills in ni_node_idx; 0 means it could not allocate a node. */
+	if (args->ni_node_idx == 0) {
+		DBGPRINTF_E("[%pM] node alloc failed\n", ni->ni_macaddr);
+	} else {
+		uint32_t ni_node_idx = ni->ni_node_idx;
+
+		ieee80211_idx_add(ni, args->ni_node_idx);
+		/* Drop any stale sub-port bound to the node's previous index. */
+		if (((ni->ni_vap->iv_opmode == IEEE80211_M_WDS)
+				&& (ni_node_idx != 0))
+			|| (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP
+				&& ni_node_idx != ni->ni_vap->iv_bss->ni_node_idx)) {
+			qdrv_remove_invalid_sub_port(ni->ni_vap, ni_node_idx);
+		}
+
+		if (qv->iv.iv_opmode == IEEE80211_M_HOSTAP
+				&& QVLAN_IS_DYNAMIC(vdev)) {
+			qn->qn_node_idx = args->ni_node_idx;
+			switch_vlan_set_node(vdev, args->ni_node_idx, QVLAN_DEF_PVID); /* By default put a STA in VLAN 1 */
+		}
+	}
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/*
+ * Push updated per-node attributes (qtn flags, vendor, BBF disallow) to the
+ * MuC with an IOCTL_DEV_NODE_UPDATE hostlink ioctl. The node is identified
+ * by its MAC address.
+ */
+static void qdrv_wlan_80211_node_update(struct ieee80211_node *ni)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE,
+		"Node "DBGMACVAR" updated for BSSID "DBGMACVAR"\n",
+		DBGMACFMT(ni->ni_macaddr),
+		DBGMACFMT(ni->ni_bssid));
+
+	/* Allocate the ioctl slot and a DMA-able args block for the MuC. */
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate NODE_UPDATE message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+	memset(args, 0, sizeof(*args));
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(2)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	memcpy(args->ni_macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	args->ni_qtn_flags = ni->ni_qtn_flags;
+	args->ni_vendor = ni->ni_vendor;
+	args->ni_bbf_disallowed = ni->ni_bbf_disallowed;
+
+	ioctl->ioctl_command = IOCTL_DEV_NODE_UPDATE;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = 0;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/* Register the node with the software forwarding table and VLAN switch. */
+static void qdrv_wlan_register_node(struct ieee80211_node *ni)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+
+	fwt_sw_register_node(ni->ni_node_idx);
+	switch_vlan_register_node(IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+			vdev_tbl_lhost[QDRV_WLANID_FROM_DEVID(qv->devid)]);
+}
+
+/* Remove the node from the software forwarding table and VLAN switch. */
+static void qdrv_wlan_unregister_node(struct ieee80211_node *ni)
+{
+	fwt_sw_unregister_node(ni->ni_node_idx);
+	switch_vlan_unregister_node(IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx));
+}
+
+/*
+ * Ask the MuC to reset the queue-depth accounting for one node
+ * (IOCTL_DEV_RST_QUEUE_DEPTH). Returns silently if no ioctl slot is
+ * available; logs and returns if the DMA args block cannot be allocated.
+ */
+static void qdrv_wlan_80211_resetmaxqueue(struct ieee80211_node *ni)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+
+	ioctl = vnet_alloc_ioctl(qv);
+	if (ioctl == NULL)
+		return;
+
+	args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC);
+	if (args == NULL) {
+		DBGPRINTF_E("Failed to allocate message for resetting queue\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	memset(args, 0, sizeof(*args));
+
+	/* Identify the node by BSSID, MAC, AID and node index. */
+	memcpy(args->ni_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	memcpy(args->ni_macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	args->ni_associd = IEEE80211_NODE_AID(ni);
+	args->ni_node_idx = ni->ni_node_idx;
+
+	ioctl->ioctl_command = IOCTL_DEV_RST_QUEUE_DEPTH;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/*
+ *  Process setkey request: forward the key material and peer MAC to the
+ *  MuC with an IOCTL_DEV_SETKEY hostlink ioctl.
+ */
+static void qdrv_wlan_80211_setkey(struct ieee80211vap *vap,
+	const struct ieee80211_key *k, const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl = vnet_alloc_ioctl(qv);
+	struct qtn_key_args *args = NULL;
+	dma_addr_t args_dma;
+
+	if (ioctl != NULL)
+		args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+				&args_dma, GFP_DMA | GFP_ATOMIC);
+	if (ioctl == NULL || args == NULL) {
+		DBGPRINTF_E("Failed to allocate SETKEY message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	/* Copy the values over */
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(3)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	memcpy((u_int8_t *)&args->key, (u_int8_t *)k, sizeof(struct qtn_key));
+	memcpy(args->wk_addr, mac, IEEE80211_ADDR_LEN);
+
+	ioctl->ioctl_command = IOCTL_DEV_SETKEY;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/*
+ *  Process delkey request: tell the MuC to remove a key
+ *  (IOCTL_DEV_DELKEY). 'mac' may be NULL for a group/default key, in
+ *  which case no address is copied into the args.
+ */
+static void qdrv_wlan_80211_delkey(struct ieee80211vap *vap,
+	const struct ieee80211_key *k, const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl = vnet_alloc_ioctl(qv);
+	struct qtn_key_args *args = NULL;
+	dma_addr_t args_dma;
+
+	if (ioctl != NULL)
+		args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+				&args_dma, GFP_DMA | GFP_ATOMIC);
+	if (ioctl == NULL || args == NULL) {
+		DBGPRINTF_E( "Failed to allocate DELKEY message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	/* Copy the values over */
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(4)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	memcpy((u_int8_t *)&args->key, (u_int8_t *)k, sizeof(struct qtn_key));
+	if (mac != NULL)
+		memcpy(args->wk_addr, mac, IEEE80211_ADDR_LEN);
+
+	ioctl->ioctl_command = IOCTL_DEV_DELKEY;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/*
+ *  Process addba request: notify the MuC that a block-ack session has been
+ *  established for (node, tid). direction non-zero snapshots the TX BA
+ *  state (IOCTL_DEV_BA_ADDED_TX); zero snapshots the RX BA state
+ *  (IOCTL_DEV_BA_ADDED_RX). Out-of-range TIDs are ignored.
+ */
+static void qdrv_wlan_80211_process_addba(struct ieee80211_node *ni, int tid,
+				int direction)
+{
+	struct qtn_baparams_args *args = NULL;
+	dma_addr_t args_dma;
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+
+	/*
+	 * Bug fix: tid indexes the fixed-size ni_ba_tx[]/ni_ba_rx[] arrays,
+	 * so reject negative values as well as tid >= WME_NUM_TID (the old
+	 * check only guarded the upper bound).
+	 */
+	if (tid < 0 || tid >= WME_NUM_TID) {
+		return;
+	}
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate ADDBA message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(5)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	args->tid = tid;
+	memcpy(args->ni_addr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	args->type = IEEE80211_BA_IMMEDIATE;
+
+	/* Snapshot the BA state for the requested direction (was two
+	 * separate if (direction) blocks; consolidated). */
+	if (direction) {
+		args->state = ni->ni_ba_tx[tid].state;
+		args->start_seq_num = ni->ni_ba_tx[tid].seq;
+		args->window_size = ni->ni_ba_tx[tid].buff_size;
+		args->lifetime = ni->ni_ba_tx[tid].timeout;
+		args->flags = ni->ni_ba_tx[tid].flags;
+		ioctl->ioctl_command = IOCTL_DEV_BA_ADDED_TX;
+	} else {
+		args->state = ni->ni_ba_rx[tid].state;
+		args->start_seq_num = ni->ni_ba_rx[tid].seq;
+		args->window_size = ni->ni_ba_rx[tid].buff_size;
+		args->lifetime = ni->ni_ba_rx[tid].timeout;
+		args->flags = ni->ni_ba_rx[tid].flags;
+		ioctl->ioctl_command = IOCTL_DEV_BA_ADDED_RX;
+	}
+
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/*
+ * Send addba request: build and transmit an ADDBA request action frame for
+ * the given TID using the node's current TX block-ack parameters.
+ */
+static void qdrv_wlan_80211_send_addba(struct ieee80211_node *ni, int tid)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_action_data action_data;
+	struct ba_action_req addba;
+
+	memset(&action_data, 0, sizeof(action_data));
+	memset(&addba, 0, sizeof(addba));
+
+	addba.tid = tid;
+	addba.frag = 0;
+	addba.type = IEEE80211_BA_IMMEDIATE;
+	addba.seq = ni->ni_ba_tx[tid].seq;
+	addba.buff_size = ni->ni_ba_tx[tid].buff_size;
+	addba.timeout = ni->ni_ba_tx[tid].timeout;
+
+	action_data.cat = IEEE80211_ACTION_CAT_BA;
+	action_data.action = IEEE80211_ACTION_BA_ADDBA_REQ;
+	action_data.params = (void *)&addba;
+
+	/* ic_send_mgmt takes an int arg; the pointer is cast through it. */
+	ic->ic_send_mgmt(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+
+/*
+ * Push the current per-AC WME channel parameters down to the MuC
+ * (IOCTL_DEV_WMM_PARAMS).
+ */
+static void qdrv_wlan_update_wmm_params(struct ieee80211vap *vap)
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct ieee80211_wme_state *wme = &vap->iv_ic->ic_wme;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+	struct host_ioctl *ioctl;
+	int ac;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate WMM params message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(6)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	/* Snapshot the active per-AC parameters into the DMA buffer. */
+	for (ac = 0; ac < WME_NUM_AC; ac++)
+		args->wmm_params[ac] = wme->wme_chanParams.cap_wmeParams[ac];
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s - %x Send new WMM parameters\n",
+			vap->iv_dev->name, ioctl->ioctl_argp);
+
+	ioctl->ioctl_command = IOCTL_DEV_WMM_PARAMS;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = 1;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+static void qdrv_wlan_update_chan_power_table(struct ieee80211vap *vap,
+		struct ieee80211_channel *chan)	/* send @chan's max-power table (BF x SS x BW) to the MuC */
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct ieee80211_chan_power_table *args = NULL;
+	dma_addr_t args_dma;
+	struct host_ioctl *ioctl;
+	int8_t *s_pwrs;	/* source row: per-bandwidth powers from the channel */
+	int8_t *d_pwrs;	/* destination row in the DMA message */
+	int idx_bf;
+	int idx_ss;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate ioctl message for channel power table\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(6)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	args->chan_ieee = chan->ic_ieee;
+	for (idx_bf = PWR_IDX_BF_OFF; idx_bf < PWR_IDX_BF_MAX; idx_bf++) {
+		for (idx_ss = PWR_IDX_1SS; idx_ss < PWR_IDX_SS_MAX; idx_ss++) {
+			s_pwrs = chan->ic_maxpower_table[idx_bf][idx_ss];
+			d_pwrs = args->maxpower_table[idx_bf][idx_ss];
+			d_pwrs[PWR_IDX_20M] = s_pwrs[PWR_IDX_20M];
+			d_pwrs[PWR_IDX_40M] = s_pwrs[PWR_IDX_40M];
+			d_pwrs[PWR_IDX_80M] = s_pwrs[PWR_IDX_80M];
+		}
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SET_CHAN_POWER_TABLE;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = 1;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+static void qdrv_wlan_bbsort_prio(struct ieee80211_wme_state *wme)	/* bubble-sort global qdrv_sch_band_chg_prio[1..] by ascending aifsn; NOTE(review): @wme is unused */
+{
+	int i, j, temp_aifsn, temp_band;
+
+	for (i = 1; i < QDRV_SCH_BANDS - 1; i++) {	/* entry [0] is deliberately left out of the sort */
+		for (j = 1; j < QDRV_SCH_BANDS - i; j++) {
+			if (qdrv_sch_band_chg_prio[j].aifsn > qdrv_sch_band_chg_prio[j + 1].aifsn) {
+				temp_aifsn = qdrv_sch_band_chg_prio[j].aifsn;
+				temp_band = qdrv_sch_band_chg_prio[j].band_prio;
+
+				qdrv_sch_band_chg_prio[j].aifsn = qdrv_sch_band_chg_prio[j + 1].aifsn;
+				qdrv_sch_band_chg_prio[j].band_prio = qdrv_sch_band_chg_prio[j + 1].band_prio;
+
+				qdrv_sch_band_chg_prio[j + 1].aifsn = temp_aifsn;
+				qdrv_sch_band_chg_prio[j + 1].band_prio = temp_band;
+			}
+		}
+	}
+}
+
+static void qdrv_wlan_80211_join_bss(struct ieee80211vap *vap)	/* marshal the BSS node's rates/HT/VHT/BA state into a DMA message and send IOCTL_DEV_NEWBSSID to the MuC */
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_wme_state *wme = &ic->ic_wme;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+	struct host_ioctl *ioctl;
+	int i;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate JOIN BSS message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+	memset(args, 0, sizeof(*args));
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(6)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	for (i = 0; i < WME_NUM_AC; i++) {
+		args->wmm_params[i] = wme->wme_chanParams.cap_wmeParams[i];
+
+		/* This is to inform qdrv scheduler that better AC may have worse parameter settings */
+		qdrv_sch_band_chg_prio[i].band_prio = qdrv_sch_band_prio[i];
+		qdrv_sch_band_chg_prio[i + 1].aifsn =
+			wme->wme_chanParams.cap_wmeParams[qdrv_sch_band_prio[i + 1]].wmm_aifsn;	/* NOTE(review): reads qdrv_sch_band_prio[i+1] with i up to WME_NUM_AC-1 - confirm the array has WME_NUM_AC+1 entries */
+	}
+
+	qdrv_sch_band_chg_prio[i].band_prio = qdrv_sch_band_prio[i];	/* i == WME_NUM_AC after the loop */
+	qdrv_sch_band_chg_prio[0].aifsn = wme->wme_chanParams.cap_wmeParams[3].wmm_aifsn;	/* NOTE(review): magic index 3 - presumably WME_AC_VO, confirm */
+
+	qdrv_wlan_bbsort_prio(wme);
+
+	memset(ni->ni_shared_stats, 0, sizeof(*ni->ni_shared_stats));	/* reset LHOST/MuC shared stats for the new association */
+	args->ni_shared_stats = ni->ni_shared_stats_phys;
+	memcpy(args->ni_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	memcpy(args->ni_macaddr, vap->iv_myaddr, IEEE80211_ADDR_LEN);
+	args->ni_associd = IEEE80211_NODE_AID(ni);
+	args->ni_flags = ni->ni_flags;
+	args->ni_vendor = ni->ni_vendor;
+	memcpy(args->ni_rates, ni->ni_rates.rs_rates, IEEE80211_RATE_MAXSIZE);
+	memcpy(args->ni_htrates, ni->ni_htrates.rs_rates, IEEE80211_HT_RATE_MAXSIZE);
+	args->ni_nrates = ni->ni_rates.rs_nrates;
+	args->ni_htnrates = ni->ni_htrates.rs_nrates;
+	memcpy(args->ni_htcap, &ni->ni_htcap, sizeof(struct ieee80211_htcap));
+	memcpy(args->ni_htinfo, &ni->ni_htinfo, sizeof(struct ieee80211_htinfo));
+	if (IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {	/* VHT caps copied only when both sides are VHT-capable */
+		memcpy(args->ni_vhtcap, &ni->ni_vhtcap,
+			sizeof(args->ni_vhtcap));
+		memcpy(args->ni_vhtop, &ni->ni_vhtop,
+			sizeof(args->ni_vhtop));
+	}
+
+	if (ni->ni_rsn_ie != NULL) {
+		args->ni_rsn_caps = ni->ni_rsn.rsn_caps;	/* RSN caps are only meaningful when the peer supplied an RSN IE */
+	}
+
+	args->rsn_ucastcipher = ni->ni_rsn.rsn_ucastcipher;
+
+	/* Automatic install of BA */
+	if (ni->ni_implicit_ba_valid) {
+		args->ni_implicit_ba_rx = ni->ni_implicit_ba;
+		args->ni_implicit_ba_tx = ni->ni_vap->iv_implicit_ba;
+		args->ni_implicit_ba_size = ni->ni_implicit_ba_size;
+	} else {
+		args->ni_implicit_ba_rx = 0;
+		args->ni_implicit_ba_tx = 0;
+	}
+
+	if (ni->ni_qtn_assoc_ie) {	/* peer is a Quantenna device: parse its vendor IE */
+		qdrv_wlan_qtnie_parse(ni, ic, args);
+		g_qdrv_non_qtn_assoc = 0;
+	} else {
+		g_qdrv_non_qtn_assoc = 1;
+	}
+
+	DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN, "%s - %x Send new BSSID\n",
+			vap->iv_dev->name, ioctl->ioctl_argp);	/* NOTE(review): argp is printed before it is assigned below */
+
+	ioctl->ioctl_command = IOCTL_DEV_NEWBSSID;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = 1;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+
+	ieee80211_idx_add(ni, args->ni_node_idx);	/* node index read back from the message - assumes the ioctl completed synchronously */
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+
+#if defined(QBMPS_ENABLE)
+	if (ic->ic_flags_qtn & IEEE80211_QTN_BMPS) {
+		/* allocate or free/re-allocate null frame */
+		ieee80211_sta_bmps_update(vap);
+	}
+#endif
+}
+
+static void qdrv_wlan_80211_beacon_update(struct ieee80211vap *vap)	/* rebuild the beacon frame/IE list and hand it to the MuC via IOCTL_DEV_BEACON_START */
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_wme_state *wme = &ic->ic_wme;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct qtn_beacon_args *bc_args = NULL;
+	dma_addr_t args_dma;
+	struct host_ioctl *ioctl;
+	struct sk_buff *beacon_skb;
+	int i;
+
+	if (!(vap->iv_dev->flags & IFF_RUNNING) || ic->ic_bsschan == IEEE80211_CHAN_ANYC)	/* interface down or no BSS channel yet: nothing to beacon */
+		return;
+
+	if (vap->iv_opmode == IEEE80211_M_WDS)	/* WDS VAPs do not beacon */
+		return;
+
+	spin_lock(&qv->bc_lock);
+	if (ieee80211_beacon_create_param(vap) != 0) {
+		spin_unlock(&qv->bc_lock);
+		return;
+	}
+	memset(&qv->qv_boff, 0, sizeof(qv->qv_boff));
+	beacon_skb = ieee80211_beacon_alloc(ni, &qv->qv_boff);
+	if (beacon_skb == NULL) {
+		spin_unlock(&qv->bc_lock);	/* NOTE(review): param created above is not destroyed on this path - confirm leak */
+		return;
+	}
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(bc_args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*bc_args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate BEACON UPDATE message\n");
+		dev_kfree_skb_any(beacon_skb);
+		vnet_free_ioctl(ioctl);
+		ieee80211_beacon_destroy_param(vap);
+		spin_unlock(&qv->bc_lock);
+		return;
+	}
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(7)ioctl %p dma ptr %p\n", ioctl, (void *)bc_args);
+
+	ieee80211_beacon_update(ni, &qv->qv_boff, beacon_skb, 0);
+
+	for (i = 0; i < WME_NUM_AC; i++) {
+		bc_args->wmm_params[i] = wme->wme_chanParams.cap_wmeParams[i];
+	}
+
+	bc_args->bo_tim_len = qv->qv_boff.bo_tim_len;
+	bc_args->bintval = ic->ic_lintval;
+	bc_args->bo_htcap = 0;
+	if (ic->ic_htinfo.choffset) {
+		/* Network is operating in 40 MHZ mode */
+		bc_args->bo_htinfo = 1;
+	} else {
+		/* Network is operating in 20 MHZ mode */
+		bc_args->bo_htinfo = 0;
+	}
+	/* This is an 11AC network */
+	if (IS_IEEE80211_VHT_ENABLED(ic)) {
+		bc_args->bo_vhtcap = 1;
+		bc_args->bo_vhtop = ic->ic_vhtop.chanwidth;
+	} else if (IS_IEEE80211_11NG_VHT_ENABLED(ic)) {	/* 2.4 GHz VHT (vendor 256QAM) case */
+		bc_args->bo_vhtcap = 1;
+		bc_args->bo_vhtop = ic->ic_vhtop_24g.chanwidth;
+	}
+	ieee80211_beacon_flush_param(vap->param);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	flush_dcache_sizerange_safe(beacon_skb->data, beacon_skb->len);	/* make the beacon payload visible to the MuC before DMA */
+#else
+	flush_dcache_range((uint32_t)beacon_skb->data,
+			(uint32_t)beacon_skb->data + beacon_skb->len);
+#endif
+	/* Convert to MuC mapping address before ioctl request */
+	bc_args->bc_ie_head = plat_kernel_addr_to_dma(NULL, vap->param->head);
+	bc_args->bc_ie_buf_start = plat_kernel_addr_to_dma(NULL, vap->param->buf);
+	ioctl->ioctl_command = IOCTL_DEV_BEACON_START;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = beacon_skb->len | (1 << 16);	/* low 16 bits: beacon length; bit 16: flag (meaning not visible here) */
+	ioctl->ioctl_argp = args_dma;
+#ifdef LHOST_DEBUG_BEACON
+	printk("LHOST send a beacon %p length %d\n", beacon_skb->data, beacon_skb->len);
+	ieee80211_dump_beacon_desc_ie(vap->param);
+#endif
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*bc_args), bc_args, args_dma);
+	/* Require MuC to receive and copy the list as well as the beacon buffer */
+	ieee80211_beacon_destroy_param(vap);
+
+	dev_kfree_skb_any(beacon_skb);
+	spin_unlock(&qv->bc_lock);
+
+	ic->ic_init(ic);	/* NOTE(review): re-initializes the device on every beacon update - confirm this is intended */
+}
+
+static void qdrv_wlan_80211_beacon_stop(struct ieee80211vap *vap)	/* tell the MuC to stop beaconing for this VAP (IOCTL_DEV_BEACON_STOP) */
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+	struct host_ioctl *ioctl;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate BEACON STOP message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(11)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	ioctl->ioctl_command = IOCTL_DEV_BEACON_STOP;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;	/* NOTE(review): args buffer is allocated but never written - payload appears unused */
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/*
+ * Notify the MuC that a block-ack session was torn down for (@ni, @tid);
+ * @direction selects TX (non-zero) vs RX state.
+ */
+static void qdrv_wlan_80211_process_delba(struct ieee80211_node *ni, int tid,
+				int direction)
+{
+	struct qtn_baparams_args *args = NULL;
+	dma_addr_t args_dma;
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate PROCESS_DELBA message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(8)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	/* Copy the values over */
+	if (direction) {	/* TX-side BA state */
+		args->state = ni->ni_ba_tx[tid].state;
+		args->tid = tid;
+		args->type = IEEE80211_BA_IMMEDIATE;
+		args->start_seq_num = ni->ni_ba_tx[tid].seq;
+		args->window_size = ni->ni_ba_tx[tid].buff_size;
+		args->lifetime = ni->ni_ba_tx[tid].timeout;
+	} else {	/* RX-side BA state */
+		args->tid = tid;
+		args->state = ni->ni_ba_rx[tid].state;
+		args->type = IEEE80211_BA_IMMEDIATE;
+		args->start_seq_num = ni->ni_ba_rx[tid].seq;
+		args->window_size = ni->ni_ba_rx[tid].buff_size;
+		args->lifetime = ni->ni_ba_rx[tid].timeout;
+	}
+	memcpy(args->ni_addr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+
+	ioctl->ioctl_command = direction ?  IOCTL_DEV_BA_REMOVED_TX : IOCTL_DEV_BA_REMOVED_RX;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+static void qdrv_wlan_80211_tdls_operation(struct ieee80211_node *ni,
+		uint32_t ioctl_cmd, int cmd, uint32_t *value)	/* send a TDLS get/set ioctl; *value is both input and (for gets) output */
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	struct qtn_tdls_args *args = NULL;
+	dma_addr_t args_dma;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate TDLS set message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(1)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+	memset(args, 0, sizeof(*args));
+
+	memcpy(args->ni_macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	args->tdls_cmd = cmd;
+	args->ni_ncidx = ni->ni_node_idx;
+	args->tdls_params = *value;
+
+	ioctl->ioctl_command = ioctl_cmd;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	*value = args->tdls_params;	/* read back result - assumes vnet_send_ioctl completed synchronously */
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+static void qdrv_wlan_80211_tdls_set_params(struct ieee80211_node *ni, int cmd, int value)	/* set one TDLS parameter on the MuC for this node */
+{
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE, "Node %pM set tdls param %d to "
+			"%d-0x%x\n", ni->ni_macaddr, cmd, value, value);
+
+	qdrv_wlan_80211_tdls_operation(ni, IOCTL_DEV_SET_TDLS_PARAM, cmd, (uint32_t*)&value);
+}
+
+static uint32_t qdrv_wlan_80211_tdls_get_params(struct ieee80211_node *ni, int cmd)	/* read one TDLS parameter from the MuC; returns 0 if the ioctl could not be sent */
+{
+	uint32_t value = 0;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE, "Node %pM get tdls param %d\n",
+			ni->ni_macaddr, cmd);
+
+	qdrv_wlan_80211_tdls_operation(ni, IOCTL_DEV_GET_TDLS_PARAM, cmd, &value);
+
+	return value;
+}
+
+/*
+ * Enter (@enable non-zero) or leave power-save state for @ni on STA mode,
+ * by sending IOCTL_DEV_POWER_SAVE to the MuC.
+ */
+static void qdrv_wlan_80211_power_save(struct ieee80211_node *ni, int enable)
+{
+	struct qtn_power_save_args *args = NULL;
+	dma_addr_t args_dma;
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate POWER_SAVE message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(8)ioctl %p dma ptr %p\n", ioctl, (void *)args);
+
+	args->enable = !!enable;	/* normalize to 0/1 */
+	memcpy(args->ni_addr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+
+	ioctl->ioctl_command = IOCTL_DEV_POWER_SAVE;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+#ifndef ifr_media
+#define	ifr_media	ifr_ifru.ifru_ivalue	/* BSD-style alias used by ifmedia_ioctl() below */
+#endif
+
+static void qdrv_wlan_80211_set_cap_bw(struct ieee80211_node *ni, int bw)	/* change the channel-bandwidth capability (20/40/80) and re-apply the phymode via ifmedia */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ifreq ifr;
+	int retv;
+
+	if ((bw > BW_HT40) && !ieee80211_swfeat_is_supported(SWFEAT_ID_VHT, 1))	/* 80 MHz needs the VHT software feature */
+		return;
+
+	if (bw == qdrv_wlan_80211_get_cap_bw(ic))	/* no-op if already at the requested bandwidth */
+		return;
+
+	if (bw == BW_HT20) {
+		if (IS_IEEE80211_VHT_ENABLED(ic))
+			ic->ic_phymode = IEEE80211_MODE_11AC_VHT20PM;
+		else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
+			ic->ic_phymode = IEEE80211_MODE_11NA;
+		else if ((IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) &&
+				(ic->ic_phymode == IEEE80211_MODE_11NG_HT40PM))
+			ic->ic_phymode = IEEE80211_MODE_11NG;
+	} else if (bw == BW_HT40) {
+		if (IS_IEEE80211_VHT_ENABLED(ic))
+			ic->ic_phymode = IEEE80211_MODE_11AC_VHT40PM;
+		else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
+			ic->ic_phymode = IEEE80211_MODE_11NA_HT40PM;
+		else
+			ic->ic_phymode = IEEE80211_MODE_11NG_HT40PM;
+	} else if (bw == BW_HT80) {
+		if (IS_IEEE80211_VHT_ENABLED(ic)) {
+			ic->ic_phymode = IEEE80211_MODE_11AC_VHT80PM;
+		} else {
+			DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN,
+				"BW %d cannot be configured in current phymode\n", bw);
+			return;
+		}
+	} else {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "BW %d is not valid\n", bw);
+		return;
+	}
+
+	ieee80211_update_bw_capa(vap, bw);
+
+	memset(&ifr, 0, sizeof(ifr));
+	if(vap->iv_media.ifm_cur == NULL)	/* no current media entry: cannot build the SIOCSIFMEDIA request */
+		return;
+
+	ifr.ifr_media = vap->iv_media.ifm_cur->ifm_media &~ IFM_MMASK;	/* keep everything except the mode bits */
+	ifr.ifr_media |= IFM_MAKEMODE(ic->ic_phymode);
+	retv = ifmedia_ioctl(vap->iv_dev, &ifr, &vap->iv_media, SIOCSIFMEDIA);
+	if (retv == -ENETRESET) {	/* media change requires a mode reset */
+		ic->ic_des_mode = ic->ic_phymode;
+		ieee80211_setmode(ic, ic->ic_des_mode);
+	}
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "QDRV: PHY mode %d\n", ic->ic_phymode);
+}
+
+static void qdrv_wlan_80211_set_cap_sgi(struct ieee80211_node *ni, int sgi)	/* enable/disable short guard interval in the HT and VHT capability fields */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	if (sgi) {
+		ic->ic_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_SHORT_GI_80;
+		ic->ic_vhtcap_24g.cap_flags |= IEEE80211_VHTCAP_C_SHORT_GI_80;
+		if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40)	/* SGI40 only advertised when 40 MHz capable */
+			ic->ic_htcap.cap |= (IEEE80211_HTCAP_C_SHORTGI40 |
+					     IEEE80211_HTCAP_C_SHORTGI20);
+		else
+			ic->ic_htcap.cap |= IEEE80211_HTCAP_C_SHORTGI20;
+
+	} else {
+		ic->ic_vhtcap.cap_flags &= ~IEEE80211_VHTCAP_C_SHORT_GI_80;
+		ic->ic_vhtcap_24g.cap_flags &= ~IEEE80211_VHTCAP_C_SHORT_GI_80;
+		if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40)
+			ic->ic_htcap.cap &= ~(IEEE80211_HTCAP_C_SHORTGI40 |
+					      IEEE80211_HTCAP_C_SHORTGI20);
+		else
+			ic->ic_htcap.cap &= ~IEEE80211_HTCAP_C_SHORTGI20;
+	}
+}
+
+static void qdrv_wlan_80211_set_ldpc(struct ieee80211_node *ni, int ldpc)	/* store LDPC on/off flag (only bit 0 is used) */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	ic->ldpc_enabled = (ldpc & 0x1);
+}
+
+static void qdrv_wlan_80211_set_stbc(struct ieee80211_node *ni, int stbc)	/* store STBC on/off flag (only bit 0 is used) */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	ic->stbc_enabled = (stbc & 0x1);
+}
+
+static void qdrv_wlan_80211_set_rts_cts(struct ieee80211_node *ni, int rts_cts)	/* store RTS/CTS protection on/off flag (only bit 0 is used) */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	ic->rts_cts_prot = (rts_cts & 0x1);
+}
+
+static void qdrv_wlan_80211_set_peer_rts_mode(struct ieee80211_node *ni, int mode)	/* select peer-RTS policy (dynamic/PMP/off) and refresh beacons of running AP VAPs when it changed */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap;
+	uint8_t beacon_update_required = 0;
+
+	if (mode > IEEE80211_PEER_RTS_MAX) {	/* clamp unknown modes to the default */
+		mode = IEEE80211_PEER_RTS_DEFAULT;
+	}
+	ic->ic_peer_rts_mode = mode;
+
+	if ((mode == IEEE80211_PEER_RTS_DYN) &&
+			(ic->ic_peer_rts != ic->ic_dyn_peer_rts)) {
+		ic->ic_peer_rts = ic->ic_dyn_peer_rts;
+		beacon_update_required = 1;
+	} else if (mode == IEEE80211_PEER_RTS_PMP) {
+		if ((ic->ic_sta_assoc - ic->ic_nonqtn_sta) >= IEEE80211_MAX_STA_CCA_ENABLED) {	/* enough QTN STAs associated: enable peer RTS */
+			if (ic->ic_peer_rts != 1) {
+				ic->ic_peer_rts = 1;
+				beacon_update_required = 1;
+			}
+		} else {
+			if (ic->ic_peer_rts != 0) {
+				ic->ic_peer_rts = 0;
+				beacon_update_required = 1;
+			}
+		}
+	} else if (mode == IEEE80211_PEER_RTS_OFF) {
+		ic->ic_peer_rts = 0;
+		beacon_update_required = 1;
+	}
+
+	if (beacon_update_required) {	/* propagate the new setting into beacons of every running AP VAP */
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+				continue;
+			if (vap->iv_state != IEEE80211_S_RUN)
+				continue;
+
+			ic->ic_beacon_update(vap);
+		}
+	}
+}
+
+static void qdrv_wlan_80211_set_11n40_only_mode(struct ieee80211_node *ni, int mode)	/* store 11n-40MHz-only flag (only bit 0 is used) */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	ic->ic_11n_40_only_mode = (mode & 0x1);
+}
+
+static void qdrv_wlan_80211_set_legacy_retry(struct ieee80211_node *ni, int retry_count)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	ic->ic_legacy_retry_limit = (u_int8_t)retry_count;
+}
+
+static void qdrv_wlan_80211_set_retry_count(struct ieee80211_node *ni, int retry_count)	/* set the generic retry count; truncated to 8 bits */
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	ic->ic_retry_count = (u_int8_t)retry_count;
+}
+
+static void qdrv_wlan_80211_set_mcsset(struct ieee80211com *ic)	/* rebuild the HT MCS bitmap from ic_ht_nss_cap (1-4 spatial streams, plus UEQM sets when supported) */
+{
+	memset(ic->ic_htcap.mcsset, 0, sizeof(ic->ic_htcap.mcsset));
+
+	if (ic->ic_ht_nss_cap == IEEE80211_HT_NSS1) {	/* MCS 0-7 */
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] = 0xff;
+	} else if (ic->ic_ht_nss_cap == IEEE80211_HT_NSS2) {	/* MCS 0-15 */
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] = 0xff;
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] = 0xff;
+		if (ic->ic_caps & IEEE80211_C_UEQM) {	/* unequal-modulation MCS rates */
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] =
+				IEEE80211_HT_MCSSET_20_40_UEQM1_2SS;
+		}
+	} else if (ic->ic_ht_nss_cap == IEEE80211_HT_NSS3) {	/* MCS 0-23 */
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] = 0xff;
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] = 0xff;
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] = 0xff;
+		if (ic->ic_caps & IEEE80211_C_UEQM) {
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] =
+				IEEE80211_HT_MCSSET_20_40_UEQM1_2SS |
+				IEEE80211_HT_MCSSET_20_40_UEQM1_3SS;
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2] =
+				IEEE80211_HT_MCSSET_20_40_UEQM2_3SS;
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] =
+				IEEE80211_HT_MCSSET_20_40_UEQM3_3SS;
+		}
+	} else if (ic->ic_ht_nss_cap == IEEE80211_HT_NSS4) {	/* MCS 0-31 */
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] = 0xff;
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] = 0xff;
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] = 0xff;
+		ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS4] = 0xff;
+		if (ic->ic_caps & IEEE80211_C_UEQM) {
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] =
+				IEEE80211_HT_MCSSET_20_40_UEQM1_2SS |
+				IEEE80211_HT_MCSSET_20_40_UEQM1_3SS;
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2] =
+				IEEE80211_HT_MCSSET_20_40_UEQM2_3SS;
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] =
+				IEEE80211_HT_MCSSET_20_40_UEQM3_3SS |
+				IEEE80211_HT_MCSSET_20_40_UEQM3_4SS;
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4] =
+				IEEE80211_HT_MCSSET_20_40_UEQM4_4SS;
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5] =
+				IEEE80211_HT_MCSSET_20_40_UEQM5_4SS;
+			ic->ic_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6] =
+				IEEE80211_HT_MCSSET_20_40_UEQM6_4SS;
+		}
+	}
+}
+
+static void qdrv_wlan_80211_set_mcsparams(struct ieee80211com *ic)	/* set HT MCS TX params; 2x4 hardware advertises 2 TX spatial streams despite 4 RX */
+{
+	ic->ic_htcap.mcsparams = IEEE80211_HTCAP_MCS_TX_SET_DEFINED;
+	ic->ic_htcap.numtxspstr = 0;
+	switch (ic->ic_ht_nss_cap) {
+	case IEEE80211_HT_NSS4:
+		if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X4, 0)) {	/* asymmetric 2x4 config: TX != RX stream count */
+			ic->ic_htcap.numtxspstr = IEEE80211_HTCAP_MCS_TWO_TX_SS;
+			ic->ic_htcap.mcsparams |= IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ;
+			if (ic->ic_caps & IEEE80211_C_UEQM)
+				ic->ic_htcap.mcsparams |= IEEE80211_HTCAP_MCS_TX_UNEQ_MOD;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static u_int16_t qdrv_wlan_80211_vhtmcs_map(enum ieee80211_vht_nss vhtnss,
+					    enum ieee80211_vht_mcs_supported vhtmcs)	/* build the 16-bit VHT MCS map: 2 bits per stream, @vhtmcs for streams 1..@vhtnss, "not supported" for the rest */
+{
+	/* For Spatial stream from 1-8; set MCS=3 (not supported) */
+	u_int16_t vhtmcsmap = IEEE80211_VHTMCS_ALL_DISABLE;
+
+	switch(vhtnss) {	/* deliberate fallthrough: each case also sets all lower stream counts */
+	case IEEE80211_VHT_NSS8:
+		vhtmcsmap &= 0x3FFF;
+		vhtmcsmap |= (vhtmcs << 14);	/* fall through */
+	case IEEE80211_VHT_NSS7:
+		vhtmcsmap &= 0xCFFF;
+		vhtmcsmap |= (vhtmcs << 12);	/* fall through */
+	case IEEE80211_VHT_NSS6:
+		vhtmcsmap &= 0xF3FF;
+		vhtmcsmap |= (vhtmcs << 10);	/* fall through */
+	case IEEE80211_VHT_NSS5:
+		vhtmcsmap &= 0xFCFF;
+		vhtmcsmap |= (vhtmcs << 8);	/* fall through */
+	case IEEE80211_VHT_NSS4:
+		vhtmcsmap &= 0xFF3F;
+		vhtmcsmap |= (vhtmcs << 6);	/* fall through */
+	case IEEE80211_VHT_NSS3:
+		vhtmcsmap &= 0xFFCF;
+		vhtmcsmap |= (vhtmcs << 4);	/* fall through */
+	case IEEE80211_VHT_NSS2:
+		vhtmcsmap &= 0xFFF3;
+		vhtmcsmap |= (vhtmcs << 2);	/* fall through */
+	case IEEE80211_VHT_NSS1:
+	default:	/* At least 1 spatial stream supported */
+		vhtmcsmap &= 0xFFFC;
+		vhtmcsmap |= vhtmcs;
+		break;
+	}
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_WLAN,
+		"vhtmcsmap: %x for NSS=%d & MCS=%d \n", vhtmcsmap, vhtnss, vhtmcs);
+
+	return (vhtmcsmap);
+}
+
+static void qdrv_wlan_80211_set_vht_mcsset(struct ieee80211_vhtcap *vhtcap, enum ieee80211_vht_nss vht_nss_cap,
+	enum ieee80211_vht_mcs_supported vht_mcs_cap)	/* fill VHT RX/TX MCS maps, capping the requested NSS by what the hardware config supports */
+{
+	enum ieee80211_vht_nss max_vht_tx_nss_cap;
+	enum ieee80211_vht_nss max_vht_rx_nss_cap;
+
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X2, 0)) {
+		max_vht_tx_nss_cap = IEEE80211_VHT_NSS2;
+		max_vht_rx_nss_cap = IEEE80211_VHT_NSS2;
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X4, 0)) {	/* asymmetric: 2 TX / 4 RX streams */
+		max_vht_tx_nss_cap = IEEE80211_VHT_NSS2;
+		max_vht_rx_nss_cap = IEEE80211_VHT_NSS4;
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0)) {
+		max_vht_tx_nss_cap = IEEE80211_VHT_NSS3;
+		max_vht_rx_nss_cap = IEEE80211_VHT_NSS3;
+	} else {	/* default: full 4x4 */
+		max_vht_tx_nss_cap = IEEE80211_VHT_NSS4;
+		max_vht_rx_nss_cap = IEEE80211_VHT_NSS4;
+	}
+	vhtcap->rxmcsmap = qdrv_wlan_80211_vhtmcs_map(min(max_vht_rx_nss_cap, vht_nss_cap),
+				vht_mcs_cap);
+	vhtcap->txmcsmap = qdrv_wlan_80211_vhtmcs_map(min(max_vht_tx_nss_cap, vht_nss_cap),
+				vht_mcs_cap);
+}
+
+static int qdrv_wlan_80211_get_11ac_mode(struct ieee80211com *ic)	/* report whether the current phymode is a VHT (11ac) mode */
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_WLAN,
+		"802.11ac mode = %d\n", ic->ic_phymode);
+
+	if (IS_IEEE80211_VHT_ENABLED(ic)) {
+		return (QTN_11NAC_ENABLE);
+	} else {
+		return (QTN_11NAC_DISABLE);
+	}
+}
+
+static void qdrv_wlan_80211_set_11ac_mode(struct ieee80211com *ic, int vht)	/* toggle between 11n and 11ac phymodes at the equivalent bandwidth */
+{
+#ifdef QDRV_FEATURE_HT
+	int ic_phymode_save = ic->ic_phymode;	/* kept so the change can be rolled back if VHT is unsupported */
+
+	/*
+	 * phymode has already been initialized through set_bw
+	 * - need to reinitialize if in 11ac mode
+	 */
+	if (vht == QTN_11NAC_ENABLE) {
+		if (!IS_IEEE80211_VHT_ENABLED(ic)) {	/* map each 11n mode to the 11ac mode of matching bandwidth */
+			if (ic->ic_phymode == IEEE80211_MODE_11NA)
+				ic->ic_phymode = IEEE80211_MODE_11AC_VHT20PM;
+			else if (ic->ic_phymode == IEEE80211_MODE_11NA_HT40PM)
+				ic->ic_phymode = IEEE80211_MODE_11AC_VHT40PM;
+			else
+				ic->ic_phymode = IEEE80211_MODE_11AC_VHT80PM;
+		}
+	} else {	/* disable 11ac: map VHT modes back to 11na equivalents (80 MHz falls back to 40) */
+		if ((ic->ic_phymode == IEEE80211_MODE_11AC_VHT80PM) ||
+				(ic->ic_phymode == IEEE80211_MODE_11AC_VHT40PM))
+			ic->ic_phymode = IEEE80211_MODE_11NA_HT40PM;
+		else if (ic->ic_phymode == IEEE80211_MODE_11AC_VHT20PM)
+			ic->ic_phymode = IEEE80211_MODE_11NA;
+	}
+
+	if ((vht == QTN_11NAC_ENABLE) && !ieee80211_swfeat_is_supported(SWFEAT_ID_VHT, 1))	/* VHT feature missing: revert */
+		ic->ic_phymode = ic_phymode_save;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_WLAN,
+			"802.11ac mode = %d\n", ic->ic_phymode);
+#endif
+}
+
+#ifdef CONFIG_QVSP
+static void qdrv_wlan_notify_qvsp_coc_state_changed(struct qvsp_s *qvsp, struct ieee80211com *ic)	/* mark QVSP inactive while power-duty (CoC) mode is engaged */
+{
+	if (qvsp) {
+		if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_DUTY) {
+			qvsp_inactive_flag_set(qvsp, QVSP_INACTIVE_COC);
+		} else {
+			qvsp_inactive_flag_clear(qvsp, QVSP_INACTIVE_COC);
+		}
+	}
+}
+#endif
+
+static void qdrv_wlan_notify_pm_state_changed(struct ieee80211com *ic, int pm_level_prev)	/* emit a PM-LEVEL-CHANGE event on the first VAP when the PM level actually changed */
+{
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);	/* NOTE(review): NULL if no VAPs exist - qdrv_eventf(vap->iv_dev,...) would dereference it */
+	const char *tag = QEVT_PM_PREFIX;
+	const char *msg = "PM-LEVEL-CHANGE";
+
+	if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] != pm_level_prev &&
+			ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] != BOARD_PM_LEVEL_FORCE_NO) {	/* forced-off level is not reported */
+
+		qdrv_eventf(vap->iv_dev, "%s%s from %u to %u", tag, msg,
+				(unsigned)pm_level_prev,
+				(unsigned)ic->ic_pm_state[QTN_PM_CURRENT_LEVEL]);
+	}
+}
+
+static void qdrv_send_to_l2_ext_filter(struct ieee80211vap *vap, struct sk_buff *skb)	/* forward @skb to the external L2 filter via the TQE path */
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct qdrv_wlan *qw = qv->parent;
+
+	qdrv_tqe_send_l2_ext_filter(qw, skb);
+}
+
+static void qdrv_wlan_80211_setparam(struct ieee80211_node *ni, int param,
+	int value, unsigned char *data, int len)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct ieee80211vap *vap = &qv->iv;
+	struct qdrv_wlan *qw = qv->parent;
+	struct ieee80211com *ic = &qw->ic;
+	struct qtn_setparams_args *args = NULL;
+	dma_addr_t args_dma;
+	dma_addr_t ctrl_dma;
+	struct host_ioctl *ioctl;
+	u_int8_t tid;
+	u_int16_t seq, size, time;
+	struct device *dev = qdrv_soc_get_addr_dev();
+
+	switch (param) {
+	case IEEE80211_PARAM_FORCE_MUC_TRACE:
+		qdrv_muc_traceback(value == 0xdead ? 1 : 0);
+		return;
+	case IEEE80211_PARAM_FORCE_ENABLE_TRIGGERS:
+		g_triggers_on = value;
+		return;
+	case IEEE80211_PARAM_FORCE_MUC_HALT:
+		qdrv_halt_muc();
+		return;
+	case IEEE80211_PARAM_HTBA_SEQ_CTRL:
+		seq = value & 0xFFFF;
+		tid = (value & 0xFF0000) >> 16;
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_HTBA_SEQ_CTRL (%d), %d, %d\n",
+			param, tid, seq);
+		ni->ni_ba_tx[tid].seq = seq;
+		return;
+	case IEEE80211_PARAM_HTBA_SIZE_CTRL:
+		size = value & 0xFFFF;
+		tid = (value & 0xFF0000) >> 16;
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_HTBA_SIZE_CTRL (%d), %d, %d\n",
+			param, tid, size);
+		ni->ni_ba_tx[tid].buff_size = size;
+		return;
+	case IEEE80211_PARAM_HTBA_TIME_CTRL:
+		time = value & 0xFFFF;
+		tid = (value & 0xFF0000) >> 16;
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_HTBA_TIME_CTRL (%d), %d, %d\n",
+			param, tid, time);
+		ni->ni_ba_tx[tid].timeout = time;
+		return;
+	case IEEE80211_PARAM_HT_ADDBA:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_HT_ADDBA (%d)\n", param);
+		qdrv_wlan_80211_send_addba(ni, value);
+		return;
+	case IEEE80211_PARAM_HT_DELBA:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_HT_DELBA (%d)\n", param);
+		qdrv_wlan_drop_ba(ni, value, 1, IEEE80211_REASON_UNSPECIFIED);
+		qdrv_wlan_drop_ba(ni, value, 0, IEEE80211_REASON_UNSPECIFIED);
+		return;
+	case IEEE80211_PARAM_TXBF_CTRL:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_TXBF_CTRL (%d)\n", param);
+		qdrv_txbf_config_set((struct qdrv_wlan *) qv->parent, value);
+		return;
+	case IEEE80211_PARAM_BW_SEL_MUC:
+	case IEEE80211_PARAM_BW_SEL:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_BW_SEL_MUC(%d)\n", param);
+		qdrv_wlan_80211_set_cap_bw(ni, value);
+		if (qv->iv.iv_opmode == IEEE80211_M_HOSTAP)
+			qdrv_wlan_80211_beacon_update((struct ieee80211vap *)qv);
+		break;
+	case IEEE80211_PARAM_SHORT_GI:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_SHORT_GI(%d)\n", param);
+		qdrv_wlan_80211_set_cap_sgi(ni, value);
+		break;
+	case IEEE80211_PARAM_LDPC:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_LDPC(%d)\n", param);
+		qdrv_wlan_80211_set_ldpc(ni, value);
+		break;
+	case IEEE80211_PARAM_STBC:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_STBC(%d)\n", param);
+		qdrv_wlan_80211_set_stbc(ni, value);
+		break;
+	case IEEE80211_PARAM_RTS_CTS:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_RTS_CTS(%d)\n", param);
+		qdrv_wlan_80211_set_rts_cts(ni, value);
+		break;
+	case IEEE80211_PARAM_TX_QOS_SCHED:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_TX_QOS_SCHED(%d)\n", param);
+		ni->ni_ic->ic_tx_qos_sched = (value & 0xf);
+		break;
+	case IEEE80211_PARAM_PEER_RTS_MODE:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_PEER_RTS_MODE(%d)\n", param);
+		qdrv_wlan_80211_set_peer_rts_mode(ni, value);
+		break;
+	case IEEE80211_PARAM_DYN_WMM:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_DYN_WMM(%d)\n", param);
+		ic->ic_dyn_wmm = value;
+		break;
+	case IEEE80211_PARAM_GET_CH_INUSE:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_GET_CH_INUSE(%d)\n", param);
+		if (value)
+			ic->ic_flags_qtn |= IEEE80211_QTN_PRINT_CH_INUSE;
+		else
+			ic->ic_flags_qtn &= ~IEEE80211_QTN_PRINT_CH_INUSE;
+		break;
+	case IEEE80211_PARAM_11N_40_ONLY_MODE:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_11N_40_ONLY_MODE (%d)\n", param);
+		qdrv_wlan_80211_set_11n40_only_mode(ni, value);
+		break;
+	case IEEE80211_PARAM_MAX_MGMT_FRAMES:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_MAX_MGMT_FRAMES (%d)\n", param);
+		qw->tx_if.txdesc_cnt[QDRV_TXDESC_MGMT] = value;
+		break;
+	case IEEE80211_PARAM_MCS_ODD_EVEN:
+		qw->mcs_odd_even = value;
+		break;
+	case IEEE80211_PARAM_RESTRICTED_MODE:
+		qw->tx_restrict = value;
+		break;
+	case IEEE80211_PARAM_RESTRICT_RTS:
+		qw->tx_restrict_rts = value;
+		break;
+	case IEEE80211_PARAM_RESTRICT_LIMIT:
+		qw->tx_restrict_limit = value;
+		break;
+	case IEEE80211_PARAM_RESTRICT_RATE:
+		qw->tx_restrict_rate = value;
+		break;
+	case IEEE80211_PARAM_SWRETRY_AGG_MAX:
+		qw->tx_swretry_agg_max = value;
+		break;
+	case IEEE80211_PARAM_SWRETRY_NOAGG_MAX:
+		qw->tx_swretry_noagg_max = value;
+		break;
+	case IEEE80211_PARAM_SWRETRY_SUSPEND_XMIT:
+		qw->tx_swretry_suspend_xmit = value;
+		break;
+	case IEEE80211_PARAM_TEST_LNCB:
+		if (value) {
+			qw->flags_ext |= QDRV_WLAN_DEBUG_TEST_LNCB;
+		} else {
+			qw->flags_ext &= ~QDRV_WLAN_DEBUG_TEST_LNCB;
+		}
+		break;
+	case IEEE80211_PARAM_UNKNOWN_DEST_ARP:
+		if (value) {
+			qw->flags_ext |= QDRV_WLAN_FLAG_UNKNOWN_ARP;
+		} else {
+			qw->flags_ext &= ~QDRV_WLAN_FLAG_UNKNOWN_ARP;
+		}
+		break;
+	case IEEE80211_PARAM_MUC_FLAGS:
+	case IEEE80211_PARAM_HT_NSS_CAP:
+		qdrv_wlan_80211_set_mcsset(ic);
+		qdrv_wlan_80211_set_mcsparams(ic);
+		break;
+	case IEEE80211_PARAM_VHT_MCS_CAP:
+	case IEEE80211_PARAM_VHT_NSS_CAP:
+		qdrv_wlan_80211_set_vht_mcsset(&ic->ic_vhtcap, ic->ic_vht_nss_cap, ic->ic_vht_mcs_cap);
+		qdrv_wlan_80211_set_vht_mcsset(&ic->ic_vhtcap_24g, ic->ic_vht_nss_cap_24g, ic->ic_vht_mcs_cap);
+		break;
+	case IEEE80211_PARAM_UNKNOWN_DEST_FWD:
+		if (value) {
+			qw->flags_ext |= QDRV_WLAN_FLAG_UNKNOWN_FWD;
+		} else {
+			qw->flags_ext &= ~QDRV_WLAN_FLAG_UNKNOWN_FWD;
+		}
+		break;
+	case IEEE80211_PARAM_PWR_SAVE: {
+		uint32_t pm_param = QTN_PM_UNPACK_PARAM(value);
+		uint32_t pm_value = QTN_PM_UNPACK_VALUE(value);
+		int level_prev = ic->ic_pm_state[QTN_PM_CURRENT_LEVEL];
+
+		if (pm_param < QTN_PM_IOCTL_MAX) {
+			ic->ic_pm_state[pm_param] = pm_value;
+#ifdef CONFIG_QVSP
+			qdrv_wlan_notify_qvsp_coc_state_changed(qw->qvsp, ic);
+#endif
+			qdrv_wlan_notify_pm_state_changed(ic, level_prev);
+		}
+
+		if (pm_param == QTN_PM_PDUTY_PERIOD_MS &&
+				pm_qos_requirement(PM_QOS_POWER_SAVE) >= BOARD_PM_LEVEL_DUTY) {
+			if (ic->ic_lintval != ieee80211_pm_period_tu(ic)) {
+				/* Configure beacon interval to power duty interval */
+				ieee80211_beacon_interval_set(ic, ieee80211_pm_period_tu(ic));
+			}
+		}
+		break;
+	}
+	case IEEE80211_PARAM_TEST_TRAFFIC:
+		value = msecs_to_jiffies(value);
+		if (value != vap->iv_test_traffic_period) {
+			vap->iv_test_traffic_period = value;
+			if (value == 0) {
+				del_timer(&vap->iv_test_traffic);
+			} else {
+				mod_timer(&vap->iv_test_traffic,
+						jiffies + vap->iv_test_traffic_period);
+			}
+		}
+		break;
+	case IEEE80211_PARAM_QCAT_STATE: {
+		struct net_device *dev = qv->iv.iv_dev;
+
+		qdrv_eventf(dev, "QCAT state=%d", value);
+		printk("QCAT state=%d\n", value);
+
+		TXSTAT_SET(qw, qcat_state, value);
+		break;
+	}
+	case IEEE80211_PARAM_MIMOMODE:
+		qw->tx_mimomode = value;
+		break;
+	case IEEE80211_PARAM_SHORT_RETRY_LIMIT:
+	case IEEE80211_PARAM_LONG_RETRY_LIMIT:
+		//Currenty don't supporte to set this param, because we don't implement this feature in MacFW.
+		break;
+	case IEEE80211_PARAM_RETRY_COUNT:
+		qdrv_wlan_80211_set_retry_count(ni, value);
+		break;
+	case IEEE80211_PARAM_LEGACY_RETRY_LIMIT:
+		qdrv_wlan_80211_set_legacy_retry(ni, value);
+		break;
+	case IEEE80211_PARAM_RTSTHRESHOLD:
+		/* pass through, let the rts threshold value packed as normal param below */
+		break;
+	case IEEE80211_PARAM_CARRIER_ID:
+		g_carrier_id = value;
+		break;
+	case IEEE80211_PARAM_TX_QUEUING_ALG:
+		qw->tx_sch_shared_data->queuing_alg = value;
+		break;
+	case IEEE80211_PARAM_BA_THROT:
+#ifdef CONFIG_QVSP
+		qdrv_wlan_manual_ba_throt(qw, qv, value);
+#endif
+		return;
+	case IEEE80211_PARAM_WME_THROT:
+#ifdef CONFIG_QVSP
+		qdrv_wlan_manual_wme_throt(qw, qv, value);
+#endif
+		return;
+	case IEEE80211_PARAM_MODE:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_11AC_MODE (%d)\n", param);
+		qdrv_wlan_80211_set_11ac_mode(ic, value);
+		if (qv->iv.iv_opmode == IEEE80211_M_HOSTAP) {
+			qdrv_wlan_80211_beacon_update((struct ieee80211vap *)qv);
+		}
+		break;
+	case IEEE80211_PARAM_GENPCAP:
+		if (qdrv_genpcap_set(qw, value, &ctrl_dma) == 0) {
+			data = (uint8_t *) &ctrl_dma;
+			len = sizeof(ctrl_dma);
+		}
+		break;
+	case IEEE80211_PARAM_TXBF_PERIOD:
+		if (!value) {
+			/*
+			 * Turn off BF capabilities in the beacon when bfoff. Should
+			 * work for both AP beamformer and STA beamformee disabling
+			 * when bfoff is set.
+			 * */
+			ic->ic_vhtcap.cap_flags &=
+				~(IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP |
+				  IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP);
+			ic->ic_vhtcap_24g.cap_flags &=
+				~(IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP |
+				  IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP);
+		} else {
+			ic->ic_vhtcap.cap_flags |=
+				(IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP |
+				  IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP);
+			ic->ic_vhtcap_24g.cap_flags |=
+				(IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP |
+				  IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP);
+		}
+
+		ic->ic_txbf_period = value;
+
+		if (qv->iv.iv_opmode == IEEE80211_M_HOSTAP)
+			qdrv_wlan_80211_beacon_update((struct ieee80211vap *)qv);
+
+		break;
+	case IEEE80211_PARAM_CONFIG_PMF:
+		if (qv->iv.iv_opmode == IEEE80211_M_HOSTAP)
+			qdrv_wlan_80211_beacon_update((struct ieee80211vap *)qv);
+		break;
+	case IEEE80211_PARAM_WOWLAN:
+		if ((IEEE80211_WOWLAN_HOST_POWER_SAVE == (value>>16)) &&
+				(1 == (value & 0xffff))) {
+#ifndef TOPAZ_AMBER_IP
+			gpio_config(WOWLAN_GPIO_OUTPUT_PIN, GPIO_MODE_OUTPUT);
+			gpio_wowlan_output(WOWLAN_GPIO_OUTPUT_PIN, 0);
+#else
+			/*
+			 * In Amber WOWLAN is handled by WIFI2SOC interrupt.
+			 */
+#endif
+		}
+		break;
+	case IEEE80211_PARAM_MAX_AGG_SIZE:
+		ic->ic_tx_max_ampdu_size = value;
+		break;
+	case IEEE80211_PARAM_RX_AGG_TIMEOUT:
+		ic->ic_rx_agg_timeout = value;
+		break;
+	case IEEE80211_PARAM_RESTRICT_WLAN_IP:
+		qw->restrict_wlan_ip = !!value;
+		break;
+	case IEEE80211_PARAM_OFF_CHAN_SUSPEND:
+		qdrv_hostlink_suspend_off_chan(qw, !!value);
+		break;
+	case IEEE80211_PARAM_CCA_FIXED:
+		ic->cca_fix_disable = !!value;
+		break;
+	case IEEE80211_PARAM_AUTO_CCA_ENABLE:
+		ic->auto_cca_enable = !!value;
+		break;
+	case IEEE80211_PARAM_BEACON_HANG_TIMEOUT:
+		ic->ic_bcn_hang_timeout = value;
+		break;
+	case IEEE80211_PARAM_VMODE:
+		qdrv_calcmd_set_tx_power(dev, value);
+		break;
+	case IEEE80211_PARAM_BB_DEAFNESS_WAR_EN:
+		ic->bb_deafness_war_disable = !!value;
+		break;
+	default:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"<0x%08x> (%d)\n", param, param);
+		break;
+	}
+
+	/* Make sure the data fits if it is provided */
+	if (data != NULL && len > sizeof(args->ni_data)) {
+		DBGPRINTF_E("Unable to transport %d bytes of data (max is %d)\n",
+			len, sizeof(args->ni_data));
+		return;
+	}
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate SETPARAM message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	/* Copy the values over */
+	args->ni_param = param;
+	if (param == IEEE80211_PARAM_MODE) {
+		args->ni_value = ic->ic_phymode;
+	} else {
+		args->ni_value = value;
+	}
+	args->ni_len = 0;
+	if (data != NULL && len > 0) {
+		memcpy(args->ni_data, data, len);
+		args->ni_len = len;
+		if (param == IEEE80211_PARAM_UPDATE_MU_GRP) {
+			/* place the sta's mac addr at the end of its group/pos arrays */
+			memcpy(&args->ni_data[len], &ni->ni_macaddr[0], IEEE80211_ADDR_LEN);
+			args->ni_len += IEEE80211_ADDR_LEN;
+		}
+	}
+
+	ioctl->ioctl_command = IOCTL_DEV_SETPARAMS;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/*
+ * Resolve a logical L2 external filter port (EMAC0 / EMAC1 / PCIe) to its
+ * TQE port number, verify the port exists in the board configuration, and
+ * push the selection to the MuC via a SETPARAM message.
+ *
+ * Returns 0 on success, -EINVAL for an unknown port, -ENODEV when the board
+ * config cannot be read, the port is not populated, or (PCIe) pcie0 is absent.
+ */
+static int qdrv_wlan_set_l2_ext_filter_port(struct ieee80211vap *vap, int port)
+{
+	struct net_device *pcie_dev;
+	int cfg;
+	int cfg_val;
+	int cfg_mask;
+	uint8_t tqe_port;
+
+	switch (port) {
+	case L2_EXT_FILTER_EMAC_0_PORT:
+		cfg = BOARD_CFG_EMAC0;
+		cfg_mask = EMAC_IN_USE;
+		tqe_port = TOPAZ_TQE_EMAC_0_PORT;
+		break;
+	case L2_EXT_FILTER_EMAC_1_PORT:
+		cfg = BOARD_CFG_EMAC1;
+		cfg_mask = EMAC_IN_USE;
+		tqe_port = TOPAZ_TQE_EMAC_1_PORT;
+		break;
+	case L2_EXT_FILTER_PCIE_PORT:
+		cfg = BOARD_CFG_PCIE;
+		cfg_mask = PCIE_IN_USE;
+		/*
+		 * PCIE TQE port is determined at runtime
+		 */
+		pcie_dev = dev_get_by_name(&init_net, "pcie0");
+		if (!pcie_dev) {
+			printk("QDRV: Error setting L2 external filter port: no pcie0 device\n");
+			return -ENODEV;
+		}
+		tqe_port = pcie_dev->if_port;
+		/* dev_get_by_name took a reference; drop it once if_port is read */
+		dev_put(pcie_dev);
+		break;
+	default:
+		printk("QDRV: Error setting L2 external filter port: port %d is invalid\n", port);
+		return -EINVAL;
+	}
+
+	if (get_board_config(cfg, &cfg_val) < 0) {
+		printk("QDRV: Error setting L2 external filter port: error getting board config\n");
+		return -ENODEV;
+	}
+
+	/* The selected interface must actually be wired up on this board */
+	if (!(cfg_val & cfg_mask)) {
+		printk("QDRV: Error setting L2 external filter port: no such port\n");
+		return -ENODEV;
+	}
+
+	/* Remember the choice globally and propagate it to the MuC */
+	g_l2_ext_filter_port = tqe_port;
+
+	qdrv_wlan_80211_setparam(vap->iv_bss, IEEE80211_PARAM_L2_EXT_FILTER_PORT,
+				 g_l2_ext_filter_port, NULL, 0);
+	return 0;
+}
+
+/*
+ * Enable or disable L2 external filtering.
+ *
+ * TOPAZ_TQE_NUM_PORTS is used as the "no port selected yet" sentinel for
+ * g_l2_ext_filter_port; the first enable therefore lazily selects
+ * L2_EXT_FILTER_DEF_PORT.  The resulting on/off state is pushed to the MuC.
+ * Returns 0 on success or the error from the port-selection helper.
+ */
+static int qdrv_wlan_set_l2_ext_filter(struct ieee80211vap *vap, int enable)
+{
+	int ret;
+
+	if (enable && (g_l2_ext_filter_port == TOPAZ_TQE_NUM_PORTS)) {
+		ret = qdrv_wlan_set_l2_ext_filter_port(vap, L2_EXT_FILTER_DEF_PORT);
+		if (ret < 0)
+			return ret;
+	}
+
+	g_l2_ext_filter = !!enable;
+
+	qdrv_wlan_80211_setparam(vap->iv_bss, IEEE80211_PARAM_L2_EXT_FILTER,
+				 g_l2_ext_filter, NULL, 0);
+	return 0;
+}
+
+/*
+ * Map the current TQE filter port back to its logical port identifier.
+ * The "not yet configured" sentinel (TOPAZ_TQE_NUM_PORTS) reports the
+ * default port; anything that is not EMAC0/EMAC1 is reported as PCIe.
+ */
+static int qdrv_wlan_get_l2_ext_filter_port(void)
+{
+	if (g_l2_ext_filter_port == TOPAZ_TQE_NUM_PORTS) {
+		return L2_EXT_FILTER_DEF_PORT;
+	} else if (g_l2_ext_filter_port == TOPAZ_TQE_EMAC_0_PORT) {
+		return L2_EXT_FILTER_EMAC_0_PORT;
+	} else if (g_l2_ext_filter_port == TOPAZ_TQE_EMAC_1_PORT) {
+		return L2_EXT_FILTER_EMAC_1_PORT;
+	} else {
+		return L2_EXT_FILTER_PCIE_PORT;
+	}
+}
+
+/*
+ * Bump the per-IP-protocol tx/rx packet counter for one packet.
+ * ICMPv4 and ICMPv6 share a single counter; unrecognized protocols are
+ * logged at NOTICE level and counted as "other".
+ */
+static __sram_text void
+qdrv_wlan_stats_prot_ip(struct qdrv_wlan *qw, uint8_t is_tx, uint8_t ip_proto)
+{
+	switch (ip_proto) {
+	case IPPROTO_UDP:
+		QDRV_STAT(qw, is_tx, prot_ip_udp);
+		break;
+	case IPPROTO_TCP:
+		QDRV_STAT(qw, is_tx, prot_ip_tcp);
+		break;
+	case IPPROTO_ICMP:
+	case IPPROTO_ICMPV6:
+		QDRV_STAT(qw, is_tx, prot_ip_icmp);
+		break;
+	case IPPROTO_IGMP:
+		QDRV_STAT(qw, is_tx, prot_ip_igmp);
+		break;
+	default:
+		DBGPRINTF(DBG_LL_NOTICE, is_tx ? QDRV_LF_PKT_TX : QDRV_LF_PKT_RX,
+			"%s ip pkt type %u\n",
+			is_tx ? "tx" : "rx", ip_proto);
+		QDRV_STAT(qw, is_tx, prot_ip_other);
+		break;
+	}
+}
+
+/*
+ * Bump the per-ethertype tx/rx packet counter for one packet, and for
+ * IPv4/IPv6 additionally the per-IP-protocol counter.
+ * ether_type is expected in network byte order (compared against
+ * __constant_htons values).  An ether_type of 0 is silently ignored —
+ * presumably the caller passes 0 for unclassified frames; confirm at call
+ * sites.  Note the IPv6 arm counts both prot_ipv6 and the inner protocol.
+ */
+__sram_text void
+qdrv_wlan_stats_prot(struct qdrv_wlan *qw, uint8_t is_tx, uint16_t ether_type, uint8_t ip_proto)
+{
+	switch (ether_type) {
+	case 0:
+		break;
+	case __constant_htons(ETH_P_IP):
+		qdrv_wlan_stats_prot_ip(qw, is_tx, ip_proto);
+		break;
+	case __constant_htons(ETH_P_IPV6):
+		QDRV_STAT(qw, is_tx, prot_ipv6);
+		qdrv_wlan_stats_prot_ip(qw, is_tx, ip_proto);
+		break;
+	case __constant_htons(ETH_P_ARP):
+		QDRV_STAT(qw, is_tx, prot_arp);
+		break;
+	case __constant_htons(ETH_P_PAE):
+		QDRV_STAT(qw, is_tx, prot_pae);
+		break;
+	default:
+		DBGPRINTF(DBG_LL_NOTICE, is_tx ? QDRV_LF_PKT_TX : QDRV_LF_PKT_RX,
+			"%s pkt type 0x%04x\n",
+			is_tx ? "tx" : "rx", ether_type);
+		QDRV_STAT(qw, is_tx, prot_other);
+		break;
+	}
+}
+
+/*
+ * This function performs proxy ARP for 3-address stations.  It is intended for use
+ * with HS 2.0 vaps, which do not support 4-address stations.
+ *
+ * data_start points at the ARP payload (struct ether_arp).  Gratuitous ARPs
+ * (sender IP == target IP) from an associated station update that node's
+ * learned IP address; ARP requests whose target IP maps to a known node are
+ * answered directly with arp_send() on the bridge device.
+ * Returns 1 when the frame was handled here (and presumably should not be
+ * forwarded further — confirm at caller), 0 otherwise.
+ */
+int qdrv_proxy_arp(struct ieee80211vap *iv,
+		struct qdrv_wlan *qw,
+		struct ieee80211_node *ni_rx,
+		uint8_t *data_start)
+{
+	struct ieee80211_node *ni_target;
+	struct ether_arp *arp = (struct ether_arp *)data_start;
+	/* ARP payload may be unaligned within the frame; use get_unaligned */
+	uint32_t s_ipaddr = get_unaligned((u32 *)&arp->arp_spa);
+	uint32_t t_ipaddr = get_unaligned((u32 *)&arp->arp_tpa);
+	int gratuitous_arp = (s_ipaddr == t_ipaddr);
+	uint8_t macaddr[IEEE80211_ADDR_LEN];
+
+	if (gratuitous_arp) {
+		/*
+		 * If the ARP announcement came from an associated station,
+		 * update the node's IP address.
+		 */
+		if (ni_rx && (arp->ea_hdr.ar_op == __constant_htons(ARPOP_REQUEST))
+				&& IEEE80211_ADDR_EQ(ni_rx->ni_macaddr, arp->arp_sha)) {
+			if (ni_rx->ni_ip_addr == t_ipaddr)
+				return 1;
+
+			/* IP moved: clear it from any node that previously owned it */
+			ni_target = ieee80211_find_node_by_ip_addr(iv, t_ipaddr);
+			if (ni_target) {
+				ni_target->ni_ip_addr = 0;
+				ieee80211_free_node(ni_target);
+			}
+			ni_rx->ni_ip_addr = t_ipaddr;
+		}
+
+		return 1;
+	}
+
+	if (arp->ea_hdr.ar_op == __constant_htons(ARPOP_REQUEST)) {
+		/* Never proxy for loopback/multicast/zeronet targets */
+		if (ipv4_is_loopback(t_ipaddr) || ipv4_is_multicast(t_ipaddr) ||
+				ipv4_is_zeronet(t_ipaddr)) {
+			return 0;
+		}
+
+		ni_target = ieee80211_find_node_by_ip_addr(iv, t_ipaddr);
+		if (ni_target) {
+			/* Copy the MAC before dropping the node reference */
+			IEEE80211_ADDR_COPY(macaddr, ni_target->ni_macaddr);
+			ieee80211_free_node(ni_target);
+			arp_send(ARPOP_REPLY, ETH_P_ARP, s_ipaddr, qw->br_dev, t_ipaddr,
+					arp->arp_sha, macaddr, arp->arp_sha);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_IPV6
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+/*
+ * Build an IPv6 Neighbour Advertisement skb, pre-4.7 kernel variant.
+ *
+ * Hand-assembles ethernet header + IPv6 header + ICMPv6 NA (optionally with
+ * a target address and a target link-layer-address option) in one skb
+ * allocated from the kernel's ndisc socket.  Returns the skb ready for
+ * dev_queue_xmit(), or NULL on allocation failure.
+ */
+static struct sk_buff * qdrv_build_neigh_adv_skb(struct net_device *dev,
+			const struct in6_addr *daddr, const struct in6_addr *saddr,
+			uint8_t *src_mac, uint8_t *dest_mac, struct icmp6hdr *icmp6h,
+			const struct in6_addr *target, int llinfo)
+{
+	struct net *net = dev_net(dev);
+	struct sock *sk = net->ipv6.ndisc_sk;
+	struct sk_buff *skb;
+	struct icmp6hdr *hdr;
+	struct ether_header *eh;
+	uint8_t *opt;
+	int len;
+	int err;
+
+	/* ICMPv6 payload length: header, optional target, optional LL option */
+	len = sizeof(struct icmp6hdr) + (target ? sizeof(*target) : 0);
+	if (llinfo) {
+		len += NDISC_OPT_SPACE(IEEE80211_ADDR_LEN + ndisc_addr_option_pad(dev->type));
+		/* type(1byte) + len(1byte) + Dev addr len + pad */
+	}
+
+	skb = sock_alloc_send_skb(sk, (MAX_HEADER + sizeof(struct ipv6hdr) +
+				len + LL_ALLOCATED_SPACE(dev)), 1, &err);
+	if (!skb) {
+		DBGPRINTF_LIMIT_E("%s: failed to allocate an skb, err=%d\n",
+							__func__, err);
+		return NULL;
+	}
+
+	skb->dev = dev;
+	skb->priority = WME_AC_VO;
+
+	/* Write the ethernet header first, at the current tail */
+	eh = (struct ether_header *)skb->tail;
+	IEEE80211_ADDR_COPY(eh->ether_dhost, dest_mac);
+	IEEE80211_ADDR_COPY(eh->ether_shost, src_mac);
+	eh->ether_type = htons(ETH_P_IPV6);
+	skb_put(skb, sizeof(*eh));
+	/* Temporarily advance data past the ethernet header so ip6_nd_hdr()
+	 * lays the IPv6 header directly after it, then restore data */
+	skb->data += sizeof(*eh);
+
+	ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
+	skb->data -= sizeof(*eh);
+
+	skb->transport_header = skb->tail;
+	skb_put(skb, len);
+
+	hdr = (struct icmp6hdr *)skb_transport_header(skb);
+	memcpy(hdr, icmp6h, sizeof(*hdr));
+
+	/* Append target address and link-layer option after the ICMPv6 header */
+	opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
+	if (target) {
+		ipv6_addr_copy((struct in6_addr *)opt, target);
+		opt += sizeof(*target);
+	}
+
+	if (llinfo) {
+		ndisc_fill_addr_option(opt, llinfo, src_mac,
+					IEEE80211_ADDR_LEN, dev->type);
+	}
+
+	/* Checksum computed last, over the fully assembled ICMPv6 payload */
+	hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len,
+					IPPROTO_ICMPV6,
+					csum_partial(hdr,len, 0));
+
+	return skb;
+}
+
+#else
+
+/*
+ * Build an IPv6 Neighbour Advertisement skb, kernel >= 4.7 variant.
+ *
+ * Uses the modern ndisc helpers (ndisc_alloc_skb / ndisc_fill_addr_option /
+ * ip6_nd_hdr) and then pushes an ethernet header in front.  The NA always
+ * carries the target address plus a target link-layer-address option.
+ * Returns the skb ready for dev_queue_xmit(), or NULL on allocation failure.
+ */
+static struct sk_buff *qdrv_build_neigh_adv_skb(struct net_device *dev,
+			const struct in6_addr *daddr, const struct in6_addr *target,
+			uint8_t *src_mac, uint8_t *dest_mac, struct icmp6hdr *icmp6h)
+{
+	struct sk_buff *skb;
+	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
+	struct ether_header *eh;
+	struct nd_msg *msg;
+	int len = sizeof(struct nd_msg) + NDISC_OPT_SPACE(dev->addr_len);
+
+	skb = ndisc_alloc_skb(dev, len);
+
+	if (!skb)
+		return NULL;
+
+	skb->priority = QTN_TID_VO;
+
+	msg = (struct nd_msg *)skb_put(skb, sizeof(*msg));
+	memcpy(&msg->icmph, icmp6h, sizeof(msg->icmph));
+	msg->target = *target;
+
+	ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, src_mac);
+
+	/* NB: source address for the checksum is the NA target itself */
+	msg->icmph.icmp6_cksum = csum_ipv6_magic(target, daddr, len,
+					IPPROTO_ICMPV6,
+					csum_partial(msg, len, 0));
+
+	ip6_nd_hdr(skb, target, daddr, inet6_sk(sk)->hop_limit, len);
+
+	eh = (struct ether_header *)skb_push(skb, sizeof(*eh));
+	IEEE80211_ADDR_COPY(eh->ether_dhost, dest_mac);
+	IEEE80211_ADDR_COPY(eh->ether_shost, src_mac);
+	eh->ether_type = htons(ETH_P_IPV6);
+
+	return skb;
+}
+
+#endif
+
+/*
+ * Build and transmit an IPv6 Neighbour Advertisement on dev.
+ *
+ * router/solicited/override set the corresponding NA flag bits; llinfo
+ * requests a target link-layer-address option (pre-4.7 builds only — the
+ * 4.7+ builder always includes it).  Returns 0 on success, 1 if the skb
+ * could not be built.
+ */
+static int qdrv_send_neigh_adv(struct net_device *dev, const struct in6_addr *daddr,
+			const struct in6_addr *solicited_addr, uint8_t *src_mac,
+			uint8_t *dest_mac, int router, int solicited, int override, int llinfo)
+{
+	struct sk_buff *skb;
+	struct icmp6hdr icmp6h = {
+		.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
+	};
+
+	icmp6h.icmp6_router = router;
+	icmp6h.icmp6_solicited = solicited;
+	icmp6h.icmp6_override = override;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	skb = qdrv_build_neigh_adv_skb(dev, daddr, solicited_addr, src_mac, dest_mac, &icmp6h);
+#else
+	skb = qdrv_build_neigh_adv_skb(dev, daddr, solicited_addr, src_mac, dest_mac,
+			&icmp6h, solicited_addr, llinfo ? ND_OPT_TARGET_LL_ADDR : 0);
+#endif
+
+	if (!skb)
+		return 1;
+
+	dev_queue_xmit(skb);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_IPV6
+/*
+ * Proxy handling for an IPv6 Neighbour Solicitation.
+ *
+ * Two paths:
+ *  - Duplicate Address Detection (unspecified source): defend an address
+ *    already learned for another station by answering with an NA, or learn
+ *    the link-local address for the soliciting station.
+ *  - Normal NS: if the target maps to an associated node, answer with a
+ *    proxied NA on the bridge device.
+ * Returns 1 when the solicitation was consumed here, 0 when the caller
+ * should keep processing it.
+ */
+static int qdrv_wlan_handle_neigh_sol(struct ieee80211vap *vap, struct qdrv_wlan *qw, void *proto_data,
+			uint8_t *data_start, struct ether_header *eh, uint8_t in_tx)
+{
+	struct ipv6hdr *ipv6 = (struct ipv6hdr *)data_start;
+	struct nd_msg *msg = (struct nd_msg *)proto_data;
+	struct in6_addr *saddr = &ipv6->saddr;
+	struct in6_addr *daddr = &ipv6->daddr;
+	struct in6_addr target;
+	/* DAD solicitations carry the unspecified (::) source address */
+	int dup_addr_detect = ipv6_addr_any(saddr);
+
+	const struct in6_addr qdrv_in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+	uint8_t all_node_mc_mac_addr[] = {0x33, 0x33, 0x00, 0x00, 0x00, 0x01};
+	struct ieee80211_node_table *nt = &qw->ic.ic_sta;
+	struct ieee80211_node *ni;
+	uint8_t *dest_mac;
+	uint8_t src_mac[IEEE80211_ADDR_LEN];
+	int llinfo;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	/* NOTE(review): the first two copies write saddr/daddr onto
+	 * themselves (saddr == &ipv6->saddr) — presumably kept for unaligned
+	 * access handling; confirm against iputil_in6_addr_copy(). */
+	iputil_in6_addr_copy(saddr, &ipv6->saddr);
+	iputil_in6_addr_copy(daddr, &ipv6->daddr);
+	iputil_in6_addr_copy(&target, &msg->target);
+	dup_addr_detect = ipv6_addr_any(saddr);
+
+	if (!iputil_ipv6_is_neigh_sol_msg(dup_addr_detect, &target, daddr))
+		return 1;
+#else
+	if (!iputil_ipv6_is_neigh_sol_msg(dup_addr_detect, msg, ipv6))
+		return 1;
+#endif
+
+	/* All proxied NAs are sent via the bridge device; nothing to do without it */
+	if (!qw->br_dev)
+		return 1;
+
+	if (dup_addr_detect) {
+		/* Duplicate address detection */
+		ni = ieee80211_find_node_by_ipv6_addr(vap, &msg->target);
+		if (ni && !IEEE80211_ADDR_EQ(ni->ni_macaddr, eh->ether_shost)) {
+			/* Address is owned by a different station: defend it */
+			if (in_tx) {
+				/* send multicast neighbour advertisement frame to back end only */
+				dest_mac = all_node_mc_mac_addr;
+			} else {
+				/* send unicast neighbour advertisement frame to STA */
+				dest_mac = eh->ether_shost;
+			}
+			IEEE80211_ADDR_COPY(src_mac, ni->ni_macaddr);
+			ieee80211_free_node(ni);
+			qdrv_send_neigh_adv(qw->br_dev, &qdrv_in6addr_linklocal_allnodes,
+					&msg->target, src_mac, dest_mac,
+					false, false, true, true);
+			return 1;
+		} else if (ni) {
+			/* Same station re-probing its own address: nothing to defend */
+			ieee80211_free_node(ni);
+			return 1;
+		}
+
+		/* Unknown address: learn it as the sender's link-local address */
+		ni = ieee80211_find_node(nt, eh->ether_shost);
+		if (ni && (IEEE80211_AID(ni->ni_associd) != 0)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			ni->ipv6_llocal = target;
+#else
+			ipv6_addr_copy(&ni->ipv6_llocal, &msg->target);
+#endif
+			ieee80211_free_node(ni);
+		} else if (ni) {
+			ieee80211_free_node(ni);
+		}
+
+		return 1;
+	}
+
+	/* Normal NS: answer on behalf of an associated station we know about */
+	ni = ieee80211_find_node_by_ipv6_addr(vap, &msg->target);
+	if (ni && IEEE80211_AID(ni->ni_associd) != 0) {
+		IEEE80211_ADDR_COPY(src_mac, ni->ni_macaddr);
+		ieee80211_free_node(ni);
+		/* Multicast-addressed NS gets a solicited NA with LL option */
+		llinfo = ipv6_addr_is_multicast(daddr);
+		qdrv_send_neigh_adv(qw->br_dev, saddr, &msg->target, src_mac,
+				eh->ether_shost, false, true, llinfo, llinfo);
+		return 1;
+	} else if (ni) {
+		ieee80211_free_node(ni);
+		return 1;
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_IPV6
+/*
+ * Entry point for IPv6 neighbour discovery filtering on the data path.
+ *
+ * Only ICMPv6 NS/NA messages are examined; everything else returns 0
+ * (not handled).  Returns 1 when the frame was consumed — presumably the
+ * caller then drops it rather than forwarding; confirm at call sites.
+ */
+int qdrv_wlan_handle_neigh_msg(struct ieee80211vap *vap, struct qdrv_wlan *qw,
+			uint8_t *data_start, uint8_t in_tx, struct sk_buff *skb,
+			uint8_t ip_proto, void *proto_data)
+{
+	struct ipv6hdr *ipv6;
+	struct icmp6hdr *icmpv6;
+	struct iphdr *p_iphdr = (struct iphdr *)data_start;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+
+	if (ip_proto == IPPROTO_ICMPV6) {
+		ipv6 = (struct ipv6hdr *)data_start;
+		icmpv6 = (struct icmp6hdr *)proto_data;
+
+		switch(icmpv6->icmp6_type) {
+		case NDISC_NEIGHBOUR_ADVERTISEMENT:
+		case NDISC_NEIGHBOUR_SOLICITATION:
+
+			/* Sanity-check the ND message before acting on it */
+			if (!iputil_ipv6_is_neigh_msg(ipv6, icmpv6))
+				return 1;
+
+			if (icmpv6->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+				/* Verify unsolicited neighbour advertisement */
+				if (iputil_ipv6_is_ll_all_nodes_mc(eh->ether_dhost, p_iphdr) &&
+						!icmpv6->icmp6_solicited && in_tx) {
+					return 1;
+				}
+			}
+
+			if (icmpv6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+				/* Proxy NS locally; on tx the frame is consumed either way */
+				if (qdrv_wlan_handle_neigh_sol(vap, qw, proto_data,
+						data_start, eh, in_tx) || in_tx)
+					return 1;
+			}
+			break;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Fill ps with the most recent PHY statistics reported by the MuC.
+ * all_stats selects the full set; returns the result of the MuC query.
+ */
+static int qdrv_wlan_80211_get_phy_stats(struct net_device *dev,
+					struct ieee80211com *ic,
+					struct ieee80211_phy_stats *ps,
+					uint8_t all_stats)
+{
+	struct qdrv_wlan *qw;
+	struct qdrv_mac *mac;
+
+	qw = container_of(ic, struct qdrv_wlan, ic);
+	mac = qw->mac;
+
+	return qdrv_muc_get_last_phy_stats(mac, ic, ps, all_stats);
+}
+
+/* Trivial getters: each returns one field of the node's ieee80211com. */
+
+/* LDPC coding enabled? */
+static int qdrv_wlan_80211_get_ldpc(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	return (ic->ldpc_enabled);
+}
+
+/* STBC enabled? */
+static int qdrv_wlan_80211_get_stbc(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	return (ic->stbc_enabled);
+}
+
+/* RTS/CTS protection setting */
+static int qdrv_wlan_80211_get_rts_cts(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	return (ic->rts_cts_prot);
+}
+
+/* Peer RTS mode */
+static int qdrv_wlan_80211_get_peer_rts_mode(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	return (ic->ic_peer_rts_mode);
+}
+
+/* 11n 40MHz-only mode flag */
+static int qdrv_wlan_80211_get_11n40_only_mode(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	return (ic->ic_11n_40_only_mode);
+}
+
+/* Legacy (non-HT) retry limit */
+static int qdrv_wlan_80211_get_legacy_retry_limit(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	return (ic->ic_legacy_retry_limit);
+}
+
+/* Configured retry count */
+static int qdrv_wlan_80211_get_retry_count(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	return (ic->ic_retry_count);
+}
+
+/* RX aggregation timeout */
+static int qdrv_wlan_80211_get_rx_agg_timeout(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	return ic->ic_rx_agg_timeout;
+}
+
+/*
+ * Fill cs with the most recent CCA statistics reported by the MuC.
+ * Returns the result of the MuC query.
+ */
+static int qdrv_wlan_80211_get_cca_stats(struct net_device *dev,
+					struct ieee80211com *ic,
+					struct qtn_exp_cca_stats *cs)
+{
+	struct qdrv_wlan *qw;
+	struct qdrv_mac *mac;
+
+	qw = container_of(ic, struct qdrv_wlan, ic);
+	mac = qw->mac;
+
+	return qdrv_muc_get_last_cca_stats(mac, ic, cs);
+}
+
+/*
+ * Return 1 when the RF front end is in its low-gain configuration,
+ * judged from the mixer-load field of RF register 166 and the PGA field
+ * of RF register 168; 0 otherwise.
+ * NOTE(review): the helper macros below stay defined for the rest of the
+ * translation unit (no #undef).
+ */
+static int qdrv_is_gain_low(void)
+{
+
+	struct device *dev = qdrv_soc_get_addr_dev();
+	uint32_t mixval;
+	uint32_t pgaval;
+	uint8_t lowgain_mixer;
+	uint8_t lowgain_pga;
+#define RFMIX_LOAD_S	14
+#define RFMIX_LOAD_M	0x1c000
+#define RFMIX_PGA_S	2
+#define RFMIX_PGA_M	0xc
+
+	mixval = qdrv_command_read_rf_reg(dev, 166);
+	pgaval = qdrv_command_read_rf_reg(dev, 168);
+
+	/* Low gain: mixer load field all-ones (0x7) and PGA field zero */
+	lowgain_mixer = (((mixval & RFMIX_LOAD_M) >> RFMIX_LOAD_S) == 0x7);
+	lowgain_pga = (((pgaval & RFMIX_PGA_M) >> RFMIX_PGA_S) == 0);
+
+	if (lowgain_mixer && lowgain_pga)
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * Derive a 0..10 congestion index from the MuC's CCA "fat" percentage
+ * (rounded to the nearest 10%).  The MuC tx stats block is ioremapped
+ * lazily on first use and cached in the pktlogger.  Returns -EFAULT when
+ * the stats are unavailable or the value is out of range.
+ */
+static int
+qdrv_wlan_get_congestion_index(struct qdrv_wlan *qw)
+{
+#define QDRV_CONGEST_IX_ROUNDED(_pc)	(((_pc) + 5) / 10)
+	struct qtn_stats_log *iw_stats_log = qw->mac->mac_sys_stats;
+	struct muc_tx_stats *tx_stats = NULL;
+	int congest_idx;
+
+	if (qw->pktlogger.stats_uc_tx_ptr == NULL && iw_stats_log != NULL) {
+		qw->pktlogger.stats_uc_tx_ptr =
+			ioremap_nocache(muc_to_lhost((u32)iw_stats_log->tx_muc_stats),
+							sizeof(struct muc_tx_stats));
+	}
+
+	tx_stats = (struct muc_tx_stats *)qw->pktlogger.stats_uc_tx_ptr;
+	if (!tx_stats)
+		return -EFAULT;
+
+	congest_idx = QDRV_CONGEST_IX_ROUNDED(tx_stats->cca_fat);
+	if ((congest_idx < 0) || (congest_idx > 10))
+		return -EFAULT;
+
+	return congest_idx;
+}
+
+/*
+ * Return the MuC's TKIP Michael MIC error count.  The MuC rx stats block
+ * is ioremapped lazily on first use and cached in the pktlogger; 0 is
+ * returned when the stats are unavailable.
+ */
+static uint32_t qdrv_wlan_get_michael_errcnt(struct qdrv_wlan *qw)
+{
+	struct qtn_stats_log *iw_stats_log = qw->mac->mac_sys_stats;
+	struct muc_rx_stats *rx_stats = NULL;
+
+	if (qw->pktlogger.stats_uc_rx_ptr == NULL && iw_stats_log != NULL) {
+		qw->pktlogger.stats_uc_rx_ptr =
+				ioremap_nocache(muc_to_lhost((u32)iw_stats_log->rx_muc_stats),
+						sizeof(struct muc_rx_stats));
+	}
+
+	rx_stats = (struct muc_rx_stats *)qw->pktlogger.stats_uc_rx_ptr;
+	if (!rx_stats)
+		return 0;
+
+	return rx_stats->rx_tkip_mic_err;
+}
+
+/*
+ * Fetch the MU grouping table from the MuC.
+ *
+ * Sends IOCTL_DEV_GET_MU_GRP with a DMA-coherent buffer sized for
+ * IEEE80211_MU_GRP_NUM_MAX entries and, on success, copies the table into
+ * mu_grp_tbl_cpy.  On allocation failure the destination is left untouched.
+ */
+static void qdrv_get_mu_grp(struct ieee80211_node *ni,
+	struct qtn_mu_grp_args *mu_grp_tbl_cpy)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	dma_addr_t dma;
+
+	struct qtn_mu_grp_args *mu_grp_tbl;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(mu_grp_tbl = qdrv_hostlink_alloc_coherent(NULL,
+				sizeof(*mu_grp_tbl)*IEEE80211_MU_GRP_NUM_MAX,
+				&dma, GFP_DMA | GFP_ATOMIC))) {
+		/* Fixed copy-pasted "DISASSOC" text: name the actual request */
+		DBGPRINTF_E("Failed to allocate GET_MU_GRP message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(1)ioctl %p dma ptr %p\n", ioctl, (void *)mu_grp_tbl);
+
+	ioctl->ioctl_command = IOCTL_DEV_GET_MU_GRP;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = dma;
+
+	/* Copy out only if the MuC answered the ioctl */
+	if (vnet_send_ioctl(qv, ioctl)) {
+		memcpy(mu_grp_tbl_cpy, mu_grp_tbl, sizeof(*mu_grp_tbl)*IEEE80211_MU_GRP_NUM_MAX);
+	}
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*mu_grp_tbl)*IEEE80211_MU_GRP_NUM_MAX,
+				mu_grp_tbl, dma);
+}
+
+/*
+ * Query the MuC for the MU-MIMO enable state.
+ * Returns the value reported via IOCTL_DEV_GET_MU_ENABLE, or -1 on
+ * allocation or ioctl failure.
+ */
+static int32_t qdrv_get_mu_enable(struct ieee80211_node *ni)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	dma_addr_t dma;
+
+	int32_t *mu_enable_ptr;
+	int32_t mu_enable = -1;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(mu_enable_ptr = qdrv_hostlink_alloc_coherent(NULL,
+				sizeof(*mu_enable_ptr),
+				&dma, GFP_DMA | GFP_ATOMIC))) {
+		/* Fixed copy-pasted "DISASSOC" text: name the actual request */
+		DBGPRINTF_E("Failed to allocate GET_MU_ENABLE message\n");
+		vnet_free_ioctl(ioctl);
+		goto exit;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(1)ioctl %p dma ptr %p\n", ioctl, (void *)mu_enable_ptr);
+
+	ioctl->ioctl_command = IOCTL_DEV_GET_MU_ENABLE;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = dma;
+
+	/* Read back only if the MuC answered the ioctl */
+	if (vnet_send_ioctl(qv, ioctl)) {
+		mu_enable = *mu_enable_ptr;
+	}
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*mu_enable_ptr), mu_enable_ptr, dma);
+exit:
+	return mu_enable;
+}
+
+/*
+ * Query the MuC for the precoding-enable state of MU group grp.
+ * Returns the value reported via IOCTL_DEV_GET_PRECODE_ENABLE, or -1 on
+ * allocation or ioctl failure.
+ */
+static int32_t qdrv_get_mu_grp_qmat(struct ieee80211_node *ni, uint8_t grp)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	dma_addr_t dma;
+
+	int32_t *prec_enable_ptr;
+	int32_t prec_enable = -1;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(prec_enable_ptr = qdrv_hostlink_alloc_coherent(NULL,
+				sizeof(*prec_enable_ptr),
+				&dma, GFP_DMA | GFP_ATOMIC))) {
+		/* Fixed copy-pasted "DISASSOC" text: name the actual request */
+		DBGPRINTF_E("Failed to allocate GET_PRECODE_ENABLE message\n");
+		vnet_free_ioctl(ioctl);
+		goto exit;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(1)ioctl %p dma ptr %p\n", ioctl, (void *)prec_enable_ptr);
+
+	ioctl->ioctl_command = IOCTL_DEV_GET_PRECODE_ENABLE;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = grp;
+	ioctl->ioctl_argp = dma;
+
+	/* Read back only if the MuC answered the ioctl */
+	if (vnet_send_ioctl(qv, ioctl)) {
+		prec_enable = *prec_enable_ptr;
+	}
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*prec_enable_ptr), prec_enable_ptr, dma);
+exit:
+	return prec_enable;
+}
+
+/*
+ * Query the MuC for the MU equalizer-use state.
+ * Returns the value reported via IOCTL_DEV_GET_MU_USE_EQ, or -1 on
+ * allocation or ioctl failure.
+ */
+static int32_t qdrv_get_mu_use_eq(struct ieee80211_node *ni)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	dma_addr_t dma;
+
+	int32_t *eq_enable_ptr;
+	int32_t eq_enable = -1;
+
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(eq_enable_ptr = qdrv_hostlink_alloc_coherent(NULL,
+				sizeof(*eq_enable_ptr),
+				&dma, GFP_DMA | GFP_ATOMIC))) {
+		/* Fixed copy-pasted "DISASSOC" text: name the actual request */
+		DBGPRINTF_E("Failed to allocate GET_MU_USE_EQ message\n");
+		vnet_free_ioctl(ioctl);
+		goto exit;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN, "(1)ioctl %p dma ptr %p\n", ioctl, (void *)eq_enable_ptr);
+
+	ioctl->ioctl_command = IOCTL_DEV_GET_MU_USE_EQ;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = dma;
+
+	/* Read back only if the MuC answered the ioctl */
+	if (vnet_send_ioctl(qv, ioctl)) {
+		eq_enable = *eq_enable_ptr;
+	}
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(*eq_enable_ptr), eq_enable_ptr, dma);
+exit:
+	return eq_enable;
+}
+
+/*
+ * Handler for IEEE80211_PARAM_* get requests.
+ *
+ * Writes the result into *value (or into data for table-valued params such
+ * as IEEE80211_PARAM_GET_MU_GRP).  Always returns 0; unknown params are
+ * only logged.
+ *
+ * Fixes vs. original: the declarations of keep_alive_cnt/val were after the
+ * first statement (C90 / -Wdeclaration-after-statement violation); the lazy
+ * ioremap dereferenced iw_stats_log without the NULL guard used by the
+ * sibling stats helpers; GET_CCA_STATS dereferenced ic_muc_tx_stats without
+ * checking that the mapping succeeded.
+ */
+static int qdrv_wlan_80211_getparam(struct ieee80211_node *ni, int param,
+	int *value, unsigned char *data, int *len)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct qdrv_wlan *qw = qv->parent;
+	struct ieee80211com *ic = &qw->ic;
+	struct qtn_stats_log *iw_stats_log = qw->mac->mac_sys_stats;
+	/* Counter persists across calls; used by IEEE80211_PARAM_EP_STATUS */
+	static uint32_t keep_alive_cnt;
+	uint32_t val;
+
+	/* Lazily map the MuC tx stats, guarding iw_stats_log as the other
+	 * stats helpers in this file do */
+	if (!ic->ic_muc_tx_stats && iw_stats_log != NULL) {
+		ic->ic_muc_tx_stats = (struct muc_tx_stats *) ioremap_nocache(
+				muc_to_lhost((u32)iw_stats_log->tx_muc_stats),
+				sizeof(struct muc_tx_stats));
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	switch (param) {
+	case IEEE80211_PARAM_TXBF_CTRL:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_TXBF_CTRL (%d)\n", param);
+		qdrv_txbf_config_get(qw, &val);
+		*value = val;
+		break;
+	case IEEE80211_PARAM_TXBF_PERIOD:
+		*value = ic->ic_txbf_period;
+		break;
+	case IEEE80211_PARAM_GET_RFCHIP_ID:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_GET_RFCHIP_ID (%d)\n", param);
+		*value = qw->rf_chipid;
+		break;
+	case IEEE80211_PARAM_GET_RFCHIP_VERID:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_GET_RFCHIP_VERID (%d)\n", param);
+		*value = qw->rf_chip_verid;
+		break;
+	case IEEE80211_PARAM_BW_SEL_MUC:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_BW_SEL_MUC (%d)\n", param);
+		*value = qdrv_wlan_80211_get_cap_bw(ni->ni_ic);
+		break;
+	case IEEE80211_PARAM_LDPC:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_LDPC (%d)\n", param);
+		*value = qdrv_wlan_80211_get_ldpc(ni);
+		break;
+	case IEEE80211_PARAM_STBC:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_STBC (%d)\n", param);
+		*value = qdrv_wlan_80211_get_stbc(ni);
+		break;
+	case IEEE80211_PARAM_RTS_CTS:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_RTS_CTS (%d)\n", param);
+		*value = qdrv_wlan_80211_get_rts_cts(ni);
+		break;
+	case IEEE80211_PARAM_TX_QOS_SCHED:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_TX_QOS_SCHED (%d)\n", param);
+		*value = ni->ni_ic->ic_tx_qos_sched;
+		break;
+	case IEEE80211_PARAM_PEER_RTS_MODE:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_PEER_RTS_MODE (%d)\n", param);
+		*value = qdrv_wlan_80211_get_peer_rts_mode(ni);
+		break;
+	case IEEE80211_PARAM_DYN_WMM:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_DYN_WMM (%d)\n", param);
+		*value = ic->ic_dyn_wmm;
+		break;
+	case IEEE80211_PARAM_11N_40_ONLY_MODE:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_11N_40_ONLY_MODE (%d)\n", param);
+		*value = qdrv_wlan_80211_get_11n40_only_mode(ni);
+		break;
+	case IEEE80211_PARAM_MAX_MGMT_FRAMES:
+		*value = qw->tx_if.txdesc_cnt[QDRV_TXDESC_MGMT];
+		break;
+	case IEEE80211_PARAM_MCS_ODD_EVEN:
+		*value = qw->mcs_odd_even;
+		break;
+	case IEEE80211_PARAM_RESTRICTED_MODE:
+		*value = qw->tx_restrict;
+		break;
+	case IEEE80211_PARAM_RESTRICT_RTS:
+		*value = qw->tx_restrict_rts;
+		break;
+	case IEEE80211_PARAM_RESTRICT_LIMIT:
+		*value = qw->tx_restrict_limit;
+		break;
+	case IEEE80211_PARAM_RESTRICT_RATE:
+		*value = qw->tx_restrict_rate;
+		break;
+	case IEEE80211_PARAM_SWRETRY_AGG_MAX:
+		*value = qw->tx_swretry_agg_max;
+		break;
+	case IEEE80211_PARAM_SWRETRY_NOAGG_MAX:
+		*value = qw->tx_swretry_noagg_max;
+		break;
+	case IEEE80211_PARAM_SWRETRY_SUSPEND_XMIT:
+		*value = qw->tx_swretry_suspend_xmit;
+		break;
+	case IEEE80211_PARAM_RX_AGG_TIMEOUT:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"IEEE80211_PARAM_RX_AGG_TIMEOUT (%d)\n", param);
+		*value = qdrv_wlan_80211_get_rx_agg_timeout(ni);
+		break;
+	case IEEE80211_PARAM_CONFIG_TXPOWER:
+		*value = qdrv_is_gain_low();
+		break;
+	case IEEE80211_PARAM_LEGACY_RETRY_LIMIT:
+		*value = qdrv_wlan_80211_get_legacy_retry_limit(ni);
+		break;
+	case IEEE80211_PARAM_MIMOMODE:
+		*value = qw->tx_mimomode;
+		break;
+	case IEEE80211_PARAM_SHORT_RETRY_LIMIT:
+	case IEEE80211_PARAM_LONG_RETRY_LIMIT:
+		/* Just return max software retry for aggregation */
+		*value = QTN_TX_SW_ATTEMPTS_AGG_MAX;
+		break;
+	case IEEE80211_PARAM_RETRY_COUNT:
+		*value = qdrv_wlan_80211_get_retry_count(ni);
+		break;
+	case IEEE80211_PARAM_BR_IP_ADDR:
+		qdrv_get_br_ipaddr(qw, (__be32 *)value);
+		break;
+	case IEEE80211_PARAM_CACSTATUS:
+		*value = (qw->sm_stats.sm_state & QDRV_WLAN_SM_STATE_CAC_ACTIVE) ? 1 : 0;
+		break;
+	case IEEE80211_PARAM_CARRIER_ID:
+		*value = g_carrier_id;
+		break;
+	case IEEE80211_PARAM_TX_QUEUING_ALG:
+		*value = qw->tx_sch_shared_data->queuing_alg;
+		break;
+	case IEEE80211_PARAM_MODE:
+		*value = qdrv_wlan_80211_get_11ac_mode(ic);
+		break;
+	case IEEE80211_PARAM_CONGEST_IDX:
+		*value = qdrv_wlan_get_congestion_index(qw);
+		break;
+	case IEEE80211_PARAM_MICHAEL_ERR_CNT:
+		*value = qdrv_wlan_get_michael_errcnt(qw);
+		break;
+	case IEEE80211_PARAM_MAX_AGG_SIZE:
+		*value = ic->ic_tx_max_ampdu_size;
+		break;
+	case IEEE80211_PARAM_GET_MU_GRP:
+		/* Table result: written into data, not *value */
+		qdrv_get_mu_grp(ni, (void*)data);
+		break;
+	case IEEE80211_PARAM_MU_ENABLE:
+		*value = qdrv_get_mu_enable(ni);
+		break;
+	case IEEE80211_PARAM_GET_MU_GRP_QMAT:
+		/* Group index is packed in the upper 16 bits of the input value */
+		*value = qdrv_get_mu_grp_qmat(ni, (*value) >> 16);
+		break;
+	case IEEE80211_PARAM_MU_USE_EQ:
+		*value = qdrv_get_mu_use_eq(ni);
+		break;
+	case IEEE80211_PARAM_RESTRICT_WLAN_IP:
+		*value = qw->restrict_wlan_ip;
+		break;
+	case IEEE80211_PARAM_EP_STATUS:
+		*value = keep_alive_cnt++;
+		break;
+	case IEEE80211_PARAM_CCA_FIXED:
+		*value = ic->cca_fix_disable;
+		break;
+	case IEEE80211_PARAM_AUTO_CCA_ENABLE:
+		*value = ic->auto_cca_enable;
+		break;
+	case IEEE80211_IOCTL_GETKEY:
+		if (ni->ni_vap->iv_bss->ni_rsn.rsn_mcastcipher ==
+					IEEE80211_CIPHER_AES_CCM)
+			*value = qw->tx_stats.tx_copy_mc_enc;
+		else
+			/* In case of WEP rsc value is '0'*/
+			*value = 0;
+		break;
+	case IEEE80211_PARAM_GET_CCA_STATS:
+		/* 4-byte value field can accomodate more CCA stats if required.
+		 * Guard against a failed/absent MuC tx stats mapping. */
+		*value = ic->ic_muc_tx_stats ? ic->ic_muc_tx_stats->cca_sec40 : 0;
+		break;
+	case IEEE80211_PARAM_BEACON_HANG_TIMEOUT:
+		*value = ic->ic_bcn_hang_timeout;
+		break;
+	case IEEE80211_PARAM_BB_DEAFNESS_WAR_EN:
+		*value = !!ic->bb_deafness_war_disable;
+		break;
+	default:
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_WLAN,
+			"<0x%08x> (%d)\n", param, param);
+		break;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+	return 0;
+}
+
+/*
+ * Fill in wireless-extensions statistics (iw_statistics, as reported by
+ * iwconfig) from the MuC statistics log in shared memory.
+ *
+ * If the MuC has not published a stats buffer yet, the quality fields are
+ * zeroed and marked IW_QUAL_ALL_INVALID.  Otherwise the most recently
+ * completed log buffer (the one before curr_buff) is used.
+ */
+void qdrv_wlan_80211_stats(struct ieee80211com *ic, struct iw_statistics *is)
+{
+	struct qdrv_wlan *qw;
+	struct qdrv_mac *mac;
+	struct qtn_stats_log *iw_stats_log;
+	int curr_index;
+	int rssi;	/* tenths of a dBm */
+	int noise;	/* tenths of a dBm */
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	qw = container_of(ic, struct qdrv_wlan, ic);
+	mac = qw->mac;
+
+	/* Get the data from MuC */
+	iw_stats_log = (struct qtn_stats_log *)mac->mac_sys_stats;
+
+	if (iw_stats_log == NULL) {
+		/* No stats available from MuC, mark all as invalid */
+		is->qual.qual  = 0;
+		is->qual.noise = 0;
+		is->qual.level = 0;
+		is->qual.updated = IW_QUAL_ALL_INVALID;
+		return;
+	}
+
+	curr_index = iw_stats_log->curr_buff;
+
+	/* Take the previous value; the buffer before curr_buff is complete */
+	curr_index = (curr_index - 1 + NUM_LOG_BUFFS)%NUM_LOG_BUFFS;
+
+	/* Collect error stats (accumulated, not overwritten) */
+	is->discard.misc += iw_stats_log->stat_buffs[curr_index].rx_phy_stats.cnt_mac_crc;
+	is->discard.retries += iw_stats_log->stat_buffs[curr_index].tx_phy_stats.num_retries;
+
+	/*
+	 * Collect PHY Stats
+	 *
+	 * The RSSI values reported in the TX/RX descriptors in the driver are the SNR
+	 * expressed in dBm. Thus 'rssi' is signal level above the noise floor in dBm.
+	 *
+	 * Noise is measured in dBm and is negative unless there is an unimaginable
+	 * level of RF noise.
+	 *
+	 * The signal level is noise + rssi.
+	 *
+	 * Note that the iw_quality values are 1 byte, and can be signed, unsigned or
+	 * negative depending on context.
+	 *
+	 * Note iwconfig's quality parameter is a relative value while rssi here is an
+	 * absolute/dBm value; convert rssi from absolute/dBm to a relative value.
+	 * The conversion logic is reused from qcsapi.
+	 */
+	rssi = iw_stats_log->stat_buffs[curr_index].rx_phy_stats.last_rssi_all;
+	noise = iw_stats_log->stat_buffs[curr_index].rx_phy_stats.hw_noise;
+
+	/* Round tenths-of-dBm to whole dBm, away from zero */
+	if (rssi < 0)
+		is->qual.level = (rssi - 5) / 10;
+	else
+		is->qual.level = (rssi + 5) / 10;
+
+	rssi += RSSI_OFFSET_FROM_10THS_DBM;
+	if (rssi < 0 || rssi >= RSSI_OFFSET_FROM_10THS_DBM)
+		is->qual.qual = 0;
+	else
+		is->qual.qual = (rssi + 5) / 10;
+
+	if (noise < 0)
+		is->qual.noise = (noise - 5) / 10;
+	else
+		is->qual.noise = (noise + 5) / 10;
+
+	is->qual.updated = IW_QUAL_ALL_UPDATED;
+	is->qual.updated |= IW_QUAL_DBM;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+/*
+ * MIMO power save mode change.
+ *
+ * Forwards an SMPS mode change for node @ni to the MuC via an
+ * IOCTL_DEV_SMPS hostlink message; the node and BSSID MAC addresses are
+ * passed in a DMA-coherent qtn_node_args buffer.
+ */
+static void qdrv_wlan_80211_smps(struct ieee80211_node *ni, int new_mode)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+
+	DBGPRINTF(DBG_LL_CRIT, QDRV_LF_TRACE,
+			"Node %02x:%02x:%02x:%02x:%02x:%02x MIMO PS change to %02X"
+			 " for BSSID %02x:%02x:%02x:%02x:%02x:%02x\n",
+			 DBGMACFMT(ni->ni_macaddr),
+			(u_int8_t)new_mode,
+			 DBGMACFMT(ni->ni_bssid));
+
+	/*
+	 * NOTE(review): if the ioctl allocation itself fails, ioctl is NULL
+	 * when passed to vnet_free_ioctl() below -- presumably that function
+	 * tolerates NULL; confirm.
+	 */
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+			!(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+			&args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate SMPS message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	memset(args, 0, sizeof(*args));
+
+	/* BSSID for the node and its MAC address */
+	memcpy(args->ni_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	memcpy(args->ni_macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+
+	ioctl->ioctl_command = IOCTL_DEV_SMPS;
+	ioctl->ioctl_arg1 = qv->devid;
+	/* new_mode is one of the enumerations starting 'IEEE80211_HTCAP_C_MIMOPWRSAVE_...' */
+	ioctl->ioctl_arg2 = new_mode;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/* Drop a node from VSP stream tracking when it loses authorization */
+static void qdrv_qvsp_node_auth_state_change(struct ieee80211_node *ni, int auth)
+{
+#ifdef CONFIG_QVSP
+	struct qdrv_wlan *qw;
+
+	/* Only deauthorization is of interest here */
+	if (auth)
+		return;
+
+	qw = container_of(ni->ni_ic, struct qdrv_wlan, ic);
+	qvsp_node_del(qw->qvsp, ni);
+#endif
+}
+
+/* A fresh association invalidates current VSP stream state */
+static void qdrv_wlan_new_assoc(struct ieee80211_node *ni)
+{
+#ifdef CONFIG_QVSP
+	struct qdrv_wlan *wlan = container_of(ni->ni_ic, struct qdrv_wlan, ic);
+
+	qvsp_reset(wlan->qvsp);
+#endif
+}
+
+/*
+ * Track a node's (de)authorization: update auth counters and maintain the
+ * per-VAP bridge-client and 4-addr LNCB client lists, plus the 3-address
+ * STA count, all under the ni_lst_lock spinlock.
+ *
+ * Nodes with AID 0 (not yet associated) are ignored.
+ */
+static void qdrv_wlan_auth_state_change(struct ieee80211_node *ni, int auth)
+{
+	const struct ieee80211_node *ni_iter;
+	const struct ieee80211_node *ni_iter_tmp;
+	unsigned long flags;
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct ieee80211com *ic = ni->ni_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	int is_ap = 0;
+
+	if (IEEE80211_NODE_AID(ni) == 0) {
+		return;
+	}
+
+	qdrv_qvsp_node_auth_state_change(ni, auth);
+
+	is_ap = (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP);
+
+	spin_lock_irqsave(&qv->ni_lst_lock, flags);
+	if (auth) {
+		qw->sm_stats.sm_nd_auth++;
+		qw->sm_stats.sm_nd_auth_tot++;
+		/* List of bridge clients */
+		if (ni->ni_qtn_assoc_ie && !ni->is_in_bridge_lst) {
+			TAILQ_INSERT_HEAD(&qv->ni_bridge_lst, ni, ni_bridge_lst);
+			qv->ni_bridge_cnt++;
+			ni->is_in_bridge_lst = 1;
+		}
+		/* List of Quantenna bridge clients that support 4-addr LNCB reception */
+		if (ni->ni_lncb_4addr && (!ni->is_in_lncb_lst)) {
+			TAILQ_INSERT_HEAD(&qv->ni_lncb_lst, ni, ni_lncb_lst);
+			qv->ni_lncb_cnt++;
+			ni->is_in_lncb_lst = 1;
+		} else {
+			/* Don't count this STA more than once. Can happen when reauthenticating */
+			if (!ni->ni_in_auth_state && is_ap) {
+				qv->iv_3addr_count++;
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN,
+					"3 address STA auth'd (count %d)\n",
+					qv->iv_3addr_count);
+			}
+		}
+		ni->ni_in_auth_state = 1;
+	} else {
+		if (ni->ni_in_auth_state && is_ap) {
+			qw->sm_stats.sm_nd_unauth++;
+			qw->sm_stats.sm_nd_auth_tot--;
+		}
+		if (ni->ni_node_idx)
+			qdrv_remove_invalid_sub_port(ni->ni_vap, ni->ni_node_idx);
+
+		/* Unlink from the bridge-client list, if present */
+		if (ni->ni_qtn_assoc_ie) {
+			TAILQ_FOREACH_SAFE(ni_iter, &qv->ni_bridge_lst, ni_bridge_lst, ni_iter_tmp) {
+				if (ni == ni_iter) {
+					TAILQ_REMOVE(&qv->ni_bridge_lst, ni, ni_bridge_lst);
+					qv->ni_bridge_cnt--;
+					ni->is_in_bridge_lst = 0;
+					KASSERT((qv->ni_bridge_cnt >= 0),
+							("Negative bridge station count"));
+					break;
+				}
+			}
+		}
+
+		/* Unlink from the LNCB list, or decrement the 3-addr count */
+		if (ni->ni_lncb_4addr) {
+			TAILQ_FOREACH_SAFE(ni_iter, &qv->ni_lncb_lst, ni_lncb_lst, ni_iter_tmp) {
+				if (ni == ni_iter) {
+					TAILQ_REMOVE(&qv->ni_lncb_lst, ni, ni_lncb_lst);
+					qv->ni_lncb_cnt--;
+					ni->is_in_lncb_lst = 0;
+					KASSERT((qv->ni_lncb_cnt >= 0),
+						("Negative lncb station count"));
+					break;
+				}
+			}
+		} else {
+			if (ni->ni_in_auth_state && is_ap) {
+				qv->iv_3addr_count--;
+				DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_WLAN,
+					"3 address STA deauth'd (count %d)\n",
+					qv->iv_3addr_count);
+				KASSERT((qv->iv_3addr_count >= 0),
+					("Negative 3 address count"));
+			} else {
+				/*
+				 * NOTE(review): this inner assignment is dead --
+				 * ni_in_auth_state is unconditionally cleared
+				 * right below; presumably harmless leftovers.
+				 */
+				ni->ni_in_auth_state = 0;
+			}
+		}
+		ni->ni_in_auth_state = 0;
+	}
+	spin_unlock_irqrestore(&qv->ni_lst_lock, flags);
+}
+
+#define QDRV_BOOTCFG_BUF_LEN	32
+
+/*
+ * Read the "bond_opt" variable from bootcfg and convert it to a hardware
+ * bonding option.
+ *
+ * Returns the parsed value ORed with HW_OPTION_BONDING_TOPAZ_PROD, or
+ * HW_OPTION_BONDING_NOT_SET when the variable is absent or malformed.
+ */
+enum hw_opt_t get_bootcfg_bond_opt(void)
+{
+	/*
+	 * Signed on purpose: this was "uint32_t bond_opt", which matched
+	 * neither the "%d" conversion below nor the ">= 0" range check
+	 * (always true for an unsigned type, so negative inputs slipped
+	 * through).  Parsing into an int makes the check meaningful.
+	 */
+	int bond_opt;
+	char buf[QDRV_BOOTCFG_BUF_LEN];
+	char *s;
+	int rc;
+
+	s = bootcfg_get_var("bond_opt", buf);
+	if (s) {
+		/* bootcfg returns the value in "=<number>" form */
+		rc = sscanf(s, "=%d", &bond_opt);
+		if ((rc == 1) && bond_opt >= 0)
+			return (bond_opt | HW_OPTION_BONDING_TOPAZ_PROD);
+	}
+
+	return HW_OPTION_BONDING_NOT_SET;
+}
+
+/*
+ * Mark channels that require DFS (radar detection), set their initial CAC
+ * availability status, and clear any stale radar-detected flag.
+ *
+ * Returns 0 always.
+ */
+static int qdrv_wlan_80211_mark_dfs(struct ieee80211com *ic, int nchans,
+					struct ieee80211_channel *chans)
+{
+	int i;
+	/* NULL when no VAP has been created yet */
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	/* check if channel requires DFS */
+	for (i = 0; i < nchans; i++) {
+		if (ic->ic_country_code != CTRY_DEFAULT &&
+				chans[i].ic_ieee > 0 &&
+				chans[i].ic_ieee < IEEE80211_CHAN_MAX &&
+				isset(ic->ic_chan_dfs_required, chans[i].ic_ieee)) {
+			chans[i].ic_flags |= IEEE80211_CHAN_DFS;
+			/* active scan not allowed on DFS channel */
+			chans[i].ic_flags |= IEEE80211_CHAN_PASSIVE;
+			/*
+			 * Guard the vap dereference: TAILQ_FIRST() returns
+			 * NULL on an empty VAP list, which previously crashed
+			 * here.  With no VAP, use the non-STA default status.
+			 */
+			if (vap && vap->iv_opmode == IEEE80211_M_STA) {
+				ic->ic_chan_availability_status[chans[i].ic_ieee]
+					= IEEE80211_CHANNEL_STATUS_NON_AVAILABLE;
+			} else {
+				ic->ic_chan_availability_status[chans[i].ic_ieee]
+					= IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED;
+			}
+			if (ic->ic_mark_channel_dfs_cac_status) {
+				ic->ic_mark_channel_dfs_cac_status(ic, &chans[i], IEEE80211_CHAN_DFS_CAC_DONE, false);
+				ic->ic_mark_channel_dfs_cac_status(ic, &chans[i], IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, false);
+			}
+		}
+
+		chans[i].ic_radardetected = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Apply @flag to the channel with IEEE number @chan_ieee, if it exists.
+ * A zero @chan_ieee (no such secondary channel) is ignored.
+ */
+static void qdrv_wlan_mark_chan_flag(struct ieee80211com *ic, int chan_ieee,
+					uint32_t flag)
+{
+	struct ieee80211_channel *chan;
+
+	if (chan_ieee == 0)
+		return;
+
+	chan = ieee80211_find_channel_by_ieee(ic, chan_ieee);
+	if (chan)
+		chan->ic_flags |= flag;
+}
+
+/*
+ * Mark weather-radar channels (EU region only) and propagate the 40/80 MHz
+ * weather flags to the affected secondary channels.  The lookup for each
+ * secondary channel was triplicated inline; it is now factored into
+ * qdrv_wlan_mark_chan_flag().
+ *
+ * Returns 0 always.
+ */
+static int qdrv_wlan_80211_mark_weather_radar(struct ieee80211com *ic, int nchans,
+					struct ieee80211_channel *chans)
+{
+	int i;
+
+	for (i = 0; i < nchans; i++) {
+		if (qdrv_dfs_is_eu_region() &&
+				chans[i].ic_ieee > 0 &&
+				chans[i].ic_ieee < IEEE80211_CHAN_MAX &&
+				isset(ic->ic_chan_weather_radar, chans[i].ic_ieee)) {
+			chans[i].ic_flags |= IEEE80211_CHAN_WEATHER;
+			chans[i].ic_flags |= IEEE80211_CHAN_WEATHER_40M;
+			chans[i].ic_flags |= IEEE80211_CHAN_WEATHER_80M;
+
+			/* the secondary 20 MHz half of the 40 MHz pair */
+			qdrv_wlan_mark_chan_flag(ic,
+					ieee80211_find_sec_chan(&chans[i]),
+					IEEE80211_CHAN_WEATHER_40M);
+			/* the upper and lower 40 MHz halves of the 80 MHz block */
+			qdrv_wlan_mark_chan_flag(ic,
+					ieee80211_find_sec40u_chan(&chans[i]),
+					IEEE80211_CHAN_WEATHER_80M);
+			qdrv_wlan_mark_chan_flag(ic,
+					ieee80211_find_sec40l_chan(&chans[i]),
+					IEEE80211_CHAN_WEATHER_80M);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Power-management notifier: reacts to level transitions across the
+ * BOARD_PM_LEVEL_DUTY threshold by stretching/restoring the beacon interval
+ * and informing the MuC via hostlink.
+ *
+ * Returns NOTIFY_OK normally, NOTIFY_STOP if the hostlink call fails on
+ * entry to power saving.
+ *
+ * The former "if (&ic->ic_pm_period_change)" guards around add_timer() and
+ * del_timer() took the address of an embedded struct member and were
+ * therefore always true; they have been removed as dead code.
+ */
+static int qdrv_pm_notify(struct notifier_block *b, unsigned long level, void *v)
+{
+	int retval = NOTIFY_OK;
+	static int pm_prev_level = BOARD_PM_LEVEL_NO;
+	const int switch_level = BOARD_PM_LEVEL_DUTY;
+	u_int16_t new_beacon_interval;
+	struct qdrv_wlan *qw = container_of(b, struct qdrv_wlan, pm_notifier);
+	struct ieee80211com *ic = &qw->ic;
+
+	ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] = level;
+
+#ifdef CONFIG_QVSP
+	qdrv_wlan_notify_qvsp_coc_state_changed(qw->qvsp, ic);
+#endif
+	qdrv_wlan_notify_pm_state_changed(ic, pm_prev_level);
+
+	if ((pm_prev_level < switch_level) && (level >= switch_level)) {
+		/* Entering power-saving */
+#if defined(QBMPS_ENABLE)
+		/* qdrv_pm_notify is registered in qos_pm framework */
+		/* it could be triggered from modules besides wlan qdrv: e.g. qpm */
+		/* so we need to make sure BMPS is enabled */
+		/* before going into power-saving in STA mode */
+		if ((ic->ic_opmode == IEEE80211_M_STA) &&
+		    !(ic->ic_flags_qtn & IEEE80211_QTN_BMPS))
+			return retval;
+#endif
+
+#if defined(QBMPS_ENABLE)
+		if (ic->ic_opmode != IEEE80211_M_STA) {
+#endif
+			/* BMPS power-saving is used for STA */
+			/* this is only needed for CoC power-saving in non-STA mode */
+			new_beacon_interval = ieee80211_pm_period_tu(ic);
+			if (ic->ic_lintval != new_beacon_interval) {
+				/* Configure beacon interval to power duty interval */
+				ieee80211_beacon_interval_set(ic, new_beacon_interval);
+			}
+
+			ic->ic_pm_period_change.expires = jiffies +
+				ic->ic_pm_state[QTN_PM_PERIOD_CHANGE_INTERVAL] * HZ;
+			add_timer(&ic->ic_pm_period_change);
+#if defined(QBMPS_ENABLE)
+		}
+#endif
+		retval = ((qdrv_hostlink_power_save(qw, QTN_PM_CURRENT_LEVEL, level) < 0) ?
+				NOTIFY_STOP : NOTIFY_OK);
+		if ((retval != NOTIFY_OK)
+#if defined(QBMPS_ENABLE)
+		    && (ic->ic_opmode != IEEE80211_M_STA)
+#endif
+		   ) {
+			/* MuC rejected the change: undo timer and beacon stretch */
+			del_timer(&ic->ic_pm_period_change);
+			ieee80211_beacon_interval_set(ic, ic->ic_lintval_backup);
+		}
+	} else if ((pm_prev_level >= switch_level) && (level < switch_level)) {
+		/* Leaving power-saving */
+		if (ic->ic_lintval != ic->ic_lintval_backup) {
+			/* Recovering beacon setting */
+			ieee80211_beacon_interval_set(ic, ic->ic_lintval_backup);
+		}
+
+		ic->ic_pm_enabled = 1;
+		retval = ((qdrv_hostlink_power_save(qw, QTN_PM_CURRENT_LEVEL, level) < 0) ?
+				NOTIFY_STOP : NOTIFY_OK);
+		ic->ic_pm_enabled = 0;
+		if ((retval == NOTIFY_OK)
+#if defined(QBMPS_ENABLE)
+		    && (ic->ic_opmode != IEEE80211_M_STA)
+#endif
+		   ) {
+			del_timer(&ic->ic_pm_period_change);
+		}
+	}
+
+	pm_prev_level = level;
+
+	return retval;
+}
+
+/*
+ * Build the channel table for the device and install it via set_channels().
+ *
+ * ic_nchans selects the band: IEEE80211_MAX_2_4_GHZ_CHANNELS,
+ * IEEE80211_MAX_5_GHZ_CHANNELS, or (PEARL builds) IEEE80211_MAX_DUAL_CHANNELS
+ * for both bands.  DFS and weather-radar flags are applied before install.
+ *
+ * Returns 0 on success, -1 if ic_nchans matches no known band.
+ */
+static int qdrv_wlan_80211_config_channel(struct ieee80211com *ic, int ic_nchans)
+{
+	/*
+	 * NOTE(review): chans[] is sized IEEE80211_MAX_5_GHZ_CHANNELS, but the
+	 * PEARL dual-band path below fills IEEE80211_MAX_DUAL_CHANNELS entries
+	 * (2.4 GHz + 5 GHz) -- confirm the constants make that fit.
+	 */
+	struct ieee80211_channel chans[IEEE80211_MAX_5_GHZ_CHANNELS];
+	int i;
+	int nchans = 0;
+	struct ieee80211_channel *inchans = chans;
+	struct qtn_channel *qtn_chan_ptr = NULL;
+	u32 def_chan_flags = 0;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	memset(chans, 0, sizeof(chans));
+	/* Set up some dummy channels */
+#ifdef QDRV_FEATURE_HT
+	nchans = ic_nchans;
+	KASSERT((sizeof(chans)/sizeof(chans[0])) >= (IEEE80211_MAX_5_GHZ_CHANNELS),
+			("Negative config channel array size"));
+
+	/* TODO (Avinash): BBIC4 is not a simultaneous dual-band platform */
+#ifdef PEARL_PLATFORM
+	if (nchans == IEEE80211_MAX_DUAL_CHANNELS) {
+		/* Dual band. Initialize with all the supported channels */
+		int j;
+		/* First pass: 2.4 GHz channels */
+		nchans = IEEE80211_MAX_2_4_GHZ_CHANNELS;
+		def_chan_flags = IEEE80211_CHAN_HT20 | IEEE80211_CHAN_OFDM |
+			IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK;
+		qtn_chan_ptr = qtn_channels_2ghz;
+		for (i = 0; i < nchans; i++) {
+			chans[i].ic_flags =  def_chan_flags | qtn_chan_ptr[i].channel_flags;
+			/* Add the common 40M flag if either U/D 20M flag is set */
+			chans[i].ic_flags |=
+				(chans[i].ic_flags & (IEEE80211_CHAN_HT40U|IEEE80211_CHAN_HT40D))?
+				IEEE80211_CHAN_HT40:0;
+			chans[i].ic_ext_flags = qtn_chan_ptr[i].channel_ext_flags;
+			chans[i].ic_freq = qtn_chan_ptr[i].channel_freq;
+			chans[i].ic_ieee = qtn_chan_ptr[i].channel_number;
+			chans[i].ic_maxregpower = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_maxpower = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_minpower = QDRV_DFLT_MIN_TXPOW;
+			chans[i].ic_maxpower_normal = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_minpower_normal = QDRV_DFLT_MIN_TXPOW;
+			/* '0' means power is not configured */
+			memset(&chans[i].ic_maxpower_table, 0, sizeof(chans[i].ic_maxpower_table));
+			chans[i].ic_center_f_40MHz = qtn_chan_ptr[i].center_freq_40M;
+		}
+
+		/* Second pass: 5 GHz channels, appended after the 2.4 GHz set */
+		nchans = IEEE80211_MAX_5_GHZ_CHANNELS;
+		def_chan_flags = IEEE80211_CHAN_HT20 | IEEE80211_CHAN_HT40 |
+			IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
+		qtn_chan_ptr = qtn_channels_5ghz;
+		for (j = 0; j < nchans; j++, i++) {
+			chans[i].ic_flags =  def_chan_flags | qtn_chan_ptr[j].channel_flags;
+			/* Add the common 40M flag if either U/D 20M flag is set */
+			chans[i].ic_flags |=
+				(chans[i].ic_flags & (IEEE80211_CHAN_HT40U|IEEE80211_CHAN_HT40D))?
+				IEEE80211_CHAN_HT40:0;
+			chans[i].ic_ext_flags = qtn_chan_ptr[j].channel_ext_flags;
+			chans[i].ic_freq = qtn_chan_ptr[j].channel_freq;
+			chans[i].ic_ieee = qtn_chan_ptr[j].channel_number;
+			chans[i].ic_maxregpower = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_maxpower = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_minpower = QDRV_DFLT_MIN_TXPOW;
+			chans[i].ic_maxpower_normal = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_minpower_normal = QDRV_DFLT_MIN_TXPOW;
+			/* '0' means power is not configured */
+			memset(&chans[i].ic_maxpower_table, 0, sizeof(chans[i].ic_maxpower_table));
+			chans[i].ic_center_f_40MHz = qtn_chan_ptr[j].center_freq_40M;
+			chans[i].ic_center_f_80MHz = qtn_chan_ptr[j].center_freq_80M;
+			chans[i].ic_center_f_160MHz = qtn_chan_ptr[j].center_freq_160M;
+			if (chans[i].ic_center_f_80MHz) {
+				chans[i].ic_flags |= IEEE80211_CHAN_VHT80;
+			}
+		}
+		nchans = IEEE80211_MAX_DUAL_CHANNELS;
+
+	} else {
+#endif
+		/* Single-band: choose the channel source table by count */
+		if (nchans == IEEE80211_MAX_2_4_GHZ_CHANNELS) {
+			def_chan_flags = IEEE80211_CHAN_HT20 | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+			qtn_chan_ptr = qtn_channels_2ghz;
+		} else if (nchans == IEEE80211_MAX_5_GHZ_CHANNELS) {
+			nchans = IEEE80211_MAX_5_GHZ_CHANNELS;
+			def_chan_flags = IEEE80211_CHAN_HT20 | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
+			qtn_chan_ptr = qtn_channels_5ghz;
+		} else {
+			printk(KERN_ERR "Num of chans specified does not correspond to any known freq band\n");
+			return -1;
+		}
+
+		for (i = 0; i < nchans; i++) {
+			chans[i].ic_flags =  def_chan_flags | qtn_chan_ptr[i].channel_flags;
+			/* Add the common 40M flag if either U/D 20M flag is set */
+			chans[i].ic_flags |=
+				(chans[i].ic_flags & (IEEE80211_CHAN_HT40U|IEEE80211_CHAN_HT40D))?
+				IEEE80211_CHAN_HT40:0;
+			chans[i].ic_ext_flags = qtn_chan_ptr[i].channel_ext_flags;
+			chans[i].ic_freq = qtn_chan_ptr[i].channel_freq;
+			chans[i].ic_ieee = qtn_chan_ptr[i].channel_number;
+			chans[i].ic_maxregpower = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_maxpower = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_minpower = QDRV_DFLT_MIN_TXPOW;
+			chans[i].ic_maxpower_normal = QDRV_DFLT_MAX_TXPOW;
+			chans[i].ic_minpower_normal = QDRV_DFLT_MIN_TXPOW;
+			/* '0' means power is not configured */
+			memset(chans[i].ic_maxpower_table, 0, sizeof(chans[i].ic_maxpower_table));
+			chans[i].ic_center_f_80MHz = qtn_chan_ptr[i].center_freq_80M;
+			chans[i].ic_center_f_160MHz = qtn_chan_ptr[i].center_freq_160M;
+			if (chans[i].ic_center_f_80MHz) {
+				chans[i].ic_flags |= IEEE80211_CHAN_VHT80;
+			}
+		}
+#ifdef PEARL_PLATFORM
+	}
+#endif
+#else
+	/* Non-HT build: fabricate 32 dummy 2.4 GHz channels */
+	for (i = 0; i < 32; i++) {
+		chans[i].ic_freq = 2412 + i;
+		chans[i].ic_flags = IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+		chans[i].ic_ieee = i;
+		chans[i].ic_maxregpower = QDRV_DFLT_MAX_TXPOW;
+		chans[i].ic_maxpower = QDRV_DFLT_MAX_TXPOW;
+		chans[i].ic_minpower = QDRV_DFLT_MIN_TXPOW;
+		chans[i].ic_maxpower_normal = QDRV_DFLT_MAX_TXPOW;
+		chans[i].ic_minpower_normal = QDRV_DFLT_MIN_TXPOW;
+	}
+	nchans = ((chans[0].ic_flags & IEEE80211_CHAN_2GHZ) == IEEE80211_CHAN_2GHZ) ?
+			IEEE80211_MAX_2_4_GHZ_CHANNELS : IEEE80211_MAX_5_GHZ_CHANNELS;
+#endif
+
+	ic->ic_pwr_constraint = 0;
+
+	/* check if channel requires DFS */
+	qdrv_wlan_80211_mark_dfs(ic, nchans, inchans);
+
+	qdrv_wlan_80211_mark_weather_radar(ic, nchans, inchans);
+
+	/* Initialize the channels in the ieee80211com structure */
+	set_channels(ic, nchans, chans);
+
+	return 0;
+}
+
+#ifdef CONFIG_QVSP
+
+/*
+ * ic callback: apply a VSP stream state change to the local VSP engine.
+ * Compiled out (no-op) on QTM-Lite builds.
+ */
+static void
+qdrv_wlan_vsp_strm_state_set(struct ieee80211com *ic, uint8_t strm_state,
+				const struct ieee80211_qvsp_strm_id *strm_id,
+				struct ieee80211_qvsp_strm_dis_attr *attr)
+{
+#if !TOPAZ_QTM /* Disable STA side control for QTM-Lite */
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qvsp_s *qvsp = qw->qvsp;
+
+	/* VSP may not be initialized */
+	if (qvsp == NULL) {
+		return;
+	}
+
+	qvsp_cmd_strm_state_set(qvsp, strm_state, strm_id, attr);
+#endif
+}
+
+/* ic callback: propagate a STA-mode change to the VSP engine, if present */
+static void
+qdrv_wlan_vsp_change_stamode(struct ieee80211com *ic, uint8_t stamode)
+{
+	struct qdrv_wlan *wlan = container_of(ic, struct qdrv_wlan, ic);
+
+	if (wlan->qvsp != NULL)
+		qvsp_change_stamode(wlan->qvsp, stamode);
+}
+
+/*
+ * ic callback: forward a VSP configuration item to the VSP engine.
+ * Compiled out (no-op) on QTM-Lite builds.
+ */
+static void
+qdrv_wlan_vsp_configure(struct ieee80211com *ic, uint32_t index, uint32_t value)
+{
+#if !TOPAZ_QTM /* Disable STA side control for QTM-Lite */
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qvsp_s *qvsp = qw->qvsp;
+
+	/*
+	 * Guard against VSP not being initialized; the sibling callbacks
+	 * (qdrv_wlan_vsp_strm_state_set, qdrv_wlan_vsp_change_stamode) check
+	 * this, and this one previously did not.
+	 */
+	if (qvsp == NULL) {
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "configuring VSP %u:%u\n",
+		index, value);
+
+	qvsp_cmd_vsp_configure(qvsp, index, value);
+#endif
+}
+
+/*
+ * ic callback: set a VSP configuration value in the VSP engine.
+ */
+static void
+qdrv_wlan_vsp_set(struct ieee80211com *ic, uint32_t index, uint32_t value)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qvsp_s *qvsp = qw->qvsp;
+
+	/* Missing in the original: sibling callbacks guard against NULL qvsp */
+	if (qvsp == NULL) {
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "configuring VSP %u:%u\n",
+		index, value);
+
+	qvsp_cmd_vsp_cfg_set(qvsp, index, value);
+}
+
+/*
+ * ic callback: read a VSP configuration value from the VSP engine.
+ *
+ * Returns 0 on success (with *value filled in), nonzero on failure.
+ */
+static int
+qdrv_wlan_vsp_get(struct ieee80211com *ic, uint32_t index, uint32_t *value)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qvsp_s *qvsp = qw->qvsp;
+	int ret;
+
+	/* Missing in the original: sibling callbacks guard against NULL qvsp */
+	if (qvsp == NULL) {
+		return -EINVAL;
+	}
+
+	ret = qvsp_cmd_vsp_cfg_get(qvsp, index, value);
+	if (!ret) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "VSP configuration %u:%u\n",
+			index, *value);
+	} else {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "reading VSP failed\n");
+	}
+
+	return ret;
+}
+
+/*
+ * Report whether any VAP on this device operates in WDS mode.
+ * Returns 1 if at least one WDS VAP exists, 0 otherwise.
+ */
+int
+qdrv_wlan_query_wds(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+	int has_wds = 0;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode == IEEE80211_M_WDS) {
+			has_wds = 1;
+			break;
+		}
+	}
+
+	return has_wds;
+}
+
+/*
+ * VSP config callback to send a configuration update to peer stations.
+ *
+ * Builds a Quantenna vendor-specific action frame carrying one config item
+ * and sends it to every associated node; the value is also cached in
+ * ic->vsp_cfg[] so newly-associating stations receive it.
+ */
+void
+qdrv_wlan_vsp_cb_cfg(void *token, uint32_t index, uint32_t value)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)token;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_qvsp_act_cfg qvsp_ac;
+	struct ieee80211_action_data act;
+	uint8_t *oui;
+
+	memset(&act, 0, sizeof(act));
+	act.cat = IEEE80211_ACTION_CAT_VENDOR;
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "VSP: send config to stations - %d:%d\n",
+		index, value);
+
+	memset(&qvsp_ac, 0, sizeof(qvsp_ac));
+	act.params = (void *)&qvsp_ac;
+	oui = qvsp_ac.header.oui;
+	ieee80211_oui_add_qtn(oui);
+	qvsp_ac.header.type = QVSP_ACTION_VSP_CTRL;
+	qvsp_ac.count = 1;
+	qvsp_ac.cfg_items[0].index = index;
+	qvsp_ac.cfg_items[0].value = value;
+
+	ieee80211_iterate_nodes(&ic->ic_sta, ieee80211_node_vsp_send_action, &act, 1);
+
+	/*
+	 * Store config for sending to new stations when they associate.
+	 * NOTE(review): 'index' is not bounds-checked against vsp_cfg[] here
+	 * -- presumably validated by the caller; confirm.
+	 */
+	ic->vsp_cfg[index].value = value;
+	ic->vsp_cfg[index].set = 1;
+}
+
+/*
+ * VSP config callback to send a stream state change to one peer station.
+ *
+ * Builds a Quantenna vendor-specific QVSP_ACTION_STRM_CTRL action frame
+ * carrying a single stream id plus its disable attributes and sends it
+ * directly to @ni.
+ */
+void
+qdrv_wlan_vsp_cb_strm_ctrl(void *token, struct ieee80211_node *ni, uint8_t strm_state,
+		struct ieee80211_qvsp_strm_id *strm_id, struct ieee80211_qvsp_strm_dis_attr *attr)
+{
+	struct ieee80211_qvsp_act_strm_ctrl qvsp_ac;
+	struct ieee80211_action_data act;
+	uint8_t *oui;
+
+	memset(&act, 0, sizeof(act));
+	act.cat = IEEE80211_ACTION_CAT_VENDOR;
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP,
+		"VSP: send stream state change (%u) to " DBGMACVAR "\n",
+		strm_state, DBGMACFMT(ni->ni_macaddr));
+
+	memset(&qvsp_ac, 0, sizeof(qvsp_ac));
+	act.params = (void *)&qvsp_ac;
+	oui = qvsp_ac.header.oui;
+	ieee80211_oui_add_qtn(oui);
+	qvsp_ac.header.type = QVSP_ACTION_STRM_CTRL;
+	qvsp_ac.strm_state = strm_state;
+	memcpy(&qvsp_ac.dis_attr, attr, sizeof(qvsp_ac.dis_attr));
+	qvsp_ac.count = 1;
+
+	qvsp_ac.strm_items[0] = *strm_id;
+
+	ieee80211_node_vsp_send_action(&act, ni);
+}
+
+/* Reset VSP stream tracking state, if VSP has been initialized */
+void qdrv_wlan_vsp_reset(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *wlan = container_of(ic, struct qdrv_wlan, ic);
+	struct qvsp_s *qvsp = wlan->qvsp;
+
+	if (qvsp != NULL)
+		qvsp_reset(qvsp);
+}
+
+#if TOPAZ_QTM
+/*
+ * Per-node VSP stats sync (ieee80211_iterate_nodes callback).
+ *
+ * Computes per-TID deltas between the shared-memory VSP stats and the
+ * node's previous snapshot, feeds them to qvsp_strm_tid_check_add(), then
+ * refreshes the snapshot.  Unassociated nodes on AP/WDS VAPs are skipped.
+ * While vsp_enabling is nonzero only the snapshot is refreshed (warm-up).
+ */
+static void __sram_text
+qdrv_wlan_vsp_sync_node(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct qtn_vsp_stats *vsp_stats = qw->vsp_stats;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct qtn_per_tid_stats *stats;
+	struct qtn_per_tid_stats *prev_stats;
+	const uint8_t tids[] = QTN_VSP_TIDS;
+	uint8_t tid_idx;
+	uint8_t tid;
+	const int8_t tid2statsidx[] = QTN_VSP_STATS_TID2IDX;
+	int8_t stats_idx;
+	uint8_t node;
+	uint32_t sent_bytes;
+	uint32_t sent_pkts;
+	uint32_t throt_bytes;
+	uint32_t throt_pkts;
+
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+				vap->iv_opmode == IEEE80211_M_WDS) &&
+			ni->ni_associd == 0)
+		return;
+
+	node = IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx);
+
+	if (!qw->vsp_enabling) {
+		for (tid_idx = 0; tid_idx < ARRAY_SIZE(tids); ++tid_idx) {
+			tid = tids[tid_idx];
+			stats_idx = tid2statsidx[tid];
+
+			stats = &vsp_stats->per_node_stats[node].per_tid_stats[stats_idx];
+			prev_stats = &ni->ni_prev_vsp_stats.per_tid_stats[stats_idx];
+
+			/*
+			 * There is a race condition that when lhost is reading these counters from
+			 * shared memory, MuC is updating it. However, this won't hurt VSP. Because
+			 * the key information here is whether throt_pkts is zero or not. We rely
+			 * on this when reenabling streams. The small error in sent_pkts doesn't
+			 * matter. So we don't need to use ping-pong buffer to solve this race condtion.
+			 */
+			throt_pkts = stats->tx_throt_pkts - prev_stats->tx_throt_pkts;
+			throt_bytes = stats->tx_throt_bytes - prev_stats->tx_throt_bytes;
+			sent_pkts = stats->tx_sent_pkts - prev_stats->tx_sent_pkts;
+			sent_bytes = stats->tx_sent_bytes - prev_stats->tx_sent_bytes;
+			qvsp_strm_tid_check_add(qw->qvsp, ni,
+				IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx), tid,
+				sent_pkts + throt_pkts,
+				sent_bytes + throt_bytes,
+				sent_pkts, sent_bytes);
+		}
+	}
+
+	/* Snapshot current counters for the next delta computation */
+	memcpy(&ni->ni_prev_vsp_stats, &vsp_stats->per_node_stats[node], sizeof(ni->ni_prev_vsp_stats));
+}
+
+/*
+ * One VSP stats sync pass over all nodes; reschedules itself via the
+ * delayed work once per second while vsp_sync_sched_remain is nonzero.
+ */
+static void __sram_text
+qdrv_wlan_vsp_sync(struct qdrv_wlan *qw)
+{
+	ieee80211_iterate_nodes(&qw->ic.ic_sta, qdrv_wlan_vsp_sync_node, 0, 1);
+
+	if (qw->vsp_sync_sched_remain) {
+		schedule_delayed_work(&qw->vsp_sync_work, HZ);
+		qw->vsp_sync_sched_remain--;
+	}
+}
+
+/* Delayed-work entry point: recover the owning qdrv_wlan and run one sync */
+static void __sram_text
+qdrv_wlan_vsp_sync_work(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of((struct delayed_work *)work,
+					struct qdrv_wlan, vsp_sync_work);
+
+	qdrv_wlan_vsp_sync(qw);
+}
+#endif
+
+/*
+ * Tasklet scheduled from the VSP IRQ: run a stats sync pass and push the
+ * free-airtime (FAT) measurement into the VSP engine.  While vsp_enabling
+ * counts down, stats are still warming up and FAT is not reported.
+ */
+static void __sram_text
+qdrv_wlan_vsp_tasklet(unsigned long _qw)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) _qw;
+	if (likely(qvsp_is_active(qw->qvsp))) {
+#if TOPAZ_QTM
+		/* sched work one time less than interval number because here we already do one */
+		qw->vsp_sync_sched_remain = qw->vsp_check_intvl - 1;
+		qdrv_wlan_vsp_sync(qw);
+
+		if (qw->vsp_enabling) {
+			/* warming up, stats not ready yet */
+			qw->vsp_enabling--;
+			return;
+		}
+#endif
+
+		qvsp_fat_set(qw->qvsp,
+				qw->vsp_stats->fat, qw->vsp_stats->intf_ms,
+				qw->ic.ic_curchan->ic_ieee);
+	}
+}
+
+/* MuC-to-LHost VSP interrupt: defer all processing to the VSP tasklet */
+static void __sram_text
+qdrv_wlan_vsp_irq_handler(void *_qw, void *_unused)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)_qw;
+
+	if (unlikely(!qvsp_is_active(qw->qvsp)))
+		return;
+
+	tasklet_schedule(&qw->vsp_tasklet);
+}
+
+/*
+ * VSP callback: throttle (or un-throttle) an external stream via the AuC
+ * QoS scheduler.  The stream's node/TID are recovered from the encoded
+ * destination IPv4 address, the throttle rate is converted into an
+ * interval/quota pair clamped to the ioctl field ranges, and the result is
+ * packed into a single IEEE80211_PARAM_AUC_QOS_SCH parameter word.
+ * No-op unless built with TOPAZ_QTM.
+ */
+void qdrv_wlan_vsp_cb_strm_ext_throttler(void *token, struct ieee80211_node *ni,
+			uint8_t strm_state, const struct ieee80211_qvsp_strm_id *strm_id,
+			struct ieee80211_qvsp_strm_dis_attr *attr, uint32_t throt_intvl)
+{
+#if TOPAZ_QTM
+	uint8_t node;
+	uint8_t tid;
+	uint32_t value;
+	uint32_t intvl;
+	uint32_t quota;
+
+	qvsp_fake_ip2nodetid((uint32_t*)(&strm_id->daddr.ipv4), &node, &tid);
+
+	if (strm_state == QVSP_STRM_STATE_DISABLED) {
+		/*
+		 * NOTE(review): attr->throt_rate / 8 below is zero for rates
+		 * under 8 -- the later division by it would fault; presumably
+		 * callers never pass such rates. Confirm.
+		 */
+		/* default interval */
+		intvl = throt_intvl;	/* ms */
+	        quota = (attr->throt_rate / 8) * intvl; /* bytes */
+		if (quota < QTN_AUC_THROT_QUOTA_UNIT) {
+			quota = QTN_AUC_THROT_QUOTA_UNIT;
+		} else if (quota > (QTN_AUC_THROT_QUOTA_MAX * QTN_AUC_THROT_QUOTA_UNIT)) {
+			quota = QTN_AUC_THROT_QUOTA_MAX * QTN_AUC_THROT_QUOTA_UNIT;
+		}
+		/* Recompute the interval from the clamped quota */
+		intvl = quota / (attr->throt_rate / 8);
+		if ((intvl < QTN_AUC_THROT_INTVL_UNIT) ||
+				(intvl > QTN_AUC_THROT_INTVL_MAX * QTN_AUC_THROT_INTVL_UNIT)) {
+			printk("VSP: throttling rate %u exceeds ioctl range: intvl %u quota %u\n",
+				attr->throt_rate, intvl, quota);
+			return;
+		}
+		intvl /= QTN_AUC_THROT_INTVL_UNIT;
+		quota /= QTN_AUC_THROT_QUOTA_UNIT;
+	} else {
+		/* zero interval/quota disables throttling for this node/TID */
+		intvl = 0;
+		quota = 0;
+	}
+
+	value = SM(AUC_QOS_SCH_PARAM_TID_THROT, AUC_QOS_SCH_PARAM) |
+		SM(node, QTN_AUC_THROT_NODE) |
+		SM(tid, QTN_AUC_THROT_TID) |
+		SM(intvl, QTN_AUC_THROT_INTVL) |
+		SM(quota, QTN_AUC_THROT_QUOTA);
+
+	qdrv_wlan_80211_setparam(ni, IEEE80211_PARAM_AUC_QOS_SCH, value, NULL, 0);
+#endif
+}
+
+/*
+ * Map the MuC-shared VSP stats area and install/enable the VSP interrupt
+ * handler (plus the periodic sync work on TOPAZ_QTM).
+ *
+ * Returns 0 on success, -ENOMEM if the stats area cannot be mapped, or the
+ * qdrv_mac_set_handler() error code.
+ *
+ * NOTE(review): on handler-registration failure the stats area is unmapped
+ * but qw->vsp_stats is left pointing at it -- presumably callers treat the
+ * whole init as failed and never touch it; confirm.
+ */
+static int
+qdrv_wlan_vsp_irq_init(struct qdrv_wlan *qw, unsigned long hi_vsp_stats_phys)
+{
+	struct qdrv_mac *mac = qw->mac;
+	struct int_handler int_handler;
+	int ret;
+
+	qw->vsp_stats = ioremap_nocache(muc_to_lhost(hi_vsp_stats_phys),
+					sizeof(*qw->vsp_stats));
+	if (qw->vsp_stats == NULL) {
+		return -ENOMEM;
+	}
+
+	tasklet_init(&qw->vsp_tasklet, &qdrv_wlan_vsp_tasklet, (unsigned long) qw);
+
+#if TOPAZ_QTM
+	INIT_DELAYED_WORK(&qw->vsp_sync_work, qdrv_wlan_vsp_sync_work);
+#endif
+
+	int_handler.handler = &qdrv_wlan_vsp_irq_handler;
+	int_handler.arg1 = qw;
+	int_handler.arg2 = NULL;
+	ret = qdrv_mac_set_handler(mac, RUBY_M2L_IRQ_LO_VSP, &int_handler);
+	if (ret == 0) {
+		qdrv_mac_enable_irq(mac, RUBY_M2L_IRQ_LO_VSP);
+	} else {
+		DBGPRINTF_E("Could not initialize VSP update irq handler\n");
+		iounmap(qw->vsp_stats);
+	}
+
+	return ret;
+}
+
+/*
+ * Tear down the VSP interrupt path, mirroring qdrv_wlan_vsp_irq_init in
+ * reverse: stop new interrupts first, then kill the deferred work that
+ * dereferences qw->vsp_stats, and unmap the shared stats area last.
+ * (The original unmapped first, leaving a window where the tasklet or the
+ * sync work could touch the just-unmapped mapping.)
+ */
+static void
+qdrv_wlan_vsp_irq_exit(struct qdrv_wlan *qw)
+{
+	struct qdrv_mac *mac = qw->mac;
+
+	qdrv_mac_disable_irq(mac, RUBY_M2L_IRQ_LO_VSP);
+	qdrv_mac_clear_handler(mac, RUBY_M2L_IRQ_LO_VSP);
+	tasklet_kill(&qw->vsp_tasklet);
+
+#if TOPAZ_QTM
+	cancel_delayed_work_sync(&qw->vsp_sync_work);
+#endif
+
+	iounmap(qw->vsp_stats);
+}
+
+/*
+ * Configure VSP block-ack throttling for one node/TID: record the new
+ * interval/duration/window-size, maintain the global count of throttled
+ * sessions (ni_vsp_ba_throt_bm / ic_vsp_ba_throt_num), tear down the
+ * current BA session, and start the periodic throttle timer on the first
+ * throttled session.
+ *
+ * Returns 0 always.
+ */
+int
+qdrv_wlan_vsp_ba_throt(struct ieee80211_node *ni, int32_t tid, int intv, int dur, int win_size)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	struct ieee80211_ba_throt *ba_throt;
+	int start_timer = 0;
+
+	ba_throt = &ni->ni_ba_rx[tid].ba_throt;
+	if (ba_throt->throt_dur && !dur) {
+		/* throttling switched off for this TID */
+		ic->ic_vsp_ba_throt_num--;
+		ni->ni_vsp_ba_throt_bm &= ~BIT(tid);
+	} else if (!ba_throt->throt_dur && dur) {
+		/* throttling switched on; first active session starts the timer */
+		if (!ic->ic_vsp_ba_throt_num) {
+			start_timer = 1;
+		}
+		ic->ic_vsp_ba_throt_num++;
+		ni->ni_vsp_ba_throt_bm |= BIT(tid);
+	}
+	ba_throt->throt_intv = intv;
+	ba_throt->throt_dur = dur;
+	ba_throt->throt_win_size = win_size;
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "set node %u tid %d ba throt: intv=%u dur=%u win_size=%u\n",
+			IEEE80211_AID(ni->ni_associd), tid,
+			ba_throt->throt_intv,
+			ba_throt->throt_dur,
+			ba_throt->throt_win_size);
+
+	/* force renegotiation so the new throttle parameters take effect */
+	qdrv_wlan_drop_ba(ni, tid, 0, IEEE80211_REASON_UNSPECIFIED);
+
+	if (start_timer) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "add vsp ba throt timer with intv %u ms\n",
+				QVSP_BA_THROT_TIMER_INTV);
+		qw->vsp_ba_throt.expires = jiffies + msecs_to_jiffies(QVSP_BA_THROT_TIMER_INTV);
+		add_timer(&qw->vsp_ba_throt);
+	}
+
+	return 0;
+}
+
+/*
+ * Periodic VSP BA-throttle timer.  For every node with throttled TIDs,
+ * drops any established BA session whose throttle duration has expired,
+ * then rearms itself.  Stops rearming (self-cancels) once no throttled
+ * sessions remain.
+ */
+static void
+qdrv_wlan_vsp_ba_throt_timer(unsigned long arg)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)arg;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	int32_t tid;
+	struct ieee80211_ba_tid *ba_tid;
+	struct ieee80211_ba_throt *ba_throt;
+
+	if (!ic->ic_vsp_ba_throt_num) {
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "auto stop vsp ba throt timer\n");
+		return;
+	}
+
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_VSP, "vsp ba throt timer\n");
+
+	IEEE80211_SCAN_LOCK_BH(nt);
+	IEEE80211_NODE_LOCK_BH(nt);
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		/* skip BSS/self/blacklisted nodes */
+		if ((ni == ni->ni_vap->iv_bss) ||
+		    IEEE80211_ADDR_EQ(ni->ni_vap->iv_myaddr, ni->ni_macaddr) ||
+		    ieee80211_blacklist_check(ni)) {
+			continue;
+		}
+
+		if (!ni->ni_vsp_ba_throt_bm) {
+			continue;
+		}
+
+		for (tid = 0; tid < WME_NUM_TID; tid++) {
+			ba_tid = &ni->ni_ba_rx[tid];
+			ba_throt = &ba_tid->ba_throt;
+			if ((ba_tid->state == IEEE80211_BA_ESTABLISHED) &&
+				ba_throt->throt_dur &&
+				time_after(jiffies, (ba_throt->last_setup_jiffies +
+						      msecs_to_jiffies(ba_throt->throt_dur)))) {
+				DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP,
+					"VSP: delba node %u tid %d\n", IEEE80211_AID(ni->ni_associd), tid);
+				qdrv_wlan_drop_ba(ni, tid, 0, IEEE80211_REASON_UNSPECIFIED);
+			}
+		}
+	}
+
+	IEEE80211_NODE_UNLOCK_BH(nt);
+	IEEE80211_SCAN_UNLOCK_BH(nt);
+
+	/* rearm for the next pass */
+	qw->vsp_ba_throt.expires = jiffies + msecs_to_jiffies(QVSP_BA_THROT_TIMER_INTV);
+	add_timer(&qw->vsp_ba_throt);
+}
+
+/* Per-vendor policy describing which BA-throttling techniques VSP may use
+ * against third-party peers. */
+struct qvsp_3rdpt_method_entry {
+	uint8_t vendor;
+	uint8_t ba_throt_session_dur;		/* bool, whether to throt session duration */
+	uint8_t ba_throt_winsize;		/* bool, whether to throt winsize */
+};
+
+#define QVSP_3RDPT_VENDOR_METHOD_NUM		8
+/* Entry 0 (PEER_VENDOR_NONE) doubles as the fallback/default method;
+ * further entries are filled in at runtime via the manual BA-throt
+ * SET_VENDOR_TABLE subcommand. */
+static struct qvsp_3rdpt_method_entry qvsp_3rdpt_method_table[QVSP_3RDPT_VENDOR_METHOD_NUM] = {
+	{PEER_VENDOR_NONE, 1, 1},		/* must be first entry */
+	/* more entries can be dynamically added */
+};
+
+/*
+ * Look up the BA-throttling method for a node's vendor in the 3rd-party
+ * method table.  On a vendor match the entry's flags are returned through
+ * the out parameters; otherwise entry 0 (the default) is used.  Always
+ * returns 1 (a method is always produced).
+ */
+int qdrv_wlan_vsp_3rdpt_get_method(struct ieee80211_node *ni, uint8_t *throt_session_dur, uint8_t *throt_winsize)
+{
+	int i;
+	struct qvsp_3rdpt_method_entry *entry;
+
+	for (i = 0; i < ARRAY_SIZE(qvsp_3rdpt_method_table); i++) {
+		entry = &qvsp_3rdpt_method_table[i];
+		if (entry->vendor == ni->ni_vendor) {
+			*throt_session_dur = entry->ba_throt_session_dur;
+			*throt_winsize = entry->ba_throt_winsize;
+			DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "VSP: node %u vendor 0x%x"
+					" throt_dur=%u throt_winsize=%u\n",
+					IEEE80211_AID(ni->ni_associd), ni->ni_vendor,
+					*throt_session_dur, *throt_winsize);
+			return 1;
+		}
+	}
+
+	/* No vendor match: fall back to the default (first) entry. */
+	entry = &qvsp_3rdpt_method_table[0];
+	*throt_session_dur = entry->ba_throt_session_dur;
+	*throt_winsize = entry->ba_throt_winsize;
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "VSP: node %u use default method"
+			" throt_dur=%u throt_winsize=%u\n",
+			IEEE80211_AID(ni->ni_associd),
+			*throt_session_dur, *throt_winsize);
+	return 1;
+}
+
+/*
+ * Overwrite one slot of the 3rd-party method table (bounds-checked).
+ * Note idx 0 is writable too, which replaces the default entry.
+ */
+static void qdrv_wlan_vsp_3rdpt_set_method(uint8_t idx, uint8_t vendor,
+		uint8_t throt_session_dur, uint8_t throt_winsize)
+{
+	struct qvsp_3rdpt_method_entry *entry;
+
+	if (idx < ARRAY_SIZE(qvsp_3rdpt_method_table)) {
+		entry = &qvsp_3rdpt_method_table[idx];
+		entry->vendor = vendor;
+		entry->ba_throt_session_dur = throt_session_dur;
+		entry->ba_throt_winsize = throt_winsize;
+	} else {
+		DBGPRINTF(DBG_LL_CRIT, QDRV_LF_VSP, "invalid table index %u\n", idx);
+	}
+}
+
+/* Print the whole 3rd-party method table (all slots, including unused
+ * zero-filled ones) to the kernel log. */
+static void qdrv_wlan_vsp_3rdpt_dump_method_table(void)
+{
+	struct qvsp_3rdpt_method_entry *entry;
+	int i;
+
+	printk("VSP 3rd party method table:\n");
+	printk("idx vendor throt_dur throt_winsize\n");
+	for (i = 0; i < ARRAY_SIZE(qvsp_3rdpt_method_table); i++) {
+		entry = &qvsp_3rdpt_method_table[i];
+		printk("%3u 0x%04x %9u %13u\n", i, entry->vendor,
+			entry->ba_throt_session_dur, entry->ba_throt_winsize);
+	}
+}
+
+/* Subcommands packed into the top 2 bits of the manual BA-throt ioctl
+ * value; the remaining 30 bits carry subcommand-specific fields (masks
+ * below, decoded with MS_OP). */
+enum qdrv_manual_ba_throt_subcmd {
+	QDRV_MANUAL_BA_THROT_SUBCMD_SET_PARAM = 0,
+	QDRV_MANUAL_BA_THROT_SUBCMD_APPLY_THROT = 1,
+	QDRV_MANUAL_BA_THROT_SUBCMD_DUMP_BA = 2,
+	QDRV_MANUAL_BA_THROT_SUBCMD_SET_VENDOR_TABLE = 3,
+};
+
+#define QDRV_MANUAL_BA_THROT_SUBCMD		0xC0000000
+#define QDRV_MANUAL_BA_THROT_VALUE		0x3FFFFFFF
+
+/* SET_PARAM fields: interval (14 bits), duration (8), winsize (8) */
+#define QDRV_MANUAL_BA_THROT_INTV		0x3FFF0000
+#define QDRV_MANUAL_BA_THROT_DUR		0x0000FF00
+#define QDRV_MANUAL_BA_THROT_WINSIZE		0x000000FF
+
+/* APPLY_THROT fields: enable flag, node cache index, TID */
+#define QDRV_MANUAL_BA_THROT_ENABLE		0x3FFF0000
+#define QDRV_MANUAL_BA_THROT_NCIDX		0x0000FF00
+#define QDRV_MANUAL_BA_THROT_TID		0x000000FF
+
+/* DUMP_BA field: node cache index */
+#define QDRV_MANUAL_BA_THROT_DUMP_NCIDX		0x000000FF
+
+/* SET_VENDOR_TABLE fields: table slot, vendor id, per-technique flags */
+#define QDRV_MANUAL_BA_THROT_IDX		0x3F000000
+#define QDRV_MANUAL_BA_THROT_VENDOR		0x00F00000
+#define QDRV_MANUAL_BA_THROT_USE_DUR		0x000F0000
+#define QDRV_MANUAL_BA_THROT_USE_WINSIZE	0x0000F000
+
+/*
+ * Manually control BA throttling instead of VSP automatic control
+ *
+ * Decodes a packed ioctl value (see QDRV_MANUAL_BA_THROT_* masks) and
+ * dispatches on the subcommand: store parameters, apply/clear throttling
+ * on one node+TID, dump a node's BA state, or edit the 3rd-party vendor
+ * method table.  Parameters set by SET_PARAM persist in function-static
+ * storage until the next APPLY_THROT.
+ */
+static void qdrv_wlan_manual_ba_throt(struct qdrv_wlan *qw, struct qdrv_vap *qv, unsigned int value)
+{
+	uint32_t subcmd;
+	struct ieee80211_node *ni = NULL;
+	/* NOTE(review): static state is shared across all devices/callers of
+	 * this debug path — acceptable for a manual debug knob, but not
+	 * re-entrant. */
+	static uint32_t manual_ba_throt_intv;
+	static uint32_t manual_ba_throt_dur;
+	static uint32_t manual_ba_throt_winsize;
+	uint32_t enable;
+	uint32_t ncidx;
+	int32_t tid;
+	uint8_t idx;
+	uint8_t vendor;
+	uint8_t use_dur_throt;
+	uint8_t use_winsize_throt;
+
+	subcmd = MS_OP(value, QDRV_MANUAL_BA_THROT_SUBCMD);
+	value = MS_OP(value, QDRV_MANUAL_BA_THROT_VALUE);
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_VSP, "manual ba throt: subcmd=%u, value=0x%x\n", subcmd, value);
+
+	switch (subcmd) {
+	case QDRV_MANUAL_BA_THROT_SUBCMD_SET_PARAM:
+		manual_ba_throt_intv = MS_OP(value, QDRV_MANUAL_BA_THROT_INTV);
+		manual_ba_throt_dur = MS_OP(value, QDRV_MANUAL_BA_THROT_DUR);
+		manual_ba_throt_winsize = MS_OP(value, QDRV_MANUAL_BA_THROT_WINSIZE);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "set manual ba throt intv=%u dur=%u win_size=%u\n",
+				manual_ba_throt_intv, manual_ba_throt_dur, manual_ba_throt_winsize);
+		break;
+	case QDRV_MANUAL_BA_THROT_SUBCMD_APPLY_THROT:
+		enable = MS_OP(value, QDRV_MANUAL_BA_THROT_ENABLE);
+		ncidx = MS_OP(value, QDRV_MANUAL_BA_THROT_NCIDX);
+		tid = MS_OP(value, QDRV_MANUAL_BA_THROT_TID);
+		/* find_node takes a reference; released via free_node below */
+		ni = ieee80211_find_node_by_node_idx(&qv->iv, ncidx);
+		if (!ni) {
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_VSP, "node %u not found\n", ncidx);
+			break;
+		}
+		if (enable) {
+			qdrv_wlan_vsp_ba_throt(ni, tid,	manual_ba_throt_intv, manual_ba_throt_dur,
+					manual_ba_throt_winsize);
+		} else {
+			/* all-zero parameters disable throttling for the TID */
+			qdrv_wlan_vsp_ba_throt(ni, tid, 0, 0, 0);
+		}
+		ieee80211_free_node(ni);
+		break;
+	case QDRV_MANUAL_BA_THROT_SUBCMD_DUMP_BA:
+		ncidx = MS_OP(value, QDRV_MANUAL_BA_THROT_DUMP_NCIDX);
+		ni = ieee80211_find_node_by_node_idx(&qv->iv, ncidx);
+		if (!ni) {
+			DBGPRINTF(DBG_LL_CRIT, QDRV_LF_VSP, "node %u not found\n", ncidx);
+			break;
+		}
+		qdrv_wlan_dump_ba(ni);
+		ieee80211_free_node(ni);
+		break;
+	case QDRV_MANUAL_BA_THROT_SUBCMD_SET_VENDOR_TABLE:
+		idx = MS_OP(value, QDRV_MANUAL_BA_THROT_IDX);
+		vendor = MS_OP(value, QDRV_MANUAL_BA_THROT_VENDOR);
+		use_dur_throt = MS_OP(value, QDRV_MANUAL_BA_THROT_USE_DUR);
+		use_winsize_throt = MS_OP(value, QDRV_MANUAL_BA_THROT_USE_WINSIZE);
+
+		qdrv_wlan_vsp_3rdpt_set_method(idx, vendor, use_dur_throt, use_winsize_throt);
+		qdrv_wlan_vsp_3rdpt_dump_method_table();
+		break;
+	default:
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "unknown subcmd %u\n", subcmd);
+		break;
+	}
+}
+
+/*
+ * Enable or disable VSP WME throttling for one access category.
+ *
+ * When enabling, the current per-BSS WME params for the AC are copied
+ * into the throttle param set and then overridden with the supplied
+ * aifsn/ecwmin/ecwmax/txoplimit.  When disabling, the AC's throttle
+ * params are zeroed and add_qwme_ie is cleared once no AC remains
+ * throttled.  The new params are then pushed to every VAP.
+ * Always returns 0.  token is the qdrv_wlan (opaque for the VSP callback
+ * registration).
+ */
+int qdrv_wlan_vsp_wme_throt(void *token, uint32_t ac, uint32_t enable,
+		uint32_t aifsn, uint32_t ecwmin, uint32_t ecwmax, uint32_t txoplimit,
+		uint32_t add_qwme_ie)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)token;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap;
+	struct ieee80211_wme_state *wme = &qw->ic.ic_wme;
+	struct chanAccParams *acc_params;
+	struct wmm_params *params;
+
+	acc_params = &wme->wme_throt_bssChanParams;
+	params = &acc_params->cap_wmeParams[ac];
+	if (enable) {
+		wme->wme_throt_bm |= BIT(ac);
+		wme->wme_throt_add_qwme_ie = add_qwme_ie;
+		memcpy(params, &wme->wme_bssChanParams.cap_wmeParams[ac], sizeof(struct wmm_params));
+		params->wmm_aifsn = aifsn;
+		params->wmm_logcwmin = ecwmin;
+		params->wmm_logcwmax = ecwmax;
+		params->wmm_txopLimit = txoplimit;
+	} else {
+		wme->wme_throt_bm &= ~BIT(ac);
+		if (!wme->wme_throt_bm) {
+			wme->wme_throt_add_qwme_ie = 0;
+		}
+		memset(params, 0x0, sizeof(struct wmm_params));
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "set ac %u wme throt: enable=%u aifsn=%u"
+			" ecwmin=%u ecwmax=%u txoplimit=%u add_qwme_ie=%u\n",
+			ac, enable, params->wmm_aifsn, params->wmm_logcwmin, params->wmm_logcwmax,
+			params->wmm_txopLimit, wme->wme_throt_add_qwme_ie);
+
+	/* NOTE(review): the count bumped here belongs to wme_wmeBssChanParams,
+	 * not the wme_throt_bssChanParams edited above — confirm this is the
+	 * intended WME IE version bump and not a typo. */
+	wme->wme_wmeBssChanParams.cap_info_count++;
+	/* apply it to all vap as we don't support per-vap wme params now */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		ieee80211_wme_updateparams(vap, 0);
+	}
+
+	return 0;
+}
+
+/* Print the current WME throttle state (bitmap, qwme-ie flag and the
+ * per-AC throttle parameters) to the kernel log. */
+static void qdrv_wlan_vsp_wme_throt_dump(struct qdrv_wlan *qw)
+{
+	struct ieee80211_wme_state *wme = &qw->ic.ic_wme;
+	struct chanAccParams *acc_params;
+	struct wmm_params *params;
+	uint32_t ac;
+
+	acc_params = &wme->wme_throt_bssChanParams;
+
+	printk("VSP wme throt state: throt_bm=0x%x, add_qwme_ie=%u\n", wme->wme_throt_bm,
+			wme->wme_throt_add_qwme_ie);
+	printk("ac enable aifsn ecwmin ecwmax txoplimit\n");
+	for (ac = 0; ac < WME_NUM_AC; ac++) {
+		params = &acc_params->cap_wmeParams[ac];
+		printk("%2u %6u %5u %6u %6u %9u\n",
+			ac, !!(wme->wme_throt_bm & BIT(ac)),
+			params->wmm_aifsn, params->wmm_logcwmin, params->wmm_logcwmax,
+			params->wmm_txopLimit);
+	}
+}
+
+/* Subcommands packed into the top 2 bits of the manual WME-throt ioctl
+ * value; remaining 30 bits carry subcommand-specific fields (masks
+ * below, decoded with MS_OP). */
+enum qdrv_manual_wme_throt_subcmd {
+	QDRV_MANUAL_WME_THROT_SUBCMD_SET_PARAM = 0,
+	QDRV_MANUAL_WME_THROT_SUBCMD_APPLY_THROT = 1,
+	QDRV_MANUAL_WME_THROT_SUBCMD_DUMP = 2,
+};
+
+#define QDRV_MANUAL_WME_THROT_SUBCMD	0xC0000000
+#define QDRV_MANUAL_WME_THROT_VALUE	0x3FFFFFFF
+
+/* SET_PARAM fields */
+#define QDRV_MANUAL_WME_THROT_AIFSN	0x3F000000
+#define QDRV_MANUAL_WME_THROT_ECWMIN	0x00F00000
+#define QDRV_MANUAL_WME_THROT_ECWMAX	0x000F0000
+#define QDRV_MANUAL_WME_THROT_TXOPLIMIT	0x0000FFFF
+/* APPLY_THROT fields (reuse the same bit positions) */
+#define QDRV_MANUAL_WME_THROT_ENABLE	0x3F000000
+#define QDRV_MANUAL_WME_THROT_AC	0x00F00000
+
+/*
+ * Manually control WME throttling instead of VSP automatic control
+ *
+ * Decodes a packed ioctl value (see QDRV_MANUAL_WME_THROT_* masks) and
+ * dispatches: store parameters, apply/clear throttling on one AC, or
+ * dump the current throttle state.  Parameters set by SET_PARAM persist
+ * in function-static storage until the next APPLY_THROT.
+ */
+static void qdrv_wlan_manual_wme_throt(struct qdrv_wlan *qw, struct qdrv_vap *qv, unsigned int value)
+{
+	uint32_t subcmd;
+	/* shared debug state; not re-entrant (same caveat as BA variant) */
+	static uint32_t manual_wme_throt_aifsn;
+	static uint32_t manual_wme_throt_ecwmin;
+	static uint32_t manual_wme_throt_ecwmax;
+	static uint32_t manual_wme_throt_txoplimit;
+	uint32_t enable;
+	uint32_t ac;
+
+	subcmd = MS_OP(value, QDRV_MANUAL_WME_THROT_SUBCMD);
+	value = MS_OP(value, QDRV_MANUAL_WME_THROT_VALUE);
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_VSP, "manual wme throt: subcmd=%u, value=0x%x\n", subcmd, value);
+
+	switch (subcmd) {
+	case QDRV_MANUAL_WME_THROT_SUBCMD_SET_PARAM:
+		manual_wme_throt_aifsn = MS_OP(value, QDRV_MANUAL_WME_THROT_AIFSN);
+		manual_wme_throt_ecwmin = MS_OP(value, QDRV_MANUAL_WME_THROT_ECWMIN);
+		manual_wme_throt_ecwmax = MS_OP(value, QDRV_MANUAL_WME_THROT_ECWMAX);
+		manual_wme_throt_txoplimit = MS_OP(value, QDRV_MANUAL_WME_THROT_TXOPLIMIT);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP,
+				"set manual wme throt aifsn=%u ecwmin=%u ecwmax=%u txoplimit=%u\n",
+				manual_wme_throt_aifsn, manual_wme_throt_ecwmin, manual_wme_throt_ecwmax,
+				manual_wme_throt_txoplimit);
+		break;
+	case QDRV_MANUAL_WME_THROT_SUBCMD_APPLY_THROT:
+		enable = MS_OP(value, QDRV_MANUAL_WME_THROT_ENABLE);
+		ac = MS_OP(value, QDRV_MANUAL_WME_THROT_AC);
+		if (enable) {
+			qdrv_wlan_vsp_wme_throt(qw, ac, enable,
+				manual_wme_throt_aifsn, manual_wme_throt_ecwmin,
+				manual_wme_throt_ecwmax, manual_wme_throt_txoplimit, 1);
+		} else {
+			qdrv_wlan_vsp_wme_throt(qw, ac, 0, 0, 0, 0, 0, 0);
+		}
+		break;
+	case QDRV_MANUAL_WME_THROT_SUBCMD_DUMP:
+		qdrv_wlan_vsp_wme_throt_dump(qw);
+		break;
+	default:
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "unknown subcmd %u\n", subcmd);
+		break;
+	}
+}
+
+/*
+ * Register the 3rd-party throttling callbacks with the VSP core and set
+ * up (but do not arm) the BA-throt timer.  Always returns 0.
+ */
+int qdrv_wlan_vsp_3rdpt_init(struct qdrv_wlan *qw)
+{
+	qvsp_3rdpt_register_cb(qw->qvsp, &qw->ic.ic_wme, qdrv_wlan_vsp_3rdpt_get_method, qdrv_wlan_vsp_ba_throt,
+				qdrv_wlan_vsp_wme_throt);
+
+	/* Timer is armed later by qdrv_wlan_vsp_ba_throt() on the first
+	 * throttled TID. */
+	init_timer(&qw->vsp_ba_throt);
+	qw->vsp_ba_throt.function = qdrv_wlan_vsp_ba_throt_timer;
+	qw->vsp_ba_throt.data = (unsigned long) qw;
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "init ok\n");
+	return 0;
+}
+
+/* Stop the BA-throt timer on teardown. */
+void qdrv_wlan_vsp_3rdpt_exit(struct qdrv_wlan *qw)
+{
+	/* NOTE(review): del_timer() does not wait for a running callback,
+	 * and the callback rearms itself — del_timer_sync() would be the
+	 * safe teardown; confirm context allows it. */
+	del_timer(&qw->vsp_ba_throt);
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_VSP, "exit ok\n");
+}
+#endif	/* CONFIG_QVSP */
+
+extern void dfs_reentry_chan_switch_notify(struct net_device *dev, struct ieee80211_channel *new_chan);
+extern struct ieee80211_channel* qdrv_radar_select_newchan(u_int8_t new_ieee);
+
+/*
+ * Send a Channel Switch Announcement action frame on the VAP, guarding
+ * against a NULL BSS node (logged as an error and skipped).
+ */
+static void qdrv_wlan_send_csa_frame(struct ieee80211vap *vap,
+		u_int8_t csa_mode,
+		u_int8_t csa_chan,
+		u_int8_t csa_count,
+		u_int64_t tsf)
+{
+	if (vap->iv_bss == NULL) {
+		DBGPRINTF_E("CSA sending frame for NULL BSS\n");
+	} else {
+		ieee80211_send_csa_frame(vap, csa_mode, csa_chan, csa_count, tsf);
+	}
+}
+
+/* Seed the power-management parameter array with its compile-time
+ * defaults (QTN_PM_PARAM_DEFAULTS). */
+static void qdrv_wlan_pm_state_init(struct ieee80211com *ic)
+{
+	const static int defaults[QTN_PM_IOCTL_MAX] = QTN_PM_PARAM_DEFAULTS;
+	memcpy(ic->ic_pm_state, defaults, sizeof(ic->ic_pm_state));
+}
+
+/*
+ * Bump the RX coexistence counter matching the given event code.
+ * Unknown codes are silently ignored.
+ */
+void qdrv_wlan_coex_stats_update(struct ieee80211com *ic, uint32_t value)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	switch (value) {
+		case WLAN_COEX_STATS_BW_ACTION:
+			RXSTAT(qw, rx_coex_bw_action);
+			break;
+		case WLAN_COEX_STATS_BW_ASSOC:
+			RXSTAT(qw, rx_coex_bw_assoc);
+			break;
+		case WLAN_COEX_STATS_BW_SCAN:
+			RXSTAT(qw, rx_coex_bw_scan);
+			break;
+	}
+}
+
+/* Read the CCA-adjust-in-progress flag from the MuC/LHost shared
+ * parameter block (volatile: updated by the other core). */
+int ieee80211_get_cca_adjusting_status(void)
+{
+	volatile struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+	return sp->cca_adjusting_flag;
+}
+
+/*
+ * Populate the device's 802.11n (HT) capability and info structures:
+ * channel width / SGI / STBC flags, A-MPDU limits, TxBF capabilities and
+ * the MCS sets, with antenna counts keyed off the supported stream
+ * feature (2x2 / 3x3 / default 4x4).  No-op (returns 0) when
+ * QDRV_FEATURE_HT is not compiled in.
+ */
+static int qdrv_wlan_80211_cfg_ht(struct ieee80211com *ic)
+{
+#ifdef QDRV_FEATURE_HT
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+	ic->ic_htcap.maxmsdu = IEEE80211_MSDU_SIZE_3839;
+	ic->ic_htcap.cap |= (IEEE80211_HTCAP_C_CHWIDTH40 |
+				IEEE80211_HTCAP_C_SHORTGI40 |
+				IEEE80211_HTCAP_C_SHORTGI20);
+
+	ic->ic_htcap.numrxstbcstr = IEEE80211_MAX_TX_STBC_SS;
+	ic->ic_htcap.cap |= (IEEE80211_HTCAP_C_TXSTBC |
+				IEEE80211_HTCAP_C_RXSTBC |
+				IEEE80211_HTCAP_C_MAXAMSDUSIZE_8K);
+	ic->ic_htcap.pwrsave = IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE ;
+
+	/*
+	 * Workaround for transfer across slow ethernet interfaces (100Mbps or less)
+	 * Reduce advertised RX MAX AMPDU to reduce sender hold time
+	 * Reduce TX aggr hold time (done in MuC)
+	 */
+	if (board_slow_ethernet()) {
+		ic->ic_htcap.maxampdu = IEEE80211_HTCAP_MAXRXAMPDU_8191;
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE,
+				"Slow Ethernet WAR: RXAMPDU %d bytes\n", ic->ic_htcap.maxampdu);
+	} else {
+		ic->ic_htcap.maxampdu = IEEE80211_HTCAP_MAXRXAMPDU_65535;
+	}
+
+	/* Newer (11ac-capable) baseband needs less MPDU spacing */
+	if (sp->lh_chip_id >= QTN_BBIC_11AC) {
+		ic->ic_htcap.mpduspacing = IEEE80211_HTCAP_MPDUSPACING_4;
+	} else {
+		ic->ic_htcap.mpduspacing = IEEE80211_HTCAP_MPDUSPACING_8;
+	}
+
+	ic->ic_htcap.maxdatarate = 0;	/* Highest advertised rate is supported */
+
+	IEEE80211_HTCAP_SET_TXBF_CAPABILITIES(&ic->ic_htcap,
+						(IEEE80211_HTCAP_B_NDP_RX |
+						IEEE80211_HTCAP_B_NDP_TX |
+						IEEE80211_HTCAP_B_EXP_NCOMP_STEER |
+						IEEE80211_HTCAP_B_EXP_COMP_STEER));
+	IEEE80211_HTCAP_SET_EXP_NCOMP_TXBF(&ic->ic_htcap, IEEE80211_HTCAP_B_CAPABLE_BOTH);
+	IEEE80211_HTCAP_SET_EXP_COMP_TXBF(&ic->ic_htcap, IEEE80211_HTCAP_B_CAPABLE_BOTH);
+	IEEE80211_HTCAP_SET_GROUPING(&ic->ic_htcap, IEEE80211_HTCAP_B_GROUPING_ONE_TWO_FOUR);
+
+	/* Antenna / stream counts follow the licensed stream configuration */
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X2, 0)) {
+		ic->ic_ht_nss_cap = QTN_2X2_GLOBAL_RATE_NSS_MAX;
+		IEEE80211_HTCAP_SET_NCOMP_NUM_BF(&ic->ic_htcap, IEEE80211_HTCAP_B_ANTENNAS_TWO);
+		IEEE80211_HTCAP_SET_COMP_NUM_BF(&ic->ic_htcap, IEEE80211_HTCAP_B_ANTENNAS_TWO);
+		IEEE80211_HTCAP_SET_CHAN_EST(&ic->ic_htcap, IEEE80211_HTCAP_B_ST_STREAM_TWO);
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0)) {
+		ic->ic_ht_nss_cap = QTN_3X3_GLOBAL_RATE_NSS_MAX;
+		IEEE80211_HTCAP_SET_NCOMP_NUM_BF(&ic->ic_htcap, IEEE80211_HTCAP_B_ANTENNAS_THREE);
+		IEEE80211_HTCAP_SET_COMP_NUM_BF(&ic->ic_htcap, IEEE80211_HTCAP_B_ANTENNAS_THREE);
+		IEEE80211_HTCAP_SET_CHAN_EST(&ic->ic_htcap, IEEE80211_HTCAP_B_ST_STREAM_THREE);
+	} else {
+		ic->ic_ht_nss_cap = QTN_GLOBAL_RATE_NSS_MAX;
+		IEEE80211_HTCAP_SET_NCOMP_NUM_BF(&ic->ic_htcap, IEEE80211_HTCAP_B_ANTENNAS_FOUR);
+		IEEE80211_HTCAP_SET_COMP_NUM_BF(&ic->ic_htcap, IEEE80211_HTCAP_B_ANTENNAS_FOUR);
+		IEEE80211_HTCAP_SET_CHAN_EST(&ic->ic_htcap, IEEE80211_HTCAP_B_ST_STREAM_FOUR);
+	}
+
+	qdrv_wlan_80211_set_mcsset(ic);
+	qdrv_wlan_80211_set_mcsparams(ic);
+	ic->ic_htinfo.sigranularity = IEEE80211_HTINFO_SIGRANULARITY_5;
+	ic->ic_htinfo.basicmcsset[IEEE80211_HT_MCSSET_20_40_NSS1] = 0;
+	ic->ic_htinfo.basicmcsset[IEEE80211_HT_MCSSET_20_40_NSS2] = 0;
+#endif
+
+	return 0;
+}
+
+/*
+ * Populate 802.11ac (VHT) capability and operation structures.
+ *
+ * MU-MIMO is advertised asymmetrically: beamformee for STA, beamformer
+ * for AP.  Stream-dependent fields (NSS cap, STS, sounding dimensions)
+ * follow the licensed stream configuration; an unrecognized stream mode
+ * is an error (-1).  band_24g selects the reduced 2.4 GHz A-MPDU size
+ * and 20/40 MHz operation width.  Returns 0 on success; no-op when
+ * QDRV_FEATURE_VHT is not compiled in.
+ */
+static int qdrv_wlan_80211_cfg_vht(struct ieee80211_vhtcap *vhtcap, struct ieee80211_vhtop *vhtop,
+		enum ieee80211_vht_nss *vht_nss_cap, int band_24g, enum ieee80211_opmode opmode,
+		uint8_t mu_enable)
+{
+#ifdef QDRV_FEATURE_VHT
+	/*
+	 * Not yet supported:
+	 *   IEEE80211_VHTCAP_C_SHORT_GI_160
+	 *   IEEE80211_VHTCAP_C_VHT_TXOP_PS
+	 */
+	vhtcap->cap_flags = IEEE80211_VHTCAP_C_RX_LDPC |
+					IEEE80211_VHTCAP_C_SHORT_GI_80 |
+					IEEE80211_VHTCAP_C_TX_STBC |
+					IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP |
+					IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP |
+					IEEE80211_VHTCAP_C_PLUS_HTC_MINUS_VHT_CAP |
+					IEEE80211_VHTCAP_C_RX_ATN_PATTERN_CONSISTNCY |
+					IEEE80211_VHTCAP_C_TX_ATN_PATTERN_CONSISTNCY;
+
+	if (mu_enable) {
+		if (opmode == IEEE80211_M_STA) {
+			vhtcap->cap_flags |= IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP;
+		} else if (opmode == IEEE80211_M_HOSTAP) {
+			vhtcap->cap_flags |= IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP;
+		}
+
+	}
+
+	vhtcap->maxmpdu = IEEE80211_VHTCAP_MAX_MPDU_11454;
+	vhtcap->chanwidth = IEEE80211_VHTCAP_CW_80M_ONLY ;
+	vhtcap->rxstbc = IEEE80211_VHTCAP_RX_STBC_UPTO_1;
+	vhtcap->maxampduexp = (band_24g ? IEEE80211_VHTCAP_MAX_A_MPDU_65535 : IEEE80211_VHTCAP_MAX_A_MPDU_1048575); /* revisit */
+	vhtcap->lnkadptcap = IEEE80211_VHTCAP_LNKADAPTCAP_BOTH;
+
+	/* Beamforming STS / sounding dimensions per stream configuration */
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X2, 0)) {
+		*vht_nss_cap = IEEE80211_VHT_NSS2;
+		vhtcap->bfstscap = IEEE80211_VHTCAP_RX_STS_2;
+		vhtcap->numsounding = IEEE80211_VHTCAP_SNDDIM_2;
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X4, 0)) {
+		*vht_nss_cap = IEEE80211_VHT_NSS4;
+		vhtcap->bfstscap = IEEE80211_VHTCAP_RX_STS_4;
+		vhtcap->numsounding = IEEE80211_VHTCAP_SNDDIM_2;
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0)) {
+		*vht_nss_cap = IEEE80211_VHT_NSS3;
+		vhtcap->bfstscap = IEEE80211_VHTCAP_RX_STS_3;
+		vhtcap->numsounding = IEEE80211_VHTCAP_SNDDIM_3;
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_4X4, 0)) {
+		*vht_nss_cap = IEEE80211_VHT_NSS4;
+		vhtcap->bfstscap = IEEE80211_VHTCAP_RX_STS_4;
+		vhtcap->numsounding = IEEE80211_VHTCAP_SNDDIM_4;
+	} else {
+		DBGPRINTF_E("%s: stream mode is not valid\n", __func__);
+		return -1;
+	}
+
+	vhtcap->bfstscap_save = IEEE80211_VHTCAP_RX_STS_INVALID;
+
+	qdrv_wlan_80211_set_vht_mcsset(vhtcap, *vht_nss_cap, IEEE80211_VHT_MCS_0_9);
+
+	vhtcap->rxlgimaxrate = 0;	/* revisit */
+	vhtcap->txlgimaxrate = 0;	/* revisit */
+
+	vhtop->chanwidth = (band_24g ? IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ : IEEE80211_VHTOP_CHAN_WIDTH_80MHZ);
+	vhtop->centerfreq0 = 0;	/* revisit */
+	vhtop->centerfreq1 = 0;	/* Not supported in current BBIC4 hardware */
+
+	/* Basic MCS/NSS set: require only NSS1 MCS0-7 of all peers */
+	vhtop->basicvhtmcsnssset = htons(qdrv_wlan_80211_vhtmcs_map(IEEE80211_VHT_NSS1,
+									IEEE80211_VHT_MCS_0_7));
+#endif /* QDRV_FEATURE_VHT */
+	return 0;
+}
+
+/*
+ * Load the dynamic-mitigation weighting factors from bootcfg.
+ *
+ * For each of the five "dm_*_factor" variables: if present and parseable
+ * as "=<int>" within its MIN/MAX range, store the value and set the
+ * matching PRESENT flag; otherwise that factor is left unset (flag
+ * clear).  A value that fails to parse leaves `value` at its previous
+ * content, but the sscanf()==1 guard prevents it being applied.
+ */
+static void qdrv_wlan_init_dm_factors(struct ieee80211com *ic)
+{
+	char tmpbuf[QDRV_BOOTCFG_BUF_LEN];
+	char *varstart;
+	int value = 0;
+
+	ic->ic_dm_factor.flags = 0;
+
+	varstart = bootcfg_get_var("dm_txpower_factor", tmpbuf);
+	if (varstart != NULL &&
+		sscanf(varstart, "=%d", &value) == 1) {
+		if (value >= DM_TXPOWER_FACTOR_MIN &&
+				value <= DM_TXPOWER_FACTOR_MAX) {
+			ic->ic_dm_factor.flags |= DM_FLAG_TXPOWER_FACTOR_PRESENT;
+			ic->ic_dm_factor.txpower_factor = value;
+		}
+	}
+
+	varstart = bootcfg_get_var("dm_aci_factor", tmpbuf);
+	if (varstart != NULL &&
+		sscanf(varstart, "=%d", &value) == 1) {
+		if (value >= DM_ACI_FACTOR_MIN &&
+				value <= DM_ACI_FACTOR_MAX) {
+			ic->ic_dm_factor.flags |= DM_FLAG_ACI_FACTOR_PRESENT;
+			ic->ic_dm_factor.aci_factor = value;
+		}
+	}
+
+	varstart = bootcfg_get_var("dm_cci_factor", tmpbuf);
+	if (varstart != NULL &&
+		sscanf(varstart, "=%d", &value) == 1) {
+		if (value >= DM_CCI_FACTOR_MIN &&
+				value <= DM_CCI_FACTOR_MAX) {
+			ic->ic_dm_factor.flags |= DM_FLAG_CCI_FACTOR_PRESENT;
+			ic->ic_dm_factor.cci_factor = value;
+		}
+	}
+
+	varstart = bootcfg_get_var("dm_dfs_factor", tmpbuf);
+	if (varstart != NULL &&
+		sscanf(varstart, "=%d", &value) == 1) {
+		if (value >= DM_DFS_FACTOR_MIN &&
+				value <= DM_DFS_FACTOR_MAX) {
+			ic->ic_dm_factor.flags |= DM_FLAG_DFS_FACTOR_PRESENT;
+			ic->ic_dm_factor.dfs_factor = value;
+		}
+	}
+
+	varstart = bootcfg_get_var("dm_beacon_factor", tmpbuf);
+	if (varstart != NULL &&
+		sscanf(varstart, "=%d", &value) == 1) {
+		if (value >= DM_BEACON_FACTOR_MIN &&
+				value <= DM_BEACON_FACTOR_MAX) {
+			ic->ic_dm_factor.flags |= DM_FLAG_BEACON_FACTOR_PRESENT;
+			ic->ic_dm_factor.beacon_factor = value;
+		}
+	}
+}
+
+/*
+ * Log the availability status of every channel known to the device
+ * (numeric status plus its human-readable string).
+ */
+void qdrv_ic_dump_chan_availability_status(struct ieee80211com *ic)
+{
+	int i;
+	struct ieee80211_channel * chan = NULL;
+	const char * str[] = QTN_CHAN_AVAIL_STATUS_TO_STR;
+
+	DBGPRINTF_N("Channel   Status   Status_string\n");
+
+	/* Channel numbers start at 1; skip numbers with no channel object */
+	for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
+		chan = ieee80211_find_channel_by_ieee(ic, i);
+		if (chan == NULL) {
+			continue;
+		}
+
+		DBGPRINTF_N("%7d   %6d   %s\n",
+				chan->ic_ieee, ic->ic_chan_availability_status[chan->ic_ieee],
+				str[ic->ic_chan_availability_status[chan->ic_ieee]]);
+	}
+}
+
+/*
+ * Return the availability status for the channel with chan's IEEE number,
+ * or 0 if chan is NULL or its number resolves to no channel object.
+ */
+static int qdrv_get_chan_availability_status_by_chan_num(struct ieee80211com *ic, struct ieee80211_channel *chan)
+{
+	struct ieee80211_channel *channel = NULL;
+
+	if (chan && (channel = ieee80211_find_channel_by_ieee(ic, chan->ic_ieee))) {
+		return ic->ic_chan_availability_status[channel->ic_ieee];
+	}
+	return 0;
+}
+
+/*
+ * Resolve chan's IEEE number to the canonical channel object and forward
+ * to the ic_mark_channel_availability_status hook (if installed).
+ * Silently does nothing for a NULL/unknown channel.
+ */
+static void qdrv_set_chan_availability_status_by_chan_num(struct ieee80211com *ic,
+		struct ieee80211_channel *chan, uint8_t usable)
+{
+	struct ieee80211_channel *channel = NULL;
+
+	if (chan && (channel = ieee80211_find_channel_by_ieee(ic, chan->ic_ieee))) {
+		if (ic->ic_mark_channel_availability_status) {
+			ic->ic_mark_channel_availability_status(ic, channel, usable);
+		}
+	}
+	return;
+}
+
+/*
+ * Mark the DFS availability status of a channel and all sub-channels of
+ * the current operating bandwidth (80/40/20 MHz).
+ *
+ * `chan` may be any sub-channel of the 80/40 block; the lowest channel of
+ * the block is derived from its VHT80/HT40 position flags and the whole
+ * block is updated together.  Depending on `usable`:
+ *   RADAR_DETECTED  -> set IEEE80211_CHAN_RADAR on all sub-channels
+ *   AVAILABLE       -> mark CAC_DONE / clear CAC_IN_PROGRESS
+ *   CAC_REQUIRED    -> clear RADAR and CAC_DONE (non-occupancy expired)
+ * Non-DFS channels, and radar-flagged channels being set to anything but
+ * RADAR_DETECTED, are ignored.  Finishes by dumping the status table via
+ * the ic hook.
+ */
+static void qdrv_mark_channel_availability_status(struct ieee80211com *ic,
+				struct ieee80211_channel *chan, uint8_t usable)
+{
+	struct ieee80211_channel *low_chan = NULL;
+	int bw = qdrv_wlan_80211_get_cap_bw(ic);
+
+	if (chan == NULL) {
+		return;
+	}
+
+	/* A radar-flagged channel may only transition via RADAR_DETECTED */
+	if ((chan->ic_flags & IEEE80211_CHAN_RADAR) &&
+		(usable != IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED)) {
+		return;
+	}
+
+	if (!(chan->ic_flags & IEEE80211_CHAN_DFS)) {
+		return;
+	}
+
+	/* STA mode: never exceed the BSS bandwidth */
+	if (ic->ic_opmode == IEEE80211_M_STA)
+		bw = MIN(bw, ic->ic_bss_bw);
+
+	switch (bw) {
+		case BW_HT80:
+			/* Locate the lowest 20 MHz member of this VHT80 block
+			 * from the channel's position flag (LL/LU/UL/UU);
+			 * assumes the four members are contiguous in the
+			 * channel array — TODO confirm. */
+			if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) {
+				low_chan = chan;
+			} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) {
+				low_chan = chan - 1;
+			} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) {
+				low_chan = chan - 2;
+			} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) {
+				low_chan = chan - 3;
+			}
+
+			/* NOTE(review): the (low_chan + N) sub-expressions are
+			 * pointer arithmetic and always non-NULL when low_chan
+			 * is; this condition reduces to just `low_chan` and
+			 * does not bounds-check the array. */
+			if (low_chan && (low_chan + 1) && (low_chan + 2) && (low_chan + 3)) {
+				ic->ic_chan_availability_status[low_chan->ic_ieee] = usable;
+				ic->ic_chan_availability_status[(low_chan + 1)->ic_ieee] = usable;
+				ic->ic_chan_availability_status[(low_chan + 2)->ic_ieee] = usable;
+				ic->ic_chan_availability_status[(low_chan + 3)->ic_ieee] = usable;
+
+				/* If radar found and non-occupancy started, mark all sub-channels as radar found */
+				if ((usable == IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED) &&
+						(chan->ic_flags & IEEE80211_CHAN_RADAR)) {
+					low_chan->ic_flags |= IEEE80211_CHAN_RADAR;
+					(low_chan + 1)->ic_flags |= IEEE80211_CHAN_RADAR;
+					(low_chan + 2)->ic_flags |= IEEE80211_CHAN_RADAR;
+					(low_chan + 3)->ic_flags |= IEEE80211_CHAN_RADAR;
+				} else if (usable == IEEE80211_CHANNEL_STATUS_AVAILABLE) {
+					/*
+					 * Mark primary channel and subchannels as CAC_DONE,
+					 * to prevent CAC being run when set channel is issued
+					 * on one of the sub-channels.
+					 */
+					low_chan->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+					low_chan->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+					(low_chan + 1) ->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 1)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+					(low_chan + 2)->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 2)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+					(low_chan + 3)->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 3)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+
+				} else if (usable == IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED) {
+					/*
+					 * Non-Occupancy expired; Mark the channels as ready for cac
+					 * Once non-occupancy is period is expired, we should be able to do
+					 * CAC on the channel;
+					 */
+					low_chan->ic_flags &= ~IEEE80211_CHAN_RADAR;
+					(low_chan + 1)->ic_flags &= ~IEEE80211_CHAN_RADAR;
+					(low_chan + 2)->ic_flags &= ~IEEE80211_CHAN_RADAR;
+					(low_chan + 3)->ic_flags &= ~IEEE80211_CHAN_RADAR;
+
+					low_chan->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 1)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 2)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 3)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+				}
+			}
+			break;
+		case BW_HT40:
+			/* HT40D means chan is the upper half; lower is chan-1 */
+			if (chan->ic_flags & IEEE80211_CHAN_HT40D) {
+				low_chan = chan - 1;
+			} else {
+				low_chan = chan;
+			}
+
+			/* NOTE(review): (low_chan + 1) is tautologically
+			 * non-NULL — see the BW_HT80 note above. */
+			if ((low_chan) && (low_chan + 1)) {
+				ic->ic_chan_availability_status[low_chan->ic_ieee] = usable;
+				ic->ic_chan_availability_status[(low_chan + 1)->ic_ieee] = usable;
+				/* If radar found and non-occupancy started, mark all sub-channels as radar found */
+				if ((usable == IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_RADAR_DETECTED) &&
+						(chan->ic_flags & IEEE80211_CHAN_RADAR)) {
+					low_chan->ic_flags |= IEEE80211_CHAN_RADAR;
+					(low_chan + 1)->ic_flags |= IEEE80211_CHAN_RADAR;
+				} else if (usable == IEEE80211_CHANNEL_STATUS_AVAILABLE) {
+					/*
+					 * Mark primary channel and subchannels as CAC_DONE,
+					 * to prevent CAC being run when set channel is issued
+					 * on one of the sub-channels.
+					 */
+					low_chan->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+					low_chan->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+					(low_chan + 1) ->ic_flags |= IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 1)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+
+				} else if (usable == IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED) {
+					/*
+					 * Non-Occupancy expired; Mark the channels as ready for cac
+					 * Once non-occupancy is period is expired, we should be able to do
+					 * CAC on the channel
+					 */
+					low_chan->ic_flags &= ~IEEE80211_CHAN_RADAR;
+					(low_chan + 1)->ic_flags &= ~IEEE80211_CHAN_RADAR;
+					low_chan->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+					(low_chan + 1)->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+				}
+			}
+
+			break;
+		case BW_HT20:
+			if (chan) {
+				ic->ic_chan_availability_status[chan->ic_ieee] = usable;
+				if (usable == IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED) {
+					chan->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+				}
+			}
+			break;
+		default:
+			printk(KERN_INFO "%s: Invalid bandwidth\n", __func__);
+			return;
+	}
+
+	if (ic->ic_dump_chan_availability_status) {
+		ic->ic_dump_chan_availability_status(ic);
+	}
+	return;
+}
+
+/*
+ * Set or clear a CAC-related flag (cac_flag) on a channel and all
+ * sub-channels of the current operating bandwidth, using the same
+ * low-channel derivation as qdrv_mark_channel_availability_status().
+ */
+static void qdrv_mark_channel_dfs_cac_status(struct ieee80211com *ic, struct ieee80211_channel *chan, u_int32_t cac_flag, bool set)
+{
+	struct ieee80211_channel *low_chan = NULL;
+	int bw = qdrv_wlan_80211_get_cap_bw(ic);
+
+	if (chan == NULL) {
+		return;
+	}
+
+	switch (bw) {
+		case BW_HT80:
+			/* Derive lowest member of the VHT80 block from the
+			 * channel's LL/LU/UL/UU position flag. */
+			if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) {
+				low_chan = chan;
+			} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) {
+				low_chan = chan - 1;
+			} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) {
+				low_chan = chan - 2;
+			} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) {
+				low_chan = chan - 3;
+			}
+			/* NOTE(review): (low_chan + N) checks are tautological
+			 * pointer arithmetic; condition reduces to `low_chan`. */
+			if (low_chan && (low_chan + 1) && (low_chan + 2) && (low_chan + 3)) {
+				set ? (low_chan->ic_flags |= cac_flag): (low_chan->ic_flags &= ~cac_flag);
+				set ? ((low_chan + 1)->ic_flags |= cac_flag): ((low_chan + 1)->ic_flags &= ~cac_flag);
+				set ? ((low_chan + 2)->ic_flags |= cac_flag): ((low_chan + 2)->ic_flags &= ~cac_flag);
+				set ? ((low_chan + 3)->ic_flags |= cac_flag): ((low_chan + 3)->ic_flags &= ~cac_flag);
+			}
+			break;
+		case BW_HT40:
+			if (chan->ic_flags & IEEE80211_CHAN_HT40D) {
+				low_chan = chan - 1;
+			} else {
+				low_chan = chan;
+			}
+			if ((low_chan) && (low_chan + 1)) {
+				set ? (low_chan->ic_flags |= cac_flag): (low_chan->ic_flags &= ~cac_flag);
+				set ? ((low_chan + 1)->ic_flags |= cac_flag): ((low_chan + 1)->ic_flags &= ~cac_flag);
+			}
+			break;
+		case BW_HT20:
+			if (chan) {
+				set ? (chan->ic_flags |= cac_flag): (chan->ic_flags &= ~cac_flag);
+			}
+			break;
+		default:
+			printk(KERN_INFO "%s: Invalid bandwidth\n", __func__);
+			return;
+	}
+}
+
+/*
+ * Check whether any DFS channel is usable for DFS re-entry after
+ * refreshing the scan module's channel list.  Returns 1 if the scan
+ * picker can find one, otherwise -EOPNOTSUPP.
+ *
+ * NOTE(review): the failure value -EOPNOTSUPP is non-zero, so a caller
+ * testing the result for truthiness would treat failure as success —
+ * confirm callers compare against 1 (or < 0) explicitly.
+ */
+static int qdrv_is_dfs_chans_available_dfs_reentry(struct ieee80211com *ic, struct ieee80211vap *vap)
+{
+	ieee80211_scan_refresh_scan_module_chan_list(ic, vap);
+
+	/** Select any DFS channel from {CAC_REQUIRED, AVAILABLE} set */
+	if (ieee80211_scan_pickchannel(ic, IEEE80211_SCAN_PICK_ANY_DFS)) {
+		return 1;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+
+/**
+ * @function : qdrv_dfs_chans_available_for_cac
+ * @param    : ieee80211_channel [ch]: check if this channel is ready for CAC.
+ *		if [ch] is NULL, function returns true if any one channel is ready for CAC
+ * @brief    : returns true if atleast one DFS channel is found for which
+ *		cac not yet done
+ */
+static bool qdrv_dfs_chans_available_for_cac(struct ieee80211com *ic, struct ieee80211_channel * ch)
+{
+	int i;
+	int chan = 0;
+	struct ieee80211_channel * ieee80211_channel = ch;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	/* Refresh against the first VAP's view of the channel list */
+	ieee80211_scan_refresh_scan_module_chan_list(ic, TAILQ_FIRST(&ic->ic_vaps));
+
+	/* Check channel ch is ready for CAC */
+	if(ch) {
+		if((ch->ic_flags & IEEE80211_CHAN_DFS) && ieee80211_is_chan_cac_required(ch)) {
+			return true;
+		} else {
+			return false;
+		}
+	}
+
+	/* ch == NULL: scan the whole candidate list for any DFS channel
+	 * still needing CAC */
+	if (ss) {
+		for (i = 0; i < ss->ss_last; i++) {
+			chan = ieee80211_chan2ieee(ic, ss->ss_chans[i]);
+			if (!is_channel_valid(chan)) {
+				continue;
+			}
+
+			ieee80211_channel = ieee80211_find_channel_by_ieee(ic, chan);
+			if (ieee80211_channel == NULL) {
+				continue;
+			}
+
+			if ((ieee80211_channel->ic_flags & IEEE80211_CHAN_DFS)
+				&& ieee80211_is_chan_cac_required(ieee80211_channel)) {
+				return true;
+			}
+		}
+	}
+	return false;
+}
+
+
+/* Accessor pair for the boot-time (initial) CAC duration, in seconds;
+ * a negative value is used to mean "disabled" (see qdrv_stop_icac_procedure). */
+static int qdrv_get_init_cac_duration(struct ieee80211com *ic)
+{
+	return ic->ic_max_boot_cac_duration;
+}
+
+static void qdrv_set_init_cac_duration(struct ieee80211com *ic, int val)
+{
+	ic->ic_max_boot_cac_duration = val;
+}
+
+/* Initial-CAC timer expiry: end the ICAC procedure via the ic hook. */
+static void qdrv_icac_timer_func(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	if (ic->ic_stop_icac_procedure) {
+		ic->ic_stop_icac_procedure(ic);
+	}
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "%s: Timer expired\n", __func__);
+}
+
+/*
+ * Start the boot-time (initial) CAC procedure: arm a one-shot timer for
+ * the configured duration.  Only armed once (guarded by
+ * ic_boot_cac_end_jiffy) and only when a positive duration is set.
+ */
+static void qdrv_start_icac_procedure(struct ieee80211com *ic)
+{
+	/* Update the boot time CAC timestamp only when ICAC is actually in-progress */
+	/* NOTE(review): init_timer() runs before the already-armed guard
+	 * below — re-initializing a pending timer corrupts it; confirm this
+	 * path cannot be entered twice while the timer is pending. */
+	init_timer(&ic->icac_timer);
+	ic->icac_timer.function = qdrv_icac_timer_func;
+	ic->icac_timer.data = (unsigned long) ic;
+
+	if (ic->ic_get_init_cac_duration) {
+		if (!ic->ic_boot_cac_end_jiffy && (ic->ic_get_init_cac_duration(ic) > 0)) {
+			ic->ic_boot_cac_end_jiffy = jiffies + (ic->ic_get_init_cac_duration(ic) * HZ);
+			ic->icac_timer.expires = ic->ic_boot_cac_end_jiffy;
+			add_timer(&ic->icac_timer);
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "%s: Add init CAC timer\n", __func__);
+		}
+	}
+}
+
+/*
+ * Terminate the boot-time CAC (ICAC) procedure: mark the duration as
+ * disabled (-1) and cancel any pending ICAC timer.  Safe to call whether
+ * or not the timer was armed (del_timer on an inactive timer is a no-op).
+ */
+static void qdrv_stop_icac_procedure(struct ieee80211com *ic)
+{
+	/* set the max_boot_cac_duration to -1 */
+	if (ic->ic_set_init_cac_duration) {
+		ic->ic_set_init_cac_duration(ic, -1);
+	}
+
+	/* Stop on-going ICAC timer if any */
+	del_timer(&ic->icac_timer);
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "%s: Init CAC completed\n", __func__);
+}
+
+/*
+ * Handle completion of the boot-time CAC (ICAC) procedure.
+ *
+ * Stops the ICAC timer, then selects the channel to operate on:
+ *  - if OCAC is enabled and the configured OCAC channel is a weather
+ *    channel, pick a non-DFS channel instead;
+ *  - if no designated post-ICAC channel is set, pick any available channel;
+ *  - otherwise (AP-like modes, non-repeater) try the designated channel,
+ *    falling back to the scan module's pick if it is unusable.
+ * Finally schedules the scan module's default action on the chosen channel.
+ */
+static void qdrv_init_cac_completion_event(struct ieee80211com *ic, struct ieee80211vap *vap)
+{
+	struct ieee80211_channel *bestchan = NULL;
+
+	/*
+	 * Guard the callback before invoking it, consistent with
+	 * qdrv_start_icac_procedure()/qdrv_stop_icac_procedure().
+	 */
+	if (ic->ic_get_init_cac_duration == NULL)
+		return;
+
+	if (ic->ic_get_init_cac_duration(ic) > 0) {
+		if (ic->ic_stop_icac_procedure) {
+			ic->ic_stop_icac_procedure(ic);
+		}
+
+		bestchan = ieee80211_find_channel_by_ieee(ic,
+					ic->ic_ocac.ocac_cfg.ocac_chan_ieee);
+		if (bestchan && ic->ic_ocac.ocac_cfg.ocac_enable && ic->ic_ocac.ocac_running == 0 &&
+					ieee80211_is_on_weather_channel(ic, bestchan)) {
+			bestchan = ieee80211_scan_pickchannel(ic, IEEE80211_SCAN_NO_DFS);
+		} else if (ic->ic_des_chan_after_init_cac == 0) {
+			bestchan = ieee80211_scan_pickchannel(ic,
+					IEEE80211_SCAN_PICK_AVAILABLE_ANY_CHANNEL);
+		} else {
+			if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+						vap->iv_opmode == IEEE80211_M_IBSS ||
+						vap->iv_opmode == IEEE80211_M_WDS ||
+						vap->iv_opmode == IEEE80211_M_AHDEMO) &&
+					!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)) {
+
+				struct ieee80211_channel *chan = NULL;
+				/*
+				 * AP operation and we already have a channel;
+				 * bypass the scan and startup immediately.
+				 * But under repeater mode, initiate the AP scan anyway
+				 */
+				ic->ic_chan_is_set = 0;
+				bestchan = ieee80211_find_channel_by_ieee(ic,
+						ic->ic_des_chan_after_init_cac);
+
+				/* to update the fast-switch alternate channel */
+				chan = ieee80211_scan_pickchannel(ic,
+						IEEE80211_SCAN_PICK_AVAILABLE_ANY_CHANNEL);
+
+				/* Fall back to the scan pick if the designated channel is
+				 * missing, unusable, or init-scan is forced */
+				if ((ic->ic_ignore_init_scan_icac) ||
+						(NULL == bestchan) ||
+						((bestchan) && (!ic->ic_check_channel(ic, bestchan, 0, 1)))) {
+					if (chan) {
+						bestchan = chan;
+					}
+				}
+				ic->ic_chan_is_set = 1;
+			}
+			/* designated channel is consumed exactly once */
+			ic->ic_des_chan_after_init_cac = 0;
+		}
+
+		if (bestchan) {
+			struct ieee80211_scan_state *ss = ic->ic_scan;
+
+			ic->ic_des_chan = bestchan;
+			if (ss && ss->ss_ops) {
+				struct ap_state *as = ss->ss_priv;
+				struct ieee80211_scan_entry se;
+
+				memset(&se, 0, sizeof(se));
+				se.se_chan = bestchan;
+				as->as_selbss = se;
+				as->as_action = ss->ss_ops->scan_default;
+				IEEE80211_SCHEDULE_TQUEUE(&as->as_actiontq);
+				ic->ic_pm_reason = IEEE80211_PM_LEVEL_ICAC_COMPLETE_ACTION;
+				ieee80211_pm_queue_work_custom(ic, BOARD_PM_WLAN_IDLE_TIMEOUT);
+			}
+		} else {
+			DBGPRINTF_N("%s: failed to select an available channel\n", __func__);
+		}
+	}
+}
+
+/*
+ * Drive the next step of the boot-time CAC (ICAC) sequence on an AP.
+ *
+ * Returns -1 when ICAC is not applicable (no scan ops, empty scan list,
+ * or ICAC duration not positive); 0 otherwise.
+ *
+ * If no channel can be picked, or there is not enough ICAC time left for
+ * another cac_period, completion is signalled (and the caller's CAC channel
+ * pointer is cleared).  Otherwise the scan module's scan_end hook decides:
+ * a 0 return from scan_end means no channels are left to CAC, which also
+ * completes the procedure.
+ */
+static int qdrv_ap_next_cac(struct ieee80211com *ic, struct ieee80211vap *vap,
+				unsigned long cac_period,
+				struct ieee80211_channel **qdrv_radar_cb_cac_chan,
+				u_int32_t scan_pick_flags)
+{
+	/* ICAC is dependent on the scan module and availability of channels to perform initial CAC */
+	if ((ic->ic_scan->ss_ops == NULL)
+			|| (ic->ic_scan->ss_last == 0)
+			|| (ic->ic_get_init_cac_duration(ic) <= 0))
+		return -1;
+
+	if ((!ieee80211_scan_pickchannel(ic, scan_pick_flags))
+			|| (!(ic->ic_boot_cac_end_jiffy
+			&& time_before(jiffies + cac_period, ic->ic_boot_cac_end_jiffy)))) {
+		if (qdrv_radar_cb_cac_chan) {
+			*qdrv_radar_cb_cac_chan = NULL;
+		}
+		qdrv_init_cac_completion_event(ic, vap);
+	} else {
+		if (ic->ic_scan->ss_ops->scan_end) {
+			/*
+			 * When max_boot_cac timer is too large value, and no channels left for CAC
+			 * stop the ICAC procedure
+			 */
+			if (0 == ic->ic_scan->ss_ops->scan_end(ic->ic_scan, vap, NULL, scan_pick_flags)) {
+				qdrv_init_cac_completion_event(ic, vap);
+			}
+		}
+	}
+	return 0;
+}
+
+/* ic callback adapter: enable transmission; ic is unused by sys_enable_xmit(). */
+static void qdrv_enable_xmit(struct ieee80211com *ic)
+{
+	sys_enable_xmit();
+}
+
+/* ic callback adapter: disable transmission; ic is unused by sys_disable_xmit(). */
+static void qdrv_disable_xmit(struct ieee80211com *ic)
+{
+	sys_disable_xmit();
+}
+
+/*
+ * One-time initialisation of the ieee80211com instance for this device:
+ * sets up the channel table for the RF chip, fills in every ic_* callback
+ * implemented by the qdrv layer, and seeds default values for SCS, OCAC,
+ * OBSS coexistence, power management and STA-DFS state.
+ * Returns 0 on success, -1 if HT/VHT capability configuration fails.
+ */
+static int qdrv_wlan_80211_init(struct ieee80211com *ic, u8 *mac_addr, u8 rf_chipid)
+{
+	int cc_rd = 0;	/* default country code / regulatory domain */
+	int nchans;
+	int i;
+
+	/* Set up some dummy channels */
+	if (rf_chipid == CHIPID_2_4_GHZ) {
+		nchans = IEEE80211_MAX_2_4_GHZ_CHANNELS;
+	} else if (rf_chipid == CHIPID_5_GHZ) {
+		nchans = IEEE80211_MAX_5_GHZ_CHANNELS;
+	} else {
+		nchans = IEEE80211_MAX_DUAL_CHANNELS;
+	}
+	qdrv_wlan_80211_config_channel(ic, nchans);
+
+	ic->ic_ver_sw = QDRV_BLD_VER;
+	ic->ic_ver_hw = get_hardware_revision();
+
+	ic->ic_ver_platform_id = QDRV_CFG_PLATFORM_ID;
+	ic->ic_ver_timestamp = QDRV_BUILDDATE;
+
+	/* Initialize the ieee80211com structure */
+	ic->ic_config_channel_list = qdrv_wlan_80211_config_channel;
+	ic->ic_rf_chipid = rf_chipid;
+	ic->ic_newassoc = qdrv_wlan_80211_newassoc;
+	ic->ic_disassoc = qdrv_wlan_80211_disassoc;
+	ic->ic_node_update = qdrv_wlan_80211_node_update;
+
+	/* These are called without protection from 802.11 layer */
+	ic->ic_updateslot = qdrv_wlan_80211_updateslot;
+	ic->ic_reset = qdrv_wlan_80211_reset;
+	ic->ic_init = qdrv_wlan_80211_start;
+	ic->ic_queue_reset = qdrv_wlan_80211_resetmaxqueue;
+
+	ic->ic_send_80211 = qdrv_wlan_80211_send;
+	ic->ic_get_wlanstats = qdrv_wlan_80211_stats;
+
+	/* Hook up our code */
+	ic->ic_join_bss = qdrv_wlan_80211_join_bss;
+	ic->ic_beacon_update = qdrv_wlan_80211_beacon_update;
+	ic->ic_beacon_stop = qdrv_wlan_80211_beacon_stop;
+
+	ic->ic_set_l2_ext_filter = qdrv_wlan_set_l2_ext_filter;
+	ic->ic_set_l2_ext_filter_port = qdrv_wlan_set_l2_ext_filter_port;
+	ic->ic_get_l2_ext_filter_port = qdrv_wlan_get_l2_ext_filter_port;
+
+	ic->ic_send_to_l2_ext_filter = qdrv_send_to_l2_ext_filter;
+	ic->ic_mac_reserved = qdrv_mac_reserved;
+	ic->ic_setparam = qdrv_wlan_80211_setparam;
+	ic->ic_getparam = qdrv_wlan_80211_getparam;
+	ic->ic_register_node = qdrv_wlan_register_node;
+	ic->ic_unregister_node = qdrv_wlan_unregister_node;
+	ic->ic_get_phy_stats = qdrv_wlan_80211_get_phy_stats;
+	ic->ic_get_cca_stats = qdrv_wlan_80211_get_cca_stats;
+
+	/* Hook up our Block ack code */
+	ic->ic_htaddba = qdrv_wlan_80211_process_addba;
+	ic->ic_htdelba = qdrv_wlan_80211_process_delba;
+
+	/* Hook up our Security code */
+	ic->ic_setkey = qdrv_wlan_80211_setkey;
+	ic->ic_delkey = qdrv_wlan_80211_delkey;
+
+	/* Hook up the MIMO power save mode change */
+	ic->ic_smps = qdrv_wlan_80211_smps;
+
+	/* Function to authorize/deauthorize an STA */
+	ic->ic_node_auth_state_change = qdrv_wlan_auth_state_change;
+
+	/* Station has joined or rejoined a BSS */
+	ic->ic_new_assoc = qdrv_wlan_new_assoc;
+
+	ic->ic_wmm_params_update = qdrv_wlan_update_wmm_params;
+	ic->ic_vap_pri_wme = 1;
+	ic->ic_airfair = QTN_AUC_AIRFAIR_DFT;
+
+	ic->ic_power_table_update = qdrv_wlan_update_chan_power_table;
+
+	ic->ic_power_save = qdrv_wlan_80211_power_save;
+	ic->ic_remain_on_channel = qdrv_remain_on_channel;
+
+	/* Hook up to set the TDLS parameters */
+	ic->ic_set_tdls_param = qdrv_wlan_80211_tdls_set_params;
+	ic->ic_get_tdls_param = qdrv_wlan_80211_tdls_get_params;
+
+	ic->ic_peer_rts_mode = IEEE80211_PEER_RTS_DEFAULT;
+	ic->ic_dyn_wmm = IEEE80211_DYN_WMM_DEFAULT;
+
+	ic->ic_tqew_descr_limit = QTN_AUC_TQEW_DESCR_LIMIT_PERCENT_DFT;
+
+	/* Should set real opmode here - not just a placeholder */
+	ic->ic_opmode = IEEE80211_M_STA;
+
+	ic->ic_country_code = cc_rd;
+	ic->ic_spec_country_code = cc_rd;
+
+	ic->ic_beaconing_scheme = QTN_BEACONING_SCHEME_0;
+	ic->ic_set_beaconing_scheme = qdrv_wlan_80211_set_bcn_scheme;
+
+	ic->ic_caps = 0;
+	ic->ic_caps |= IEEE80211_C_IBSS			/* ibss, nee adhoc, mode */
+			| IEEE80211_C_HOSTAP		/* hostap mode */
+			| IEEE80211_C_MONITOR		/* monitor mode */
+			| IEEE80211_C_AHDEMO		/* adhoc demo mode */
+			| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
+			| IEEE80211_C_SHSLOT		/* short slot time supported */
+			| IEEE80211_C_WPA		/* capable of WPA1 + WPA2 */
+			| IEEE80211_C_WME		/* WMM/WME */
+			| IEEE80211_C_11N
+			| IEEE80211_C_TXPMGT		/* Capable of Tx Power Management */
+			| IEEE80211_C_UEQM		/* Capable of unequal modulation */
+			| IEEE80211_C_BGSCAN		/* Capable of background scan */
+			| IEEE80211_C_UAPSD;		/* Capable of WMM power save*/
+
+	ic->ic_mode_get_phy_stats = MUC_PHY_STATS_ALTERNATE;
+	ic->ic_rx_agg_timeout = IEEE80211_RX_AGG_TIMEOUT_DEFAULT; /* ms */
+	ic->ic_legacy_retry_limit = QTN_DEFAULT_LEGACY_RETRY_COUNT;
+	ic->ic_mu_enable = QTN_GLOBAL_MU_INITIAL_STATE;
+	ic->ic_vht_mcs_cap = IEEE80211_VHT_MCS_0_9;
+	/* for WFA testbed */
+	ic->ic_vht_opmode_notif = IEEE80211_VHT_OPMODE_NOTIF_DEFAULT;
+	ic->use_non_ht_duplicate_for_mu = 0;
+	ic->rx_bws_support_for_mu_ndpa = 0;
+
+	qdrv_wlan_80211_set_11ac_mode(ic, 1);
+
+	if (qdrv_wlan_80211_cfg_ht(ic) != 0)
+		return -1;
+
+	/* 5 GHz VHT capabilities (MU per global initial state) */
+	if (qdrv_wlan_80211_cfg_vht(&ic->ic_vhtcap, &ic->ic_vhtop, &ic->ic_vht_nss_cap, 0,
+				ic->ic_opmode, ic->ic_mu_enable) != 0)
+		return -1;
+
+	/* 2.4 GHz VHT capabilities (MU disabled) */
+	if (qdrv_wlan_80211_cfg_vht(&ic->ic_vhtcap_24g, &ic->ic_vhtop_24g, &ic->ic_vht_nss_cap_24g, 1,
+				ic->ic_opmode, 0) != 0)
+		return -1;
+
+	/* Assign the mac address */
+	IEEE80211_ADDR_COPY(ic->ic_myaddr, mac_addr);
+
+	/* Call MI attach routine. */
+	ieee80211_ifattach(ic);
+	ic->ic_node_alloc = qdrv_node_alloc;
+	ic->ic_qdrv_node_free = qdrv_node_free;
+	ic->ic_scan_start = qtn_scan_start;
+	ic->ic_scan_end = qtn_scan_end;
+	ic->ic_check_channel = qdrv_check_channel;
+	ic->ic_set_channel = qdrv_set_channel;
+	ic->ic_get_tsf = hal_get_tsf;
+	ic->ic_set_channel_deferred = qdrv_set_channel_deferred;
+	ic->ic_set_start_cca_measurement = qdrv_async_cca_read;
+	ic->ic_do_measurement = qtn_do_measurement;
+	ic->ic_finish_measurement = ieee80211_action_finish_measurement;
+	ic->ic_send_csa_frame = qdrv_wlan_send_csa_frame;
+	ic->ic_findchannel = findchannel;
+	ic->ic_cca_token = CCA_TOKEN_INIT_VAL;
+	ic->ic_set_coverageclass = qtn_set_coverageclass;
+	ic->ic_mhz2ieee = qtn_mhz2ieee;
+	ic->ic_vap_create = qtn_vap_create;
+	ic->ic_vap_delete = qtn_vap_delete;
+	ic->ic_get_vap_idx = qdrv_get_vap_idx;
+	ic->ic_radar_detected = qdrv_radar_detected;
+	ic->ic_select_channel = qdrv_radar_select_newchan;
+	ic->ic_dfs_action_scan_done = qdrv_dfs_action_scan_done;
+	ic->ic_dfs_is_eu_region = qdrv_dfs_is_eu_region;
+	ic->ic_mark_dfs_channels = qdrv_wlan_80211_mark_dfs;
+	ic->ic_mark_weather_radar_chans = qdrv_wlan_80211_mark_weather_radar;
+	ic->ic_radar_test_mode_enabled = qdrv_radar_test_mode_enabled;
+	ic->ic_use_rtscts = qdrv_use_rts_cts;
+	ic->ic_sta_set_xmit = qdrv_sta_set_xmit;
+	ic->ic_set_radar = qdrv_set_radar;
+	ic->ic_enable_sta_dfs = qdrv_sta_dfs_enable;
+	ic->ic_radar_detections_num = qdrv_radar_detections_num;
+	ic->ic_complete_cac = qdrv_cac_instant_completed;
+
+	/* Every BSS group starts with the global association limit */
+	ic->ic_sta_assoc_limit = QTN_ASSOC_LIMIT;
+	for (i = 0; i < IEEE80211_MAX_BSS_GROUP; i++) {
+		ic->ic_ssid_grp[i].limit = ic->ic_sta_assoc_limit;
+		ic->ic_ssid_grp[i].reserve = 0;
+		ic->ic_ssid_grp[i].assocs = 0;
+	}
+	ic->ic_emi_power_switch_enable = QTN_EMI_POWER_SWITCH_ENABLE;
+#if defined(QBMPS_ENABLE)
+	ic->ic_bmps_set_frame = qdrv_bmps_set_frame;
+	ic->ic_bmps_release_frame = qdrv_bmps_release_frame;
+#endif
+#ifdef QSCS_ENABLED
+	ic->ic_scs_update_scan_stats = qdrv_scs_update_scan_stats;
+	ic->ic_sample_channel = qdrv_sample_channel;
+	ic->ic_sample_channel_cancel = qdrv_sample_channel_cancel;
+
+	ic->ic_mark_channel_availability_status = qdrv_mark_channel_availability_status;
+	ic->ic_set_chan_availability_status_by_chan_num = qdrv_set_chan_availability_status_by_chan_num;
+	ic->ic_get_chan_availability_status_by_chan_num = qdrv_get_chan_availability_status_by_chan_num;
+	ic->ic_mark_channel_dfs_cac_status = qdrv_mark_channel_dfs_cac_status;
+	ic->ic_ap_next_cac = qdrv_ap_next_cac;
+	ic->ic_dump_chan_availability_status = qdrv_ic_dump_chan_availability_status;
+	ic->ic_dfs_chans_available_for_cac = qdrv_dfs_chans_available_for_cac;
+	ic->ic_is_dfs_chans_available_for_dfs_reentry = qdrv_is_dfs_chans_available_dfs_reentry;
+	ic->ic_get_init_cac_duration = qdrv_get_init_cac_duration;
+	ic->ic_set_init_cac_duration = qdrv_set_init_cac_duration;
+	ic->ic_start_icac_procedure = qdrv_start_icac_procedure;
+	ic->ic_stop_icac_procedure = qdrv_stop_icac_procedure;
+	ic->ic_chan_compare_equality = qdrv_chan_compare_equality;
+
+	ic->ic_enable_xmit = qdrv_enable_xmit;
+	ic->ic_disable_xmit = qdrv_disable_xmit;
+
+	/* defaults for SCS */
+	ic->ic_scs.scs_enable = 0;
+	ic->ic_scs.scs_smpl_enable = 0;
+	ic->ic_scs.scs_stats_on = 0;
+	ic->ic_scs.scs_debug_enable = 0;
+	ic->ic_scs.scs_atten_sw_enable = 0;
+	ic->ic_scs.scs_sample_intv = IEEE80211_SCS_SMPL_INTV_DEFAULT;
+	ic->ic_scs.scs_sample_type = QTN_OFF_CHAN_FLAG_PASSIVE_NORMAL;
+	ic->ic_scs.scs_smpl_dwell_time = IEEE80211_SCS_SMPL_DWELL_TIME_DEFAULT;
+	ic->ic_scs.scs_thrshld_smpl_pktnum = IEEE80211_SCS_THRSHLD_SMPL_PKTNUM_DEFAULT;
+	ic->ic_scs.scs_thrshld_smpl_airtime = IEEE80211_SCS_THRSHLD_SMPL_AIRTIME_DEFAULT;
+	ic->ic_scs.scs_thrshld_atten_inc = IEEE80211_SCS_THRSHLD_ATTEN_INC_DFT;
+	ic->ic_scs.scs_thrshld_dfs_reentry = IEEE80211_SCS_THRSHLD_DFS_REENTRY_DFT;
+	ic->ic_scs.scs_thrshld_dfs_reentry_intf = IEEE80211_SCS_THRSHLD_DFS_REENTRY_INTF_DFT;
+	ic->ic_scs.scs_thrshld_aging_nor = IEEE80211_SCS_THRSHLD_AGING_NOR_DFT;
+	ic->ic_scs.scs_thrshld_aging_dfsreent = IEEE80211_SCS_THRSHLD_AGING_DFSREENT_DFT;
+	ic->ic_scs.scs_cca_idle_thrshld = IEEE80211_CCA_IDLE_THRSHLD;
+	ic->ic_scs.scs_cca_intf_lo_thrshld = IEEE80211_CCA_INTFR_LOW_THRSHLD;
+	ic->ic_scs.scs_cca_intf_hi_thrshld = IEEE80211_CCA_INTFR_HIGH_THRSHLD;
+	ic->ic_scs.scs_cca_intf_ratio = IEEE80211_CCA_INTFR_RATIO;
+	ic->ic_scs.scs_cca_intf_dfs_margin = IEEE80211_CCA_INTFR_DFS_MARGIN;
+	ic->ic_scs.scs_pmbl_err_thrshld = IEEE80211_PMBL_ERR_THRSHLD;
+	ic->ic_scs.scs_cca_intf_smth_fctr[SCS_CCA_INTF_SMTH_FCTR_NOXP] =
+			IEEE80211_CCA_INTF_SMTH_FCTR_NOXP_DFT;
+	ic->ic_scs.scs_cca_intf_smth_fctr[SCS_CCA_INTF_SMTH_FCTR_XPED] =
+			IEEE80211_CCA_INTF_SMTH_FCTR_XPED_DFT;
+	ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_UP] = IEEE80211_SCS_RSSI_SMTH_FCTR_UP_DFT;
+	ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_DOWN] = IEEE80211_SCS_RSSI_SMTH_FCTR_DOWN_DFT;
+	ic->ic_scs.scs_chan_mtrc_mrgn = IEEE80211_SCS_CHAN_MTRC_MRGN_DFT;
+	ic->ic_scs.scs_leavedfs_chan_mtrc_mrgn = IEEE80211_SCS_LEAVE_DFS_CHAN_MTRC_MRGN_DFT;
+	ic->ic_scs.scs_atten_adjust = IEEE80211_SCS_ATTEN_ADJUST_DFT;
+	ic->ic_scs.scs_cca_sample_dur = IEEE80211_CCA_SAMPLE_DUR;
+	ic->ic_scs.scs_last_smpl_chan = -1;
+	ic->ic_scs.scs_brcm_rxglitch_thrshlds_scale = IEEE80211_SCS_BRCM_RXGLITCH_THRSHLD_SCALE_DFT;
+	ic->ic_scs.scs_pmbl_err_smth_fctr = IEEE80211_SCS_PMBL_ERR_SMTH_FCTR_DFT;
+	ic->ic_scs.scs_pmbl_err_range = IEEE80211_SCS_PMBL_ERR_RANGE_DFT;
+	ic->ic_scs.scs_pmbl_err_mapped_intf_range = IEEE80211_SCS_PMBL_ERR_MAPPED_INTF_RANGE_DFT;
+	ic->ic_scs.scs_sp_wf = IEEE80211_SCS_PMBL_SHORT_WF_DFT;
+	ic->ic_scs.scs_lp_wf = IEEE80211_SCS_PMBL_LONG_WF_DFT;
+	ic->ic_scs.scs_thrshld_loaded = IEEE80211_SCS_THRSHLD_LOADED_DFT;
+	ic->ic_scs.scs_pmp_rpt_cca_smth_fctr = IEEE80211_SCS_PMP_RPT_CCA_SMTH_FCTR_DFT;
+	ic->ic_scs.scs_pmp_rx_time_smth_fctr = IEEE80211_SCS_PMP_RX_TIME_SMTH_FCTR_DFT;
+	ic->ic_scs.scs_pmp_tx_time_smth_fctr = IEEE80211_SCS_PMP_TX_TIME_SMTH_FCTR_DFT;
+	ic->ic_scs.scs_pmp_stats_stable_percent = IEEE80211_SCS_PMP_STATS_STABLE_PERCENT_DFT;
+	ic->ic_scs.scs_pmp_stats_stable_range = IEEE80211_SCS_PMP_STATS_STABLE_RANGE_DFT;
+	ic->ic_scs.scs_pmp_stats_clear_interval = IEEE80211_SCS_PMP_STATS_CLEAR_INTERVAL_DFT;
+	ic->ic_scs.scs_as_rx_time_smth_fctr = IEEE80211_SCS_AS_RX_TIME_SMTH_FCTR_DFT;
+	ic->ic_scs.scs_as_tx_time_smth_fctr = IEEE80211_SCS_AS_TX_TIME_SMTH_FCTR_DFT;
+	ic->ic_scs.scs_cca_idle_smth_fctr = IEEE80211_SCS_CCA_IDLE_SMTH_FCTR_DFT;
+	spin_lock_init(&ic->ic_scs.scs_tdls_lock);
+	ic->ic_scs.scs_burst_enable = IEEE80211_SCS_BURST_ENABLE_DEFAULT;
+	ic->ic_scs.scs_burst_window = IEEE80211_SCS_BURST_WINDOW_DEFAULT * 60;
+	ic->ic_scs.scs_burst_thresh = IEEE80211_SCS_BURST_THRESH_DEFAULT;
+	ic->ic_scs.scs_burst_pause_time = IEEE80211_SCS_BURST_PAUSE_DEFAULT * 60;
+	ic->ic_scs.scs_burst_force_switch = IEEE80211_SCS_BURST_SWITCH_DEFAULT;
+	ic->ic_scs.scs_burst_is_paused = 0;
+#endif /* QSCS_ENABLED */
+
+	/* defaults for OCAC (off-channel CAC) */
+	ic->ic_ocac.ocac_cfg.ocac_enable = 0;
+	ic->ic_ocac.ocac_cfg.ocac_chan_ieee = 0;
+	ic->ic_ocac.ocac_cfg.ocac_debug_level = 0;
+	ic->ic_ocac.ocac_cfg.ocac_report_only = 0;
+	strncpy(ic->ic_ocac.ocac_cfg.ocac_region, "NA", sizeof(ic->ic_ocac.ocac_cfg.ocac_region));
+	ic->ic_ocac.ocac_cfg.ocac_timer_expire_init = IEEE80211_OCAC_TIMER_EXPIRE_INIT_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.timer_interval = IEEE80211_OCAC_TIMER_INTERVAL_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.secure_dwell_ms = IEEE80211_OCAC_SECURE_DWELL_TIME_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.dwell_time_ms = IEEE80211_OCAC_DWELL_TIME_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.duration_secs = IEEE80211_OCAC_DURATION_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.cac_time_secs = IEEE80211_OCAC_CAC_TIME_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.wea_dwell_time_ms = IEEE80211_OCAC_WEA_DWELL_TIME_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.wea_duration_secs = IEEE80211_OCAC_WEA_DURATION_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.wea_cac_time_secs = IEEE80211_OCAC_WEA_CAC_TIME_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.thresh_fat = IEEE80211_OCAC_THRESHOLD_FAT_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.thresh_traffic = IEEE80211_OCAC_THRESHOLD_TRAFFIC_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.thresh_cca_intf = IEEE80211_OCAC_THRESHOLD_CCA_INTF_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.thresh_fat_dec = IEEE80211_OCAC_THRESHOLD_FAT_DEC_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.traffic_ctrl = IEEE80211_OCAC_TRAFFIC_CTRL_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.offset_txhalt = IEEE80211_OCAC_OFFSET_TXHALT_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.offset_offchan = IEEE80211_OCAC_OFFSET_OFFCHAN_DEFAULT;
+	ic->ic_ocac.ocac_cfg.ocac_params.beacon_interval = IEEE80211_OCAC_BEACON_INTERVAL_DEFAULT;
+	ic->ic_ocac.ocac_tsflog.log_index = 0;
+	ic->ic_set_ocac = qdrv_set_ocac;
+	ic->ic_ocac_release_frame = qdrv_ocac_release_frame;
+
+	ic->ic_rxtx_phy_rate = qdrv_muc_stats_rxtx_phy_rate;
+	ic->ic_rssi = qdrv_muc_stats_rssi;
+	ic->ic_smoothed_rssi = qdrv_muc_stats_smoothed_rssi;
+	ic->ic_snr = qdrv_muc_stats_snr;
+	ic->ic_hw_noise = qdrv_muc_stats_hw_noise;
+	ic->ic_max_queue = qdrv_muc_stats_max_queue;
+	ic->ic_mcs_to_phyrate = qdrv_muc_stats_mcs_to_phyrate;
+	ic->ic_tx_failed = qdrv_muc_stats_tx_failed;
+	ic->ic_chan_switch_record = qdrv_channel_switch_record;
+	ic->ic_chan_switch_reason_record = qdrv_channel_switch_reason_record;
+	ic->ic_dfs_chan_switch_notify = dfs_reentry_chan_switch_notify;
+	ic->ic_set_11g_erp = qdrv_wlan_set_11g_erp;
+	/* shared CSA framework */
+	init_completion(&ic->csa_completion);
+	INIT_WORK(&ic->csa_work, ieee80211_csa_finish);
+	ic->csa_work_queue = create_workqueue("csa_work_queue");
+	ic->finish_csa = NULL;
+	ic->ic_20_40_coex_enable = 1;
+	ic->ic_obss_scan_enable = 1;
+	ic->ic_obss_scan_count = 0;
+	init_timer(&ic->ic_obss_timer);
+	ic->ic_obss_ie.obss_passive_dwell = IEEE80211_OBSS_PASSIVE_DWELL_DEFAULT;
+	ic->ic_obss_ie.obss_active_dwell = IEEE80211_OBSS_ACTIVE_DWELL_DEFAULT;
+	ic->ic_obss_ie.obss_trigger_interval = IEEE80211_OBSS_TRIGGER_INTERVAL_DEFAULT;
+	ic->ic_obss_ie.obss_passive_total = IEEE80211_OBSS_PASSIVE_TOTAL_DEFAULT;
+	ic->ic_obss_ie.obss_active_total = IEEE80211_OBSS_ACTIVE_TOTAL_DEFAULT;
+	ic->ic_obss_ie.obss_channel_width_delay = IEEE80211_OBSS_CHANNEL_WIDTH_DELAY_DEFAULT;
+	ic->ic_obss_ie.obss_activity_threshold = IEEE80211_OBSS_ACTIVITY_THRESHOLD_DEFAULT;
+	ic->ic_coex_stats_update = qdrv_wlan_coex_stats_update;
+	ic->ic_neighbor_count = -1;
+	ic->ic_neighbor_cnt_sparse = IEEE80211_NEIGHBORHOOD_TYPE_SPARSE_DFT_THRSHLD;
+	ic->ic_neighbor_cnt_dense = IEEE80211_NEIGHBORHOOD_TYPE_DENSE_DFT_THRSHLD;
+
+#ifdef CONFIG_QVSP
+	ic->ic_vsp_strm_state_set = qdrv_wlan_vsp_strm_state_set;
+	ic->ic_vsp_change_stamode = qdrv_wlan_vsp_change_stamode;
+	ic->ic_vsp_configure = qdrv_wlan_vsp_configure;
+	ic->ic_vsp_set = qdrv_wlan_vsp_set;
+	ic->ic_vsp_get = qdrv_wlan_vsp_get;
+	ic->ic_vsp_cb_strm_ctrl = qdrv_wlan_vsp_cb_strm_ctrl;
+	ic->ic_vsp_cb_cfg = qdrv_wlan_vsp_cb_cfg;
+	ic->ic_vsp_reset = qdrv_wlan_vsp_reset;
+	ic->ic_vsp_cb_strm_ext_throttler = qdrv_wlan_vsp_cb_strm_ext_throttler;
+#endif
+
+#ifdef QTN_BG_SCAN
+	ic->ic_bgscan_start = qdrv_bgscan_start;
+	ic->ic_bgscan_channel = qdrv_bgscan_channel;
+#endif /* QTN_BG_SCAN */
+
+	/* we don't have short range issue with Topaz */
+	ic->ic_pwr_adjust_scancnt = 0;
+
+	/* initiate data struct that record channel switch */
+	memset(&ic->ic_csw_record, 0, sizeof(ic->ic_csw_record));
+
+	memset(&ic->ic_chan_occupy_record, 0, sizeof(ic->ic_chan_occupy_record));
+
+	ic->ic_send_notify_chan_width_action = ieee80211_send_notify_chan_width_action;
+	ic->ic_send_vht_grp_id_act = ieee80211_send_vht_grp_id_mgmt_action;
+	qdrv_wlan_pm_state_init(ic);
+
+	ic->ic_get_local_txpow = qdrv_get_local_tx_power;
+	ic->ic_get_local_link_margin = qdrv_get_local_link_margin;
+	ic->ic_get_shared_vap_stats = qdrv_wlan_get_shared_vap_stats;
+	ic->ic_reset_shared_vap_stats = qdrv_wlan_reset_shared_vap_stats;
+	ic->ic_get_shared_node_stats = qdrv_wlan_get_shared_node_stats;
+	ic->ic_reset_shared_node_stats = qdrv_wlan_reset_shared_node_stats;
+	ic->ic_get_dscp2ac_map = qdrv_wlan_get_dscp2ac_map;
+	ic->ic_set_dscp2ac_map = qdrv_wlan_set_dscp2ac_map;
+
+	ic->ic_get_dscp2tid_map = qdrv_sch_get_dscp2tid_map;
+	ic->ic_set_dscp2tid_map = qdrv_sch_set_dscp2tid_map;
+
+	ic->ic_pco.pco_set = 0;
+	ic->ic_pco.pco_pwr_constraint = 0;
+	ic->ic_pco.pco_rssi_threshold = 0;
+	ic->ic_pco.pco_sec_offset = 0;
+	ic->ic_pco.pco_pwr_constraint_save = PWR_CONSTRAINT_SAVE_INIT;
+
+	ic->ic_su_txbf_pkt_cnt = QTN_SU_TXBF_TX_CNT_DEF_THRSHLD;
+	ic->ic_mu_txbf_pkt_cnt = QTN_MU_TXBF_TX_CNT_DEF_THRSHLD;
+	ic->ic_get_cca_adjusting_status = ieee80211_get_cca_adjusting_status;
+
+	ic->ic_flags_qtn |= QTN_NODE_11N_TXAMSDU_OFF;
+	ic->ic_flags_ext |= IEEE80211_FEXT_UAPSD;
+
+	ic->ic_extender_rssi_continue = 0;
+	ic->ic_dfs_csa_cnt = 1;
+
+	qdrv_wlan_init_dm_factors(ic);
+
+	/* tx airtime callback init */
+	ic->ic_tx_airtime = qdrv_muc_stats_tx_airtime;
+	ic->ic_tx_accum_airtime = qdrv_muc_stats_tx_accum_airtime;
+	ic->ic_tx_airtime_control = qdrv_tx_airtime_control;
+	ic->ic_rx_airtime = qdrv_muc_stats_rx_airtime;
+	ic->ic_rx_accum_airtime = qdrv_muc_stats_rx_accum_airtime;
+
+	/* MU group state update */
+	ic->ic_mu_group_update = qdrv_mu_grp_update;
+
+	ic->sta_dfs_info.sta_dfs_strict_mode = 0;
+	ic->sta_dfs_info.sta_dfs_radar_detected_timer = 0;
+	ic->sta_dfs_info.sta_dfs_radar_detected_channel = 0;
+	ic->sta_dfs_info.sta_dfs_strict_msr_cac = 0;
+	ic->sta_dfs_info.allow_measurement_report = 0;
+	ic->sta_dfs_info.sta_dfs_tx_chan_close_time = STA_DFS_STRICT_TX_CHAN_CLOSE_TIME_DEFAULT;
+#ifdef CONFIG_QHOP
+	ic->rbs_mbs_dfs_info.rbs_dfs_tx_chan_close_time = RBS_DFS_TX_CHAN_CLOSE_TIME_DEFAULT;
+#endif
+	ic->auto_cca_enable = 0x1;
+	ic->cca_fix_disable = 0;
+	ic->ic_opmode_bw_switch_en = 0;
+
+	spin_lock_init(&ic->ic_ocac.ocac_lock);
+	memset(&ic->ic_ocac.ocac_rx_state, 0, sizeof(ic->ic_ocac.ocac_rx_state));
+
+	ic->ic_update_ocac_state_ie = qdrv_update_ocac_state_ie;
+
+	return 0;
+}
+
+/* Thin adapter for ic_get_dscp2ac_map: fetch a VAP's DSCP-to-AC map from the scheduler. */
+void qdrv_wlan_get_dscp2ac_map(const uint8_t vapid, uint8_t *dscp2ac)
+{
+	qdrv_sch_get_dscp2ac_map(vapid, dscp2ac);
+}
+
+/* Thin adapter for ic_set_dscp2ac_map: map listlen DSCP values to AC "ac" via the scheduler. */
+void qdrv_wlan_set_dscp2ac_map(const uint8_t vapid, uint8_t *ip_dscp, uint8_t listlen, uint8_t ac)
+{
+	qdrv_sch_set_dscp2ac_map(vapid, ip_dscp, listlen, ac);
+}
+
+/*
+ * Tear down the ieee80211com state created by qdrv_wlan_80211_init():
+ * cancel CSA/OBSS/PM timers, unregister the PM QoS notifier, detach the
+ * 802.11 midlayer, and release any frames still held by the bgscan, SCS
+ * and OCAC off-channel machinery.  Always returns 0.
+ */
+static int qdrv_wlan_80211_exit(struct ieee80211com *ic)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	ic->finish_csa = NULL;
+	/* del_timer_sync: obss timer callback must not be running on return */
+	del_timer_sync(&ic->ic_obss_timer);
+
+	pm_qos_remove_notifier(PM_QOS_POWER_SAVE, &qw->pm_notifier);
+	del_timer(&ic->ic_pm_period_change);
+
+	ieee80211_ifdetach(ic);
+
+#ifdef QTN_BG_SCAN
+	qdrv_bgscan_release_frame(ic, IEEE80211_SCAN_FRAME_ALL, 1);
+#endif /* QTN_BG_SCAN */
+	qdrv_scs_release_frame(ic, 1);
+	qdrv_ocac_release_frame(ic, 1);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+static void qdrv_show_wlan_stats(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_mac *mac = (struct qdrv_mac *) data;
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+	int i;
+
+	DBGPRINTF(DBG_LL_ERR, QDRV_LF_TRACE, "-->Enter %d\n", num);
+
+	seq_printf(s, "TX Statistics\n");
+	seq_printf(s, "  tx_enqueue_mgmt           : %d\n", qw->tx_stats.tx_enqueue_mgmt);
+	seq_printf(s, "  tx_enqueue_80211_data     : %d\n", qw->tx_stats.tx_enqueue_80211_data);
+	seq_printf(s, "  tx_enqueue_data           : %d\n", qw->tx_stats.tx_enqueue_data);
+	seq_printf(s, "  tx_muc_enqueue            : %d\n", qw->tx_stats.tx_muc_enqueue);
+	seq_printf(s, "  tx_muc_enqueue_mbox       : %d\n", qw->tx_stats.tx_muc_enqueue_mbox);
+	seq_printf(s, "  tx_null_data              : %d\n", qw->tx_stats.tx_null_data);
+	seq_printf(s, "  tx_done_success           : %d\n", qw->tx_stats.tx_done_success);
+	seq_printf(s, "  tx_done_muc_ready_err     : %d\n", qw->tx_stats.tx_done_muc_ready_err);
+	seq_printf(s, "  tx_done_enable_queues     : %d\n", qw->tx_stats.tx_done_enable_queues);
+	seq_printf(s, "  tx_queue_stop             : %d\n", qw->tx_stats.tx_queue_stop);
+	seq_printf(s, "  tx_requeue                : %d\n", qw->tx_stats.tx_requeue);
+	seq_printf(s, "  tx_requeue_err            : %d\n", qw->tx_stats.tx_requeue_err);
+	seq_printf(s, "  tx_hardstart              : %d\n", qw->tx_stats.tx_hardstart);
+	seq_printf(s, "  tx_complete               : %d\n", qw->tx_stats.tx_complete);
+	seq_printf(s, "  tx_min_cl_cnt             : %d\n", qw->tx_stats.tx_min_cl_cnt);
+	seq_printf(s, "  txdesc_data               : %d\n", qw->tx_if.txdesc_cnt[QDRV_TXDESC_DATA]);
+	seq_printf(s, "  txdesc_mgmt               : %d\n", qw->tx_if.txdesc_cnt[QDRV_TXDESC_MGMT]);
+	seq_printf(s, "  tx_dropped_mac_dead       : %d\n", qw->tx_stats.tx_dropped_mac_dead);
+
+	seq_printf(s, "  tx_igmp                   : %d\n", qw->tx_stats.tx_igmp);
+	seq_printf(s, "  tx_unknown                : %d\n", qw->tx_stats.tx_unknown);
+	seq_printf(s, "  tx_arp_req                : %d\n", qw->tx_stats.tx_arp_req);
+
+	seq_printf(s, "  tx_copy4_mc               : %d\n", qw->tx_stats.tx_copy4_mc);
+	seq_printf(s, "  tx_copy4_igmp             : %d\n", qw->tx_stats.tx_copy4_igmp);
+	seq_printf(s, "  tx_copy4_unknown          : %d\n", qw->tx_stats.tx_copy4_unknown);
+	seq_printf(s, "  tx_copy4                  : %d\n", qw->tx_stats.tx_copy4);
+	seq_printf(s, "  tx_copy_fail              : %d\n", qw->tx_stats.tx_copy_fail);
+	seq_printf(s, "  tx_copy4_busy             : %d\n", qw->tx_stats.tx_copy4_busy);
+	seq_printf(s, "  tx_copy3_mc               : %d\n", qw->tx_stats.tx_copy3_mc);
+	seq_printf(s, "  tx_copy3_igmp             : %d\n", qw->tx_stats.tx_copy3_igmp);
+	seq_printf(s, "  tx_copy_uc                : %d\n", qw->tx_stats.tx_copy_uc);
+	seq_printf(s, "  tx_copy_mc                : %d\n", qw->tx_stats.tx_copy_mc);
+	seq_printf(s, "  tx_copy_mc_to_uc          : %d\n", qw->tx_stats.tx_copy_mc_to_uc);
+	seq_printf(s, "  tx_copy_ssdp              : %d\n", qw->tx_stats.tx_copy_ssdp);
+	seq_printf(s, "  tx_copy3                  : %d\n", qw->tx_stats.tx_copy3);
+
+	seq_printf(s, "  tx_drop_auth              : %d\n", qw->tx_stats.tx_drop_auth);
+	seq_printf(s, "  tx_drop_aid               : %d\n", qw->tx_stats.tx_drop_aid);
+	seq_printf(s, "  tx_drop_nodesc            : %d\n", qw->tx_stats.tx_drop_nodesc);
+	seq_printf(s, "  tx_drop_wds               : %d\n", qw->tx_stats.tx_drop_wds);
+	seq_printf(s, "  tx_drop_3addr             : %d\n", qw->tx_stats.tx_drop_3addr);
+	seq_printf(s, "  tx_drop_vsp               : %d\n", qw->tx_stats.tx_drop_vsp);
+	seq_printf(s, "  tx_dropped_config         : %d\n", qw->tx_stats.tx_dropped_config);
+	seq_printf(s, "  tx_drop_total             : %d\n", qw->tx_stats.tx_drop_total);
+	seq_printf(s, "  tx_channel                : %d\n", qw->tx_stats.tx_channel);
+	seq_printf(s, "  tx_l2_ext_filter          : %d\n", qw->tx_stats.tx_l2_ext_filter);
+	seq_printf(s, "  tx_drop_l2_ext_filter     : %d\n", qw->tx_stats.tx_drop_l2_ext_filter);
+
+	seq_printf(s, "  tx_prot_arp               : %u\n", qw->tx_stats.prot_arp);
+	seq_printf(s, "  tx_prot_pae               : %u\n", qw->tx_stats.prot_pae);
+	seq_printf(s, "  tx_prot_ip_udp            : %u\n", qw->tx_stats.prot_ip_udp);
+	seq_printf(s, "  tx_prot_ip_tcp            : %u\n", qw->tx_stats.prot_ip_tcp);
+	seq_printf(s, "  tx_prot_ip_icmp           : %u\n", qw->tx_stats.prot_ip_icmp);
+	seq_printf(s, "  tx_prot_ip_igmp           : %u\n", qw->tx_stats.prot_ip_igmp);
+	seq_printf(s, "  tx_prot_ip_other          : %u\n", qw->tx_stats.prot_ip_other);
+	seq_printf(s, "  tx_prot_ipv6              : %u\n", qw->tx_stats.prot_ipv6);
+	seq_printf(s, "  tx_prot_other             : %u\n", qw->tx_stats.prot_other);
+
+	seq_printf(s, "RX Statistics\n");
+	seq_printf(s, "  rx_irq                    : %d\n", qw->rx_stats.rx_irq);
+	seq_printf(s, "  rx_irq_schedule           : %d\n", qw->rx_stats.rx_irq_schedule);
+	seq_printf(s, "  rx_beacon                 : %d\n", qw->rx_stats.rx_beacon);
+	seq_printf(s, "  rx_non_beacon             : %d\n", qw->rx_stats.rx_non_beacon);
+	seq_printf(s, "  rx_input_all              : %d\n", qw->rx_stats.rx_input_all);
+	seq_printf(s, "  rx_input_node             : %d\n", qw->rx_stats.rx_input_node);
+	seq_printf(s, "  rx_data_snap              : %d\n", qw->rx_stats.rx_data_snap);
+	seq_printf(s, "  rx_data_tods              : %d\n", qw->rx_stats.rx_data_tods);
+	seq_printf(s, "  rx_data_nods              : %d\n", qw->rx_stats.rx_data_nods);
+	seq_printf(s, "  rx_data_fromds            : %d\n", qw->rx_stats.rx_data_fromds);
+	seq_printf(s, "  rx_data_dstods            : %d\n", qw->rx_stats.rx_data_dstods);
+	seq_printf(s, "  rx_data_no_node           : %d\n", qw->rx_stats.rx_data_no_node);
+	seq_printf(s, "  rx_data_too_short         : %d\n", qw->rx_stats.rx_data_too_short);
+	seq_printf(s, "  rx_poll                   : %d\n", qw->rx_stats.rx_poll);
+	seq_printf(s, "  rx_poll_pending           : %d\n", qw->rx_stats.rx_poll_pending);
+	seq_printf(s, "  rx_poll_empty             : %d\n", qw->rx_stats.rx_poll_empty);
+	seq_printf(s, "  rx_poll_retrieving        : %d\n", qw->rx_stats.rx_poll_retrieving);
+	seq_printf(s, "  rx_poll_buffer_err        : %d\n", qw->rx_stats.rx_poll_buffer_err);
+	seq_printf(s, "  rx_poll_skballoc_err      : %d\n", qw->rx_stats.rx_poll_skballoc_err);
+	seq_printf(s, "  rx_poll_stopped           : %d\n", qw->rx_stats.rx_poll_stopped);
+	seq_printf(s, "  rx_df_numelems            : %d\n", qw->rx_stats.rx_df_numelems);
+	seq_printf(s, "  rx_amsdu                  : %d\n", qw->rx_stats.rx_amsdu);
+	seq_printf(s, "  rx_packets                : %d\n", qw->rx_stats.rx_packets);
+	seq_printf(s, "  rx_bytes                  : %d\n", qw->rx_stats.rx_bytes);
+	seq_printf(s, "  rx_poll_next              : %d\n", qw->rx_stats.rx_poll_next);
+	seq_printf(s, "  rx_poll_complete          : %d\n", qw->rx_stats.rx_poll_complete);
+	seq_printf(s, "  rx_poll_continue          : %d\n", qw->rx_stats.rx_poll_continue);
+	seq_printf(s, "  rx_poll_vap_err           : %d\n", qw->rx_stats.rx_poll_vap_err);
+	seq_printf(s, "  rx_frag                   : %d\n", qw->rx_stats.rx_frag);
+	seq_printf(s, "  rx_lncb_4                 : %d\n", qw->rx_stats.rx_lncb_4);
+	seq_printf(s, "  rx_blacklist              : %d\n", qw->rx_stats.rx_blacklist);
+	seq_printf(s, "  rx_igmp                   : %d\n", qw->rx_stats.rx_igmp);
+	seq_printf(s, "  rx_igmp_4                 : %d\n", qw->rx_stats.rx_igmp_4);
+	seq_printf(s, "  rx_igmp_3_drop            : %d\n", qw->rx_stats.rx_igmp_3_drop);
+	seq_printf(s, "  rx_mc_3_drop              : %d\n", qw->rx_stats.rx_mc_3_drop);
+
+	seq_printf(s, "  rx_prot_arp               : %u\n", qw->rx_stats.prot_arp);
+	seq_printf(s, "  rx_prot_pae               : %u\n", qw->rx_stats.prot_pae);
+	seq_printf(s, "  rx_prot_ip_udp            : %u\n", qw->rx_stats.prot_ip_udp);
+	seq_printf(s, "  rx_prot_ip_tcp            : %u\n", qw->rx_stats.prot_ip_tcp);
+	seq_printf(s, "  rx_prot_ip_icmp           : %u\n", qw->rx_stats.prot_ip_icmp);
+	seq_printf(s, "  rx_prot_ip_igmp           : %u\n", qw->rx_stats.prot_ip_igmp);
+	seq_printf(s, "  rx_prot_ip_other          : %u\n", qw->rx_stats.prot_ip_other);
+	seq_printf(s, "  rx_prot_ipv6              : %u\n", qw->rx_stats.prot_ipv6);
+	seq_printf(s, "  rx_prot_other             : %u\n", qw->rx_stats.prot_other);
+	seq_printf(s, "  rx_rate_train_invalid     : %u\n", qw->rx_stats.rx_rate_train_invalid);
+	seq_printf(s, "  rx_mac_reserved           : %u\n", qw->rx_stats.rx_mac_reserved);
+	seq_printf(s, "  rx_coex_bw_action         : %u\n", qw->rx_stats.rx_coex_bw_action);
+	seq_printf(s, "  rx_coex_bw_assoc          : %u\n", qw->rx_stats.rx_coex_bw_assoc);
+	seq_printf(s, "  rx_coex_bw_scan           : %u\n", qw->rx_stats.rx_coex_bw_scan);
+
+	seq_printf(s, "Recycling Statistics\n");
+	seq_printf(s, "  qdrv_free_pass            : %d\n", recycle_list->stats_qdrv.free_recycle_pass);
+	seq_printf(s, "  qdrv_free_fail            : %d\n", recycle_list->stats_qdrv.free_recycle_fail);
+	seq_printf(s, "  qdrv_free_fail_undersize  : %d\n", recycle_list->stats_qdrv.free_recycle_fail_undersize);
+	seq_printf(s, "  qdrv_alloc_recycle        : %d\n", recycle_list->stats_qdrv.alloc_recycle);
+	seq_printf(s, "  qdrv_alloc_kmalloc        : %d\n", recycle_list->stats_qdrv.alloc_kmalloc);
+	seq_printf(s, "  eth_free_pass             : %d\n", recycle_list->stats_eth.free_recycle_pass);
+	seq_printf(s, "  eth_free_fail             : %d\n", recycle_list->stats_eth.free_recycle_fail);
+	seq_printf(s, "  eth_free_fail_undersize   : %d\n", recycle_list->stats_eth.free_recycle_fail_undersize);
+	seq_printf(s, "  eth_alloc_recycle         : %d\n", recycle_list->stats_eth.alloc_recycle);
+	seq_printf(s, "  eth_alloc_kmalloc         : %d\n", recycle_list->stats_eth.alloc_kmalloc);
+#if defined(CONFIG_RUBY_PCIE_HOST) || defined(CONFIG_RUBY_PCIE_TARGET)
+	seq_printf(s, "  pcie_free_pass             : %d\n", recycle_list->stats_pcie.free_recycle_pass);
+	seq_printf(s, "  pcie_free_fail             : %d\n", recycle_list->stats_pcie.free_recycle_fail);
+	seq_printf(s, "  pcie_free_fail_undersize   : %d\n", recycle_list->stats_pcie.free_recycle_fail_undersize);
+	seq_printf(s, "  pcie_alloc_recycle         : %d\n", recycle_list->stats_pcie.alloc_recycle);
+	seq_printf(s, "  pcie_alloc_kmalloc         : %d\n", recycle_list->stats_pcie.alloc_kmalloc);
+#endif
+	seq_printf(s, "  kfree_free_pass           : %d\n", recycle_list->stats_kfree.free_recycle_pass);
+	seq_printf(s, "  kfree_free_fail           : %d\n", recycle_list->stats_kfree.free_recycle_fail);
+	seq_printf(s, "  kfree_free_fail_undersize : %d\n", recycle_list->stats_kfree.free_recycle_fail_undersize);
+
+	seq_printf(s, "BF Statistics\n");
+	for (i = 0; i < QTN_STATS_NUM_BF_SLOTS; i++) {
+		seq_printf(s, "  slot %u success            : %d\n", i, qw->rx_stats.rx_bf_success[i]);
+	}
+	for (i = 0; i < QTN_STATS_NUM_BF_SLOTS; i++) {
+		seq_printf(s, "  slot %u rejected           : %d\n", i, qw->rx_stats.rx_bf_rejected[i]);
+	}
+
+	seq_printf(s, "QCAT state: %d\n", qw->tx_stats.qcat_state);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return;
+}
+
+/*
+ * Register qdrv_show_wlan_stats as the "show" handler with the qdrv
+ * control layer; the statistics are rendered when the control entry
+ * is subsequently read.  Always returns 0.
+ */
+int qdrv_wlan_stats(void *data)
+{
+	qdrv_control_set_show(qdrv_show_wlan_stats, data, 1, 1);
+
+	return 0;
+}
+
+/*
+ * Dump per-VAP TX scheduling state: the shared token-bucket data,
+ * each up interface's qdisc queue length and MuC-queued count, and
+ * per-node scheduler info via the node iterator.
+ */
+static void
+qdrv_wlan_show_assoc_queue_info(struct seq_file *s, void *data, u32 num)
+{
+	struct qdrv_wlan *qw = data;
+	struct ieee80211com *ic = &qw->ic;
+	struct qdrv_sch_shared_data *sd = qw->tx_sch_shared_data;
+	struct qdrv_mac *mac = qw->mac;
+	struct qdrv_vap *qv;
+	struct net_device *dev;
+	struct Qdisc *sch;
+	uint32_t i;
+
+	seq_printf(s, "shared data: users=%d tokens=%u/%u res=%u rdt=%u muc_thresh=%u/%u\n",
+			sd->users,
+			sd->total_tokens,
+			sd->available_tokens,
+			sd->reserved_tokens_per_user,
+			sd->random_drop_threshold,
+			qw->tx_if.muc_thresh_high,
+			qw->tx_if.muc_thresh_low);
+
+	/* walk every virtual net_device on this MAC; only report UP ones */
+	for (i = 0; i <= mac->vnet_last; ++i) {
+		dev = mac->vnet[i];
+		if (dev && (dev->flags & IFF_UP)) {
+			sch = qdrv_tx_sch_vap_get_qdisc(dev);
+			qv = netdev_priv(dev);
+			if (sch) {
+				seq_printf(s, "%s qdisc=%p queued=%u muc=%u\n",
+					dev->name, sch, sch->q.qlen, qv->muc_queued);
+			}
+		}
+	}
+
+	/* per-node scheduler details, appended by the iterator callback */
+	ic->ic_iterate_nodes(&ic->ic_sta, qdrv_wlan_tx_sch_node_info, (void *)s, 1);
+}
+
+/*
+ * Forward an airtime-control command to the MuC via hostlink.
+ *
+ * Encoding of 'value' (per the original author's note):
+ *   BIT(24) set        -> apply to all nodes
+ *   BIT(24) clear      -> 8 LSBs select the node index
+ *   BIT(8)..BIT(15)    -> control mask bits
+ */
+void qdrv_tx_airtime_control(struct ieee80211vap *vap, uint32_t value)
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct qdrv_wlan *qw = qv->parent;
+
+	qdrv_hostlink_tx_airtime_control(qw, value);
+}
+
+/* Relay an MU-MIMO group update request to the MuC via hostlink. */
+void qdrv_mu_grp_update(struct ieee80211com *ic, struct qtn_mu_group_update_args *args)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	qdrv_hostlink_mu_group_update(qw, args);
+}
+
+/*
+ * Show handler: emit one line of info per associated node by walking
+ * the station table with the get_node_info iterator callback.
+ */
+static void
+qdrv_wlan_show_assoc_info( struct seq_file *s, void *data, u32 num )
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) data;
+	struct ieee80211com *ic = &qw->ic;
+
+	ic->ic_iterate_nodes(&ic->ic_sta, get_node_info, (void *)s, 1);
+}
+
+#ifdef CONFIG_NAC_MONITOR
+/*
+ * Show handler: dump the non-associated-client (NAC) monitor table that
+ * lives in the MuC/LHost shared parameters area.  Only entries flagged
+ * nac_valid are printed.
+ */
+static void
+qdrv_wlan_show_nac_info( struct seq_file *s, void *data, u32 num )
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct nac_mon_info *info = sp->nac_mon_info;
+	struct nac_stats_entry *entry = &info->nac_stats[0];
+	int i;
+	seq_printf(s, "  Mac Address      Rssi(dB)  Timestamp  Channel  Packet Type\n");
+	for (i = 0; i < MAX_NAC_STA; i++, entry++) {
+		if(entry->nac_valid) {
+			/* nac_packet_type: 1 = Control, 2 = Data, anything else = Management */
+			seq_printf(s, "%s %9d %10llu %8d   %-10s\n",
+				ether_sprintf(&entry->nac_txmac[0]),
+				(int8_t)entry->nac_avg_rssi,
+				entry->nac_timestamp,
+				entry->nac_channel,
+				(entry->nac_packet_type == 1)?"Control":
+					((entry->nac_packet_type == 2)?"Data":"Management"));
+		}
+	}
+}
+
+/* Register the NAC monitor dump as the current control "show" handler. */
+int
+qdrv_wlan_get_nac_info(void *data)
+{
+	qdrv_control_set_show(qdrv_wlan_show_nac_info, data, 1, 1);
+
+	return 0;
+}
+#endif
+
+/* Register the per-VAP TX queue dump as the current control "show" handler. */
+int
+qdrv_wlan_get_assoc_queue_info(void *data)
+{
+	qdrv_control_set_show(qdrv_wlan_show_assoc_queue_info, data, 1, 1);
+
+	return 0;
+}
+
+/* Register the association info dump as the current control "show" handler. */
+int
+qdrv_wlan_get_assoc_info(void *data)
+{
+	qdrv_control_set_show(qdrv_wlan_show_assoc_info, data, 1, 1);
+
+	return 0;
+}
+
+/*
+ * Validate that the requested operating mode is licensed/supported on
+ * this device, then ask the MuC (via hostlink) to create the VAP.
+ *
+ * Returns the hostlink result (>= 0 on success), or -1 when the mode is
+ * not supported.
+ */
+int qdrv_wlan_start_vap(struct qdrv_wlan *qw, const char *name,
+	uint8_t *mac_addr, int devid, int opmode, int flags)
+{
+	int ret;
+	struct ieee80211com *ic = &qw->ic;
+
+	switch (opmode) {
+	case IEEE80211_M_HOSTAP:
+		/* repeater mode implies AP capability, so skip the feature check */
+		if (!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER) &&
+				!ieee80211_swfeat_is_supported(SWFEAT_ID_MODE_AP, 1)) {
+			return -1;
+		}
+		break;
+	case IEEE80211_M_WDS:
+		if (!ieee80211_swfeat_is_supported(SWFEAT_ID_MODE_AP, 1))
+			return -1;
+		break;
+	case IEEE80211_M_STA:
+		if (!ieee80211_swfeat_is_supported(SWFEAT_ID_MODE_STA, 1))
+			return -1;
+		break;
+	default:
+		/* NOTE(review): printk without a KERN_ level -- consider KERN_ERR */
+		printk("mode %u is not supported on this device\n", opmode);
+		return -1;
+	}
+
+	ret = qdrv_hostlink_msg_create_vap(qw, name, mac_addr, devid, opmode, flags);
+	if (ret < 0) {
+		DBGPRINTF_E("Failed to send create VAP message\n");
+	}
+
+	return ret;
+}
+
+/*
+ * Ask the MuC (via hostlink) to delete the VAP backing 'vdev'.
+ * Returns 0 on success, -1 if the delete message failed.
+ */
+int qdrv_wlan_stop_vap(struct qdrv_mac *mac, struct net_device *vdev)
+{
+	struct qdrv_wlan *qw = qdrv_mac_get_wlan(mac);
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	if (qdrv_hostlink_msg_delete_vap(qw, vdev) < 0) {
+		DBGPRINTF_E("Failed to delete VAP on MuC\n");
+		return -1;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/* Temperature-zone table sizing and the periodic calibration interval. */
+#define DEFAULT_NUM_TEMP_ZONE 15
+#define MAX_NUM_TEMP_ZONE 50
+#define MAX_SIZE_TEMP_PROFILE_BUFF 200
+#define QDRV_TEMP_CAL_PERIOD	(5 * HZ)
+
+/* Candidate locations of the temperature profile / power-detector cal data */
+#define TPROFILE_IN_TEMP "/tmp/tprofile.txt"
+#define TPROFILE_IN_PROC "/proc/bootcfg/tprofile.txt"
+#define PDETECTOR_IN_PROC "/proc/bootcfg/pdetector.cal"
+
+/* Destination for the computed temp index; presumably shared with the MuC
+ * (written by the cal work functions below) -- TODO confirm producer/consumer. */
+struct _temp_info *p_muc_temp_index;
+/* Zone boundary table.  Note: only element 0 is initialized (to 0x0FFFFFFF
+ * as a sentinel); the remaining entries are zero until a profile is loaded. */
+static int tpf[MAX_NUM_TEMP_ZONE] = { 0xFFFFFFF };
+
+/*
+ * Remove every whitespace character from 'str' in place (compacting the
+ * remainder, including the terminating NUL) and return 'str'.
+ */
+static inline char *qdrv_txpow_cal_strip_all_white_space(char *str)
+{
+	char *p;
+	char *s_ptr = str;
+	p = str;
+	do {
+		/* cast to unsigned char: isspace() on a negative plain char is UB */
+		if (!isspace((unsigned char)(*p = *str))) {
+			p++;
+		}
+	} while (*str++);
+
+	return s_ptr;
+}
+
+/*
+ * Parse a comma-separated temperature profile string into the global
+ * tpf[] zone table and return the number of zones parsed.
+ *
+ * Fix: the parse is now bounded by MAX_NUM_TEMP_ZONE -- the original
+ * wrote past the end of tpf[] when the profile contained more than
+ * MAX_NUM_TEMP_ZONE entries.
+ */
+static int qdrv_txpow_cal_tzone_get(char *temperature_profile)
+{
+	char *from;
+	char *value;
+	int  *d_ptr;
+	int num_of_temp_zone = 0;
+
+	from = qdrv_txpow_cal_strip_all_white_space(temperature_profile);
+	d_ptr = &tpf[0];
+
+	while (from && num_of_temp_zone < MAX_NUM_TEMP_ZONE) {
+		/* an empty field (e.g. trailing comma) parses as 0, as before */
+		value = strsep(&from, ",");
+		*d_ptr = simple_strtoul(value, NULL, 0);
+		d_ptr++;
+		num_of_temp_zone++;
+	}
+	return num_of_temp_zone;
+}
+
+/*
+ * Map a temperature reading to its zone index using the tpf[] boundary
+ * table: index i+1 for tpf[i] <= temp < tpf[i+1], 0 below the table,
+ * num_zone at or above the last boundary.
+ *
+ * Fix: the loop now stops at num_zone - 1 so tpf[i + 1] never reads one
+ * past the last valid boundary (the original read tpf[num_zone], which
+ * is uninitialized -- and out of bounds when num_zone == MAX_NUM_TEMP_ZONE).
+ */
+static int qdrv_txpow_cal_convert_temp_index(struct qdrv_wlan *qw, int temp)
+{
+	int temp_index = 0;
+	int i = 0;
+	int findit = 0;
+	int num_zone = qw->tx_power_cal_data.temp_info.num_zone;
+
+	for (i = 0; i < num_zone - 1; i++) {
+		if (temp >= tpf[i] && temp < tpf[i + 1]) {
+			temp_index = i + 1;
+			findit = 1;
+			break;
+		}
+	}
+
+	if (findit != 1) {
+		if (temp < tpf[0])
+			temp_index = 0;
+		else if (temp >= tpf[num_zone - 1])
+			temp_index = num_zone;
+	}
+	return temp_index;
+}
+
+/*
+ * Probe the external SE95 temperature sensor on the Ruby I2C bus and
+ * store its i2c client in qw->se95_temp_sensor (NULL when no working
+ * sensor is present).
+ *
+ * Fix: corrected the "I2C dapter" typo in the log message.
+ */
+static void qdrv_init_tsensor(struct qdrv_wlan *qw)
+{
+	struct i2c_board_info se95_info = {
+		I2C_BOARD_INFO("se95", SE95_DEVICE_ADDR),
+	};
+	int temp;
+	struct i2c_adapter *adapter;
+
+	adapter = i2c_get_adapter(RUBY_I2C_ADAPTER_NUM);
+	if (!adapter) {
+		qw->se95_temp_sensor = NULL;
+		printk("QDRV: I2C adapter not found\n");
+		return;
+	}
+
+	qw->se95_temp_sensor = i2c_new_device(adapter, &se95_info);
+	if (!qw->se95_temp_sensor) {
+		i2c_put_adapter(adapter);
+		DBGPRINTF_E("Failed to instantiate temperature sensor device\n");
+		return;
+	}
+
+	/*
+	 * i2c_new_device will return successfully even if i2c device's ->probe()
+	 * callback failed, so check that temperature sensor is functional.
+	 */
+	if (qtn_tsensor_get_temperature(qw->se95_temp_sensor, &temp) < 0) {
+		i2c_unregister_device(qw->se95_temp_sensor);
+		qw->se95_temp_sensor = NULL;
+		i2c_put_adapter(adapter);
+		DBGPRINTF_N("QDRV: no external temperature sensor found\n");
+	}
+	/* NOTE(review): on success the adapter reference is deliberately kept
+	 * for the lifetime of the client -- confirm it is dropped at teardown. */
+}
+
+/*
+ * Periodic calibration work (external SE95 sensor path): read the
+ * temperature, convert it to a zone index, publish it to the MuC via
+ * p_muc_temp_index, and re-arm ourselves.
+ *
+ * Fix: schedule_delayed_work() takes a *relative* delay in jiffies; the
+ * original passed the absolute time (jiffies + QDRV_TEMP_CAL_PERIOD),
+ * which postpones the next run by roughly the system uptime.
+ */
+static void qdrv_txpow_cal_execute(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, tx_power_cal_data.bbrf_cal_work.work);
+	int temp = 0;
+
+	qtn_tsensor_get_temperature(qw->se95_temp_sensor, &temp);
+	qw->tx_power_cal_data.temp_info.temp_index = qdrv_txpow_cal_convert_temp_index(qw, temp);
+	qw->tx_power_cal_data.temp_info.real_temp = temp;
+	if (p_muc_temp_index) {
+		memcpy(p_muc_temp_index, &qw->tx_power_cal_data.temp_info, sizeof(*p_muc_temp_index));
+	}
+
+	schedule_delayed_work(&qw->tx_power_cal_data.bbrf_cal_work, QDRV_TEMP_CAL_PERIOD);
+}
+
+/*
+ * Periodic calibration work (internal sensor path): read the on-chip
+ * temperature sensor, publish the result to the MuC via p_muc_temp_index,
+ * and re-arm ourselves.
+ *
+ * Fix: schedule_delayed_work() takes a *relative* delay in jiffies; the
+ * original passed the absolute time (jiffies + QDRV_TEMP_CAL_PERIOD).
+ */
+static void qdrv_get_internal_temp(struct work_struct *work)
+{
+	struct qdrv_wlan *qw = container_of(work, struct qdrv_wlan, tx_power_cal_data.bbrf_cal_work.work);
+	int temp = 0;
+
+	qw->tx_power_cal_data.temp_info.temp_index = topaz_read_internal_temp_sens(&temp);
+	qw->tx_power_cal_data.temp_info.real_temp = temp;
+	if (p_muc_temp_index) {
+		memcpy(p_muc_temp_index, &qw->tx_power_cal_data.temp_info, sizeof(*p_muc_temp_index));
+	}
+	schedule_delayed_work(&qw->tx_power_cal_data.bbrf_cal_work, QDRV_TEMP_CAL_PERIOD);
+}
+
+/*
+ * Load the temperature zone profile (bootcfg copy preferred over the
+ * /tmp copy, built-in defaults otherwise), scale the boundaries, and
+ * start the periodic calibration work: the SE95 path when an external
+ * sensor exists and no power-detector cal file is present, otherwise
+ * the internal-sensor path.
+ *
+ * Fixes vs original:
+ *  - fd_3 was never closed; fd_1 leaked when both profile files opened;
+ *  - reads could fill the buffer with no NUL terminator;
+ *  - schedule_delayed_work() was given an absolute time instead of a
+ *    relative delay.
+ */
+static void qdrv_txpow_cal_init(struct qdrv_wlan *qw)
+{
+	int i;
+	int num_temp_zone = 0;
+	int default_tpf[DEFAULT_NUM_TEMP_ZONE] = {
+			40, 46, 52, 57, 63, 67, 70, 74, 77, 81, 85, 89, 92, 96, 100
+	};
+	char temperature_profile[MAX_SIZE_TEMP_PROFILE_BUFF] = {0};
+	int fd_1 = sys_open(TPROFILE_IN_TEMP, O_RDONLY, 0);
+	int fd_2 = sys_open(TPROFILE_IN_PROC, O_RDONLY, 0);
+	int fd_3 = sys_open(PDETECTOR_IN_PROC, O_RDONLY, 0);
+
+	if (fd_1 < 0 && fd_2 < 0) {
+		DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_WLAN,
+				"QDRV: using default temperature profile\n");
+		num_temp_zone = DEFAULT_NUM_TEMP_ZONE;
+		memcpy(tpf, default_tpf, sizeof(int) * DEFAULT_NUM_TEMP_ZONE);
+	} else if (fd_2 >= 0) {
+		/* bootcfg profile takes precedence, as in the original logic;
+		 * read at most size-1 so the buffer stays NUL-terminated */
+		sys_read(fd_2, temperature_profile, MAX_SIZE_TEMP_PROFILE_BUFF - 1);
+		num_temp_zone = qdrv_txpow_cal_tzone_get(temperature_profile);
+	} else {
+		sys_read(fd_1, temperature_profile, MAX_SIZE_TEMP_PROFILE_BUFF - 1);
+		num_temp_zone = qdrv_txpow_cal_tzone_get(temperature_profile);
+	}
+
+	/* close every descriptor we opened, regardless of which one was used */
+	if (fd_1 >= 0)
+		sys_close(fd_1);
+	if (fd_2 >= 0)
+		sys_close(fd_2);
+
+	/* profile values are in degrees; scale to the units used by readings */
+	for (i = 0; i < num_temp_zone; i ++) {
+		tpf[i] *= 100000;
+	}
+
+	if (fd_3 < 0) {
+		if (qw->se95_temp_sensor) {
+			qw->tx_power_cal_data.temp_info.num_zone = num_temp_zone;
+			DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_WLAN,
+					"QDRV: temperature sensor zone=%d, <%d, %d, %d, %d>\n",
+					num_temp_zone, tpf[0], tpf[1], tpf[num_temp_zone - 2],
+					tpf[num_temp_zone - 1]);
+
+			INIT_DELAYED_WORK(&qw->tx_power_cal_data.bbrf_cal_work, qdrv_txpow_cal_execute);
+			schedule_delayed_work(&qw->tx_power_cal_data.bbrf_cal_work,
+					QDRV_TEMP_CAL_PERIOD);
+		} else {
+			DBGPRINTF_W("QDRV: failed to initialize power calibration\n");
+		}
+	} else {
+		sys_close(fd_3);
+		DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN,
+				"QDRV: using %s for Tx gain calibration\n",	PDETECTOR_IN_PROC);
+		/*
+		 * If no external temp sensor is found then rx_stats sys_temp
+		 * would be updated from internal temp sensor
+		 */
+		INIT_DELAYED_WORK(&qw->tx_power_cal_data.bbrf_cal_work, qdrv_get_internal_temp);
+		schedule_delayed_work(&qw->tx_power_cal_data.bbrf_cal_work,
+				QDRV_TEMP_CAL_PERIOD);
+	}
+}
+
+/*
+ * Stop the periodic calibration work, waiting for a running instance.
+ * NOTE(review): if qdrv_txpow_cal_init() took the branch that never
+ * calls INIT_DELAYED_WORK (no sensor, no cal file), this cancels a
+ * never-initialized work item -- confirm that path cannot be reached
+ * before teardown.
+ */
+static void qdrv_txpow_cal_stop(struct qdrv_wlan *qw)
+{
+	cancel_delayed_work_sync(&qw->tx_power_cal_data.bbrf_cal_work);
+}
+#if !TOPAZ_FPGA_PLATFORM
+/* Timer callback: turn on hang-recovery flags on the MuC via hostlink. */
+static void qdrv_wlan_enable_hr(unsigned long data)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan*)data;
+	qdrv_hostlink_set_hrflags(qw, 1);
+}
+#endif
+/* Enable the one-shot timer to enable hang detection/recovery.
+ * (No-op on the FPGA platform, where hang recovery is compiled out.) */
+static void qdrv_wlan_hr_oneshot_enable(struct qdrv_wlan *qw)
+{
+#if !TOPAZ_FPGA_PLATFORM
+	/* legacy timer API: callback gets qw through the .data field */
+	init_timer(&qw->hr_timer);
+	qw->hr_timer.function = qdrv_wlan_enable_hr;
+	qw->hr_timer.data = (unsigned long)qw;
+	qw->hr_timer.expires = jiffies + QDRV_WLAN_HR_DELAY_SECS * HZ;
+	add_timer(&qw->hr_timer);
+#endif
+}
+
+/* Cancel the pending hang-recovery one-shot timer (safe if not pending). */
+static void
+qdrv_wlan_hr_oneshot_disable(struct qdrv_wlan *qw)
+{
+	del_timer(&qw->hr_timer);
+}
+
+/*
+ * Build an IGMPv2 general membership query addressed to 224.0.0.1
+ * (all-hosts), sourced from the bridge device's primary IPv4 address.
+ * Returns the skb, or NULL on failure.
+ *
+ * Fixes vs original:
+ *  - the in_device reference taken by in_dev_get() was never released
+ *    (leaked on every call);
+ *  - in_dev->ifa_list was dereferenced without a NULL check, crashing
+ *    when the bridge has no IPv4 address configured.
+ */
+static struct sk_buff *ip4_multicast_alloc_query(struct net_device *qbr_dev)
+{
+	struct sk_buff *skb;
+	struct igmphdr *ih;
+	struct ethhdr *eth;
+	struct iphdr *iph;
+	struct in_device *in_dev;
+
+	in_dev = in_dev_get(qbr_dev);
+	if (!in_dev) {
+		DBGPRINTF_LIMIT_E("could not get inet device\n");
+		return NULL;
+	}
+
+	/* need a primary address for the query's source field */
+	if (!in_dev->ifa_list) {
+		in_dev_put(in_dev);
+		return NULL;
+	}
+
+	skb = netdev_alloc_skb_ip_align(qbr_dev, sizeof(*eth) + sizeof(*iph) +
+						 sizeof(*ih) + 4);
+	if (!skb)
+		goto out;
+
+	skb->protocol = htons(ETH_P_IP);
+
+	/* Ethernet header: dest = 01:00:5e:00:00:01 (all-hosts multicast) */
+	skb_reset_mac_header(skb);
+	eth = eth_hdr(skb);
+
+	memcpy(eth->h_source, qbr_dev->dev_addr, 6);
+	eth->h_dest[0] = 1;
+	eth->h_dest[1] = 0;
+	eth->h_dest[2] = 0x5e;
+	eth->h_dest[3] = 0;
+	eth->h_dest[4] = 0;
+	eth->h_dest[5] = 1;
+
+	eth->h_proto = htons(ETH_P_IP);
+	skb_put(skb, sizeof(*eth));
+
+	/* IPv4 header: ihl=6 (20 bytes + 4-byte Router Alert option) */
+	skb_set_network_header(skb, skb->len);
+	iph = ip_hdr(skb);
+
+	iph->version = 4;
+	iph->ihl = 6;
+	iph->tos = 0xc0;
+	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
+	iph->id = 0x0;
+	iph->frag_off = htons(IP_DF);
+	iph->ttl = 1;
+	iph->protocol = IPPROTO_IGMP;
+	iph->saddr = in_dev->ifa_list->ifa_address;
+	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
+	((u8 *)&iph[1])[0] = IPOPT_RA;
+	((u8 *)&iph[1])[1] = 4;
+	((u8 *)&iph[1])[2] = 0;
+	((u8 *)&iph[1])[3] = 0;
+	ip_send_check(iph);
+	skb_put(skb, 24);
+
+	/* IGMPv2 query: max response time 1s (code 0xa, units of 1/10 s) */
+	skb_set_transport_header(skb, skb->len);
+	ih = igmp_hdr(skb);
+	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
+	ih->code = 0xa;
+	ih->group = 0;
+	ih->csum = 0;
+	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
+	skb_put(skb, sizeof(*ih));
+
+out:
+	in_dev_put(in_dev);
+	return skb;
+}
+
+/*
+ * Transmit an already-built Ethernet-encapsulated frame out the VAP's
+ * net_device at voice priority, with A-MSDU aggregation disabled.
+ */
+static void qdrv_wlan_send_to_node(struct ieee80211vap *vap, struct sk_buff *skb)
+{
+	struct qdrv_vap *qv;
+	struct net_device *vdev;
+
+	qv = container_of(vap, struct qdrv_vap, iv);
+	vdev = qv->ndev;
+
+	skb->dev = vdev;
+	skb->priority = WME_AC_VO;
+
+	/* mark the payload as Ethernet-encapsulated for the TX path */
+	QTN_SKB_ENCAP(skb) = QTN_SKB_ENCAP_ETH;
+
+	M_FLAG_SET(skb, M_NO_AMSDU);
+
+	dev_queue_xmit(skb);
+}
+
+/*
+ * Build an IGMP general query (sourced from the bridge device) and send
+ * it out the given VAP.  Silently does nothing if the bridge device was
+ * not found at init time.
+ */
+static void qdrv_wlan_igmp_query_send(struct qdrv_wlan *qw, struct ieee80211vap *vap)
+{
+	struct sk_buff *skb;
+
+	if (!qw->br_dev)
+		return;
+
+	skb = ip4_multicast_alloc_query(qw->br_dev);
+
+	if (!skb) {
+		DBGPRINTF_LIMIT_E("could not alloc igmp query skb\n");
+		return;
+	}
+
+	qdrv_wlan_send_to_node(vap, skb);
+}
+
+/*
+ * Periodic timer: when the Broadcom-AP IGMP-query vendor workaround is
+ * enabled and non-Quantenna stations are present, send an IGMP general
+ * query on every running AP VAP that has a non-Quantenna association.
+ * Always re-arms itself.
+ */
+static void qdrv_wlan_igmp_query_timer_handler(unsigned long data)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)data;
+	struct ieee80211com *ic = &qw->ic;
+	struct ieee80211vap *vap;
+
+	if ((ic->ic_vendor_fix & VENDOR_FIX_BRCM_AP_GEN_IGMPQUERY) &&
+			ic->ic_nonqtn_sta) {
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+				continue;
+			if (vap->iv_state != IEEE80211_S_RUN)
+				continue;
+			if (vap->iv_non_qtn_sta_assoc == 0)
+				continue;
+
+			qdrv_wlan_igmp_query_send(qw, vap);
+		}
+	}
+
+	mod_timer(&qw->igmp_query_timer, jiffies + QDRV_WLAN_IGMP_QUERY_INTERVAL * HZ);
+}
+
+/* Arm the periodic IGMP-query timer (legacy timer API, qw via .data). */
+void qdrv_wlan_igmp_query_timer_start(struct qdrv_wlan *qw)
+{
+	init_timer(&qw->igmp_query_timer);
+	qw->igmp_query_timer.function = qdrv_wlan_igmp_query_timer_handler;
+	qw->igmp_query_timer.data = (unsigned long)qw;
+	qw->igmp_query_timer.expires = jiffies + QDRV_WLAN_IGMP_QUERY_INTERVAL * HZ;
+	add_timer(&qw->igmp_query_timer);
+}
+
+/* Cancel the periodic IGMP-query timer. */
+void qdrv_wlan_igmp_timer_stop(struct qdrv_wlan *qw)
+{
+	del_timer(&qw->igmp_query_timer);
+}
+
+/*
+ * Troubleshoot-start hook: mark the MAC dead, give in-flight activity
+ * 100ms to drain, then halt the MuC so its stack can be reused for
+ * crash diagnostics ("borrow its stack").
+ */
+static int
+qdrv_troubleshoot_start_cb(void *in_ctx)
+{
+	struct qdrv_wlan *qw = in_ctx;
+	/* Stop the MuC so we can borrow its stack */
+	if (qw) {
+		struct qdrv_mac *mac = qw->mac;
+		mac->dead = 1;
+		mdelay(100);
+		hal_disable_muc();
+	}
+	return 0;
+}
+
+/*
+ * Register crash-diagnostics hooks: declare the MuC stack region as a
+ * safe SRAM scratch area and install the troubleshoot-start callback.
+ */
+static void
+qdrv_wlan_debug_init(struct qdrv_wlan *qw)
+{
+	/* Hook into the troubleshoot functions */
+	arc_set_sram_safe_area(CONFIG_ARC_MUC_STACK_INIT - CONFIG_ARC_MUC_STACK_SIZE, CONFIG_ARC_MUC_STACK_INIT);
+	arc_set_troubleshoot_start_hook(qdrv_troubleshoot_start_cb, qw);
+}
+
+/*
+ * Congestion-queue statistics callback: record a queue-length or
+ * enqueue-failure sample for the given queue index; unknown stat types
+ * are ignored.
+ */
+void qdrv_update_cgq_stats(void *ctx, uint32_t type, uint8_t index, uint32_t value)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *)ctx;
+
+	switch (type) {
+	case TOPAZ_CONGEST_QUEUE_STATS_QLEN:
+		qw->cgq_stats.congest_qlen[index] = value;
+		break;
+	case TOPAZ_CONGEST_QUEUE_STATS_ENQFAIL:
+		qw->cgq_stats.congest_enq_fail[index] = value;
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Allocate and bring up the per-MAC wlan context (struct qdrv_wlan):
+ * locks, work items, IRQ handlers, the TX/RX/scan/hostlink interfaces,
+ * the rate tables for every supported PHY mode, the 802.11 layer,
+ * temperature/power calibration, radar (DFS), the packet logger, the
+ * hang-recovery timer and PM/bridge hooks.
+ *
+ * Returns 0 on success; -ENOMEM or -1 on failure.
+ *
+ * NOTE(review): most failure paths below return without freeing qw or
+ * unwinding the interfaces already started, and leave mac->data pointing
+ * at the partially initialized structure -- confirm callers treat a
+ * failed init as fatal for the device.
+ */
+int qdrv_wlan_init(struct qdrv_mac *mac, struct host_ioctl_hifinfo *hifinfo,
+	u32 arg1, u32 arg2)
+{
+	struct qdrv_wlan *qw;	/* Old struct qnet_priv */
+	int i;
+
+	if (!TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT) {
+		printk(KERN_ERR "%s: wlan rx accelerate should be used with topaz hbm"
+				"skb allocator only\n", __FUNCTION__);
+		return -1;
+	}
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	/* Allocate a wlan structure */
+	qw = kmalloc(sizeof(*qw), GFP_KERNEL);
+	if (qw == NULL) {
+		DBGPRINTF_E("Failed to allocate wlan structure\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -ENOMEM;
+	}
+
+	DBGPRINTF(DBG_LL_INFO, QDRV_LF_WLAN,
+		"qw 0x%08x ic 0x%08x\n",
+		(unsigned int) qw, (unsigned int) &qw->ic);
+
+	/* Clean it out */
+	memset(qw, 0, sizeof(struct qdrv_wlan));
+
+	/* Store it in the mac structure as opaque private data */
+	mac->data = (void *) qw;
+
+	qw->flags_ext |= QDRV_WLAN_FLAG_UNKNOWN_ARP;
+	qw->flags_ext |= QDRV_WLAN_FLAG_AUC_TX;
+
+	reg_congest_queue_stats(qdrv_update_cgq_stats, qw);
+
+	/* We need the back pointer so we can control interrupts */
+	qw->mac = mac;
+
+	/* Initialize the wlan data structure */
+	qw->unit = mac->unit;
+	qw->flags = arg1 & IOCTL_DEVATTACH_DEVFLAG_MASK;
+	/* RF chip id/version are packed into arg1 alongside the dev flags */
+	qw->rf_chipid = (arg1 & IOCTL_DEVATTACH_DEV_RFCHIP_FREQID_MASK) >>
+					IOCTL_DEVATTACH_DEV_RFCHIP_FREQID_MASK_S;
+	qw->rf_chip_verid = (arg1 & IOCTL_DEVATTACH_DEV_RFCHIP_VERID_MASK) >>
+					IOCTL_DEVATTACH_DEV_RFCHIP_VERID_MASK_S;
+	qw->host_sem = (u32)&mac->ruby_sysctrl->l2m_sem;
+	soc_shared_params->rf_chip_id = qw->rf_chipid;
+
+	/* bridge isolation defaults depend on the board configuration type */
+	if ((strcmp(QDRV_CFG_TYPE, "qtm710_rgmii_config") == 0) ||
+			(strcmp(QDRV_CFG_TYPE, "topaz_rgmii_config") == 0) ||
+			(strcmp(QDRV_CFG_TYPE, "topaz_vzn_config") == 0) ||
+			(strcmp(QDRV_CFG_TYPE, "topaz_pcie_config") == 0))
+		qw->br_isolate = QDRV_BR_ISOLATE_NORMAL;
+	else
+		qw->br_isolate = 0;
+	qw->br_isolate_vid = 0;
+#ifdef CONFIG_QUANTENNA_RESTRICT_WLAN_IP
+	qw->restrict_wlan_ip = 1;
+#else
+	qw->restrict_wlan_ip = 0;
+#endif
+	/* takes a reference on br0; released in qdrv_wlan_exit() */
+	qw->br_dev = dev_get_by_name(&init_net, "br0");
+	if (!qw->br_dev)
+		DBGPRINTF_E("Could not get bridge device\n");
+
+	spin_lock_init(&qw->lock);
+	spin_lock_init(&qw->flowlock);
+
+	qdrv_br_create(&qw->bridge_table);
+	qw->mcs_odd_even = 0;
+	qw->tx_restrict = 0;
+	qw->tx_restrict_rts = IEEE80211_TX_RESTRICT_RTS_DEF;
+	qw->tx_restrict_limit = IEEE80211_TX_RESTRICT_LIMIT_DEF;
+	qw->tx_restrict_rate = IEEE80211_TX_RESTRICT_RATE;
+	qw->tx_swretry_agg_max = -1;
+	qdrv_wlan_tx_sch_init(qw);
+
+	qw->tx_swretry_noagg_max = -1;
+	qw->arp_last_sent = jiffies;
+
+	qw->tx_swretry_suspend_xmit = -1;
+
+	/* init csa workqueues and irq handlers */
+	qdrv_init_csa_irqhandler(qw);
+	INIT_WORK(&qw->csa_wq, csa_work);
+	spin_lock_init(&qw->csa_lock);
+	INIT_WORK(&qw->remain_chan_wq, remain_channel_work);
+	INIT_WORK(&qw->channel_work_wq, channel_work);
+
+	qdrv_init_cca_irqhandler(qw);
+	INIT_WORK(&qw->cca_wq, cca_work);
+	spin_lock_init(&qw->cca_lock);
+
+	qdrv_init_meas_irqhandler(qw);
+	INIT_WORK(&qw->meas_wq, meas_work);
+
+#ifdef QTN_BG_SCAN
+	qdrv_init_scan_irqhandler(qw);
+	INIT_WORK(&qw->scan_wq, scan_work);
+	spin_lock_init(&qw->scan_lock);
+#endif /* QTN_BG_SCAN */
+
+#ifdef CONFIG_QVSP
+	if (qdrv_wlan_vsp_irq_init(qw, hifinfo->hi_vsp_stats_phys)) {
+		panic("Could not initialize VSP stats IRQ");
+	}
+#endif
+
+	/* DMA-coherent pool of per-node stats blocks, threaded onto a free list */
+	qw->shared_pernode_stats_pool = dma_alloc_coherent(NULL,
+			sizeof(struct qtn_node_shared_stats_list) * QTN_PER_NODE_STATS_POOL_SIZE,
+			&qw->shared_pernode_stats_phys, GFP_KERNEL | GFP_DMA | __GFP_ZERO);
+	if (qw->shared_pernode_stats_pool == NULL) {
+		DBGPRINTF_E("Failed to allocate per node stats pool");
+		return -ENOMEM;
+	}
+	TAILQ_INIT(&qw->shared_pernode_stats_head);
+	for (i = 0; i < QTN_PER_NODE_STATS_POOL_SIZE; i++) {
+		TAILQ_INSERT_TAIL(&qw->shared_pernode_stats_head,
+				&qw->shared_pernode_stats_pool[i], next);
+	}
+
+	/* Initialize the TX interface */
+	if (qdrv_tx_init(mac, hifinfo, arg2) < 0) {
+		DBGPRINTF_E("Failed to initialize TX\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Initialize the RX interface */
+	if (qdrv_rx_init(qw, hifinfo) < 0) {
+		DBGPRINTF_E("Failed to initialize RX\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Initialize the Scan interface */
+	if (qdrv_scan_init(qw, hifinfo) < 0) {
+		DBGPRINTF_E("Failed to initialize Scan\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Initialize the hostlink interface */
+	if (qdrv_hostlink_init(qw, hifinfo) < 0) {
+		DBGPRINTF_E("Failed to initialize hostlink\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Initialize the beamforming support */
+	if (qdrv_txbf_init(qw) < 0) {
+		DBGPRINTF_E("Failed to initialize beamforming\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Start the RX interface */
+	if (qdrv_rx_start(mac) < 0) {
+		DBGPRINTF_E("Failed to start RX\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Start the TX interface */
+	if (qdrv_tx_start(mac) < 0) {
+		DBGPRINTF_E("Failed to start TX\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Start the Scan interface */
+	if (qdrv_scan_start(mac) < 0) {
+		DBGPRINTF_E("Failed to start Scan\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Start the hostlink interface */
+	if (qdrv_hostlink_start(mac) < 0) {
+		DBGPRINTF_E("Failed to start hostlink\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Set up rate tables for all potential media types. */
+	if (set_rates(qw, IEEE80211_MODE_11A) < 0) {
+		DBGPRINTF_E("Failed to set A rates\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11B) < 0) {
+		DBGPRINTF_E("Failed to set B rates\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11G) < 0) {
+		DBGPRINTF_E("Failed to set G rates\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11NG) < 0) {
+		DBGPRINTF_E("Failed to set NG rates\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11NG_HT40PM) < 0) {
+		DBGPRINTF_E("Failed to set NG rates\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11NA) < 0) {
+		DBGPRINTF_E("Failed to set NA rates\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11NA_HT40PM) < 0) {
+		DBGPRINTF_E("Failed to set NA rates\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11AC_VHT20PM) < 0) {
+		DBGPRINTF_E("Failed to set AC rates VHT20\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11AC_VHT40PM) < 0) {
+		DBGPRINTF_E("Failed to set AC rates VHT40\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_rates(qw, IEEE80211_MODE_11AC_VHT80PM) < 0) {
+		DBGPRINTF_E("Failed to set AC rates VHT80\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	/* Initialize the 802.11 layer (old qtn_attach()) */
+	if (qdrv_wlan_80211_init(&qw->ic, mac->mac_addr, qw->rf_chipid) < 0) {
+		DBGPRINTF_E("Failed to initialize 802.11 (ieee80211com)\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_mode(qw, IEEE80211_MODE_11G) < 0) {
+		DBGPRINTF_E("Failed to set mode\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	if (set_mode(qw, IEEE80211_MODE_11NG) < 0) {
+		DBGPRINTF_E("Failed to set mode\n");
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+		return -1;
+	}
+
+	qdrv_init_tsensor(qw);
+	qdrv_txpow_cal_init(qw);
+
+	/* start DFS function */
+	qdrv_radar_init(mac);
+
+	qdrv_pktlogger_init(qw);
+
+	/* Timer to enable hang recovery. We delay this as the intial channel
+	 * change can take a long time to complete, causing false hangs to be
+	 * detected.
+	 */
+	qdrv_wlan_hr_oneshot_enable(qw);
+
+	/* Subscribe to PM notifications */
+	qw->pm_notifier.notifier_call = qdrv_pm_notify;
+	pm_qos_add_notifier(PM_QOS_POWER_SAVE, &qw->pm_notifier);
+
+	qdrv_wlan_debug_init(qw);
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	br_fdb_get_active_sub_port_hook = qdrv_get_active_sub_port;
+	br_fdb_check_active_sub_port_hook = qdrv_check_active_sub_port;
+#endif
+
+	qw->sp = qtn_mproc_sync_shared_params_get();
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/* WPS Polling programs */
+/* GPIO pin polled for the WPS button; 255 = not configured */
+static u8	qdrv_wps_gpio_polling_pin = 255;
+/* last sampled level and the level that means "pressed" */
+static u32	qdrv_wps_button_last_level;
+static u32	qdrv_wps_button_active_level;
+
+/*
+ * Tear down the per-MAC wlan context created by qdrv_wlan_init():
+ * stop the 802.11 layer, timers and calibration work, unwind the
+ * TX/RX/scan/hostlink interfaces, release device references and DMA
+ * memory, free qw, and clear mac->data.  Always returns 0.
+ */
+int qdrv_wlan_exit(struct qdrv_mac *mac)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+
+	qdrv_wlan_80211_exit(&qw->ic);
+
+	qdrv_wlan_hr_oneshot_disable(qw);
+	qdrv_txpow_cal_stop(qw);
+	if (qw->se95_temp_sensor) {
+		i2c_unregister_device(qw->se95_temp_sensor);
+		qw->se95_temp_sensor = NULL;
+	}
+
+	/* unwind roughly in reverse order of qdrv_wlan_init() */
+	qdrv_txbf_exit(qw);
+	qdrv_hostlink_exit(qw);
+	qdrv_tx_stop(mac);
+	qdrv_scan_exit(qw);
+	qdrv_rx_exit(qw);
+	qdrv_tx_exit(qw);
+	qdrv_sch_shared_data_exit(qw->tx_sch_shared_data);
+
+	qdrv_pktlogger_exit(qw);
+
+	qdrv_br_exit(&qw->bridge_table);
+
+	/* drop the references taken with dev_get_by_name() at init */
+	if (qw->br_dev != NULL) {
+		dev_put(qw->br_dev);
+	}
+
+	if (qw->pktlogger.dev != NULL) {
+		dev_put(qw->pktlogger.dev);
+	}
+
+	dma_free_coherent(NULL, sizeof(struct qtn_node_shared_stats_list) * QTN_PER_NODE_STATS_POOL_SIZE,
+			qw->shared_pernode_stats_pool, qw->shared_pernode_stats_phys);
+
+#ifdef CONFIG_QVSP
+	qdrv_wlan_vsp_irq_exit(qw);
+#endif
+	qdrv_genpcap_exit(qw);
+
+	kfree(qw);
+
+	/* Reset the MAC data structure */
+	mac->data = NULL;
+
+	if (qdrv_wps_gpio_polling_pin != 255)
+		gpio_free(qdrv_wps_gpio_polling_pin);
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	br_fdb_get_active_sub_port_hook = NULL;
+	br_fdb_check_active_sub_port_hook = NULL;
+#endif
+
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return 0;
+}
+
+/*
+* Queue of processes who access wps_button file
+*/
+DECLARE_WAIT_QUEUE_HEAD(WPS_Button_WaitQ);
+
+/* WPS button event reported to user space process */
+typedef enum {
+	WPS_BUTTON_NONE_EVENT = 0,	/* idle: no event pending */
+	WPS_BUTTON_WIRELESS_EVENT,	/* short press: trigger WPS */
+	WPS_BUTTON_DBGDUMP_EVENT,	/* long press (>= 10s): debug dump */
+	WPS_BUTTON_INVALIDE_EVENT	/* upper bound marker, never reported */
+} WPS_Button_Event;
+/* true for the two reportable events (exclusive of NONE and the bound) */
+#define WPS_BUTTON_VALID(e) (WPS_BUTTON_NONE_EVENT < (e) && (e) < WPS_BUTTON_INVALIDE_EVENT)
+/* last event, consumed (reset to NONE) by qdrv_wps_button_read() */
+static WPS_Button_Event wps_button_event = WPS_BUTTON_NONE_EVENT;
+
+/*
+ * Publish a WPS button event and wake every reader blocked on the
+ * wps_button sysfs file.  Invalid events are silently ignored.
+ */
+static void qdrv_wps_button_event_wakeup(WPS_Button_Event event)
+{
+	if (!WPS_BUTTON_VALID(event))
+		return;
+
+	wps_button_event = event;
+	wake_up_all(&WPS_Button_WaitQ);
+}
+
+/*
+ * sysfs read handler for the wps_button attribute.  Blocks until a
+ * valid button event is posted, then reports its numeric value and
+ * resets the pending event.  Returns an empty read when woken by a
+ * signal instead of an event.
+ */
+static ssize_t qdrv_wps_button_read(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buff)
+{
+	int i = 0;
+
+	/* As usual, this read is always blocked untill wps button is pressed
+	 * so increase the module reference to prevent it being unload during
+	 * blocking read
+	 */
+	if (!try_module_get(THIS_MODULE))
+		return 0;
+
+	/* wait for valid WPS button event */
+	wait_event_interruptible(WPS_Button_WaitQ, WPS_BUTTON_VALID(wps_button_event));
+
+	/* read back empty string in signal wakeup case */
+	/* (pending-signal scan replaces checking the wait's return value) */
+	for (i = 0; i < _NSIG_WORDS; i++) {
+		if (current->pending.signal.sig[i] & ~current->blocked.sig[i]) {
+			module_put(THIS_MODULE);
+			return 0;
+		}
+	}
+
+	sprintf(buff, "%d\n", wps_button_event);
+
+	/* after new event been handled, reset to none event */
+	wps_button_event = WPS_BUTTON_NONE_EVENT;
+
+	module_put(THIS_MODULE);
+
+	return strlen(buff);
+}
+
+/* Read-only (owner) sysfs attribute backed by qdrv_wps_button_read(). */
+DEVICE_ATTR(wps_button, S_IRUSR, qdrv_wps_button_read, NULL); /* dev_attr_wps_button */
+
+/* Create the wps_button sysfs file under the net_device's device dir. */
+static inline void qdrv_wps_button_device_file_create(struct net_device *ndev)
+{
+	device_create_file(&(ndev->dev), &dev_attr_wps_button);
+}
+
+/* Remove the wps_button sysfs file. */
+static inline void qdrv_wps_button_device_file_remove(struct net_device *ndev)
+{
+	device_remove_file(&ndev->dev, &dev_attr_wps_button);
+}
+
+/* records the jiffies when button down, back to 0 after button released */
+static u32 qdrv_wps_button_down_jiffies = 0;
+/* non-zero when the GPIO is interrupt-driven rather than timer-polled */
+static int interrupt_mode = 0;
+
+#define WPS_BUTTON_TIMER_INTERVAL ((3 * HZ) / 10) /* timer internal */
+
+/*
+ * Poll the WPS button GPIO and translate edges into events: on the
+ * falling (press) edge remember the time; on the rising (release) edge
+ * report a long-press debug-dump event (>= 10s held) or a short-press
+ * wireless event.  Re-arms the poll timer unless a release was handled
+ * in interrupt mode.
+ */
+static void qdrv_wps_polling_button_notifier(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	u32 current_level;
+
+	current_level = gpio_get_value(qdrv_wps_gpio_polling_pin);
+
+	/* records the falling edge jiffies */
+	if ((current_level == qdrv_wps_button_active_level)
+	    && (qdrv_wps_button_last_level != qdrv_wps_button_active_level)) {
+
+		qdrv_wps_button_down_jiffies = jiffies;
+	}
+
+	/* at rising edge */
+	if ((current_level != qdrv_wps_button_active_level)
+	    && (qdrv_wps_button_last_level == qdrv_wps_button_active_level)) {
+
+		/* WPS button event is rising triggered -- when button
+		 * being changed from active to inactive level.
+		 *
+		 * Different press time trigger different event
+		 */
+		if ((jiffies - qdrv_wps_button_down_jiffies) >= 10 * HZ) {
+
+			/* wakeup the event waiting processes */
+			qdrv_wps_button_event_wakeup(WPS_BUTTON_DBGDUMP_EVENT);
+			DBGPRINTF_N("WPS: button long press polling at %u\n", (unsigned int) jiffies);
+		} else {
+			/* wakeup the event waiting processes */
+			qdrv_wps_button_event_wakeup(WPS_BUTTON_WIRELESS_EVENT);
+			qdrv_eventf(dev, "WPS-BUTTON.indication");
+
+			DBGPRINTF_N("WPS: button short press polling at %u\n", (unsigned int) jiffies);
+		}
+
+		/* back to 0 after rising edge */
+		qdrv_wps_button_down_jiffies = 0;
+
+		/* in interrupt mode the timer is only used to debounce a press,
+		 * so do not re-arm it once the release has been handled */
+		if (interrupt_mode)
+			goto interrupt_end;
+	}
+
+	/* Restart the timer */
+	mod_timer(&qdrv_wps_button_timer, jiffies + WPS_BUTTON_TIMER_INTERVAL);
+
+interrupt_end:
+	qdrv_wps_button_last_level = current_level;
+
+	return;
+}
+
+/*
+ * Set up polling-mode monitoring of the WPS button.
+ * @dev: netdev that owns the sysfs attribute and receives events
+ * @wps_gpio_pin: GPIO to sample; rejected when above MAX_GPIO_PIN
+ * @active_logic: GPIO level meaning "pressed" (non-zero -> high)
+ * @mode: non-zero when driven from the press IRQ (timer stops on release)
+ * Returns 0 on success, -1 for an invalid pin.  The timer is initialised
+ * but not started here; see qdrv_polling_wps_button_begin().
+ */
+static int qdrv_polling_wps_button_init(struct net_device *dev, u8 wps_gpio_pin, u8 active_logic, int mode)
+{
+	interrupt_mode = mode;
+
+	if (wps_gpio_pin > MAX_GPIO_PIN) {
+		DBGPRINTF_E("WPS polling GPIO pin %d is invalid\n", wps_gpio_pin);
+		return -1;
+	}
+
+	/*
+	 * Set up timer to poll the button.
+	 * Request the GPIO resource and export it for userspace
+	 */
+	/* NOTE(review): a gpio_request failure is only logged and init still
+	 * proceeds -- presumably the pin may already be claimed; confirm.
+	 */
+	if (gpio_request(wps_gpio_pin, dev->name) < 0)
+		DBGPRINTF_E("%s: Failed to request GPIO%d for GPIO reset\n",
+				dev->name, wps_gpio_pin);
+	else
+		gpio_export(wps_gpio_pin, true);
+
+	qdrv_wps_gpio_polling_pin = wps_gpio_pin;
+	qdrv_wps_button_active_level = (active_logic) ? 1 : 0;
+	/* ~active can never equal the active level, so the first active
+	 * sample is always seen as a falling edge by the notifier
+	 */
+	qdrv_wps_button_last_level = ~qdrv_wps_button_active_level;
+
+	init_timer(&qdrv_wps_button_timer);
+	qdrv_wps_button_timer.function = qdrv_wps_polling_button_notifier;
+	qdrv_wps_button_timer.data = (unsigned long)dev;
+
+	/* create the device file for user space use */
+	qdrv_wps_button_device_file_create(dev);
+
+	return 0;
+}
+
+/* Arm the button poll timer one interval from now, unless it is already
+ * pending.
+ */
+static void qdrv_polling_wps_button_begin(void)
+{
+	qdrv_wps_button_timer.expires = jiffies + WPS_BUTTON_TIMER_INTERVAL;
+	if (timer_pending(&qdrv_wps_button_timer))
+		return;
+	add_timer(&qdrv_wps_button_timer);
+}
+
+/* Tear down polling-mode button support: remove the sysfs file, stop the
+ * poll timer (del_timer_sync waits for a running callback), clear the
+ * timer's netdev cookie and release the GPIO.
+ */
+static void qdrv_polling_wps_button_exit(uint8_t wps_gpio_pin)
+{
+	struct net_device *ndev = (struct net_device *)(qdrv_wps_button_timer.data);
+
+	qdrv_wps_button_device_file_remove(ndev);
+	del_timer_sync(&qdrv_wps_button_timer);
+	qdrv_wps_button_timer.data = (unsigned long)(NULL);
+	gpio_free(wps_gpio_pin);
+}
+
+/* Falling-edge (button press) IRQ: (re)start the poll timer, which then
+ * measures the press duration and raises the event on release.
+ */
+static irqreturn_t qdrv_wps_button_handler(int irq, void *dev_id)
+{
+	qdrv_polling_wps_button_begin();
+
+	return IRQ_HANDLED;
+}
+
+/* Set up interrupt-mode monitoring of the WPS button: a falling-edge IRQ
+ * kicks the poll timer, which tracks press duration.  Returns 0 on
+ * success, -1 on failure.
+ */
+static int
+qdrv_interrupt_wps_button_init(struct net_device *dev, u8 wps_gpio_pin)
+{
+	u8 active_logic;
+	int retval;
+
+	/* current wps button is in released state, so its value can determine active_logic */
+	active_logic = (~gpio_get_value(wps_gpio_pin) & 0x01) ? 1 : 0;
+
+	if (request_irq(GPIO2IRQ(wps_gpio_pin), qdrv_wps_button_handler, IRQF_TRIGGER_FALLING | IRQF_SHARED,
+			"qwps_btn", dev)) {
+		DBGPRINTF_E("WPS: push button IRQ %d is not free for register falling edge irq\n", GPIO2IRQ(wps_gpio_pin));
+		return -1;
+	}
+
+	DBGPRINTF_N("WPS: push button IRQ initialised\n");
+
+	/* mode=1: the notifier stops the timer after the release edge */
+	retval = qdrv_polling_wps_button_init(dev, wps_gpio_pin, active_logic, 1);
+	if (retval != 0) {
+		/* Bug fix: don't leak the IRQ when polling init rejects the pin */
+		free_irq(GPIO2IRQ(wps_gpio_pin), dev);
+	}
+
+	return retval;
+}
+
+/* Tear down interrupt-mode button support.
+ * Bug fix: free the IRQ *before* the polling exit path runs
+ * del_timer_sync(); the IRQ handler re-arms the poll timer, so with the
+ * old order a press arriving after del_timer_sync() could re-queue the
+ * timer on a torn-down GPIO.
+ */
+static void qdrv_interrupt_wps_button_exit(struct net_device *dev, u8 wps_gpio_pin)
+{
+	free_irq(GPIO2IRQ(wps_gpio_pin), dev);
+	qdrv_polling_wps_button_exit(wps_gpio_pin);
+}
+
+/*
+ * Top-level WPS button initialisation.  Reads the board configuration for
+ * the button GPIO, the monitoring method (IRQ vs polling) and the active
+ * level, then starts the matching monitor.  Returns 0 when no button is
+ * configured (not treated as an error) or the sub-init's result.
+ */
+int qdrv_wps_button_init(struct net_device *dev)
+{
+	u8	wps_gpio_pin = 0;
+	u8	use_interrupt = 0;
+	u8	active_logic = 0;
+	int	retval;
+
+	if (qdrv_get_wps_push_button_config(&wps_gpio_pin, &use_interrupt, &active_logic) != 0) {
+		DBGPRINTF_N("WPS: push button is not configured\n");
+		return 0;
+	}
+
+	DBGPRINTF_N("WPS: push button GPIO pin %d\n", wps_gpio_pin);
+	DBGPRINTF_N("WPS: monitored using %s\n", use_interrupt ? "interrupt" : "polling");
+	if (use_interrupt) {
+		DBGPRINTF_N("WPS: interrupt on line %d\n", GPIO2IRQ(wps_gpio_pin));
+	} else {
+		DBGPRINTF_N("WPS: active logic is %s\n", active_logic ? "high" : "low");
+	}
+
+	set_wps_push_button_enabled();
+
+	/* interrupt mode starts its timer from the press IRQ; polling mode
+	 * starts it immediately
+	 */
+	if (use_interrupt) {
+		retval = qdrv_interrupt_wps_button_init(dev, wps_gpio_pin);
+	} else {
+		retval = qdrv_polling_wps_button_init(dev, wps_gpio_pin, active_logic, 0);
+		qdrv_polling_wps_button_begin();
+	}
+
+	return retval;
+}
+
+/*
+ * Tear down WPS button monitoring.  The owning netdev is recovered from
+ * the poll timer's data field; NULL there means init never ran (or exit
+ * already happened), so there is nothing to undo.
+ */
+void qdrv_wps_button_exit(void)
+{
+	struct net_device *dev = (struct net_device *)(qdrv_wps_button_timer.data);
+	u8	wps_gpio_pin = 0;
+	u8	use_interrupt = 0;
+	u8	active_logic = 0;
+
+	if (!dev)
+		return;
+
+	/* re-read the board config to learn which teardown path applies */
+	if (qdrv_get_wps_push_button_config(&wps_gpio_pin, &use_interrupt, &active_logic) != 0) {
+		return;
+	}
+
+	if (use_interrupt) {
+		qdrv_interrupt_wps_button_exit(dev, wps_gpio_pin);
+	} else {
+		qdrv_polling_wps_button_exit(wps_gpio_pin);
+	}
+}
+
+/* Return the netdev registered with the WPS button machinery, which is
+ * stashed in the poll timer's data field (NULL when not initialised).
+ */
+struct net_device *qdrv_wps_button_get_dev(void)
+{
+	unsigned long dev_cookie = qdrv_wps_button_timer.data;
+
+	return (struct net_device *)dev_cookie;
+}
+
+/**
+ * Intermediate point between MUC and WLAN driver. Layer to hide 802.11 specific structures
+ * from the MUC comm layer.
+ *
+ * @return 1 if the MIC failure was reported to the WLAN driver, 0 otherwise.
+ */
+int qdrv_wlan_tkip_mic_error(struct qdrv_mac *mac, int devid, int count)
+{
+	struct net_device *dev = mac->vnet[QDRV_WLANID_FROM_DEVID(devid)];
+	struct ieee80211vap *vap;
+
+	/* no netdev for this devid: nothing to report to */
+	if (dev == NULL)
+		return 0;
+
+	vap = netdev_priv(dev);
+	vap->iv_ic->ic_tkip_mic_failure(vap, count);
+
+	return 1;
+}
+
+/* record channel change event */
+/*
+ * Append a channel-switch record to the ic_csw_record ring buffer
+ * (capacity CSW_MAX_RECORDS_MAX; records->index marks the newest entry).
+ * Consecutive switches to the same channel are coalesced.  Non-STA modes
+ * store the trigger reason (and for SCS the requesting MAC); STA mode
+ * stores IEEE80211_CSW_REASON_UNKNOWN.
+ */
+void qdrv_channel_switch_record(struct ieee80211com *ic,
+				struct ieee80211_channel *new_chan,
+				uint32_t reason)
+{
+	struct ieee80211req_csw_record *records = &ic->ic_csw_record;
+	int index = records->index;
+
+	qdrv_chan_occupy_record_start(ic, new_chan->ic_ieee);
+
+	/* if the new_chan is same with last channel, do not record it */
+	if (new_chan->ic_ieee == records->channel[index]) {
+		return;
+	}
+
+	/* saturating count of valid entries */
+	if (records->cnt < CSW_MAX_RECORDS_MAX) {
+		records->cnt++;
+	}
+
+	/* advance the write index, wrapping at the ring capacity; the very
+	 * first record goes to slot 0
+	 */
+	if (records->cnt == 1) {
+		records->index = 0;
+	} else {
+		if (records->index == (CSW_MAX_RECORDS_MAX - 1))
+			records->index = 0;
+		else
+			records->index++;
+	}
+
+	index = records->index;
+	records->channel[index] = new_chan->ic_ieee;
+	/* seconds since boot */
+	records->timestamp[index] = (jiffies - INITIAL_JIFFIES) / HZ;
+	if (ic->ic_opmode != IEEE80211_M_STA) {
+		records->reason[index] = reason;
+
+		if ((reason & CSW_REASON_MASK) == IEEE80211_CSW_REASON_SCS) {
+			records->reason[index] = ic->ic_csw_reason;
+			memcpy(records->csw_record_mac[index], ic->ic_csw_mac, IEEE80211_ADDR_LEN);
+		}
+	} else {
+		records->reason[index] = IEEE80211_CSW_REASON_UNKNOWN;
+	}
+}
+EXPORT_SYMBOL(qdrv_channel_switch_record);
+
+/* Bump the per-cause channel-switch counter matching the masked reason
+ * code; unknown codes are logged as errors.
+ */
+void qdrv_channel_switch_reason_record(struct ieee80211com *ic, int reason_flag)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+	int reason = reason_flag & CSW_REASON_MASK;
+
+	if (reason == IEEE80211_CSW_REASON_SCS)
+		qw->csw_stats.csw_by_scs++;
+	else if (reason == IEEE80211_CSW_REASON_DFS)
+		qw->csw_stats.csw_by_dfs++;
+	else if (reason == IEEE80211_CSW_REASON_MANUAL)
+		qw->csw_stats.csw_by_user++;
+	else if (reason == IEEE80211_CSW_REASON_SAMPLING)
+		qw->csw_stats.csw_by_sampling++;
+	else if (reason == IEEE80211_CSW_REASON_TDLS_CS)
+		qw->csw_stats.csw_by_tdls++;
+	else if (reason == IEEE80211_CSW_REASON_BGSCAN)
+		qw->csw_stats.csw_by_bgscan++;
+	else if (reason == IEEE80211_CSW_REASON_OCAC)
+		qw->csw_stats.csw_by_ocac++;
+	else if (reason == IEEE80211_CSW_REASON_OCAC_RUN)
+		qw->csw_stats.csw_by_ocac_run++;
+	else if (reason == IEEE80211_CSW_REASON_CSA)
+		qw->csw_stats.csw_by_csa++;
+	else if (reason == IEEE80211_CSW_REASON_SCAN)
+		qw->csw_stats.csw_by_scan++;
+	else if (reason == IEEE80211_CSW_REASON_COC)
+		qw->csw_stats.csw_by_coc++;
+	else
+		DBGPRINTF_E("unexpected event\n");
+}
+
+/* Tear down the block-ack agreement for one TID: send a DELBA to the peer
+ * (direction flag inverted to the peer's point of view) and remove the
+ * local BA state.
+ */
+void qdrv_wlan_drop_ba(struct ieee80211_node *ni, int tid, int tx, int reason)
+{
+	ieee80211_send_delba(ni, tid, !tx, reason);
+	ieee80211_node_ba_del(ni, tid, tx, reason);
+}
+
+/* Print the node's block-ack table: RX (dir 0) then TX (dir 1) state for
+ * every TID.
+ */
+void qdrv_wlan_dump_ba(struct ieee80211_node *ni)
+{
+	struct ieee80211_ba_tid *ba;
+	int dir;
+	int32_t tid_idx;
+
+	printk("Node %u %pM BA table:\n", IEEE80211_AID(ni->ni_associd), ni->ni_macaddr);
+	printk("tx tid state type win timeout\n");
+	for (dir = 0; dir < 2; dir++) {
+		for (tid_idx = 0; tid_idx < WME_NUM_TID; tid_idx++) {
+			if (dir)
+				ba = &ni->ni_ba_tx[tid_idx];
+			else
+				ba = &ni->ni_ba_rx[tid_idx];
+			printk("%2d %3d %5d %4d %3d %7d\n",
+				dir, tid_idx, ba->state,
+				ba->type, ba->buff_size, ba->timeout);
+		}
+	}
+}
+
+/* Tell the MuC to enable/disable 11g ERP protection for this VAP via a
+ * hostlink ioctl.  Failures to allocate the message are logged and the
+ * request is silently dropped.
+ * (Fix: the argument-assignment lines were space-indented, inconsistent
+ * with the tab indentation used everywhere else in this file.)
+ */
+static void qdrv_wlan_set_11g_erp(struct ieee80211vap *vap, int on)
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	struct qtn_node_args *args = NULL;
+	dma_addr_t args_dma;
+
+	/* NOTE(review): on failure vnet_free_ioctl() may receive NULL --
+	 * the same pattern appears in qdrv_get_local_link_margin(); confirm
+	 * it tolerates NULL.
+	 */
+	if (!(ioctl = vnet_alloc_ioctl(qv)) ||
+	    !(args = qdrv_hostlink_alloc_coherent(NULL, sizeof(*args),
+						  &args_dma, GFP_DMA | GFP_ATOMIC))) {
+		DBGPRINTF_E("Failed to allocate set_11g_erp message\n");
+		vnet_free_ioctl(ioctl);
+		return;
+	}
+
+	memset(args, 0, sizeof(*args));
+
+	ioctl->ioctl_command = IOCTL_DEV_SET_11G_ERP;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_arg2 = on;
+	ioctl->ioctl_argp = args_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+	qdrv_hostlink_free_coherent(NULL, sizeof(*args), args, args_dma);
+}
+
+/* Reset the channel-configured flag prior to a reload of the WLAN driver. */
+void qdrv_wlan_cleanup_before_reload(struct ieee80211com *ic)
+{
+	ic->ic_chan_is_set = 0;
+}
+
+/* Return the local TX power in dBm: the per-channel maximum (capped at
+ * IEEE80211_LOWGAIN_TXPOW_MAX when the RF front end reports low gain)
+ * plus a fixed total-power offset.
+ */
+int8_t qdrv_get_local_tx_power(struct ieee80211com *ic)
+{
+#define QTN_TXPOW_TOTAL_OFFSET	6
+	int8_t base = qdrv_is_gain_low() ?
+			IEEE80211_LOWGAIN_TXPOW_MAX :
+			ic->ic_curchan->ic_maxpower_normal;
+
+	return base + QTN_TXPOW_TOTAL_OFFSET;
+#undef QTN_TXPOW_TOTAL_OFFSET
+}
+
+/*
+ * Per-chain minimum usable RSSI (dBm, negative) indexed by MCS for 40MHz
+ * operation; consumed by qdrv_get_local_link_margin() to turn a measured
+ * RSSI into a link margin.  NOTE(review): the repeated -47 entries look
+ * like saturation/placeholder values -- confirm against the MCS table
+ * this is indexed by.
+ */
+static int8_t min_rssi_40MHZ_perchain_mcstbl[] = {
+	-82,
+	-82,
+	-81,
+	-78,
+	-74,
+	-71,
+	-70,
+	-68,
+	-82,
+	-80,
+	-77,
+	-74,
+	-71,
+	-67,
+	-65,
+	-63,
+	-80,
+	-77,
+	-73,
+	-71,
+	-66,
+	-63,
+	-61,
+	-58,
+	-72,
+	-69,
+	-63,
+	-61,
+	-55,
+	-47,
+	-47,
+	-47,
+	-76,
+	-71,
+	-72,
+	-70,
+	-65,
+	-65,
+	-74,
+	-72,
+	-70,
+	-70,
+	-69,
+	-67,
+	-67,
+	-68,
+	-68,
+	-62,
+	-61,
+	-63,
+	-62,
+	-62,
+	-66,
+	-64,
+	-62,
+	-61,
+	-61,
+	-60,
+	-59,
+	-55,
+	-57,
+	-47,
+	-47,
+	-47,
+	-58,
+	-57,
+	-57,
+	-47,
+	-47,
+	-47,
+	-47,
+	-47,
+	-47,
+	-47,
+	-47,
+	-47
+};
+
+/* Error strings indexed by (reason - QTN_LINK_MARGIN_REASON_NOSUCHNODE);
+ * currently a single entry.
+ */
+char *link_margin_info_err_msg[] = {
+	"Link Margin ERROR:No such node in macfw\n"				/* QTN_LINK_MARGIN_REASON_NOSUCHNODE*/
+};
+
+/*
+ * Query the MuC for this node's link-margin info and convert it to a
+ * margin in dB: measured per-chain RSSI minus the minimum usable RSSI for
+ * the reported MCS (scaled to 20MHz when bw==0).  *result is set to
+ * LINK_MARGIN_INVALID on a firmware-side failure.  Returns 0, or -EINVAL
+ * when the hostlink message could not be allocated.
+ */
+int qdrv_get_local_link_margin(struct ieee80211_node *ni, int8_t *result)
+{
+	struct qdrv_vap *qv = container_of(ni->ni_vap, struct qdrv_vap, iv);
+	struct host_ioctl *ioctl;
+	struct qtn_link_margin_info *lm_info;
+	dma_addr_t lm_dma;
+	unsigned int err_idx;
+
+	if ((!(ioctl = vnet_alloc_ioctl(qv))) ||
+			(!(lm_info = (struct qtn_link_margin_info *)qdrv_hostlink_alloc_coherent(NULL,
+												 sizeof(struct qtn_link_margin_info),
+												 &lm_dma,
+												 GFP_DMA | GFP_ATOMIC)))) {
+		DBGPRINTF_E("Failed to allocate LINKMARGIN message\n");
+		vnet_free_ioctl(ioctl);
+		return -EINVAL;
+	}
+
+	memset(lm_info, 0, sizeof(*lm_info));
+	/* use IEEE80211_ADDR_LEN instead of a magic 6, consistent with the
+	 * rest of this file
+	 */
+	memcpy(lm_info->mac_addr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	ioctl->ioctl_command = IOCTL_DEV_GET_LINK_MARGIN_INFO;
+	ioctl->ioctl_arg1 = qv->devid;
+	ioctl->ioctl_argp = lm_dma;
+
+	vnet_send_ioctl(qv, ioctl);
+
+	if (lm_info->reason == QTN_LINK_MARGIN_REASON_SUCC) {
+		DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "get link margin info success:bw=%d,mcs=%d,rssi_avg=%d\n",
+				lm_info->bw,
+				lm_info->mcs,
+				lm_info->rssi_avg / 10);
+
+		/* protect invalid mcs */
+		if (lm_info->mcs >= ARRAY_SIZE(min_rssi_40MHZ_perchain_mcstbl))
+			lm_info->mcs = 0;
+
+		/* it seems bw=0 appears usually */
+		if (!lm_info->bw)
+			*result = (lm_info->rssi_avg / 10) - RSSI_40M_TO_20M_DBM(min_rssi_40MHZ_perchain_mcstbl[lm_info->mcs]);
+		else
+			*result = (lm_info->rssi_avg / 10) - min_rssi_40MHZ_perchain_mcstbl[lm_info->mcs];
+	} else {
+		/* Bug fix: bound the error-table index -- an unexpected
+		 * reason code previously read past the one-entry array.
+		 */
+		err_idx = lm_info->reason - QTN_LINK_MARGIN_REASON_NOSUCHNODE;
+		if (err_idx < ARRAY_SIZE(link_margin_info_err_msg))
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "%s", link_margin_info_err_msg[err_idx]);
+		else
+			DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "Link Margin ERROR:unexpected reason %d\n", lm_info->reason);
+		*result = LINK_MARGIN_INVALID;
+	}
+
+	qdrv_hostlink_free_coherent(NULL, sizeof(struct qtn_link_margin_info), lm_info, lm_dma);
+	return 0;
+}
+
+/*
+ * Copy the AuC/MuC shared per-VAP counters into the VAP's devstats.
+ * Unicast TX is derived as total minus multicast/broadcast.  Returns 0,
+ * or -EINVAL when no shared stats block exists for this VAP.
+ */
+int qdrv_wlan_get_shared_vap_stats(struct ieee80211vap *vap)
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	uint8_t vapid = QDRV_WLANID_FROM_DEVID(qv->devid);
+	qtn_shared_vap_stats_t* shared_stats = qdrv_auc_get_vap_stats(vapid);
+
+	if (!shared_stats)
+		return -EINVAL;
+
+	vap->iv_devstats.rx_packets = shared_stats->qtn_rx_pkts;
+	vap->iv_devstats.rx_bytes = shared_stats->qtn_rx_bytes;
+	vap->iv_devstats.rx_unicast_packets = shared_stats->qtn_rx_ucast;
+	vap->iv_devstats.rx_broadcast_packets = shared_stats->qtn_rx_bcast;
+	vap->iv_devstats.multicast = shared_stats->qtn_rx_mcast;
+	vap->iv_devstats.rx_dropped = shared_stats->qtn_rx_dropped;
+
+	vap->iv_devstats.tx_packets = shared_stats->qtn_tx_pkts;
+	vap->iv_devstats.tx_bytes = shared_stats->qtn_tx_bytes;
+	/* NOTE(review): tx_multicast_packets is accumulated (+=) while every
+	 * other field is overwritten, and tx_unicast_packets is derived from
+	 * it -- repeated calls would inflate mcast and shrink ucast.  Confirm
+	 * whether the += is intentional.
+	 */
+	vap->iv_devstats.tx_multicast_packets += shared_stats->qtn_tx_mcast +
+			shared_stats->qtn_muc_tx_mcast;
+	vap->iv_devstats.tx_unicast_packets = shared_stats->qtn_tx_pkts -
+			vap->iv_devstats.tx_multicast_packets -
+			vap->iv_devstats.tx_broadcast_packets;
+
+	vap->iv_devstats.tx_dropped = shared_stats->qtn_tx_dropped;
+
+	return 0;
+}
+
+/* Zero the AuC-shared per-VAP counter block.  Returns 0, or -EINVAL when
+ * this VAP has no shared stats block.
+ */
+int qdrv_wlan_reset_shared_vap_stats(struct ieee80211vap *vap)
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	qtn_shared_vap_stats_t *stats;
+
+	stats = qdrv_auc_get_vap_stats(QDRV_WLANID_FROM_DEVID(qv->devid));
+	if (stats == NULL)
+		return -EINVAL;
+
+	memset(stats, 0, sizeof(*stats));
+
+	return 0;
+}
+
+/*
+ * Copy the AuC/MuC shared per-node counters into ni->ni_stats.  Unicast
+ * TX is derived as total minus multicast/broadcast.  Returns 0, or
+ * -EINVAL when the node has no shared stats block.
+ */
+int qdrv_wlan_get_shared_node_stats(struct ieee80211_node *ni)
+{
+	uint8_t node_idx = IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx);
+	qtn_shared_node_stats_t* shared_stats = qdrv_auc_get_node_stats(node_idx);
+	uint8_t i;
+
+	if (!shared_stats)
+		return -EINVAL;
+
+	ni->ni_stats.ns_rx_data = shared_stats->qtn_rx_pkts;
+	ni->ni_stats.ns_rx_bytes = shared_stats->qtn_rx_bytes;
+	ni->ni_stats.ns_rx_ucast = shared_stats->qtn_rx_ucast;
+	ni->ni_stats.ns_rx_mcast = shared_stats->qtn_rx_mcast;
+	ni->ni_stats.ns_rx_bcast = shared_stats->qtn_rx_bcast;
+	ni->ni_stats.ns_rx_vlan_pkts = shared_stats->qtn_rx_vlan_pkts;
+
+	ni->ni_stats.ns_tx_data = shared_stats->qtn_tx_pkts;
+	ni->ni_stats.ns_tx_bytes = shared_stats->qtn_tx_bytes;
+	ni->ni_stats.ns_tx_mcast = shared_stats->qtn_tx_mcast +
+			shared_stats->qtn_muc_tx_mcast;
+	ni->ni_stats.ns_tx_ucast = shared_stats->qtn_tx_pkts -
+			ni->ni_stats.ns_tx_mcast -
+			ni->ni_stats.ns_tx_bcast;
+	/* per-WMM-access-category dropped MSDU counts */
+	for (i = 0; i < WMM_AC_NUM; i++)
+		ni->ni_stats.ns_tx_wifi_drop[i] = shared_stats->qtn_tx_drop_data_msdu[i];
+
+	return 0;
+}
+
+/* Zero the AuC-shared per-node counter block.  Returns 0, or -EINVAL when
+ * the node has no shared stats block.
+ */
+int qdrv_wlan_reset_shared_node_stats(struct ieee80211_node *ni)
+{
+	qtn_shared_node_stats_t *stats;
+
+	stats = qdrv_auc_get_node_stats(IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx));
+	if (stats == NULL)
+		return -EINVAL;
+
+	memset(stats, 0, sizeof(*stats));
+
+	return 0;
+}
+
+/* Forward RX gain parameters for the given table index to the MuC over
+ * hostlink.  Always returns 0; the hostlink status is not propagated.
+ */
+int qdrv_rxgain_params(struct ieee80211com *ic, int index, struct qtn_rf_rxgain_params *params)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	qdrv_hostlink_rxgain_params(qw, index, params);
+
+	return 0;
+}
+
+/* Enable or disable VLAN handling in the MuC via a hostlink request. */
+void
+qdrv_wlan_vlan_enable(struct ieee80211com *ic, int enable)
+{
+	struct qdrv_wlan *qw = container_of(ic, struct qdrv_wlan, ic);
+
+	qdrv_hostlink_vlan_enable(qw, enable);
+}
+
+/* Push a new beaconing scheme to the MuC and, only on success, commit it
+ * to ic_beaconing_scheme.  Returns 0 on success, -1 when the hostlink
+ * call fails or reports an error code.
+ */
+int qdrv_wlan_80211_set_bcn_scheme(struct ieee80211vap *vap, int param, int value)
+{
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	int ret;
+
+	ret = qdrv_hostlink_change_bcn_scheme(qv, param, value);
+	if (ret < 0 || (ret & QTN_HLINK_RC_ERR) != 0)
+		return -1;
+
+	vap->iv_ic->ic_beaconing_scheme = value;
+
+	return 0;
+}
diff --git a/drivers/qtn/qdrv/qdrv_wlan.h b/drivers/qtn/qdrv/qdrv_wlan.h
new file mode 100644
index 0000000..53924ab
--- /dev/null
+++ b/drivers/qtn/qdrv/qdrv_wlan.h
@@ -0,0 +1,1895 @@
+/**
+  Copyright (c) 2008 - 2013 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ **/
+
+#ifndef _QDRV_WLAN_H
+#define _QDRV_WLAN_H
+
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/net/bridge/br_public.h>
+
+/* Include the WLAN 802.11 layer here */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net80211/if_media.h>
+#include <net80211/ieee80211_var.h>
+#include "qdrv_comm.h"
+#include "qdrv_debug.h"
+#include "qtn/qtn_pcap.h"
+#include "qdrv/qdrv_bridge.h"
+#include "qtn/muc_txrx_stats.h"
+#include "qtn/muc_phy_stats.h"
+#include "qtn/dsp_stats.h"
+#include "qtn/auc_debug_stats.h"
+#include "qtn/skb_recycle.h"
+#include "qtn/qdrv_sch_data.h"
+#include "qtn/iputil.h"
+#ifdef CONFIG_QVSP
+#include "qtn/qvsp.h"
+#endif
+#include "qtn/topaz_congest_queue.h"
+#include <qtn/txbf_common.h>
+#include <qtn/lhost_muc_comm.h>
+
+/* TX list and hostlink ring sizing */
+#define QNET_TXLIST_ENTRIES_DEFAULT 512
+#define QNET_TXLIST_ENTRIES_MAX 2048
+#define QNET_TXLIST_ENTRIES_MIN 1
+#define QNET_HLRING_ENTRIES 32
+#define QDRV_NUM_RF_STREAMS IEEE80211_QTN_NUM_RF_STREAMS
+
+/* RSSI conversions between 40MHz/20MHz bandwidth and total/per-chain dBm.
+ * Bug fix: the macro argument is now parenthesized so expressions such as
+ * RSSI_40M_TO_20M_DBM(a ? b : c) expand with the intended precedence.
+ */
+#define RSSI_40M_TO_20M_DBM(x)		((x) - 3)
+#define RSSI_20M_TO_40M_DBM(x)		((x) + 3)
+#define RSSI_TOTAL_TO_PERCHAIN_DBM(x)	((x) - 6)
+#define RSSI_PERCHAIN_TO_TOTAL_DBM(x)	((x) + 6)
+#define LINK_MARGIN_INVALID		(-127)
+
+/* Build flavour identifiers (engineering, bench, buildbot, release, SDK,
+ * GPL) reported by the driver.
+ */
+enum qdrv_bld_type {
+	QDRV_BLD_TYPE_ENG = 1,
+	QDRV_BLD_TYPE_BENCH,
+	QDRV_BLD_TYPE_BUILDBOT,
+	QDRV_BLD_TYPE_REL,
+	QDRV_BLD_TYPE_SDK,
+	QDRV_BLD_TYPE_GPL
+};
+
+struct qdrv_vap;
+
+/* driver-specific node state */
+struct qdrv_node
+{
+	struct ieee80211_node qn_node;  /* Must be first for the 802.11 layer */
+	struct kobject kobj;		/* sysfs representation of this node */
+	TAILQ_ENTRY(qdrv_node) qn_next;	/* node list linkage */
+	uint16_t qn_node_idx;		/* a copy of ni_node_idx */
+};
+
+/* Maximum number of memory-debug watchpoints tracked at once. */
+#define MAX_MEMDEBUG_WATCHPTS	64
+/* One memory-debug watchpoint over an address range. */
+struct qdrv_memdebug_watchpt {
+	uint32_t	addr;		/* watched address as configured */
+	void	*remap_addr;	/* kernel-visible mapping of addr -- presumably ioremap; confirm */
+	size_t	size;		/* watched region size in bytes */
+};
+
+#include "qdrv_slab_def.h"
+
+/* kmem caches tracked by the qdrv slab accounting, one per slab index. */
+struct qdrv_meminfo {
+	struct kmem_cache *caches[QDRV_SLAB_IDX_MAX];
+};
+
+/* Name/address pair describing one AuC statistics field. */
+struct qtn_auc_stat_field {
+	uintptr_t addr;		/* location of the counter */
+	const char *name;	/* human-readable field name */
+};
+
+/* AuC sleep/jiffies/interrupt counters. */
+struct qdrv_auc_intr_stats {
+	uint32_t sleep;
+	uint32_t jiffies;
+	uint32_t aucirq[AUC_TID_NUM];	/* per-TID counters (AUC_TID_NUM entries) */
+};
+
+/* Counters for the pktlogger send path. */
+struct qdrv_pktlogger_stats {
+	uint32_t pkt_queued;	/* packets accepted onto the send queue */
+	uint32_t pkt_dropped;	/* packets dropped */
+	uint32_t pkt_failed;	/* send attempts that failed */
+	uint32_t pkt_requeued;	/* packets put back for a retry */
+	uint32_t queue_send;	/* send-queue drain operations */
+};
+
+/* Packet-logger state: addressing for emitted log frames (apparently
+ * IP/UDP, given the src/dst IP and port fields -- confirm), the send
+ * queue, collection timers and cached pointers into shared statistics
+ * blocks.
+ */
+struct qdrv_pktlogger {
+	struct qdrv_wlan *qw;
+	struct net_device *dev;
+	/* addressing for emitted log frames */
+	__be32 dst_ip;
+	__be32 src_ip;
+	__be16 dst_port;
+	__be16 src_port;
+	uint8_t dst_addr[IEEE80211_ADDR_LEN];
+	uint8_t src_addr[IEEE80211_ADDR_LEN];
+	uint8_t recv_addr[IEEE80211_ADDR_LEN];
+	uint32_t maxfraglen;
+	uint32_t flag;
+	uint16_t ip_id;
+
+	/* queued log data awaiting transmission by sendq_work */
+	spinlock_t sendq_lock;
+	STAILQ_HEAD(,qdrv_pktlogger_data) sendq_head;
+	struct work_struct sendq_work;
+	int sendq_scheduled;
+
+	/* netlink socket plus its reference count */
+	struct sock *netlink_socket;
+	int netlink_ref;
+
+	/* periodic collection timers and memory-debug watchpoints */
+	struct timer_list stats_timer;
+	struct timer_list mem_timer;
+	int mem_wp_index;
+	struct qdrv_memdebug_watchpt mem_wps[MAX_MEMDEBUG_WATCHPTS];
+
+	struct timer_list rate_timer;
+	struct timer_list sysmsg_timer;
+	struct timer_list flush_data;
+
+	/* cached pointers into MuC/AuC shared statistics areas */
+	uint32_t *stats_uc_rx_ptr;
+	uint32_t *stats_uc_rx_rate_ptr;
+	uint32_t *stats_uc_rx_bf_ptr;
+	uint32_t *stats_uc_tx_ptr;
+	uint32_t *stats_uc_tx_rate_ptr;
+	uint32_t *stats_uc_su_rates_read_ptr;
+	uint32_t *stats_uc_mu_rates_read_ptr;
+	uint32_t *stats_uc_scs_cnt;
+	struct netdev_queue *netdev_q_ptr_w;
+	struct netdev_queue *netdev_q_ptr_e;
+	struct qdrv_meminfo qmeminfo;
+	uint32_t queue_len;
+	struct qdrv_pktlogger_stats stats;
+
+	struct net_device *dev_emac0;
+	struct net_device *dev_emac1;
+
+	uint32_t *stats_auc_sleep_p;
+	uint32_t *stats_auc_jiffies_p;
+	uint32_t *stats_auc_intr_p;
+	struct auc_dbg_counters *stats_auc_dbg_p;
+
+	/* double-buffered rate stats: *_pre/*_cur flip between the two slots */
+	struct muc_rx_rates rx_rates[2];
+	struct muc_rx_rates *rx_rate_pre;
+	struct muc_rx_rates *rx_rate_cur;
+
+	struct muc_rx_rates rx_ratelog[2];
+	struct muc_rx_rates *rx_ratelog_pre;
+	struct muc_rx_rates *rx_ratelog_cur;
+	struct timer_list phy_stats_timer;
+	struct timer_list dsp_stats_timer;
+};
+
+/**********/
+/** SCAN **/
+/**********/
+
+/* Host-side scan interface: a work queue plus semaphore bits and the
+ * scan request/result mailboxes.
+ */
+struct host_scanif
+{
+	struct workqueue_struct *workqueue;
+	u32 scan_sem_bit;
+	u32 tx_sem_bit;
+	volatile u32 *sc_res_mbox;	/* scan result mailbox */
+	volatile u32 *sc_req_mbox;	/* scan request mailbox */
+};
+
+/**********/
+/** RX   **/
+/**********/
+
+/* Receive descriptor FIFO bookkeeping. */
+struct host_fifo_if
+{
+	struct host_descfifo *fifo;
+	dma_addr_t fifo_dma;			/* DMA address of the FIFO */
+	struct host_rxdesc *pending;
+	struct host_rxdesc **descp;
+	int ring_size;				/* descriptors in the ring */
+	struct dma_pool *df_rxdesc_cache;	/* pool backing rx descriptors */
+};
+
+/* Ring depths for the data, management, control and error queues. */
+#define QNET_RXRING_ENTRIES		64
+#define QNET_MGMTRING_ENTRIES	4
+#define QNET_CTRLRING_ENTRIES	4
+#define QNET_ERRRING_ENTRIES	4
+
+/* Host-side receive interface: the rx FIFO plus its semaphore bit. */
+struct host_rxif
+{
+	struct host_fifo_if rx;
+	u32 rx_sem_bit;
+};
+
+/* Byte sizes of the rx descriptor ring and rx buffer array. */
+#define QNET_RXRING_SIZE (QNET_RXRING_ENTRIES*sizeof(struct host_rxdesc))
+#define QNET_RXBUF_SIZE (QNET_RXRING_ENTRIES*sizeof(struct host_buf))
+
+/**********/
+/** TX   **/
+/**********/
+
+/* TX descriptor queue indices */
+#define QDRV_TXDESC_DATA		0
+#define QDRV_TXDESC_MGMT		1
+#define QDRV_TXDESC_QUEUE_MAX		2
+
+/* each node is always allowed this many descriptors */
+#define QDRV_TXDESC_THRESH_MAX_MIN	384
+/* re-enable a node queue when muc_queued is this much less than max per node */
+#define QDRV_TXDESC_THRESH_MIN_DIFF	32
+
+/* Host-side transmit interface: descriptor accounting, the hostlink ioctl
+ * ring, the txdone tasklet and the LHost tx descriptor list.
+ */
+struct host_txif
+{
+	uint16_t txdesc_cnt[QDRV_TXDESC_QUEUE_MAX];	/* descriptor counts per queue */
+	uint16_t list_max_size;
+	uint16_t muc_thresh_high;
+	uint16_t muc_thresh_low;
+	struct tasklet_struct txdone_tasklet;
+
+	/* hostlink ioctl ring state */
+	struct host_ioctl *hl_ring;
+	struct host_ioctl *hl_first;
+	struct host_ioctl *hl_last;
+	dma_addr_t hl_ring_dma;
+	int hl_read;
+	int hl_write;
+	int hl_tosend;
+	u32 hl_count;
+	spinlock_t hl_flowlock;
+
+	struct dma_pool *df_txdesc_cache;	/* pool backing tx descriptors */
+
+	struct lhost_txdesc *df_txdesc_list_head;
+	struct lhost_txdesc *df_txdesc_list_tail;
+
+	volatile u32 *tx_mbox;
+};
+
+/* Byte size of the hostlink ioctl ring. */
+#define QNET_HLRING_SIZE (QNET_HLRING_ENTRIES*sizeof(struct host_ioctl))
+
+/**********/
+/** WLAN **/
+/**********/
+
+#define HOST_TXD_VERSION	0x01
+#define CCA_TOKEN_INIT_VAL	0x50
+#define QTN_RATE_11N		0x80    /* Same bit setting as in MLME */
+
+/* PHY type codes (see qtn_rateentry.re_phytype) */
+#define QTN_RATE_PHY_OFDM	0
+#define QTN_RATE_PHY_CCK	1
+#define QTN_RATE_PHY_HT		2
+
+/* One rate table entry (legacy rate or 11n MCS). */
+struct qtn_rateentry
+{
+	u_int8_t re_ieeerate;    /* IEEE rate:2*phyrate (for legacy MLME)
+							* or MCS index for 11n */
+	u_int16_t   re_rate;    /* Rate in 100Kbps */
+	u_int8_t    re_ctrlrate;    /* index in rate table of control
+                       rate to use with this rate */
+	u_int8_t    re_shortpre:1;  /* Must use short preamble */
+	u_int8_t    re_basicrate:1; /* Is this rate a basic rate */
+	u_int8_t    re_phytype:2;   /* Phy type */
+} __packed;
+
+/* A rate table: rt_num total entries, of which rt_legacy_num are legacy. */
+struct qtn_ratetable
+{
+	u_int8_t        rt_num;     /* Number of entries (legacy + 11n) in the rate table */
+	u_int8_t        rt_legacy_num;     /* Number of legacy entries in the rate table */
+	struct qtn_rateentry    *rt_entries;    /* Array of entries */
+} __packed;
+
+/* Channel description: IEEE number, per-bandwidth centre frequencies and
+ * flag words.
+ */
+struct qtn_channel
+{
+	u_int16_t channel_number;	/* IEEE channel number */
+	u_int16_t channel_freq;		/* Channel frequency */
+	u_int32_t channel_flags;	/* Channel flags */
+	u_int16_t center_freq_40M;	/* Channel Center Frequency for 40MHz */
+	u_int16_t center_freq_80M;	/* Channel Center Frequency for 80MHz */
+	u_int16_t center_freq_160M;	/* Channel Center Frequency for 160MHz */
+	u_int32_t channel_ext_flags;	/* Extra channel flags for 80MHZ mode */
+} __packed;
+
+/* Statistics helpers: QDRV_STAT picks the tx or rx counter set at runtime;
+ * TXSTAT/RXSTAT/SMSTAT increment a named counter and the *_SET variants
+ * assign it directly.
+ */
+#define QDRV_STAT(_qw, _is_tx, _member)	do	\
+{						\
+	if (_is_tx) {				\
+		_qw->tx_stats._member++;	\
+	} else {				\
+		_qw->rx_stats._member++;	\
+	}					\
+} while (0)
+
+#define TXSTAT(qw, member) \
+	(qw)->tx_stats.member += 1
+
+#define TXSTAT_SET(qw, member, val) \
+	(qw)->tx_stats.member = (val)
+
+#define RXSTAT(qw, member) \
+	(qw)->rx_stats.member += 1
+
+#define RXSTAT_SET(qw, member, val) \
+	(qw)->rx_stats.member = (val)
+
+#define SMSTAT(qw, member) \
+	(qw)->sm_stats.member += 1
+/* #define QDRV_TX_DEBUG 1 */
+#ifdef QDRV_TX_DEBUG
+
+/* Debug counters: QDRV_TX_CTR_INC bumps a slot in qdrv_tx_ctr; QDRV_TX_DBG
+ * prints a message tagged with the node MAC and calling function, budgeted
+ * by qdrv_dbg_ctr[_i] (pass _i < 0 for no budget limit).
+ */
+extern uint32_t qdrv_tx_ctr[];
+extern uint32_t qdrv_dbg_ctr[];
+#define QDRV_TX_CTR_INC(_x)	qdrv_tx_ctr[_x]++;
+
+#define QDRV_TX_DBG(_i, _ni, _fmt, ...) do			\
+{								\
+	struct ieee80211_node *__ni = _ni;			\
+	if (_i >= 0) {						\
+		if (qdrv_dbg_ctr[_i] == 0) {			\
+			break;					\
+		}						\
+		qdrv_dbg_ctr[_i]--;				\
+	}							\
+	if (__ni) {						\
+		printk("[%s]", ether_sprintf(__ni->ni_macaddr));\
+	}							\
+	printk("%s " _fmt,					\
+		__func__, ##__VA_ARGS__);			\
+} while (0)
+
+#else
+
+/* no-op variants; QDRV_TX_DBG still references _ni to avoid unused warnings */
+#define QDRV_TX_CTR_INC(_x)
+#define QDRV_TX_DBG(_i, _ni, _fmt, ...) if (_ni) {}
+
+#endif /* QDRV_TX_DEBUG */
+
+/**
+ * \defgroup LHOST_STATS Linux host generated stats
+ */
+/* @{ */
+
+/**
+ * \brief 802.11 state machine statistics.
+ *
+ * These statistics are updated as 802.11 management packets are sent and received
+ * by both the AP and STA.
+ */
+struct qdrv_wlan_sm_stats {
+	/**
+	 * The number of times that the state machine went from trying to authenticate
+	 * directly back to scanning - i.e. the AUTH request timed out or was rejected.
+	 */
+	unsigned int sm_scan_auth_fail_scan_pend;
+
+	/**
+	 * The number of times that the state machine went from trying to associate
+	 * directly back to scanning - i.e. the ASSOC request timed out or was rejected.
+	 */
+	unsigned int sm_scan_assoc_fail_scan_pend;
+
+	/**
+	 * The number of times that a scan has been triggered (excluding failure to connect).
+	 */
+	unsigned int sm_scan_pend;
+
+	/**
+	 * The number of times that an authentication request is sent, waiting on response.
+	 */
+	unsigned int sm_auth_pend;
+
+	/**
+	 * The number of times that a deauth sequence is sent (i.e. going from authenticated
+	 * to disconnected state).
+	 */
+	unsigned int sm_run_deauth_auth_pend;
+
+	/**
+	 * The number of times that an association request is sent, waiting on response.
+	 */
+	unsigned int sm_assoc_pend;
+
+	/**
+	 * The number of times that a disassociate sequence is sent (i.e. going from
+	 * associated to authenticated state).
+	 */
+	unsigned int sm_run_disassoc_assoc_pend;
+
+	/**
+	 * The number of times a node is authenticated - i.e. becomes ready to send data packets.
+	 */
+	unsigned int sm_nd_auth;
+
+	/**
+	 * The number of times a node is unauthenticated.
+	 */
+	unsigned int sm_nd_unauth;
+
+	/**
+	 * The total number of nodes that are currently authenticated.
+	 */
+	unsigned int sm_nd_auth_tot;
+
+	/**
+	 * The number of times a station goes into connected state - i.e. ready to send data
+	 * packets.
+	 */
+	unsigned int sm_sta_associated;
+
+	/**
+	 * The state of the device - composite of flags indicating current operating mode and
+	 * radar flags.
+	 */
+	unsigned int sm_state;
+/* sm_state flag bits plus set/clear helper macros */
+#define QDRV_WLAN_SM_STATE_AP          0x00000001
+#define QDRV_WLAN_SM_STATE_STA         0x00000002
+#define QDRV_WLAN_SM_STATE_RADAR_EN    0x00000004
+#define QDRV_WLAN_SM_STATE_RADAR_ACT   0x00000008
+#define QDRV_WLAN_SM_STATE_CAC_ACTIVE  0x00000010
+#define QDRV_SET_SM_FLAG(_stats, _flag) (_stats).sm_state |= (_flag)
+#define QDRV_CLEAR_SM_FLAG(_stats, _flag) (_stats).sm_state &= ~(_flag)
+};
+
+/**
+ * \brief WLAN transmit statistics.
+ *
+ * These statistics are gathered within the WLAN driver on the LHost.
+ */
+struct qdrv_wlan_tx_stats {
+	/**
+	 * The total number of management frames enqueued for transmission.
+	 */
+	unsigned int tx_enqueue_mgmt;
+
+	/**
+	 * The total number of driver-generated data frames enqueued for transmission.
+	 */
+	unsigned int tx_enqueue_80211_data;
+
+	/**
+	 * The total number of data packets enqueued for transmission.
+	 */
+	unsigned int tx_enqueue_data;
+
+	/**
+	 * The total number of packets enqueued to the MuC via the mailbox.
+	 */
+	unsigned int tx_muc_enqueue;
+
+	/**
+	 * The number of data packets enqueued to the MuC via the host mailbox (when no current
+	 * packets are in the mailbox).
+	 */
+	unsigned int tx_muc_enqueue_mbox;
+
+	/**
+	 * The total number of keep-alive (NULL data) packets transmitted to clients
+	 * associated to the AP. These packets are used to check the client is still
+	 * connected and able to ACK the AP.
+	 */
+	unsigned int tx_null_data;
+
+	/**
+	 * The number of TX done interrupts received indicating the MuC is not ready.
+	 * This figure should always read as zero.
+	 */
+	unsigned int tx_done_muc_ready_err;
+
+	/**
+	 * The number of packets successfully sent (all data, mgmt for all TIDs).
+	 */
+	unsigned int tx_done_success;
+
+	/**
+	 * The number of txdone interrupts received at the LHost.
+	 */
+	unsigned int tx_done_enable_queues;
+
+	/**
+	 * The number of times the transmit queue has stopped.
+	 * Generally this is because the MuC backs up and causes backpressure to the
+	 * LHost.
+	 */
+	unsigned int tx_queue_stop;
+
+	/**
+	 * The number of times a packet to the MuC was requeued.
+	 */
+	unsigned int tx_requeue;
+
+	/**
+	 * The number of times packet requeuing failed.
+	 */
+	unsigned int tx_requeue_err;
+
+	/**
+	 * The number of times the hardstart function is called.
+	 */
+	unsigned int tx_hardstart;
+
+	/**
+	 * The number of packets completed - marked as done by the MuC.
+	 */
+	unsigned int tx_complete;
+
+	/**
+	 * The size of the skb recycle list shared between the Ethernet and wireless drivers.
+	 *
+	 * This number will vary as traffic goes through the system.
+	 */
+	unsigned int tx_min_cl_cnt;
+
+	/**
+	 * The number of packets dropped in the driver during a configuration change.
+	 */
+	unsigned int tx_dropped_config;
+
+	/**
+	 * The number of packets dropped in the driver due to the MAC not being enabled.
+	 */
+	unsigned int tx_dropped_mac_dead;
+
+	/**
+	 * The current transmit channel.
+	 */
+	unsigned int tx_channel;
+
+	/**
+	 * The total number of IGMP packets for transmission.
+	 */
+	unsigned int tx_igmp;
+
+	/**
+	 * The number of packets for transmission to unknown destination MAC addresses.
+	 */
+	unsigned int tx_unknown;
+
+	/**
+	 * The number of ARP request packets sent in attempts to discover the location of
+	 * unknown destinations.
+	 */
+	unsigned int tx_arp_req;
+
+	/**
+	 * The number of packets transmitted that lie inside the Local Network Control Block
+	 * (LNCB), the range 224.0.0.0/24, sent as four address (reliable) multicast
+	 * packets to Quantenna bridge stations.
+	 */
+	unsigned int tx_copy4_mc;
+
+	/**
+	 * The number of IGMP packets transmitted as 4 address reliable multicast packets
+	 * to Quantenna bridge stations.
+	 */
+	unsigned int tx_copy4_igmp;
+
+	/**
+	 * The number of packets for unknown destination MAC addresses sent as 4 address
+	 * reliable multicast packets to bridge stations.
+	 */
+	unsigned int tx_copy4_unknown;
+
+	/**
+	 * The total count of packet retransmissions as reliable, 4 address multicast frames.
+	 */
+	unsigned int tx_copy4;
+
+	/**
+	 * The number of times transmission of a copied packet failed due to lack of
+	 * resources.
+	 */
+	unsigned int tx_copy_fail;
+
+	/**
+	 * The number of times transmission of a 4 address packet failed due to the tx
+	 * queue being full.
+	 */
+	unsigned int tx_copy4_busy;
+
+	/**
+	 * The number of packets transmitted that lie inside the Local Network Control Block
+	 * (LNCB), the range 224.0.0.0/24, sent as three address (unreliable) multicast
+	 * packets (to third party clients).
+	 */
+	unsigned int tx_copy3_mc;
+
+	/**
+	 * The number of IGMP packets transmitted as 3 address unreliable multicast
+	 * packets (to third party clients).
+	 */
+	unsigned int tx_copy3_igmp;
+
+	/**
+	 * The number of broadcast or multicast packets transmitted as unicast frames.
+	 */
+	unsigned int tx_copy_uc;
+
+	/**
+	 * The number of 3 address broadcast/multicast packets sent to third party STAs.
+	 */
+	unsigned int tx_copy3;
+
+	/**
+	 * The number of broadcast/multicast packets transmitted as group-addressed frames.
+	 */
+	unsigned int tx_copy_mc;
+
+	/**
+	 * The number of broadcast/multicast packets transmitted as group-addressed frames.
+	 */
+	unsigned int tx_copy_mc_enc;
+
+	/**
+	 * The number of broadcast/multicast packets transmitted as directed frames
+	 */
+	unsigned int tx_copy_mc_to_uc;
+
+	/**
+	 * The number of SSDP packets transmitted as directed frames
+	 */
+	unsigned int tx_copy_ssdp;
+
+	/**
+	 * The number of packets that were dropped because the destination station was not
+	 * authorised.
+	 */
+	unsigned int tx_drop_auth;
+
+	/**
+	 * The number of packets that were dropped because the destination station had
+	 * disassociated.
+	 */
+	unsigned int tx_drop_aid;
+
+	/**
+	 * The number of packets that were dropped because of buffer exhaustion.
+	 */
+	unsigned int tx_drop_nodesc;
+
+	/**
+	 * The number of packets that were dropped because the WDS peer was not
+	 * associated.
+	 */
+	unsigned int tx_drop_wds;
+
+	/**
+	 * The number of packets that were dropped because of 3 address mode bridging
+	 * rules.
+	 */
+	unsigned int tx_drop_3addr;
+
+	/**
+	 * The number of packets that were dropped because of Video Stream Protection.
+	 */
+	unsigned int tx_drop_vsp;
+
+	/**
+	 * The total count of packets dropped at the wireless interface.
+	 */
+	unsigned int tx_drop_total;
+
+	/**
+	 * The number of data frames forwarded to the L2 external filter.
+	 */
+	unsigned int tx_l2_ext_filter;
+
+	/**
+	 * The number of data frames dropped without being forwarded to the L2 external filter.
+	 */
+	unsigned int tx_drop_l2_ext_filter;
+
+	/**
+	 * Field for QCAT.
+	 */
+	unsigned int qcat_state;
+
+	/**
+	 * Ticks that DSP waits until wmac is ready before installing the qmatrix.
+	 */
+	unsigned int txbf_qmat_wait;
+
+	/**
+	 * Protocol counts
+	 */
+	unsigned int prot_ip_udp;
+	unsigned int prot_ip_tcp;
+	unsigned int prot_ip_icmp;
+	unsigned int prot_ip_igmp;
+	unsigned int prot_ip_other;
+	unsigned int prot_ipv6;
+	unsigned int prot_arp;
+	unsigned int prot_pae;
+	unsigned int prot_other;
+};
+
+/**
+ * \brief WLAN receive statistics.
+ *
+ * These statistics are gathered within the WLAN driver on the LHost.
+ */
+struct qdrv_wlan_rx_stats {
+
+	/**
+	 * The number of receive IRQs
+	 */
+	unsigned int rx_irq;
+
+	/**
+	 * The number of times the receive tasklet is scheduled based on the IRQ.
+	 */
+	unsigned int rx_irq_schedule;
+
+	/**
+	 * The number of beacons received.
+	 */
+	unsigned int rx_beacon;
+
+	/**
+	 * The number of non-beacon packets received (eg, other management, control
+	 * and data packets combined).
+	 */
+	unsigned int rx_non_beacon;
+
+	/**
+	 * The number of packets received that were sent via the slow WLAN driver path,
+	 * which have no node structure associated with them.
+	 */
+	unsigned int rx_input_all;
+
+	/**
+	 * The number of packets received for a specific node (slow WLAN driver path).
+	 * The slow path is for management, control or fragmented data packets.
+	 */
+	unsigned int rx_input_node;
+
+	/**
+	 * The number of data packets received which are SNAP encapsulated.
+	 */
+	unsigned int rx_data_snap;
+
+	/**
+	 * The number of packets received with only the to DS bit set.
+	 */
+	unsigned int rx_data_tods;
+
+	/**
+	 * The number of packets received with none of the to/from DS bits set.
+	 */
+	unsigned int rx_data_nods;
+
+	/**
+	 * The number of packets received with only the from DS bit set.
+	 */
+	unsigned int rx_data_fromds;
+
+	/**
+	 * The number of packets received with both the to and from DS bits set.
+	 * These are 4 address (bridged) packets.
+	 */
+	unsigned int rx_data_dstods;
+
+	/**
+	 * The number of packets received from unknown STAs - that is, the AP doesn't
+	 * have an association with the STA.
+	 */
+	unsigned int rx_data_no_node;
+
+	/**
+	 * The number of packets received which have too short a length. These packets
+	 * are dropped.
+	 */
+	unsigned int rx_data_too_short;
+
+	/**
+	 * The number of times the rx poll function is called.
+	 */
+	unsigned int rx_poll;
+
+	/**
+	 * The number of times that a poll is carried on from a previous poll - that is,
+	 * the previous poll terminated early due to heavy RX load.
+	 */
+	unsigned int rx_poll_pending;
+
+	/**
+	 * The number of times rx poll terminated due to reaching the end of the received
+	 * data chain.
+	 */
+	unsigned int rx_poll_empty;
+
+	/**
+	 * The number of times a poll to the receive mailbox has data available.
+	 */
+	unsigned int rx_poll_retrieving;
+
+	/**
+	 * The number of times that an AMSDU being decapsulated fails due to not having enough headroom
+	 * in the packet. Eg, badly formatted AMSDU.
+	 */
+	unsigned int rx_poll_buffer_err;
+
+	/**
+	 * The number of times a receive descriptor allocate for an skb (when used for requeueing the RX descriptor
+	 * for the next packet) fails.
+	 */
+	unsigned int rx_poll_skballoc_err;
+
+	/**
+	 * Whether the poll function for receive is currently running (1) or not (0).
+	 */
+	unsigned int rx_poll_stopped;
+
+	/**
+	 * The number of elements on the receive FIFO between the MuC and LHost.
+	 */
+	unsigned int rx_df_numelems;
+
+	/**
+	 * The number of Aggregate MSDUs received.
+	 *
+	 * This counter is incremented once per AMSDU, NOT once per subframe within
+	 * the AMSDU.
+	 */
+	unsigned int rx_amsdu;
+
+	/**
+	 * The number of received packets (singletons, MPDUs or AMSDUs) in the LHost driver.
+	 */
+	unsigned int rx_packets;
+
+	/**
+	 * The number of received bytes (based on received packets counter above), including 802.2, 802.11 headers.
+	 */
+	unsigned int rx_bytes;
+
+	/**
+	 * The number of times chained receive descriptors are read in.
+	 */
+	unsigned int rx_poll_next;
+
+	/**
+	 * The number of times that the poll function completed processing all received packets before using
+	 * its entire budget.
+	 */
+	unsigned int rx_poll_complete;
+
+	/**
+	 * The number of times the receive poll function completes.
+	 */
+	unsigned int rx_poll_continue;
+
+	/**
+	 * The number of times packets received are from unauthenticated STAs.
+	 */
+	unsigned int rx_poll_vap_err;
+
+	/**
+	 * The number of received 802.11 fragmented packets.
+	 * Fragmented packets are processed via the slow data path.
+	 */
+	unsigned int rx_frag;
+
+	/**
+	 * The number of packets received for STAs that are currently blacklisted (due to MAC address filtering).
+	 */
+	unsigned int rx_blacklist;
+
+	/**
+	 * The number of received LNCB packets in 4 address mode.
+	 */
+	unsigned int rx_lncb_4;
+
+	/**
+	 * The number of received IGMP packets.
+	 */
+	unsigned int rx_igmp;
+
+	/**
+	 * The number of received IGMP packets in 4 address mode.
+	 */
+	unsigned int rx_igmp_4;
+
+	/**
+	 * The number of IGMP packets dropped due to already receiving the IGMP packet
+	 * as a reliable 4 address packet.
+	 */
+	unsigned int rx_igmp_3_drop;
+
+	/**
+	 * The number of received 3 address multicast packets dropped due to already
+	 * receiving the same packet as a reliable 4 address packet.
+	 */
+	unsigned int rx_mc_3_drop;
+
+	/**
+	 * Protocol counts
+	 */
+	unsigned int prot_ip_udp;
+	unsigned int prot_ip_tcp;
+	unsigned int prot_ip_icmp;
+	unsigned int prot_ip_igmp;
+	unsigned int prot_ip_other;
+	unsigned int prot_ipv6;
+	unsigned int prot_arp;
+	unsigned int prot_pae;
+	unsigned int prot_other;
+
+	/**
+	 * Beamforming Statistics
+	 *
+	 * Per-BF-slot counts of accepted and rejected beamforming feedback.
+	 */
+	unsigned int rx_bf_success[QTN_STATS_NUM_BF_SLOTS];
+	unsigned int rx_bf_rejected[QTN_STATS_NUM_BF_SLOTS];
+
+	/*
+	 * NOTE(review): the counters below are undocumented upstream; names suggest
+	 * rate-training and BW-coexistence related RX events - confirm with MuC code.
+	 */
+	unsigned int rx_rate_train_invalid;
+	unsigned int rx_mac_reserved;
+	unsigned int rx_coex_bw_action;
+	unsigned int rx_coex_bw_assoc;
+	unsigned int rx_coex_bw_scan;
+};
+
+/**
+ * \brief TQE congestion-queue statistics.
+ *
+ * One entry per congestion queue (TOPAZ_CONGEST_QUEUE_NUM queues).
+ */
+struct qdrv_tqe_cgq_stats {
+	/* Current queue length per congestion queue - presumably a sampled gauge; confirm */
+	uint32_t	congest_qlen[TOPAZ_CONGEST_QUEUE_NUM];
+	/* Enqueue failure count per congestion queue */
+	uint32_t	congest_enq_fail[TOPAZ_CONGEST_QUEUE_NUM];
+};
+/* @} */
+
+/*
+ * This can be changed to an array if the stat_parser is enhanced to parse array syntax.
+ */
+struct qdrv_rx_evm_array {
+	/* One EVM (error vector magnitude) sample per receive antenna chain */
+	unsigned int rx_evm_val[NUM_ANT];
+};
+
+/**
+ * \brief Transmit power
+ *
+ * Each member of the array records the transmit power of one Tx chain.
+ */
+struct qdrv_tx_pd_array {
+	/**
+	 * Transmit power of chains 0 to (NUM_ANT - 1).
+	 */
+	uint16_t tx_pd_vol[NUM_ANT];
+};
+
+/**
+ * \brief Qdisc stats
+ *
+ * Per-node queueing discipline statistics. The *_mac1/_mac2 pair appears to
+ * carry the node MAC address split across two words - confirm against producer.
+ */
+struct qdrv_netdebug_nd_stats {
+	uint32_t	sch_aid;
+	uint32_t	sch_mac1;
+	uint32_t	sch_mac2;
+	uint32_t	sch_ref;
+	uint32_t	sch_muc_queued;
+	uint32_t	sch_tokens;
+	uint32_t	sch_qlen;
+	uint32_t	sch_low_rate;
+	/* Per-band (QDRV_SCH_BANDS) scheduler counters */
+	uint32_t	sch_depth[QDRV_SCH_BANDS];
+	uint32_t	sch_sent[QDRV_SCH_BANDS];
+	uint32_t	sch_dropped[QDRV_SCH_BANDS];
+	uint32_t	sch_victim[QDRV_SCH_BANDS];
+};
+
+/** \brief Global scheduler statistics (aggregate, not per-node). */
+struct qdrv_sch_stats {
+	uint32_t	sch_users;
+	uint32_t	sch_tokens;
+	uint32_t	sch_cnt;
+};
+
+/**
+ * \brief Linux memory statistics.
+ *
+ * This structure contains a sample of different statistics related to the Linux memory
+ * management subsystem.
+ */
+struct qdrv_mem_stats {
+	/**
+	 * The number of free pages in the system.
+	 */
+	unsigned long mem_free;
+	/**
+	 * The number of SLAB pages that can be freed up.
+	 */
+	unsigned long mem_slab_reclaimable;
+	/**
+	 * The number of SLAB pages that can't be freed up.
+	 */
+	unsigned long mem_slab_unreclaimable;
+	/**
+	 * The number of anonymous (non file-backed) pages - confirm against sampler.
+	 */
+	unsigned long mem_anon;
+	/* Pages mapped into userspace page tables - confirm */
+	unsigned long mem_mapped;
+	/* Page-cache pages - confirm */
+	unsigned long mem_cached;
+};
+
+/**
+ * \brief Linux misc statistics
+ */
+struct qdrv_misc_stats {
+	/**
+	 * CPU awake cycles. When the CPU is at full load, this will be at
+	 * CPU clock speed (Hz) / stats interval (s).
+	 */
+	unsigned long cpuawake;
+};
+
+/**
+ * \brief Statistics indicating the reason for a channel change
+ *
+ * Each member of this structure counts channel changes caused by one trigger.
+ */
+struct qdrv_csw_count_stats {
+	/**
+	 * Channel change caused by SCS.
+	 */
+	uint16_t csw_by_scs;
+	/**
+	 * Channel change caused by DFS.
+	 */
+	uint16_t csw_by_dfs;
+	/**
+	 * Channel change caused by User configuration
+	 */
+	uint16_t csw_by_user;
+	/**
+	 * Channel change when device does off-channel sampling.
+	 */
+	uint16_t csw_by_sampling;
+	/**
+	 * Channel change triggered by TDLS operation.
+	 */
+	uint16_t csw_by_tdls;
+	/**
+	 * Channel change when device does background scanning.
+	 */
+	uint16_t csw_by_bgscan;
+	/**
+	 * Channel change after off-channel CAC is completed.
+	 */
+	uint16_t csw_by_ocac;
+	/**
+	 * Channel change when off-channel CAC is running.
+	 */
+	uint16_t csw_by_ocac_run;
+	/**
+	 * Channel change when received CSAIE from action frame or beacon
+	 */
+	uint16_t csw_by_csa;
+	/**
+	 * Channel change when device does regular scanning.
+	 */
+	uint16_t csw_by_scan;
+	/**
+	 * Channel change triggered due to pm level changes
+	 */
+	uint16_t csw_by_coc;
+};
+
+/** \brief TX power calibration state, including the deferred BB/RF cal work item. */
+struct tx_power_cal
+{
+	struct _temp_info {
+		int temp_index;		/* index into the temperature profile table - confirm */
+		int real_temp;		/* last measured temperature */
+		u_int8_t num_zone;	/* number of temperature zones */
+	} temp_info;
+	/* Deferred work that performs the BB/RF calibration */
+	struct delayed_work bbrf_cal_work;
+};
+
+#define MAX_UNKNOWN_DP_PER_SECOND 5   /* Rate limit per sec for unknown data pkts */
+
+/* Ext Flags in qdrv_wlan */
+#define QDRV_WLAN_MUC_KILLED		0x00000001
+#define QDRV_FLAG_3ADDR_BRIDGE_DISABLE	0x00000002
+#define QDRV_WLAN_DEBUG_TEST_LNCB	0x00000004
+#define QDRV_WLAN_FLAG_UNKNOWN_ARP	0x00000008 /* send ARP requests for unknown destinations */
+#define QDRV_WLAN_FLAG_UNKNOWN_FWD	0x00000010 /* send unknown dest pkt to all bridge STAs */
+#define QDRV_WLAN_FLAG_AUC_TX		0x00000020 /* enqueue tx packets to AuC, not MuC */
+
+#define QDRV_WLAN_TX_USE_AUC(qw)	( qw->flags_ext & QDRV_WLAN_FLAG_AUC_TX )
+
+#define QDRV_FLAG_3ADDR_BRIDGE_ENABLED() ((qw->flags_ext & QDRV_FLAG_3ADDR_BRIDGE_DISABLE) == 0)
+
+/** \brief List node wrapping the MuC-shared per-node stats block. */
+struct qtn_node_shared_stats_list {
+	/* Note: shared_pernode_stats must be the 1st field in the structure
+	 * (the list entry is cast to/from the stats pointer). */
+	struct qtn_node_shared_stats		shared_pernode_stats;
+	TAILQ_ENTRY(qtn_node_shared_stats_list)	next;
+};
+
+/**
+ * \brief Central per-MAC WLAN driver state.
+ *
+ * Aggregates the 802.11 stack instance, MuC/AuC host interfaces, statistics
+ * blocks and the various work items and locks used by the LHost driver.
+ */
+struct qdrv_wlan {
+	/* The 802.11 networking structure */
+	struct ieee80211com ic;
+	int unit;
+
+	struct work_struct scan_task;
+
+	/* Extended flags - see the QDRV_WLAN_* / QDRV_FLAG_* defines above */
+	u32 flags_ext;
+	u16 flags;
+	u8 rf_chipid;
+	u8 rf_chip_verid;
+	struct qdrv_mac *mac;/* Interrupts per MAC so we need a back pointer */
+
+	/* Synchronization */
+	spinlock_t lock;
+
+	/* One semaphore slot per host interface queue - confirm usage */
+	char semmap[HOST_NUM_HOSTIFQ];
+	char txdoneirq;
+	int rxirq;
+	int scanirq;
+
+	struct host_scanif scan_if;
+
+	struct host_scanfifo *scan_fifo; /* For iounmap */
+
+	/* Tx to MuC */
+	struct host_txif tx_if;
+	/* Rx from MuC */
+	struct host_rxif rx_if;
+
+	/* Registers */
+	u32 host_sem;
+	struct qtn_ratetable qw_rates[IEEE80211_MODE_MAX];/* rate tables */
+	struct qtn_ratetable *qw_currt;      /* current rate table */
+	enum ieee80211_phymode qw_curmode;
+	struct qdrv_wlan_tx_stats tx_stats;
+	struct qdrv_wlan_rx_stats rx_stats;
+	struct qdrv_csw_count_stats csw_stats;
+	struct qdrv_pktlogger pktlogger;
+	struct qdrv_wlan_sm_stats sm_stats;
+
+	/*congest queue stats*/
+	struct qdrv_tqe_cgq_stats cgq_stats;
+
+	/* TX Beamforming support */
+	void *txbf_state;
+
+	/* Flow control */
+	spinlock_t flowlock;
+
+	struct net_device *br_dev;
+
+	/* Rate limiting of unknown-destination data packets (see MAX_UNKNOWN_DP_PER_SECOND) */
+	int unknown_dp_count;
+	unsigned long unknown_dp_jiffies;
+
+#ifdef CONFIG_QVSP
+	struct qvsp_s *qvsp;
+	struct qtn_vsp_stats *vsp_stats;
+	struct tasklet_struct vsp_tasklet;
+	struct timer_list vsp_ba_throt;
+#if TOPAZ_QTM
+	uint32_t vsp_enabling;		/* VSP is just enabled, need warm up for traffic stats */
+	uint32_t vsp_check_intvl;	/* in seconds */
+	uint32_t vsp_sync_sched_remain;	/* sched sync task before next vsp interrupt from MuC */
+	/*
+	 * Used to sync stream stats every second. Since VSP check interval is bigger than 1 second,
+	 * we need to sched the sync work one less than the interval. And vsp tasklet will do 1 time.
+	 */
+	struct delayed_work vsp_sync_work;
+#endif
+#endif
+
+	/* 3-address mode bridging */
+	struct qdrv_br bridge_table;
+	int mcs_cap;
+	int mcs_odd_even;
+	int tx_restrict;
+	int tx_restrict_rts;
+	int tx_restrict_limit;
+	int tx_restrict_rate;
+	uint8_t tx_swretry_agg_max;
+	uint8_t tx_swretry_noagg_max;
+	uint8_t tx_swretry_suspend_xmit;
+	struct timer_list hr_timer;
+	struct timer_list cca_timer;
+	struct timer_list igmp_query_timer;
+	struct work_struct cca_wq;
+	struct work_struct meas_wq;
+	spinlock_t cca_lock;
+	struct work_struct scan_wq;
+	spinlock_t scan_lock;
+	void (*csa_callback)(const struct ieee80211_channel *, u_int64_t);
+	struct work_struct csa_wq;
+	spinlock_t csa_lock;
+	struct work_struct channel_work_wq;
+	int (*radar_detect_callback)(const struct ieee80211_channel *);
+	/* jiffies of the last ARP request sent for an unknown destination - confirm */
+	unsigned long arp_last_sent;
+
+	struct work_struct remain_chan_wq;
+
+	/* MuC per node stats pool */
+	struct qtn_node_shared_stats_list		*shared_pernode_stats_pool;
+	dma_addr_t					shared_pernode_stats_phys;
+	TAILQ_HEAD(, qtn_node_shared_stats_list)	shared_pernode_stats_head;
+
+	struct notifier_block pm_notifier;
+
+#if QTN_GENPCAP
+	struct qtn_genpcap_args genpcap_args;
+#endif
+
+	struct qdrv_sch_shared_data	*tx_sch_shared_data;
+	bool				queue_enabled;
+#define QDRV_BR_ISOLATE_NORMAL		BIT(0)
+#define QDRV_BR_ISOLATE_VLAN		BIT(1)
+	uint16_t br_isolate;
+	uint16_t br_isolate_vid;
+	uint8_t restrict_wlan_ip;
+	struct i2c_client *se95_temp_sensor;
+	struct tx_power_cal tx_power_cal_data;
+	struct shared_params *sp;
+	uint32_t tx_mimomode;
+};
+
+/**************/
+/** Netdebug **/
+/**************/
+#define	QDRV_NETDEBUG_NETPOLL_NAME		"qdrv_netpoll"
+#define	QDRV_NETDEBUG_NETPOLL_DEV		"eth1_0"
+
+#define	QDRV_NETDEBUG_FLAGS_NO_STATS		0x1
+#define	QDRV_NETDEBUG_FLAGS_TRUNCATED		0x2
+
+#define QDRV_NETDEBUG_RADAR_MAXPULSE		(175)
+#define QDRV_NETDEBUG_RADAR_PULSESIZE		(8)
+
+#define QDRV_NETDEBUG_TXBF_DATALEN		1024
+#define QDRV_NETDEBUG_IWEVENT_LENGTH		128
+#define QDRV_NETDEBUG_MEM_DATALEN		1024
+#define QDRV_NETDEBUG_SYSMSG_LENGTH		4096
+#define QDRV_NETDEBUG_BUILDSTRING_SIZE		32
+
+/**
+ * \brief The common header for netdebug (packetlogger) packets.
+ *
+ * Carried over UDP; each netdebug record type embeds this header first.
+ */
+struct qdrv_pktlogger_hdr {
+	struct udphdr udpheader;
+	/* Record type - matches the @NDB@ type tags on the structures below */
+	u_int8_t type;
+	u_int8_t			opmode;
+	/**
+	 * The source address (the bridge MAC address).
+	 */
+	unsigned char			src[IEEE80211_ADDR_LEN];
+	u_int32_t			version;
+	u_int32_t			builddate;
+	/**
+	 * Identifying string to easily see in packet dumps that this is a packetlogger packet.
+	 */
+	char				buildstring[QDRV_NETDEBUG_BUILDSTRING_SIZE];
+	/* See QDRV_NETDEBUG_FLAGS_* */
+	u_int8_t			flags;
+
+	/**
+	 * Epoch timestamp.
+	 */
+	u_int32_t			timestamp;
+	/**
+	 * TSF timestamp low bytes.
+	 */
+	u_int32_t			tsf_lo;
+	/**
+	 * TSF timestamp high bytes.
+	 */
+	u_int32_t			tsf_hi;
+
+	u_int32_t			platform;
+	/* Length of the statistics payload following this header */
+	u_int32_t			stats_len;
+	char				padding[3];	/* Word align data start */
+} __packed;
+
+/* Note: NDB tags are added to pktlogger structures. Do not remove these tags. */
+/* @NDB@: type=4 */
+/** \brief Netdebug record carrying TX beamforming state vector data. */
+struct qdrv_netdebug_txbf {
+	struct qdrv_pktlogger_hdr	ndb_hdr;
+	u_int8_t			stvec_data[QDRV_NETDEBUG_TXBF_DATALEN];
+} __packed;
+
+/* @NDB@: type=5,payload=iwevent_data,payloadtype=varstring */
+/** \brief Netdebug record carrying a wireless-extensions event string. */
+struct qdrv_netdebug_iwevent {
+	struct qdrv_pktlogger_hdr	ndb_hdr;
+	char				iwevent_data[QDRV_NETDEBUG_IWEVENT_LENGTH];
+} __packed;
+
+/* @NDB@: type=7 */
+/** \brief Netdebug record carrying a memory statistics dump. */
+struct qdrv_netdebug_mem {
+	struct qdrv_pktlogger_hdr	ndb_hdr;
+	u_int8_t			stvec_data[QDRV_NETDEBUG_MEM_DATALEN];
+} __packed;
+
+/* @NDB@: type=8 */
+/** \brief Netdebug record carrying SU/MU rate adaptation statistics. */
+struct qdrv_netdebug_rate {
+	struct qdrv_pktlogger_hdr	ndb_hdr;
+	struct qtn_rate_su_tx_stats	rate_su_tx_stats[RATES_STATS_NUM_ADAPTATIONS];
+	struct qtn_rate_mu_tx_stats	rate_mu_tx_stats[RATES_STATS_NUM_ADAPTATIONS];
+	struct qtn_rate_gen_stats	rate_gen_stats;
+} __packed;
+
+/* @NDB@: type=3 */
+/** \brief Netdebug record carrying raw radar pulse information. */
+struct qdrv_radar_stats {
+	struct qdrv_pktlogger_hdr	ndb_hdr;
+	u_int32_t			numpulses;
+	/* Up to MAXPULSE pulses, PULSESIZE bytes each */
+	u_int8_t			pulseinfo[QDRV_NETDEBUG_RADAR_PULSESIZE *
+						  QDRV_NETDEBUG_RADAR_MAXPULSE];
+} __packed;
+
+/** \brief Per-MCS receive counters reported by the MuC (HT rates). */
+struct qdrv_muc_rx_rates {
+	u_int16_t			rx_mcs[IEEE80211_HT_RATE_MAXSIZE];
+	u_int16_t			rx_mcs_pad; /* unique name for packet logger */
+} __packed;
+
+/** \brief Per-MCS receive counters reported by the MuC (802.11ac/VHT rates). */
+struct qdrv_muc_rx_11acrates {
+	u_int16_t			rx_11ac_mcs[MUC_VHT_NUM_RATES];
+} __packed;
+
+/* @NDB@: type=6,payload=msg,payloadtype=varstring */
+/** \brief Netdebug record carrying a system log message. */
+struct qdrv_netdebug_sysmsg {
+	struct qdrv_pktlogger_hdr ndb_hdr;
+	char msg[QDRV_NETDEBUG_SYSMSG_LENGTH];
+} __packed;
+
+/**
+ * \brief Statistics on the traffic queueing discipline (qdisc).
+ *
+ * These statistics are used to track packets that are sent/dropped by the traffic
+ * policer on the Ethernet and wireless interfaces.
+ *
+ * The '_dropped' counters represent true packet loss, due to backpressure from lower
+ * parts of the system.
+ */
+struct qdrv_qdisc_stats {
+	/**
+	 * The number of packets queued via the qdisc for the wireless interface.
+	 */
+	u_int32_t wifi_sent;
+	/**
+	 * The number of packets dropped by the qdisc on the wireless interface.
+	 */
+	u_int32_t wifi_dropped;
+	/**
+	 * The number of packets queued via the qdisc for the Ethernet interface.
+	 */
+	u_int32_t eth_sent;
+	/**
+	 * The number of packets dropped by the qdisc on the Ethernet interface.
+	 */
+	u_int32_t eth_dropped;
+};
+
+/**
+ * \brief Statistics related to the EMAC.
+ *
+ * This structure contains statistics related to the EMAC block of the system.
+ */
+struct qdrv_emac_stats {
+	/**
+	 * The number of packets lost due to no DMA buffers being available on
+	 * receive. Each of these represents a genuine single packet loss on
+	 * Ethernet receive. One counter per EMAC (0 and 1).
+	 */
+	u_int32_t rx_emac0_dma_missed;
+	u_int32_t rx_emac1_dma_missed;
+};
+
+/* This struct must be kept in sync with qtn_scs_cnt */
+/** \brief SCS (ACI/CCI mitigation) event counters.
+ *  NOTE(review): "scs_iotcl" looks like a typo for "scs_ioctl", but it must
+ *  mirror qtn_scs_cnt, so do not rename it here alone. */
+struct qdrv_scs_cnt {
+	uint32_t scs_iotcl;
+	uint32_t scs_noqosnull;
+	uint32_t scs_1stcnflct;
+	uint32_t scs_qosnul_sntfail;
+	uint32_t scs_2ndcnflct;
+	uint32_t scs_low_dwell;
+	uint32_t scs_offch_scan;
+	uint32_t scs_sample_start;
+	uint32_t scs_sample_stop;
+};
+
+/** \brief TQE per-port output and drop counters (one pair per TQE port). */
+struct qdrv_tqe_stats {
+	uint32_t emac0_outc;
+	uint32_t emac1_outc;
+	uint32_t wmac_outc;
+	uint32_t lhost_outc;
+	uint32_t muc_outc;
+	uint32_t dsp_outc;
+	uint32_t auc_outc;
+	uint32_t pcie_outc;
+	/* Aggregate drop count followed by per-port drops */
+	uint32_t drop;
+	uint32_t emac0_drop;
+	uint32_t emac1_drop;
+	uint32_t wmac_drop;
+	uint32_t lhost_drop;
+	uint32_t muc_drop;
+	uint32_t dsp_drop;
+	uint32_t auc_drop;
+	uint32_t pcie_drop;
+};
+
+/**
+ * \brief HBM buffer request/release counters.
+ *
+ * req_* counts buffer requests and rel_* counts buffer releases, per
+ * requesting block and per HBM pool (TOPAZ_HBM_POOL_COUNT pools).
+ */
+struct qdrv_hbm_stats {
+	uint32_t req_lhost[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_muc[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_emac0[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_emac1[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_wmac[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_tqe[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_auc[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_dsp[TOPAZ_HBM_POOL_COUNT];
+	uint32_t req_pcie[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_lhost[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_muc[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_emac0[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_emac1[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_wmac[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_tqe[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_auc[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_dsp[TOPAZ_HBM_POOL_COUNT];
+	uint32_t rel_pcie[TOPAZ_HBM_POOL_COUNT];
+};
+
+/** \brief HBM aggregate counters: total requests/releases, their difference,
+ *  and pool over/underflow events. */
+struct qdrv_hbm_stats_oth {
+	uint32_t hbm_req;
+	uint32_t hbm_rel;
+	uint32_t hbm_diff;
+	uint32_t hbm_overflow;
+	uint32_t hbm_underflow;
+};
+
+/** \brief DSP MU-MIMO grouping state: per-slot AIDs of user 0/1 and group rank. */
+struct dsp_mu_stats {
+	uint32_t mu_u0_aid[QTN_MU_QMAT_MAX_SLOTS];
+	uint32_t mu_u1_aid[QTN_MU_QMAT_MAX_SLOTS];
+	int32_t  mu_rank[QTN_MU_QMAT_MAX_SLOTS];
+};
+
+/* @NDB@: type=1 */
+/** \brief The main netdebug statistics record, aggregating all stats blocks. */
+struct qdrv_netdebug_stats {
+	struct qdrv_pktlogger_hdr	ndb_hdr;
+	struct muc_rx_stats		stats_muc_rx;
+	struct qdrv_muc_rx_rates	rates_muc_rx;
+	struct qdrv_muc_rx_11acrates	rates_muc_rx_11ac;
+	struct muc_rx_bf_stats		stats_muc_rx_bf;
+	struct muc_tx_stats		stats_muc_tx;
+	struct qdrv_emac_stats		stats_emac;
+	struct qdrv_qdisc_stats		stats_qdisc;
+
+	struct qdrv_wlan_rx_stats	stats_wlan_rx;
+	struct qdrv_wlan_tx_stats	stats_wlan_tx;
+	struct qdrv_wlan_sm_stats	stats_wlan_sm;
+
+	struct qtn_rx_stats		stats_phy_rx;
+	struct qtn_tx_stats		stats_phy_tx;
+	struct qdrv_mem_stats		stats_mem;
+	struct qdrv_misc_stats		stats_misc;
+	struct qdrv_rx_evm_array	stats_evm;
+	struct qdrv_csw_count_stats	stats_csw;
+	struct qdrv_tx_pd_array		stats_pd_vol;
+	struct qdrv_slab_watch		stats_slab;
+	struct qdrv_scs_cnt		stats_scs_cnt;
+	struct qdrv_auc_intr_stats	stats_auc_intr_count;
+	struct auc_dbg_counters		stats_auc_debug_counts;
+	struct qdrv_tqe_stats		stats_tqe;
+	struct qdrv_tqe_cgq_stats	stats_cgq;
+	struct qdrv_hbm_stats		stats_hbm;
+	struct qdrv_hbm_stats_oth	stats_hbm_oth;
+	struct dsp_mu_stats		stats_dsp_mu;
+} __packed;
+
+/* TBD */
+#define QDRV_NETDEBUG_EVENT_STR_MAX	127
+/** \brief Netdebug event record (no pktlogger header; marked TBD above). */
+struct qdrv_netdebug_event {
+	u_int8_t 			version;
+	u_int8_t 			type;
+	u_int8_t 			reserved[2];		/* Reserved for alignment */
+	u_int32_t 			tstamp;
+	/* NUL-terminated message, at most QDRV_NETDEBUG_EVENT_STR_MAX chars */
+	u_int8_t 			event_msg[QDRV_NETDEBUG_EVENT_STR_MAX + 1];
+};
+
+/** \brief One node's PHY statistics, keyed by the node MAC address. */
+struct qdrv_netdebug_per_node_phystats {
+	uint8_t node_macaddr[IEEE80211_ADDR_LEN];
+	struct qtn_node_shared_stats per_node_phystats;
+} __packed;
+
+/*
+ * We always have at least one per-node statistic (the board itself).
+ * For APs we can have more than one, so a variable-length array is placed
+ * at the end of the structure (per_node_stats_count gives the element count).
+ */
+/* @NDB@: type=10,payload=per_node_stats,payloadtype=vararray(per_node_stats_count) */
+struct qdrv_netdebug_phystats {
+	struct qdrv_pktlogger_hdr		ndb_hdr;
+	struct qtn_stats			stats;
+	u_int32_t				per_node_stats_count;
+	struct qdrv_netdebug_per_node_phystats	per_node_stats[1];
+} __packed;
+
+/* @NDB@: type=11 */
+/** \brief Netdebug record carrying DSP statistics. */
+struct qdrv_netdebug_dspstats {
+	struct qdrv_pktlogger_hdr		ndb_hdr;
+	struct qtn_dsp_stats			stats;
+} __packed;
+
+/* @NDB@: type=12 */
+/** \brief Netdebug record carrying a MuC stack core dump. */
+struct qdrv_netdebug_core_dump {
+	struct qdrv_pktlogger_hdr		ndb_hdr;
+	char					data[CONFIG_ARC_MUC_STACK_SIZE];
+} __packed;
+
+extern int g_triggers_on;
+
+int topaz_read_internal_temp_sens(int *temp);
+void qdrv_halt_muc(void);
+int get_temp_zone_from_tprofile(void);
+int convert_temp_index(int temp);
+
+/* Support wlan modules */
+int qdrv_rx_poll(struct napi_struct *napi, int budget);
+int qdrv_rx_start(struct qdrv_mac *mac);
+int qdrv_rx_stop(struct qdrv_mac *mac);
+int qdrv_rx_init(struct qdrv_wlan *qw, struct host_ioctl_hifinfo *hifinfo);
+int qdrv_rx_exit(struct qdrv_wlan *qw);
+
+void qdrv_tx_done_flush_vap(struct qdrv_vap *qv);
+int qdrv_tx_hardstart(struct sk_buff *skb, struct net_device *dev);
+struct host_txdesc * qdrv_tx_get_mgt_txdesc(struct sk_buff *skb, struct net_device *dev);
+void qdrv_tx_release_txdesc(struct qdrv_wlan *qw, struct lhost_txdesc* txdesc);
+int qdrv_tx_start(struct qdrv_mac *mac);
+int qdrv_tx_stop(struct qdrv_mac *mac);
+int qdrv_tx_eth_pause_init(struct qdrv_wlan *qw);
+int qdrv_tx_init(struct qdrv_mac *mac, struct host_ioctl_hifinfo *hifinfo,
+	u32 arg2);
+int qdrv_tx_exit(struct qdrv_wlan *qw);
+void qdrv_tx_ba_establish(struct qdrv_vap *qv,
+		struct ieee80211_node *ni, uint8_t tid);
+
+int qdrv_scan_start(struct qdrv_mac *mac);
+int qdrv_scan_stop(struct qdrv_mac *mac);
+int qdrv_scan_init(struct qdrv_wlan *qw, struct host_ioctl_hifinfo *hifinfo);
+int qdrv_scan_exit(struct qdrv_wlan *qw);
+int qdrv_async_cca_read(struct ieee80211com *ic, const struct ieee80211_channel *sample_channel,
+		u_int64_t start_tsf, u_int32_t sample_millis);
+
+int qdrv_ap_isolate_filter(struct ieee80211_node *ni, struct sk_buff *skb);
+
+int qdrv_hostlink_msg_cmd(struct qdrv_wlan *qw, u_int32_t cmd, u_int32_t arg);
+int qdrv_hostlink_msg_create_vap(struct qdrv_wlan *qw, const char *name, const uint8_t *mac_addr,
+		int devid, int opmode, int flags);
+int qdrv_hostlink_msg_delete_vap(struct qdrv_wlan *qw, struct net_device *vdev);
+int qdrv_hostlink_start(struct qdrv_mac *mac);
+int qdrv_hostlink_stop(struct qdrv_mac *mac);
+int qdrv_hostlink_init(struct qdrv_wlan *qw,
+	struct host_ioctl_hifinfo *hifinfo);
+int qdrv_hostlink_exit(struct qdrv_wlan *qw);
+int qdrv_hostlink_store_txpow(struct qdrv_wlan *qw, u_int32_t txpower);
+int qdrv_hostlink_setchan(struct qdrv_wlan *qw, uint32_t freq_band, uint32_t qtn_chan);
+int qdrv_hostlink_sample_chan_cancel(struct qdrv_wlan *qw, struct qtn_samp_chan_info *);
+int qdrv_hostlink_sample_chan(struct qdrv_wlan *qw, struct qtn_samp_chan_info *);
+int qdrv_hostlink_remain_chan(struct qdrv_wlan *qw, struct qtn_remain_chan_info *remain_chan_bus);
+int qdrv_hostlink_set_ocac(struct qdrv_wlan *qw, struct qtn_ocac_info *);
+int qdrv_hostlink_suspend_off_chan(struct qdrv_wlan *qw, uint32_t suspend);
+int qdrv_hostlink_meas_chan(struct qdrv_wlan *qw, struct qtn_meas_chan_info *meas_chan_bus);
+int qdrv_hostlink_rxgain_params(struct qdrv_wlan *qw, uint32_t index, struct qtn_rf_rxgain_params *rx_gain_params);
+#ifdef QTN_BG_SCAN
+int qdrv_hostlink_bgscan_chan(struct qdrv_wlan *qw, struct qtn_scan_chan_info *);
+#endif /* QTN_BG_SCAN */
+int qdrv_hostlink_setchan_deferred(struct qdrv_wlan *qw, struct qtn_csa_info*);
+int qdrv_hostlink_setscanmode(struct qdrv_wlan *qw, u_int32_t scanmode);
+int qdrv_hostlink_xmitctl(struct qdrv_wlan *qw, bool enable_xmit);
+int qdrv_hostlink_msg_calcmd(struct qdrv_wlan *qw, int cmdlen, dma_addr_t cmd_dma);
+//int qdrv_hostlink_do_txpwr_cal(struct qdrv_wlan *qw, int temp_idx, int pwr);
+int qdrv_hostlink_msg_set_wifi_macaddr( struct qdrv_wlan *qw, u8 *new_macaddr );
+int qdrv_hlink_get_outstand_msgs(struct qdrv_wlan *qw);
+int qdrv_hostlink_set_hrflags(struct qdrv_wlan *qw, u_int32_t flags);
+int qdrv_hostlink_power_save(struct qdrv_wlan *qw, int param, int val);
+int qdrv_hostlink_tx_airtime_control(struct qdrv_wlan *qw, uint32_t value);
+int qdrv_hostlink_mu_group_update(struct qdrv_wlan *qw, struct qtn_mu_group_update_args *args);
+void* qdrv_hostlink_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag);
+void qdrv_hostlink_free_coherent(struct device *dev, size_t size, void *kvaddr, dma_addr_t dma_handle);
+extern u_int8_t get_bootcfg_scancnt(void);
+uint8_t get_bootcfg_two_by_four_configuration(void);
+enum hw_opt_t get_bootcfg_bond_opt(void);
+
+#ifdef CONFIG_QVSP
+int qdrv_hostlink_qvsp(struct qdrv_wlan *qw, uint32_t param, uint32_t value);
+int qdrv_wlan_query_wds(struct ieee80211com *ic);
+#endif
+
+int qdrv_hostlink_killmuc(struct qdrv_wlan *qw);
+int qdrv_hostlink_use_rtscts(struct qdrv_wlan *qw, int rtscts_required);
+int qdrv_hostlink_send_ioctl_args(struct qdrv_wlan *qw, uint32_t command,
+	uint32_t arg1, uint32_t arg2);
+
+void qdrv_dump_replace_db(struct qdrv_wlan *qw);
+int qdrv_hostlink_enable_flush_data(struct qdrv_wlan *qw, int enable);
+int qdrv_hostlink_update_ocac_state_ie(struct qdrv_wlan *qw, uint8_t state, uint8_t param);
+
+int qdrv_wlan_80211_set_bcn_scheme(struct ieee80211vap *vap, int param, int value);
+int qdrv_hostlink_change_bcn_scheme(struct qdrv_vap *qv, int param, int value);
+
+/* Main wlan module */
+#if 0
+int qdrv_wlan_start(void *data, char *name);
+#endif
+int qdrv_wlan_stats(void *data);
+int qdrv_wlan_start_vap(struct qdrv_wlan *qw, const char *name,
+	uint8_t *mac_addr, int devid, int opmode, int flags);
+int qdrv_wlan_stop_vap(struct qdrv_mac *mac, struct net_device *vdev);
+int qdrv_wlan_init(struct qdrv_mac *mac, struct host_ioctl_hifinfo *hifinfo,
+	u32 arg1, u32 arg2);
+int qdrv_wlan_exit(struct qdrv_mac *mac);
+int qdrv_wlan_get_assoc_queue_info(void *data);
+int qdrv_wlan_get_assoc_info(void *data);
+#ifdef CONFIG_NAC_MONITOR
+int qdrv_wlan_get_nac_info(void *data);
+#endif
+int qdrv_wps_button_init(struct net_device *dev);
+void qdrv_wps_button_exit(void);
+struct net_device *qdrv_wps_button_get_dev(void);
+int qdrv_wlan_tkip_mic_error(struct qdrv_mac *mac, int devid, int count);
+int qdrv_wlan_ba_is_ok(struct ieee80211_node *ni, int tid, int direction);
+void qdrv_wlan_igmp_query_timer_start(struct qdrv_wlan *qw);
+void qdrv_wlan_igmp_timer_stop(struct qdrv_wlan *qw);
+void qdrv_wlan_drop_ba(struct ieee80211_node *ni, int tid, int tx, int reason);
+void qdrv_wlan_cleanup_before_reload(struct ieee80211com *ic);
+int qdrv_get_br_ipaddr(struct qdrv_wlan *qw, __be32 *ipaddr);
+int qdrv_is_bridge_ipaddr(struct qdrv_wlan *qw, __be32 ipaddr);
+void qdrv_wlan_dump_ba(struct ieee80211_node *ni);
+void qdrv_wlan_stats_prot(struct qdrv_wlan *qw, uint8_t is_tx, uint16_t ether_type,
+				uint8_t ip_proto);
+int qdrv_proxy_arp(struct ieee80211vap *iv,
+		struct qdrv_wlan *qw,
+		struct ieee80211_node *ni_rx,
+		uint8_t *data_start);
+#ifdef CONFIG_IPV6
+int qdrv_wlan_handle_neigh_msg(struct ieee80211vap *vap, struct qdrv_wlan *qw,
+			uint8_t *data_start, uint8_t in_tx,  struct sk_buff *skb,
+			uint8_t ip_proto, void *proto_data);
+#endif
+#ifdef CONFIG_QVSP
+int qdrv_wlan_vsp_3rdpt_init(struct qdrv_wlan *qw);
+void qdrv_wlan_vsp_3rdpt_exit(struct qdrv_wlan *qw);
+#endif
+
+void qdrv_wlan_vlan_enable(struct ieee80211com *ic, int enable);
+int qdrv_hostlink_vlan_enable(struct qdrv_wlan *qw, int enable);
+
+void qdrv_tx_airtime_control(struct ieee80211vap *vap, uint32_t vaule);
+
+void qdrv_mu_grp_update(struct ieee80211com *ic, struct qtn_mu_group_update_args *args);
+
+/* Accessor: recover the qdrv_wlan instance stashed in the MAC's data pointer. */
+static inline struct qdrv_wlan *qdrv_mac_get_wlan(struct qdrv_mac *mac)
+{
+	struct qdrv_wlan *qw = (struct qdrv_wlan *) mac->data;
+
+	return qw;
+}
+
+/*
+ * Return 1 if a broadcast frame should be forwarded: the destination is the
+ * broadcast address and the VAP has reliable broadcast enabled; 0 otherwise.
+ */
+static inline int
+qdrv_wlan_bc_should_forward(const struct ether_header *eh,
+			struct ieee80211vap *iv)
+{
+	return (ieee80211_is_bcst(eh->ether_dhost) && iv->iv_reliable_bcst) ? 1 : 0;
+}
+
+/*
+ * Decide whether a multicast frame must be dropped.
+ *
+ * Returns 1 when either:
+ *  - the frame is multicast on an AP VAP with no associated stations
+ *    (ensures no transmissions prior to CAC completion), or
+ *  - the frame is multicast data addressed to the VAP node, is not
+ *    ICMPv6/IGMP, forwarding of unknown multicast is disabled, and the
+ *    payload is multicast IP data per iputil_is_mc_data().
+ * Returns 0 otherwise.
+ */
+static inline int
+qdrv_wlan_mc_should_drop(const struct ether_header *eh, void *p_iphdr,
+			struct ieee80211vap *iv, bool is_vap_node, uint8_t ip_proto)
+{
+	if (!IEEE80211_IS_MULTICAST(eh->ether_dhost))
+		return 0;
+
+	/* Make sure frames are not transmitted prior to CAC */
+	if (iv->iv_opmode == IEEE80211_M_HOSTAP && iv->iv_ic->ic_sta_assoc == 0)
+		return 1;
+
+	if (is_vap_node &&
+			ip_proto != IPPROTO_ICMPV6 &&
+			ip_proto != IPPROTO_IGMP &&
+			!iv->iv_forward_unknown_mc &&
+			iputil_is_mc_data(eh, p_iphdr))
+		return 1;
+
+	return 0;
+}
+
+static inline int
+qdrv_wlan_mc_should_forward(const struct ether_header *eh, void *p_iphdr,
+			struct ieee80211vap *iv, bool is_vap_node)
+{
+	if (iputil_eth_is_multicast(eh) &&
+			is_vap_node &&
+			iv->iv_forward_unknown_mc) {
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int qdrv_wlan_is_4addr_mc(const struct ether_header *eh, u_int8_t *data_start,
+		struct ieee80211vap *iv, bool is_vap_node)
+{
+	if (unlikely(IEEE80211_IS_MULTICAST(eh->ether_dhost) &&
+		(iputil_is_lncb(eh->ether_dhost, data_start) ||
+			qdrv_wlan_mc_should_forward(eh, data_start, iv, is_vap_node) ||
+			qdrv_wlan_bc_should_forward(eh, iv)))) {
+		return 1;
+	}
+
+	return 0;
+}
+
+#define IGMP_TYPE_QUERY 0x11
+#define IGMP_TYPE_MEMBERSHIP_REPORT1 0x12
+#define IGMP_TYPE_MEMBERSHIP_REPORT2 0x16
+#define IGMP_TYPE_LEAVE_GROUP 0x17
+#define IGMP_TYPE_MEMBERSHIP_REPORT3 0x22
+
+static inline const char *qdrv_igmp_type_to_string(int igmp_type)
+{
+	switch(igmp_type) {
+		case IGMP_TYPE_QUERY:
+			return "Query";
+		case IGMP_TYPE_MEMBERSHIP_REPORT1:
+			return "Membership Report (v1)";
+		case IGMP_TYPE_MEMBERSHIP_REPORT2:
+			return "Membership Report (v2)";
+		case IGMP_TYPE_MEMBERSHIP_REPORT3:
+			return "Membership Report (v3)";
+		case IGMP_TYPE_LEAVE_GROUP:
+			return "Leave Group";
+		//default:
+			/* Fall through */
+	}
+	return "Unknown";
+}
+
+static inline int qdrv_igmp_type(struct iphdr *p_iphdr, int len)
+{
+	/* Incomplete, but enough for the field we're interested in. */
+	struct igmphdr {
+		u_int8_t type;
+	};
+	if (len > (sizeof(*p_iphdr) + sizeof(struct igmphdr))) {
+		if (p_iphdr->protocol == IPPROTO_IGMP) {
+			/* Size of IP header is in 4 byte words */
+			int hlen = p_iphdr->ihl * (sizeof(u_int32_t));
+			struct igmphdr *p_igmp = (struct igmphdr *)((u_int8_t *)p_iphdr + hlen);
+			return p_igmp->type;
+		}
+	}
+	return 0;
+}
+
+/* Is this an IGMP query? */
+static inline int qdrv_is_igmp_query(struct iphdr *p_iphdr, int len)
+{
+	if (qdrv_igmp_type(p_iphdr, len) == IGMP_TYPE_QUERY) {
+		return 1;
+	}
+	return 0;
+}
+
+void qdrv_channel_switch_record(struct ieee80211com *ic, struct ieee80211_channel *new_chan,
+		uint32_t reason);
+void qdrv_channel_switch_reason_record(struct ieee80211com *ic, int reason);
+int8_t qdrv_get_local_tx_power(struct ieee80211com *ic);
+int qdrv_get_local_link_margin(struct ieee80211_node *ni, int8_t *result);
+int qdrv_wlan_get_shared_vap_stats(struct ieee80211vap *vap);
+int qdrv_wlan_reset_shared_vap_stats(struct ieee80211vap *vap);
+int qdrv_wlan_get_shared_node_stats(struct ieee80211_node *ni);
+int qdrv_wlan_reset_shared_node_stats(struct ieee80211_node *ni);
+int qdrv_rxgain_params(struct ieee80211com *ic, int index, struct qtn_rf_rxgain_params *params);
+void qdrv_wlan_get_dscp2ac_map(const uint8_t vapid, uint8_t *dscp2ac);
+void qdrv_wlan_set_dscp2ac_map(const uint8_t vapid, uint8_t *ip_dscp, uint8_t listlen, uint8_t ac);
+void wowlan_wakeup_host(void);
+/*
+ * Delete all bridge table entries for the peer.  They would eventually
+ * age out, but in the mean time data will be directed to the wrong
+ * sub_port (node_idx) until the bridge entries are updated by upstream
+ * traffic from the endpoint. Multicast port entries for the sub_port
+ * are not aged and would hang around forever, so they are also deleted.
+ */
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static inline void qdrv_remove_invalid_sub_port(struct ieee80211vap *vap,
+						uint32_t sub_port)
+{
+	struct net_bridge_port *br_port = get_br_port(vap->iv_dev);
+
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_BRIDGE, "Purge subport[0x%x]\n", sub_port);
+#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE))
+	if (br_fdb_delete_by_sub_port_hook && br_port) {
+		br_fdb_delete_by_sub_port_hook(br_port->br,
+				br_port, sub_port);
+	}
+#endif
+}
+#else
+static inline void qdrv_remove_invalid_sub_port(struct ieee80211vap *vap,
+		uint32_t sub_port)
+{
+	DBGPRINTF(DBG_LL_DEBUG, QDRV_LF_BRIDGE, "Purge subport[0x%x]\n", sub_port);
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	if (br_fdb_delete_by_sub_port_hook && vap->iv_dev->br_port) {
+		br_fdb_delete_by_sub_port_hook(vap->iv_dev->br_port->br,
+				vap->iv_dev->br_port, sub_port);
+	}
+#endif
+}
+#endif
+
+int qtn_tsensor_get_temperature(struct i2c_client *client, int *val);
+
+void qdrv_tqe_send_l2_ext_filter(struct qdrv_wlan *qw, struct sk_buff *skb);
+
+/*
+ * Called only if packet received has destination address as broadcast.
+ */
+static inline int check_if_exceeds_max_bcast_pps(struct bcast_pps_info *bcast_pps, int is_rx)
+{
+	u_int16_t *bcast_counter;
+	unsigned long *start_time;
+	if (is_rx) {
+		bcast_counter = &bcast_pps->rx_bcast_counter;
+		start_time = &bcast_pps->rx_bcast_pps_start_time;
+	} else {
+		bcast_counter = &bcast_pps->tx_bcast_counter;
+		start_time = &bcast_pps->tx_bcast_pps_start_time;
+	}
+	if (time_after(jiffies, ((*start_time) + HZ))) {
+		*start_time = jiffies;
+		*bcast_counter = 0;
+	}
+	if (*bcast_counter >= bcast_pps->max_bcast_pps) {
+		return 1;
+	}
+	(*bcast_counter)++;
+	return 0;
+}
+
+static inline int check_is_bcast_pps_exception(uint16_t ether_type, uint8_t ip_proto,
+			void *proto_data)
+{
+	if (ether_type == __constant_htons(ETH_P_ARP)) {
+		return 1;
+	}
+
+	if (ether_type == __constant_htons(ETH_P_IP) &&
+			proto_data &&
+			ip_proto == IPPROTO_UDP) {
+		struct udphdr *udph = proto_data;
+
+		if ((udph->dest == __constant_htons(DHCPSERVER_PORT)) ||
+				udph->dest == __constant_htons(DHCPCLIENT_PORT)) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static inline int bcast_pps_should_drop(const u8 *dst, struct bcast_pps_info *bcast_pps,
+		uint16_t eth_type, uint8_t ip_proto, void *proto_data, int is_rx)
+{
+	if ((ieee80211_is_bcst(dst))
+			&& (bcast_pps->max_bcast_pps)
+			&& (!check_is_bcast_pps_exception(eth_type, ip_proto, proto_data))
+			&& (check_if_exceeds_max_bcast_pps(bcast_pps, is_rx))) {
+		return 1;
+	}
+	return 0;
+}
+
+#endif
diff --git a/drivers/qtn/qvsp/qvsp_netdbg.h b/drivers/qtn/qvsp/qvsp_netdbg.h
new file mode 100644
index 0000000..88ecb67
--- /dev/null
+++ b/drivers/qtn/qvsp/qvsp_netdbg.h
@@ -0,0 +1,84 @@
+/*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2012 Quantenna Communications, Inc.                 **
+**                            All Rights Reserved                            **
+**                                                                           **
+*******************************************************************************
+EH0*/
+
+#ifndef _QVSP_NETDBG_H_
+#define _QVSP_NETDBG_H_
+
+#include "qtn/qvsp.h"
+#include <qtn/qvsp_common.h>
+#include <qtn/qvsp_data.h>
+
+struct qvsp_netdbg_data {
+	uint32_t		ndb_strm_tot;	/* must be first for stat_parser */
+	uint32_t		fat_last;
+	uint32_t		fat_avail;
+	uint32_t		fat_intf;
+	uint32_t		chan_last;
+	uint32_t		strm_tot;
+	uint32_t		strm_tot_qtn;
+	uint32_t		strm_tot_enabled;
+	uint32_t		strm_tot_disabled;
+	uint32_t		strm_tot_disabled_remote;
+	uint32_t		strm_tot_ac[WME_NUM_AC];
+
+	uint32_t		pkts_tx;
+	uint32_t		pkts_tx_sent;
+	uint32_t		kbps_tx;
+	uint32_t		kbps_tx_sent;
+	uint32_t		pkts_rx;
+	uint32_t		pkts_rx_sent;
+	uint32_t		kbps_rx;
+	uint32_t		kbps_rx_sent;
+
+	uint32_t		strm_enable;
+	uint32_t		strm_disable;
+	uint32_t		strm_disable_remote;
+	uint32_t		strm_reenable;
+	uint32_t		fat_over;
+	uint32_t		fat_under;
+	uint32_t		fat_chk_disable;
+	uint32_t		fat_chk_reenable;
+} __packed;
+
+struct qvsp_netdbg_strm {
+	uint32_t		strm_hash;
+	uint32_t		saddr[4];
+	uint32_t		sport;
+	uint32_t		daddr[4];
+	uint32_t		dport;
+	uint32_t		node_idx;
+	uint32_t		hairpin_id;
+	uint32_t		hairpin_type;
+	uint32_t		ac;
+	uint32_t		ip_proto;
+	enum qvsp_rule_dir_e	dir;
+	enum qvsp_strm_state_e	strm_state;
+	uint32_t		age;
+	uint32_t		pkts;
+	uint32_t		pkts_sent;
+	uint32_t		kbps;
+	uint32_t		kbps_sent;
+	uint32_t		phy_rate;
+	uint32_t		phy_rate_disabled;
+	uint32_t		ni_cost;
+	uint32_t		ni_strm_cost;
+	uint32_t		tx_last_mcs;
+	uint32_t		avg_per;
+} __packed;
+
+/* Used by the stat_parser */
+struct qvsp_netdbg_rec {
+#ifdef QVSP_NETDBG_DUMMY
+	struct qdrv_netdebug_hdr	ndb_hdr;
+#endif
+	struct qvsp_netdbg_data		vsp_data;
+	struct qvsp_netdbg_strm		strm_data;
+} __packed;
+
+#endif /* _QVSP_NETDBG_H_ */
diff --git a/drivers/qtn/radar/detect.h b/drivers/qtn/radar/detect.h
new file mode 100644
index 0000000..28a83e6
--- /dev/null
+++ b/drivers/qtn/radar/detect.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2009-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+#ifndef __DETECT_H__
+#define __DETECT_H__
+
+#include <linux/spinlock.h>
+#include "radar.h"
+
+/* structure to hold raw samples returned from hardware */
+typedef struct {
+	unsigned	start;
+	unsigned	stop;
+#if (RADAR_MAX_POW_TRACK)
+	unsigned	max_pow;
+#endif
+	bool		 tagged;
+} raw_pulse_t;
+
+/* configuration */
+typedef struct {
+	u8		region;
+	bool		sta_dfs;
+	unsigned	maxPowTh;
+	bool		maxPowEnable;
+
+	unsigned	hwPwUBnd;
+	unsigned	hwPwLBnd;
+
+	unsigned	swPwUBnd;
+	unsigned	swPwLBnd;
+
+	unsigned	ssrPriUBnd;
+	unsigned	ssrPriLBnd;
+
+	unsigned	ssrPwUBnd;
+	unsigned	ssrPwLBnd;
+
+	unsigned	itlvPriUBnd;
+	unsigned	itlvPriLBnd;
+
+	unsigned	itlvPriDiffUBnd;
+	unsigned	itlvPriDiffLBnd;
+
+	unsigned	itlvPwUBnd;
+	unsigned	itlvPwLBnd;
+
+	unsigned	lsrPriUBnd;
+	unsigned	lsrPriLBnd;
+
+	unsigned	lsrPwUBnd;
+	unsigned	lsrPwLBnd;
+
+	unsigned	lsr_fsm_invalid_maxcnt;
+
+	unsigned	maxPulseReadCnt;
+
+	unsigned	ssrMaxPulseCnt;
+	unsigned	maxFrbdnPri;
+	unsigned	minFrbdnPri;
+
+	unsigned	priNeighborRadius;
+	unsigned	pwNeighborRadius;
+	unsigned	lsrPwNeighborRadius;
+
+	unsigned	priClusterRadius;
+	unsigned	pwClusterRadius;
+
+	unsigned	minFHpri;
+	unsigned	maxFHpri;
+	int		minFHpritol;
+	int		maxFHpritol;
+	unsigned	minFHw;
+	unsigned	maxFHw;
+	unsigned	localFHpercent;		/* detection percent */
+
+	unsigned	mergeSeparationMax;
+
+	unsigned	maxPulseCnt;
+	unsigned	maxPulseCntLSR;
+	unsigned	maxPulseCntFH;
+	unsigned	minDetectTh;
+	unsigned	lsrTimerShrt;
+
+	unsigned	lsr_min_pw_diff;
+	unsigned	lsr_min_pcnt_diff;
+	unsigned	lsr_min_chirp_detect;
+	unsigned	lsr_max_chirp_reject;
+	unsigned	lsr_min_diff_det_rej;
+	unsigned	chirp_town_freq_th;
+
+	unsigned	ocac_lsr_min_chirp_detect;
+	unsigned	ocac_lsr_burstcnt_lbnd;
+	unsigned	ocac_lsrTimerShrt;
+
+	bool		fhDetectEnable;
+	bool		itlvDetectEnable;
+	bool		lsrDetectEnable;
+	bool		detected_lsr_flag;
+	bool		lsrEnhancedAlg;
+	bool		localFHdetect;
+	bool		tag;
+
+	bool		rateFilter;
+	long		maxRate;
+	int		maxInstRate;
+	long		maxAllowedInstRate;	/* max instantaneous data rate for radar processing */
+
+	unsigned	win_size;
+	unsigned	win_per_pulse;
+
+	unsigned	zcwin_per_pulse;
+	unsigned	pw_2_zc_norm;
+	unsigned	pulsetown_det_th;
+	unsigned	pulsetown_det_th_eu_ocac;
+
+	unsigned	max_radar_pulse_cnt;
+
+	unsigned	max_lost_pulse_cnt;
+	unsigned	lsr_timer_long;
+	unsigned	lsr_burstcnt_lbnd;
+	unsigned	lsr_burstcnt_ubnd;
+	bool		lsr_off_center_det;
+	bool		lsr_boost;
+	bool		zc_diff_restrict;
+	unsigned	zc_min_diff;
+	char		region_str[7];
+
+	unsigned	Korea2PriLbnd;
+	unsigned	Korea2PriUbnd;
+} detect_cfg_t;
+
+/* statistics */
+typedef struct {
+	unsigned	detected_ssr;		/* SSR detections */
+	unsigned	detected_itlv;		/* ITLV radar detections */
+	unsigned	detected_lsr;		/* LSR detections */
+	unsigned	detected_rjt;		/* detections rejected based on the pulse history */
+
+	unsigned	numDetectReq;		/* radar detection requests */
+	unsigned	numSsrCandidate;	/* SSR candidates */
+	unsigned	numItlvCandidate;	/* interleaving radar candidates */
+	unsigned	numItlvVerifyStop;	/* stopped itlv radar verification */
+
+	unsigned	numRawPulse;		/* raw pulses recorded in the radar memory */
+	unsigned	numMaxPollingPulse;	/* max raw pulses recorded at polling */
+	unsigned	numProcRawPulse;	/* processed raw pulses recorded in the radar memory */
+	unsigned	numProcMaxPollingPulse;	/* max processed raw pulses recorded at polling */
+	unsigned	numReadPulse;		/* pulses read from the radar memory for processing */
+	unsigned	numLeftPulse;		/* pulses after preprocessing of read ones */
+	unsigned	numTagPulse;		/* tagged pulses among read ones */
+	unsigned	numMergePulse;		/* merged pulses */
+	unsigned	numPwFilterPulse;	/* pulses discarded because of out-of-range pulse width */
+	unsigned	numPowFilterPulse;	/* pulses discarded because of low maximum power */
+
+	unsigned	numLsrLikeBurst;	/* bursts that look part of a long sequence radar */
+
+	unsigned	lastRadarMemIdx;
+
+	unsigned	burst_cnt;
+	unsigned	pulse_cnt;
+	unsigned	invalid_cnt;
+
+	unsigned	max_pw;
+	unsigned	min_pw;
+	unsigned	min_pulsecnt;
+	unsigned	max_pulsecnt;
+
+	unsigned	chirp_det_cnt;
+	unsigned	chirp_rej_cnt;
+	unsigned	chirp_nsi_cnt;
+
+	int zc_index_hist[MAX_RADAR_PULSE_READ_CNT];
+
+
+#define RADAR_PS_SIZE	10
+	int		radar_ps_buf [RADAR_PS_SIZE];
+	int		last_radar_ps_cnt;
+	int		max_ps_cnt;
+
+	/* info from the last detection */
+	struct {
+		unsigned	numPulse;
+		unsigned	pri[3];
+		unsigned	pw;		/* pulse width */
+	} last;
+} detect_sta_t;
+
+struct detect_drv_sample_t {
+	spinlock_t	lock;
+	int		tx_pkts;	/* packets transmitted by driver in last sample period */
+	int		tx_bytes;	/* bytes transmitted by driver in last sample period */
+};
+
+typedef struct {
+	unsigned	start;
+	unsigned	pw;
+	unsigned	pri;
+} pulse_info_t;
+
+#define RADAR_REGION_US			(1)
+#define RADAR_REGION_EU			(2)
+#define RADAR_REGION_JP			(3)
+#define PULSETOWN_POPULATION		(100)
+#define PULSETOWN_SIZE_MAX		(11) /* 9 */
+#define PULSETOWN_SIZE_MIN		(6)
+#define OCAC_PULSETOWN_SIZE_MIN		(4)
+#define OCAC_EU_WEATHER_PULSETOWN_SIZE_MIN		(2)
+#define PULSETOWN_ITLV_SIZE_MAX		(18)
+#define PULSETOWN_DETECTION_THRESHOLD2	(6)
+#define OCAC_PULSETOWN_DETECTION_THRESHOLD2	(4)
+#define OCAC_EU_WEATHER_PULSETOWN_DETECTION_THRESHOLD2	(2)
+#define RADAR_PULSE_MOD			(0x08000000)
+#define RADAR_PULSE_MASK		(RADAR_PULSE_MOD-1)
+#define RADAR_PULSE_MOD_SHIFT_SIZE	(27)
+#define DEBUG_PROC_PRINT_LENGTH		(PULSETOWN_ITLV_SIZE_MAX)
+#define RADAR_CLUSTER_RADIUS		(4)
+/* MAX_VALID_CLUSTERS >= floor(PULSETOWN_ITLV_SIZE_MAX / RADAR_CLUSTER_RADIUS) */
+#define MAX_VALID_CLUSTERS		(4)
+#define TOTAL_ITLV_RADAR_HITS		(11)
+#define LSR_BURST_MINPULSECNT		(1)
+#define LSR_BURST_MAXPULSECNT		(3)
+#define RADAR_PRI_UPPER_BOUND_1		(13868)
+#define RADAR_PRI_UPPER_BOUND_2		(14265)
+#define RADAR_PRI_UPPER_BOUND_3		(39980)
+#define RADAR_PRI_UPPER_BOUND_4		(38440)
+#define RADAR_PRI_LOWER_BOUND_1		(5020)
+#define RADAR_PRI_LOWER_BOUND_2		(13908)
+#define RADAR_PRI_LOWER_BOUND_3		(14305)
+#define RADAR_PRI_LOWER_BOUND_4		(38490)
+#define W53_W56_SEPARATION_CHANNEL	(100)
+#define MAX_CHANNEL_NO_FOR_JAPAN	(136)
+#define MIN_CHANNEL_NO_FOR_JAPAN	(36)
+
+typedef struct {
+	pulse_info_t	pulse[PULSETOWN_POPULATION];
+	unsigned	size;
+} pulse_town_t;
+
+/* control block */
+typedef struct {
+	detect_cfg_t	cfg;
+	detect_sta_t	sta;
+
+	pulse_town_t	town;
+} detect_cb_t;
+
+/* zero-crossing call-back block */
+typedef struct {
+	int		zc_mat[RADAR_ZERO_CROSS_PROC_PULSE_DEPTH][ZC_PER_PULSE];
+	bool		is_zc_accurate;
+} zc_cb_t;
+
+extern bool detect_dbg_verbose;
+extern bool detect_dbg_rdisp;
+
+extern detect_cb_t *detect_cb;
+
+void detect_init(detect_cb_t *cb);
+void detect_reset(void);
+bool detect_radar(raw_pulse_t *s, int count, int count_last_window, zc_cb_t *zc_cb);
+bool detect_radar_poll(void);
+bool isit_itlv(unsigned *pri_vec);
+bool radar_post_detection_process(raw_pulse_t *s, int count);
+struct detect_drv_sample_t *detect_drv_sample_loc_get(void);
+
+static inline unsigned diff(unsigned a, unsigned b)
+{
+	return (a < b) ? (b-a) : (a-b);
+}
+
+#endif /* __DETECT_H__ */
diff --git a/drivers/qtn/radar/radar.h b/drivers/qtn/radar/radar.h
new file mode 100644
index 0000000..975668a
--- /dev/null
+++ b/drivers/qtn/radar/radar.h
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2009-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ */
+#ifndef __RADAR_H__
+#define __RADAR_H__
+
+#include <qtn/registers.h>
+#include <qtn/shared_params.h>
+#include <linux/spinlock.h>
+
+#define MIN_PULSE_SEP				(5)	/* 5 usec is minimum pulse separation */
+							/* Anything less is merged */
+
+/* Radar registers */
+#define QT4_GLBL_BASE_ADDR			(0xe6000000)
+#define QT4_TD_0_BASE_ADDR			(0xe6090000)
+#define QT4_TD_1_BASE_ADDR			(0xe6091000)
+#define QT4_RADAR_TH1_ADDR			(QT4_TD_1_BASE_ADDR + 0x4e0)
+#define QT4_RADAR_TH2_ADDR			(QT4_TD_1_BASE_ADDR + 0x4e4)
+#define QT4_RADAR_M_ADDR			(QT4_TD_1_BASE_ADDR + 0x4c8)
+#define QT4_RADAR_N_ADDR			(QT4_TD_1_BASE_ADDR + 0x4cc)
+#define QT4_RADAR_DOWNSAMPLE_3_0_ADDR		(QT4_TD_1_BASE_ADDR + 0x4bc)
+#define QT4_RADAR_NUMDELAY_3_0_ADDR		(QT4_TD_1_BASE_ADDR + 0x4d0)
+#define QT4_RADAR_TAGMODE_ADDR			(QT4_TD_1_BASE_ADDR + 0x4dc)
+#define QT4_RADAR_MINWIDTH_ADDR			(QT4_TD_1_BASE_ADDR + 0x4c4)
+#define QT4_RADAR_MAXWIDTH_ADDR			(QT4_TD_1_BASE_ADDR + 0x4c0)
+#define QT4_RADAR_RX_EXTENSION_AFTER_ADDR	(QT4_GLBL_BASE_ADDR + 0x0e0)
+#define REG_RADAR_RX_EXTENSION_AFTER_DEFAULT	(0x00ff)
+#define QT4_RADAR_RX_EXTENSION_BEFORE_ADDR	(QT4_TD_1_BASE_ADDR + 0x568)
+#define REG_RADAR_RX_EXTENSION_BEFORE_DEFAULT	(0x00ff)
+#define QT4_RADAR_RX_IN_PROG_EN_ADDR		(QT4_TD_1_BASE_ADDR + 0x4d4)
+#define REG_RADAR_RX_IN_PROG_EN_DEFAULT		(0x1)
+#define QT4_RADAR_SPRAM_IN_PROG_EN_ADDR		(QT4_TD_1_BASE_ADDR + 0x560)
+#define REG_RADAR_SPRAM_IN_PROG_EN_DEFAULT	(0x0) /*(0x1) */
+#define QT4_RADAR_INTR_EN_ADDR			(QT4_TD_1_BASE_ADDR + 0X524)
+#define QT4_RADAR_CNT_L				(QT4_TD_1_BASE_ADDR + 0x518)
+#define QT4_RADAR_PTR_L				(QT4_TD_1_BASE_ADDR + 0x51c)
+#define QT4_RADAR_PTR_H				(QT4_TD_1_BASE_ADDR + 0x520)
+#define QT4_RADAR_MEM_L				(0xe6401000)
+#define QT4_RADAR_MEM_H				(0xe6401400)
+#define ZC_OVERFLOW_ADDR_OFFSET			(64) /* 4096 % (21 * 4) */
+#define ZC_ADDR_MASK				(0x7FF)
+#define RADAR_ZC_MEM_SHIFT			(2)
+#define RADAR_ZC_MASK_1				(0x000000FF)
+#define RADAR_ZC_MASK_2				(0x0000FF00)
+#define RADAR_ZC_MASK_3				(0x00FF0000)
+#define RADAR_ZC_MASK_4				(0xFF000000)
+#define RADAR_ZERO_CROSS_MEM_DEPTH		(896)
+#define RADAR_ZERO_CROSS_MEM_PULSE_DEPTH	(48) /* floor(1024 / 21) */
+#define RADAR_ZERO_CROSS_PROC_PULSE_DEPTH	(49) /* ceil(1024 / 21) */
+#define RADAR_ZERO_CROSS_MEM_VALID_DEPTH	(42) /* floor(896 / 21) */
+#define PULSE_MEM_INC_STEP			(0x4)
+#define RADAR_PULSE_MEM_DEPTH			(256)
+#define QT4_RADAR_MAXWR				(QT4_TD_1_BASE_ADDR + 0x514)
+#define QT4_RADAR_ZWIN				(QT4_TD_1_BASE_ADDR + 0x510)
+#define QT4_RADAR_ZC_ADR			(0xe6400000)
+#define QT4_RADAR_ZC_ADR_INVLD			(0xe6400e00)
+#define QT4_RADAR_ZC_ADR_END			(0xe6401000)
+#define QT_BB_TD_RADAR_ZC_MAX_DEPTH		(0x1000)
+#define QT4_RADAR_CUR_CHMEM_ADR			(QT4_TD_1_BASE_ADDR + 0x530)
+#define QT4_RADAR_NEW_MODE			(QT4_TD_1_BASE_ADDR + 0x564)
+#define QT_RADAR_NEW_MODE_DEFAULT		(0x1)
+#define QT4_RADAR_NEW_MODE_MEM_L_PTR		(QT4_TD_1_BASE_ADDR + 0x570)
+#define QT4_RADAR_NEW_MODE_START_PTR		(QT4_TD_1_BASE_ADDR + 0x56c)
+#define FIFO_SIZE				(RADAR_PULSE_MEM_DEPTH)
+#define RADAR_TAG_BIT				(0x08000000)
+#define TH1_MAX					(0x7ffffff)
+#define TH1_DEFAULT				(0x34000)
+#define TH2_DEFAULT				(0x8000)
+#define DSAMPLE_DEFAULT				(5)
+#define NDELAY_DEFAULT				(5)
+#define RXINPROGEN_DEFAULT			(0x1)
+#define RADAR_IIR_HPF_ORDER_DEFAULT		(10)
+#define RADAR_IIR_LPF_SHIFT_DEFAULT		(0)
+#define RADAR_GAIN_NORM_DEFAULT			(0x82)
+#define RADAR_GAIN_NORM_TARGET_GAIN_HWVAL	(0x23)
+#define RADAR_GAIN_NORM_TARGET_GAIN_DEFAULT	(0x19)
+#define QT4_RADAR_IIR_HPF_ORDER			(QT4_TD_1_BASE_ADDR + 0x540)
+#define QT4_RADAR_IIR_LPF_SHIFT			(QT4_TD_1_BASE_ADDR + 0x504)
+#define QT4_RADAR_GAIN_NORM_ADDR		(QT4_TD_1_BASE_ADDR + 0x508)
+#define QT4_RADAR_GAIN_NORM_TARGET_GAIN_ADDR	(QT4_TD_1_BASE_ADDR + 0x534)
+#define QT4_RADAR_AGC_IN_PROG_ADDR		(QT4_TD_1_BASE_ADDR + 0x4d8)
+#define QT4_RADAR_TIMER_REC_EN_ADDR		(QT4_TD_0_BASE_ADDR + 0x530)
+#define TIMER_REC_EN_ADDR_MASK			(0x80000000)
+#define RADAR_AGC_IN_PROG_DEFAULT		(0x0)
+#define QT4_RADAR_DC_LUT_ADDR			(QT4_TD_1_BASE_ADDR + 0x558)
+#define RADAR_DC_LUT_DEFAULT			(0x0)
+#define WIN_PER_PULSE				(28)
+#define ZC_PER_PULSE				(4 * WIN_PER_PULSE)
+#define ZC_TO_READ_PER_POLLING			(18)
+#define MAX_PROC_PULSE_WRITE			(64)
+#define MAX_PROC_ZC_WRITE			(16)
+#define MAX_ZC_BAD_STATE_CTR			(2)
+#define MAX_PULSE_CNT_TO_RESET			(1)
+#define MAX_POLLING_CNT_BEFORE_RESET		(100)
+#define ABS_MAX_POLLING_CNT_BEFORE_RESET	(160)
+#define QT4_RADAR_NUM_OF_PULSES_B4_INT		(QT4_TD_1_BASE_ADDR + 0x4c8)
+#define QT4_RADAR_CLEAR_INT_STATUS		(QT4_GLBL_BASE_ADDR + 0x320)
+#define CLEAR_INT_STATUS_REG_RADAR_VAL		(0x2)
+#define RADAR_PROC_SIZE				(8192)
+#define RADAR_FH_PRI				(3330)
+#define RADAR_MAX_POW_TRACK			(1)
+#define RADAR_MAX_POW_MASK			(0xf8000000)
+#define MAX_RADAR_PULSE_READ_CNT		(128)
+#define MAX_RADAR_PULSE_READ_CNT_MASK		(0x7F)
+#define QT4_RADAR_IIR_A1			(QT4_TD_1_BASE_ADDR + 0x4e8)
+#define QT4_RADAR_IIR_A2			(QT4_TD_1_BASE_ADDR + 0x4ec)
+#define QT4_RADAR_IIR_A3			(QT4_TD_1_BASE_ADDR + 0x4f0)
+#define QT4_RADAR_IIR_B0			(QT4_TD_1_BASE_ADDR + 0x4f4)
+#define QT4_RADAR_IIR_B1			(QT4_TD_1_BASE_ADDR + 0x4f8)
+#define QT4_RADAR_IIR_B2			(QT4_TD_1_BASE_ADDR + 0x4fc)
+#define QT4_RADAR_IIR_B3			(QT4_TD_1_BASE_ADDR + 0x500)
+
+/*#define RADAR_IIR_A1_40				0x3015
+#define RADAR_IIR_A2_40				0xc92
+#define RADAR_IIR_A3_40				0x3c55
+#define RADAR_IIR_B0_40				0x20
+#define RADAR_IIR_B1_40				0x5f
+#define RADAR_IIR_B2_40				0x5f
+#define RADAR_IIR_B3_40				0x20
+
+#define RADAR_IIR_A1_80				0x39eb
+#define RADAR_IIR_A2_80				0x59e
+#define RADAR_IIR_A3_80				0x3e54
+#define RADAR_IIR_B0_80				0xbc
+#define RADAR_IIR_B1_80				0x233
+#define RADAR_IIR_B2_80				0x233
+#define RADAR_IIR_B3_80				0xbc  // cover 80% of bw [-32,32] for 80MHz, [-16, 16] for 40MHz*/
+
+#define RADAR_IIR_A1_40				0x33ec
+#define RADAR_IIR_A2_40				0x929
+#define RADAR_IIR_A3_40				0x3d4d
+#define RADAR_IIR_B0_40				0x4c
+#define RADAR_IIR_B1_40				0xe5
+#define RADAR_IIR_B2_40				0xe5
+#define RADAR_IIR_B3_40				0x4c 
+
+#define RADAR_IIR_A1_80				0x3F7E
+#define RADAR_IIR_A2_80				0x421
+#define RADAR_IIR_A3_80				0x3F0F
+#define RADAR_IIR_B0_80				0x156
+#define RADAR_IIR_B1_80				0x401
+#define RADAR_IIR_B2_80				0x401
+#define RADAR_IIR_B3_80				0x156 //cover 100% bw [-40, 40] for 80MHz, [-20, 20] for 40MHz
+
+
+#define QT4_RADAR_TX_EXT			(QT4_TD_1_BASE_ADDR + 0x53c)
+#define QT4_RADAR_TX_EXT_VALUE			0x140	// unit is 0.00625us
+
+
+#define QT3_RADAR_TH1_ADDR			(0xe6090520)
+#define QT3_RADAR_TH2_ADDR			(0xe6090524)
+#define QT3_RADAR_M_ADDR			(0xe6090508)
+#define QT3_RADAR_N_ADDR			(0xe609050c)
+#define QT3_RADAR_DOWNSAMPLE_3_0_ADDR		(0xe60904fc)
+#define QT3_RADAR_NUMDELAY_3_0_ADDR		(0xe6090510)
+#define QT3_RADAR_TAGMODE_ADDR			(0xe609051c)
+#define QT3_RADAR_MINWIDTH_ADDR			(0xe6090504)
+#define QT3_RADAR_MAXWIDTH_ADDR			(0xe6090500)
+#define QT3_RADAR_RX_EXTENSION_AFTER_ADDR	(0xe60000e0)
+#define QT3_RADAR_RX_EXTENSION_BEFORE_ADDR	(0xe60905a8)
+#define QT3_RADAR_RX_IN_PROG_EN_ADDR		(0xe6090514)
+#define QT3_RADAR_SPRAM_IN_PROG_EN_ADDR		(0xe60905a0)
+#define QT3_RADAR_INTR_EN_ADDR			(0xe6090564)
+#define QT3_RADAR_CNT_L				(0xe6090558)
+#define QT3_RADAR_PTR_L				(0xe609055c)
+#define QT3_RADAR_PTR_H				(0xe6090560)
+#define QT3_RADAR_MEM_L				(0xe6401000)
+#define QT3_RADAR_MEM_H				(0xe6401400)
+#define QT3_RADAR_MAXWR				(0xe6090554)
+#define QT3_RADAR_ZWIN				(0xe6090550)
+#define QT3_RADAR_ZC_ADR			(0xe6400000)
+#define QT3_RADAR_ZC_ADR_INVLD			(0xe6400e00)
+#define QT3_RADAR_ZC_ADR_END			(0xe6401000)
+#define QT3_RADAR_CUR_CHMEM_ADR			(0xe6090570)
+#define QT3_RADAR_NEW_MODE			(0xe60905a4)
+#define QT3_RADAR_NEW_MODE_MEM_L_PTR		(0xe60905b0)
+#define QT3_RADAR_NEW_MODE_START_PTR		(0xe60905ac)
+#define QT3_RADAR_IIR_HPF_ORDER			(0xe6090580)
+#define QT3_RADAR_IIR_LPF_SHIFT			(0xe6090544)
+#define QT3_RADAR_GAIN_NORM_ADDR		(0xe6090548)
+#define QT3_RADAR_GAIN_NORM_TARGET_GAIN_ADDR	(0xe6090574)
+#define QT3_RADAR_AGC_IN_PROG_ADDR		(0xe6090518)
+#define QT3_RADAR_TIMER_REC_EN_ADDR		(0xe60905f4)
+#define QT3_RADAR_DC_LUT_ADDR			(0xe6090598)
+#define QT3_RADAR_NUM_OF_PULSES_B4_INT		(0xe6090508)
+#define QT3_RADAR_CLEAR_INT_STATUS		(0xe6000320)
+#define QT3_TD_0_BASE_ADDR                      (0xe6090000)
+#define QT3_RADAR_IIR_A1                        (QT3_TD_0_BASE_ADDR + 0x528)
+#define QT3_RADAR_IIR_A2                        (QT3_TD_0_BASE_ADDR + 0x52c)
+#define QT3_RADAR_IIR_A3                        (QT3_TD_0_BASE_ADDR + 0x530)
+#define QT3_RADAR_IIR_B0                        (QT3_TD_0_BASE_ADDR + 0x534)
+#define QT3_RADAR_IIR_B1                        (QT3_TD_0_BASE_ADDR + 0x538)
+#define QT3_RADAR_IIR_B2                        (QT3_TD_0_BASE_ADDR + 0x53c)
+#define QT3_RADAR_IIR_B3                        (QT3_TD_0_BASE_ADDR + 0x540)
+#define QT3_RADAR_TX_EXT                        (QT3_TD_0_BASE_ADDR + 0x57c)
+#define QT3_RADAR_TX_EXT_VALUE                  0x00
+
+# define QT_RADAR(x)		QT4_RADAR_##x
+
+
+typedef struct {
+	u32		base[2];
+	u32		ptr;
+	u32		bb_base;
+	int		irq;
+	u32		flags;
+	u32		n_irq;
+
+	u32		poll_freq;
+
+	u32		th1;			/* threshold 1 */
+	u32		th2;			/* threshold 2 */
+
+	u32		flush;			/* timer wrap */
+	u32		irq_en;
+	spinlock_t	lock;
+	u32		chip;			/* chipID */
+
+	u32		radar_en;
+	void		(*callback)(void);
+	bool		(*radar_is_dfs_chan)(uint8_t wifi_chan);
+	bool		(*radar_is_dfs_weather_chan)(uint8_t wifi_chan);
+
+	void		*radar_stats_send_arg;
+	void		(*radar_stats_send)(void *data,
+	int		(*pulse_copy_iter)(void *dest, void *src, int pulse_indx),
+	void		*pulse_buf, int num_pulses);
+
+	/*
+	 * Fields for pulse history maintenance.
+	 * - all fields set to 0 during initialization
+	 * - at each polling, ph_buf[ph_idx] is updated with a new pulse count and then ph_idx is
+	 *   changed to next (ring)
+	 * - at each polling, ph_sum is updated
+	 */
+#define RADAR_PH_BUFLEN			20	/* The radar pulse history buffer length */
+#define RADAR_PH_BUFLEN_LSR		240	/* The LSR pulse history buffer length */
+#define RADAR_TIMER_BAD			9	/* Wait time in the bad state */
+#define RADAR_TIMER_BAD_MAX		24	/* Wait time when a rejection happens */
+#define RADAR_TIMER_STEP_DETECT		2	/* Radar timer step in the bad state when the detected radars match */
+#define RADAR_PS_TIMER_MAX		20
+#define RADAR_MIN_DETECT_PULSE_CNT	14	/* If number of pulses in a window are bigger than RADAR_MIN_DETECT_PULSE_CNT, there should be a valid detection */
+
+	u16		ph_buf[RADAR_PH_BUFLEN_LSR]; /* pulse count history buffer (ring) */
+	u16		ph_idx;			/* where in 'ph_buf' to write a new pulse count */
+	u32		ph_sum;			/* sum of all pulse counts in 'ph_buf' */
+	u16		ph_idx_LSR;		/* LSR variant: where in 'ph_buf' to write a new pulse count */
+	u32		ph_sum_LSR;		/* LSR variant: sum of all pulse counts in 'ph_buf' */
+	unsigned	last_rej_w;		/* Width of the last radar detection */
+	unsigned	last_rej_pri[3];	/* PRI of the last radar detection */
+	bool		curr_radar_state;	/* Current state of the radar logic; true -> good state, and false -> bad state */
+	int		bad_state_timer;	/* Current time at the bad state */
+	bool		bad_state_detection;	/* Has there been any radar detections in the bad state? */
+
+	unsigned	poll_cnt_after_reset;
+	unsigned	pulse_timer_reset_time;
+	unsigned	pulse_timer_reset_cnt;
+
+	u8		wifi_channel;
+	uint32_t	bw;
+} radar_cb_t;
+
+#define RADAR_OCAC_BEGIN	BIT(1)
+#define RADAR_OCAC_END		BIT(0)
+
+#define RADAR_OCAC_PTS_SIZE	10
+
+struct radar_fifo_pt_s {
+        uint8_t			ocac_status;
+        uint32_t		fifo_pt;
+};
+
+/*
+ * Shared by QDRV and RADAR
+ */
+struct radar_ocac_info_s {
+	spinlock_t		lock;
+	bool			ocac_enabled;
+	uint8_t			ocac_scan_chan;
+	struct radar_fifo_pt_s	ocac_radar_pts[RADAR_OCAC_PTS_SIZE];
+	uint8_t			array_ps;	/* array is ocac_radar_pts */
+	bool			weather_channel_yes;	/* 0: not weather channel, 1: weather channel */
+};
+
+#define DFS_RQMT_UNKNOWN        0
+#define DFS_RQMT_EU             1
+#define DFS_RQMT_US             2
+#define DFS_RQMT_JP             3
+#define DFS_RQMT_AU             4
+#define DFS_RQMT_BR		5
+#define DFS_RQMT_CL		6
+
+int radar_register(void (*dfs_mark_callback)(void));
+int radar_register_is_dfs_chan(
+	bool (*qdrv_radar_is_dfs_chan)(uint8_t wifi_chan));
+int radar_register_is_dfs_weather_chan(
+	bool (*qdrv_radar_is_dfs_weather_chan)(uint8_t wifi_chan));
+void radar_disable(void);
+void radar_enable(void);
+bool radar_start(const char *region);
+void radar_stop(void);
+int radar_register_statcb(void (*stats_send)(void *data,
+	int (*pulse_copy_iter)(void *dest, void *src, int pulse_indx),
+	void *pulse_buf, int num_pulses),
+	void *stats_send_arg);
+unsigned dfs_rqmt_code(const char *region);
+void radar_set_shared_params(struct shared_params *p_params);
+int radar_set_chan(uint8_t chan);
+uint8_t radar_get_chan(void);
+bool sta_dfs_is_region_required(void);
+void qdrv_sta_dfs_enable(int sta_dfs_enable);
+void qdrv_cac_instant_completed(void);
+struct radar_ocac_info_s *radar_ocac_info_addr_get(void);
+void radar_record_buffer_pt(uint32_t *pt);
+bool radar_get_status(void);
+void radar_set_bw(uint32_t bw);
+
+#endif /* __RADAR_H__ */
diff --git a/drivers/qtn/ruby/Kconfig b/drivers/qtn/ruby/Kconfig
new file mode 120000
index 0000000..cdb141e
--- /dev/null
+++ b/drivers/qtn/ruby/Kconfig
@@ -0,0 +1 @@
+Kconfig_bbic4_pss
\ No newline at end of file
diff --git a/drivers/qtn/ruby/Kconfig_bbic4 b/drivers/qtn/ruby/Kconfig_bbic4
new file mode 100644
index 0000000..345a095
--- /dev/null
+++ b/drivers/qtn/ruby/Kconfig_bbic4
@@ -0,0 +1,126 @@
+#
+# Quantenna Ruby board
+#
+
+menu "Quantenna"
+
+config QUANTENNA_RUBY
+	bool "Quantenna Ruby board support"
+	select WEXT_PRIV
+	select WIRELESS_EXT
+
+config ARCH_RUBY_NUMA
+	bool "Support of Ruby non-unified memory architecture"
+
+config QVSP
+	bool "Video Stream Protection"
+	help
+	  Advanced stream protection feature.
+
+config RUBY_PCIE_TARGET
+	bool "Include the PCIe target driver for board with a PCIe interface"
+	help
+	  A build for a board with a PCIe interface must select this option
+
+config RUBY_PCIE_HOST
+	bool "Include the PCIe host driver for board with a PCIe interface"
+	help
+	  A build for a board with a PCIe RootComplex support must select this option
+
+config TOPAZ_PCIE_TARGET
+	bool "Include the topaz PCIe target driver for board with a PCIe interface"
+	help
+	  A build for a board with a topaz PCIe interface must select this option
+
+config TOPAZ_PCIE_HOST
+	bool "Include topaz PCIe host interface for TOPAZ RC Host"
+	help
+	  A build for a board with a topaz PCIe RC Host support must select this option
+config PCIEPORTBUS
+	depends on TOPAZ_PCIE_HOST
+	bool "Include generic PCIe driver"
+	default y
+
+config TOPAZ_DBDC_HOST
+	bool "include topaz DBDC host interface for TOPAZ RC Host"
+	help
+	  A build for a board with a topaz RC Host support for DBDC must select this option
+
+config ARCH_RUBY_SRAM_IRQ_STACK
+	bool "Use dedicated SRAM stack for interrupts" if (!ARCH_ARC_LV2_INTR && ARCH_ARC_CURR_IN_REG && !PREEMPT)
+
+config KERNEL_TEXT_SNAPSHOTS
+	bool "Take snapshots of kernel text section to detect corruption"
+	default n
+	help
+	  Create /proc/kdump, which is a human readable hex readout
+	  of the kernel text section. Reading this several times then
+	  using diff utilities on the output can show which kernel
+	  text is being corrupted. Also allows kernel text section
+	  snapshots to be taken for analysis in the troubleshooting
+	  crash handler.
+
+config KERNEL_TEXT_SNAPSHOT_COUNT
+	int "Number of snapshots to hold"
+	default 2
+	depends on KERNEL_TEXT_SNAPSHOTS
+	help
+	  How many kernel text section snapshots to keep. More can be
+	  added if suspicious of particular areas of code corrupting
+	  and before and after shots are desired to check. Each snapshot
+	  adds several MB of data size.
+
+config ARCH_RUBY_EMAC_LIB
+	tristate "Common Arasan EMAC routines for Ruby / Topaz"
+	default y
+	help
+	  Routines common to Ruby and Topaz EMAC drivers. Includes PHY
+	  routines, initialization, PHY/MII/MDIO routines.
+
+config ARCH_RUBY_EMAC
+	tristate "Ruby Arasan EMAC driver"
+	default y
+	select ARCH_RUBY_EMAC_LIB
+	help
+	  Ruby Arasan EMAC AHB driver
+
+config ARCH_RUBY_EMAC_SMOOTHING
+	bool "Enable Arasan EMAC traffic bursts smoothing"
+	default y
+	depends on ARCH_RUBY_EMAC
+
+config ARCH_RUBY_EMAC_SMOOTHING_BURST_SIZE
+	int "Number of packets per burst"
+	default 48
+	depends on ARCH_RUBY_EMAC_SMOOTHING
+
+config ARCH_RUBY_EMAC_SMOOTHING_RATE
+	int "Number of packets per second"
+	default 50000
+	depends on ARCH_RUBY_EMAC_SMOOTHING
+
+config QUANTENNA_RESTRICT_WLAN_IP
+	bool "Restrict incoming IP packets on the WLAN interface for RGMII bridge operation"
+	default n
+	help
+	  Restrict the incoming IP traffic on the WLAN interface to prevent access to the
+	  bridge interface on RGMII boards.
+
+config SWITCH_RTL8365MB
+	tristate "Realtek RTL8365MB switch"
+	default n
+	depends on ARCH_RUBY_EMAC
+	help
+	  Realtek RTL8365MB driver
+
+config SWITCH_RTL8363SB
+	tristate "Realtek RTL8363SB switch"
+	default n
+	depends on ARCH_RUBY_EMAC
+	help
+	  Realtek RTL8363SB driver
+
+endmenu
+
+source "drivers/pci/pcie/Kconfig"
+
diff --git a/drivers/qtn/ruby/Kconfig_bbic4_pss b/drivers/qtn/ruby/Kconfig_bbic4_pss
new file mode 100644
index 0000000..61f4674
--- /dev/null
+++ b/drivers/qtn/ruby/Kconfig_bbic4_pss
@@ -0,0 +1,129 @@
+#
+# Quantenna Ruby board
+#
+
+menu "Quantenna"
+
+config QUANTENNA_RUBY
+	bool "Quantenna Ruby board support"
+	select WEXT_PRIV
+	select WIRELESS_EXT
+	select ARCH_REQUIRE_GPIOLIB
+
+config ARCH_RUBY_NUMA
+	bool "Support of Ruby non-unified memory architecture"
+
+config QVSP
+	bool "Video Stream Protection"
+	help
+	  Advanced stream protection feature.
+
+config RUBY_PCIE_TARGET
+	bool "Include the PCIe target driver for board with a PCIe interface"
+	help
+	  A build for a board with a PCIe interface must select this option
+
+config RUBY_PCIE_HOST
+	bool "Include the PCIe host driver for board with a PCIe interface"
+	help
+	  A build for a board with a PCIe RootComplex support must select this option
+
+config TOPAZ_PCIE_TARGET
+	bool "Include the topaz PCIe target driver for board with a PCIe interface"
+	help
+	  A build for a board with a topaz PCIe interface must select this option
+
+config TOPAZ_PCIE_HOST
+	bool "Include topaz PCIe host interface for TOPAZ RC Host"
+	help
+	  A build for a board with a topaz PCIe RC Host support must select this option
+config PCIEPORTBUS
+	depends on TOPAZ_PCIE_HOST
+	bool "Include generic PCIe driver"
+	default y
+
+config TOPAZ_DBDC_HOST
+	bool "include topaz DBDC host interface for TOPAZ RC Host"
+	help
+	  A build for a board with a topaz RC Host support for DBDC must select this option
+
+config ARCH_RUBY_SRAM_IRQ_STACK
+	bool "Use dedicated SRAM stack for interrupts" if (!ARCH_ARC_LV2_INTR && ARCH_ARC_CURR_IN_REG && !PREEMPT)
+
+config KERNEL_TEXT_SNAPSHOTS
+	bool "Take snapshots of kernel text section to detect corruption"
+	default n
+	help
+	  Create /proc/kdump, which is a human readable hex readout
+	  of the kernel text section. Reading this several times then
+	  using diff utilities on the output can show which kernel
+	  text is being corrupted. Also allows kernel text section
+	  snapshots to be taken for analysis in the troubleshooting
+	  crash handler.
+
+config KERNEL_TEXT_SNAPSHOT_COUNT
+	int "Number of snapshots to hold"
+	default 2
+	depends on KERNEL_TEXT_SNAPSHOTS
+	help
+	  How many kernel text section snapshots to keep. More can be
+	  added if suspicious of particular areas of code corrupting
+	  and before and after shots are desired to check. Each snapshot
+	  adds several MB of data size.
+
+config ARCH_RUBY_EMAC_LIB
+	tristate "Common Arasan EMAC routines for Ruby / Topaz"
+	default y
+	help
+	  Routines common to Ruby and Topaz EMAC drivers. Includes PHY
+	  routines, initialization, PHY/MII/MDIO routines.
+
+config ARCH_RUBY_EMAC
+	tristate "Ruby Arasan EMAC driver"
+	default y
+	select ARCH_RUBY_EMAC_LIB
+	help
+	  Ruby Arasan EMAC AHB driver
+
+config ARCH_RUBY_EMAC_SMOOTHING
+	bool "Enable Arasan EMAC traffic bursts smoothing"
+	default y
+	depends on ARCH_RUBY_EMAC
+
+config ARCH_RUBY_EMAC_SMOOTHING_BURST_SIZE
+	int "Number of packets per burst"
+	default 48
+	depends on ARCH_RUBY_EMAC_SMOOTHING
+
+config ARCH_RUBY_EMAC_SMOOTHING_RATE
+	int "Number of packets per second"
+	default 50000
+	depends on ARCH_RUBY_EMAC_SMOOTHING
+
+config QUANTENNA_RESTRICT_WLAN_IP
+	bool "Restrict incoming IP packets on the WLAN interface for RGMII bridge operation"
+	default n
+	help
+	  Restrict the incoming IP traffic on the WLAN interface to prevent access to the
+	  bridge interface on RGMII boards.
+
+config SWITCH_RTL8365MB
+	tristate "Realtek RTL8365MB switch"
+	default n
+	depends on ARCH_RUBY_EMAC
+	help
+	  Realtek RTL8365MB driver
+
+config SWITCH_RTL8363SB
+	tristate "Realtek RTL8363SB switch"
+	default n
+	depends on ARCH_RUBY_EMAC
+	help
+	  Realtek RTL8363SB driver
+
+config QTN_SKB_RECYCLE
+	bool "Support of QTN SKB recycle"
+	default n
+
+endmenu
+
diff --git a/drivers/qtn/ruby/Makefile b/drivers/qtn/ruby/Makefile
new file mode 100644
index 0000000..cf844ed
--- /dev/null
+++ b/drivers/qtn/ruby/Makefile
@@ -0,0 +1,38 @@
+EXTRA_CFLAGS +=	-Wall -Werror \
+		-I../include \
+		-I../common \
+		-I../drivers/include/shared \
+		-I../drivers/include/kernel
+
+obj-y += \
+	board_config.o \
+	gpio.o\
+	machine.o \
+	clock.o \
+	soc.o \
+	i2c_bus.o \
+	skb_recycle.o \
+	pm.o \
+	iputil.o \
+	dmautil.o \
+	health.o
+
+ifeq ($(RUBY_SBM_TEST),1)
+obj-y += ../topaz/hbm.o
+endif
+
+ifeq ($(QTN_EXTERNAL_MODULES),y)
+emaclib-objs += emac_lib.o ar823x.o mv88e6071.o
+obj-$(CONFIG_ARCH_RUBY_EMAC_LIB) += emaclib.o
+emac-objs += arasan_emac_ahb.o
+obj-$(CONFIG_ARCH_RUBY_EMAC) += emac.o
+endif
+
+obj-$(CONFIG_ARC_AHB_PCI_BRIDGE) += pcibios.o
+obj-$(CONFIG_ARC_AHB_PCI_BRIDGE) += pcibios_sysfs.o
+obj-$(CONFIG_PCI_MSI) += pci_msi.o
+
+
+obj-$(CONFIG_MTD) += spi_flash.o
+obj-$(CONFIG_MTD) += spi_api.o
+
diff --git a/drivers/qtn/ruby/ar823x.c b/drivers/qtn/ruby/ar823x.c
new file mode 100644
index 0000000..5e3f7dd
--- /dev/null
+++ b/drivers/qtn/ruby/ar823x.c
@@ -0,0 +1,452 @@
+/*
+ *  ar823x.c
+ *
+ *  Driver for the Atheros 8236 & 8327 switches
+ *
+ *  Copyright (c) Quantenna Communications Incorporated 2009.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+/********************************************************************
+    Atheros 823x MDIO programming details
+    The Arasan MDIO IP on our chip uses two registers to program
+    MDIO.
+    
+    data[16:0]
+    control
+    1   1111   1    00000   00000   
+    5   4321   0    98765   43210
+    -   ----   --   -----   -----
+    st  xxxx   op   reg     phy
+    
+    st: 1 - start, poll for completion
+    op: 1 - read, 0 write
+    
+    
+    These are encoded into the serial MDIO protocol as follows
+    
+    IEEE MDIO frame
+    33 22   22222   22211   11  1111110000000000
+    10 98   76543   21098   76  5432109876543210
+    -- --   -----   -----   --  ----------------
+    ST OP   PHY     REG     TA  DATA[15:0]
+    
+    TA and ST are encoded automatically by Arasan IP.
+    
+    
+    This device uses 18 bits to specify register addresses.
+    These bits are programmed into the device by paging as follows.
+    
+    aaaaaaaaa   aaaaaaaa   aa
+    111111111   00000000   00
+    876543210   98765432   10
+    ---------   --------   ------
+    page addr   reg addr   ignore
+    
+    Since the registers are all 32-bit, the lower two address
+    bits are discarded.  The page is written first using the
+    following format.  Note PHY is limited to three bits.
+    
+    8213 Page program command
+    -----------------------------------------------
+    33  22  22   222  22211   11  111111 0000000000
+    10  98  76   543  21098   76  543210 9876543210
+    --  --  --   ---  -----   --  ------ ----------
+    ST  OP  CMD  PHY  xxxxx   TA  xxxxxx page addr      
+    
+    CMD: 11 - page address write
+    CMD: 10 - reg access
+    
+    The tricky part is the reg access step following page programming
+    Since the register format of arasan swaps the order of reg and
+    phy, and since our register address spans these two fields, we
+    have to swizzle the bits into place.
+
+    8213 Reg read/write command
+    ------------------------------------------------
+    33  22  22    2222221   1   11  1111110000000000
+    10  98  76    5432109   8   76  5432109876543210
+    --  --  --    -------   -   --  ----------------
+    ST  OP  CMD   reg adr   W   TA  DATA[15:0]      
+    
+    W: 0 - lower 16 bits, 1: upper 16 bits
+    
+    Programming this operation into Arasan requires
+    
+    phy = 'b10 << 3 | regAddr[9:7]
+    reg = regAddr[6:2]
+    
+    mdioCmd = phy | reg << 5 | op | start
+    
+    
+********************************************************************/
+////////////////////////////////////////////////////////////////////
+// NOTE - we do not check for valid base in mdio access routines
+// use must ensure device is initialized and valid prior
+// to using MDIO funtions
+///////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////
+//      Includes
+////////////////////////////////////////////////////////////////////
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include "ar823x.h"
+#include <common/ruby_arasan_emac_ahb.h>
+
+////////////////////////////////////////////////////////////////////
+//      Defines
+////////////////////////////////////////////////////////////////////
+#define AR823x_MODE_CTRL            (0x04)
+
+// use phy mode and invert tx clock for better hold margin
+#define AR8236_MODE_MII_PHY         (0x80000600)
+#define AR8236_MODE_MII_MAC         (0x80000004)
+#define AR8236_FLOOD_MASK           (0x2c)
+#define AR8236_FLOOD_MASK_DEF       (0xfe7f007f)
+#define AR8236_PORT0_CTRL           (0x100)
+#define AR8236_PORT0_CTRL_DEF       (0x7d)
+#define AR8236_FLOW_LINK_EN         (0x1000)
+
+#define AR823x_MASK_CTL             (0)
+#define AR823x_MASK_CTL_RESET       (0x80000000)
+
+#define AR8327_MASK_CLEAR_DEF       (0)
+#define AR8327_MODE_RGMII_PHY     (0x07600000)
+//#define AR8327_MODE_RGMII_PHY       (0x07402000)
+#define AR8327_PWS_CTRL             (0x10)
+#define AR8327_PWS_CTRL_DEF         (0x40000000)
+#define AR8327_FWCTL_CTRL           (0x0624)
+#define AR8327_FWCTL_CTRL_DEF       (0x007f7f7f)
+#define AR8327_PORT6_CTRL           (0xc)
+#define AR8327_PORT6_CTRL_DEF       (0x01000000)
+#define AR8327_PORT0_CTRL           (0x7c)
+#define AR8327_PORT0_CTRL_DEF       (0x7e)
+
+#define AR823x_CPU_PORT_REG         (0x78)
+#define AR823x_CPU_PORT_EN          (1 << 8)
+#define AR823x_MIRROR_PORT_NONE     (0xf << 4)
+
+#define AR823x_MDIO_START           (1 << 15)
+#define AR823x_MDIO_WRITE           (0 << 10)
+#define AR823x_MDIO_READ            (1 << 10)
+#define AR823x_MDIO_HIGH_WORD       (1 << 0)
+
+#define AR823x_MDIO_TIMEOUT         (0x1000)
+#define AR823x_MDIO_PAGE            (0x18)
+#define AR823x_MDIO_NORM            (0x10)
+#define AR823x_PORT_CTRL(x)         (0x104 + 0x100 * (x))
+#define AR823x_PORT_VLAN(x)         (0x108 + 0x100 * (x))
+#define AR823x_NUM_PORTS            (5)
+
+#define AR823x_ARP_LEAKY_EN         (1 << 22)
+#define AR823x_LEARN_EN             (1 << 14)
+#define AR823x_FORWARD              (1 << 2)
+
+#define AR823x_MIN_PHY_NUM          (0)
+#define AR823x_MAX_PHY_NUM          (4)
+
+#define AR823x_QM_REG               (0x3c)
+#define AR823x_ARP_EN               (1 << 15)
+#define AR823x_ARP_REDIRECT         (1 << 14)
+
+////////////////////////////////////////////////////////////////////
+//      Types
+////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////
+//      Data
+////////////////////////////////////////////////////////////////////
+
+////////////////////////////////////////////////////////////////////
+//      Functions
+////////////////////////////////////////////////////////////////////
+#define AR823x_REG_WRITE(x,y)    (*(volatile unsigned int *)IO_ADDRESS(x) = (unsigned int)(y))
+#define AR823x_REG_READ(x)       (*(volatile unsigned int *)IO_ADDRESS(x))
+
+extern int mdc_clk_divisor;
+
+inline u32 ar823x_emac_rdreg(int reg)
+{
+	return AR823x_REG_READ(RUBY_ENET0_BASE_ADDR + reg);
+}
+
+inline void ar823x_emac_wrreg(int reg, u32 val)
+{
+	AR823x_REG_WRITE(RUBY_ENET0_BASE_ADDR + reg, val);
+}
+
+/*********************************************************************
+ Name:      ar823x_mdio_poll
+ Purpose:   mdio poll routine for AR823x device
+ Notes:     Checks for mdio operation complete
+*********************************************************************/
+int ar823x_mdio_poll(void)
+{
+	u32 timeout = AR823x_MDIO_TIMEOUT;
+
+	// poll until the MDIO START bit clears (operation complete)
+	while (timeout-- > 0) {
+		int status = ar823x_emac_rdreg(EMAC_MAC_MDIO_CTRL);
+		if ((status & AR823x_MDIO_START) == 0) {
+			return 0;
+		}
+	}
+
+	// loop exhausted without START clearing: the access timed out
+	// (testing timeout == 0 here is wrong: post-decrement leaves (u32)-1)
+	printk("ar823x mdio timeout\n");
+	return -1;
+}
+
+
+/*********************************************************************
+ Name:      ar823x_mdio_write
+ Purpose:   mdio write routine for AR823x device
+ Notes:     reg_addr[1]=1 determines high word
+*********************************************************************/
+int ar823x_mdio_write(struct mii_bus *bus, int phyAddr, int regAddr, u16 data)
+{
+	u32 highAddr = regAddr >> 9;
+	// need to swizzle the bits into arasan's fields which are different          
+	u32 rg = (regAddr & 0x3c) >> 1;
+	u32 ph = (regAddr & 0x1c0) >> 6;
+
+	// wait for completion
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	if (regAddr & 0x2) {
+		ar823x_emac_wrreg(EMAC_MAC_MDIO_DATA, highAddr);
+		ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL, (phyAddr & 0x1f) |
+				  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+				  AR823x_MDIO_START | AR823x_MDIO_PAGE);
+
+		// wait for completion
+		if (ar823x_mdio_poll() != 0) {
+			return -1;
+		}
+		ar823x_emac_wrreg(EMAC_MAC_MDIO_DATA, data);
+		ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL,
+				  ((rg | AR823x_MDIO_HIGH_WORD) << 5) | ph |
+				  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+				  AR823x_MDIO_START | AR823x_MDIO_NORM);
+		if (ar823x_mdio_poll() != 0) {
+			return -1;
+		}
+	} else {
+
+		ar823x_emac_wrreg(EMAC_MAC_MDIO_DATA, data);
+		ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL,
+				  (rg  << 5) | ph |
+				  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+				  AR823x_MDIO_START | AR823x_MDIO_NORM);
+	}
+	// return without waiting for final completion
+	return 0;
+}
+
+/*********************************************************************
+ Name:      ar823x_mdio_write32
+ Purpose:   mdio write routine for AR823x device
+ Notes:     This is a partial blocking call since we require
+            more than one cycle to complete the write.
+            checks for completion first
+*********************************************************************/
+int ar823x_mdio_write32(int phyAddr, int regAddr, u32 data)
+{
+
+	u32 highAddr = regAddr >> 9;
+	// need to swizzle the bits into arasan's fields which are different          
+	u32 rg = (regAddr & 0x3c) >> 1;
+	u32 ph = (regAddr & 0x1c0) >> 6;
+
+	// check for clear MDIO status
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_DATA, highAddr);
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL, (phyAddr & 0x1f) |
+			  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+			  AR823x_MDIO_START | AR823x_MDIO_PAGE);
+
+	// wait for completion
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_DATA, (data >> 16) & 0xffff);
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL,
+			  ((rg | AR823x_MDIO_HIGH_WORD) << 5) | ph |
+			  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+			  AR823x_MDIO_START | AR823x_MDIO_NORM);
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_DATA, data & 0xffff);
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL,
+			  (rg << 5) | ph | AR823x_MDIO_START |
+			  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+			  AR823x_MDIO_NORM);
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	// return without waiting for final completion
+	return 0;
+}
+
+/*********************************************************************
+ Name:      ar823x_mdio_read
+ Purpose:   mdio read routine for AR823x device
+ Notes:     This is a blocking call since we require
+            more than one cycle to complete the write.
+            checks for completion first
+*********************************************************************/
+int ar823x_mdio_read(struct mii_bus *bus, int phyAddr, int regAddr)
+{
+	int data;
+	u32 highAddr = regAddr >> 9;
+	// need to swizzle the bits into arasan's fields which are different          
+	u32 rg = (regAddr & 0x3c) >> 1;
+	u32 ph = (regAddr & 0x1c0) >> 6;
+
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_DATA, highAddr);
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL, (phyAddr & 0x1f) | AR823x_MDIO_START |
+			  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+			  AR823x_MDIO_READ | AR823x_MDIO_PAGE);
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL, (rg << 5) | AR823x_MDIO_START |
+			  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+			  AR823x_MDIO_READ | ph | AR823x_MDIO_NORM);
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	data = ar823x_emac_rdreg(EMAC_MAC_MDIO_DATA);
+
+	ar823x_emac_wrreg(EMAC_MAC_MDIO_CTRL,
+			  ((rg | AR823x_MDIO_HIGH_WORD) << 5) |
+			  AR823x_MDIO_START | AR823x_MDIO_READ | ph |
+			  ((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+			  AR823x_MDIO_NORM);
+
+	if (ar823x_mdio_poll() != 0) {
+		return -1;
+	}
+
+	data = data | (ar823x_emac_rdreg(EMAC_MAC_MDIO_DATA) << 16);
+	return data;
+}
+
+static void ar823x_reset(const u32 phy_addr)
+{
+	int reset = AR823x_MASK_CTL_RESET;
+
+	// do a clean reset and wait for completion 
+	ar823x_mdio_write32(phy_addr, AR823x_MASK_CTL, AR823x_MASK_CTL_RESET);
+	while (reset & AR823x_MASK_CTL_RESET) {
+		reset = ar823x_mdio_read(NULL, phy_addr, AR823x_MASK_CTL);
+	}
+}
+
+static u32 ar8236_init(const u32 phy_addr, const u32 devID)
+{
+	printk("Detected AR823x Switch %d:%x - set for MII, 100FD\n", phy_addr, devID);
+
+	// do a softreset
+	ar823x_mdio_write32(phy_addr, AR823x_MODE_CTRL, AR8236_MODE_MII_PHY);
+
+	ar823x_reset(phy_addr);
+
+	ar823x_mdio_write32(phy_addr, AR8236_FLOOD_MASK, AR8236_FLOOD_MASK_DEF);
+	ar823x_mdio_write32(phy_addr, AR8236_PORT0_CTRL, AR8236_PORT0_CTRL_DEF);
+
+	return phy_addr;
+}
+
+static u32 ar8327_init(const u32 phy_addr, const u32 devID)
+{
+	printk("Detected AR8327 Switch %d:%x - set for RGMII, 1000FD\n", phy_addr, devID);
+
+	// do a softreset
+	ar823x_mdio_write32(phy_addr, AR823x_MODE_CTRL, AR8327_MODE_RGMII_PHY);
+
+	ar823x_reset(phy_addr);
+
+	ar823x_mdio_write32(phy_addr, AR823x_MASK_CTL, AR8327_MASK_CLEAR_DEF);
+	ar823x_mdio_write32(phy_addr, AR8327_PWS_CTRL, AR8327_PWS_CTRL_DEF);
+	ar823x_mdio_write32(phy_addr, AR8327_FWCTL_CTRL, AR8327_FWCTL_CTRL_DEF);
+	ar823x_mdio_write32(phy_addr, AR8327_PORT6_CTRL, AR8327_PORT6_CTRL_DEF);
+	ar823x_mdio_write32(phy_addr, AR8327_PORT0_CTRL, AR8327_PORT0_CTRL_DEF);
+
+	//set the register 0xe00000b4 for RGMII Dll control register
+	*(volatile u32 *)(0xe00000b4) = 0x86868f8f;
+
+	return phy_addr;
+}
+
+/*********************************************************************
+Name:      ar823x_init
+Purpose:   Check for Atheros 823x switch; returns the PHY address
+of the switch if found, -1 otherwise
+Notes:     pass phy addr as 32 to scan all PHY addresses
+ *********************************************************************/
+int ar823x_init(int phy_addr)
+{
+	u32 devID;
+	u32 addr;
+
+	// need to scan?
+	if (phy_addr == 32) {
+		addr = AR823x_MIN_PHY_NUM;
+	} else {
+		addr = phy_addr;
+	}
+
+	while (addr <= AR823x_MAX_PHY_NUM) {
+		devID = ar823x_mdio_read(NULL, addr, AR823x_MASK_CTL);
+		if ((devID & 0xff00) == 0x300) {
+			return ar8236_init(addr, devID);
+		} else if ((devID & 0xff00) == 0x1200) {
+			return ar8327_init(addr, devID);
+		}
+
+		if (phy_addr == 32) {
+			addr++;
+		} else {
+			// not found on passed addr
+			break;
+		}
+	}
+	return -1;
+}
diff --git a/drivers/qtn/ruby/ar823x.h b/drivers/qtn/ruby/ar823x.h
new file mode 100644
index 0000000..28653b8
--- /dev/null
+++ b/drivers/qtn/ruby/ar823x.h
@@ -0,0 +1,73 @@
+#ifndef __AR823x_h__
+#define __AR823x_h__
+/*
+ *  ar823x.h
+ *
+ *  Driver for the Atheros 8236 & 8327 switches
+ *
+ *  Copyright (c) Quantenna Communications Incorporated 2009.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+////////////////////////////////////////////////////////////////////
+// NOTE - we do not check for valid base in mdio access routines
+// use must ensure device is initialized and valid prior
+// to using MDIO funtions
+///////////////////////////////////////////////////////////////////
+
+/*********************************************************************
+ Name:      ar823x_init
+ Purpose:   Check for Atheros 823x switch; returns the PHY address
+            of the switch if found, -1 otherwise
+ Notes:     pass phy addr as 32 to scan all PHY addresses
+*********************************************************************/
+int ar823x_init(int phy_addr);
+
+/*********************************************************************
+ Name:      ar823x_mdio_read
+ Purpose:   mdio read routine for AR823x device
+ Notes:     This is a blocking call since we require
+            more than one cycle to complete the write.
+            checks for completion first
+*********************************************************************/
+int ar823x_mdio_read(struct mii_bus *bus, int phyAddr, int regAddr);
+
+/*********************************************************************
+ Name:      ar823x_mdio_write
+ Purpose:   mdio write routine for AR823x device
+ Notes:     reg_addr[1]=1 determines high word
+*********************************************************************/
+int ar823x_mdio_write(struct mii_bus *bus, int phyAddr, int regAddr, u16 data);
+
+/*********************************************************************
+ Name:      ar823x_mdio_write32
+ Purpose:   mdio write routine for AR823x device
+ Notes:     This is a partial blocking call since we require
+            more than one cycle to complete the write.
+            checks for completion first
+*********************************************************************/
+int ar823x_mdio_write32(int phyAddr, int regAddr, u32 data);
+
+/*********************************************************************
+ Name:      ar823x_mdio_poll
+ Purpose:   mdio poll routine for AR823x device
+ Notes:     Checks for mdio operation complete
+*********************************************************************/
+int ar823x_mdio_poll(void);
+
+#endif // __AR823x_h__
diff --git a/drivers/qtn/ruby/arasan_emac_ahb.c b/drivers/qtn/ruby/arasan_emac_ahb.c
new file mode 100644
index 0000000..9c060a1
--- /dev/null
+++ b/drivers/qtn/ruby/arasan_emac_ahb.c
@@ -0,0 +1,1206 @@
+/**
+ * Copyright (c) 2011-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/proc_fs.h>
+#include <linux/phy.h>
+
+#include <trace/skb.h>
+#include <trace/ippkt.h>
+
+#include <asm/board/soc.h>
+#include <asm/board/board_config.h>
+#include <asm/board/dma_cache_ops.h>
+
+#include <common/queue.h>
+
+#ifdef QTN_ENABLE_SKB_RECYCLE
+#include <qtn/skb_recycle.h>
+#endif
+#include <qtn/qtn_buffers.h>
+#include <qtn/qtn_global.h>
+#include <qtn/emac_debug.h>
+#include <qtn/qtn_debug.h>
+
+#include "arasan_emac_ahb.h"
+#include "emac_lib.h"
+#include <compat.h>
+
+#define DRV_NAME	"emac_eth"
+#define DRV_VERSION	"1.0"
+#ifndef DRV_AUTHOR
+#define DRV_AUTHOR	"Quantenna Communications, Inc."
+#endif
+#ifndef DRV_DESC
+#define DRV_DESC	"Arasan AHB-EMAC on-chip Ethernet driver"
+#endif
+
+#define TIMEOUT_DMA_STATUS_IRQ	50	/*ms*/
+
+/*
+ * 125MHz AHBCLK leads to a max mitigation timeout value of 0.524ms.
+ * Under extreme conditions, this is the max packet rx delay, an acceptable
+ * one. The benefit will be maximum rx interrupt mitigation.
+ * Hence, choose 0xFFFF for EMAC_IRQ_MITIGATION_TIMEOUT
+ *
+ * Under a 400Mbps rx data flow, roughly 16 to 20 packets arrive at
+ * EMAC, thus choose 0x10 for EMAC_IRQ_MITIGATION_FRAME_COUNTER
+ * NOTE: Adjust EMAC_IRQ_MITIGATION_FRAME_COUNTER value if you think is appropriate
+ */
+#define EMAC_IRQ_MITIGATION_FRAME_COUNTER	0x10
+#define EMAC_IRQ_MITIGATION_TIMEOUT		0xFFFF
+#define EMAC_MITIGATION_TIMER_FREQ		(HZ/10) /* 100 ms */
+#define EMAC_MITIGATION_EN_THRESHOLD		250 /* no. of interrupts */
+#define EMAC_MITIGATION_DIS_THRESHOLD		100 /* no. of interrupts */
+#define EMAC_MITIGATION_ENABLED			01
+#define EMAC_MITIGATION_DISABLED		00
+#define EMAC_ERROR_FRAME_MASK			(RxDescStatusAlignErr | RxDescStatusRuntFrame |	\
+			RxDescStatusCRCErr | RxDescStatusMaxLenErr | RxDescStatusJabberErr)
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+static struct net_device* emac_probe(int port_num);
+static void bring_up_interface(struct net_device *dev);
+static void shut_down_interface(struct net_device *dev);
+static int emac_open(struct net_device *dev);
+static int emac_close(struct net_device *dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static int arasan_proc_rd(struct seq_file *sfile, void *data);
+#else
+static int arasan_proc_rd(char *buf, char **start, off_t offset, int count, int *eof, void *data);
+#endif
+/* Static per-port configuration/state table, indexed by EMAC port number. */
+static struct {
+	u32 base_addr;		/* mapped EMAC register base */
+	u32 mdio_base_addr;	/* mapped MDIO register base */
+	int irq;
+	const char *proc_name;	/* name of the /proc stats entry */
+	int phydev_addr;	/* initialised to -1 */
+	struct net_device *dev;	/* filled in by emac_probe() */
+} iflist[] = {
+	{
+		(u32)IO_ADDRESS(RUBY_ENET0_BASE_ADDR),
+		(u32)IO_ADDRESS(RUBY_ENET0_BASE_ADDR),
+		RUBY_IRQ_ENET0,
+		"arasan_emac0",
+		-1,
+		NULL
+	},
+	{
+		(u32)IO_ADDRESS(RUBY_ENET1_BASE_ADDR),
+		/* NOTE(review): EMAC1 uses EMAC0's register block as its MDIO
+		 * base - confirm both PHYs hang off ENET0's MDIO bus. */
+		(u32)IO_ADDRESS(RUBY_ENET0_BASE_ADDR),
+		RUBY_IRQ_ENET1,
+		"arasan_emac1",
+		-1,
+		NULL
+	},
+};
+
+/* Disable all EMAC interrupt sources and acknowledge anything pending. */
+inline static void disable_emac_ints(struct emac_private *arap)
+{
+	struct emac_common *arapc = &arap->com;
+
+	/* Disable all ints from block to central interrupt controller */
+	emac_wr(arapc, EMAC_MAC_INT_ENABLE, 0);
+	emac_wr(arapc, EMAC_MAC_INT_ENABLE, 0);
+	/* Clear any remaining interrupts by writing the pending bits back */
+	emac_wr(arapc, EMAC_MAC_INT, emac_rd(arapc, EMAC_MAC_INT));
+	emac_wr(arapc, EMAC_DMA_STATUS_IRQ, emac_rd(arapc, EMAC_DMA_STATUS_IRQ));
+}
+
+/*
+ * (Re)attach a freshly allocated receive buffer to RX descriptor 'i'
+ * and hand the descriptor back to the DMA engine.
+ * Returns 0 on success, -1 if no skb could be obtained (the descriptor
+ * is then left unowned and allocation is retried on a later poll).
+ */
+static int __sram_text allocate_new_rx_buf(struct net_device *dev, int i)
+{
+	struct emac_private *arap = netdev_priv(dev);
+	struct emac_common *arapc = &arap->com;
+	struct sk_buff *skb = NULL;
+#ifdef QTN_ENABLE_SKB_RECYCLE	/* fixed: was misspelled "QTN_ENABLE_SKB_RECYLE" */
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+#endif
+	/* Initialization */
+	/* Make sure that driver is not using this buffer */
+	arap->rxbufs[i] = NULL;
+	/* Make sure that DMA controller is not using this buffer */
+	arapc->rx.descs[i].status = 0;
+	/* Clear the DMA mapping start for a buffer previously attached */
+	arapc->rx.descs[i].bufaddr2 = 0;
+	/* Set up control field */
+	arapc->rx.descs[i].control = min(RX_BUF_SIZE_PAYLOAD, RxDescBuf1SizeMask) << RxDescBuf1SizeShift;
+	if (i >= RUBY_EMAC_NUM_RX_BUFFERS - 1) {
+		arapc->rx.descs[i].control |= RxDescEndOfRing;
+	}
+
+	/*
+	 * Allocate socket buffer.
+	 * Oversize the buffer to allow for cache line alignment.
+	 */
+#ifdef QTN_ENABLE_SKB_RECYCLE	/* fixed: was misspelled "QTN_ENABLE_SKB_RECYLE" */
+	if (recycle_list) {
+		skb = qtn_skb_recycle_list_pop(recycle_list, &recycle_list->stats_eth);
+	}
+#endif
+	if (!skb) {
+		size_t size;
+#if TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT
+		size = TOPAZ_HBM_BUF_EMAC_RX_SIZE / 2;
+#else
+		size = qtn_rx_buf_size();
+#endif
+		skb = dev_alloc_skb(size);
+	}
+
+	if (!skb) {
+		arap->rx_skb_alloc_failures++;
+		return -1;
+	}
+
+#ifdef QTN_ENABLE_SKB_RECYCLE	/* fixed: was misspelled "QTN_ENABLE_SKB_REYCLE" */
+	skb->recycle_list = recycle_list;
+#endif
+	skb->dev = dev;
+	arap->rxbufs[i] = skb;
+
+	trace_skb_perf_start(skb, 0);
+	trace_skb_perf_stamp_call(skb);
+
+	/* Move skb->data to a cache line boundary */
+	skb_reserve(skb, align_buf_dma_offset(skb->data));
+
+	/* Invalidate cache and map virtual address to bus address. */
+	arap->rxdmabufs[i] = cache_op_before_rx(skb->data, rx_buf_map_size(),
+						skb->cache_is_cleaned) + NET_IP_ALIGN;
+	skb->cache_is_cleaned = 0;
+	arapc->rx.descs[i].bufaddr1 = arap->rxdmabufs[i];
+	/* buffaddr2 value is ignored by the DMA (its length is specified as
+	 * zero), so this is a handy place to store the DMA mapping start
+	 * for use later when the buffer is passed over to Linux.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN);
+	arapc->rx.descs[i].bufaddr2 = (u32)skb->data;
+
+	/* Hand off buffer to DMA controller */
+	arapc->rx.descs[i].status = RxDescOwn;
+
+	return 0;
+}
+/* Acknowledge anything pending, then enable the MAC/DMA interrupt
+ * sources that emac_interrupt() services. */
+static void enable_emac_ints(struct emac_private *arap)
+{
+	struct emac_common *arapc = &arap->com;
+
+	/* Clear any pending interrupts */
+	emac_wr(arapc, EMAC_MAC_INT, emac_rd(arapc, EMAC_MAC_INT));
+	emac_wr(arapc, EMAC_DMA_STATUS_IRQ, emac_rd(arapc, EMAC_DMA_STATUS_IRQ));
+	/* Enable selected ints from block to central interrupt controller */
+	emac_wr(arapc, EMAC_MAC_INT_ENABLE, MacUnderrun | MacJabber);
+	emac_wr(arapc, EMAC_DMA_INT_ENABLE, DmaTxDone| DmaRxDone | DmaMacInterrupt);
+}
+/* RX descriptor the driver will service next. */
+__always_inline static volatile struct emac_desc* current_rx_desc(struct emac_private *arap)
+{
+	return arap->com.rx.descs + arap->rx_index;
+}
+
+/* Turn off RX interrupt mitigation: writes the register with the enable
+ * bit (31) clear.  NOTE(review): all other bits are written as 1 -
+ * presumably don't-care while disabled; confirm against the EMAC spec. */
+__always_inline static void disable_emac_irq_mitigation(struct emac_private *arap)
+{
+	emac_wr(&arap->com, EMAC_DMA_RX_IRQ_MITIGATION, ~(1 << 31));
+}
+
+/* Arm RX interrupt mitigation: raise the interrupt after 'frame_counter'
+ * frames (bits 7:0) or after 'timeout' ticks (bits 23:8); bit 31 enables
+ * the feature. */
+__always_inline static void enable_emac_irq_mitigation(struct emac_private *arap, u_int32_t frame_counter, u_int32_t timeout)
+{
+	emac_wr(&arap->com, EMAC_DMA_RX_IRQ_MITIGATION,
+		(frame_counter & 0xFF) |
+		((timeout & 0xFFFF) << 8) |
+		(1 << 31) /*enable mitigation*/);
+}
+
+/*
+ * Local variant of eth_type_trans(): classify skb->pkt_type from the
+ * destination MAC, strip the Ethernet header and return the protocol id,
+ * with the classic 802.3-length / IPX-FFFF / 802.2-LLC fallbacks.
+ */
+static __be16 emac_eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ethhdr *eth;
+	unsigned char *rawp;
+
+	/* NOTE(review): this stores the first two bytes of the frame, not a
+	 * header offset as skb_reset_mac_header() would; eth_hdr() below
+	 * depends on mac_header - confirm against this kernel's sk_buff. */
+	skb->mac_header = *(u16*)skb->data;
+	skb_pull(skb, ETH_HLEN);
+	eth = eth_hdr(skb);
+
+	if (is_multicast_ether_addr(eth->h_dest)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		if (ether_addr_equal(eth->h_dest, dev->broadcast))
+#else
+		if (!compare_ether_addr(eth->h_dest, dev->broadcast))
+#endif
+
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	}
+
+	/* Values >= 1536 in h_proto are an Ethertype, not an 802.3 length */
+	if (ntohs(eth->h_proto) >= 1536)
+		return eth->h_proto;
+
+	rawp = skb->data;
+
+	/*
+	 *      This is a magic hack to spot IPX packets. Older Novell breaks
+	 *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
+	 *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
+	 *      won't work for fault tolerant netware but does for the rest.
+	 */
+	if (*(unsigned short *)rawp == 0xFFFF)
+		return htons(ETH_P_802_3);
+
+	/*
+	 *      Real 802.2 LLC
+	 */
+	return htons(ETH_P_802_2);
+}
+
+#ifdef CONFIG_QVSP
+/*
+ * Ask the QVSP module whether this received frame should be dropped.
+ * The frame is classified first (qdrv_sch_classify sets up skb state
+ * that the check callback reads).  Returns 1 to drop, 0 to keep.
+ * Compiles to a constant 0 (never drop) when CONFIG_QVSP is off.
+ */
+static __sram_text int emac_rx_vsp_should_drop(struct sk_buff *skb, struct ethhdr *eh)
+{
+	u8 *data_start;
+	u16 ether_type = 0;
+
+	if (qvsp_is_active(emac_qvsp.qvsp) && emac_qvsp.qvsp_check_func) {
+		data_start = qdrv_sch_find_data_start(skb, (struct ether_header *)eh, &ether_type);
+		qdrv_sch_classify(skb, ether_type, data_start);
+		if (emac_qvsp.qvsp_check_func(emac_qvsp.qvsp, QVSP_IF_ETH_RX, skb,
+				data_start, skb->len - (data_start - skb->data),
+				skb->priority)) {
+			trace_ippkt_dropped(TRACE_IPPKT_DROP_RSN_VSP, 1, 0);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+#else
+#define emac_rx_vsp_should_drop(_skb, _data_start)	(0)
+#endif /* CONFIG_QVSP */
+
+/*
+ * NAPI poll handler.  Drains RX descriptors the DMA has released (up to
+ * 'budget'), hands good frames to the stack in a batch, re-attaches a
+ * fresh buffer to each serviced descriptor, and re-enables the RX
+ * interrupt via napi_complete() once the ring is empty.  Returns the
+ * number of frames passed up.
+ */
+static int __sram_text emac_rx_poll (struct napi_struct *napi, int budget)
+{
+	struct emac_private *arap = container_of(napi, struct emac_private, napi);
+	struct emac_common *arapc = &arap->com;
+	struct ethhdr *eth;
+	u32 status, dma_ints;
+	int processed = 0;
+	struct sk_buff *skb_tmp = NULL, *skb;
+	struct sk_buff_head skb_list;
+
+	/* Ring empty (hardware owns the next descriptor): finish NAPI and
+	 * unmask the RX-done interrupt. */
+	if ((current_rx_desc(arap)->status & RxDescOwn)) {
+		napi_complete(napi);
+		dma_ints = emac_rd(arapc, EMAC_DMA_INT_ENABLE);
+		dma_ints |= DmaRxDone;
+		emac_wr(arapc, EMAC_DMA_INT_ENABLE, dma_ints);
+		return 0;
+	}
+
+	__skb_queue_head_init(&skb_list);
+
+	/* Service descriptors released by the DMA, up to the NAPI budget. */
+	while (!((status = current_rx_desc(arap)->status) & RxDescOwn) && (processed < budget)) {
+		skb = arap->rxbufs[arap->rx_index];
+
+		trace_skb_perf_stamp_call(skb);
+
+		if (!skb) {
+			/*
+			 * Buffer for this index was used during prev iteration
+			 * and then new buffer failed to allocate.
+			 * So skip processing and try to allocate it again.
+			 */
+		} else if (status & EMAC_ERROR_FRAME_MASK) {
+			/* NOTE(review): bring_up_interface() reinitialises the
+			 * whole ring (and rx_index) from inside this loop while
+			 * the loop keeps using the old index - confirm. */
+			printk(KERN_ERR
+				"%s: Discarding corrupted frame and reset buffer. (%08X)\n",
+				arapc->dev->name, status);
+				dev_kfree_skb(skb);
+				bring_up_interface(arapc->dev);
+		} else if ((status & RxDescFirstDesc) && (status & RxDescLastDesc)) {
+			/* Whole frame in a single descriptor - the normal case */
+			u32 length = (status >> RxDescFrameLenShift) & RxDescFrameLenMask;
+#ifdef INSERT_PCIE_DMA_TES
+			extern void pcie_dma_tst(u32 local_vaddr, u32 local_paddr, int len);
+			pcie_dma_tst((u32) skb->data, (u32) arap->rxdmabufs[arap->rx_index], length);
+#endif
+			skb_put(skb, length);
+			skb->protocol = emac_eth_type_trans(skb, arap->com.dev);
+			skb->src_port = 0;
+			processed++;
+
+			eth = eth_hdr(skb);
+			trace_ippkt_check(eth, skb->len, TRACE_IPPKT_LOC_ETH_RX);
+#ifdef QTN_ENABLE_SKB_RECYCLE
+			/* Unicast-to-us / broadcast / multicast frames may be
+			 * held by the stack, so they must not be recycled. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			if (unlikely(ether_addr_equal(eth->h_dest, arap->com.dev->dev_addr) ||
+#else
+			if (unlikely(!compare_ether_addr(eth->h_dest, arap->com.dev->dev_addr) ||
+#endif
+						skb->pkt_type == PACKET_BROADCAST ||
+						skb->pkt_type == PACKET_MULTICAST)) {
+				skb->is_recyclable = 0;
+			} else
+#endif
+			if (emac_rx_vsp_should_drop(skb, eth)) {
+				dev_kfree_skb(skb);
+				skb = NULL;
+			}
+#ifdef QTN_ENABLE_SKB_RECYCLE
+			else {
+				skb->is_recyclable = 1;
+			}
+#endif
+			/* Chain accepted frames; delivered in a batch below */
+			if (likely(skb)) {
+				if (unlikely(!skb_tmp)) {
+					__skb_queue_head(&skb_list, skb);
+				} else {
+					skb_append(skb_tmp, skb, &skb_list);
+				}
+				skb_tmp = skb;
+			}
+		} else {
+			/*
+			 * This is bad.  We have set the hardware to discard frames over a
+			 * certain size and allocated single buffers large enough to take
+			 * frames smaller than that.  Any frames we get should therefore
+			 * fit into a single buffer, but this frame is fragmented across
+			 * multiple buffers - which should never happen.
+			 */
+			if (status & RxDescFirstDesc) {
+				printk(KERN_ERR
+						"%s: Discarding initial frame fragment. (%08X)\n",
+						arap->com.dev->name, status);
+			} else if (status & RxDescLastDesc) {
+				u32 length = (status >> RxDescFrameLenShift) & RxDescFrameLenMask;
+				printk(KERN_ERR
+						"%s: Discarding final frame fragment.\n"
+						"Frame length was %lu bytes. (%08X)\n",
+						arap->com.dev->name, (unsigned long)length, status);
+				arap->rx_fragmented_frame_discards++;
+			} else {
+				printk(KERN_ERR
+						"%s: Discarding intermediate frame fragment (status %08X).\n",
+						arap->com.dev->name, status);
+			}
+			dev_kfree_skb(skb);
+		}
+		arap->com.dev->last_rx = jiffies;
+
+		/* We are done with the current buffer attached to this descriptor, so attach a new one. */
+		if (allocate_new_rx_buf(arap->com.dev, arap->rx_index)) {
+			/* Failed to attach new buffer.
+			 * If allocating is failed then it is not a problem - during one of next
+			 * iterations allocating will be tried again.
+			 */
+			break;
+		} else {
+			emac_wr(&arap->com, EMAC_DMA_RX_POLL_DEMAND, 0);
+			if (++arap->rx_index >= RUBY_EMAC_NUM_RX_BUFFERS) {
+				arap->rx_index = 0;
+			}
+		}
+
+		//GPIO_DBG_CLR(PROF_POINT_2);
+	}
+
+	/* Hand the batch of accepted frames to the network stack. */
+	skb_queue_walk_safe(&skb_list, skb, skb_tmp) {
+		skb->next = NULL;
+		netif_receive_skb(skb);
+	}
+
+	/* Drained within budget: complete NAPI and unmask the RX interrupt. */
+	if ((current_rx_desc(arap)->status & RxDescOwn) && (processed < budget)) {
+		napi_complete(napi);
+		dma_ints = emac_rd(&arap->com, EMAC_DMA_INT_ENABLE);
+		dma_ints |= DmaRxDone;
+		emac_wr(&arap->com, EMAC_DMA_INT_ENABLE, dma_ints);
+	}
+
+	return processed;
+}
+
+/* TX descriptor that will take the next transmitted frame. */
+__always_inline static volatile struct emac_desc* current_tx_desc(struct emac_private *arap)
+{
+	return arap->com.tx.descs + arap->arap_data.tx_head;
+}
+
+/*
+ * Stop the TX queue because the descriptor ring is backlogged.
+ * Requests a completion interrupt on descriptor 'wake_idx' so that
+ * emac_interrupt() wakes the queue once the ring drains to that point.
+ * The queue is stopped only if both 'wake_idx' and its predecessor are
+ * still owned by the DMA, i.e. the backlog is real.
+ */
+static void __sram_text emac_tx_stop_queue(struct net_device *dev, unsigned wake_idx)
+{
+	struct emac_private *arap = netdev_priv(dev);
+	volatile struct emac_desc *ptx_desc = arap->com.tx.descs + wake_idx;
+	volatile struct emac_desc *prev_ptx_desc = arap->com.tx.descs + EMAC_TX_BUFF_INDX(wake_idx - 1);
+	int stopped = 0;
+	unsigned long flags;
+
+	/* irq-off section so the decision and the stop are atomic vs the ISR */
+	local_irq_save(flags);
+	if (likely(prev_ptx_desc->status & ptx_desc->status & TxDescOwn)) {
+		ptx_desc->control |= TxDescIntOnComplete; /* better to keep it first in block */
+		netif_stop_queue(dev);
+		arap->tx_queue_stopped = 1;
+		stopped = 1;
+	}
+	local_irq_restore(flags);
+
+	if (likely(stopped)) {
+		arap->arap_data.tx_full = 1;
+		arap->arap_data.tx_stopped_count++;
+	}
+}
+
+/*
+ * Optional software TX rate smoothing (token bucket).  Returns 1 when
+ * the packet may be transmitted now, 0 when the caller must retry.
+ * When tokens are exhausted and enough packets are pending, the queue
+ * is stopped with a wake point partway through the backlog.
+ * NOTE(review): 'tokens'/'last_ts' are function-static, so the bucket
+ * is shared by both EMAC ports - confirm this is intended.
+ */
+static int __sram_text emac_tx_pass(struct net_device *dev)
+{
+#ifndef CONFIG_ARCH_RUBY_EMAC_SMOOTHING
+
+	return 1;
+
+#else
+
+	const unsigned burst_packets = CONFIG_ARCH_RUBY_EMAC_SMOOTHING_BURST_SIZE;
+	const unsigned pend_packets_wake_queue = ((burst_packets >> 1) + (burst_packets >> 2));
+	const uint64_t tokens_per_packet = (NSEC_PER_SEC / CONFIG_ARCH_RUBY_EMAC_SMOOTHING_RATE);
+	const uint64_t max_accum_tokens = (burst_packets * tokens_per_packet);
+
+	static uint64_t tokens = 0;
+	static ktime_t last_ts = {0,};
+
+	struct emac_private *arap = netdev_priv(dev);
+	int ret = 0;
+
+	/*
+	 * If currently have no enough tokens to pass packet
+	 * try to put more tokens to bucket based on passed time.
+	 */
+	if (unlikely(tokens < tokens_per_packet)) {
+		struct timespec now_ts;
+		ktime_t now;
+
+		ktime_get_ts(&now_ts);
+		now = timespec_to_ktime(now_ts);
+
+		tokens += ktime_to_ns(now) - ktime_to_ns(last_ts);
+		if (tokens > max_accum_tokens) {
+			tokens = max_accum_tokens;
+		}
+
+		last_ts = now;
+	}
+
+	/* If have enough tokens to pass packet remove these tokens from bucket. */
+	if (likely(tokens >= tokens_per_packet)) {
+		tokens -= tokens_per_packet;
+		ret = 1;
+	}
+
+	/* If have no tokens anymore try to disable tx queue if appropriate. */
+	if (unlikely(tokens < tokens_per_packet)) {
+		unsigned pending = EMAC_TX_BUFF_INDX(arap->arap_data.tx_head -
+			arap->arap_data.tx_tail);
+		if (unlikely(pending >= burst_packets)) {
+			emac_tx_stop_queue(dev, EMAC_TX_BUFF_INDX(arap->arap_data.tx_tail +
+				pend_packets_wake_queue));
+		}
+	}
+
+	return ret;
+#endif
+}
+
+/*
+ * Release a transmitted skb: push it onto the shared recycle list when
+ * recycling is compiled in, otherwise free it outright.
+ */
+inline static void emac_recycle_tx_buf(struct sk_buff *skb)
+{
+#ifdef QTN_ENABLE_SKB_RECYCLE
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+	trace_skb_perf_stamp_call(skb);
+	trace_skb_perf_finish(skb);
+
+	/* Fall back to freeing if the recycle list rejects the buffer */
+	if (!qtn_skb_recycle_list_push(recycle_list,
+				&recycle_list->stats_eth, skb)) {
+		dev_kfree_skb(skb);
+	}
+#else
+	/* Fixed: with recycling disabled the old code did nothing here,
+	 * leaking every skb handed to this function from the TX path. */
+	dev_kfree_skb(skb);
+#endif
+}
+
+/*
+ * ndo_start_xmit handler.  First reclaims descriptors the DMA engine
+ * has finished with (freeing/recycling their skbs), then maps the new
+ * skb into the next free descriptor and kicks the TX DMA.  Returns
+ * NETDEV_TX_BUSY while the ring is full or the rate limiter blocks,
+ * 0 (NETDEV_TX_OK) otherwise.
+ */
+static int __sram_text emac_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct emac_private *arap = netdev_priv(dev);
+	volatile struct emac_desc *ptx_desc;
+	u32 ctrlval;
+
+	trace_skb_perf_stamp_call(skb);
+
+	/* Free any buffers that the DMA has finished with since the last Tx */
+	ptx_desc = arap->com.tx.descs + arap->arap_data.tx_tail;
+
+	while ((arap->arap_data.tx_tail != arap->arap_data.tx_head) &&
+			!(ptx_desc->status & TxDescOwn)) {
+		/* bufaddr2 holds the skb pointer stashed at transmit time */
+		if (ptx_desc->bufaddr2) {
+			struct sk_buff *skb_old = (struct sk_buff *)ptx_desc->bufaddr2;
+
+			emac_recycle_tx_buf(skb_old);
+			ptx_desc->bufaddr2 = 0;
+			ptx_desc->bufaddr1 = 0;
+		}
+
+		arap->arap_data.tx_tail =
+			EMAC_TX_BUFF_INDX(arap->arap_data.tx_tail+1);
+		ptx_desc = arap->com.tx.descs + arap->arap_data.tx_tail;
+		arap->arap_data.tx_full = 0;
+		arap->tx_queue_stopped = 0;
+	}
+
+	if (arap->tx_queue_stopped) {
+		/* We had run out of descriptors */
+		return NETDEV_TX_BUSY;
+	}
+
+	if (!emac_tx_pass(dev)) {
+		return NETDEV_TX_BUSY;
+	}
+
+	ptx_desc = current_tx_desc(arap);
+
+	if (likely(skb->len <= TxDescBuf1SizeMask)) {
+		trace_ippkt_check(skb->data, skb->len, TRACE_IPPKT_LOC_ETH_TX);
+
+		/*
+		 * Flush and invalidate any cached data in the skb out to memory so that
+		 * the DMA engine can see it.
+		 * Save mapped memory for use by DMA engine.
+		 */
+		ptx_desc->bufaddr1 = cache_op_before_tx(align_buf_cache(skb->head),
+				align_buf_cache_size(skb->head, skb_headroom(skb) + skb->len)) +
+				align_buf_cache_offset(skb->head) + skb_headroom(skb);
+		skb->cache_is_cleaned = 1;
+
+		/*
+		 * The buffer 2 length is set to 0, so it is ignored by the
+		 * DMA engine and can be used as a handy place to store the
+		 * skbuff address.  This is used to free the skbuf once the DMA
+		 * has completed.
+		 */
+		KASSERT(ptx_desc->bufaddr2 == 0,
+			("EMAC:Non-NULL TX descriptor (%p) - leak?\n", (void *)ptx_desc->bufaddr2));
+		ptx_desc->bufaddr2 = (u32)skb;
+
+		ctrlval = TxDescFirstSeg | TxDescLastSeg |
+			(skb->len & TxDescBuf1SizeMask) << TxDescBuf1SizeShift;
+		if (arap->arap_data.tx_head >= RUBY_EMAC_NUM_TX_BUFFERS - 1) {
+			/* Last desc in ring must be marked as such */
+			ctrlval |= TxDescEndOfRing;
+		}
+		ptx_desc->control = ctrlval;
+
+		ptx_desc->status |= TxDescOwn;
+
+		/* give descriptor */
+		arap->arap_data.tx_head = EMAC_TX_BUFF_INDX(arap->arap_data.tx_head + 1);
+		if (EMAC_TX_BUFF_INDX(arap->arap_data.tx_head + 1) == arap->arap_data.tx_tail) {
+			/*
+			 * We just used the last descriptor.
+			 * Stop TX queue and mark DMA for pkt at 75% empty point, to
+			 * restart queue so that link utilisation is maximized.
+			 */
+			unsigned wake_idx = EMAC_TX_BUFF_INDX(arap->arap_data.tx_head -
+				(RUBY_EMAC_NUM_TX_BUFFERS / 4));
+			emac_tx_stop_queue(dev, wake_idx);
+		}
+		emac_wr(&arap->com, EMAC_DMA_TX_POLL_DEMAND, 0);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		netif_trans_update(dev);
+#else
+		dev->trans_start = jiffies;
+#endif
+
+	} else {
+		/*
+		 * The allocated buffers should be big enough to take any
+		 * frame.  Unexpectedly large frames are dropped.
+		 */
+		printk(KERN_ERR "%s: Dropping overlength (%d bytes) packet.\n",
+				dev->name, skb->len);
+		arap->tx_overlength_frame_discards++;
+		emac_recycle_tx_buf(skb);
+	}
+
+	return 0;
+}
+
+/*
+ * Interrupt handler for TX-done, RX-done and MAC events.  RX work is
+ * deferred to NAPI (emac_rx_poll) with the RX-done source masked;
+ * TX-done wakes a stopped queue; MAC underrun/jabber are counted and
+ * logged.
+ */
+static irqreturn_t __sram_text emac_interrupt(int irq, void *dev_id)
+{
+	u32 pending_ints, mac_ints, dma_ints;
+	struct emac_private *arap;
+	struct emac_common *arapc;
+	struct net_device *dev = (struct net_device *)dev_id;
+
+	const u32 handledDmaInts = DmaTxDone | DmaRxDone | DmaMacInterrupt;
+
+
+	if (dev == NULL) {
+		/* Fixed: the old message read dev->name from the NULL pointer
+		 * it had just tested for. */
+		printk(KERN_ERR DRV_NAME ": isr: null dev ptr\n");
+		return IRQ_RETVAL(1);
+	}
+
+	arap = netdev_priv(dev);
+	arapc = &arap->com;
+
+	/* Snapshot and acknowledge only the sources this ISR handles */
+	pending_ints = emac_rd(arapc, EMAC_DMA_STATUS_IRQ) & handledDmaInts;
+	emac_wr(arapc, EMAC_DMA_STATUS_IRQ, pending_ints);
+
+	/* Handle RX interrupts first to minimize chance of overrun */
+	if (pending_ints & DmaRxDone) {
+		arap->mitg_intr_count++;
+		/* Mask RX-done; emac_rx_poll() re-enables it when done */
+		dma_ints = emac_rd(arapc, EMAC_DMA_INT_ENABLE);
+		dma_ints &= ~DmaRxDone;
+		emac_wr(arapc, EMAC_DMA_INT_ENABLE, dma_ints);
+		napi_schedule(&arap->napi);
+	}
+
+	/* Handle TX interrupts next to minimize chance of overrun */
+	if ((pending_ints & DmaTxDone)) {
+		arap->arap_data.tx_done_intr++;
+		if (arap->tx_queue_stopped) {
+			/* Just wake the queue up if we are stopped*/
+			netif_wake_queue(dev);
+		}
+	}
+
+	if (pending_ints & DmaMacInterrupt) {
+		mac_ints = emac_rd(arapc, EMAC_MAC_INT);
+		emac_wr(arapc, EMAC_MAC_INT, mac_ints);
+		if (mac_ints & MacUnderrun) {
+			arap->mac_underrun++;
+			printk(KERN_ERR "%s: MAC underrun\n", dev->name);
+		}
+		if (mac_ints & MacJabber) {
+			arap->mac_jabber++;
+			printk(KERN_ERR "%s: Jabber detected\n", dev->name);
+		}
+	}
+	return IRQ_RETVAL(1);
+}
+
+/*
+ * The Tx ring has been full longer than the watchdog timeout
+ * value. The transmitter must be hung?
+ * Only logs and refreshes trans_start; no hardware reset is attempted.
+ */
+static void emac_tx_timeout(struct net_device *dev)
+{
+	printk(KERN_ERR "%s: emac_tx_timeout: dev=%p\n", dev->name, dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	netif_trans_update(dev);
+#else
+	dev->trans_start = jiffies;
+#endif
+}
+
+/* Soft-reset the EMAC DMA block and re-run its base initialisation. */
+static void reset_dma(struct emac_common *arapc)
+{
+	/* Normally we would read-modify-write to set & clear the reset bit, but
+	 * due to a bug we cannot read any Ethernet registers whilst the block
+	 * is reset.  We therefore just write the desired initial value back to
+	 * the config register and clear the reset.
+	 */
+	emac_wr(arapc, EMAC_DMA_CONFIG, DmaSoftReset);
+	emac_wr(arapc, EMAC_DMA_CONFIG, Dma4WordBurst | Dma64BitMode);
+	emac_lib_init_dma(arapc);
+}
+
+/*
+ * Halt MAC and DMA TX/RX, waiting for the corresponding stopped status
+ * bits.  If either engine fails to report stopped within the timeout,
+ * fall back to a full DMA soft reset.
+ */
+static void stop_traffic(struct emac_common *arapc)
+{
+	int force_reset = 0;
+
+	/* Stop transmit */
+	emac_clrbits(arapc, EMAC_MAC_TX_CTRL, MacTxEnable);
+	emac_clrbits(arapc, EMAC_DMA_CTRL, DmaStartTx);
+	if (!emac_wait(arapc, EMAC_DMA_STATUS_IRQ, DmaTxStopped, DmaTxStopped, TIMEOUT_DMA_STATUS_IRQ, NULL)) {
+		/* Status flag never rose; double-check the state machine field */
+		if ((emac_rd(arapc, EMAC_DMA_STATUS_IRQ) & DmaTxStateMask) != DmaTxStateStopped) {
+			printk(KERN_ERR"Failed to stop Ethernet TX DMA\n");
+			force_reset = 1;
+		}
+	}
+
+	/* Stop receive */
+	emac_clrbits(arapc, EMAC_MAC_RX_CTRL, MacRxEnable);
+	emac_clrbits(arapc, EMAC_DMA_CTRL, DmaStartRx);
+	if (!emac_wait(arapc, EMAC_DMA_STATUS_IRQ, DmaRxStopped, DmaRxStopped, TIMEOUT_DMA_STATUS_IRQ, NULL)) {
+		if ((emac_rd(arapc, EMAC_DMA_STATUS_IRQ) & DmaRxStateMask) != DmaRxStateStopped) {
+			printk(KERN_ERR"Failed to stop Ethernet RX DMA\n");
+			force_reset = 1;
+		}
+	}
+
+	if (force_reset) {
+		reset_dma(arapc);
+	}
+}
+
+/* Enable MAC and DMA engines for both directions. */
+static void start_traffic(struct emac_private *arap)
+{
+	struct emac_common *arapc = &arap->com;
+
+	/* These IRQ flags must be cleared when we start as stop_traffic()
+	 * relys on them to indicate when activity has stopped.
+	 */
+	emac_wr(arapc, EMAC_DMA_STATUS_IRQ, DmaTxStopped | DmaRxStopped);
+
+	/* Start transmit */
+	emac_setbits(arapc, EMAC_MAC_TX_CTRL, MacTxEnable);
+	emac_setbits(arapc, EMAC_DMA_CTRL, DmaStartTx);
+
+	/* Start receive */
+	emac_setbits(arapc, EMAC_DMA_CTRL, DmaStartRx);
+	emac_setbits(arapc, EMAC_MAC_RX_CTRL, MacRxEnable);
+}
+
+/*
+ * Reset all software ring state: RX descriptors are handed back to the
+ * DMA engine, pending TX skbs are freed and their descriptors reclaimed.
+ * Must only be called while traffic is stopped.
+ */
+static void clear_buffers(struct emac_private *arap)
+{
+	/* Discard data from all Rx and Tx buffers */
+	struct emac_common *arapc = &arap->com;
+	int i;
+
+	arap->arap_data.tx_done_intr = 0;
+	arap->arap_data.tx_head = 0;
+	arap->arap_data.tx_tail = 0;
+	arap->arap_data.tx_full = 0;
+	arap->tx_queue_stopped = 0;
+	arap->rx_index = 0;
+	if (arapc->rx.descs) {
+		for (i = 0; i < RUBY_EMAC_NUM_RX_BUFFERS; i++) {
+			(arapc->rx.descs + i)->status |= RxDescOwn;
+		}
+	}
+	if (arapc->tx.descs) {
+		for (i = 0; i < RUBY_EMAC_NUM_TX_BUFFERS; i++) {
+			(arapc->tx.descs + i)->status &= ~TxDescOwn;
+			/* bufaddr2 stores the skb pointer stashed by emac_tx() */
+			if ((arapc->tx.descs + i)->bufaddr2) {
+				struct sk_buff *skb_old = (struct sk_buff *)(arapc->tx.descs + i)->bufaddr2;
+				dev_kfree_skb(skb_old);
+				(arapc->tx.descs + i)->bufaddr2 = 0;
+				(arapc->tx.descs + i)->bufaddr1 = 0;
+			}
+		}
+	}
+}
+
+/*
+ * Module entry point: probe every port in iflist.
+ * Returns 0 if at least one EMAC was found, -ENODEV otherwise.
+ */
+static int __init emac_init_module(void)
+{
+	int i, found_one = 0;
+
+	emac_lib_enable(0);
+
+	/* Probe devices */
+	for(i = 0; i < sizeof(iflist) / sizeof(iflist[0]); i++) {
+		struct net_device *dev = emac_probe(i);
+		iflist[i].dev = dev;
+		if (dev) {
+			found_one++;
+		}
+	}
+	if (!found_one) {
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* net_device callbacks: TX path is local (emac_tx); stats, ioctl and
+ * rx-mode handling come from the shared emac_lib. */
+static const struct net_device_ops emac_device_ops = {
+	.ndo_open = emac_open,
+	.ndo_stop = emac_close,
+	.ndo_start_xmit = emac_tx,
+	.ndo_get_stats = emac_lib_stats,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	.ndo_set_rx_mode = emac_lib_set_rx_mode,
+#else
+	.ndo_set_multicast_list = emac_lib_set_rx_mode,
+#endif
+	.ndo_do_ioctl = emac_lib_ioctl,
+	.ndo_tx_timeout = emac_tx_timeout,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/* Free every RX buffer still held by the driver. */
+static void emac_bufs_free(struct net_device *dev)
+{
+	struct emac_private *arap = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < RUBY_EMAC_NUM_RX_BUFFERS; i++) {
+		if (arap->rxbufs[i]) {
+			dev_kfree_skb(arap->rxbufs[i]);
+			arap->rxbufs[i] = NULL;
+		}
+	}
+}
+
+/*
+ * Populate the whole RX ring.  Returns 0 on success, -1 on the first
+ * allocation failure (buffers allocated so far are left in place).
+ */
+int emac_bufs_alloc(struct net_device *dev)
+{
+	int i;
+
+	/* Allocate rx buffers */
+	for (i = 0; i < RUBY_EMAC_NUM_RX_BUFFERS; i++) {
+		if (allocate_new_rx_buf(dev, i)) {
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Set the interface MAC address from the board-level base address,
+ * adding 'port_num' to the low 24 bits (with carry across bytes 5..3)
+ * so each port gets a unique address.
+ */
+static void emac_set_eth_addr(struct net_device *dev, int port_num)
+{
+	memcpy(dev->dev_addr, get_ethernet_addr(), ETH_ALEN);
+
+	if (port_num > 0) {
+		u32 val;
+
+		val = (u32)dev->dev_addr[5] +
+			((u32)dev->dev_addr[4] << 8) +
+			((u32)dev->dev_addr[3] << 16);
+		val += port_num;
+		dev->dev_addr[5] = (unsigned char)val;
+		dev->dev_addr[4] = (unsigned char)(val >> 8);
+		dev->dev_addr[3] = (unsigned char)(val >> 16);
+	}
+}
+
+/*
+ * seq_file show callback.  The net_device is carried in the seq_file's
+ * private field (set by single_open() at open time).
+ * Fixed: the old code forwarded the seq iterator token 'v' - which is
+ * never the device pointer - causing arasan_proc_rd() to treat an
+ * arbitrary value as a net_device.
+ */
+static int arasan_seq_show(struct seq_file *file, void *v)
+{
+	return arasan_proc_rd(file, file->private);
+}
+
+/*
+ * /proc open hook.  Fixed: pass the net_device registered through
+ * proc_create_data() (retrieved via PDE_DATA) into the seq_file's
+ * private field; the old code passed NULL, so the read handler could
+ * never locate the device.
+ */
+static int arasan_proc_open(struct inode *inode, struct file *file)
+{
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "-->Enter\n");
+	DBGPRINTF(DBG_LL_ALL, QDRV_LF_TRACE, "<--Exit\n");
+
+	return single_open(file, arasan_seq_show, PDE_DATA(inode));
+}
+
+/* seq_file-based /proc hooks for the per-port stats entry. */
+static struct file_operations arasan_proc_ops =
+{
+	.owner   = THIS_MODULE,
+	.open    = arasan_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Probe and register one EMAC port.  Reads the board configuration,
+ * allocates the net_device, descriptor rings and RX buffers, sets up
+ * MII and NAPI, registers the device and creates its /proc entries.
+ * Returns the net_device, or NULL if the port is unused or any step
+ * fails (all partially-acquired resources are released).
+ */
+static struct net_device* emac_probe(int port_num)
+{
+	struct emac_private *arap = NULL;
+	struct emac_common *arapc = NULL;
+	struct net_device *dev = NULL;
+	int irq, err;
+	unsigned long base;
+	char devname[IFNAMSIZ + 1];
+	int emac_cfg;
+	int emac_phy;
+
+	printk(KERN_INFO "%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+	if (emac_lib_board_cfg(port_num, &emac_cfg, &emac_phy)) {
+		return NULL;
+	}
+
+	if ((emac_cfg & EMAC_IN_USE) == 0) {
+		return NULL;
+	}
+
+	/* Get requested port data */
+	base  = PHYS_IO_ADDRESS(iflist[port_num].base_addr);
+	irq = iflist[port_num].irq;
+
+	/* Allocate device structure */
+	sprintf(devname, "eth%d_emac%d", soc_id(), port_num);
+	/* NOTE(review): version threshold 4.6.3 differs from the 4.7.0 used
+	 * everywhere else in this file - confirm intended. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,3)
+	dev = alloc_netdev(sizeof(struct emac_private), devname, NET_NAME_UNKNOWN, ether_setup);
+#else
+	dev = alloc_netdev(sizeof(struct emac_private), devname, ether_setup);
+#endif
+	if (!dev) {
+		printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
+		return NULL;
+	}
+
+	/* Initialize device structure fields */
+	emac_set_eth_addr(dev, port_num);
+	dev->base_addr = base;
+	dev->irq = irq;
+	dev->watchdog_timeo = ETH_TX_TIMEOUT;
+	dev->netdev_ops = &emac_device_ops;
+	dev->tx_queue_len = QTN_BUFS_EMAC_TX_QDISC;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dev->ethtool_ops = &emac_lib_ethtool_ops;
+#else
+	SET_ETHTOOL_OPS(dev, &emac_lib_ethtool_ops);
+#endif
+
+	/* Initialize private data */
+	arap = netdev_priv(dev);
+	arapc = &arap->com;
+	memset(arap, 0, sizeof(struct emac_private));
+	arapc->dev = dev;
+	arapc->mac_id = port_num;
+	arapc->vbase = iflist[port_num].base_addr;
+	arapc->mdio_vbase = iflist[port_num].mdio_base_addr;
+	arapc->emac_cfg = emac_cfg;
+	arapc->phy_addr = emac_phy;
+
+	/* Initialize MII */
+	if (emac_lib_mii_init(dev)) {
+		goto mii_init_error;
+	}
+
+	/* Allocate descs & buffers */
+	if (emac_lib_descs_alloc(dev,
+				RUBY_EMAC_NUM_RX_BUFFERS, 0,
+				RUBY_EMAC_NUM_TX_BUFFERS, 0)) {
+		goto descs_alloc_error;
+	}
+	if (emac_bufs_alloc(dev)) {
+		goto bufs_alloc_error;
+	}
+
+	/* Initialize NAPI */
+	netif_napi_add(dev, &arap->napi, emac_rx_poll, board_napi_budget());
+
+	/* The interface may have been used by the bootloader, so shut it down
+	 * here in preparation for bringing it up later.
+	 */
+	shut_down_interface(dev);
+
+	/* Register device */
+	if ((err = register_netdev(dev)) != 0) {
+		printk(KERN_ERR "%s: Cannot register net device, error %d\n", DRV_NAME, err);
+		goto netdev_register_error;
+	}
+	printk(KERN_INFO"%s: Arasan Ethernet found at 0x%lx, irq %d\n", dev->name, base, irq);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	proc_create_data(iflist[arapc->mac_id].proc_name, 0, NULL, &arasan_proc_ops, dev);
+#else
+	create_proc_read_entry(iflist[arapc->mac_id].proc_name, 0, NULL, arasan_proc_rd, dev);
+#endif
+	emac_lib_phy_power_create_proc(dev);
+	emac_lib_phy_reg_create_proc(dev);
+	emac_lib_mdio_sysfs_create(dev);
+
+	return dev;
+
+netdev_register_error:
+	emac_bufs_free(dev);
+bufs_alloc_error:
+	emac_lib_descs_free(dev);
+descs_alloc_error:
+	emac_lib_mii_exit(dev);
+mii_init_error:
+	free_netdev(dev);
+
+	return NULL;
+}
+
+/* Undo everything emac_probe() did for one device; safe to call with NULL. */
+static void release_all(struct net_device *dev)
+{
+	struct emac_private *arap;
+	struct emac_common *arapc;
+
+	if (!dev) {
+		return;
+	}
+
+	arap = netdev_priv(dev);
+	arapc = &arap->com;
+
+	/* Tear down in reverse order of creation */
+	emac_lib_mdio_sysfs_remove(dev);
+	emac_lib_phy_reg_remove_proc(dev);
+	emac_lib_phy_power_remove_proc(dev);
+	remove_proc_entry(iflist[arapc->mac_id].proc_name, NULL);
+
+	unregister_netdev(dev);
+
+	shut_down_interface(dev);
+	emac_bufs_free(dev);
+	emac_lib_descs_free(dev);
+	emac_lib_mii_exit(dev);
+
+	free_netdev(dev);
+}
+
+/* Reinitialise the port and start traffic with interrupts enabled. */
+static void bring_up_interface(struct net_device *dev)
+{
+	/* Interface will be ready to send/receive data, but will need hooking
+	 * up to the interrupts before anything will happen.
+	 */
+	struct emac_private *arap = netdev_priv(dev);
+
+	/* Start from a clean state even if already running */
+	shut_down_interface(dev);
+	start_traffic(arap);
+	enable_emac_ints(arap);
+	emac_lib_set_rx_mode(dev);
+}
+
+/* Stop all MAC/DMA activity, reinitialise the blocks and reset ring state. */
+static void shut_down_interface(struct net_device *dev)
+{
+	/* Close down MAC and DMA activity and clear all data. */
+	struct emac_private *arap = netdev_priv(dev);
+	struct emac_common *arapc = &arap->com;
+
+	disable_emac_ints(arap);
+	stop_traffic(arapc);
+	emac_lib_init_dma(arapc);
+	emac_lib_init_mac(dev);
+	clear_buffers(arap);
+}
+
+/*
+ * Periodic timer callback: toggles hardware RX interrupt mitigation
+ * based on the interrupt count observed over the last timer period,
+ * then re-arms itself.  'arg' must be the struct emac_private pointer
+ * (cast below).
+ * NOTE(review): emac_mitig_state is function-static and therefore
+ * shared by both EMAC ports - confirm single-port use, or move it
+ * into struct emac_private.
+ */
+static void on_off_rxmitigation(unsigned long arg)
+{
+	struct emac_private *arap = (struct emac_private *)arg;
+	static int emac_mitig_state = EMAC_MITIGATION_DISABLED;
+
+	if ((emac_mitig_state == EMAC_MITIGATION_DISABLED) &&
+		(arap->mitg_intr_count > EMAC_MITIGATION_EN_THRESHOLD)) {
+		enable_emac_irq_mitigation(arap, EMAC_IRQ_MITIGATION_FRAME_COUNTER,
+					   EMAC_IRQ_MITIGATION_TIMEOUT);
+		emac_mitig_state = EMAC_MITIGATION_ENABLED;
+	} else if ((emac_mitig_state == EMAC_MITIGATION_ENABLED) &&
+		(arap->mitg_intr_count < EMAC_MITIGATION_DIS_THRESHOLD)) {
+		disable_emac_irq_mitigation(arap);
+		emac_mitig_state = EMAC_MITIGATION_DISABLED;
+	}
+
+	arap->mitg_intr_count = 0;
+	mod_timer(&arap->mitg_timer, jiffies + EMAC_MITIGATION_TIMER_FREQ);
+}
+
+/*
+ * ndo_open handler: bring up the hardware, enable NAPI, hook the IRQ
+ * and (for fast/gig links) start the RX interrupt mitigation timer.
+ * Returns 0 on success or the request_irq() error.
+ */
+static int emac_open(struct net_device *dev)
+{
+	int retval = 0;
+	struct emac_private *arap = netdev_priv(dev);
+
+	bring_up_interface(dev);
+
+	napi_enable(&arap->napi);
+
+	if ((retval = request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))) {
+		printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
+		goto err_out;
+	}
+
+	/* Enable RX mitigation only for gigabit or auto negotiated interfaces */
+	if (board_slow_ethernet() == 0) {
+		/* Fast and Gig eth: Start RX interrupt mitigation timer */
+		init_timer(&arap->mitg_timer);
+		arap->mitg_timer.function = on_off_rxmitigation;
+		/* Fixed: on_off_rxmitigation() casts .data to
+		 * struct emac_private *; the old code passed &arap->com,
+		 * which only worked because 'com' is the first member. */
+		arap->mitg_timer.data = (unsigned long)arap;
+		mod_timer(&arap->mitg_timer, jiffies + EMAC_MITIGATION_TIMER_FREQ);
+	} else {
+		/* slow eth */
+		disable_emac_irq_mitigation(arap);
+	}
+
+	emac_lib_phy_start(dev);
+	netif_start_queue(dev);
+	emac_lib_pm_emac_add_notifier(dev);
+
+	return 0;
+
+err_out:
+	napi_disable(&arap->napi);
+	return retval;
+}
+
+/* ndo_stop handler: reverse of emac_open(). Always returns 0. */
+static int emac_close(struct net_device *dev)
+{
+	struct emac_private *arap = netdev_priv(dev);
+
+	emac_lib_pm_emac_remove_notifier(dev);
+
+	napi_disable(&arap->napi);
+
+	emac_lib_phy_stop(dev);
+	shut_down_interface(dev);
+	netif_stop_queue(dev);
+	/* NOTE(review): for slow-ethernet ports the mitigation timer is never
+	 * init_timer()'d (see emac_open); del_timer() on the zeroed structure
+	 * appears benign - confirm. */
+	del_timer(&arap->mitg_timer);
+	free_irq(dev->irq, dev);
+	return 0;
+}
+
+/* Module exit: release every probed port (release_all skips NULL entries). */
+static void __exit emac_cleanup_module(void)
+{
+	int i;
+
+	for (i = 0; i < sizeof(iflist) / sizeof(iflist[0]); i++) {
+		release_all(iflist[i].dev);
+	}
+}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/*
+ * /proc read body (seq_file variant).  'data' is expected to be the
+ * net_device pointer; prints the emac_lib stats plus the TX queue state
+ * counters.  Returns 0 in all cases.
+ */
+static int arasan_proc_rd(struct seq_file *sfile, void *data)
+{
+	struct net_device *dev = data;
+	struct emac_private *arap = NULL;
+
+	if (dev) {
+		arap = netdev_priv(dev);
+	}
+
+	if (!dev || !arap) {
+		printk("%s: NULL dev or dev->priv pointer\n", __FUNCTION__);
+		return 0;
+	}
+
+	emac_lib_stats_sprintf(sfile, dev);
+	seq_printf(sfile, "%2s#%02d %6s: %d, %d, %d, %d, %d\n", "Queue", 0, "State",
+			arap->arap_data.tx_full, arap->arap_data.tx_stopped_count,
+			arap->arap_data.tx_done_intr, arap->arap_data.tx_head, arap->arap_data.tx_tail);
+
+	return 0;
+}
+
+#else
+/*
+ * Legacy /proc read handler (pre-seq_file API).  'data' is the
+ * net_device registered with create_proc_read_entry(); writes the same
+ * stats into 'buf' and returns the byte count.
+ */
+static int arasan_proc_rd(char *buf, char **start, off_t offset, int count,
+		int *eof, void *data)
+{
+	char *p = buf;
+	struct net_device *dev = data;
+	struct emac_private *arap = NULL;
+
+	if (dev) {
+		arap = netdev_priv(dev);
+	}
+
+	if (!dev || !arap) {
+		printk("%s: NULL dev or dev->priv pointer\n", __FUNCTION__);
+		*eof = 1;
+		return 0;
+	}
+
+	p += emac_lib_stats_sprintf(p, dev);
+	p += sprintf(p, "%2s#%02d %6s: %d, %d, %d, %d, %d\n", "Queue", 0, "State",
+			arap->arap_data.tx_full, arap->arap_data.tx_stopped_count,
+			arap->arap_data.tx_done_intr, arap->arap_data.tx_head, arap->arap_data.tx_tail);
+	*eof = 1;
+
+	return p - buf;
+}
+#endif
+
+module_init(emac_init_module);
+module_exit(emac_cleanup_module);
+
diff --git a/drivers/qtn/ruby/arasan_emac_ahb.h b/drivers/qtn/ruby/arasan_emac_ahb.h
new file mode 100644
index 0000000..4018e94
--- /dev/null
+++ b/drivers/qtn/ruby/arasan_emac_ahb.h
@@ -0,0 +1,83 @@
+/*
+ *  drivers/net/arasan_emac_ahb.h
+ *
+ *  Copyright (c) Quantenna Communications Incorporated 2007.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __DRIVERS_NET_ARASAN_EMAC_AHB_H
+#define __DRIVERS_NET_ARASAN_EMAC_AHB_H	1
+
+#define ETH_TX_TIMEOUT (100*HZ)
+
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include "emac_lib.h"
+
+#ifndef __ASSEMBLY__
+
+#if (RUBY_EMAC_NUM_TX_BUFFERS & (RUBY_EMAC_NUM_TX_BUFFERS - 1))
+#error "TX BUFFER needs to be power of two for MODULO operations"
+#endif
+#if (RUBY_EMAC_NUM_RX_BUFFERS & (RUBY_EMAC_NUM_RX_BUFFERS - 1))
+#error "RX BUFFER needs to be power of two for MODULO operations"
+#endif
+
+#define EMAC_TX_BUFF_INDX(X) ((X) & (RUBY_EMAC_NUM_TX_BUFFERS - 1))
+#define EMAC_RX_BUFF_INDX(X) ((X) & (RUBY_EMAC_NUM_RX_BUFFERS - 1))
+
+struct emac_private {
+	struct emac_common com;		/* Must be first */
+
+	struct sk_buff *rxbufs[RUBY_EMAC_NUM_RX_BUFFERS];
+	dma_addr_t rxdmabufs[RUBY_EMAC_NUM_RX_BUFFERS];
+	int rx_index;
+
+	/* Tx_head points to the next descriptor to use in the ring.
+	 * Tx_tail points to the start of the descriptors that have been used
+	 * for transmission and who need their associated buffers freeing once
+	 * the DMA engine has finished with them.  Tx_full is true if the
+	 * descriptor ring has filled up whilst waiting for the DMA engine
+	 * to send the packets.
+	 */
+
+	spinlock_t flowlock;
+	int tx_queue_stopped;
+	u32 rx_skb_alloc_failures;
+	u32 rx_fragmented_frame_discards;
+	u32 tx_overlength_frame_discards;
+	u32 mac_underrun;
+	u32 mac_jabber;
+
+	struct napi_struct napi;
+
+	struct {
+		u32 tx_done_intr;
+		u32 tx_full;
+		u32 tx_stopped_count;
+		u32 tx_head;
+		u32 tx_tail;
+	} arap_data;
+
+	struct timer_list mitg_timer;
+	unsigned long mitg_intr_count;
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/drivers/qtn/ruby/board_config.c b/drivers/qtn/ruby/board_config.c
new file mode 100644
index 0000000..b8f44c6
--- /dev/null
+++ b/drivers/qtn/ruby/board_config.c
@@ -0,0 +1,306 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <qtn/shared_defs.h>
+#include <asm/kdebug.h>
+#include <asm/board/board_config.h>
+#include <common/ruby_board_db.h>
+#include <common/ruby_partitions.h>
+
+static int			global_board_id = -1;
+static int			global_spi_protect_mode = 0;
+static int			g_slow_ethernet = 0;
+static const board_cfg_t	g_board_cfg_table[] = QTN_BOARD_DB;
+static ruby_bda_t bda_copy;
+
+#define MEM_SIZE_128MB (128 * 1024 * 1024)
+
+static int __init
+setup_slow_ethernet(char *buf)
+{
+	g_slow_ethernet = (buf != NULL);
+	return 0;
+}
+early_param("slow_ethernet", setup_slow_ethernet);
+
+static void __init
+update_ddr_size(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	/* Memory size is determined from device tree specification file*/
+	return;
+#else
+	/* update DDR size */
+	extern unsigned long end_mem;
+	int ddr_size = 0;
+	if (get_board_config(BOARD_CFG_DDR_SIZE, &ddr_size) < 0) {
+		printk_init("UNKNOWN DDR SIZE !!!\n");
+	} else {
+#if !TOPAZ_MMAP_UNIFIED
+		if (ddr_size > MEM_SIZE_128MB)
+			ddr_size = MEM_SIZE_128MB;
+#endif
+		end_mem = RUBY_DRAM_BEGIN + ddr_size;
+	}
+#endif
+}
+
+void __init
+qtn_set_hw_config_id(int id)
+{
+	global_board_id = id;
+	update_ddr_size();
+}
+
+int qtn_get_hw_config_id(void)
+{
+	return global_board_id;
+}
+EXPORT_SYMBOL(qtn_get_hw_config_id);
+
+void __init
+qtn_set_spi_protect_config(u32 mode)
+{
+	global_spi_protect_mode = mode;
+}
+
+int qtn_get_spi_protect_config(void)
+{
+	return global_spi_protect_mode;
+}
+EXPORT_SYMBOL(qtn_get_spi_protect_config);
+
+static const ruby_bda_t *get_bda(void)
+{
+	return &bda_copy;
+}
+
+static int
+lookup_config( const board_cfg_t *p_board_config_entry, int board_config_param, int *p_board_config_value )
+{
+	int	retval = 0;
+
+	switch (board_config_param)
+	{
+	  case BOARD_CFG_WIFI_HW:
+		*p_board_config_value = p_board_config_entry->bc_wifi_hw;
+		break;
+
+	  case BOARD_CFG_EMAC0:
+		*p_board_config_value = p_board_config_entry->bc_emac0;
+		break;
+
+	  case BOARD_CFG_EMAC1:
+		*p_board_config_value = p_board_config_entry->bc_emac1;
+		break;
+
+	  case BOARD_CFG_PHY1_ADDR:
+		*p_board_config_value = p_board_config_entry->bc_phy1_addr;
+		break;
+
+	  case BOARD_CFG_PHY0_ADDR:
+		*p_board_config_value = p_board_config_entry->bc_phy0_addr;
+		break;
+
+	  case BOARD_CFG_UART1:
+		*p_board_config_value = p_board_config_entry->bc_uart1;
+		break;
+
+	  case BOARD_CFG_PCIE:
+		*p_board_config_value = p_board_config_entry->bc_pcie;
+		break;
+
+	  case BOARD_CFG_RGMII_TIMING:
+		*p_board_config_value = p_board_config_entry->bc_rgmii_timing;
+		break;
+
+	  case BOARD_CFG_DDR_SIZE:
+		*p_board_config_value = p_board_config_entry->bc_ddr_size;
+		break;
+
+	  case BOARD_CFG_FLASH_SIZE:
+		{
+			if (global_board_id == QTN_RUBY_AUTOCONFIG_ID ||
+					global_board_id == QTN_RUBY_UNIVERSAL_BOARD_ID) {
+				*p_board_config_value = get_bda()->bda_flashsz;
+			} else {
+				*p_board_config_value = 0;
+			}
+		}
+		break;
+	  default:
+		retval = -1;
+	}
+
+	return( retval );
+}
+
+int
+get_board_config( int board_config_param, int *p_board_config_value )
+{
+	int	iter;
+	int	retval = -1;
+	int	found_entry = 0;
+
+	static int	get_board_config_error_is_reported = 0;
+
+	if (p_board_config_value == NULL) {
+		return( -1 );
+	}
+
+	if (global_board_id == QTN_RUBY_AUTOCONFIG_ID ||
+			global_board_id == QTN_RUBY_UNIVERSAL_BOARD_ID) {
+		return lookup_config(&get_bda()->bda_boardcfg, board_config_param,
+					p_board_config_value);
+	}
+	//printk("get_board_config:board ID %d param %d\n",global_board_id,board_config_param);
+	for (iter = 0; iter < sizeof( g_board_cfg_table ) / sizeof( g_board_cfg_table[ 0 ] ) && (found_entry == 0); iter++) {
+		if (global_board_id == g_board_cfg_table[ iter ].bc_board_id) {
+			found_entry = 1;
+			retval = lookup_config( &g_board_cfg_table[ iter ], board_config_param, p_board_config_value );
+		}
+	}
+	/*
+	 * Default to the first entry in the table if not found.
+	 * Likely will not work for customer boards, but Q bringup boards might be OK.
+	 */
+	if (found_entry == 0) {
+		retval = lookup_config( &g_board_cfg_table[ 0 ], board_config_param, p_board_config_value );
+		if (get_board_config_error_is_reported == 0) {
+			printk(KERN_ERR "get board config: HW config ID %d not recognized, defaulting to %d\n",
+					 global_board_id, g_board_cfg_table[ 0 ].bc_board_id);
+			get_board_config_error_is_reported = 1;
+		}
+	}
+
+	return( retval );
+}
+EXPORT_SYMBOL(get_board_config);
+
+int
+board_slow_ethernet(void)
+{
+	int emac0_cfg = 0, emac1_cfg = 0;
+
+	if (g_slow_ethernet) {
+			return 1;
+	}
+
+#ifdef DETECT_SLOW_ETHERNET
+	get_board_config(BOARD_CFG_EMAC0, &emac0_cfg);
+	get_board_config(BOARD_CFG_EMAC1, &emac1_cfg);
+#endif
+
+	return ( (emac0_cfg | emac1_cfg) & EMAC_SLOW_PHY);
+}
+EXPORT_SYMBOL(board_slow_ethernet);
+
+int
+board_napi_budget(void)
+{
+	return 128;
+}
+EXPORT_SYMBOL(board_napi_budget);
+
+int
+get_all_board_params(char *p)
+{
+	int iter;
+	int ch_printed;
+	const board_cfg_t *board = NULL;
+
+	if (global_board_id == QTN_RUBY_AUTOCONFIG_ID ||
+			global_board_id == QTN_RUBY_UNIVERSAL_BOARD_ID) {
+		board = &get_bda()->bda_boardcfg;
+	} else {
+		for (iter = 0; iter < ARRAY_SIZE(g_board_cfg_table); iter++) {
+			if (global_board_id == g_board_cfg_table[iter].bc_board_id) {
+				board = &g_board_cfg_table[iter];
+				break;
+			}
+		}
+	}
+
+	if (!board)
+		return 0;
+
+	ch_printed = snprintf(p, PAGE_SIZE,
+				"board_id\t%d\n"
+				"name\t%s\n"
+				"ddr_type\t%d\n"
+				"ddr_speed\t%d\n"
+				"ddr_size\t%d\n"
+				"emac0\t%d\n"
+				"emac1\t%d\n"
+				"phy0_addr\t%d\n"
+				"phy1_addr\t%d\n"
+				"spi1\t%d\n"
+				"wifi_hw\t%d\n"
+				"uart1\t%d\n"
+				"bd_pcie\t%d\n"
+				"rgmii_timing\t%d\n",
+				board->bc_board_id, board->bc_name,
+				board->bc_ddr_type, board->bc_ddr_speed, board->bc_ddr_size,
+				board->bc_emac0, board->bc_emac1, board->bc_phy0_addr,
+				board->bc_phy1_addr, board->bc_spi1, board->bc_wifi_hw,
+				board->bc_uart1, board->bc_pcie, board->bc_rgmii_timing);
+	if (ch_printed < 0)
+		ch_printed = 0;
+
+	return ch_printed;
+}
+EXPORT_SYMBOL(get_all_board_params);
+
+void __init
+parse_board_config(char *cmdline)
+{
+	/* parse command line */
+	int cmdline_len = strlen(cmdline);
+	const char *var = "hw_config_id=";
+	int var_len = strlen(var);
+	while (cmdline_len > var_len) {
+		if (!strncmp(cmdline, var, var_len)) {
+			sscanf(cmdline + var_len, "%d", &global_board_id);
+			printk_init("%s: Board id: %d\n", __func__, global_board_id);
+			break;
+		}
+		++cmdline;
+		--cmdline_len;
+	}
+
+	/* we should already know board id */
+	if (global_board_id < 0) {
+		printk_init("UNKNOWN BOARD ID !!!\n");
+	}
+
+	/*
+	 * Copy bda structure so that it doesn't get overwritten by an A-MSDU with
+	 * a size greater than CONFIG_ARC_CONF_SIZE
+	 */
+	memcpy(&bda_copy, (void *)CONFIG_ARC_CONF_BASE, sizeof(bda_copy));
+	bda_copy.bda_boardcfg.bc_name = bda_copy.bda_boardname;
+
+	update_ddr_size();
+}
+
diff --git a/drivers/qtn/ruby/board_config.h b/drivers/qtn/ruby/board_config.h
new file mode 100644
index 0000000..cce6626
--- /dev/null
+++ b/drivers/qtn/ruby/board_config.h
@@ -0,0 +1,42 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+#ifndef __BOARD_RUBY_TYPE_H
+#define __BOARD_RUBY_TYPE_H
+
+#include <common/ruby_board_cfg.h>
+
+void __init qtn_set_hw_config_id(int id);
+void __init qtn_set_spi_protect_config(u32 mode);
+
+int get_board_config( int board_config_param, int *p_board_config_value );
+int get_all_board_params( char * p );
+
+int board_slow_ethernet(void);
+
+int board_napi_budget(void);
+
+void parse_board_config(char *cmdline);
+
+void parse_spi_config(char *cmdline);
+
+void parse_pcie_intr_config(char *cmdline);
+
+#endif // #ifndef __BOARD_RUBY_TYPE_H
diff --git a/drivers/qtn/ruby/clock.c b/drivers/qtn/ruby/clock.c
new file mode 100644
index 0000000..6a9faa2
--- /dev/null
+++ b/drivers/qtn/ruby/clock.c
@@ -0,0 +1,107 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <asm/hardware.h>
+#include <asm/board/platform.h>
+
+struct clk {
+	unsigned long rate;	/* Clock rate in HZ */
+};
+
+struct qtn_clk_lookup_entry {
+	const char *name;
+	struct clk *clk_p;
+};
+
+static struct clk qtn_clk_bus = {
+	.rate	= 0,
+};
+
+static struct qtn_clk_lookup_entry qtn_clk_list[] = {
+	{ .name = "qtn-i2c.0", .clk_p = &qtn_clk_bus },
+	{ .name = "bus-clk", .clk_p = &qtn_clk_bus },
+};
+
+static unsigned long qtn_clk_get_bus_freq(void)
+{
+	unsigned long freq = 0;
+
+#ifdef CONFIG_ARC700_FPGA
+	freq = CONFIG_ARC700_FPGA_CLK;
+#else
+	switch(readl(IO_ADDRESS(RUBY_SYS_CTL_CTRL)) & RUBY_SYS_CTL_MASK_CLKSEL) {
+	case RUBY_SYS_CTL_CLKSEL(0x0):
+		freq = RUBY_SYS_CTL_CLKSEL_00_BUS_FREQ;
+		break;
+	case RUBY_SYS_CTL_CLKSEL(0x1):
+		freq = RUBY_SYS_CTL_CLKSEL_01_BUS_FREQ;
+		break;
+	case RUBY_SYS_CTL_CLKSEL(0x2):
+		freq = RUBY_SYS_CTL_CLKSEL_10_BUS_FREQ;
+		break;
+	case RUBY_SYS_CTL_CLKSEL(0x3):
+		freq = RUBY_SYS_CTL_CLKSEL_11_BUS_FREQ;
+		break;
+	default:
+		panic("Logic error!\n");
+		break;
+	}
+#endif
+	return freq;
+}
+
+static int __init qtn_clk_init_rates(void)
+{
+	/* Clock source bits are latched on power-on */
+	qtn_clk_bus.rate = qtn_clk_get_bus_freq();
+	return 0;
+}
+
+arch_initcall(qtn_clk_init_rates);
+
+struct clk *qtn_clk_get(struct device *dev, const char *id)
+{
+	struct clk *ret_clk = NULL;
+	const char *dev_id = NULL;
+	int i;
+
+	if (dev) {
+		dev_id = dev_name(dev);
+
+		for (i = 0; i < ARRAY_SIZE(qtn_clk_list); ++i) {
+			if (!strcmp(dev_id, qtn_clk_list[i].name)) {
+				ret_clk = qtn_clk_list[i].clk_p;
+				break;
+			}
+		}
+	}
+
+	return ret_clk ? ret_clk : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(qtn_clk_get);
+
+unsigned long qtn_clk_get_rate(struct clk *clk)
+{
+	return clk ? clk->rate : 0;
+}
+EXPORT_SYMBOL(qtn_clk_get_rate);
diff --git a/drivers/qtn/ruby/clock.h b/drivers/qtn/ruby/clock.h
new file mode 100644
index 0000000..c88d47b
--- /dev/null
+++ b/drivers/qtn/ruby/clock.h
@@ -0,0 +1,28 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+#ifndef __BOARD_RUBY_CLOCK_H
+#define __BOARD_RUBY_CLOCK_H
+
+unsigned get_ruby_bus_freq(void);
+unsigned get_ruby_cpu_freq(void);
+
+#endif // #ifndef __BOARD_RUBY_CLOCK_H
+
diff --git a/drivers/qtn/ruby/ddr.c b/drivers/qtn/ruby/ddr.c
new file mode 100644
index 0000000..89694ad
--- /dev/null
+++ b/drivers/qtn/ruby/ddr.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) Quantenna Communications Incorporated 2012.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/pm_qos.h>
+#else
+#include <linux/pm_qos_params.h>
+#endif
+
+#include <asm/board/platform.h>
+#include <asm/hardware.h>
+
+#include <common/ruby_pm.h>
+
+static void ruby_ddr_powerdown_enable(int enable)
+{
+	printk(KERN_INFO "ddr sleep %sabled\n", enable ? "en" : "dis");
+
+	if (enable) {
+		writel(readl(RUBY_DDR_CONTROL) | RUBY_DDR_CONTROL_POWERDOWN_EN,
+			RUBY_DDR_CONTROL);
+	} else {
+		writel(readl(RUBY_DDR_CONTROL) & ~RUBY_DDR_CONTROL_POWERDOWN_EN,
+			RUBY_DDR_CONTROL);
+	}
+}
+
+static int ruby_ddr_pm_notify(struct notifier_block *b, unsigned long level, void *v)
+{
+	static unsigned long pm_prev_level = BOARD_PM_LEVEL_NO;
+	const unsigned long threshold = BOARD_PM_LEVEL_SLOW_DOWN;
+
+	if ((pm_prev_level < threshold) && (level >= threshold)) {
+		ruby_ddr_powerdown_enable(1);
+	} else if ((pm_prev_level >= threshold) && (level < threshold)) {
+		ruby_ddr_powerdown_enable(0);
+	}
+
+	pm_prev_level = level;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block pm_notifier = {
+	.notifier_call = ruby_ddr_pm_notify,
+};
+
+static int __init ruby_ddr_init(void)
+{
+	pm_qos_add_notifier(PM_QOS_POWER_SAVE, &pm_notifier);
+	return 0;
+}
+arch_initcall(ruby_ddr_init);
+
+MODULE_DESCRIPTION("Ruby DDR");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/ruby/dma_cache_ops.h b/drivers/qtn/ruby/dma_cache_ops.h
new file mode 100644
index 0000000..d52a593
--- /dev/null
+++ b/drivers/qtn/ruby/dma_cache_ops.h
@@ -0,0 +1,46 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+#include "plat_dma_addr.h"
+#ifndef __BOARD_RUBY_DMA_CACHE_OPS_H
+#define __BOARD_RUBY_DMA_CACHE_OPS_H
+
+static __always_inline dma_addr_t cache_op_before_rx(void *ptr, size_t size,
+							uint8_t cache_is_cleaned)
+{
+	dma_addr_t ret;
+
+	if (cache_is_cleaned) {
+		/* Cache is already invalidated, so it is enough to just convert address. */
+		ret = plat_kernel_addr_to_dma(NULL, ptr);
+	} else {
+		ret = dma_map_single(NULL, ptr, size, DMA_FROM_DEVICE);
+	}
+
+	return ret;
+}
+
+static __always_inline dma_addr_t cache_op_before_tx(void *ptr, size_t size)
+{
+	return dma_map_single(NULL, ptr, size, DMA_BIDIRECTIONAL);
+}
+
+#endif // #ifndef __BOARD_RUBY_DMA_CACHE_OPS_H
+
diff --git a/drivers/qtn/ruby/dmautil.c b/drivers/qtn/ruby/dmautil.c
new file mode 100644
index 0000000..2e09ca2
--- /dev/null
+++ b/drivers/qtn/ruby/dmautil.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) Quantenna Communications, Inc. 2012
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <qtn/dmautil.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+
+#include "mem_check.h"
+
+int dmautil_aligned_dma_desc_alloc(aligned_dma_descs *d,
+		unsigned int desc_size, unsigned int desc_count,
+		unsigned int align, bool is_sram)
+{
+	size_t remap_size = desc_size * desc_count;
+	size_t alloc_size = remap_size + align - 1;
+	void *p;
+
+	memset(d, 0, sizeof(*d));
+	d->desc_count = desc_count;
+
+#ifdef CONFIG_ARCH_RUBY_NUMA
+	if (is_sram) {
+		p = heap_sram_alloc(alloc_size);
+	} else {
+		p = kmalloc(alloc_size, GFP_KERNEL);
+	}
+#else
+	p = kmalloc(alloc_size, GFP_KERNEL);
+#endif
+	if (!p) {
+		return -ENOMEM;
+	}
+
+	d->unaligned_vdescs = (unsigned long)p;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_wback(d->unaligned_vdescs, alloc_size);
+#else
+	flush_dcache_range(d->unaligned_vdescs, d->unaligned_vdescs + alloc_size);
+#endif
+	d->aligned_vdescs = align_val_up(d->unaligned_vdescs, align);
+	d->descs_dma_addr = virt_to_bus((void *)d->aligned_vdescs);
+	d->descs = ioremap_nocache(d->aligned_vdescs, remap_size);
+	if (!d->descs) {
+		/* alloc pass but remap failure, free descs */
+		dmautil_aligned_dma_desc_free(d);
+		return -ENOMEM;
+	}
+
+	memset(d->descs, 0, remap_size);
+
+	return 0;
+}
+EXPORT_SYMBOL(dmautil_aligned_dma_desc_alloc);
+
+void dmautil_aligned_dma_desc_free(aligned_dma_descs *d)
+{
+	void *p;
+
+	if (!d) {
+		return;
+	}
+
+	if (d->descs) {
+		iounmap(d->descs);
+	}
+
+	p = (void *)d->unaligned_vdescs;
+	if (is_linux_sram_mem_addr(d->unaligned_vdescs)) {
+#ifdef CONFIG_ARCH_RUBY_NUMA
+		heap_sram_free(p);
+#else
+		kfree(p);
+#endif
+	} else {
+		kfree(p);
+	}
+
+	memset(d, 0, sizeof(*d));
+}
+EXPORT_SYMBOL(dmautil_aligned_dma_desc_free);
+
+
diff --git a/drivers/qtn/ruby/early_printk.c b/drivers/qtn/ruby/early_printk.c
new file mode 100644
index 0000000..1e97053
--- /dev/null
+++ b/drivers/qtn/ruby/early_printk.c
@@ -0,0 +1,108 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <asm/serial.h>
+#include <asm/hardware.h>
+
+#include <asm/board/platform.h>
+#include <asm/board/gpio.h>
+
+inline static int early_serial_tx_ready(void)
+{
+	return (readb(IO_ADDRESS(RUBY_UART0_LSR)) & RUBY_LSR_TX_Empty);
+}
+
+inline static void early_serial_wait_tx(void)
+{
+	while (!early_serial_tx_ready());
+}
+
+inline static void early_serial_putc_simple(const char c)
+{
+	early_serial_wait_tx();
+	writeb(c, IO_ADDRESS(RUBY_UART0_RBR_THR_DLL));
+}
+
+inline static void early_serial_putc(const char c)
+{
+	if (c == '\n') {
+		early_serial_putc_simple('\r');
+	}
+	early_serial_putc_simple(c);
+}
+
+inline static void early_serial_setbrg(void)
+{
+	u32 baud_val = (BASE_BAUD * 16);
+	u32 div_val = (CONFIG_ARC700_DEV_CLK + baud_val / 2) / baud_val;
+	u8 lcr_val =
+		RUBY_LCR_Data_Word_Length_8 |
+		RUBY_LCR_Stop_Bit_1 |
+		RUBY_LCR_No_Parity |
+		RUBY_LCR_Break_Disable;
+
+	if (!(readb(IO_ADDRESS(RUBY_UART0_USR)) & RUBY_USR_Busy)) {
+		writeb(lcr_val | RUBY_LCR_DLAB, IO_ADDRESS(RUBY_UART0_LCR));
+		writeb((div_val & 0xff), IO_ADDRESS(RUBY_UART0_RBR_THR_DLL));
+		writeb((div_val >> 8) & 0xff, IO_ADDRESS(RUBY_UART0_DLH_IER));
+		writeb(lcr_val, IO_ADDRESS(RUBY_UART0_LCR));
+	}
+}
+
+static void early_console_write(struct console *co, const char *s, unsigned count)
+{
+	while(count--) {
+		early_serial_putc(*s);
+		++s;
+	}
+}
+
+static int __init early_console_setup(struct console *co, char *options)
+{
+	/* Options are ignored.
+	* User hard-coded in early_serial_setbrg() mode.
+	*/
+	early_serial_setbrg();
+	return 0;
+}
+
+static struct console __initdata early_console_struct = {
+	.name  = "ruby_early",
+	.write = early_console_write,
+	.setup = early_console_setup,
+	.flags = CON_PRINTBUFFER | CON_BOOT,
+	.index = -1,
+};
+
+static struct console *early_console = &early_console_struct;
+
+static int __init setup_early_printk(char *buf)
+{
+	gpio_uart0_config();
+	register_console(early_console);
+	return 0;
+}
+early_param("earlyprintk", setup_early_printk);
+
diff --git a/drivers/qtn/ruby/emac_lib.c b/drivers/qtn/ruby/emac_lib.c
new file mode 100644
index 0000000..95a614b
--- /dev/null
+++ b/drivers/qtn/ruby/emac_lib.c
@@ -0,0 +1,1990 @@
+/**
+ * Copyright (c) 2008-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/sysfs.h>
+#include <linux/crc32.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/proc_fs.h>
+#include <linux/pm_qos.h>
+#include <linux/gpio.h>
+#else
+#include <linux/pm_qos_params.h>
+#include <asm/gpio.h>
+#endif
+
+
+
+#include <asm/board/soc.h>
+#include <asm/board/board_config.h>
+
+#include <common/ruby_pm.h>
+#include <qtn/emac_debug.h>
+#include "ar823x.h"
+#include "mv88e6071.h"
+#include "emac_lib.h"
+#include "rtl8367b/rtl8367b_init.h"
+#include <common/topaz_emac.h>
+#include <qtn/dmautil.h>
+#include <qtn/qtn_debug.h>
+
+/* create build error if arasan structure is re-introduced */
+struct emac_lib_private {
+};
+
+#define MDIO_REGISTERS		32
+
+#define DRV_NAME		"emac_lib"
+#define DRV_VERSION		"1.0"
+#define DRV_AUTHOR		"Quantenna Communications Inc."
+#define DRV_DESC		"Arasan AHB-EMAC on-chip Ethernet driver"
+
+#define PHY_REG_PROC_FILE_NAME	"phy_reg"
+#define PHY_PW_PROC_FILE_NAME	"phy_pw"
+#define PHY_PW_CMD_MAX_LEN	20
+
+static int mdio_use_noops = 0;
+module_param(mdio_use_noops, int, 0);
+int mdc_clk_divisor = 1;
+module_param(mdc_clk_divisor, int, 0644);
+
+static uint32_t emac_lib_dual_emac;
+
+static ssize_t show_mdio_use_noops(struct device *dev, struct device_attribute *attr, char *buf)	/* sysfs read: current mdio_use_noops flag */
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", mdio_use_noops);
+}
+
+static ssize_t store_mdio_use_noops(struct device *dev, struct device_attribute *attr,
+					const char *buf, size_t count)	/* sysfs write: '1'/'0' toggles MDIO no-op mode; 'd','a','s' trigger debug dumps */
+{
+	if (count >= 1)
+		mdio_use_noops = (buf[0] == '1');	/* any other first char disables no-op mode */
+	if (mdio_use_noops)
+		printk(KERN_INFO "Disabling MDIO read/write for %s\n", dev_name(dev));
+
+#if EMAC_REG_DEBUG
+	if (buf[0] == 'd') {	/* "d"/"d1": dump EMAC0/EMAC1 register state */
+		unsigned long base = RUBY_ENET0_BASE_ADDR;
+		if (buf[1] == '1')	/* NOTE(review): reads buf[1] without checking count >= 2 -- relies on sysfs NUL-termination, confirm */
+			base = RUBY_ENET1_BASE_ADDR;
+		emac_lib_reg_debug(base);
+	}
+#endif
+
+#ifdef RTL_SWITCH
+	if (buf[0] == 'a') {
+		rtl8367b_dump_status();
+	}
+	if (buf[0] == 's') {
+		rtl8367b_dump_stats();
+	}
+#endif
+
+	return count;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(mdio_use_noops, S_IRUSR | S_IWUSR, show_mdio_use_noops, store_mdio_use_noops);
+#else
+static DEVICE_ATTR(mdio_use_noops, S_IRUGO | S_IWUGO, show_mdio_use_noops, store_mdio_use_noops);
+#endif
+
+int emac_lib_mdio_sysfs_create(struct net_device *net_dev)	/* expose the mdio_use_noops attribute under this netdev; 0 on success */
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_mdio_use_noops.attr);
+}
+EXPORT_SYMBOL(emac_lib_mdio_sysfs_create);
+
+void emac_lib_mdio_sysfs_remove(struct net_device *net_dev)	/* counterpart of emac_lib_mdio_sysfs_create() */
+{
+	sysfs_remove_file(&net_dev->dev.kobj, &dev_attr_mdio_use_noops.attr);
+}
+EXPORT_SYMBOL(emac_lib_mdio_sysfs_remove);
+
+/* noop_values are shared between all devices, could get interesting if there are more than one */
+static u32 mdio_noop_values[MDIO_REGISTERS] = {
+			    0xFFFF,
+			    0x796D,
+			    0x243,
+			    0xD91,
+			    0xDE1,
+			    0xCDE1,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0x700,
+			    0x7C00,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF,
+			    0xFFFF
+};
+
+
+/*
+ * This functions polls until register value is changed (apply mask and compare with val).
+ * Function has timeout, so polling is not indefinite.
+ * Also function try to be clever and work safely in heavy loaded system.
+ * It also try to reduce CPU load using sleep or context switch.
+ */
+int emac_lib_poll_wait(struct emac_common *privc, u32(*read_func)(struct emac_common*, int),
+		int reg, u32 mask, u32 val, unsigned long ms, const char *func)	/* poll until (read & mask) == val or ~ms elapse; 1 = success, 0 = timeout */
+{
+	int ret = 1;
+	unsigned long ms_warn = ms / 2;	/* warn if we spent more than half the budget */
+
+	int first_run = 1;
+	unsigned long deadline = jiffies + max(msecs_to_jiffies(ms), 1UL);
+
+	while (((*read_func)(privc, reg) & mask) != val) {
+		if (first_run) {
+			deadline = jiffies + max(msecs_to_jiffies(ms), 1UL);	/* re-anchor after the first (possibly slow) read */
+			first_run = 0;
+		} else if (time_after_eq(jiffies, deadline)) {
+			break;
+		} else if (irqs_disabled() || in_atomic()) {
+			udelay(100);	/* cannot sleep in atomic context */
+		} else if (time_before(jiffies + 5, deadline)) {
+			msleep(1000 / HZ);	/* plenty of time left: sleep one tick */
+		} else {
+			cond_resched();	/* close to deadline: just yield */
+		}
+	}
+	if (((*read_func)(privc, reg) & mask) != val) {
+		if (func) {
+			if (printk_ratelimit()) {
+				printk(KERN_ERR "%s %s: err: timeout %lums\n",
+						privc->dev->name, func, ms);
+			}
+		}
+		ret = 0;
+	} else if(time_after_eq(jiffies + msecs_to_jiffies(ms_warn), deadline)) {	/* succeeded, but took > ms/2 */
+		if (func) {
+			if (printk_ratelimit())
+				printk(KERN_WARNING "%s %s: warn: system is overloaded : spend more ~%lums!\n",
+					privc->dev->name, func, ms_warn);
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(emac_lib_poll_wait);
+
+int emac_lib_board_cfg(int port, int *cfg, int *phy)	/* fetch EMAC config word and PHY address for port 0/1; 0 on success, -EINVAL/rc on failure */
+{
+	int cfg_param;
+	int phy_param;
+	int rc;
+
+	if (port == 0) {
+		cfg_param = BOARD_CFG_EMAC0;
+		phy_param = BOARD_CFG_PHY0_ADDR;
+	} else if (port == 1) {
+		cfg_param = BOARD_CFG_EMAC1;
+		phy_param = BOARD_CFG_PHY1_ADDR;
+	} else {
+		printk(KERN_ERR "%s invalid port number %d\n", __FUNCTION__, port);
+		return -EINVAL;
+	}
+
+	rc = get_board_config(cfg_param, cfg);
+	if (rc) {
+		printk(KERN_ERR "%s get_board_config returns %d\n", __FUNCTION__, rc);
+		return rc;
+	}
+
+	rc = get_board_config(phy_param, phy);
+	if (rc) {
+		printk(KERN_ERR "%s get_board_config returns %d\n", __FUNCTION__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(emac_lib_board_cfg);
+
+static void emac_lib_adjust_speed(struct net_device *dev, int speed, int duplex)	/* program MAC global control and SoC GMII clock for the negotiated speed/duplex */
+{
+	u32 val;
+	u32 speed_shift = 0;
+	struct emac_common *privc = netdev_priv(dev);
+
+	switch (privc->mac_id) {
+		case 0:
+			speed_shift = RUBY_SYS_CTL_MASK_GMII0_SHIFT;
+			break;
+		case 1:
+			speed_shift = RUBY_SYS_CTL_MASK_GMII1_SHIFT;
+			break;
+		default:
+			panic("No speed_shift defined for %d\n", (int)privc->mac_id);
+			break;
+	}
+
+	val = emac_rd(privc, EMAC_MAC_GLOBAL_CTRL) & ~(MacSpeedMask | MacFullDuplex);	/* preserve everything except speed/duplex bits */
+	if (duplex == DUPLEX_FULL) {
+		val |= MacFullDuplex;
+	}
+	/* note: this covers emac0 - need to extend for emac1 when we add support */
+	writel(RUBY_SYS_CTL_MASK_GMII_TXCLK << speed_shift, IO_ADDRESS(RUBY_SYS_CTL_MASK));	/* unlock the GMII clock field before writing CTRL */
+	switch (speed) {
+		case SPEED_10:
+			emac_wr(privc, EMAC_MAC_GLOBAL_CTRL, val | MacSpeed10M);
+			writel(RUBY_SYS_CTL_MASK_GMII_10M << speed_shift, IO_ADDRESS(RUBY_SYS_CTL_CTRL));
+			break;
+		case SPEED_100:
+			emac_wr(privc, EMAC_MAC_GLOBAL_CTRL, val | MacSpeed100M);
+			writel(RUBY_SYS_CTL_MASK_GMII_100M << speed_shift, IO_ADDRESS(RUBY_SYS_CTL_CTRL));
+			break;
+		case SPEED_1000:
+			emac_wr(privc, EMAC_MAC_GLOBAL_CTRL, val | MacSpeed1G);
+			writel(RUBY_SYS_CTL_MASK_GMII_1000M << speed_shift, IO_ADDRESS(RUBY_SYS_CTL_CTRL));
+			break;
+		default:
+			printk(KERN_WARNING "%s: Speed (%d) is not supported\n", dev->name, speed);
+			break;
+	}
+}
+
+static void emac_lib_adjust_link(struct net_device *dev)	/* phylib callback: react to link up/down transitions */
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct phy_device *phydev = privc->phy_dev;
+
+	BUG_ON(!privc->phy_dev);
+	if (phydev->link != privc->old_link) {	/* only act on actual state changes */
+		if (phydev->link) {
+			emac_lib_adjust_speed(dev, phydev->speed, phydev->duplex);
+			netif_tx_schedule_all(dev);	/* kick the TX queues that stalled while link was down */
+			printk(KERN_INFO "%s: link up (%d/%s)\n",
+					dev->name, phydev->speed,
+					phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
+		} else {
+			printk(KERN_INFO "%s: link down\n", dev->name);
+		}
+		privc->old_link = phydev->link;
+	}
+}
+
+/*
+* One MDIO bus is used on our SoC to access PHYs of all EMACs.
+* This mutex guards access to the bus.
+*/
+static DEFINE_MUTEX(mdio_bus_lock);
+/*
+ * MII operations
+ */
+int emac_lib_mdio_read(struct mii_bus *bus, int phy_addr, int reg)	/* read a PHY register over MDIO; returns 16-bit value or -1 on timeout */
+{
+	struct net_device *dev;
+	struct emac_common *privc;
+	u32 mii_control, read_val;
+
+	mutex_lock(&mdio_bus_lock);
+
+	if (mdio_use_noops) {
+		uint32_t noop_value = mdio_noop_values[reg % MDIO_REGISTERS];	/* no-op mode: replay the last value read for this register */
+		mutex_unlock(&mdio_bus_lock);
+		return noop_value;
+	}
+
+	dev = bus->priv;
+	privc = netdev_priv(dev);
+
+	if (!mdio_wait(privc, EMAC_MAC_MDIO_CTRL, MacMdioCtrlStart, 0, TIMEOUT_MAC_MDIO_CTRL, __FUNCTION__)) {	/* wait for any previous transaction to finish */
+		mutex_unlock(&mdio_bus_lock);
+		return -1;
+	}
+
+	mii_control =
+		((reg & MacMdioCtrlRegMask) << MacMdioCtrlRegShift) |
+		((phy_addr & MacMdioCtrlPhyMask) << MacMdioCtrlPhyShift) |
+		((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+		MacMdioCtrlRead | MacMdioCtrlStart;
+	mdio_wr(privc, EMAC_MAC_MDIO_CTRL, mii_control);
+	if (!mdio_wait(privc, EMAC_MAC_MDIO_CTRL, MacMdioCtrlStart, 0, TIMEOUT_MAC_MDIO_CTRL, __FUNCTION__)) {	/* start bit self-clears when the read completes */
+		mutex_unlock(&mdio_bus_lock);
+		return -1;
+	}
+
+	read_val = mdio_rd(privc, EMAC_MAC_MDIO_DATA) & MacMdioDataMask;
+	/* printk(KERN_INFO "%s: PHY: %d Reg %d Value 0x%08x\n", __FUNCTION__, phy_addr, reg, read_val); */
+	mdio_noop_values[reg % MDIO_REGISTERS] = read_val;	/* cache for no-op mode replay */
+	mutex_unlock(&mdio_bus_lock);
+
+	return (int)(read_val);
+}
+
+int emac_lib_mdio_write(struct mii_bus *bus, int phy_addr, int reg, uint16_t value)	/* write a PHY register over MDIO; 0 on success, -1 on timeout */
+{
+	struct net_device *dev;
+	struct emac_common *privc;
+	u32 mii_control;
+
+	if (mdio_use_noops) {	/* no-op mode: silently drop the write */
+		/* printk(KERN_WARNING "MII Write is a noop: MII WR: Reg %d Value %08X\n",reg,(unsigned)value); */
+		return 0;
+	}
+
+	mutex_lock(&mdio_bus_lock);
+	dev = bus->priv;
+	privc = netdev_priv(dev);
+
+	if (!mdio_wait(privc, EMAC_MAC_MDIO_CTRL, MacMdioCtrlStart, 0, TIMEOUT_MAC_MDIO_CTRL, __FUNCTION__)) {	/* wait for any previous transaction to finish */
+		mutex_unlock(&mdio_bus_lock);
+		return -1;
+	}
+
+	mii_control =
+		((reg & MacMdioCtrlRegMask) << MacMdioCtrlRegShift) |
+		((phy_addr & MacMdioCtrlPhyMask) << MacMdioCtrlPhyShift) |
+		((mdc_clk_divisor & MacMdioCtrlClkMask) << MacMdioCtrlClkShift) |
+		MacMdioCtrlWrite | MacMdioCtrlStart;
+
+	/* printk(KERN_INFO "%s: PHY: %d Reg %d Value 0x%08x\n", __FUNCTION__, phy_addr, reg, value); */
+	mdio_wr(privc, EMAC_MAC_MDIO_DATA, value);	/* data must be loaded before the control/start write */
+	mdio_wr(privc, EMAC_MAC_MDIO_CTRL, mii_control);
+	mutex_unlock(&mdio_bus_lock);
+
+	return 0;
+}
+
+static u32 phydev_addr_inuse[2] = { -1, -1 };
+
+static int mii_probe(struct net_device *dev)	/* locate the PHY for this EMAC on the shared MDIO bus and attach to it; 0 on success */
+{
+	struct emac_common * const privc = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
+	int phy_index;
+	int phy_found = 0;
+	int port_num = privc->mac_id;
+	unsigned long phy_supported = 0;
+
+	privc->phy_dev = NULL;
+
+	if (privc->emac_cfg & (EMAC_PHY_AR8236 | EMAC_PHY_AR8327)) {	/* external switches bypass the generic PHY attach path */
+		// handle ar823x switch first
+		return ar823x_init(privc->phy_addr);
+	} else if (privc->emac_cfg & EMAC_PHY_MV88E6071) {
+		return mv88e6071_init(privc);
+	}
+
+	if (privc->emac_cfg & EMAC_PHY_NOT_IN_USE) {
+		// no PHY - just return OK
+		return 0;
+	}
+
+	/*
+	 * Find a matching phy address on the current bus, or the first unused
+	 * by index if scanning for phys
+	 */
+	for (phy_index = 0; phy_index < PHY_MAX_ADDR; phy_index++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		struct phy_device *pdev = mdiobus_get_phy(privc->mii_bus, phy_index); 
+#else
+		struct phy_device *pdev = privc->mii_bus->phy_map[phy_index];
+#endif
+		int in_use = 0;
+		int i;
+
+		if (!pdev) {
+			continue;
+		}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		printk(KERN_INFO DRV_NAME " %s: index %d id 0x%x addr %d\n",
+				dev->name, phy_index, pdev->phy_id, pdev->mdio.addr);
+
+		if (phy_found) {	/* keep iterating only to log the remaining PHYs */
+			continue;
+		}
+
+		/* check that this phy isn't currently in use */
+		for (i = 0; i < ARRAY_SIZE(phydev_addr_inuse); i++) {
+			if (port_num != i && phydev_addr_inuse[i] == pdev->mdio.addr) {
+				in_use = 1;
+			}
+		}
+
+		if (!in_use && (privc->phy_addr == pdev->mdio.addr ||
+					privc->phy_addr == EMAC_PHY_ADDR_SCAN)) {
+			phydev = pdev;
+			phydev_addr_inuse[port_num] = pdev->mdio.addr;	/* claim this address; released in emac_lib_mii_exit() */
+			phy_found = 1;
+		}
+#else
+		printk(KERN_INFO DRV_NAME " %s: index %d id 0x%x addr %d\n",
+				dev->name, phy_index, pdev->phy_id, pdev->addr);
+
+		if (phy_found) {
+			continue;
+		}
+
+		/* check that this phy isn't currently in use */
+		for (i = 0; i < ARRAY_SIZE(phydev_addr_inuse); i++) {
+			if (port_num != i && phydev_addr_inuse[i] == pdev->addr) {
+				in_use = 1;
+			}
+		}
+
+		if (!in_use && (privc->phy_addr == pdev->addr ||
+					privc->phy_addr == EMAC_PHY_ADDR_SCAN)) {
+			phydev = pdev;
+			phydev_addr_inuse[port_num] = pdev->addr;
+			phy_found = 1;
+		}
+#endif
+	}
+
+	if (!phydev) {
+		printk(KERN_ERR DRV_NAME " %s: no PHY found\n", dev->name);
+		return -1;
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	printk(KERN_INFO DRV_NAME " %s: phy_id 0x%x addr %d\n",
+			dev->name, phydev->phy_id, phydev->mdio.addr);
+#else
+	printk(KERN_INFO DRV_NAME " %s: phy_id 0x%x addr %d\n",
+			dev->name, phydev->phy_id, phydev->addr);
+#endif
+	/* now we are supposed to have a proper phydev, to attach to... */
+	BUG_ON(phydev->attached_dev);
+
+	/* XXX: Check if this should be done here. Forcing advert of Symm Pause */
+	phy_write(phydev, MII_ADVERTISE,
+			phy_read(phydev, MII_ADVERTISE) | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM );
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	phydev = phy_connect(dev, dev_name(&phydev->dev), &emac_lib_adjust_link, 0,
+			PHY_INTERFACE_MODE_MII);
+#else
+	phydev = phy_connect(dev, dev_name(&phydev->mdio.dev), &emac_lib_adjust_link,
+			PHY_INTERFACE_MODE_MII);
+#endif
+	if (IS_ERR(phydev)) {
+		printk(KERN_ERR DRV_NAME " %s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(phydev);
+	}
+
+	/* mask with MAC supported features */
+	phy_supported =	SUPPORTED_Autoneg        |
+		SUPPORTED_Pause          |
+		SUPPORTED_Asym_Pause     |
+		SUPPORTED_MII            |
+		SUPPORTED_TP;
+
+	if (privc->emac_cfg & EMAC_PHY_FORCE_10MB) {	/* board config can pin the link to a single speed */
+		phy_supported |= SUPPORTED_10baseT_Half   |
+			SUPPORTED_10baseT_Full;
+	} else if (privc->emac_cfg & EMAC_PHY_FORCE_100MB) {
+		phy_supported |= SUPPORTED_100baseT_Half   |
+			SUPPORTED_100baseT_Full;
+	} else if (privc->emac_cfg & EMAC_PHY_FORCE_1000MB) {
+		phy_supported |= SUPPORTED_1000baseT_Half   |
+			SUPPORTED_1000baseT_Full;
+	} else {
+		phy_supported |= SUPPORTED_10baseT_Half   |
+			SUPPORTED_10baseT_Full   |
+			SUPPORTED_100baseT_Half  |
+			SUPPORTED_100baseT_Full  |
+			SUPPORTED_1000baseT_Half |
+			SUPPORTED_1000baseT_Full;
+	}
+
+	phydev->supported &= phy_supported;
+	phydev->advertising = phydev->supported;
+	privc->old_link = 0;
+	privc->phy_dev = phydev;
+
+	printk(KERN_INFO DRV_NAME " %s: attached PHY driver [%s] "
+			"(mii_bus:phy_addr=%s, irq=%d)\n",
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			dev->name, phydev->drv->name, phydev->mdio.dev.bus->name, phydev->irq);
+#else
+			dev->name, phydev->drv->name, phydev->dev.bus->name, phydev->irq);
+#endif
+	return 0;
+}
+
+void emac_lib_phy_start(struct net_device *dev)	/* (re)start the PHY state machine, or force the configured speed when no PHY is present */
+{
+	struct emac_common *privc = netdev_priv(dev);
+
+	if ((privc->emac_cfg & EMAC_PHY_NOT_IN_USE) == 0){
+		/* cause the PHY state machine to schedule a link state check */
+		privc->old_link = 0;
+		phy_stop(privc->phy_dev);	/* stop first so phy_start() always re-runs negotiation */
+		phy_start(privc->phy_dev);
+	} else {
+		// This is the case of no phy - need to force link speed
+		int speed = 0;
+		int duplex = DUPLEX_FULL;
+
+		if (privc->emac_cfg & EMAC_PHY_FORCE_10MB)  {
+			speed = SPEED_10;
+		} else if (privc->emac_cfg & EMAC_PHY_FORCE_100MB)  {
+			speed = SPEED_100;
+		} else if (privc->emac_cfg & EMAC_PHY_FORCE_1000MB)  {
+			speed = SPEED_1000;
+		}
+		if (privc->emac_cfg & EMAC_PHY_FORCE_HDX) {
+			duplex = 0;	/* DUPLEX_HALF */
+		}
+		emac_lib_adjust_speed(dev, speed, duplex);
+		printk(KERN_INFO DRV_NAME " %s: force link (%d/%s)\n",
+				dev->name, speed,
+				duplex == DUPLEX_FULL ? "Full" : "Half");
+	}
+
+#ifdef RTL_SWITCH
+	if (emac_lib_rtl_switch(privc->emac_cfg)) {
+		rtl8367b_ext_port_enable(dev->if_port);
+	}
+#endif
+}
+EXPORT_SYMBOL(emac_lib_phy_start);
+
+void emac_lib_phy_stop(struct net_device *dev)	/* halt the PHY state machine (and RTL switch port) for this interface */
+{
+	struct emac_common *privc = netdev_priv(dev);
+
+#ifdef RTL_SWITCH
+	if (emac_lib_rtl_switch(privc->emac_cfg)) {
+		rtl8367b_ext_port_disable(dev->if_port);
+	}
+#endif
+
+	if (privc->phy_dev) {	/* phy_dev is NULL for switch/no-PHY configurations */
+		phy_stop(privc->phy_dev);
+	}
+}
+EXPORT_SYMBOL(emac_lib_phy_stop);
+
+static void emac_lib_enable_gpio_reset_pin(int pin)	/* pulse a GPIO low for 100ms to hardware-reset an external PHY */
+{
+	printk(KERN_INFO "%s GPIO pin %d reset sequence\n", DRV_NAME, pin);
+	if (gpio_request(pin, DRV_NAME) < 0)	/* proceed anyway; request failure is logged but non-fatal */
+		printk(KERN_ERR "%s: Failed to request GPIO%d for GPIO reset\n",
+				DRV_NAME, pin);
+
+	gpio_direction_output(pin, 1);
+	udelay(100);
+	gpio_set_value(pin, 0);	/* assert reset (active low) */
+	mdelay(100);
+	gpio_set_value(pin, 1);	/* release reset */
+	gpio_free(pin);
+}
+
+static void emac_lib_enable_gpio_reset(uint32_t cfg)	/* reset external PHYs on whichever GPIOs the board config selects */
+{
+	if (cfg & EMAC_PHY_GPIO1_RESET) {
+		emac_lib_enable_gpio_reset_pin(RUBY_GPIO_PIN1);
+	}
+
+	if (cfg & EMAC_PHY_GPIO13_RESET) {
+		emac_lib_enable_gpio_reset_pin(RUBY_GPIO_PIN13);
+	}
+}
+
+void emac_lib_enable(uint32_t ext_reset)	/* board-level bring-up: read EMAC configs, reset PHYs, release the Arasan MAC from reset */
+{
+	uint32_t emac0_cfg = EMAC_NOT_IN_USE;
+	uint32_t emac1_cfg = EMAC_NOT_IN_USE;
+	uint32_t emac_cfg;
+	uint32_t rgmii_timing = CONFIG_ARCH_RGMII_DEFAULT;
+
+	get_board_config(BOARD_CFG_RGMII_TIMING, (int *) &rgmii_timing);	/* failure keeps the compiled-in default */
+
+	if (get_board_config(BOARD_CFG_EMAC0, (int *) &emac0_cfg) != 0) {
+		printk(KERN_ERR "%s: get_board_config returned error status for EMAC0\n", DRV_NAME);
+	}
+
+	if (get_board_config(BOARD_CFG_EMAC1, (int *) &emac1_cfg) != 0) {
+		printk(KERN_ERR "%s: get_board_config returned error status for EMAC1\n", DRV_NAME);
+	}
+	emac_cfg = emac0_cfg | emac1_cfg;	/* union of both ports' flags drives the shared reset GPIOs */
+
+	/* Use GPIO to reset ODM PHY */
+	emac_lib_enable_gpio_reset(emac_cfg);
+
+	arasan_initialize_release_reset(emac0_cfg, emac1_cfg, rgmii_timing, ext_reset);
+}
+EXPORT_SYMBOL(emac_lib_enable);
+
+static struct mii_bus* emac_lib_alloc_mii(struct net_device *dev)	/* allocate and configure the MDIO bus for this EMAC; NULL on failure */
+{
+	int i;
+	struct emac_common *privc = netdev_priv(dev);
+	struct mii_bus *mii = NULL;
+
+	/* Alloc bus structure */
+	mii = mdiobus_alloc();
+	if (!mii) {
+		goto mii_alloc_err_out;
+	}
+
+	/* Initialize mii structure fields */
+	mii->priv = dev;
+
+	/* if we are using ar8236 switch, the mdio ops are special */
+	if (privc->emac_cfg & (EMAC_PHY_AR8236 | EMAC_PHY_AR8327)) {
+		mii->read = ar823x_mdio_read;
+		mii->write = ar823x_mdio_write;
+	} else if (privc->emac_cfg & EMAC_PHY_MV88E6071) {
+		mii->read = mv88e6071_mdio_read;
+		mii->write = mv88e6071_mdio_write;
+	} else if (emac_lib_rtl_switch(privc->emac_cfg)) {
+#ifdef RTL_SWITCH
+		if (rtl8367b_init(mii, &emac_lib_mdio_read,
+					&emac_lib_mdio_write,
+					privc->emac_cfg, privc->mac_id)) {
+			goto board_init_err_out;
+		}
+#else
+		printk(KERN_ERR "rtl switch module not available\n");
+		goto board_init_err_out;
+#endif
+	} else {
+		mii->read = emac_lib_mdio_read;
+		mii->write = emac_lib_mdio_write;
+	}
+	mii->name = "emac_eth_mii";
+	snprintf(mii->id, MII_BUS_ID_SIZE, "%x", privc->mac_id);
+
+#if 0
+	mii->irq is array. Allocation not needed.
+	/* Initialize irq field */
+	mii->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!mii->irq) {
+		goto irq_alloc_err_out;
+	}
+#endif
+
+	for(i = 0; i < PHY_MAX_ADDR; ++i) {
+		mii->irq[i] = PHY_POLL;	/* no PHY interrupt lines wired up; poll for link changes */
+	}
+
+	/* Register bus if we are using PHY */
+	if ((privc->emac_cfg & EMAC_PHY_NOT_IN_USE) == 0) {
+		if (mdiobus_register(mii)) {
+			goto mii_register_err_out;
+		}
+	}
+	return mii;
+
+mii_register_err_out:
+	/* fix: no kfree(mii->irq) here -- irq is a fixed array inside struct mii_bus (see #if 0 above); freeing it corrupted the heap */
+
+/*irq_alloc_err_out:*/
+board_init_err_out:
+	mdiobus_free(mii);
+
+mii_alloc_err_out:
+	return NULL;
+}
+
+void emac_lib_mii_exit(struct net_device *dev)	/* undo emac_lib_mii_init(): tear down switch driver and MDIO bus */
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct mii_bus *mii = privc->mii_bus;
+
+	if (mii) {
+#ifdef RTL_SWITCH
+		if (emac_lib_rtl_switch(privc->emac_cfg)) {
+			rtl8367b_exit();
+		}
+#endif
+		/* fix: removed kfree(mii->irq) -- irq is a fixed array inside
+		 * struct mii_bus and was never kmalloc'ed here (the allocation
+		 * in emac_lib_alloc_mii is #if 0'd out); freeing it corrupted the heap. */
+		if (!(privc->emac_cfg & EMAC_PHY_NOT_IN_USE)) mdiobus_unregister(mii);	/* fix: bus is only registered when a PHY is in use */
+		mdiobus_free(mii);
+	}
+
+	phydev_addr_inuse[privc->mac_id] = -1;	/* release the PHY address claimed in mii_probe() */
+}
+EXPORT_SYMBOL(emac_lib_mii_exit);
+
+int emac_lib_mii_init(struct net_device *dev)	/* allocate the MDIO bus and probe/attach the PHY; 0 on success, -ENODEV on failure */
+{
+	struct emac_common *privc = netdev_priv(dev);
+
+	privc->mii_bus = emac_lib_alloc_mii(dev);
+	if (!privc->mii_bus) {
+		goto err_out;
+	}
+
+	if (mii_probe(dev)) {
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	if (privc->mii_bus) {	/* only reached when mii_probe() failed after a successful alloc */
+		emac_lib_mii_exit(dev);
+		privc->mii_bus = NULL;
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL(emac_lib_mii_init);
+
+static int emac_lib_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)	/* ethtool GSET: delegate to the PHY, or report forced settings when PHY-less */
+{
+	struct emac_common *privc = netdev_priv(dev);
+
+	if (privc->phy_dev) {
+		return phy_ethtool_gset(privc->phy_dev, cmd);
+	}
+
+	if (privc->emac_cfg & EMAC_PHY_NOT_IN_USE) {
+		uint32_t supported = 0;
+		uint16_t speed = 0;
+		uint8_t duplex;
+
+		memset(cmd, 0, sizeof(*cmd));
+
+		/*
+		 * Return forced settings; used by bonding driver etc
+		 */
+		if (privc->emac_cfg & EMAC_PHY_FORCE_10MB) {
+			supported = SUPPORTED_10baseT_Half |
+				SUPPORTED_10baseT_Full;
+			speed = SPEED_10;
+		} else if (privc->emac_cfg & EMAC_PHY_FORCE_100MB) {
+			supported |= SUPPORTED_100baseT_Half |
+				SUPPORTED_100baseT_Full;
+			speed = SPEED_100;
+		} else if (privc->emac_cfg & EMAC_PHY_FORCE_1000MB) {
+			supported |= SUPPORTED_1000baseT_Half |
+				SUPPORTED_1000baseT_Full;
+			speed = SPEED_1000;
+		}
+
+		if (privc->emac_cfg & EMAC_PHY_FORCE_HDX) {
+			duplex = DUPLEX_HALF;
+		} else {
+			duplex = DUPLEX_FULL;
+		}
+
+		cmd->supported = supported;
+		cmd->advertising = supported;	/* report the forced mode as both supported and advertised */
+		cmd->speed = speed;
+		cmd->duplex = duplex;
+		cmd->port = PORT_MII;
+		cmd->transceiver = XCVR_EXTERNAL;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int emac_lib_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)	/* ethtool SSET: privileged; only meaningful with an attached PHY */
+{
+	struct emac_common *privc = netdev_priv(dev);
+
+	if (!capable(CAP_NET_ADMIN)) {
+		return -EPERM;
+	}
+
+	if (privc->phy_dev) {
+		return phy_ethtool_sset(privc->phy_dev, cmd);
+	}
+
+	return -EINVAL;	/* PHY-less (forced) configurations cannot be changed at runtime */
+}
+
+static void emac_lib_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)	/* ethtool GDRVINFO: report driver name/version and per-MAC bus info */
+{
+	struct emac_common *privc = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	info->fw_version[0] = '\0';
+	sprintf(info->bus_info, "%s %d", DRV_NAME, privc->mac_id);
+	info->regdump_len = 0;
+}
+
+const struct ethtool_ops emac_lib_ethtool_ops = {	/* shared ethtool vtable for all EMAC netdevs (legacy get/set_settings API) */
+	.get_settings = emac_lib_ethtool_get_settings,
+	.set_settings = emac_lib_ethtool_set_settings,
+	.get_drvinfo = emac_lib_ethtool_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+EXPORT_SYMBOL(emac_lib_ethtool_ops);
+
+
+void emac_lib_descs_free(struct net_device *dev)	/* release RX/TX descriptor rings allocated by emac_lib_descs_alloc() */
+{
+	/*
+	 * All Ethernet activity should have ceased before calling
+	 * this function
+	 */
+	struct emac_common *priv = netdev_priv(dev);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_free_coherent(NULL, priv->rx.desc_count *
+			sizeof(priv->rx.descs[0]), priv->rx.descs,	/* NOTE(review): NULL device argument -- confirm this arch's dma ops accept it */
+			priv->rx.descs_dma_addr);
+	dma_free_coherent(NULL, priv->tx.desc_count *
+			sizeof(priv->tx.descs[0]), priv->tx.descs,
+			priv->tx.descs_dma_addr);
+#else
+	ALIGNED_DMA_DESC_FREE(&priv->rx);
+	ALIGNED_DMA_DESC_FREE(&priv->tx);
+#endif
+}
+EXPORT_SYMBOL(emac_lib_descs_free);
+
+int emac_lib_descs_alloc(struct net_device *dev,
+		u32 rxdescs, bool rxdescs_sram,
+		u32 txdescs, bool txdescs_sram)	/* allocate DMA-coherent RX/TX rings; 0 on success, -ENOMEM on failure (partial alloc is freed) */
+{
+	struct emac_common *priv = netdev_priv(dev);
+	dma_addr_t dma_handle;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	priv->rx.desc_count = rxdescs;
+	priv->rx.descs = dma_alloc_coherent(NULL, rxdescs * sizeof(priv->rx.descs[0]), &dma_handle, GFP_KERNEL);	/* NOTE(review): sram flags unused on this path -- confirm intended */
+	if (!priv->rx.descs)
+		goto bad;
+	if (dma_handle & 7) {	/* hardware requires 8-byte aligned descriptor base */
+		panic("dma_alloc_coherent returned pointer that is not 8 byte aligned\n");
+	}
+	priv->rx.descs_dma_addr = dma_handle;
+	
+	priv->tx.desc_count = txdescs;
+	priv->tx.descs = dma_alloc_coherent(NULL, txdescs * sizeof(priv->tx.descs[0]), &dma_handle, GFP_KERNEL);
+	if (!priv->tx.descs)
+		goto bad;
+	if (dma_handle & 7) {
+		panic("dma_alloc_coherent returned pointer that is not 8 byte aligned\n");
+	}
+	priv->tx.descs_dma_addr = dma_handle;
+	
+#else
+	if (ALIGNED_DMA_DESC_ALLOC(&priv->rx, rxdescs, desc_align, rxdescs_sram)) {
+		goto bad;
+	}
+
+	if (ALIGNED_DMA_DESC_ALLOC(&priv->tx, txdescs, desc_align, txdescs_sram)) {
+		goto bad;
+	}
+#endif
+
+	return 0;
+bad:
+	emac_lib_descs_free(dev);	/* safe on partial allocation */
+
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(emac_lib_descs_alloc);
+
+void emac_lib_send_pause(struct net_device *dev, int pause_time)	/* transmit one flow-control PAUSE frame and wait for generation to complete */
+{
+	struct emac_common *privc = netdev_priv(dev);
+	uint32_t control;
+
+	emac_wr(privc, EMAC_MAC_FLOW_PAUSE_TIMEVAL, pause_time);	/* pause quanta to advertise */
+	emac_wr(privc, EMAC_MAC_FLOW_PAUSE_GENERATE, 0);
+	emac_wr(privc, EMAC_MAC_FLOW_PAUSE_GENERATE, 1);	/* 0->1 transition triggers frame generation */
+	/* fix: poll AFTER setting the generate bit (old code sampled the register before the write, so the wait loop exited immediately) */
+	control  = emac_rd(privc, EMAC_MAC_FLOW_PAUSE_GENERATE);
+	while (control & 0x1) {
+		control  = emac_rd(privc, EMAC_MAC_FLOW_PAUSE_GENERATE);
+	}
+}
+EXPORT_SYMBOL(emac_lib_send_pause);
+
+void emac_lib_init_mac(struct net_device *dev)	/* program baseline MAC registers: frame sizes, unicast address, flow control */
+{
+	/* This routine has the side-effect of stopping MAC RX and TX */
+	struct emac_common *privc = netdev_priv(dev);
+
+	/* EMAC_MAC_GLOBAL_CTRL set in response to link negotiation */
+	emac_wr(privc, EMAC_MAC_TX_CTRL, MacTxAutoRetry);
+	emac_wr(privc, EMAC_MAC_RX_CTRL, MacRxEnable | MacRxStripFCS |
+			MacRxStoreAndForward | MacAccountVLANs);
+	/* FIXME : These values should change based on required MTU size */
+	emac_wr(privc, EMAC_MAC_MAX_FRAME_SIZE, 0xC80);
+	emac_wr(privc, EMAC_MAC_TX_JABBER_SIZE, 0xCA0);
+	emac_wr(privc, EMAC_MAC_RX_JABBER_SIZE, 0xCA0);
+	emac_wr(privc, EMAC_MAC_ADDR1_HIGH, *(u16 *)&dev->dev_addr[0]);	/* MAC address loaded as three 16-bit halves */
+	emac_wr(privc, EMAC_MAC_ADDR1_MED, *(u16 *)&dev->dev_addr[2]);
+	emac_wr(privc, EMAC_MAC_ADDR1_LOW, *(u16 *)&dev->dev_addr[4]);
+	emac_wr(privc, EMAC_MAC_ADDR_CTRL, MacAddr1Enable);
+
+	emac_wr(privc, EMAC_MAC_TABLE1, 0);	/* clear the 64-bit multicast hash table */
+	emac_wr(privc, EMAC_MAC_TABLE2, 0);
+	emac_wr(privc, EMAC_MAC_TABLE3, 0);
+	emac_wr(privc, EMAC_MAC_TABLE4, 0);
+	emac_wr(privc, EMAC_MAC_FLOW_CTRL, MacFlowDecodeEnable |
+			MacFlowGenerationEnable | MacAutoFlowGenerationEnable |
+			MacFlowMulticastMode | MacBlockPauseFrames);
+	emac_wr(privc, EMAC_MAC_FLOW_SA_HIGH, *(u16 *)&dev->dev_addr[0]);	/* source address used in generated PAUSE frames */
+	emac_wr(privc, EMAC_MAC_FLOW_SA_MED, *(u16 *)&dev->dev_addr[2]);
+	emac_wr(privc, EMAC_MAC_FLOW_SA_LOW, *(u16 *)&dev->dev_addr[4]);
+
+	/* !!! FIXME - whether or not we need this depends on whether
+	 * the auto-pause generation uses it.  The auto function may just
+	 * use 0xffff val to stop sending & then 0 to restart it.
+	 */
+	emac_wr(privc, EMAC_MAC_FLOW_PAUSE_TIMEVAL, 100);
+
+	emac_wr(privc, EMAC_MAC_TX_ALMOST_FULL, 0x1f8);
+	emac_wr(privc, EMAC_MAC_TX_START_THRESHOLD, 1518);
+	/* EMAC_MAC_RX_START_THRESHOLD ignored in store & forward mode */
+	emac_wr(privc, EMAC_MAC_INT, MacUnderrun | MacJabber); /* clear ints */
+}
+EXPORT_SYMBOL(emac_lib_init_mac);
+
+void emac_lib_init_dma(struct emac_common *privc)	/* reset DMA engine state and point it at the RX/TX descriptor rings */
+{
+	emac_wr(privc, EMAC_DMA_CONFIG, DmaRoundRobin | Dma16WordBurst | Dma64BitMode);
+	emac_wr(privc, EMAC_DMA_CTRL, 0);	/* DMA stopped until the driver explicitly starts it */
+	emac_wr(privc, EMAC_DMA_STATUS_IRQ, (u32)-1);	/* write-1-to-clear all pending status bits */
+	emac_wr(privc, EMAC_DMA_INT_ENABLE, 0);
+	emac_wr(privc, EMAC_DMA_TX_AUTO_POLL, 0);
+	emac_wr(privc, EMAC_DMA_TX_BASE_ADDR, privc->tx.descs_dma_addr);
+	emac_wr(privc, EMAC_DMA_RX_BASE_ADDR, privc->rx.descs_dma_addr);
+}
+EXPORT_SYMBOL(emac_lib_init_dma);
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,30)
+static inline int netdev_mc_count(const struct net_device *dev)	/* compat shim: 2.6.30 lacks netdev_mc_count() */
+{
+	return dev->mc_count;
+}
+#endif
+
+static void set_rx_mode_mcfilter(struct net_device *dev, u32 *mc_filter)	/* fold the device's multicast list into a 64-bit CRC hash filter */
+{
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,30)
+	int i;
+	struct dev_mc_list *mclist;
+
+	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			i++, mclist = mclist->next) {
+		set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);	/* top 6 CRC bits index the 64-entry table */
+	}
+#else
+	struct netdev_hw_addr *ha;
+
+	netdev_for_each_mc_addr(ha, dev) {
+		set_bit(ether_crc(ETH_ALEN, ha->addr) >> 26,
+				(unsigned long *)mc_filter);
+	}
+#endif
+}
+
+void emac_lib_set_rx_mode(struct net_device *dev)	/* ndo_set_rx_mode: program promiscuous / all-multi / hashed multicast filtering */
+{
+	struct emac_common *arapc = netdev_priv(dev);
+
+	if (dev->flags & IFF_PROMISC) {
+		emac_setbits(arapc, EMAC_MAC_ADDR_CTRL, MacPromiscuous);
+	} else if ((dev->flags & IFF_ALLMULTI)  ||
+			netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {	/* too many groups to hash: accept all multicast */
+		emac_wr(arapc, EMAC_MAC_TABLE1, 0xffff);
+		emac_wr(arapc, EMAC_MAC_TABLE2, 0xffff);
+		emac_wr(arapc, EMAC_MAC_TABLE3, 0xffff);
+		emac_wr(arapc, EMAC_MAC_TABLE4, 0xffff);
+		emac_clrbits(arapc, EMAC_MAC_ADDR_CTRL, MacPromiscuous);
+		printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
+	} else {
+		u32 mc_filter[2];	/* Multicast hash filter */
+		mc_filter[1] = mc_filter[0] = 0;
+
+		set_rx_mode_mcfilter(dev, mc_filter);
+
+		emac_wr(arapc, EMAC_MAC_TABLE1, mc_filter[0] & 0xffff);	/* 64-bit hash split across four 16-bit registers */
+		emac_wr(arapc, EMAC_MAC_TABLE2, mc_filter[0] >> 16);
+		emac_wr(arapc, EMAC_MAC_TABLE3, mc_filter[1] & 0xffff);
+		emac_wr(arapc, EMAC_MAC_TABLE4, mc_filter[1] >> 16);
+		emac_clrbits(arapc, EMAC_MAC_ADDR_CTRL, MacPromiscuous);
+	}
+}
+EXPORT_SYMBOL(emac_lib_set_rx_mode);
+
+static void emac_pm_enter_to_halt(struct phy_device *phy_dev)	/* stop and suspend the PHY unless it is already halted */
+{
+	if (phy_dev && phy_dev->state != PHY_HALTED) {
+		phy_stop(phy_dev);
+		genphy_suspend(phy_dev);	/* put the PHY into low-power mode */
+		printk(KERN_INFO"emac enter halted state\n");
+	}
+}
+
+static void emac_pm_return_from_halt(struct phy_device *phy_dev)	/* resume and restart a halted PHY; no-op otherwise */
+{
+	if (phy_dev->state == PHY_HALTED) {
+		genphy_resume(phy_dev);
+		phy_start(phy_dev);
+		printk(KERN_INFO "%s: emac resumed from halt\n", DRV_NAME);
+	}
+	/* Delay of about 50ms between PHY is resume and start auto negotiation */
+	mdelay(50);
+}
+
+static unsigned long emac_pm_power_save_level = PM_QOS_DEFAULT_VALUE;	/* last level seen on PM_QOS_POWER_SAVE */
+static int emac_pm_adjust_level(const int pm_emac_level, struct phy_device *phy_dev)	/* pick between the per-EMAC request and the global power-save level */
+{
+	if (pm_emac_level != BOARD_PM_LEVEL_NO &&
+			(phy_dev->state == PHY_HALTED || emac_lib_dual_emac)) {	/* per-EMAC level wins when PHY is halted or both EMACs are active */
+		return pm_emac_level;
+	}
+
+	return emac_pm_power_save_level;
+}
+
+static void emac_pm_level(struct emac_common *arapc, int level)	/* apply a PM level: halt the PHY, restrict advertising (slow down), or restore full speed */
+{
+	struct phy_device *phy_dev = arapc->phy_dev;
+
+	if (arapc->emac_cfg & (EMAC_PHY_NOT_IN_USE |	/* no PM without a manageable PHY */
+				EMAC_PHY_NO_COC |
+				EMAC_PHY_AUTO_MASK)) {
+		return;
+	}
+
+	level = emac_pm_adjust_level(level, phy_dev);
+
+	if (level >= BOARD_PM_LEVEL_SUSPEND) {
+		if (phy_dev->state != PHY_HALTED) {
+			phy_stop(phy_dev);
+			genphy_suspend(phy_dev);
+			printk(KERN_INFO "%s: emac halted\n", DRV_NAME);
+		}
+	} else if (level >= BOARD_PM_LEVEL_IDLE) {
+		emac_pm_return_from_halt(phy_dev);
+
+		if (!arapc->pm_adv_mode) {	/* restrict advertising to the lowest supported speed tier */
+			const uint32_t adv_10M_H = SUPPORTED_10baseT_Half;
+			const uint32_t adv_10M_F = SUPPORTED_10baseT_Full;
+			const uint32_t adv_100M_F = SUPPORTED_100baseT_Full;
+			const uint32_t adv_100M_H = SUPPORTED_100baseT_Half;
+			const uint32_t adv_1G_F = SUPPORTED_1000baseT_Full;
+			const uint32_t adv_1G_H = SUPPORTED_1000baseT_Half;
+			uint32_t clear_adv = 0;
+
+			if (phy_dev->advertising & (adv_10M_F | adv_10M_H)) {
+				clear_adv = adv_10M_H | adv_100M_F | adv_100M_H | adv_1G_F | adv_1G_H;
+			} else if (phy_dev->advertising & (adv_100M_F | adv_100M_H)) {
+				clear_adv = (adv_1G_H | adv_1G_F);
+			}
+
+			if (clear_adv) {
+				arapc->pm_adv_mode = 1;
+				mutex_lock(&phy_dev->lock);
+				phy_dev->advertising &= ~clear_adv;
+				phy_dev->state = PHY_QTNPM;	/* force renegotiation with the reduced advertisement */
+				phy_dev->link = 0;
+				mutex_unlock(&phy_dev->lock);
+			}
+
+			printk(KERN_INFO "%s: emac slowed down\n", DRV_NAME);
+		}
+	} else {
+		emac_pm_return_from_halt(phy_dev);
+
+		if (arapc->pm_adv_mode) {
+			arapc->pm_adv_mode = 0;	/* fix below: 10baseT_Full was listed twice, silently dropping 100baseT_Full */
+			mutex_lock(&phy_dev->lock);
+			phy_dev->advertising = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+				SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+				SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
+			phy_dev->state = PHY_QTNPM;
+			phy_dev->link = 0;
+			mutex_unlock(&phy_dev->lock);
+
+			printk(KERN_INFO "%s: emac resumed from slow down\n", DRV_NAME);
+		}
+	}
+}
+
+/*
+ * PM_QOS_POWER_EMAC notifier callback: re-evaluate the PHY power level
+ * whenever the requested EMAC level changes.
+ */
+static int emac_lib_pm_emac_notify(struct notifier_block *b, unsigned long level, void *v)
+{
+	struct emac_common *arapc = container_of(b, struct emac_common, pm_notifier);
+
+	emac_pm_level(arapc, level);
+
+	return NOTIFY_OK;
+}
+
+/* Register this device's PM_QOS_POWER_EMAC notifier. */
+void emac_lib_pm_emac_add_notifier(struct net_device *dev)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+
+	arapc->pm_notifier.notifier_call = emac_lib_pm_emac_notify;
+	pm_qos_add_notifier(PM_QOS_POWER_EMAC, &arapc->pm_notifier);
+}
+EXPORT_SYMBOL(emac_lib_pm_emac_add_notifier);
+
+/* Unregister this device's PM_QOS_POWER_EMAC notifier. */
+void emac_lib_pm_emac_remove_notifier(struct net_device *dev)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+
+	pm_qos_remove_notifier(PM_QOS_POWER_EMAC, &arapc->pm_notifier);
+}
+EXPORT_SYMBOL(emac_lib_pm_emac_remove_notifier);
+
+/*
+ * PM_QOS_POWER_SAVE notifier: cache the new global power-save level and
+ * force every PM_QOS_POWER_EMAC notifier to re-evaluate against it.
+ */
+static int emac_pm_save_notify(struct notifier_block *b, unsigned long level, void *v)
+{
+	emac_pm_power_save_level = level;
+	pm_qos_refresh_notifiers(PM_QOS_POWER_EMAC);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block pm_save_notifier = {
+	.notifier_call = emac_pm_save_notify,
+};
+
+/* Register the global power-save notifier (one per module, not per device). */
+void emac_lib_pm_save_add_notifier(void)
+{
+	pm_qos_add_notifier(PM_QOS_POWER_SAVE, &pm_save_notifier);
+}
+EXPORT_SYMBOL(emac_lib_pm_save_add_notifier);
+
+/* Unregister the global power-save notifier. */
+void emac_lib_pm_save_remove_notifier(void)
+{
+	pm_qos_remove_notifier(PM_QOS_POWER_SAVE, &pm_save_notifier);
+}
+EXPORT_SYMBOL(emac_lib_pm_save_remove_notifier);
+
+/* Record whether both EMACs are linked and re-run the EMAC PM notifiers. */
+void emac_lib_update_link_vars(const uint32_t dual_link)
+{
+	emac_lib_dual_emac = dual_link;
+	pm_qos_refresh_notifiers(PM_QOS_POWER_EMAC);
+}
+EXPORT_SYMBOL(emac_lib_update_link_vars);
+
+/*
+ * Resolve the PHY attached to a net_device.
+ * Returns NULL when the device is absent, not running, or has no
+ * private data; otherwise the (possibly NULL) attached phy_device.
+ */
+static struct phy_device *phy_power_get_phy(struct net_device *dev)
+{
+	struct emac_common *arapc;
+
+	if (!dev || !netif_running(dev))
+		return NULL;
+
+	arapc = netdev_priv(dev);
+	if (!arapc)
+		return NULL;
+
+	return arapc->phy_dev;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+
+/*
+ * Legacy procfs write handler for the phy_power control file.
+ * Writing "1" halts the PHY, "0" resumes it.
+ * Returns the byte count consumed, or a negative errno.
+ */
+static int phy_power_write_proc(struct file *file, const char __user *buffer, unsigned long count, void *data)
+{
+	char cmd[PHY_PW_CMD_MAX_LEN];
+	int ret = 0;
+	struct phy_device *phy_dev;
+	struct net_device *dev = data;
+
+	phy_dev = phy_power_get_phy(dev);
+
+	if (!phy_dev) return -EINVAL;
+
+	if (!count) {
+		return -EINVAL;
+	} else if (count > (PHY_PW_CMD_MAX_LEN - 1)) {
+		return -EINVAL;
+	} else if (copy_from_user(cmd, buffer, count)) {
+		return -EINVAL;
+	}
+
+	/* Terminate over the last byte — assumes a trailing newline from
+	 * "echo"; a newline-less write loses its final character. */
+	cmd[count - 1] = '\0';
+
+	if (strcmp(cmd, "1") == 0) {
+		emac_pm_enter_to_halt(phy_dev);
+		printk(KERN_INFO "%s: emac halted\n", DRV_NAME);
+	} else if (strcmp(cmd, "0") == 0) {
+		emac_pm_return_from_halt(phy_dev);
+		printk(KERN_INFO "%s: emac resumed\n", DRV_NAME);
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret ? ret : count;
+}
+
+/*
+ * Legacy procfs read handler: prints "1" when the PHY is halted, else "0".
+ */
+static int phy_power_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct net_device *dev = data;
+	struct phy_device *phy_dev;
+	int status;
+
+	phy_dev = phy_power_get_phy(dev);
+
+	if (!phy_dev) return -EINVAL;
+
+	if (phy_dev->state == PHY_HALTED) {
+		status = 1;
+	} else {
+		status = 0;
+	}
+
+	return sprintf(page, "%d\n", status);
+}
+
+/* Build the per-MAC proc file name: "<PHY_PW_PROC_FILE_NAME><mac_id>". */
+static void phy_power_proc_name(char *buf, struct emac_common *arapc)
+{
+	sprintf(buf, "%s%d", PHY_PW_PROC_FILE_NAME, arapc->mac_id);
+}
+
+/*
+ * Create the legacy /proc phy_power control file for this device.
+ * Returns 0 on success or -ENOMEM.
+ */
+int emac_lib_phy_power_create_proc(struct net_device *dev)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+	struct proc_dir_entry *entry;
+	char proc_name[12];
+
+	phy_power_proc_name(proc_name, arapc);
+	/* Declaration hoisted to the top of the block: kernel C forbids
+	 * declarations after statements (-Wdeclaration-after-statement). */
+	entry = create_proc_entry(proc_name, 0600, NULL);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	entry->write_proc = phy_power_write_proc;
+	entry->read_proc = phy_power_read_proc;
+	entry->data = dev;
+
+	return 0;
+}
+EXPORT_SYMBOL(emac_lib_phy_power_create_proc);
+
+/* Remove the /proc phy_power file created for this device. */
+void emac_lib_phy_power_remove_proc(struct net_device *dev)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+	char proc_name[12];
+
+	phy_power_proc_name(proc_name, arapc);
+	remove_proc_entry(proc_name, NULL);
+}
+EXPORT_SYMBOL(emac_lib_phy_power_remove_proc);
+
+/*
+ * Legacy procfs write handler for raw PHY register access.
+ * Commands: "r <reg>" reads and logs a register; "w <reg> <val>" writes one.
+ * Returns the byte count consumed, or a negative errno.
+ */
+static int phy_reg_rw_proc(struct file *file, const char __user *buffer, unsigned long count, void *data)
+{
+	char cmd[PHY_PW_CMD_MAX_LEN];
+	int ret = 0;
+	struct phy_device *phy_dev;
+	struct net_device *dev = (struct net_device *)data;
+	/* Initialize parse targets: the original left them uninitialized and
+	 * a short sscanf() match could write a garbage value to the PHY. */
+	int phyreg = 0;
+	int val = 0;
+	int parsed;
+	char mode = '\0';
+
+	phy_dev = phy_power_get_phy(dev);
+
+	if (!phy_dev)
+		return -EINVAL;
+
+	if (!count)
+		return -EINVAL;
+	else if (count > (PHY_PW_CMD_MAX_LEN - 1))
+		return -EINVAL;
+	else if (copy_from_user(cmd, buffer, count))
+		return -EINVAL;
+
+	cmd[count] = '\0';
+
+	parsed = sscanf(cmd, "%c %x %x", &mode, &phyreg, &val);
+
+	if (mode == 'r' && parsed >= 2) {
+		val = phy_read(phy_dev, phyreg);
+		printk(KERN_ERR"0x%04x\n", val);
+	} else if (mode == 'w' && parsed == 3) {
+		ret = phy_write(phy_dev, phyreg, val);
+		if (!ret) {
+			printk(KERN_ERR"complete\n");
+		}
+	} else {
+		printk(KERN_ERR"usage: echo [r|w] reg [val] > /proc/%s\n", PHY_REG_PROC_FILE_NAME);
+	}
+
+	return ret ? ret : count;
+}
+
+/* Build the per-MAC phy_reg proc file name: "<PHY_REG_PROC_FILE_NAME><mac_id>". */
+static void phy_reg_proc_name(char *buf, struct emac_common *arapc)
+{
+	sprintf(buf, "%s%d", PHY_REG_PROC_FILE_NAME, arapc->mac_id);
+}
+
+/*
+ * Create the legacy /proc phy_reg file (write-only register access).
+ * Returns 0 on success or -ENOMEM.
+ */
+int emac_lib_phy_reg_create_proc(struct net_device *dev)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+	struct proc_dir_entry *entry;
+	char proc_name[12] = {0};
+
+	phy_reg_proc_name(proc_name, arapc);
+
+	/* Declaration hoisted to the top of the block: kernel C forbids
+	 * declarations after statements (-Wdeclaration-after-statement). */
+	entry = create_proc_entry(proc_name, 0600, NULL);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	entry->write_proc = phy_reg_rw_proc;
+	entry->data = dev;
+
+	return 0;
+}
+EXPORT_SYMBOL(emac_lib_phy_reg_create_proc);
+#else
+/*
+ * procfs (>= 4.7 API) write handler for the phy_power control file.
+ * Writing "1" halts the PHY, "0" resumes it.
+ * Returns the byte count consumed, or a negative errno.
+ */
+static int phy_power_write_proc(struct file *file, const char __user *buffer, size_t count, loff_t *loff)
+{
+	char cmd[PHY_PW_CMD_MAX_LEN];
+	int ret = 0;
+	struct phy_device *phy_dev;
+	struct net_device *dev = (struct net_device *)PDE_DATA(file_inode(file));
+
+	phy_dev = phy_power_get_phy(dev);
+
+	if (!phy_dev) return -EINVAL;
+
+	if (!count) {
+		return -EINVAL;
+	} else if (count > (PHY_PW_CMD_MAX_LEN - 1)) {
+		return -EINVAL;
+	} else if (copy_from_user(cmd, buffer, count)) {
+		return -EINVAL;
+	}
+
+	/* Terminate over the last byte — assumes a trailing newline from
+	 * "echo"; a newline-less write loses its final character. */
+	cmd[count - 1] = '\0';
+
+	if (strcmp(cmd, "1") == 0) {
+		emac_pm_enter_to_halt(phy_dev);
+		printk(KERN_INFO "%s: emac halted\n", DRV_NAME);
+	} else if (strcmp(cmd, "0") == 0) {
+		emac_pm_return_from_halt(phy_dev);
+		printk(KERN_INFO "%s: emac resumed\n", DRV_NAME);
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret ? ret : count;
+}
+
+/*
+ * procfs (>= 4.7 API) read handler: returns "1\n" when the PHY is halted,
+ * "0\n" otherwise.  Honours *loff so a second read reports EOF and tools
+ * like "cat" terminate (the original looped forever and also overflowed
+ * a 2-byte buffer with sprintf's terminating NUL).
+ */
+static int phy_power_read_proc(struct file *file, char __user *buffer, size_t count, loff_t *loff)
+{
+	struct net_device *dev = (struct net_device *)PDE_DATA(file_inode(file));
+	struct phy_device *phy_dev;
+	int status;
+	/* "%d\n" plus NUL needs 3 bytes; the original buf_status[2] overflowed. */
+	char buf_status[3];
+
+	/* Everything is emitted on the first read; signal EOF afterwards. */
+	if (*loff)
+		return 0;
+
+	phy_dev = phy_power_get_phy(dev);
+
+	if (!phy_dev) return -EINVAL;
+
+	if (phy_dev->state == PHY_HALTED) {
+		status = 1;
+	} else {
+		status = 0;
+	}
+
+	sprintf(buf_status, "%d\n", status);
+
+	if (count < 2)
+		return -EINVAL;
+
+	if (copy_to_user(buffer, buf_status, 2))
+		return -EFAULT;
+
+	*loff += 2;
+
+	return 2;
+}
+
+/* File operations for the phy_power proc entry.
+ * NOTE(review): could likely be "static const" — confirm no external users. */
+struct file_operations phy_power_ops = {
+	.owner	= THIS_MODULE,
+	.read	= phy_power_read_proc,
+	.write	= phy_power_write_proc,
+};
+
+/* Build the per-MAC proc file name: "<PHY_PW_PROC_FILE_NAME><mac_id>". */
+static void phy_power_proc_name(char *buf, struct emac_common *arapc)
+{
+	sprintf(buf, "%s%d", PHY_PW_PROC_FILE_NAME, arapc->mac_id);
+}
+
+/*
+ * Create the /proc phy_power control file (>= 4.7 procfs API).
+ * Returns 0 on success or -ENOMEM.
+ */
+int emac_lib_phy_power_create_proc(struct net_device *dev)
+{
+	struct proc_dir_entry *entry;
+	struct emac_common *arapc = netdev_priv(dev);
+	char proc_name[12];
+
+	phy_power_proc_name(proc_name, arapc);
+	entry = proc_create_data(proc_name, 0600, NULL, &phy_power_ops, dev);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(emac_lib_phy_power_create_proc);
+
+/* Remove the /proc phy_power file created for this device. */
+void emac_lib_phy_power_remove_proc(struct net_device *dev)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+	char proc_name[12];
+
+	phy_power_proc_name(proc_name, arapc);
+	remove_proc_entry(proc_name, NULL);
+}
+EXPORT_SYMBOL(emac_lib_phy_power_remove_proc);
+
+/*
+ * procfs (>= 4.7 API) write handler for raw PHY register access.
+ * Commands: "r <reg>" reads and logs a register; "w <reg> <val>" writes one.
+ * Returns the byte count consumed, or a negative errno.
+ */
+static int phy_reg_rw_proc(struct file *file, const char __user *buffer, size_t count, loff_t *loff)
+{
+	char cmd[PHY_PW_CMD_MAX_LEN];
+	int ret = 0;
+	struct phy_device *phy_dev;
+	struct net_device *dev = (struct net_device *)PDE_DATA(file_inode(file));
+	/* Initialize parse targets: the original left them uninitialized and
+	 * a short sscanf() match could write a garbage value to the PHY. */
+	int phyreg = 0;
+	int val = 0;
+	int parsed;
+	char mode = '\0';
+
+	phy_dev = phy_power_get_phy(dev);
+
+	if (!phy_dev)
+		return -EINVAL;
+
+	if (!count)
+		return -EINVAL;
+	else if (count > (PHY_PW_CMD_MAX_LEN - 1))
+		return -EINVAL;
+	else if (copy_from_user(cmd, buffer, count))
+		return -EINVAL;
+
+	cmd[count] = '\0';
+
+	parsed = sscanf(cmd, "%c %x %x", &mode, &phyreg, &val);
+
+	if (mode == 'r' && parsed >= 2) {
+		val = phy_read(phy_dev, phyreg);
+		printk(KERN_ERR"0x%04x\n", val);
+	} else if (mode == 'w' && parsed == 3) {
+		ret = phy_write(phy_dev, phyreg, val);
+		if (!ret) {
+			printk(KERN_ERR"complete\n");
+		}
+	} else {
+		printk(KERN_ERR"usage: echo [r|w] reg [val] > /proc/%s\n", PHY_REG_PROC_FILE_NAME);
+	}
+
+	return ret ? ret : count;
+}
+
+/* File operations for the phy_reg proc entry (write-only).
+ * NOTE(review): could likely be "static const" — confirm no external users. */
+struct file_operations phy_reg_ops = {
+	.owner	= THIS_MODULE,
+	.write	= phy_reg_rw_proc,
+};
+
+/* Build the per-MAC phy_reg proc file name: "<PHY_REG_PROC_FILE_NAME><mac_id>". */
+static void phy_reg_proc_name(char *buf, struct emac_common *arapc)
+{
+	sprintf(buf, "%s%d", PHY_REG_PROC_FILE_NAME, arapc->mac_id);
+}
+
+/*
+ * Create the /proc phy_reg file (>= 4.7 procfs API).
+ * Returns 0 on success or -ENOMEM.
+ */
+int emac_lib_phy_reg_create_proc(struct net_device *dev)
+{
+	struct proc_dir_entry *entry;
+	struct emac_common *arapc = netdev_priv(dev);
+	char proc_name[12] = {0};
+
+	phy_reg_proc_name(proc_name, arapc);
+
+	entry = proc_create_data(proc_name, 0600, NULL, &phy_reg_ops, dev);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(emac_lib_phy_reg_create_proc);
+#endif
+
+/* Remove the /proc phy_reg file created for this device (both API variants). */
+void emac_lib_phy_reg_remove_proc(struct net_device *dev)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+	char proc_name[12];
+
+	phy_reg_proc_name(proc_name, arapc);
+	remove_proc_entry(proc_name, NULL);
+}
+EXPORT_SYMBOL(emac_lib_phy_reg_remove_proc);
+
+/* The max time (in ms) to wait for statistics counters to return a value */
+static const int max_stat_loop_count = 20;
+
+/*
+ * Read one Arasan RX statistics counter via the indirect RXSTAT interface.
+ * Returns the 32-bit counter value, or 0 on bad arguments, on timeout, or
+ * when the PHY is halted (counters are not readable then).
+ */
+static uint32_t emac_lib_rxstatistics_counter(struct net_device *dev, int counter)
+{
+	uint32_t val;
+	struct emac_common *arapc = netdev_priv(dev);
+
+	if (!arapc || counter < 0 || counter > RxLastStatCounter) {
+		return 0;
+	}
+
+	if (!(arapc->emac_cfg & EMAC_PHY_NOT_IN_USE) && arapc->phy_dev &&
+			(arapc->phy_dev->state == PHY_HALTED)) {
+		return 0;
+	}
+
+	/* Wait for any previous indirect read to finish. */
+	if (!emac_wait(arapc, EMAC_MAC_RXSTAT_CTRL, RxStatReadBusy, 0, 10 * max_stat_loop_count, __FUNCTION__)) {
+		return 0;
+	}
+
+	/* Trigger the read; the busy bit self-clears when data is latched. */
+	emac_wr(arapc, EMAC_MAC_RXSTAT_CTRL, RxStatReadBusy | counter);
+
+	if (!emac_wait(arapc, EMAC_MAC_RXSTAT_CTRL, RxStatReadBusy, 0, max_stat_loop_count, __FUNCTION__)) {
+		return 0;
+	}
+
+	/* The counter is exposed as two 16-bit halves. */
+	val = emac_rd(arapc, EMAC_MAC_RXSTAT_DATA_HIGH) << 16;
+	val |= (emac_rd(arapc, EMAC_MAC_RXSTAT_DATA_LOW) & 0xffff);
+
+	return val;
+}
+
+/*
+ * Read one Arasan TX statistics counter via the indirect TXSTAT interface.
+ * Mirror of emac_lib_rxstatistics_counter(); returns 0 on bad arguments,
+ * on timeout, or when the PHY is halted.
+ */
+static uint32_t emac_lib_txstatistics_counter(struct net_device *dev, int counter)
+{
+	uint32_t val;
+	struct emac_common *arapc = netdev_priv(dev);
+
+	if (!arapc || counter < 0 || counter > TxLastStatCounter) {
+		return 0;
+	}
+
+	if (!(arapc->emac_cfg & EMAC_PHY_NOT_IN_USE) && arapc->phy_dev &&
+			(arapc->phy_dev->state == PHY_HALTED)) {
+		return 0;
+	}
+
+	/* Wait for any previous indirect read to finish. */
+	if (!emac_wait(arapc, EMAC_MAC_TXSTAT_CTRL, TxStatReadBusy, 0, 10 * max_stat_loop_count, __FUNCTION__)) {
+		return 0;
+	}
+
+	/* Trigger the read; the busy bit self-clears when data is latched. */
+	emac_wr(arapc, EMAC_MAC_TXSTAT_CTRL, TxStatReadBusy | counter);
+
+	if (!emac_wait(arapc, EMAC_MAC_TXSTAT_CTRL, TxStatReadBusy, 0, max_stat_loop_count, __FUNCTION__)) {
+		return 0;
+	}
+
+	/* The counter is exposed as two 16-bit halves. */
+	val = emac_rd(arapc, EMAC_MAC_TXSTAT_DATA_HIGH) << 16;
+	val |= (emac_rd(arapc, EMAC_MAC_TXSTAT_DATA_LOW) & 0xffff);
+
+	return val;
+}
+
+/* Refresh the current stats bank's tx[]/rx[] arrays from hardware. */
+static void emac_lib_stat_read_hw_counters(struct net_device *dev)
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct emac_stats *stats = &privc->stats[privc->current_stats];
+	int i;
+
+	/*
+	 * If privc->emac_cfg has flag EMAC_PHY_NOT_IN_USE/EMAC_PHY_AR8236/EMAC_PHY_AR8327/EMAC_PHY_MV88E6071 set,
+	 * the privc->phy_dev will be NULL after initialization, it does not mean that there is no phy device,
+	 * still can read Tx/Rx statistics from HW
+	 */
+	if (privc->phy_dev && privc->phy_dev->state != PHY_RUNNING)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(stats->tx); i++)
+		stats->tx[i] = emac_lib_txstatistics_counter(dev, i);
+	for (i = 0; i < ARRAY_SIZE(stats->rx); i++)
+		stats->rx[i] = emac_lib_rxstatistics_counter(dev, i);
+}
+
+/* Accumulate the DMA counters into the current stats bank.
+ * These are "+=" because the hardware registers appear to be
+ * clear-on-read — TODO confirm against the Arasan EMAC datasheet. */
+static void emac_lib_stat_read_hw_dma(struct net_device *dev)
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct emac_stats *stats = &privc->stats[privc->current_stats];
+
+	stats->dma[DmaMissedFrame] += emac_rd(privc, EMAC_DMA_MISSED_FRAMES) & 0x7fffffff;
+	stats->dma[DmaStopFlush] += emac_rd(privc, EMAC_DMA_STOP_FLUSHES) & 0x7fffffff;
+}
+
+/* Refresh the full current stats bank (MAC counters + DMA counters). */
+static void emac_lib_stat_read_hw(struct net_device *dev)
+{
+	emac_lib_stat_read_hw_counters(dev);
+	emac_lib_stat_read_hw_dma(dev);
+}
+
+/*
+ * Start a new statistics interval (SIOCRDEVSTATS).
+ * The driver keeps two stats banks: the current bank holds the latest
+ * cumulative HW snapshot, the other holds the baseline from the previous
+ * switch; reported values are current minus baseline.  Flipping the banks
+ * makes the latest snapshot the new baseline.
+ */
+static int emac_lib_stats_switch(struct net_device *dev)
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct emac_stats *becoming_old_stats = &privc->stats[privc->current_stats];
+	struct emac_stats *becoming_new_stats = &privc->stats[!privc->current_stats];
+	int i;
+
+	emac_lib_stat_read_hw(dev);
+	memset(&dev->stats, 0, sizeof(dev->stats));
+	memset(becoming_new_stats, 0, sizeof(*becoming_new_stats));
+
+	/* DMA counters not cumulative, so copy them */
+	for (i = 0; i <= DmaLastStatCounter; i++)
+		becoming_new_stats->dma[i] = becoming_old_stats->dma[i];
+
+	/* Flip stats structures */
+	privc->current_stats = !privc->current_stats;
+
+	return 0;
+}
+
+/* Delta of one RX counter since the last stats switch. */
+static uint32_t emac_lib_stat_rx(struct emac_common *privc, enum ArasanRxStatisticsCounters stat)
+{
+	return privc->stats[privc->current_stats].rx[stat] -
+		privc->stats[!privc->current_stats].rx[stat];
+}
+
+/* Delta of one TX counter since the last stats switch. */
+static uint32_t emac_lib_stat_tx(struct emac_common *privc, enum ArasanTxStatisticsCounters stat)
+{
+	return privc->stats[privc->current_stats].tx[stat] -
+		privc->stats[!privc->current_stats].tx[stat];
+}
+
+/* Delta of one DMA counter since the last stats switch. */
+static uint32_t emac_lib_stat_dma(struct emac_common *privc, enum emac_dma_counter stat)
+{
+	return privc->stats[privc->current_stats].dma[stat] -
+		privc->stats[!privc->current_stats].dma[stat];
+}
+
+/*
+ * net_device get_stats hook: refresh the HW counters and fill dev->stats
+ * with deltas relative to the last SIOCRDEVSTATS baseline.
+ * Returns NULL (literal 0) when the device is detached.
+ */
+struct net_device_stats *emac_lib_stats(struct net_device *dev)
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+
+	if (!netif_device_present(dev)) {
+		return 0;
+	}
+
+	emac_lib_stat_read_hw(dev);
+
+	stats->rx_packets = emac_lib_stat_rx(privc, FramesRxTotal);
+	stats->tx_packets = emac_lib_stat_tx(privc, FramesSentTotal);
+	stats->rx_bytes = emac_lib_stat_rx(privc, OctetsRxTotal);
+	stats->tx_bytes = emac_lib_stat_tx(privc, OctetsSentOK);
+	stats->rx_errors = emac_lib_stat_rx(privc, FramesRxErrTotal);
+	stats->tx_errors = emac_lib_stat_tx(privc, FramesSentError);
+	stats->multicast = emac_lib_stat_rx(privc, FramesRxMulticast);
+	stats->collisions = emac_lib_stat_tx(privc, FramesSentSingleCol) +
+		emac_lib_stat_tx(privc, FramesSentMultipleCol) +
+		emac_lib_stat_tx(privc, FramesSentLateCol) +
+		emac_lib_stat_tx(privc, FramesSentExcessiveCol);
+
+	stats->rx_length_errors = emac_lib_stat_rx(privc, FramesRxLenErr);
+	stats->rx_crc_errors = emac_lib_stat_rx(privc, FramesRxCrcErr);
+	stats->rx_frame_errors = emac_lib_stat_rx(privc, FramesRxAlignErr);
+	stats->rx_fifo_errors = emac_lib_stat_rx(privc, FramesRxDroppedBufFull) +
+		emac_lib_stat_rx(privc, FramesRxTruncatedBufFull);
+	stats->rx_missed_errors =  emac_lib_stat_dma(privc, DmaMissedFrame);
+
+	/* NOTE(review): the *_unicast/_broadcast fields below are Quantenna
+	 * extensions to struct net_device_stats — not in mainline. */
+	stats->rx_unicast_packets = emac_lib_stat_rx(privc, FramesRxUnicast);
+	stats->tx_unicast_packets = emac_lib_stat_tx(privc, FramesSentUnicast);
+	stats->tx_multicast_packets = emac_lib_stat_tx(privc, FramesSentMulticast);
+	stats->rx_broadcast_packets = emac_lib_stat_rx(privc, FramesRxBroadcast);
+	stats->tx_broadcast_packets = emac_lib_stat_tx(privc, FramesSentBroadcast);
+
+	return stats;
+}
+EXPORT_SYMBOL(emac_lib_stats);
+
+/*
+ * Return the cumulative count of frames the DMA engine missed (dropped
+ * before reaching a descriptor), after refreshing the DMA counters.
+ */
+uint32_t qtn_eth_rx_lost_get(struct net_device *dev)
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct emac_stats *stats = &privc->stats[privc->current_stats];
+
+	emac_lib_stat_read_hw_dma(dev);
+
+	return stats->dma[DmaMissedFrame];
+}
+EXPORT_SYMBOL(qtn_eth_rx_lost_get);
+
+/* Suffixes for each TX counter, indexed by enum ArasanTxStatisticsCounters. */
+static const char * const tx_stat_names[] = {
+	"OK", "Total", "OK", "Err", "SingleClsn", "MultipleClsn",
+	"LateClsn", "ExcessiveClsn", "Unicast", "Multicast",
+	"Broadcast", "Pause",
+};
+
+/* "Octets" for the byte counter, "Frames" for everything else. */
+static const char * tx_stat_name_prefix(enum ArasanTxStatisticsCounters stat)
+{
+	if (stat == OctetsSentOK)
+		return "Octets";
+	return "Frames";
+}
+
+/* Suffixes for each RX counter, indexed by enum ArasanRxStatisticsCounters. */
+static const char * const rx_stat_names[] = {
+	"OK", "Total", "CrcErr", "AlignErr", "TotalErr", "OK",
+	"Total", "Unicast", "Multicast", "Broadcast", "Pause",
+	"LenErr", "Undersized", "Oversized", "Frags", "Jabber",
+	"Len64", "Len65-127", "Len128-255", "Len256-511", "Len512-1023",
+	"Len1024-1518", "LenOver1518", "DroppedBufFull", "TruncBufFull",
+};
+
+/* "Octets" for the byte counters, "Frames" for everything else. */
+static const char *rx_stat_name_prefix(enum ArasanRxStatisticsCounters stat)
+{
+	if (stat == OctetsRxOK || stat == OctetsRxTotal)
+		return "Octets";
+	return "Frames";
+}
+
+/* Labels for the software DMA counters (enum emac_dma_counter order). */
+static const char * const dma_stat_names[] = {
+	"DmaMissedFrame", "DmaStopFlush",
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+/*
+ * Format all TX/RX/DMA counters into a caller-supplied buffer (pre-4.7
+ * procfs style).  Returns the number of characters written.
+ * NOTE(review): no bounds check on buf — caller must provide a page.
+ */
+int emac_lib_stats_sprintf(char *buf, struct net_device *dev)
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct emac_stats *stats = &privc->stats[privc->current_stats];
+	char *p = buf;
+	int i;
+
+	emac_lib_stat_read_hw(dev);
+
+	for (i = 0; i < ARRAY_SIZE(stats->tx); i++)
+		p += sprintf(p, "%2s#%02d %6s%-14s : %10d\n",
+				"Tx", i, tx_stat_name_prefix(i), tx_stat_names[i], stats->tx[i]);
+	for (i = 0; i < ARRAY_SIZE(stats->rx); i++)
+		p += sprintf(p, "%2s#%02d %6s%-14s : %10d\n",
+				"Rx", i, rx_stat_name_prefix(i), rx_stat_names[i], stats->rx[i]);
+	for (i = 0; i < ARRAY_SIZE(stats->dma); i++)
+		p += sprintf(p, "%-14s             : %10d\n",
+				dma_stat_names[i], stats->dma[i]);
+
+	return p - buf;
+}
+#else
+/*
+ * Emit all TX/RX/DMA counters into a seq_file (>= 4.7 procfs style).
+ * Always returns 0.
+ */
+int emac_lib_stats_sprintf(struct seq_file *sfile, struct net_device *dev)
+{
+	struct emac_common *privc = netdev_priv(dev);
+	struct emac_stats *stats = &privc->stats[privc->current_stats];
+	int i;
+
+	emac_lib_stat_read_hw(dev);
+
+	for (i = 0; i < ARRAY_SIZE(stats->tx); i++)
+		seq_printf(sfile, "%2s#%02d %6s%-14s : %10d\n",
+				"Tx", i, tx_stat_name_prefix(i), tx_stat_names[i], stats->tx[i]);
+	for (i = 0; i < ARRAY_SIZE(stats->rx); i++)
+		seq_printf(sfile, "%2s#%02d %6s%-14s : %10d\n",
+				"Rx", i, rx_stat_name_prefix(i), rx_stat_names[i], stats->rx[i]);
+	for (i = 0; i < ARRAY_SIZE(stats->dma); i++)
+		seq_printf(sfile, "%-14s             : %10d\n",
+				dma_stat_names[i], stats->dma[i]);
+
+	return 0;
+}
+#endif
+EXPORT_SYMBOL(emac_lib_stats_sprintf);
+
+/*
+ * Device ioctl handler.  SIOCRDEVSTATS flips the statistics baseline;
+ * the MII ioctls are forwarded to phylib; everything else is rejected.
+ * Requires a running interface with a controllable PHY.
+ */
+int emac_lib_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct emac_common *arapc = netdev_priv(dev);
+
+	if (!netif_running(dev)) {
+		return -EINVAL;
+	}
+
+	if (!arapc->phy_dev) {
+		/* PHY not controllable */
+		return -EINVAL;
+	}
+
+	switch(cmd) {
+	case SIOCRDEVSTATS:
+		return emac_lib_stats_switch(dev);
+
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		/* Accept these */
+		break;
+	default:
+		/* Reject the rest */
+		return -EOPNOTSUPP;
+	}
+
+	/* phy_mii_ioctl()'s second argument changed from mii_ioctl_data to
+	 * the whole ifreq in newer kernels. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	return phy_mii_ioctl(arapc->phy_dev, if_mii(rq), cmd);
+#else
+	return phy_mii_ioctl(arapc->phy_dev, rq, cmd);
+#endif
+}
+EXPORT_SYMBOL(emac_lib_ioctl);
+
+#ifdef CONFIG_QVSP
+/* Hook points the QVSP module fills in at runtime (NULL when unloaded). */
+struct qvsp_wrapper emac_qvsp;
+EXPORT_SYMBOL(emac_qvsp);
+
+/* Called by the QVSP module on load to install its state and check hook. */
+void qvsp_wrapper_init(struct qvsp_ext_s *qvsp, QVSP_CHECK_FUNC_PROTOTYPE(*check_func))
+{
+	emac_qvsp.qvsp = qvsp;
+	emac_qvsp.qvsp_check_func = check_func;
+}
+EXPORT_SYMBOL(qvsp_wrapper_init);
+
+/* Called by the QVSP module on unload; clear the hooks. */
+void qvsp_wrapper_exit(void)
+{
+	emac_qvsp.qvsp_check_func = NULL;
+	emac_qvsp.qvsp = NULL;
+}
+EXPORT_SYMBOL(qvsp_wrapper_exit);
+#endif	// CONFIG_QVSP
+
+#if EMAC_REG_DEBUG
+/* One dump entry: register name, byte offset, and number of consecutive
+ * 32-bit registers starting at that offset. */
+struct emac_reg_debug_s {
+	const char *name;
+	uint16_t offset;
+	uint16_t count;
+};
+
+/* Single register / register range entry helpers. */
+#define EMACR(x)	{ #x, (x), 1 }
+#define EMACRR(x, r)	{ #x, (x), r }
+/* Registers dumped by emac_lib_reg_debug(), in dump order. */
+const static struct emac_reg_debug_s emac_reg_debug_list[] = {
+	EMACR(EMAC_DMA_CONFIG),
+	EMACR(EMAC_DMA_CTRL),
+	EMACR(EMAC_DMA_STATUS_IRQ),
+	EMACR(EMAC_DMA_INT_ENABLE),
+	EMACR(EMAC_DMA_TX_AUTO_POLL),
+	EMACR(EMAC_DMA_TX_POLL_DEMAND),
+	EMACR(EMAC_DMA_RX_POLL_DEMAND),
+	EMACR(EMAC_DMA_TX_BASE_ADDR),
+	EMACR(EMAC_DMA_RX_BASE_ADDR),
+	EMACR(EMAC_DMA_MISSED_FRAMES),
+	EMACR(EMAC_DMA_STOP_FLUSHES),
+	EMACR(EMAC_DMA_RX_IRQ_MITIGATION),
+	EMACR(EMAC_DMA_CUR_TXDESC_PTR),
+	EMACR(EMAC_DMA_CUR_TXBUF_PTR),
+	EMACR(EMAC_DMA_CUR_RXDESC_PTR),
+	EMACR(EMAC_DMA_CUR_RXBUF_PTR),
+	EMACR(EMAC_MAC_GLOBAL_CTRL),
+	EMACR(EMAC_MAC_TX_CTRL),
+	EMACR(EMAC_MAC_RX_CTRL),
+	EMACR(EMAC_MAC_MAX_FRAME_SIZE),
+	EMACR(EMAC_MAC_TX_JABBER_SIZE),
+	EMACR(EMAC_MAC_RX_JABBER_SIZE),
+	EMACR(EMAC_MAC_ADDR_CTRL),
+	EMACR(EMAC_MAC_ADDR1_HIGH),
+	EMACR(EMAC_MAC_ADDR1_MED),
+	EMACR(EMAC_MAC_ADDR1_LOW),
+	EMACR(EMAC_MAC_ADDR2_HIGH),
+	EMACR(EMAC_MAC_ADDR2_MED),
+	EMACR(EMAC_MAC_ADDR2_LOW),
+	EMACR(EMAC_MAC_ADDR3_HIGH),
+	EMACR(EMAC_MAC_ADDR3_MED),
+	EMACR(EMAC_MAC_ADDR3_LOW),
+	EMACR(EMAC_MAC_ADDR4_HIGH),
+	EMACR(EMAC_MAC_ADDR4_MED),
+	EMACR(EMAC_MAC_ADDR4_LOW),
+	EMACR(EMAC_MAC_TABLE1),
+	EMACR(EMAC_MAC_TABLE2),
+	EMACR(EMAC_MAC_TABLE3),
+	EMACR(EMAC_MAC_TABLE4),
+	EMACR(EMAC_MAC_FLOW_CTRL),
+	EMACR(EMAC_MAC_FLOW_PAUSE_GENERATE),
+	EMACR(EMAC_MAC_FLOW_SA_HIGH),
+	EMACR(EMAC_MAC_FLOW_SA_MED),
+	EMACR(EMAC_MAC_FLOW_SA_LOW),
+	EMACR(EMAC_MAC_FLOW_DA_HIGH),
+	EMACR(EMAC_MAC_FLOW_DA_MED),
+	EMACR(EMAC_MAC_FLOW_DA_LOW),
+	EMACR(EMAC_MAC_FLOW_PAUSE_TIMEVAL),
+	EMACR(EMAC_MAC_MDIO_CTRL),
+	EMACR(EMAC_MAC_MDIO_DATA),
+	EMACR(EMAC_MAC_RXSTAT_CTRL),
+	EMACR(EMAC_MAC_RXSTAT_DATA_HIGH),
+	EMACR(EMAC_MAC_RXSTAT_DATA_LOW),
+	EMACR(EMAC_MAC_TXSTAT_CTRL),
+	EMACR(EMAC_MAC_TXSTAT_DATA_HIGH),
+	EMACR(EMAC_MAC_TXSTAT_DATA_LOW),
+	EMACR(EMAC_MAC_TX_ALMOST_FULL),
+	EMACR(EMAC_MAC_TX_START_THRESHOLD),
+	EMACR(EMAC_MAC_RX_START_THRESHOLD),
+	EMACR(EMAC_MAC_INT),
+	EMACR(EMAC_MAC_INT_ENABLE),
+	EMACR(TOPAZ_EMAC_WRAP_CTRL),
+	EMACR(TOPAZ_EMAC_RXP_CTRL),
+	EMACR(TOPAZ_EMAC_TXP_CTRL),
+	EMACR(TOPAZ_EMAC_TXP_Q_FULL),
+	EMACR(TOPAZ_EMAC_TXP_DESC_PTR),
+	EMACR(TOPAZ_EMAC_BUFFER_POOLS),
+	EMACR(TOPAZ_EMAC_TXP_STATUS),
+	EMACR(TOPAZ_EMAC_DESC_LIMIT),
+	EMACR(TOPAZ_EMAC_RXP_PRIO_CTRL),
+	EMACR(TOPAZ_EMAC_RXP_OUTPORT_CTRL),
+	EMACR(TOPAZ_EMAC_RXP_OUTNODE_CTRL),
+	EMACR(TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID),
+	EMACR(TOPAZ_EMAC_RXP_VLAN_PRI_CTRL),
+	EMACR(TOPAZ_EMAC_RXP_VLAN_TAG_0_1),
+	EMACR(TOPAZ_EMAC_RXP_VLAN_TAG_2_3),
+	EMACR(TOPAZ_EMAC_RXP_IP_CTRL),
+	EMACR(TOPAZ_EMAC_RXP_DPI_CTRL),
+	EMACR(TOPAZ_EMAC_RXP_STATUS),
+	EMACR(TOPAZ_EMAC_RXP_CST_SEL),
+	EMACR(TOPAZ_EMAC_RXP_FRAME_CNT_CLEAR),
+	EMACR(TOPAZ_EMAC_FRM_COUNT_ERRORS),
+	EMACR(TOPAZ_EMAC_FRM_COUNT_TOTAL),
+	EMACR(TOPAZ_EMAC_FRM_COUNT_DA_MATCH),
+	EMACR(TOPAZ_EMAC_FRM_COUNT_SA_MATCH),
+	EMACRR(TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(0), 8),
+	EMACR(TOPAZ_EMAC_RXP_IP_CTRL),
+	EMACRR(TOPAZ_EMAC_RXP_DPI_TID_MAP_REG(0), 8),
+	EMACRR(TOPAZ_EMAC_RX_DPI_FIELD_VAL(0), TOPAZ_EMAC_NUM_DPI_FIELDS),
+	EMACRR(TOPAZ_EMAC_RX_DPI_FIELD_MASK(0), TOPAZ_EMAC_NUM_DPI_FIELDS),
+	EMACRR(TOPAZ_EMAC_RX_DPI_FIELD_CTRL(0), TOPAZ_EMAC_NUM_DPI_FIELDS),
+	EMACRR(TOPAZ_EMAC_RX_DPI_FIELD_GROUP(0), TOPAZ_EMAC_NUM_DPI_FILTERS),
+	EMACRR(TOPAZ_EMAC_RX_DPI_OUT_CTRL(0), TOPAZ_EMAC_NUM_DPI_FILTERS),
+	EMACRR(TOPAZ_EMAC_RX_DPI_IPT_GROUP(0), TOPAZ_EMAC_NUM_DPI_FILTERS),
+};
+
+/* Dump every register in emac_reg_debug_list relative to 'base' via printk. */
+void emac_lib_reg_debug(u32 base) {
+	int i;
+	int j;
+	for (i = 0; i < ARRAY_SIZE(emac_reg_debug_list); i++) {
+		u32 regcount = emac_reg_debug_list[i].count;
+		for (j = 0; j < regcount; j++) {
+			u32 reg = base + emac_reg_debug_list[i].offset + 4 * j;
+			u32 val = readl(reg);
+			printk("%s: 0x%08x = 0x%08x %s[%d]\n",
+					__FUNCTION__, reg, val, emac_reg_debug_list[i].name, j);
+		}
+	}
+}
+EXPORT_SYMBOL(emac_lib_reg_debug);
+#endif	// EMAC_REG_DEBUG
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/qtn/ruby/emac_lib.h b/drivers/qtn/ruby/emac_lib.h
new file mode 100644
index 0000000..9296350
--- /dev/null
+++ b/drivers/qtn/ruby/emac_lib.h
@@ -0,0 +1,192 @@
+/*
+ *  Copyright (c) Quantenna Communications, Inc. 2012
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __EMAC_LIB_H
+#define __EMAC_LIB_H
+
+#include <linux/netdevice.h>
+#include <common/ruby_arasan_emac_ahb.h>
+#include <common/topaz_platform.h>
+#include <qtn/dmautil.h>
+
+#ifdef CONFIG_QVSP
+#include <qtn/qdrv_sch.h>
+#include <qtn/qvsp.h>
+#endif
+
+#define EMAC_REG_DEBUG	1
+
+#define QTN_DSCP_MIN		0
+#define QTN_DSCP_MAX		63
+
+#ifndef __ASSEMBLY__
+
+#define MULTICAST_FILTER_LIMIT	64
+
+#define TIMEOUT_MAC_MDIO_CTRL	1000	/*ms*/
+
+/*
+ * State which is common to Topaz and Ruby EMAC usage
+ */
+
+/* DMA-aligned descriptor ring container for emac_desc entries. */
+typedef ALIGNED_DMA_DESC(struct, emac_desc) aligned_emac_descs;
+
+/* Software-accumulated DMA counters (kept outside the Arasan stat block). */
+enum emac_dma_counter {
+	DmaMissedFrame = 0,
+	DmaStopFlush = 1,
+	DmaLastStatCounter = 1,
+};
+
+/* One bank of statistics counters; the driver keeps two (snapshot + baseline). */
+struct emac_stats {
+	uint32_t tx[TxLastStatCounter + 1];
+	uint32_t rx[RxLastStatCounter + 1];
+	uint32_t dma[DmaLastStatCounter + 1];
+};
+
+/* Per-port driver state shared by the Ruby and Topaz EMAC drivers. */
+struct emac_common {
+	u32 vbase;		/* EMAC register base address */
+	u32 mdio_vbase;		/* MDIO register base address */
+	u32 emac_cfg;		/* EMAC_PHY_* configuration flags */
+	int mac_id;		/* port index; suffix for proc file names */
+	int phy_addr;
+	struct phy_device *phy_dev;	/* NULL for some switch/no-PHY configs */
+	struct mii_bus *mii_bus;
+	int old_link;
+	struct net_device *dev;
+
+	aligned_emac_descs rx;	/* RX descriptor ring */
+	aligned_emac_descs tx;	/* TX descriptor ring */
+
+	struct notifier_block pm_notifier;	/* PM_QOS_POWER_EMAC hook */
+	uint32_t pm_adv_mode;	/* non-zero while advertisement is PM-restricted */
+
+	int current_stats;	/* index of the active bank in stats[] */
+	struct emac_stats stats[2];
+};
+
+/*
+ * Utility functions for reading/writing registers in the Ethernet MAC
+ */
+__always_inline static u32 emac_rd(struct emac_common *arapc, int reg)
+{
+	return readl(IO_ADDRESS(arapc->vbase + reg));
+}
+__always_inline static void emac_wr(struct emac_common *arapc, int reg, u32 val)
+{
+	writel(val, IO_ADDRESS(arapc->vbase + reg));
+	/* HW bug workaround - dummy access breaks up bus transactions. */
+	readl(RUBY_SYS_CTL_BASE_ADDR);
+}
+
+/* True if the board config uses one of the Realtek switch PHYs. */
+static inline bool emac_lib_rtl_switch(uint32_t cfg)
+{
+	return cfg & (EMAC_PHY_RTL8363SB_P0 | EMAC_PHY_RTL8363SB_P1 | EMAC_PHY_RTL8365MB | EMAC_PHY_RTL8367RB);
+}
+
+/*
+ * Utility functions for reading/writing registers in the MDIO
+ */
+inline static u32 mdio_rd(struct emac_common *arapc, int reg)
+{
+	return readl(IO_ADDRESS(arapc->mdio_vbase + reg));
+}
+inline static void mdio_wr(struct emac_common *arapc, int reg, u32 val)
+{
+	writel(val, IO_ADDRESS(arapc->mdio_vbase + reg));
+	/* HW bug workaround - dummy access breaks up bus transactions. */
+	readl(RUBY_SYS_CTL_BASE_ADDR);
+}
+
+/* Read-modify-write helpers: set/clear bits in an EMAC register. */
+inline static void emac_setbits(struct emac_common *arapc, int reg, u32 val)
+{
+	emac_wr(arapc, reg, emac_rd(arapc, reg) | val);
+}
+
+inline static void emac_clrbits(struct emac_common *arapc, int reg, u32 val)
+{
+	emac_wr(arapc, reg, emac_rd(arapc, reg) & ~val);
+}
+
+int emac_lib_poll_wait(struct emac_common *arapc, u32(*read_func)(struct emac_common*, int),
+		int reg, u32 mask, u32 val, unsigned long ms, const char *func);
+
+/* Poll an MDIO register until (reg & mask) == val, up to 'ms' milliseconds. */
+inline static int mdio_wait(struct emac_common *arapc, int reg, u32 mask,
+		u32 val, unsigned long ms, const char *func)
+{
+	return emac_lib_poll_wait(arapc, mdio_rd, reg, mask, val, ms, func);
+}
+
+/* Poll an EMAC register until (reg & mask) == val, up to 'ms' milliseconds. */
+inline static int emac_wait(struct emac_common *arapc, int reg, u32 mask,
+		u32 val, unsigned long ms, const char *func)
+{
+	return emac_lib_poll_wait(arapc, emac_rd, reg, mask, val, ms, func);
+}
+
+extern int g_dscp_flag;
+extern int g_dscp_value[];
+extern const struct ethtool_ops emac_lib_ethtool_ops;
+void emac_lib_enable(uint32_t);
+int emac_lib_mii_init(struct net_device *dev);
+void emac_lib_mii_exit(struct net_device *dev);
+int emac_lib_mdio_read(struct mii_bus *bus, int phy_addr, int reg);
+int emac_lib_mdio_write(struct mii_bus *bus, int phy_addr, int reg, uint16_t value);
+int emac_lib_mdio_sysfs_create(struct net_device *dev);
+void emac_lib_mdio_sysfs_remove(struct net_device *dev);
+int emac_lib_board_cfg(int port, int *emac_cfg, int *emac_phy);
+void emac_lib_descs_free(struct net_device *dev);
+int emac_lib_descs_alloc(struct net_device *dev,
+		u32 rxdescs, bool rxdescs_sram,
+		u32 txdescs, bool txdescs_sram);
+void emac_lib_init_mac(struct net_device *dev);
+void emac_lib_init_dma(struct emac_common *privc);
+void emac_lib_phy_start(struct net_device *dev);
+void emac_lib_phy_stop(struct net_device *dev);
+void emac_lib_set_rx_mode(struct net_device *dev);
+int emac_lib_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+struct net_device_stats *emac_lib_stats(struct net_device *dev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+int emac_lib_stats_sprintf(char *buf, struct net_device *dev);
+#else
+int emac_lib_stats_sprintf(struct seq_file *sfile, struct net_device *dev);
+#endif
+void emac_lib_pm_save_add_notifier(void);
+void emac_lib_pm_save_remove_notifier(void);
+int emac_lib_phy_power_create_proc(struct net_device *dev);
+void emac_lib_phy_power_remove_proc(struct net_device *dev);
+int emac_lib_phy_reg_create_proc(struct net_device *dev);
+void emac_lib_phy_reg_remove_proc(struct net_device *dev);
+
+void emac_lib_update_link_vars(const uint32_t dual_link);
+void emac_lib_pm_emac_add_notifier(struct net_device *dev);
+void emac_lib_pm_emac_remove_notifier(struct net_device *dev);
+
+#if EMAC_REG_DEBUG
+void emac_lib_reg_debug(u32 base);
+#else
+/* Stub must carry the same name as the real function above; the original
+ * defined "emac_reg_debug", so builds with EMAC_REG_DEBUG=0 would fail at
+ * any emac_lib_reg_debug() call site. */
+#define emac_lib_reg_debug(x)
+#endif
+
+#ifdef CONFIG_QVSP
+extern struct qvsp_wrapper emac_qvsp;
+#endif
+
+#endif	// __ASSEMBLY__
+#endif	// __EMAC_LIB_H
+
diff --git a/drivers/qtn/ruby/gpio.c b/drivers/qtn/ruby/gpio.c
new file mode 100644
index 0000000..82b41eb
--- /dev/null
+++ b/drivers/qtn/ruby/gpio.c
@@ -0,0 +1,309 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/gpio/driver.h>
+
+#include <asm/hardware.h>
+
+#include <asm/board/platform.h>
+#include <asm/board/gpio.h>
+#include <asm-generic/gpio.h>
+
+#define GPIO_MAX		ARCH_NR_GPIOS
+
+#define to_arc_gpio_chip(d) container_of(d, struct arc_7XX_gpio, chip)
+
+/*
+ * Register layout of the Ruby/ARC GPIO controller block, in memory-map
+ * order (seven consecutive 32-bit registers, offsets 0x00..0x18).
+ */
+struct arc_gpio_controller {
+	uint32_t	in_dat;		/* input data: read to sample pin levels */
+	uint32_t	out_msk;	/* gates which out_dat bits a write updates */
+	uint32_t	out_dat;	/* output data */
+	uint32_t	mode1;		/* 2-bit mode per pin, pins below RUBY_GPIO_MODE1_MAX */
+	uint32_t	mode2;		/* 2-bit mode per pin, remaining pins */
+	uint32_t	afsel;		/* alternate-function select, one bit per pin */
+	uint32_t	def;
+};
+
+struct arc_7XX_gpio {
+	struct gpio_chip		chip;	/* gpiolib chip registered at arch init */
+	/* NOTE(review): conventional sparse annotation is
+	 * "struct arc_gpio_controller __iomem *regs" (qualifies the pointee). */
+	struct arc_gpio_controller	*__iomem regs;
+};
+
+
+/* Serializes read-modify-write sequences on the shared GPIO registers. */
+static DEFINE_SPINLOCK(gpio_spinlock);
+static struct arc_7XX_gpio arc_gpio;
+
+/*
+ * Program a 2-bit pin-mode field inside a mode register.
+ * @reg:    physical register address (translated with IO_ADDRESS()).
+ * @offset: bit offset of the pin's field within the register.
+ * @cfg:    2-bit mode value.
+ * Read-modify-write -- callers must hold gpio_spinlock.
+ */
+inline static void gpio_set_mode(void *reg, uint32_t offset, uint32_t cfg)
+{
+	uint32_t tmp = readl(IO_ADDRESS(reg));
+	tmp &= ~(0x3 << offset);
+	tmp |= (cfg << offset);
+	writel(tmp, IO_ADDRESS(reg));
+}
+
+/* Select mode1 or mode2 depending on the pin index. */
+inline static void gpio_config_pin(struct arc_gpio_controller *g, uint32_t pin, uint32_t cfg)
+{
+	if(pin < RUBY_GPIO_MODE1_MAX) {
+		gpio_set_mode(&g->mode1, pin * 2, cfg);
+	} else {
+		gpio_set_mode(&g->mode2, (pin - RUBY_GPIO_MODE1_MAX) * 2, cfg);
+	}
+}
+
+/*
+ * Configure one pin: enable its alternate function when cfg is an ALT mode
+ * (cfg >= RUBY_GPIO_ALT_INPUT), disable it otherwise, then program the low
+ * two mode bits.  Serialized by gpio_spinlock.
+ */
+static void _gpio_config(struct arc_gpio_controller *g, uint32_t pin, uint32_t cfg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&gpio_spinlock, flags);
+	if(cfg >= RUBY_GPIO_ALT_INPUT) {
+		writel_or(RUBY_BIT(pin), IO_ADDRESS(&g->afsel));
+	} else {
+		writel_and(~RUBY_BIT(pin), IO_ADDRESS(&g->afsel));
+	}
+	gpio_config_pin(g, pin, cfg & 0x3);
+	spin_unlock_irqrestore(&gpio_spinlock, flags);
+}
+
+
+/*
+ * Drive one output pin.  out_msk is set so only this pin's out_dat bit is
+ * updated, then cleared again.  @state must be 0 or 1 -- it is shifted into
+ * position, not masked.
+ */
+static void gpio_output(struct arc_gpio_controller *g, uint32_t pin, uint32_t state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&gpio_spinlock, flags);
+	writel(RUBY_BIT(pin), IO_ADDRESS(&g->out_msk));
+	writel(state << pin, IO_ADDRESS(&g->out_dat));
+	writel(0, IO_ADDRESS(&g->out_msk));
+	spin_unlock_irqrestore(&gpio_spinlock, flags);
+}
+
+
+/* Read one input pin.  A single aligned 32-bit read is atomic, so no lock. */
+static uint32_t gpio_input(struct arc_gpio_controller *g, uint32_t pin)
+{
+	return ((readl(IO_ADDRESS(&g->in_dat)) >> pin) & 0x1);
+}
+
+/* gpiolib callback: switch pin @offset to plain GPIO input mode. */
+static int arc_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+	struct arc_7XX_gpio *arc_chip = to_arc_gpio_chip(chip);
+	struct arc_gpio_controller *__iomem g = arc_chip->regs;
+
+	_gpio_config(g, offset, GPIO_MODE_INPUT);
+
+	return 0;
+}
+
+/* gpiolib callback: latch @value first, then switch the pin to output mode. */
+static int arc_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct arc_7XX_gpio *arc_chip = to_arc_gpio_chip(chip);
+	struct arc_gpio_controller *__iomem g = arc_chip->regs;
+	gpio_output(g, offset, value);
+	_gpio_config(g, offset, GPIO_MODE_OUTPUT);
+
+	return 0;
+}
+
+/* gpiolib callback: sample the current level of pin @offset. */
+static int arc_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct arc_7XX_gpio *arc_chip = to_arc_gpio_chip(chip);
+	struct arc_gpio_controller *__iomem g = arc_chip->regs;
+
+	return (int)gpio_input(g, offset);
+}
+
+/* gpiolib callback: drive pin @offset to @value. */
+static void arc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct arc_7XX_gpio *arc_chip = to_arc_gpio_chip(chip);
+	struct arc_gpio_controller *__iomem g = arc_chip->regs;
+
+	gpio_output(g, offset, value);
+}
+
+/*
+ * Register the ARC GPIO controller with gpiolib at arch-init time.
+ * Returns 0 on success or the gpiochip_add() error code.
+ */
+static int __init arc_gpio_init(void)
+{
+	memset(&arc_gpio, 0, sizeof(arc_gpio));
+	arc_gpio.chip.label = "arcgpio";
+	arc_gpio.chip.direction_input = arc_gpio_dir_in;
+	arc_gpio.chip.direction_output = arc_gpio_dir_out;
+	arc_gpio.chip.set = arc_gpio_set;
+	arc_gpio.chip.get = arc_gpio_get;
+	arc_gpio.chip.base = 0;
+	arc_gpio.chip.ngpio = GPIO_MAX;
+
+	arc_gpio.regs = (struct arc_gpio_controller *) RUBY_GPIO_REGS_ADDR;
+
+	/* Propagate registration failure; the original discarded this error
+	 * and unconditionally reported success. */
+	return gpiochip_add(&arc_gpio.chip);
+}
+
+/*
+ * Exported pin-configuration entry point: configure @pin to mode @cfg on
+ * the primary GPIO controller at RUBY_GPIO_REGS_ADDR.
+ */
+void gpio_config(uint32_t pin, uint32_t cfg)
+{
+	struct arc_gpio_controller *__iomem g =
+			(struct arc_gpio_controller *) RUBY_GPIO_REGS_ADDR;
+	_gpio_config(g, pin, cfg);
+
+}
+EXPORT_SYMBOL(gpio_config);
+
+/*
+ * Drive a wake-on-WLAN indication pin to @value.  No-op on Amber
+ * (TOPAZ_AMBER_IP), where WOWLAN is signalled via the WIFI2SOC interrupt.
+ */
+void gpio_wowlan_output(uint32_t pin, uint32_t value)
+{
+#ifndef TOPAZ_AMBER_IP
+	struct arc_gpio_controller *__iomem g =
+			(struct arc_gpio_controller *) RUBY_GPIO_REGS_ADDR;
+	gpio_output(g, pin, value);
+#else
+	/*
+	 * In Amber WOWLAN is handled by WIFI2SOC interrupt.
+	 */
+#endif
+}
+EXPORT_SYMBOL(gpio_wowlan_output);
+
+/* Route the UART0 pins to their serial alternate functions.  On Amber the
+ * pins are dedicated, so every function below is a no-op there. */
+void gpio_uart0_config(void)
+{
+#ifndef TOPAZ_AMBER_IP
+	gpio_config(RUBY_GPIO_UART0_SO, RUBY_GPIO_ALT_OUTPUT);
+	gpio_config(RUBY_GPIO_UART0_SI, RUBY_GPIO_ALT_INPUT);
+#else
+	/*
+	 * In Amber GPIO pins are not shared. No need to set up alternate function.
+	 */
+#endif
+}
+
+/* Route the UART1 pins to their serial alternate functions. */
+void gpio_uart1_config(void)
+{
+#ifndef TOPAZ_AMBER_IP
+	gpio_config(RUBY_GPIO_UART1_SO, RUBY_GPIO_ALT_OUTPUT);
+	gpio_config(RUBY_GPIO_UART1_SI, RUBY_GPIO_ALT_INPUT);
+#else
+	/*
+	 * In Amber GPIO pins are not shared. No need to set up alternate function.
+	 */
+#endif
+}
+
+/* Route the SPI flash pins to the SPI controller.
+ * NOTE(review): MISO is configured as ALT_OUTPUT and MOSI as ALT_INPUT --
+ * the reverse of the usual master-mode directions.  Possibly the macro
+ * names are swapped in the platform header; confirm against the pin mux
+ * documentation before changing. */
+void gpio_spi_flash_config(void)
+{
+#ifndef TOPAZ_AMBER_IP
+	gpio_config(RUBY_GPIO_SPI_MISO, RUBY_GPIO_ALT_OUTPUT);
+	gpio_config(RUBY_GPIO_SPI_MOSI, RUBY_GPIO_ALT_INPUT);
+	gpio_config(RUBY_GPIO_SPI_SCK, RUBY_GPIO_ALT_OUTPUT);
+	gpio_config(RUBY_GPIO_SPI_nCS, RUBY_GPIO_ALT_OUTPUT);
+#else
+	/*
+	 * In Amber GPIO pins are not shared. No need to set up alternate function.
+	 */
+#endif
+}
+EXPORT_SYMBOL(gpio_spi_flash_config);
+
+/* Route the LNA-toggle pin to its alternate output function. */
+void gpio_lna_toggle_config(void)
+{
+#ifndef TOPAZ_AMBER_IP
+	gpio_config(RUBY_GPIO_LNA_TOGGLE, RUBY_GPIO_ALT_OUTPUT);
+#else
+	/*
+	 * In Amber GPIO pins are not shared. No need to set up alternate function.
+	 */
+#endif
+}
+EXPORT_SYMBOL(gpio_lna_toggle_config);
+
+/*
+ * Map a GPIO pin number to the address of its PWM control register.
+ * Ruby and Amber expose PWM on different pin sets.  Returns 0 for pins
+ * without PWM support (0 is never a valid register address here).
+ */
+inline static uint32_t _gpio_pin_pwm_reg(uint32_t pin)
+{
+	switch(pin) {
+#ifndef TOPAZ_AMBER_IP
+		case RUBY_GPIO_PIN1:
+			return RUBY_GPIO1_PWM0;
+		case RUBY_GPIO_PIN3:
+			return RUBY_GPIO3_PWM1;
+		case RUBY_GPIO_PIN9:
+			return RUBY_GPIO9_PWM2;
+		case RUBY_GPIO_PIN12:
+			return RUBY_GPIO12_PWM3;
+		case RUBY_GPIO_PIN13:
+			return RUBY_GPIO13_PWM4;
+		case RUBY_GPIO_PIN15:
+			return RUBY_GPIO15_PWM5;
+		case RUBY_GPIO_PIN16:
+			return RUBY_GPIO16_PWM6;
+#else
+		case RUBY_GPIO_PIN11:
+			return AMBER_GPIO11_PWM0;
+		case RUBY_GPIO_PIN12:
+			return AMBER_GPIO12_PWM1;
+		case RUBY_GPIO_PIN13:
+			return AMBER_GPIO13_PWM2;
+		case RUBY_GPIO_PIN14:
+			return AMBER_GPIO14_PWM3;
+		case RUBY_GPIO_PIN15:
+			return AMBER_GPIO15_PWM4;
+		case RUBY_GPIO_PIN16:
+			return AMBER_GPIO16_PWM5;
+		case RUBY_GPIO_PIN17:
+			return AMBER_GPIO17_PWM6;
+#endif
+	}
+	return 0;
+}
+
+/*
+ * Enable hardware PWM on @pin with the given high/low period counts.
+ * Returns 0 on success, -1 if either count exceeds RUBY_GPIO_PWM_MAX_COUNT
+ * or the pin has no PWM register.
+ */
+int gpio_enable_pwm(uint32_t pin, uint32_t high_count, uint32_t low_count)
+{
+	uint32_t gpio_pwm_reg_addr = 0;
+	uint32_t gpio_pwm_reg_val = 0;
+
+	if (high_count > RUBY_GPIO_PWM_MAX_COUNT || low_count > RUBY_GPIO_PWM_MAX_COUNT)
+		return -1;
+
+	gpio_pwm_reg_addr = _gpio_pin_pwm_reg(pin);
+	if (gpio_pwm_reg_addr == 0)
+		return -1;
+
+	gpio_pwm_reg_val = (low_count << RUBY_GPIO_PWM_LOW_SHIFT) |
+				(high_count << RUBY_GPIO_PWM_HIGH_SHIFT) | RUBY_GPIO_PWM_ENABLE;
+
+	/* NOTE(review): writel() is given the raw register value, unlike the
+	 * other register paths in this file which wrap with IO_ADDRESS() --
+	 * confirm the PWM register macros already encode a mapped address. */
+	writel(gpio_pwm_reg_val, gpio_pwm_reg_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL(gpio_enable_pwm);
+
+/*
+ * Disable PWM on @pin by clearing its control register.
+ * Returns 0 on success, -1 for a pin with no PWM register.
+ * NOTE(review): the return type is uint32_t (matching the declaration in
+ * gpio.h), so the -1 failure value reaches callers as 0xffffffff; callers
+ * testing "< 0" will not see it -- consider changing both to int.
+ */
+uint32_t gpio_disable_pwm(uint32_t pin)
+{
+	uint32_t gpio_pwm_reg_addr = 0;
+
+	gpio_pwm_reg_addr = _gpio_pin_pwm_reg(pin);
+	if (gpio_pwm_reg_addr == 0)
+		return -1;
+
+	writel(0, gpio_pwm_reg_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL(gpio_disable_pwm);
+
+arch_initcall(arc_gpio_init);
+
+MODULE_DESCRIPTION("ARC 7XX GPIO");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/ruby/gpio.h b/drivers/qtn/ruby/gpio.h
new file mode 100644
index 0000000..ae9b376
--- /dev/null
+++ b/drivers/qtn/ruby/gpio.h
@@ -0,0 +1,35 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+#ifndef __BOARD_RUBY_GPIO_H
+#define __BOARD_RUBY_GPIO_H
+
+#include <linux/types.h>
+#include <common/ruby_platform.h>
+
+/* Pin mode / alternate-function configuration (implemented in ruby/gpio.c). */
+void gpio_config(uint32_t pin, uint32_t cfg);
+void gpio_uart0_config(void);
+void gpio_uart1_config(void);
+void gpio_spi_flash_config(void);
+/* PWM control: 0 on success, -1 for an unsupported pin (gpio_disable_pwm's
+ * -1 is seen as 0xffffffff because of the uint32_t return type). */
+int gpio_enable_pwm(uint32_t pin, uint32_t high_count, uint32_t low_count);
+uint32_t gpio_disable_pwm(uint32_t pin);
+/* NOTE(review): the implementation names the second parameter "value" (the
+ * level to drive), not "cfg"; gpio_lna_toggle_config() is exported by
+ * gpio.c but not declared here -- confirm where its declaration lives. */
+void gpio_wowlan_output(uint32_t pin, uint32_t cfg);
+#endif // #ifndef __BOARD_RUBY_GPIO_H
+
diff --git a/drivers/qtn/ruby/head_fixup.S b/drivers/qtn/ruby/head_fixup.S
new file mode 100644
index 0000000..dd158da
--- /dev/null
+++ b/drivers/qtn/ruby/head_fixup.S
@@ -0,0 +1,32 @@
+/*
+ * must copy sram data + text segment BEFORE setting up the kernel stack,
+ * as init_thread_union may reside in this section
+ */
+__copy_sram_datatext:
+    mov r5, __sram_datatext_start	; r5 = destination cursor
+    mov r6, __sram_datatext_end	; r6 = destination end (exclusive)
+    mov r7, __sram_load_addr	; r7 = source cursor in the load image
+
+__copy_sram_datatext_loop:
+    ld.ab r3, [r7, 4]	; load one word, post-increment source
+    st.ab r3, [r5, 4]	; store one word, post-increment destination
+    ; do-while loop: one word is copied before the first bounds check,
+    ; so this assumes the section is at least one word long
+    brlt  r5, r6, __copy_sram_datatext_loop
+
+    ;; Flush and invalidate dcache
+    ; Set flush mode for invalidate operation
+    lr   r3, [0x48] ; DC_CTRL register
+    bset r3, r3, 0x6
+    sr   r3, [0x48] ; DC_CTRL register
+    ; Start operation
+    mov  r3, 0x1
+    sr   r3, [0x47] ; DC_IVDC register
+    ; Wait operation completion
+__dcache_flush_continue:
+    ; NOTE(review): 0x100 presumably the flush-in-progress status bit of
+    ; DC_CTRL -- confirm against the ARC700 cache register spec
+    lr   r3, [0x48] ; DC_CTRL register
+    and  r3, r3, 0x100
+    brne r3, 0x0, __dcache_flush_continue
+
+    ;; Invalidate icache
+    mov r3, 0x1
+    sr  r3, [0x10] ; IC_IVIC
+
diff --git a/drivers/qtn/ruby/health.c b/drivers/qtn/ruby/health.c
new file mode 100644
index 0000000..2c15be4
--- /dev/null
+++ b/drivers/qtn/ruby/health.c
@@ -0,0 +1,331 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+#include <asm/cache.h>
+#include <asm/uaccess.h>
+#include <asm/board/mem_check.h>
+
+#define RUBY_HEALTH_MOD_NUM_MAX		32	/* max modules tracked for checksumming */
+#define RUBY_HEALTH_DRIVER_NAME		"ruby_health"
+
+/* Last computed code-text checksum and bookkeeping about its last change. */
+static uint32_t ruby_health_csum = 0;
+static unsigned int ruby_health_csum_seq_id = 0;	/* total checksum passes so far */
+static unsigned int ruby_health_csum_change_seq_id = 0;	/* pass number at last change */
+static unsigned long ruby_health_csum_change_jiffies = 0;	/* jiffies at last change */
+static struct task_struct *ruby_health_task = NULL;	/* checker kthread; NULL when stopped */
+static struct module *ruby_health_modules[RUBY_HEALTH_MOD_NUM_MAX] = {0,};	/* modules included in the scan */
+static DEFINE_SPINLOCK(ruby_health_modules_lock);	/* guards ruby_health_modules[] */
+
+/*
+ * XOR-fold @sz bytes starting near @start into @csum, one 32-bit word at a
+ * time, using uncached reads so the scan sees memory rather than dcache.
+ * @start is rounded DOWN to a 4-byte boundary and @sz truncated to whole
+ * words, so up to 3 bytes before/after the range may be included/excluded.
+ * Sleeps periodically to keep this low-impact on a running system.
+ */
+static uint32_t ruby_health_gen_csum(void *start, uint32_t sz, uint32_t csum)
+{
+	uint32_t *ptr_begin = (uint32_t*)(((uint32_t)start) & ~0x3);
+	uint32_t *ptr_end = ptr_begin + (sz >> 2);
+	int sleep_counter = 0;
+	unsigned long last_jiffies = jiffies;
+
+	while (ptr_begin != ptr_end) {
+		/* generate simple checksum */
+		csum = csum ^ arc_read_uncached_32(ptr_begin);
+		++ptr_begin;
+
+		/* Every 256 words, yield (~3 jiffies) if >1 jiffy has elapsed. */
+		++sleep_counter;
+		if (sleep_counter >= 256) {
+			sleep_counter = 0;
+			if (jiffies - last_jiffies > 1) {
+				msleep(3 * 1000 / HZ);
+				last_jiffies = jiffies;
+			}
+		}
+	}
+
+	msleep(10);
+
+	return csum;
+}
+
+/*
+ * Fold the kernel's text segment (and, on NUMA builds, the SRAM text
+ * segment) into @csum using the linker-provided section boundary symbols.
+ */
+static uint32_t ruby_health_kernel_check(uint32_t csum)
+{
+	extern char _text, _etext;
+#ifdef CONFIG_ARCH_RUBY_NUMA
+	extern char __sram_text_start, __sram_text_end;
+#endif
+
+	csum = ruby_health_gen_csum(&_text, &_etext - &_text, csum);
+#ifdef CONFIG_ARCH_RUBY_NUMA
+	csum = ruby_health_gen_csum(&__sram_text_start,
+		&__sram_text_end - &__sram_text_start, csum);
+#endif
+
+	return csum;
+}
+
+/*
+ * Fold the code of every tracked module into @csum.  Each module reference
+ * is taken under the lock (try_module_get) so the module cannot unload
+ * while its memory is scanned; the struct module layout fields changed in
+ * kernel 4.7, hence the version split.
+ */
+static uint32_t ruby_health_modules_check(uint32_t csum)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ruby_health_modules); ++i) {
+
+		struct module *mod = NULL;
+		struct module *mod_tmp;
+
+		spin_lock(&ruby_health_modules_lock);
+		mod_tmp = ruby_health_modules[i];
+		if (mod_tmp && try_module_get(mod_tmp)) {
+			mod = mod_tmp;
+		}
+		spin_unlock(&ruby_health_modules_lock);
+
+		if (mod) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			csum = ruby_health_gen_csum(mod->core_layout.base, mod->core_layout.size, csum);
+#ifdef CONFIG_ARCH_RUBY_NUMA
+			csum = ruby_health_gen_csum(mod->sram_layout.base, mod->sram_layout.size, csum);
+#endif
+#else
+			csum = ruby_health_gen_csum(mod->module_core, mod->core_text_size, csum);
+#ifdef CONFIG_ARCH_RUBY_NUMA
+			csum = ruby_health_gen_csum(mod->module_sram, mod->sram_text_size, csum);
+#endif
+#endif
+			module_put(mod);
+		}
+	}
+
+	return csum;
+}
+
+/*
+ * Background checker kthread.  Until told to stop it: warns (once each) on
+ * IRQ-SRAM or kernel stack corruption, recomputes the kernel+modules code
+ * checksum, and logs whenever the checksum differs from the previous pass.
+ * NOTE(review): a checksum change can also be triggered legitimately by a
+ * module load/unload changing the scanned set, not only by corruption.
+ */
+static int ruby_health_daemon(void *arg)
+{
+	printk(KERN_INFO"%s: daemon start\n", RUBY_HEALTH_DRIVER_NAME);
+
+	while (!kthread_should_stop()) {
+
+		uint32_t csum = 0;
+
+		WARN_ONCE(!is_sram_irq_stack_good(), "*** IRQ SRAM stack corrupted\n");
+		WARN_ONCE(!is_kernel_stack_good(), "*** Kernel stack corrupted\n");
+
+		/* Generate checksum */
+		csum = ruby_health_kernel_check(csum);
+		csum = ruby_health_modules_check(csum);
+
+		/* Keep track of how many times checksum generated */
+		++ruby_health_csum_seq_id;
+
+		/* If checksum changed tell it */
+		if (csum != ruby_health_csum) {
+			printk(KERN_ERR"*** Checksum changed 0x%x (ts=%lu, seq=%u) -> 0x%x (ts=%lu, seq=%u)\n",
+				ruby_health_csum, ruby_health_csum_change_jiffies, ruby_health_csum_change_seq_id,
+				csum, jiffies, ruby_health_csum_seq_id);
+			ruby_health_csum = csum;
+			ruby_health_csum_change_seq_id = ruby_health_csum_seq_id;
+			ruby_health_csum_change_jiffies = jiffies;
+		}
+
+		/* This is very low priority background task */
+		msleep(50);
+	}
+
+	printk(KERN_INFO"%s: daemon stop: checksum=0x%x\n",
+		RUBY_HEALTH_DRIVER_NAME, ruby_health_csum);
+
+	return 0;
+}
+
+/* Stop the checker kthread if running; safe to call when already stopped. */
+static void ruby_health_stop(void)
+{
+	if (ruby_health_task) {
+		kthread_stop(ruby_health_task);
+		ruby_health_task = NULL;
+		printk(KERN_INFO"%s: stop\n", RUBY_HEALTH_DRIVER_NAME);
+	}
+}
+
+/*
+ * (Re)start the checker kthread at idle scheduling priority.
+ * Returns 0 on success or the kthread_create() error code.
+ */
+static int ruby_health_start(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+	int err;
+
+	printk(KERN_INFO"%s: start\n", RUBY_HEALTH_DRIVER_NAME);
+
+	ruby_health_stop();
+
+	ruby_health_task = kthread_create(ruby_health_daemon, NULL, RUBY_HEALTH_DRIVER_NAME);
+	if (IS_ERR(ruby_health_task)) {
+		/* Capture the error before clearing the pointer.  The original
+		 * nulled the pointer first and then returned PTR_ERR(NULL),
+		 * i.e. 0, silently reporting success on failure. */
+		err = PTR_ERR(ruby_health_task);
+		ruby_health_task = NULL;
+		return err;
+	}
+
+	sched_setscheduler_nocheck(ruby_health_task, SCHED_IDLE, &param);
+	set_user_nice(ruby_health_task, 19);
+
+	wake_up_process(ruby_health_task);
+
+	return 0;
+}
+
+/*
+ * /proc write handler: a first byte of '0' stops the checker daemon, any
+ * other byte starts it.  Returns @count on success or a negative errno.
+ */
+static ssize_t
+ruby_health_write_proc(struct file *file, const char __user *buffer,
+		       size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	char *procfs_buffer = NULL;
+
+	if (count < 1) {
+		return -EINVAL;
+	}
+
+	if ((procfs_buffer = kmalloc(count, GFP_KERNEL)) == NULL) {
+		/* Log prefix fixed: the original said "bootcfg", a copy-paste
+		 * from another driver. */
+		printk("%s error: out of memory\n", RUBY_HEALTH_DRIVER_NAME);
+		return -ENOMEM;
+	}
+
+	/* The original ignored copy_from_user()'s return value, so a faulted
+	 * copy would have been treated as valid input. */
+	if (copy_from_user(procfs_buffer, buffer, count)) {
+		kfree(procfs_buffer);
+		return -EFAULT;
+	}
+
+	if (procfs_buffer[0] != '0') {
+		ret = ruby_health_start();
+	} else {
+		ruby_health_stop();
+	}
+
+	kfree(procfs_buffer);
+	return ret ? ret : count;
+}
+
+/*
+ * /proc read handler: report the driver name and current checksum.
+ * Returns the number of bytes copied to @buffer or a negative errno.
+ */
+static ssize_t
+ruby_health_read_proc(struct file *file, char __user *buffer, size_t buffer_length, loff_t *ppos)
+{
+	char *procfs_buffer = NULL;
+	ssize_t len = 0;
+
+	/* Guard zero-length reads: kmalloc(0) yields ZERO_SIZE_PTR, which the
+	 * original then wrote through. */
+	if (buffer_length == 0)
+		return 0;
+
+	if ((procfs_buffer = kmalloc(buffer_length, GFP_KERNEL)) == NULL) {
+		printk("%s error: out of memory\n", RUBY_HEALTH_DRIVER_NAME);
+		return -ENOMEM;
+	}
+	/* Bound the format to the allocation; the original used sprintf() and
+	 * could overflow procfs_buffer whenever buffer_length was smaller
+	 * than the formatted line. */
+	len = scnprintf(procfs_buffer, buffer_length, "%s: checksum=0x%x\n",
+			RUBY_HEALTH_DRIVER_NAME, ruby_health_csum);
+	len = simple_read_from_buffer(buffer, buffer_length, ppos,
+				      procfs_buffer, len);
+
+	kfree(procfs_buffer);
+	return len;
+}
+
+/* /proc/ruby_health file operations. */
+static const struct file_operations fops_ruby_health = {
+	.read = ruby_health_read_proc,
+	.write = ruby_health_write_proc,
+};
+
+/* Create /proc/ruby_health (mode 0600).  Returns 0 or -ENOMEM. */
+static int __init ruby_health_create_proc(void)
+{
+	struct proc_dir_entry *entry = proc_create_data(RUBY_HEALTH_DRIVER_NAME, 0600, NULL,
+							&fops_ruby_health, NULL);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Remove /proc/ruby_health. */
+static void __exit ruby_health_destroy_proc(void)
+{
+	remove_proc_entry(RUBY_HEALTH_DRIVER_NAME, NULL);
+}
+
+/*
+ * Module-state notifier: record a module in the first free tracking slot
+ * when it goes LIVE, drop it when it is GOING away.  @data is the
+ * struct module.  Returns 0 (== NOTIFY_DONE).  Modules beyond
+ * RUBY_HEALTH_MOD_NUM_MAX slots are silently not tracked.
+ */
+static int ruby_health_module_notifier_func(struct notifier_block *self, unsigned long val, void *data)
+{
+	int i;
+
+	spin_lock(&ruby_health_modules_lock);
+
+	for (i = 0; i < ARRAY_SIZE(ruby_health_modules); ++i) {
+		if (val == MODULE_STATE_GOING) {
+			if (ruby_health_modules[i] == data) {
+				ruby_health_modules[i] = NULL;
+				break;
+			}
+		} else if (val == MODULE_STATE_LIVE) {
+			if (ruby_health_modules[i] == NULL) {
+				ruby_health_modules[i] = data;
+				break;
+			}
+		}
+	}
+
+	spin_unlock(&ruby_health_modules_lock);
+
+	return 0;
+}
+
+static struct notifier_block ruby_health_module_notifier = {
+	.notifier_call = ruby_health_module_notifier_func,
+};
+
+/* Shared reboot/panic callback: stop the daemon so it cannot scan memory
+ * while the system is going down. */
+static int ruby_health_stop_notifier_func(struct notifier_block *self, unsigned long val, void *data)
+{
+	ruby_health_stop();
+	return 0;
+}
+
+static struct notifier_block ruby_health_reboot_notifier = {
+	.notifier_call = ruby_health_stop_notifier_func,
+};
+
+static struct notifier_block ruby_health_panic_notifier = {
+	.notifier_call = ruby_health_stop_notifier_func,
+};
+
+/*
+ * Module init: create the proc entry and, on success, register the module,
+ * reboot and panic notifiers.  The daemon itself only starts when userspace
+ * writes to /proc/ruby_health.
+ */
+static int __init ruby_health_init(void)
+{
+	int ret = 0;
+	printk(KERN_INFO"%s loading\n", RUBY_HEALTH_DRIVER_NAME);
+	ret = ruby_health_create_proc();
+	if (!ret) {
+		register_module_notifier(&ruby_health_module_notifier);
+		register_reboot_notifier(&ruby_health_reboot_notifier);
+		atomic_notifier_chain_register(&panic_notifier_list,
+			&ruby_health_panic_notifier);
+	}
+	return ret;
+}
+
+/* Module exit: unregister notifiers, remove the proc entry, stop the daemon. */
+static void __exit ruby_health_exit(void)
+{
+	unregister_module_notifier(&ruby_health_module_notifier);
+	unregister_reboot_notifier(&ruby_health_reboot_notifier);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+			&ruby_health_panic_notifier);
+	ruby_health_destroy_proc();
+	ruby_health_stop();
+	printk(KERN_INFO "%s unloaded\n", RUBY_HEALTH_DRIVER_NAME);
+}
+
+module_init(ruby_health_init);
+module_exit(ruby_health_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Quantenna");
+
diff --git a/drivers/qtn/ruby/i2c_bus.c b/drivers/qtn/ruby/i2c_bus.c
new file mode 100644
index 0000000..fe6b577
--- /dev/null
+++ b/drivers/qtn/ruby/i2c_bus.c
@@ -0,0 +1,51 @@
+/**
+ * Copyright (c) 2008-2014 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/board/platform.h>
+
+/*
+ * MMIO and IRQ resources for the qtn-i2c platform device.  Linux resource
+ * ranges are inclusive, so .end must be the last valid byte
+ * (start + size - 1); the original used start + size, which overlaps the
+ * following region by one byte and can make request_mem_region() fail.
+ */
+static struct resource qtn_i2c_resources[] = {
+	{
+		.start	= RUBY_I2C_BASE_ADDR,
+		.end	= RUBY_I2C_BASE_ADDR + RUBY_I2C_MEM_SIZE - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start = RUBY_IRQ_MISC_I2C,
+		.end   = RUBY_IRQ_MISC_I2C,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+/* Platform device matched by the "qtn-i2c" bus driver. */
+static struct platform_device qtn_i2c_device = {
+	.name = "qtn-i2c",
+	.id = RUBY_I2C_ADAPTER_NUM,
+	.resource = qtn_i2c_resources,
+	.num_resources = ARRAY_SIZE(qtn_i2c_resources),
+};
+
+/* Register the I2C platform device at arch-init time. */
+static int __init qtn_i2c_setup_bus(void)
+{
+	pr_debug("Quantenna I2C device register\n");
+	return platform_device_register(&qtn_i2c_device);
+}
+
+arch_initcall(qtn_i2c_setup_bus);
diff --git a/drivers/qtn/ruby/iputil.c b/drivers/qtn/ruby/iputil.c
new file mode 100644
index 0000000..3cd5f7d
--- /dev/null
+++ b/drivers/qtn/ruby/iputil.c
@@ -0,0 +1,324 @@
+/**
+ * Copyright (c) 2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <compat.h>
+#include <net/ip.h>
+#ifdef CONFIG_IPV6
+#include <net/ipv6.h>
+#endif
+
+#include <qtn/iputil.h>
+#include <asm/board/platform.h>
+
+#ifdef CONFIG_IPV6
+/* optimisation of the ipv6_skip_exthdr() kernel function */
+/*
+ * Walk the IPv6 extension-header chain starting @start bytes into @ipv6h.
+ * On return, *nexthdrp holds the first non-extension protocol number (or
+ * NEXTHDR_NONE when a header would run past @total_len) and the returned
+ * offset points just past the headers walked.  When a fragment header is
+ * seen, *ip_id (if non-NULL) gets the fragment identification in host
+ * order and *more_frags (if non-NULL) the MF flag; for a non-first
+ * fragment the walk stops there, since no protocol header follows.
+ */
+int __sram_text
+iputil_v6_skip_exthdr(const struct ipv6hdr *ipv6h, int start, uint8_t *nexthdrp,
+			int total_len, __be32 *ip_id, uint8_t *more_frags)
+{
+	uint8_t nexthdr = ipv6h->nexthdr;
+	struct frag_hdr *frag_hdrp;
+	struct ipv6_opt_hdr *hp;
+	int hdrlen;
+
+	while ((start < total_len) && ipv6_ext_hdr(nexthdr) && (nexthdr != NEXTHDR_NONE)) {
+		hp = (struct ipv6_opt_hdr *)((char *)ipv6h + start);
+
+		if (unlikely(nexthdr == NEXTHDR_FRAGMENT)) {
+			frag_hdrp = (struct frag_hdr *)hp;
+
+			if (ip_id != NULL) {
+				*ip_id = ntohl(get_unaligned(&frag_hdrp->identification));
+			}
+
+			KASSERT((((int)frag_hdrp) & 0x1) == 0,
+				("iputil: frag hdr is not on 2-octet boundary"));
+
+			if (more_frags != NULL) {
+				*more_frags = IPUTIL_V6_FRAG_MF(frag_hdrp);
+			}
+			if (IPUTIL_V6_FRAG_OFFSET(frag_hdrp)) {
+				/* not start of packet - does not contain protocol hdr */
+				break;
+			}
+			/* Fragment headers are a fixed 8 octets. */
+			hdrlen = 8;
+		} else if (unlikely(nexthdr == NEXTHDR_AUTH)) {
+			/* AH length field counts 4-octet units (minus 2). */
+			hdrlen = (hp->hdrlen + 2) << 2;
+		} else {
+			hdrlen = ipv6_optlen(hp);
+		}
+
+		if ((start + hdrlen) > total_len) {
+			nexthdr = NEXTHDR_NONE;
+			break;
+		}
+		nexthdr = hp->nexthdr;
+		start += hdrlen;
+	}
+
+	*nexthdrp = nexthdr;
+
+	return start;
+}
+EXPORT_SYMBOL(iputil_v6_skip_exthdr);
+
+/*
+ * Format an IPv6 address into @buf, compressing the longest run of zero
+ * 16-bit groups with "::" (a lone zero group is printed as "0" -- run
+ * detection needs at least two consecutive zero groups).  When only the
+ * last 32 bits are non-zero the "::a.b.c.d" form is used.  Returns the
+ * number of characters written; @buf must be large enough (no bound is
+ * enforced -- callers must size it for a full IPv6 address).
+ */
+int iputil_v6_ntop(char *buf, const struct in6_addr *addr)
+{
+	char *p = buf;
+	int i = 0;
+	int zstart = 0;
+	int bestzstart = 0;
+	int bestzend = 0;
+	int bestzlen = 0;
+	int inz = 0;
+	int donez = 0;
+	const int addr16len = (int)(sizeof(struct in6_addr) / sizeof(uint16_t));
+
+	/* parse address, looking for longest substring of 0s */
+	for (i = 0; i < addr16len; i++) {
+		if (addr->s6_addr16[i] == 0) {
+			if (!inz) {
+				zstart = i;
+			} else {
+				int zlen;
+				int zend;
+				zend = i;
+				zlen = zend - zstart + 1;
+				if (zlen > bestzlen) {
+					bestzlen = zlen;
+					bestzstart = zstart;
+					bestzend = i;
+				}
+			}
+			inz = 1;
+		} else {
+			inz = 0;
+		}
+	}
+
+	/* when only the last 32 bits contain an address, format as an ::ipv4 */
+	if (bestzstart == 0 && bestzlen == 6) {
+		p += sprintf(p, "::%d.%d.%d.%d",
+				addr->s6_addr[12], addr->s6_addr[13],
+				addr->s6_addr[14], addr->s6_addr[15]);
+		return p - buf;
+	}
+
+	/* otherwise format as normal ipv6 */
+	for (i = 0; i < addr16len; i++) {
+		uint16_t s = ntohs(addr->s6_addr16[i]);
+		if ((bestzlen == 0) || (i < bestzstart) || (i > bestzend) || s) {
+			const char *colon;
+			/* No separator after the last group, nor just before the
+			 * compressed run ("::" supplies its own). */
+			if ((i == (addr16len - 1)) || (i == (bestzstart - 1))) {
+				colon = "";
+			} else {
+				colon = ":";
+			}
+			p += sprintf(p, "%x%s", s, colon);
+		} else if (bestzlen && (i == bestzstart)) {
+			inz = 1;
+		} else if (bestzlen && (i == bestzend)) {
+			p += sprintf(p, "::");
+			inz = 0;
+			donez = 1;
+		} else if (inz) {
+			/* interior of the compressed run: emit nothing */
+		} else {
+			WARN_ONCE(1, "%s: implementation error", __FUNCTION__);
+		}
+	}
+
+	return p - buf;
+}
+EXPORT_SYMBOL(iputil_v6_ntop);
+
+/* Format "[addr]:port" into @buf; @port is network order.  Returns length. */
+int iputil_v6_ntop_port(char *buf, const struct in6_addr *addr, __be16 port)
+{
+	char *p = buf;
+	p += sprintf(p, "[");
+	p += iputil_v6_ntop(p, addr);
+	p += sprintf(p, "]:%u", ntohs(port));
+	return p - buf;
+}
+EXPORT_SYMBOL(iputil_v6_ntop_port);
+
+/*
+ * Return 1 if the IPv6 packet at @iphdr (@data_len bytes available) is an
+ * ICMPv6 MLD message (query, v1 report, done, or v2 report), else 0.
+ */
+int iputil_eth_is_v6_mld(void *iphdr, uint32_t data_len)
+{
+	struct ipv6hdr *ip6hdr_p = iphdr;
+	uint8_t nexthdr;
+	int nhdr_off;
+	struct icmp6hdr *icmp6hdr;
+	int is_ipv6_mld = 0;
+
+	/* Skip extension headers (MLD is usually behind a hop-by-hop header). */
+	nhdr_off = iputil_v6_skip_exthdr(ip6hdr_p, sizeof(struct ipv6hdr),
+		&nexthdr, data_len, NULL, NULL);
+
+	if (unlikely(nexthdr == IPPROTO_ICMPV6)) {
+		icmp6hdr = (struct icmp6hdr*)((__u8 *)ip6hdr_p + nhdr_off);
+
+		if (icmp6hdr->icmp6_type == ICMPV6_MGM_QUERY ||
+			icmp6hdr->icmp6_type == ICMPV6_MGM_REPORT ||
+			icmp6hdr->icmp6_type == ICMPV6_MGM_REDUCTION ||
+			icmp6hdr->icmp6_type == ICMPV6_MLD2_REPORT) {
+			is_ipv6_mld = 1;
+		}
+	}
+
+	return is_ipv6_mld;
+}
+EXPORT_SYMBOL(iputil_eth_is_v6_mld);
+#endif /* CONFIG_IPV6 */
+
+/*
+ * Return the transport protocol number of the IP packet at @iph, setting
+ * *proto_data to the transport header.  @data is the sk_buff holding the
+ * packet (used only for length checks).  For fragments, *ip_id (if
+ * non-NULL) receives the IP identification and *more_frags the MF flag;
+ * a non-first IPv4 fragment yields NEXTHDR_FRAGMENT since it carries no
+ * transport header.  Returns 0 for an unknown IP version or a truncated
+ * IPv4 header.
+ */
+uint8_t __sram_text
+iputil_proto_info(void *iph, void *data, void **proto_data, uint32_t *ip_id, uint8_t *more_frags)
+{
+	const struct iphdr *ipv4h = iph;
+	uint16_t frag_off;
+#ifdef CONFIG_IPV6
+	u_int8_t nexthdr;
+	int start;
+	uint32_t data_len;
+#endif
+	struct sk_buff *skb = data;
+
+	if (ipv4h->version == 4) {
+		if (skb->len < (ipv4h->ihl << 2))
+			return 0;
+		/* frag_off may be unaligned inside the frame. */
+		frag_off = ntohs(get_unaligned((u16 *)&ipv4h->frag_off));
+		if (ip_id && (frag_off & (IP_OFFSET | IP_MF)) != 0) {
+			*ip_id = (uint32_t)ntohs(ipv4h->id);
+		}
+		if (more_frags) {
+			*more_frags = !!(frag_off & IP_MF);
+		}
+		*proto_data = (char *)iph + (ipv4h->ihl << 2);
+
+		if ((frag_off & IP_OFFSET) != 0) {
+			/* not start of packet - does not contain protocol hdr */
+			return NEXTHDR_FRAGMENT;
+		}
+
+		return ipv4h->protocol;
+	}
+
+#ifdef CONFIG_IPV6
+	if (ipv4h->version == 6) {
+		data_len = skb->len - ((uint8_t*)iph - (skb->data));
+		start = iputil_v6_skip_exthdr((struct ipv6hdr *)iph, sizeof(struct ipv6hdr),
+				&nexthdr, data_len, ip_id, more_frags);
+		*proto_data = (char *)iph + start;
+
+		return nexthdr;
+	}
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL(iputil_proto_info);
+
+/* Format "a.b.c.d:port" into @buf; @addr and @port are network order.
+ * The NIPQUAD macros were removed from the kernel, hence the %pI4 path. */
+int iputil_v4_ntop_port(char *buf, __be32 addr, __be16 port)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	return sprintf(buf, "%pI4:%u", &addr, ntohs(port));
+#else
+	return sprintf(buf, NIPQUAD_FMT ":%u", NIPQUAD(addr), ntohs(port));
+#endif
+}
+EXPORT_SYMBOL(iputil_v4_ntop_port);
+
+/*
+ * Parse a dotted-quad IPv4 string into *ipaddr in network byte order.
+ * Returns 0 on success; -1 for NULL input, a malformed string, or an
+ * octet greater than 255.
+ */
+int iputil_v4_pton(const char *ip_str, __be32 *ipaddr)
+{
+	int i;
+	uint32_t tmp_array[IPUTIL_V4_ADDR_LEN];
+	uint8_t *ipaddr_p = (uint8_t *)ipaddr;
+
+	if (ip_str == NULL)
+		return -1;
+
+	/* "%u" matches the uint32_t destinations; the original's "%d" expects
+	 * int* and is undefined behaviour with unsigned arguments. */
+	if (sscanf(ip_str, "%u.%u.%u.%u",
+			&tmp_array[0],
+			&tmp_array[1],
+			&tmp_array[2],
+			&tmp_array[3]) != 4) {
+		return -1;
+	}
+
+	for (i = 0; i < IPUTIL_V4_ADDR_LEN; i++) {
+		if (tmp_array[i] > 0xff) {
+			return -1;
+		}
+	}
+
+	/* Octets stored in string order == network byte order. */
+	ipaddr_p[0] = tmp_array[0];
+	ipaddr_p[1] = tmp_array[1];
+	ipaddr_p[2] = tmp_array[2];
+	ipaddr_p[3] = tmp_array[3];
+
+	return 0;
+}
+EXPORT_SYMBOL(iputil_v4_pton);
+
+#ifdef CONFIG_IPV6
+/*
+ * Basic NDP validity check (RFC 4861): a neighbour discovery message must
+ * arrive with hop limit 255 and ICMPv6 code 0.  Returns 1 if valid.
+ */
+int iputil_ipv6_is_neigh_msg(struct ipv6hdr *ipv6, struct icmp6hdr *icmpv6)
+{
+	if (ipv6->hop_limit != 255) {
+		return 0;
+	}
+
+	if (icmpv6->icmp6_code != 0) {
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(iputil_ipv6_is_neigh_msg);
+
+/*
+ * Validate a neighbour solicitation: the target must not be multicast,
+ * and a duplicate-address-detection probe (@dup_addr_detect) must be sent
+ * to the solicited-node multicast address ff02::1:ffXX:XXXX.  Returns 1
+ * if valid.  The 4.7+ signature takes the fields directly; the older one
+ * extracts them from the nd_msg/ipv6hdr pair.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+int iputil_ipv6_is_neigh_sol_msg(uint8_t dup_addr_detect, const struct in6_addr *target, const struct in6_addr *daddr)
+#else
+int iputil_ipv6_is_neigh_sol_msg(uint8_t dup_addr_detect, struct nd_msg *msg, struct ipv6hdr *ipv6)
+#endif
+{
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	const struct in6_addr *daddr = &ipv6->daddr;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (ipv6_addr_is_multicast(target)) {
+#else
+	if (ipv6_addr_is_multicast(&msg->target)) {
+#endif
+
+		return 0;
+	}
+
+	/* Check for the solicited-node multicast prefix ff02::1:ff00:0/104. */
+	if (dup_addr_detect && !(daddr->s6_addr32[0] == htonl(0xff020000) &&
+			daddr->s6_addr32[1] == htonl(0x00000000) &&
+			daddr->s6_addr32[2] == htonl(0x00000001) &&
+			daddr->s6_addr [12] == 0xff )) {
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(iputil_ipv6_is_neigh_sol_msg);
+#endif
diff --git a/drivers/qtn/ruby/irq.c b/drivers/qtn/ruby/irq.c
new file mode 100644
index 0000000..36091df
--- /dev/null
+++ b/drivers/qtn/ruby/irq.c
@@ -0,0 +1,1247 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+/******************************************************************************
+ * Copyright ARC International (www.arc.com) 2007-2008
+ *
+ * Vineetg: Mar 2009
+ *  -use generic irqaction to store IRQ requests
+ *  -device ISRs no longer take pt_regs (rest of the kernel convention)
+ *  -request_irq( ) definition matches declaration in inc/linux/interrupt.h
+ *
+ * Vineetg: Mar 2009 (Supporting 2 levels of Interrupts)
+ *  -local_irq_enable shd be cognizant of current IRQ state
+ *    It is lot more involved now and thus re-written in "C"
+ *  -set_irq_regs in common ISR now always done and not dependent
+ *      on CONFIG_PROFILE, as it is used by more than just profiling
+ *
+ * Vineetg: Jan 2009
+ *  -Cosmetic change to display the registered ISR name for an IRQ
+ *  -free_irq( ) cleaned up to not have special first-node/other node cases
+ *
+ * Vineetg: June 17th 2008
+ *  -Added set_irq_regs() to top level ISR for profiling 
+ *  -Don't Need __cli just before irq_exit(). Intr already disabled
+ *  -Disabled compile time ARC_IRQ_DBG
+ *
+ *****************************************************************************/
+/******************************************************************************
+ * Copyright Codito Technologies (www.codito.com) Oct 01, 2004
+ * 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *****************************************************************************/
+
+/*
+ * arch/arc/kernel/irq.c
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/io.h>
+#include <linux/magic.h>
+#include <linux/random.h>
+
+#include <linux/sched.h>
+#include <asm/system.h>
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/hardware.h>
+
+#include <asm/board/board_config.h>
+#include <asm/board/platform.h>
+#include <asm/board/mem_check.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,30)
+#include <trace/irq.h>
+#else
+#include <trace/events/irq.h>
+#endif
+
+#include <qtn/ruby_cpumon.h>
+
+//#define ARC_IRQ_DBG
+
+//#define TEST_IRQ_REG
+
+#ifdef ARC_IRQ_DBG
+#define ASSERT(expr)	BUG_ON(!(expr))
+#else
+#define ASSERT(expr)
+#endif
+
+#define TIMER_INT_MSK(t)	TOPAZ_SYS_CTL_INTR_TIMER_MSK(t)
+
+/* Selector for the IRQ enable/disable helper routines. */
+enum toggle_irq {
+	DISABLE,
+	ENABLE,
+};
+
+/* Whether an irqaction is the first on its vector or joins a shared chain. */
+enum first_isr {
+	MULTIPLE_ISRS,
+	FIRST_ISR,
+};
+
+/* State of a line's bit in the GPIO interrupt-inversion register. */
+enum invert_bit {
+	NON_INVERT,
+	INVERT,
+};
+
+DEFINE_TRACE(irq_handler_entry);
+DEFINE_TRACE(irq_handler_exit);
+
+static void ruby_en_dis_ext_irq(unsigned irq, enum toggle_irq toggle);
+static void ruby_en_dis_common_irq(unsigned irq, enum toggle_irq toggle);
+
+/* table for system interrupt handlers, including handlers for extended interrupts. */
+struct irq_desc irq_desc[NR_IRQS];
+
+/* Head of the ISR chain registered on @irq (NULL when no action is set). */
+static inline struct irqaction *irq_get_action(unsigned int irq)
+{
+	return irq_to_desc(irq)->action;
+}
+
+/* IRQ status spinlock - enable, disable */
+static spinlock_t irq_controller_lock;
+
+extern void smp_ipi_init(void);
+
+static void run_handlers(unsigned irq);
+
+/* Nonzero if the vector lies in the extended (demuxed) IRQ range. */
+static __always_inline int ruby_is_ext_irq(unsigned irq)
+{
+	return irq > RUBY_MAX_IRQ_VECTOR;
+}
+
+/*
+ * Early CPU-side interrupt setup: point the ARC interrupt vector base at
+ * the kernel's table, mask every CPU interrupt line, and (when configured)
+ * promote selected peripherals to level-2 (high) priority.
+ */
+void __init arc_irq_init(void)
+{
+	extern int _int_vec_base_lds;
+
+	/* set the base for the interrupt vector table as defined in Linker File
+	   Interrupts will be enabled in start_kernel
+	 */
+	write_new_aux_reg(AUX_INTR_VEC_BASE, &_int_vec_base_lds);
+
+	/* vineetg: Jan 28th 2008
+	   Disable all IRQs on CPU side
+	   We will selectively enable them as devices request for IRQ
+	 */
+	write_new_aux_reg(AUX_IENABLE, 0);
+
+#ifdef CONFIG_ARCH_ARC_LV2_INTR
+{
+	int level_mask = 0;
+	/* If any of the peripherals is Level2 Interrupt (high Prio),
+	   set it up that way
+	 */
+#ifdef  CONFIG_TIMER_LV2
+	level_mask |= (1 << TIMER0_INT );
+#endif
+
+#ifdef  CONFIG_SERIAL_LV2
+	level_mask |= (1 << VUART_IRQ);
+#endif
+
+#ifdef  CONFIG_EMAC_LV2
+	level_mask |= (1 << VMAC_IRQ );
+#endif
+
+	if (level_mask) {
+		printk("setup as level-2 interrupt/s \n");
+		write_new_aux_reg(AUX_IRQ_LEV, level_mask);
+	}
+}
+#endif
+
+}
+
+/*
+ * Demux the shared UART vector: the reset-cause register flags which of
+ * the two UARTs raised the line; run each flagged UART's ISR chain.
+ */
+static irqreturn_t uart_irq_demux_handler(int irq, void *dev_id)
+{
+	u32 cause = readl(IO_ADDRESS(RUBY_SYS_CTL_RESET_CAUSE));
+	int uart;
+	int handled = 0;
+
+	for (uart = 0; uart < 2; uart++) {
+		if ((cause & RUBY_RESET_CAUSE_UART(uart)) &&
+				irq_has_action(RUBY_IRQ_UART0 + uart)) {
+			run_handlers(RUBY_IRQ_UART0 + uart);
+			handled = 1;
+		}
+	}
+
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * Demux the shared timer vector onto the per-timer extended IRQ chains
+ * flagged in the reset-cause register.
+ */
+static irqreturn_t timer_irq_demux_handler(int irq, void *dev_id)
+{
+	u32 cause = readl(IO_ADDRESS(RUBY_SYS_CTL_RESET_CAUSE));
+	int timer;
+	int handled = 0;
+
+	for (timer = 0; timer < RUBY_NUM_TIMERS; timer++) {
+		if (!(cause & TIMER_INT_MSK(timer)))
+			continue;
+		if (!irq_has_action(RUBY_IRQ_TIMER0 + timer))
+			continue;
+		run_handlers(RUBY_IRQ_TIMER0 + timer);
+		handled = 1;
+	}
+
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * Demux the "misc" cascade: reset-cause bits starting at
+ * QTN_IRQ_MISC_RST_CAUSE_START map one-to-one onto the extended vectors
+ * starting at RUBY_IRQ_MISC_EXT_IRQ_START.
+ */
+static irqreturn_t misc_irq_demux_handler(int irq, void *dev_id)
+{
+	uint32_t cause = readl(IO_ADDRESS(RUBY_SYS_CTL_RESET_CAUSE));
+	bool handled = false;
+	int i;
+
+	for (i = 0; i < QTN_IRQ_MISC_EXT_IRQ_COUNT; i++) {
+		const int ext_irq = RUBY_IRQ_MISC_EXT_IRQ_START + i;
+		const uint32_t cause_bit = 1 << (QTN_IRQ_MISC_RST_CAUSE_START + i);
+
+		if ((cause & cause_bit) && irq_has_action(ext_irq)) {
+			run_handlers(ext_irq);
+			handled = true;
+		}
+	}
+
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* remember desired triggering behavior of each gpio isr registered:
+ * IRQF_TRIGGER_* bits captured by init_gpio_irq(), indexed by gpio line
+ */
+static unsigned long gpio_trig_flags[RUBY_GPIO_IRQ_MAX];
+
+/* Current GPIO interrupt-inversion mask (one bit per line). */
+static __always_inline unsigned long read_gpio_inv_status(void)
+{
+	return readl(IO_ADDRESS(RUBY_SYS_CTL_INTR_INV0));
+}
+
+/* Set (INVERT) or clear (NON_INVERT) one line's bit in the GPIO
+ * interrupt-inversion register.
+ */
+static __always_inline void write_gpio_inv_status_bit(int gpio, enum invert_bit invert)
+{
+	if (invert == INVERT)
+		writel_or(RUBY_BIT(gpio), IO_ADDRESS(RUBY_SYS_CTL_INTR_INV0));
+	else
+		writel_and(~RUBY_BIT(gpio), IO_ADDRESS(RUBY_SYS_CTL_INTR_INV0));
+}
+
+/* Raw GPIO input pin levels (one bit per line). */
+static __always_inline unsigned long read_gpio_input_status(void)
+{
+	return readl(IO_ADDRESS(RUBY_GPIO_INPUT));
+}
+
+/*
+ * Record a GPIO line's requested IRQF_TRIGGER_* mode and prime the
+ * inversion register so the level-sensitive hardware approximates it.
+ * Edge modes are emulated in software: gpio_irq_demux_handler() flips the
+ * inversion bit after each edge.
+ */
+static void init_gpio_irq(int irq, unsigned long flags)
+{
+	int gpio = irq - RUBY_IRQ_GPIO0;
+	u32 line_status;
+
+	if (gpio >= RUBY_GPIO_IRQ_MAX || gpio < 0) {
+		panic("error in gpio isr init! irq: %d, flags: 0x%lx\n", irq, flags);
+	}
+
+	gpio_trig_flags[gpio] = flags & IRQF_TRIGGER_MASK;
+	line_status = read_gpio_input_status() & (1 << gpio);
+
+	if (flags & IRQF_TRIGGER_HIGH) {
+		/* set inversion line off for high level trigger */
+		write_gpio_inv_status_bit(gpio, NON_INVERT);
+	}
+	else if (flags & IRQF_TRIGGER_LOW) {
+		/* low-level trigger: invert so the controller sees it as high */
+		write_gpio_inv_status_bit(gpio, INVERT);
+	}
+	else if (flags & IRQF_TRIGGER_RISING) {
+		/*
+		 * for rising edge trigger, invert off,
+		 * then enable invert after each rising edge interrupt.
+		 * when the edge falls again, invert off to accept the next rising edge.
+		 * set to the opposite of the input pin for starters, so registering
+		 * the driver doesn't trigger an interrupt on the requested level
+		 * without an edge.
+		 */
+		if (line_status) {
+			write_gpio_inv_status_bit(gpio, INVERT);
+		} else {
+			write_gpio_inv_status_bit(gpio, NON_INVERT);
+		}
+	}
+	else if (flags & IRQF_TRIGGER_FALLING) {
+		/* mirror of the rising-edge setup above */
+		if (line_status) {
+			write_gpio_inv_status_bit(gpio, NON_INVERT);
+		} else {
+			write_gpio_inv_status_bit(gpio, INVERT);
+		}
+	}
+	else {
+		/*
+		 * assume this has been manually set, leave as default or prior
+		 */
+	}
+}
+
+
+/**
+ * gpio demux handler
+ *
+ * call handlers based on status of gpio input lines, and line invert level.
+ * provides software support for edge triggered interrupts for gpio buttons etc.
+ */
+static irqreturn_t gpio_irq_demux_handler(int irq, void *dev_id)
+{
+	int i;
+	int handle[RUBY_GPIO_IRQ_MAX];
+	unsigned long flags, gpio_input_status, gpio_inv_status;
+
+	spin_lock_irqsave(&irq_controller_lock, flags);
+
+	/*
+	 * determine which handlers to run and manipulate registers
+	 * holding the lock, then run handlers afterwards
+	 */
+	gpio_input_status = read_gpio_input_status();
+	gpio_inv_status = read_gpio_inv_status();
+	for (i = 0; i < RUBY_GPIO_IRQ_MAX; i++) {
+		int handle_line = 0;
+		int ext_irq = RUBY_IRQ_GPIO0 + i;
+
+		if (irq_has_action(ext_irq)) {
+
+			unsigned long gpio_line_flags = gpio_trig_flags[i];
+			unsigned long line_high = gpio_input_status & (0x1 << i);
+			unsigned long inv_on = gpio_inv_status & (0x1 << i);
+
+/* NOTE(review): if this debug dump is re-enabled, the %x specifiers do not
+ * match the unsigned long arguments - fix to %lx first.
+ */
+#if 0
+			printk("%s irq %d gpio %d input %x inv %x flags %x lhigh %d inv %d\n",
+					__FUNCTION__, irq, i, gpio_input_status, gpio_inv_status, gpio_line_flags,
+					line_high != 0, inv_on != 0);
+#endif
+
+			if (gpio_line_flags & IRQF_TRIGGER_HIGH) {
+				handle_line = line_high;
+			} else if (gpio_line_flags & IRQF_TRIGGER_LOW) {
+				handle_line = !line_high;
+			} else if (gpio_line_flags & IRQF_TRIGGER_RISING) {
+				/*
+				 * rising edge trigger. if inverted, dont handle, uninvert.
+				 * if uninverted, handle and invert.
+				 *
+				 * if neither of these cases are true, then either:
+				 * 1) this line didn't cause this interrupt, or
+				 * 2) the input_status register has not updated yet.
+				 *    this was observed during test, spurious interrupts
+				 *    would occur where input_status is not set appropriately,
+				 *    even though an interrupt was caused by this line.
+				 *    This just means another interrupt will fire until
+				 *    the gpio input status reaches its appropriate state
+				 */
+				if (!line_high && inv_on) {
+					write_gpio_inv_status_bit(i, NON_INVERT);
+				} else if (line_high && !inv_on) {
+					handle_line = 1;
+					write_gpio_inv_status_bit(i, INVERT);
+				}
+			} else if (gpio_line_flags & IRQF_TRIGGER_FALLING) {
+				/* mirror of the rising-edge case */
+				if (!line_high && inv_on) {
+					write_gpio_inv_status_bit(i, NON_INVERT);
+					handle_line = 1;
+				} else if (line_high && !inv_on) {
+					write_gpio_inv_status_bit(i, INVERT);
+				}
+			} else {
+				/* level mode chosen outside this driver: fire when the
+				 * line differs from its inversion setting */
+				handle_line = (line_high && !inv_on) || (!line_high && inv_on);
+			}
+		}
+
+		handle[i] = handle_line;
+	}
+
+	spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+	/* run the chains outside the controller lock */
+	for (i = 0; i < RUBY_GPIO_IRQ_MAX; i++) {
+		if (handle[i]) {
+			run_handlers(RUBY_IRQ_GPIO0 + i);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * Cascade demux actions installed on the shared first-level vectors by
+ * init_IRQ(); each fans its vector out to the extended per-device IRQs.
+ */
+static struct irqaction gpio_irq = {
+	.name       = "GPIO demux",
+	.flags      = 0,
+	.handler    = &gpio_irq_demux_handler,
+	.dev_id     = NULL,
+};
+
+static struct irqaction uart_irq = {
+	.name       = "UART demux",
+	.flags      = 0,
+	.handler    = &uart_irq_demux_handler,
+	.dev_id     = NULL,
+};
+
+static struct irqaction timer_irq = {
+	.name       = "Timers demux",
+	.flags      = 0,
+	.handler    = &timer_irq_demux_handler,
+	.dev_id     = NULL,
+};
+
+static struct irqaction misc_irq = {
+	.name       = "Misc demux",
+	.flags      = 0,
+	.handler    = &misc_irq_demux_handler,
+	.dev_id     = NULL,
+};
+
+#ifdef TEST_IRQ_REG
+/*
+ * Self-test ISR: log (rate-limited) that the line fired; once the limiter
+ * trips, mask the line so a stuck test interrupt cannot storm the system.
+ */
+static irqreturn_t test_irq_handler(int irq, void *dev_id)
+{
+	unsigned long flags;
+
+	if (printk_ratelimit()) {
+		printk(KERN_WARNING "%s, %s irq %d\n", __FUNCTION__, (const char*) dev_id, irq);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irqsave(&irq_controller_lock, flags);
+	ruby_en_dis_ext_irq(irq, DISABLE);
+	spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Self-test: attempt to register and deregister (shared) handlers on every
+ * extended irq, exercising nested disable/enable on each success.
+ */
+static void test_irq_reg(void)
+{
+	int j, k;
+
+	for (j = 0; j < RUBY_IRQ_EXT_VECTORS_NUM; j++) {
+		/* Declarations hoisted above statements: the kernel builds with
+		 * -Wdeclaration-after-statement, which the original mixed
+		 * declaration/statement order violated.
+		 */
+		char bufj[32];
+		int req2;
+
+		snprintf(bufj, sizeof(bufj), "test.j.%d", j);
+		req2 = request_irq(j, test_irq_handler, IRQF_SHARED, "testj", bufj);
+		for (k = 0; k < 2; k++) {
+			char bufk[32];
+			int req3;
+
+			snprintf(bufk, sizeof(bufk), "test.k.%d", k);
+			req3 = request_irq(j, test_irq_handler, IRQF_SHARED, "testk", bufk);
+			if (!req3) {
+				/* exercise the depth counter both ways */
+				disable_irq(j);
+				disable_irq(j);
+				enable_irq(j);
+				enable_irq(j);
+				free_irq(j, bufk);
+			}
+		}
+		if (!req2) {
+			free_irq(j, bufj);
+		} else {
+			printk(KERN_WARNING "%s could not register %s\n",
+					__FUNCTION__, bufj);
+		}
+	}
+}
+#endif
+
+#ifndef TOPAZ_AMBER_IP
+/*
+ *
+ * Some checking to prevent registration of isrs on gpio lines that are in use
+ *
+ */
+/*
+ * Board-config probe: non-zero when the UART1 pins are claimed.  On a
+ * failed lookup the zero-initialised default reports the line as free,
+ * after logging the error.
+ */
+static int check_uart1_use(void)
+{
+	int uart1_in_use = 0;
+
+	if (get_board_config(BOARD_CFG_UART1, &uart1_in_use) != 0)
+		printk(KERN_ERR "get_board_config returned error status for UART1\n");
+
+	return uart1_in_use;
+}
+#endif
+
+/*
+ * Extended-IRQ lines that may not have ISRs installed because the pin or
+ * vector is claimed by another function.  A NULL checkfunc means the line
+ * is unconditionally busy (see ext_isr_check()); a negative irq/gpio field
+ * means "match on the other field".
+ */
+static struct disallowed_isr {
+	int	irq;
+	int	gpio;
+	char*	use;
+	int	(*checkfunc)(void);	/* returns 0 if the line is available */
+}
+disallowed_isrs[] =
+{
+#ifndef TOPAZ_AMBER_IP
+	{ -1,	RUBY_GPIO_UART0_SI,	"uart0 rx",	NULL			},
+	{ -1,	RUBY_GPIO_UART0_SO,	"uart0 tx",	NULL			},
+	{ -1,	RUBY_GPIO_UART1_SI,	"uart1 rx",	&check_uart1_use	},
+	{ -1,	RUBY_GPIO_UART1_SO,	"uart1 tx",	&check_uart1_use	},
+	{ -1,	RUBY_GPIO_I2C_SCL,	"i2c scl",	NULL			},
+	{ -1,	RUBY_GPIO_I2C_SDA,	"i2c sda",	NULL			},
+#endif /*TOPAZ_AMBER_IP*/
+	{ RUBY_IRQ_TIMER0,	-1,	"ruby timer0",	NULL			},
+};
+
+/* returns 0 if this gpio pin may have an isr installed on it,
+ * -EBUSY when the pin/vector is reserved in disallowed_isrs[].
+ * Panics if called with a non-extended irq.
+ */
+static int ext_isr_check(int irq)
+{
+	int i;
+	const struct disallowed_isr *inv;
+	int gpio = (irq - RUBY_IRQ_GPIO0);
+
+	if (!ruby_is_ext_irq(irq)) {
+		panic("%s: invalid irq: %d\n", __FUNCTION__, irq);
+		return -EBUSY;	/* defensive: unreachable after panic() */
+	}
+
+	for (i = 0; i < ARRAY_SIZE(disallowed_isrs); i++) {
+		inv = &disallowed_isrs[i];
+		/* check if pin or irq num matches */
+		if (inv->irq == irq ||
+				(inv->irq < 0 &&
+				 inv->gpio == gpio &&
+				 gpio < RUBY_GPIO_IRQ_MAX)) {
+			/* no checkfunc means always busy */
+			int used = 1;
+			if (inv->checkfunc) {
+				used = inv->checkfunc();
+			}
+
+			if (used) {
+				printk(KERN_ERR "%s: irq %d in use by %s\n",
+						__FUNCTION__, irq, inv->use);
+				return -EBUSY;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Population count: number of set bits in @value (Kernighan's method -
+ * one iteration per set bit rather than per bit position).
+ */
+static __always_inline u32 count_bits_set(u32 value)
+{
+	u32 n = 0;
+
+	while (value) {
+		value &= value - 1;	/* clear the lowest set bit */
+		n++;
+	}
+	return n;
+}
+
+/* initialise the irq table: clear all descriptors, then install the four
+ * cascade demux actions on their shared first-level vectors (and the SMP
+ * IPI handler when configured).
+ */
+void __init init_IRQ(void)
+{
+	memset(&irq_desc[0], 0, sizeof(irq_desc));
+
+	setup_irq(RUBY_IRQ_UART, &uart_irq);
+	setup_irq(RUBY_IRQ_GPIO, &gpio_irq);
+	setup_irq(RUBY_IRQ_TIMER, &timer_irq);
+	setup_irq(RUBY_IRQ_MISC, &misc_irq);
+
+#ifdef CONFIG_SMP
+	smp_ipi_init();
+#endif
+}
+
+
+/* Map extended IRQ to common IRQ line */
+static unsigned int ruby_map_ext_irq(unsigned irq)
+{
+	/*
+	 ************************************************
+	 * remap shared irq's at this level - there are
+	 * only 32 physical vectors so we cannot just
+	 * map 64 irq's directly.
+	 * The irq handler is responsible for demuxing
+	 * further - there are only a couple of
+	 * cases as shown below.  Otherwise we can
+	 * implement a second level irq scheme by
+	 * registering irq's in the init with demux
+	 * handlers.
+	 *************************************************
+	 */
+
+	if (ruby_is_ext_irq(irq)) {
+		/* Adjust extended irq to common irq line.
+		 * NOTE: this <= chain relies on the extended vectors being
+		 * numbered GPIO block, then UART, then TIMER, then MISC.
+		 */
+		if (irq <= RUBY_IRQ_GPIO15) {
+			irq = RUBY_IRQ_GPIO;
+		} else if (irq <= RUBY_IRQ_UART1) {
+			irq = RUBY_IRQ_UART;
+		} else if (irq <= RUBY_IRQ_TIMER3) {
+			irq = RUBY_IRQ_TIMER;
+		} else {
+			irq = RUBY_IRQ_MISC;
+		}
+	}
+
+	return irq;
+}
+
+/* Enable/disable Ruby extended interrupt requests.
+ * Read-modify-write of the OR-interrupt enable mask; the caller must hold
+ * irq_controller_lock (or otherwise serialise access).
+ */
+static void ruby_en_dis_ext_irq(unsigned irq, enum toggle_irq toggle)
+{
+	unsigned long mask;
+	unsigned long status;
+
+	if (!ruby_is_ext_irq(irq)) {
+		printk(KERN_ERR"IRQ %u must be never used with this function.\n", irq);
+		panic("IRQ handling failure\n");
+		return;
+	}
+
+	mask = 1UL << (irq - RUBY_IRQ_VECTORS_NUM);
+	status = readl(IO_ADDRESS(RUBY_SYS_CTL_LHOST_ORINT_EN));
+	if (toggle == ENABLE)
+		status |= mask;
+	else
+		status &= ~mask;
+	writel(status, IO_ADDRESS(RUBY_SYS_CTL_LHOST_ORINT_EN));
+}
+
+/* Enable/disable Ruby common interrupt requests.
+ * Only vectors at/above RUBY_IRQ_WATCHDOG are wired through the Ruby
+ * interrupt controller; lower vectors are internal to the CPU and are
+ * silently ignored, exactly as before.
+ */
+static void ruby_en_dis_common_irq(unsigned irq, enum toggle_irq toggle)
+{
+	unsigned long mask;
+	unsigned long status;
+
+	if (ruby_is_ext_irq(irq)) {
+		printk(KERN_ERR"IRQ %u must be never used with this function.\n", irq);
+		panic("IRQ handling failure\n");
+		return;
+	}
+
+	if (irq < RUBY_IRQ_WATCHDOG)
+		return;
+
+	mask = 1UL << (irq - RUBY_IRQ_WATCHDOG);
+	status = readl(IO_ADDRESS(RUBY_SYS_CTL_LHOST_INT_EN));
+	if (toggle == ENABLE)
+		status |= mask;
+	else
+		status &= ~mask;
+	writel(status, IO_ADDRESS(RUBY_SYS_CTL_LHOST_INT_EN));
+}
+
+/* Check that setup request is correct.
+ * Rejects extended IRQs whose pin is reserved, and IRQF_TRIGGER_* flags on
+ * anything but GPIO vectors (GPIO accepts exactly one trigger flag).
+ * Returns 0 on success or a negative errno.
+ */
+static int ruby_setup_check_1(unsigned irq, unsigned mapped_irq, struct irqaction *node)
+{
+	int ret = 0;
+
+	if (ruby_is_ext_irq(irq)) {
+		ret = ext_isr_check(irq);
+	}
+
+	if (node->flags & IRQF_TRIGGER_MASK) {
+		if (mapped_irq == RUBY_IRQ_GPIO) {
+			/* the GPIO emulation supports only a single trigger mode */
+			if (count_bits_set(node->flags & IRQF_TRIGGER_MASK) > 1) {
+				printk(KERN_ERR"IRQ %d (0x%x) does not support multiple IRQF_TRIGGER_* flags\n",
+						irq, (unsigned)node->flags);
+
+				ret = -EINVAL;
+			}
+		} else {
+			printk(KERN_ERR"IRQ %d (0x%x) does not support IRQF_TRIGGER_* flags (fixme)\n",
+					irq, (unsigned)node->flags);
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+/* Check that setup request is correct when the vector already has an ISR:
+ * every ISR sharing one vector must carry IRQF_SHARED on both sides.
+ * Returns 0 if compatible, -EBUSY otherwise.
+ */
+static int ruby_setup_check_2(unsigned irq, unsigned mapped_irq, struct irqaction *node)
+{
+	const struct irqaction *head = irq_get_action(irq);
+
+	if ((head->flags & IRQF_SHARED) && (node->flags & IRQF_SHARED))
+		return 0;
+
+	printk(KERN_ERR"%s: %s incompatible with IRQ %d from %s\n",
+		__FUNCTION__, node->name, irq, head->name);
+	return -EBUSY;
+}
+
+/* Enable IRQ during setup */
+static void ruby_setup_enable_irq(unsigned irq)
+{
+	if (ruby_is_ext_irq(irq)) {
+		/* extended vectors are gated only by the OR-interrupt mask */
+		ruby_en_dis_ext_irq(irq, ENABLE);
+		return;
+	}
+
+	/* common vectors: unmask in the CPU AUX_IENABLE register, then open
+	 * the line on the Ruby interrupt controller
+	 */
+	unmask_interrupt((1 << irq));
+	ruby_en_dis_common_irq(irq, ENABLE);
+}
+
+/* Link node during setup: install as chain head for a fresh vector, or
+ * append at the tail of an existing shared chain.
+ */
+static void ruby_setup_link_irq(enum first_isr is_first, unsigned irq, struct irqaction *node)
+{
+	struct irqaction **tail;
+
+	if (is_first == FIRST_ISR) {
+		irq_to_desc(irq)->action = node;
+		return;
+	}
+
+	tail = &irq_get_action(irq)->next;
+	while (*tail)
+		tail = &(*tail)->next;
+	*tail = node;
+}
+
+/* setup_irq:
+ * Typically used by architecture special interrupts for
+ * registering handler to IRQ line.
+ * Validates the request, links @node into the vector's chain and enables
+ * the line, all under irq_controller_lock.  Returns 0 or a negative errno
+ * (in which case nothing was linked or enabled).
+ */
+int setup_irq(unsigned irq, struct irqaction *node)
+{
+	int ret = 0;
+	unsigned mapped_irq = ruby_map_ext_irq(irq);
+	unsigned long flags;
+	enum first_isr is_first;
+
+#ifdef  ARC_IRQ_DBG
+	printk(KERN_INFO"---IRQ Request (%d) ISR\n", irq);
+#endif
+
+	if (node->flags & IRQF_SAMPLE_RANDOM)
+		rand_initialize_irq(irq);
+
+	spin_lock_irqsave(&irq_controller_lock, flags);
+
+	ret = ruby_setup_check_1(irq, mapped_irq, node);
+	if (ret) {
+		goto error;
+	}
+
+	if (!irq_has_action(irq)) {
+		is_first = FIRST_ISR;
+	} else {
+		is_first = MULTIPLE_ISRS;
+
+		/* additional checks for multiple isrs */
+		ret = ruby_setup_check_2(irq, mapped_irq, node);
+		if (ret) {
+			goto error;
+		}
+	}
+
+	/* additional pin settings for gpio irqs */
+	if (ruby_is_ext_irq(irq) && mapped_irq == RUBY_IRQ_GPIO) {
+		init_gpio_irq(irq, node->flags);
+	}
+
+	ruby_setup_link_irq(is_first, irq, node);
+	ruby_setup_enable_irq(irq);
+
+error:
+	spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+	return ret;
+}
+
+/* request_irq:
+ * Exported to device drivers / modules to assign handler to IRQ line.
+ *
+ * Allocates an irqaction for @handler and chains it onto vector @irq via
+ * setup_irq().  Returns 0 on success, -ENXIO for an out-of-range vector,
+ * -ENOMEM on allocation failure, or setup_irq()'s error (in which case
+ * the node is freed again).
+ */
+int request_irq(unsigned irq,
+		irqreturn_t (*handler)(int, void *),
+		unsigned long flags, const char *name, void *dev_id)
+{
+	struct irqaction *node;
+	int retval;
+
+	if (irq >= NR_IRQS) {
+		/* log at KERN_ERR for consistency with free_irq(); the
+		 * original printk carried no level at all */
+		printk(KERN_ERR "%s: Unknown IRQ %d\n", __FUNCTION__, irq);
+		return -ENXIO;
+	}
+
+	/* kmalloc() returns void *, so the original explicit cast was redundant */
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->handler = handler;
+	node->flags = flags;
+	node->dev_id = dev_id;
+	node->name = name;
+	node->next = NULL;
+
+	/* insert the new irq registered into the irq list */
+	retval = setup_irq(irq, node);
+	if (retval)
+		kfree(node);
+	return retval;
+}
+EXPORT_SYMBOL(request_irq);
+
+/* free an irq node for the irq list.
+ * Unlinks and frees every node on @irq's chain whose dev_id matches, and
+ * fully disables the line once its chain becomes empty.
+ */
+void free_irq(unsigned irq, void *dev_id)
+{
+	unsigned long flags;
+	struct irqaction *tmp = NULL, **node;
+
+	if (irq >= NR_IRQS) {
+		printk(KERN_ERR "%s: Unknown IRQ %d\n", __FUNCTION__, irq);
+		return;
+	}
+
+	spin_lock_irqsave(&irq_controller_lock, flags); /* delete atomically */
+
+	/* Traverse through linked-list of ISRs. */
+	node = &irq_to_desc(irq)->action;
+	while (*node) {
+		if ((*node)->dev_id == dev_id) {
+			tmp = *node;
+			(*node) = (*node)->next;
+			kfree(tmp);
+		} else {
+			node = &((*node)->next);
+		}
+	}
+
+	/* Disable IRQ if found in linked-list.
+	 * NOTE: tmp now dangles and is used purely as a "found" flag below -
+	 * it is never dereferenced after kfree().
+	 */
+	if (tmp) {
+		/* Disable if last ISR deleted. */
+		if (!irq_has_action(irq)) {
+			/* If it is extended irq - disable it. */
+			if (ruby_is_ext_irq(irq)) {
+				ruby_en_dis_ext_irq(irq, DISABLE);
+			} else {
+				/* Disable this IRQ in CPU AUX_IENABLE Reg */
+				mask_interrupt((1 << irq));
+				/* Disable IRQ on Ruby interrupt controller */
+				ruby_en_dis_common_irq(irq, DISABLE);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+	if (!tmp) {
+		printk(KERN_ERR"%s: tried to remove invalid interrupt", __FUNCTION__);
+	}
+}
+EXPORT_SYMBOL(free_irq);
+
+#if defined(CONFIG_PCI_MSI)
+/* Acknowledge a pending PCIe MSI so the line can fire again.
+ * NOTE(review): unlike the other register accesses in this file the address
+ * is not wrapped in IO_ADDRESS() - confirm RUBY_PCIE_MSI_STATUS is already
+ * a virtual address.
+ */
+static inline void clear_msi_request(void)
+{
+	writel(RUBY_PCIE_MSI_CLEAR, RUBY_PCIE_MSI_STATUS);
+}
+#endif
+
+/* CPU sleep/awake accounting, maintained in ARC TIMER1 tick units. */
+struct ruby_cpumon_data {
+	uint64_t sleep_cycles;	/* total cycles spent asleep */
+	uint64_t awake_cycles;	/* total cycles spent awake */
+	uint32_t last_cyc;	/* clock reading at the last state update */
+	int last_was_asleep;	/* set by arch_idle(), cleared on first IRQ */
+};
+
+static __sram_data struct ruby_cpumon_data ruby_cpumon;
+
+/* Free-running cycle counter used for the accounting above. */
+static __always_inline uint32_t ruby_cpumon_get_clock(void)
+{
+	return read_new_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+/*
+ * Snapshot the accumulated sleep/awake cycle counters.
+ * Must be called while the CPU is awake (hence the WARN); brings
+ * awake_cycles up to date before reporting.  Either output pointer may
+ * be NULL.
+ */
+void ruby_cpumon_get_cycles(uint64_t *sleep, uint64_t *awake)
+{
+	uint32_t cyc;
+	unsigned long flags;
+	struct ruby_cpumon_data *cd = &ruby_cpumon;
+
+	local_irq_save(flags);
+
+	WARN_ON_ONCE(cd->last_was_asleep);
+
+	/* charge cycles since the last update to awake time */
+	cyc = ruby_cpumon_get_clock();
+	cd->awake_cycles += cyc - cd->last_cyc;
+	cd->last_cyc = cyc;
+
+	if (sleep) {
+		*sleep = cd->sleep_cycles;
+	}
+	if (awake) {
+		*awake = cd->awake_cycles;
+	}
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(ruby_cpumon_get_cycles);
+
+/*
+ * /proc/ruby_cpumon read callback.  The report fits in one page, so any
+ * follow-up read (offset > 0) simply signals EOF.
+ */
+static int ruby_cpumon_proc_read(char *page, char **start, off_t offset, int count, int *eof, void *data)
+{
+	uint64_t sleep_cyc;
+	uint64_t awake_cyc;
+	int len;
+
+	if (offset > 0) {
+		*eof = 1;
+		return 0;
+	}
+
+	ruby_cpumon_get_cycles(&sleep_cyc, &awake_cyc);
+
+	len = sprintf(page, "up_msecs %u sleep %Lu awake %Lu\n",
+			jiffies_to_msecs(jiffies), sleep_cyc, awake_cyc);
+
+	return len;
+}
+
+/*
+ * Late-init hook: initialise the cpumon counters and expose them via
+ * /proc/ruby_cpumon.
+ */
+static int __init ruby_cpumon_register_proc(void)
+{
+	struct ruby_cpumon_data *cd = &ruby_cpumon;
+
+	/* Initialise the counters BEFORE the /proc entry becomes visible so
+	 * a reader cannot observe a half-initialised structure; the original
+	 * registered the entry first.
+	 */
+	memset(cd, 0, sizeof(*cd));
+	cd->last_cyc = ruby_cpumon_get_clock();
+
+	/* the original ignored a failed registration entirely */
+	if (!create_proc_read_entry("ruby_cpumon", 0, NULL, ruby_cpumon_proc_read, NULL))
+		printk(KERN_ERR "ruby_cpumon: failed to create proc entry\n");
+
+	return 0;
+}
+late_initcall(ruby_cpumon_register_proc);
+
+/*
+ * Idle-loop hook: account cycles since the last update as awake time,
+ * mark the CPU asleep, then re-enable interrupts and sleep atomically.
+ * The next interrupt (via ruby_cpumon_interrupt()) credits the elapsed
+ * time to sleep_cycles.
+ */
+void arch_idle(void)
+{
+	uint32_t sleep_start;
+	unsigned long flags;
+	struct ruby_cpumon_data *cd = &ruby_cpumon;
+
+	local_irq_save(flags);
+	sleep_start = ruby_cpumon_get_clock();
+	cd->awake_cycles += sleep_start - cd->last_cyc;
+	cd->last_cyc = sleep_start;
+	cd->last_was_asleep = 1;
+	local_irq_restore_and_sleep(flags);
+}
+
+/*
+ * Called on every interrupt: if this is the first one after arch_idle(),
+ * charge the elapsed cycles to sleep time and mark the CPU awake again.
+ */
+static inline void ruby_cpumon_interrupt(void)
+{
+	struct ruby_cpumon_data *cd = &ruby_cpumon;
+	uint32_t now;
+
+	if (likely(!cd->last_was_asleep))
+		return;
+
+	now = ruby_cpumon_get_clock();
+	cd->sleep_cycles += now - cd->last_cyc;
+	cd->last_cyc = now;
+	cd->last_was_asleep = 0;
+}
+
+/*
+ * Invoke every ISR chained on @irq, updating per-CPU interrupt statistics
+ * and tracepoints around each call.  MSI vectors are acknowledged first so
+ * a new MSI can latch while the handlers run.
+ */
+static void __sram_text run_handlers(unsigned irq)
+{
+	struct irqaction *node;
+
+	ASSERT((irq < NR_IRQS) && irq_has_action(irq));
+#if defined(CONFIG_PCI_MSI)
+	if (irq == RUBY_IRQ_MSI) {
+		clear_msi_request();
+	}
+#endif
+	/* call all the ISR's in the list for that interrupt source */
+	node = irq_get_action(irq);
+	while (node) {
+		kstat_cpu(smp_processor_id()).irqs[irq]++;
+		trace_irq_handler_entry(irq, 0);
+		node->handler(irq, node->dev_id);
+		trace_irq_handler_exit(irq, 0, 0);
+		if (node->flags & IRQF_SAMPLE_RANDOM)
+			add_interrupt_randomness(irq);
+		node = node->next;
+	}
+
+#ifdef  ARC_IRQ_DBG
+	if (!irq_has_action(irq))
+		printk(KERN_ERR "Spurious interrupt : irq no %u on cpu %u", irq,
+		smp_processor_id());
+#endif
+}
+
+/* handle the irq: top-level ISR entry.  Publish the register frame for
+ * profiling, account sleep time, run the vector's handler chain, then
+ * restore the previous frame after the usual irq_enter/irq_exit bracket.
+ */
+void __sram_text process_interrupt(unsigned irq, struct pt_regs *fp)
+{
+	struct pt_regs *prev_regs = set_irq_regs(fp);
+
+	irq_enter();
+	ruby_cpumon_interrupt();
+	run_handlers(irq);
+	irq_exit();
+
+	check_stack_consistency(__FUNCTION__);
+	set_irq_regs(prev_regs);
+}
+
+/* IRQ Autodetect not required for ARC
+ * However the stubs still need to be exported for IDE et all
+ */
+unsigned long probe_irq_on(void)
+{
+	/* no autoprobing: report an empty candidate mask */
+	return 0;
+}
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off(unsigned long irqs)
+{
+	/* no autoprobing: report "no interrupt seen" */
+	return 0;
+}
+EXPORT_SYMBOL(probe_irq_off);
+
+/* FIXME: implement if necessary */
+void init_irq_proc(void)
+{
+	// for implementing /proc/irq/xxx
+}
+
+/*
+ * /proc/interrupts backend: print the per-CPU column header on the first
+ * call, then one line per registered ISR on vector @v with its per-CPU
+ * counts.
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+	int i = *(loff_t *) v, j;
+
+	if (i == 0) {	  // First line, first CPU
+		seq_printf(p,"\t");
+		for_each_online_cpu(j) {
+			seq_printf(p,"CPU%-8d",j);
+		}
+		seq_putc(p,'\n');
+
+#ifdef TEST_IRQ_REG
+		test_irq_reg();
+#endif
+	}
+
+	if (i < NR_IRQS) {
+		int irq = i;
+		const struct irqaction *node = irq_get_action(irq);
+		while (node) {
+			seq_printf(p,"%u:\t",i);
+			/* short names get an extra tab to keep columns aligned */
+			if (strlen(node->name) < 8) {
+				for_each_online_cpu(j) {
+					seq_printf(p,"%s\t\t\t%u\n",
+							node->name, kstat_cpu(j).irqs[i]);
+				}
+			} else {
+				for_each_online_cpu(j) {
+					seq_printf(p,"%s\t\t%u\n",
+							node->name, kstat_cpu(j).irqs[i]);
+				}
+			}
+			node = node->next;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *      disable_irq - disable an irq and wait for completion
+ *      @irq: Interrupt to disable
+ *
+ *      Disable the selected interrupt line.  We do this lazily.
+ *
+ *      This function may be called from IRQ context.
+ */
+void disable_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_controller_lock, flags);
+
+	/* the depth counter supports nested disable/enable pairs: only the
+	 * first disable actually masks the line
+	 */
+	if (irq < NR_IRQS && irq_has_action(irq) && !irq_to_desc(irq)->depth++) {
+		if (ruby_is_ext_irq(irq))
+			ruby_en_dis_ext_irq(irq, DISABLE);
+		else
+			ruby_en_dis_common_irq(irq, DISABLE);
+	}
+
+	spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+EXPORT_SYMBOL(disable_irq);
+
+/**
+ *      enable_irq - enable interrupt handling on an irq
+ *      @irq: Interrupt to enable
+ *
+ *      Re-enables the processing of interrupts on this IRQ line.
+ *      Note that this may call the interrupt handler, so you may
+ *      get unexpected results if you hold IRQs disabled.
+ *
+ *      This function may be called from IRQ context.
+ */
+void enable_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_controller_lock, flags);
+
+	if (irq < NR_IRQS && irq_has_action(irq)) {
+		struct irq_desc *desc = irq_to_desc(irq);
+
+		if (!desc->depth) {
+			/* enable without a matching disable */
+			printk(KERN_ERR"Unbalanced IRQ action %d %s\n", irq, __FUNCTION__);
+		} else if (!--desc->depth) {
+			/* last nested enable: actually unmask the line */
+			if (ruby_is_ext_irq(irq))
+				ruby_en_dis_ext_irq(irq, ENABLE);
+			else
+				ruby_en_dis_common_irq(irq, ENABLE);
+		}
+	}
+
+	spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+EXPORT_SYMBOL(enable_irq);
+
+#ifdef CONFIG_SMP
+
+/*
+ * Number of interrupt lines the ARC core was built with, decoded from the
+ * low two bits of the VECBASE build-configuration register (0 for an
+ * unknown encoding).  Prototyped as (void): the original empty parameter
+ * list declared an unprototyped function; the trailing return after the
+ * exhaustive switch was unreachable and is dropped.
+ */
+int get_hw_config_num_irq(void)
+{
+	uint32_t val = read_new_aux_reg(ARC_REG_VECBASE_BCR);
+
+	switch (val & 0x03) {
+	case 0:
+		return 16;
+	case 1:
+		return 32;
+	case 2:
+		return 8;
+	default:
+		return 0;
+	}
+}
+
+#endif
+
+/* Enable interrupts.
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ *    which maybe in hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ *  e.g. suppose TIMER is high priority (Level 2) IRQ
+ *    Time hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
+ *    Here local_irq_enable( ) shd not re-enable lower priority interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ *    soft ISR are low priority jobs which can be very slow, thus all IRQs
+ *    must be enabled while they run.
+ *    Now hardware context wise we may still be in L2 ISR (not done rtie)
+ *    still we must re-enable both L1 and L2 IRQs
+ *  Another twist is prev scenario with flow being
+ *     L1 ISR ==> interrupted by L2 ISR  ==> L2 soft ISR
+ *     here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
+ *     over-written (this is a deficiency in the ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARCH_ARC_LV2_INTR     // Complex version for 2 levels of Intr
+
+void __sram_text local_irq_enable(void) {
+
+	unsigned long flags;
+	local_save_flags(flags);
+
+	/* Allow both L1 and L2 at the onset */
+	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+	/* Called from hard ISR (between irq_enter and irq_exit) */
+	if (in_irq()) {
+
+		/* If in L2 ISR, don't re-enable any further IRQs as this can cause
+		 * IRQ priorities to get upside down.
+		 * L1 can be taken while in L2 hard ISR which is wrong in theory and
+		 * can also cause the dreaded L1-L2-L1 scenario
+		 */
+		if (flags & STATUS_A2_MASK) {
+			flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+		}
+
+		/* Even if in L1 ISR, allow higher prio L2 IRQs */
+		else if (flags & STATUS_A1_MASK) {
+			flags &= ~(STATUS_E1_MASK);
+		}
+	}
+
+	/* called from soft IRQ, ideally we want to re-enable all levels */
+
+	else if (in_softirq()) {
+
+		/* However if this is case of L1 interrupted by L2,
+		 * re-enabling both may cause whaco L1-L2-L1 scenario
+		 * because ARC700 allows level 1 to interrupt an active L2 ISR
+		 * Thus we disable both
+		 * However some code, executing in soft ISR wants some IRQs to be
+		 * enabled so we re-enable L2 only
+		 *
+		 * How do we determine L1 intr by L2
+		 *  -A2 is set (means in L2 ISR)
+		 *  -E1 is set in this ISR's pt_regs->status32 which is
+		 *      saved copy of status32_l2 when l2 ISR happened
+		 */
+		struct pt_regs *pt = get_irq_regs();
+		if ((flags & STATUS_A2_MASK) && pt &&
+			(pt->status32 & STATUS_A1_MASK ) ) {
+			//flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+			flags &= ~(STATUS_E1_MASK);
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+#else  /* ! CONFIG_ARCH_ARC_LV2_INTR */
+
+ /* Single-priority variant: only Level 1 interrupt bits matter here. */
+
+void __sram_text local_irq_enable(void) {
+
+	unsigned long flags;
+
+	local_save_flags(flags);
+
+	/* Re-enable both level bits by default, but never while inside a hard
+	 * ISR (between irq_enter and irq_exit); soft-ISR context may take
+	 * further Level 1 interrupts, hard-ISR context may not.
+	 */
+	if (in_irq())
+		flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+	else
+		flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+	local_irq_restore(flags);
+}
+#endif
+
+EXPORT_SYMBOL(local_irq_enable);
+
diff --git a/drivers/qtn/ruby/kdump.h b/drivers/qtn/ruby/kdump.h
new file mode 100644
index 0000000..06f9bbf
--- /dev/null
+++ b/drivers/qtn/ruby/kdump.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __KDUMP_H__
+#define __KDUMP_H__
+
+#if defined(CONFIG_KERNEL_TEXT_SNAPSHOTS)
+void kdump_print_ktext_checksum(void);
+#else
+static __inline__ void kdump_print_ktext_checksum(void) {}
+#endif
+
+#if defined(CONFIG_KERNEL_TEXT_SNAPSHOTS) && CONFIG_KERNEL_TEXT_SNAPSHOT_COUNT > 0
+int kdump_take_snapshot(const char* description);
+void kdump_compare_all_snapshots(void);
+void kdump_add_module(struct module *mod);
+void kdump_remove_module(struct module *mod);
+#else
+static __inline__ int kdump_take_snapshot(const char* description) { return 0; }
+static __inline__ void kdump_compare_all_snapshots(void) {}
+static __inline__ void kdump_add_module(struct module *mod) {}
+static __inline__ void kdump_remove_module(struct module *mod) {}
+#endif
+
+void kdump_add_troubleshooter(void (*fn)(void));
+
+#endif	// __KDUMP_H__
+
diff --git a/drivers/qtn/ruby/machine.c b/drivers/qtn/ruby/machine.c
new file mode 100644
index 0000000..b83b52a
--- /dev/null
+++ b/drivers/qtn/ruby/machine.c
@@ -0,0 +1,80 @@
+/**
+ * Copyright (c) 2010 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/version.h>
+#include <linux/reboot.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <asm/board/platform.h>
+
+#ifdef TOPAZ_AMBER_IP
+#include <qtn/amber.h>
+#endif
+
+static inline void machine_restart_watchdog(unsigned long delay)
+{
+	writel(RUBY_WDT_ENABLE, RUBY_WDT_CTL);
+	writel(delay, RUBY_WDT_TIMEOUT_RANGE);
+	writel(RUBY_WDT_MAGIC_NUMBER, RUBY_WDT_COUNTER_RESTART);
+}
+
+static inline void machine_restart_sysctrl(unsigned long mask)
+{
+	writel(mask, RUBY_SYS_CTL_CPU_VEC_MASK);
+	writel(0x0, RUBY_SYS_CTL_CPU_VEC);
+	writel(0x0, RUBY_SYS_CTL_CPU_VEC_MASK);
+}
+
+void machine_restart(char *__unused)
+{
+#ifndef TOPAZ_AMBER_IP
+	/* Be paranoid - use both watchdog and sysctl to reset */
+	machine_restart_watchdog(RUBY_WDT_RESET_TIMEOUT);
+	machine_restart_sysctrl(RUBY_SYS_CTL_RESET_ALL);
+
+	/* Board must be reset before this point! */
+	while(1);
+#else
+	machine_halt();
+#endif
+}
+
+void machine_halt(void)
+{
+#ifdef TOPAZ_AMBER_IP
+	local_irq_disable();
+	/* Tell ST HOST we have been halted */
+	amber_shutdown();
+#endif
+	/* Halt the processor */
+	__asm__ __volatile__("flag  %0"::"i"(STATUS_H_MASK));
+}
+
+void machine_power_off(void)
+{
+	/* FIXME ::  power off ??? */
+	machine_halt();
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+void (*pm_power_off) (void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+#endif
+
diff --git a/drivers/qtn/ruby/mem_check.h b/drivers/qtn/ruby/mem_check.h
new file mode 100644
index 0000000..828da5c
--- /dev/null
+++ b/drivers/qtn/ruby/mem_check.h
@@ -0,0 +1,103 @@
+/**
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __BOARD_RUBY_MEM_CHECK_H
+#define __BOARD_RUBY_MEM_CHECK_H
+
+#include <linux/module.h>
+#include <linux/magic.h>
+#include <linux/sched.h>
+
+static inline int is_linux_ddr_mem_addr(uint32_t addr)
+{
+	const uint32_t linux_dram_start = CONFIG_ARC_KERNEL_MEM_BASE + RUBY_DRAM_BEGIN;
+	const uint32_t linux_dram_end = linux_dram_start + CONFIG_ARC_KERNEL_MAX_SIZE;
+	return ((addr >= linux_dram_start) && (addr < linux_dram_end));
+}
+
+static inline int is_linux_sram_mem_addr(uint32_t addr)
+{
+	const uint32_t linux_sram_b1_start = CONFIG_ARC_KERNEL_SRAM_B1_BASE + RUBY_SRAM_BEGIN;
+	const uint32_t linux_sram_b1_end = CONFIG_ARC_KERNEL_SRAM_B1_END + RUBY_SRAM_BEGIN;
+	const uint32_t linux_sram_b2_start = CONFIG_ARC_KERNEL_SRAM_B2_BASE + RUBY_SRAM_BEGIN;
+	const uint32_t linux_sram_b2_end = CONFIG_ARC_KERNEL_SRAM_B2_END + RUBY_SRAM_BEGIN;
+	if ((addr >= linux_sram_b1_start) && (addr < linux_sram_b1_end)) {
+		return 1;
+	} else if ((addr >= linux_sram_b2_start) && (addr < linux_sram_b2_end)) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline int is_linux_mem_addr(uint32_t addr)
+{
+	if (is_linux_ddr_mem_addr(addr)) {
+		return 1;
+	} else if (is_linux_sram_mem_addr(addr)) {
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline int is_sram_irq_stack_good(void)
+{
+#ifdef CONFIG_ARCH_RUBY_SRAM_IRQ_STACK
+	extern unsigned long __irq_stack_begin;
+	if (unlikely(__irq_stack_begin != STACK_END_MAGIC)) {
+		return 0;
+	}
+#endif
+	return 1;
+}
+
+static inline int is_kernel_stack_good(void)
+{
+	struct task_struct *task = current;
+	if (likely(task)) {
+		unsigned long *stack = end_of_stack(task);
+		if (unlikely(!is_linux_mem_addr((uint32_t)stack) ||
+				(*stack != STACK_END_MAGIC))) {
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static inline void check_stack_consistency_panic(const char *s1, const char *s2)
+{
+	register unsigned long sp asm ("sp");
+	register unsigned long ilink1 asm ("ilink1");
+	dump_stack();
+	printk(KERN_ERR"ilink1=0x%lx sp=0x%lx\n", ilink1, sp);
+	panic("%s overran stack, or stack corrupted: %s\n", s1, s2);
+}
+
+static inline void check_stack_consistency(const char *func)
+{
+	if (unlikely(!is_sram_irq_stack_good())) {
+		check_stack_consistency_panic("IRQ", func);
+	} else if (unlikely(!is_kernel_stack_good())) {
+		check_stack_consistency_panic("Thread", func);
+	}
+}
+
+#endif // #ifndef __BOARD_RUBY_MEM_CHECK_H
+
diff --git a/drivers/qtn/ruby/mv88e6071.c b/drivers/qtn/ruby/mv88e6071.c
new file mode 100644
index 0000000..9cf61c4
--- /dev/null
+++ b/drivers/qtn/ruby/mv88e6071.c
@@ -0,0 +1,160 @@
+/*
+ * Driver for the Marvell 88E6071 switch
+ *
+ * Copyright (c) Quantenna Communications, Incorporated 2013
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/compiler.h>
+#include "mv88e6071.h"
+#include <common/ruby_arasan_emac_ahb.h>
+
+#define MV88E6XXX_GLOBAL2_PHYADDR	0x17
+#define MV88E6XXX_SMI_PHY_CMD		0x18
+#define MV88E6XXX_SMI_PHY_DATA		0x19
+#define MV88E6XXX_SMI_PHY_CMD_WRITE	BIT(10)
+#define MV88E6XXX_SMI_PHY_CMD_READ	BIT(11)
+#define MV88E6XXX_SMI_PHY_CMD_MODE	BIT(12)	/* 0 = 802.3 Clause 45 SMI frame, 1 = 802.3 Clause 22 SMI frame */
+#define MV88E6XXX_SMI_PHY_CMD_BUSY	BIT(15)
+
+static uint16_t mv88e6071_smi_phy_cmd(uint8_t dev_addr, uint8_t reg_addr, uint16_t op)
+{
+	uint16_t val = 0;
+	val |= dev_addr << 5;
+	val |= reg_addr;
+	val |= op;
+	val |= MV88E6XXX_SMI_PHY_CMD_MODE;
+	val |= MV88E6XXX_SMI_PHY_CMD_BUSY;
+
+	return val;
+}
+
+static uint16_t mv88e6071_smi_phy_cmd_rd(uint8_t dev_addr, uint8_t reg_addr)
+{
+	return mv88e6071_smi_phy_cmd(dev_addr, reg_addr, MV88E6XXX_SMI_PHY_CMD_READ);
+}
+
+static uint16_t mv88e6071_smi_phy_cmd_wr(uint8_t dev_addr, uint8_t reg_addr)
+{
+	return mv88e6071_smi_phy_cmd(dev_addr, reg_addr, MV88E6XXX_SMI_PHY_CMD_WRITE);
+}
+
+static int mv88e6071_smi_wait(struct emac_common *privc)
+{
+	/* wait for arasan mdio op to complete */
+	if (!mdio_wait(privc, EMAC_MAC_MDIO_CTRL, MacMdioCtrlStart, 0,
+				TIMEOUT_MAC_MDIO_CTRL, __FUNCTION__)) {
+		return -1;
+	};
+
+	/* also make sure that mv88e6071 SMI PHY unit busy is deasserted */
+	if (!mdio_wait(privc, MV88E6XXX_SMI_PHY_CMD, MV88E6XXX_SMI_PHY_CMD_BUSY, 0,
+				TIMEOUT_MAC_MDIO_CTRL, __FUNCTION__)) {
+		return -1;
+	}
+
+	return 0;
+}
+
+int mv88e6071_mdio_read(struct mii_bus *bus, int dev_addr, int reg_addr)
+{
+	struct net_device *dev = bus->priv;
+	struct emac_common *privc = netdev_priv(dev);
+	int rc;
+
+	if (mv88e6071_smi_wait(privc)) {
+		return -1;
+	}
+
+	/* write to SMI phy control and SMI phy data registers that a read op is requested */
+	rc = emac_lib_mdio_write(bus, MV88E6XXX_GLOBAL2_PHYADDR, MV88E6XXX_SMI_PHY_CMD,
+				mv88e6071_smi_phy_cmd_rd(dev_addr, reg_addr));
+	if (rc < 0) {
+		return -1;
+	}
+
+	if (mv88e6071_smi_wait(privc)) {
+		return -1;
+	}
+
+	/* read desired value from the SMI phy data register */
+	return emac_lib_mdio_read(bus, MV88E6XXX_GLOBAL2_PHYADDR, MV88E6XXX_SMI_PHY_DATA);
+}
+
+int mv88e6071_mdio_write(struct mii_bus *bus, int dev_addr, int reg_addr, uint16_t value)
+{
+	struct net_device *dev = bus->priv;
+	struct emac_common *privc = netdev_priv(dev);
+	int rc;
+
+	if (mv88e6071_smi_wait(privc)) {
+		return -1;
+	}
+
+	/* write value to the SMI phy data register */
+	rc = emac_lib_mdio_write(bus, MV88E6XXX_GLOBAL2_PHYADDR, MV88E6XXX_SMI_PHY_DATA, value);
+	if (rc < 0) {
+		return -1;
+	}
+
+	if (mv88e6071_smi_wait(privc)) {
+		return -1;
+	}
+
+	/* write SMI phy ctrl reg to push through the new SMI phy data reg value */
+	rc = emac_lib_mdio_write(bus, MV88E6XXX_GLOBAL2_PHYADDR, MV88E6XXX_SMI_PHY_CMD,
+		mv88e6071_smi_phy_cmd_wr(dev_addr, reg_addr));
+	if (rc < 0) {
+		return -1;
+	}
+
+	if (mv88e6071_smi_wait(privc)) {
+		return -1;
+	}
+
+	return 0;
+}
+
+int mv88e6071_init(struct emac_common *privc)
+{
+	int phyaddr;
+
+	/* disable flow control for each port */
+	for (phyaddr = 0x10; phyaddr <= 0x14; phyaddr++) {
+		int val;
+
+		/* flow control change */
+		val = mv88e6071_mdio_read(privc->mii_bus, phyaddr, MII_ADVERTISE) &
+			~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		mv88e6071_mdio_write(privc->mii_bus, phyaddr, MII_ADVERTISE, val);
+
+		/* reset & restart auto-negotiation */
+		val = mv88e6071_mdio_read(privc->mii_bus, phyaddr, MII_BMCR) |
+			BMCR_RESET | BMCR_ANRESTART;
+		mv88e6071_mdio_write(privc->mii_bus, phyaddr, MII_BMCR, val);
+	}
+
+	return 0;
+}
+
diff --git a/drivers/qtn/ruby/mv88e6071.h b/drivers/qtn/ruby/mv88e6071.h
new file mode 100644
index 0000000..4312853
--- /dev/null
+++ b/drivers/qtn/ruby/mv88e6071.h
@@ -0,0 +1,32 @@
+/*
+ *  Driver for Marvell 88E6071 switch
+ *
+ *  Copyright (c) Quantenna Communications, Incorporated 2013
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#ifndef __QTN_MV88E6071_H__
+#define __QTN_MV88E6071_H__
+
+#include "emac_lib.h"
+
+int mv88e6071_init(struct emac_common *privc);
+int mv88e6071_mdio_read(struct mii_bus *bus, int phy_addr, int reg);
+int mv88e6071_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value);
+
+#endif // __QTN_MV88E6071_H__
+
diff --git a/drivers/qtn/ruby/pci_msi.c b/drivers/qtn/ruby/pci_msi.c
new file mode 100644
index 0000000..eed9b8d
--- /dev/null
+++ b/drivers/qtn/ruby/pci_msi.c
@@ -0,0 +1,183 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/irq.h>
+#include <asm/irq.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <asm/board/platform.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+void arch_teardown_msi_irq(unsigned int irq)
+{
+
+	irq_set_msi_desc(irq, NULL);
+}
+
+
+/**
+ *	arch_setup_msi_irq - setup irq associated with a msi_desc
+ *	 and write the MSI message info to dev config space
+ *	@desc:
+ */
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+	struct msi_msg msg;
+	unsigned int irq;
+
+	printk("%s-%s called\n", __FILE__, __FUNCTION__);
+
+	irq = RUBY_IRQ_MSI;
+
+	irq_set_msi_desc(irq, desc);
+
+	msg.address_hi = 0;
+	msg.address_lo = RUBY_PCIE_MSI_REGION;
+	msg.data = RUBY_MSI_DATA;
+
+	write_msi_msg(irq, &msg);
+	return 0;
+
+}
+
+#else
+/**
+ * get_irq_msi- return the msi_desc of irq
+ */
+struct
+msi_desc * get_irq_msi(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc) {
+		printk(KERN_ERR
+		       "Trying to install msi data for IRQ%d\n", irq);
+		return NULL;
+	}
+	return desc->msi_desc;
+}
+
+/**
+ *  set_irq_msi - Associated the irq with a MSI descriptor
+ */
+int
+set_irq_msi(unsigned int irq, struct msi_desc *entry)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc) {
+		printk(KERN_ERR
+		       "Trying to install msi data for IRQ%d\n", irq);
+		return -EINVAL;
+	}
+	if (irq != RUBY_IRQ_MSI) {
+		printk(KERN_ERR "set up msi irq(%u) out of expected\n", irq);
+		return -EINVAL;
+	}
+	if (desc->msi_desc != NULL) {
+		printk(KERN_WARNING "%s: overwriting previous msi entry of irq %u\n", __FUNCTION__, irq);
+	}
+
+	desc->msi_desc = entry;
+	if (entry)
+		entry->irq = irq;
+
+	return 0;
+}
+
+static void
+handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+{
+	printk(KERN_ERR "unexpected IRQ %u\n", irq);
+}
+
+/**
+ *	arch_teardown_msi_irq - cleanup a dynamically allocated msi irq
+ *	@irq:	irq number to teardown
+ */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
+#define PCIMSI_SPIN_LOCK_IRQSAVE(lock, flags) raw_spin_lock_irqsave(lock, flags)
+#define PCIMSI_SPIN_UNLOCK_IRQSAVE(lock, flags) raw_spin_unlock_irqrestore(lock, flags)
+#else
+#define PCIMSI_SPIN_LOCK_IRQSAVE(lock, flags) spin_lock_irqsave(lock, flags)
+#define PCIMSI_SPIN_UNLOCK_IRQSAVE(lock, flags) spin_unlock_irqrestore(lock, flags)
+#endif
+
+void
+arch_teardown_msi_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	if (!desc) {
+		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
+		return;
+	}
+
+	PCIMSI_SPIN_LOCK_IRQSAVE(&desc->lock, flags);
+
+	if (desc->action) {
+		PCIMSI_SPIN_UNLOCK_IRQSAVE(&desc->lock, flags);
+		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", irq);
+		return;
+	}
+	desc->msi_desc = NULL;
+	desc->handler_data = NULL;
+	desc->chip_data = NULL;
+	desc->handle_irq = handle_bad_irq;
+	desc->chip = NULL;
+	desc->name = NULL;
+
+	PCIMSI_SPIN_UNLOCK_IRQSAVE(&desc->lock, flags);
+}
+
+
+/**
+ *	arch_setup_msi_irq - setup irq associated with a msi_desc
+ *	 and write the MSI message info to dev config space
+ *	@desc:
+ */
+int
+arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+	printk("%s-%s called\n", __FILE__, __FUNCTION__);
+
+	struct msi_msg msg;
+	unsigned int irq;
+
+	irq = RUBY_IRQ_MSI;
+
+	set_irq_msi(irq, desc);
+
+	msg.address_hi = 0;
+	msg.address_lo = RUBY_PCIE_MSI_REGION;
+
+	msg.data = RUBY_MSI_DATA;
+
+	write_msi_msg(irq, &msg);
+
+	return 0;
+}
+#endif
+
diff --git a/drivers/qtn/ruby/pcibios.c b/drivers/qtn/ruby/pcibios.c
new file mode 100644
index 0000000..f71a759
--- /dev/null
+++ b/drivers/qtn/ruby/pcibios.c
@@ -0,0 +1,554 @@
+/*
+ * (C) Copyright 2011 Quantenna Communications Inc.
+ *
+ * Description	  : ruby PCI bus setup
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <asm/io.h>
+#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/interrupt.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <common/topaz_platform.h>
+#include "pcibios.h"
+
+#define BUS_SCAN_TRY_MAX		1000
+#define PCIE_DEV_LINKUP_MASK TOPAZ_PCIE_LINKUP
+
+
+static int
+rpcic_read_config (struct pci_bus *bus,
+				   unsigned int    devfn,
+				   int			   where,
+				   int			   size,
+				   u32			  *val);
+
+static int
+rpcic_write_config (struct pci_bus	*bus,
+					unsigned int	 devfn,
+					int				 where,
+					int				 size,
+					u32				 val);
+
+
+
+static struct pci_ops rpcic_ops = {
+	.read =		rpcic_read_config,
+	.write =	rpcic_write_config,
+};
+
+typedef struct
+{
+	//TODO: define DW_PCIE_regs?
+} DW_PCIE_regs;
+
+struct ruby_pci
+{
+	DW_PCIE_regs	 *regs;
+	int				  irq;
+	void __iomem	 *cfg_virt;
+	void __iomem	 *cfg0;
+	void __iomem	 *cfg1;
+	void __iomem	 *cfg2;
+	void __iomem	 *cfg3;
+	struct resource  mem_res;
+	struct resource  io_res;
+	struct pci_bus	 *bus;
+};
+
+/* RC mode or EP mode, actually only RC mode in used */
+int pci_mode=0;
+
+/* PCI bus 0 */
+struct ruby_pci rpcib0;
+
+
+static int
+rpcic_read_word(unsigned int  busno,
+				 unsigned int  devfn,
+				 int		   where,
+				 u32		  *val)
+{
+	struct ruby_pci   *rpcic = &rpcib0;
+	volatile uint32_t *addr;
+
+	if (where&3)
+		panic("%s read address not aligned 0x%x\n", __FUNCTION__, where);
+
+	addr = rpcic->cfg_virt + ((devfn&0xff)<<8) + where;
+
+	*val = readl(addr);
+
+	DEBUG("%s addr 0x%X val_addr 0x%X \n", __FUNCTION__, addr, *val);
+	DEBUG("%s bus %u devfn %u \n", __FUNCTION__, busno, devfn);
+
+	return 0;
+}
+
+/*
+ * if addr&0x4 we should do some workaround
+ */
+static int
+rpcic_write_word(unsigned int busno,
+				  unsigned int devfn,
+				  int		   where,
+				  u32		   val)
+{
+	struct ruby_pci    *rpcic = &rpcib0;
+	volatile uint32_t  *addr;
+
+	if(where&3)
+		panic("%s read address not aligned 0x%x\n", __FUNCTION__, where);
+
+	addr = rpcic->cfg_virt +  ((devfn&0xff)<<8) + where;
+#if 0
+	writel(val, addr);
+#else
+   if ((u32)addr & 0x4) {
+		u32 temp = readl(RUBY_PCIE_CONFIG_REGION + 0x10);
+		writel(val,RUBY_PCIE_CONFIG_REGION + 0x10);
+		writel( val,addr);
+		writel(temp,RUBY_PCIE_CONFIG_REGION + 0x10);
+	} else {
+		writel(val, addr);
+	}
+#endif
+
+	DEBUG("%s addr 0x%X -> 0x%X \n", __FUNCTION__, addr, val);
+	DEBUG("%s bus %u devfn %u \n", __FUNCTION__, busno, devfn);
+
+	return 0;
+}
+
+
+/*
+ * Read ruby PCI config space
+ */
+static int
+rpcic_read_config (struct pci_bus *bus,
+				   unsigned int    devfn,
+				   int			   where,
+				   int			   size,
+				   u32			  *value)
+{
+	u32 val;
+	int		 retval = -EINVAL;
+
+	DEBUG("%s called with bno. %d devfn %u where %d size %d \n ",
+		 __FUNCTION__, bus->number, devfn, where, size);
+
+	if (bus->number != 0)
+	   return -EINVAL;
+
+	if (PCI_SLOT(devfn) > 1)
+	   return 0;
+
+	switch (size) {
+		case 1:
+			rpcic_read_word(bus->number, devfn, where&~3, &val);
+			*value = 0xff & (val >> (8*(where & 3)));
+			retval=0;
+			break;
+
+		case 2:
+			if (where&1) return -EINVAL;
+			rpcic_read_word(bus->number, devfn, where&~3, &val);
+			*value = 0xffff & (val >> (8*(where & 3)));
+			retval=0;
+			break;
+
+		case 4:
+			if (where&3) return -EINVAL;
+			rpcic_read_word(bus->number, devfn, where, value);
+			retval=0;
+			break;
+	}
+
+	DEBUG(" value=0x%x\n", *value);
+	return retval;
+}
+
+
+/*
+ * Write ruby PCI config space
+ */
+static int
+rpcic_write_config (struct pci_bus	*bus,
+					unsigned int	 devfn,
+					int				 where,
+					int				 size,
+					u32				 val)
+{
+	u32  tv;
+
+	DEBUG("%s called with bno. %d devfn %u where 0x%x size %d val 0x%x \n",
+			__FUNCTION__, bus->number, devfn, where, size, val);
+
+	if (bus->number != 0)
+		return -EINVAL;
+
+	switch (size) {
+		case 1:
+			rpcic_read_word(bus->number, devfn, where&~3, &tv);
+			tv = (tv & ~(0xff << (8*(where&3)))) | ((0xff&val) << (8*(where&3)));
+			return rpcic_write_word(bus->number, devfn, where&~3, tv);
+
+		case 2:
+			if (where&1) return -EINVAL;
+			rpcic_read_word(bus->number, devfn, where&~3, &tv);
+			tv = (tv & ~(0xffff << (8*(where&3)))) | ((0xffff&val) << (8*(where&3)));
+			return rpcic_write_word(bus->number, devfn, where&~3, tv);
+
+		case 4:
+			if (where&3) return -EINVAL;
+			return rpcic_write_word(bus->number, devfn, where, val);
+	}
+
+	return 0;
+}
+
+static int
+rpcic_find_capability(int cap)
+{
+	uint32_t pos;
+	uint32_t cap_found;
+
+	pos = (readl(RUBY_PCIE_REG_BASE + PCI_CAPABILITY_LIST) & 0x000000ff);
+	while (pos) {
+		cap_found = (readl(RUBY_PCIE_REG_BASE + pos) & 0x0000ffff);
+		if ((cap_found & 0x000000ff)== (uint32_t)cap)
+			break;
+
+		pos = ((cap_found >> 8) & 0x000000ff);
+	}
+
+	return pos;
+}
+/*
+ *  PCI bus scan and initialization
+ */
+static void
+pci_bus_init (void)
+{
+	struct ruby_pci *rpcic = NULL;
+	unsigned int ep_up = 0;
+	int i = 0;
+
+	rpcic = &rpcib0;
+	rpcic->cfg_virt = (void *)RUBY_PCIE_CONFIG_REGION;
+	rpcic->mem_res.name  = "RUBY PCI Memory space";
+	rpcic->mem_res.start = RUBY_PCI_RC_MEM_START;
+	rpcic->mem_res.end	= RUBY_PCI_RC_MEM_START + RUBY_PCI_RC_MEM_WINDOW -1;
+	rpcic->mem_res.flags = IORESOURCE_MEM;
+	rpcic->irq = TOPAZ_IRQ_PCIE;
+
+	if (request_resource(&iomem_resource, &rpcic->mem_res) < 0) {
+		printk(KERN_WARNING "WARNING: Failed to alloc IOMem!\n");
+		goto out;
+	}
+
+	/* waiting for end point linked up in the PCI bus */
+	for (i = 0; i < BUS_SCAN_TRY_MAX; i++) {
+		ep_up = readl(TOPAZ_PCIE_STAT);
+		if ( (ep_up & PCIE_DEV_LINKUP_MASK) == PCIE_DEV_LINKUP_MASK ) {
+			break;
+		}
+		if (i%100 == 0) {
+			printk(KERN_INFO "PCI Bus Scan loop for device link up\n");
+		}
+		udelay(1000);
+	}
+
+	if ( (ep_up & PCIE_DEV_LINKUP_MASK) != PCIE_DEV_LINKUP_MASK ) {
+		printk(KERN_INFO "PCI Bus Scan doesn't find any device link up!\n");
+		return;
+	}
+
+	/* Set RC Max_Payload_Size to 256 for topaz */
+	int pos;
+	uint32_t dev_ctl_sts;
+	pos = rpcic_find_capability(PCI_CAP_ID_EXP);
+	if (!pos) {
+		printk(KERN_ERR "Could not find PCI Express capability in RC config space!\n");
+	} else {
+		dev_ctl_sts = readl(RUBY_PCIE_REG_BASE + pos + PCI_EXP_DEVCTL);
+		dev_ctl_sts = ((dev_ctl_sts & ~PCI_EXP_DEVCTL_PAYLOAD) | BIT(5));
+		writel(dev_ctl_sts, RUBY_PCIE_REG_BASE + pos + PCI_EXP_DEVCTL);
+	}
+
+	if ( (ep_up & PCIE_DEV_LINKUP_MASK) == PCIE_DEV_LINKUP_MASK ) {
+		rpcic->bus = pci_scan_bus(0, &rpcic_ops, rpcic);
+		pci_assign_unassigned_resources();
+		printk(KERN_INFO "PCI Bus Scan completed!\n");
+	} else {
+		printk(KERN_INFO "PCI Bus Scan doesn't find any device link up!\n");
+	}
+
+out:
+	return;
+}
+
+/*
+ * Called after each bus is probed, but before its children are examined.
+ * Enable response for the resource of dev and assign IRQ num
+ */
+void __devinit
+pcibios_fixup_bus (struct pci_bus *bus)
+{
+	struct ruby_pci *rpcic;
+	struct pci_dev	*dev;
+	int				 i, has_io, has_mem;
+	u32				 cmd;
+
+	printk(KERN_INFO "%s called bus name %.10s no %d flags %x next_dev %p \n",
+		 __FUNCTION__, bus->name, bus->number, bus->bus_flags, bus->devices.next);
+
+	rpcic = (struct ruby_pci *) bus->sysdata;
+
+	bus->resource[0] = &rpcic->io_res;
+	bus->resource[1] = &rpcic->mem_res;
+
+	if (bus->number != 0) {
+		printk(KERN_WARNING "pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number);
+		return;
+	}
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		has_io = has_mem = 0;
+
+		for (i=0; i < RUBY_PCIE_BAR_NUM; i++) {
+			unsigned long f = dev->resource[i].flags;
+			if (f & IORESOURCE_IO) {
+				has_io = 1;
+			} else if (f & IORESOURCE_MEM) {
+				has_mem = 1;
+			}
+		}
+		rpcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
+		printk(KERN_INFO "%s: Device [%2x:%2x.%d] has mem %d io %d cmd %x \n", __FUNCTION__,
+				   dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), has_mem, has_io, cmd);
+
+		if (has_io && !(cmd & PCI_COMMAND_IO)) {
+			cmd |= PCI_COMMAND_IO;
+			rpcic_write_config(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
+			printk(KERN_INFO "%s: Enabling I/O for device [%2x:%2x.%d]\n", __FUNCTION__,
+				   dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		}
+		if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
+			cmd |= PCI_COMMAND_MEMORY;
+			rpcic_write_config(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
+			printk(KERN_INFO "%s: Enabling memory for device [%2x:%2x.%d]\n", __FUNCTION__,
+				dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+		}
+
+		dev->irq = rpcic->irq;
+	}
+
+}
+
+/*
+ * pcibios align resources() is called every time generic PCI code
+ * wants to generate a new address. The process of looking for
+ * an available address, each candidate is first "aligned" and
+ * then checked if the resource is available until a match is found.
+ *
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
+resource_size_t
+pcibios_align_resource (void			 *data,
+						const struct resource  *res,
+						resource_size_t  size,
+						resource_size_t  align)
+{
+	struct pci_dev	  *dev	 = data;
+	struct ruby_pci   *rpcic = dev->sysdata;
+	resource_size_t start = res->start;
+
+	DEBUG("%s called\n ", __FUNCTION__);
+
+	if (res->flags & IORESOURCE_IO) {
+		/* Make sure we start at our min on all hoses */
+		if (start < PCIBIOS_MIN_IO + rpcic->io_res.start)
+			start = PCIBIOS_MIN_IO + rpcic->io_res.start;
+
+		/*
+		 * Put everything into 0x00-0xff region modulo 0x400
+		 */
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
+
+	} else if (res->flags & IORESOURCE_MEM) {
+		/* Make sure we start at our min on all hoses */
+		if (start < PCIBIOS_MIN_MEM + rpcic->mem_res.start)
+			start = PCIBIOS_MIN_MEM + rpcic->mem_res.start;
+	}
+
+	return start;
+}
+#else
+void
+pcibios_align_resource (void			 *data,
+						struct resource  *res,
+						resource_size_t  size,
+						resource_size_t  align)
+{
+	struct pci_dev	  *dev	 = data;
+	struct ruby_pci   *rpcic = dev->sysdata;
+	unsigned long	   start = res->start;
+
+	DEBUG("%s called\n ", __FUNCTION__);
+
+	if (res->flags & IORESOURCE_IO) {
+		/* Make sure we start at our min on all hoses */
+		if (start < PCIBIOS_MIN_IO + rpcic->io_res.start)
+			start = PCIBIOS_MIN_IO + rpcic->io_res.start;
+
+		/*
+		 * Put everything into 0x00-0xff region modulo 0x400
+		 */
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
+
+	} else if (res->flags & IORESOURCE_MEM) {
+		/* Make sure we start at our min on all hoses */
+		if (start < PCIBIOS_MIN_MEM + rpcic->mem_res.start)
+			start = PCIBIOS_MIN_MEM + rpcic->mem_res.start;
+	}
+
+	res->start = start;
+}
+#endif
+
+
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ */
+int
+pcibios_enable_device (struct pci_dev *dev,
+						int			 mask)
+{
+	u16				 cmd, old_cmd;
+	int				 idx;
+	struct resource  *r;
+	int pos = 0;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+
+	for (idx=0; idx < RUBY_PCIE_BAR_NUM; idx++) {
+		/* Only set up the requested stuff */
+		if (!(mask & (1<<idx)))
+			continue;
+
+		r = &dev->resource[idx];
+
+		if (!r->start && r->end) {
+			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n",
+				  pci_name(dev));
+			return -EINVAL;
+		}
+
+		if (r->flags & IORESOURCE_IO) {
+			cmd |= PCI_COMMAND_IO;
+			printk(KERN_INFO "Enabling IO\n");
+		}
+
+		if (r->flags & IORESOURCE_MEM) {
+			cmd |= PCI_COMMAND_MEMORY;
+			printk(KERN_INFO "Enabling MEM\n");
+		}
+	}
+
+	if (dev->resource[PCI_ROM_RESOURCE].start)
+		cmd |= PCI_COMMAND_MEMORY;
+
+	if (cmd != old_cmd) {
+		printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+
+	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO|PCI_COMMAND_MEMORY);
+
+	/* Set the device's MSI capability */
+	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	if (!pos) {
+		printk(KERN_ERR "Error locating MSI capability position, using INTx instead\n");
+	} else {
+		/* Setup msi generation info */
+		writel(TOPAZ_PCIE_MSI_REGION, TOPAZ_MSI_ADDR_LOWER);
+		writel(0, TOPAZ_MSI_ADDR_UPPER);
+		writel(BIT(0), TOPAZ_MSI_INT_ENABLE);
+		writel(0, TOPAZ_PCIE_MSI_MASK);
+	}
+
+	return 0;
+}
+
+int
+pcibios_assign_resource (struct pci_dev *pdev,
+						 int			 resource)
+{
+	printk(KERN_INFO "%s called\n", __FUNCTION__);
+	return -ENXIO;
+}
+
+void __init
+pcibios_update_irq (struct pci_dev *dev,
+					int				irq)
+{
+	printk(KERN_INFO "%s called for irq %d\n", __FUNCTION__, irq);
+	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+
+char * __devinit
+pcibios_setup (char *str)
+{
+	printk(KERN_INFO "%s called str %s\n", __FUNCTION__, str);
+	return str;
+}
+
+/**
+ * ruby_pci_init - Initial PCI bus in RC mode
+ */
+static int __init
+ruby_pci_init (void)
+{
+	ruby_pci_create_sysfs();
+	pci_mode = RC_MODE;
+	pci_bus_init();
+
+	return 0;
+}
+
+subsys_initcall(ruby_pci_init);
diff --git a/drivers/qtn/ruby/pcibios.h b/drivers/qtn/ruby/pcibios.h
new file mode 100644
index 0000000..7b11e46
--- /dev/null
+++ b/drivers/qtn/ruby/pcibios.h
@@ -0,0 +1,62 @@
+/**
+ * Copyright (c) 2009-2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#ifndef __RUBY_PCIBIOS_H__
+#define __RUBY_PCIBIOS_H__
+
+typedef union ruby_csr
+{
+    struct reg
+    {
+#if defined(__LITTLE_ENDIAN)
+        int chipid          :8;
+        int pci_dlink       :1;
+        int pci_phylink     :1;
+        int pci_phylink_isr :1;
+        int pci_rst_req     :1;
+        int rsvd_1          :2;
+        int pci_clk_rem     :1;
+        int pci_fatl_err    :1;
+        int rsvd_2          :16;
+#else
+        int rsvd_2          :16;
+        int pci_dlink       :1;
+        int pci_phylink     :1;
+        int pci_phylink_isr :1;
+        int pci_rst_req     :1;
+        int rsvd_1          :2;
+        int pci_clk_rem     :1;
+        int pci_fatl_err    :1;
+        int chipid          :8;
+#endif
+    }r;
+
+    uint32_t data;
+}ruby_csr_t;
+
+#define RC_MODE	1
+#define EP_MODE	2
+
+extern int pci_mode;
+
+extern void
+ruby_pci_create_sysfs (void);
+
+#define DEBUG(...) do{}while(0);
+
+#endif	/* __RUBY_PCIBIOS_H__ */
diff --git a/drivers/qtn/ruby/pcibios_sysfs.c b/drivers/qtn/ruby/pcibios_sysfs.c
new file mode 100644
index 0000000..7c856e7
--- /dev/null
+++ b/drivers/qtn/ruby/pcibios_sysfs.c
@@ -0,0 +1,240 @@
+/**
+ * Copyright (c) 2009-2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <asm/io.h>
+#include <common/topaz_platform.h>
+#include "pcibios.h"
+
+static inline void
+test_config_cycles(void)
+{
+	int i = 0;
+
+	printk("Config Cycles \n");
+	/* print out first 20 config for test */
+	for (i=0;i<20;i++) {
+	   printk(KERN_INFO "[0x%x] = 0x%x\n", ((u32)RUBY_PCIE_CONFIG_REGION+i), *(u8 *)((u8 *)RUBY_PCIE_CONFIG_REGION+i));
+	}
+
+}
+
+static inline void
+test_mem_cycles(void)
+{
+	int i = 0;
+
+	/* r/w first 40 config for test */
+	printk(KERN_INFO "Test mem cycles\n");
+	printk(KERN_INFO "Mem Cycles Reading\n");
+	for (i=0; i<40; i=i+4) {
+		printk(KERN_INFO "[0x%x] = 0x%x\n", ((u32)RUBY_PCI_RC_MEM_START+i), *(u32 *)((u8 *)RUBY_PCI_RC_MEM_START+i));
+
+	}
+	printk(KERN_INFO "Mem Cycles Writing\n");
+	for (i=0; i<40; i=i+4) {
+	   writel(0xa0a0a0a0, ((u32)RUBY_PCI_RC_MEM_START+i));
+
+	}
+	printk(KERN_INFO "Mem Cycles Reading after writing\n");
+	for (i=0; i<40; i=i+4) {
+		printk(KERN_INFO "[0x%x] = 0x%x\n", ((u32)RUBY_PCI_RC_MEM_START+i), *(u32 *)((u8 *)RUBY_PCI_RC_MEM_START+i));
+
+	}
+}
+
+static inline void
+ruby_atu_reg_dump(char *buf, int *len)
+{
+
+	*len += sprintf(buf+(*len), "-- Ruby ATU register dump --\n");
+
+	*len += sprintf(buf+(*len), "View port	  0x%x : 0x%x \n", RUBY_PCIE_ATU_VIEW, readl(RUBY_PCIE_ATU_VIEW));
+	*len += sprintf(buf+(*len), "LBAR		  0x%x : 0x%x \n", RUBY_PCIE_ATU_BASE_LO, readl(RUBY_PCIE_ATU_BASE_LO));
+	*len += sprintf(buf+(*len), "UBAR		  0x%x : 0x%x \n", RUBY_PCIE_ATU_BASE_HI, readl(RUBY_PCIE_ATU_BASE_HI));
+	*len += sprintf(buf+(*len), "LAR		  0x%x : 0x%x \n", RUBY_PCIE_ATU_BASE_LIMIT, readl(RUBY_PCIE_ATU_BASE_LIMIT));
+	*len += sprintf(buf+(*len), "LTAR		  0x%x : 0x%x \n", RUBY_PCIE_ATU_TARGET_LO, readl(RUBY_PCIE_ATU_TARGET_LO));
+	*len += sprintf(buf+(*len), "UTAR		  0x%x : 0x%x \n", RUBY_PCIE_ATU_TARGET_HI, readl(RUBY_PCIE_ATU_TARGET_HI));
+	*len += sprintf(buf+(*len), "CTL1		  0x%x : 0x%x \n", RUBY_PCIE_ATU_CTL1, readl(RUBY_PCIE_ATU_CTL1));
+	*len += sprintf(buf+(*len), "CTL2		  0x%x : 0x%x \n", RUBY_PCIE_ATU_CTL2, readl(RUBY_PCIE_ATU_CTL2));
+
+}
+
+
+static inline void
+ruby_pcie_reg_dump (char *buf, int *len)
+{
+	ruby_csr_t csr;
+
+	printk(KERN_INFO "-- Ruby PCI register dump --\n");
+	*len += sprintf(buf+(*len), "Reset Mask Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_CPU_VEC_MASK,  readl(RUBY_SYS_CTL_CPU_VEC_MASK));
+	*len += sprintf(buf+(*len), "Reset Vect Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_CPU_VEC,  readl(RUBY_SYS_CTL_CPU_VEC));
+	*len += sprintf(buf+(*len), "Cntrl Mask Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_MASK,  readl(RUBY_SYS_CTL_MASK));
+	*len += sprintf(buf+(*len), "Cntrl Vect Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_CTRL,  readl(RUBY_SYS_CTL_CTRL));
+
+	csr.data = readl(RUBY_SYS_CTL_CSR);
+	*len += sprintf(buf+(*len), "CSR		Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_CSR,  csr.data);
+	*len += sprintf(buf+(*len), "Chip id 0x%x  dlink %d phylink %d phyisr %d rst_req %d clk_rem %d fatl_err %d\n",
+		   csr.r.chipid, csr.r.pci_dlink, csr.r.pci_phylink, csr.r.pci_phylink_isr,
+		   csr.r.pci_rst_req, csr.r.pci_clk_rem, csr.r.pci_fatl_err				);
+	*len += sprintf(buf+(*len), "PCIe CFG0	Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_PCIE_CFG0,	readl(RUBY_SYS_CTL_PCIE_CFG0));
+	*len += sprintf(buf+(*len), "PCIe CFG1	Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_PCIE_CFG1,	readl(RUBY_SYS_CTL_PCIE_CFG1));
+	*len += sprintf(buf+(*len), "PCIe CFG2	Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_PCIE_CFG2,	readl(RUBY_SYS_CTL_PCIE_CFG2));
+	*len += sprintf(buf+(*len), "PCIe CFG3	Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_PCIE_CFG3,	readl(RUBY_SYS_CTL_PCIE_CFG3));
+	*len += sprintf(buf+(*len), "PCIe CFG4	Reg 0x%08x = 0x%08x\n",RUBY_SYS_CTL_PCIE_CFG4,	readl(RUBY_SYS_CTL_PCIE_CFG4));
+	*len += sprintf(buf+(*len), "PCIe Int Mask	0x%08x = 0x%08x\n",RUBY_PCIE_INT_MASK,	readl(RUBY_PCIE_INT_MASK));
+}
+
+
+ssize_t	ruby_sysfs_show (struct kobject *kobj, struct attribute *attr,
+						char *buf)
+{
+	int feature = attr->name[0]-'0';
+	int len=0;
+	switch (feature) {
+		case 1: /* get mode */
+			len += sprintf(buf+len, "1 - RC, 2 - EP, Current %d", pci_mode);
+			break;
+
+		case 2: /* dump pcie regs */
+			if (pci_mode)
+				ruby_pcie_reg_dump(buf+len, &len);
+			else
+				len += sprintf(buf+len, "Mode is not set\n");
+
+			break;
+
+		case 3:
+			if (pci_mode)
+				ruby_atu_reg_dump(buf+len, &len);
+			else
+				len += sprintf(buf+len, "Mode is not set\n");
+
+			break;
+
+		default:
+			break;
+	}
+
+	len += sprintf(buf+len, "\n");
+	return len;
+}
+
+ssize_t	ruby_sysfs_store (struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t size)
+{
+	int feature = attr->name[0]-'0';
+	int input	= buf[0]-'0';
+
+	DEBUG("%s -> (%d)%s\n",attr->name,input,buf);
+
+	switch (feature) {
+		case 1: /* Set mode: RC or EP */
+			if (input == 1) {
+				/* RC mode */
+				pci_mode = RC_MODE;
+			}
+			else if (input == 2) {
+				pci_mode = EP_MODE;
+			}
+			else
+				printk(KERN_WARNING "1 - RC, 2 - EP, Cur Mode %d, invalid mode input %d", pci_mode, input);
+
+			break;
+
+		case 2: /* test config in RC mode */
+			if (pci_mode == RC_MODE)
+				test_config_cycles();
+			else
+				printk(KERN_INFO "Test config cycles in RC mode only\n");
+			break;
+		case 3: /* test mem in RC mode */
+			if (pci_mode == RC_MODE) {
+				test_mem_cycles();
+			}
+			else
+				printk(KERN_INFO "Test mem cycles in RC mode only\n");
+			break;
+
+		default:
+			break;
+	}
+	printk(KERN_INFO "\n");
+	return size;
+}
+
+
+static struct sysfs_ops ruby_sysfs_ops =
+{
+	.show = ruby_sysfs_show,
+	.store = ruby_sysfs_store
+};
+
+static struct kobj_type ruby_ktype =
+{
+	.sysfs_ops = &ruby_sysfs_ops
+};
+
+static struct kobject *ruby_kobj;
+static struct attribute ruby_sysfs_attrs[] =
+{
+	{
+		.name = "1_mode",
+		.mode = S_IRUGO|S_IWUGO,
+	},
+	{
+		.name = "2_config",
+		.mode = S_IRUGO|S_IWUGO,
+	},
+	{
+		.name = "3_mem",
+		.mode = S_IRUGO|S_IWUGO,
+	},
+
+};
+
+/*
+** Number of elements in an array
+*/
+#define ArraySize(X)  (sizeof(X)/sizeof(X[0]))
+
+void ruby_pci_create_sysfs (void)
+{
+	int i=0;
+	int file_num = ArraySize(ruby_sysfs_attrs);
+
+	ruby_kobj = kobject_create_and_add("ruby_PCI",NULL);
+	ruby_kobj->ktype = &ruby_ktype;
+
+	for (i=0; i<file_num; i++)
+	  sysfs_create_file(ruby_kobj, &ruby_sysfs_attrs[i]);
+}
+
+void ruby_pci_cleanup_sysfs (void)
+{
+	int i=0;
+	int file_num = ArraySize(ruby_sysfs_attrs);
+
+	for (i=0; i<file_num; i++)
+	  sysfs_remove_file(ruby_kobj, &ruby_sysfs_attrs[i]);
+}
+
+
+
diff --git a/drivers/qtn/ruby/pcie_tst.c b/drivers/qtn/ruby/pcie_tst.c
new file mode 100644
index 0000000..66897bf
--- /dev/null
+++ b/drivers/qtn/ruby/pcie_tst.c
@@ -0,0 +1,319 @@
+/**
+ * Copyright (c) 2009-2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <common/topaz_platform.h>
+
+#define REMOTE_DDR_SIZE 0x100000
+#define REMOTE_DDR_BASE 0xb0000000
+#define REMOTE_ADDR_RANDOM
+
+
+#define REG_READ(reg) readl(reg)
+#if 0
+#define REG_WRITE(reg, val)   do {\
+		printk("\nwrite reg=%08x,val=%08x", reg, val);\
+		writel((val), (reg));\
+		printk(" read back val %08x", REG_READ(reg));\
+					} while(0)
+#else
+#define REG_WRITE(reg, val)  writel((val), (reg))
+#endif
+
+DEFINE_SPINLOCK(pcie_dmawr_lock);
+void *remote_ddr_vbase = NULL;
+
+/******************************************************************************
+ Function:	pcie_dma_wr
+ Purpose:	Use pcie dma to transfer from local mem to remote mem (write channel).
+ Returns:
+ Note:
+ *****************************************************************************/
+void pcie_dma_wr(u32 sar, u32 dar, u32 size)
+{
+	u32 intstat;
+	ulong deadline;
+
+	REG_WRITE(PCIE_DMA_WR_ENABLE, 0x00000001);
+	REG_WRITE(PCIE_DMA_WR_INTMASK, 0x00000000);
+	REG_WRITE(PCIE_DMA_CHNL_CONTEXT, 0x00000000);
+	REG_WRITE(PCIE_DMA_CHNL_CNTRL, 0x04000008);
+	REG_WRITE(PCIE_DMA_XFR_SIZE, size);
+	REG_WRITE(PCIE_DMA_SAR_LOW, sar);
+	REG_WRITE(PCIE_DMA_SAR_HIGH, 0x00000000);
+	REG_WRITE(PCIE_DMA_DAR_LOW, dar);
+	REG_WRITE(PCIE_DMA_DAR_HIGH, 0x00000000);
+	deadline = jiffies + msecs_to_jiffies(1);
+	REG_WRITE(PCIE_DMA_WR_DOORBELL, 0x00000000);
+	//Now check if DMA transfer is done
+	while (((intstat = REG_READ(PCIE_DMA_WR_INTSTS)) & 1) == 0) {
+		if (time_after(jiffies, deadline)) {
+			printk("\nError, Can't get done bit");
+			break;
+		}
+	}
+	intstat = 0;
+	//Clear status bit so can be used for next transfer
+	intstat = intstat | 1;
+	REG_WRITE(PCIE_DMA_WR_INTCLER, intstat);
+	//printk(" INFO : Done DMA WR CHNL");
+}
+
+/******************************************************************************
+ Function:	pcie_dma_rd
+ Purpose:	Use pcie dma to transfer from remote mem to local mem(Read channel).
+ Returns:
+ Note:
+ *****************************************************************************/
+void pcie_dma_rd(u32 sar, u32 dar, u32 size)
+{
+	u32 intstat;
+	ulong deadline;
+
+	printk(" INFO : Start PCIE-DMA programming RD Channel");
+	REG_WRITE(PCIE_DMA_RD_ENABLE, 0x00000001);
+	REG_WRITE(PCIE_DMA_RD_INTMASK, 0x00000000);
+	REG_WRITE(PCIE_DMA_CHNL_CONTEXT, 0x80000000);
+	REG_WRITE(PCIE_DMA_CHNL_CNTRL, 0x04000008);
+	REG_WRITE(PCIE_DMA_XFR_SIZE, size);
+	REG_WRITE(PCIE_DMA_SAR_LOW, sar);
+	REG_WRITE(PCIE_DMA_SAR_HIGH, 0x00000000);
+	REG_WRITE(PCIE_DMA_DAR_LOW, dar);
+	REG_WRITE(PCIE_DMA_DAR_HIGH, 0x00000000);
+	REG_WRITE(PCIE_DMA_RD_DOORBELL, 0x00000000);
+	//Now check if DMA transfer is done
+	deadline = jiffies + msecs_to_jiffies(1);
+	while (((intstat = REG_READ(PCIE_DMA_RD_INTSTS)) & 1) == 0) {
+		if (time_after_eq(jiffies, deadline)) {
+			printk("\nError, Can't get done bit");
+			break;
+		}
+	}
+	//DMA transfer is done. Check packet in SRAM.
+	//Clear status bit so can be used for next transfer
+	intstat = intstat | 1;
+	REG_WRITE(PCIE_DMA_RD_INTCLER, intstat);
+	printk(" INFO : Done DMA RD CHNL");
+}
+
+/* type
+ * 0 ep
+ * 1 rc
+ */
+int pcie_init(u32 type)
+{
+	u32 rdata;
+	REG_WRITE(RUBY_SYS_CTL_CPU_VEC_MASK, RUBY_SYS_CTL_RESET_IOSS | RUBY_SYS_CTL_RESET_PCIE);
+	REG_WRITE(RUBY_SYS_CTL_CPU_VEC, RUBY_SYS_CTL_RESET_IOSS | RUBY_SYS_CTL_RESET_PCIE);
+
+	rdata = 0;
+	rdata = 1 << 16;
+	rdata = rdata + 0xa2;
+	REG_WRITE(PCIE_PORT_LINK_CTL, rdata);
+	rdata = (type) * 4;
+	rdata += (0xff3c941 << 4);
+	REG_WRITE(RUBY_SYS_CTL_PCIE_CFG0, rdata);
+	REG_WRITE(RUBY_SYS_CTL_PCIE_CFG1, 0x00000001);
+	REG_WRITE(RUBY_SYS_CTL_PCIE_CFG2, 0x0);
+	REG_WRITE(RUBY_SYS_CTL_PCIE_CFG3, 0x45220000);
+	REG_WRITE(RUBY_PCIE_CMD_REG, 0x00100007);
+
+	//Now configure ATU in RTL for memory transactions.
+	//Define Region 0 of ATU as inbound Memory.
+	REG_WRITE(PCIE_ATU_VIEWPORT, 0x80000000);
+	//Configure lower 32 bit of start address for translation region as 0000_0000.
+	REG_WRITE(PCIE_ATU_BASE_LOW, 0x00000000);
+	//Configure upper 32 bit of start address for translation region as 0000_0000.
+	REG_WRITE(PCIE_ATU_BASE_HIGH, 0x00000000);
+	//Set translation region limit up to 0x03FF_FFFF (matches value written below)
+	REG_WRITE(PCIE_ATU_BASE_LIMIT, 0x03FFFFFF);
+	//Set target translated address as 0x0000_0000 in lower and upper register.
+	REG_WRITE(PCIE_ATU_TGT_LOW, 0x00000000);
+	REG_WRITE(PCIE_ATU_TGT_HIGH, 0x00000000);
+	//Configure ATU_CONTROL_1 register to Issue MRD/MWR request.
+	REG_WRITE(PCIE_ATU_CTRL1, 0x00000000);
+	//Configure ATU_CONTROL_2 register to Enable Address Translation.
+	//This will match BAR0 for inbound transactions only
+	REG_WRITE(PCIE_ATU_CTRL2, 0xC0000000);
+	//Now configure BAR0 with ATU target address programmed above
+	REG_WRITE(PCIE_BAR0, 0x00000000);
+
+	//Now define region 1 of ATU as outbound memory
+	REG_WRITE(PCIE_ATU_VIEWPORT, 0x00000001);
+	//Configure lower 32 bit of start address for translation region as PCIE address space.
+	REG_WRITE(PCIE_ATU_BASE_LOW, REMOTE_DDR_BASE);
+	//Configure upper 32 bit of start address for translation region as 0000_0000.
+	REG_WRITE(PCIE_ATU_BASE_HIGH, 0x00000000);
+	//Set translation region limit up to 0xB3FF_FFFF (matches value written below)
+	REG_WRITE(PCIE_ATU_BASE_LIMIT, 0xB3FFFFFF);
+	//Set target translated address as 0x0000_0000 in lower and upper register.
+	REG_WRITE(PCIE_ATU_TGT_LOW, 0x00000000);
+	REG_WRITE(PCIE_ATU_TGT_HIGH, 0x00000000);
+	//Configure ATU_CONTROL_1 register to Issue MRD/MWR request.
+	REG_WRITE(PCIE_ATU_CTRL1, 0x00000000);
+	//Configure ATU_CONTROL_2 register to Enable Address Translation.
+	REG_WRITE(PCIE_ATU_CTRL2, 0x80000000);
+
+	return 0;
+}
+
+/* addr1 and addr2 are aligned to 4 bytes */
+inline static int pcie_cmp(u32 addr1, u32 addr2, int size)
+{
+	int count;
+	int rc = 0;
+
+	for (count = 0; count < (size & ~0x3); count += 4) {
+		u32 word1 = *(u32 *) addr1;
+		u32 word2 = *(u32 *) addr2;
+		if (word1 != word2)	{
+			printk("\ncount=%d, size=%d, 0x%08x (0x%08x) != 0x%08x (0x%08x)",
+					count, size, addr1, word1, addr2, word2);
+			rc = -1;
+			break;
+		}
+		addr1 += 4;
+		addr2 += 4;
+	}
+	/* compare the unaligned part in the tail */
+	if (rc == 0) {
+		for (count = (size & ~0x3); count < size; count += 1) {
+			u8 byte1 = *(u8 *) addr1;
+			u8 byte2 = *(u8 *) addr2;
+			if (byte1 != byte2) {
+				printk("\ncount=%d, size=%d, 0x%08x (0x%02x) != 0x%08x (0x%02x)",
+						count, size, addr1, byte1, addr2, byte2);
+				rc = -1;
+				break;
+			}
+			addr1 += 1;
+			addr2 += 1;
+		}
+	}
+
+	return rc;
+}
+
+
+#ifdef REMOTE_ADDR_RANDOM
+
+inline static u32 get_remote_offset(void)
+{
+	u32 addr = random32();
+	addr &= ((REMOTE_DDR_SIZE - 1) & (~0x3));
+	if (addr + 2048 > REMOTE_DDR_SIZE)
+		addr -= 2048;
+	return addr;
+}
+
+#else
+static int remote_addr_offset = 0;
+module_param(remote_addr_offset, int, 0600);
+
+inline static u32 get_remote_offset(void)
+{
+	return (u32)remote_addr_offset;
+}
+
+#endif
+
+void pcie_dma_tst(u32 local_vaddr, u32 local_paddr, int len)
+{
+	u8 *pad;
+	u32 remote_vaddr, remote_paddr;
+	u32 offset;
+	int tmp = local_paddr & 0x3;
+	spin_lock_irq(&pcie_dmawr_lock);
+
+	if (tmp) {
+		local_vaddr = local_vaddr + 4 - tmp;
+		local_paddr = local_paddr + 4 - tmp;
+		len -= 4 - tmp;
+	}
+	offset = get_remote_offset();
+	remote_vaddr = (u32) remote_ddr_vbase + offset;
+	remote_paddr = REMOTE_DDR_BASE + offset;
+	pad = (u8 *) remote_vaddr + len;
+	pad[0] = 0x5a;
+	pad[1] = 0x7e;
+	pad[2] = 0x88;
+	pad[3] = 0xa5;
+
+	if(remote_paddr < 0xb0000000 || remote_paddr >= 0xb0000000 + 0x4000000) {
+		printk("\nRemote DDR address is invalid");
+		goto test_end;
+	}
+
+	pcie_dma_wr(local_paddr, remote_paddr, len);
+	//printk("\nlocal_paddr=%08x, remote_paddr=%08x", local_paddr, remote_paddr);
+
+	if (pcie_cmp(local_vaddr, remote_vaddr, len))
+	{
+		printk(" Data miss match\n");
+		goto test_end;
+	}
+
+	if (pad[0] != 0x5a || pad[1] != 0x7e || pad[2] != 0x88 || pad[3] != 0xa5)
+	{
+		printk("\nBuffer tail was dirted by PCIe DMA");
+	}
+
+test_end:
+	spin_unlock_irq(&pcie_dmawr_lock);
+}
+
+static int __init pcie_tst_init_module(void)
+{
+	int i;
+	unsigned int * ptr;
+
+	pcie_init(1);
+
+	remote_ddr_vbase = ioremap_nocache((ulong) REMOTE_DDR_BASE, REMOTE_DDR_SIZE);
+
+	if (remote_ddr_vbase)
+	{
+		memset(remote_ddr_vbase, 0x7a, 0x10000);
+		ptr = (unsigned int *) remote_ddr_vbase;
+		printk("\n");
+		for (i = 0; i < 20; i++)
+		{
+			printk(" %08x", *ptr++);
+			if (i % 8 == 0)
+				printk("\n");
+		}
+	}
+
+	return 0;
+}
+
+static void __exit  pcie_tst_cleanup_module(void)
+{
+	if (remote_ddr_vbase) {
+		iounmap(remote_ddr_vbase);
+	}
+}
+
+module_init(pcie_tst_init_module);
+module_exit(pcie_tst_cleanup_module);
+
diff --git a/drivers/qtn/ruby/plat_dma_addr.h b/drivers/qtn/ruby/plat_dma_addr.h
new file mode 100644
index 0000000..8321d38
--- /dev/null
+++ b/drivers/qtn/ruby/plat_dma_addr.h
@@ -0,0 +1,49 @@
+/*************************************************************************
+ * Copyright ARC International (www.arc.com) 2007-2009
+ *
+ * Vineetg: Feb 2009
+ *  -Reworked the API to work with ARC PCI Bridge
+ *
+ * vineetg: Feb 2009
+ *  -For AA4 board, kernel to DMA address APIs
+ ************************************************************************/
+
+/* Some notes on DMA <=> kernel address generation
+ *
+ * A simplistic implementation will generate 0 based bus address.
+ * For e.g. 0x8AAA_0000 becomes 0x0AAA_0000 bus addr
+ * However this doesn't work with PCI devices behind the PCI Host Bridge on AA4
+ * which can't allow 0 based addresses. So the API for special case of PCI
+ * makes corrections
+ *
+ * As a small optimisation, if PCI is not enabled we can simply return
+ * 0 based bus addr hence the CONFIG_xx check for PCI Host Bridge
+ */
+
+#ifndef __BOARD_RUBY_PLAT_DMA_ADDR_H
+#define __BOARD_RUBY_PLAT_DMA_ADDR_H
+
+#include <linux/device.h>
+
+static inline unsigned long plat_dma_addr_to_kernel(struct device *dev, dma_addr_t dma_addr)
+{
+      return (unsigned long)bus_to_virt(dma_addr);
+}
+
+static inline dma_addr_t plat_kernel_addr_to_dma(struct device *dev, void *ptr)
+{
+    /*
+     * To catch buggy drivers which can call the DMA map API with a kernel vaddr,
+     * i.e. for buffers allocated via vmalloc or ioremap, which are not guaranteed
+     * to be physically contiguous and hence unfit for DMA anyway.
+     * On ARC the kernel virtual address range is 0x7000_0000 to 0x7FFF_FFFF, so
+     * ideally we want to check this range here, but our implementation is
+     * better as it also catches even worse user virtual addresses.
+     */
+    BUG_ON(ptr < (void*)PAGE_OFFSET);
+
+    return virt_to_bus(ptr);
+}
+
+#endif // #ifndef __BOARD_RUBY_PLAT_DMA_ADDR_H
+
diff --git a/drivers/qtn/ruby/plat_irq.h b/drivers/qtn/ruby/plat_irq.h
new file mode 100644
index 0000000..aaef029
--- /dev/null
+++ b/drivers/qtn/ruby/plat_irq.h
@@ -0,0 +1,39 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __BOARD_RUBY_PLAT_IRQ_H
+#define __BOARD_RUBY_PLAT_IRQ_H
+
+#include "platform.h"
+
+#define NR_IRQS 64
+
+#define TIMER0_INT RUBY_IRQ_CPUTIMER0
+#define TIMER1_INT RUBY_IRQ_CPUTIMER1
+
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 /* tell Linux that we keep IRQs disabled while in interrupt */
+
+#define __ARCH_USE_SOFTIRQ_DYNAMIC_MAX_RESTART /* balance how many times SoftIRQ be restarted before offload to softirqd */
+//#define __ARCH_USE_SOFTIRQ_BALANCE /* more aggressively offload SoftIRQ processing to softirqd */
+
+#endif // #ifndef __BOARD_RUBY_PLAT_IRQ_H
+
diff --git a/drivers/qtn/ruby/plat_memmap.h b/drivers/qtn/ruby/plat_memmap.h
new file mode 100644
index 0000000..fd6325e
--- /dev/null
+++ b/drivers/qtn/ruby/plat_memmap.h
@@ -0,0 +1,29 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __BOARD_RUBY_PLAT_MEMMAP_H
+#define __BOARD_RUBY_PLAT_MEMMAP_H
+
+#include "platform.h"
+
+#endif // #ifndef __BOARD_RUBY_PLAT_MEMMAP_H
+
diff --git a/drivers/qtn/ruby/platform.h b/drivers/qtn/ruby/platform.h
new file mode 100644
index 0000000..55e225a
--- /dev/null
+++ b/drivers/qtn/ruby/platform.h
@@ -0,0 +1,32 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __BOARD_RUBY_PLATFORM_H
+#define __BOARD_RUBY_PLATFORM_H
+
+#include <linux/types.h>
+
+#include <common/ruby_mem.h>
+#include <common/topaz_platform.h>
+
+#endif // #ifndef __BOARD_RUBY_PLATFORM_H
+
diff --git a/drivers/qtn/ruby/pm.c b/drivers/qtn/ruby/pm.c
new file mode 100644
index 0000000..99778bd
--- /dev/null
+++ b/drivers/qtn/ruby/pm.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) Quantenna Communications Incorporated 2012.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ########################################################################
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <asm/board/pm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "pm.h"
+
+#define STR_HELPER(x)			#x
+#define STR(x)				STR_HELPER(x)
+
+#define PM_PROC_FILE_NAME		"soc_pm"
+#define PM_EMAC_PROC_FILE_NAME		"emac_pm"
+
+#define PM_PROC_ADD_CMD			"add"
+#define PM_PROC_UPDATE_CMD		"update"
+#define PM_PROC_REMOVE_CMD		"remove"
+
+#define PM_MAX_NAME_LEN			64
+
+#define PM_PROC_PARSE_ADD_CMD		PM_PROC_ADD_CMD" %"STR(PM_MAX_NAME_LEN)"s %d"		/* syntax: "add who_ask NNN" */
+#define PM_PROC_PARSE_UPDATE_CMD	PM_PROC_UPDATE_CMD" %"STR(PM_MAX_NAME_LEN)"s %d"	/* syntax: "update who_ask NNN" */
+#define PM_PROC_PARSE_REMOVE_CMD	PM_PROC_REMOVE_CMD" %"STR(PM_MAX_NAME_LEN)"s"		/* syntax: "remove who_ask" */
+
+static struct workqueue_struct *pm_wq;
+static DEFINE_SPINLOCK(pm_wq_lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+struct qtn_pm_req_list {
+	struct list_head list;
+	int class_id;
+	char req_name[16];
+	struct pm_qos_request req;
+};
+static LIST_HEAD(qtn_pm_req_list_head);
+static struct pm_qos_request *pm_emac_req;
+#else
+static struct pm_qos_request_list *pm_emac_req;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static ssize_t pm_write_proc(struct file *file, const char __user *buffer,
+		size_t count, loff_t *f_pos)
+#else
+static int pm_write_proc(struct file *file, const char __user *buffer,
+				unsigned long count, void *data)
+#endif
+{
+	char cmd[PM_MAX_NAME_LEN + 32];
+	char name[PM_MAX_NAME_LEN + 1];
+	int val;
+	int ret = 0;
+
+	if (!count) {
+		return -EINVAL;
+	} else if (count > sizeof(cmd) - 1) {
+		return -EINVAL;
+	} else if (copy_from_user(cmd, buffer, count)) {
+		return -EFAULT;
+	}
+	cmd[count] = '\0';
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	switch ((int)PDE_DATA(file_inode(file))) {
+#else
+	switch ((int)data) {
+#endif
+	case PM_QOS_POWER_SAVE:
+		if (sscanf(cmd, PM_PROC_PARSE_ADD_CMD, name, &val) == 2) {
+			ret = pm_qos_add_requirement(PM_QOS_POWER_SAVE, name, val);
+		} else if (sscanf(cmd, PM_PROC_PARSE_UPDATE_CMD, name, &val) == 2) {
+			ret = pm_qos_update_requirement(PM_QOS_POWER_SAVE, name, val);
+		} else if (sscanf(cmd, PM_PROC_PARSE_REMOVE_CMD, name) == 1) {
+			pm_qos_remove_requirement(PM_QOS_POWER_SAVE, name);
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case PM_QOS_POWER_EMAC:
+		if (sscanf(cmd, "%d", &val) == 1) {
+			pm_qos_update_request(pm_emac_req, val);
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret ? ret : count;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static int pm_show(struct seq_file *seq, void *v)
+{
+	int class_id = (int)seq->private;
+
+	seq_printf(seq, "class=%d, level=%d\n", class_id, pm_qos_requirement(class_id));
+	return 0;
+}
+
+static int pm_open_proc(struct inode *inode, struct file *file)
+{
+	return single_open(file, pm_show, PDE_DATA(inode));
+}
+
+static const struct file_operations pm_save_fops = {
+	.owner = THIS_MODULE,
+	.open = pm_open_proc,
+	.write = pm_write_proc,
+	.read = seq_read,
+};
+#else
+static int pm_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	return sprintf(page, "%d", pm_qos_requirement((int)data));
+}
+#endif
+
+static int __init pm_create_proc(void)
+{
+	struct proc_dir_entry *entry;
+	struct proc_dir_entry *emac_proc_entry;
+	/*
+	 * Proc interface to change power save levels.
+	 * Main purpose is debugging.
+	 * Other kernel modules can directly use pm_qos_*() functions.
+	 * User-space application (safe way) can open misc character device
+	 * (major number 10, minor see by "cat /proc/misc")
+	 * provided by PM QoS kernel module and control through it.
+	 * It is safe because 'name' is chosen based on PID,
+	 * and when application quit all its requests are removed.
+	 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	entry = proc_create_data(PM_PROC_FILE_NAME, 0600, NULL, &pm_save_fops, (void *)PM_QOS_POWER_SAVE);
+	if (!entry) {
+		return -ENODEV;
+	}
+
+	emac_proc_entry = proc_create_data(PM_EMAC_PROC_FILE_NAME, 0600, NULL, &pm_save_fops, (void *)PM_QOS_POWER_EMAC);
+	if (!emac_proc_entry) {
+		return -ENODEV;
+	}
+#else
+	entry = create_proc_entry(PM_PROC_FILE_NAME, 0600, NULL);
+	if (!entry) {
+		return -ENODEV;
+	}
+	entry->write_proc = pm_write_proc;
+	entry->read_proc = pm_read_proc;
+	entry->data = (void *)PM_QOS_POWER_SAVE;
+
+	emac_proc_entry = create_proc_entry(PM_EMAC_PROC_FILE_NAME, 0600, NULL);
+	if (!emac_proc_entry) {
+		return -ENODEV;
+	}
+	emac_proc_entry->write_proc = pm_write_proc;
+	emac_proc_entry->read_proc = pm_read_proc;
+	emac_proc_entry->data = (void *)PM_QOS_POWER_EMAC;
+#endif
+	return 0;
+}
+
+static void __init pm_create_wq(void)
+{
+	pm_wq = create_workqueue("ruby_pm");
+}
+
+static int __init pm_init(void)
+{
+	pm_qos_add_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_QCSAPI, BOARD_PM_LEVEL_INIT);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if(NULL == (pm_emac_req = kzalloc(sizeof(*pm_emac_req), GFP_KERNEL))) {
+		return -ENOMEM;
+	}
+	pm_qos_add_request(pm_emac_req, PM_QOS_POWER_EMAC, BOARD_PM_LEVEL_NO);
+#else
+	pm_emac_req = pm_qos_add_request(PM_QOS_POWER_EMAC, BOARD_PM_LEVEL_NO);
+	if (!pm_emac_req)
+		return -ENOMEM;
+#endif
+	pm_create_wq();
+	return pm_create_proc();
+}
+arch_initcall(pm_init);
+
+static int __pm_cancel_work(struct delayed_work *dwork)
+{
+	int ret = 0;
+
+	if (delayed_work_pending(dwork)) {
+		cancel_delayed_work(dwork);
+		ret = 1;
+	} else if (work_pending(&dwork->work)) {
+		cancel_work_sync(&dwork->work);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+int pm_queue_work(struct delayed_work *dwork, unsigned long delay)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&pm_wq_lock, flags);
+
+	ret = __pm_cancel_work(dwork);
+	queue_delayed_work(pm_wq, dwork, delay);
+
+	spin_unlock_irqrestore(&pm_wq_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(pm_queue_work);
+
+int pm_cancel_work(struct delayed_work *dwork)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&pm_wq_lock, flags);
+
+	ret = __pm_cancel_work(dwork);
+
+	spin_unlock_irqrestore(&pm_wq_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(pm_cancel_work);
+
+int pm_flush_work(struct delayed_work *dwork)
+{
+	might_sleep();
+	return cancel_delayed_work_sync(dwork);
+}
+EXPORT_SYMBOL(pm_flush_work);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,24)
+int pm_qos_add_requirement(int pm_qos_class, const char *name, s32 value)
+{
+	struct qtn_pm_req_list *req_list = NULL;
+
+	if ((NULL == name) || (pm_qos_class < PM_QOS_POWER_SAVE)
+		|| (pm_qos_class > PM_QOS_POWER_EMAC)) {
+		return -EINVAL;
+	}
+
+	list_for_each_entry(req_list, &qtn_pm_req_list_head, list) {
+		if ((pm_qos_class == req_list->class_id)
+			&& !strncmp(name, req_list->req_name, sizeof(req_list->req_name))) {
+			return -EEXIST;
+		}
+	}
+
+	if(NULL == (req_list = kzalloc(sizeof(*req_list), GFP_KERNEL))) {
+		return -ENOMEM;
+	}
+	req_list->class_id = pm_qos_class;
+	strncpy(req_list->req_name, name, sizeof(req_list->req_name));
+
+	pm_qos_add_request(&req_list->req, pm_qos_class, value);
+
+	list_add(&req_list->list, &qtn_pm_req_list_head);
+	return 0;
+}
+EXPORT_SYMBOL(pm_qos_add_requirement);
+
+int pm_qos_update_requirement(int pm_qos_class, const char *name, s32 new_value)
+{
+	struct qtn_pm_req_list *req_list = NULL;
+
+	if ((NULL == name) || (pm_qos_class < PM_QOS_POWER_SAVE)
+		|| (pm_qos_class > PM_QOS_POWER_EMAC)) {
+		return -EINVAL;
+	}
+
+	printk("%s: name=%s, value=%d\n", __func__, name, new_value);
+
+	list_for_each_entry(req_list, &qtn_pm_req_list_head, list) {
+		if ((pm_qos_class == req_list->class_id)
+			&& !strncmp(name, req_list->req_name, sizeof(req_list->req_name))) {
+			pm_qos_update_request(&req_list->req, new_value);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(pm_qos_update_requirement);
+
+void pm_qos_remove_requirement(int pm_qos_class, const char *name)
+{
+	struct qtn_pm_req_list *req_list = NULL;
+
+	if ((NULL == name) || (pm_qos_class < PM_QOS_POWER_SAVE)
+		|| (pm_qos_class > PM_QOS_POWER_EMAC)) {
+		return ;
+	}
+
+	list_for_each_entry(req_list, &qtn_pm_req_list_head, list) {
+		if ((pm_qos_class == req_list->class_id)
+			&& !strncmp(name, req_list->req_name, sizeof(req_list->req_name))) {
+			break;
+		}
+	}
+	pm_qos_remove_request(&req_list->req);
+	list_del(&req_list->list);
+	kfree(req_list);
+}
+EXPORT_SYMBOL(pm_qos_remove_requirement);
+
+int pm_qos_requirement(int pm_qos_class)
+{
+	return pm_qos_request(pm_qos_class);
+}
+EXPORT_SYMBOL(pm_qos_requirement);
+#endif
+
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/qtn/ruby/pm.h b/drivers/qtn/ruby/pm.h
new file mode 100644
index 0000000..3825f82
--- /dev/null
+++ b/drivers/qtn/ruby/pm.h
@@ -0,0 +1,50 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+#ifndef __BOARD_RUBY_PM_H
+#define __BOARD_RUBY_PM_H
+
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	#include <linux/pm_qos.h>
+#else
+	#include <linux/pm_qos_params.h>
+#endif
+
+#include <common/ruby_pm.h>
+
+
+int pm_queue_work(struct delayed_work *dwork, unsigned long delay);
+int pm_cancel_work(struct delayed_work *dwork);
+int pm_flush_work(struct delayed_work *dwork);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,24)
+int pm_qos_add_requirement(int pm_qos_class, const char *name, s32 value);
+int pm_qos_update_requirement(int pm_qos_class, const char *name, s32 new_value);
+void pm_qos_remove_requirement(int pm_qos_class, const char *name);
+int pm_qos_requirement(int pm_qos_class);
+#endif
+
+#endif // #ifndef __BOARD_RUBY_PM_H
+
diff --git a/drivers/qtn/ruby/rtl8367b/rtl8367b_init.h b/drivers/qtn/ruby/rtl8367b/rtl8367b_init.h
new file mode 100644
index 0000000..8cec685
--- /dev/null
+++ b/drivers/qtn/ruby/rtl8367b/rtl8367b_init.h
@@ -0,0 +1,30 @@
+#ifndef _RTL8367B_INIT_H_
+#define _RTL8367B_INIT_H_
+
+#include <linux/types.h>
+
+#if defined(CONFIG_SWITCH_RTL8363SB) || defined(CONFIG_SWITCH_RTL8363SB_MODULE)
+#define CHIP_RTL8363SB
+#define RTL_SWITCH
+#define RTL_SWITCH_NAME "RTL8363SB"
+#endif
+
+#if defined(CONFIG_SWITCH_RTL8365MB) || defined(CONFIG_SWITCH_RTL8365MB_MODULE)
+#define CHIP_RTL8365MB
+#define RTL_SWITCH
+#define RTL_SWITCH_NAME "RTL8365MB"
+#endif
+
+struct mii_bus;
+int rtl8367b_init(struct mii_bus *mii,
+		int (*internal_read)(struct mii_bus *bus, int phy_addr, int reg),
+		int (*internal_write)(struct mii_bus *bus, int phy_addr, int reg, uint16_t value),
+		uint32_t emac_cfg, int port);
+void rtl8367b_exit(void);
+void rtl8367b_dump_stats(void);
+void rtl8367b_dump_status(void);
+int rtl8367b_ext_port_enable(int emac);
+int rtl8367b_ext_port_disable(int emac);
+
+#endif	// _RTL8367B_INIT_H_
+
diff --git a/drivers/qtn/ruby/serial.c b/drivers/qtn/ruby/serial.c
new file mode 100644
index 0000000..387da7d
--- /dev/null
+++ b/drivers/qtn/ruby/serial.c
@@ -0,0 +1,93 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#include <asm/board/platform.h>
+#include <asm/board/gpio.h>
+#include <asm/board/board_config.h>
+
+static struct plat_serial8250_port ruby_data[] =
+{
+	{
+		.iotype		= UPIO_DWAPB,
+		.private_data	= (void*)RUBY_UART0_USR,
+		.flags		= (UPF_BOOT_AUTOCONF | UPF_SPD_FLAG),
+		.mapbase	= RUBY_UART0_BASE_ADDR,
+		.membase	= (void*)RUBY_UART0_BASE_ADDR,
+		.irq		= RUBY_IRQ_UART0,
+		.uartclk	= CONFIG_ARC700_DEV_CLK,
+		.regshift	= 2,
+	},
+	{
+		.iotype		= UPIO_DWAPB,
+		.private_data	= (void*)RUBY_UART1_USR,
+		.flags		= (UPF_BOOT_AUTOCONF | UPF_SPD_FLAG),
+		.mapbase	= RUBY_UART1_BASE_ADDR,
+		.membase	= (void*)RUBY_UART1_BASE_ADDR,
+		.irq		= RUBY_IRQ_UART1,
+		.uartclk	= CONFIG_ARC700_DEV_CLK,
+		.regshift	= 2,
+	},
+	{}
+};
+
+static struct platform_device ruby_uart =
+{
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_PLATFORM,
+	.dev			= {
+		.platform_data	= ruby_data,
+	},
+};
+
+static int __init setup_console(void)
+{
+	int use_uart1 = 0;
+	int ret;
+	
+	if (get_board_config(BOARD_CFG_UART1, &use_uart1) != 0) {
+		printk(KERN_ERR "get_board_config returned error status for UART1\n");
+	}
+
+	/* if uart1 is not requested, remove it from the device list so ttyS1 doesn't appear */
+	if (!use_uart1) {
+		memset(&ruby_data[1], 0, sizeof(struct plat_serial8250_port));
+	}
+
+	/* register device */
+	ret = platform_device_register(&ruby_uart);
+
+	/* configure GPIOs */
+	if(!ret) {
+		gpio_uart0_config();
+		if (use_uart1) {
+			gpio_uart1_config();
+		}
+	}
+
+	return ret;
+}
+arch_initcall(setup_console);
+
diff --git a/drivers/qtn/ruby/skb_recycle.c b/drivers/qtn/ruby/skb_recycle.c
new file mode 100644
index 0000000..26b6f5b
--- /dev/null
+++ b/drivers/qtn/ruby/skb_recycle.c
@@ -0,0 +1,116 @@
+/**
+ * Copyright (c) 2011 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <ruby_mem.h>
+#include <qtn/skb_recycle.h>
+#include <linux/proc_fs.h>
+
+#ifdef CONFIG_QTN_SKB_RECYCLE
+
+#define IEEE80211_SKB_LIST_MAX_DEFAULT 1024
+#define SKB_RECYCLE_MAX_PROC "qtn_skb_recycle_max"
+#define SKB_RECYCLE_MAX_CMD_LEN 8
+
+struct proc_dir_entry *skb_recycle_max_proc = NULL;
+struct qtn_skb_recycle_list __sram_data __qtn_skb_recycle_list;
+EXPORT_SYMBOL(__qtn_skb_recycle_list);
+
+static int
+skb_recycle_max_read(char *buffer, char **buffer_location, off_t offset,
+	int buffer_length, int *eof, void *data)
+{
+	int len = 0;
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+
+	if (recycle_list)
+		len = sprintf(buffer, "%d\n", recycle_list->max);
+
+	return len;
+}
+
+static int
+skb_recycle_max_write(struct file *file, const char *buffer,
+	unsigned long count, void *data)
+{
+	char tmp[SKB_RECYCLE_MAX_CMD_LEN] = {0};
+	uint32_t max = 0;
+
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+
+	if (!recycle_list)
+		goto out;
+
+	if (count >= SKB_RECYCLE_MAX_CMD_LEN) {
+		printk(KERN_ERR "%s: Invalid parameters\n", __FUNCTION__);
+		goto out;
+	}
+
+	if (copy_from_user(tmp, buffer, count)) {
+		printk(KERN_ERR "%s: Failed\n", __FUNCTION__);
+		goto out;
+	}
+
+	if (sscanf(tmp, "%u", &max) == 1) {
+		recycle_list->max = max;
+	} else {
+		printk(KERN_ERR "Invalid parameter: %s\n", tmp);
+	}
+
+out:
+	return count;
+}
+
+static int __sram_text skb_list_recycle(struct qtn_skb_recycle_list *recycle_list, struct sk_buff *skb)
+{
+	/*
+	 * must run kernel recycle check to clean up the buffers freed here
+	 */
+	if (skb_recycle_check(skb, qtn_rx_buf_size()) &&
+			qtn_skb_recycle_list_push(recycle_list, &recycle_list->stats_kfree, skb)) {
+		return 1;
+	}
+	return 0;
+}
+
+static int __init qtn_recycle_list_init(void)
+{
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+
+	skb_queue_head_init(&recycle_list->list);
+	recycle_list->max = IEEE80211_SKB_LIST_MAX_DEFAULT;
+	recycle_list->recycle_func = &skb_list_recycle;
+
+	skb_recycle_max_proc = create_proc_entry(SKB_RECYCLE_MAX_PROC, 0x644, NULL);
+	if (skb_recycle_max_proc == NULL) {
+		printk(KERN_ERR "unable to create /proc/%s\n", SKB_RECYCLE_MAX_PROC);
+		return 0;
+	}
+	skb_recycle_max_proc->read_proc = skb_recycle_max_read;
+	skb_recycle_max_proc->write_proc = skb_recycle_max_write;
+	skb_recycle_max_proc->mode = S_IFREG | S_IRUGO;
+	skb_recycle_max_proc->uid = 0;
+	skb_recycle_max_proc->gid = 0;
+	skb_recycle_max_proc->size = 0x1000;
+	skb_recycle_max_proc->data = NULL;
+
+	return 0;
+}
+
+arch_initcall(qtn_recycle_list_init);
+#endif
diff --git a/drivers/qtn/ruby/soc.c b/drivers/qtn/ruby/soc.c
new file mode 100644
index 0000000..694da65
--- /dev/null
+++ b/drivers/qtn/ruby/soc.c
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) Quantenna Communications Incorporated 2010.
+ *  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+
+#include <asm/board/soc.h>
+#include <asm/hardware.h>
+
+#include "qtn/shared_params.h"
+
+#include "qdrv_sch_const.h"
+#include "qtn/qdrv_sch.h"
+static char board_id[16] = "unknown";
+static unsigned char ethernet_addr[ETH_ALEN] = {0x00, 0x08, 0x55, 0x41, 0x00, 0x00};
+
+int global_disable_wd = 0;
+
+__sram_data uint8_t qdrv_sch_tos2ac[] = {
+		QDRV_BAND_AC_BE,
+		QDRV_BAND_AC_BK,
+		QDRV_BAND_AC_BK,
+		QDRV_BAND_AC_BE,
+		QDRV_BAND_AC_VI,
+		QDRV_BAND_AC_VI,
+		QDRV_BAND_AC_VO,
+		QDRV_BAND_AC_VO
+};
+EXPORT_SYMBOL(qdrv_sch_tos2ac);
+
+__sram_data uint8_t qdrv_sch_dscp2tid[QTN_MAX_BSS_VAPS][IP_DSCP_MAPPING_SIZE] = {{0}};
+EXPORT_SYMBOL(qdrv_sch_dscp2tid);
+
+__sram_data uint16_t qdrv_sch_vlan2index[QTN_MAX_BSS_VAPS] = {
+	VLANID_INDEX_INITVAL,
+	VLANID_INDEX_INITVAL,
+	VLANID_INDEX_INITVAL,
+	VLANID_INDEX_INITVAL,
+	VLANID_INDEX_INITVAL,
+	VLANID_INDEX_INITVAL,
+	VLANID_INDEX_INITVAL,
+	VLANID_INDEX_INITVAL
+};
+EXPORT_SYMBOL(qdrv_sch_vlan2index);
+
+__sram_data uint8_t qdrv_vap_vlan_max = 0;
+EXPORT_SYMBOL(qdrv_vap_vlan_max);
+
+struct shared_params *soc_shared_params = NULL;
+EXPORT_SYMBOL(soc_shared_params);
+
+unsigned global_auc_config = SHARED_PARAMS_AUC_CONFIG_ASSERT_EN;
+EXPORT_SYMBOL(global_auc_config);
+
+int soc_id(void)
+{
+	/* Return the SOC we are running on, starting from 1 */
+	return 1;
+}
+EXPORT_SYMBOL(soc_id);
+
+u32 chip_id(void)
+{
+	return readl(IO_ADDRESS(RUBY_SYS_CTL_CSR));
+}
+EXPORT_SYMBOL(chip_id);
+
+const char* get_board_id(void)
+{
+	return board_id;
+}
+EXPORT_SYMBOL(get_board_id);
+
+const unsigned char* get_ethernet_addr(void)
+{
+	static int once = 0;
+	if (once == 0) {
+		get_random_bytes(&ethernet_addr[5], 1);
+		printk("Random stuff %02X\n", ethernet_addr[5]);
+		once = 1;
+	}
+	return ethernet_addr;
+}
+EXPORT_SYMBOL(get_ethernet_addr);
+
+static int __init
+setup_disable_wd(char *buf)
+{
+	if (buf != NULL) {
+		if (sscanf(buf, "%d", &global_disable_wd) != 1) {
+			printk(KERN_WARNING"Expecting integer value(0/1)\n");
+		}
+	}
+	return 0;
+}
+early_param("disable_wd", setup_disable_wd);
+
+static int __init
+setup_auc_config(char *buf)
+{
+	if (buf != NULL) {
+		if (sscanf(buf, "%x", &global_auc_config) != 1) {
+			printk(KERN_WARNING"auc_config expecting hexadecimal integer\n");
+		}
+	}
+	return 0;
+}
+early_param("auc_config", setup_auc_config);
+
diff --git a/drivers/qtn/ruby/soc.h b/drivers/qtn/ruby/soc.h
new file mode 100644
index 0000000..c2696bd
--- /dev/null
+++ b/drivers/qtn/ruby/soc.h
@@ -0,0 +1,54 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+#ifndef __BOARD_RUBY_SOC_H
+#define __BOARD_RUBY_SOC_H
+
+#include <common/topaz_platform.h>
+#include <asm/hardware.h>
+#include <linux/init.h>
+
+int soc_id(void);
+u32 chip_id(void);
+
+const char *get_board_id(void);
+const unsigned char* get_ethernet_addr(void);
+extern int global_disable_wd;
+extern unsigned global_auc_config;
+
+/* Following macro sets the watchdog timer for 5 Seconds */
+static inline void init_watchdog_timer(void)
+{
+	if (!global_disable_wd) {
+		writel(0xC, IO_ADDRESS(RUBY_WDT_TIMEOUT_RANGE));
+		writel((RUBY_WDT_ENABLE_IRQ_WARN | RUBY_WDT_ENABLE), IO_ADDRESS(RUBY_WDT_CTL));
+		writel(RUBY_WDT_MAGIC_NUMBER, IO_ADDRESS(RUBY_WDT_COUNTER_RESTART));
+	}
+}
+static inline void pet_watchdog_timer(void)
+{
+	if (!global_disable_wd) {
+		writel(RUBY_WDT_MAGIC_NUMBER, IO_ADDRESS(RUBY_WDT_COUNTER_RESTART));
+	}
+}
+
+#endif // #ifndef __BOARD_RUBY_SOC_H
+
diff --git a/drivers/qtn/ruby/spi_api.c b/drivers/qtn/ruby/spi_api.c
new file mode 100644
index 0000000..a79e7d7
--- /dev/null
+++ b/drivers/qtn/ruby/spi_api.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2010 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ *  SPI driver
+ */
+
+
+///////////////////////////////////////////////////////////////////////////////
+//             Includes
+///////////////////////////////////////////////////////////////////////////////
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <common/ruby_platform.h>
+#include <common/ruby_spi_api.h>
+#include "spi_api.h"
+
+
+///////////////////////////////////////////////////////////////////////////////
+//              Types
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+//             Globals
+///////////////////////////////////////////////////////////////////////////////
+
+/******************************************************************************
+Function:spi_protect_mode
+Purpose:check whether the JEDEC ID is supported, and whether the device supports protect mode
+Returns:0 or < 0
+Note:
+*****************************************************************************/
+int spi_protect_mode(struct flash_info *device)
+{
+	uint32_t spi_ctrl_val;
+	int status = EOPNOTSUPP;
+
+	printk(KERN_INFO "SPI device:%0x\n",device->jedec_id);
+	switch(device->single_unprotect_mode){
+		case MACRONIX:
+			spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+			if (spi_read_status() & SPI_SR_QUAD_MODE) {
+				printk(KERN_INFO "SPI device is in Quad mode\n");
+				writel((SPI_SR_QUAD_MODE_MASK(spi_ctrl_val)), RUBY_SPI_CONTROL);
+			} else {
+				writel(SPI_SR_SINGLE_MODE_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+			}
+
+			if (spi_read_scur() & SPI_SCUR_WPSEL) {
+				printk(KERN_INFO "SPI Device Is In Protected Mode\n");
+				status = 0;
+			} else {
+				printk(KERN_INFO "Setting SPI Device To Protected Mode..... \n");
+				spi_write_prot_select(device);
+				/*
+				 * This is the place where we check if Device
+				 * support individual unprotect sector
+				 */
+
+				if (spi_read_scur() & SPI_SCUR_WPSEL) {
+					printk(KERN_INFO "SPI Device Is In Protected Mode\n");
+					status = 0;
+				} else {
+					printk(KERN_INFO "SPI Device Do Not Support Individual Unprotect Mode\n");
+					status = EOPNOTSUPP;
+				}
+			}
+			break;
+		case WINBOND:
+                        if (((spi_read_wps()) & SPI_WPS_SELECT)){
+				printk(KERN_INFO "SPI Device Is In Protected Mode\n");
+                                status = 0;
+                        } else {
+                                spi_write_prot_select(device);
+                                if ((spi_read_wps() & SPI_WPS_SELECT)) {
+					printk(KERN_INFO "SPI Device Is In Protected Mode\n");
+                                        status = 0;
+                                } else {
+					printk(KERN_INFO "SPI Device Do Not Support Individual Unprotect Mode\n");
+                                        status = EOPNOTSUPP;
+                                }
+                        }
+
+                        break;
+
+		default:
+			printk(KERN_INFO "SPI Device Do Not Support Individual Unprotect Mode\n");
+			status =  EOPNOTSUPP;
+	}
+
+	return status;
+}
+
+/******************************************************************************
+* Function:spi_read_scur
+* Purpose:Read security register
+* Returns: SPI Status Bits
+* Note:
+******************************************************************************/
+uint32_t spi_read_scur(void)
+{
+	uint32_t spi_ctrl_val;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_READ_SCUR_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, RUBY_SPI_READ_SCUR);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return (SWAP32(readl(RUBY_SPI_COMMIT))&RUBY_SPI_READ_STATUS_MASK);
+}
+
+/******************************************************************************
+* Function:spi_read_dpb_reg
+* Purpose: read dynamic protect block mode
+* Returns:status of the dynamic protect mode
+* Note:
+******************************************************************************/
+uint32_t spi_read_dpb_reg(uint32_t addr)
+{
+	uint32_t spi_ctrl_val;
+	uint32_t log_addr, sector_addr;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	log_addr = addr & ADDRESS_MASK;
+	sector_addr = log_addr & SECTOR_MASK;
+	writel(RUBY_SPI_READ_DPB_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(sector_addr, RUBY_SPI_READ_DPB);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return (SWAP32(readl(RUBY_SPI_COMMIT))&RUBY_SPI_READ_STATUS_MASK);
+}
+
+/******************************************************************************
+* Function:spi_gang_block_lock
+* Purpose:protect whole chipset
+* Returns: 0 or < 0
+* Note:
+*******************************************************************************/
+int spi_gang_block_lock(void)
+{
+	uint32_t spi_ctrl_val;
+
+	spi_unlock();
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_GBLOCK_LOCK_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, RUBY_SPI_GBLOCK_LOCK);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/******************************************************************************
+* Function:spi_gang_block_unlock
+* Purpose:unprotect whole chipset
+* Returns:0 or < 0
+* Note:
+********************************************************************************/
+int spi_gang_block_unlock(void)
+{
+	uint32_t spi_ctrl_val;
+
+	spi_unlock();
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_GBLOCK_UNLOCK_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, RUBY_SPI_GBLOCK_UNLOCK);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/******************************************************************************
+* Function:spi_write_prot_select
+* Purpose:Check if the device supports individual unprotect mode
+* Returns:0 or < 0
+* Note:
+*********************************************************************************/
+int spi_write_prot_select(struct flash_info *device)
+{
+	uint32_t spi_ctrl_val;
+
+	switch(device->single_unprotect_mode){
+	case MACRONIX:
+		spi_unlock();
+		if (spi_read_scur() & SPI_SCUR_WPSEL) {
+			spi_lock();
+			printk(KERN_INFO "Individual Unprotedted Mode Is Enabled \n");
+			return 0;
+		}
+		spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+		writel(RUBY_SPI_WRITE_PRO_SEL_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+		writel(0, RUBY_SPI_WRITE_PRO_SEL);
+		writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+		if ((spi_api_flash_status()) == -1){
+			printk(KERN_ERR "Time Out On Write Operation\n");
+			spi_lock();
+			return ETIME;
+		}
+		spi_lock();
+		if (spi_read_scur() & SPI_SCUR_WPSEL) {
+			printk(KERN_INFO "Individual Unprotected Mode Is Enabled\n");
+			return 0;
+		} else {
+			printk(KERN_INFO "Individual Unprotected Mode Is Disabled \n");
+			return EOPNOTSUPP;
+		}
+		break;
+	case WINBOND:
+                        spi_unlock();
+                        spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+                        writel(RUBY_SPI_WRITE_WPS_SEL_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+                        writel(SPI_WPS_ENABLE, RUBY_SPI_WRITE_REG3);
+                        writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+                        spi_lock();
+			return 0;
+	default:
+		return EOPNOTSUPP;
+	}
+}
+
+/******************************************************************************
+* Function:spi_clear_dpb_reg
+* Purpose:unprotect individual sector
+* Returns: 0 or < 0
+* Note:
+*********************************************************************************/
+int spi_clear_dpb_reg(uint32_t addr)
+{
+	uint32_t spi_ctrl_val;
+	uint32_t log_addr, sector_addr;
+
+	spi_unlock();
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	log_addr = addr & ADDRESS_MASK;
+	sector_addr = log_addr & SECTOR_MASK;
+	writel(RUBY_SPI_WRITE_DPB_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(sector_addr, RUBY_SPI_WRITE_DPB);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/******************************************************************************
+* Function:spi_clear_ibup_reg
+* Purpose:unprotect individual sector
+* Returns:0 or < 0
+* Note:
+**********************************************************************************/
+int spi_clear_ibup_reg(uint32_t addr)
+{
+        uint32_t spi_ctrl_val;
+        uint32_t log_addr, sector_addr;
+
+        spi_unlock();
+        spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+        log_addr = addr & ADDRESS_MASK;
+        sector_addr = log_addr & SECTOR_MASK;
+        writel(RUBY_SPI_WRITE_IBUP_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+        writel(sector_addr, RUBY_SPI_WRITE_IBUP);
+        writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+        if ((spi_api_flash_status()) == -1){
+                printk(KERN_ERR "Time Out On Write Operation\n");
+                spi_lock();
+                return ETIME;
+        }
+
+        spi_lock();
+        return 0;
+}
+
+/******************************************************************************
+* Function:spi_lock
+* Purpose:issue a lock after write is complete
+* Returns: 0
+* Note:
+*********************************************************************************/
+int spi_lock(void)
+{
+	writel(0, RUBY_SPI_WRITE_DIS);
+	return 0;
+}
+
+/******************************************************************************
+* Function:spi_unlock
+* Purpose:issue a unlock before any write to flash
+* Returns: 0
+* Note:
+*********************************************************************************/
+int spi_unlock(void)
+{
+	writel(0, RUBY_SPI_WRITE_EN);
+	return 0;
+}
+
+/******************************************************************************
+Function:spi_read_id
+Purpose:Reads spi device ID
+Returns: ID
+Note:
+*****************************************************************************/
+uint32_t spi_read_id(void)
+{
+	return SWAP32(readl(RUBY_SPI_READ_ID))&(RUBY_SPI_READ_ID_MASK);
+}
+
+/******************************************************************************
+Function:spi_read_status
+Purpose:Reads spi status reg
+Returns:Flash status
+Note:
+*****************************************************************************/
+uint32_t spi_read_status(void)
+{
+	return SWAP32(readl(RUBY_SPI_READ_STATUS))&(RUBY_SPI_READ_STATUS_MASK);
+}
+
+/******************************************************************************
+Function:spi_write_status
+Purpose:write spi status reg
+Returns:0
+Note:
+*****************************************************************************/
+int spi_write_status(uint32_t status)
+{
+	writel(status, RUBY_SPI_WRITE_STATUS);
+	return 0;
+}
+
+/******************************************************************************
+* Function:spi_unprotect_all
+* Purpose:unprotect the whole flash device
+* Returns: 0 or < 0
+* Note:
+******************************************************************************/
+int spi_unprotect_all(const struct flash_info *device)
+{
+	int ret = EOPNOTSUPP;
+
+	switch(device->single_unprotect_mode){
+		case MACRONIX:
+		case WINBOND:
+			ret = spi_gang_block_unlock();
+			break;
+		default:
+			ret = 0;
+	}
+
+	return ret;
+}
+
+/******************************************************************************
+* Function:spi_unprotect_sector
+* Purpose:unprotect an individual sector
+* Returns:0 or < 0
+* Note:
+******************************************************************************/
+int spi_unprotect_sector(const struct flash_info *device, uint32_t flash_addr)
+{
+	int ret = EOPNOTSUPP;
+
+	switch(device->single_unprotect_mode){
+	case MACRONIX:
+		ret = spi_clear_dpb_reg(flash_addr) ;
+		break;
+	case WINBOND:
+                ret = spi_clear_ibup_reg(flash_addr) ;
+                break;
+
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/******************************************************************************
+* Function:spi_protect_all
+* Purpose:protect whole chipset device
+* Returns: 0 or < 0
+* Note:
+******************************************************************************/
+int spi_protect_all(const struct flash_info *device)
+{
+	int ret = EOPNOTSUPP;
+
+	switch(device->single_unprotect_mode){
+	case MACRONIX:
+	case WINBOND:
+		if ((spi_api_flash_status()) == -1){
+			printk(KERN_ERR "Time Out On Write Operation\n");
+			spi_lock();
+			return ETIME;
+		}
+
+		ret = spi_gang_block_lock();
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/******************************************************************************
+* Function:spi_read_wps
+* Purpose:Read security register
+* Returns: SPI Status Bits
+* Note:
+******************************************************************************/
+uint32_t spi_read_wps(void)
+{
+        uint32_t spi_ctrl_val;
+
+        spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+        writel(RUBY_SPI_READ_SCUR_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+        writel(0, RUBY_SPI_READ_REG3);
+        writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+        return (SWAP32(readl(RUBY_SPI_COMMIT))&RUBY_SPI_READ_STATUS_MASK);
+}
+
+
+int spi_device_erase(struct flash_info *device, u32 flash_addr, unsigned es)
+{
+        int ret = 0;
+        int i;
+	int n_of_64k;
+
+	if ((es != device->sector_size) &&
+		((es != SPI_SECTOR_4K) || !(device->flags & SECTOR_ERASE_OP20))) {
+		printk(KERN_INFO "ERROR erase size%x\n", es);
+		return -1;
+	}
+
+        switch(device->single_unprotect_mode) {
+        case MACRONIX:
+        case WINBOND:
+
+	/* Check whether the address falls within the first (lowest) 64K or the
+	 * last (highest) 64K of the flash. Per the flash specs, protect mode
+	 * covers those two regions in 4K chunks, and 64K blocks everywhere else.
+	 * To make our life easier we use the default 64K size, but add some
+	 * intelligence to erase the lower 64K or upper 64K of the flash.
+	 */
+		n_of_64k = flash_addr / SPI_SECTOR_64K;
+		if ((n_of_64k == 0) || (n_of_64k == device->n_sectors - 1)) {
+			for (i = 0; i < SPI_SECTOR_INDEX; i++) {
+                                ret = spi_unprotect_sector(device, flash_addr) ;
+                                if (ret){
+                                        printk(KERN_INFO "ERROR: Failed to unprotect Sector %x \n", flash_addr);
+                                        return -1;
+                                }
+                                spi_flash_write_enable();
+				if ( device->sector_size * device->n_sectors > RUBY_SPI_BOUNDARY_4B ){
+					writel(SPI_MEM_ADDR_4B(flash_addr), RUBY_SPI_SECTOR_ERASE_20_4B);
+				} else {
+					writel(SPI_MEM_ADDR(flash_addr), RUBY_SPI_SECTOR_ERASE_20);
+				}
+				ret = spi_flash_wait_ready(device);
+				if (ret || device->sector_size == SPI_SECTOR_4K)
+					break;
+                                flash_addr += SPI_SECTOR_4K;
+                        }
+
+                } else {
+                        ret = spi_unprotect_sector(device, flash_addr) ;
+                        if (ret){
+                                printk(KERN_INFO "ERROR: Failed to unprotect Sector %x \n", flash_addr);
+                                return -1;
+                        }
+                        spi_flash_write_enable();
+			if ( device->sector_size * device->n_sectors > RUBY_SPI_BOUNDARY_4B ){
+				writel(SPI_MEM_ADDR_4B(flash_addr), RUBY_SPI_SECTOR_ERASE_D8_4B);
+			} else {
+				writel(SPI_MEM_ADDR(flash_addr), RUBY_SPI_SECTOR_ERASE_D8);
+			}
+                }
+                break;
+
+        default:
+                spi_flash_write_enable();
+		if ( device->sector_size * device->n_sectors > RUBY_SPI_BOUNDARY_4B ){
+			if (device->flags & SECTOR_ERASE_OP20) {
+				writel(SPI_MEM_ADDR_4B(flash_addr), RUBY_SPI_SECTOR_ERASE_20_4B);
+			} else {
+				writel(SPI_MEM_ADDR_4B(flash_addr), RUBY_SPI_SECTOR_ERASE_D8_4B);
+			}
+		} else {
+			if (device->flags & SECTOR_ERASE_OP20) {
+				writel(SPI_MEM_ADDR(flash_addr), RUBY_SPI_SECTOR_ERASE_20);
+			} else {
+				writel(SPI_MEM_ADDR(flash_addr), RUBY_SPI_SECTOR_ERASE_D8);
+			}
+		}
+        }
+
+        return ret;
+}
+
+
diff --git a/drivers/qtn/ruby/spi_api.h b/drivers/qtn/ruby/spi_api.h
new file mode 100644
index 0000000..80e5f7c
--- /dev/null
+++ b/drivers/qtn/ruby/spi_api.h
@@ -0,0 +1,178 @@
+#ifndef __SPI_H__
+#define __SPI_H__
+///////////////////////////////////////////////////////////////////////////////
+//             Includes
+///////////////////////////////////////////////////////////////////////////////
+//
+
+///////////////////////////////////////////////////////////////////////////////
+//              Types
+///////////////////////////////////////////////////////////////////////////////
+
+/******************************************************************************
+* Function:spi_protect_mode
+* Purpose:Initialize spi device - read device ID, if this matches return
+* pointer to device information structure
+* Returns:	0 or < 0 
+* Note:
+*  *****************************************************************************/
+int spi_protect_mode(struct flash_info *device);
+
+
+/******************************************************************************
+* Function:spi_read_id
+* Purpose:Reads spi device ID
+* Returns: ID
+* Note:
+*  *****************************************************************************/
+uint32_t spi_read_id(void);
+
+/******************************************************************************
+* Function:spi_read_status
+* Purpose:Reads spi status reg
+* Returns: Status
+* Note:
+*  *****************************************************************************/
+uint32_t spi_read_status(void);
+
+/******************************************************************************
+* Function:spi_write_status
+* Purpose:write spi status reg
+* Returns: NONE
+* Note:
+*  *****************************************************************************/
+int spi_write_status(uint32_t status);
+
+/******************************************************************************
+* Function:spi_lock
+* Purpose:locks spi device
+* Returns: NONE
+* Note:
+*  *****************************************************************************/
+int spi_lock(void);
+
+/******************************************************************************
+* Function:spi_unlock
+* Purpose:unlocks spi device
+* Returns: 0 or < 0
+* Note:
+*  *****************************************************************************/
+int spi_unlock(void);
+
+/******************************************************************************
+* Function:spi_write_prot_select
+* Purpose:Select write protection
+* Returns: 0 or < 0
+* Note:
+*  *****************************************************************************/
+int spi_write_prot_select(struct flash_info *device);
+
+/******************************************************************************
+* Function:spi_read_scur
+* Purpose:Read security register
+* Returns: status
+* Note:
+*  *****************************************************************************/
+uint32_t spi_read_scur(void);
+
+/******************************************************************************
+* Function:spi_write_scur
+* Purpose:Write security register to set lockdown bit
+* Returns: 0 or < 0
+* Note:
+*  *****************************************************************************/
+uint32_t spi_write_scur(void);
+
+
+/******************************************************************************
+* Function:spi_gang_block_lock
+* Purpose:Lock all DPB
+* Returns: 0 or < 0
+* Note:
+*  *****************************************************************************/
+int spi_gang_block_lock(void);
+
+/******************************************************************************
+* Function:spi_gang_block_unlock
+* Purpose:Unlock all DPB
+* Returns: 0 or < 0
+* Note:
+*  *****************************************************************************/
+int spi_gang_block_unlock(void);
+
+/******************************************************************************
+* Function:spi_clear_dpb_reg
+* Purpose:unprotect an individual sector
+* Returns: 0 or < 0
+* Note:
+***********************************************************************************/
+int spi_clear_dpb_reg(uint32_t addr);
+
+/******************************************************************************
+* Function:spi_read_dpb_reg
+* Purpose:read individual sector
+* Returns: 0 or < 0
+* Note:
+**********************************************************************************/
+uint32_t spi_read_dpb_reg(uint32_t addr);
+
+/******************************************************************************
+* Function:spi_api_flash_status
+* Purpose: read status of the Flash
+* Returns: status
+* Note:
+* ***********************************************************************************/
+int spi_api_flash_status(void);
+
+/******************************************************************************
+* Function:spi_unprotect_all
+* Purpose:unprotect the whole flash device
+* Returns:0 or < 0
+* Note:
+*******************************************************************************/
+int spi_unprotect_all(const struct flash_info *device);
+
+/******************************************************************************
+* Function:spi_unprotect_sector
+* Purpose:unprotect an individual sector
+* Returns:0 or < 0
+* Note:
+*******************************************************************************/
+int spi_unprotect_sector(const struct flash_info *device, uint32_t address);
+
+/******************************************************************************
+* Function:spi_protect_all
+* Purpose:protect whole chipset device
+* Returns: 0 or < 0
+* Note:
+******************************************************************************/
+int spi_protect_all(const struct flash_info *device);
+
+/******************************************************************************
+* Function:spi_read_wps
+* Purpose:Read register 3
+* Returns: SPI Status Bits
+* Note:
+* ******************************************************************************/
+uint32_t spi_read_wps(void);
+
+/******************************************************************************
+ * Function:spi_write_wps
+ * Purpose:write security register
+ * Returns: SPI Status Bits
+ * Note:
+ * ******************************************************************************/
+uint32_t spi_write_wps(void);
+
+void spi_device_resize(struct flash_info *device);
+
+int spi_device_erase(struct flash_info *device, u32 flash_addr, unsigned es);
+
+void spi_flash_write_enable(void);
+
+int qtn_get_spi_protect_config(void);
+
+int spi_flash_wait_ready(const struct flash_info *info);
+
+#endif // __SPI_H__
+
diff --git a/drivers/qtn/ruby/spi_flash.c b/drivers/qtn/ruby/spi_flash.c
new file mode 100644
index 0000000..c1fd557
--- /dev/null
+++ b/drivers/qtn/ruby/spi_flash.c
@@ -0,0 +1,1855 @@
+/*
+ * (C) Copyright 2010 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#include <asm/board/platform.h>
+#include <asm/board/gpio.h>
+#include <asm/board/board_config.h>
+#include <asm/errno.h>
+
+#include <common/ruby_spi_api.h>
+#include <common/ruby_spi_flash_data.h>
+#include <common/ruby_partitions.h>
+#include <common/ruby_mem.h>
+#include <common/ruby_board_cfg.h>
+
+#include "spi_api.h"
+
+#define FLASH_64MB			(64*1024*1024)
+
+/* Driver name */
+#define SPI_FLASH_DRIVER		"spi_flash"
+
+/* Swap bytes */
+#define SWAP32(x)			((((x) & 0x000000ff) << 24)  | \
+					(((x)  & 0x0000ff00) << 8)   | \
+					(((x)  & 0x00ff0000) >> 8)   | \
+					(((x)  & 0xff000000) >> 24))
+
+/* Timeout */
+#define SPI_READY_TIMEOUT_MS		10000
+
+/* Each flash chip has same page size */
+#define SPI_PAGE_SIZE			256
+
+/*
+ *
+ * Ruby uses 3 msb bytes to form addresses.
+ * Topaz uses all 4 bytes, just skip first msb if in 3-bytes address mode.
+ *
+ */
+        #define SPI_MEM_ADDR(addr)      (((addr) & 0x00FFFFFF))
+
+/* Set zero if want to disable log messages */
+#define SPI_FLASH_DEBUG			0
+
+#if SPI_FLASH_DEBUG
+	#define SPI_LOG(a...)		printk(a)
+#else
+	#define SPI_LOG(a...)
+#endif
+
+/* Structure which holds all allocated for SPI flash resources.
+*
+* Access to SPI controller must be only when hold 'lock' mutex.
+*
+* To read data cached ioremapped 'mem_read_cache' is used.
+* Why cached - because ARC processor works more efficient
+* with AMBA bus when access comes through d-cache.
+*
+* To write data NON-cached ioremapped 'mem_write_nocache' is used.
+* Why non-cached - because in Linux process (if not be extra-cautious)
+* can be interrupted or switched very easily when half cache-line is
+* updated only. This half-updated cache-line can be flushed (because
+* it is dirty). Flush means 'write'. So half-updated data will be stored
+* on flash. It is not a problem in case of reading (see cached 'mem_read_cache')
+* as after cache-line invalidating FAST_READ SPI command will be issued again
+* and we will have only slight performance penalty. And it is real problem
+* when do writing. So for writing let's have non-cached memory area.
+* Disadvantage - we use twice more virtual memory (should not be problem on Ruby).
+*/
+struct spi_flash
+{
+	struct mutex			lock;			/* serializes all access to the SPI controller */
+	struct mtd_info			mtd;			/* MTD device exported to the kernel */
+	void				*mem_write_nocache;	/* non-cached mapping of the flash window (writes) */
+	const void			*mem_read_cache;	/* cached mapping of the flash window (reads) */
+	struct flash_info		*info;			/* geometry/ID entry matched from flash_data[] */
+	unsigned			partitioned;		/* non-zero once MTD partitions were registered */
+};
+
+/* Map the embedded mtd_info back to its owning spi_flash. */
+static inline struct spi_flash* mtd_to_flash(struct mtd_info *mtd)
+{
+	return container_of(mtd, struct spi_flash, mtd);
+}
+
+/* Program the SPI clock field of the system control register: open the
+ * write mask for the SPICLK bits, write the divider value, close the mask.
+ */
+static inline void spi_ctrl_clock_config(u32 val)
+{
+	writel(RUBY_SYS_CTL_MASK_SPICLK, IO_ADDRESS(RUBY_SYS_CTL_MASK));
+	writel(RUBY_SYS_CTL_SPICLK(val), IO_ADDRESS(RUBY_SYS_CTL_CTRL));
+	writel(0x0, IO_ADDRESS(RUBY_SYS_CTL_MASK));
+}
+
+/* Pick the fastest controller divider whose output does not exceed 'freq';
+ * 0x3 is the slowest setting and is also used for unknown frequencies.
+ */
+static inline void spi_clock_config(unsigned long freq)
+{
+	const unsigned long ref = CONFIG_ARC700_DEV_CLK;
+
+	if (freq >= (ref / 2)) {
+		spi_ctrl_clock_config(0x0);
+	} else if (freq >= (ref / 4)) {
+		spi_ctrl_clock_config(0x1);
+	} else if(freq >= (ref / 8)) {
+		spi_ctrl_clock_config(0x2);
+	} else {
+		spi_ctrl_clock_config(0x3);
+	}
+}
+
+/* Total flash capacity in bytes. */
+static inline size_t spi_flash_size(const struct flash_info *info)
+{
+	return (info->sector_size * info->n_sectors);
+}
+
+/* True when 'addr' is aligned to the erase granularity: 4KB if the chip
+ * supports the 0x20 sub-sector erase opcode, otherwise a full sector.
+ */
+static inline int spi_flash_sector_aligned(const struct flash_info *info, loff_t addr)
+{
+	uint32_t es;
+	es = (info->flags & SECTOR_ERASE_OP20) ? SPI_SECTOR_4K : info->sector_size;
+	return !(addr % es);
+}
+
+/* Read the chip's JEDEC ID register; byte-swap and keep the low 3 bytes. */
+static inline u32 spi_flash_id(void)
+{
+	return (SWAP32(readl(IO_ADDRESS(RUBY_SPI_READ_ID))) & 0xFFFFFF);
+}
+
+/* Terminate any open command sequence by issuing a harmless READ_ID.
+ * NOTE(review): relies on the controller deasserting chip-select as a
+ * side effect of the ID read - confirm against controller documentation.
+ */
+static inline void spi_flash_deassert_cs(void)
+{
+	spi_flash_id();
+}
+
+/* Issue the chip's write-enable (WREN) command via the controller register.
+ * Exported to spi_flash_data.c / spi_api.h users.
+ */
+void spi_flash_write_enable(void)
+{
+	writel(0, IO_ADDRESS(RUBY_SPI_WRITE_EN));
+}
+
+/* Read the chip status register; only the low byte is meaningful. */
+static inline u32 spi_flash_status(void)
+{
+	return (SWAP32(readl(IO_ADDRESS(RUBY_SPI_READ_STATUS))) & 0xFF);
+}
+
+/* True when the write-in-progress status bit is clear. */
+static inline int spi_flash_ready(void)
+{
+	return !(spi_flash_status() & RUBY_SPI_WR_IN_PROGRESS);
+}
+
+/* Poll the status register until the write-in-progress bit clears.
+ * Bounded both by a jiffies deadline and by an iteration counter, each
+ * scaled by the chip's sector count; the loop keeps polling while EITHER
+ * bound has not yet expired.  Returns 0 when ready, -ETIMEDOUT if both
+ * bounds expire first.
+ */
+int spi_flash_wait_ready(const struct flash_info *info)
+{
+	int ret = -ETIMEDOUT;
+	const unsigned long deadline = jiffies +
+		max((unsigned long)(SPI_READY_TIMEOUT_MS * HZ / 1000 * info->n_sectors), 1UL);
+	unsigned long counter = max((unsigned long)info->n_sectors * SPI_READY_TIMEOUT_MS, 1UL);
+
+	do {
+		if(spi_flash_ready()) {
+			ret = 0;
+			break;
+		}
+
+		if(counter) {
+			--counter;
+		}
+
+		cond_resched();
+
+	} while (!time_after_eq(jiffies, deadline) || counter);
+
+	return ret;
+}
+
+/* Convert an MTD offset into the controller's 3-byte address form. */
+static inline u32 spi_flash_addr(loff_t addr)
+{
+	return SPI_MEM_ADDR(addr);
+}
+
+/* Round 'addr' down to a multiple of 'step' ('step' must be a power of 2). */
+static inline unsigned long spi_flash_align_begin(unsigned long addr, unsigned long step)
+{
+	return (addr & (~(step - 1)));
+}
+
+/* Round 'addr' up to a multiple of 'step' ('step' must be a power of 2). */
+static inline unsigned long spi_flash_align_end(unsigned long addr, unsigned long step)
+{
+	return ((addr + step - 1) & (~(step - 1)));
+}
+
+/* Invalidate the d-cache lines that cover flash-window range [begin, end),
+ * cache-line aligned outward on both sides.
+ */
+static inline void spi_flash_cache_inv(unsigned long begin, unsigned long end)
+{
+	/* ARC Cache uses physical addresses */
+	inv_dcache_range(
+		spi_flash_align_begin(RUBY_SPI_FLASH_ADDR + begin, ARC_DCACHE_LINE_LEN),
+		spi_flash_align_end(RUBY_SPI_FLASH_ADDR + end, ARC_DCACHE_LINE_LEN));
+}
+
+/* Map the whole flash window, page-aligned; cached when 'cache' != 0,
+ * non-cached otherwise.  Returns the mapping or NULL on failure.
+ */
+static inline void* spi_flash_ioremap(struct spi_flash *flash, int cache)
+{
+	void *ret = NULL;
+	const unsigned long begin = spi_flash_align_begin(RUBY_SPI_FLASH_ADDR, PAGE_SIZE);
+	const unsigned long end = spi_flash_align_end(RUBY_SPI_FLASH_ADDR + spi_flash_size(flash->info), PAGE_SIZE);
+
+	if (cache) {
+		ret = ioremap(begin, end - begin);
+	} else {
+		ret = ioremap_nocache(begin, end - begin);
+	}
+
+	return ret;
+}
+
+/* Erase [instr->addr, instr->addr + instr->len) of the flash.
+ *
+ * Whole-chip requests use the bulk-erase command; anything smaller is
+ * erased sector by sector, falling back to 4KB sub-sector erase when the
+ * chip supports opcode 0x20 and the boundaries are not sector aligned.
+ * Reports completion to MTD via instr->state / mtd_erase_callback() and
+ * re-arms write protection before returning.  Returns 0 or negative errno.
+ */
+static int spi_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	int ret = 0;
+	struct spi_flash *flash = mtd_to_flash(mtd);
+	loff_t erase_begin = instr->addr;
+	loff_t erase_end = min(erase_begin + (loff_t)instr->len, (loff_t)mtd->size);
+
+	SPI_LOG(KERN_INFO"%s: erase: begin=0x%x end=0x%x\n", SPI_FLASH_DRIVER, (unsigned)erase_begin, (unsigned)erase_end);
+
+	/* Begin synchronized block.
+	 * Lock before the pre-condition checks: the error paths below jump to
+	 * 'exit', which ends in mutex_unlock().  Previously those jumps could
+	 * happen before mutex_lock(), unlocking a mutex that was never taken.
+	 */
+	mutex_lock(&flash->lock);
+
+	/* Pre-condition. */
+	if (erase_begin >= erase_end) {
+
+		/* Request is out of range. */
+		ret = -ERANGE;
+		goto exit;
+
+	} else if (!(spi_flash_sector_aligned(flash->info, erase_begin) &&
+			spi_flash_sector_aligned(flash->info, erase_end))) {
+
+		/* Although it is legal to have erase address
+		* inside sector, it is not very safe.
+		* Simple mistake - and neighbour sector erased.
+		*/
+		ret = -ERANGE;
+		goto exit;
+	}
+
+	/* Going to bypass d-cache, so invalidate it before. */
+	spi_flash_cache_inv(erase_begin, erase_end);
+
+	/* Erasing */
+	if ((erase_begin == 0) && (erase_end == mtd->size)) {
+
+		ret = spi_unprotect_all(flash->info);
+		if (ret) {
+			printk(KERN_ERR "%s:Failed to unprotect all regions of the Flash\n", SPI_FLASH_DRIVER);
+			/* Was 'return (ret)', which leaked the held mutex and
+			 * skipped the MTD completion callback; fail via 'exit'.
+			 */
+			goto exit;
+		}
+
+		/* Bulk erase */
+
+		ret = spi_flash_wait_ready(flash->info);
+		if (!ret) {
+			spi_flash_write_enable();
+			writel(0, IO_ADDRESS(RUBY_SPI_BULK_ERASE));
+		}
+
+	} else {
+		while (erase_begin < erase_end) {
+			unsigned es; /* erase size */
+
+			/* Drop to 4KB erases when boundaries are unaligned or
+			 * the remainder is smaller than a full sector.
+			 */
+			es = flash->info->sector_size;
+			if (((erase_begin % es) || (erase_end - erase_begin < es)) && (flash->info->flags & SECTOR_ERASE_OP20))
+				es = SPI_SECTOR_4K;
+
+			ret = spi_flash_wait_ready(flash->info);
+			if (ret) {
+				break;
+			}
+
+			/* Per-sector erase */
+			if (spi_device_erase(flash->info, erase_begin, es)  < 0) {
+				break;
+			}
+
+			erase_begin += es;
+		}
+	}
+
+exit:
+	if (ret) {
+		instr->state = MTD_ERASE_FAILED;
+		SPI_LOG(KERN_ERR"%s: erase: failed: ret=%d\n", SPI_FLASH_DRIVER, ret);
+	} else {
+		instr->state = MTD_ERASE_DONE;
+		SPI_LOG(KERN_INFO"%s: erase: succeed\n", SPI_FLASH_DRIVER);
+	}
+
+	spi_flash_deassert_cs();
+	mtd_erase_callback(instr);
+
+	/* Re-arm write protection regardless of the erase outcome. */
+	spi_flash_wait_ready(flash->info);
+	ret = spi_protect_all(flash->info);
+
+	if (ret) {
+		printk(KERN_ERR"%s: Failed to protect all regions of the Flash\n", SPI_FLASH_DRIVER);
+	}
+
+	/* End synchronized block */
+	mutex_unlock(&flash->lock);
+
+	return ret;
+}
+
+/* Read 'len' bytes at offset 'read_begin' into 'buf' through the cached
+ * memory-mapped flash window.  Sets *ret_len and returns 0 on success,
+ * negative errno otherwise.
+ */
+static int spi_flash_read(struct mtd_info *mtd, loff_t read_begin, size_t len, size_t *ret_len, u_char *buf)
+{
+	int ret = 0;
+	struct spi_flash *flash = mtd_to_flash(mtd);
+	loff_t read_end = min(read_begin + (loff_t)len, (loff_t)mtd->size);
+	size_t read_len = 0;
+
+	SPI_LOG(KERN_INFO"%s: read: begin=0x%x len=%u\n", SPI_FLASH_DRIVER, (unsigned)read_begin, (unsigned)len);
+
+	/* Begin synchronized block.
+	 * Lock before the range check: the 'goto exit' path below falls into
+	 * mutex_unlock(), and previously could reach it without ever having
+	 * taken the lock.
+	 */
+	mutex_lock(&flash->lock);
+
+	/* Pre-condition. */
+	if(read_begin >= read_end) {
+		/* Request is out of range. */
+		ret = -ERANGE;
+		goto exit;
+	}
+
+	/* Calculate read length */
+	read_len = read_end - read_begin;
+
+	/* Reading */
+	ret = spi_flash_wait_ready(flash->info);
+	if (!ret) {
+#ifdef CONFIG_PREEMPT
+		memcpy(buf, (u_char *)flash->mem_read_cache + read_begin, read_len);
+#else
+		/* Non-preemptible kernel: copy one sector at a time and
+		 * reschedule between chunks so we do not hog the CPU.
+		 */
+		while (1) {
+
+			size_t iter_read_len = min((size_t)(read_end - read_begin), flash->info->sector_size);
+
+			memcpy(buf, (u_char *)flash->mem_read_cache + read_begin, iter_read_len);
+
+			read_begin += iter_read_len;
+			buf += iter_read_len;
+			if (read_begin == read_end) {
+				break;
+			}
+
+			cond_resched();
+		}
+#endif // #ifdef CONFIG_PREEMPT
+	}
+
+exit:
+	if (ret) {
+		SPI_LOG(KERN_ERR"%s: read: failed: ret=%d\n", SPI_FLASH_DRIVER, ret);
+	} else {
+		*ret_len = read_len;
+		SPI_LOG(KERN_INFO"%s: read: succeed: len=%u\n", SPI_FLASH_DRIVER, (unsigned)*ret_len);
+	}
+
+	spi_flash_deassert_cs();
+
+	/* End synchronized block */
+	mutex_unlock(&flash->lock);
+
+	return ret;
+}
+
+/* Program 'len' bytes at 'write_begin' from 'buf', one flash page (256B)
+ * at a time: unprotect the sector, WREN, issue PAGE_PROGRAM, copy through
+ * the non-cached mapping, commit.  The whole chip is re-protected before
+ * returning.  Sets *ret_len and returns 0 on success, negative errno
+ * otherwise.
+ *
+ * NOTE(review): the early -ERANGE path jumps past mutex_lock(), so the
+ * trailing wait/protect sequence then runs without the lock held; the
+ * normal path also releases the lock before that sequence - confirm this
+ * ordering is intended.
+ */
+static int spi_flash_write(struct mtd_info *mtd, loff_t write_begin, size_t len, size_t *ret_len, const u_char *buf)
+{
+	int ret = 0;
+	struct spi_flash *flash = mtd_to_flash(mtd);
+	loff_t write_end = min(write_begin + len, (loff_t)mtd->size);
+	size_t write_len = 0;
+	int i;
+
+	SPI_LOG(KERN_INFO"%s: write: begin=0x%x len=%u\n", SPI_FLASH_DRIVER, (unsigned)write_begin, (unsigned)len);
+	/* Pre-condition. */
+	if (write_begin >= write_end) {
+		/* Request is out of range. */
+		ret = -ERANGE;
+		goto exit;
+	}
+
+	/* Calculate write length */
+	write_len = write_end - write_begin;
+
+	/* Begin synchronized block */
+	mutex_lock(&flash->lock);
+
+	/* Going to bypass d-cache, so invalidate it before. */
+	spi_flash_cache_inv(write_begin, write_end);
+
+	/* Writing */
+	while (write_begin < write_end) {
+
+		/* Per-page programming */
+
+		u32 iter_write_len = min(
+			SPI_PAGE_SIZE - (write_begin % SPI_PAGE_SIZE), /* do not exceed page boundary */
+			write_end - write_begin); /* do not exceed requested range */
+
+		ret = spi_flash_wait_ready(flash->info);
+		if (ret) {
+			break;
+		}
+
+		ret = spi_unprotect_sector(flash->info, write_begin);
+		if (ret) {
+			printk(KERN_ERR"%s: Failed to unprotect Sector %x \n", SPI_FLASH_DRIVER,
+				(unsigned int)write_begin);
+			break;
+		}
+		spi_flash_write_enable();
+		/* Chips larger than the 4-byte boundary need the 4B opcode. */
+		if ( flash->info->sector_size * flash->info->n_sectors > RUBY_SPI_BOUNDARY_4B ){
+			writel(SPI_MEM_ADDR_4B(write_begin), IO_ADDRESS(RUBY_SPI_PAGE_PROGRAM_4B));
+		} else {
+			writel(spi_flash_addr(write_begin), IO_ADDRESS(RUBY_SPI_PAGE_PROGRAM));
+		}
+
+		/*
+		 * memcpy((u_char *)flash->mem_write_nocache + write_begin, buf, iter_write_len);
+		 * memcpy doesn't work correctly here for Linux_2.6.35.12, due to implementation of it.
+		 */
+		for (i = 0; i < iter_write_len; i++)
+			*((u_char __iomem *)flash->mem_write_nocache + write_begin + i) = *(buf + i);
+		
+		writel(0, IO_ADDRESS(RUBY_SPI_COMMIT));
+
+		write_begin += iter_write_len;
+		buf += iter_write_len;
+	}
+
+	/* End synchronized block */
+	mutex_unlock(&flash->lock);
+
+exit:
+	if (ret) {
+		SPI_LOG(KERN_ERR"%s: write: failed: ret=%d\n", SPI_FLASH_DRIVER, ret);
+	} else {
+		*ret_len = write_len;
+		SPI_LOG(KERN_INFO"%s: write: succeed: len=%u\n", SPI_FLASH_DRIVER, (unsigned)*ret_len);
+	}
+
+	spi_flash_deassert_cs();
+
+	/* Re-arm write protection regardless of the outcome. */
+	spi_flash_wait_ready(flash->info);
+	ret = spi_protect_all(flash->info);
+
+	if (ret) {
+		printk(KERN_ERR"%s: Failed to protect all regions of the Flash \n", SPI_FLASH_DRIVER);
+	}
+
+	return ret;
+}
+
+/* MTD sync hook: block until any in-flight write/erase has completed. */
+static void spi_flash_sync(struct mtd_info *mtd)
+{
+	struct spi_flash *drv = mtd_to_flash(mtd);
+
+	SPI_LOG(KERN_INFO"%s: sync: begin\n", SPI_FLASH_DRIVER);
+
+	mutex_lock(&drv->lock);
+	/* Make sure that all pending write/erase transactions are finished */
+	(void)spi_flash_wait_ready(drv->info);
+	mutex_unlock(&drv->lock);
+
+	SPI_LOG(KERN_INFO"%s: sync: end\n", SPI_FLASH_DRIVER);
+}
+
+/* Match the probed JEDEC id against the static flash_data[] table.
+ * Returns the matching entry, or NULL (with an error log) for unknown chips.
+ */
+static struct flash_info* __init spi_flash_info(void)
+{
+	u32 jedec_id = spi_flash_id();
+	int i;
+
+	for(i = 0; i < ARRAY_SIZE(flash_data); ++i) {
+		if(jedec_id == flash_data[i].jedec_id) {
+			return (flash_data + i);
+		}
+	}
+
+	printk(KERN_ERR"%s: SPI flash JEDEC id is unknown: 0x%x\n",
+		SPI_FLASH_DRIVER, (unsigned)jedec_id);
+
+	return NULL;
+}
+
+/* Release everything spi_flash_alloc() set up: both flash-window mappings
+ * and the structure itself.  Tolerates NULL and partially-initialized
+ * objects.
+ * NOTE(review): mem_read_cache is 'const void *'; some kernel versions
+ * expect a non-const pointer for iounmap() - confirm this builds cleanly.
+ */
+static void spi_flash_dealloc(struct spi_flash *flash)
+{
+	if (flash) {
+
+		if (flash->mem_read_cache) {
+			iounmap(flash->mem_read_cache);
+		}
+
+		if (flash->mem_write_nocache) {
+			iounmap(flash->mem_write_nocache);
+		}
+
+		kfree(flash);
+	}
+}
+
+/* Allocate and initialize driver state: probe the chip at the slowest SPI
+ * clock, switch to the chip's rated clock, then create the cached (read)
+ * and non-cached (write) mappings of the flash window.
+ * Returns the new object, or NULL on any failure (everything undone).
+ */
+static struct spi_flash* __init spi_flash_alloc(void)
+{
+	/* Allocate structure to hold flash specific data. */
+	struct spi_flash *flash = kzalloc(sizeof(struct spi_flash), GFP_KERNEL);
+	if (!flash) {
+		printk(KERN_ERR"%s: no memory\n", SPI_FLASH_DRIVER);
+		goto error_exit;
+	}
+
+	/* Cannot setup proper clock yet, so set up slowest possible mode. */
+	spi_clock_config(FREQ_UNKNOWN);
+
+	/* Get flash information. */
+	flash->info = spi_flash_info();
+	if (!flash->info) {
+		printk(KERN_ERR"%s: cannot get info\n", SPI_FLASH_DRIVER);
+		goto error_exit;
+	}
+
+	/* Now we are ready to setup correct frequency. */
+	spi_clock_config(flash->info->freq);
+
+	/* Map flash memory. We need both cached and non-cached access. */
+	flash->mem_read_cache = spi_flash_ioremap(flash, 1);
+	flash->mem_write_nocache = spi_flash_ioremap(flash, 0);
+	if (!flash->mem_read_cache || !flash->mem_write_nocache) {
+		printk(KERN_ERR"%s: cannot remap IO memory\n", SPI_FLASH_DRIVER);
+		goto error_exit;
+	}
+
+	/* Initialize mutex */
+	mutex_init(&flash->lock);
+
+	return flash;
+
+error_exit:
+	spi_flash_dealloc(flash);
+	return NULL;
+}
+
+/* Fill in the mtd_info fields common to all kernels.
+ * NOTE(review): the two version branches are identical except that only
+ * the >= 4.7.0 branch sets mtd.erasesize; on older kernels erasesize is
+ * left unset here (the helper that used to set it per-partition is now
+ * inside an '#if 0' block) - confirm this is intentional.
+ */
+static void __init spi_flash_init_mtd(struct spi_flash *flash)
+{
+	flash->mtd.name = SPI_FLASH_DRIVER;
+	flash->mtd.type = MTD_NORFLASH;
+	flash->mtd.writesize = 1;
+	flash->mtd.writebufsize = SPI_PAGE_SIZE;
+	flash->mtd.flags = MTD_CAP_NORFLASH;
+	flash->mtd.size = spi_flash_size(flash->info);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	flash->mtd._erase = spi_flash_erase;
+	flash->mtd._write = spi_flash_write;
+	flash->mtd._read = spi_flash_read;
+	flash->mtd._sync = spi_flash_sync;
+	flash->mtd.erasesize = flash->info->sector_size;
+#else
+	flash->mtd._erase = spi_flash_erase;
+	flash->mtd._write = spi_flash_write;
+	flash->mtd._read = spi_flash_read;
+	flash->mtd._sync = spi_flash_sync;
+#endif
+}
+
+
+/* mtd_partition initializer that compiles on both old and new kernels. */
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,30)
+#define	RUBY_MTD_PART(name, size, offset)	{name, size, offset, 0, NULL, NULL}
+#else
+#define	RUBY_MTD_PART(name, size, offset)	{name, size, offset, 0}
+#endif
+
+/* Default partition layouts, one per supported flash capacity; the layout
+ * is selected at probe time via partition_tables[] below.
+ */
+static struct mtd_partition __initdata parts_64K[] = {
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,	F64K_UBOOT_PIGGY_PARTITION_SIZE, 0),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,	F64K_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_DATA,	MTDPART_SIZ_FULL,		MTDPART_OFS_NXTBLK),
+};
+
+#ifdef FLASH_SUPPORT_256KB
+static struct mtd_partition __initdata parts_256K[] = {
+        RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,   F256K_UBOOT_PIGGY_PARTITION_SIZE, 0),
+        RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,   F256K_ENV_PARTITION_SIZE,        MTDPART_OFS_NXTBLK),
+        RUBY_MTD_PART(MTD_PARTNAME_DATA,        MTDPART_SIZ_FULL,               MTDPART_OFS_NXTBLK),
+};
+#endif
+static struct mtd_partition __initdata parts_2M[] = {
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,	UBOOT_TEXT_PARTITION_SIZE,	0),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,	UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV_BAK,UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_DATA,	MTDPART_SIZ_FULL,		MTDPART_OFS_NXTBLK),
+};
+static struct mtd_partition __initdata parts_4M[] = {
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,	UBOOT_TEXT_PARTITION_SIZE,	0),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,	UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV_BAK,UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_DATA,	MTDPART_SIZ_FULL,		MTDPART_OFS_NXTBLK),
+};
+static struct mtd_partition __initdata parts_8M[] = {
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,	UBOOT_TEXT_PARTITION_SIZE,	0),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,	UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV_BAK,UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_LINUX_LIVE,	IMG_SIZE_8M_FLASH_1_IMG,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_DATA,	MTDPART_SIZ_FULL,		MTDPART_OFS_NXTBLK),
+};
+
+static struct mtd_partition __initdata parts_16M[] = {
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,	UBOOT_TEXT_PARTITION_SIZE,	0),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,	UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV_BAK,UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_LINUX_SAFETY,IMG_SIZE_16M_FLASH_2_IMG,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_LINUX_LIVE,	IMG_SIZE_16M_FLASH_2_IMG,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_DATA,	MTDPART_SIZ_FULL,		MTDPART_OFS_NXTBLK),
+};
+
+static struct mtd_partition __initdata parts_32M[] = {
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,	UBOOT_TEXT_PARTITION_SIZE,	0),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,	UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV_BAK,UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_LINUX_SAFETY,IMG_SIZE_16M_FLASH_2_IMG,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_LINUX_LIVE,	IMG_SIZE_16M_FLASH_2_IMG,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_DATA,	RUBY_MIN_DATA_PARTITION_SIZE,		MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_EXTEND,	MTDPART_SIZ_FULL,		MTDPART_OFS_NXTBLK),
+};
+
+static struct mtd_partition __initdata parts_64M[] = {
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_BIN,	UBOOT_TEXT_PARTITION_SIZE,	0),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV,	UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_UBOOT_ENV_BAK,UBOOT_ENV_PARTITION_SIZE,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_LINUX_SAFETY,IMG_SIZE_16M_FLASH_2_IMG,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_LINUX_LIVE,	IMG_SIZE_16M_FLASH_2_IMG,	MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_DATA,	RUBY_MIN_DATA_PARTITION_SIZE,		MTDPART_OFS_NXTBLK),
+	RUBY_MTD_PART(MTD_PARTNAME_EXTEND,	MTDPART_SIZ_FULL,		MTDPART_OFS_NXTBLK),
+};
+
+/* One row per supported capacity. */
+struct mtd_partition_table {
+	int flashsz;			/* total flash size in bytes */
+	struct mtd_partition *parts;	/* default layout for that size */
+	unsigned int nparts;		/* number of entries in 'parts' */
+};
+
+/* Capacity -> layout lookup, ordered largest first. */
+static const struct mtd_partition_table partition_tables[] = {
+	{ FLASH_64MB,	parts_64M,	ARRAY_SIZE(parts_64M)	},
+	{ FLASH_32MB,	parts_32M,	ARRAY_SIZE(parts_32M)	},
+	{ FLASH_16MB,	parts_16M,	ARRAY_SIZE(parts_16M)	},
+	{ FLASH_8MB,	parts_8M,	ARRAY_SIZE(parts_8M)	},
+	{ FLASH_4MB,	parts_4M,	ARRAY_SIZE(parts_4M)	},
+	{ FLASH_2MB,	parts_2M,	ARRAY_SIZE(parts_2M)	},
+#ifdef FLASH_SUPPORT_256KB
+        { FLASH_256KB,  parts_256K,     ARRAY_SIZE(parts_256K)   },
+#endif
+	{ FLASH_64KB,	parts_64K,	ARRAY_SIZE(parts_64K)	},
+};
+
+#if 0
+/*#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,30)*/
+static struct mtd_partition __init *allocate_mtd(int n)
+{
+	struct mtd_partition *parts = NULL;
+
+	parts = (struct mtd_partition *)kmalloc(n * sizeof(struct mtd_partition), GFP_KERNEL);
+	if (!parts)
+		panic("%s: allocation failed for mtd\n", SPI_FLASH_DRIVER);
+
+	return parts;
+}
+
+static int __init spi_flash_part_get(struct spi_flash *flash, struct mtd_partition **pparts)
+{
+	int nr_parts = 0;
+	int cfg_flash_sz = 0;
+	int jedec_flash_sz = 0;
+	int flashsz = 0;
+	int i;
+	struct mtd_partition *parts = NULL;
+	const struct flash_layout_tbl *pflt;
+
+	pflt = qtn_get_flt();
+	if (pflt) {
+		parts = allocate_mtd(pflt->entry_n);
+		for (i = 0; i< pflt->entry_n; i++) {
+			parts[i].name = (pflt->fld)[i].desc;
+			parts[i].size = (pflt->fld)[i].len << 8;
+			parts[i].offset = (pflt->fld)[i].start;
+		}
+		nr_parts = i;
+	}
+
+	printk("%s: step1: nr_parts=%d\n", __func__, nr_parts);
+
+	if (!nr_parts) {
+		get_board_config(BOARD_CFG_FLASH_SIZE, &cfg_flash_sz);
+		jedec_flash_sz = spi_flash_size(flash->info);
+		flashsz = cfg_flash_sz ?  min(cfg_flash_sz, jedec_flash_sz) :
+				jedec_flash_sz;
+
+		for (i = 0; i < ARRAY_SIZE(partition_tables); i++) {
+			const struct mtd_partition_table *t;
+			struct mtd_partition *p;
+
+			t = &partition_tables[i];
+			if (flashsz == t->flashsz) {
+				nr_parts = t->nparts;
+				p = t->parts;
+				parts = allocate_mtd(nr_parts);
+				memcpy(parts, p, sizeof(struct mtd_partition) * nr_parts);
+				break;
+			}
+		}
+
+		for (i = 1; i < nr_parts; i++) {
+			if (parts[i].offset == MTDPART_OFS_NXTBLK)
+				parts[i].offset = parts[i - 1].offset + parts[i - 1].size;
+		}
+	}
+
+	printk("%s: step2: nr_parts=%d\n", __func__, nr_parts);
+	if (!nr_parts)
+		panic("%s: No valid flash partition table."
+			" flashsz board/jedec/sel = 0x%x/0x%x/0x%x\n",
+				SPI_FLASH_DRIVER, cfg_flash_sz, jedec_flash_sz, flashsz);
+	*pparts = parts;
+	return nr_parts;
+}
+
+static void __init set_proper_erasesize(struct spi_flash *flash, struct mtd_partition *part)
+{
+	int es = flash->info->sector_size;
+
+	if (part->offset % es || part->size % es) {
+		if (flash->info->flags & SECTOR_ERASE_OP20) {
+			es = SPI_SECTOR_4K;
+			if (part->offset % es || part->size % es)
+				printk(KERN_ERR"%s: improper partition size=%llx, offset=%llx\n",
+					SPI_FLASH_DRIVER, part->size, part->offset);
+		}
+	}
+	flash->mtd.erasesize = es;
+}
+
+static int __init spi_flash_add_mtd(struct spi_flash *flash)
+{
+	struct mtd_partition *parts = NULL;
+	int nr_parts = 0;
+	int ret = -1;
+	int allocated = 0;
+	static const char *part_probes[] = { "cmdlinepart", NULL };
+	int i;
+
+	printk("%s\n", __func__);
+
+	spi_flash_init_mtd(flash);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	nr_parts = spi_flash_part_get(flash, &parts);
+	for (i = 0; i < nr_parts; i++) {
+		set_proper_erasesize(flash, &parts[i]);
+		ret = mtd_device_parse_register(&flash->mtd, part_probes, NULL, &parts[i], 1);
+	}
+	allocated = 1;
+#else
+	if (mtd_has_partitions()) {
+		// prioritise paritions being sent as command line parameters
+		if (mtd_has_cmdlinepart())
+			nr_parts = parse_mtd_partitions(&flash->mtd,
+					part_probes, &parts, 0);
+
+		// provide a default table if command line parameter fails
+		if (nr_parts <= 0) {
+			nr_parts = spi_flash_part_get(flash, &parts);
+			allocated = 1;
+		}
+
+		if (nr_parts > 0) {
+			for (i = 0; i < nr_parts; i++) {
+				set_proper_erasesize(flash, &parts[i]);
+				ret = add_mtd_partitions(&flash->mtd, &parts[i], 1);
+			}
+			flash->partitioned = 1;
+		}
+	}
+#endif
+	if(allocated)
+		kfree(parts);
+	return ret;
+}
+#else
+/* Register the flash with MTD using the built-in layout that matches the
+ * JEDEC-reported size.  Returns the mtd_device_parse_register() result.
+ * Fix: 'parts' was passed to MTD uninitialized when no partition_tables[]
+ * entry matched the detected size; it now starts as NULL and the no-match
+ * case is logged.
+ */
+static int __init spi_flash_add_mtd(struct spi_flash *flash)
+{
+	struct mtd_partition *parts = NULL;
+	int nr_parts = 0;
+	int jedec_flash_sz = 0;
+	int flashsz = 0;
+
+	spi_flash_init_mtd(flash);
+
+	{
+		int i;
+		jedec_flash_sz = spi_flash_size(flash->info);
+		flashsz = jedec_flash_sz;
+		for (i = 0; i < ARRAY_SIZE(partition_tables); i++) {
+			const struct mtd_partition_table *t;
+
+			t = &partition_tables[i];
+			if (flashsz == t->flashsz) {
+				nr_parts = t->nparts;
+				parts = t->parts;
+				break;
+			}
+		}
+	}
+
+	if (!parts)
+		printk(KERN_ERR"%s: no partition table for flash size 0x%x\n",
+			SPI_FLASH_DRIVER, (unsigned)flashsz);
+
+	return mtd_device_parse_register(&flash->mtd, NULL, NULL, parts, nr_parts);
+}
+#endif
+
+/* Probe the flash, enable 4-byte addressing for chips larger than the
+ * 4B boundary, apply the board's protect-mode policy and register the
+ * MTD device.  On success *flash_ret owns the allocated state; on failure
+ * the state is freed (NOTE(review): *flash_ret may then still point at
+ * freed memory - verify callers only use it when the return is 0).
+ * NOTE(review): this RUBY_SPI_CONTROL write lacks the IO_ADDRESS()
+ * wrapper used on every other register access in this file - confirm
+ * which form is correct.
+ * NOTE(review): spi_protect_mode() is compared against positive
+ * EOPNOTSUPP; verify the callee's sign convention.
+ */
+static int __init spi_flash_attach(struct spi_flash **flash_ret)
+{
+	int ret = 0;
+
+	struct spi_flash *flash = spi_flash_alloc();
+	if (!flash) {
+		printk(KERN_ERR"%s: allocation failed\n", SPI_FLASH_DRIVER);
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	if ( flash->info->sector_size * flash->info->n_sectors > RUBY_SPI_BOUNDARY_4B ){
+		writel(RUBY_SPI_ADDRESS_MODE_4B, RUBY_SPI_CONTROL);
+	}
+
+	*flash_ret = flash;
+
+	/* If flag is set, enable support Protection mode */
+
+	if ((qtn_get_spi_protect_config() & 0x1)){
+		if (spi_protect_mode(flash->info) == EOPNOTSUPP){
+			printk(KERN_INFO "%s: SPI Protected Mode is not Supported\n", SPI_FLASH_DRIVER);
+			flash->info->single_unprotect_mode = NOT_SUPPORTED;
+		} else {
+			printk(KERN_INFO "%s: SPI Protected Mode is Supported\n", SPI_FLASH_DRIVER);
+		}
+	} else {
+		/* No Protection */
+		printk(KERN_INFO "%s: Force not to support Protect Mode \n", SPI_FLASH_DRIVER);
+		flash->info->single_unprotect_mode = NOT_SUPPORTED;
+	}
+
+	ret = spi_flash_add_mtd(flash);
+	if (ret) {
+		printk(KERN_ERR"%s: MTD registering failed\n", SPI_FLASH_DRIVER);
+		goto error;
+	}
+
+	return ret;
+
+error:
+	spi_flash_dealloc(flash);
+	return ret;
+}
+
+/*
+ * Unregister the MTD device and free the flash descriptor.
+ * NOTE(review): if MTD deletion fails the descriptor is deliberately
+ * not freed (the device may still be in use), so it stays allocated.
+ */
+static void __exit spi_flash_deattach(struct spi_flash *flash)
+{
+	int status = 0;
+
+	if (!flash) {
+		return;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	status = mtd_device_unregister(&flash->mtd);
+#else
+	if (mtd_has_partitions() && flash->partitioned) {
+		status = del_mtd_partitions(&flash->mtd);
+	} else {
+		status = del_mtd_device(&flash->mtd);
+	}
+#endif
+
+	if (status) {
+		printk(KERN_ERR"%s: cannot delete MTD\n", SPI_FLASH_DRIVER);
+	} else {
+		spi_flash_dealloc(flash);
+	}
+}
+
+/* We can have only single flash chip connected to SPI controller */
+static struct spi_flash *g_flash = NULL;
+
+
+/******************************************************************************
+* Function:spi_api_flash_status
+* Purpose: read status of the Flash (wait until it reports ready)
+* Returns: TIMEOUT or success; -1 when no flash chip has been attached
+* Note:
+* ***********************************************************************************/
+int spi_api_flash_status(void)
+{
+	int ret = -1;
+
+	if (g_flash){
+		ret = spi_flash_wait_ready(g_flash->info);
+	}
+
+	return (ret);
+}
+
+/* For external use to query flash size; 0 if no flash is attached. */
+size_t get_flash_size(void)
+{
+	return g_flash ? g_flash->mtd.size : 0;
+}
+EXPORT_SYMBOL(get_flash_size);
+
+/*
+ * Issue read command `cmd` and return its byte-swapped, masked data.
+ * `cmd` may be a raw opcode or a fully-formed controller address
+ * (values already containing RUBY_SPI_BASE_ADDR are used as-is).
+ */
+uint32_t spi_read_one( unsigned int cmd)
+{
+	int spi_cmd, val;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+	val = SWAP32(readl(spi_cmd))& RUBY_SPI_READ_DATA_MASK;
+	return val;
+}
+
+/*
+ * Issue command `cmd` with a two-byte data pass and return the 16-bit
+ * result from the commit register. The controller pass-length field is
+ * temporarily changed (RUBY_SPI_PASS_TWO_MASK) and then restored.
+ */
+uint32_t spi_read_two( unsigned int cmd)
+{
+	int spi_cmd, val;
+	uint32_t spi_ctrl_val;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_TWO_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	val = readw(RUBY_SPI_COMMIT);
+	return val;
+}
+
+/*
+ * Issue command `cmd` with a four-byte data pass and return the 32-bit
+ * result from the commit register (pass length restored afterwards).
+ */
+uint32_t spi_read_four( unsigned int cmd)
+{
+	int spi_cmd, val;
+	uint32_t spi_ctrl_val;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_FOUR_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	val = readl(RUBY_SPI_COMMIT);
+	return val;
+}
+
+
+/*
+ * Issue command `cmd` with a 4-byte flash address and return one byte
+ * read from the commit register (pass length restored afterwards).
+ */
+uint32_t spi_read_one_addr( unsigned int cmd, unsigned int addr)
+{
+	int spi_cmd, val;
+	uint32_t spi_ctrl_val;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_ONE_ADDR_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(SPI_MEM_ADDR_4B(addr), spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	val = readb(RUBY_SPI_COMMIT);
+	return val;
+}
+
+
+/*
+ * Send a bare command (no address, no data phase) to the flash.
+ * Always returns 0.
+ */
+int spi_write_cmd(unsigned int cmd)
+{
+	uint32_t spi_ctrl_val;
+	int spi_cmd;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_CMD_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return 0;
+}
+
+
+/*
+ * Send command `cmd` with a one-byte data pass; `value` is byte-swapped
+ * before being written to the controller. Always returns 0.
+ */
+int spi_write_one(unsigned int cmd, uint32_t value)
+{
+	uint32_t spi_ctrl_val;
+	int spi_cmd;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_ONE_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(SWAP32(value), spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return 0;
+}
+
+/*
+ * Send command `cmd` with a two-byte data pass; `value` is byte-swapped
+ * before the controller write. Always returns 0.
+ */
+int spi_write_two(unsigned int cmd, uint32_t value)
+{
+	uint32_t spi_ctrl_val;
+	int spi_cmd;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_TWO_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(SWAP32(value), spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return 0;
+}
+
+/*
+ * Send command `cmd` with a four-byte data pass; `value` is byte-swapped
+ * before the controller write. Always returns 0.
+ */
+int spi_write_four(unsigned int cmd, uint32_t value)
+{
+	uint32_t spi_ctrl_val;
+	int spi_cmd;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_FOUR_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(SWAP32(value), spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return 0;
+}
+
+/*
+ * Send command `cmd` using an eight-byte data pass so that the 4-byte
+ * `value` is preceded by four null cycles -- presumably the framing the
+ * password commands require (TODO confirm against controller docs).
+ * Always returns 0.
+ */
+int spi_write_four_prefix_4null(unsigned int cmd, uint32_t value)
+{
+	uint32_t spi_ctrl_val;
+	int spi_cmd;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_EIGHT_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(SWAP32(value), spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return 0;
+}
+
+
+
+/*
+ * Send command `cmd` with a 4-byte flash address, using the current
+ * controller pass configuration unchanged. Always returns 0.
+ */
+int spi_write_addr(unsigned int cmd, unsigned int addr)
+{
+	int spi_cmd;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	writel(SPI_MEM_ADDR_4B(addr), spi_cmd);
+
+	return 0;
+
+}
+
+/* Read the MXIC security (SCUR) register. */
+uint32_t spi_mxic_read_scur(void)
+{
+	return spi_read_one(RUBY_SPI_READ_SCUR);
+}
+
+/* Read the MXIC lock register (16-bit). */
+uint32_t spi_mxic_read_lock(void)
+{
+	return spi_read_two(RUBY_SPI_READ_LOCK);
+}
+
+
+/* Returns 1 when the SCUR register's WPSEL bit is set (protection
+ * mode active), 0 otherwise. */
+int spi_mxic_protect_mode_check(void)
+{
+	unsigned int scur = spi_mxic_read_scur();
+
+	return (scur & SPI_SCUR_WPSEL) ? 1 : 0;
+}
+
+/*
+ * One-time (OTP) enable of MXIC protection mode via the
+ * RUBY_SPI_WRITE_PRO_SEL command, verified by re-reading SCUR.
+ * Returns 0 on success, ETIME (positive, note: not negated) on flash
+ * timeout, -1 if the mode does not read back as enabled.
+ * spi_unlock()/spi_lock() presumably drop/restore controller write
+ * protection around the operation -- confirm.
+ */
+int spi_mxic_enable_protect_mode_otp(void)
+{
+	spi_unlock();
+	spi_write_cmd(RUBY_SPI_WRITE_PRO_SEL);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	if (!spi_mxic_protect_mode_check()){
+		printk(KERN_ERR "%s check error\n",__FUNCTION__);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/* Returns 0 when the lock register's SPM bit is set, 1 when clear
+ * (the bit reads as clear once SPB mode has been enabled). */
+int spi_mxic_spb_mode_check(void)
+{
+	unsigned int lock = spi_mxic_read_lock();
+
+	return (lock & SPI_LOCK_SPM) ? 0 : 1;
+}
+
+/*
+ * One-time (OTP) enable of SPB mode by writing the lock register with
+ * the SPM bit cleared (~SPI_LOCK_SPM), then verifying via
+ * spi_mxic_spb_mode_check(). Returns 0, ETIME (positive) on timeout,
+ * or -1 on verification failure.
+ */
+int spi_mxic_enable_spb_mode_otp(void)
+{
+	spi_unlock();
+	spi_write_two(RUBY_SPI_WRITE_LOCK,~SPI_LOCK_SPM);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	if (!spi_mxic_spb_mode_check()){
+		printk(KERN_ERR "%s check error\n",__FUNCTION__);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/* Read the solid-protection bit (SPB) for the sector containing `addr`. */
+int spi_mxic_read_spb(unsigned int addr)
+{
+	return spi_read_one_addr(RUBY_SPI_READ_SPB, addr);
+}
+
+/* Returns 1 when the SPB for `addr` reads non-zero, 0 otherwise. */
+int spi_mxic_spb_check(unsigned int addr)
+{
+	return (spi_mxic_read_spb(addr) != 0) ? 1 : 0;
+}
+
+/*
+ * Set the SPB for the sector containing `addr` and verify it stuck.
+ * Returns 0 on success, ETIME (positive) on timeout, -1 when the bit
+ * does not read back as set.
+ */
+int spi_mxic_write_spb(unsigned int addr)
+{
+	spi_unlock();
+	spi_write_addr(RUBY_SPI_WRITE_SPB,addr);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	if (!spi_mxic_spb_check(addr)){
+		printk(KERN_ERR "%s check error at 0x%x\n",__FUNCTION__, addr);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/*
+ * Erase (clear) all solid-protection bits with a single command.
+ * Returns 0 on success, ETIME (positive) when the flash never reports
+ * ready. Fix: corrected "Timeut" typo in the error message.
+ */
+int spi_mxic_erase_allspb(void)
+{
+	spi_unlock();
+	spi_write_cmd(RUBY_SPI_ERASE_SPB);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Timeout On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/* Read the SPB lock-down register. */
+uint32_t spi_mxic_read_spb_lock(void)
+{
+	return spi_read_one(RUBY_SPI_READ_SPBLOCK);
+}
+
+
+/* Returns 1 when the SPB lock register reads zero, 0 otherwise. */
+int spi_mxic_spb_lock_check(void)
+{
+	return (spi_mxic_read_spb_lock() == 0) ? 1 : 0;
+}
+
+
+/*
+ * Set the SPB lock-down bit and verify via spi_mxic_spb_lock_check().
+ * Returns 0 on success, ETIME (positive) on timeout, -1 on failed
+ * verification.
+ */
+int spi_mxic_write_spb_lock(void)
+{
+	spi_unlock();
+	spi_write_cmd(RUBY_SPI_WRITE_SPBLOCK);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "%s Time Out On Write Operation\n",__FUNCTION__);
+		spi_lock();
+		return ETIME;
+	}
+	if (!spi_mxic_spb_lock_check()){
+		printk(KERN_ERR "%s check error\n",__FUNCTION__);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/* Read the 32-bit password register. */
+uint32_t spi_mxic_read_password(void)
+{
+	return spi_read_four(RUBY_SPI_READ_PASSWORD);
+}
+
+/*
+ * Program the 32-bit password and verify by reading it back.
+ * Returns 0 on success, ETIME (positive) on timeout, -1 on readback
+ * mismatch. (RUBY_SPI_WRITE_PASWORD spelling comes from the register
+ * header -- do not "fix" here.)
+ */
+int spi_mxic_write_password(unsigned int value)
+{
+	unsigned int data;
+	spi_unlock();
+	spi_write_four_prefix_4null(RUBY_SPI_WRITE_PASWORD,value);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "%s Time Out On Write Operation\n",__FUNCTION__);
+		spi_lock();
+		return ETIME;
+	}
+	if ( (data=spi_mxic_read_password())!= value){
+		printk(KERN_ERR "%s password check error 0x%x\n",__FUNCTION__, data);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+
+/*
+ * Present `value` as the password to clear the SPB lock. Success is
+ * detected when spi_mxic_spb_lock_check() no longer reports the locked
+ * state. Returns 0, ETIME (positive) on timeout, -1 on failure.
+ */
+int spi_mxic_write_password_unlock(unsigned int value)
+{
+	spi_unlock();
+	spi_write_four_prefix_4null(RUBY_SPI_UNLOCK_PASSWORD,value);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "%s Time Out On Write Operation\n",__FUNCTION__);
+		spi_lock();
+		return ETIME;
+	}
+	if (spi_mxic_spb_lock_check()){
+		printk(KERN_ERR "%s check error\n",__FUNCTION__);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/* Returns 0 when the lock register's PPM bit is set, 1 when clear
+ * (the bit reads as clear once password mode has been enabled). */
+int spi_mxic_password_mode_check(void)
+{
+	unsigned int lock = spi_mxic_read_lock();
+
+	return (lock & SPI_LOCK_PPM) ? 0 : 1;
+}
+
+/*
+ * One-time (OTP) enable of password mode by writing the lock register
+ * with the PPM bit cleared (~SPI_LOCK_PPM), verified afterwards.
+ * Returns 0, ETIME (positive) on timeout, -1 on verification failure.
+ */
+int spi_mxic_enable_password_mode_otp(void)
+{
+	spi_unlock();
+	spi_write_two(RUBY_SPI_WRITE_LOCK,~SPI_LOCK_PPM);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "%s Time Out On Write Operation\n",__FUNCTION__);
+		spi_lock();
+		return ETIME;
+	}
+	if (!spi_mxic_password_mode_check()){
+		printk(KERN_ERR "%s check error\n",__FUNCTION__);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+
+/* Read the dynamic-protection bit (DPB) for the sector containing `addr`. */
+int spi_mxic_read_dpb(unsigned int addr)
+{
+	return spi_read_one_addr(RUBY_SPI_READ_DPB, addr);
+}
+
+/* Returns 1 when the DPB for `addr` reads non-zero, 0 otherwise. */
+int spi_mxic_dpb_check(unsigned int addr)
+{
+	return (spi_mxic_read_dpb(addr) != 0) ? 1 : 0;
+}
+
+
+/*
+ * Send command `cmd` with a 4-byte address using a five-byte pass --
+ * presumably one dummy cycle ahead of the address, as the DPB write
+ * command requires (TODO confirm against controller docs).
+ * Always returns 0.
+ */
+int spi_write_addr_prefix_1null(unsigned int cmd, unsigned int addr)
+{
+
+	uint32_t spi_ctrl_val;
+	int spi_cmd;
+
+	if (cmd&RUBY_SPI_BASE_ADDR)
+		spi_cmd = cmd;
+	else
+		spi_cmd = RUBY_SPI_CMD_MASK(cmd) + RUBY_SPI_BASE_ADDR;
+
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_FIVE_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(SPI_MEM_ADDR_4B(addr), spi_cmd);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	return 0;
+
+}
+
+/*
+ * Clear the DPB for the sector containing `addr` and verify it reads
+ * back as zero. Returns 0, ETIME (positive) on timeout, -1 when the
+ * bit is still set afterwards.
+ */
+int spi_mxic_erase_dpb(unsigned int addr)
+{
+	unsigned int data;
+	spi_unlock();
+	spi_write_addr_prefix_1null(RUBY_SPI_WRITE_DPB,addr);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "%s Time Out On Write Operation\n",__FUNCTION__);
+		spi_lock();
+		return ETIME;
+	}
+	if ((data=spi_mxic_dpb_check(addr))){
+		printk(KERN_ERR "%s check error %x\n",__FUNCTION__,data);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/*
+ * Clear all DPBs via the gang-block-unlock command (written directly,
+ * with the pass-length temporarily set to command-only).
+ * Returns 0, ETIME (positive) on timeout. No readback verification.
+ */
+int spi_mxic_erase_alldpb(void)
+{
+	uint32_t spi_ctrl_val;
+
+	spi_unlock();
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_CMD_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, RUBY_SPI_GBLOCK_UNLOCK);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "%s Time Out On Write Operation\n",__FUNCTION__);
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/*
+ * Set all DPBs via the gang-block-lock command (written directly, with
+ * the pass-length temporarily set to command-only).
+ * Returns 0, ETIME (positive) on timeout. No readback verification.
+ */
+int spi_mxic_write_alldpb(void)
+{
+	uint32_t spi_ctrl_val;
+
+	spi_unlock();
+	spi_ctrl_val = readl(RUBY_SPI_CONTROL);
+	writel(RUBY_SPI_PASS_CMD_MASK(spi_ctrl_val), RUBY_SPI_CONTROL);
+	writel(0, RUBY_SPI_GBLOCK_LOCK);
+	writel(spi_ctrl_val, RUBY_SPI_CONTROL);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "%s Time Out On Write Operation\n",__FUNCTION__);
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/* Returns 0 when the lock register's SPMOTP bit is set, 1 when clear
+ * (the bit reads as clear once SPB-OTP mode has been enabled). */
+int spi_mxic_spbotp_mode_check(void)
+{
+	unsigned int lock = spi_mxic_read_lock();
+
+	return (lock & SPI_LOCK_SPMOTP) ? 0 : 1;
+}
+
+/*
+ * One-time (OTP) enable of SPB-OTP mode: clears both the SPMOTP and
+ * SPM bits in the lock register in a single write, then verifies.
+ * Returns 0, ETIME (positive) on timeout, -1 on verification failure.
+ */
+int spi_mxic_enable_spbotp_mode_otp(void)
+{
+	unsigned int val;
+	spi_unlock();
+	val = ~SPI_LOCK_SPMOTP & ~SPI_LOCK_SPM;
+	spi_write_two(RUBY_SPI_WRITE_LOCK,val);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	if (!spi_mxic_spbotp_mode_check()){
+		printk(KERN_ERR "%s check error\n",__FUNCTION__);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/* Micron non-volatile lock bit (NLB) helpers: thin aliases over the
+ * MXIC SPB routines, since both chips use the same controller commands. */
+int spi_micron_nlb_mode_check(void)
+{
+	return spi_mxic_spb_mode_check();
+}
+
+int spi_micron_enable_nlb_mode_otp(void)
+{
+	return spi_mxic_enable_spb_mode_otp();
+}
+
+int spi_micron_read_nlb(unsigned int addr)
+{
+	return spi_mxic_read_spb(addr);
+}
+
+/* Returns 1 when the NLB for `addr` reads zero, 0 otherwise
+ * (note: inverse polarity of spi_mxic_spb_check()). */
+int spi_micron_nlb_check(unsigned int addr)
+{
+	return (spi_mxic_read_spb(addr) == 0) ? 1 : 0;
+}
+
+/*
+ * Set the NLB for the sector containing `addr` and verify via
+ * spi_micron_nlb_check(). Returns 0, ETIME (positive) on timeout,
+ * -1 on failed verification.
+ */
+int spi_micron_write_nlb(unsigned int addr)
+{
+	spi_unlock();
+	spi_write_addr(RUBY_SPI_WRITE_SPB,addr);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	if (!spi_micron_nlb_check(addr)){
+		printk(KERN_ERR "%s check error at 0x%x\n",__FUNCTION__, addr);
+		spi_lock();
+		return -1;
+		}
+	spi_lock();
+	return 0;
+}
+
+/*
+ * Erase (clear) all Micron non-volatile lock bits with one command.
+ * Returns 0 on success, ETIME (positive) on flash-ready timeout.
+ * Fix: corrected "Timeut" typo in the error message.
+ */
+int spi_micron_erase_allnlb(void)
+{
+	spi_unlock();
+	spi_write_cmd(RUBY_SPI_ERASE_SPB);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Timeout On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/* Read the Micron status register. */
+uint32_t spi_micron_read_status(void)
+{
+	return spi_read_one(RUBY_SPI_READ_STATUS);
+}
+
+
+/*
+ * Write the Micron status register. Returns 0 on success, ETIME
+ * (positive) on flash-ready timeout. No readback verification.
+ */
+int spi_micron_write_status(unsigned int status)
+{
+	spi_unlock();
+	spi_write_one(RUBY_SPI_WRITE_STATUS,status);
+	if ((spi_api_flash_status()) == -1){
+		printk(KERN_ERR "Time Out On Write Operation\n");
+		spi_lock();
+		return ETIME;
+	}
+	spi_lock();
+	return 0;
+}
+
+/* Extract the block-protect (BP) field from the Micron status register.
+ * The register is read into a local once so the extraction macro cannot
+ * trigger repeated hardware reads. */
+uint32_t spi_micron_read_status_bp(void)
+{
+	unsigned int status = spi_micron_read_status();
+
+	return SPI_MICRON_STATUS_BP(status);
+}
+
+/* Replace the block-protect (BP) field of the Micron status register
+ * with `bp`, preserving all other bits. Returns spi_micron_write_status()
+ * result (0 or positive ETIME). */
+int spi_micron_write_status_bp(unsigned int bp)
+{
+	unsigned int status = spi_micron_read_status();
+
+	status = (status & ~SPI_MICRON_STATUS_BP_MASK) | SPI_MICRON_BP_STATUS(bp);
+
+	return spi_micron_write_status(status);
+}
+
+/* Set the bottom-protect bit in the Micron status register, preserving
+ * the other bits. Returns spi_micron_write_status() result. */
+int spi_micron_write_status_bottom(void)
+{
+	unsigned int status = spi_micron_read_status();
+
+	return spi_micron_write_status(status | SPI_MICRON_STATUS_BOTTON_MASK);
+}
+
+/* Returns 1 when the bottom-protect bit is set in the Micron status
+ * register, 0 otherwise. */
+int spi_micron_read_status_bottom(void)
+{
+	unsigned int status = spi_micron_read_status();
+
+	return (status & SPI_MICRON_STATUS_BOTTON_MASK) ? 1 : 0;
+}
+
+
+/* Set the status-register lock bit (one-time), preserving the other
+ * bits. Returns spi_micron_write_status() result. */
+int spi_micron_enable_status_lock_otp(void)
+{
+	unsigned int status = spi_micron_read_status();
+
+	return spi_micron_write_status(status | SPI_MICRON_STATUS_LOCK_MASK);
+}
+
+#ifdef SPI_PROC_DEBUG
+#include "emac_lib.h"
+#define SPI_REG_PROC_FILE_NAME	"spi_reg"
+#define SPI_CMD_MAX_LEN	30
+
+/*
+ * /proc/spi_reg write handler: parse "<cmd> [hex-arg]" and dispatch to
+ * the MXIC/Micron protection helpers above.
+ *
+ * Fixes over the original:
+ *  - sscanf used an unbounded %s into cmd_mode[20]; now bounded (%19s).
+ *  - cmd_mode/spidata could be read uninitialized when parsing failed;
+ *    both are now zero-initialized.
+ *  - helpers return positive ETIME, but a positive return from a proc
+ *    write handler means "bytes consumed"; positive errors are negated.
+ */
+static int spi_reg_rw_proc(struct file *file, const char __user *buffer, unsigned long count, void *data)
+{
+	char cmd[SPI_CMD_MAX_LEN];
+	char cmd_mode[20] = {0};
+	unsigned int spidata = 0;
+	int ret = 0;
+	int val;
+
+	if (!count)
+		return -EINVAL;
+	else if (count > (SPI_CMD_MAX_LEN - 1))
+		return -EINVAL;
+	else if (copy_from_user(cmd, buffer, count))
+		return -EINVAL;
+
+	cmd[count] = '\0';
+
+	/* Field width 19 keeps the token inside cmd_mode[20]. */
+	sscanf(cmd, "%19s %x", cmd_mode, &spidata);
+
+	if (!strncmp(cmd_mode, "proe", 4)) {		/* enable protection mode */
+		ret = spi_mxic_enable_protect_mode_otp();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "proc", 4)) {	/* protection mode check */
+		val = spi_mxic_protect_mode_check();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "lockr", 5)) {	/* lock register read */
+		val = spi_mxic_read_lock();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "secur", 5)) {	/* security register read */
+		val = spi_mxic_read_scur();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "spme", 4)) {	/* enable spb mode */
+		ret = spi_mxic_enable_spb_mode_otp();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "spmc", 4)) {	/* spb mode check */
+		val = spi_mxic_spb_mode_check();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "spmotpe", 7)) {	/* enable spmotp mode */
+		ret = spi_mxic_enable_spbotp_mode_otp();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "spmotpc", 7)) {	/* spmotp mode check */
+		val = spi_mxic_spbotp_mode_check();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "spbw", 4)) {	/* spbw addr: set sector spb */
+		ret = spi_mxic_write_spb(spidata);
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "spbr", 4)) {	/* spbr addr: read sector spb */
+		val = spi_mxic_read_spb(spidata);
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "spbc", 4)) {	/* spbc addr: sector spb check */
+		val = spi_mxic_spb_check(spidata);
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "spbae", 5)) {	/* erase all spb */
+		ret = spi_mxic_erase_allspb();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "pww", 3)) {	/* pww password: set password */
+		ret = spi_mxic_write_password(spidata);
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "pwr", 3)) {	/* read password */
+		val = spi_mxic_read_password();
+		printk(KERN_ERR"%08x\n", val);
+	} else if (!strncmp(cmd_mode, "pwme", 4)) {	/* enable password mode */
+		ret = spi_mxic_enable_password_mode_otp();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "pwmc", 4)) {	/* password mode check */
+		val = spi_mxic_password_mode_check();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "pwu", 3)) {	/* pwu password: unlock to reset spb lock */
+		ret = spi_mxic_write_password_unlock(spidata);
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "spblkw", 6)) {	/* set spb lock */
+		ret = spi_mxic_write_spb_lock();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "spblkr", 6)) {	/* read spb lock */
+		val = spi_mxic_read_spb_lock();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "dpbaw", 5)) {	/* set all dpb */
+		ret = spi_mxic_write_alldpb();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "dpbae", 5)) {	/* erase all dpb */
+		ret = spi_mxic_erase_alldpb();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "dpbe", 4)) {	/* dpbe addr: erase sector dpb */
+		ret = spi_mxic_erase_dpb(spidata);
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "dpbr", 4)) {	/* dpbr addr: read sector dpb */
+		val = spi_mxic_read_dpb(spidata);
+		printk(KERN_ERR"%2x\n", val);
+	} else if (!strncmp(cmd_mode, "dpbc", 4)) {	/* dpbc addr: sector dpb check */
+		val = spi_mxic_dpb_check(spidata);
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "nlbme", 5)) {	/* enable nlb mode */
+		ret = spi_micron_enable_nlb_mode_otp();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "nlbmc", 5)) {	/* check nlb mode */
+		val = spi_micron_nlb_mode_check();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "nlbc", 4)) {	/* nlbc addr: sector nlb check */
+		val = spi_micron_nlb_check(spidata);
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "nlbw", 4)) {	/* nlbw addr: set sector nlb */
+		ret = spi_micron_write_nlb(spidata);
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "nlbr", 4)) {	/* nlbr addr: read sector nlb */
+		val = spi_micron_read_nlb(spidata);
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "nlbae", 5)) {	/* erase all nlb */
+		ret = spi_micron_erase_allnlb();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "stsr", 4)) {	/* read status */
+		val = spi_micron_read_status();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "stsle", 5)) {	/* set status lock */
+		ret = spi_micron_enable_status_lock_otp();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "bpr", 3)) {	/* read bp[3:0] */
+		val = spi_micron_read_status_bp();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "bpw", 3)) {	/* bpw value: write bp[3:0] */
+		ret = spi_micron_write_status_bp(spidata);
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else if (!strncmp(cmd_mode, "btr", 3)) {	/* read bottom */
+		val = spi_micron_read_status_bottom();
+		printk(KERN_ERR"%x\n", val);
+	} else if (!strncmp(cmd_mode, "btw", 3)) {	/* write bottom */
+		ret = spi_micron_write_status_bottom();
+		if (!ret)
+			printk(KERN_ERR"complete\n");
+	} else {
+		printk(KERN_ERR"usage: echo [proe|proc|spme|spmc|pwme|pwmc|spbw|spbr|spbc|spbae|pww|pwr|pwu|spblkr|spblkw|dpbaw|dpbae|dpbe|dpbr|dpbc] [addr|password] > /proc/%s\n", SPI_REG_PROC_FILE_NAME);
+	}
+
+	if (ret > 0)
+		ret = -ret;	/* helpers return positive ETIME; write handlers must return negative errno */
+	return ret ? ret : count;
+}
+
+/* Fill `name_buf` with the proc entry name ("spi_reg"); the buffer must
+ * hold at least sizeof(SPI_REG_PROC_FILE_NAME) bytes (callers use 12). */
+static void spi_reg_proc_name(char *name_buf)
+{
+	sprintf(name_buf, "%s", SPI_REG_PROC_FILE_NAME);
+}
+
+/*
+ * Register the /proc/spi_reg control file (mode 0600, write-only use).
+ * Returns 0 on success, -ENOMEM if the entry cannot be created.
+ *
+ * Fix: `entry` was declared after executable statements, which trips
+ * the kernel's -Wdeclaration-after-statement; declarations now lead.
+ */
+int spi_reg_create_proc(void)
+{
+	struct proc_dir_entry *entry;
+	char proc_name[12] = {0};
+
+	spi_reg_proc_name(proc_name);
+
+	entry = create_proc_entry(proc_name, 0600, NULL);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	/* Write handler only; no read_proc is provided. */
+	entry->write_proc = spi_reg_rw_proc;
+
+	return 0;
+}
+
+
+/* Remove the /proc/spi_reg control file registered by
+ * spi_reg_create_proc(). */
+void spi_reg_remove_proc(void)
+{
+	char name_buf[12] = {0};
+
+	spi_reg_proc_name(name_buf);
+	remove_proc_entry(name_buf, NULL);
+}
+#endif
+
+/*
+ * Driver entry point (late_initcall): attach the single supported
+ * flash chip and, under SPI_PROC_DEBUG, install the /proc control file.
+ * NOTE(review): spi_reg_create_proc()'s return value is ignored and
+ * the "installed" message prints even if creation failed.
+ */
+static int __init spi_flash_driver_init(void)
+{
+	int ret = 0;
+
+	ret = spi_flash_attach(&g_flash);
+
+	printk(KERN_INFO"%s: SPI flash driver initialized %ssuccessfully!\n", SPI_FLASH_DRIVER, ret ? "un" : "");
+
+#ifdef SPI_PROC_DEBUG
+	spi_reg_create_proc();
+	printk(KERN_INFO"%s: SPI flash proc %s installed\n", SPI_FLASH_DRIVER, SPI_REG_PROC_FILE_NAME);
+#endif
+	return ret;
+}
+
+/* Module exit: tear down the MTD registration and free the descriptor. */
+static void __exit spi_flash_driver_exit(void)
+{
+	spi_flash_deattach(g_flash);
+}
+
+/* NOTE(review): exports spi_flash_status, which is not defined in this
+ * hunk (the status helper here is spi_api_flash_status) -- confirm the
+ * symbol exists earlier in this file. */
+EXPORT_SYMBOL(spi_flash_status);
+
+late_initcall(spi_flash_driver_init);
+module_exit(spi_flash_driver_exit);
diff --git a/drivers/qtn/ruby/troubleshoot.h b/drivers/qtn/ruby/troubleshoot.h
new file mode 100644
index 0000000..253343c
--- /dev/null
+++ b/drivers/qtn/ruby/troubleshoot.h
@@ -0,0 +1,47 @@
+/*
+ * (C) Copyright 2013 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __BOARD_QTN_TROUBLESHOOT_H
+#define __BOARD_QTN_TROUBLESHOOT_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <common/topaz_platform.h>
+
+/* Magic marker identifying a core-dump header. */
+#define HEADER_CORE_DUMP	(0xDEAD)
+
+/* Hook signature run when troubleshooting starts; receives the opaque
+ * context registered via arc_set_troubleshoot_start_hook(). */
+typedef int (*arc_troubleshoot_start_hook_cbk)(void *in_ctx);
+
+/* Declare the SRAM range usable as a safe area for crash data. */
+void arc_set_sram_safe_area(unsigned long sram_start, unsigned long sram_end);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+void arc_save_to_sram_safe_area(int compress_ratio);
+#endif
+
+/* Register a hook (and its context) invoked at troubleshoot start. */
+void arc_set_troubleshoot_start_hook(arc_troubleshoot_start_hook_cbk in_troubleshoot_start, void *in_ctx);
+
+/* Inside printk.c - not the ideal header for this, but since it's Quantenna added, is
+ * OK in here.
+ */
+void *get_log_buf(int *, int *, char **);
+
+#endif // #ifndef __BOARD_QTN_TROUBLESHOOT_H
+
diff --git a/drivers/qtn/ruby/unaligned_accounting.h b/drivers/qtn/ruby/unaligned_accounting.h
new file mode 100644
index 0000000..2182a7c
--- /dev/null
+++ b/drivers/qtn/ruby/unaligned_accounting.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright (c) 2008-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+#ifndef __UNALIGNED_ACCOUNTING_H
+#define __UNALIGNED_ACCOUNTING_H
+
+#ifdef CONFIG_ARC_MISALIGNED_ACCESS
+
+#define UNALIGNED_INST_OPCODES		32
+#define UNALIGNED_INSTPTR_BUFSIZE	2048
+
+/*
+ * Counters for misaligned-access fixups. Field names indicate the
+ * classification (user/kernel mode, read/write, 16/32-bit instruction,
+ * half/word access); the maintaining handler is not in this header --
+ * presumably the arch unaligned-access trap code. kernel_iptr records
+ * kernel instruction pointers at fault sites.
+ */
+struct unaligned_access_accounting {
+	unsigned long user;
+	unsigned long kernel;
+	unsigned long read;
+	unsigned long write;
+	unsigned long inst_16;
+	unsigned long inst_32;
+	unsigned long skipped;
+	unsigned long half;
+	unsigned long word;
+	unsigned long inst[UNALIGNED_INST_OPCODES];		// count each kind of instruction opcode
+	unsigned long kernel_iptr[UNALIGNED_INSTPTR_BUFSIZE];
+};
+
+extern struct unaligned_access_accounting unaligned_access_stats;
+
+#endif /* CONFIG_ARC_MISALIGNED_ACCESS */
+
+#endif // __UNALIGNED_ACCOUNTING_H
+
diff --git a/drivers/qtn/se95sensor/Makefile b/drivers/qtn/se95sensor/Makefile
new file mode 100644
index 0000000..c2a3b68
--- /dev/null
+++ b/drivers/qtn/se95sensor/Makefile
@@ -0,0 +1,44 @@
+#
+# Quantenna Communications Inc. Driver Makefile
+#
+# Author: Mats Aretun
+# 
+#
+# Flags used whether invoked by kbuild or standalone.
+EXTRA_CFLAGS	+= -Wall -Werror -I../drivers -I../include
+EXTRA_CFLAGS	+= -mlong-calls
+# KERNELRELEASE is set only when this file is included by the kernel
+# build system; otherwise the external-build half below applies.
+ifneq ($(KERNELRELEASE),)
+                       #---------------------------------#
+                       # Call from kernel build system   #
+                       #---------------------------------#
+
+#EXTRA_CFLAGS	+= -Wall -Werror -DQDRV_DEBUG -DQDRV -DQDRV_FEATURE_WGB -I../drivers -I../include
+qtsens-objs		+=	temp_sensor.o
+obj-m			+=	qtsens.o
+
+else
+                       #---------------------------------#
+                       # Call from external build system #
+                       #---------------------------------#
+#KERNELDIR	?= /lib/modules/$(shell uname -r)/build
+# NOTE(review): KERNELDIR points at linux-2.6.20.1 and CROSS targets ARM
+# with a fixed buildroot path -- presumably stale defaults for this ARC
+# platform; confirm before relying on the external-build path.
+KERNELDIR	?= ../../linux-2.6.20.1
+INSTALL		= INSTALL_MOD_PATH=../linux/modules
+CROSS		= ARCH=arm CROSS_COMPILE=../buildroot/build_arm/staging_dir/bin/arm-linux-
+PWD			:= $(shell pwd)
+
+default:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+install:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
+
+endif
+
+# clean/depend live outside the conditional so both modes can use them.
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions
+
+depend .depend dep:
+	$(CC) $(CFLAGS) -M *.c > .depend
+
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
diff --git a/drivers/qtn/se95sensor/temp_sensor.c b/drivers/qtn/se95sensor/temp_sensor.c
new file mode 100644
index 0000000..1016f91
--- /dev/null
+++ b/drivers/qtn/se95sensor/temp_sensor.c
@@ -0,0 +1,250 @@
+/**
+ * Temperature sensor driver
+ * Copyright (C) 2008 - 2014 Quantenna Communications Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+
+
+#define SE95_REG_TEMP	0x00
+#define SE95_REG_CONF	0x01
+#define SE95_REG_THYST	0x02
+#define SE95_REG_TOS	0x03
+#define SE95_REG_ID		0x05
+
+#define SE95_MANUFACTURER_ID	0xA1
+/*
+ * qcsapi guarantees temperature value to be no more than 5 seconds old.
+ * Do not break that promise.
+ */
+#define SE95_TEMP_UPDATE_PERIOD	(4 * HZ)
+
+#define SE95_DATA_VAL_RESOL		3125
+#define TWOS_COMP_13BIT(X) (((X) >= 4096) ? ((X) - 8192) : (X))
+
+/*
+ * Temperature sensor messages involve sending the address as a write,
+ * then performing the desired read operation
+ */
+enum {
+	SE95_MSG_IDX_WRITE = 0,
+	SE95_MSG_IDX_READ = 1,
+	SE95_MSG_LEN = 2,
+};
+
+typedef struct {
+	int temp_cal_13;	/* Current temperature with 13-bit precision */
+	unsigned long next_update_ts;
+	struct mutex update_mutex;
+} qtn_tsensor_state;
+
+static int qtn_tsensor_send_message(struct i2c_client *client,
+									uint8_t addr, void* buf, size_t len)
+{
+	int ret;
+	struct i2c_msg msgs[SE95_MSG_LEN];
+
+	if (unlikely(!client || !client->adapter)) {
+		return -ENODEV;
+	}
+
+	msgs[SE95_MSG_IDX_WRITE].addr = client->addr;
+	msgs[SE95_MSG_IDX_WRITE].flags = 0;
+	msgs[SE95_MSG_IDX_WRITE].buf = &addr;
+	msgs[SE95_MSG_IDX_WRITE].len = sizeof(addr);
+	msgs[SE95_MSG_IDX_READ].addr = client->addr;
+	msgs[SE95_MSG_IDX_READ].flags = I2C_M_RD;
+	msgs[SE95_MSG_IDX_READ].buf = buf;
+	msgs[SE95_MSG_IDX_READ].len = len;
+
+	ret = i2c_transfer(client->adapter, msgs, SE95_MSG_LEN);
+	if (ret == SE95_MSG_LEN) {
+		ret = 0;
+	} else if (ret >= 0) {
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+/*
+ * Convert raw temperature data from sensor to an actual temperature value (datasheet formula):
+ * 1. If the temp_data MSB = 0, then: Temp (°C) = +(temp_data) × value resolution
+ * 2. If the temp_data MSB = 1, then: Temp (°C) = −(two’s complement temp_data) × value resolution
+ */
+static inline int qtn_tsensor_convert_sdata_to_temp(uint16_t temp_data)
+{
+	return TWOS_COMP_13BIT(temp_data) * SE95_DATA_VAL_RESOL;
+}
+
+static inline int qtn_tsensor_update_temperature(struct i2c_client *client)
+{
+	qtn_tsensor_state *se95_state = i2c_get_clientdata(client);
+	uint8_t temp[2] = {0, 0};
+	int ret = 0;
+
+	mutex_lock(&se95_state->update_mutex);
+
+	if (!time_after(jiffies, se95_state->next_update_ts)) {
+		goto exit;
+	}
+
+	ret = qtn_tsensor_send_message(client, SE95_REG_TEMP, temp, sizeof(temp));
+	if (ret < 0) {
+		dev_warn(&client->dev, "Failed to read temperature from sensor\n");
+		se95_state->temp_cal_13 = 0;
+	} else {
+		uint16_t temp_cal_13 = (temp[0] << 8 | (temp[1] & 0xF8)) >> 3;
+		se95_state->temp_cal_13 = qtn_tsensor_convert_sdata_to_temp(temp_cal_13);
+	}
+
+	se95_state->next_update_ts = jiffies + SE95_TEMP_UPDATE_PERIOD;
+
+exit:
+	mutex_unlock(&se95_state->update_mutex);
+	return ret;
+}
+
+static int qtn_tsensor_probe(struct i2c_client *client,
+							const struct i2c_device_id *dev_id)
+{
+	int ret;
+	uint8_t id;
+	uint8_t over_thres[2] = {0, 0};
+	uint8_t hyster[2] = {0, 0};
+	qtn_tsensor_state *se95_state = NULL;
+
+	ret = qtn_tsensor_send_message(client, SE95_REG_ID, &id, sizeof(id));
+	if (ret < 0) {
+		dev_dbg(&client->dev, "Failed reading sensor ID: %d\n", ret);
+		return ret;
+	}
+	if (id != SE95_MANUFACTURER_ID) {
+		dev_dbg(&client->dev, "Unknown temperature sensor ID: 0x%x\n", id);
+		return -ENODEV;
+	}
+
+	ret = qtn_tsensor_send_message(client, SE95_REG_TOS, over_thres,
+									sizeof(over_thres));
+	if (ret < 0) {
+		dev_dbg(&client->dev, "Failed reading overtemp threshold: %d\n", ret);
+		return ret;
+	}
+
+	ret = qtn_tsensor_send_message(client, SE95_REG_THYST, hyster,
+									sizeof(hyster));
+	if (ret < 0) {
+		dev_dbg(&client->dev, "Failed reading hysteresis: %d\n", ret);
+		return ret;
+	}
+
+	se95_state = kzalloc(sizeof(qtn_tsensor_state), GFP_KERNEL);
+	if (!se95_state) {
+		return -ENOMEM;
+	}
+
+	i2c_set_clientdata(client, se95_state);
+	se95_state->next_update_ts = jiffies;
+	mutex_init(&se95_state->update_mutex);
+
+	printk(KERN_DEBUG "temp_sensor: id=0x%x\n\tover temp thresh=%x %x\n\t"
+			"hysteresis=%x %x\n",
+			id, over_thres[0], over_thres[1], hyster[0], hyster[1]);
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static int qtn_tsensor_remove(struct i2c_client *client)
+#else
+static int __devexit qtn_tsensor_remove(struct i2c_client *client)
+#endif
+{
+	qtn_tsensor_state *se95_state = i2c_get_clientdata(client);
+
+	if (se95_state) {
+		i2c_set_clientdata(client, NULL);
+		kfree(se95_state);
+	}
+	return 0;
+}
+
+static const struct i2c_device_id se95_ids[] = {
+	{ "se95", 0x49 },
+	{ }
+};
+
+static struct i2c_driver se95_driver = {
+	.driver = {
+		.name = "se95",
+		.owner = THIS_MODULE,
+	},
+	.probe	= qtn_tsensor_probe,
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	.remove	= qtn_tsensor_remove,
+#else
+	.remove	= __devexit_p(qtn_tsensor_remove),
+#endif
+	.class	= I2C_CLASS_HWMON,
+	.id_table = se95_ids,
+};
+
+static int __init qtn_tsensor_init(void)
+{
+	return i2c_add_driver(&se95_driver);
+}
+
+static void __exit qtn_tsensor_exit(void)
+{
+	i2c_del_driver(&se95_driver);
+}
+
+module_init (qtn_tsensor_init);
+module_exit (qtn_tsensor_exit);
+
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Temperature sensor I2C driver");
+MODULE_LICENSE("GPL");
+
+int qtn_tsensor_get_temperature(struct i2c_client *client, int *val)
+{
+	qtn_tsensor_state *se95_state;
+	int ret;
+
+	if (!client) {
+		return -ENODEV;
+	}
+
+	se95_state = i2c_get_clientdata(client);
+	if (!se95_state) {
+		return -ENODEV;
+	}
+
+	ret = qtn_tsensor_update_temperature(client);
+	*val = se95_state->temp_cal_13;
+
+	return ret;
+}
+EXPORT_SYMBOL(qtn_tsensor_get_temperature);
diff --git a/drivers/qtn/topaz/Kconfig b/drivers/qtn/topaz/Kconfig
new file mode 100644
index 0000000..1705817
--- /dev/null
+++ b/drivers/qtn/topaz/Kconfig
@@ -0,0 +1,47 @@
+#
+# Quantenna Topaz board
+#
+
+menu "Quantenna Topaz"
+
+config QUANTENNA_TOPAZ
+	bool "Quantenna Topaz chipset support"
+	select QUANTENNA_RUBY
+	default n
+	help
+	  Additional drivers for Quantenna Topaz accelerated datapath
+
+config ARCH_TOPAZ_FWT
+	tristate "Topaz FWT driver"
+	depends on QUANTENNA_TOPAZ
+	default y
+	help
+	  Topaz Forwarding Table Driver
+
+config ARCH_TOPAZ_TQE
+	tristate "Topaz TQE driver"
+	depends on QUANTENNA_TOPAZ
+	select ARCH_TOPAZ_FWT
+	default y
+	help
+	  Topaz Transmit Queueing Engine Driver
+
+config ARCH_TOPAZ_SWITCH_TEST
+	tristate "Topaz Switch Test Modules"
+	depends on ARCH_TOPAZ_TQE
+	default n
+	help
+	  A set of test modules to control the various Topaz
+	  accelerated datapath elements
+
+config ARCH_TOPAZ_EMAC
+	tristate "Topaz Arasan EMAC driver"
+	depends on QUANTENNA_TOPAZ
+	default y
+	select ARCH_RUBY_EMAC_LIB
+	select ARCH_TOPAZ_TQE
+	help
+	  Topaz Arasan EMAC AHB driver
+
+endmenu
+
diff --git a/drivers/qtn/topaz/Makefile b/drivers/qtn/topaz/Makefile
new file mode 100644
index 0000000..8966938
--- /dev/null
+++ b/drivers/qtn/topaz/Makefile
@@ -0,0 +1,75 @@
+EXTRA_CFLAGS +=	-Wall -Werror -Wno-unknown-pragmas -mlong-calls \
+		-I../include \
+		-I../ \
+		-I../drivers/include/shared \
+		-I../drivers/include/kernel
+
+ifeq ($(board_config),topaz_config)
+	EXTRA_CFLAGS += -DTOPAZ_VB_CONFIG -DTOPAZ_CONGE_CONFIG -DTOPAZ_SRAM_CONFIG
+else ifeq ($(board_config),topaz_vb_config)
+	EXTRA_CFLAGS += -DTOPAZ_VB_CONFIG -DTOPAZ_CONGE_CONFIG -DTOPAZ_SRAM_CONFIG
+else ifeq ($(board_config),topaz_umm_config)
+	EXTRA_CFLAGS += -DTOPAZ_VB_CONFIG -DTOPAZ_CONGE_CONFIG -DTOPAZ_SRAM_CONFIG
+else ifeq ($(board_config),topaz_rfic6_config)
+	EXTRA_CFLAGS += -DTOPAZ_RFIC6_CONFIG -DTOPAZ_CONGE_CONFIG -DTOPAZ_SRAM_CONFIG
+else ifeq ($(board_config),topaz_rgmii_config)
+	EXTRA_CFLAGS += -DTOPAZ_CONGE_CONFIG -DTOPAZ_RGMII_CONFIG -DTOPAZ_SRAM_CONFIG
+else ifeq ($(board_config),topaz_vzn_config)
+	EXTRA_CFLAGS += -DTOPAZ_CONGE_CONFIG -DTOPAZ_VZN_CONFIG -DTOPAZ_SRAM_CONFIG
+endif
+
+ifeq ($(board_config),topaz_msft_config)
+	EXTRA_CFLAGS += -DERICSSON_CONFIG
+else
+	ifeq ($(board_config),topaz_msmr_config)
+		EXTRA_CFLAGS += -DERICSSON_CONFIG
+	endif
+endif
+
+obj-y += tqe.o hbm.o busmon.o temp_sens.o topaz_congest_queue.o
+
+ifeq ($(QTN_EXTERNAL_MODULES),y)
+obj-$(CONFIG_ARCH_TOPAZ_FWT) += fwt_if.o
+obj-$(CONFIG_ARCH_TOPAZ_TQE) += switch_tqe.o
+obj-$(CONFIG_ARCH_TOPAZ_TQE) += switch_vlan.o
+obj-$(CONFIG_ARCH_TOPAZ_EMAC) += dpi.o
+obj-$(CONFIG_ARCH_TOPAZ_EMAC) += switch_emac.o
+endif
+
+ifneq ($(CONFIG_ARCH_TOPAZ_SWITCH_TEST),)
+	TOPAZ_TEST ?= $(CONFIG_ARCH_TOPAZ_SWITCH_TEST)
+endif
+
+ifneq ($(TOPAZ_TEST),)
+	obj-m += busmon_test.o
+	FWT_TEST ?= $(TOPAZ_TEST)
+	DPI_TEST ?= $(TOPAZ_TEST)
+	VLAN_TEST ?= $(TOPAZ_TEST)
+	IPPRT_TEST ?= $(TOPAZ_TEST)
+	TQE_PKTGEN ?= $(TOPAZ_TEST)
+endif
+
+ifneq ($(TOPAZ_FWT_MAINTENANCE_ENABLE),)
+	EXTRA_CFLAGS += -DTOPAZ_FWT_MAINTENANCE_ENABLE=$(TOPAZ_FWT_MAINTENANCE_ENABLE)
+endif
+
+obj-$(FWT_TEST) += fwt_test.o
+obj-$(DPI_TEST) += dpi_test.o
+obj-$(VLAN_TEST) += vlan_test.o
+obj-$(IPPRT_TEST) += ipprt_test.o
+obj-$(TQE_PKTGEN) += switch_pktgen.o
+
+obj-m += $(obj-1)
+
+test_gen_pcap: test_gen_pcap.c
+	gcc -Wall -Werror -Wextra -O -g $< -lpcap -o $@ -MD -MF $@.d
+-include test_gen_pcap.d
+
+switch_test.pcap: test_gen_pcap
+	./$< $@
+
+fwttest:
+	gcc -DCONSOLE_TEST -Wall -Werror -Wextra -O2 -g fwt_test.c -o fwt.test.o -lz	\
+		-I../../ -I../../include/
+	./fwt.test.o
+
diff --git a/drivers/qtn/topaz/amber.c b/drivers/qtn/topaz/amber.c
new file mode 100644
index 0000000..3f41926
--- /dev/null
+++ b/drivers/qtn/topaz/amber.c
@@ -0,0 +1,401 @@
+/*
+ * (C) Copyright 2015 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>	/* for copy_from_user */
+#include <asm/gpio.h>
+#include <asm/hardware.h>
+
+#include <qtn/amber.h>
+#include <common/topaz_platform.h>
+#include <linux/delay.h>
+
+#define PROC_AMBER_DIR_NAME "amber"
+#define PROC_AMBER_WIFI2SOC_INT_FILE_NAME "wifi2soc_int"
+
+static struct proc_dir_entry *amber_dir;
+static struct proc_dir_entry *amber_wifi2soc_file;
+
+static unsigned long interrupt_errors_mask = 0;
+static unsigned long interrupt_errors_store = 0;
+
+static struct timer_list amber_timer;
+static DEFINE_SPINLOCK(wifi2soc_lock);
+static int initialized = 0;
+
+static struct {
+	const char *token;
+	const int val;
+} wifi2soc_int_token_map[] =
+{
+	{"SYSTEM_READY",	TOPAZ_AMBER_WIFI2SOC_SYSTEM_READY},
+	{"CAL_COMPLETE",	TOPAZ_AMBER_WIFI2SOC_CAL_COMPLETE},
+	{"CAL_CHANGE_REQ",	TOPAZ_AMBER_WIFI2SOC_CAL_CHANGE_REQ},
+	{"SHUTDOWN",		TOPAZ_AMBER_WIFI2SOC_SHUTDOWN},
+	{"WATCHDOG",		TOPAZ_AMBER_WIFI2SOC_WATCHDOG},
+	{"EMERGENCY",		TOPAZ_AMBER_WIFI2SOC_EMERGENCY},
+	{"NFS_MOUNT_FAILURE",	TOPAZ_AMBER_WIFI2SOC_NFS_MOUNT_FAILURE},
+	{"NFS_ACCESS_FAILURE",	TOPAZ_AMBER_WIFI2SOC_NFS_ACCESS_FAILURE},
+	{"NFS_INTEGRITY_FAILURE",TOPAZ_AMBER_WIFI2SOC_NFS_INTEGRITY_FAILURE},
+	{"WAKE_ON_WLAN",	TOPAZ_AMBER_WIFI2SOC_WAKE_ON_WLAN}
+};
+
+static enum amber_shutdown_code shutdown_code = AMBER_SD_CODE_GRACEFUL;
+
+void amber_set_shutdown_code(enum amber_shutdown_code code)
+{
+	shutdown_code = code;
+}
+EXPORT_SYMBOL(amber_set_shutdown_code);
+
+int amber_trigger_wifi2soc_interrupt_sync(unsigned long interrupt_code)
+{
+	interrupt_code &= interrupt_errors_mask;
+
+	if (interrupt_code == 0) {
+		return 0;
+	}
+
+	while (readl(IO_ADDRESS(TOPAZ_AMBER_WIFI2SOC_ERROR_REG)) & interrupt_errors_mask) {
+		/*
+		 * Idle wait for the previous error codes to be cleared by ST Host.
+		 * If we get stuck here, this means ST Host is hung, so it does not matter
+		 * where we halt - here, or few cycles later in machine_halt().
+		 */
+	};
+
+	writel(interrupt_code, IO_ADDRESS(TOPAZ_AMBER_WIFI2SOC_ERROR_REG));
+	writel(TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT, IO_ADDRESS(GPIO_OUTPUT_MASK));
+	writel(TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT, IO_ADDRESS(GPIO_OUTPUT));
+	udelay(1);
+	writel(0, IO_ADDRESS(GPIO_OUTPUT));
+	writel(0, IO_ADDRESS(GPIO_OUTPUT_MASK));
+
+	return 0;
+}
+
+void amber_shutdown(void)
+{
+	unsigned long interrupt_code = 0;
+
+	if (!initialized) {
+		/* Any early reboot is considered emergency */
+		interrupt_code = TOPAZ_AMBER_WIFI2SOC_EMERGENCY;
+		interrupt_errors_mask = readl(IO_ADDRESS(TOPAZ_AMBER_WIFI2SOC_MASK_REG));
+	} else {
+		switch (shutdown_code) {
+		case AMBER_SD_CODE_GRACEFUL:
+			interrupt_code = TOPAZ_AMBER_WIFI2SOC_SHUTDOWN;
+			break;
+		case AMBER_SD_CODE_EMERGENCY:
+			interrupt_code = TOPAZ_AMBER_WIFI2SOC_EMERGENCY;
+			break;
+		case AMBER_SD_CODE_NONE:
+		default:
+			/* Don't send any error code */
+			interrupt_code = 0;
+			break;
+		}
+	}
+
+	amber_trigger_wifi2soc_interrupt_sync(interrupt_code);
+}
+EXPORT_SYMBOL(amber_shutdown);
+
+static int amber_post_stored_interrupts(void)
+{
+	int need_timer = 0;
+
+	if (interrupt_errors_store == 0) {
+		return 0;
+	}
+
+	if ((readl(IO_ADDRESS(TOPAZ_AMBER_WIFI2SOC_ERROR_REG)) & interrupt_errors_mask) == 0) {
+
+		writel(interrupt_errors_store, IO_ADDRESS(TOPAZ_AMBER_WIFI2SOC_ERROR_REG));
+
+		interrupt_errors_store = 0;
+
+		/*
+		 * Pulse WIFI2SOC_INT_O. Minimal pulse width is 5 ns.
+		 * The below two calls compile to JL instructions to call gpio_set_value(),
+		 * which in turn indirectly calls gpio set handler.
+		 * This means the number of 500 MHz cycles between line assert and de-assert is
+		 * sufficiently large to fulfill the minimal pulse width requirement.
+		 */
+
+		gpio_set_value(TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT, 1);
+		gpio_set_value(TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT, 0);
+
+		need_timer = 0;
+	} else {
+		need_timer = 1;
+	}
+
+	return need_timer;
+}
+
+static void amber_timer_func(unsigned long data)
+{
+	amber_trigger_wifi2soc_interrupt(0);
+}
+
+int amber_trigger_wifi2soc_interrupt(unsigned long interrupt_code)
+{
+	int need_timer = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wifi2soc_lock, flags);
+	interrupt_errors_store |= interrupt_code & interrupt_errors_mask;
+	need_timer = amber_post_stored_interrupts();
+	spin_unlock_irqrestore(&wifi2soc_lock, flags);
+
+	if (need_timer) {
+		mod_timer(&amber_timer, jiffies + AMBER_TIMER_PERIOD_JIFFIES);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(amber_trigger_wifi2soc_interrupt);
+
+static int amber_wifi2soc_int_proc_read(char *buffer,
+		  char **buffer_location, off_t offset,
+		  int buffer_length, int *eof, void *data)
+{
+	int i;
+	int written = 0;
+	unsigned long interrupt_code = readl(IO_ADDRESS(TOPAZ_AMBER_WIFI2SOC_ERROR_REG));
+
+	*eof = 1;
+
+	for (i = 0; i < ARRAY_SIZE(wifi2soc_int_token_map); i++) {
+		if (interrupt_code & wifi2soc_int_token_map[i].val) {
+			int ret = snprintf(buffer + written,buffer_length - written, "%s\n",
+					wifi2soc_int_token_map[i].token);
+
+			if (ret < 0) {
+				break;
+			}
+
+			if (ret + written >= buffer_length) {
+				*eof = 0;
+				written = buffer_length;
+				break;
+			}
+
+			written += ret;
+		}
+	}
+
+	return written;
+}
+
+static int amber_wifi2soc_int_proc_write(struct file *file, const char *buffer,
+		       unsigned long count, void *data)
+{
+	char *procfs_buffer;
+	char *ptr;
+	char *token;
+	char found_token = 0;
+	int i;
+
+	procfs_buffer = kmalloc(count, GFP_KERNEL);
+
+	if (procfs_buffer == NULL) {
+		printk("AMBER: wifi2soc error - out of memory\n");
+		return -ENOMEM;
+	}
+
+	/* write data to the buffer */
+	if (copy_from_user(procfs_buffer, buffer, count)) {
+		count = -EFAULT; goto bail;
+	}
+
+	ptr = (char *)procfs_buffer;
+	ptr[count - 1] = '\0';
+	token = strsep(&ptr, "\n");
+
+	for (i = 0; i < ARRAY_SIZE(wifi2soc_int_token_map); i++) {
+		if (strcmp(token, wifi2soc_int_token_map[i].token) == 0) {
+			amber_trigger_wifi2soc_interrupt(wifi2soc_int_token_map[i].val);
+			found_token = 1;
+			break;
+		}
+	}
+
+	if (!found_token) {
+		printk(KERN_ERR "AMBER: wifi2soc error - unable to parse token %s\n", token);
+	}
+
+bail:
+	kfree(procfs_buffer);
+	return count;
+}
+
+static int amber_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	amber_set_shutdown_code(AMBER_SD_CODE_EMERGENCY);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block amber_panic_block = {
+	.notifier_call = amber_panic_notifier,
+};
+
+static void amber_soc2wifi_panic_work(struct work_struct *work)
+{
+	printk("AMBER: SoC host panic - halting Amber kernel\n");
+	amber_set_shutdown_code(AMBER_SD_CODE_NONE);
+	kernel_halt();
+}
+
+static DECLARE_WORK(amber_soc2wifi_panic_wq, &amber_soc2wifi_panic_work);
+
+static void amber_soc2wifi_wake_on_lan_work(struct work_struct *work)
+{
+	printk("AMBER: SoC host - waking up on LAN\n");
+}
+
+static DECLARE_WORK(amber_soc2wifi_wake_on_lan_wq, &amber_soc2wifi_wake_on_lan_work);
+
+static irqreturn_t amber_soc2wifi_irq_handler(int irq, void *dev_id)
+{
+	unsigned long int_status = readl(IO_ADDRESS(TOPAZ_AMBER_GPIO_IRQ_STATUS));
+	unsigned long int_code;
+
+	if ((int_status & (1 << TOPAZ_AMBER_SOC2WIFI_INT_INPUT)) == 0) {
+		/* Filter other GPIO interrupts */
+		return IRQ_NONE;
+	}
+
+	int_code = readl(IO_ADDRESS(TOPAZ_AMBER_SOC2WIFI_ERROR_REG));
+
+	if (int_code & TOPAZ_AMBER_SOC2WIFI_KERNEL_PANIC) {
+		/*
+		 * Clear the mask to prevent anymore wifi2soc interrupts posting
+		 * and halt the kernel.
+		 */
+		interrupt_errors_mask = 0;
+		schedule_work(&amber_soc2wifi_panic_wq);
+	}
+
+	if (int_code & TOPAZ_AMBER_SOC2WIFI_WAKE_ON_LAN) {
+		schedule_work(&amber_soc2wifi_wake_on_lan_wq);
+	}
+
+	writel(1 << TOPAZ_AMBER_SOC2WIFI_INT_INPUT, IO_ADDRESS(TOPAZ_AMBER_GPIO_IRQ_CLEAR));
+
+	return IRQ_HANDLED;
+}
+
+static int __init amber_init(void)
+{
+	int err;
+
+	amber_dir = proc_mkdir(PROC_AMBER_DIR_NAME, NULL);
+
+	amber_wifi2soc_file = create_proc_entry(PROC_AMBER_WIFI2SOC_INT_FILE_NAME, 0644, amber_dir);
+
+	if (amber_wifi2soc_file == NULL) {
+		printk(KERN_ERR "AMBER: unable to create /proc/%s/%s\n", PROC_AMBER_DIR_NAME,
+			PROC_AMBER_WIFI2SOC_INT_FILE_NAME);
+		err = -ENOMEM;
+		goto out_exit_wifi2soc_proc;
+	}
+
+	amber_wifi2soc_file->read_proc = amber_wifi2soc_int_proc_read;
+	amber_wifi2soc_file->write_proc = amber_wifi2soc_int_proc_write;
+	amber_wifi2soc_file->mode = S_IFREG | S_IRUGO | S_IWUSR;
+	amber_wifi2soc_file->uid = 0;
+	amber_wifi2soc_file->gid = 0;
+	amber_wifi2soc_file->size = 0x1000;
+	amber_wifi2soc_file->data = NULL;
+
+	spin_lock_init(&wifi2soc_lock);
+	setup_timer(&amber_timer, amber_timer_func, 0);
+
+	interrupt_errors_mask = readl(IO_ADDRESS(TOPAZ_AMBER_WIFI2SOC_MASK_REG));
+
+	err = gpio_request(TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT, "WIFI2SOC_INT_OUTPUT");
+
+	if (err < 0) {
+		printk(KERN_INFO "AMBER: failed to request GPIO %d for WIFI2SOC_INT_OUTPUT\n",
+			TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT);
+		goto out_exit_wifi2soc_gpio;
+	}
+
+	ruby_set_gpio_irq_sel(TOPAZ_AMBER_SOC2WIFI_INT_INPUT);
+
+	err = request_irq(GPIO2IRQ(TOPAZ_AMBER_SOC2WIFI_INT_INPUT), amber_soc2wifi_irq_handler, 0,
+		"amber_soc2wifi", NULL);
+
+	if (err) {
+		printk(KERN_INFO "AMBER: failed to register IRQ %d\n",
+			GPIO2IRQ(TOPAZ_AMBER_SOC2WIFI_INT_INPUT));
+		goto out_exit_wifi2soc_irq;
+	}
+
+	atomic_notifier_chain_register(&panic_notifier_list, &amber_panic_block);
+
+	initialized = 1;
+
+	printk("AMBER: initialized successfully\n");
+
+	return 0;
+
+out_exit_wifi2soc_irq:
+	gpio_free(TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT);
+
+out_exit_wifi2soc_gpio:
+	remove_proc_entry(PROC_AMBER_WIFI2SOC_INT_FILE_NAME, amber_dir);
+
+out_exit_wifi2soc_proc:
+	remove_proc_entry(PROC_AMBER_DIR_NAME, NULL);
+
+	return err;
+}
+
+static void __exit amber_exit(void)
+{
+	del_timer_sync(&amber_timer);
+
+	atomic_notifier_chain_unregister(&panic_notifier_list, &amber_panic_block);
+
+	free_irq(GPIO2IRQ(TOPAZ_AMBER_SOC2WIFI_INT_INPUT), NULL);
+	gpio_free(TOPAZ_AMBER_WIFI2SOC_INT_OUTPUT);
+
+	remove_proc_entry(PROC_AMBER_WIFI2SOC_INT_FILE_NAME, amber_dir);
+	remove_proc_entry(PROC_AMBER_DIR_NAME, NULL);
+}
+
+module_init(amber_init);
+module_exit(amber_exit);
+
+MODULE_DESCRIPTION("Amber Driver");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/topaz/busmon.c b/drivers/qtn/topaz/busmon.c
new file mode 100644
index 0000000..5dad94d
--- /dev/null
+++ b/drivers/qtn/topaz/busmon.c
@@ -0,0 +1,529 @@
+/**
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ **/
+
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+
+#include <qtn/qtn_debug.h>
+#include <qtn/busmon.h>
+#include <common/topaz_platform.h>
+
+/**
+ * AHB monitor driver. Linux kernel API is declared in busmon.h. Interfaces to user space are
+ * done via procfs and sysfs. RO /proc/topaz_busmon file provides information about bus monitor
+ * status and control registers. AHB bus is registered in kernel and the appropriate element is
+ * created (/bus/ahb). The following files are created in sysfs to contol the AHB monitor
+ * /sys/bus/ahb/ahbm_ranges: rw file to specify start and end range addresses to be monitored. Up to
+				4 ranges can be specified
+ * /sys/bus/ahb/ahbm_outside: rw file to control inside/outside range check
+ * /sys/bus/ahb/ahbm_timeout: rw file to specify clock cycles to time out in 250MHz clock
+ * /sys/bus/ahb/ahbm_range_test_on: rw file to toggle range test on or off. The new values of ranges
+					and outside take effect only after writing 1 to this file
+ * /sys/bus/ahb/ahbm_timeout_test_on: rf file to toggle timeout test on or off. The new timeout value
+					takes effect only after writing 1 to this file
+ */
+
+#define PROC_NAME "topaz_busmon"
+
+static const char *master_names[] = TOPAZ_BUSMON_MASTER_NAMES;
+
+#define BUSMON_TIMEOUT_BITS(t)		\
+	(TOPAZ_BUSMON_TIMER_INT_EN |	\
+	 TOPAZ_BUSMON_TIMEOUT(t))
+
+#define BUSMON_RANGE_BITS(r)		\
+	(TOPAZ_BUSMON_REGION_VALID(r) |	\
+	 TOPAZ_BUSMON_ADDR_CHECK_EN |	\
+	 TOPAZ_BUSMON_BLOCK_TRANS_EN |	\
+	 TOPAZ_BUSMON_OUTSIDE_ADDR_CHECK)
+/**
+ * When enabled range test is running
+ */
+static unsigned int range_test_on = 0;
+
+/**
+ * When enabled timeout test is running
+ */
+static unsigned int timeout_test_on = 0;
+
+/**
+ * Clock cycles to time out in 250MHz clock
+ */
+static uint16_t timeout = 255;
+
+/**
+ * Lower and upper limits of address ranges 0-3
+ */
+static struct topaz_busmon_range ranges[TOPAZ_BUSMON_MAX_RANGES] = {
+	{ 0, 0 },
+	{ 0, 0 },
+	{ 0, 0 },
+	{ 0, 0 }
+};
+
+/**
+ * Enables/disables outside address check
+ */
+static unsigned int outside = 0;
+
+/**
+ * AHB bus definition appeared in /sys/bus
+ */
+static struct bus_type ahb = {
+	.name = "ahb",
+};
+
+/**
+ * Setter and getter functions for AHB monitor parameters presented over sysfs
+ */
+
+static ssize_t ahbm_outside_show(struct bus_type *bus, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", outside);
+}
+
+static ssize_t ahbm_outside_store(struct bus_type *bus, const char *buf, size_t count)
+{
+	sscanf(buf, "%u", &outside);
+	return count;
+}
+BUS_ATTR(ahbm_outside, S_IRUGO | S_IWUSR, ahbm_outside_show, ahbm_outside_store);
+
+static ssize_t ahbm_range_test_show(struct bus_type *bus, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", range_test_on);
+}
+
+static ssize_t ahbm_range_test_store(struct bus_type *bus, const char *buf, size_t count)
+{
+	sscanf(buf, "%u", &range_test_on);
+
+	if (range_test_on) {
+		topaz_busmon_range_check(TOPAZ_BUSMON_LHOST, ranges, ARRAY_SIZE(ranges), outside);
+	} else {
+		topaz_busmon_range_check_disable(TOPAZ_BUSMON_LHOST);
+	}
+
+	return count;
+}
+BUS_ATTR(ahbm_range_test_on, S_IRUGO | S_IWUSR, ahbm_range_test_show, ahbm_range_test_store);
+
+static ssize_t ahbm_timeout_test_show(struct bus_type *bus, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", timeout_test_on);
+}
+
+static ssize_t ahbm_timeout_test_store(struct bus_type *bus, const char *buf, size_t count)
+{
+	sscanf(buf, "%u", &timeout_test_on);
+
+	if (timeout_test_on) {
+		topaz_busmon_timeout_en(TOPAZ_BUSMON_LHOST, timeout);
+	} else {
+		topaz_busmon_timeout_dis(TOPAZ_BUSMON_LHOST);
+	}
+
+	return count;
+}
+BUS_ATTR(ahbm_timeout_test_on, S_IRUGO | S_IWUSR, ahbm_timeout_test_show, ahbm_timeout_test_store);
+
+static ssize_t ahbm_timeout_show(struct bus_type *bus, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%hu\n", timeout);
+}
+
+static ssize_t ahbm_timeout_store(struct bus_type *bus, const char *buf, size_t count)
+{
+	sscanf(buf, "%hu", &timeout);
+	return count;
+}
+BUS_ATTR(ahbm_timeout, S_IRUGO | S_IWUSR, ahbm_timeout_show, ahbm_timeout_store);
+
+static ssize_t ahbm_ranges_show(struct bus_type *bus, char *buf)
+{
+	ssize_t	n;
+	int i;
+
+	for (i = 0, n = 0; i < TOPAZ_BUSMON_MAX_RANGES; i++) {
+		n += scnprintf(buf + n, PAGE_SIZE - n, "0x%08x:0x%08x\n", (uint32_t)ranges[i].start,
+		               (uint32_t)ranges[i].end);
+	}
+
+	return n;
+}
+
+static ssize_t ahbm_ranges_store(struct bus_type *bus, const char *buf, size_t count)
+{
+	ssize_t		n;
+	int		i;
+	bool		is_err = false;
+
+	for (i = 0, n = 0; i < TOPAZ_BUSMON_MAX_RANGES; i++) {
+		if (!is_err) {
+			if (sscanf(buf + n, "0x%08x:0x%08x\n",
+			                (uint32_t *)&ranges[i].start, (uint32_t *)&ranges[i].end) == 2) {
+				n += 22;
+			} else {
+				is_err = true;
+				ranges[i].start = 0;
+				ranges[i].end = 0;
+			}
+		} else {
+			ranges[i].start = 0;
+			ranges[i].end = 0;
+		}
+	}
+
+	return count;
+}
+BUS_ATTR(ahbm_ranges, S_IRUGO | S_IWUSR, ahbm_ranges_show, ahbm_ranges_store);
+
+/**
+ * Enables/disables mask bits for AHB monitor IRQ
+ */
+static void topaz_busmon_irq_set(uint32_t bit, bool enable)
+{
+	uint32_t busmon_intr_mask;
+
+	busmon_intr_mask = readl(TOPAZ_BUSMON_INTR_MASK);
+
+	if (enable) {
+		busmon_intr_mask |= bit;
+	} else {
+		busmon_intr_mask &= ~bit;
+	}
+
+	writel(busmon_intr_mask, TOPAZ_BUSMON_INTR_MASK);
+}
+
+/**
+ * Enables/disables AHB monitor timeout interrupt generation and sets timeout value
+ */
+void topaz_busmon_timeout(uint8_t bus, uint16_t tm, bool enable)
+{
+	uint32_t busmon_ctrl;
+
+	timeout = tm;
+	timeout_test_on = enable;
+
+	/*
+	 * Add timeout settings, preserving existing range-check settings
+	 */
+	busmon_ctrl = readl(TOPAZ_BUSMON_CTL(bus));
+	busmon_ctrl &= BUSMON_RANGE_BITS(~0);
+
+	if (enable) {
+		busmon_ctrl |= BUSMON_TIMEOUT_BITS(timeout);
+	}
+
+	writel(busmon_ctrl, TOPAZ_BUSMON_CTL(bus));
+
+	if (enable) {
+		/* enable/disable timeout interrupt for this bus master monitor */
+		topaz_busmon_irq_set(TOPAZ_BUSMON_INTR_MASK_TIMEOUT_EN(bus), enable);
+	}
+}
+EXPORT_SYMBOL(topaz_busmon_timeout);
+
+/**
+ * Enables/disables AHB monitor range interrupt generation and defines the ranges
+ */
+void topaz_busmon_range_check(uint8_t bus,
+                              const struct topaz_busmon_range *range,
+                              size_t nranges, bool out)
+{
+	uint32_t busmon_ctrl;
+	int i;
+
+	outside = out;
+
+	/*
+	 * temporarily disable range checking for this bus master monitor,
+	 * preserving other settings like timeout checking
+	 */
+	busmon_ctrl = readl(TOPAZ_BUSMON_CTL(bus));
+	busmon_ctrl &= BUSMON_TIMEOUT_BITS(~0);
+	writel(busmon_ctrl, TOPAZ_BUSMON_CTL(bus));
+
+	/* initialize address range registers, and busmon_ctrl filter enable */
+	for (i = 0; i < TOPAZ_BUSMON_MAX_RANGES; i++) {
+		if (i < nranges) {
+			memcpy(&ranges[i], &range[i], sizeof(ranges[i]));
+			writel(range[i].start, TOPAZ_BUSMON_CTL_RANGE_LOW(bus, i));
+			writel(range[i].end,   TOPAZ_BUSMON_CTL_RANGE_HIGH(bus, i));
+			busmon_ctrl |= TOPAZ_BUSMON_REGION_VALID(BIT(i));
+		} else {
+			memset(&ranges[i], 0, sizeof(ranges[i]));
+			writel(0, TOPAZ_BUSMON_CTL_RANGE_LOW(bus, i));
+			writel(0, TOPAZ_BUSMON_CTL_RANGE_HIGH(bus, i));
+		}
+	}
+
+	/* enable/disable range checking */
+	if (nranges) {
+		range_test_on = 1;
+		busmon_ctrl |= TOPAZ_BUSMON_ADDR_CHECK_EN;
+		busmon_ctrl |= TOPAZ_BUSMON_BLOCK_TRANS_EN;
+
+		if (outside) {
+			busmon_ctrl |= TOPAZ_BUSMON_OUTSIDE_ADDR_CHECK;
+		}
+	} else {
+		range_test_on = 0;
+	}
+
+	writel(busmon_ctrl, TOPAZ_BUSMON_CTL(bus));
+
+	/* enable/disable range check interrupt for this bus master monitor */
+	topaz_busmon_irq_set(TOPAZ_BUSMON_INTR_MASK_RANGE_CHECK_EN(bus), nranges > 0);
+}
+EXPORT_SYMBOL(topaz_busmon_range_check);
+
+static int topaz_busmon_dump_master(char *const p, unsigned int master)
+{
+	unsigned int reg;
+	uint32_t debug_regs[TOPAZ_BUSMON_DEBUG_MAX];
+
+	for (reg = 0; reg < ARRAY_SIZE(debug_regs); reg++) {
+		writel(TOPAZ_BUSMON_DEBUG_VIEW_MASTER(master) |
+		       TOPAZ_BUSMON_DEBUG_VIEW_DATA_SEL(reg),
+		       TOPAZ_BUSMON_DEBUG_VIEW);
+		debug_regs[reg] = readl(TOPAZ_BUSMON_DEBUG_STATUS);
+	}
+
+	return sprintf(p, "master %-5s addr 0x%08x rd %08x%08x wr %08x%08x ctrl %08x %08x %08x\n",
+	               master_names[master], debug_regs[TOPAZ_BUSMON_ADDR],
+	               debug_regs[TOPAZ_BUSMON_RD_H32], debug_regs[TOPAZ_BUSMON_RD_L32],
+	               debug_regs[TOPAZ_BUSMON_WR_H32], debug_regs[TOPAZ_BUSMON_WR_L32],
+	               debug_regs[TOPAZ_BUSMON_CTRL0],
+	               debug_regs[TOPAZ_BUSMON_CTRL1],
+	               debug_regs[TOPAZ_BUSMON_CTRL2]);
+}
+
+/*
+ * AHB bus monitor interrupt handler.  Dumps every master's debug registers
+ * and the current task's stack, acks the pending status, and returns.
+ * The LHOST timer interrupt enable is cleared on entry and restored on
+ * exit, bracketing the (slow) printk dump.
+ */
+static irqreturn_t topaz_busmon_irq_handler(int irq, void *arg)
+{
+	unsigned int master;
+	/* NOTE(review): dump lines are ~100 chars + master name; confirm 128
+	 * bytes is always enough for topaz_busmon_dump_master() */
+	char buf[128];
+	uint32_t ahb_mon_int_status = readl(TOPAZ_BUSMON_INTR_STATUS);
+
+	/* suppress the LHOST timer interrupt for the duration of the dump */
+	uint32_t busmon_ctrl = readl(TOPAZ_BUSMON_CTL(TOPAZ_BUSMON_LHOST));
+	busmon_ctrl &= ~TOPAZ_BUSMON_TIMER_INT_EN;
+	writel(busmon_ctrl, TOPAZ_BUSMON_CTL(TOPAZ_BUSMON_LHOST));
+
+	printk("%s, irq %d, ahb_mon_int_status 0x%x\n",
+	       __FUNCTION__, irq, ahb_mon_int_status);
+
+	for (master = 0; master < ARRAY_SIZE(master_names); master++) {
+		topaz_busmon_dump_master(buf, master);
+		printk("%s", buf);
+	}
+
+	/* ack all pending monitor interrupts (presumably write-1-to-clear —
+	 * verify against the register spec) */
+	writel(~0, TOPAZ_BUSMON_INTR_STATUS);
+
+	/* Dump task stack */
+	printk("Current task = '%s', PID = %u, ASID = %p\n", current->comm,
+	       current->pid, current->active_mm->context.asid);
+	show_stacktrace(current, NULL);
+
+	/* restore the timer interrupt enable cleared above */
+	busmon_ctrl |= TOPAZ_BUSMON_TIMER_INT_EN;
+	writel(busmon_ctrl, TOPAZ_BUSMON_CTL(TOPAZ_BUSMON_LHOST));
+
+	return IRQ_HANDLED;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/*
+ * /proc read handler (file_operations variant, kernels >= 4.7).
+ *
+ * Fixes vs. the original: it sprintf()'d straight into the __user buffer
+ * (no copy_to_user), ignored 'count', and never bounded the read against
+ * the data size, so reads never reached EOF.  Format into a stack kernel
+ * buffer and let simple_read_from_buffer() (linux/fs.h, pulled in via
+ * linux/proc_fs.h) do the bounded user copy and *ppos bookkeeping.
+ * Returns the number of bytes copied, 0 at EOF, or -EFAULT.
+ */
+static ssize_t topaz_busmon_read_proc(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	const char *master_names[] = TOPAZ_BUSMON_MASTER_NAMES;
+	/* one ~100-char line per master; 128 bytes each is ample */
+	char kbuf[ARRAY_SIZE(master_names) * 128];
+	char *p = kbuf;
+	unsigned int master;
+	unsigned long flags;
+
+	/* keep the DEBUG_VIEW select/readback sequence atomic */
+	local_irq_save(flags);
+
+	for (master = 0; master < ARRAY_SIZE(master_names); master++) {
+		p += topaz_busmon_dump_master(p, master);
+	}
+
+	local_irq_restore(flags);
+
+	return simple_read_from_buffer(buffer, count, ppos, kbuf, p - kbuf);
+}
+
+/* Read-only /proc interface: dumps all bus-master debug registers. */
+static const struct file_operations fops_busmon = {
+	.read = topaz_busmon_read_proc,
+};
+
+/*
+ * Register the /proc entry.  Returns 0 on success, -ENOMEM on failure.
+ *
+ * Fix: the mode was written as hex 0x444 (== 02104 octal), which is not a
+ * sane permission mask; UNIX file modes are octal.  The entry is read-only,
+ * so the intended mode is 0444.
+ */
+static int __init topaz_busmon_create_proc(void)
+{
+	struct proc_dir_entry *entry =
+		proc_create(PROC_NAME, 0444, NULL, &fops_busmon);
+
+	if (!entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+#else
+/*
+ * Legacy /proc read handler (read_proc interface, kernels < 4.7).
+ * Writes all master dump lines into 'page' and signals EOF immediately,
+ * so the whole dump is returned in a single read.
+ * NOTE(review): 'off' and 'count' are ignored — fine while the full dump
+ * fits in one page (8 masters * ~100 chars), but verify if masters grow.
+ */
+static int topaz_busmon_read_proc(char *page, char **start, off_t off,
+                                  int count, int *eof, void *_unused)
+{
+	char *p = page;
+	unsigned int master;
+	const char *master_names[] = TOPAZ_BUSMON_MASTER_NAMES;
+	unsigned long flags;
+
+	/* keep the DEBUG_VIEW select/readback sequence atomic */
+	local_irq_save(flags);
+
+	for (master = 0; master < ARRAY_SIZE(master_names); master++) {
+		p += topaz_busmon_dump_master(p, master);
+	}
+
+	local_irq_restore(flags);
+
+	*eof = 1;
+	return p - page;
+}
+
+/*
+ * Register the /proc entry via the legacy create_proc_entry API.
+ * NOTE(review): mode here is 0600, while the >= 4.7 branch exposes the
+ * entry as world-readable — confirm which is intended.
+ */
+static int __init topaz_busmon_create_proc(void)
+{
+	struct proc_dir_entry *entry = create_proc_entry(PROC_NAME, 0600, NULL);
+
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	entry->write_proc = NULL;
+	entry->read_proc = topaz_busmon_read_proc;
+
+	return 0;
+}
+#endif
+
+/*
+ * Module init: hook the AHB monitor interrupt, create the /proc dump entry,
+ * register the 'ahb' bus with sysfs and attach its attribute files.
+ * On any failure, the error-label ladder below unwinds strictly in reverse
+ * order of creation — keep new resources paired with a matching label.
+ */
+int __init topaz_busmon_init(void)
+{
+	int rc;
+
+	rc = request_irq(TOPAZ_IRQ_MISC_AHB_MON, topaz_busmon_irq_handler,
+	                 0, "ahb bus monitor", NULL);
+
+	if (rc) {
+		goto error;
+	}
+
+	rc = topaz_busmon_create_proc();
+
+	if (rc) {
+		printk(KERN_WARNING "procfs: error creating proc entry: %d\n", rc);
+		goto error1;
+	}
+
+	rc = bus_register(&ahb);
+
+	if (rc < 0) {
+		printk(KERN_WARNING "sysfs: error register bus: %d\n", rc);
+		goto error2;
+	}
+
+	rc = bus_create_file(&ahb, &bus_attr_ahbm_range_test_on);
+
+	if (rc < 0) {
+		printk(KERN_WARNING "sysfs: error creating busfile\n");
+		goto error3;
+	}
+
+	rc = bus_create_file(&ahb, &bus_attr_ahbm_timeout_test_on);
+
+	if (rc < 0) {
+		printk(KERN_WARNING "sysfs: error creating busfile\n");
+		goto error4;
+	}
+
+	rc = bus_create_file(&ahb, &bus_attr_ahbm_timeout);
+
+	if (rc < 0) {
+		printk(KERN_WARNING "sysfs: error creating busfile\n");
+		goto error5;
+	}
+
+	rc = bus_create_file(&ahb, &bus_attr_ahbm_ranges);
+
+	if (rc < 0) {
+		printk(KERN_WARNING "sysfs: error creating busfile\n");
+		goto error6;
+	}
+
+	rc = bus_create_file(&ahb, &bus_attr_ahbm_outside);
+
+	if (rc < 0) {
+		printk(KERN_WARNING "sysfs: error creating busfile\n");
+		goto error7;
+	}
+
+	printk(KERN_DEBUG "%s success\n", __FUNCTION__);
+
+	return 0;
+	/* unwind ladder: each label releases what was created before the
+	 * corresponding goto, then falls through to release the rest */
+error7:
+	bus_remove_file(&ahb, &bus_attr_ahbm_ranges);
+error6:
+	bus_remove_file(&ahb, &bus_attr_ahbm_timeout);
+error5:
+	bus_remove_file(&ahb, &bus_attr_ahbm_timeout_test_on);
+error4:
+	bus_remove_file(&ahb, &bus_attr_ahbm_range_test_on);
+error3:
+	bus_unregister(&ahb);
+error2:
+	remove_proc_entry(PROC_NAME, NULL);
+error1:
+	free_irq(TOPAZ_IRQ_MISC_AHB_MON, NULL);
+error:
+	return rc;
+}
+
+/*
+ * Module exit: tear down everything topaz_busmon_init() created, in
+ * reverse order of creation (attribute files, bus, proc entry, irq).
+ */
+static void __exit topaz_busmon_exit(void)
+{
+	bus_remove_file(&ahb, &bus_attr_ahbm_outside);
+	bus_remove_file(&ahb, &bus_attr_ahbm_ranges);
+	bus_remove_file(&ahb, &bus_attr_ahbm_timeout);
+	bus_remove_file(&ahb, &bus_attr_ahbm_timeout_test_on);
+	bus_remove_file(&ahb, &bus_attr_ahbm_range_test_on);
+	bus_unregister(&ahb);
+	remove_proc_entry(PROC_NAME, NULL);
+	free_irq(TOPAZ_IRQ_MISC_AHB_MON, NULL);
+}
+
+module_init(topaz_busmon_init);
+module_exit(topaz_busmon_exit);
+
+MODULE_DESCRIPTION("Topaz AHB Bus Monitors");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/qtn/topaz/dpi.c b/drivers/qtn/topaz/dpi.c
new file mode 100644
index 0000000..6bcbf11
--- /dev/null
+++ b/drivers/qtn/topaz/dpi.c
@@ -0,0 +1,617 @@
+/**
+ * (C) Copyright 2012-2013 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ **/
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <linux/hardirq.h>
+
+#include <asm/hardware.h>
+#include <asm/board/platform.h>
+
+#include <common/queue.h>
+#include <qtn/topaz_dpi.h>
+
+/**
+ * Each DPI filter is a mix of DPI fields, and DPI IP tuples.
+ * A field is a versatile way of matching any aspect of packet. An example field,
+ * which matches only large packets is:
+ *  - anchor on the IPv4 header
+ *  - Offset 0. Offset works with 32 bit words, length field is 1st word
+ *  - Comparison operator >=
+ *  - Mask 0x0000FFFF (Length is bytes 2 & 3, network endian)
+ *  - Val  0x00000400 (1024 bytes or more)
+ * There are 32 hardware DPI fields.
+ *
+ * An IP tuple is a kind of field, specifically for matching whole IPv4 or IPv6 addresses,
+ * and UDP or TCP ports. One IP tuple has 9 matching words:
+ *  - word0	1st word of ipv6 source addr, or ipv4 source addr
+ *  - word[1-3] other words for ipv6 source addr match
+ *  - word4	1st word of ipv6 dest addr, or ipv4 dest addr
+ *  - word[5-7] other words for ipv6 dest addr match
+ *  - word8	TCP/UDP source + dest ports.
+ * An IP tuple can match on any combination of the source address, dest address,
+ * source port, dest port, but it must be an exact match.
+ * There are 8 hardware IP tuples.
+ *
+ * A DPI filter is some combination of fields and/or ip tuples, grouped together with the
+ * TOPAZ_EMAC_RX_DPI_IPT_GROUP(x) and TOPAZ_EMAC_RX_DPI_FIELD_GROUP(x) registers.
+ * For example, filter 2 is composed of:
+ *  - IP tuple 3, configured to match on source & dest address (not ports)
+ *  - Field 17 matches something else
+ * The filter is composed by setting bit 17 TOPAZ_EMAC_RX_DPI_FIELD_GROUP(2), and 
+ * bits 3 (enable ip tuple 3 saddr match) and 11 (enable ip tuple 3 dest addr match)
+ * of TOPAZ_EMAC_RX_DPI_IPT_GROUP(2).
+ * There are 16 hardware DPI filters.
+ *
+ * 
+ * IP Tuple memory is arranged as:
+ * - 8 lwords wide (for 8 different ip tuples)
+ * - 9 lwords deep (called 'entries')
+ * Entries 0-3 are for IP source address matching
+ * Entries 4-7 are for IP destination address matching
+ * Entry 8 is TCP/UDP source/dest address matching
+ *
+ * The entire width for an entry is read/written at once, controlled by
+ * TOPAZ_EMAC_RX_DPI_IPT_MEM_COM (0x620). So when modifying an address, order is:
+ * - Read entry, so old unchanged values will be retained
+ * - Poll on entry read complete
+ * - Modify word (TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA(x), 0x600 + 4 * x)
+ * - Write entry back
+ * - Poll on write complete
+ */
+
+
+/* One hardware DPI field: shared between filters via refcount. */
+struct topaz_dpi_field {
+	int refcount;	/* number of installed filters using this field */
+	struct topaz_dpi_field_def def;	/* anchor/offset/op/mask/val */
+};
+
+/* One ip-tuple address slot (IPv4 addresses occupy the first word). */
+struct topaz_dpi_iptuple_addr {
+	int refcount;	/* number of installed filters using this address */
+	struct in6_addr addr;
+};
+
+/* One ip-tuple TCP/UDP port slot. */
+struct topaz_dpi_iptuple_port {
+	int refcount;	/* number of installed filters using this port */
+	uint16_t port;
+};
+
+/*
+ * One hardware DPI filter.  'used' records, as bitmaps indexed by tuple or
+ * field number, which shared resources this filter holds references on.
+ */
+struct topaz_dpi_filter {
+	uint8_t index;	/* hardware filter number, fixed at init */
+	TAILQ_ENTRY(topaz_dpi_filter) next;	/* on used or unused list */
+
+	struct {
+		uint8_t srcaddr[howmany(TOPAZ_EMAC_NUM_DPI_IPTUPLES, NBBY)];
+		uint8_t destaddr[howmany(TOPAZ_EMAC_NUM_DPI_IPTUPLES, NBBY)];
+		uint8_t srcport[howmany(TOPAZ_EMAC_NUM_DPI_IPTUPLES, NBBY)];
+		uint8_t destport[howmany(TOPAZ_EMAC_NUM_DPI_IPTUPLES, NBBY)];
+		uint8_t fields[howmany(TOPAZ_EMAC_NUM_DPI_FIELDS, NBBY)];
+	} used;
+};
+typedef TAILQ_HEAD(filter_head_s, topaz_dpi_filter) topaz_dpi_filter_head;
+
+/* Per-EMAC DPI bookkeeping: hardware resource pools plus filter lists. */
+struct topaz_dpi_info {
+	unsigned long base;	/* emac ctl address base */
+	spinlock_t lock;	/* guards all fields below and hw accesses */
+
+	/* shadow copies of hardware resources, with reference counts */
+	struct topaz_dpi_field fields[TOPAZ_EMAC_NUM_DPI_FIELDS];
+	struct topaz_dpi_iptuple_addr ipt_srcaddr[TOPAZ_EMAC_NUM_DPI_IPTUPLES];
+	struct topaz_dpi_iptuple_addr ipt_destaddr[TOPAZ_EMAC_NUM_DPI_IPTUPLES];
+	struct topaz_dpi_iptuple_port ipt_srcport[TOPAZ_EMAC_NUM_DPI_IPTUPLES];
+	struct topaz_dpi_iptuple_port ipt_destport[TOPAZ_EMAC_NUM_DPI_IPTUPLES];
+
+	/* every filter lives on exactly one of these two lists */
+	topaz_dpi_filter_head used_filters_head;
+	topaz_dpi_filter_head unused_filters_head;
+	struct topaz_dpi_filter filters[TOPAZ_EMAC_NUM_DPI_FILTERS];
+};
+
+/* Serialise DPI state and register accesses (BH-disabling lock —
+ * presumably callers may run in softirq context; verify). */
+static void topaz_dpi_lock(struct topaz_dpi_info *info)
+{
+	spin_lock_bh(&info->lock);
+}
+
+static void topaz_dpi_unlock(struct topaz_dpi_info *info)
+{
+	spin_unlock_bh(&info->lock);
+}
+
+/*
+ * Initialise the per-EMAC DPI bookkeeping: zero all state, record the
+ * register base, and place every filter on the unused list.
+ *
+ * Cleanup vs. the original: the explicit refcount-zeroing loops were dead
+ * stores — memset() below already zeroes every refcount and bitmap.
+ */
+static void topaz_dpi_info_init(struct topaz_dpi_info *info, unsigned long base_addr)
+{
+	int i;
+
+	/* zeroes all refcounts and 'used' bitmaps in one go */
+	memset(info, 0, sizeof(*info));
+	info->base = base_addr;
+	spin_lock_init(&info->lock);
+
+	TAILQ_INIT(&info->used_filters_head);
+	TAILQ_INIT(&info->unused_filters_head);
+	for (i = 0; i < TOPAZ_EMAC_NUM_DPI_FILTERS; i++) {
+		info->filters[i].index = i;
+		TAILQ_INSERT_TAIL(&info->unused_filters_head, &info->filters[i], next);
+	}
+}
+
+/*
+ * Reserve DPI fields for every field definition in 'req', marking the
+ * chosen field indexes in the 'used' bitmap.  An existing field with an
+ * identical definition is shared; otherwise the first free slot is taken.
+ * Returns 0 on success, -1 if the field pool is exhausted.  Note: only the
+ * bitmap is updated here — refcounts are bumped later by the caller.
+ */
+static int topaz_dpi_get_filter_fields(struct topaz_dpi_info *info,
+		const struct topaz_dpi_filter_request *req, uint8_t *used)
+{
+	unsigned int i;
+	unsigned int j;
+
+	/* look for identical fields */
+	for (i = 0; i < req->field_count; i++) {
+		const struct topaz_dpi_field_def *new_field_def = &req->fields[i];
+		int unused_index = -1;
+		int found = 0;
+
+		for (j = 0; !found && j < TOPAZ_EMAC_NUM_DPI_FIELDS; j++) {
+			const struct topaz_dpi_field *field = &info->fields[j];
+
+			/* remember the first free slot in case no match is found */
+			if (field->refcount == 0 && !isset(used, j) && unused_index < 0) {
+				unused_index = j;
+			} else if (field->refcount && memcmp(new_field_def, &field->def,
+						sizeof(*new_field_def)) == 0) {
+				/* share existing field */
+				setbit(used, j);
+				found = 1;
+			}
+		}
+
+		if (found) {
+			/* use shared field */
+		} else if (unused_index >= 0) {
+			/* use new field */
+			info->fields[unused_index].def = *new_field_def;
+			setbit(used, unused_index);
+		} else {
+			/* out of fields */
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Reserve an ip-tuple address slot matching 'req_addr' in 'addrs',
+ * marking the chosen index in 'used'.  A matching in-use slot is shared;
+ * otherwise the first free slot is taken.  An all-zero address means
+ * "no address match requested" and reserves nothing.
+ * Returns 0 on success, -1 if the tuple pool is exhausted.
+ */
+static int topaz_dpi_get_filter_ipt_addr(struct topaz_dpi_iptuple_addr *addrs,
+		const struct in6_addr *req_addr, uint8_t *used)
+{
+	int i;
+	int found = 0;
+	int unused_index = -1;
+	const struct in6_addr zero = IN6ADDR_ANY_INIT;
+
+	if (memcmp(&zero, req_addr, sizeof(*req_addr)) == 0) {
+		return 0;
+	}
+
+	for (i = 0; !found && i < TOPAZ_EMAC_NUM_DPI_IPTUPLES; i++) {
+		if (addrs[i].refcount == 0 && unused_index < 0) {
+			unused_index = i;
+		} else if (addrs[i].refcount && memcmp(&addrs[i].addr, req_addr, sizeof(*req_addr)) == 0) {
+			found = 1;
+			setbit(used, i);
+		}
+	}
+
+	if (found) {
+		/* use shared field */
+	} else if (unused_index >= 0) {
+		addrs[unused_index].addr = *req_addr;
+		setbit(used, unused_index);
+	} else {
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Reserve an ip-tuple port slot matching 'req_port' in 'ports', marking
+ * the chosen index in 'used'.  Mirrors topaz_dpi_get_filter_ipt_addr():
+ * share an identical in-use slot, else take the first free one.  Port 0
+ * means "no port match requested" and reserves nothing.
+ * Returns 0 on success, -1 if the tuple pool is exhausted.
+ */
+static int topaz_dpi_get_filter_ipt_port(struct topaz_dpi_iptuple_port *ports,
+		uint16_t req_port, uint8_t *used)
+{
+	int i;
+	int found = 0;
+	int unused_index = -1;
+
+	if (!req_port) {
+		return 0;
+	}
+
+	for (i = 0; !found && i < TOPAZ_EMAC_NUM_DPI_IPTUPLES; i++) {
+		if (ports[i].refcount == 0 && unused_index < 0) {
+			unused_index = i;
+		} else if (ports[i].refcount && ports[i].port == req_port) {
+			found = 1;
+			setbit(used, i);
+		}
+	}
+
+	if (found) {
+		/* use shared field */
+	} else if (unused_index >= 0) {
+		ports[unused_index].port = req_port;
+		setbit(used, unused_index);
+	} else {
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate a filter for 'req': take a free filter, reserve all requested
+ * fields and ip-tuple parts (recorded in filter->used), and move the
+ * filter to the used list.  Returns the filter, or NULL if no filter or
+ * insufficient shared resources are available; on failure the 'used'
+ * bitmaps are wiped so no stale reservations remain (refcounts were not
+ * yet incremented, so nothing else needs rollback).
+ * Caller must hold the info lock.
+ */
+static struct topaz_dpi_filter * topaz_dpi_get_filter(struct topaz_dpi_info *info,
+		const struct topaz_dpi_filter_request *req)
+{
+	struct topaz_dpi_filter *filter;
+
+	filter = TAILQ_FIRST(&info->unused_filters_head);
+	if (!filter)
+		return NULL;
+
+	if (topaz_dpi_get_filter_fields(info, req, filter->used.fields))
+		goto insufficient_fields;
+
+	if (topaz_dpi_get_filter_ipt_addr(info->ipt_srcaddr, &req->srcaddr, filter->used.srcaddr))
+		goto insufficient_fields;
+
+	if (topaz_dpi_get_filter_ipt_addr(info->ipt_destaddr, &req->destaddr, filter->used.destaddr))
+		goto insufficient_fields;
+
+	if (topaz_dpi_get_filter_ipt_port(info->ipt_srcport, req->srcport, filter->used.srcport))
+		goto insufficient_fields;
+
+	if (topaz_dpi_get_filter_ipt_port(info->ipt_destport, req->destport, filter->used.destport))
+		goto insufficient_fields;
+
+	TAILQ_REMOVE(&info->unused_filters_head, filter, next);
+	TAILQ_INSERT_TAIL(&info->used_filters_head, filter, next);
+
+	return filter;
+
+insufficient_fields:
+	/* not enough fields available */
+	memset(&filter->used, 0, sizeof(filter->used));
+	return NULL;
+}
+
+/*
+ * Return the DPI bookkeeping instance for the given EMAC.
+ * Any non-zero id maps to EMAC1; zero maps to EMAC0.
+ */
+static struct topaz_dpi_info *topaz_dpi_info_get(unsigned int emac)
+{
+	static struct topaz_dpi_info dpi_info[2];
+
+	return &dpi_info[emac ? 1 : 0];
+}
+
+/*
+ * Set the TID for filter 'index' in the DPI TID map via a read-modify-write
+ * of the per-index nibble/field inside the shared map register.
+ */
+static void topaz_dpi_filter_add_set_tidmap(struct topaz_dpi_info *info, uint8_t index, uint8_t tid)
+{
+	uint32_t tidmap_reg_addr = TOPAZ_EMAC_RXP_DPI_TID_MAP_INDEX(index);
+	uint32_t tidmap_reg_val;
+	uint32_t tidmap_change_shift = TOPAZ_EMAC_RXP_TID_MAP_INDEX_SHIFT(index);
+	uint32_t tidmap_change_mask = TOPAZ_EMAC_RXP_TID_MAP_INDEX_MASK(index);
+
+	/* preserve the other filters' TID fields in the same register */
+	tidmap_reg_val = readl(info->base + tidmap_reg_addr);
+	tidmap_reg_val = (tidmap_reg_val & ~tidmap_change_mask) |
+		((tid << tidmap_change_shift) & tidmap_change_mask);
+	writel(tidmap_reg_val, info->base + tidmap_reg_addr);
+}
+
+/*
+ * Write one word of ip-tuple memory: read the full-width entry so the
+ * other tuples' words are preserved, patch this tuple's word, then write
+ * the entry back (see the tuple-memory description at the top of the file).
+ */
+static void topaz_dpi_iptuple_set(const struct topaz_dpi_info *info,
+		uint8_t iptuple, uint8_t entry, uint32_t value)
+{
+	topaz_dpi_iptuple_read_entry(info->base, entry);
+	writel(value, info->base + TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA(iptuple));
+	topaz_dpi_iptuple_write_entry(info->base, entry);
+}
+
+/*
+ * Program the source address words (entries 0-3) of ip tuple 'iptuple'.
+ * Source entries start at 0, so entry N holds s6_addr32[N - 0].
+ */
+static void topaz_dpi_iptuple_set_srcaddr(const struct topaz_dpi_info *info,
+		uint8_t iptuple, const struct in6_addr *addr)
+{
+	int entry;
+
+	for (entry = TOPAZ_EMAC_RX_DPI_IPT_ENTRY_SRCADDR_START;
+			entry < TOPAZ_EMAC_RX_DPI_IPT_ENTRY_SRCADDR_END; entry++) {
+		topaz_dpi_iptuple_set(info, iptuple, entry, ntohl(addr->s6_addr32[entry - 0]));
+	}
+}
+
+/*
+ * Program the destination address words (entries 4-7) of ip tuple
+ * 'iptuple'.  Dest entries start at 4, so entry N holds s6_addr32[N - 4].
+ */
+static void topaz_dpi_iptuple_set_destaddr(const struct topaz_dpi_info *info,
+		uint8_t iptuple, const struct in6_addr *addr)
+{
+	int entry;
+
+	for (entry = TOPAZ_EMAC_RX_DPI_IPT_ENTRY_DESTADDR_START;
+			entry < TOPAZ_EMAC_RX_DPI_IPT_ENTRY_DESTADDR_END; entry++) {
+		topaz_dpi_iptuple_set(info, iptuple, entry, ntohl(addr->s6_addr32[entry - 4]));
+	}
+}
+
+/*
+ * Program one of the two ports in the ports entry (entry 8) of ip tuple
+ * 'iptuple', preserving the other port via read-modify-write of the
+ * packed src/dest port word.
+ */
+static void topaz_dpi_iptuple_set_port(const struct topaz_dpi_info *info,
+		uint8_t iptuple, uint16_t port, bool is_src)
+{
+	uint32_t reg;
+	uint16_t srcport;
+	uint16_t destport;
+
+	/*
+	 * ip proto memory ports are little endian
+	 * request format is network endian
+	 */
+	topaz_dpi_iptuple_read_entry(info->base, TOPAZ_EMAC_RX_DPI_IPT_ENTRY_PORTS);
+
+	reg = readl(info->base + TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA(iptuple));
+
+	srcport = MS(reg, TOPAZ_EMAC_RX_DPI_IPT_PORT_SRC);
+	destport = MS(reg, TOPAZ_EMAC_RX_DPI_IPT_PORT_DEST);
+	if (is_src) {
+		srcport = ntohs(port);
+	} else {
+		destport = ntohs(port);
+	}
+	reg = SM(destport, TOPAZ_EMAC_RX_DPI_IPT_PORT_DEST) |
+		SM(srcport, TOPAZ_EMAC_RX_DPI_IPT_PORT_SRC);
+
+	writel(reg, info->base + TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA(iptuple));
+
+	topaz_dpi_iptuple_write_entry(info->base, TOPAZ_EMAC_RX_DPI_IPT_ENTRY_PORTS);
+}
+
+/* Convenience wrappers selecting the src/dest half of the ports entry. */
+static void topaz_dpi_iptuple_set_srcport(const struct topaz_dpi_info *info, uint8_t iptuple, uint16_t port)
+{
+	topaz_dpi_iptuple_set_port(info, iptuple, port, true);
+}
+
+static void topaz_dpi_iptuple_set_destport(const struct topaz_dpi_info *info, uint8_t iptuple, uint16_t port)
+{
+	topaz_dpi_iptuple_set_port(info, iptuple, port, false);
+}
+
+/* Compiled-out debug helper: dumps the whole ip-tuple memory and all
+ * refcounts to the kernel log.  Kept for ad-hoc bring-up debugging. */
+#if 0
+void __topaz_dpi_iptuple_dump(const struct topaz_dpi_info *info, const char *func, int line)
+{
+	int entry;
+	int reg;
+
+	printk("%s caller %s:%d\n", __FUNCTION__, func, line);
+	for (entry = 0; entry <= 8; entry++) {
+		topaz_dpi_iptuple_read_entry(info->base, entry);
+		printk("entry %d: ", entry);
+		for (reg = 0; reg < 8; reg++) {
+			printk(" 0x%08x", readl(info->base + TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA(reg)));
+		}
+		printk("\n");
+	}
+
+	for (reg = 0; reg < 8; reg++) {
+		printk("ipt %d refs sa %d da %d sp %d dp %d\n",
+				reg,
+				info->ipt_srcaddr[reg].refcount,
+				info->ipt_destaddr[reg].refcount,
+				info->ipt_srcport[reg].refcount,
+				info->ipt_destport[reg].refcount);
+	}
+}
+#define topaz_dpi_iptuple_dump(_info)	__topaz_dpi_iptuple_dump(_info, __FUNCTION__, __LINE__)
+#endif
+
+/*
+ * Install a DPI filter on 'emac' as described by 'req'.
+ * Allocates/shares fields and ip-tuple parts, programs the hardware for
+ * any resource whose refcount rises from zero, then writes the filter's
+ * group/tid/output registers.  Returns the hardware filter index on
+ * success, -1 if resources are exhausted.
+ */
+int topaz_dpi_filter_add(unsigned int emac,
+		const struct topaz_dpi_filter_request *req)
+{
+	struct topaz_dpi_info *info;
+	struct topaz_dpi_filter *filter;
+	uint32_t field_group = 0;
+	uint32_t iptuple_group = 0;
+	uint32_t out_ctrl;
+	uint32_t out_combo = 0;
+	int i;
+
+	info = topaz_dpi_info_get(emac);
+
+	topaz_dpi_lock(info);
+
+	filter = topaz_dpi_get_filter(info, req);
+	if (filter == NULL) {
+		topaz_dpi_unlock(info);
+		return -1;
+	}
+
+	/*
+	 * Increment reference count for each used ip tuple part, and each dpi field.
+	 * If reference count leaves zero, also modify hardware
+	 */
+	for (i = 0; i < TOPAZ_EMAC_NUM_DPI_IPTUPLES; i++) {
+		if (isset(filter->used.srcaddr, i)) {
+			iptuple_group |= TOPAZ_EMAC_RX_DPI_IPT_GROUP_SRCADDR(i);
+			if (info->ipt_srcaddr[i].refcount++ == 0) {
+				topaz_dpi_iptuple_set_srcaddr(info, i, &req->srcaddr);
+			}
+		}
+		if (isset(filter->used.destaddr, i)) {
+			iptuple_group |= TOPAZ_EMAC_RX_DPI_IPT_GROUP_DESTADDR(i);
+			if (info->ipt_destaddr[i].refcount++ == 0) {
+				topaz_dpi_iptuple_set_destaddr(info, i, &req->destaddr);
+			}
+		}
+		if (isset(filter->used.srcport, i)) {
+			iptuple_group |= TOPAZ_EMAC_RX_DPI_IPT_GROUP_SRCPORT(i);
+			if (info->ipt_srcport[i].refcount++ == 0) {
+				topaz_dpi_iptuple_set_srcport(info, i, req->srcport);
+			}
+		}
+		if (isset(filter->used.destport, i)) {
+			iptuple_group |= TOPAZ_EMAC_RX_DPI_IPT_GROUP_DESTPORT(i);
+			if (info->ipt_destport[i].refcount++ == 0) {
+				topaz_dpi_iptuple_set_destport(info, i, req->destport);
+			}
+		}
+	}
+	for (i = 0; i < TOPAZ_EMAC_NUM_DPI_FIELDS; i++) {
+		if (isset(filter->used.fields, i)) {
+			field_group |= (1 << i);
+			if (info->fields[i].refcount++ == 0) {
+				writel(info->fields[i].def.ctrl.raw, info->base + TOPAZ_EMAC_RX_DPI_FIELD_CTRL(i));
+				writel(info->fields[i].def.val, info->base + TOPAZ_EMAC_RX_DPI_FIELD_VAL(i));
+				writel(info->fields[i].def.mask, info->base + TOPAZ_EMAC_RX_DPI_FIELD_MASK(i));
+			}
+		}
+	}
+
+	/*
+	 * Enable DPI filter:
+	 *   - set dpi -> tid map
+	 *   - set filter field group
+	 *   - set filter iptuple group
+	 *   - set output port/node
+	 *   - enable dpi filter
+	 */
+	/* NOTE(review): despite the comment above, no write to
+	 * TOPAZ_EMAC_RXP_DPI_CTRL is done here (filter_del clears its bit
+	 * there) — presumably the enable happens elsewhere; verify. */
+	if (field_group)
+		out_combo |= TOPAZ_EMAC_RX_DPI_OUT_CTRL_DPI;
+	if (iptuple_group)
+		out_combo |= TOPAZ_EMAC_RX_DPI_OUT_CTRL_IPTUPLE;
+	out_ctrl = SM(req->out_node, TOPAZ_EMAC_RX_DPI_OUT_CTRL_NODE) |
+		SM(req->out_port, TOPAZ_EMAC_RX_DPI_OUT_CTRL_PORT) |
+		SM(out_combo, TOPAZ_EMAC_RX_DPI_OUT_CTRL_COMBO);
+	topaz_dpi_filter_add_set_tidmap(info, filter->index, req->tid);
+	writel(field_group, info->base + TOPAZ_EMAC_RX_DPI_FIELD_GROUP(filter->index));
+	writel(iptuple_group, info->base + TOPAZ_EMAC_RX_DPI_IPT_GROUP(filter->index));
+	writel(out_ctrl, info->base + TOPAZ_EMAC_RX_DPI_OUT_CTRL(filter->index));
+
+	topaz_dpi_unlock(info);
+
+	return filter->index;
+}
+EXPORT_SYMBOL(topaz_dpi_filter_add);
+
+/*
+ * Remove DPI filter 'filter_no' from 'emac': disable it in hardware, drop
+ * the reference counts of its shared fields and ip-tuple parts (clearing
+ * the hardware state of any that reach zero), and return the filter to
+ * the unused list.
+ *
+ * Robustness fix: guard against deleting a filter that is not currently
+ * installed.  TAILQ_REMOVE() against the wrong list head corrupts
+ * tqh_last when the element is the tail of the *other* list, and the
+ * subsequent re-insert would put the filter on the unused list twice.
+ */
+void topaz_dpi_filter_del(unsigned int emac, int filter_no)
+{
+	struct topaz_dpi_info *info = topaz_dpi_info_get(emac);
+	struct topaz_dpi_filter *filter;
+	struct topaz_dpi_filter *used;
+	unsigned int i;
+
+	if (filter_no < 0 || filter_no >= TOPAZ_EMAC_NUM_DPI_FILTERS)
+		return;
+
+	topaz_dpi_lock(info);
+
+	filter = &info->filters[filter_no];
+
+	/* ignore filters that were never added or were already deleted */
+	TAILQ_FOREACH(used, &info->used_filters_head, next) {
+		if (used == filter)
+			break;
+	}
+	if (used == NULL) {
+		topaz_dpi_unlock(info);
+		return;
+	}
+
+	/* disable filters, field group & ip tuple group */
+	writel(readl(info->base + TOPAZ_EMAC_RXP_DPI_CTRL) & ~BIT(filter->index),
+			info->base + TOPAZ_EMAC_RXP_DPI_CTRL);
+	writel(0, info->base + TOPAZ_EMAC_RX_DPI_OUT_CTRL(filter->index));
+	writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_GROUP(filter->index));
+	writel(0, info->base + TOPAZ_EMAC_RX_DPI_IPT_GROUP(filter->index));
+
+	/* decrement reference counts, clear hw if they hit zero */
+	for (i = 0; i < TOPAZ_EMAC_NUM_DPI_IPTUPLES; i++) {
+		const struct in6_addr zero = IN6ADDR_ANY_INIT;
+		if (isset(filter->used.srcaddr, i) &&
+				--info->ipt_srcaddr[i].refcount == 0) {
+			topaz_dpi_iptuple_set_srcaddr(info, i, &zero);
+		}
+		if (isset(filter->used.destaddr, i) &&
+				--info->ipt_destaddr[i].refcount == 0) {
+			topaz_dpi_iptuple_set_destaddr(info, i, &zero);
+		}
+		if (isset(filter->used.srcport, i) &&
+				--info->ipt_srcport[i].refcount == 0) {
+			topaz_dpi_iptuple_set_srcport(info, i, 0);
+		}
+		if (isset(filter->used.destport, i) &&
+				--info->ipt_destport[i].refcount == 0) {
+			topaz_dpi_iptuple_set_destport(info, i, 0);
+		}
+	}
+	for (i = 0; i < TOPAZ_EMAC_NUM_DPI_FIELDS; i++) {
+		if (isset(filter->used.fields, i) &&
+				--info->fields[i].refcount == 0) {
+			writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_CTRL(i));
+			writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_VAL(i));
+			writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_MASK(i));
+		}
+	}
+
+	memset(&filter->used, 0, sizeof(filter->used));
+
+	TAILQ_REMOVE(&info->used_filters_head, filter, next);
+	TAILQ_INSERT_TAIL(&info->unused_filters_head, filter, next);
+
+	topaz_dpi_unlock(info);
+}
+EXPORT_SYMBOL(topaz_dpi_filter_del);
+
+/*
+ * Bring the DPI hardware to a clean state: zero every field register,
+ * every filter group/output register, and the whole ip-tuple memory.
+ */
+static void topaz_dpi_hw_init(unsigned int emac)
+{
+	struct topaz_dpi_info *info = topaz_dpi_info_get(emac);
+	int i;
+
+	/* clear dpi fields */
+	for (i = 0; i < TOPAZ_EMAC_NUM_DPI_FIELDS; i++) {
+		writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_VAL(i));
+		writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_MASK(i));
+		writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_CTRL(i));
+	}
+
+	/* clear dpi filters and group registers */
+	for (i = 0; i < TOPAZ_EMAC_NUM_DPI_FILTERS; i++) {
+		writel(0, info->base + TOPAZ_EMAC_RX_DPI_OUT_CTRL(i));
+		writel(0, info->base + TOPAZ_EMAC_RX_DPI_FIELD_GROUP(i));
+		writel(0, info->base + TOPAZ_EMAC_RX_DPI_IPT_GROUP(i));
+	}
+
+	/* clear ip tuple memory */
+	for (i = 0; i < TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA_MAX; i++) {
+		writel(0, info->base + TOPAZ_EMAC_RX_DPI_IPT_MEM_DATA(i));
+	}
+	/* commit the zeroed data words to every tuple entry */
+	for (i = 0; i < TOPAZ_EMAC_RX_DPI_IPT_ENTRIES; i++) {
+		topaz_dpi_iptuple_write_entry(info->base, i);
+	}
+}
+
+/*
+ * Initialise DPI bookkeeping and hardware for one EMAC.
+ * Returns 0 on success, -EINVAL for an unknown EMAC id.
+ */
+int topaz_dpi_init(unsigned int emac)
+{
+	unsigned int base_addr;
+
+	switch (emac) {
+	case 0:
+		base_addr = RUBY_ENET0_BASE_ADDR;
+		break;
+	case 1:
+		base_addr = RUBY_ENET1_BASE_ADDR;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	topaz_dpi_info_init(topaz_dpi_info_get(emac), base_addr);
+	topaz_dpi_hw_init(emac);
+
+	return 0;
+}
+EXPORT_SYMBOL(topaz_dpi_init);
+
+MODULE_DESCRIPTION("Topaz EMAC DPI filters");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/qtn/topaz/fwt_if.c b/drivers/qtn/topaz/fwt_if.c
new file mode 100755
index 0000000..b56d9b8
--- /dev/null
+++ b/drivers/qtn/topaz/fwt_if.c
@@ -0,0 +1,780 @@
+/**
+ * (C) Copyright 2013 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ **/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <linux/igmp.h>
+#include <linux/hardirq.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/if_ether.h>
+#include <linux/net/bridge/br_public.h>
+
+#include <net80211/if_ethersubr.h>
+#include <net80211/ieee80211.h>
+
+#include <qtn/qtn_debug.h>
+#include <qtn/qtn_uc_comm.h>
+#include <qtn/qtn_net_packet.h>
+#include <qtn/iputil.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_fwt_cpuif.h>
+#include <qtn/topaz_fwt_if.h>
+#include <qtn/topaz_fwt_db.h>
+#include <qtn/topaz_fwt.h>
+#include <qtn/mproc_sync_base.h>
+
+MODULE_DESCRIPTION("Forwarding Table Interface");
+MODULE_AUTHOR("Quantenna Communications, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+#define PROC_NAME "topaz_fwt_if"
+
+/* Success definition in FWT Interface is return positive value */
+#define FWT_IF_SUCCESS(x)	((x) >= 0)
+/* Error definition in FWT Interface is return negative value */
+#define FWT_IF_ERROR(x)		(!(FWT_IF_SUCCESS(x)))
+
+/* specific commands keywords - must match fwt_if_usr_cmd */
+#define FWT_IF_KEY_CLEAR		"clear"
+#define FWT_IF_KEY_ON			"on"
+#define FWT_IF_KEY_OFF			"off"
+#define FWT_IF_KEY_PRINT		"print"
+#define FWT_IF_KEY_ADD_STATIC_MC	"add_static_mc"
+#define FWT_IF_KEY_DEL_STATIC_MC	"del_static_mc"
+#define FWT_IF_KEY_GET_MC_LIST		"get_mc_list"
+#define FWT_IF_KEY_ADD			"add"
+#define FWT_IF_KEY_DELETE		"del"
+#define FWT_IF_KEY_AUTO			"auto"
+#define FWT_IF_KEY_MANUAL		"manual"
+#define FWT_IF_KEY_NODE			"node"
+#define FWT_IF_KEY_4ADDR		"4addr"
+#define FWT_IF_KEY_HELP			"help"
+#define FWT_IF_KEY_DEBUG		"debug"
+#define FWT_IF_KEY_AGEING		"ageing"
+
+/* additional keywords */
+#define FWT_IF_KEY_PORT			"port"
+#define FWT_IF_KEY_ENABLE		"enable"
+#define FWT_IF_KEY_MAC			"mac"
+
+/* debug printk, gated by the runtime 'debug' command flag */
+#define PRINT_FWT(a...)		do { if (g_debug) { printk(a); } } while(0)
+
+/* TQE port names, indexed by hardware port number (non-static: presumably
+ * referenced from other compilation units — verify before making static) */
+const char *g_port_names[] = TOPAZ_TQE_PORT_NAMES;
+static fwt_if_sw_cmd_hook g_fwt_if_cmd_hook = NULL;	/* command sink, registered externally */
+static char *fwt_if_outbuf = NULL;
+/* NOTE(review): no initializer/spin_lock_init visible in this chunk for
+ * fwt_if_outbuf_lock — confirm it is initialised before first use */
+static spinlock_t fwt_if_outbuf_lock;	/* synchronise use of fwt_if_outbuf */
+
+/* set command keywords */
+/* Indexed by fwt_if_usr_cmd — keep the order in lockstep with that enum.
+ * (FWT_IF_KEY_NODE/PORT/ENABLE/MAC are parameter keywords, not commands,
+ * and intentionally do not appear here.) */
+static char *fwt_str_cmd[FWT_IF_MAX_CMD] = {
+		FWT_IF_KEY_CLEAR,
+		FWT_IF_KEY_ON,
+		FWT_IF_KEY_OFF,
+		FWT_IF_KEY_PRINT,
+		FWT_IF_KEY_ADD_STATIC_MC,
+		FWT_IF_KEY_DEL_STATIC_MC,
+		FWT_IF_KEY_GET_MC_LIST,
+		FWT_IF_KEY_ADD,
+		FWT_IF_KEY_DELETE,
+		FWT_IF_KEY_AUTO,
+		FWT_IF_KEY_MANUAL,
+		FWT_IF_KEY_4ADDR,
+		FWT_IF_KEY_DEBUG,
+		FWT_IF_KEY_HELP,
+		FWT_IF_KEY_AGEING,
+};
+
+/* value of extracted keywords from user */
+/* Parameter kinds accepted by fwt_if_extract_parm(). */
+typedef	enum {
+	FWT_IF_ID,	/* MAC, or multicast IPv4/IPv6 address */
+	FWT_IF_MAC,	/* MAC address only */
+	FWT_IF_PORT,	/* TQE port by name */
+	FWT_IF_NODE,	/* colon-separated node index list */
+	FWT_IF_ENABLE,	/* boolean 0/1 */
+	FWT_IF_VALUE,	/* signed integer */
+	FWT_IF_MAX_PARAM,
+} fwt_if_usr_str;
+
+/*
+ * Tokenise 'str' in place: overwrite each run of whitespace with NULs and
+ * record the start of every word in 'words'.  Returns the word count.
+ * A NULL or empty/blank string yields zero words.
+ */
+static inline int fwt_if_split_words(char **words, char *str)
+{
+	int nwords = 0;
+	char *s = str;
+
+	/* skip leading whitespace */
+	while (s && *s && isspace(*s))
+		s++;
+
+	while (s && *s) {
+		words[nwords] = s;
+		nwords++;
+
+		/* consume the word itself */
+		while (*s && !isspace(*s))
+			s++;
+
+		/* terminate the word and skip to the start of the next */
+		while (*s && isspace(*s)) {
+			*s = 0;
+			s++;
+		}
+	}
+
+	return nwords;
+}
+
+/*
+ * Forward a parsed user command to the registered software hook.
+ * Returns the hook's result, or -1 if no hook is attached.
+ *
+ * Style fix: 'static int inline' -> idiomatic 'static inline int'
+ * (storage class, then inline, then type), matching kernel convention.
+ */
+static inline int fwt_if_set_sw_cmd(fwt_if_usr_cmd cmd, struct fwt_if_common *data)
+{
+	if (g_fwt_if_cmd_hook) {
+		return g_fwt_if_cmd_hook(cmd, data);
+	}
+
+	return -1;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,3)
+/* back-ported from Linux 3.14.24 net_utils.c */
+/*
+ * Parse "XX:XX:XX:XX:XX:XX" into 'mac'.
+ * Returns 1 on success, 0 on malformed input; 'mac' is only written
+ * after the whole string has been validated.
+ */
+static int mac_pton(const char *s, u8 *mac)
+{
+	int i;
+
+	/* XX:XX:XX:XX:XX:XX */
+	if (strlen(s) != (3 * ETH_ALEN - 1))
+		return 0;
+
+	/* Don't dirty result unless string is valid MAC. */
+	for (i = 0; i < ETH_ALEN; i++) {
+		if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
+			return 0;
+		if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
+			return 0;
+	}
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
+	}
+	return 1;
+}
+#endif
+
+/*
+ * Parse one parameter value 'str' according to 'cmd' and store the result
+ * through 'var' (a struct fwt_if_id *, uint8_t *, uint8_t array,
+ * uint32_t * or int32_t *, depending on 'cmd').
+ * Returns 1 on success, negative errno on failure.
+ *
+ * Fixes vs. the original: the FWT_IF_ENABLE and FWT_IF_VALUE branches
+ * ignored sscanf()'s return value, so a non-numeric string left 'param'
+ * uninitialised and its garbage value was then read; the unreachable
+ * 'break' after 'return -ENOENT' is also dropped.
+ */
+static int fwt_if_extract_parm(char *str, fwt_if_usr_str cmd, void *var)
+{
+	struct fwt_if_id *id;
+#ifdef CONFIG_IPV6
+	uint8_t *ip6;
+#endif
+	uint8_t *ip4;
+	uint8_t *mac;
+	int i, node_num, param;
+	uint8_t node[FWT_IF_USER_NODE_MAX];
+
+	/* clear node array */
+	memset(node, FWT_DB_INVALID_NODE, FWT_IF_USER_NODE_MAX);
+	if (!str) {
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case FWT_IF_MAC:
+		id = var;
+		mac = id->mac_be;
+		if (!mac_pton(str, mac))
+			return -EINVAL;
+		break;
+	case FWT_IF_ID:
+		/*
+		 * This keyword takes a MAC address or an IPv4 or IPv6 address. If it is a multicast
+		 * IP address it is also converted into a multicast layer 2 (MAC) address.
+		 */
+		id = var;
+#ifdef CONFIG_IPV6
+		ip6 = id->ip.u.ip6.in6_u.u6_addr8;
+#endif
+		ip4 = (void *) &id->ip.u.ip4;
+		mac = id->mac_be;
+		id->ip.proto = 0;
+
+#ifdef CONFIG_IPV6
+		/* accept complete and compressed IPv6 addr notation */
+		if (in6_pton(str, -1, ip6, -1, NULL)) {
+			if (!ipv6_addr_is_multicast(&id->ip.u.ip6))
+				return -EINVAL;
+			id->ip.proto = htons(ETHERTYPE_IPV6);
+			fwt_mcast_to_mac(mac, &id->ip);
+		} else
+#endif
+		/* accept XX:XX:XX:XX:XX:XX ethernet addr notation */
+		if (mac_pton(str, mac)) {
+			/* MAC handling OK */
+		}
+		/* accept x.x.x.x IPv4 addr notation */
+		else if (in4_pton(str, -1, ip4, -1, NULL)) {
+			if (!ipv4_is_multicast(id->ip.u.ip4))
+				return -EINVAL;
+			id->ip.proto = htons(ETHERTYPE_IP);
+			fwt_mcast_to_mac(mac, &id->ip);
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case FWT_IF_PORT:
+		/* Port array names order correspond to HW port numbers */
+		for (i = 0; i < TOPAZ_TQE_NUM_PORTS; i++) {
+			if (strcmp(str, g_port_names[i]) == 0) {
+				*((uint8_t*)var) = i;
+				return 1;
+			}
+		}
+		return -ENOENT;
+	case FWT_IF_NODE:
+		node_num = sscanf(str, "%hhu:%hhu:%hhu:%hhu:%hhu:%hhu",
+						&node[0], &node[1], &node[2],
+						&node[3], &node[4], &node[5]);
+		if (node_num <= 0) {
+			return -EINVAL;
+		}
+
+		for (i = 0; i < node_num; i++) {
+			if (node[i] >= QTN_NCIDX_MAX) {
+				return -EINVAL;
+			}
+		}
+		memcpy(var, node, node_num * sizeof(uint8_t));
+		break;
+	case FWT_IF_ENABLE:
+		/* reject non-numeric input: 'param' is uninitialised otherwise */
+		if (sscanf(str, "%u", &param) != 1) {
+			return -EINVAL;
+		}
+		if ( (param == 0) || (param == 1)) {
+			*((uint32_t*)var) = param;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case FWT_IF_VALUE:
+		if (sscanf(str, "%d", &param) != 1) {
+			return -EINVAL;
+		}
+		*((int32_t*)var) = param;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 1;
+}
+
+/*
+ * Find keyword 'str' in the 'words' array and return the word that
+ * immediately follows it (its value).
+ * @param str: the keyword to look for
+ * @param words: the tokenised command words to search
+ * @param word_count: number of entries in 'words'
+ * @return: the value following the keyword, or NULL if the keyword is
+ *          absent or is the last word (so no value can follow it)
+ */
+static char *fwt_if_get_val_from_keyword(char *str, char **words, uint8_t word_count)
+{
+	/* pre-decrement stops the scan before the final word, so a match
+	 * always has a following word to return */
+	while (words && --word_count) {
+		if (strcmp(str, *words) == 0) {
+			/* return the value attached to the keyword */
+			return *(++words);
+		}
+		/* advance to next word */
+		words++;
+	}
+	return NULL;
+}
+
+/* Print the FWT command reference to the kernel log. */
+static void fwt_if_apply_user_print_help(void)
+{
+	static const char * const help_lines[] = {
+		"FWT commands",
+		"clear        remove all entries from the SW and HW FWT tables",
+		"on           enable the FWT interface",
+		"off          disable the FWT interface",
+		"print        print the contents of the SW FWT",
+		"add_static_mc <ip> mac <mac>",
+		"             add a static multicast entry",
+		"del_static_mc <ip> mac <mac>",
+		"             delete a static multicast entry",
+		"get_mc_list  return a list of multicast addresses",
+		"add <mac> port <port> node <node>",
+		"             add a unicast entry to the FWT",
+		"             e.g. add 01:ab:ac:34:67:20 port wmac node 3",
+		"add <ip> port <port> node <node>",
+		"             add a multicast entry to the FWT",
+		"             e.g. add 224.1.2.3 port wmac node 3",
+		"del <mac>    delete a table entry by MAC address",
+		"del <ip>     delete a table entry by IP address",
+		"auto         enable automatic generation of table entries by the bridge module",
+		"manual       enable manual-only creation of table entries, with no restrictions",
+		"4addr <mac> enable {0|1}",
+		"             enable or disable 4-address support for a MAC address",
+		"debug {0|1}  enable or disable debug messages",
+		"ageing {0|1} enable or disable table entry ageing",
+		"help         print this",
+	};
+	unsigned int line;
+
+	for (line = 0; line < sizeof(help_lines) / sizeof(help_lines[0]); line++)
+		printk("%s\n", help_lines[line]);
+}
+/*
+ * Handle the "debug {0|1}" user command.
+ * @return: 1 on success, negative errno on failure
+ */
+static int fwt_if_apply_user_debug_mode(char **words, uint8_t word_count)
+{
+	struct fwt_if_common data;
+	char *val;
+	int ret;
+
+	/* pull the 0/1 argument that follows the "debug" keyword */
+	val = fwt_if_get_val_from_keyword(FWT_IF_KEY_DEBUG, words, word_count);
+	ret = fwt_if_extract_parm(val, FWT_IF_ENABLE, &data.param);
+	if (!FWT_IF_ERROR(ret))
+		ret = fwt_if_set_sw_cmd(FWT_IF_CMD_DEBUG, &data);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	return 1;
+}
+
+/*
+ * Handle the "ageing {0|1}" user command.
+ * @return: 1 on success, negative errno on failure
+ */
+static int fwt_if_apply_user_ageing(char **words, uint8_t word_count)
+{
+	struct fwt_if_common data;
+	int ret;
+
+	/* parse the enable flag attached to the "ageing" keyword */
+	ret = fwt_if_extract_parm(fwt_if_get_val_from_keyword(FWT_IF_KEY_AGEING,
+				words, word_count), FWT_IF_ENABLE, &data.param);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	ret = fwt_if_set_sw_cmd(FWT_IF_CMD_AGEING, &data);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	return 1;
+}
+
+/*
+ * Handle the "4addr <mac> enable {0|1}" user command.
+ * @return: 1 on success, negative errno on failure
+ */
+static int fwt_if_apply_user_4addr_mode(char **words, uint8_t word_count)
+{
+	struct fwt_if_common data;
+	int ret;
+
+	/* the MAC address follows the "4addr" keyword */
+	ret = fwt_if_extract_parm(fwt_if_get_val_from_keyword(FWT_IF_KEY_4ADDR,
+				words, word_count), FWT_IF_ID, &data.id);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	/* the 0/1 flag follows the "enable" keyword */
+	ret = fwt_if_extract_parm(fwt_if_get_val_from_keyword(FWT_IF_KEY_ENABLE,
+				words, word_count), FWT_IF_ENABLE, &data.param);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	ret = fwt_if_set_sw_cmd(FWT_IF_CMD_4ADDR, &data);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	return 1;
+}
+
+/*
+ * Handle the "del <mac|ip>" user command.
+ * @return: 1 on success, negative errno on failure
+ */
+static int fwt_if_apply_user_del_entry(char **words, uint8_t word_count)
+{
+	struct fwt_if_common data;
+	char *id_str;
+	int ret;
+
+	/* the MAC or IP identifier follows the "del" keyword */
+	id_str = fwt_if_get_val_from_keyword(FWT_IF_KEY_DELETE, words, word_count);
+	ret = fwt_if_extract_parm(id_str, FWT_IF_ID, &data.id);
+	if (!FWT_IF_ERROR(ret))
+		ret = fwt_if_set_sw_cmd(FWT_IF_CMD_DELETE, &data);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	return 1;
+}
+
+/*
+ * Handle the "add <mac|ip> port <port> node <node-list>" user command.
+ * @return: 1 on success, negative errno on failure
+ *
+ * Example: "add ab:34:be:af:34:42 port lhost node 2:4:3"
+ */
+static int fwt_if_apply_user_add_entry(char **words, uint8_t word_count)
+{
+	struct fwt_if_common data;
+	int ret;
+
+	/* nodes not named on the command line stay marked invalid */
+	memset(data.node, FWT_DB_INVALID_NODE, sizeof(data.node));
+
+	ret = fwt_if_extract_parm(fwt_if_get_val_from_keyword(FWT_IF_KEY_ADD,
+				words, word_count), FWT_IF_ID, &data.id);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	ret = fwt_if_extract_parm(fwt_if_get_val_from_keyword(FWT_IF_KEY_PORT,
+				words, word_count), FWT_IF_PORT, &data.port);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	ret = fwt_if_extract_parm(fwt_if_get_val_from_keyword(FWT_IF_KEY_NODE,
+				words, word_count), FWT_IF_NODE, &data.node);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	ret = fwt_if_set_sw_cmd(FWT_IF_CMD_ADD, &data);
+	if (FWT_IF_ERROR(ret))
+		return ret;
+
+	return 1;
+}
+
+/*
+ * Handle the "add_static_mc <ip> mac <mac>" user command.
+ * Parses the IP and MAC, rejects non-multicast group addresses, and
+ * forwards the entry to the registered SW command hook.
+ * @return: 1 on success, negative errno on parse/validation failure
+ */
+static int fwt_if_apply_user_add_static_mc(char **words, uint8_t word_count)
+{
+	int rc;
+	char *str_val = NULL;
+	struct fwt_if_common data;
+	struct fwt_if_id *id = &data.id;
+
+	/* this command carries no node list; mark all node slots unused */
+	memset(data.node, FWT_DB_INVALID_NODE, sizeof(data.node));
+
+	/* ADD_STATIC_MC example: "add_static_mc 224.51.2.3 mac ab:34:be:af:34:42" */
+	str_val = fwt_if_get_val_from_keyword(FWT_IF_KEY_ADD_STATIC_MC, words, word_count);
+	rc = fwt_if_extract_parm(str_val, FWT_IF_ID, id);
+	if (FWT_IF_ERROR(rc))
+		return rc;
+
+	str_val = fwt_if_get_val_from_keyword(FWT_IF_KEY_MAC, words, word_count);
+	rc = fwt_if_extract_parm(str_val, FWT_IF_MAC, id);
+	if (FWT_IF_ERROR(rc))
+		return rc;
+
+	/* only multicast group addresses may be installed as static entries;
+	 * proto was set by the ID parse above */
+	switch (id->ip.proto) {
+	case htons(ETHERTYPE_IP):
+		if (!IPUTIL_V4_ADDR_MULTICAST(id->ip.u.ip4))
+			return -EINVAL;
+		break;
+#ifdef CONFIG_IPV6
+	case htons(ETHERTYPE_IPV6):
+		/* checks only the first 16 bits of the v6 address (ff00::/8 test) */
+		if (!IPUTIL_V6_ADDR_MULTICAST(*id->ip.u.ip6.in6_u.u6_addr16))
+			return -EINVAL;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	rc = fwt_if_set_sw_cmd(FWT_IF_CMD_ADD_STATIC_MC, &data);
+	if (FWT_IF_ERROR(rc))
+		return rc;
+
+	return 1;
+}
+
+/*
+ * Handle the "del_static_mc <ip> mac <mac>" user command.
+ * Mirror of fwt_if_apply_user_add_static_mc(): parses and validates the
+ * multicast IP and MAC, then forwards the delete to the SW command hook.
+ * @return: 1 on success, negative errno on parse/validation failure
+ */
+static int fwt_if_apply_user_del_static_mc(char **words, uint8_t word_count)
+{
+	int rc;
+	char *str_val = NULL;
+	struct fwt_if_common data;
+	struct fwt_if_id *id = &data.id;
+
+	/* this command carries no node list; mark all node slots unused */
+	memset(data.node, FWT_DB_INVALID_NODE, sizeof(data.node));
+
+	/* DEL_STATIC_MC example: "del_static_mc 224.51.2.3 mac ab:34:be:af:34:42" */
+	str_val = fwt_if_get_val_from_keyword(FWT_IF_KEY_DEL_STATIC_MC, words, word_count);
+	rc = fwt_if_extract_parm(str_val, FWT_IF_ID, id);
+	if (FWT_IF_ERROR(rc))
+		return rc;
+
+	str_val = fwt_if_get_val_from_keyword(FWT_IF_KEY_MAC, words, word_count);
+	rc = fwt_if_extract_parm(str_val, FWT_IF_MAC, id);
+	if (FWT_IF_ERROR(rc))
+		return rc;
+
+	/* only multicast group addresses are valid for static entries */
+	switch (id->ip.proto) {
+	case htons(ETHERTYPE_IP):
+		if (!IPUTIL_V4_ADDR_MULTICAST(id->ip.u.ip4))
+			return -EINVAL;
+		break;
+#ifdef CONFIG_IPV6
+	case htons(ETHERTYPE_IPV6):
+		/* checks only the first 16 bits of the v6 address (ff00::/8 test) */
+		if (!IPUTIL_V6_ADDR_MULTICAST(*id->ip.u.ip6.in6_u.u6_addr16))
+			return -EINVAL;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	rc = fwt_if_set_sw_cmd(FWT_IF_CMD_DEL_STATIC_MC, &data);
+	if (FWT_IF_ERROR(rc))
+		return rc;
+
+	return 1;
+}
+
+#define FWT_IF_OUTBUF_LEN_MIN	32
+#define FWT_IF_OUTBUF_LEN_MAX	8192
+
+/*
+ * Handle the "get_mc_list <bufsize>" user command.
+ * Allocates a zeroed output buffer of the requested size and asks the SW
+ * command hook to fill it; the buffer is handed to userspace by the next
+ * proc read, which also frees it.
+ * Must be called with the fwt_if_outbuf_lock held.
+ * @return: 1 on success, negative errno on failure
+ */
+static int fwt_if_get_mc_list(char **words, uint8_t word_count)
+{
+	int rc;
+	struct fwt_if_common data;
+	char *str_val = NULL;
+
+	if (fwt_if_outbuf) {
+		/* a command was issued previously but the output was not retrieved */
+		kfree(fwt_if_outbuf);
+		fwt_if_outbuf = NULL;
+	}
+
+	str_val = fwt_if_get_val_from_keyword(FWT_IF_KEY_GET_MC_LIST, words, word_count);
+	rc = fwt_if_extract_parm(str_val, FWT_IF_VALUE, &data.param);
+	if (FWT_IF_ERROR(rc))
+		return rc;
+
+	/* NOTE(review): a request of exactly FWT_IF_OUTBUF_LEN_MIN is rejected
+	 * by "<="; confirm whether "<" was intended */
+	if (data.param <= FWT_IF_OUTBUF_LEN_MIN || data.param > FWT_IF_OUTBUF_LEN_MAX)
+		return -EINVAL;
+
+	/* kzalloc replaces the previous kmalloc + memset pair */
+	fwt_if_outbuf = kzalloc(data.param, GFP_KERNEL);
+	if (!fwt_if_outbuf)
+		return -ENOMEM;
+
+	data.extra = fwt_if_outbuf;
+
+	/* best effort: an empty buffer is returned to the reader on failure */
+	fwt_if_set_sw_cmd(FWT_IF_CMD_GET_MC_LIST, &data);
+
+	return 1;
+}
+
+/* Apply user command.
+ * User command can control the FWT interface.
+ * @param cmd_num: command number (FWT_IF_MAX_CMD if the first word matched
+ *                 no known command; rejected by the default case below)
+ * @param words: the split words without spaces from the user space console interface
+ * @param word_count: number of words after split
+ * @return: 1 on success, -EPERM on any failure
+ */
+static int fwt_if_apply_user_command(fwt_if_usr_cmd cmd_num, char **words, uint8_t word_count)
+{
+	int rc = -EINVAL;
+	struct fwt_if_common data;
+
+	if ((word_count == 0) || (!words))
+		goto cmd_failure;
+
+	memset(&data, 0, sizeof(data));
+
+	switch(cmd_num) {
+		/* argument-less commands share a single dispatch */
+		case FWT_IF_CMD_CLEAR:
+		case FWT_IF_CMD_ON:
+		case FWT_IF_CMD_OFF:
+		case FWT_IF_CMD_PRINT:
+		case FWT_IF_CMD_AUTO:
+		case FWT_IF_CMD_MANUAL:
+			rc = fwt_if_set_sw_cmd(cmd_num, &data);
+			break;
+		case FWT_IF_CMD_GET_MC_LIST:
+			/* fwt_if_get_mc_list() requires the outbuf lock held */
+			spin_lock(&fwt_if_outbuf_lock);
+			rc = fwt_if_get_mc_list(words, word_count);
+			spin_unlock(&fwt_if_outbuf_lock);
+			break;
+		case FWT_IF_CMD_ADD:
+			rc = fwt_if_apply_user_add_entry(words, word_count);
+			break;
+		case FWT_IF_CMD_DELETE:
+			/* Delete example: "del ab:34:be:af:34:42" */
+			rc = fwt_if_apply_user_del_entry(words, word_count);
+			break;
+		case FWT_IF_CMD_ADD_STATIC_MC:
+			rc = fwt_if_apply_user_add_static_mc(words, word_count);
+			break;
+		case FWT_IF_CMD_DEL_STATIC_MC:
+			rc = fwt_if_apply_user_del_static_mc(words, word_count);
+			break;
+		case FWT_IF_CMD_4ADDR:
+			rc = fwt_if_apply_user_4addr_mode(words, word_count);
+			break;
+		case FWT_IF_CMD_DEBUG:
+			rc = fwt_if_apply_user_debug_mode(words, word_count);
+			break;
+		case FWT_IF_CMD_AGEING:
+			rc = fwt_if_apply_user_ageing(words, word_count);
+			break;
+		case FWT_IF_CMD_HELP:
+			rc = 0;
+			fwt_if_apply_user_print_help();
+			break;
+		default:
+			goto cmd_failure;
+		break;
+	}
+
+	if (FWT_IF_ERROR(rc))
+		goto cmd_failure;
+
+	return 1;
+
+cmd_failure:
+	/* NOTE(review): the handler's specific errno is masked to -EPERM here */
+	if (word_count)
+		printk(KERN_INFO "Failed to parse command: %s\n", *words);
+	else
+		fwt_if_apply_user_print_help();
+
+	return -EPERM;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/*
+ * procfs write handler: parse a user command line and dispatch it.
+ * Always consumes the full write (returns count) once past validation.
+ */
+static ssize_t fwt_if_write_proc(struct file *file, const char __user *buffer,
+				size_t count, loff_t *f_pos)
+#else
+static int fwt_if_write_proc(struct file *file, const char __user *buffer,
+				unsigned long count, void *data)
+#endif
+{
+	char *cmd;
+	int i;
+	char **words;
+	uint8_t word_count;
+	fwt_if_usr_cmd cmd_num = 0;
+
+	/* fixed: count == 0 underflowed cmd[count - 1] below (and kzalloc(0)
+	 * yields a non-dereferencable pointer); also cap the user-controlled
+	 * allocation size */
+	if (count == 0 || count > PAGE_SIZE)
+		return -EINVAL;
+
+	cmd = kzalloc(count, GFP_KERNEL);
+	words = kzalloc(count * sizeof(char *) / 2, GFP_KERNEL);
+	if (!cmd || !words)
+		goto out;
+
+	if (copy_from_user(cmd, buffer, count))
+		goto out;
+
+	/* terminate the command, dropping the trailing character (newline) */
+	cmd[count - 1] = '\0';
+
+	word_count = fwt_if_split_words(words, cmd);
+	if (!word_count)
+		goto out;
+
+	/* map the first word to a command number; no match leaves cmd_num at
+	 * FWT_IF_MAX_CMD, rejected by the dispatcher's default case */
+	for (i = 0; i < FWT_IF_MAX_CMD; i++, cmd_num++) {
+		if (strcmp(words[0], fwt_str_cmd[i]) == 0)
+			break;
+	}
+
+	local_bh_disable();
+
+	fwt_if_apply_user_command(cmd_num, words, word_count);
+
+	local_bh_enable();
+
+out:
+	/* kfree(NULL) is a no-op, so partial allocation is handled */
+	kfree(cmd);
+	kfree(words);
+
+	return count;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+//TODO: As of now just signature has matched; need to enhance further
+static int fwt_if_show(struct seq_file *seq, void *v)
+{
+	/* placeholder read path (see TODO): does not yet return the
+	 * fwt_if_outbuf contents the way the legacy read_proc does */
+	int class_id = (int)seq->private;
+
+	seq_printf(seq, "class=%d, level=%d\n", class_id, class_id);
+	return 0;
+}
+
+static int fwt_if_open_proc(struct inode *inode, struct file *file)
+{
+	return single_open(file, fwt_if_show, PDE_DATA(inode));
+}
+
+static const struct file_operations fwt_if_fops = {
+	.owner = THIS_MODULE,
+	.open = fwt_if_open_proc,
+	.write = fwt_if_write_proc,
+	.read = seq_read,
+	/* fixed: single_open() allocates a seq_file which leaked on every
+	 * close without single_release; seq_lseek completes the seq_file set */
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#else
+/*
+ * Legacy (pre-4.7) procfs read handler.
+ * Hands at most (count - 1) bytes of the pending output buffer to the
+ * reader, then frees the buffer; a read with nothing pending yields an
+ * empty string.
+ * NOTE(review): assumes count >= 1 (page[count - 1] below); the procfs
+ * core supplies a full page here — confirm.
+ * @return: number of bytes placed in page
+ */
+static int fwt_if_read_proc(char *page, char **start, off_t off, int count, int *eof, void *_unused)
+{
+	int printed = 0;
+
+	spin_lock(&fwt_if_outbuf_lock);
+
+	if (fwt_if_outbuf) {
+		printed = strnlen(fwt_if_outbuf, count - 1);
+		strncpy(page, fwt_if_outbuf, printed);
+		/* output is single-shot: free it once delivered */
+		kfree(fwt_if_outbuf);
+		fwt_if_outbuf = NULL;
+	} else {
+		page[0] = '\0';
+	}
+
+	spin_unlock(&fwt_if_outbuf_lock);
+
+	page[count - 1] = '\0';
+	*eof = 1;
+
+	return printed;
+}
+
+#endif
+
+
+/*
+ * Register the software command hook invoked for every parsed FWT
+ * user command. Passing NULL unregisters the hook.
+ */
+void fwt_if_register_cbk_t(fwt_if_sw_cmd_hook hook)
+{
+	g_fwt_if_cmd_hook = hook;
+}
+EXPORT_SYMBOL(fwt_if_register_cbk_t);
+
+/* Module unload: remove the FWT proc control file. */
+static void __exit fwt_if_exit(void)
+{
+	remove_proc_entry(PROC_NAME, NULL);
+}
+
+/*
+ * Create the FWT /proc control file.
+ * @return: 0 on success, -ENODEV if the proc entry cannot be created
+ */
+static int __init topaz_fwt_if_create_proc(void)
+{
+	struct proc_dir_entry *entry;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	entry = proc_create_data(PROC_NAME, 0600, NULL, &fwt_if_fops, NULL);
+	if (!entry) {
+		return -ENODEV;
+	}
+#else
+	/* fixed: was "entry = = create_proc_entry(...)" — a syntax error that
+	 * broke the pre-4.7 build */
+	entry = create_proc_entry(PROC_NAME, 0600, NULL);
+	if (!entry) {
+		return -ENODEV;
+	}
+
+	entry->write_proc = fwt_if_write_proc;
+	entry->read_proc = fwt_if_read_proc;
+#endif
+	return 0;
+}
+
+/*
+ * Module init: set up state, then expose the proc interface.
+ * @return: 0 on success, negative errno on failure
+ */
+static int __init fwt_if_init(void)
+{
+	int rc;
+
+	/* fixed ordering: initialise the lock before the proc file becomes
+	 * visible to userspace; a write arriving first would otherwise take
+	 * an uninitialised spinlock */
+	spin_lock_init(&fwt_if_outbuf_lock);
+
+	rc = topaz_fwt_if_create_proc();
+	if (rc)
+		return rc;
+
+	return 0;
+}
+module_init(fwt_if_init);
+module_exit(fwt_if_exit);
diff --git a/drivers/qtn/topaz/hbm.c b/drivers/qtn/topaz/hbm.c
new file mode 100644
index 0000000..b96b6eb
--- /dev/null
+++ b/drivers/qtn/topaz/hbm.c
@@ -0,0 +1,1314 @@
+/*
+ * (C) Copyright 2012 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Quantenna HBM skb payload pool
+ */
+#include <linux/kernel.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <qtn/dmautil.h>
+#include <asm/io.h>
+
+#include <common/queue.h>
+
+#include <qtn/topaz_hbm.h>
+#include <qtn/dmautil.h>
+
+#include <net80211/if_ethersubr.h>
+
+/* local ctype-free whitespace test: space, or ASCII 9..13 (tab..CR) */
+#define isspace(c) ((((c) == ' ') || (((unsigned int)((c) - 9)) <= (13 - 9))))
+
+#define TOPAZ_HBM_PROC_FILENAME "topaz_hbm"
+#define TOPAZ_HBM_IF_PROC_NAME "topaz_hbm_if"
+
+/* buffer-depletion monitoring thresholds and poll intervals (jiffies) */
+#define HBM_BUF_DEPLETION_TH		(20)
+#define HBM_BUF_POLL_S_INTRVAL		(10 * HZ)
+#define HBM_BUF_POLL_L_INTRVAL		(60 * HZ)
+#define HBM_BUF_MINIMUM_AVAIL_NUM	(3)
+#define HBM_BUF_MINIMUM_REL_NUM		(10 * HBM_BUF_POLL_S_INTRVAL)
+
+/* user commands accepted on the HBM proc interface */
+typedef enum hbm_if_usr_cmd {
+	HBM_IF_CMD_DUMPCTL = 0,
+	HBM_IF_CMD_STATS = 1,
+	HBM_IF_MAX_CMD,
+} hbm_if_usr_cmd;
+
+#define HBM_IF_KEY_DUMPCTL	"dumpctl"
+#define HBM_IF_KEY_STATS	"stats"
+/* keyword table indexed by hbm_if_usr_cmd */
+static char* str_cmd[HBM_IF_MAX_CMD] = {
+	HBM_IF_KEY_DUMPCTL,
+	HBM_IF_KEY_STATS,
+};
+
+/* indices into topaz_hbm_stats[]; names in topaz_hbm_stats_names[] */
+typedef enum hbm_stats {
+	HBM_CNT_INVALID_POOL = 0,
+	HBM_CNT_MISALIGNED = 1,
+	HBM_CNT_MAGIC_CORRUPTED = 2,
+	HBM_CNT_QUARANTINE_CORRUPTED = 3,
+	HBM_CNT_QUARANTINE_ALLOC_FAIL = 4,
+	HBM_CNT_QUARANTINE_OK = 5,
+	HBM_CNT_NUM,
+} hbm_stats;
+
+/* per-pool depletion tracking state */
+struct hbm_pool_cnt {
+	uint32_t prev_release_cnt;
+	uint32_t pool_depleted_cnt;
+};
+/* underflow/depletion monitoring state for the wmac and emac pools */
+struct topaz_hbm_mnt {
+	uint32_t prev_unflow_cnt;
+	uint32_t unflow_flag;
+	struct hbm_pool_cnt wmac_pl;
+	struct hbm_pool_cnt emac_pl;
+};
+
+/* NOTE(review): timer declared with a NULL callback — presumably armed
+ * with a real handler elsewhere before use; confirm before firing it */
+static DEFINE_TIMER(hbm_timer, NULL, 0, 0);
+static uint32_t topaz_hbm_stats[HBM_CNT_NUM] = {0};
+static const char *topaz_hbm_stats_names[HBM_CNT_NUM] = {
+	"Invalid pool",
+	"Misaligned pointer",
+	"Magic corrupted",
+	"Quarantine corrupted buffer",
+	"Quarantine allocation fail",
+	"Quarantine ok",
+};
+/* bump a stats counter; not atomic — callers run with IRQs off or in BH */
+#define HBM_STATS(_idx, _num)	(topaz_hbm_stats[(_idx)] += (_num))
+
+#ifdef TOPAZ_EMAC_NULL_BUF_WR
+#define HBM_UFLOW_RECOVER_TH	32
+/* EMAC null-buffer recovery hook, kept in SRAM for the fast path */
+void __attribute__((section(".sram.data")))(*topaz_emac_null_buf_del_cb)(void) = NULL;
+EXPORT_SYMBOL(topaz_emac_null_buf_del_cb);
+#endif
+
+static const char *topaz_hbm_requestor_names[TOPAZ_HBM_MASTER_COUNT] = TOPAZ_HBM_REQUESTOR_NAMES;
+
+/*
+ * Return the number of buffers currently available in an HBM pool.
+ * The HW read/write indices are free-running 16-bit counters, so the
+ * difference is computed with explicit wrap-around handling.
+ * @param pool: HBM pool index
+ * @return: available buffer count, or the maximum pool size when the
+ *          pool index is invalid
+ */
+unsigned int topaz_hbm_pool_available(int8_t pool)
+{
+	static const unsigned int TOPAZ_HBM_MAX_POOL_COUNT = (1 << 16);
+	uint32_t wr;
+	uint32_t rd;
+
+	if (!topaz_hbm_pool_valid(pool)) {
+		printk(KERN_ERR"%s: Invalid pool %d\n", __func__, pool);
+		return TOPAZ_HBM_MAX_POOL_COUNT;
+	}
+
+	wr = readl(TOPAZ_HBM_WR_PTR(pool));
+	rd = readl(TOPAZ_HBM_RD_PTR(pool));
+
+	if (wr < rd)
+		return TOPAZ_HBM_MAX_POOL_COUNT - rd + wr;
+
+	return wr - rd;
+}
+EXPORT_SYMBOL(topaz_hbm_pool_available);
+
+/* sort types >= this base select the inverse of (type - base) ordering */
+#define HBM_DUMP_SORT_ORDER_INV_BASE	100
+typedef enum hbm_dump_sort_type {
+	HBM_DUMP_SORT_ADDR = 0,					/* lower addr first */
+	HBM_DUMP_SORT_JIFF,					/* newest freed first */
+	HBM_DUMP_SORT_BAD_MAGIC,				/* bad magic first */
+	HBM_DUMP_SORT_ADDR_MAX = HBM_DUMP_SORT_ORDER_INV_BASE - 1,
+} hbm_dump_sort_type;
+
+/* runtime-tunable buffer-dump controls (set via the dumpctl command) */
+static hbm_dump_sort_type topaz_hbm_dump_sort_type = HBM_DUMP_SORT_ADDR;
+static int topaz_hbm_dump_sort_range_min = 0;			/* meaning dependent on sort type */
+static int topaz_hbm_dump_sort_range_max = 0xFFFFFFFF;		/* meaning dependent on sort type; note: stored as int, i.e. -1 */
+static int topaz_hbm_dump_num = 5;				/* max dump number */
+static int topaz_hbm_dump_len = 128;				/* bytes dump at head */
+static int topaz_hbm_dump_taillen = 32;				/* bytes dump at tail */
+static int topaz_hbm_dumped_num = 0;				/* currently dumped number */
+
+/* scratch array of buffer pointers used when dumping a sorted pool view */
+#define TOPAZ_HBM_POOL_SIZE_MAX		(TOPAZ_HBM_BUF_EMAC_RX_COUNT + 1)
+uint32_t* topaz_hbm_dump_bufs_sorted[TOPAZ_HBM_POOL_SIZE_MAX] = {0};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/*
+ * /proc/topaz_hbm read handler: report per-master request/release
+ * counters, pool read/write pointers, and overflow/underflow totals.
+ * Two output paths are compiled depending on kernel version: seq_file
+ * (>= 4.7) or a raw page buffer (legacy read_proc).
+ */
+static int topaz_hbm_stat_rd(struct seq_file *sfile, void *data)
+#else
+
+static int topaz_hbm_stat_rd(char *page, char **start, off_t offset,
+		int count, int *eof, void *data)
+#endif
+{
+
+	uint32_t wr_ptr;
+	uint32_t rd_ptr;
+	int ret = 0;
+	unsigned long flags;
+	int req_rel_diff = 0;
+	int req_rel_perpool_diff[TOPAZ_HBM_POOL_COUNT];
+	int master, pool;
+	uint32_t overflow;
+	uint32_t underflow;
+	int allocated;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	char *p = page;
+#endif
+
+	/* counters are read with IRQs off so the snapshot is consistent */
+	local_irq_save(flags);
+	/* offsets for initial pool loading */
+	req_rel_perpool_diff[TOPAZ_HBM_BUF_EMAC_RX_POOL] = TOPAZ_HBM_BUF_EMAC_RX_COUNT;
+	req_rel_perpool_diff[TOPAZ_HBM_BUF_WMAC_RX_POOL] = TOPAZ_HBM_BUF_WMAC_RX_COUNT;
+	req_rel_perpool_diff[TOPAZ_HBM_AUC_FEEDBACK_POOL] = 0;
+	req_rel_perpool_diff[TOPAZ_HBM_EMAC_TX_DONE_POOL] = 0;
+
+	/* accumulate request minus release per pool across all masters */
+	for (pool = 0; pool < TOPAZ_HBM_POOL_COUNT; ++pool) {
+		for (master = 0; master < TOPAZ_HBM_MASTER_COUNT; ++master) {
+			uint32_t req = readl(TOPAZ_HBM_POOL_REQUEST_CNT(master, pool));
+			uint32_t rel = readl(TOPAZ_HBM_POOL_RELEASE_CNT(master, pool));
+
+			req_rel_perpool_diff[pool] += req;
+			req_rel_perpool_diff[pool] -= rel;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			seq_printf(sfile, "master %5s pool %d req %u rel %u\n",
+					topaz_hbm_requestor_names[master], pool, req, rel);
+#else
+			p += sprintf(p, "master %5s pool %d req %u rel %u\n",
+					topaz_hbm_requestor_names[master], pool, req, rel);
+#endif
+		}
+	}
+
+	for (pool = 0; pool < TOPAZ_HBM_POOL_COUNT; ++pool) {
+		req_rel_diff += req_rel_perpool_diff[pool];
+		wr_ptr = readl(TOPAZ_HBM_WR_PTR(pool));
+		rd_ptr = readl(TOPAZ_HBM_RD_PTR(pool));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		seq_printf(sfile, "pool %u rd_ptr %u wr_ptr %u\n", pool, rd_ptr, wr_ptr);
+#else
+		p += sprintf(p, "pool %u rd_ptr %u wr_ptr %u\n", pool, rd_ptr, wr_ptr);
+#endif
+	}
+
+	overflow = readl(TOPAZ_HBM_OVERFLOW_CNT);
+	underflow = readl(TOPAZ_HBM_UNDERFLOW_CNT);
+	/* buffers handed out but not yet returned, net of underflow events */
+	allocated = req_rel_diff - underflow;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	seq_printf(sfile, "underflow %u overflow %u req rel diff %d allocated %d\n",
+		underflow, overflow, req_rel_diff, allocated);
+#else
+	p += sprintf(p, "underflow %u overflow %u req rel diff %d allocated %d\n",
+		underflow, overflow, req_rel_diff, allocated);
+#endif
+	if (overflow) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		seq_printf(sfile, "ERROR: overflow counter must be zero\n");
+#else
+		p += sprintf(p, "ERROR: overflow counter must be zero\n");
+#endif
+	}
+
+	if (underflow) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		seq_printf(sfile, "WARNING: underflow counter is non zero, may need to increase pool\n");
+#else
+		p += sprintf(p, "WARNING: underflow counter is non zero, may need to increase pool\n");
+#endif
+	}
+
+	for (pool = 0; pool < TOPAZ_HBM_POOL_COUNT; ++pool) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		seq_printf(sfile, "pool %d req rel diff %d, available %u\n", pool, req_rel_perpool_diff[pool],
+				topaz_hbm_pool_available(pool));
+#else
+		p += sprintf(p, "pool %d req rel diff %d, available %u\n", pool, req_rel_perpool_diff[pool],
+				topaz_hbm_pool_available(pool));
+#endif
+	}
+
+	local_irq_restore(flags);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	return ret;
+#else
+	/* legacy read_proc: single-shot output, return bytes written */
+	*eof = 1;
+	return p - page;
+#endif
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static int topaz_hbm_stat_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, topaz_hbm_stat_rd, NULL);
+}
+
+static const struct file_operations topaz_hbm_stat_fops = {
+        .owner          = THIS_MODULE,
+        .open           = topaz_hbm_stat_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+#endif
+
+/*
+ * Register the /proc/topaz_hbm statistics file.
+ * @return: 0 on success, negative errno on failure
+ */
+static int __init topaz_hbm_stat_init(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	/* fixed: mode was 0x400 (hex == octal 02000, the setgid bit);
+	 * intended octal 0400, i.e. read-only for root */
+	if (!proc_create(TOPAZ_HBM_PROC_FILENAME, 0400, NULL, &topaz_hbm_stat_fops))
+		return -ENOMEM;
+#else
+	if (!create_proc_read_entry(TOPAZ_HBM_PROC_FILENAME, 0,
+			NULL, topaz_hbm_stat_rd, NULL)) {
+		return -EEXIST;
+	}
+#endif
+	return 0;
+}
+
+/* Remove the HBM statistics proc entry on module unload. */
+static void __exit topaz_hbm_stat_exit(void)
+{
+	remove_proc_entry(TOPAZ_HBM_PROC_FILENAME, NULL);
+}
+
+/*
+ * Initialise one HBM pool and (optionally) seed it with payload buffers.
+ * For each payload, the per-buffer header words (magic, state, enqueue and
+ * free counts, tail guard, debug stamps, external-meta cross pointers) are
+ * written via uncached accesses before the buffer is pushed to the pool.
+ * @param pool_list: virtual address of the SRAM pointer-list backing store
+ * @param payload_count_s: log2 of the number of payloads in the pool
+ * @param payloads_bus: bus address of the first payload, or 0 for an
+ *                      empty pool (e.g. the tx-done feedback pool)
+ * @param payload_size: bytes per payload slot
+ * @param payload_headroom: bytes reserved in front of each payload
+ * @param pool: HBM pool index being initialised
+ */
+void topaz_hbm_init_pool_list(unsigned long *const pool_list, const uint16_t payload_count_s,
+		const uintptr_t payloads_bus, const uint32_t payload_size,
+		const uint32_t payload_headroom, const int8_t pool)
+{
+	uint32_t i;
+	const uint16_t payload_count = (1 << payload_count_s);
+
+	topaz_hbm_init((void *) virt_to_bus(pool_list), payload_count_s, pool, 0);
+
+	if (payloads_bus) {
+		for (i = 0; i < payload_count; i++) {
+			uintptr_t buf_bus = payloads_bus + (i * payload_size) + payload_headroom;
+			uint32_t *_p = bus_to_virt(buf_bus);
+			uint32_t *_m = topaz_hbm_buf_get_meta(_p);
+			/* header words live at negative offsets from the meta pointer */
+			uint32_t *enqueuep = _m - HBM_HR_OFFSET_ENQ_CNT;
+			uint32_t *freep = _m - HBM_HR_OFFSET_FREE_CNT;
+			uint32_t *statep = _m - HBM_HR_OFFSET_STATE;
+			uint32_t *magicp = _p - HBM_HR_OFFSET_MAGIC;
+			/* guard words sit at the very end of the payload slot */
+			uint32_t *guardp =(uint32_t*)((uint32_t)_p + payload_size - payload_headroom -
+							TOPAZ_HBM_PAYLOAD_END_GUARD_SIZE);
+#if TOPAZ_HBM_BUF_EXTERNAL_META
+			/* payload -> meta and meta -> payload cross pointers */
+			uint32_t *meta_ptr_p = _p - HBM_HR_OFFSET_META_PTR;
+			uint32_t *meta_backptr_p = _m - HBM_HR_OFFSET_META_PTR;
+#endif
+			int j;
+#if TOPAZ_HBM_DEBUG_STAMPS
+			uint32_t *jiffp = _m - HBM_HR_OFFSET_FREE_JIFF;
+			uint32_t *ownerp = _m - HBM_HR_OFFSET_OWNER;
+			arc_write_uncached_32(jiffp, jiffies);
+			arc_write_uncached_32(ownerp, TOPAZ_HBM_OWNER_INIT);
+#endif
+			/* always setup magic and guard area to provide minimum detection */
+			arc_write_uncached_32(magicp, TOPAZ_HBM_BUF_GUARD_MAGIC);
+			arc_write_uncached_32(statep, 0);
+			for (j = 0; j < (TOPAZ_HBM_PAYLOAD_END_GUARD_SIZE >> 2); j++) {
+				arc_write_uncached_32((guardp + j), TOPAZ_HBM_BUF_GUARD_MAGIC);
+			}
+			/* fresh buffer: one outstanding enqueue, zero frees */
+			arc_write_uncached_32(enqueuep, 1);
+			arc_write_uncached_32(freep, 0);
+
+#if TOPAZ_HBM_BUF_EXTERNAL_META
+			arc_write_uncached_32(meta_ptr_p, virt_to_bus(_m));
+			arc_write_uncached_32(meta_backptr_p, buf_bus);
+#endif
+
+			topaz_hbm_put_buf((void *) buf_bus, pool);
+		}
+	}
+
+	printk(KERN_INFO "%s pool %u pool_list 0x%p bus_range 0x%lx to 0x%lx sz %u count %u\n",
+			__FUNCTION__, pool, pool_list,
+			payloads_bus, payloads_bus + payload_size * payload_count,
+			payload_size, payload_count);
+}
+
+/* set once topaz_hbm_init_payload_pools() has run; gates tx-done draining */
+static int g_pools_inited = 0;
+
+/*
+ * Poison and initialise every HBM payload pool (EMAC RX, WMAC RX and the
+ * EMAC tx-done feedback pool) plus the external meta region, then mark
+ * the pools as ready.
+ * Payload memory is filled with a poison byte and flushed/invalidated so
+ * that stale cache lines cannot alias HW-owned buffers.
+ */
+static void topaz_hbm_init_payload_pools(void)
+{
+	unsigned long flags;
+	uintptr_t *topaz_hbm_emac_rx_ptrs = (void *) (RUBY_SRAM_BEGIN + TOPAZ_HBM_POOL_EMAC_RX_START);
+	uintptr_t *topaz_hbm_wmac_rx_ptrs = (void *) (RUBY_SRAM_BEGIN + TOPAZ_HBM_POOL_WMAC_RX_START);
+	uintptr_t *topaz_hbm_emac_free_ptrs = (void *) (RUBY_SRAM_BEGIN + TOPAZ_HBM_POOL_EMAC_TX_DONE_START);
+
+	printk("HBM pool: emac rx 0x%x to 0x%x, wmac rx 0x%x to 0x%x\n",
+		TOPAZ_HBM_POOL_EMAC_RX_START,
+		TOPAZ_HBM_POOL_EMAC_RX_END,
+		TOPAZ_HBM_POOL_WMAC_RX_START,
+		TOPAZ_HBM_POOL_WMAC_RX_END);
+
+	/* poison payload DRAM so use-before-init shows a recognisable pattern */
+	memset((void *) (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE), TOPAZ_HBM_BUF_PAYLOAD_POISON,
+			TOPAZ_HBM_BUF_EMAC_RX_TOTAL);
+	memset((void *) (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE), TOPAZ_HBM_BUF_PAYLOAD_POISON,
+			TOPAZ_HBM_BUF_WMAC_RX_TOTAL);
+	flush_and_inv_dcache_sizerange_safe((void *) (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE), TOPAZ_HBM_BUF_EMAC_RX_TOTAL);
+	flush_and_inv_dcache_sizerange_safe((void *) (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE), TOPAZ_HBM_BUF_WMAC_RX_TOTAL);
+
+	memset((void *) (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_META_BASE), TOPAZ_HBM_BUF_PAYLOAD_POISON,
+			TOPAZ_HBM_BUF_META_TOTAL);
+	flush_and_inv_dcache_sizerange_safe((void *)(RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_META_BASE),
+			TOPAZ_HBM_BUF_META_TOTAL);
+#if TOPAZ_HBM_BUF_EXTERNAL_META
+	printk("HBM meta: emac rx 0x%x to 0x%x, wmac rx 0x%x to 0x%x\n",
+			TOPAZ_HBM_BUF_META_EMAC_RX_BASE,
+			TOPAZ_HBM_BUF_META_EMAC_RX_END,
+			TOPAZ_HBM_BUF_META_WMAC_RX_BASE,
+			TOPAZ_HBM_BUF_META_WMAC_RX_END);
+#else
+	printk("HBM used internal meta\n");
+#endif
+
+	local_irq_save(flags);
+
+	topaz_hbm_init_pool_list(topaz_hbm_emac_rx_ptrs, TOPAZ_HBM_BUF_EMAC_RX_COUNT_S,
+			RUBY_DRAM_BUS_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE,
+			TOPAZ_HBM_BUF_EMAC_RX_SIZE, TOPAZ_HBM_PAYLOAD_HEADROOM,
+			TOPAZ_HBM_BUF_EMAC_RX_POOL);
+	topaz_hbm_init_pool_list(topaz_hbm_wmac_rx_ptrs, TOPAZ_HBM_BUF_WMAC_RX_COUNT_S,
+			RUBY_DRAM_BUS_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE,
+			TOPAZ_HBM_BUF_WMAC_RX_SIZE, TOPAZ_HBM_PAYLOAD_HEADROOM,
+			TOPAZ_HBM_BUF_WMAC_RX_POOL);
+	/* tx-done pool starts empty (payloads_bus == 0) */
+	topaz_hbm_init_pool_list(topaz_hbm_emac_free_ptrs, TOPAZ_HBM_EMAC_TX_DONE_COUNT_S,
+			0, 0, 0, TOPAZ_HBM_EMAC_TX_DONE_POOL);
+
+	local_irq_restore(flags);
+
+	g_pools_inited = 1;
+}
+
+/*
+ * Handle a buffer that failed guard-magic verification.
+ * @param buf_virt: virtual address of the buffer payload
+ * @param dest_pool: pool the buffer belongs to (for diagnostics)
+ * @return: 1 if the magic was repaired and the buffer may be returned to
+ *          its pool, 0 if the pointer is misaligned and must be dropped
+ */
+int topaz_hbm_handle_buf_err(void *const buf_virt, int8_t dest_pool)
+{
+	if (topaz_hbm_buf_ptr_valid(buf_virt)) {
+		/* pointer itself is sane: repair the magic and keep the buffer */
+		HBM_STATS(HBM_CNT_MAGIC_CORRUPTED, 1);
+		hbm_buf_fix_buf_magic(buf_virt);
+		return 1;
+	}
+
+	HBM_STATS(HBM_CNT_MISALIGNED, 1);
+	printk(KERN_CRIT "%s: buf 0x%x misaligned: pool %u\n",
+			__FUNCTION__,
+			(unsigned int)buf_virt, dest_pool);
+	return 0;
+}
+
+/*
+ * Return a tx-done buffer to its originating RX pool.
+ * A buffer may be referenced by several consumers; it is physically
+ * released only when its free count catches up with its enqueue count,
+ * otherwise just the free count is advanced.
+ * Runs from SRAM (.sram.text) — this is a fast-path routine.
+ */
+void  __attribute__((section(".sram.text"))) topaz_hbm_filter_txdone_buf(void *const buf_bus)
+{
+	const int8_t dest_pool = topaz_hbm_payload_get_pool_bus(buf_bus);
+
+	if (dest_pool == TOPAZ_HBM_BUF_WMAC_RX_POOL ||
+			dest_pool == TOPAZ_HBM_BUF_EMAC_RX_POOL) {
+		uint32_t *const _p = bus_to_virt((uintptr_t) buf_bus);
+		uint32_t *_m = topaz_hbm_buf_get_meta(_p);
+		uint32_t *const enqueuep = _m - HBM_HR_OFFSET_ENQ_CNT;
+		uint32_t *const freep = _m - HBM_HR_OFFSET_FREE_CNT;
+		const uint32_t ec = arc_read_uncached_32(enqueuep);
+		const uint32_t fc = arc_read_uncached_32(freep) + 1;
+		/* release when this free completes the last outstanding reference */
+		const bool release = (ec && (fc == ec));
+
+		if (release) {
+			/* only fix magic corruption when we are sure no one else is using it right now */
+			if (TOPAZ_HBM_BUF_MAGIC_CHK_ALLPOOL || (dest_pool == TOPAZ_HBM_BUF_WMAC_RX_POOL)) {
+				int state = hbm_buf_check_buf_magic(_p);
+				if (unlikely(state)) {
+					if (!topaz_hbm_handle_buf_err(_p, dest_pool)) {
+						/* shouldn't put it back to pool */
+						return;
+					}
+				}
+			}
+#if TOPAZ_HBM_DEBUG_STAMPS
+			/* NOTE(review): declarations after statements — relies on
+			 * C99/GNU mode being enabled for this file */
+			uint32_t *jiffp = _m - HBM_HR_OFFSET_FREE_JIFF;
+			uint32_t *ownerp = _m - HBM_HR_OFFSET_OWNER;
+			const uint32_t owner = arc_read_uncached_32(ownerp);
+			const uint8_t owner1 = (owner & 0xF) >> 0;
+
+			if (owner1 == TOPAZ_HBM_OWNER_FREE) {
+				/*
+				 * Double free check is already broken because a lot of free places
+				 * doesn't update the owner, both VB and PCIe platform.
+				 */
+				/*
+				uint32_t *sizep = _m - HBM_HR_OFFSET_SIZE;
+				const uint32_t size = arc_read_uncached_32(sizep);
+				printk(KERN_ERR "%s: double free of buf_bus %p size %u owner %08x\n",
+						__FUNCTION__, buf_bus, size, owner);
+				topaz_hbm_buf_show(_p, TOPAZ_HBM_BUF_DUMP_DFT, 0);
+				*/
+			}
+			/* stamp free time and shift the new owner into the history nibbles */
+			arc_write_uncached_32(jiffp, jiffies);
+			arc_write_uncached_32(ownerp, (owner << 4) | TOPAZ_HBM_OWNER_FREE);
+#endif
+
+			/* reset the reference counts before the buffer re-enters the pool */
+			if (ec != 1) {
+				arc_write_uncached_32(enqueuep, 1);
+				arc_write_uncached_32(freep, 0);
+			}
+			topaz_hbm_put_payload_aligned_bus(buf_bus, dest_pool);
+		} else {
+			arc_write_uncached_32(freep, fc);
+		}
+	} else {
+		HBM_STATS(HBM_CNT_INVALID_POOL, 1);
+		printk(KERN_CRIT "%s: unknown pool %hhd for buf_bus 0x%p\n",
+				__FUNCTION__, dest_pool, buf_bus);
+	}
+}
+EXPORT_SYMBOL(topaz_hbm_filter_txdone_buf);
+
+/*
+ * Safely return a buffer to its pool with interrupts disabled.
+ * @param pkt_bus: any bus address inside the buffer (not necessarily the
+ *                 pool-aligned pointer); it is realigned before release.
+ */
+void topaz_hbm_release_buf_safe(void *const pkt_bus)
+{
+	unsigned long irq_flags;
+	const int8_t pool = topaz_hbm_payload_get_pool_bus(pkt_bus);
+	void *aligned_bus = topaz_hbm_payload_store_align_bus(pkt_bus, pool, 0);
+
+	local_irq_save(irq_flags);
+	topaz_hbm_filter_txdone_buf(aligned_bus);
+	local_irq_restore(irq_flags);
+}
+EXPORT_SYMBOL(topaz_hbm_release_buf_safe);
+
+/*
+ * Drain the EMAC tx-done feedback pool, returning every queued buffer to
+ * its originating RX pool. Also triggers the EMAC null-buffer recovery
+ * callback when enough EMAC RX buffers have become available again.
+ * Safe to call before pool init (returns immediately) and from any
+ * context (runs with local IRQs disabled).
+ */
+void topaz_hbm_filter_txdone_pool(void)
+{
+	unsigned long flags;
+	void *buf_bus;
+	const int8_t src_pool = TOPAZ_HBM_EMAC_TX_DONE_POOL;
+
+	const uint32_t mask = TOPAZ_HBM_EMAC_TX_DONE_COUNT - 1;
+	uint32_t wr_ptr;
+	uint32_t rd_ptr;
+	uint32_t wr_raw;
+	uint32_t rd_raw;
+	uint32_t i;
+	uint32_t count;
+	uint32_t full;
+
+	if (unlikely(!g_pools_inited)) {
+		return;
+	}
+
+	local_irq_save(flags);
+
+	/* raw pointers are free-running; equal masked values with unequal raw
+	 * values means the ring is completely full, not empty */
+	wr_raw = readl(TOPAZ_HBM_WR_PTR(src_pool));
+	rd_raw = readl(TOPAZ_HBM_RD_PTR(src_pool));
+	wr_ptr = wr_raw & mask;
+	rd_ptr = rd_raw & mask;
+	full = ((wr_raw != rd_raw) && (wr_ptr == rd_ptr));
+
+	for (count = 0, i = rd_ptr; ((i != wr_ptr) || full); i = (i + 1) & mask, count++) {
+		buf_bus = topaz_hbm_get_payload_bus(src_pool);
+		if (buf_bus != NULL) {
+			topaz_hbm_filter_txdone_buf(buf_bus);
+		} else if (printk_ratelimit()) {
+			printk(KERN_CRIT "%s: read NULL from pool %d\n",
+					__FUNCTION__, src_pool);
+			break;
+		}
+		full = 0;
+	}
+#ifdef TOPAZ_EMAC_NULL_BUF_WR
+	/* once enough EMAC RX buffers are back, let the EMAC replace the NULL
+	 * placeholder buffers it installed while the pool was depleted */
+	if (topaz_emac_null_buf_del_cb) {
+		uint32_t n;
+		wr_ptr = readl(TOPAZ_HBM_WR_PTR(TOPAZ_HBM_BUF_EMAC_RX_POOL));
+		rd_ptr = readl(TOPAZ_HBM_RD_PTR(TOPAZ_HBM_BUF_EMAC_RX_POOL));
+		n = (wr_ptr - rd_ptr) % TOPAZ_HBM_BUF_EMAC_RX_COUNT;
+		if (n > HBM_UFLOW_RECOVER_TH)
+			topaz_emac_null_buf_del_cb();
+	}
+#endif
+	local_irq_restore(flags);
+
+	if (unlikely(count > (TOPAZ_HBM_EMAC_TX_DONE_COUNT * 3 / 4))) {
+		if (printk_ratelimit())
+			printk("Warning! %s count: %u\n", __FUNCTION__, count);
+	}
+}
+EXPORT_SYMBOL(topaz_hbm_filter_txdone_pool);
+
+/* slab cache for the skb_shared_info attached to HBM-backed payloads */
+static struct kmem_cache *shinfo_cache;
+
+/*
+ * skb payload allocator backed by the HBM EMAC RX pool.
+ * The shared info is taken from a dedicated slab cache; the payload comes
+ * from the HBM pool when it fits, otherwise from kmalloc.
+ * @return: payload pointer, or NULL on failure (shinfo is released and
+ *          *shinfo reset to NULL)
+ */
+static uint8_t *topaz_hbm_skb_allocator_payload_alloc(struct skb_shared_info **shinfo,
+		size_t size, gfp_t gfp_mask, int node)
+{
+	uint8_t *payload;
+	const size_t aligned_size = SKB_DATA_ALIGN(size);
+
+	*shinfo = kmem_cache_alloc(shinfo_cache, gfp_mask);
+	if (!*shinfo)
+		return NULL;
+
+	if (aligned_size < topaz_hbm_pool_buf_max_size(TOPAZ_HBM_BUF_EMAC_RX_POOL))
+		payload = topaz_hbm_get_payload_virt(TOPAZ_HBM_BUF_EMAC_RX_POOL);
+	else
+		payload = kmalloc(aligned_size, gfp_mask);
+
+	if (!payload) {
+		kmem_cache_free(shinfo_cache, *shinfo);
+		*shinfo = NULL;
+	}
+
+	return payload;
+}
+
+/*
+ * Free an skb payload allocated by topaz_hbm_skb_allocator_payload_alloc.
+ * HBM-pool payloads go back through the tx-done filter (unless the
+ * hbm_no_free flag says the HW still owns the buffer); kmalloc'd payloads
+ * are simply kfree'd. The shared info always returns to its slab cache.
+ */
+static void topaz_hbm_skb_allocator_payload_free(struct sk_buff *skb)
+{
+	void *buf_bus = (void *) virt_to_bus(skb->head);
+	const int8_t pool = topaz_hbm_payload_get_free_pool_bus(buf_bus);
+
+	/* realign to the pool's canonical buffer start before releasing */
+	buf_bus = topaz_hbm_payload_store_align_bus(buf_bus,
+		topaz_hbm_payload_get_pool_bus(buf_bus), 0);
+	kmem_cache_free(shinfo_cache, skb_shinfo(skb));
+
+	if (topaz_hbm_pool_valid(pool)) {
+		if (!skb->hbm_no_free) {
+			unsigned long flags;
+
+			local_irq_save(flags);
+
+			/* flush dirty lines before the HW can reuse the buffer */
+			topaz_hbm_flush_skb_cache(skb);
+			topaz_hbm_filter_txdone_buf(buf_bus);
+
+			local_irq_restore(flags);
+		}
+	} else {
+		/* payload came from kmalloc, not an HBM pool */
+		kfree(skb->head);
+	}
+
+	/* opportunistically drain the tx-done feedback pool as well */
+	topaz_hbm_filter_txdone_pool();
+}
+
+/*
+ * skb allocator backed by the HBM payload pools: skb structs come from the
+ * generic kmem-cache allocator, payloads from topaz_hbm_skb_allocator_payload_alloc.
+ */
+const struct skb_allocator topaz_hbm_skb_allocator = {
+	.name = "topaz_hbm",
+	.skb_alloc = &skb_allocator_kmem_caches_skb_alloc,
+	.skb_free = &skb_allocator_kmem_caches_skb_free,
+	.payload_alloc = &topaz_hbm_skb_allocator_payload_alloc,
+	.payload_free = &topaz_hbm_skb_allocator_payload_free,
+	.max_size = 0,	/* no allocator-imposed size cap */
+};
+
+#define QTN_HBM_MAX_FRAME_LEN	12000	/* 12000 is over maximum vht frame size,
+					and there is no rx frame whose size is over 12000 */
+/*
+ * Wrap an existing HBM payload buffer in a freshly allocated sk_buff
+ * (skb struct + shinfo from slab caches, no payload copy).
+ * @buf_virt: payload pointer within the HBM buffer
+ * @pool: HBM pool the buffer belongs to
+ * @inv: nonzero to invalidate the payload dcache range before attach
+ * @headroom: extra bytes reserved before buf_virt; capped at
+ *            TOPAZ_HBM_PAYLOAD_HEADROOM
+ * Returns the skb, or NULL on bad headroom / allocation failure.
+ */
+struct sk_buff *_topaz_hbm_attach_skb(void *buf_virt, int8_t pool, int inv, uint8_t headroom
+		QTN_SKB_ALLOC_TRACE_ARGS)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *shinfo;
+	uint32_t buf_size;
+	uint8_t *buf_head;
+	uint8_t *buf_head2;
+	uintptr_t inv_start;
+	uintptr_t inv_end;
+
+	if (unlikely(headroom > TOPAZ_HBM_PAYLOAD_HEADROOM)) {
+		printk(KERN_WARNING "specified headroom(%u) should be smaller than %u\n",
+			headroom, TOPAZ_HBM_PAYLOAD_HEADROOM);
+		return NULL;
+	}
+
+	shinfo = kmem_cache_alloc(shinfo_cache, GFP_ATOMIC);
+	if (!shinfo) {
+		return NULL;
+	}
+
+	skb = skb_allocator_kmem_caches_skb_alloc(GFP_ATOMIC, 0, -1);
+	if (!skb) {
+		kmem_cache_free(shinfo_cache, shinfo);
+		return NULL;
+	}
+
+	/*
+	 * TODO FIXME: Restrict the buffer size less than 12k, because we saw ping failed
+	 * if we set skb buffer size as 17K
+	 */
+	buf_size = min((int)topaz_hbm_pool_buf_max_size(pool), QTN_HBM_MAX_FRAME_LEN);
+	buf_head = topaz_hbm_payload_store_align_virt(buf_virt, pool, 0);
+	buf_head2 = buf_head - headroom;
+
+	/* invalidate all packet dcache before passing to the kernel */
+	if (inv)
+		inv_dcache_sizerange_safe(buf_head, buf_size);
+
+	/* also invalidate the cache-line-aligned span covering the buffer */
+	inv_start = (uintptr_t) align_buf_cache(buf_head);
+	inv_end = align_val_up((uintptr_t) buf_head + buf_size,
+			dma_get_cache_alignment());
+	inv_dcache_range(inv_start, inv_end);
+
+	__alloc_skb_init(skb, shinfo, buf_head2,
+			buf_size, 0, &topaz_hbm_skb_allocator
+			QTN_SKB_ALLOC_TRACE_ARGVARS);
+	/* position skb->data at buf_virt (skips headroom + any pre-data bytes) */
+	skb_reserve(skb, ((uint8_t *) buf_virt) - buf_head2);
+
+	return skb;
+}
+EXPORT_SYMBOL(_topaz_hbm_attach_skb);
+
+/*
+ * Allocate a new buffer to hold the pkt. The new buffer is guaranteed to be safe from wmac rx dma overrun.
+ * The original buffer is not used anymore. Caller should be responsible for freeing the buffer.
+ *
+ * @buf_virt: packet start inside the original HBM buffer
+ * @pool: must be TOPAZ_HBM_BUF_WMAC_RX_POOL
+ * @len: packet length from buf_virt
+ * @whole_frm_hdr_p: if non-NULL, receives a pointer to the copied frame head
+ * Returns the new skb with prev_len reserved, or NULL when the source buffer
+ * is (or becomes) corrupted or the skb allocation fails.
+ */
+struct sk_buff *topaz_hbm_attach_skb_quarantine(void *buf_virt, int pool, int len, uint8_t **whole_frm_hdr_p)
+{
+	uint8_t *buf_head;
+	struct sk_buff *skb = NULL;
+	uint32_t prev_len;
+	uint32_t buf_size;
+
+	KASSERT((pool == TOPAZ_HBM_BUF_WMAC_RX_POOL), ("buf quarantine is only for wmac rx pool, %d", pool));
+
+	buf_head = topaz_hbm_payload_store_align_virt(buf_virt, pool, 0);
+	if (hbm_buf_check_buf_magic(buf_head)) {
+		/* don't copy to new skb if it is aleady corrupted */
+		HBM_STATS(HBM_CNT_QUARANTINE_CORRUPTED, 1);
+		return NULL;
+	}
+
+	/* copy from buffer head in case mac header is needed */
+	prev_len = (uint32_t)buf_virt - (uint32_t)buf_head;
+	buf_size = prev_len + len;
+
+	skb = dev_alloc_skb(buf_size);
+	if (!skb) {
+		HBM_STATS(HBM_CNT_QUARANTINE_ALLOC_FAIL, 1);
+		return NULL;
+	}
+
+	/* caller only invalidate this pkt, not entire buffer */
+	inv_dcache_sizerange_safe(buf_head, prev_len);
+	if (whole_frm_hdr_p)
+		*whole_frm_hdr_p = skb->data;
+	memcpy(skb->data, buf_head, buf_size);
+	if (hbm_buf_check_buf_magic(buf_head)) {
+		/* if corruption happens during data copying */
+		HBM_STATS(HBM_CNT_QUARANTINE_CORRUPTED, 1);
+		goto post_check_fail;
+	}
+
+	/* reserve head space so that later caller's skb_put() covers the packet */
+	skb_reserve(skb, prev_len);
+	HBM_STATS(HBM_CNT_QUARANTINE_OK, 1);
+
+	/*
+	 * Quarantine is done: the copy in the skb is not corrupted and, unlike
+	 * the original buffer, cannot be overwritten by further wmac rx DMA.
+	 */
+	return skb;
+
+post_check_fail:
+	if (skb)
+		dev_kfree_skb(skb);
+	return NULL;
+}
+EXPORT_SYMBOL(topaz_hbm_attach_skb_quarantine);
+
+/*
+ * Comparator for the bubble sort in topaz_hbm_bufs_sort().
+ * Derives a sort key per buffer from topaz_hbm_dump_sort_type (address,
+ * age in jiffies since free, or guard-magic validity) and returns nonzero
+ * when buf0 and buf1 should be swapped. Sort types at or above
+ * HBM_DUMP_SORT_ORDER_INV_BASE invert the comparison direction.
+ */
+static int topaz_hbm_bufs_sort_need_swap(const uint32_t *buf0, const uint32_t *buf1)
+{
+	int type;
+	int inv;
+	uint32_t v0;
+	uint32_t v1;
+
+	type = topaz_hbm_dump_sort_type;
+	inv = 0;
+	if (topaz_hbm_dump_sort_type >= HBM_DUMP_SORT_ORDER_INV_BASE) {
+		type -= HBM_DUMP_SORT_ORDER_INV_BASE;
+		inv = 1;
+	}
+
+	switch(type) {
+	case HBM_DUMP_SORT_ADDR:
+		v0 = (uint32_t)buf0;
+		v1 = (uint32_t)buf1;
+		break;
+	case HBM_DUMP_SORT_JIFF:
+		/* key = elapsed jiffies since the buffer was last freed */
+		v0 = jiffies - arc_read_uncached_32(topaz_hbm_buf_get_meta(buf0) - HBM_HR_OFFSET_FREE_JIFF);
+		v1 = jiffies - arc_read_uncached_32(topaz_hbm_buf_get_meta(buf1) - HBM_HR_OFFSET_FREE_JIFF);
+		break;
+	case HBM_DUMP_SORT_BAD_MAGIC:
+		/* key = 1 when the guard magic is intact, 0 when corrupted */
+		v0 = (arc_read_uncached_32(buf0 - HBM_HR_OFFSET_MAGIC) == TOPAZ_HBM_BUF_GUARD_MAGIC);
+		v1 = (arc_read_uncached_32(buf1 - HBM_HR_OFFSET_MAGIC) == TOPAZ_HBM_BUF_GUARD_MAGIC);
+		break;
+	default:
+		return 0;
+		break;
+	}
+
+	return (inv ? (v1 > v0) : (v0 > v1));
+}
+
+/*
+ * Populate topaz_hbm_dump_bufs_sorted[] with the buffers of @pool and
+ * bubble-sort it using topaz_hbm_bufs_sort_need_swap() as the comparator.
+ * Also resets the per-read dump counter.
+ *
+ * Fix vs. original: the swap wrote buf1 into index i (the outer-loop
+ * counter) instead of index j, so elements were overwritten rather than
+ * exchanged and the array ended up corrupted instead of sorted.
+ */
+static void topaz_hbm_bufs_sort(int pool, int pool_size)
+{
+	int i;
+	int j;
+	uint32_t *buf0;
+	uint32_t *buf1;
+	uint32_t *buf;
+	int swapped;
+
+	memset(topaz_hbm_dump_bufs_sorted, 0, sizeof(topaz_hbm_dump_bufs_sorted));
+	for (i = 0; i < pool_size; i++) {
+		topaz_hbm_dump_bufs_sorted[i] = (uint32_t*)
+			topaz_hbm_payload_store_align_from_index(pool, i);
+	}
+
+	/* bubble sort; early exit when a pass makes no swap */
+	for (i = 0; i < (pool_size - 1); i++) {
+		swapped = 0;
+		for (j = 0; j < (pool_size - i - 1); j++) {
+			buf0 = topaz_hbm_dump_bufs_sorted[j];
+			buf1 = topaz_hbm_dump_bufs_sorted[j + 1];
+			if (topaz_hbm_bufs_sort_need_swap(buf0, buf1)) {
+				/* swap the adjacent pair */
+				buf = buf0;
+				topaz_hbm_dump_bufs_sorted[j] = buf1;
+				topaz_hbm_dump_bufs_sorted[j + 1] = buf;
+				swapped = 1;
+			}
+		}
+		if (!swapped)
+			break;
+	}
+
+	topaz_hbm_dumped_num = 0;
+}
+
+/*
+ * Return nonzero when @buf's sort key (same key space as the dump sort:
+ * address, age, or magic validity) falls inside the user-configured
+ * [topaz_hbm_dump_sort_range_min, topaz_hbm_dump_sort_range_max] window.
+ */
+static int topaz_hbm_buf_in_range(const uint32_t *buf)
+{
+	uint32_t key;
+	int sort = topaz_hbm_dump_sort_type;
+
+	/* strip the inverted-order flag; range filtering ignores direction */
+	if (sort >= HBM_DUMP_SORT_ORDER_INV_BASE)
+		sort -= HBM_DUMP_SORT_ORDER_INV_BASE;
+
+	if (sort == HBM_DUMP_SORT_ADDR) {
+		key = (uint32_t)buf;
+	} else if (sort == HBM_DUMP_SORT_JIFF) {
+		key = jiffies - arc_read_uncached_32(topaz_hbm_buf_get_meta(buf) - HBM_HR_OFFSET_FREE_JIFF);
+	} else if (sort == HBM_DUMP_SORT_BAD_MAGIC) {
+		key = (arc_read_uncached_32(buf - HBM_HR_OFFSET_MAGIC) == TOPAZ_HBM_BUF_GUARD_MAGIC);
+	} else {
+		return 0;
+	}
+
+	return ((topaz_hbm_dump_sort_range_min <= key) && (key <= topaz_hbm_dump_sort_range_max));
+}
+
+/* seq_file start: rebuild the EMAC RX sorted snapshot on a fresh read,
+ * then yield the entry at *pos (NULL once past the pool-size limit). */
+static void *topaz_hbm_bufs_emac_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+	const loff_t off = *pos;
+
+	if (off >= TOPAZ_HBM_POOL_SIZE_MAX)
+		return NULL;
+
+	if (off == 0)
+		topaz_hbm_bufs_sort(TOPAZ_HBM_BUF_EMAC_RX_POOL, TOPAZ_HBM_BUF_EMAC_RX_COUNT);
+
+	return topaz_hbm_dump_bufs_sorted[off];
+}
+
+/* seq_file start: rebuild the WMAC RX sorted snapshot on a fresh read,
+ * then yield the entry at *pos (NULL once past the pool-size limit). */
+static void *topaz_hbm_bufs_wmac_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+	const loff_t off = *pos;
+
+	if (off >= TOPAZ_HBM_POOL_SIZE_MAX)
+		return NULL;
+
+	if (off == 0)
+		topaz_hbm_bufs_sort(TOPAZ_HBM_BUF_WMAC_RX_POOL, TOPAZ_HBM_BUF_WMAC_RX_COUNT);
+
+	return topaz_hbm_dump_bufs_sorted[off];
+}
+
+/*
+ * seq_file next: advance to the following sorted buffer.
+ *
+ * Fix vs. original: the bounds check ran before the increment, so the
+ * final iteration could index one element past topaz_hbm_dump_bufs_sorted[].
+ */
+static void* topaz_hbm_bufs_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	*pos += 1;
+
+	if (*pos > (TOPAZ_HBM_POOL_SIZE_MAX - 1))
+		return NULL;
+
+	return topaz_hbm_dump_bufs_sorted[*pos];
+}
+
+/* seq_file stop: nothing to clean up per read */
+static void topaz_hbm_bufs_seq_stop(struct seq_file *sfile, void *v)
+{
+}
+
+/*
+ * seq_file show: print one HBM buffer — its tracking metadata (enqueue/free
+ * counters, owner, size, free timestamp, guard magic) followed by a hexdump
+ * of up to topaz_hbm_dump_len head bytes and optionally
+ * topaz_hbm_dump_taillen tail bytes. Buffers outside the configured sort-key
+ * range, or beyond the topaz_hbm_dump_num limit, are silently skipped.
+ */
+static int topaz_hbm_bufs_seq_show(struct seq_file *sfile, void *v)
+{
+	const uint32_t *_p = v;
+	const uint32_t *_m = topaz_hbm_buf_get_meta(_p);
+	const uint32_t *enqueuep = _m - HBM_HR_OFFSET_ENQ_CNT;
+	const uint32_t *freep = _m - HBM_HR_OFFSET_FREE_CNT;
+	const uint32_t *jiffp = _m - HBM_HR_OFFSET_FREE_JIFF;
+	const uint32_t *ownerp = _m - HBM_HR_OFFSET_OWNER;
+	const uint32_t *sizep = _m - HBM_HR_OFFSET_SIZE;
+	const uint32_t *magicp = _p - HBM_HR_OFFSET_MAGIC;
+	const uint32_t ec = arc_read_uncached_32(enqueuep);
+	const uint32_t fc = arc_read_uncached_32(freep);
+	const uint32_t jc = arc_read_uncached_32(jiffp);
+	const uint32_t oc = arc_read_uncached_32(ownerp);
+	const uint32_t sz = arc_read_uncached_32(sizep);
+	const uint32_t magic = arc_read_uncached_32(magicp);
+	const uint8_t *d;
+	int dump_bytes;
+	int i;
+	uint8_t *tail;
+	int tail_bytes;
+	uint32_t whole_size;
+	uint32_t payload_size;
+	int pool;
+	uint32_t idx;
+
+	/* skip buffers filtered out by the configured sort-key range */
+	if (!topaz_hbm_buf_in_range(_p)) {
+		return 0;
+	}
+
+	/* honor the per-read dump-count limit */
+	if (topaz_hbm_dumped_num++ >= topaz_hbm_dump_num) {
+		return 0;
+	}
+
+	pool = topaz_hbm_buf_identify_buf_virt(v, &whole_size, &idx);
+	if (pool < 0) {
+		seq_printf(sfile, "invalid hbm buffer %x\n", (unsigned int)v);
+		return 0;
+	}
+	payload_size = whole_size - TOPAZ_HBM_PAYLOAD_HEADROOM;
+
+	/* TOPAZ_HBM_BUF_DUMP_MAX means "dump the whole payload" */
+	dump_bytes = (topaz_hbm_dump_len == TOPAZ_HBM_BUF_DUMP_MAX) ? payload_size : topaz_hbm_dump_len;
+
+	d = v;
+	inv_dcache_sizerange_safe(v, dump_bytes);
+	seq_printf(sfile, "%p ec %u fp %u own %08x size %u j %u (%u s ago) mg %x\n",
+			v, ec, fc, oc, sz, jc, (((uint32_t) jiffies) - jc) / HZ, magic);
+	/* hexdump, 32 bytes per line with a leading offset column */
+	for (i = 0; i < dump_bytes; ) {
+		if (!(i % 32))
+			seq_printf(sfile, "%08x ", (i - i % 32));
+		++i;
+		seq_printf(sfile, "%02x%s", *d++, (i % 32) == 0 ? "\n" : " ");
+	}
+
+	if (topaz_hbm_dump_taillen) {
+		seq_printf(sfile, "\n");
+		tail_bytes = topaz_hbm_dump_taillen;
+		tail = (uint8_t*)((uint32_t)v + payload_size - tail_bytes);
+		inv_dcache_sizerange_safe(tail, tail_bytes);
+		seq_printf(sfile, "%p tail %p\n", v, tail);
+		for (i = 0; i < tail_bytes; ) {
+			if (!(i % 32))
+				seq_printf(sfile, "%08x ", (i - i % 32));
+			++i;
+			seq_printf(sfile, "%02x%s", *tail++, (i % 32) == 0 ? "\n" : " ");
+		}
+	}
+	seq_printf(sfile, "\n");
+
+	return 0;
+}
+
+/* seq_file / proc glue for /proc/hbm_bufs_emac and /proc/hbm_bufs_wmac */
+static struct seq_operations topaz_hbm_bufs_emac_seq_ops = {
+	.start = topaz_hbm_bufs_emac_seq_start,
+	.next  = topaz_hbm_bufs_seq_next,
+	.stop  = topaz_hbm_bufs_seq_stop,
+	.show  = topaz_hbm_bufs_seq_show
+};
+
+static struct seq_operations topaz_hbm_bufs_wmac_seq_ops = {
+	.start = topaz_hbm_bufs_wmac_seq_start,
+	.next  = topaz_hbm_bufs_seq_next,
+	.stop  = topaz_hbm_bufs_seq_stop,
+	.show  = topaz_hbm_bufs_seq_show
+};
+
+static int topaz_hbm_bufs_emac_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &topaz_hbm_bufs_emac_seq_ops);
+}
+
+static int topaz_hbm_bufs_wmac_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &topaz_hbm_bufs_wmac_seq_ops);
+}
+
+static struct file_operations topaz_hbm_bufs_emac_proc_ops = {
+	.owner   = THIS_MODULE,
+	.open    = topaz_hbm_bufs_emac_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+
+static struct file_operations topaz_hbm_bufs_wmac_proc_ops = {
+	.owner   = THIS_MODULE,
+	.open    = topaz_hbm_bufs_wmac_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+
+/*
+ * Tokenize @str in place: record the start of each whitespace-separated
+ * word in @words and NUL-terminate every word by overwriting the separator
+ * run that follows it. Leading whitespace is skipped (not overwritten).
+ * Returns the number of words found; NULL/empty input yields 0.
+ */
+static inline int hbm_if_split_words(char **words, char *str)
+{
+	int n = 0;
+	char *p = str;
+
+	/* step over leading whitespace without touching it */
+	while (p && *p && isspace(*p))
+		p++;
+
+	while (p && *p) {
+		words[n++] = p;
+
+		/* advance past the current word */
+		while (p && *p && !isspace(*p))
+			p++;
+
+		/* terminate it: turn the following separator run into NULs */
+		while (p && *p && isspace(*p)) {
+			*p = 0;
+			p++;
+		}
+	}
+
+	return n;
+}
+
+/*
+ * "dumpctl" command: up to six optional positional arguments update the
+ * dump controls in order — sort type (decimal), range min/max (hex),
+ * dump count, head dump length, tail dump length. Omitted arguments leave
+ * the corresponding setting unchanged. Always prints the resulting state.
+ */
+static int hbm_if_cmd_dumpctl(char **words, uint8_t word_count)
+{
+	int idx = 1;
+
+	if (word_count >= (idx + 1))
+		sscanf(words[idx++], "%u", &topaz_hbm_dump_sort_type);
+	if (word_count >= (idx + 1))
+		sscanf(words[idx++], "0x%x", &topaz_hbm_dump_sort_range_min);
+	if (word_count >= (idx + 1))
+		sscanf(words[idx++], "0x%x", &topaz_hbm_dump_sort_range_max);
+	if (word_count >= (idx + 1))
+		sscanf(words[idx++], "%u", &topaz_hbm_dump_num);
+	if (word_count >= (idx + 1))
+		sscanf(words[idx++], "%u", &topaz_hbm_dump_len);
+	if (word_count >= (idx + 1))
+		sscanf(words[idx++], "%u", &topaz_hbm_dump_taillen);
+
+	printk("hbm_if set dump ctl: sort_type %u sort_range [0x%x 0x%x] num %u len %u %u\n",
+			topaz_hbm_dump_sort_type,
+			topaz_hbm_dump_sort_range_min,
+			topaz_hbm_dump_sort_range_max,
+			topaz_hbm_dump_num,
+			topaz_hbm_dump_len,
+			topaz_hbm_dump_taillen
+			);
+	return 0;
+}
+
+/* "stats" command: print every HBM counter to the kernel log. */
+static int hbm_if_cmd_show_stats(char **words, uint8_t word_count)
+{
+	int idx;
+
+	printk("HBM stats:\n");
+
+	for (idx = 0; idx < HBM_CNT_NUM; idx++)
+		printk("%s = %u\n", topaz_hbm_stats_names[idx], topaz_hbm_stats[idx]);
+
+	return 0;
+}
+
+/* Apply user command.
+ * User command can control the HBM interface.
+ * @param cmd_num: command number
+ * @param words: the split words without spaces from the user space console interface
+ * @param word_count: number of words after split
+ * @return: 1 on success, -EPERM on any parse/dispatch failure
+ */
+static int hbm_if_apply_user_command(hbm_if_usr_cmd cmd_num, char **words, uint8_t word_count)
+{
+	int rc = -EINVAL;
+
+	if (!words || (word_count == 0))
+		goto cmd_failure;
+
+	if (cmd_num == HBM_IF_CMD_DUMPCTL)
+		rc = hbm_if_cmd_dumpctl(words, word_count);
+	else if (cmd_num == HBM_IF_CMD_STATS)
+		rc = hbm_if_cmd_show_stats(words, word_count);
+	else
+		goto cmd_failure;
+
+	if (rc < 0)
+		goto cmd_failure;
+
+	return 1;
+
+cmd_failure:
+	if (words)
+		printk(KERN_INFO "Failed to parse command:%s, word count:%d\n", *words, word_count);
+	else
+		printk(KERN_INFO "Failed to parse command:(NULL)\n");
+
+	return -EPERM;
+}
+
+/*
+ * /proc write handler for the HBM command interface: copy the user string,
+ * split it into words, match the first word against str_cmd[] and dispatch.
+ * Always consumes @count.
+ *
+ * Fixes vs. original: a zero-length write used to kmalloc(0) and then index
+ * cmd[count - 1]; input with no words used to strcmp() an uninitialized
+ * words[0].
+ */
+static int hbm_if_write_proc(struct file *file, const char __user *buffer,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		size_t count, loff_t *ppos)
+#else
+		unsigned long count, void *_unused)
+#endif
+{
+	char *cmd = NULL;
+	int rc, i;
+	char **words = NULL;
+	uint8_t word_count;
+	hbm_if_usr_cmd cmd_num = 0;
+
+	if (count == 0)
+		return 0;
+
+	cmd = kmalloc(count, GFP_KERNEL);
+	/* worst case: one word per two input bytes */
+	words = kmalloc(count * sizeof(char *) / 2 + sizeof(char *), GFP_KERNEL);
+	if (!cmd || !words) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(cmd, buffer, count)) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	/* Set null at last byte, note that count already gives +1 byte count*/
+	cmd[count - 1] = '\0';
+
+	word_count = hbm_if_split_words(words, cmd);
+	if (word_count) {
+		for (i = 0; i < HBM_IF_MAX_CMD; i++, cmd_num++) {
+			/* Extract command from first word */
+			if (strcmp(words[0], str_cmd[i]) == 0) {
+				printk(KERN_INFO"HBM user command:%s  \n", str_cmd[i]);
+				break;
+			}
+		}
+	}
+
+	/* Exclude softirqs whilst manipulating forwarding table */
+	local_bh_disable();
+
+	rc = hbm_if_apply_user_command(cmd_num, words, word_count);
+
+	local_bh_enable();
+
+out:
+	/* kfree(NULL) is a no-op, so unconditional frees are safe */
+	kfree(cmd);
+	kfree(words);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	*ppos += count;
+#endif
+	return count;
+}
+
+/* proc interface for HBM user commands: write-only entry point */
+static const struct file_operations hbm_if_fops = {
+        .write = hbm_if_write_proc,
+};
+/* Sum the per-master buffer release counters for @pool. */
+static inline uint32_t hbm_get_rel_cnt(int pool)
+{
+	uint32_t total = 0;
+	int m;
+
+	for (m = 0; m < TOPAZ_HBM_MASTER_COUNT; m++)
+		total += readl(TOPAZ_HBM_POOL_RELEASE_CNT(m, pool));
+
+	return total;
+}
+
+/*
+ * One monitoring sample for @pool: compute the release-counter delta since
+ * the previous sample and check the pool's available count. Returns -1
+ * (and bumps ps->pool_depleted_cnt) when the pool looks depleted — few
+ * buffers available AND few released since last poll — else 0 and the
+ * depletion streak is reset.
+ */
+static int topaz_hbm_pool_poll_stat(int pool, struct hbm_pool_cnt *ps)
+{
+	uint32_t free;
+	uint32_t fdelt;
+	int rc;
+
+	free = hbm_get_rel_cnt(pool);
+	fdelt = free - ps->prev_release_cnt;
+	ps->prev_release_cnt = free;
+
+	if ((topaz_hbm_pool_available(pool) < HBM_BUF_MINIMUM_AVAIL_NUM) &&
+			(fdelt < HBM_BUF_MINIMUM_REL_NUM)) {
+		ps->pool_depleted_cnt++;
+		rc = -1;
+	} else {
+		ps->pool_depleted_cnt = 0;
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * Periodic HBM health check (hbm_timer callback). While no underflow has
+ * been seen, it only watches the hardware underflow counter at the long
+ * poll interval. Once an underflow is detected it polls the wmac/emac RX
+ * pools at the short interval and panics if either pool stays depleted
+ * for more than HBM_BUF_DEPLETION_TH consecutive polls.
+ */
+void topaz_hbm_monitor(unsigned long data)
+{
+	struct topaz_hbm_mnt *hm =  (struct topaz_hbm_mnt *)data;
+	uint32_t uf;
+	int rc;
+	unsigned long intval;
+
+	intval = HBM_BUF_POLL_L_INTRVAL;
+	if (!hm->unflow_flag) {
+		uf = readl(TOPAZ_HBM_UNDERFLOW_CNT);
+		if (uf - hm->prev_unflow_cnt) {
+			/* new underflow events since last poll: start close watch */
+			hm->unflow_flag = 1;
+			hm->prev_unflow_cnt = uf;
+		} else {
+			goto exit;
+		}
+	}
+
+	rc = topaz_hbm_pool_poll_stat(TOPAZ_HBM_BUF_WMAC_RX_POOL, &hm->wmac_pl);
+	rc += topaz_hbm_pool_poll_stat(TOPAZ_HBM_BUF_EMAC_RX_POOL, &hm->emac_pl);
+
+	if (rc == 0) {
+		/* both pools healthy again: back to relaxed polling */
+		hm->unflow_flag = 0;
+	} else {
+		if ((hm->wmac_pl.pool_depleted_cnt > HBM_BUF_DEPLETION_TH) ||
+			(hm->emac_pl.pool_depleted_cnt > HBM_BUF_DEPLETION_TH)) {
+			panic("HBM pool is depleted, wmac pool:%u, emac rx pool:%u\n",
+				topaz_hbm_pool_available(TOPAZ_HBM_BUF_WMAC_RX_POOL),
+				topaz_hbm_pool_available(TOPAZ_HBM_BUF_EMAC_RX_POOL));
+		}
+		intval = HBM_BUF_POLL_S_INTRVAL;
+	}
+exit:
+	mod_timer(&hbm_timer, jiffies + intval);
+}
+
+/*
+ * Create the HBM /proc entries (buffer dumps + command interface) and start
+ * the monitor timer. Returns 0 on success, -1 on any allocation failure.
+ *
+ * Fixes vs. original: the proc modes were written as hex 0x444 (= octal
+ * 02104) where octal permissions 0444 were intended, and the writable
+ * command entry now gets 0600 to match the legacy (pre-4.7) branch.
+ */
+static int __init topaz_hbm_bufs_init(void)
+{
+	struct topaz_hbm_mnt *hm;
+	struct proc_dir_entry *e;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	e = proc_create("hbm_bufs_emac", 0444, NULL, &topaz_hbm_bufs_emac_proc_ops);
+	if (!e)
+		goto error4;
+
+	e = proc_create("hbm_bufs_wmac", 0444, NULL, &topaz_hbm_bufs_wmac_proc_ops);
+	if (!e)
+		goto error3;
+
+	/* command interface is write-only for root */
+	e = proc_create(TOPAZ_HBM_IF_PROC_NAME, 0600, NULL, &hbm_if_fops);
+	if (!e)
+		goto error2;
+
+	hm = kzalloc(sizeof(*hm), GFP_KERNEL);
+	if (!hm)
+		goto error1;
+
+#else
+	if ((e = create_proc_entry("hbm_bufs_emac", 0, NULL)) != NULL) {
+		e->proc_fops = &topaz_hbm_bufs_emac_proc_ops;
+	}
+
+	if ((e = create_proc_entry("hbm_bufs_wmac", 0, NULL)) != NULL) {
+		e->proc_fops = &topaz_hbm_bufs_wmac_proc_ops;
+	}
+
+	if ((e = create_proc_entry(TOPAZ_HBM_IF_PROC_NAME, 0600, NULL)) != NULL) {
+		e->write_proc = hbm_if_write_proc;
+		e->read_proc = NULL;
+	}
+
+	hm = kzalloc(sizeof(*hm), GFP_KERNEL);
+	if (!hm) {
+		printk(KERN_ERR"%s: fail to allocate hm", __func__);
+		return -1;
+	}
+#endif
+
+	/* start the pool monitor at the relaxed poll interval */
+	init_timer(&hbm_timer);
+	hbm_timer.data = (unsigned long)hm;
+	hbm_timer.function = &topaz_hbm_monitor;
+	mod_timer(&hbm_timer, jiffies + HBM_BUF_POLL_L_INTRVAL);
+
+	return 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+error1:
+	remove_proc_entry(TOPAZ_HBM_IF_PROC_NAME, NULL);
+error2:
+	remove_proc_entry("hbm_bufs_wmac", NULL);
+error3:
+	remove_proc_entry("hbm_bufs_emac", NULL);
+error4:
+	return -1;
+#endif
+}
+
+/* Tear down the HBM /proc entries, stop the monitor timer and free its
+ * context (stored in hbm_timer.data). */
+static void __exit topaz_hbm_bufs_exit(void)
+{
+	remove_proc_entry("hbm_bufs_wmac", 0);
+	remove_proc_entry("hbm_bufs_emac", 0);
+	remove_proc_entry(TOPAZ_HBM_IF_PROC_NAME, NULL);
+
+	del_timer(&hbm_timer);
+	if (hbm_timer.data)
+		kfree((void *)hbm_timer.data);
+}
+
+/*
+ * Module entry: set up the payload pools, create the shinfo slab cache
+ * (SLAB_PANIC — cache creation failure panics instead of returning),
+ * register the HBM skb allocator and bring up stats + /proc/timer support.
+ */
+static int __init topaz_hbm_module_init(void)
+{
+	/* buffer headroom must be able to hold all per-buffer meta words */
+	COMPILE_TIME_ASSERT(TOPAZ_HBM_BUF_META_SIZE >= (HBM_HR_OFFSET_MAX * 4));
+
+	topaz_hbm_init_payload_pools();
+
+	shinfo_cache = kmem_cache_create("topaz_skb_shinfo_cache",
+			sizeof(struct skb_shared_info),
+			0,
+			SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+			NULL);
+
+	skb_allocator_register(TOPAZ_HBM_SKB_ALLOCATOR, &topaz_hbm_skb_allocator, 0);
+
+	topaz_hbm_stat_init();
+	topaz_hbm_bufs_init();
+
+	return 0;
+}
+module_init(topaz_hbm_module_init)
+
+/* Module exit: tear down stats and the /proc + timer machinery. */
+static void __exit topaz_hbm_module_exit(void)
+{
+	topaz_hbm_stat_exit();
+	topaz_hbm_bufs_exit();
+}
+module_exit(topaz_hbm_module_exit)
+
+
diff --git a/drivers/qtn/topaz/switch_emac.c b/drivers/qtn/topaz/switch_emac.c
new file mode 100644
index 0000000..a0a338f
--- /dev/null
+++ b/drivers/qtn/topaz/switch_emac.c
@@ -0,0 +1,1464 @@
+/**
+ * (C) Copyright 2011-2012 Quantenna Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ **/
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/board/soc.h>
+
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_tqe.h>
+#include <qtn/topaz_hbm_cpuif.h>
+#include <qtn/topaz_hbm.h>
+#include <qtn/topaz_fwt.h>
+#include <qtn/topaz_ipprt.h>
+#include <qtn/topaz_vlan_cpuif.h>
+
+#include <qtn/topaz_dpi.h>
+#include <common/topaz_emac.h>
+#include <drivers/ruby/emac_lib.h>
+
+#include <qtn/topaz_fwt_sw.h>
+#include <qtn/qtn_buffers.h>
+#include <qtn/qdrv_sch.h>
+#include <qtn/qtn_wmm_ac.h>
+#include <qtn/qtn_vlan.h>
+#include <qtn/hardware_revision.h>
+#include <qtn/shared_params.h>
+
+#include <asm/board/pm.h>
+
+#ifdef CONFIG_QVSP
+#include "qtn/qvsp.h"
+#endif
+
+#include <compat.h>
+
+/* Descriptor placement: keep descriptors in SRAM unless the SRAM config
+ * claims that space for something else. */
+#if defined (TOPAZ_SRAM_CONFIG)
+#define EMAC_DESC_USE_SRAM 0
+#else
+#define EMAC_DESC_USE_SRAM 1
+#endif
+
+#define EMAC_BONDING_GROUP 1
+#define EMAC_MAX_INTERFACE	2
+/* ifindex of each EMAC once opened; used by the xmit cross-flow filter */
+static int eth_ifindex[EMAC_MAX_INTERFACE]= {0};
+
+/* Static per-port description: register bases, TQE port and IRQ line. */
+struct emac_port_info {
+	u32 base_addr;
+	u32 mdio_base_addr;
+	enum topaz_tqe_port tqe_port;
+	int irq;
+	const char *proc_name;
+};
+
+static int dscp_priority      = 0;
+static int dscp_value         = 0;
+/* nonzero: drop packets that would be forwarded from one EMAC to the other */
+static int emac_xflow_disable = 0;
+
+/* WBSP control-packet filtering on transmit (see topaz_emac_ndo_start_xmit) */
+#define EMAC_WBSP_CTRL_DISABLED	0
+#define EMAC_WBSP_CTRL_ENABLED	1
+#define EMAC_WBSP_CTRL_SWAPPED	2
+#if defined (ERICSSON_CONFIG)
+static int emac_wbsp_ctrl = EMAC_WBSP_CTRL_ENABLED;
+#else
+static int emac_wbsp_ctrl = EMAC_WBSP_CTRL_DISABLED;
+#endif
+
+
+/* Both EMACs share ENET0's MDIO block (mdio_base_addr is ENET0 for both). */
+static const struct emac_port_info iflist[] = {
+	{
+		RUBY_ENET0_BASE_ADDR,
+		RUBY_ENET0_BASE_ADDR,
+		TOPAZ_TQE_EMAC_0_PORT,
+		RUBY_IRQ_ENET0,
+		"arasan_emac0",
+	},
+	{
+		RUBY_ENET1_BASE_ADDR,
+		RUBY_ENET0_BASE_ADDR,
+		TOPAZ_TQE_EMAC_1_PORT,
+		RUBY_IRQ_ENET1,
+		"arasan_emac1",
+	},
+};
+
+static struct net_device *topaz_emacs[ARRAY_SIZE(iflist)];
+static int topaz_emac_on[ARRAY_SIZE(iflist)];
+static unsigned int topaz_emac_prev_two_connected;
+static struct delayed_work topaz_emac_dual_emac_work;
+
+/* Private data: common emac_lib state plus this port's TQE port id. */
+struct topaz_emac_priv {
+	struct emac_common com;
+	enum topaz_tqe_port tqe_port;
+};
+
+static int bonding = 0;
+module_param(bonding, int, 0644);
+MODULE_PARM_DESC(bonding, "using bonding for emac0 and emac1");
+
+/* True iff the 4 bytes at pkt_header are the Quantenna OUI (little-end
+ * first) followed by a type byte in the wifi-control range. */
+static inline bool is_qtn_oui_packet(unsigned char *pkt_header)
+{
+	const unsigned char type = pkt_header[3];
+
+	if (pkt_header[0] != (QTN_OUI & 0xFF))
+		return false;
+	if (pkt_header[1] != ((QTN_OUI >> 8) & 0xFF))
+		return false;
+	if (pkt_header[2] != ((QTN_OUI >> 16) & 0xFF))
+		return false;
+
+	return (type >= QTN_OUIE_WIFI_CONTROL_MIN) && (type <= QTN_OUIE_WIFI_CONTROL_MAX);
+}
+
+/*
+ * Enable the EMAC datapath: clear the stop flags, start RX/TX DMA and MAC,
+ * then enable the RX/TX parsers, and finally clear any stale interrupts.
+ */
+static void topaz_emac_start(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+
+	/*
+	 * These IRQ flags must be cleared when we start as stop_traffic()
+	 * relys on them to indicate when activity has stopped.
+	 */
+	emac_wr(privc, EMAC_DMA_STATUS_IRQ, DmaTxStopped | DmaRxStopped);
+
+	/* Start receive */
+	emac_setbits(privc, EMAC_DMA_CTRL, DmaStartRx);
+	emac_setbits(privc, EMAC_MAC_RX_CTRL, MacRxEnable);
+
+	/* Start transmit */
+	emac_setbits(privc, EMAC_MAC_TX_CTRL, MacTxEnable);
+	emac_setbits(privc, EMAC_DMA_CTRL, DmaStartTx);
+	emac_wr(privc, EMAC_DMA_TX_AUTO_POLL, 0x200);
+
+	/* Start rxp + txp */
+	emac_wr(privc, TOPAZ_EMAC_RXP_CTRL, (TOPAZ_EMAC_RXP_CTRL_ENABLE |
+			TOPAZ_EMAC_RXP_CTRL_TQE_SYNC_EN_BP |
+			TOPAZ_EMAC_RXP_CTRL_SYNC_TQE));
+	emac_wr(privc, TOPAZ_EMAC_TXP_CTRL, TOPAZ_EMAC_TXP_CTRL_AHB_ENABLE);
+
+	/* Clear out EMAC interrupts */
+	emac_wr(privc, EMAC_MAC_INT, ~0);
+	emac_wr(privc, EMAC_DMA_STATUS_IRQ, ~0);
+}
+
+/* Disable the EMAC datapath in the reverse order of topaz_emac_start(). */
+static void topaz_emac_stop(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+
+	/* Stop rxp + txp */
+	emac_wr(privc, TOPAZ_EMAC_RXP_CTRL, 0);
+	emac_wr(privc, TOPAZ_EMAC_TXP_CTRL, 0);
+
+	/* Stop receive */
+	emac_clrbits(privc, EMAC_DMA_CTRL, DmaStartRx);
+	emac_clrbits(privc, EMAC_MAC_RX_CTRL, MacRxEnable);
+
+	/* Stop transmit */
+	emac_clrbits(privc, EMAC_MAC_TX_CTRL, MacTxEnable);
+	emac_clrbits(privc, EMAC_DMA_CTRL, DmaStartTx);
+	emac_wr(privc, EMAC_DMA_TX_AUTO_POLL, 0x0);
+}
+
+/*
+ * Program the RX parser's default output port/node registers: lookup
+ * priority order and every fallback path (multicast, backdoor, lookup
+ * failure) points at the LHost port.
+ */
+static void topaz_emac_init_rxp_set_default_port(struct emac_common *privc)
+{
+	union topaz_emac_rxp_outport_ctrl outport;
+	union topaz_emac_rxp_outnode_ctrl outnode;
+
+	outport.raw.word0 = 0;
+	outnode.raw.word0 = 0;
+
+	/* Lookup priority order: DPI -> VLAN -> IP proto -> FWT */
+	outport.data.dpi_prio = 3;
+	outport.data.vlan_prio = 2;
+	outport.data.da_prio = 0;
+	outport.data.ip_prio = 1;
+	outport.data.mcast_en = 1;
+	outport.data.mcast_port = TOPAZ_TQE_LHOST_PORT;	/* multicast redirect target node */
+	outport.data.mcast_sel = 0;	/* 0 = multicast judgement based on emac core status, not DA */
+	outport.data.dynamic_fail_port = TOPAZ_TQE_LHOST_PORT;
+	outport.data.sw_backdoor_port = TOPAZ_TQE_LHOST_PORT;
+	outport.data.static_fail_port = TOPAZ_TQE_LHOST_PORT;
+	outport.data.static_port_sel = 0;
+	outport.data.static_mode_en = 0;
+
+	outnode.data.mcast_node = 0;
+	outnode.data.dynamic_fail_node = 0;
+	outnode.data.sw_backdoor_node = 0;
+	outnode.data.static_fail_node = 0;
+	outnode.data.static_node_sel = 0;
+
+	emac_wr(privc, TOPAZ_EMAC_RXP_OUTPORT_CTRL, outport.raw.word0);
+	emac_wr(privc, TOPAZ_EMAC_RXP_OUTNODE_CTRL, outnode.raw.word0);
+}
+
+/* Fill the RX parser's DSCP -> TID mapping registers from the driver's
+ * default dscp2tid table. */
+static void topaz_emac_init_rxp_dscp(struct emac_common *privc)
+{
+	uint8_t dscp_reg_index;
+
+	/*
+	 * EMAC RXP has 8 registers for DSCP -> TID mappings. Each register has 8 nibbles;
+	 * a single nibble corresponds to a particular DSCP that could be seen in a packet.
+	 * There are 64 different possible DSCP values (6 DSCP bits).
+	 * For example, register 0's nibbles correspond to:
+	 * Reg mask 0x0000000f -> DSCP 0x0. Mask is ANDed with the desired TID for DSCP 0x0.
+	 * Reg mask 0x000000f0 -> DSCP 0x1
+	 * ...
+	 * Reg mask 0xf0000000 -> DSCP 0x7
+	 * Next register is used for DSCP 0x8 - 0xf.
+	 */
+	for (dscp_reg_index = 0; dscp_reg_index < TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REGS; dscp_reg_index++) {
+		uint8_t dscp_nibble_index;
+		uint32_t dscp_reg_val = 0;
+
+		for (dscp_nibble_index = 0; dscp_nibble_index < 8; dscp_nibble_index++) {
+			const uint8_t dscp = dscp_reg_index * 8 + dscp_nibble_index;
+			const uint8_t tid =  qdrv_dscp2tid_default(dscp);
+			dscp_reg_val |= (tid & 0xF) << (4 * dscp_nibble_index);
+		}
+
+		emac_wr(privc, TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(dscp_reg_index), dscp_reg_val);
+	}
+}
+
+/*
+ * One-time RX/TX parser setup: default ports, DSCP and VLAN-priority TID
+ * maps, TID source selection, HBM pool wiring and the TX descriptor limit.
+ * Parsers are left disabled; topaz_emac_start() enables them.
+ */
+static void topaz_emac_init_rxptxp(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+
+	emac_wr(privc, TOPAZ_EMAC_RXP_CTRL, 0);
+	emac_wr(privc, TOPAZ_EMAC_TXP_CTRL, 0);
+
+	topaz_emac_init_rxp_set_default_port(privc);
+	topaz_emac_init_rxp_dscp(privc);
+
+	emac_wr(privc, TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID,
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(0, 0) |
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(1, 1) |
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(2, 0) |
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(3, 5) |
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(4, 5) |
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(5, 6) |
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(6, 6) |
+			TOPAZ_EMAC_RXP_VLAN_PRI_TO_TID_PRI(7, 6));
+
+	emac_wr(privc, TOPAZ_EMAC_RXP_PRIO_CTRL,
+			SM(TOPAZ_EMAC_RXP_PRIO_IS_DSCP, TOPAZ_EMAC_RXP_PRIO_CTRL_TID_SEL));
+
+	/* RX refill and TX return go through separate HBM pools */
+	emac_wr(privc, TOPAZ_EMAC_BUFFER_POOLS,
+		SM(TOPAZ_HBM_BUF_EMAC_RX_POOL, TOPAZ_EMAC_BUFFER_POOLS_RX_REPLENISH) |
+		SM(TOPAZ_HBM_EMAC_TX_DONE_POOL, TOPAZ_EMAC_BUFFER_POOLS_TX_RETURN));
+
+	emac_wr(privc, TOPAZ_EMAC_DESC_LIMIT, privc->tx.desc_count);
+
+	qdrv_dscp2tid_map_init();
+}
+
+/* Clear pending MAC/DMA interrupt status. NOTE(review): despite the name,
+ * no interrupt-enable register is written here — confirm that is intended. */
+static void topaz_emac_enable_ints(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+
+	/* Clear any pending interrupts */
+	emac_wr(privc, EMAC_MAC_INT, emac_rd(privc, EMAC_MAC_INT));
+	emac_wr(privc, EMAC_DMA_STATUS_IRQ, emac_rd(privc, EMAC_DMA_STATUS_IRQ));
+}
+
+/* Mask all MAC/DMA interrupt sources, then clear any pending status. */
+static void topaz_emac_disable_ints(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+
+	emac_wr(privc, EMAC_MAC_INT_ENABLE, 0);
+	emac_wr(privc, EMAC_DMA_INT_ENABLE, 0);
+
+	emac_wr(privc, EMAC_MAC_INT, ~0x0);
+	emac_wr(privc, EMAC_DMA_STATUS_IRQ, ~0x0);
+}
+
+/* net_device open: bring up the datapath, PHY and queue, and record this
+ * port's ifindex for the xmit cross-flow / WBSP filters. */
+static int topaz_emac_ndo_open(struct net_device *dev)
+{
+
+	emac_lib_set_rx_mode(dev);
+	topaz_emac_start(dev);
+	emac_lib_pm_emac_add_notifier(dev);
+	topaz_emac_enable_ints(dev);
+	emac_lib_phy_start(dev);
+	netif_start_queue(dev);
+
+	eth_ifindex[dev->if_port] = dev->ifindex;
+	return 0;
+}
+
+
+/* net_device stop: tear down in the reverse order of open. */
+static int topaz_emac_ndo_stop(struct net_device *dev)
+{
+	topaz_emac_disable_ints(dev);
+	emac_lib_pm_emac_remove_notifier(dev);
+	netif_stop_queue(dev);
+	emac_lib_phy_stop(dev);
+	topaz_emac_stop(dev);
+
+	return 0;
+}
+
+/*
+ * net_device transmit: apply the cross-flow and WBSP drop filters, restore
+ * any VLAN tag, then hand the packet to the TQE for this port.
+ */
+static int topaz_emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	union topaz_tqe_cpuif_ppctl ctl;
+	int8_t pool = TOPAZ_HBM_EMAC_TX_DONE_POOL;
+	int interface;
+	struct sk_buff *skb2;
+	struct qtn_vlan_dev *vdev = vport_tbl_lhost[priv->tqe_port];
+
+	for (interface = 0; interface < EMAC_MAX_INTERFACE; interface++){
+		/* In order to drop a packet, the following conditions has to be met:
+		 * emac_xflow_disable == 1
+		 * skb->skb_iif has to be none zero
+		 * skb->skb_iif the interface is Rx from emac0 or emac1
+		 */
+
+		if ((emac_xflow_disable) && (skb->skb_iif) &&
+		    ((skb->skb_iif) == eth_ifindex[interface])){
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+        }
+
+	/*
+	 * restore VLAN tag to packet if needed
+	 */
+	skb2 = switch_vlan_from_proto_stack(skb, vdev, 0, 1);
+	if (!skb2)
+		return NETDEV_TX_OK;
+
+	/* drop any WBSP control packet towards emac1 (Ethernet type 88b7)
+	Quantenna OUI (00 26 86) is located at data[14-16] followed by 1-byte type field [17] */
+	if ((emac_wbsp_ctrl == EMAC_WBSP_CTRL_ENABLED && dev->ifindex == eth_ifindex[1]) ||
+		(emac_wbsp_ctrl == EMAC_WBSP_CTRL_SWAPPED && dev->ifindex == eth_ifindex[0])) {
+		if (skb2->protocol == __constant_htons(ETHERTYPE_802A) &&
+			skb2->len > 17 && is_qtn_oui_packet(&skb2->data[14])) {
+			dev_kfree_skb(skb2);
+			return NETDEV_TX_OK;
+		}
+	}
+
+	/* enqueue to the TQE; buffer returns via the TX-done pool */
+	topaz_tqe_cpuif_ppctl_init(&ctl,
+			priv->tqe_port, NULL, 1, 0,
+			0, 1, pool, 1, 0);
+
+	return tqe_tx(&ctl, skb2);
+}
+
+/* net_device operations; most management paths delegate to emac_lib. */
+static const struct net_device_ops topaz_emac_ndo = {
+	.ndo_open = topaz_emac_ndo_open,
+	.ndo_stop = topaz_emac_ndo_stop,
+	.ndo_start_xmit = topaz_emac_ndo_start_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	.ndo_set_rx_mode = emac_lib_set_rx_mode,
+#else
+	.ndo_set_multicast_list = emac_lib_set_rx_mode,
+#endif
+	.ndo_get_stats = emac_lib_stats,
+	.ndo_do_ioctl = emac_lib_ioctl,
+};
+
+/* Return every HBM buffer still attached to an RX descriptor to its pool. */
+static void emac_bufs_free(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+	int i;
+
+	for (i = 0; i < privc->rx.desc_count; i++) {
+		if (privc->rx.descs[i].bufaddr1) {
+			topaz_hbm_put_payload_realign_bus((void *) privc->rx.descs[i].bufaddr1,
+					TOPAZ_HBM_BUF_EMAC_RX_POOL);
+		}
+	}
+}
+
+/*
+ * Build the RX and TX descriptor rings. Each RX descriptor gets an HBM
+ * buffer and is chained to the next via bufaddr2 (circular list); TX
+ * descriptors are zeroed apart from their chain pointer, since the TQE
+ * fills control/bufaddr1 per transmitted frame.
+ * Returns 0 on success, -1 when an RX buffer cannot be allocated.
+ */
+int topaz_emac_descs_init(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+	int i;
+	struct emac_desc __iomem *rx_bus_descs = (void *)privc->rx.descs_dma_addr;
+	struct emac_desc __iomem *tx_bus_descs = (void *)privc->tx.descs_dma_addr;
+
+	for (i = 0; i < privc->rx.desc_count; i++) {
+		unsigned long ctrl;
+		int bufsize;
+		void * buf_bus;
+
+		/* usable payload = HBM buffer minus headroom and shinfo tail */
+		bufsize = TOPAZ_HBM_BUF_EMAC_RX_SIZE
+			- TOPAZ_HBM_PAYLOAD_HEADROOM
+			- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		buf_bus = topaz_hbm_get_payload_bus(TOPAZ_HBM_BUF_EMAC_RX_POOL);
+		if (unlikely(!buf_bus)) {
+			printk("%s: buf alloc error buf 0x%p\n", __FUNCTION__, buf_bus);
+			return -1;
+		}
+
+		ctrl = min(bufsize, RxDescBuf1SizeMask) << RxDescBuf1SizeShift;
+		ctrl |= RxDescChain2ndAddr;
+
+		/* hand the descriptor to the hardware */
+		privc->rx.descs[i].status = RxDescOwn;
+		privc->rx.descs[i].control = ctrl;
+		privc->rx.descs[i].bufaddr1 = (unsigned long)buf_bus;
+		privc->rx.descs[i].bufaddr2 = (unsigned long)&rx_bus_descs[(i + 1) % privc->rx.desc_count];
+	}
+
+	for (i = 0; i < privc->tx.desc_count; i++) {
+		/*
+		 * For each transmitted buffer, TQE will update:
+		 * - tdes1 (control) according to TOPAZ_TQE_EMAC_TDES_1_CNTL & payload size
+		 * - tdes2 (bufaddr1) with payload dma address
+		 * So initializing these fields here is meaningless
+		 */
+		privc->tx.descs[i].status = 0x0;
+		privc->tx.descs[i].control = 0x0;
+		privc->tx.descs[i].bufaddr1 = 0x0;
+		privc->tx.descs[i].bufaddr2 = (unsigned long)&tx_bus_descs[(i + 1) % privc->tx.desc_count];
+	}
+
+	return 0;
+}
+
+/*
+ * Set the interface MAC address: the board base address from
+ * get_ethernet_addr(), with port_num added to the low 3 octets as a
+ * 24-bit value (carry propagates from byte 5 up to byte 3) so each EMAC
+ * port gets a distinct address.
+ */
+static void topaz_emac_set_eth_addr(struct net_device *dev, int port_num)
+{
+	memcpy(dev->dev_addr, get_ethernet_addr(), ETH_ALEN);
+
+	if (port_num > 0) {
+		u32 val;
+
+		val = (u32)dev->dev_addr[5] +
+			((u32)dev->dev_addr[4] << 8) +
+			((u32)dev->dev_addr[3] << 16);
+		val += port_num;
+		dev->dev_addr[5] = (unsigned char)val;
+		dev->dev_addr[4] = (unsigned char)(val >> 8);
+		dev->dev_addr[3] = (unsigned char)(val >> 16);
+	}
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/* seq_file body: print per-device EMAC statistics (newer procfs API). */
+static int topaz_emac_proc_show(struct seq_file *sfile, void *v)
+{
+	struct net_device *dev = sfile->private;
+	return emac_lib_stats_sprintf(sfile, dev);
+}
+
+/* single_open wrapper; the net_device was stashed as proc entry data. */
+static int topaz_emac_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, topaz_emac_proc_show, PDE_DATA(inode));
+}
+#else
+/* Legacy read_proc handler: whole stats dump in one read, then EOF. */
+static int topaz_emac_proc_rd(char *buf, char **start, off_t offset, int count,
+		int *eof, void *data)
+{
+	char *p = buf;
+	struct net_device *dev = data;
+
+	p += emac_lib_stats_sprintf(p, dev);
+
+	*eof = 1;
+
+	return p - buf;
+}
+#endif
+
+/* sysfs "vlan_sel" read: report the TAG field of the RXP VLAN priority
+ * control register. */
+static ssize_t topaz_emac_vlan_sel_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct emac_common *privc = netdev_priv(to_net_dev(dev));
+	uint32_t reg = emac_rd(privc, TOPAZ_EMAC_RXP_VLAN_PRI_CTRL);
+
+	return sprintf(buf, "%u\n", MS(reg, TOPAZ_EMAC_RXP_VLAN_PRI_CTRL_TAG));
+}
+
+/* sysfs "vlan_sel" write: parse an unsigned byte and program the TAG
+ * field. Note the whole count is consumed even when parsing fails. */
+static ssize_t topaz_emac_vlan_sel_sysfs_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct emac_common *privc = netdev_priv(to_net_dev(dev));
+	uint8_t tag_sel;
+
+	if (sscanf(buf, "%hhu", &tag_sel) == 1) {
+		emac_wr(privc, TOPAZ_EMAC_RXP_VLAN_PRI_CTRL,
+				SM(tag_sel, TOPAZ_EMAC_RXP_VLAN_PRI_CTRL_TAG));
+	}
+	return count;
+}
+
+/*
+ * sysfs write handler: on 'U'/'u', commit the previously-stored
+ * dscp_priority/dscp_value pair (module globals set via the prio_sel and
+ * prio_val attributes) into the DSCP->TID mapping table. Each 32-bit
+ * register holds 8 4-bit entries, hence the /8 and %8 indexing.
+ * NOTE(review): dscp_priority/dscp_value are module-wide globals shared
+ * across EMAC devices and writes are not serialized -- confirm that
+ * concurrent sysfs writers are not a concern here.
+ */
+static ssize_t topaz_emac_dscp_sysfs_update(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+
+	uint32_t dscp = 0;
+	uint8_t dscp_reg_index;
+	uint8_t dscp_nibble_index;
+	struct emac_common *privc = netdev_priv(to_net_dev(dev));
+
+	dscp_reg_index        = (dscp_priority / 8);
+	dscp_nibble_index     = (dscp_priority % 8);
+
+	if (buf[0] == 'U' || buf[0] == 'u'){
+		/* read-modify-write the selected 4-bit entry */
+		dscp = emac_rd(privc, TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(dscp_reg_index));
+		dscp &= ~((0xF) << (4 * dscp_nibble_index));
+		dscp |= (dscp_value & 0xF) << (4 * dscp_nibble_index);
+		emac_wr(privc, TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(dscp_reg_index), dscp);
+		g_dscp_value[privc->mac_id] = dscp_value & 0xFF;
+		g_dscp_flag = 1;
+	}
+	return count;
+}
+
+/*
+ * sysfs write: stage the TID value to program (consumed later by the
+ * dscp_update attribute).
+ * NOTE(review): `num >= 15` rejects the value 15, yet the table entry is
+ * a 4-bit nibble (range 0-15) -- confirm whether `num > 15` was intended.
+ */
+static ssize_t topaz_emac_dscp_prio_val_sysfs_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+
+	long num;
+	num = simple_strtol(buf, NULL, 10);
+	if (num < 0 || num >= 15)
+		return -EINVAL;
+	dscp_value = num;
+	return count;
+}
+
+/*
+ * sysfs write: stage which DSCP code point (QTN_DSCP_MIN..QTN_DSCP_MAX)
+ * the next dscp_update write will modify.
+ */
+static ssize_t topaz_emac_dscp_prio_sel_sysfs_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+
+	long num;
+	num = simple_strtol(buf, NULL, 10);
+	if (num < QTN_DSCP_MIN || num > QTN_DSCP_MAX)
+		return -EINVAL;
+	dscp_priority = num;
+	return count;
+}
+
+/*
+ * sysfs read: dump the whole DSCP->TID table, one line per entry.
+ * Each register packs 8 4-bit entries; the inner loop shifts them out
+ * low nibble first.
+ */
+static ssize_t topaz_emac_dscp_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	uint32_t dscp = 0;
+	uint8_t dscp_reg_index;
+	char *p = 0;
+	int index = 0;
+	uint8_t dscp_nibble_index;
+	struct emac_common *privc = netdev_priv(to_net_dev(dev));
+
+	p = buf;
+	p += sprintf(p, "%s\n", "DSCP TABLE:");
+	for (dscp_reg_index = 0; dscp_reg_index < TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REGS; dscp_reg_index++) {
+		dscp = emac_rd(privc, TOPAZ_EMAC_RXP_IP_DIFF_SRV_TID_REG(dscp_reg_index));
+		for (dscp_nibble_index = 0; dscp_nibble_index < 8; dscp_nibble_index++) {
+			p += sprintf(p, "Index \t %d: \t Data %x\n", index++, (dscp & 0xF));
+			dscp >>= 0x4;
+		}
+	}
+	return (int)(p - buf);
+}
+
+/* sysfs read: report the current xflow-disable flag (module global). */
+static ssize_t topaz_emacx_xflow_sysfs_show(struct device *dev, struct device_attribute *attr,
+						char *buff)
+{
+	int count = 0;
+
+	count += sprintf(buff + count, "%d\n", emac_xflow_disable);
+
+	return count;
+}
+
+/* sysfs write: 'D'/'d' disables cross-EMAC flow, 'E'/'e' enables it;
+ * any other input is silently ignored. */
+static ssize_t topaz_emacx_xflow_sysfs_update(struct device *dev, struct device_attribute *attr,
+                const char *buf, size_t count)
+{
+        if (buf[0] == 'D' || buf[0] == 'd'){
+		emac_xflow_disable = 1;
+        } else if (buf[0] == 'E' || buf[0] == 'e'){
+		emac_xflow_disable = 0;
+	}
+        return count;
+}
+
+
+/*
+ * sysfs attribute declarations and per-netdev registration helpers.
+ * Kernels >= 4.7 reject world-writable attribute modes, hence the
+ * owner-only permission variants behind the version checks.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(device_dscp_update, S_IWUSR,
+		NULL, topaz_emac_dscp_sysfs_update);
+#else
+static DEVICE_ATTR(device_dscp_update, S_IWUGO,
+		NULL, topaz_emac_dscp_sysfs_update);
+#endif
+
+/* write-only: commit staged DSCP priority/value pair to hardware */
+static int topaz_emac_dscp_update_sysfs_create(struct net_device *net_dev)
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_device_dscp_update.attr);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(device_emacx_xflow_update, S_IRUSR | S_IWUSR,
+		topaz_emacx_xflow_sysfs_show, topaz_emacx_xflow_sysfs_update);
+#else
+static DEVICE_ATTR(device_emacx_xflow_update, S_IRUGO | S_IWUGO,
+		topaz_emacx_xflow_sysfs_show, topaz_emacx_xflow_sysfs_update);
+#endif
+
+/* read/write: cross-EMAC flow disable flag */
+static int topaz_emacs_update_sysfs_create(struct net_device *net_dev)
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_device_emacx_xflow_update.attr);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(device_dscp_prio_val, S_IWUSR,
+		NULL, topaz_emac_dscp_prio_val_sysfs_store);
+#else
+static DEVICE_ATTR(device_dscp_prio_val, S_IWUGO,
+		NULL, topaz_emac_dscp_prio_val_sysfs_store);
+#endif
+
+/* write-only: stage the TID value for the next dscp_update */
+static int topaz_emac_dscp_prio_val_sysfs_create(struct net_device *net_dev)
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_device_dscp_prio_val.attr);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(device_dscp_prio_sel, S_IWUSR,
+		NULL, topaz_emac_dscp_prio_sel_sysfs_store);
+#else
+static DEVICE_ATTR(device_dscp_prio_sel, S_IWUGO,
+		NULL, topaz_emac_dscp_prio_sel_sysfs_store);
+#endif
+
+/* write-only: stage which DSCP entry the next dscp_update modifies */
+static int topaz_emac_dscp_prio_sel_sysfs_create(struct net_device *net_dev)
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_device_dscp_prio_sel.attr);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(device_dscp_show, S_IRUSR,
+		topaz_emac_dscp_sysfs_show, NULL);
+#else
+static DEVICE_ATTR(device_dscp_show, S_IRUGO,
+		topaz_emac_dscp_sysfs_show, NULL);
+#endif
+
+/* read-only: dump the full DSCP->TID table */
+static int topaz_emac_dscp_show_sysfs_create(struct net_device *net_dev)
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_device_dscp_show.attr);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(vlan_sel, S_IRUSR | S_IWUSR,
+		topaz_emac_vlan_sel_sysfs_show, topaz_emac_vlan_sel_sysfs_store);
+#else
+static DEVICE_ATTR(vlan_sel, S_IRUGO | S_IWUGO,
+		topaz_emac_vlan_sel_sysfs_show, topaz_emac_vlan_sel_sysfs_store);
+#endif
+
+/* read/write: RXP VLAN priority tag select */
+static int topaz_emac_vlan_sel_sysfs_create(struct net_device *net_dev)
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_vlan_sel.attr);
+}
+
+/* counterpart of topaz_emac_vlan_sel_sysfs_create(), used at teardown */
+static void topaz_emac_vlan_sel_sysfs_remove(struct net_device *net_dev)
+{
+	sysfs_remove_file(&net_dev->dev.kobj, &dev_attr_vlan_sel.attr);
+}
+
+/*
+ * FWT port remapper used in bonded mode: pick port 0 or 1 from the
+ * parity of the MAC address's last octet (in_port is ignored).
+ * Presumably this spreads stations evenly across the two bonded EMACs
+ * -- confirm against fwt_sw_register_port_remapper() usage.
+ */
+static uint8_t topaz_emac_fwt_sw_remap_port(uint8_t in_port, const uint8_t *mac_be)
+{
+	return mac_be[5] % 2;
+}
+
+/* sysfs read: report the current WBSP control mode (module global). */
+static ssize_t topaz_emacx_wbsp_ctrl_sysfs_show(struct device *dev, struct device_attribute *attr,
+						char *buff)
+{
+	int count = 0;
+
+	count += sprintf(buff + count, "%d\n", emac_wbsp_ctrl);
+
+	return count;
+}
+
+/* sysfs write: '0' disabled, '1' enabled, '2' swapped (which EMAC the
+ * WBSP control-packet filter applies to); other input is ignored. */
+static ssize_t topaz_emacx_wbsp_ctrl_sysfs_update(struct device *dev, struct device_attribute *attr,
+                const char *buf, size_t count)
+{
+        if (buf[0] == '0') {
+		emac_wbsp_ctrl = EMAC_WBSP_CTRL_DISABLED;
+        } else if (buf[0] == '1') {
+		emac_wbsp_ctrl = EMAC_WBSP_CTRL_ENABLED;
+	} else if (buf[0] == '2') {
+		emac_wbsp_ctrl = EMAC_WBSP_CTRL_SWAPPED;
+	}
+        return count;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static DEVICE_ATTR(device_emacx_wbsp_ctrl, S_IRUSR | S_IWUSR,
+		topaz_emacx_wbsp_ctrl_sysfs_show, topaz_emacx_wbsp_ctrl_sysfs_update);
+#else
+static DEVICE_ATTR(device_emacx_wbsp_ctrl, S_IRUGO | S_IWUGO,
+		topaz_emacx_wbsp_ctrl_sysfs_show, topaz_emacx_wbsp_ctrl_sysfs_update);
+#endif
+/* register the wbsp_ctrl attribute on the given netdev */
+static int topaz_emacs_wbsp_ctrl_sysfs_create(struct net_device *net_dev)
+{
+	return sysfs_create_file(&net_dev->dev.kobj, &dev_attr_device_emacx_wbsp_ctrl.attr);
+}
+
+/*
+ * TQE receive handler for an EMAC port (registered via
+ * tqe_port_add_handler). Drops Quantenna WBSP control frames when the
+ * wbsp filter targets this interface, otherwise pushes the frame up the
+ * stack after VLAN translation.
+ */
+static void topaz_emac_tqe_rx_handler(void *token,
+		const union topaz_tqe_cpuif_descr *descr,
+		struct sk_buff *skb, uint8_t *whole_frm_hdr)
+{
+	struct net_device *dev = token;
+
+	skb->dev = dev;
+	skb->protocol = eth_type_trans(skb, skb->dev);
+
+	/* discard WBSP control packet coming from emac1 (Ethernet type 88b7)
+	Note that in this receive routine, header has been removed from data buffer, so
+	Quantenna OUI (00 26 86) is now located at data[0-2] followed by 1-byte type field [3] */
+	if ((emac_wbsp_ctrl == EMAC_WBSP_CTRL_ENABLED && dev->ifindex == eth_ifindex[1]) ||
+		(emac_wbsp_ctrl == EMAC_WBSP_CTRL_SWAPPED && dev->ifindex == eth_ifindex[0])) {
+		if (skb->protocol == __constant_htons(ETHERTYPE_802A) &&
+			skb->len > 3 && is_qtn_oui_packet(&skb->data[0])) {
+			dev_kfree_skb(skb);
+			return;
+		}
+	}
+
+	/* may consume the skb (returns NULL) when the frame is redirected */
+	skb = switch_vlan_to_proto_stack(skb, 0);
+	if (skb)
+		netif_receive_skb(skb);
+}
+
+#ifdef TOPAZ_EMAC_NULL_BUF_WR
+/*
+ * One-time setup: enable the HBM interrupt and the EMAC RX pool queue in
+ * the HBM CSR, then unmask the HBM interrupt at the LHost OR-interrupt
+ * controller.
+ */
+static inline void topaz_hbm_emac_rx_pool_intr_init(void)
+{
+	uint32_t tmp;
+
+	tmp = readl(TOPAZ_HBM_CSR_REG);
+	tmp |= TOPAZ_HBM_CSR_INT_EN | TOPAZ_HBM_CSR_Q_EN(TOPAZ_HBM_BUF_EMAC_RX_POOL);
+	writel(tmp, TOPAZ_HBM_CSR_REG);
+
+	tmp = readl(RUBY_SYS_CTL_LHOST_ORINT_EN);
+	tmp |= TOPAZ_HBM_INT_EN;
+	writel(tmp, RUBY_SYS_CTL_LHOST_ORINT_EN);
+}
+
+/*
+ * (Re-)arm the pool-underflow interrupt for the EMAC RX pool: clear all
+ * raw/mask bits, then set the mask + ack the raw bit for this pool and
+ * re-enable HBM interrupts.
+ */
+static inline void topaz_hbm_emac_rx_pool_uf_intr_en(void)
+{
+	uint32_t tmp = readl(TOPAZ_HBM_CSR_REG);
+
+	tmp &= ~(TOPAZ_HBM_CSR_INT_MSK_RAW);
+	tmp |= TOPAZ_HBM_CSR_UFLOW_INT_MASK(TOPAZ_HBM_BUF_EMAC_RX_POOL) |\
+		TOPAZ_HBM_CSR_UFLOW_INT_RAW(TOPAZ_HBM_BUF_EMAC_RX_POOL) |\
+		TOPAZ_HBM_CSR_INT_EN;
+	writel(tmp, TOPAZ_HBM_CSR_REG);
+}
+
+
+/*
+ * Walk the `budget` RX descriptors preceding the DMA engine's current
+ * RX descriptor pointer and re-attach an HBM buffer to any DMA-owned
+ * descriptor whose bufaddr1 is NULL (left empty after a pool underflow).
+ * Returns 0 when all holes were filled, -1 if the pool ran dry again.
+ * NOTE(review): `(ei - budget)` relies on unsigned wraparound plus the
+ * modulo -- assumes QTN_BUFS_EMAC_RX_RING is a power of two; confirm.
+ */
+static inline int topaz_emac_rx_null_buf_del(struct emac_common *privc, int budget)
+{
+	uint32_t i;
+	uint32_t ei;
+
+	/* index of the descriptor the DMA engine is currently at */
+	ei = (struct emac_desc *)emac_rd(privc, EMAC_DMA_CUR_RXDESC_PTR)
+		- (struct emac_desc *)privc->rx.descs_dma_addr;
+
+	for (i = (ei - budget) % QTN_BUFS_EMAC_RX_RING;
+			i != ei; i = (i + 1) % QTN_BUFS_EMAC_RX_RING) {
+		if (privc->rx.descs[i].status & RxDescOwn) {
+			if (!privc->rx.descs[i].bufaddr1) {
+				uint32_t buf_bus;
+				buf_bus = (uint32_t)topaz_hbm_get_payload_bus(TOPAZ_HBM_BUF_EMAC_RX_POOL);
+				if (buf_bus)
+					privc->rx.descs[i].bufaddr1 = buf_bus;
+				 else
+					return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Force (enable != 0) or release (enable == 0) static RXP out-port
+ * steering to the LHost TQE port. The register read-modify-write is done
+ * with local interrupts off to keep it atomic on this CPU.
+ */
+static void topaz_emac_set_outport(struct net_device *ndev, uint32_t enable)
+{
+#define TOPAZ_EMAC_OUTPORT_ENABLE	1
+#define TOPAZ_EMAC_OUTPORT_DISABLE	0
+	struct emac_common *privc;
+	union topaz_emac_rxp_outport_ctrl outport;
+	unsigned long flags;
+
+	privc = netdev_priv(ndev);
+
+	local_irq_save(flags);
+	outport.raw.word0 = emac_rd(privc, TOPAZ_EMAC_RXP_OUTPORT_CTRL);
+	if (enable) {
+		outport.data.static_port_sel = TOPAZ_TQE_LHOST_PORT;
+		outport.data.static_mode_en = TOPAZ_EMAC_OUTPORT_ENABLE;
+	} else {
+		outport.data.static_port_sel = 0;
+		outport.data.static_mode_en = TOPAZ_EMAC_OUTPORT_DISABLE;
+	}
+	emac_wr(privc, TOPAZ_EMAC_RXP_OUTPORT_CTRL, outport.raw.word0);
+	local_irq_restore(flags);
+}
+
+/*
+ * Apply static LHost out-port steering to every registered EMAC device.
+ * Exported for use by other kernel modules.
+ */
+void topaz_emac_to_lhost(uint32_t enable)
+{
+	struct net_device *dev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(iflist); i++) {
+		dev = topaz_emacs[i];
+		if (dev)
+			topaz_emac_set_outport(dev, enable);
+	}
+}
+EXPORT_SYMBOL(topaz_emac_to_lhost);
+
+/* Exported accessor for the module's bonding flag.
+ * NOTE(review): old-style `()` prototype -- `(void)` would be cleaner. */
+int topaz_emac_get_bonding()
+{
+	return bonding;
+}
+EXPORT_SYMBOL(topaz_emac_get_bonding);
+
+/*
+ * Disable the RXP and MAC receive path on every registered EMAC; used
+ * during pool-underflow recovery before re-filling descriptors.
+ */
+static inline void topaz_emac_stop_rx(void)
+{
+	struct net_device *dev;
+	struct topaz_emac_priv *priv;
+	struct emac_common *privc;
+	int i;
+
+	/* Stop the emac, try to take the null buffer off and refill with new buffer */
+	for (i = 0; i < ARRAY_SIZE(iflist); i++) {
+		dev = topaz_emacs[i];
+		if (dev) {
+			priv = netdev_priv(dev);
+			privc = &priv->com;
+			/* Stop rxp + emac rx */
+			emac_wr(privc, TOPAZ_EMAC_RXP_CTRL, 0);
+			emac_clrbits(privc, EMAC_MAC_RX_CTRL, MacRxEnable);
+		}
+	}
+}
+
+/*
+ * Counterpart of topaz_emac_stop_rx(): re-enable the MAC receive path
+ * and the RXP (with TQE sync) on every registered EMAC.
+ */
+static inline void topaz_emac_start_rx(void)
+{
+	struct net_device *dev;
+	struct topaz_emac_priv *priv;
+	struct emac_common *privc;
+	int i;
+
+	/* Restart each emac after underflow recovery has refilled buffers */
+	for (i = 0; i < ARRAY_SIZE(iflist); i++) {
+		dev = topaz_emacs[i];
+		if (dev) {
+			priv = netdev_priv(dev);
+			privc = &priv->com;
+			/* Start rxp + emac rx */
+			emac_setbits(privc, EMAC_MAC_RX_CTRL, MacRxEnable);
+			emac_wr(privc, TOPAZ_EMAC_RXP_CTRL, (TOPAZ_EMAC_RXP_CTRL_ENABLE |
+				TOPAZ_EMAC_RXP_CTRL_TQE_SYNC_EN_BP |
+				TOPAZ_EMAC_RXP_CTRL_SYNC_TQE));
+		}
+	}
+}
+
+/*
+ * Pool-underflow recovery worker (placed in SRAM, invoked via the
+ * topaz_emac_null_buf_del_cb callback set by the HBM IRQ handler).
+ * Refills NULL descriptor buffers on every EMAC; only when all succeed
+ * does it re-arm the underflow interrupt, restart RX, and clear the
+ * callback. Otherwise the callback stays set and recovery is retried.
+ */
+void  __attribute__((section(".sram.text"))) topaz_emac_null_buf_del(void)
+{
+	struct net_device *dev;
+	struct topaz_emac_priv *priv;
+	struct emac_common *privc;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(iflist); i++) {
+		dev = topaz_emacs[i];
+		if (dev) {
+			priv = netdev_priv(dev);
+			privc = &priv->com;
+			ret += topaz_emac_rx_null_buf_del(privc, TOPAZ_HBM_BUF_EMAC_RX_COUNT - 1);
+		}
+	}
+
+	if (ret == 0) {
+		topaz_hbm_emac_rx_pool_uf_intr_en();
+		topaz_emac_start_rx();
+		topaz_emac_null_buf_del_cb = NULL;
+	}
+}
+
+
+/*
+ * HBM interrupt handler (SRAM). On an EMAC RX pool underflow: stop RX,
+ * ack the underflow raw bit while leaving other raw bits untouched,
+ * disable further HBM interrupts, and schedule deferred recovery by
+ * publishing topaz_emac_null_buf_del as the callback.
+ */
+static irqreturn_t __attribute__((section(".sram.text"))) topaz_hbm_handler(int irq, void *dev_id)
+{
+	uint32_t tmp;
+
+	tmp = readl(TOPAZ_HBM_CSR_REG);
+	if (tmp & TOPAZ_HBM_CSR_UFLOW_INT_RAW(TOPAZ_HBM_BUF_EMAC_RX_POOL)) {
+		topaz_emac_stop_rx();
+		tmp &= ~(TOPAZ_HBM_CSR_INT_MSK_RAW &
+			~(TOPAZ_HBM_CSR_UFLOW_INT_RAW(TOPAZ_HBM_BUF_EMAC_RX_POOL)));
+		tmp &= ~(TOPAZ_HBM_CSR_INT_EN);
+		writel(tmp, TOPAZ_HBM_CSR_REG);
+		topaz_emac_null_buf_del_cb = topaz_emac_null_buf_del;
+	}
+	return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * Install two DPI filters that redirect ARP frames to the LHost TQE
+ * port: one matching the EtherType at the untagged frame start, one
+ * matching it after a single 802.1Q tag.
+ */
+static void __init topaz_dpi_filter_arp(int port_num)
+{
+	struct topaz_dpi_filter_request req;
+	struct topaz_dpi_field_def field;
+
+	/* ARP */
+	memset(&req, 0, sizeof(req));
+	memset(&field, 0, sizeof(field));
+
+	req.fields = &field;
+	req.field_count = 1;
+	req.out_port = TOPAZ_TQE_LHOST_PORT;
+	req.out_node = 0;
+	req.tid = 0;
+
+	field.ctrl.data.enable = TOPAZ_DPI_ENABLE;
+	field.ctrl.data.anchor = TOPAZ_DPI_ANCHOR_FRAME_START;
+	field.ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+	field.ctrl.data.offset = 3; /* ethhdr->h_proto: ETH_ALEN*2/sizeof(dword)*/
+	field.val = (ETH_P_ARP << 16);
+	field.mask = 0xffff0000;
+
+        topaz_dpi_filter_add(port_num, &req);
+
+	/* 8021Q && ARP */
+	memset(&req, 0, sizeof(req));
+	memset(&field, 0, sizeof(field));
+
+	req.fields = &field;
+	req.field_count = 1;
+	req.out_port = TOPAZ_TQE_LHOST_PORT;
+
+	field.ctrl.data.enable = TOPAZ_DPI_ENABLE;
+	field.ctrl.data.anchor = TOPAZ_DPI_ANCHOR_VLAN0;
+	field.ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+	field.ctrl.data.offset = 1;
+	field.val = (ETH_P_ARP << 16);
+	field.mask = 0xffff0000;
+
+	topaz_dpi_filter_add(port_num, &req);
+}
+
+/*
+ * Install a DPI filter steering IPv4 packets with a video-class DSCP to
+ * the LHost TQE port. The val/mask pair matches the 6-bit DSCP field in
+ * the first IPv4 header dword (0xb8 >> 2 == 0x2e, presumably DSCP EF --
+ * confirm against the traffic classes used by this platform).
+ */
+static void __init topaz_dpi_filter_dscp_vi(int port_num)
+{
+        struct topaz_dpi_filter_request req;
+        struct topaz_dpi_field_def field;
+
+        /* DSCP VI */
+        memset(&req, 0, sizeof(req));
+        memset(&field, 0, sizeof(field));
+
+        req.fields = &field;
+        req.field_count = 1;
+        req.out_port = TOPAZ_TQE_LHOST_PORT;
+        req.out_node = 0;
+        req.tid = 0;
+
+        field.ctrl.data.enable = TOPAZ_DPI_ENABLE;
+        field.ctrl.data.anchor = TOPAZ_DPI_ANCHOR_IPV4;
+        field.ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+        field.ctrl.data.offset = 0;
+	field.val =  0x00b80000;
+	field.mask = 0x00fc0000;
+
+        topaz_dpi_filter_add(port_num, &req);
+}
+
+/*
+ * Install DPI filters that steer DHCPv4 traffic (both directions:
+ * server->client 67->68 and client->server 68->67, matched on the UDP
+ * src/dst port pair) to the LHost TQE port.
+ */
+static void __init topaz_dpi_filter_dhcp(int port_num)
+{
+	struct topaz_dpi_filter_request req;
+	struct topaz_dpi_field_def field;
+
+	/* UDP && srcport == 67 && dstport == 68 */
+
+	memset(&req, 0, sizeof(req));
+	memset(&field, 0, sizeof(field));
+
+	req.fields = &field;
+	req.field_count = 1;
+	req.out_port = TOPAZ_TQE_LHOST_PORT;
+	req.out_node = 0;
+	req.tid = 0;
+
+	field.ctrl.data.enable = TOPAZ_DPI_ENABLE;
+	field.ctrl.data.anchor = TOPAZ_DPI_ANCHOR_UDP;
+	field.ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+	field.ctrl.data.offset = 0;	/* src_port/dst_port */
+	field.val = (DHCPSERVER_PORT << 16) | DHCPCLIENT_PORT;
+	field.mask = 0xffffffff;
+
+	topaz_dpi_filter_add(port_num, &req);
+
+	/* UDP && srcport == 68 && dstport == 67 */
+
+	memset(&req, 0, sizeof(req));
+	memset(&field, 0, sizeof(field));
+
+	req.fields = &field;
+	req.field_count = 1;
+	req.out_port = TOPAZ_TQE_LHOST_PORT;
+	req.out_node = 0;
+	req.tid = 0;
+
+	field.ctrl.data.enable = TOPAZ_DPI_ENABLE;
+	field.ctrl.data.anchor = TOPAZ_DPI_ANCHOR_UDP;
+	field.ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+	field.ctrl.data.offset = 0;     /* src_port/dst_port */
+	field.val = (DHCPCLIENT_PORT << 16) | DHCPSERVER_PORT;
+	field.mask = 0xffffffff;
+
+	topaz_dpi_filter_add(port_num, &req);
+}
+
+#ifdef CONFIG_IPV6
+/*
+ * Install a two-field DPI filter steering DHCPv6 server->client traffic
+ * (IPv6 next-header == UDP, UDP ports 547->546) to the LHost TQE port.
+ */
+static void __init topaz_dpi_filter_dhcpv6(int port_num)
+{
+	struct topaz_dpi_filter_request req;
+	struct topaz_dpi_field_def field[2];
+
+	/* IPv6 && UDP && srcport == 547 && dstport == 546 */
+
+	memset(&req, 0, sizeof(req));
+	memset(field, 0, sizeof(field));
+
+	req.fields = field;
+	req.field_count = 2;
+	req.out_port = TOPAZ_TQE_LHOST_PORT;
+
+	field[0].ctrl.data.enable = TOPAZ_DPI_ENABLE;
+	field[0].ctrl.data.anchor = TOPAZ_DPI_ANCHOR_IPV6;
+	field[0].ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+	field[0].ctrl.data.offset = 1;
+	field[0].val = (uint32_t)(IPPROTO_UDP << 8);
+	field[0].mask = (uint32_t)(0xff << 8);
+
+	field[1].ctrl.data.enable = TOPAZ_DPI_ENABLE;
+	field[1].ctrl.data.anchor = TOPAZ_DPI_ANCHOR_UDP;
+	field[1].ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+	field[1].ctrl.data.offset = 0;
+	field[1].val = (DHCPV6SERVER_PORT << 16) | DHCPV6CLIENT_PORT;
+	field[1].mask = 0xffffffff;
+
+	topaz_dpi_filter_add(port_num, &req);
+}
+
+/*
+ * Install a DPI filter steering ICMPv6 (matched on the IPv6 next-header
+ * byte) to the LHost TQE port; covers neighbour discovery among others.
+ */
+static void __init topaz_dpi_filter_icmpv6(int port_num)
+{
+	struct topaz_dpi_filter_request req;
+	struct topaz_dpi_field_def field;
+
+        /* IPv6 && ICMPv6 */
+
+	memset(&req, 0, sizeof(req));
+	memset(&field, 0, sizeof(field));
+
+	req.fields = &field;
+	req.field_count = 1;
+	req.out_port = TOPAZ_TQE_LHOST_PORT;
+
+	field.ctrl.data.enable = TOPAZ_DPI_ENABLE;
+	field.ctrl.data.anchor = TOPAZ_DPI_ANCHOR_IPV6;
+	field.ctrl.data.cmp_op = TOPAZ_DPI_CMPOP_EQ;
+	field.ctrl.data.offset = 1;
+	field.val = (uint32_t)(IPPROTO_ICMPV6 << 8);
+	field.mask = (uint32_t)(0xff << 8);
+
+	topaz_dpi_filter_add(port_num, &req);
+}
+#endif /* CONFIG_IPV6 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/* file_operations for the per-port stats proc entry (seq_file based). */
+static const struct file_operations topaz_emac_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= topaz_emac_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif
+
+
+/*
+ * Probe and bring up one EMAC port: allocate the netdev, wire up ops and
+ * MAC address, set up MII, descriptor rings, TQE/VLAN plumbing, DPI
+ * filters, proc/sysfs entries, and TQE port registration.
+ * Returns the netdev on success, NULL if the port is absent/unused or
+ * any step fails (error labels unwind in reverse order; each label is
+ * named after the step that failed).
+ */
+static struct net_device * __init topaz_emac_init(int port_num)
+{
+	const struct emac_port_info * const port = &iflist[port_num];
+	struct topaz_emac_priv *priv = NULL;
+	struct emac_common *privc = NULL;
+	struct net_device *dev = NULL;
+	int rc;
+	int emac_cfg;
+	int emac_phy;
+	char devname[IFNAMSIZ + 1];
+
+	printk(KERN_INFO "%s, emac%d\n", __FUNCTION__, port_num);
+
+	if (emac_lib_board_cfg(port_num, &emac_cfg, &emac_phy)) {
+		return NULL;
+	}
+
+	/* silently skip ports the board config marks unused */
+	if ((emac_cfg & EMAC_IN_USE) == 0) {
+		return NULL;
+	}
+
+	/* Allocate device structure */
+	sprintf(devname, "eth%d_emac%d", soc_id(), port_num);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dev = alloc_netdev(sizeof(struct topaz_emac_priv), devname, NET_NAME_UNKNOWN, ether_setup);
+#else
+	dev = alloc_netdev(sizeof(struct topaz_emac_priv), devname, ether_setup);
+#endif
+	if (!dev) {
+		printk(KERN_ERR "%s: alloc_netdev failed\n", __FUNCTION__);
+		return NULL;
+	}
+
+	/* Initialize device structure fields */
+	dev->netdev_ops = &topaz_emac_ndo;
+	dev->tx_queue_len = 8;
+	dev->irq = port->irq;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dev->ethtool_ops = &emac_lib_ethtool_ops;
+#else
+	SET_ETHTOOL_OPS(dev, &emac_lib_ethtool_ops);
+#endif
+	topaz_emac_set_eth_addr(dev, port_num);
+
+	/* Initialize private data */
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof(*priv));
+	priv->tqe_port = port->tqe_port;
+	privc = &priv->com;
+	privc->dev = dev;
+	privc->mac_id = port_num;
+	privc->vbase = port->base_addr;
+	privc->mdio_vbase = port->mdio_base_addr;
+	privc->emac_cfg = emac_cfg;
+	privc->phy_addr = emac_phy;
+
+	/* Map the TQE port to the device port */
+	dev->if_port = port->tqe_port;
+	/* Initialize MII */
+	if (emac_lib_mii_init(dev)) {
+		goto mii_init_error;
+	}
+
+	/* Allocate descs & buffers */
+	if (emac_lib_descs_alloc(dev,
+				QTN_BUFS_EMAC_RX_RING, EMAC_DESC_USE_SRAM,
+				QTN_BUFS_EMAC_TX_RING, EMAC_DESC_USE_SRAM)) {
+		goto descs_alloc_error;
+	}
+	/* NOTE(review): on failure this path skips emac_bufs_free(), so HBM
+	 * buffers attached before the failing allocation are not returned --
+	 * confirm (see topaz_emac_descs_init). */
+	if (topaz_emac_descs_init(dev)) {
+		goto bufs_alloc_error;
+	}
+
+	/* Register device */
+	if ((rc = register_netdev(dev)) != 0) {
+		printk(KERN_ERR "%s: register_netdev returns %d\n", __FUNCTION__, rc);
+		goto netdev_register_error;
+	}
+
+	/* presumably the TQE EMAC port numbering matches the EMAC index */
+	BUG_ON(priv->tqe_port != port_num);
+
+	if (switch_alloc_vlan_dev(port_num, EMAC_VDEV_IDX(port_num), dev->ifindex) == NULL) {
+		printk(KERN_ERR "%s: switch_alloc_vlan_dev returns error\n", __FUNCTION__);
+		goto tqe_register_error;
+	}
+
+	if (tqe_port_add_handler(port->tqe_port, &topaz_emac_tqe_rx_handler, dev)) {
+		printk(KERN_ERR "%s: topaz_port_add_handler returns error\n", __FUNCTION__);
+		goto switch_vlan_alloc_error;
+	}
+
+	/* Send EMAC through soft reset */
+	emac_wr(privc, EMAC_DMA_CONFIG, DmaSoftReset);
+	udelay(1000);
+	emac_wr(privc, EMAC_DMA_CONFIG, 0);
+
+	/* steer selected IP protocols to the LHost */
+	topaz_ipprt_clear_all_entries(port_num);
+	topaz_ipprt_set(port_num, IPPROTO_IGMP, TOPAZ_TQE_LHOST_PORT, 0);
+#if defined(TOPAZ_CONGE_CONFIG)
+	topaz_ipprt_set(port_num, IPPROTO_ICMP, TOPAZ_TQE_LHOST_PORT, 0);
+	topaz_ipprt_set(port_num, IPPROTO_TCP, TOPAZ_TQE_LHOST_PORT, 0);
+#endif
+	topaz_dpi_init(port_num);
+	topaz_dpi_filter_arp(port_num);
+	topaz_dpi_filter_dscp_vi(port_num);
+	topaz_dpi_filter_dhcp(port_num);
+#ifdef CONFIG_IPV6
+	topaz_dpi_filter_dhcpv6(port_num);
+	topaz_dpi_filter_icmpv6(port_num);
+#endif
+	emac_lib_init_dma(privc);
+	emac_lib_init_mac(dev);
+	topaz_emac_init_rxptxp(dev);
+
+	/* leave the interface administratively down until opened */
+	topaz_emac_ndo_stop(dev);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	proc_create_data(port->proc_name, 0, NULL, &topaz_emac_proc_fops, dev);
+#else
+	create_proc_read_entry(port->proc_name, 0, NULL, topaz_emac_proc_rd, dev);
+#endif
+	emac_lib_phy_power_create_proc(dev);
+	emac_lib_mdio_sysfs_create(dev);
+	topaz_emac_vlan_sel_sysfs_create(dev);
+	emac_lib_phy_reg_create_proc(dev);
+
+	if (bonding) {
+		fwt_sw_register_port_remapper(port->tqe_port, topaz_emac_fwt_sw_remap_port);
+		tqe_port_set_group(port->tqe_port, EMAC_BONDING_GROUP);
+		/* Don't multicast to both ports if they are bonded */
+		if (port->tqe_port == TOPAZ_TQE_EMAC_0_PORT)
+			tqe_port_register(port->tqe_port);
+	} else {
+		tqe_port_register(port->tqe_port);
+	}
+
+	/* Create sysfs for dscp misc operations */
+
+	topaz_emac_dscp_show_sysfs_create(dev);
+	topaz_emac_dscp_prio_sel_sysfs_create(dev);
+	topaz_emac_dscp_prio_val_sysfs_create(dev);
+	topaz_emac_dscp_update_sysfs_create(dev);
+	topaz_emacs_update_sysfs_create(dev);
+
+	topaz_emacs_wbsp_ctrl_sysfs_create(dev);
+
+	return dev;
+
+	/* unwind: each label undoes the steps completed before its failure */
+switch_vlan_alloc_error:
+	switch_free_vlan_dev_by_idx(EMAC_VDEV_IDX(port_num));
+tqe_register_error:
+	unregister_netdev(dev);
+netdev_register_error:
+	emac_bufs_free(dev);
+bufs_alloc_error:
+	emac_lib_descs_free(dev);
+descs_alloc_error:
+	emac_lib_mii_exit(dev);
+mii_init_error:
+	free_netdev(dev);
+
+	return NULL;
+}
+
+/*
+ * Tear down one EMAC port: mirror image of topaz_emac_init(), removing
+ * proc/sysfs entries, TQE/VLAN plumbing, the netdev, descriptor rings,
+ * buffers and MII, in reverse order of creation.
+ */
+static void __exit topaz_emac_exit(struct net_device *dev)
+{
+	struct topaz_emac_priv *priv = netdev_priv(dev);
+	struct emac_common *privc = &priv->com;
+	const struct emac_port_info * const port = &iflist[privc->mac_id];
+
+	topaz_emac_ndo_stop(dev);
+
+	emac_lib_phy_reg_remove_proc(dev);
+	topaz_emac_vlan_sel_sysfs_remove(dev);
+	emac_lib_mdio_sysfs_remove(dev);
+	emac_lib_phy_power_remove_proc(dev);
+	remove_proc_entry(port->proc_name, NULL);
+
+	tqe_port_unregister(priv->tqe_port);
+	tqe_port_remove_handler(priv->tqe_port);
+	switch_free_vlan_dev_by_idx(EMAC_VDEV_IDX(privc->mac_id));
+	unregister_netdev(dev);
+
+	emac_bufs_free(dev);
+	emac_lib_descs_free(dev);
+	emac_lib_mii_exit(dev);
+
+	free_netdev(dev);
+}
+
+/*
+ * Program the TDES1 control template the TQE applies to every EMAC TX
+ * descriptor (single-segment, interrupt-on-complete, chained), and
+ * enable the multicast append-counter feature.
+ */
+static void __init topaz_emac_init_tqe(void)
+{
+	uint32_t tdes = TxDescIntOnComplete | TxDescFirstSeg | TxDescLastSeg | TxDescChain2ndAddr;
+	uint32_t ctrl = 0;
+
+	ctrl |= SM(tdes >> TOPAZ_TQE_EMAC_TDES_1_CNTL_SHIFT, TOPAZ_TQE_EMAC_TDES_1_CNTL_VAL);
+	ctrl |= SM(1, TOPAZ_TQE_EMAC_TDES_1_CNTL_MCAST_APPEND_CNTR_EN);
+
+	writel(ctrl, TOPAZ_TQE_EMAC_TDES_1_CNTL);
+}
+
+/*
+ * Return the index of dev in the topaz_emacs[] table, or -1 if it is
+ * not one of our EMAC netdevs.
+ */
+static int topaz_emac_find_emac(const struct net_device *const dev)
+{
+	int idx = 0;
+
+	while (idx < ARRAY_SIZE(iflist)) {
+		if (topaz_emacs[idx] == dev)
+			return idx;
+		++idx;
+	}
+
+	return -1;
+}
+
+/* Count how many EMAC ports currently have link (topaz_emac_on[]). */
+static int topaz_emacs_connected_num(void)
+{
+	int idx = 0;
+	int in_use = 0;
+
+	while (idx < ARRAY_SIZE(iflist)) {
+		if (topaz_emac_on[idx])
+			++in_use;
+		++idx;
+	}
+
+	return in_use;
+}
+
+/* debounce period (seconds) before reacting to a dual-link change */
+#define TOPAZ_EMAC_LINK_CHECK_PERIOD	5
+/*
+ * netdev notifier: track per-EMAC carrier state and, when the
+ * "both EMACs connected" condition flips, (re)schedule the deferred
+ * work that updates link variables after the debounce period.
+ */
+static int topaz_emac_link_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+	unsigned int two_emacs_connected;
+	struct net_device *dev = ptr;
+	int emac_idx;
+
+	if (event != NETDEV_CHANGE) {
+		return NOTIFY_DONE;
+	}
+
+	/* ignore netdevs that are not ours */
+	emac_idx = topaz_emac_find_emac(dev);
+	if (emac_idx < 0)
+		return NOTIFY_DONE;
+
+	topaz_emac_on[emac_idx] = netif_carrier_ok(dev);
+
+	two_emacs_connected = (topaz_emacs_connected_num() > 1);
+	if (topaz_emac_prev_two_connected != two_emacs_connected) {
+		pm_flush_work(&topaz_emac_dual_emac_work);
+		topaz_emac_prev_two_connected = two_emacs_connected;
+		pm_queue_work(&topaz_emac_dual_emac_work, HZ * TOPAZ_EMAC_LINK_CHECK_PERIOD);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* deferred work body: propagate the dual-EMAC-connected state */
+void topaz_emac_work_fn(struct work_struct *work)
+{
+	emac_lib_update_link_vars(topaz_emac_prev_two_connected);
+}
+
+/* registered in module_init only when more than one EMAC is present */
+static struct notifier_block topaz_link_notifier = {
+	.notifier_call = topaz_emac_link_event,
+};
+
+/*
+ * Module entry point: set up the HBM underflow IRQ (optional), reset the
+ * external PHY when appropriate, program the TQE TX template, then probe
+ * every EMAC port. Fails with -ENODEV when no port comes up; registers
+ * the link notifier only when more than one port exists.
+ */
+static int __init topaz_emac_module_init(void)
+{
+	int i;
+	int found = 0;
+	int emac_cfg_p0, emac_cfg_p1;
+	int emac_phy;
+
+	printk("emac wbsp: %d\n", emac_wbsp_ctrl);
+
+	/* NOTE(review): this is only a warning -- init continues even when
+	 * the expected HBM skb allocator is not configured */
+	if (!TOPAZ_HBM_SKB_ALLOCATOR_DEFAULT) {
+		printk(KERN_ERR "%s: switch_emac should be used with topaz hbm skb allocator only\n", __FUNCTION__);
+	}
+#ifdef TOPAZ_EMAC_NULL_BUF_WR
+	if (request_irq(TOPAZ_IRQ_HBM, &topaz_hbm_handler, 0, "hbm", topaz_emacs)) {
+		printk(KERN_ERR "Fail to request IRQ %d\n", TOPAZ_IRQ_HBM);
+		return -ENODEV;
+	}
+	topaz_hbm_emac_rx_pool_intr_init();
+	topaz_hbm_emac_rx_pool_uf_intr_en();
+#endif
+	emac_lib_board_cfg(0, &emac_cfg_p0, &emac_phy);
+	emac_lib_board_cfg(1, &emac_cfg_p1, &emac_phy);
+
+	if (_read_hardware_revision() >= HARDWARE_REVISION_TOPAZ_A2) {
+		topaz_tqe_emac_reflect_to(TOPAZ_TQE_LHOST_PORT, bonding);
+		printk("enable A2 %s\n", bonding ? "(bonded)":"(single)");
+	}
+
+	/* We only use rtl switch as PHY, do not do reset which will restore
+	 * to switch mode again. Only do so when using rtl ethernet tranceiver.
+	 */
+	if (!emac_lib_rtl_switch(emac_cfg_p0 | emac_cfg_p1)) {
+		/* Reset ext PHY. This is for bug#11906 */
+		emac_lib_enable(1);
+	}
+
+	topaz_emac_init_tqe();
+	topaz_vlan_clear_all_entries();
+
+	/* probe each possible EMAC port; record initial carrier state */
+	for (i = 0; i < ARRAY_SIZE(iflist); i++) {
+		topaz_emacs[i] = topaz_emac_init(i);
+		if (topaz_emacs[i]) {
+			topaz_emac_on[i] = netif_carrier_ok(topaz_emacs[i]);
+			found++;
+		}
+	}
+
+	if (!found) {
+#ifdef TOPAZ_EMAC_NULL_BUF_WR
+		free_irq(TOPAZ_IRQ_HBM, topaz_emacs);
+#endif
+		return -ENODEV;
+	} else {
+		if (found > 1) {
+			INIT_DELAYED_WORK(&topaz_emac_dual_emac_work, topaz_emac_work_fn);
+			register_netdevice_notifier(&topaz_link_notifier);
+		}
+		emac_lib_pm_save_add_notifier();
+	}
+
+	return 0;
+}
+
+/*
+ * Module exit: tear down every probed EMAC, then undo the module-init
+ * registrations (PM notifier, link notifier + pending work, HBM IRQ) in
+ * the same conditional pattern used when they were set up.
+ */
+static void __exit topaz_emac_module_exit(void)
+{
+	int i;
+	int found = 0;
+
+	for (i = 0; i < ARRAY_SIZE(iflist); i++) {
+		if (topaz_emacs[i]) {
+			topaz_emac_exit(topaz_emacs[i]);
+			++found;
+		}
+	}
+
+	if (found) {
+		emac_lib_pm_save_remove_notifier();
+	}
+
+	if (found > 1) {
+		unregister_netdevice_notifier(&topaz_link_notifier);
+		pm_flush_work(&topaz_emac_dual_emac_work);
+	}
+#ifdef TOPAZ_EMAC_NULL_BUF_WR
+	free_irq(TOPAZ_IRQ_HBM, topaz_emacs);
+#endif
+}
+
+/* module hookup */
+module_init(topaz_emac_module_init);
+module_exit(topaz_emac_module_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/qtn/topaz/switch_tqe.c b/drivers/qtn/topaz/switch_tqe.c
new file mode 100644
index 0000000..1e09715
--- /dev/null
+++ b/drivers/qtn/topaz/switch_tqe.c
@@ -0,0 +1,1691 @@
+/**
+ * Copyright (c) 2011-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+#include <asm/system.h>
+#endif
+
+#include <qtn/dmautil.h>
+#include <drivers/ruby/dma_cache_ops.h>
+
+#include "topaz_test.h"
+#include <qtn/topaz_fwt_sw.h>
+#include <qtn/topaz_fwt_db.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_tqe.h>
+#include <qtn/topaz_hbm_cpuif.h>
+#include <qtn/topaz_hbm.h>
+#include <qtn/topaz_fwt.h>
+#include <qtn/topaz_vlan_cpuif.h>
+#include "net80211/ieee80211.h"
+#include "net80211/if_ethersubr.h"
+#include <qtn/qtn_net_packet.h>
+#include <qtn/qdrv_sch.h>
+#include <qtn/topaz_congest_queue.h>
+#include <qtn/qtn_wowlan.h>
+#include <qtn/iputil.h>
+#include <qtn/mproc_sync.h>
+#include <qtn/qtn_vlan.h>
+#include "linux/net/bridge/br_public.h"
+
+/* DSCP override: when g_dscp_flag is set, per-EMAC g_dscp_value supplies the TID */
+int g_dscp_flag = 0;
+int g_dscp_value[2];
+/* WoWLAN state shared with the wireless driver; 0x0842 is the WoL magic ethertype */
+uint16_t g_wowlan_host_state = 0;
+uint16_t g_wowlan_match_type = 0;
+uint16_t g_wowlan_l2_ether_type = 0x0842;
+uint16_t g_wowlan_l3_udp_port = 0xffff;
+/* L2 external filter: redirect frames from one port; port defaults to "none" */
+uint8_t g_l2_ext_filter = 0;
+uint8_t g_l2_ext_filter_port = TOPAZ_TQE_NUM_PORTS;
+EXPORT_SYMBOL(g_l2_ext_filter);
+EXPORT_SYMBOL(g_l2_ext_filter_port);
+EXPORT_SYMBOL(g_wowlan_host_state);
+EXPORT_SYMBOL(g_wowlan_match_type);
+EXPORT_SYMBOL(g_wowlan_l2_ether_type);
+EXPORT_SYMBOL(g_wowlan_l3_udp_port);
+EXPORT_SYMBOL(g_dscp_flag);
+EXPORT_SYMBOL(g_dscp_value);
+
+/* gate for the cross-CPU TQE hardware semaphore (PCIe target builds) */
+int tqe_sem_en = 0;
+module_param(tqe_sem_en, int, 0644);
+
+/* Private state of the TQE pseudo network device */
+struct tqe_netdev_priv {
+	struct napi_struct napi;
+	struct net_device_stats stats;
+
+	struct topaz_congest_queue *congest_queue;
+
+	/* DMA-aligned TQE RX descriptor ring */
+	ALIGNED_DMA_DESC(union, topaz_tqe_cpuif_descr) rx;
+};
+
+/* FWT lookup/update callbacks, installed at runtime by the FWT module */
+static tqe_fwt_get_mcast_hook g_tqe_fwt_get_mcast_hook = NULL;
+static tqe_fwt_get_mcast_ff_hook g_tqe_fwt_get_mcast_ff_hook = NULL;
+static tqe_fwt_get_ucast_hook g_tqe_fwt_get_ucast_hook = NULL;
+static tqe_fwt_false_miss_hook g_tqe_fwt_false_miss_hook = NULL;
+static tqe_fwt_get_from_mac_hook g_tqe_fwt_get_from_mac_hook = NULL;
+static tqe_fwt_add_from_mac_hook g_tqe_fwt_add_from_mac_hook = NULL;
+static tqe_fwt_del_from_mac_hook g_tqe_fwt_del_from_mac_hook = NULL;
+
+static tqe_mac_reserved_hook g_tqe_mac_reserved_hook = NULL;
+static inline void __sram_text tqe_rx_pkt_drop(const union topaz_tqe_cpuif_descr *desc);
+
+/* Per-ingress-port RX handler, its opaque token and bonding group id */
+static struct {
+	tqe_port_handler handler;
+	void *token;
+	int32_t group;
+} tqe_port_handlers[TOPAZ_TQE_NUM_PORTS];
+
+/*
+ * Install the RX callback for packets arriving on @port.
+ * Returns 0, or -EINVAL for an out-of-range port / NULL handler.
+ * Note: silently replaces any previously installed handler.
+ */
+int tqe_port_add_handler(enum topaz_tqe_port port, tqe_port_handler handler, void *token)
+{
+	if (port >= TOPAZ_TQE_NUM_PORTS || !handler) {
+		return -EINVAL;
+	}
+
+	tqe_port_handlers[port].handler = handler;
+	tqe_port_handlers[port].token = token;
+
+	return 0;
+}
+EXPORT_SYMBOL(tqe_port_add_handler);
+
+/* Remove the RX callback for @port; logs and bails if none is installed. */
+void tqe_port_remove_handler(enum topaz_tqe_port port)
+{
+	if (port >= TOPAZ_TQE_NUM_PORTS || !tqe_port_handlers[port].handler) {
+		printk(KERN_ERR "%s: invalid port %u\n", __FUNCTION__, port);
+		return;
+	}
+
+	tqe_port_handlers[port].handler = NULL;
+	tqe_port_handlers[port].token = NULL;
+}
+EXPORT_SYMBOL(tqe_port_remove_handler);
+
+/*
+ * Add/remove @port in the software flood-forwarding multicast entry
+ * (fetched via the FWT hook) and flush the change to hardware.
+ * Silently does nothing if the FWT module has not registered its hook.
+ */
+static void tqe_port_set(const enum topaz_tqe_port port, const uint8_t enable)
+{
+	struct topaz_fwt_sw_mcast_entry *mcast_ent;
+
+	if (!g_tqe_fwt_get_mcast_ff_hook) {
+		return;
+	}
+
+	mcast_ent = g_tqe_fwt_get_mcast_ff_hook();
+	if (unlikely(!mcast_ent)) {
+		return;
+	}
+	if (enable) {
+		topaz_fwt_sw_mcast_port_set(mcast_ent, port);
+	} else {
+		topaz_fwt_sw_mcast_port_clear(mcast_ent, port);
+	}
+	topaz_fwt_sw_mcast_flush(mcast_ent);
+}
+
+/*
+ * Assign @port to a bonding group (group > 0); ports sharing a group are
+ * never forwarded to each other (see tqe_port_same_group()).
+ */
+void tqe_port_set_group(const enum topaz_tqe_port port, int32_t group)
+{
+	if ((port < TOPAZ_TQE_NUM_PORTS) && (group > 0))
+		tqe_port_handlers[port].group = group;
+}
+EXPORT_SYMBOL(tqe_port_set_group);
+
+/* Detach @port from its bonding group (0 == no group) */
+void tqe_port_clear_group(const enum topaz_tqe_port port)
+{
+	if (port < TOPAZ_TQE_NUM_PORTS)
+		tqe_port_handlers[port].group = 0;
+}
+EXPORT_SYMBOL(tqe_port_clear_group);
+
+/* Include @port in multicast flood-forwarding */
+void tqe_port_register(const enum topaz_tqe_port port)
+{
+	tqe_port_set(port, 1);
+}
+EXPORT_SYMBOL(tqe_port_register);
+
+/* Exclude @port from multicast flood-forwarding */
+void tqe_port_unregister(const enum topaz_tqe_port port)
+{
+	tqe_port_set(port, 0);
+}
+EXPORT_SYMBOL(tqe_port_unregister);
+
+/* 1 if both ports belong to the same (non-zero) bonding group, else 0 */
+static inline int tqe_port_same_group(enum topaz_tqe_port in_port, enum topaz_tqe_port out_port)
+{
+	if (tqe_port_handlers[in_port].group > 0 &&
+			tqe_port_handlers[in_port].group == tqe_port_handlers[out_port].group)
+		return 1;
+
+	return 0;
+}
+
+/* Optional callback used to account multicast TX per wireless node */
+struct update_multicast_tx_stats {
+	void (*fn)(void *ctx, uint8_t node);
+	void *ctx;
+};
+
+struct update_multicast_tx_stats update_multicast;
+
+/* Register the multicast TX stats callback (invoked from tqe_push_mcast()) */
+void tqe_reg_multicast_tx_stats(void (*fn)(void *ctx, uint8_t), void *ctx)
+{
+	update_multicast.fn = fn;
+	update_multicast.ctx = ctx;
+}
+EXPORT_SYMBOL(tqe_reg_multicast_tx_stats);
+
+#if defined(CONFIG_ARCH_TOPAZ_SWITCH_TEST) || defined(CONFIG_ARCH_TOPAZ_SWITCH_TEST_MODULE)
+/*
+ * Test hook: frames with magic src/dst MACs carry an ASCII command at
+ * payload offset 128; split it into words and dispatch to the matching
+ * switch-test parser.  Test builds only.
+ */
+static void topaz_tqe_test_ctrl(const uint8_t *buff_virt_rx)
+{
+	const uint8_t ctrl_dstmac[ETH_ALEN] = TOPAZ_TEST_CTRL_DSTMAC;
+	const uint8_t ctrl_srcmac[ETH_ALEN] = TOPAZ_TEST_CTRL_SRCMAC;
+
+	if (memcmp(&buff_virt_rx[ETH_ALEN * 0], ctrl_dstmac, ETH_ALEN) == 0 &&
+		memcmp(&buff_virt_rx[ETH_ALEN * 1], ctrl_srcmac, ETH_ALEN) == 0) {
+
+		const char *test_str = (const char *)&buff_virt_rx[128];
+		unsigned long len;
+		char *cmd = NULL;
+		char **words = NULL;
+		int rc;	/* NOTE(review): set but unused on the ENOMEM path */
+		int word_count;
+		int (*parse)(int, char**) = NULL;
+
+		len = strlen(test_str);
+		cmd = kmalloc(len + 1, GFP_KERNEL);
+		/* worst case one word per two chars of command text */
+		words = kmalloc(len * sizeof(char *) / 2, GFP_KERNEL);
+		if (!cmd || !words) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		strcpy(cmd, test_str);
+		word_count = topaz_test_split_words(words, cmd);
+
+		/* first word selects the test module, the rest are its argv */
+		if (strcmp(words[0], "dpi_test") == 0) {
+			parse = &topaz_dpi_test_parse;
+		} else if (strcmp(words[0], "fwt_test") == 0) {
+			parse = &topaz_fwt_test_parse;
+		} else if (strcmp(words[0], "ipprt_emac0") == 0) {
+			parse = &topaz_ipprt_emac0_test_parse;
+		} else if (strcmp(words[0], "ipprt_emac1") == 0) {
+			parse = &topaz_ipprt_emac1_test_parse;
+		} else if (strcmp(words[0], "vlan_test") == 0) {
+			parse = &topaz_vlan_test_parse;
+		} else {
+			printk("%s: invalid ctrl packet\n", __FUNCTION__);
+		}
+
+		if (parse) {
+			rc = parse(word_count - 1, words + 1);
+			printk("%s: rc %d '%s'\n", __FUNCTION__, rc, test_str);
+		}
+out:
+		if (cmd)
+			kfree(cmd);
+		if (words)
+			kfree(words);
+	}
+}
+#endif
+
+/*
+ * Acquire the 3-way (LHost/MuC/DSP) TQE hardware semaphore, spinning until
+ * it is obtained; logs every 256 failed attempts.  Always returns 1.
+ * No-op unless built for the PCIe target with tqe_sem_en set.
+ */
+uint32_t
+switch_tqe_multi_proc_sem_down(char * funcname, int linenum)
+{
+#ifdef CONFIG_TOPAZ_PCIE_TARGET
+	uint32_t prtcnt;
+
+	if (tqe_sem_en == 0)
+		return 1;
+
+	prtcnt = 0;
+	while (_qtn_mproc_3way_tqe_sem_down(TOPAZ_MPROC_TQE_SEM_LHOST) == 0) {
+		if ((prtcnt & 0xff) == 0)
+			printk("%s line %d fail to get tqe semaphore\n", funcname, linenum);
+		prtcnt++;
+	}
+#endif
+	return 1;
+}
+
+EXPORT_SYMBOL(switch_tqe_multi_proc_sem_down);
+
+/*
+ * Release the 3-way TQE hardware semaphore taken by
+ * switch_tqe_multi_proc_sem_down().  Returns 1 on success, 0 on failure.
+ * No-op (always 1) unless built for the PCIe target with tqe_sem_en set.
+ */
+uint32_t
+switch_tqe_multi_proc_sem_up(void)
+{
+#ifdef CONFIG_TOPAZ_PCIE_TARGET
+	if (tqe_sem_en == 0)
+		return 1;
+
+	if (_qtn_mproc_3way_tqe_sem_up(TOPAZ_MPROC_TQE_SEM_LHOST)) {
+		return 1;
+	} else {
+		/* fixed typo in diagnostic text: "relese" -> "release" */
+		WARN_ONCE(1, "%s failed to release HW semaphore\n", __func__);
+		return 0;
+	}
+#else
+	return 1;
+#endif
+}
+
+EXPORT_SYMBOL(switch_tqe_multi_proc_sem_up);
+
+/*
+ * Prime the HBM buffer's enqueue/free reference counters, stored in the
+ * buffer's metadata area.  A negative value leaves that counter untouched.
+ * Writes go through the uncached alias so other CPUs see them immediately.
+ */
+static void tqe_buf_set_refcounts(void *buf_start, int32_t enqueue, int32_t free)
+{
+	uint32_t *p = buf_start;
+	uint32_t *_m = topaz_hbm_buf_get_meta(p);
+	uint32_t *enqueuep = _m - HBM_HR_OFFSET_ENQ_CNT;
+	uint32_t *freep = _m - HBM_HR_OFFSET_FREE_CNT;
+
+	if (enqueue >= 0)
+		arc_write_uncached_32(enqueuep, enqueue);
+	if (free >= 0)
+		arc_write_uncached_32(freep, free);
+}
+
+/*
+ * Push one packet-push control block to the TQE under the cross-CPU
+ * semaphore, then poll the TX-start status register.
+ * Returns NET_XMIT_SUCCESS, or NET_XMIT_CN if the hardware rejected it.
+ */
+int topaz_tqe_xmit(union topaz_tqe_cpuif_ppctl *pp_cntl)
+{
+	int num = topaz_tqe_cpuif_port_to_num(TOPAZ_TQE_LOCAL_CPU);
+
+	topaz_tqe_wait();
+	switch_tqe_multi_proc_sem_down("topaz_tqe_xmit",__LINE__);
+	topaz_tqe_cpuif_tx_start(pp_cntl);
+	switch_tqe_multi_proc_sem_up();
+
+	wmb();
+
+	/* wait for the push to complete before reading the status bit */
+	topaz_tqe_wait();
+	if ((qtn_mproc_sync_mem_read(TOPAZ_TQE_CPUIF_TXSTART(num)) &
+			TOPAZ_TQE_CPUIF_TX_START_NOT_SUCCESS))
+		return NET_XMIT_CN;
+	else
+		return NET_XMIT_SUCCESS;
+
+}
+
+/*
+ * Transmit @ppctl via the per-node/TID congestion machinery.
+ *
+ * If the (node, tid) queue is already congested, enqueue (dropping the
+ * buffer on overflow) and drain a bounded burst.  Otherwise try a direct
+ * transmit; on hardware push-back, create a congestion queue and park the
+ * frame there, or drop the buffer if no queue can be allocated.
+ */
+void topaz_tqe_congest_queue_process(const union topaz_tqe_cpuif_descr *desc,
+		void *queue, uint8_t node, uint8_t tqe_tid,
+		union topaz_tqe_cpuif_ppctl *ppctl, uint8_t is_unicast)
+{
+	struct topaz_congest_queue *congest_queue = (struct topaz_congest_queue *)queue;
+	struct topaz_congest_q_desc *q_desc;
+	int8_t re_sched = 0;
+	int8_t ret = 0;
+
+	if (topaz_queue_congested(congest_queue, node, tqe_tid)) {
+		q_desc = topaz_get_congest_queue(congest_queue, node, tqe_tid);
+		ret = topaz_congest_enqueue(q_desc, ppctl);
+		if (ret == NET_XMIT_CN) {
+			topaz_hbm_congest_queue_put_buf(ppctl);
+		}
+
+		re_sched = topaz_congest_queue_xmit(q_desc, TOPAZ_SOFTIRQ_BUDGET);
+		if (re_sched)
+			tasklet_schedule(&congest_queue->congest_tx);
+
+	} else {
+		ret = congest_queue->xmit_func(ppctl);
+
+		if (unlikely(ret != NET_XMIT_SUCCESS)) {
+			/* direct push failed: queue the frame for later draining */
+			if (is_unicast)
+				q_desc = topaz_congest_alloc_unicast_queue(congest_queue,
+										node, tqe_tid);
+			else
+				q_desc = topaz_congest_alloc_queue(congest_queue, node, tqe_tid);
+
+			if (!q_desc) {
+				topaz_hbm_congest_queue_put_buf(ppctl);
+			} else {
+				ret = topaz_congest_enqueue(q_desc, ppctl);
+
+				if (ret == NET_XMIT_CN) {
+					topaz_hbm_congest_queue_put_buf(ppctl);
+				} else {
+					tasklet_schedule(&congest_queue->congest_tx);
+				}
+			}
+		}
+	}
+}
+
+/* VLAN device for an egress target: by port where indexable, else by node */
+static inline struct qtn_vlan_dev *tqe_get_vlandev(uint8_t port, uint8_t node)
+{
+	if (qtn_vlan_port_indexable(port))
+		return vport_tbl_lhost[port];
+	else
+		return switch_vlan_dev_from_node(node);
+}
+
+/*
+ * Push a packet to the TQE
+ *
+ * Per-(port, node, tid) push of one multicast copy.  token1 is the RX
+ * descriptor, token2 is the optional congestion queue; when no congestion
+ * queue is used the TQE is told to free the buffer itself (tqe_free).
+ * Runs the VLAN egress filter first and drops the frame if it fails.
+ */
+static void __sram_text tqe_push_mcast(const void *token1, void *token2,
+					uint8_t port, uint8_t node, uint8_t tid)
+{
+	const union topaz_tqe_cpuif_descr *desc = token1;
+	union topaz_tqe_cpuif_ppctl ppctl;
+	const uint8_t portal = 1;
+	const uint16_t misc_user = 0;
+	void *queue = token2;
+	uint8_t tqe_free = queue ? 0 : 1;
+	struct qtn_vlan_dev *vdev;
+
+	if (vlan_enabled) {
+		vdev = tqe_get_vlandev(port, node);
+		if (!qtn_vlan_egress(vdev, node, bus_to_virt((uintptr_t)desc->data.pkt),
+				vdev->port == TOPAZ_TQE_WMAC_PORT, 1)) {
+			tqe_rx_pkt_drop(desc);
+			return;
+		}
+	}
+
+	topaz_tqe_cpuif_ppctl_init(&ppctl,
+			port, &node, 1, tid,
+			portal, 1, TOPAZ_HBM_EMAC_TX_DONE_POOL, tqe_free, misc_user);
+
+	ppctl.data.pkt = desc->data.pkt;
+	ppctl.data.length = desc->data.length;
+	ppctl.data.buff_ptr_offset = desc->data.buff_ptr_offset;
+
+	if (queue) {
+		topaz_tqe_congest_queue_process(desc, queue, node, tid, &ppctl, 0);
+	} else {
+		topaz_tqe_wait();
+		switch_tqe_multi_proc_sem_down("tqe_push_mcast",__LINE__);
+		topaz_tqe_cpuif_tx_start(&ppctl);
+		switch_tqe_multi_proc_sem_up();
+	}
+
+	/* account per-node multicast TX if a stats callback is registered */
+	if (port == TOPAZ_TQE_WMAC_PORT && update_multicast.fn)
+		update_multicast.fn(update_multicast.ctx, node);
+}
+
+/*
+ * Count how many TQE enqueues a multicast frame will generate, so the HBM
+ * buffer refcount can be primed before the pushes are issued.
+ *
+ * Ports: each bit in @port_bitmap except the ingress port, ports bonded
+ * with it, and the WMAC port (WMAC traffic is counted per node instead).
+ * Nodes: one per node bit, minus the ingress node if present.
+ *
+ * Fix: re-indented with tabs per kernel coding style (was space-indented);
+ * no functional change.
+ */
+static inline uint32_t tqe_mcast_enqueue_cnt(const struct topaz_fwt_sw_mcast_entry *const e,
+		uint8_t port_bitmap, const uint8_t in_port, const uint8_t in_node)
+{
+	uint32_t enqueues = 0;
+	uint8_t i;
+
+	/* Exclude input port. If WMAC, the port doesn't contribute, only nodes. */
+	port_bitmap &= ~((1 << in_port) | (1 << TOPAZ_TQE_WMAC_PORT));
+	for (i = 0; i < TOPAZ_TQE_NUM_PORTS; i++) {
+		if ((port_bitmap & BIT(i)) && !tqe_port_same_group(in_port, i))
+			enqueues++;
+	}
+
+	/* add wmac nodes */
+	for (i = 0; i < ARRAY_SIZE(e->node_bitmap); i++) {
+		enqueues += topaz_fwt_sw_count_bits(e->node_bitmap[i]);
+	}
+
+	/* must exclude the input node */
+	if (topaz_fwt_sw_mcast_node_is_set(e, in_port, in_node)) {
+		--enqueues;
+	}
+
+	return enqueues;
+}
+
+/*
+ * returns the number of TQE pushes; 0 means buffer is not consumed here
+ */
+static uint32_t __sram_text tqe_push_mc_ports(void *queue,
+		const struct topaz_fwt_sw_mcast_entry *mcast_ent_shared,
+		const union topaz_tqe_cpuif_descr *desc, uint8_t tid, uint8_t in_node,
+		uint32_t header_access_bytes)
+{
+	/* local copy so the shared entry isn't mutated while we shift bits */
+	struct topaz_fwt_sw_mcast_entry mcast_ent;
+	enum topaz_tqe_port in_port = desc->data.in_port;
+	uint32_t push_count;
+	uint32_t pushes = 0;
+	uint8_t port = TOPAZ_TQE_FIRST_PORT;
+	void *buf_bus_rx = desc->data.pkt;
+	void *buf_virt_rx = bus_to_virt((unsigned long) buf_bus_rx);
+	const struct ether_header *eh = buf_virt_rx;
+	int offset = desc->data.buff_ptr_offset;
+
+	mcast_ent = *mcast_ent_shared;
+
+	/* The MuC handles snooped multicast directly */
+	if (in_port == TOPAZ_TQE_WMAC_PORT || in_port == TOPAZ_TQE_MUC_PORT) {
+		if (printk_ratelimit())
+			printk("%s: mcast pkt from mac t=%04x d=%pM s=%pM\n", __FUNCTION__,
+				eh->ether_type, eh->ether_dhost, eh->ether_shost);
+		return 0;
+	}
+
+	/* find the expected enqueue count and set the HBM buffer reference count */
+	push_count = tqe_mcast_enqueue_cnt(&mcast_ent, mcast_ent.port_bitmap,
+						in_port, in_node);
+	if (unlikely(!push_count)) {
+		return 0;
+	}
+
+	tqe_buf_set_refcounts((uint8_t *)buf_virt_rx + offset, push_count, 0);
+
+	/* write the inspected headers back to memory before the TQE DMAs them */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_wback_inv((unsigned long) buf_bus_rx, header_access_bytes);
+#else
+	flush_and_inv_dcache_range((unsigned long)buf_virt_rx, (unsigned long)(buf_virt_rx + header_access_bytes));
+#endif
+	/* push this packet to the tqe for each port/node */
+	while (mcast_ent.port_bitmap) {
+		if (mcast_ent.port_bitmap & 0x1) {
+			if (topaz_fwt_sw_mcast_port_has_nodes(port)) {
+				pushes += topaz_fwt_sw_mcast_do_per_node(tqe_push_mcast,
+							&mcast_ent, desc, queue, in_node, port, tid);
+			} else {
+				if (port != in_port && !tqe_port_same_group(in_port, port))  {
+					tqe_push_mcast(desc, NULL, port, 0, 0);
+					++pushes;
+				}
+			}
+		}
+		mcast_ent.port_bitmap >>= 1;
+		port++;
+	}
+
+	/* refcount was primed with push_count; a mismatch strands the buffer */
+	if (unlikely(pushes != push_count)) {
+		printk(KERN_CRIT "%s: pushes %u push_count %u, buffer leak imminent\n",
+				__FUNCTION__, pushes, push_count);
+	}
+
+	return push_count;
+}
+
+/*
+ * Resolve the wireless node index of the frame's *source* MAC via the
+ * unicast FWT hook; returns 0 when unknown.
+ * NOTE(review): ether_shost is passed as both arguments of the hook
+ * (elsewhere called as (shost, dhost)) — looks intentional for a
+ * source-only lookup, but worth confirming against the hook's contract.
+ */
+int __sram_text tqe_rx_get_node_id(const struct ether_header *eh)
+{
+	const struct fwt_db_entry *fwt_ent = NULL;
+
+	if (likely(g_tqe_fwt_get_ucast_hook)) {
+		fwt_ent = g_tqe_fwt_get_ucast_hook(eh->ether_shost, eh->ether_shost);
+		if (likely(fwt_ent) && fwt_ent->valid)
+			return fwt_ent->out_node;
+	}
+
+	return 0;
+}
+
+/*
+ * Return a pointer to the real ethertype field, stepping over any stacked
+ * 802.1Q tags; NULL if @len is too short to contain the header(s).
+ */
+inline
+const uint16_t *
+tqe_rx_ether_type_skip_vlan(const struct ether_header *eh, uint32_t len)
+{
+	const uint16_t *ether_type = &eh->ether_type;
+
+	if (len < sizeof(struct ether_header)) {
+		return NULL;
+	}
+
+	while(qtn_ether_type_is_vlan(*ether_type)) {
+		if (len < sizeof(struct ether_header) + VLAN_HLEN) {
+			return NULL;
+		}
+		ether_type += VLAN_HLEN / sizeof(*ether_type);
+		len -= VLAN_HLEN;
+	}
+
+	return ether_type;
+}
+
+/*
+ * Try to hardware-forward a received multicast frame.
+ * Identifies IPv4/IPv6 multicast (excluding IGMP), looks up the FWT
+ * software multicast entry for (dst MAC, group address) and, if found,
+ * replicates the frame to all member ports/nodes via tqe_push_mc_ports().
+ * Returns the number of TQE pushes; 0 means "not consumed, pass to host".
+ */
+int __sram_text tqe_rx_multicast(void *congest_queue, const union topaz_tqe_cpuif_descr *desc)
+{
+	int timeout;
+	union topaz_fwt_lookup fwt_lu;
+	const struct topaz_fwt_sw_mcast_entry *mcast_ent = NULL;
+	const struct ether_header *eh = bus_to_virt((uintptr_t) desc->data.pkt);
+	const enum topaz_tqe_port in_port = desc->data.in_port;
+	const void *ipaddr = NULL;
+	uint8_t tid = 0;
+	const uint16_t *ether_type = tqe_rx_ether_type_skip_vlan(eh, desc->data.length);
+	const void *iphdr = NULL;
+	uint8_t vlan_index;
+	uint8_t in_node = 0;
+	uint32_t header_access_bytes = 0;
+	uint32_t ether_payload_length = 0;
+	uint8_t false_miss = 0;
+
+	if (unlikely(!ether_type)) {
+		printk(KERN_WARNING "%s: malformed packet in_port %u\n", __FUNCTION__, in_port);
+		return 0;
+	}
+
+	iphdr = ether_type + 1;
+	ether_payload_length = desc->data.length - ((char *)iphdr - (char *)eh);
+	/* bytes of the frame actually inspected; drives the cache flush later */
+	header_access_bytes = iphdr - (const void *)eh;
+
+	/* FIXME: this won't work for 802.3 frames */
+	if (*ether_type == __constant_htons(ETH_P_IP)
+			&& iputil_mac_is_v4_multicast(eh->ether_dhost)
+			&& (ether_payload_length >= sizeof(struct qtn_ipv4))) {
+		const struct qtn_ipv4 *ipv4 = (const struct qtn_ipv4 *)iphdr;
+		/* do not accelerate IGMP */
+		if (ipv4->proto == QTN_IP_PROTO_IGMP) {
+			return 0;
+		}
+		ipaddr = &ipv4->dstip;
+
+		/* Option field doesn't take into account because they are not accessed */
+		header_access_bytes += sizeof (struct qtn_ipv4);
+	} else if (*ether_type == __constant_htons(ETH_P_IPV6)
+			&& iputil_mac_is_v6_multicast(eh->ether_dhost)
+			&& (ether_payload_length >= sizeof(struct qtn_ipv6))) {
+		const struct qtn_ipv6 *ipv6 = (const struct qtn_ipv6 *)iphdr;
+		ipaddr = &ipv6->dstip;
+
+		header_access_bytes += sizeof (struct qtn_ipv6);
+	}
+
+	if (ipaddr) {
+		topaz_tqe_vlan_gettid(bus_to_virt((uintptr_t)(desc->data.pkt)), &tid, &vlan_index);
+		fwt_lu = topaz_fwt_hw_lookup_wait_be(eh->ether_dhost, &timeout, &false_miss);
+		if (fwt_lu.data.valid && !timeout) {
+#ifndef TOPAZ_DISABLE_FWT_WAR
+			/* report suspected hardware false-miss so FWT can repair itself */
+			if (unlikely(false_miss && g_tqe_fwt_false_miss_hook))
+				g_tqe_fwt_false_miss_hook(fwt_lu.data.entry_addr, false_miss);
+#endif
+
+			if (g_tqe_fwt_get_mcast_hook)
+				mcast_ent = g_tqe_fwt_get_mcast_hook(fwt_lu.data.entry_addr,
+						ipaddr, *ether_type);
+
+			if (mcast_ent) {
+				/* flood-forward from MuC needs the ingress node to avoid echo */
+				if ((mcast_ent->flood_forward) && (in_port == TOPAZ_TQE_MUC_PORT)) {
+					in_node = tqe_rx_get_node_id(eh);
+					if (in_node == 0)
+						return 0;
+				}
+				return tqe_push_mc_ports(congest_queue, mcast_ent, desc, tid,
+								in_node, header_access_bytes);
+			}
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tqe_rx_multicast);
+
+/*
+ * Drop a received frame: sanitize caches over the payload and return the
+ * HBM buffer to its pool via the tx-done filter, with IRQs disabled since
+ * the free path is not interrupt-safe.
+ */
+static inline void __sram_text tqe_rx_pkt_drop(const union topaz_tqe_cpuif_descr *desc)
+{
+	void *buf_virt_rx = bus_to_virt((unsigned long) desc->data.pkt);
+	uint16_t buflen = desc->data.length;
+	const int8_t dest_pool = topaz_hbm_payload_get_pool_bus(desc->data.pkt);
+	void *buf_bus = topaz_hbm_payload_store_align_bus(desc->data.pkt, dest_pool, 0);
+	unsigned long flags;
+
+	cache_op_before_rx(buf_virt_rx, buflen, 0);
+
+	local_irq_save(flags);
+	topaz_hbm_filter_txdone_buf(buf_bus);
+	local_irq_restore(flags);
+}
+
+/* Install the reserved-MAC check callback */
+void tqe_register_mac_reserved_cbk(tqe_mac_reserved_hook cbk_func)
+{
+	g_tqe_mac_reserved_hook = cbk_func;
+}
+EXPORT_SYMBOL(tqe_register_mac_reserved_cbk);
+
+/* Install the unicast FWT lookup callback */
+void tqe_register_ucastfwt_cbk(tqe_fwt_get_ucast_hook cbk_func)
+{
+	g_tqe_fwt_get_ucast_hook = cbk_func;
+}
+EXPORT_SYMBOL(tqe_register_ucastfwt_cbk);
+
+/* Install the by-MAC FWT get/add/delete callbacks as one atomic set */
+void tqe_register_macfwt_cbk(tqe_fwt_get_from_mac_hook cbk_func,
+			tqe_fwt_add_from_mac_hook add_func, tqe_fwt_del_from_mac_hook del_func)
+{
+	 g_tqe_fwt_get_from_mac_hook = cbk_func;
+	 g_tqe_fwt_add_from_mac_hook = add_func;
+	 g_tqe_fwt_del_from_mac_hook = del_func;
+}
+EXPORT_SYMBOL(tqe_register_macfwt_cbk);
+
+/*
+ * Software-forward a unicast frame straight back into the TQE toward the
+ * FWT-resolved egress (port, node).  Returns 1 when the frame was consumed
+ * (forwarded or dropped by the VLAN egress filter), 0 to let the host
+ * network stack handle it (e.g. egress is the LHost port itself).
+ */
+static int topaz_swfwd_tqe_xmit(const fwt_db_entry *fwt_ent,
+				const union topaz_tqe_cpuif_descr *desc,
+				void *queue)
+{
+	uint8_t port;
+	uint8_t node;
+	uint16_t misc_user;
+	uint8_t tid = 0;
+	union topaz_tqe_cpuif_ppctl ctl;
+	uint8_t portal;
+	uint8_t vlan_index;
+	uint8_t tqe_free = queue ? 0 : 1;
+	struct qtn_vlan_dev *vdev;
+
+	if (fwt_ent->out_port == TOPAZ_TQE_LHOST_PORT)
+		return 0;
+
+	if (vlan_enabled) {
+		vdev = tqe_get_vlandev(fwt_ent->out_port, fwt_ent->out_node);
+		if (!qtn_vlan_egress(vdev, fwt_ent->out_node,
+				bus_to_virt((uintptr_t)desc->data.pkt),
+				vdev->port == TOPAZ_TQE_WMAC_PORT, 1)) {
+			tqe_rx_pkt_drop(desc);
+			return 1;
+		}
+	}
+
+	/* TID either forced from the per-EMAC DSCP override or derived from the frame */
+	if (TOPAZ_TQE_PORT_IS_EMAC(desc->data.in_port)) {
+		if(g_dscp_flag){
+			tid = (g_dscp_value[desc->data.in_port] & 0xFF);
+		} else {
+			topaz_tqe_vlan_gettid(bus_to_virt((uintptr_t)(desc->data.pkt)), &tid, &vlan_index);
+		}
+	} else {
+		topaz_tqe_vlan_gettid(bus_to_virt((uintptr_t)(desc->data.pkt)), &tid, &vlan_index);
+	}
+
+	port = fwt_ent->out_port;
+	node = fwt_ent->out_node;
+	portal = fwt_ent->portal;
+	misc_user = 0;
+	topaz_tqe_cpuif_ppctl_init(&ctl,
+			port, &node, 1, tid,
+			portal, 1, 0, tqe_free, misc_user);
+
+	ctl.data.pkt = (void *)desc->data.pkt;
+	ctl.data.buff_ptr_offset = desc->data.buff_ptr_offset;
+	ctl.data.length = desc->data.length;
+	ctl.data.buff_pool_num = TOPAZ_HBM_EMAC_TX_DONE_POOL;
+
+	if (queue) {
+		topaz_tqe_congest_queue_process(desc, queue, node, tid, &ctl, 1);
+	} else {
+		while (topaz_tqe_cpuif_tx_nready());
+		switch_tqe_multi_proc_sem_down("topaz_swfwd_tqe_xmit",__LINE__);
+		topaz_tqe_cpuif_tx_start(&ctl);
+		switch_tqe_multi_proc_sem_up();
+	}
+
+	return 1;
+}
+
+/* packed (source | dest << 16) UDP port pairs for both DHCP directions */
+#define DHCP_PORT_C2S	(__constant_htons(DHCPCLIENT_PORT) | \
+		__constant_htons(DHCPSERVER_PORT) << 16)
+#define DHCP_PORT_S2C	(__constant_htons(DHCPSERVER_PORT) | \
+		__constant_htons(DHCPCLIENT_PORT) << 16)
+/*
+ * Return 1 if the IPv4 packet is DHCP (client<->server port pair in either
+ * direction), so it is excluded from hardware forwarding.
+ * Fix: re-indented the final return with a tab per kernel coding style.
+ */
+static inline int topaz_is_dhcp(const struct iphdr *ipv4h)
+{
+	const struct udphdr *udph;
+	uint32_t srcdst;
+
+	if (ipv4h->protocol != IPPROTO_UDP)
+		return 0;
+
+	/* ihl counts 32-bit words; UDP header follows the IPv4 header */
+	udph = (const struct udphdr *)((const uint8_t *)ipv4h + (ipv4h->ihl << 2));
+	srcdst = (udph->source | udph->dest << 16);
+
+	if (srcdst == DHCP_PORT_C2S || srcdst == DHCP_PORT_S2C)
+		return 1;
+
+	return 0;
+}
+
+#ifdef CONFIG_IPV6
+/*
+ * Return 1 for IPv6 frames that must not be hardware-accelerated:
+ * DHCPv6 server->client traffic and NDP neighbour solicit/advertise,
+ * both of which the host stack needs to see.
+ */
+static inline int topaz_ipv6_not_accel(const struct ipv6hdr *ipv6h, int seg_len)
+{
+	uint8_t nexthdr;
+	const struct udphdr *udph;
+	const struct icmp6hdr *icmph;
+	int nhdr_off;
+
+	/* walk extension headers to the real transport header */
+	nhdr_off = iputil_v6_skip_exthdr(ipv6h, sizeof(struct ipv6hdr),
+			&nexthdr, seg_len, NULL, NULL);
+
+	if (nexthdr == IPPROTO_UDP) {
+		udph = (const struct udphdr *)((const uint8_t *)ipv6h + nhdr_off);
+		if (udph->source == __constant_htons(DHCPV6SERVER_PORT)
+				&& udph->dest == __constant_htons(DHCPV6CLIENT_PORT))
+			return 1;
+	} else if (nexthdr == IPPROTO_ICMPV6) {
+		icmph = (const struct icmp6hdr *)((const uint8_t *)ipv6h + nhdr_off);
+		if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION
+				|| icmph->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)
+			return 1;
+	}
+
+	return 0;
+}
+#endif
+
+/* One destination MAC seen with no FWT entry; aged out by a periodic timer */
+struct tqe_unknown_dst_entry {
+	unsigned char dst_mac[ETH_ALEN];
+	/* jiffies of last sighting; entries expire after tqe_unknown_dst_expiry */
+	unsigned long updated;
+	STAILQ_ENTRY(tqe_unknown_dst_entry) next;
+};
+
+typedef STAILQ_HEAD(, tqe_unknown_dst_entry) tqe_unknown_dst_entry_head;
+
+static int tqe_unknown_dst_entry_tot = 0;
+static int tqe_unknown_dst_entry_max = 32;
+module_param(tqe_unknown_dst_entry_max, int, 0644);
+static int tqe_unknown_dst_expiry = HZ;
+module_param(tqe_unknown_dst_expiry, int, 0644);
+
+/* lock guards the list, the counter and entry add/del */
+static spinlock_t tqe_unknown_dst_lock;
+static tqe_unknown_dst_entry_head tqe_unknown_dst_entries;
+static struct timer_list tqe_unknown_dst_timer;
+
+/*
+ * Record @mac as an unknown destination (caller holds tqe_unknown_dst_lock).
+ * Refreshes the timestamp if already present; starts the ageing timer when
+ * the list transitions from empty.  Returns 0, -EBUSY when full, -ENOMEM.
+ */
+static int tqe_unknown_dst_entry_add(const unsigned char *mac)
+{
+	struct tqe_unknown_dst_entry *entry;
+
+	if (tqe_unknown_dst_entry_tot >= tqe_unknown_dst_entry_max)
+		return -EBUSY;
+
+	STAILQ_FOREACH(entry, &tqe_unknown_dst_entries, next) {
+		if (memcmp(entry->dst_mac, mac, ETH_ALEN) == 0) {
+			entry->updated = jiffies;
+			return 0;
+		}
+	}
+
+	entry = kmalloc(sizeof(struct tqe_unknown_dst_entry), GFP_ATOMIC);
+	if (entry == NULL)
+		return -ENOMEM;
+
+	memcpy(entry->dst_mac, mac, ETH_ALEN);
+	entry->updated = jiffies;
+	STAILQ_INSERT_TAIL(&tqe_unknown_dst_entries, entry, next);
+
+	tqe_unknown_dst_entry_tot++;
+
+	if (tqe_unknown_dst_entry_tot == 1)
+		mod_timer(&tqe_unknown_dst_timer, jiffies + tqe_unknown_dst_expiry);
+
+	return 0;
+}
+
+/*
+ * Unlink and free @entry (caller holds tqe_unknown_dst_lock).
+ * The entry memory is invalid after this returns.
+ */
+static int tqe_unknown_dst_entry_del(struct tqe_unknown_dst_entry *entry)
+{
+	if (entry == NULL)
+		return -EINVAL;
+
+	KASSERT(tqe_unknown_dst_entry_tot > 0, ("should not be 0"));
+
+	STAILQ_REMOVE(&tqe_unknown_dst_entries, entry, tqe_unknown_dst_entry, next);
+	kfree(entry);
+
+	tqe_unknown_dst_entry_tot--;
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+/* RCU lock must be held */
+/* Resolve the bridge @ndev belongs to (following a bonding master), or NULL */
+static struct net_bridge *tqe_find_bridge(const struct net_device *ndev)
+{
+	struct net_bridge *br = NULL;
+
+	if ((ndev->flags & IFF_SLAVE) && ndev->master)
+		ndev = ndev->master;
+
+	if (rcu_dereference(ndev->br_port) != NULL)
+		br = ndev->br_port->br;
+
+	return br;
+}
+#endif
+
+/*
+ * Return 1 if @mac is a *local* address of the bridge that @dev is a
+ * member of (bridge FDB "is_local"), 0 otherwise or when the bridge FDB
+ * hooks are not installed.
+ */
+static int tqe_unknown_dst_local_find(const struct net_device *dev, unsigned char *mac)
+{
+	struct net_bridge_fdb_entry *fdb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	struct net_bridge_port *port;
+#else
+	struct net_bridge *br;
+#endif
+	int is_local = 0;
+
+	if ((br_fdb_get_hook == NULL) || (br_fdb_put_hook == NULL))
+		return 0;
+
+	if (dev == NULL)
+		return 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	port = get_br_port(dev);
+
+	if (port == NULL)
+		return 0;
+
+	fdb = br_fdb_get_hook(port->br, NULL, mac);
+	if (fdb == NULL)
+		return 0;
+#else
+	rcu_read_lock();
+
+	br = tqe_find_bridge(dev);
+	if (!br)
+		goto out;
+
+	fdb = br_fdb_get_hook(br, NULL, mac);
+	if (fdb == NULL)
+		goto out;
+#endif
+
+	is_local = fdb->is_local;
+
+	/* br_fdb_get_hook took a reference; release it */
+	br_fdb_put_hook(fdb);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+out:
+	rcu_read_unlock();
+#endif
+	return is_local;
+}
+
+/*
+ * Periodic reaper for unknown-destination entries.  For each expired
+ * entry, remove its "drop-port" FWT entry (unless it has since been
+ * relearned with a real destination port) and free the list node.
+ *
+ * Fix: the original called tqe_unknown_dst_entry_del() — which kfree()s
+ * the node — and then dereferenced cur_entry->dst_mac twice afterwards,
+ * a use-after-free.  The FWT lookup/delete now happen before the free.
+ */
+static void tqe_unknown_dst_timer_func(unsigned long data)
+{
+	struct tqe_unknown_dst_entry *cur_entry;
+	struct tqe_unknown_dst_entry *tmp_entry;
+
+	const struct fwt_db_entry *fwt_entry;
+
+	if ((g_tqe_fwt_get_from_mac_hook == NULL) || (g_tqe_fwt_del_from_mac_hook == NULL))
+		return;
+
+	spin_lock(&tqe_unknown_dst_lock);
+
+	STAILQ_FOREACH_SAFE(cur_entry, &tqe_unknown_dst_entries, next, tmp_entry) {
+		if (time_before(jiffies, cur_entry->updated + tqe_unknown_dst_expiry))
+			continue;
+
+		fwt_entry = g_tqe_fwt_get_from_mac_hook(cur_entry->dst_mac);
+		/*
+		 * keep the "drop" FWT entry if it has been updated
+		 * with correct destination port
+		 */
+		if ((fwt_entry != NULL) && (fwt_entry->out_port == TOPAZ_TQE_DROP_PORT))
+			g_tqe_fwt_del_from_mac_hook(cur_entry->dst_mac);
+
+		/* free only after the last dereference of cur_entry->dst_mac */
+		tqe_unknown_dst_entry_del(cur_entry);
+	}
+
+	if (tqe_unknown_dst_entry_tot > 0)
+		mod_timer(&tqe_unknown_dst_timer, jiffies + tqe_unknown_dst_expiry);
+
+	spin_unlock(&tqe_unknown_dst_lock);
+}
+
+/* One-time init of the unknown-destination list, lock and ageing timer */
+static void tqe_unknown_dst_entry_init(void)
+{
+	STAILQ_INIT(&tqe_unknown_dst_entries);
+
+	spin_lock_init(&tqe_unknown_dst_lock);
+
+	init_timer(&tqe_unknown_dst_timer);
+
+	/* timer is armed lazily by tqe_unknown_dst_entry_add() */
+	tqe_unknown_dst_timer.function = tqe_unknown_dst_timer_func;
+}
+
+/*
+ * Handle a unicast frame whose destination has no usable FWT entry.
+ * Installs a temporary "drop-port" FWT entry so subsequent frames to the
+ * same unknown MAC are dropped in hardware instead of flooding the LHost;
+ * the timer above ages these entries out.  Returns 1 if the frame was
+ * consumed (dropped), 0 to pass it up the stack.
+ */
+static int topaz_tx_unknown_unicast(const union topaz_tqe_cpuif_descr *desc,
+			const struct fwt_db_entry *fwt_ent)
+{
+	const struct fwt_db_entry *fwt_dst;
+
+	struct ether_header *eth;
+	struct net_device *dev;
+
+	int ret;
+
+	/* feature disabled entirely when max is 0 */
+	if (tqe_unknown_dst_entry_max == 0)
+		return 0;
+
+	if ((fwt_ent != NULL) && (fwt_ent->out_port == TOPAZ_TQE_DROP_PORT)) {
+		/*
+		 * Few packets may still be pushed to LHost before the "drop"
+		 * FWT entry takes effect
+		 */
+		tqe_rx_pkt_drop(desc);
+
+		return 1;
+	}
+
+	dev = (struct net_device *)tqe_port_handlers[desc->data.in_port].token;
+	eth = bus_to_virt((uintptr_t)desc->data.pkt);
+	/*
+	 * Local addresses of Linux bridge don't have corresponding hardware FWT entries,
+	 * hence they are always "unknown"
+	 */
+	if (tqe_unknown_dst_local_find(dev, eth->ether_dhost))
+		return 0;
+
+	if ((g_tqe_fwt_get_from_mac_hook == NULL) || (g_tqe_fwt_add_from_mac_hook == NULL))
+		return 0;
+	/*
+	 * TODO fwt_ent is NULL in two cases:
+	 *	src MAC address is not found in FWT
+	 *	dst MAC address is not found in FWT
+	 */
+	fwt_dst = g_tqe_fwt_get_from_mac_hook(eth->ether_dhost);
+	/*
+	 * dst MAC address is found in FWT, but src MAC address is not: pass up for
+	 * src MAC learning
+	 */
+	if (fwt_dst != NULL)
+		return 0;
+
+	spin_lock(&tqe_unknown_dst_lock);
+	ret = tqe_unknown_dst_entry_add(eth->ether_dhost);
+	spin_unlock(&tqe_unknown_dst_lock);
+
+	/* tracking table full or OOM: fall back to passing the frame up */
+	if (ret < 0)
+		return 0;
+	/*
+	 * add a "drop" FWT entry to push packets destined to same dst
+	 * MAC address to "drop" port and drop them
+	 */
+	g_tqe_fwt_add_from_mac_hook(eth->ether_dhost, TOPAZ_TQE_DROP_PORT, 0, NULL);
+
+	return 0;
+}
+
+/*
+ * Fast-path unicast forwarding for frames ingressing on an EMAC port.
+ * Control-plane traffic (ARP, DHCP, DHCPv6, NDP) and multicast are left
+ * to the host stack.  Returns 1 if the frame was consumed (forwarded or
+ * dropped), 0 to pass it to the normal receive path.
+ */
+static int __sram_text tqe_rx_pktfwd(void *queue, const union topaz_tqe_cpuif_descr *desc)
+{
+	enum topaz_tqe_port in_port = desc->data.in_port;
+	const struct fwt_db_entry *fwt_ent;
+	const struct ether_header *eh = bus_to_virt((uintptr_t) desc->data.pkt);
+	const struct vlan_ethhdr *vlan_hdr = (struct vlan_ethhdr *)eh;
+	const struct iphdr *ipv4h;
+	const struct ipv6hdr *ipv6h;
+	uint16_t ether_type;
+	uint16_t ether_hdrlen;
+
+	if (!TOPAZ_TQE_PORT_IS_EMAC(in_port))
+		return 0;
+
+	if (unlikely(iputil_eth_is_multicast(eh)))
+		return 0;
+
+	/* locate the L3 header; a single 802.1Q tag may precede it */
+	ether_type = eh->ether_type;
+	if (ether_type == __constant_htons(ETH_P_8021Q)) {
+		ether_type = vlan_hdr->h_vlan_encapsulated_proto;
+		ipv4h = (const struct iphdr *)(vlan_hdr + 1);
+		ipv6h = (const struct ipv6hdr *)(vlan_hdr + 1);
+		ether_hdrlen = sizeof(struct vlan_ethhdr);
+	} else {
+		ipv4h = (const struct iphdr *)(eh + 1);
+		ipv6h = (const struct ipv6hdr *)(eh + 1);
+		ether_hdrlen = sizeof(struct ether_header);
+	}
+
+	if (ether_type == __constant_htons(ETH_P_ARP))
+		return 0;
+
+	if (ether_type == __constant_htons(ETH_P_IP)
+			&& topaz_is_dhcp(ipv4h))
+		return 0;
+
+#ifdef CONFIG_IPV6
+	if (ether_type == __constant_htons(ETH_P_IPV6)
+			&& topaz_ipv6_not_accel(ipv6h, desc->data.length - ether_hdrlen))
+		return 0;
+#endif
+
+	if (unlikely(!g_tqe_fwt_get_ucast_hook))
+		return 0;
+
+	fwt_ent = g_tqe_fwt_get_ucast_hook(eh->ether_shost, eh->ether_dhost);
+	if (unlikely(!fwt_ent || (fwt_ent->out_port == TOPAZ_TQE_DROP_PORT)))
+		return topaz_tx_unknown_unicast(desc, fwt_ent);
+
+	/* Don't return to sender */
+	if (unlikely((in_port == fwt_ent->out_port) ||
+			tqe_port_same_group(in_port, fwt_ent->out_port))) {
+		tqe_rx_pkt_drop(desc);
+		return 1;
+	}
+
+	return topaz_swfwd_tqe_xmit(fwt_ent, desc, queue);
+}
+
+/*
+ * When the host has armed WoWLAN, test frames arriving from the MuC port
+ * against the configured wake-up match (L2 ethertype and/or UDP port).
+ * Returns non-zero for a matching magic packet, 0 otherwise.
+ */
+int wowlan_magic_packet_check(const union topaz_tqe_cpuif_descr *desc)
+{
+	const struct ether_header *eh = bus_to_virt((uintptr_t) desc->data.pkt);
+	const uint16_t *ether_type = NULL;
+	const void *iphdr = NULL;
+	uint32_t ether_payload_length = 0;
+
+	if (likely(!g_wowlan_host_state) ||
+			(desc->data.in_port != TOPAZ_TQE_MUC_PORT))
+		return 0;
+
+	ether_type = tqe_rx_ether_type_skip_vlan(eh, desc->data.length);
+	if (unlikely(!ether_type)) {
+		return 0;
+	}
+
+	iphdr = (void *)(ether_type + 1);
+	ether_payload_length = desc->data.length - ((char *)iphdr - (char *)eh);
+	/* reject IPv4 frames too short to carry a full IP header */
+	if ((*ether_type == __constant_htons(ETH_P_IP))
+			&& (ether_payload_length < sizeof(struct iphdr))) {
+		return 0;
+	}
+
+	return wowlan_is_magic_packet(*ether_type, eh, iphdr,
+			g_wowlan_match_type,
+			g_wowlan_l2_ether_type,
+			g_wowlan_l3_udp_port);
+}
+
+/*
+ * L2 external filter: a frame received on the filter port whose source MAC
+ * resolves to a WMAC station is re-attributed to the MuC port (preventing
+ * FWT relearning) and flagged on the skb.  Returns 1 when rewritten.
+ */
+static int tqe_rx_l2_ext_filter_handler(union topaz_tqe_cpuif_descr *desc, struct sk_buff *skb)
+{
+	enum topaz_tqe_port in_port = desc->data.in_port;
+	const struct fwt_db_entry *fwt_ent;
+	const struct ether_header *eh = bus_to_virt((uintptr_t) desc->data.pkt);
+
+	if (in_port != g_l2_ext_filter_port)
+		return 0;
+
+	if (unlikely(!g_tqe_fwt_get_from_mac_hook))
+		return 0;
+
+	fwt_ent = g_tqe_fwt_get_from_mac_hook(eh->ether_shost);
+	if (unlikely(!fwt_ent))
+		return 0;
+
+	if (TOPAZ_TQE_PORT_IS_WMAC(fwt_ent->out_port)) {
+		/* Change the in port to prevent FWT updates */
+		desc->data.in_port = TOPAZ_TQE_MUC_PORT;
+		desc->data.misc_user = fwt_ent->out_node;
+		skb->ext_l2_filter = 1;
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Hot-path wrapper: only enter the L2 external filter when it is enabled */
+int __sram_text tqe_rx_l2_ext_filter(union topaz_tqe_cpuif_descr *desc, struct sk_buff *skb)
+{
+	if (unlikely(g_l2_ext_filter))
+		return tqe_rx_l2_ext_filter_handler(desc, skb);
+
+	return 0;
+}
+EXPORT_SYMBOL(tqe_rx_l2_ext_filter);
+
+/*
+ * Dispatch a received frame to the handler registered for its ingress port.
+ * The caller guarantees a handler is installed for desc->data.in_port.
+ */
+void __sram_text tqe_rx_call_port_handler(union topaz_tqe_cpuif_descr *desc,
+		struct sk_buff *skb, uint8_t *whole_frm_hdr)
+{
+	typeof(&tqe_port_handlers[0]) ph = &tqe_port_handlers[desc->data.in_port];
+
+	ph->handler(ph->token, desc, skb, whole_frm_hdr);
+}
+EXPORT_SYMBOL(tqe_rx_call_port_handler);
+
+/*
+ * Process one RX descriptor popped from the TQE CPU interface.
+ *
+ * Validates the buffer, applies per-port ingress policy (VLAN ingress for
+ * EMAC ports, reserved-MAC filtering for WMAC/MuC), tries the multicast and
+ * packet-forwarding fast paths, and finally attaches an skb and hands it to
+ * the registered port handler. On any policy drop or error the HBM payload
+ * is returned via tqe_rx_pkt_drop().
+ *
+ * Returns 0 on normal completion (including drops), -1 on malformed input.
+ */
+static int __sram_text tqe_rx_desc_handler(const struct tqe_netdev_priv *priv, union topaz_tqe_cpuif_descr *desc)
+{
+	enum topaz_tqe_port in_port = desc->data.in_port;
+	void *buf_bus_rx = desc->data.pkt;
+	void *buf_virt_rx = bus_to_virt((unsigned long) buf_bus_rx);
+	uint16_t buflen = desc->data.length;
+	const int8_t pool = topaz_hbm_payload_get_pool_bus(buf_bus_rx);
+	const struct ether_header *eh = bus_to_virt((uintptr_t) desc->data.pkt);
+	uint8_t vinfo_hdr = 0;
+
+	/* Hardware should never hand us a NULL payload pointer. */
+	if (unlikely(buf_bus_rx == NULL)) {
+		if (printk_ratelimit()) {
+			printk(KERN_CRIT "%s: NULL buffer from TQE, len %u, in port %u",
+					__FUNCTION__, buflen, in_port);
+		}
+		return -1;
+	}
+
+	if (unlikely(buflen < ETH_HLEN)) {
+		printk(KERN_WARNING
+			"%s: buffer from TQE smaller than ethernet header, len %u, in port %u",
+							__FUNCTION__, buflen, in_port);
+		return -1;
+	}
+
+	if (unlikely(!topaz_hbm_pool_valid(pool))) {
+		printk(KERN_CRIT "%s: invalid pool buffer from TQE: 0x%p", __FUNCTION__, buf_bus_rx);
+		return -1;
+	}
+
+	if (likely((in_port < TOPAZ_TQE_NUM_PORTS) && tqe_port_handlers[in_port].handler)) {
+		struct sk_buff *skb;
+		uint8_t *whole_frm_hdr;
+
+		topaz_hbm_debug_stamp(topaz_hbm_payload_store_align_virt(buf_virt_rx, pool, 0),
+				TOPAZ_HBM_OWNER_LH_RX_TQE, buflen);
+
+		/* invalidate enough for l3 packet inspection for multicast frames */
+		inv_dcache_sizerange_safe(buf_virt_rx, 64);
+
+#if defined(CONFIG_ARCH_TOPAZ_SWITCH_TEST) || defined(CONFIG_ARCH_TOPAZ_SWITCH_TEST_MODULE)
+		topaz_tqe_test_ctrl(buf_virt_rx);
+#endif
+		if (TOPAZ_TQE_PORT_IS_EMAC(in_port)) {
+			/* Wired ingress: enforce VLAN membership when enabled. */
+			if (vlan_enabled) {
+				struct qtn_vlan_dev *vdev = vport_tbl_lhost[in_port];
+				BUG_ON(vdev == NULL);
+
+				if (!qtn_vlan_ingress(vdev, 0,
+						buf_virt_rx, 0, 0, 1)) {
+					tqe_rx_pkt_drop(desc);
+					return 0;
+				}
+			}
+		} else if (unlikely(((in_port == TOPAZ_TQE_WMAC_PORT) || (in_port == TOPAZ_TQE_MUC_PORT)))) {
+			/* Wireless ingress: drop frames from reserved MAC addresses. */
+			if (g_tqe_mac_reserved_hook && g_tqe_mac_reserved_hook(eh->ether_shost)) {
+				tqe_rx_pkt_drop(desc);
+				return 0;
+			}
+		} else {
+			/* A handler is registered for a port class we don't expect. */
+			BUG_ON(1);
+		}
+
+		/* Reserve room in front of the frame for VLAN packet control info. */
+		if (vlan_enabled)
+			vinfo_hdr = QVLAN_PKTCTRL_LEN;
+
+		/* Magic (wake-up) packets bypass the fast-forwarding paths so
+		 * they reach the protocol stack / wake logic. */
+		if (likely(!wowlan_magic_packet_check(desc))) {
+#ifdef CONFIG_TOPAZ_PCIE_HOST
+			if (tqe_rx_multicast(NULL, desc))
+#else
+			if (tqe_rx_multicast(priv->congest_queue, desc))
+#endif
+				return 0;
+
+			if (tqe_rx_pktfwd(priv->congest_queue, desc))
+				return 0;
+		}
+
+#if TOPAZ_HBM_BUF_WMAC_RX_QUARANTINE
+		if (pool == TOPAZ_HBM_BUF_WMAC_RX_POOL)
+		{
+			/* Quarantine: copy into a fresh buffer so the WMAC RX
+			 * pool buffer can be recycled immediately. */
+			skb = topaz_hbm_attach_skb_quarantine(buf_virt_rx, pool, buflen, &whole_frm_hdr);
+			/* now desc doesn't link to the new skb data buffer */
+			if (skb) {
+				/* new buf is used, no need for original one */
+				tqe_rx_pkt_drop(desc);
+			}
+		}
+		else
+#endif
+		{
+			skb = topaz_hbm_attach_skb((uint8_t *)buf_virt_rx, pool, vinfo_hdr);
+			whole_frm_hdr = skb->head;
+		}
+		if (skb) {
+			/* attach VLAN information to skb */
+			skb_put(skb, buflen);
+			if (vlan_enabled) {
+				/* Control block written by qtn_vlan_ingress lives
+				 * just before skb->data. */
+				struct qtn_vlan_pkt *pkt = qtn_vlan_get_info(skb->data);
+				if (unlikely(pkt->magic != QVLAN_PKT_MAGIC)) {
+					if (printk_ratelimit())
+						printk(KERN_WARNING "%s: magic not right. \
+							magic 0x%02x, flag 0x%02x\n",
+							__func__, pkt->magic, pkt->flag);
+				} else {
+					skb->vlan_tci = pkt->vlan_info & QVLAN_MASK_VID;
+				}
+				M_FLAG_SET(skb, M_VLAN_TAGGED);
+			}
+
+			/* Frame received from external L2 filter will not have MAC header */
+			if (tqe_rx_l2_ext_filter(desc, skb))
+				whole_frm_hdr = NULL;
+			tqe_rx_call_port_handler(desc, skb, whole_frm_hdr);
+			return 0;
+		}
+
+	} else {
+		if (printk_ratelimit()) {
+			printk(KERN_ERR "%s: input from unhandled port %u misc %u\n",
+					__FUNCTION__, in_port, (unsigned)desc->data.misc_user);
+		}
+	}
+
+	/* Fallthrough: no skb could be attached or no handler exists. */
+	tqe_rx_pkt_drop(desc);
+
+	return 0;
+}
+
+/* Unmask the TQE CPU-interface RX interrupt. */
+static void tqe_irq_enable(void)
+{
+	topaz_tqe_cpuif_setup_irq(1, 0);
+}
+
+/* Mask the TQE CPU-interface RX interrupt (used while NAPI polls). */
+static void tqe_irq_disable(void)
+{
+	topaz_tqe_cpuif_setup_irq(0, 0);
+}
+
+/*
+ * NAPI poll callback: drain up to @budget descriptors from the TQE ring.
+ * Each descriptor is copied to the stack (after a cache invalidate) and the
+ * ring slot returned to hardware before processing, so the hardware can
+ * refill while we work. Re-enables the interrupt when the ring is drained.
+ *
+ * Returns the number of descriptors processed.
+ */
+static int __sram_text tqe_rx_napi_handler(struct napi_struct *napi, int budget)
+{
+	int processed = 0;
+	struct tqe_netdev_priv *priv = container_of(napi, struct tqe_netdev_priv, napi);
+
+	while (processed < budget) {
+		union topaz_tqe_cpuif_status status;
+		union topaz_tqe_cpuif_descr __iomem *desc_bus;
+		union topaz_tqe_cpuif_descr *desc_virt;
+		union topaz_tqe_cpuif_descr desc_local;
+		uintptr_t inv_start;
+		size_t inv_size;
+
+		status = topaz_tqe_cpuif_get_status();
+		if (status.data.empty) {
+			break;
+		}
+
+		desc_bus = topaz_tqe_cpuif_get_curr();
+		desc_virt = bus_to_virt((uintptr_t) desc_bus);
+
+		/* invalidate descriptor and copy to the stack */
+		inv_start = (uintptr_t) align_buf_cache(desc_virt);
+		inv_size = align_buf_cache_size(desc_virt, sizeof(*desc_virt));
+		inv_dcache_range(inv_start, inv_start + inv_size);
+		memcpy(&desc_local, desc_virt, sizeof(*desc_virt));
+
+		if (likely(desc_local.data.own)) {
+			/* Return the slot to hardware, then work on the copy. */
+			topaz_tqe_cpuif_put_back(desc_bus);
+			tqe_rx_desc_handler(priv, &desc_local);
+			++processed;
+		} else {
+			/* Status said non-empty but the descriptor isn't ours;
+			 * log and bail out of this poll round. */
+			printk("%s unowned descriptor? desc_bus 0x%p 0x%08x 0x%08x 0x%08x 0x%08x\n",
+					__FUNCTION__, desc_bus,
+					desc_local.raw.dw0, desc_local.raw.dw1,
+					desc_local.raw.dw2, desc_local.raw.dw3);
+			break;
+		}
+	}
+
+	if (processed < budget) {
+		/* Ring drained: exit polling mode and re-arm the interrupt. */
+		napi_complete(napi);
+		tqe_irq_enable();
+	}
+
+	return processed;
+}
+
+/*
+ * TQE hard interrupt: switch to NAPI polling and mask the source.
+ */
+static irqreturn_t __sram_text tqe_irqh(int irq, void *_dev)
+{
+	struct net_device *dev = _dev;
+	struct tqe_netdev_priv *priv = netdev_priv(dev);
+
+	napi_schedule(&priv->napi);
+	tqe_irq_disable();
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * TQE network device ops
+ */
+/* The "tqe" netdev is a pseudo-device; it cannot be brought up. */
+static int tqe_ndo_open(struct net_device *dev)
+{
+	return -ENODEV;
+}
+
+static int tqe_ndo_stop(struct net_device *dev)
+{
+	return -ENODEV;
+}
+
+/*
+ * Push one payload buffer into the TQE for transmission.
+ *
+ * Fills the remaining ppctl fields (bus address, HBM pointer offset, length,
+ * free-to pool), busy-waits until the TQE can accept a new TX, and issues the
+ * start under the multi-processor semaphore. The buffer is always released to
+ * the EMAC TX-done pool after transmission. Always returns 0.
+ */
+static int tqe_tx_buf(union topaz_tqe_cpuif_ppctl *ppctl,
+		void __iomem *virt_buf, unsigned long data_len, int8_t pool)
+{
+	const uintptr_t bus_data_start = virt_to_bus(virt_buf);
+	const long buff_ptr_offset = topaz_hbm_payload_buff_ptr_offset_bus((void *)bus_data_start, pool, NULL);
+
+	ppctl->data.pkt = (void *) bus_data_start;
+	ppctl->data.buff_ptr_offset = buff_ptr_offset;
+	ppctl->data.length = data_len;
+	/* always free to txdone pool */
+	ppctl->data.buff_pool_num = TOPAZ_HBM_EMAC_TX_DONE_POOL;
+
+	/* Spin until the TQE TX interface is ready to take another frame. */
+	while (topaz_tqe_cpuif_tx_nready());
+	
+	/* Serialize TX start against the other processors sharing the TQE. */
+	switch_tqe_multi_proc_sem_down("tqe_tx_buf",__LINE__);
+	topaz_tqe_cpuif_tx_start(ppctl);
+	switch_tqe_multi_proc_sem_up();
+
+	return 0;
+}
+
+/*
+ * Register the FWT lookup callbacks used by the RX fast path
+ * (multicast lookup, multicast fast-forward lookup, false-miss reporting).
+ */
+void tqe_register_fwt_cbk(tqe_fwt_get_mcast_hook get_mcast_cbk_func,
+				tqe_fwt_get_mcast_ff_hook get_mcast_ff_cbk_func,
+				tqe_fwt_false_miss_hook false_miss_func)
+{
+	g_tqe_fwt_get_mcast_hook = get_mcast_cbk_func;
+	g_tqe_fwt_get_mcast_ff_hook = get_mcast_ff_cbk_func;
+	g_tqe_fwt_false_miss_hook = false_miss_func;
+}
+EXPORT_SYMBOL(tqe_register_fwt_cbk);
+
+/*
+ * Transmit an skb through the TQE.
+ *
+ * Fast path: if the skb's payload already lives in a valid HBM pool, VLAN is
+ * off, and the skb/data are not shared, the payload is handed to hardware
+ * directly (skb->hbm_no_free keeps the free path from returning it to the
+ * pool; the TQE frees it to the TX-done pool instead).
+ *
+ * Slow path: copy the payload (plus VLAN control info, if tagged) into a
+ * fresh EMAC-RX-pool buffer and flush/invalidate the cache range so the
+ * hardware sees coherent data.
+ *
+ * Returns NETDEV_TX_OK (including the oversize-drop case) or NETDEV_TX_BUSY
+ * when no HBM buffer is available (caller retries).
+ */
+int tqe_tx(union topaz_tqe_cpuif_ppctl *ppctl, struct sk_buff *skb)
+{
+	unsigned int data_len = skb->len;
+	void *buf_virt = skb->data;
+	void *buf_bus = (void *) virt_to_bus(buf_virt);
+	void *buf_virt_vlan;
+	int8_t pool = topaz_hbm_payload_get_pool_bus(buf_bus);
+	/* Payload may go out zero-copy only if nobody else references it. */
+	const bool hbm_can_use = !vlan_enabled &&
+		topaz_hbm_pool_valid(pool) &&
+		(atomic_read(&skb->users) == 1) &&
+		(atomic_read(&skb_shinfo(skb)->dataref) == 1);
+
+	if (hbm_can_use) {
+		/*
+		 * skb is otherwise unused; clear to send out tqe.
+		 * Set flag such that payload isn't returned to the hbm on free
+		 */
+		skb->hbm_no_free = 1;
+
+		topaz_hbm_flush_skb_cache(skb);
+	} else {
+		void *hbm_buf_virt;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		uintptr_t flush_start;
+		size_t flush_size;
+#endif
+		if (data_len < TOPAZ_HBM_BUF_EMAC_RX_SIZE) {
+			pool = TOPAZ_HBM_BUF_EMAC_RX_POOL;
+		} else {
+			/*
+			 * requested impossibly large transmission
+			 */
+			if (printk_ratelimit()) {
+				printk(KERN_ERR "%s: requested oversize transmission"
+						" (%u bytes) to port %d\n",
+						__FUNCTION__, data_len, ppctl->data.out_port);
+			}
+			kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+
+		hbm_buf_virt = topaz_hbm_get_payload_virt(pool);
+		if (unlikely(!hbm_buf_virt)) {
+			/* buffer will be stored in gso_skb and re-attempted for xmit */
+			return NETDEV_TX_BUSY;
+		}
+
+		topaz_hbm_debug_stamp(hbm_buf_virt, TOPAZ_HBM_OWNER_LH_TX_TQE, data_len);
+
+		memcpy(hbm_buf_virt, buf_virt, data_len);
+		buf_virt = hbm_buf_virt;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		/* Copy the VLAN control block that precedes the frame, then
+		 * write back + invalidate the whole transfer. */
+		if (M_FLAG_ISSET(skb, M_VLAN_TAGGED)) {
+			buf_virt_vlan = qtn_vlan_get_info(buf_virt);
+			memcpy(buf_virt_vlan, (uint8_t *)qtn_vlan_get_info(skb->data),
+					QVLAN_PKTCTRL_LEN);
+
+			dma_cache_wback_inv((unsigned long) buf_virt_vlan, data_len + QVLAN_PKTCTRL_LEN);
+		} else {
+				dma_cache_wback_inv((unsigned long) buf_virt, data_len);
+		}
+#else
+		if (M_FLAG_ISSET(skb, M_VLAN_TAGGED)) {
+			buf_virt_vlan = qtn_vlan_get_info(buf_virt);
+			memcpy(buf_virt_vlan, (uint8_t *)qtn_vlan_get_info(skb->data),
+					QVLAN_PKTCTRL_LEN);
+			flush_start = (uintptr_t) align_buf_cache(buf_virt_vlan);
+			flush_size = align_buf_cache_size(buf_virt_vlan, data_len + QVLAN_PKTCTRL_LEN);
+		} else {
+			flush_start = (uintptr_t) align_buf_cache(buf_virt);
+			flush_size = align_buf_cache_size(buf_virt, data_len);
+		}
+
+		flush_and_inv_dcache_range(flush_start, flush_start + flush_size);
+#endif
+	}
+	/* Safe in the zero-copy case too: hbm_no_free keeps the payload alive
+	 * for the hardware while the skb itself is released. */
+	dev_kfree_skb(skb);
+
+	tqe_tx_buf(ppctl, buf_virt, data_len, pool);
+
+	return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL(tqe_tx);
+
+/* The pseudo-device never transmits through the stack; real TX goes via
+ * tqe_tx(). Always reports busy. */
+static int tqe_ndo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	return NETDEV_TX_BUSY;
+}
+
+/* Minimal netdev ops for the "tqe" pseudo-device. */
+static const struct net_device_ops tqe_ndo = {
+	.ndo_open = tqe_ndo_open,
+	.ndo_stop = tqe_ndo_stop,
+	.ndo_start_xmit = tqe_ndo_start_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/*
+ * Allocate the RX descriptor ring (DMA-coherent, hardware-aligned), link the
+ * descriptors into a circular list via their bus addresses, and program the
+ * ring into the TQE CPU interface. Returns 0 on success, -ENOMEM on failure.
+ */
+static int tqe_descs_alloc(struct tqe_netdev_priv *priv)
+{
+	int i;
+	union topaz_tqe_cpuif_descr __iomem *bus_descs;
+
+	if (ALIGNED_DMA_DESC_ALLOC(&priv->rx, QTN_BUFS_LHOST_TQE_RX_RING, TOPAZ_TQE_CPUIF_RXDESC_ALIGN, 1)) {
+		return -ENOMEM;
+	}
+
+	/* next pointers must hold bus addresses - the hardware follows them. */
+	bus_descs = (void *)priv->rx.descs_dma_addr;
+	for (i = 0; i < QTN_BUFS_LHOST_TQE_RX_RING; i++) {
+		priv->rx.descs[i].data.next = &bus_descs[(i + 1) % QTN_BUFS_LHOST_TQE_RX_RING];
+	}
+
+	printk(KERN_INFO "%s: %u tqe_rx_descriptors at kern uncached 0x%p bus 0x%p\n",
+			__FUNCTION__, priv->rx.desc_count, priv->rx.descs, bus_descs);
+
+	topaz_tqe_cpuif_setup_ring((void *)priv->rx.descs_dma_addr, priv->rx.desc_count);
+
+	return 0;
+}
+
+/* Release the RX descriptor ring if it was allocated. */
+static void tqe_descs_free(struct tqe_netdev_priv *priv)
+{
+	if (priv->rx.descs) {
+		ALIGNED_DMA_DESC_FREE(&priv->rx);
+	}
+}
+
+/*
+ * Dump per-TID congestion-queue counters (forwarded, dropped, enqueue
+ * failures) and global congestion statistics to the kernel log.
+ * No-op when no congestion queue is allocated (e.g. PCIe host builds).
+ */
+void print_tqe_counters(struct tqe_netdev_priv *priv)
+{
+	int i;
+
+	if (priv->congest_queue == NULL)
+		return;
+
+	for (i = 0; i < TOPAZ_CONGEST_QUEUE_NUM; i++)
+		printk("rx_congest_fwd %d:\t%08x \t%d\n",
+			i, priv->congest_queue->queues[i].congest_xmit,
+			priv->congest_queue->queues[i].qlen);
+
+	for (i = 0; i < TOPAZ_CONGEST_QUEUE_NUM; i++)
+		printk("rx_congest_drop %d:\t%08x\n",
+			i, priv->congest_queue->queues[i].congest_drop);
+
+	for (i = 0; i < TOPAZ_CONGEST_QUEUE_NUM; i++)
+		printk("rx_congest_enq_fail %d:\t%08x\n",
+			i, priv->congest_queue->queues[i].congest_enq_fail);
+
+	/* Congest Queue */
+	printk("rx_congest_entry:\t%08x\n", priv->congest_queue->func_entry);
+	printk("rx_congest_retry:\t%08x\n", priv->congest_queue->cnt_retries);
+	printk("total len:\t%08x \tunicast count:%d\n",
+			priv->congest_queue->total_qlen,
+			priv->congest_queue->unicast_qcount);
+	printk("active tid num:\t%08x\n", qtn_mproc_sync_shared_params_get()->active_tid_num);
+}
+
+/* sysfs "dbg" read handler: nothing to report, output goes to the log. */
+static ssize_t tqe_dbg_show(struct device *dev, struct device_attribute *attr,
+						char *buff)
+{
+	return 0;
+}
+
+/* Clear the per-port RX handler table; ports register handlers later. */
+static void tqe_init_port_handler(void)
+{
+	memset(tqe_port_handlers, 0, sizeof(tqe_port_handlers));
+}
+
+/*
+ * sysfs "dbg" write handler: parse a numeric command and dump the matching
+ * congestion-queue diagnostics.
+ *   0 - counters, 1 - queue dump, 2 - node dump; anything else is ignored.
+ */
+static ssize_t tqe_dbg_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev = container_of(dev, struct net_device, dev);
+	struct tqe_netdev_priv *priv = netdev_priv(ndev);
+	char buffer[128];
+	char *str = buffer;
+	char *token;
+	uint32_t cmd;
+
+	/*
+	 * strncpy() does not NUL-terminate when the source fills the limit,
+	 * and the stack buffer is uninitialized - terminate explicitly so
+	 * strsep()/simple_strtoul() cannot run off the end.
+	 */
+	strncpy(str, buf, sizeof(buffer) - 1);
+	buffer[sizeof(buffer) - 1] = '\0';
+
+	token = strsep(&str, " ,\n");
+	cmd = (uint32_t)simple_strtoul(token, NULL, 10);
+	switch (cmd) {
+	case 0:
+		print_tqe_counters(priv);
+		break;
+	case 1:
+		topaz_congest_dump(priv->congest_queue);
+		break;
+	case 2:
+		topaz_congest_node(priv->congest_queue);
+		break;
+	default:
+		break;
+	}
+
+	return count;
+}
+DEVICE_ATTR(dbg, S_IWUSR | S_IRUSR, tqe_dbg_show, tqe_dbg_set); /* dev_attr_dbg */
+
+/*
+ * Create and register the "tqe" pseudo network device: reset the TQE,
+ * allocate the RX descriptor ring, hook the interrupt, set up the
+ * congestion queue (non-PCIe-host builds) and NAPI, and expose the
+ * sysfs "dbg" attribute. Returns the netdev, or NULL on any failure.
+ */
+static struct net_device * __init tqe_netdev_init(void)
+{
+	int rc = 0;
+	struct net_device *dev = NULL;
+	struct tqe_netdev_priv *priv;
+	static const int tqe_netdev_irq = TOPAZ_IRQ_TQE;
+
+	tqe_init_port_handler();
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dev = alloc_netdev(sizeof(struct tqe_netdev_priv), "tqe", NET_NAME_UNKNOWN, &ether_setup);
+#else
+	dev = alloc_netdev(sizeof(struct tqe_netdev_priv), "tqe", &ether_setup);
+#endif
+
+	if (!dev) {
+		printk(KERN_ERR "%s: unable to allocate dev\n", __FUNCTION__);
+		goto netdev_alloc_error;
+	}
+	priv = netdev_priv(dev);
+
+	dev->base_addr = 0;
+	dev->irq = tqe_netdev_irq;
+	dev->watchdog_timeo = 60 * HZ;
+	dev->tx_queue_len = 1;
+	dev->netdev_ops = &tqe_ndo;
+
+	/* Initialise TQE */
+	topaz_tqe_cpuif_setup_reset(1);
+	topaz_tqe_cpuif_setup_reset(0);
+
+	if (tqe_descs_alloc(priv)) {
+		goto desc_alloc_error;
+	}
+
+	rc = request_irq(dev->irq, &tqe_irqh, 0, dev->name, dev);
+	if (rc) {
+		printk(KERN_ERR "%s: unable to get %s IRQ %d\n",
+				__FUNCTION__, dev->name, tqe_netdev_irq);
+		goto irq_request_error;
+	}
+#ifndef CONFIG_TOPAZ_PCIE_HOST
+	/* Initialize congestion queue */
+	priv->congest_queue = topaz_congest_queue_init();
+	if (priv->congest_queue == NULL){
+		printk(KERN_ERR "LHOST TQE: Can't allocate congest queue\n");
+		goto congest_queue_alloc_error;
+	}
+	priv->congest_queue->xmit_func = topaz_tqe_xmit;
+#endif
+	rc = register_netdev(dev);
+	if (rc) {
+		printk(KERN_ERR "%s: Cannot register net device '%s', error %d\n",
+				__FUNCTION__, dev->name, rc);
+		goto netdev_register_error;
+	}
+
+	netif_napi_add(dev, &priv->napi, &tqe_rx_napi_handler, 8);
+	napi_enable(&priv->napi);
+
+	tqe_irq_enable();
+
+	device_create_file(&dev->dev, &dev_attr_dbg);
+
+	tqe_unknown_dst_entry_init();
+
+	return dev;
+
+	/*
+	 * Unwind in reverse order of construction. The congestion queue only
+	 * exists on non-PCIe-host builds, so its teardown (and the label
+	 * jumped to when its allocation fails) is conditional; free_irq()
+	 * must run on every register_netdev() failure regardless.
+	 */
+netdev_register_error:
+#ifndef CONFIG_TOPAZ_PCIE_HOST
+	topaz_congest_queue_exit(priv->congest_queue);
+congest_queue_alloc_error:
+#endif
+	free_irq(dev->irq, dev);
+irq_request_error:
+	tqe_descs_free(priv);
+desc_alloc_error:
+	free_netdev(dev);
+netdev_alloc_error:
+	return NULL;
+}
+
+
+/*
+ * Tear down the "tqe" pseudo device created by tqe_netdev_init().
+ *
+ * The congestion queue must be released BEFORE free_netdev(): priv is
+ * embedded in the netdev allocation, so touching priv->congest_queue after
+ * free_netdev() is a use-after-free. The device was registered at init
+ * time, so it must also be unregistered before being freed.
+ */
+static void __exit tqe_netdev_exit(struct net_device *dev)
+{
+	struct tqe_netdev_priv *priv  = netdev_priv(dev);
+
+	device_remove_file(&dev->dev, &dev_attr_dbg);
+	tqe_irq_disable();
+	free_irq(dev->irq, dev);
+	/* NULL on CONFIG_TOPAZ_PCIE_HOST builds, which never allocate it */
+	if (priv->congest_queue)
+		topaz_congest_queue_exit(priv->congest_queue);
+	unregister_netdev(dev);
+	free_netdev(dev);
+}
+
+/* Singleton pseudo-device created at module init, destroyed at exit. */
+static struct net_device *tqe_netdev;
+
+/* Module entry point: build the TQE pseudo-device, fail load if it cannot
+ * be created. */
+static int __init tqe_module_init(void)
+{
+	struct net_device *dev = tqe_netdev_init();
+
+	tqe_netdev = dev;
+	if (!dev)
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Module exit point: tear down the pseudo-device created at init. */
+static void __exit tqe_module_exit(void)
+{
+	tqe_netdev_exit(tqe_netdev);
+}
+
+module_init(tqe_module_init);
+module_exit(tqe_module_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/qtn/topaz/switch_vlan.c b/drivers/qtn/topaz/switch_vlan.c
new file mode 100644
index 0000000..4b7d9d3
--- /dev/null
+++ b/drivers/qtn/topaz/switch_vlan.c
@@ -0,0 +1,941 @@
+/**
+ * Copyright (c) 2015 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/proc_fs.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/spinlock.h>
+#include <linux/net/bridge/br_public.h>
+
+#include <net80211/if_ethersubr.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/qtn_skb_cb.h>
+#include <qtn/qtn_vlan.h>
+#include <qtn/lhost_muc_comm.h>
+#include <drivers/ruby/emac_lib.h>
+
+/* Global VLAN on/off switch consulted by the TQE RX/TX fast paths. */
+__sram_data uint8_t vlan_enabled;
+EXPORT_SYMBOL(vlan_enabled);
+
+/* VLAN device tables indexed by interface index: LHost-virtual and bus
+ * address views of the same qtn_vlan_dev objects (shared with other CPUs). */
+__sram_data struct qtn_vlan_dev *vdev_tbl_lhost[VLAN_INTERFACE_MAX];
+EXPORT_SYMBOL(vdev_tbl_lhost);
+
+__sram_data struct qtn_vlan_dev *vdev_tbl_bus[VLAN_INTERFACE_MAX];
+EXPORT_SYMBOL(vdev_tbl_bus);
+
+/* Same objects re-indexed by TQE port number for fast per-port lookup. */
+__sram_data struct qtn_vlan_dev *vport_tbl_lhost[TOPAZ_TQE_NUM_PORTS];
+EXPORT_SYMBOL(vport_tbl_lhost);
+
+__sram_data struct qtn_vlan_dev *vport_tbl_bus[TOPAZ_TQE_NUM_PORTS];
+EXPORT_SYMBOL(vport_tbl_bus);
+
+/* Global per-VLAN tag/strip (tagrx) state shared across ports. */
+struct qtn_vlan_info qtn_vlan_info;
+EXPORT_SYMBOL(qtn_vlan_info);
+
+/* Node-cache-index to VAP index map; INVALID_VAP_IDX marks unused slots. */
+static __sram_data uint8_t node2vap_tbl[QTN_NCIDX_MAX];
+
+/* Protects all VLAN table/membership mutations in this file. */
+static DEFINE_SPINLOCK(lock);
+
+#define		SWITCH_VLAN_PROC	"topaz_vlan"
+#define		INVALID_VAP_IDX		0xff
+
+/* Atomically set/clear a VLAN's bit in the device's membership bitmap. */
+static inline void __switch_vlan_add_member(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	set_bit_a(vdev->u.member_bitmap, vid);
+}
+
+static inline void __switch_vlan_del_member(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	clr_bit_a(vdev->u.member_bitmap, vid);
+}
+
+/* Atomically set/clear a VLAN's bit in the device's egress-tagging bitmap. */
+static inline void __switch_vlan_tag_member(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	set_bit_a(vdev->tag_bitmap, vid);
+}
+
+static inline void __switch_vlan_untag_member(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	clr_bit_a(vdev->tag_bitmap, vid);
+}
+
+/*
+ * Store a VLAN's tagrx policy (a QVLAN_TAGRX_BITMASK-wide field) into the
+ * packed per-VLAN bitmap: clear the field, then OR in the new value.
+ */
+static inline void
+switch_vlan_set_tagrx(struct qtn_vlan_info *vlan_info, uint16_t vlanid, uint8_t tagrx)
+{
+	uint32_t *tagrx_bitmap = vlan_info->vlan_tagrx_bitmap;
+	tagrx = tagrx & QVLAN_TAGRX_BITMASK;
+
+	tagrx_bitmap[qvlan_tagrx_index(vlanid)] &=
+		~(QVLAN_TAGRX_BITMASK << qvlan_tagrx_shift(vlanid));
+
+	tagrx_bitmap[qvlan_tagrx_index(vlanid)] |=
+		tagrx << (qvlan_tagrx_shift(vlanid));
+}
+
+/*
+ * Compute the tagrx policy for @vlanid after a membership/tagging change on
+ * @vdev, keeping the two EMAC ports' tag/untag configuration consistent
+ * (forcing the peer EMAC port to match when they disagree).
+ *
+ * @member_quit: non-zero when @vdev is leaving the VLAN.
+ * Returns one of QVLAN_TAGRX_{UNTOUCH,TAG,STRIP} or the existing policy.
+ */
+static inline int switch_vlan_manage_tagrx(struct qtn_vlan_dev *vdev,
+		uint16_t vlanid, uint8_t tag, uint32_t member_quit)
+{
+	struct qtn_vlan_dev *other_dev;
+
+	/* Identify the "peer" port whose config must stay in sync. */
+	if (vdev->port == TOPAZ_TQE_EMAC_0_PORT)
+		other_dev = vport_tbl_lhost[TOPAZ_TQE_EMAC_1_PORT];
+	else if (vdev->port == TOPAZ_TQE_EMAC_1_PORT)
+		other_dev = vport_tbl_lhost[TOPAZ_TQE_EMAC_0_PORT];
+	else if (vdev->port == TOPAZ_TQE_PCIE_PORT || vdev->port == TOPAZ_TQE_DSP_PORT) {
+		other_dev = NULL;
+	} else {
+		/* Non-wired ports never change the global policy. */
+		return qtn_vlan_get_tagrx(qtn_vlan_info.vlan_tagrx_bitmap, vlanid);
+	}
+
+	if (other_dev && !member_quit
+			&& qtn_vlan_is_member(other_dev, vlanid)
+			&& qtn_vlan_is_tagged_member(other_dev, vlanid) != !!tag) {
+		/*
+		 * NOTE: All ethernet ports should have the same tag/untag config
+		 * for one VLAN ID. This is to avoid confusion for multicast packets
+		 * destined for multiple ethernet ports.
+		 */
+		printk(KERN_INFO"Warning:port %u forced to %s VLAN %u packets\n",
+			other_dev->port, tag ? "tag" : "untag", vlanid);
+
+		if (tag)
+			__switch_vlan_tag_member(other_dev, vlanid);
+		else
+			__switch_vlan_untag_member(other_dev, vlanid);
+	} else if (member_quit) {
+		/* Leaving: policy follows the remaining member, if any. */
+		if (!other_dev || !qtn_vlan_is_member(other_dev, vlanid))
+			return QVLAN_TAGRX_UNTOUCH;
+		else
+			return qtn_vlan_get_tagrx(qtn_vlan_info.vlan_tagrx_bitmap, vlanid);
+	}
+
+	return (tag ? QVLAN_TAGRX_TAG : QVLAN_TAGRX_STRIP);
+}
+
+/*
+ * Join @vdev to @vlanid (idempotent) with the given egress tagging mode,
+ * then refresh the global tagrx policy. Caller holds the VLAN lock.
+ */
+static void switch_vlan_add(struct qtn_vlan_dev *vdev, uint16_t vlanid, uint8_t tag)
+{
+	int tagrx;
+
+	if (!qtn_vlan_is_member(vdev, vlanid)) {
+		__switch_vlan_add_member(vdev, vlanid);
+	}
+
+	/* update tag bitmap */
+	if (tag)
+		__switch_vlan_tag_member(vdev, vlanid);
+	else
+		__switch_vlan_untag_member(vdev, vlanid);
+
+	tagrx = switch_vlan_manage_tagrx(vdev, vlanid, tag, 0);
+	switch_vlan_set_tagrx(&qtn_vlan_info, vlanid, tagrx);
+}
+
+/*
+ * Remove @vdev from @vlanid (no-op if not a member); tagrx policy is
+ * recomputed before the membership bits are cleared. Caller holds the lock.
+ */
+static void switch_vlan_del(struct qtn_vlan_dev *vdev, uint16_t vlanid)
+{
+	int tagrx;
+	if (!qtn_vlan_is_member(vdev, vlanid))
+		return;
+
+	tagrx = switch_vlan_manage_tagrx(vdev, vlanid, 0, 1);
+	switch_vlan_set_tagrx(&qtn_vlan_info, vlanid, tagrx);
+
+	__switch_vlan_del_member(vdev, vlanid);
+	__switch_vlan_untag_member(vdev, vlanid);
+}
+
+/*
+ * Allocate and publish a qtn_vlan_dev for (@port, @idx, @ifindex).
+ *
+ * Both the device and its user-interface record are DMA-coherent so other
+ * processors can see them; pointers are published with uncached writes into
+ * the lhost/bus lookup tables. The new device joins the priority VLAN and
+ * its default PVID as untagged member.
+ *
+ * Returns the new device, or NULL if @idx is taken or allocation fails.
+ * NOTE(review): if the vintf allocation fails, only vdev is freed here -
+ * vintf is still NULL at that point, so nothing leaks.
+ */
+struct qtn_vlan_dev *switch_alloc_vlan_dev(uint8_t port, uint8_t idx, int ifindex)
+{
+	struct qtn_vlan_dev *vdev = NULL;
+	struct qtn_vlan_user_interface *vintf = NULL;
+	dma_addr_t bus_addr, bus_addr2;
+
+	spin_lock_bh(&lock);
+
+	/* Slot already occupied: vdev is still NULL, out path is a no-op. */
+	if (vdev_tbl_lhost[idx] != NULL)
+		goto out;
+
+	vdev = (struct qtn_vlan_dev *)dma_alloc_coherent(NULL,
+		sizeof(struct qtn_vlan_dev), &bus_addr, GFP_ATOMIC);
+	if (!vdev)
+		goto out;
+
+	memset(vdev, 0, sizeof(*vdev));
+	vdev->pvid = QVLAN_DEF_PVID;
+	vdev->bus_addr = (unsigned long)bus_addr;
+	vdev->port = port;
+	vdev->idx = idx;
+	vdev->ifindex = ifindex;
+
+	vintf = (struct qtn_vlan_user_interface *)dma_alloc_coherent(NULL,
+		sizeof(struct qtn_vlan_user_interface), &bus_addr2, GFP_ATOMIC);
+	if (!vintf)
+		goto out;
+
+	memset(vintf, 0, sizeof(*vintf));
+	vintf->bus_addr = bus_addr2;
+	vintf->mode = QVLAN_MODE_ACCESS;
+	vdev->user_data = (void *)vintf;
+
+	/* Publish through uncached writes so other CPUs see the update. */
+	arc_write_uncached_32((uint32_t *)&vdev_tbl_lhost[idx], (uint32_t)vdev);
+	arc_write_uncached_32((uint32_t *)&vdev_tbl_bus[idx], (uint32_t)bus_addr);
+
+	if (qtn_vlan_port_indexable(port)) {
+		arc_write_uncached_32((uint32_t *)&vport_tbl_lhost[port], (uint32_t)vdev);
+		arc_write_uncached_32((uint32_t *)&vport_tbl_bus[port], (uint32_t)bus_addr);
+	}
+
+	switch_vlan_add(vdev, QVLAN_PRIO_VID, 0);
+	switch_vlan_add(vdev, vdev->pvid, 0);
+
+	spin_unlock_bh(&lock);
+	return vdev;
+
+out:
+	if (vdev)
+		dma_free_coherent(NULL, sizeof(struct qtn_vlan_dev), vdev, (dma_addr_t)(vdev->bus_addr));
+	spin_unlock_bh(&lock);
+
+	return NULL;
+}
+EXPORT_SYMBOL(switch_alloc_vlan_dev);
+
+/*
+ * Unpublish and free a qtn_vlan_dev and its user-interface record.
+ *
+ * Mirrors switch_alloc_vlan_dev(): the vdev tables are indexed by the
+ * interface index, while the per-port tables are indexed by the TQE port
+ * number. The original code cleared vport_tbl_*[vdev->idx], leaving a stale
+ * pointer at vport_tbl_*[vdev->port]; index by port, as the allocator does.
+ */
+void switch_free_vlan_dev(struct qtn_vlan_dev *vdev)
+{
+	struct qtn_vlan_user_interface *vintf = (struct qtn_vlan_user_interface *)vdev->user_data;
+
+	spin_lock_bh(&lock);
+	/* vlan_info_tbl[info->idx] = NULL; */
+	arc_write_uncached_32((uint32_t *)&vdev_tbl_lhost[vdev->idx], (uint32_t)NULL);
+	arc_write_uncached_32((uint32_t *)&vdev_tbl_bus[vdev->idx], (uint32_t)NULL);
+
+	if (qtn_vlan_port_indexable(vdev->port)) {
+		arc_write_uncached_32((uint32_t *)&vport_tbl_lhost[vdev->port], (uint32_t)NULL);
+		arc_write_uncached_32((uint32_t *)&vport_tbl_bus[vdev->port], (uint32_t)NULL);
+	}
+	spin_unlock_bh(&lock);
+
+	dma_free_coherent(NULL, sizeof(struct qtn_vlan_dev), vdev, (dma_addr_t)(vdev->bus_addr));
+	dma_free_coherent(NULL, sizeof(struct qtn_vlan_user_interface), vintf, (dma_addr_t)(vintf->bus_addr));
+}
+EXPORT_SYMBOL(switch_free_vlan_dev);
+
+/*
+ * Free the VLAN device registered at interface index @idx.
+ * NOTE(review): assumes a device is registered at @idx - a NULL entry would
+ * crash inside switch_free_vlan_dev(); verify callers guarantee this.
+ */
+void switch_free_vlan_dev_by_idx(uint8_t idx)
+{
+	BUG_ON(idx >= VLAN_INTERFACE_MAX);
+
+	switch_free_vlan_dev(vdev_tbl_lhost[idx]);
+}
+EXPORT_SYMBOL(switch_free_vlan_dev_by_idx);
+
+#ifdef CONFIG_TOPAZ_DBDC_HOST
+/* TQE port used for PCIe relay in DBDC configurations; registered by the
+ * PCIe driver, defaults to the drop port (i.e. "not registered"). */
+static enum topaz_tqe_port g_topaz_tqe_pcie_rel_port = TOPAZ_TQE_DROP_PORT;
+void tqe_register_pcie_rel_port(const enum topaz_tqe_port tqe_port)
+{
+	g_topaz_tqe_pcie_rel_port = tqe_port;
+}
+EXPORT_SYMBOL(tqe_register_pcie_rel_port);
+#endif
+
+/*
+ * Look up the VLAN device for a TQE port. In DBDC builds the port argument
+ * carries an encoded device id; PCIe-relay traffic maps to a per-device
+ * virtual interface instead of a physical port slot.
+ */
+struct qtn_vlan_dev*
+switch_vlan_dev_get_by_port(uint8_t port)
+{
+#ifdef CONFIG_TOPAZ_DBDC_HOST
+	uint8_t dev_id = EXTRACT_DEV_ID_FROM_PORT_ID(port);
+
+	port = EXTRACT_PORT_ID_FROM_PORT_ID(port);
+
+	if (port == g_topaz_tqe_pcie_rel_port)
+		return vdev_tbl_lhost[QFP_VDEV_IDX(dev_id)];
+#endif
+	return vport_tbl_lhost[port];
+}
+EXPORT_SYMBOL(switch_vlan_dev_get_by_port);
+
+/* Look up the VLAN device by interface index (may be NULL). */
+struct qtn_vlan_dev*
+switch_vlan_dev_get_by_idx(uint8_t idx)
+{
+	return vdev_tbl_lhost[idx];
+}
+EXPORT_SYMBOL(switch_vlan_dev_get_by_idx);
+
+typedef void (*_fn_vlan_member)(struct qtn_vlan_dev *vdev,
+			uint16_t vid, uint8_t tag);
+/*
+ * Apply @handler to a single VLAN id, or to every id when @vid is
+ * QVLAN_VID_ALL. Returns 0 on success, -EINVAL for an out-of-range vid.
+ */
+static int switch_vlan_member_comm(struct qtn_vlan_dev *vdev, uint16_t vid,
+		uint8_t tag, _fn_vlan_member handler)
+{
+	uint16_t v;
+
+	if (vid == QVLAN_VID_ALL) {
+		for (v = 0; v < QVLAN_VID_MAX; v++)
+			handler(vdev, v, tag);
+		return 0;
+	}
+
+	if (vid >= QVLAN_VID_MAX)
+		return -EINVAL;
+
+	handler(vdev, vid, tag);
+	return 0;
+}
+
+/* Locked per-vid worker for switch_vlan_add_member(). */
+static void _vlan_add_member(struct qtn_vlan_dev *vdev, uint16_t vid, uint8_t tag)
+{
+	spin_lock_bh(&lock);
+	switch_vlan_add(vdev, vid, tag);
+	spin_unlock_bh(&lock);
+}
+
+/* Add @vdev to @vid (or all VLANs), tagged or untagged. */
+int switch_vlan_add_member(struct qtn_vlan_dev *vdev, uint16_t vid, uint8_t tag)
+{
+	return switch_vlan_member_comm(vdev, vid, tag, _vlan_add_member);
+}
+EXPORT_SYMBOL(switch_vlan_add_member);
+
+/* Locked per-vid worker for switch_vlan_del_member(); @arg is unused. */
+static void _vlan_del_member(struct qtn_vlan_dev *vdev, uint16_t vid, uint8_t arg)
+{
+	spin_lock_bh(&lock);
+	switch_vlan_del(vdev, vid);
+	spin_unlock_bh(&lock);
+}
+
+/* Remove @vdev from @vid (or all VLANs). */
+int switch_vlan_del_member(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	return switch_vlan_member_comm(vdev, vid, 0, _vlan_del_member);
+}
+EXPORT_SYMBOL(switch_vlan_del_member);
+
+/* Locked per-vid worker: mark an existing membership as tagged and refresh
+ * the global tagrx policy; @arg is unused. */
+static void _vlan_tag_member(struct qtn_vlan_dev *vdev, uint16_t vid, uint8_t arg)
+{
+	int tagrx;
+	spin_lock_bh(&lock);
+	if (!qtn_vlan_is_member(vdev, vid))
+		goto out;
+
+	__switch_vlan_tag_member(vdev, vid);
+	tagrx = switch_vlan_manage_tagrx(vdev, vid, 1, 0);
+	switch_vlan_set_tagrx(&qtn_vlan_info, vid, tagrx);
+out:
+	spin_unlock_bh(&lock);
+}
+
+/* Switch @vdev's membership of @vid (or all VLANs) to tagged egress. */
+int switch_vlan_tag_member(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	return switch_vlan_member_comm(vdev, vid, 0, _vlan_tag_member);
+}
+EXPORT_SYMBOL(switch_vlan_tag_member);
+
+/* Locked per-vid worker: mark an existing membership as untagged and refresh
+ * the global tagrx policy; @arg is unused. */
+static void _vlan_untag_member(struct qtn_vlan_dev *vdev, uint16_t vid, uint8_t arg)
+{
+	int tagrx;
+	spin_lock_bh(&lock);
+	if (!qtn_vlan_is_member(vdev, vid))
+		goto out;
+
+	__switch_vlan_untag_member(vdev, vid);
+	tagrx = switch_vlan_manage_tagrx(vdev, vid, 0, 0);
+	switch_vlan_set_tagrx(&qtn_vlan_info, vid, tagrx);
+out:
+	spin_unlock_bh(&lock);
+}
+
+/* Switch @vdev's membership of @vid (or all VLANs) to untagged egress. */
+int switch_vlan_untag_member(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	return switch_vlan_member_comm(vdev, vid, 0, _vlan_untag_member);
+}
+EXPORT_SYMBOL(switch_vlan_untag_member);
+
+/* Replace the port VLAN id: leave the old PVID, join the new one untagged.
+ * Caller holds the VLAN lock. */
+static void __switch_vlan_set_pvid(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	switch_vlan_del(vdev, vdev->pvid);
+	switch_vlan_add(vdev, vid, 0);
+
+	vdev->pvid = vid;
+}
+
+/* Set @vdev's PVID; rejects out-of-range ids. Returns 0 or -EINVAL. */
+int switch_vlan_set_pvid(struct qtn_vlan_dev *vdev, uint16_t vid)
+{
+	if (vid >= QVLAN_VID_MAX)
+		return -EINVAL;
+
+	spin_lock_bh(&lock);
+	__switch_vlan_set_pvid(vdev, vid);
+	spin_unlock_bh(&lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(switch_vlan_set_pvid);
+
+/* Set the device's default 802.1p priority (0..QVLAN_PRIO_MAX).
+ * Returns 0 on success, -EINVAL when the value is out of range. */
+int switch_vlan_set_priority(struct qtn_vlan_dev *vdev, uint8_t priority)
+{
+	int ret = -EINVAL;
+
+	if (priority <= QVLAN_PRIO_MAX) {
+		spin_lock_bh(&lock);
+		vdev->priority = priority;
+		spin_unlock_bh(&lock);
+		ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(switch_vlan_set_priority);
+
+/* Update the user-interface VLAN mode if it actually changes.
+ * Caller holds the VLAN lock. */
+static void __switch_vlan_set_mode(struct qtn_vlan_dev *vdev, uint8_t mode)
+{
+	if (qtn_vlan_is_mode(vdev, mode))
+		return;
+
+	((struct qtn_vlan_user_interface *)vdev->user_data)->mode = mode;
+}
+
+/* Set the VLAN mode (access/trunk/...). Returns 0 or -EINVAL. */
+int switch_vlan_set_mode(struct qtn_vlan_dev *vdev, uint8_t mode)
+{
+	if (mode >= QVLAN_MODE_MAX)
+		return -EINVAL;
+
+	spin_lock_bh(&lock);
+	__switch_vlan_set_mode(vdev, mode);
+	spin_unlock_bh(&lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(switch_vlan_set_mode);
+
+/* Reset all membership/tagging/counter state to defaults.
+ * Caller holds the VLAN lock. */
+static inline void __switch_vlan_clear_dev(struct qtn_vlan_dev *vdev)
+{
+	memset(&vdev->u, 0, sizeof(vdev->u));
+	memset(&vdev->tag_bitmap, 0, sizeof(vdev->tag_bitmap));
+	memset(&vdev->ig_pass, 0, sizeof(vdev->ig_pass));
+	memset(&vdev->ig_drop, 0, sizeof(vdev->ig_drop));
+	memset(&vdev->eg_pass, 0, sizeof(vdev->eg_pass));
+	memset(&vdev->eg_drop, 0, sizeof(vdev->eg_drop));
+	vdev->pvid = QVLAN_DEF_PVID;
+	vdev->flags = 0;
+	vdev->priority = 0;
+}
+
+/* Enter dynamic-VLAN mode: wipe static config and set the dynamic flag. */
+void switch_vlan_dyn_enable(struct qtn_vlan_dev *vdev)
+{
+	spin_lock_bh(&lock);
+
+	__switch_vlan_clear_dev(vdev);
+	vdev->flags |= QVLAN_DEV_F_DYNAMIC;
+
+	spin_unlock_bh(&lock);
+}
+EXPORT_SYMBOL(switch_vlan_dyn_enable);
+
+/* Leave dynamic-VLAN mode: wipe state and rejoin the default PVID.
+ * (The pvid assignment repeats what __switch_vlan_clear_dev() already set;
+ * harmless redundancy.) */
+void switch_vlan_dyn_disable(struct qtn_vlan_dev *vdev)
+{
+	spin_lock_bh(&lock);
+
+	__switch_vlan_clear_dev(vdev);
+	vdev->pvid = QVLAN_DEF_PVID;
+	switch_vlan_add(vdev, vdev->pvid, 0);
+
+	spin_unlock_bh(&lock);
+}
+EXPORT_SYMBOL(switch_vlan_dyn_disable);
+
+/*
+ * Dynamic VLAN: bind node-cache index @ncidx to @vlanid. Only valid for a
+ * dynamic WMAC-port device with in-range arguments.
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+int switch_vlan_set_node(struct qtn_vlan_dev *vdev, uint16_t ncidx, uint16_t vlanid)
+{
+	int ret = 0;
+
+	spin_lock_bh(&lock);
+
+	if (!QVLAN_IS_DYNAMIC(vdev)
+			|| ncidx >= QTN_NCIDX_MAX
+			|| !qtn_vlan_is_valid(vlanid)
+			|| vdev->port != TOPAZ_TQE_WMAC_PORT) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vdev->u.node_vlan[ncidx] = vlanid;
+
+out:
+	spin_unlock_bh(&lock);
+	return ret;
+}
+EXPORT_SYMBOL(switch_vlan_set_node);
+
+/*
+ * Dynamic VLAN: clear the binding for node-cache index @ncidx
+ * (QVLAN_VID_ALL marks "unbound"). Returns 0 on success, -EINVAL otherwise.
+ */
+int switch_vlan_clr_node(struct qtn_vlan_dev *vdev, uint16_t ncidx)
+{
+	int ret = 0;
+
+	spin_lock_bh(&lock);
+
+	if (!QVLAN_IS_DYNAMIC(vdev)
+			|| ncidx >= QTN_NCIDX_MAX
+			|| vdev->port != TOPAZ_TQE_WMAC_PORT) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vdev->u.node_vlan[ncidx] = QVLAN_VID_ALL;
+
+out:
+	spin_unlock_bh(&lock);
+	return ret;
+}
+EXPORT_SYMBOL(switch_vlan_clr_node);
+
+/*
+ * Insert an 802.1Q header carrying @vlan_tci into @skb, preserving the
+ * qtn_vlan_pkt control block that sits in front of the MAC header.
+ * Consumes the skb on allocation failure; returns the (possibly
+ * reallocated) skb or NULL.
+ */
+static struct sk_buff *switch_vlan_tag_pkt(struct sk_buff *skb, uint16_t vlan_tci)
+{
+	struct vlan_ethhdr *veth;
+	struct qtn_vlan_pkt old;
+	struct qtn_vlan_pkt *new;
+
+	/* Save control info: skb_cow_head may reallocate the header. */
+	memcpy(&old, qtn_vlan_get_info(skb->data), sizeof(old));
+
+	if (skb_cow_head(skb, VLAN_HLEN) < 0) {
+		kfree_skb(skb);
+		return NULL;
+	}
+	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+
+	/* Move the mac addresses to the beginning of the new header. */
+	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+	veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);
+	veth->h_vlan_TCI = htons(vlan_tci);
+
+	/* Restore control info in front of the new header position and mark
+	 * the frame as tagged (priority-only tags get a distinct flag). */
+	new = qtn_vlan_get_info(skb->data);
+	memcpy(new, &old, sizeof(*new));
+	new->flag |= ((vlan_tci & QVLAN_MASK_VID) != QVLAN_PRIO_VID
+			? QVLAN_PKT_TAGGED : QVLAN_PKT_ZERO_TAGGED);
+
+	return skb;
+}
+
+/*
+ * Strip the 802.1Q header from @skb (optionally operating on a copy while
+ * freeing the original), keeping the qtn_vlan_pkt control block adjacent to
+ * the shifted MAC addresses. Returns the resulting skb or NULL.
+ */
+static struct sk_buff *switch_vlan_untag_pkt(struct sk_buff *skb, int copy)
+{
+	struct sk_buff *skb2;
+	struct vlan_ethhdr *veth;
+	struct qtn_vlan_pkt *pktinfo;
+
+	if (copy) {
+		skb2 = skb_copy(skb, GFP_ATOMIC);
+		kfree_skb(skb);
+	} else {
+		skb2 = skb;
+	}
+
+	if (!skb2)
+		return NULL;
+
+	/* Slide control block + MAC addresses forward over the VLAN header. */
+	veth = (struct vlan_ethhdr *)(skb2->data);
+	memmove((uint8_t *)veth - QVLAN_PKTCTRL_LEN + VLAN_HLEN,
+		(uint8_t *)veth - QVLAN_PKTCTRL_LEN,
+		QVLAN_PKTCTRL_LEN + 2 * ETH_ALEN);
+
+	skb_pull(skb2, VLAN_HLEN);
+
+	pktinfo = qtn_vlan_get_info(skb2->data);
+	pktinfo->flag &= ~(QVLAN_PKT_TAGGED | QVLAN_PKT_ZERO_TAGGED);
+
+	return skb2;
+}
+
+/*
+ * Rewrite the TCI of an existing 802.1Q header to @vlan_tci (optionally on
+ * a copy). If no 802.1Q header is present the frame is returned unchanged.
+ * Returns the resulting skb or NULL.
+ */
+static struct sk_buff *switch_vlan_replace_tag(struct sk_buff *skb, uint16_t vlan_tci, int copy)
+{
+	struct sk_buff *skb2 = skb;
+	struct vlan_ethhdr *veth;
+	struct qtn_vlan_pkt *pktinfo;
+
+	veth = (struct vlan_ethhdr *)(skb->data);
+	if (unlikely(veth->h_vlan_proto != __constant_htons(ETH_P_8021Q))) {
+		if (printk_ratelimit())
+			printk(KERN_ERR"802.1Q VLAN header is missing\n");
+		return skb;
+	}
+
+	if (copy) {
+		skb2 = skb_copy(skb, GFP_ATOMIC);
+		kfree_skb(skb);
+	}
+
+	if (!skb2)
+		return NULL;
+
+	veth = (struct vlan_ethhdr *)(skb2->data);
+	veth->h_vlan_TCI = htons(vlan_tci);
+
+	/* Was a priority-only tag; now carries a real VLAN id. */
+	pktinfo = qtn_vlan_get_info(skb2->data);
+	pktinfo->flag &= ~QVLAN_PKT_ZERO_TAGGED;
+	pktinfo->flag |= QVLAN_PKT_TAGGED;
+
+	return skb2;
+}
+
+/*
+ * Find the bridge device that @ndev (or its bonding master) is attached
+ * to.  Return: the bridge's net_device, or NULL when not bridged.
+ */
+struct net_device *switch_vlan_find_br(struct net_device *ndev)
+{
+	struct net_device *brdev = NULL;
+	struct net_bridge_port *port = NULL;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	port = get_br_port(ndev);
+#else
+	/* For a bonding slave, look at the master's bridge port. */
+	if ((ndev->flags & IFF_SLAVE) && ndev->master)
+		ndev = ndev->master;
+
+	/* Unconditional: previous indentation wrongly suggested this was
+	 * part of the if-body above. */
+	port = ndev->br_port;
+#endif
+
+	/* NOTE(review): rcu_dereference() on a local variable only inserts
+	 * a read barrier; the usual idiom is rcu_dereference(ndev->br_port)
+	 * inside the read-side section -- behaviour kept as-is. */
+	rcu_read_lock();
+	if (rcu_dereference(port) != NULL)
+		brdev = port->br->dev;
+	rcu_read_unlock();
+
+	return brdev;
+}
+
+/*
+ * supposed to be called after eth_type_trans and before netif_receive_skb
+ * VLAN ingress handling should be done already
+ */
+/*
+ * Normalize the VLAN tagging of an ingress frame before it is handed to
+ * the protocol stack: tag, untag or retag so that the frame's on-wire
+ * state matches what the bridge expects for this VLAN.
+ *
+ * @copy: propagated to the tag-manipulation helpers (work on a copy).
+ * Return: the (possibly re-written) skb, or NULL if a helper failed.
+ */
+struct sk_buff *switch_vlan_to_proto_stack(struct sk_buff *skb, int copy)
+{
+	struct qtn_vlan_pkt *pkt;
+	uint16_t vlan_id;
+	struct net_device *brdev;
+	int tag_in_frame;
+	int prio_tag_in_frame;
+	int should_tag;
+
+	BUG_ON(!skb_mac_header_was_set(skb));
+
+	if (!vlan_enabled)
+		return skb;
+
+	/* EAPOL frames bypass VLAN processing entirely. */
+	if (skb->protocol == __constant_htons(ETH_P_PAE))
+		return skb;
+
+	M_FLAG_SET(skb, M_ORIG_OUTSIDE);
+
+	/* The qtn_vlan_pkt control block sits just before the MAC header. */
+	pkt = qtn_vlan_get_info(skb_mac_header(skb));
+
+	tag_in_frame = !!(pkt->flag & QVLAN_PKT_TAGGED);
+	prio_tag_in_frame = !!(pkt->flag & QVLAN_PKT_ZERO_TAGGED);
+	vlan_id = (pkt->vlan_info & QVLAN_MASK_VID);
+
+	/* Only frames destined to a bridge need the tag state fixed up. */
+	brdev = switch_vlan_find_br(skb->dev);
+	if (likely(brdev)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+		should_tag = 0;
+		//TODO: need to find replacement for vlan_check_vlan_exist
+#else
+		should_tag = !!vlan_check_vlan_exist(brdev, vlan_id);
+#endif
+	} else {
+		return skb;
+	}
+
+	if (tag_in_frame != should_tag || prio_tag_in_frame != should_tag) {
+		/* Re-expose the MAC header so the helpers can edit it. */
+		skb_push(skb, ETH_HLEN);
+		if (should_tag) {
+			if (prio_tag_in_frame) {
+				skb = switch_vlan_replace_tag(skb, pkt->vlan_info, copy);
+			} else if (!tag_in_frame) {
+				skb = switch_vlan_tag_pkt(skb, pkt->vlan_info);
+			}
+		} else {
+			skb = switch_vlan_untag_pkt(skb, copy);
+		}
+
+		/* Re-parse the (possibly rewritten) header. */
+		if (skb)
+			skb->protocol = eth_type_trans(skb, skb->dev);
+	}
+
+	return skb;
+}
+EXPORT_SYMBOL(switch_vlan_to_proto_stack);
+
+/*
+ * Ensure @skb has room for a qtn_vlan_pkt control block in its headroom
+ * and initialize it (magic set, flags cleared).
+ * Return: the prepared skb (possibly a reallocated copy), or NULL on
+ *         allocation failure; the original skb is consumed on failure.
+ */
+static struct sk_buff *switch_vlan_add_pktinfo(struct sk_buff *skb)
+{
+	struct qtn_vlan_pkt *pktinfo;
+	struct sk_buff *grown;
+
+	if (skb_headroom(skb) < QVLAN_PKTCTRL_LEN) {
+		/* Not enough headroom: reallocate with extra space in front. */
+		grown = skb_copy_expand(skb, QVLAN_PKTCTRL_LEN * 2,
+				skb_tailroom(skb), GFP_ATOMIC);
+		kfree_skb(skb);
+		if (!grown)
+			return NULL;
+
+		skb = grown;
+	}
+
+	pktinfo = qtn_vlan_get_info(skb->data);
+	pktinfo->magic = QVLAN_PKT_MAGIC;
+	pktinfo->flag = 0;
+
+	M_FLAG_SET(skb, M_VLAN_TAGGED);
+	return skb;
+}
+
+/*
+ * Egress-side VLAN handling for frames leaving the protocol stack via
+ * @outdev toward node @ncidx: attach a control block to locally generated
+ * frames, run the egress VLAN policy, then perform the tag/untag actions
+ * it requested.
+ *
+ * @copy: propagated to the tag-manipulation helpers.
+ * Return: the (possibly re-written) skb, or NULL if the frame was dropped
+ *         or an allocation failed (skb consumed in both cases).
+ */
+struct sk_buff *switch_vlan_from_proto_stack(struct sk_buff *skb, struct qtn_vlan_dev *outdev,
+	uint16_t ncidx, int copy)
+{
+	struct qtn_vlan_pkt *pktinfo;
+	struct vlan_ethhdr *veth;
+	uint16_t vlan_tci;
+	uint8_t vlan_flag;
+
+	if (!vlan_enabled)
+		return skb;
+
+	if (!M_FLAG_ISSET(skb, M_ORIG_OUTSIDE)) {
+		/*
+		 * The packet is generated by the device.
+		 * A qtn_vlan_pkt structure is needed.
+		 */
+		skb = switch_vlan_add_pktinfo(skb);
+		if (!skb)
+			return NULL;
+
+		pktinfo = qtn_vlan_get_info(skb->data);
+
+		if (M_FLAG_ISSET(skb, M_ORIG_BR)) {
+			veth = (struct vlan_ethhdr *)(skb->data);
+
+			/* Bridge-originated frames may already carry a tag. */
+			if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+				vlan_tci = ntohs(veth->h_vlan_TCI);
+				pktinfo->flag |= QVLAN_PKT_TAGGED;
+				pktinfo->vlan_info = vlan_tci;
+			} else {
+				pktinfo->flag |= QVLAN_PKT_SKIP_CHECK;
+			}
+		} else {
+			pktinfo->flag |= QVLAN_PKT_SKIP_CHECK;
+		}
+	}
+
+	if (!qtn_vlan_egress(outdev, ncidx, skb->data, 1, 0))
+		goto drop_out;
+
+	pktinfo = qtn_vlan_get_info(skb->data);
+	vlan_flag = pktinfo->flag;
+	vlan_tci = pktinfo->vlan_info;
+
+	if (vlan_flag & QVLAN_PKT_TXACTION_UNTAG)
+		skb = switch_vlan_untag_pkt(skb, copy);
+
+	if ((vlan_flag & QVLAN_PKT_TXACTION_TAG) && skb) {
+		if (vlan_flag & QVLAN_PKT_TXACTION_VLAN0)
+			vlan_tci = QVLAN_PRIO_VID | (vlan_tci & ~QVLAN_MASK_VID);
+
+		skb = switch_vlan_tag_pkt(skb, vlan_tci);
+	}
+
+	/*
+	 * Both tag helpers consume the skb and may return NULL on
+	 * allocation failure; bail out before dereferencing skb->data.
+	 */
+	if (!skb)
+		return NULL;
+
+	/* Mark the skb as SKIP_CHECK so AuC won't do repeated work */
+	pktinfo = qtn_vlan_get_info(skb->data);
+	pktinfo->flag |= QVLAN_PKT_SKIP_CHECK;
+
+	return skb;
+drop_out:
+	kfree_skb(skb);
+	return NULL;
+}
+EXPORT_SYMBOL(switch_vlan_from_proto_stack);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+
+/*
+ * Dump per-interface ingress/egress pass/drop and magic-mismatch counters
+ * for every registered VLAN device into the seq_file passed via @data.
+ */
+static void switch_vlan_stats_sprintf(void *data)
+{
+	struct seq_file *sfile = data;
+	struct qtn_vlan_dev *vdev;
+	struct net_device *ndev;
+	int i;
+
+	/* Serialize against table updates while walking vdev_tbl_lhost. */
+	spin_lock_bh(&lock);
+
+	for (i = 0; i < VLAN_INTERFACE_MAX; i++) {
+		vdev = vdev_tbl_lhost[i];
+		if (!vdev)
+			continue;
+
+		/* Holds a reference; released by dev_put() below. */
+		ndev = dev_get_by_index(&init_net, vdev->ifindex);
+		if (unlikely(!ndev))
+			continue;
+
+		seq_printf(sfile, "%s\ti-pass\t\te-pass\t\ti-drop\t\te-drop\t\tmagic-invalid\n", ndev->name);
+		seq_printf(sfile, "Lhost\t%u\t\t%u\t\t%u\t\t%u\t\t%u\n", vdev->ig_pass.lhost, vdev->eg_pass.lhost,
+				vdev->ig_drop.lhost, vdev->eg_drop.lhost, vdev->magic_invalid.lhost);
+		seq_printf(sfile, "MuC\t%u\t\t%u\t\t%u\t\t%u\t\t%u\n", vdev->ig_pass.muc, vdev->eg_pass.muc,
+				vdev->ig_drop.muc, vdev->eg_drop.muc, vdev->magic_invalid.muc);
+
+		dev_put(ndev);
+	}
+
+	spin_unlock_bh(&lock);
+}
+
+/* seq_file show callback: emit the full VLAN statistics report. */
+static int switch_vlan_proc_show(struct seq_file *sfile, void *v)
+{
+	switch_vlan_stats_sprintf(sfile);
+
+	return 0;
+}
+
+/* proc open: single-shot seq_file wrapping switch_vlan_proc_show(). */
+static int switch_vlan_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, switch_vlan_proc_show, PDE_DATA(inode));
+}
+
+/* Read-only proc interface for the VLAN statistics file (>= 4.7 kernels). */
+static const struct file_operations switch_vlan_stats_fops = {
+	.owner		= THIS_MODULE,
+	.open		= switch_vlan_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#else
+/*
+ * Legacy (pre-4.7) create_proc_read_entry() callback: same report as
+ * switch_vlan_stats_sprintf() but written straight into @page.
+ * NOTE(review): ignores @offset/@count beyond setting *eof, so the whole
+ * report is assumed to fit in one page -- the historical proc_read idiom.
+ */
+static int switch_vlan_stats_rd(char *page, char **start, off_t offset,
+		int count, int *eof, void *data)
+{
+	char *p = page;
+	struct qtn_vlan_dev *vdev;
+	struct net_device *ndev;
+	int i;
+
+	/* Serialize against table updates while walking vdev_tbl_lhost. */
+	spin_lock_bh(&lock);
+
+	for (i = 0; i < VLAN_INTERFACE_MAX; i++) {
+		vdev = vdev_tbl_lhost[i];
+		if (!vdev)
+			continue;
+
+		ndev = dev_get_by_index(&init_net, vdev->ifindex);
+		if (unlikely(!ndev))
+			continue;
+
+		p += sprintf(p, "%s\ti-pass\t\te-pass\t\ti-drop\t\te-drop\t\tmagic-invalid\n", ndev->name);
+		p += sprintf(p, "Lhost\t%u\t\t%u\t\t%u\t\t%u\t\t%u\n", vdev->ig_pass.lhost, vdev->eg_pass.lhost,
+				vdev->ig_drop.lhost, vdev->eg_drop.lhost, vdev->magic_invalid.lhost);
+		p += sprintf(p, "MuC\t%u\t\t%u\t\t%u\t\t%u\t\t%u\n", vdev->ig_pass.muc, vdev->eg_pass.muc,
+				vdev->ig_drop.muc, vdev->eg_drop.muc, vdev->magic_invalid.muc);
+
+		dev_put(ndev);
+	}
+
+	spin_unlock_bh(&lock);
+
+	*eof = 1;
+	return p - page;
+}
+#endif
+
+/*
+ * Reset @vdev to a pristine state: remove every VLAN membership, clear
+ * the member and tag bitmaps, re-add the priority VLAN, restore the
+ * default PVID and apply @mode.  Runs under the module spinlock.
+ */
+void switch_vlan_dev_reset(struct qtn_vlan_dev *vdev, uint8_t mode)
+{
+	uint32_t i;
+
+	spin_lock_bh(&lock);
+	/* Tear down each currently-joined VLAN individually. */
+	for (i = 0; i < QVLAN_VID_MAX; i++) {
+		if (qtn_vlan_is_member(vdev, i))
+			switch_vlan_del(vdev, i);
+	}
+
+	memset(vdev->u.member_bitmap, 0, sizeof(vdev->u.member_bitmap));
+	memset(vdev->tag_bitmap, 0, sizeof(vdev->tag_bitmap));
+
+	/* Re-establish baseline: priority VLAN member, default PVID, mode. */
+	switch_vlan_add(vdev, QVLAN_PRIO_VID, 0);
+	__switch_vlan_set_pvid(vdev, QVLAN_DEF_PVID);
+	__switch_vlan_set_mode(vdev, mode);
+
+	vdev->priority = 0;
+
+	spin_unlock_bh(&lock);
+}
+EXPORT_SYMBOL(switch_vlan_dev_reset);
+
+/* Reset every registered VLAN device back to access mode. */
+void switch_vlan_reset(void)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < VLAN_INTERFACE_MAX; idx++) {
+		struct qtn_vlan_dev *vdev = vdev_tbl_lhost[idx];
+
+		if (vdev != NULL)
+			switch_vlan_dev_reset(vdev, QVLAN_MODE_ACCESS);
+	}
+}
+EXPORT_SYMBOL(switch_vlan_reset);
+
+/*
+ * Record that node @ncidx maps to VLAN device @vdev.
+ * Return: 0 on success, -EINVAL if @ncidx is out of range.
+ */
+int switch_vlan_register_node(uint16_t ncidx, struct qtn_vlan_dev *vdev)
+{
+	if (likely(ncidx < ARRAY_SIZE(node2vap_tbl))) {
+		node2vap_tbl[ncidx] = vdev->idx;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(switch_vlan_register_node);
+
+/*
+ * Forget the node -> VLAN-device mapping for @ncidx.
+ * Bounds-checked to match switch_vlan_register_node(); previously an
+ * out-of-range index wrote past the end of node2vap_tbl.
+ */
+void switch_vlan_unregister_node(uint16_t ncidx)
+{
+	if (unlikely(ncidx >= ARRAY_SIZE(node2vap_tbl)))
+		return;
+
+	node2vap_tbl[ncidx] = INVALID_VAP_IDX;
+}
+EXPORT_SYMBOL(switch_vlan_unregister_node);
+
+/*
+ * Look up the VLAN device registered for node @ncidx.
+ * Return: the device, or NULL if @ncidx is out of range or unmapped.
+ * Bounds check added to match switch_vlan_register_node(); previously an
+ * out-of-range index read past the end of node2vap_tbl.
+ */
+struct qtn_vlan_dev *switch_vlan_dev_from_node(uint16_t ncidx)
+{
+	if (unlikely(ncidx >= ARRAY_SIZE(node2vap_tbl)))
+		return NULL;
+
+	if (node2vap_tbl[ncidx] == INVALID_VAP_IDX)
+		return NULL;
+
+	return vdev_tbl_lhost[node2vap_tbl[ncidx]];
+}
+EXPORT_SYMBOL(switch_vlan_dev_from_node);
+
+/*
+ * Module init: create the statistics proc entry and invalidate the
+ * node-to-VAP lookup table.
+ * Fixes vs. original: the legacy branch had a stray ')' and a missing
+ * ';'; the version cutoff was 4.6.3 while switch_vlan_stats_fops /
+ * switch_vlan_stats_rd are compiled under a 4.7.0 check, which broke the
+ * build for kernels in between -- use 4.7.0 consistently.
+ */
+static int __init switch_vlan_module_init(void)
+{
+	struct proc_dir_entry *entry;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+	entry = proc_create_data(SWITCH_VLAN_PROC, 0600, NULL, &switch_vlan_stats_fops, NULL);
+	if (!entry) {
+		return -ENODEV;
+	}
+#else
+	entry = create_proc_read_entry(SWITCH_VLAN_PROC, 0,
+			NULL, switch_vlan_stats_rd, NULL);
+	if (!entry) {
+		return -EEXIST;
+	}
+#endif
+
+	/* NOTE(review): memset() repeats INVALID_VAP_IDX into every byte;
+	 * correct only if the element type is one byte wide (or the value
+	 * is a repeated-byte pattern) -- confirm against the declaration. */
+	memset(node2vap_tbl, INVALID_VAP_IDX, sizeof(node2vap_tbl));
+
+	return 0;
+}
+
+/* Module exit: remove the proc entry (parent NULL = procfs root). */
+static void __exit switch_vlan_module_exit(void)
+{
+	remove_proc_entry(SWITCH_VLAN_PROC, NULL);
+}
+
+module_init(switch_vlan_module_init);
+module_exit(switch_vlan_module_exit);
+
+MODULE_DESCRIPTION("VLAN control panel");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/topaz/temp_sens.c b/drivers/qtn/topaz/temp_sens.c
new file mode 100644
index 0000000..8a69bb7
--- /dev/null
+++ b/drivers/qtn/topaz/temp_sens.c
@@ -0,0 +1,171 @@
+/*
+ * (C) Copyright 2013 Quantenna Communications Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+#include <qtn/qtn_debug.h>
+#include <common/topaz_platform.h>
+
+#define PROC_NAME "temp_sens"
+#define TOPAZ_TEMP_SENS_DRIVER_NAME		"topaz_tempsens"
+
+#define TOPAZ_TEMPSENS_INIT_VAL	-40
+#define TOPAZ_TEMPSENS_CODE_TBL_SIZE 34
+#define TOPAZ_TEMPSENS_STEP 5 /*Each point on the table corresponds to 5 Degree C step */
+
+/* Temperature curve is non-linear, this is a table of values reported by the temperature sensor for a range from -40 to 130 C for increments of 5 C*/
+const int code_idx[TOPAZ_TEMPSENS_CODE_TBL_SIZE] = {3800, 3792, 3783, 3774, 3765, 3756, 3747, 3737, 3728, 3718, 3708, 3698, 3688, 3678, 3667, 3656, 3645,
+	                    3634, 3623, 3611, 3600, 3588, 3575, 3563, 3550, 3537, 3524, 3510, 3496, 3482, 3467, 3452, 3437, 3421};
+
+/*
+ * Translate the raw on-die temperature-sensor code into a 5 C interval.
+ *
+ * @temp_intvl: out-parameter; on a table hit it becomes
+ *              TOPAZ_TEMPSENS_INIT_VAL + idx * TOPAZ_TEMPSENS_STEP (the
+ *              upper bound of the interval), otherwise it stays at the
+ *              initial -40.
+ * Return: matching index into code_idx[], or TOPAZ_TEMPSENS_CODE_TBL_SIZE
+ *         if the reading is below every entry (table is descending).
+ */
+int topaz_read_internal_temp_sens(int *temp_intvl)
+{
+	int temp;
+	int idx = 0;
+	*temp_intvl = TOPAZ_TEMPSENS_INIT_VAL;
+
+	/* Raw sensor code from the system-control register. */
+	temp = (readl(TOPAZ_SYS_CTL_TEMP_SENS_DATA) & TOPAZ_SYS_CTL_TEMP_SENS_DATA_TEMP);
+
+	/* code_idx[] descends as temperature rises: first entry <= temp wins. */
+	for (idx = 0; idx < TOPAZ_TEMPSENS_CODE_TBL_SIZE; idx++) {
+		if (temp >= code_idx[idx]) {
+			*temp_intvl = *temp_intvl + (idx * TOPAZ_TEMPSENS_STEP);
+			break;
+		}
+	}
+	return idx;
+}
+EXPORT_SYMBOL(topaz_read_internal_temp_sens);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/*
+ * proc read (>= 4.7 API): report the temperature interval.
+ * Fix vs. original: *ppos was never checked, so every read returned the
+ * full text again and the file could never be read to EOF (`cat` looped
+ * forever).
+ */
+static int topaz_temp_sens_read_proc(struct file *file, char __user * buffer,
+		size_t count, loff_t *ppos)
+{
+	int lower_temp_intvl;
+	int upper_temp_intvl;
+	int len = 0;
+
+	/* Follow-up reads report EOF so readers terminate. */
+	if (*ppos > 0)
+		return 0;
+
+	lower_temp_intvl = TOPAZ_TEMPSENS_INIT_VAL;
+	upper_temp_intvl = TOPAZ_TEMPSENS_INIT_VAL;
+
+	/* Determine the upper interval corresponding to temp sens value */
+	topaz_read_internal_temp_sens(&upper_temp_intvl);
+
+	/* Lower interval is 5 degree centigrade below */
+	lower_temp_intvl = upper_temp_intvl - TOPAZ_TEMPSENS_STEP;
+
+	/* NOTE(review): sprintf() straight into a __user pointer is only safe
+	 * when user memory is directly addressable; copy_to_user() /
+	 * simple_read_from_buffer() would be the portable idiom -- confirm
+	 * platform assumptions before relying on this. */
+	len += sprintf(buffer, "Temperature between %d - %d C\n",
+						lower_temp_intvl, upper_temp_intvl);
+	*ppos += len;
+	return len;
+}
+#else
+/*
+ * proc read (legacy read_proc API): report the temperature interval.
+ * Declares EOF for any non-zero offset so a single read returns the
+ * whole report.
+ */
+static int topaz_temp_sens_read_proc(char *page, char **start, off_t offset,
+		int count, int *eof, void *_unused)
+{
+	int lower_temp_intvl;
+	int upper_temp_intvl;
+	const unsigned int lim = PAGE_SIZE - 1;
+	int len = 0;
+
+	lower_temp_intvl= TOPAZ_TEMPSENS_INIT_VAL;
+	upper_temp_intvl= TOPAZ_TEMPSENS_INIT_VAL;
+
+	/* Second and later reads: nothing more to say. */
+	if (offset > 0) {
+		*eof = 1;
+		return 0;
+	}
+	/* Determine the upper interval corresponding to temp sens value */
+	topaz_read_internal_temp_sens(&upper_temp_intvl);
+
+	/* Lower interval is 5 degree centigrade below */
+	lower_temp_intvl = upper_temp_intvl - TOPAZ_TEMPSENS_STEP;
+
+	len += snprintf(&page[len], lim-len, "Temperature between %d - %d C\n",
+						lower_temp_intvl, upper_temp_intvl);
+
+	return len;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static const struct file_operations fops_sens = {
+	/* Read-only interface.  NOTE(review): no .owner = THIS_MODULE set;
+	 * confirm the open-file vs. module-unload race is acceptable. */
+	.read = topaz_temp_sens_read_proc,
+};
+#endif
+
+/*
+ * Create the /proc temperature entry for whichever proc API the kernel
+ * provides.  Return: 0 on success, -ENOMEM on failure.
+ * Fix vs. original: the mode was written as hex 0x444 (= octal 02104,
+ * which sets setgid/exec bits); proc modes are octal -- use 0444.
+ */
+static int __init topaz_temp_sens_create_proc(void)
+{
+	struct proc_dir_entry *entry;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	entry = proc_create(PROC_NAME, 0444, NULL, &fops_sens);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+#else
+	entry = create_proc_entry(PROC_NAME, 0600, NULL);
+	if (!entry) {
+		return -ENOMEM;
+	}
+
+	entry->write_proc = NULL;
+	entry->read_proc = topaz_temp_sens_read_proc;
+#endif
+	return 0;
+}
+
+/*
+ * Module init: register the proc interface, then start the sensor.
+ * Return: 0 on success, or the proc-creation error code.
+ */
+int __init topaz_temp_sens_init(void)
+{
+	int rc;
+
+	rc = topaz_temp_sens_create_proc();
+	if (rc) {
+		return rc;
+	}
+
+	/*
+	 * Bring-up sequence: shutdown, then toggle the conversion-start bit.
+	 * NOTE(review): the second writel() writes ~START_CONV, i.e. every
+	 * bit except START_CONV, not a read-modify-write -- confirm this
+	 * matches the register specification.
+	 */
+	writel(TOPAZ_SYS_CTL_TEMPSENS_CTL_SHUTDWN, TOPAZ_SYS_CTL_TEMPSENS_CTL);
+	writel(~(TOPAZ_SYS_CTL_TEMPSENS_CTL_START_CONV), TOPAZ_SYS_CTL_TEMPSENS_CTL);
+	writel(TOPAZ_SYS_CTL_TEMPSENS_CTL_START_CONV, TOPAZ_SYS_CTL_TEMPSENS_CTL);
+
+	printk(KERN_DEBUG "%s success\n", __FUNCTION__);
+
+	return 0;
+}
+
+/* Module exit: remove the proc entry (parent NULL = procfs root). */
+static void __exit topaz_temp_sens_exit(void)
+{
+	remove_proc_entry(PROC_NAME, NULL);
+}
+
+module_init(topaz_temp_sens_init);
+module_exit(topaz_temp_sens_exit);
+
+MODULE_DESCRIPTION("Topaz Temperature Sensor");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/topaz/topaz_congest_queue.c b/drivers/qtn/topaz/topaz_congest_queue.c
new file mode 100644
index 0000000..632147b
--- /dev/null
+++ b/drivers/qtn/topaz/topaz_congest_queue.c
@@ -0,0 +1,409 @@
+/**
+ * Copyright (c) 2012-2013 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+
+#include <linux/netdevice.h>
+#include <qtn/topaz_tqe.h>
+#include <qtn/topaz_tqe_cpuif.h>
+#include <qtn/topaz_hbm_cpuif.h>
+#include <qtn/topaz_hbm.h>
+#include <qtn/topaz_congest_queue.h>
+
+#define TOPAZ_DEF_TASKLET_BUDGET	32
+#define TOPAZ_DEF_CONGEST_TIMEOUT	(2 * HZ)
+
+#define TOPAZ_HBM_POOL_THRESHOLD	2048
+
+struct topaz_congest_queue *g_congest_queue_ptr = NULL;
+
+/* Print a one-line summary for every valid per-node/TID congest queue. */
+void topaz_congest_node(struct topaz_congest_queue *queue)
+{
+	int idx;
+
+	for (idx = 0; idx < TOPAZ_CONGEST_QUEUE_NUM; idx++) {
+		struct topaz_congest_q_desc *desc = &queue->queues[idx];
+
+		if (!desc->valid)
+			continue;
+
+		printk("Queue Number: %4d\t", idx);
+		printk("qlen: %4d head: %4d tail: %4d\t", desc->qlen, desc->head, desc->tail);
+		printk("node_id: %4d tid: %4d\n", desc->node_id, desc->tid);
+	}
+}
+EXPORT_SYMBOL(topaz_congest_node);
+
+/* Dump the per-queue summaries plus the queue-depth histogram. */
+void topaz_congest_dump(struct topaz_congest_queue *queue)
+{
+	int depth;
+
+	topaz_congest_node(queue);
+
+	printk("Dump queue length logs:\n");
+	for (depth = 0; depth < TOPAZ_CONGEST_PKT_MAX; depth++)
+		printk("%20d:\t%d\n", depth, queue->logs[depth]);
+}
+EXPORT_SYMBOL(topaz_congest_dump);
+
+/* Optional callback into the packet logger for congest-queue statistics. */
+struct reg_congest_stats {
+	void (*fn)(void *ctx, uint32_t type, uint8_t q_num, uint32_t q_value);
+	void *ctx;
+};
+
+struct reg_congest_stats pktlog_update_cgq_stats;
+/*
+ * Register (or clear, with fn == NULL) the statistics callback invoked on
+ * enqueue/dequeue.  Plain assignments; no locking is taken here.
+ */
+void reg_congest_queue_stats(void (*fn)(void *, uint32_t, uint8_t, uint32_t), void *ctx)
+{
+	pktlog_update_cgq_stats.fn = fn;
+	pktlog_update_cgq_stats.ctx = ctx;
+}
+EXPORT_SYMBOL(reg_congest_queue_stats);
+
+/* Accessor for the singleton created by topaz_congest_queue_init(). */
+struct topaz_congest_queue* topaz_congest_queue_get(void)
+{
+	return g_congest_queue_ptr;
+}
+EXPORT_SYMBOL(topaz_congest_queue_get);
+
+/* Active TID count from the shared-params block, or 0 if unavailable. */
+inline int get_active_tid_num(void)
+{
+	volatile struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+	return likely(sp) ? sp->active_tid_num : 0;
+}
+
+/* Cap the number of unicast congest queues; out-of-range values ignored. */
+void topaz_congest_set_unicast_queue_count(uint32_t qnum)
+{
+	struct topaz_congest_queue *congest_queue = topaz_congest_queue_get();
+
+	if (qnum <= TOPAZ_CONGEST_QUEUE_NUM)
+		congest_queue->max_unicast_qcount = qnum;
+}
+EXPORT_SYMBOL(topaz_congest_set_unicast_queue_count);
+
+/*
+ * Allocate a congest queue for unicast traffic to (node_id, tid),
+ * subject to the active-TID and unicast-queue-count limits.
+ * Return: the queue descriptor, or NULL when over a limit or when no
+ *         free slot exists.
+ */
+struct topaz_congest_q_desc* topaz_congest_alloc_unicast_queue(struct topaz_congest_queue *congest_queue,
+														uint32_t node_id,
+														uint32_t tid)
+{
+	struct topaz_congest_q_desc *desc = NULL;
+
+	if (get_active_tid_num() <= congest_queue->max_unicast_qcount &&
+			congest_queue->unicast_qcount < congest_queue->max_unicast_qcount)
+		desc = topaz_congest_alloc_queue(congest_queue, node_id, tid);
+
+	if (desc != NULL) {
+		desc->is_unicast = 1;
+		congest_queue->unicast_qcount++;
+	}
+
+	return desc;
+}
+EXPORT_SYMBOL(topaz_congest_alloc_unicast_queue);
+
+/*
+ * Claim a free congest-queue slot for (node_id, tid).
+ * Refuses when the global packet budget is spent or the HBM RX pool is
+ * nearly exhausted.  Linear scan over the fixed slot array.
+ * Return: the initialized descriptor, or NULL if none is available.
+ */
+struct topaz_congest_q_desc* topaz_congest_alloc_queue(struct topaz_congest_queue *congest_queue, uint32_t node_id,	uint32_t tid)
+{
+	struct topaz_congest_q_desc *queue;
+	int i;
+
+	/* Global backlog cap across all queues. */
+	if (congest_queue->total_qlen >= TOPAZ_CONGEST_TOTAL_PKT_MAX)
+		return NULL;
+
+	/* Keep headroom in the EMAC RX buffer pool. */
+	if (topaz_hbm_pool_available(TOPAZ_HBM_BUF_EMAC_RX_POOL) <= TOPAZ_HBM_POOL_THRESHOLD)
+		return NULL;
+
+	for (i = 0; i < TOPAZ_CONGEST_QUEUE_NUM; i++) {
+		queue = &congest_queue->queues[i];
+
+		if (queue->valid == 0) {
+			queue->valid = 1;
+
+			/* Fresh, empty ring bound to this node/TID. */
+			queue->node_id = node_id;
+			queue->tid = tid;
+			queue->head = 0;
+			queue->tail = 0;
+			queue->qlen = 0;
+			queue->index = i;
+			queue->last_retry_success = 1;
+			queue->retry_timeout = jiffies + queue->congest_queue->congest_timeout;
+			queue->is_unicast = 0;
+
+			/* Publish the (node, tid) -> queue mapping. */
+			congest_queue->ptrs[node_id][tid] = queue;
+
+			return queue;
+		}
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(topaz_congest_alloc_queue);
+
+/*
+ * Append the TQE push parameters in @ppctl to @queue's ring.
+ * Placed in .sram.text: this is on the hot datapath.
+ * Return: 0 on success, NET_XMIT_CN when the queue or the HBM pool is
+ *         congested (caller keeps ownership of the buffer).
+ */
+__attribute__((section(".sram.text"))) int topaz_congest_enqueue(struct topaz_congest_q_desc* queue, union topaz_tqe_cpuif_ppctl *ppctl)
+{
+	struct topaz_congest_q_elem *ptr;
+	uint32_t index;
+	int8_t pool;
+	int ret = 0;
+
+	/* Per-queue and global capacity limits. */
+	if ((queue->qlen >= TOPAZ_CONGEST_PKT_MAX) ||
+			(queue->congest_queue->total_qlen >= TOPAZ_CONGEST_TOTAL_PKT_MAX)) {
+		queue->congest_enq_fail++;
+		ret = NET_XMIT_CN;
+		goto make_stats;
+	}
+
+	/* Refuse to queue when the packet's HBM pool is nearly exhausted. */
+	pool = topaz_hbm_payload_get_pool_bus(ppctl->data.pkt);
+	if (topaz_hbm_pool_available(pool) <= TOPAZ_HBM_POOL_THRESHOLD) {
+		queue->congest_enq_fail++;
+		ret = NET_XMIT_CN;
+		goto make_stats;
+	}
+
+	/* Histogram of queue depth observed at enqueue time. */
+	queue->congest_queue->logs[queue->qlen]++;
+
+	index = queue->tail;
+	ptr = &queue->elems[index];
+
+	/* Copy all six raw ppctl words into the ring slot. */
+	ptr->ppctl.raw.ppctl0 = ppctl->raw.ppctl0;
+	ptr->ppctl.raw.ppctl1 = ppctl->raw.ppctl1;
+	ptr->ppctl.raw.ppctl2 = ppctl->raw.ppctl2;
+	ptr->ppctl.raw.ppctl3 = ppctl->raw.ppctl3;
+	ptr->ppctl.raw.ppctl4 = ppctl->raw.ppctl4;
+	ptr->ppctl.raw.ppctl5 = ppctl->raw.ppctl5;
+
+	/* Advance tail with wrap-around. */
+	if (++index == TOPAZ_CONGEST_PKT_MAX)
+		index = 0;
+
+	queue->tail = index;
+	queue->qlen++;
+	queue->congest_queue->total_qlen++;
+
+make_stats:
+	/* Report depth and failure count to the pktlogger, if registered. */
+	if (pktlog_update_cgq_stats.fn) {
+		pktlog_update_cgq_stats.fn(pktlog_update_cgq_stats.ctx,
+					TOPAZ_CONGEST_QUEUE_STATS_QLEN,
+					queue->index,
+					queue->qlen);
+		pktlog_update_cgq_stats.fn(pktlog_update_cgq_stats.ctx,
+					TOPAZ_CONGEST_QUEUE_STATS_ENQFAIL,
+					queue->index,
+					queue->congest_enq_fail);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(topaz_congest_enqueue);
+
+/*
+ * Return the (empty) congest queue bound to (node_id, tid) to the free
+ * pool and drop its lookup entry.
+ * NOTE(review): assumes a queue is registered for this pair -- a NULL
+ * entry would oops on the first dereference; confirm callers guarantee it.
+ */
+void topaz_congest_release_queue(struct topaz_congest_queue *congest_queue, uint32_t node_id, uint32_t tid)
+{
+	struct topaz_congest_q_desc *queue;
+
+	queue = congest_queue->ptrs[node_id][tid];
+
+	/* Releasing a non-empty queue would leak its buffers. */
+	BUG_ON(queue->qlen != 0);
+
+	queue->node_id = 0;
+	queue->tid = 0;
+	queue->head = 0;
+	queue->tail = 0;
+	queue->qlen = 0;
+	queue->last_retry_success = 1;
+
+	queue->valid = 0;
+
+	congest_queue->ptrs[node_id][tid] = NULL;
+
+	/* Give back the unicast budget this queue consumed, if any. */
+	if (queue->is_unicast) {
+		queue->is_unicast = 0;
+		if(congest_queue->unicast_qcount > 0)
+			congest_queue->unicast_qcount --;
+	}
+}
+EXPORT_SYMBOL(topaz_congest_release_queue);
+
+/* Oldest queued ppctl in @queue, or NULL when the ring is empty. */
+inline static union topaz_tqe_cpuif_ppctl *topaz_congest_peek(struct topaz_congest_q_desc *queue)
+{
+	return (queue->qlen != 0) ? &queue->elems[queue->head].ppctl : NULL;
+}
+
+/*
+ * Drop the head element of @queue (ring advance with wrap-around) and
+ * release the queue slot entirely once it drains to empty.
+ */
+inline static void topaz_congest_dequeue(struct topaz_congest_q_desc *queue)
+{
+	if (++queue->head == TOPAZ_CONGEST_PKT_MAX)
+		queue->head = 0;
+
+	queue->qlen--;
+	queue->congest_queue->total_qlen--;
+	if (pktlog_update_cgq_stats.fn)
+		pktlog_update_cgq_stats.fn(pktlog_update_cgq_stats.ctx, TOPAZ_CONGEST_QUEUE_STATS_QLEN, queue->index, queue->qlen);
+
+	/* Initial status setting */
+	queue->last_retry_success = 1;
+
+	/* Empty queues are recycled immediately. */
+	if (queue->qlen == 0)
+		topaz_congest_release_queue(queue->congest_queue, queue->node_id, queue->tid);
+}
+
+/*
+ * Return the HBM buffer referenced by @ppctl to its pool, with local
+ * interrupts disabled around the tx-done filter call.
+ */
+void topaz_hbm_congest_queue_put_buf(const union topaz_tqe_cpuif_ppctl *ppctl)
+{
+	unsigned long flags;
+	uint8_t *buf = ppctl->data.pkt + ppctl->data.buff_ptr_offset;
+
+	local_irq_save(flags);
+	topaz_hbm_filter_txdone_buf(buf);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(topaz_hbm_congest_queue_put_buf);
+
+/* Drop every queued frame in @queue, returning each buffer to the HBM
+ * pool and accounting the loss in congest_drop. */
+noinline static void topaz_congest_queue_clear(struct topaz_congest_q_desc *queue)
+{
+	queue->congest_drop += queue->qlen;
+
+	while (queue->qlen != 0) {
+		struct topaz_congest_q_elem *elem = &queue->elems[queue->head];
+
+		topaz_hbm_congest_queue_put_buf(&elem->ppctl);
+		topaz_congest_dequeue(queue);
+	}
+}
+
+/*
+ * Drain up to @budget frames from @queue by retrying them through
+ * congest_queue->xmit_func.  A queue whose retries keep failing past
+ * retry_timeout is assumed dead and is cleared.
+ * Return: 1 if the caller should reschedule (budget spent or the TQE
+ *         pushed back), 0 otherwise.
+ */
+__attribute__((section(".sram.text"))) int topaz_congest_queue_xmit(struct topaz_congest_q_desc *queue, uint32_t budget)
+{
+	union topaz_tqe_cpuif_ppctl *pp_cntl;
+	struct topaz_congest_queue *congest_queue = queue->congest_queue;
+	int re_sched = 0;
+	int ret;
+
+	congest_queue->xmit_entry++;
+
+	/* Last retry failed and the grace period has lapsed: give up. */
+	if (queue->last_retry_success == 0) {
+		if (time_after(jiffies, queue->retry_timeout)) {
+			/* PNPT queue very likely is gone */
+			topaz_congest_queue_clear(queue);
+			return 0;
+		}
+	}
+
+	while (1) {
+		pp_cntl = topaz_congest_peek(queue);
+		if (!pp_cntl)
+			break;
+
+		congest_queue->cnt_retries++;
+
+		ret = congest_queue->xmit_func(pp_cntl);
+
+		/* TQE still congested: stop and try again later. */
+		if (ret == NET_XMIT_CN) {
+			queue->last_retry_success = 0;
+			re_sched = 1;
+			break;
+		}
+
+		/* Successful push: refresh the stale-queue timeout. */
+		queue->retry_timeout = jiffies + congest_queue->congest_timeout;
+		queue->congest_xmit++;
+
+		/* Transmit successfully */
+		topaz_congest_dequeue(queue);
+
+		if (--budget == 0)
+			break;
+	}
+
+	/* Budget exhausted with work possibly remaining: ask to run again. */
+	if (budget == 0)
+		re_sched = 1;
+
+	return re_sched;
+}
+EXPORT_SYMBOL(topaz_congest_queue_xmit);
+
+/*
+ * Tasklet body: give every valid congest queue a budgeted transmit pass
+ * and reschedule itself while any queue still has backlog.
+ * @data: the struct topaz_congest_queue pointer set at tasklet_init().
+ */
+__attribute__((section(".sram.text"))) void congest_tx_tasklet(unsigned long data)
+{
+	struct topaz_congest_queue *congest_queue = (struct topaz_congest_queue *)data;
+	struct topaz_congest_q_desc *queue;
+	int re_sched = 0;
+	int ret;
+	int i;
+
+	congest_queue->func_entry++;
+
+	for (i = 0; i < TOPAZ_CONGEST_QUEUE_NUM; i++) {
+		queue = &congest_queue->queues[i];
+
+		if (queue->valid == 0)
+			continue;
+
+		/* Any queue asking for more service keeps the tasklet alive. */
+		ret = topaz_congest_queue_xmit(queue, congest_queue->tasklet_budget);
+		if (ret == 1)
+			re_sched = 1;
+	}
+
+	if (re_sched == 1) {
+		tasklet_schedule(&congest_queue->congest_tx);
+	}
+}
+EXPORT_SYMBOL(congest_tx_tasklet);
+
+/*
+ * Allocate and initialize the singleton congestion-queue manager.
+ * Return: the new instance (also published via g_congest_queue_ptr),
+ *         or NULL on allocation failure.
+ */
+struct topaz_congest_queue* topaz_congest_queue_init(void)
+{
+	struct topaz_congest_queue *queue;
+	int i;
+
+	/* kzalloc is the idiomatic spelling of kmalloc + __GFP_ZERO. */
+	queue = kzalloc(sizeof(struct topaz_congest_queue), GFP_KERNEL);
+	if (queue == NULL) {
+		printk(KERN_ERR"Out of memory\n");
+		return NULL;
+	}
+	g_congest_queue_ptr = queue;
+
+	/* Back-pointers and retry state for every (still invalid) slot. */
+	for (i = 0; i < TOPAZ_CONGEST_QUEUE_NUM; i++) {
+		queue->queues[i].congest_queue = queue;
+		queue->queues[i].last_retry_success = 1;
+	}
+	queue->tasklet_budget = TOPAZ_DEF_TASKLET_BUDGET;
+	queue->congest_timeout = TOPAZ_DEF_CONGEST_TIMEOUT;
+	tasklet_init(&queue->congest_tx, congest_tx_tasklet, (unsigned long)queue);
+	queue->xmit_func = NULL;
+	queue->tasklet_extra_proc = NULL;
+	queue->max_unicast_qcount = TOPAZ_CONGEST_MAX_UNICAST_QCOUNT;
+
+	return queue;
+}
+EXPORT_SYMBOL(topaz_congest_queue_init);
+
+/*
+ * Tear down a congestion-queue manager created by
+ * topaz_congest_queue_init().
+ * Fix vs. original: the tx tasklet was left schedulable and
+ * g_congest_queue_ptr kept pointing at freed memory (use-after-free).
+ */
+void topaz_congest_queue_exit(struct topaz_congest_queue* queue)
+{
+	if (queue == NULL)
+		return;
+
+	/* Make sure the tx tasklet can never run against freed memory. */
+	tasklet_kill(&queue->congest_tx);
+
+	/* Drop the global lookup pointer if it refers to this instance. */
+	if (g_congest_queue_ptr == queue)
+		g_congest_queue_ptr = NULL;
+
+	kfree(queue);
+}
+EXPORT_SYMBOL(topaz_congest_queue_exit);
+
+MODULE_DESCRIPTION("CONGEST QUEUE");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
diff --git a/drivers/qtn/topaz/topaz_test.h b/drivers/qtn/topaz/topaz_test.h
new file mode 100644
index 0000000..9c42f55
--- /dev/null
+++ b/drivers/qtn/topaz/topaz_test.h
@@ -0,0 +1,98 @@
+/**
+ * Copyright (c) 2008-2012 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#ifndef __TOPAZ_TEST_H
+#define __TOPAZ_TEST_H
+
+#if defined(__KERNEL__)
+#include <linux/ctype.h>
+#include <qtn/qtn_debug.h>
+#endif
+
+/*
+ * Compare x and y; on mismatch, log file/line/function plus both values
+ * (decimal and hex) via DBGFN and return -1 from the ENCLOSING function.
+ * Only usable inside functions returning int (or compatible).
+ */
+#define TOPAZ_TEST_ASSERT_EQUAL(x, y)						\
+	do {									\
+		if (!((x) == (y))) {						\
+			DBGFN("%s:%d:%s, '%s' %d 0x%x != '%s' %d 0x%x\n",	\
+					__FILE__, __LINE__, __FUNCTION__,	\
+					#x, (int)(x), (unsigned int)(x),	\
+					#y, (int)(y), (unsigned int)(y));	\
+			return -1;						\
+		}								\
+	} while(0)
+
+
+/* Advance past leading whitespace; NULL input is returned unchanged. */
+static inline const char * topaz_test_skip_space(const char *c)
+{
+	if (c != NULL) {
+		while (*c != '\0' && isspace(*c))
+			c++;
+	}
+	return c;
+}
+
+/* Advance past the current run of non-whitespace characters; NULL input
+ * is returned unchanged. */
+static inline const char *topaz_test_skip_nonspace(const char *c)
+{
+	if (c != NULL) {
+		while (*c != '\0' && !isspace(*c))
+			c++;
+	}
+	return c;
+}
+
+/* Skip any leading whitespace, the current word, and the whitespace after
+ * it, returning the start of the next word (or the trailing NUL). */
+static inline const char * topaz_test_next_word(const char *c)
+{
+	return topaz_test_skip_space(topaz_test_skip_nonspace(topaz_test_skip_space(c)));
+}
+
+/*
+ * Tokenize @c in place: each maximal run of non-whitespace becomes a
+ * NUL-terminated word whose start is stored into @words.
+ * @words must have room for every word found.  NULL input yields 0.
+ * Return: the number of words stored.
+ */
+static inline int topaz_test_split_words(char **words, char *c)
+{
+	int count = 0;
+
+	/* skip leading whitespace */
+	while (c && *c && isspace(*c))
+		c++;
+
+	while (c && *c) {
+		words[count++] = c;
+
+		/* skip over this word */
+		while (*c && !isspace(*c))
+			c++;
+
+		/* terminate it and swallow the following whitespace */
+		while (*c && isspace(*c))
+			*c++ = 0;
+	}
+
+	return count;
+}
+
+#define TOPAZ_TEST_CTRL_SRCMAC	{ 0x00, 0x26, 0x86, 0x00, 0x00, 0x00 }
+#define TOPAZ_TEST_CTRL_DSTMAC	{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }
+
+int topaz_dpi_test_parse(int argc, char **argv);
+int topaz_fwt_test_parse(int argc, char **argv);
+int topaz_ipprt_emac0_test_parse(int argc, char **argv);
+int topaz_ipprt_emac1_test_parse(int argc, char **argv);
+int topaz_vlan_test_parse(int argc, char **argv);
+
+#endif /* __TOPAZ_TEST_H */
+
diff --git a/drivers/qtn/topaz/tqe.c b/drivers/qtn/topaz/tqe.c
new file mode 100644
index 0000000..608c7f4
--- /dev/null
+++ b/drivers/qtn/topaz/tqe.c
@@ -0,0 +1,133 @@
+/**
+ * Copyright (c) 2012-2013 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ **/
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+
+#include <asm/hardware.h>
+
+#include <asm/board/platform.h>
+
+#define TOPAZ_TQE_PROC_FILENAME	"topaz_tqe"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+/*
+ * proc read (>= 4.7 API): dump the TQE outport and drop counters.
+ * Fix vs. original: *ppos was advanced but never checked, so every read
+ * returned the full table again and the file never reached EOF.
+ */
+static int topaz_tqe_stat_rd(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	char *p = buffer;
+	int i;
+	struct {
+		uint32_t reg;
+		const char *name;
+	} regs[] = {
+		{ TOPAZ_TQE_OUTPORT_EMAC0_CNT, "emac0", },
+		{ TOPAZ_TQE_OUTPORT_EMAC1_CNT, "emac1", },
+		{ TOPAZ_TQE_OUTPORT_WMAC_CNT, "wmac", },
+		{ TOPAZ_TQE_OUTPORT_LHOST_CNT, "lhost", },
+		{ TOPAZ_TQE_OUTPORT_MUC_CNT, "muc", },
+		{ TOPAZ_TQE_OUTPORT_DSP_CNT, "dsp", },
+		{ TOPAZ_TQE_OUTPORT_AUC_CNT, "auc", },
+		{ TOPAZ_TQE_OUTPORT_PCIE_CNT, "pcie", },
+		{ TOPAZ_TQE_DROP_CNT, "drop", },
+		{ TOPAZ_TQE_DROP_EMAC0_CNT, "emac0 d", },
+		{ TOPAZ_TQE_DROP_EMAC1_CNT, "emac1 d", },
+		{ TOPAZ_TQE_DROP_WMAC_CNT, "wmac d", },
+		{ TOPAZ_TQE_DROP_LHOST_CNT, "lhost d", },
+		{ TOPAZ_TQE_DROP_MUC_CNT, "muc d", },
+		{ TOPAZ_TQE_DROP_DSP_CNT, "dsp d", },
+		{ TOPAZ_TQE_DROP_AUC_CNT, "auc d", },
+		{ TOPAZ_TQE_DROP_PCIE_CNT, "pcie d", },
+	};
+
+	/* Follow-up reads report EOF so readers terminate. */
+	if (*ppos > 0)
+		return 0;
+
+	/* NOTE(review): sprintf() straight into a __user pointer; the
+	 * portable idiom is a kernel buffer + simple_read_from_buffer() --
+	 * confirm platform assumptions before relying on this. */
+	for (i = 0; i < ARRAY_SIZE(regs); i++) {
+		uint32_t reg = readl(regs[i].reg);
+		p += sprintf(p, "%8s = %u 0x%08x\n", regs[i].name, reg, reg);
+	}
+
+	*ppos += p - buffer;
+
+	return p - buffer;
+}
+
+static const struct file_operations fops_tqe = {
+	/* Read-only interface.  NOTE(review): no .owner = THIS_MODULE set;
+	 * confirm the open-file vs. module-unload race is acceptable. */
+	.read = topaz_tqe_stat_rd,
+};
+#else
+/*
+ * proc read (legacy read_proc API): dump the TQE outport and drop
+ * counters into @page.
+ * NOTE(review): ignores @offset/@count and never sets *eof -- the
+ * historical single-page proc_read idiom; the report is assumed to fit.
+ */
+static int topaz_tqe_stat_rd(char *page, char **start, off_t offset,
+		int count, int *eof, void *data)
+{
+	char *p = page;
+	int i;
+	struct {
+		uint32_t reg;
+		const char *name;
+	} regs[] = {
+		{ TOPAZ_TQE_OUTPORT_EMAC0_CNT, "emac0", },
+		{ TOPAZ_TQE_OUTPORT_EMAC1_CNT, "emac1", },
+		{ TOPAZ_TQE_OUTPORT_WMAC_CNT, "wmac", },
+		{ TOPAZ_TQE_OUTPORT_LHOST_CNT, "lhost", },
+		{ TOPAZ_TQE_OUTPORT_MUC_CNT, "muc", },
+		{ TOPAZ_TQE_OUTPORT_DSP_CNT, "dsp", },
+		{ TOPAZ_TQE_OUTPORT_AUC_CNT, "auc", },
+		{ TOPAZ_TQE_OUTPORT_PCIE_CNT, "pcie", },
+		{ TOPAZ_TQE_DROP_CNT, "drop", },
+		{ TOPAZ_TQE_DROP_EMAC0_CNT, "emac0 d", },
+		{ TOPAZ_TQE_DROP_EMAC1_CNT, "emac1 d", },
+		{ TOPAZ_TQE_DROP_WMAC_CNT, "wmac d", },
+		{ TOPAZ_TQE_DROP_LHOST_CNT, "lhost d", },
+		{ TOPAZ_TQE_DROP_MUC_CNT, "muc d", },
+		{ TOPAZ_TQE_DROP_DSP_CNT, "dsp d", },
+		{ TOPAZ_TQE_DROP_AUC_CNT, "auc d", },
+		{ TOPAZ_TQE_DROP_PCIE_CNT, "pcie d", },
+	};
+
+	/* One line per counter register: name, decimal and hex value. */
+	for (i = 0; i < ARRAY_SIZE(regs); i++) {
+		uint32_t reg = readl(regs[i].reg);
+		p += sprintf(p, "%8s = %u 0x%08x\n", regs[i].name, reg, reg);
+	}
+
+	return p - page;
+}
+#endif
+
+/*
+ * Create the TQE statistics proc entry.
+ * Fix vs. original: the mode was written as hex 0x444 (= octal 02104,
+ * which sets setgid/exec bits); proc modes are octal -- use 0444.
+ */
+static int __init topaz_tqe_stat_init(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	if (!proc_create(TOPAZ_TQE_PROC_FILENAME, 0444, NULL, &fops_tqe))
+		return -EEXIST;
+
+#else
+	if (!create_proc_read_entry(TOPAZ_TQE_PROC_FILENAME, 0,
+				NULL, topaz_tqe_stat_rd, NULL)) {
+		return -EEXIST;
+	}
+#endif
+	return 0;
+}
+
+late_initcall(topaz_tqe_stat_init);
+
+MODULE_DESCRIPTION("TQE");
+MODULE_AUTHOR("Quantenna");
+MODULE_LICENSE("GPL");
+
+
diff --git a/drivers/qtn/unaligned_access/unaligned_access.h b/drivers/qtn/unaligned_access/unaligned_access.h
new file mode 100644
index 0000000..638283b
--- /dev/null
+++ b/drivers/qtn/unaligned_access/unaligned_access.h
@@ -0,0 +1,255 @@
+#ifndef __UNALIGNED_ACCESS_H
+#define __UNALIGNED_ACCESS_H
+
+#ifdef __KERNEL__
+#define PRINT	printk
+#else
+#include <stdio.h>	
+#define PRINT	printf
+#endif
+
+#define REGS_SAVED 29
+
+struct regs {
+	unsigned long reg[REGS_SAVED];	// registers 0-12 are caller saves, 13-25 callee saves, gp, fp, sp
+};
+
+/* fit 4k (1024 * 32bit) of aligned values, 4k of unaligned, and a tiny pad */
+static char ua_test_buf[8200];
+static void* ua_test_aligned_buf;
+static void* ua_test_unaligned_buf;
+
+/* look through the memory at an address +- 16b */
+/* N.B. this printk itself triggers unaligned accesses */
+static inline void look_buf(char* buf, const unsigned long* values) {
+	int i;
+	for (i = -4; i <= 4; i++)
+		buf += sprintf(buf, "%08lx ", values[i]);
+}
+
+/* check for differences in the registers, caused by either the load or by register write back.
+   put the results as sprintfs into 'buf'. returns nonzero if any difference was reported, 0 = nothing to report */
+static inline int check_regs(char* buf,
+		const struct regs *regs_pre_a, const struct regs *regs_post_a,
+		const struct regs *regs_pre_ua, const struct regs *regs_post_ua) 
+{
+	char *p = buf;
+	int i;
+	for (i = 0; i < REGS_SAVED; i++) { 
+		unsigned long pre_a = regs_pre_a->reg[i]; 			
+		unsigned long post_a = regs_post_a->reg[i]; 			
+		unsigned long pre_ua = regs_pre_ua->reg[i]; 			
+		unsigned long post_ua = regs_post_ua->reg[i]; 			
+		if ((pre_a != post_a || pre_a != pre_ua || pre_a != post_ua) 
+				&& !(pre_a == (unsigned long)ua_test_aligned_buf 
+					&& pre_ua == (unsigned long)ua_test_unaligned_buf 
+					&& post_a == (unsigned long)ua_test_aligned_buf 
+					&& post_ua == (unsigned long)ua_test_unaligned_buf) && i != 11) {
+			p += sprintf(p, "register %d differs: pre a/ua: %lu (0x%08lx) / %lu (0x%08lx) post %lu (0x%08lx) / %lu (0x%08lx)",
+					i, pre_a, pre_a, pre_ua, pre_ua, post_a, post_a, post_ua, post_ua); 
+			if (pre_a != pre_ua || post_a != post_ua) 
+				p += sprintf(p, "A/UA DIFFERS");
+			p += sprintf(p, "\n");
+		}
+	}
+
+	return p > buf;	
+}
+
+/* register stores, to observe difference in register state before and after instructions */
+static struct regs regs_aligned_pre, regs_aligned_post, regs_unaligned_pre, regs_unaligned_post;
+
+#define COPY_REGS(r)						\
+	rtp = &r;						\
+asm volatile (	"sub	%0, %0, 4\n"  	\
+		"st.a	r0, [%0, 4]\n"	\
+		"st.a	r1, [%0, 4]\n"	\
+		"st.a	r2, [%0, 4]\n"	\
+		"st.a	r3, [%0, 4]\n"	\
+		"st.a	r4, [%0, 4]\n"	\
+		"st.a	r5, [%0, 4]\n"	\
+		"st.a	r6, [%0, 4]\n"	\
+		"st.a	r7, [%0, 4]\n"	\
+		"st.a	r8, [%0, 4]\n"	\
+		"st.a	r9, [%0, 4]\n"	\
+		"st.a	r10, [%0, 4]\n"	\
+		"st.a	r11, [%0, 4]\n"	\
+		"st.a	r12, [%0, 4]\n"	\
+		"st.a	r13, [%0, 4]\n"	\
+		"st.a	r14, [%0, 4]\n"	\
+		"st.a	r15, [%0, 4]\n"	\
+		"st.a	r16, [%0, 4]\n"	\
+		"st.a	r17, [%0, 4]\n"	\
+		"st.a	r18, [%0, 4]\n"	\
+		"st.a	r19, [%0, 4]\n"	\
+"st.a	r20, [%0, 4]\n"	\
+"st.a	r21, [%0, 4]\n"	\
+"st.a	r22, [%0, 4]\n"	\
+"st.a	r23, [%0, 4]\n"	\
+"st.a	r24, [%0, 4]\n"	\
+"st.a	r25, [%0, 4]\n"	\
+"st.a	r26, [%0, 4]\n"	\
+"st.a	r27, [%0, 4]\n"	\
+"st.a	r28, [%0, 4]\n"	\
+:: "r" (rtp)	\
+);		
+
+/* fill registers with values to set up a test, then execute for both val1a and val1b 
+   which are typically 2 memory areas, 1 for aligned and 1 for unaligned data */
+#define _CHECK(reg1, val1a, val1b, reg2, val2, reg3, val3, code) {		\
+	tp = (void*)val1a;							\
+	asm volatile ("mov " #reg1 ",%0" ::"r"(tp) : #reg1);			\
+	tp = (void*)val2;							\
+	asm volatile ("mov " #reg2 ",%0" ::"r"(tp) : #reg2); 			\
+	tp = (void*)val3;							\
+	asm volatile ("mov " #reg3 ",%0" ::"r"(tp) : #reg3); 			\
+	COPY_REGS(regs_aligned_pre);						\
+	asm volatile (code);							\
+	COPY_REGS(regs_aligned_post);						\
+	tp = (void*)val1b;							\
+	asm volatile ("mov " #reg1 ",%0" ::"r"(tp) : #reg1);			\
+	tp = (void*)val2;							\
+	asm volatile ("mov " #reg2 ",%0" ::"r"(tp) : #reg2); 			\
+	tp = (void*)val3;							\
+	asm volatile ("mov " #reg3 ",%0" ::"r"(tp) : #reg3); 			\
+	COPY_REGS(regs_unaligned_pre);						\
+	asm volatile (code);							\
+	COPY_REGS(regs_unaligned_post);						\
+	err = check_regs(buf, &regs_aligned_pre, &regs_aligned_post, &regs_unaligned_pre, &regs_unaligned_post);\
+	PRINT("%s: fragment %s\n", __FUNCTION__, #code);			\
+	if (err) PRINT("%s\n", buf);						\
+	look_buf(buf, (unsigned long*)ua_test_aligned_buf);			\
+	PRINT("%s aligned:   %s\n", __FUNCTION__, buf);				\
+	look_buf(buf, (unsigned long*)ua_test_unaligned_buf);			\
+	PRINT("%s unaligned: %s\n", __FUNCTION__, buf);				\
+}
+
+#define CHECK(reg1, reg2, code) _CHECK(reg1, ua_test_aligned_buf, ua_test_unaligned_buf, reg2, (void*) 0x12345678, reg2, (void*)0x12345678, code)
+
+static inline int test_fragments(void) 
+{
+	char buf[1024] = {0};							
+	int err;
+	void* tp;
+	register void* rtp asm ("r11"); (void)rtp;
+
+	CHECK(r6,  r13, "ld 	r13, [r6, 0]\n"::);
+	CHECK(r8,  r4,  "ld 	r4,[r8,16]\n"::);
+	CHECK(r8,  r4,  "ld 	r4,[r8,16]\n"::);
+	CHECK(r16, r2,  "ld	r2,[r16,4]\n"::);
+	CHECK(r16, r5,  "ld	r5,[r16,8]\n"::);
+	CHECK(r16, r5,  "ld.a	r5,[r16,8]\n"::);
+	CHECK(r16, r5,  "ld.ab	r5,[r16,8]\n"::);
+	CHECK(r16, r3,  "ld	r3,[r16,4]\n"::);
+	CHECK(r13, r1,  "ld_s	r1,[r13,16]\n"::);
+	CHECK(r13, r2,  "ld_s	r2,[r13,12]\n"::);
+	CHECK(r16, r3,  "ld	r3,[r16,12]\n"::);
+	CHECK(r14, r2,  "ld_s	r2,[r14,4]\n"::);
+	CHECK(r14, r3,  "ld_s	r3,[r14,8]\n"::);
+	CHECK(r5,  r2,  "ld	r2,[r5,12]\n"::);
+
+	// one of each permutation of each instruction, as found in a kernel objdump
+	CHECK(r3,  r2,  "ld	r2,[r3,12]\n"::);
+	CHECK(r3,  r2,  "ld.a	r2,[r3,8]\n"::);
+	CHECK(r3,  r5,  "ld.ab	r5,[r3,4]\n"::);
+	// .di instructions are not handled by the routine, they cause a sigsegv
+	//CHECK(r3, "ld.a.di	r8,[r3,4]\n"::);
+	CHECK(r3,  r2,  "ld.as	r2,[r3,92]\n"::);
+	//CHECK(r3, "ld.di	r2,[r3,8]\n"::);
+	CHECK(r3,  r2,  "ld_s	r2,[r3,4]\n"::);
+	CHECK(r3,  r2,  "ldw	r2,[r3,2]\n"::);
+	CHECK(r17, r4,  "ldw	r4,[r17,6]\n"::);
+	CHECK(r3,  r2,  "ldw	r2,[r3,0]\n"::);
+	CHECK(r3,  r2,  "ldw.a	r2,[r3,-2]\n"::);
+	CHECK(r3,  r5,  "ldw.ab	r5,[r3,2]\n"::);
+	//CHECK(r3, "ldw.ab.di	r8,[r3,12]\n"::);
+	//CHECK(r3, "ldw.a.di	r8,[r3,16]\n"::);
+	CHECK(r3,  r2,  "ldw.as	r2,[r3,161]\n"::);
+	//CHECK(r3, "ldw.as.di	r8,[r3,20]\n"::);
+	//CHECK(r3, "ldw.di	r2,[r3,24]\n"::);
+	CHECK(r3,  r0,  "ldw_s	r0,[r3,8]\n"::);
+	CHECK(r3,  r2, "ldw_s.x	r2,[r3,18]\n"::);
+	CHECK(r3,  r2,  "ldw.x	r2,[r3,18]\n"::);
+	//CHECK(r3, "ldw.x.ab.di	r4,[r3,-184]\n"::);
+	CHECK(r3,  r8,"ldw.x.as	r8,[r3,-256]\n"::);
+	
+	CHECK(r3, r6, "st	r6,[r3,12]\n"::);
+	CHECK(r3, r6, "st.a	r6,[r3,-12]\n"::);
+	CHECK(r3, r6, "st.ab	r6,[r3,4]\n"::);
+	CHECK(r3, r7,  "st	r7,[r3,12]\n"::);
+	CHECK(r3, r8,  "st.as	r8,[r3,-8]\n"::);
+	//CHECK(r3, "st.di	r2,[r3,-24]\n"::);
+	CHECK(r3, r12, "st_s	r12,[r3,4]\n"::);
+	CHECK(r3, r5, "stw.ab	r5,[r3,2]\n"::);
+	CHECK(r3, r6, "stw.a	r6,[r3,-6]\n"::);
+	CHECK(r3, r5, "stw.ab	r5,[r3,6]\n"::);
+	CHECK(r3, r2, "stw.as	r2,[r3,-10]\n"::);
+	CHECK(r3, r2, "stw_s	r2,[r3,10]\n"::);
+
+	// higher registers
+	CHECK(r22,  r18, "ld	r18,[r22,12]\n"::);
+	// altering sp and fp crash the test harness (make sense...). compiler doesn't like gp being clobbered
+	//CHECK(sp,  r2, "ld.a	r2,[sp,8]\n"::);
+	//CHECK(fp,  r5, "ld.ab	r5,[fp,4]\n"::);
+	//CHECK(gp,  r2, "ld.as	r2,[gp,92]\n"::);
+	CHECK(r15, r2,  "ld_s	r2,[r15,4]\n"::);
+	CHECK(r22, r24, "ldw	r24,[r22,-10]\n"::);
+	//CHECK(sp,  r2, "ldw.a	r2,[sp,-2]\n"::);
+	//CHECK(gp,  r5, "ldw.ab	r5,[gp,2]\n"::);
+	//CHECK(fp,  r2, "ldw.as	r2,[fp,161]\n"::);
+	CHECK(r12, r14, "ldw_s	r14,[r12,8]\n"::);
+	CHECK(r13, r15, "ldw_s.x r15,[r13,18]\n"::);
+	CHECK(r19, r16, "ldw.x	r16,[r19,18]\n"::);
+	CHECK(r24, r22, "ldw.x.as r22,[r24,-256]\n"::);
+	
+	_CHECK(r20, ua_test_aligned_buf, ua_test_unaligned_buf, r21, 16, r22, 0x14, "ld	r22,[r20,r21]\n"::);
+	_CHECK(r20, ua_test_aligned_buf, ua_test_unaligned_buf, r21, -16, r23, 0x14, "ld	r23,[r20,r21]\n"::);
+	_CHECK(r20, ua_test_aligned_buf, ua_test_unaligned_buf, r21, 0, r24, 0x14, "ld	r24,[r20,r21]\n"::);
+
+	CHECK(r20, r21, "st	r21,[r20,12]\n"::);
+	CHECK(r21, r22, "st.a	r22,[r21,-12]\n"::);
+	CHECK(r22, r23, "st.ab	r23,[r22,4]\n"::);
+	CHECK(r23, r24, "st	r24,[r23,12]\n"::);
+	CHECK(r24, r19, "st.as	r19,[r24,-8]\n"::);
+	CHECK(r15, r14, "st_s	r14,[r15,4]\n"::);
+	CHECK(r23, r17, "stw.ab	r17,[r23,2]\n"::);
+	//CHECK(sp, r6, "stw.a	r6,[sp,-6]\n"::);
+	//CHECK(gp, r5, "stw.ab	r5,[gp,6]\n"::);
+	//CHECK(fp, r2, "stw.as	r2,[fp,-10]\n"::);
+	CHECK(r14, r2, "stw_s	r2,[r14,14]\n"::);
+
+	return 0;
+}
+
+/* copy an int to a buffer at arbitrary offset, using byte operations to avoid faulting */
+static inline void copy_ua(const int i, char* buf) {
+	// little endian
+	buf[3] = (i >> 24) & 0xFF;
+	buf[2] = (i >> 16) & 0xFF;
+	buf[1] = (i >> 8)  & 0xFF;
+	buf[0] = (i >> 0)  & 0xFF;
+}
+
+static inline int do_unaligned_access(int n) {
+
+	// the first half of the buffer will be aligned values
+	// second half will be unaligned, same stuff but 4KiB+1B offset
+	int i;
+	for (i = 0; i < 1024; i++) { 
+		copy_ua(i * n, &ua_test_buf[4*i]);
+		copy_ua(i * n, &ua_test_buf[4*i + 4096 + 1]);
+	}
+
+	ua_test_aligned_buf = (void*)&ua_test_buf[2048];	// 2k of offset in either direction
+	ua_test_unaligned_buf = (void*)&ua_test_buf[4096 + 2048 + 1];
+
+	PRINT("%s ua_test_buf %p ua_test_aligned_buf %p ua_test_unaligned_buf %p\n", 
+			__FUNCTION__, ua_test_buf, ua_test_aligned_buf, ua_test_unaligned_buf);
+
+	int r = test_fragments();
+
+	return r;
+}
+
+#endif // __UNALIGNED_ACCESS_H
+
diff --git a/drivers/qtn/wlan/Makefile b/drivers/qtn/wlan/Makefile
new file mode 100644
index 0000000..967dc81
--- /dev/null
+++ b/drivers/qtn/wlan/Makefile
@@ -0,0 +1,83 @@
+#
+# Author: Mats Aretun
+#
+
+EXTRA_CFLAGS += -Wall -Werror  -I$(PWD) -I../include -DDEBUG -DDEMO_CONTROL
+EXTRA_CFLAGS += -I../drivers/include/shared -I../drivers/include/kernel -I../drivers/
+EXTRA_CFLAGS += -mlong-calls
+EXTRA_CFLAGS += -DQDRV -DQSCS_ENABLED -DQTN_BG_SCAN -DQBMPS_ENABLE
+EXTRA_CFLAGS += -DCONFIG_QTN_80211K_SUPPORT
+EXTRA_CFLAGS += -DQTN_DYN_CCA=1
+EXTRA_CFLAGS += -Wframe-larger-than=2048
+# Non associated clients monitoring support
+EXTRA_CFLAGS += -DCONFIG_NAC_MONITOR
+
+ifneq ($(KERNELRELEASE),)
+
+wlan-objs	+=	if_media.o \
+				ieee80211_beacon.o \
+				ieee80211_beacon_desc.o \
+				ieee80211_crypto.o \
+				ieee80211_crypto_none.o \
+				ieee80211_chan_select.o \
+				ieee80211_proto.o \
+				ieee80211_output.o \
+				ieee80211_power.o \
+				ieee80211_scan.o \
+				ieee80211_wireless.o \
+				ieee80211_linux.o \
+				ieee80211_rate.o \
+				ieee80211_input.o \
+				ieee80211_node.o \
+				ieee80211_tdls.o \
+				ieee80211_tpc.o \
+				ieee80211_mlme_statistics.o \
+				ieee80211.o
+
+wlan_acl-objs		+=	ieee80211_acl.o
+wlan_scan_sta-objs	+=	ieee80211_scan_sta.o
+wlan_scan_ap-objs	+=	ieee80211_scan_ap.o
+ifeq ($(board_config),topaz_qfdr_config)
+wlan_qfdr-objs		+=	ieee80211_qfdr.o
+endif
+
+wlan_ccmp-objs  += ieee80211_crypto_ccmp.o
+wlan_tkip-objs  += ieee80211_crypto_tkip.o
+wlan_xauth-objs := ieee80211_xauth.o
+
+obj-m += wlan.o
+obj-m += wlan_acl.o
+obj-m += wlan_scan_ap.o
+obj-m += wlan_scan_sta.o
+obj-m += wlan_ccmp.o
+obj-m += wlan_tkip.o
+obj-m += wlan_xauth.o
+ifeq ($(board_config),topaz_qfdr_config)
+obj-m += wlan_qfdr.o
+endif
+
+else
+
+#KERNELDIR	?= /lib/modules/$(shell uname -r)/build
+KERNELDIR	?= ../../linux-2.6.20.1
+INSTALL		= INSTALL_MOD_PATH=../linux/modules
+CROSS		= ARCH=arm CROSS_COMPILE=../buildroot/build_arm/staging_dir/bin/arm-linux-
+PWD			:= $(shell pwd)
+
+default:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
+
+install:
+	$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
+
+endif
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions
+
+depend .depend dep:
+	$(CC) $(CFLAGS) -M *.c > .depend
+
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
diff --git a/drivers/qtn/wlan/ieee80211.c b/drivers/qtn/wlan/ieee80211.c
new file mode 100644
index 0000000..b4a10b3
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211.c
@@ -0,0 +1,3542 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211.c 2617 2007-07-26 14:38:46Z mrenzmann $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 generic handler
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>		/* XXX for rtnl_lock */
+
+#include <asm/board/pm.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/_ieee80211.h"
+#include "net80211/ieee80211_tpc.h"
+
+#include <qtn/qtn_buffers.h>
+#include <qtn/qtn_global.h>
+#include <qdrv/qdrv_vap.h>
+
+const char *ieee80211_phymode_name[] = {
+	"auto",		/* IEEE80211_MODE_AUTO */
+	"11a",		/* IEEE80211_MODE_11A */
+	"11b",		/* IEEE80211_MODE_11B */
+	"11g",		/* IEEE80211_MODE_11G */
+	"FH",		/* IEEE80211_MODE_FH */
+	"turboA",	/* IEEE80211_MODE_TURBO_A */
+	"turboG",	/* IEEE80211_MODE_TURBO_G */
+	"11na",		/* IEEE80211_MODE_11NA */
+	"11ng",		/* IEEE80211_MODE_11NG */
+	"11ng40",	/* IEEE80211_MODE_11NG_HT40PM */
+	"11na40",	/* IEEE80211_MODE_11NA_HT40PM */
+	"11ac20",	/* IEEE80211_MODE_11AC_VHT20PM */
+	"11ac40",	/* IEEE80211_MODE_11AC_VHT40PM */
+	"11ac80",	/* IEEE80211_MODE_11AC_VHT80PM */
+	"11ac160",	/* IEEE80211_MODE_11AC_VHT160PM */
+};
+EXPORT_SYMBOL(ieee80211_phymode_name);
+
+/* integer portion of HT rates */
+const u_int16_t ht_rate_table_20MHz_400[] = {
+							7,
+							14,
+							21,
+							28,
+							43,
+							57,
+							65,
+							72,
+							14,
+							28,
+							43,
+							57,
+							86,
+							115,
+							130,
+							144
+						};
+
+const u_int16_t ht_rate_table_20MHz_800[] = {
+							6,
+							13,
+							19,
+							26,
+							39,
+							52,
+							58,
+							65,
+							13,
+							26,
+							39,
+							52,
+							78,
+							104,
+							117,
+							130
+						};
+
+const u_int16_t ht_rate_table_40MHz_400[] = {
+							15,
+							30,
+							45,
+							60,
+							90,
+							120,
+							135,
+							150,
+							30,
+							60,
+							90,
+							120,
+							180,
+							240,
+							270,
+							300
+						};
+
+const u_int16_t ht_rate_table_40MHz_800[] = {
+							13,
+							27,
+							40,
+							54,
+							81,
+							108,
+							121,
+							135,
+							27,
+							54,
+							81,
+							108,
+							162,
+							216,
+							243,
+							270
+						};
+
+EXPORT_SYMBOL(ht_rate_table_40MHz_800);
+EXPORT_SYMBOL(ht_rate_table_40MHz_400);
+EXPORT_SYMBOL(ht_rate_table_20MHz_800);
+EXPORT_SYMBOL(ht_rate_table_20MHz_400);
+
+/* Please update it when the definition of ieee80211_phymode changed */
+static const u_int ieee80211_chanflags[] = {
+	0,				/* IEEE80211_MODE_AUTO */
+	IEEE80211_CHAN_A,		/* IEEE80211_MODE_11A */
+	IEEE80211_CHAN_B,		/* IEEE80211_MODE_11B */
+	IEEE80211_CHAN_PUREG,		/* IEEE80211_MODE_11G */
+	IEEE80211_CHAN_FHSS,		/* IEEE80211_MODE_FH */
+	IEEE80211_CHAN_108A,		/* IEEE80211_MODE_TURBO_A */
+	IEEE80211_CHAN_108G,		/* IEEE80211_MODE_TURBO_G */
+	IEEE80211_CHAN_11NA,		/* IEEE80211_MODE_11NA */
+	IEEE80211_CHAN_11NG,		/* IEEE80211_MODE_11NG */
+	IEEE80211_CHAN_11NG_HT40,	/* IEEE80211_MODE_11NG_HT40PM */
+	IEEE80211_CHAN_11NA_HT40,	/* IEEE80211_MODE_11NA_HT40PM */
+	IEEE80211_CHAN_11AC,
+	IEEE80211_CHAN_11AC_VHT40,
+	IEEE80211_CHAN_11AC_VHT80,
+};
+
+static void ieee80211com_media_status(void *, struct ifmediareq *);
+static int ieee80211com_media_change(void *);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static struct rtnl_link_stats64 *ieee80211_getstats64(struct net_device *dev, struct rtnl_link_stats64 *stats64);
+#else
+static struct net_device_stats *ieee80211_getstats(struct net_device *);
+#endif
+static int ieee80211_change_mtu(struct net_device *, int);
+static void ieee80211_set_multicast_list(struct net_device *);
+
+MALLOC_DEFINE(M_80211_VAP, "80211vap", "802.11 vap state");
+
+/*
+ * Country Code Table for code-to-string conversion.
+ */
+struct country_code_to_string {
+	u_int16_t iso_code;
+	const char *iso_name;
+};
+
+static const  struct country_code_to_string country_strings[] = {
+    {CTRY_DEBUG,		"DB" },
+    {CTRY_DEFAULT,		"NA" },
+    {CTRY_AFGHANISTAN,		"AF" },
+    {CTRY_ALBANIA,		"AL" },
+    {CTRY_ALGERIA,		"DZ" },
+    {CTRY_AMERICAN_SAMOA,	"AS" },
+    {CTRY_ANDORRA,		"AD" },
+    {CTRY_ANGOLA,		"AO" },
+    {CTRY_ANGUILLA,		"AI" },
+    {CTRY_ANTARTICA,		"AQ" },
+    {CTRY_ANTIGUA,		"AG" },
+    {CTRY_ARGENTINA,		"AR" },
+    {CTRY_ARMENIA,		"AM" },
+    {CTRY_ARUBA,		"AW" },
+    {CTRY_AUSTRALIA,		"AU" },
+    {CTRY_AUSTRIA,		"AT" },
+    {CTRY_AZERBAIJAN,		"AZ" },
+    {CTRY_BAHAMAS,		"BS" },
+    {CTRY_BAHRAIN,		"BH" },
+    {CTRY_BANGLADESH,		"BD" },
+    {CTRY_BARBADOS,		"BB" },
+    {CTRY_BELARUS,		"BY" },
+    {CTRY_BELGIUM,		"BE" },
+    {CTRY_BELIZE,		"BZ" },
+    {CTRY_BENIN,		"BJ" },
+    {CTRY_BERMUDA,		"BM" },
+    {CTRY_BHUTAN,		"BT" },
+    {CTRY_BOLIVIA,		"BO" },
+    {CTRY_BOSNIA_AND_HERZEGOWINA,	"BA" },
+    {CTRY_BOTSWANA,		"BW" },
+    {CTRY_BOUVET_ISLAND,	"BV" },
+    {CTRY_BRAZIL,		"BR" },
+    {CTRY_BRITISH_INDIAN_OCEAN_TERRITORY,	"IO" },
+    {CTRY_BRUNEI_DARUSSALAM,	"BN" },
+    {CTRY_BULGARIA,		"BG" },
+    {CTRY_BURKINA_FASO,		"BF" },
+    {CTRY_BURUNDI,		"BI" },
+    {CTRY_CAMBODIA,		"KH" },
+    {CTRY_CAMEROON,		"CM" },
+    {CTRY_CANADA,		"CA" },
+    {CTRY_CAPE_VERDE,		"CV" },
+    {CTRY_CAYMAN_ISLANDS,	"KY" },
+    {CTRY_CENTRAL_AFRICAN_REPUBLIC,	"CF" },
+    {CTRY_CHAD,			"TD" },
+    {CTRY_CHILE,		"CL" },
+    {CTRY_CHINA,		"CN" },
+    {CTRY_CHRISTMAS_ISLAND,	"CX" },
+    {CTRY_COCOS_ISLANDS,	"CC" },
+    {CTRY_COLOMBIA,		"CO" },
+    {CTRY_COMOROS,		"KM" },
+    {CTRY_CONGO,		"CG" },
+    {CTRY_COOK_ISLANDS,		"CK" },
+    {CTRY_COSTA_RICA,		"CR" },
+    {CTRY_COTE_DIVOIRE,		"CI" },
+    {CTRY_CROATIA,		"HR" },
+    {CTRY_CYPRUS,		"CY" },
+    {CTRY_CZECH,		"CZ" },
+    {CTRY_DENMARK,		"DK" },
+    {CTRY_DJIBOUTI,		"DJ" },
+    {CTRY_DOMINICA,		"DM" },
+    {CTRY_DOMINICAN_REPUBLIC,	"DO" },
+    {CTRY_ECUADOR,		"EC" },
+    {CTRY_EGYPT,		"EG" },
+    {CTRY_EL_SALVADOR,		"SV" },
+    {CTRY_EQUATORIAL_GUINEA,	"GQ" },
+    {CTRY_ERITREA,		"ER" },
+    {CTRY_ESTONIA,		"EE" },
+    {CTRY_ETHIOPIA,		"ET" },
+    {CTRY_FALKLAND_ISLANDS,	"FK" },
+    {CTRY_EUROPE,		"EU" },
+    {CTRY_FIJI,			"FJ" },
+    {CTRY_FINLAND,		"FI" },
+    {CTRY_FRANCE,		"FR" },
+    {CTRY_FRANCE2,		"F2" },
+    {CTRY_FRENCH_GUIANA,	"GF" },
+    {CTRY_FRENCH_POLYNESIA,	"PF" },
+    {CTRY_FRENCH_SOUTHERN_TERRITORIES,	"TF" },
+    {CTRY_GABON,		"GA" },
+    {CTRY_GAMBIA,		"GM" },
+    {CTRY_GEORGIA,		"GE" },
+    {CTRY_GERMANY,		"DE" },
+    {CTRY_GHANA,		"GH" },
+    {CTRY_GIBRALTAR,		"GI" },
+    {CTRY_GREECE,		"GR" },
+    {CTRY_GREENLAND,		"GL" },
+    {CTRY_GRENADA,		"GD" },
+    {CTRY_GUADELOUPE,		"GP" },
+    {CTRY_GUAM,			"GU" },
+    {CTRY_GUATEMALA,		"GT" },
+    {CTRY_GUINEA,		"GN" },
+    {CTRY_GUINEA_BISSAU,	"GW" },
+    {CTRY_GUYANA,		"GY" },
+    {CTRY_HAITI,		"HT" },
+    {CTRY_HONDURAS,		"HN" },
+    {CTRY_HONG_KONG,		"HK" },
+    {CTRY_HUNGARY,		"HU" },
+    {CTRY_ICELAND,		"IS" },
+    {CTRY_INDIA,		"IN" },
+    {CTRY_INDONESIA,		"ID" },
+    {CTRY_IRAN,			"IR" },
+    {CTRY_IRELAND,		"IE" },
+    {CTRY_ISRAEL,		"IL" },
+    {CTRY_ITALY,		"IT" },
+    {CTRY_JAPAN,		"JP" },
+    {CTRY_JAPAN1,		"J1" },
+    {CTRY_JAPAN2,		"J2" },    
+    {CTRY_JAPAN3,		"J3" },
+    {CTRY_JAPAN4,		"J4" },
+    {CTRY_JAPAN5,		"J5" },    
+    {CTRY_JAPAN7,		"JP" },
+    {CTRY_JAPAN6,		"JP" },
+    {CTRY_JAPAN8,		"JP" },
+    {CTRY_JAPAN9,	      	"JP" },
+    {CTRY_JAPAN10,	      	"JP" }, 
+    {CTRY_JAPAN11,	      	"JP" },
+    {CTRY_JAPAN12,	      	"JP" },
+    {CTRY_JAPAN13,	      	"JP" },
+    {CTRY_JAPAN14,	      	"JP" },
+    {CTRY_JAPAN15,	      	"JP" },
+    {CTRY_JAPAN16,	      	"JP" }, 
+    {CTRY_JAPAN17,	      	"JP" },
+    {CTRY_JAPAN18,	      	"JP" },
+    {CTRY_JAPAN19,	      	"JP" },
+    {CTRY_JAPAN20,	      	"JP" },
+    {CTRY_JAPAN21,	      	"JP" }, 
+    {CTRY_JAPAN22,	      	"JP" },
+    {CTRY_JAPAN23,	      	"JP" },
+    {CTRY_JAPAN24,	      	"JP" },
+    {CTRY_JAPAN25,	      	"JP" }, 
+    {CTRY_JAPAN26,	      	"JP" },
+    {CTRY_JAPAN27,	      	"JP" },
+    {CTRY_JAPAN28,	      	"JP" },
+    {CTRY_JAPAN29,	      	"JP" },
+    {CTRY_JAPAN30,      	"JP" },
+    {CTRY_JAPAN31,      	"JP" },
+    {CTRY_JAPAN32,      	"JP" },
+    {CTRY_JAPAN33,      	"JP" },
+    {CTRY_JAPAN34,      	"JP" },
+    {CTRY_JAPAN35,      	"JP" },
+    {CTRY_JAPAN36,      	"JP" },
+    {CTRY_JAPAN37,      	"JP" },
+    {CTRY_JAPAN38,      	"JP" },
+    {CTRY_JAPAN39,      	"JP" },
+    {CTRY_JAPAN40,      	"JP" },
+    {CTRY_JAPAN41,      	"JP" },
+    {CTRY_JAPAN42,      	"JP" },
+    {CTRY_JAPAN43,      	"JP" },
+    {CTRY_JAPAN44,      	"JP" },
+    {CTRY_JAPAN45,      	"JP" },
+    {CTRY_JAPAN46,      	"JP" },
+    {CTRY_JAPAN47,      	"JP" },
+    {CTRY_JAPAN48,      	"JP" },
+    {CTRY_JORDAN,		"JO" },
+    {CTRY_KAZAKHSTAN,		"KZ" },
+    {CTRY_KOREA_NORTH,		"KP" },
+    {CTRY_KOREA_ROC,		"KR" },
+    {CTRY_KOREA_ROC2,		"K2" },
+    {CTRY_KUWAIT,		"KW" },
+    {CTRY_LATVIA,		"LV" },
+    {CTRY_LEBANON,		"LB" },
+    {CTRY_LIECHTENSTEIN,	"LI" },
+    {CTRY_LITHUANIA,		"LT" },
+    {CTRY_LUXEMBOURG,		"LU" },
+    {CTRY_MACAU,		"MO" },
+    {CTRY_MACEDONIA,		"MK" },
+    {CTRY_MALAYSIA,		"MY" },
+    {CTRY_MEXICO,		"MX" },
+    {CTRY_MONACO,		"MC" },
+    {CTRY_MOROCCO,		"MA" },
+    {CTRY_NEPAL,		"NP" },
+    {CTRY_NETHERLANDS,		"NL" },
+    {CTRY_NEW_ZEALAND,		"NZ" },
+    {CTRY_NORWAY,		"NO" },
+    {CTRY_OMAN,			"OM" },
+    {CTRY_PAKISTAN,		"PK" },
+    {CTRY_PANAMA,		"PA" },
+    {CTRY_PERU,			"PE" },
+    {CTRY_PHILIPPINES,		"PH" },
+    {CTRY_POLAND,		"PL" },
+    {CTRY_PORTUGAL,		"PT" },
+    {CTRY_PUERTO_RICO,		"PR" },
+    {CTRY_QATAR,		"QA" },
+    {CTRY_ROMANIA,		"RO" },
+    {CTRY_RUSSIA,		"RU" },
+    {CTRY_SAUDI_ARABIA,		"SA" },
+    {CTRY_SINGAPORE,		"SG" },
+    {CTRY_SLOVAKIA,		"SK" },
+    {CTRY_SLOVENIA,		"SI" },
+    {CTRY_SOUTH_AFRICA,		"ZA" },
+    {CTRY_SPAIN,		"ES" },
+    {CTRY_SRILANKA,		"LK" },
+    {CTRY_SWEDEN,		"SE" },
+    {CTRY_SWITZERLAND,		"CH" },
+    {CTRY_SYRIA,		"SY" },
+    {CTRY_TAIWAN,		"TW" },
+    {CTRY_THAILAND,		"TH" },
+    {CTRY_TRINIDAD_Y_TOBAGO,	"TT" },
+    {CTRY_TUNISIA,		"TN" },
+    {CTRY_TURKEY,		"TR" },
+    {CTRY_UKRAINE,		"UA" },
+    {CTRY_UAE,			"AE" },
+    {CTRY_UNITED_KINGDOM,	"GB" },
+    {CTRY_UNITED_STATES,	"US" },
+    {CTRY_UNITED_STATES_FCC49,	"US" },
+    {CTRY_URUGUAY,		"UY" },
+    {CTRY_UZBEKISTAN,		"UZ" },
+    {CTRY_VENEZUELA,		"VE" },
+    {CTRY_VIET_NAM,		"VN" },
+    {CTRY_YEMEN,		"YE" },
+    {CTRY_ZIMBABWE,		"ZW" }
+};
+
+static const struct operating_class_table us_oper_class_table[] = {
+	{1, 115, 20, {36,40,44,48}, 0},
+	{2, 118, 20, {52,56,60,64}, IEEE80211_OC_BEHAV_DFS_50_100},
+	{3, 124, 20, {149,153,157,161}, IEEE80211_OC_BEHAV_NOMADIC},
+	{4, 121, 20, {100,104,108,112,116,120,124,128,132,136,140},
+		IEEE80211_OC_BEHAV_DFS_50_100 | IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+	{5, 125, 20, {149,153,157,161,165}, IEEE80211_OC_BEHAV_LICEN_EXEP},
+	{12, 81, 25, {1,2,3,4,5,6,7,8,9,10,11}, IEEE80211_OC_BEHAV_LICEN_EXEP},
+	{22, 116, 40, {36,44}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{23, 119, 40, {52,60}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{24, 122, 40, {100,108,116,124,132}, IEEE80211_OC_BEHAV_CHAN_LOWWER |
+		IEEE80211_OC_BEHAV_DFS_50_100 | IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+	{25, 126, 40, {149,157}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{26, 126, 40, {149,157}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{27, 117, 40, {40,48}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{28, 120, 40, {56,64}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{29, 123, 40, {104,112,120,128,136}, IEEE80211_OC_BEHAV_NOMADIC |
+		IEEE80211_OC_BEHAV_CHAN_UPPER |	IEEE80211_OC_BEHAV_DFS_50_100 | IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+	{30, 127, 40, {153,161}, IEEE80211_OC_BEHAV_NOMADIC | IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{31, 127, 40, {153,161}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{32, 83, 40, {1,2,3,4,5,6,7}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{33, 84, 40, {5,6,7,8,9,10,11}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{128, 128, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128,132,136,140,144,149,153,157,161},
+		IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+	{130, 130, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128,132,136,140,144,149,153,157,161},
+		IEEE80211_OC_BEHAV_80PLUS | IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+};
+
+static const struct operating_class_table eu_oper_class_table[] = {
+	{1, 115, 20, {36,40,44,48},0},
+	{2, 118, 20, {52,56,60,64}, IEEE80211_OC_BEHAV_NOMADIC},
+	{3, 121, 20, {100,104,108,112,116,120,124,128,132,136,140}, 0},
+	{4, 81, 25, {1,2,3,4,5,6,7,8,9,10,11,12,13}, 0},
+	{5, 116, 40, {36,44}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{6, 119, 40, {52,60}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{7, 122, 40, {100,108,116,124,132}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{8, 117, 40, {40,48}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{9, 120, 40, {56,64}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{10, 123, 40, {104,112,120,128,136}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{11, 83, 40, {1,2,3,4,5,6,7,8,9}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{12, 84, 40, {5,6,7,8,9,10,11,12,13}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{128, 128, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128},
+		IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+	{130, 130, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128},
+		IEEE80211_OC_BEHAV_80PLUS | IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+};
+
+static const struct operating_class_table jp_oper_class_table[] = {
+	{1, 115, 20, {36,40,44,48}, 0},
+	{30, 81, 25, {1,2,3,4,5,6,7,8,9,10,11,12,13}, IEEE80211_OC_BEHAV_LICEN_EXEP},
+	{31, 82, 25, {14}, IEEE80211_OC_BEHAV_LICEN_EXEP},
+	{32, 118, 20, {52,56,60,64}, 0},
+	{33, 118, 20, {52,56,60,64}, 0},
+	{34, 121, 20, {100,104,108,112,116,120,124,128,132,136,140}, IEEE80211_OC_BEHAV_DFS_50_100},
+	{35, 121, 20, {100,104,108,112,116,120,124,128,132,136,140}, IEEE80211_OC_BEHAV_DFS_50_100},
+	{36, 116, 40, {36,44}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{37, 119, 40, {52,60}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{38, 119, 40, {52,60}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{39, 122, 40, {100,108,116,124,132}, IEEE80211_OC_BEHAV_CHAN_LOWWER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{40, 122, 40, {100,108,116,124,132}, IEEE80211_OC_BEHAV_CHAN_LOWWER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{41, 117, 40, {40,48}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{42, 120, 40, {56,64}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{43, 120, 40, {56,64}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{44, 123, 40, {104,112,120,128,136}, IEEE80211_OC_BEHAV_CHAN_UPPER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{45, 123, 40, {104,112,120,128,136}, IEEE80211_OC_BEHAV_CHAN_UPPER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{56, 83, 40, {1,2,3,4,5,6,7,8,9}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{57, 84, 40, {5,6,7,8,9,10,11,12,13}, IEEE80211_OC_BEHAV_LICEN_EXEP | IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{128, 128, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128},
+		IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+	{130, 130, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128},
+		IEEE80211_OC_BEHAV_80PLUS | IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+};
+
+static const struct operating_class_table gb_oper_class_table[] = {
+	{81, 81, 25, {1,2,3,4,5,6,7,8,9,10,11,12,13}, 0},
+	{82, 82, 25, {14}, 0},
+	{83, 83, 40, {1,2,3,4,5,6,7,8,9}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{84, 84, 40, {5,6,7,8,9,10,11,12,13}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{115, 115, 20, {36,40,44,48}, 0},
+	{116, 116, 40, {36,44}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{117, 117, 40, {40,48}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{118, 118, 20, {52,56,60,64}, IEEE80211_OC_BEHAV_DFS_50_100},
+	{119, 119, 40, {52,60}, IEEE80211_OC_BEHAV_CHAN_LOWWER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{120, 120, 40, {56,64}, IEEE80211_OC_BEHAV_CHAN_UPPER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{121, 121, 20, {100,104,108,112,116,120,124,128,132,136,140,144}, IEEE80211_OC_BEHAV_DFS_50_100},
+	{122, 122, 40, {100,108,116,124,132,140}, IEEE80211_OC_BEHAV_CHAN_LOWWER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{123, 123, 40, {104,112,120,128,136,144}, IEEE80211_OC_BEHAV_CHAN_UPPER|IEEE80211_OC_BEHAV_DFS_50_100},
+	{124, 124, 20, {149,153,157,161}, IEEE80211_OC_BEHAV_NOMADIC},
+	{126, 126, 40, {149,157}, IEEE80211_OC_BEHAV_CHAN_LOWWER},
+	{127, 127, 40, {153,161}, IEEE80211_OC_BEHAV_CHAN_UPPER},
+	{128, 128, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128,132,136,140,144,149,153,157,161},
+		IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+	{130, 130, 80, {36,40,44,48,52,56,60,64,100,104,108,112,116,120,124,128,132,136,140,144,149,153,157,161},
+		IEEE80211_OC_BEHAV_80PLUS | IEEE80211_OC_BEHAV_EIRP_TXPOWENV},
+};
+
+/* Make sure the global class entry must be the last one */
+static const struct region_to_oper_class oper_class_table[] = {
+	{"US", 17, {1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,128,130}, 3, {12,32,33}, us_oper_class_table},
+	{"EU", 11, {1,2,3,5,6,7,8,9,10,128,130}, 3, {4,11,12}, eu_oper_class_table},
+	{"JP", 17, {1,32,33,34,35,36,37,38,39,40,41,42,43,44,45,128,130}, 4, {30,31,56,57}, jp_oper_class_table},
+	{"GB", 14, {115,116,117,118,119,120,121,122,123,124,126,127,128,130}, 4, {81,82,83,84}, gb_oper_class_table},
+};
+#define OPER_CLASS_GB_INDEX (ARRAY_SIZE(oper_class_table) - 1)
+
+static struct ieee80211_band_info ieee80211_bands[IEEE80211_BAND_IDX_MAX] = {
+	/* {band_chan_step, band_first_chan, band_chan_cnt} */
+	{IEEE80211_24G_CHAN_SEC_SHIFT,	1,  13},
+	{IEEE80211_24G_CHAN_SEC_SHIFT,	14, 1},
+	{IEEE80211_CHAN_SEC_SHIFT,	36, 4},
+	{IEEE80211_CHAN_SEC_SHIFT,	52, 4},
+	{IEEE80211_CHAN_SEC_SHIFT,	100, 12},
+	{IEEE80211_CHAN_SEC_SHIFT,	149, 4},
+	/* isolate chan 165 for IOT as per sniffer capture */
+	{IEEE80211_CHAN_SEC_SHIFT,	165, 1},
+};
+
+struct ieee80211_band_info *ieee80211_get_band_info(int band_idx)
+{
+	if (band_idx >= IEEE80211_BAND_IDX_MAX)
+		return NULL;
+
+	return &ieee80211_bands[band_idx];
+}
+
+#if defined(QBMPS_ENABLE)
+/*******************************************************************************/
+/* ieee80211_sta_bmps_update: allocate, re-allocate or free BMPS NULL frame    */
+/*                                                                             */
+/* NOTE: this function should be called whenever a new association             */
+/*       happens, because node id associated with the frame needs             */
+/*       to be updated                                                         */
+/*                                                                             */
+/* Returns 0 on success, -1 when no BSS node exists or the NULL frame          */
+/* could not be allocated/installed.                                           */
+/*******************************************************************************/
+int ieee80211_sta_bmps_update(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct sk_buff *skb = NULL;
+
+	if (!ni)
+		return -1;
+
+	/* hold a reference on the BSS node for the duration of the update */
+	ieee80211_ref_node(ni);
+
+	if (ic->ic_flags_qtn & IEEE80211_QTN_BMPS) {
+		/* set null frame */
+		skb = ieee80211_get_nulldata(ni);
+		if (!skb) {
+			ieee80211_free_node(ni);
+			return -1;
+		}
+		/* driver takes ownership of skb on success; free it ourselves
+		 * only when installation fails */
+		if (ic->ic_bmps_set_frame(ic, ni, skb)) {
+			dev_kfree_skb(skb);
+			ieee80211_free_node(ni);
+			return -1;
+		}
+	} else {
+		/* BMPS disabled: free any previously installed null frame */
+		ic->ic_bmps_release_frame(ic);
+	}
+
+	ieee80211_free_node(ni);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_sta_bmps_update);
+#endif
+
+/*
+ * Decide whether the device is currently "idle" for power-management
+ * purposes.  Returns 1 when idle (deeper power save is acceptable),
+ * 0 otherwise.  Holds the com IRQ lock while walking VAP/association
+ * state; all exits go through the quit label so the lock is released.
+ */
+int ieee80211_is_idle_state(struct ieee80211com *ic)
+{
+	int ret = 1;
+	struct ieee80211vap *vap;
+	int wds_link_active = 0;
+	struct ieee80211_node *wds_ni;
+	struct ieee80211vap *sta_vap = NULL;
+	int nvaps = 0;
+#if defined(QBMPS_ENABLE)
+	struct qdrv_vap *qv;
+#endif
+
+	IEEE80211_LOCK_IRQ(ic);
+
+	/* never idle while off-channel CAC is running */
+	if (ic->ic_ocac.ocac_running) {
+		ret = 0;
+		goto quit;
+	}
+
+	/* count VAPs and remember the (last) STA-mode VAP, if any */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		nvaps ++;
+		if (vap->iv_opmode == IEEE80211_M_STA)
+			sta_vap = vap;
+	}
+
+
+	/* Checking non-sta mode for WDS link */
+	if (!(sta_vap && (nvaps == 1)) && (ic->ic_sta_assoc > 0)) {
+		/* all associated stations are WDS peers: idle unless at least
+		 * one WDS link has a completed block-ack session */
+		if (ic->ic_wds_links == ic->ic_sta_assoc) {
+			TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+				if (vap->iv_opmode == IEEE80211_M_WDS) {
+					wds_ni = ieee80211_get_wds_peer_node_noref(vap);
+					if (wds_ni) {
+						if (IEEE80211_BA_IS_COMPLETE(wds_ni->ni_ba_rx[IEEE80211_WDS_LINK_MAINTAIN_BA_TID].state) ||
+							IEEE80211_BA_IS_COMPLETE(wds_ni->ni_ba_tx[IEEE80211_WDS_LINK_MAINTAIN_BA_TID].state)) {
+							wds_link_active = 1;
+							break;
+						}
+					}
+				}
+			}
+
+			if (!wds_link_active) {
+				ret = 1;
+				goto quit;
+			}
+		}
+
+		/* ordinary stations (or active WDS links) present: not idle */
+		ret = 0;
+		goto quit;
+	}
+
+#if defined(QBMPS_ENABLE)
+	/* here is the logic which decide should power-save or not */
+	if (sta_vap) {
+		ret = 0;
+		if (nvaps > 1) {
+			/* multiple VAPS, and one of them is STA */
+			/* force power-saving off */
+			goto quit;
+		}
+		if ((sta_vap->iv_state == IEEE80211_S_RUN) &&
+		    (ic->ic_flags_qtn & IEEE80211_QTN_BMPS) &&
+		    !(ic->ic_flags & IEEE80211_F_SCAN) &&
+		    !(ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) &&
+		    !(ic->ic_flags_qtn & IEEE80211_QTN_SAMP_CHAN) &&
+		    !sta_vap->iv_swbmiss_bmps_warning) {
+			/* for single STA VAP: mark as idle only if */
+			/* 1. BMPS power-saving is enabled, and */
+			/* 2. not in SCAN process, and */
+			/* 3. not in SCS sample channel process, and */
+			/* 4. no beacon missing warning */
+			qv = container_of(sta_vap, struct qdrv_vap, iv);
+			if (qv->qv_bmps_mode == BMPS_MODE_MANUAL) {
+				/* manual mode */
+				ret = 1;
+				goto quit;
+			} else if ((qv->qv_bmps_mode == BMPS_MODE_AUTO) &&
+				   (!sta_vap->iv_bmps_tput_high)) {
+				/* auto mode */
+				/* and tput is low */
+				ret = 1;
+				goto quit;
+			}
+		}
+	}
+#else
+	/* without BMPS a STA VAP is never considered idle */
+	if (sta_vap) {
+		ret = 0;
+		goto quit;
+	}
+#endif
+
+quit:
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_is_idle_state);
+
+/*
+ * Report whether @chan is subject to weather-radar restrictions at the
+ * radio's current operating bandwidth.  The base 20MHz weather flag always
+ * counts; the 40MHz and 80MHz weather flags are folded in as the current
+ * bandwidth reaches BW_HT40 / BW_HT80.  Returns 1 if so, otherwise 0.
+ */
+int ieee80211_is_on_weather_channel(struct ieee80211com *ic, struct ieee80211_channel *chan)
+{
+	int weather_flags;
+	int bw;
+
+	if ((chan == NULL) || (chan == IEEE80211_CHAN_ANYC))
+		return 0;
+
+	/* base 20MHz weather flag applies at any bandwidth */
+	weather_flags = chan->ic_flags & IEEE80211_CHAN_WEATHER;
+
+	bw = ieee80211_get_bw(ic);
+	if (bw >= BW_HT80)
+		weather_flags |= chan->ic_flags &
+				(IEEE80211_CHAN_WEATHER_40M | IEEE80211_CHAN_WEATHER_80M);
+	else if (bw >= BW_HT40)
+		weather_flags |= chan->ic_flags & IEEE80211_CHAN_WEATHER_40M;
+
+	return (weather_flags != 0);
+}
+EXPORT_SYMBOL(ieee80211_is_on_weather_channel);
+
+#if defined(QBMPS_ENABLE)
+/************************************************************/
+/* ieee80211_bmps_tput_check: calculate TX/RX tput          */
+/*                                                          */
+/* NOTE: this tput information will be used to decide       */
+/*       entering/exiting power-saving state automatically  */
+/*       while BMPS works in AUTO mode                      */
+/*                                                          */
+/* Timer callback: re-arms itself every                     */
+/* BMPS_TPUT_MEASURE_PERIOD_MS on the first VAP's stats.    */
+/************************************************************/
+static void
+ieee80211_bmps_tput_check(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	struct rtnl_link_stats64 *stats;
+	uint64_t rx_bytes_diff, tx_bytes_diff, curr_tput_kbps;
+#else
+	struct net_device_stats *stats;
+	uint32_t rx_bytes_diff, tx_bytes_diff, curr_tput_kbps;
+#endif
+
+	if (vap) {
+		stats = &vap->iv_devstats;
+		if (vap->iv_ic->ic_get_shared_vap_stats) {
+			/* get VAP TX/RX bytes stats info */
+			vap->iv_ic->ic_get_shared_vap_stats(vap);
+			/* calculate overall TX & RX tput */
+			/* over the past measuring period */
+			rx_bytes_diff = stats->rx_bytes -
+						ic->ic_bmps_tput_check.prev_rx_bytes;
+			tx_bytes_diff = stats->tx_bytes -
+						ic->ic_bmps_tput_check.prev_tx_bytes;
+
+			ic->ic_bmps_tput_check.prev_rx_bytes = stats->rx_bytes;
+			ic->ic_bmps_tput_check.prev_tx_bytes = stats->tx_bytes;
+
+			/* bytes * 8 / period_ms == kbit/s */
+			curr_tput_kbps = ((rx_bytes_diff + tx_bytes_diff) * 8) /
+							(BMPS_TPUT_MEASURE_PERIOD_MS);
+			if (curr_tput_kbps > BMPS_TPUT_THRESHOLD_UPPER) {
+				/* tput is above upper threshold */
+				/* it is time to exit BMPS power-saving */
+				/* (-1 appears to mean "state unknown" — the
+				 *  work is queued in either case) */
+				if (!vap->iv_bmps_tput_high ||
+				    (vap->iv_bmps_tput_high == -1)) {
+					vap->iv_bmps_tput_high = 1;
+			                ic->ic_pm_reason = IEEE80211_PM_LEVEL_TPUT_ABOVE_UPPER_THRSH;
+					ieee80211_pm_queue_work(ic);
+				}
+			} else if (curr_tput_kbps < BMPS_TPUT_THRESHOLD_LOWER){
+				/* tput is below lower threshold */
+				/* it is time to enter BMPS power-saving */
+				if (vap->iv_bmps_tput_high ||
+				    (vap->iv_bmps_tput_high == -1)) {
+					vap->iv_bmps_tput_high = 0;
+			                ic->ic_pm_reason = IEEE80211_PM_LEVEL_TPUT_BELOW_LOWER_THRSH;
+					ieee80211_pm_queue_work(ic);
+				}
+			}
+		}
+	}
+
+	/* periodic: re-arm for the next measurement window */
+	mod_timer(&ic->ic_bmps_tput_check.tput_timer,
+			jiffies + (BMPS_TPUT_MEASURE_PERIOD_MS / 1000) * HZ);
+}
+#endif
+
+/*
+ * Human-readable names for the IEEE80211_PM_LEVEL_* trigger reasons,
+ * indexed by reason value (index 0 intentionally empty).  Must stay in
+ * sync with the enum; consumed by
+ * ieee80211_get_pm_level_change_trigger_reason().
+ */
+static char *trigger_reason_str[] = {"","IEEE80211_PM_LEVEL_REMAIN_CHANNEL_WORK",
+	"IEEE80211_PM_LEVEL_CCA_WORK",
+	"IEEE80211_PM_LEVEL_TPUT_ABOVE_UPPER_THRSH",
+	"IEEE80211_PM_LEVEL_TPUT_BELOW_LOWER_THRSH",
+	"IEEE80211_PM_LEVEL_VAP_ATTACH",
+	"IEEE80211_PM_LEVEL_VAP_DETACH",
+	"IEEE80211_PM_LEVEL_RCVD_ADDBA_REQ",
+	"IEEE80211_PM_LEVEL_RCVD_ADDBA_RESP",
+	"IEEE80211_PM_LEVEL_SWBCN_MISS",
+	"IEEE80211_PM_LEVEL_JOIN_BSS",
+	"IEEE80211_PM_LEVEL_LEAVE_BSS",
+	"IEEE80211_PM_LEVEL_INACTIVITY_IN_WDS",
+	"IEEE80211_PM_LEVEL_NODE_JOIN",
+	"IEEE80211_PM_LEVEL_NODE_LEFT",
+	"IEEE80211_PM_LEVEL_DEVICE_INIT",
+	"IEEE80211_PM_LEVEL_SWBCN_MISS_2",
+	"IEEE80211_PM_LEVEL_NEW_STATE_IEEE80211_S_RUN",
+	"IEEE80211_PM_LEVEL_SCAN_START",
+	"IEEE80211_PM_LEVEL_SCAN_STOP",
+	"IEEE80211_PM_LEVEL_SIWFREQ",
+	"IEEE80211_PM_LEVEL_SIWSCAN",
+	"IEEE80211_PM_LEVEL_STOP_OCAC_SDFS",
+	"IEEE80211_PM_LEVEL_BCN_SCHEME_CHANGED_FOR_2VAPS",
+	"IEEE80211_PM_LEVEL_OCAC_SDFS_TIMER",
+	"IEEE80211_PM_LEVEL_BCN_SCHEME_CHANGED",
+	"IEEE80211_PM_LEVEL_CAC_COMPLETED",
+	"IEEE80211_PM_LEVEL_CSA_DFS_ACTION",
+        "IEEE80211_PM_LEVEL_ICAC_COMPLETE_ACTION",
+};
+
+/*
+ * Map a PM trigger reason to its name string; returns " " for values
+ * outside the accepted range.
+ *
+ * NOTE(review): the lower bound rejects values below CCA_WORK even though
+ * trigger_reason_str[] has an entry for REMAIN_CHANNEL_WORK — confirm the
+ * exclusion is intentional.
+ */
+const char * ieee80211_get_pm_level_change_trigger_reason(int pm_reason)
+{
+
+	if ((pm_reason < IEEE80211_PM_LEVEL_CCA_WORK ) || (pm_reason >= IEEE80211_PM_LEVEL_REASON_MAX))
+		return " ";
+
+	return trigger_reason_str[pm_reason];
+}
+
+/*
+ * CSA completion callback used by the CoC path (installed via
+ * ieee80211_enter_csa in ieee80211_coc_pm_action_trigger_channel_change):
+ * finishes the channel switch and applies the recomputed PM level.
+ */
+static void ieee80211_coc_pm_trigger_channel_switch(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	/* Get the level again, to be safer */
+	int level = ieee80211_is_idle_state(ic) ? BOARD_PM_LEVEL_IDLE : PM_QOS_DEFAULT_VALUE;
+
+	if (unlikely(IEEE80211_CSW_REASON_COC != ic->ic_csa_reason)) {
+		/* Race in channel change is highly unlikely.
+		 * Add a safe check;
+		 */
+		ic->ic_coc_cc_reason = IEEE80211_CSW_REASON_UNKNOWN;
+		return;
+	}
+
+	ieee80211_finish_csa((unsigned long) ic);
+	pm_qos_update_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_WLAN, level);
+
+	if (ic->ic_curchan) {
+		COC_DBG_QEVT(ic2dev(ic), "CoC: Channel changed to %d as PM level changed to %d\n",
+			ic->ic_curchan->ic_ieee, level);
+	}
+}
+
+/*
+ * Pick the best channel according to @pick_flags (via SCS) and schedule a
+ * CSA to it, with ieee80211_coc_pm_trigger_channel_switch as the
+ * completion handler.  Returns IEEE80211_COC_REASON_SUCCESS when the CSA
+ * was scheduled, IEEE80211_COC_BEST_CHANNEL_NOT_FOUND when no usable
+ * channel was found, or IEEE80211_COC_REASON_CSA_NOT_TRIGGERED when the
+ * CSA request failed.
+ */
+static int ieee80211_coc_pm_action_trigger_channel_change(struct ieee80211com *ic, int pick_flags)
+{
+	struct ieee80211_channel *best_channel = NULL;
+        int best_chan_ieee;
+	int ret;
+
+	best_chan_ieee = ieee80211_scs_pick_channel(ic,
+			pick_flags,
+			IEEE80211_SCS_NA_CC);
+
+	best_channel = ieee80211_find_channel_by_ieee(ic, best_chan_ieee);
+
+	if ((NULL == best_channel) || (!ic->ic_check_channel(ic, best_channel, 0, 0))) {
+		return IEEE80211_COC_BEST_CHANNEL_NOT_FOUND;
+	} else {
+		/* Schedule a channel switch;
+		 * 1. Option1 : Since no STAs are connected Stack can change
+		 *		the channel to selected non-dfs immediately without CSA
+		 * 2. Option2 : While channel change is in progress,
+		 *		an association attempt by a STA could trigger races
+		 *		Hence let us invoke CSA logic all the time
+		 */
+		ret = ieee80211_enter_csa(ic, best_channel, ieee80211_coc_pm_trigger_channel_switch,
+				IEEE80211_CSW_REASON_COC,
+				IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT,
+				IEEE80211_CSA_MUST_STOP_TX,
+				IEEE80211_CSA_F_BEACON | IEEE80211_CSA_F_ACTION);
+		if (ret == 0) {
+			/* CSA scheduled successfully */
+			return IEEE80211_COC_REASON_SUCCESS;
+
+		} else {
+			return IEEE80211_COC_REASON_CSA_NOT_TRIGGERED;
+		}
+	}
+}
+
+/*
+ * Decide whether CoC is allowed to drive channel changes in the current
+ * VAP configuration.  Returns false when any STA-mode VAP is associated
+ * (channel selection is then driven by the connected AP), true otherwise.
+ */
+static bool ieee80211_coc_resolve_valid_modes(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap = NULL;
+	bool valid = true;
+
+	/* If one of the VAPs is STA and is associated,
+	 * Channel change is always driven by connected AP
+	 */
+	IEEE80211_LOCK_IRQ(ic);
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if ((vap->iv_opmode == IEEE80211_M_STA) &&
+				(vap->iv_state == IEEE80211_S_RUN)) {
+			/* Bug fix: the original returned false from inside
+			 * this loop while still holding the IRQ lock,
+			 * leaking it.  Record the result and fall through
+			 * to the single unlock instead. */
+			valid = false;
+			break;
+		}
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	/* All other cases are valid*/
+	return valid;
+}
+
+/*
+ * ETSI-only policy helper: decide whether moving to @dest_pm_level should
+ * be accompanied by a channel change (back to DFS when waking up, away
+ * from DFS when going idle).  Pure predicate — performs no change itself.
+ */
+static bool ieee80211_pm_change_should_trigger_channel_change(struct ieee80211com *ic, int dest_pm_level)
+{
+	/* only meaningful in the EU region and when CoC may drive changes */
+	bool cc_flag = ic->ic_dfs_is_eu_region() && ieee80211_coc_resolve_valid_modes(ic);
+
+	bool cur_chan_is_dfs = (is_ieee80211_chan_valid(ic->ic_curchan)) &&
+		                (ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS);
+
+	/* ETSI : While changing to pm level 1:PM_QOS_DEFAULT_VALUE,
+	 * try to move to best DFS channel
+	 * 1. if we had earlier transitioned from DFS_channel-->Non_DFS_Channel
+	 *    because of CoC,
+	 * 2. OCAC/SDFS is not running on current non-dfs channel.
+	 * 3. Change to pm level was triggered by STA association;
+	 * 4. And current channel is non-dfs channel
+	 */
+	if (PM_QOS_DEFAULT_VALUE == dest_pm_level) {
+		return ((ic->ic_coc_move_to_ndfs) &&
+				(!ic->ic_ocac.ocac_running) &&
+				(false == cur_chan_is_dfs) &&
+				(cc_flag) &&
+				(IEEE80211_CSW_REASON_COC == ic->ic_coc_cc_reason) &&
+				(IEEE80211_PM_LEVEL_NODE_JOIN == ic->ic_pm_reason));
+	}
+
+	/* ETSI : While changing to pm level 5:BOARD_PM_LEVEL_IDLE,
+	 * try to move to best non-dfs channel
+	 * 1. if the current channel is DFS channel
+	 */
+
+	return ((true == cur_chan_is_dfs) && cc_flag && (ic->ic_coc_move_to_ndfs));
+}
+
+
+/*
+ * Trigger a CoC channel change for a PM level transition when policy says
+ * one is needed.  Returns true only when a CSA was actually scheduled.
+ */
+static bool ieee80211_pm_channel_change(struct ieee80211com *ic, int dest_pm_level, unsigned int pick_flags)
+{
+	/* guard clause: policy says no channel change for this transition */
+	if (!ieee80211_pm_change_should_trigger_channel_change(ic, dest_pm_level))
+		return false;
+
+	return (ieee80211_coc_pm_action_trigger_channel_change(ic, pick_flags) ==
+			IEEE80211_COC_REASON_SUCCESS);
+}
+
+/*
+ * Apply the CoC power-management decision: compute the destination PM
+ * level from the idle state, update the PM QoS requirement when the
+ * transition is directly acceptable, otherwise (ETSI, on a DFS channel,
+ * going idle) first move off DFS and only complete the level change from
+ * the CSA callback.
+ */
+static void ieee80211_coc_pm_action(struct ieee80211com *ic)
+{
+	int dest_pm_level = ieee80211_is_idle_state(ic) ? BOARD_PM_LEVEL_IDLE : PM_QOS_DEFAULT_VALUE;
+
+	if ((!ic->ic_coc_move_to_ndfs) ||				/* When ic_coc_move_to_ndfs is zero always accept pm change */
+		(!ic->ic_dfs_is_eu_region()) ||				/* FCC  : Accept the pm level change to 1 and 5 for FCC */
+		(PM_QOS_DEFAULT_VALUE == dest_pm_level) ||		/* ETSI : Accept the pm level change to 1 in ETSI*/
+		(is_ieee80211_chan_valid(ic->ic_curchan) &&
+		(!(ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS))))	/* ETSI : Accept the pm level change to 5 for ETSI, If current channel is non-dfs */
+	{
+		pm_qos_update_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_WLAN, dest_pm_level);
+
+		/* waking up: optionally move back to the best DFS channel */
+		if (ieee80211_pm_channel_change(ic, dest_pm_level, IEEE80211_SCS_PICK_AVAILABLE_DFS_ONLY | IEEE80211_SCS_PICK_ANYWAY)) {
+				ic->ic_coc_cc_reason = IEEE80211_CSW_REASON_UNKNOWN;
+		}
+
+		return;
+	}
+
+	/* DFS:1 ---> NDFS:5
+	 * Moving to pm level 5 in ETSI;
+	 * 1. First move to best non-dfs channel
+	 * 2. After successful channel change, move the pm level to 5
+	 * 3. If CSA fails for any reason,
+	 * pm level remains at previous value which is 1
+	 */
+	if (ieee80211_pm_channel_change(ic, dest_pm_level, IEEE80211_SCS_PICK_NON_DFS_ONLY | IEEE80211_SCS_PICK_ANYWAY)) {
+			ic->ic_coc_cc_reason = IEEE80211_CSW_REASON_COC;
+	}
+}
+
+/*
+ * Deferred PM work handler (scheduled via ieee80211_pm_queue_work):
+ * runs the CoC PM state machine unless a DFS CAC is in progress on the
+ * current channel.
+ */
+static void
+ieee80211_update_pm(struct work_struct *work)
+{
+	struct ieee80211com *ic = container_of(work, struct ieee80211com, pm_work.work);
+
+	/* Guard ic_curchan the same way the other PM paths do
+	 * (is_ieee80211_chan_valid): the original dereferenced it
+	 * unconditionally, which oopses if this work runs while no valid
+	 * channel is set (NULL or IEEE80211_CHAN_ANYC). */
+	if (!is_ieee80211_chan_valid(ic->ic_curchan))
+		return;
+
+	/* defer any PM action while a DFS CAC is running */
+	if (ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS_CAC_IN_PROGRESS)
+		return;
+
+	ieee80211_coc_pm_action(ic);
+}
+
+/*
+ * Timer callback: step through the configured duty-cycle period group
+ * (one byte per period in QTN_PM_PERIOD_GROUP), pushing the next period
+ * value to the driver via the PWR_SAVE param, then re-arm itself.
+ */
+static void
+ieee80211_pm_period_change(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	/* persists across invocations: selects the next period in the group */
+	static int cnt = 0;
+	int value;
+	int v = 0;
+	int period_cnt = BOARD_PM_PERIOD_CNT;
+	int period_change_interval = BOARD_PM_PERIOD_CHANGE_INTERVAL;
+
+	if (vap && vap->iv_bss) {
+		/* clamp the configured period count to [1, BOARD_PM_PERIOD_CNT] */
+		if ((ic->ic_pm_state[QTN_PM_PERIOD_CNT] >= 1) &&
+				(ic->ic_pm_state[QTN_PM_PERIOD_CNT] <= BOARD_PM_PERIOD_CNT)) {
+			period_cnt = ic->ic_pm_state[QTN_PM_PERIOD_CNT];
+		}
+
+		/* extract byte (cnt mod period_cnt) of the period group */
+		v = (ic->ic_pm_state[QTN_PM_PERIOD_GROUP] >> (8 * (cnt % period_cnt))) & 0xFF;
+		value = QTN_PM_PACK_PARAM_VALUE(QTN_PM_PDUTY_PERIOD_MS, v);
+		ic->ic_setparam(vap->iv_bss, IEEE80211_PARAM_PWR_SAVE, value, NULL, 0);
+	}
+
+	if (ic->ic_pm_state[QTN_PM_PERIOD_CHANGE_INTERVAL] >= BOARD_PM_PERIOD_CHANGE_INTERVAL) {
+		period_change_interval = ic->ic_pm_state[QTN_PM_PERIOD_CHANGE_INTERVAL];
+	}
+
+	/* periodic: re-arm for the next period change */
+	mod_timer(&ic->ic_pm_period_change, jiffies + period_change_interval * HZ);
+	cnt++;
+}
+
+/*
+ * Queue the PM work with a caller-supplied delay (in jiffies), bypassing
+ * the idle-based delay selection done by ieee80211_pm_queue_work().
+ */
+void
+ieee80211_pm_queue_work_custom(struct ieee80211com *ic, unsigned long delay)
+{
+	pm_queue_work(&ic->pm_work, delay);
+}
+EXPORT_SYMBOL(ieee80211_pm_queue_work_custom);
+
+/*
+ * Queue the PM work, choosing the delay from the current idle state:
+ * idle devices wait the (longer) idle timeout before acting, busy devices
+ * use the default timeout.
+ */
+void
+ieee80211_pm_queue_work(struct ieee80211com *ic)
+{
+	unsigned long delay;
+	int idle = ieee80211_is_idle_state(ic);
+
+	if (idle) {
+#if defined(QBMPS_ENABLE)
+		/* NOTE(review): bitwise AND on ic_opmode with IEEE80211_M_STA
+		 * — if ic_opmode is a mode value rather than a bitmask this
+		 * likely wants '=='; confirm against the enum encoding. */
+		if ((ic->ic_flags_qtn & IEEE80211_QTN_BMPS) &&
+		    (ic->ic_opmode & IEEE80211_M_STA))
+			delay = BOARD_PM_WLAN_STA_IDLE_TIMEOUT;
+		else
+#endif
+			delay = BOARD_PM_WLAN_IDLE_TIMEOUT;
+	} else
+		delay = BOARD_PM_WLAN_DEFAULT_TIMEOUT;
+
+	pm_queue_work(&ic->pm_work, delay);
+}
+EXPORT_SYMBOL(ieee80211_pm_queue_work);
+
+/*
+ * Release every information element buffer owned by @vap: the per-frame
+ * application IEs, the optional IE, and the Quantenna pairing IE.  Each
+ * pointer is reset to NULL and its length to 0 after freeing.
+ */
+static void
+ieee80211_vap_remove_ie(struct ieee80211vap *vap)
+{
+	int idx;
+
+	/* application IEs, one slot per management frame type */
+	for (idx = 0; idx < IEEE80211_APPIE_NUM_OF_FRAME; idx++) {
+		if (vap->app_ie[idx].ie == NULL)
+			continue;
+		FREE(vap->app_ie[idx].ie, M_DEVBUF);
+		vap->app_ie[idx].ie = NULL;
+		vap->app_ie[idx].length = 0;
+	}
+
+	/* optional IE supplied by userspace */
+	if (vap->iv_opt_ie != NULL) {
+		FREE(vap->iv_opt_ie, M_DEVBUF);
+		vap->iv_opt_ie = NULL;
+		vap->iv_opt_ie_len = 0;
+	}
+
+	/* Quantenna pairing IE */
+	if (vap->qtn_pairing_ie.ie != NULL) {
+		FREE(vap->qtn_pairing_ie.ie, M_DEVBUF);
+		vap->qtn_pairing_ie.ie = NULL;
+		vap->qtn_pairing_ie.length = 0;
+	}
+}
+
+/*
+ * Reset WoWLAN parameters to their defaults: matching disabled, L2 ether
+ * type 0x0842 (wake-on-LAN), L3 UDP port 0xffff, and an empty magic
+ * pattern.
+ */
+void init_wowlan_params(struct ieee80211_wowlan *wowlan)
+{
+	wowlan->host_state = 0;
+	wowlan->wowlan_match = 0;
+	wowlan->L2_ether_type = 0x0842;
+	wowlan->L3_udp_port = 0xffff;
+	wowlan->pattern.len = 0;
+	/* use sizeof instead of the previous hard-coded 256 so the clear
+	 * always matches the actual magic_pattern buffer size */
+	memset(wowlan->pattern.magic_pattern, 0,
+		sizeof(wowlan->pattern.magic_pattern));
+}
+
+/*
+ * Timer callback for RBS extenders: when the MBS has not been seen for
+ * IEEE80211_EXTENDER_MBS_INVALID_TIMEOUT seconds, kick off a no-pick
+ * active scan to search for it, then re-arm the timer.
+ */
+static void
+ieee80211_extender_start_scan(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	/* only the repeater (RBS) role searches for its MBS */
+	if (ic->ic_extender_role != IEEE80211_EXTENDER_ROLE_RBS)
+		return;
+
+	/* need a HOSTAP VAP to scan from; retry on the next interval */
+	if (!vap || vap->iv_opmode != IEEE80211_M_HOSTAP) {
+		mod_timer(&ic->ic_extender_scan_timer, jiffies +
+				IEEE80211_EXTENDER_SCAN_MBS_INTERVAL * HZ);
+		return;
+	}
+
+	if (time_after(jiffies, ic->ic_extender_mbs_detected_jiffies +
+				IEEE80211_EXTENDER_MBS_INVALID_TIMEOUT * HZ)) {
+		(void) ieee80211_start_scan(vap,
+			IEEE80211_SCAN_ACTIVE |
+			IEEE80211_SCAN_ONCE |
+			IEEE80211_SCAN_QTN_SEARCH_MBS |
+			IEEE80211_SCAN_NOPICK,
+			IEEE80211_SCAN_FOREVER,
+			0, NULL);
+	}
+
+	/* periodic: re-arm regardless of whether a scan was started */
+	mod_timer(&ic->ic_extender_scan_timer, jiffies +
+			IEEE80211_EXTENDER_SCAN_MBS_INTERVAL * HZ);
+}
+
+/*
+ * Attach and initialize an ieee80211com instance: derive mode
+ * capabilities from the driver-provided channel list, set default
+ * channel/power/extender/PM state, attach the sub-modules (crypto, node,
+ * power, proto, scan), and install the PM timers and work.
+ * Returns 0 on success, -ENODEV if the module use count cannot be taken.
+ */
+int
+ieee80211_ifattach(struct ieee80211com *ic)
+{
+	struct ieee80211_channel *c;
+	struct ifmediareq imr;
+	int i;
+
+	_MOD_INC_USE(THIS_MODULE, return -ENODEV);
+
+	/*
+	 * Pick an initial operating mode until we have a vap
+	 * created to lock it down correctly.  This is only so
+	 * drivers have something defined for configuring the
+	 * hardware at startup.
+	 */
+	ic->ic_opmode = IEEE80211_M_STA;	/* everyone supports this */
+
+	/*
+	 * Fill in 802.11 available channel set, mark
+	 * all available channels as active, and pick
+	 * a default channel if not already specified.
+	 */
+	KASSERT(0 < ic->ic_nchans && ic->ic_nchans < IEEE80211_CHAN_MAX,
+		("invalid number of channels specified: %u", ic->ic_nchans));
+	memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail));
+	ic->ic_modecaps |= 1<<IEEE80211_MODE_AUTO;
+	for (i = 0; i < ic->ic_nchans; i++) {
+		c = &ic->ic_channels[i];
+		KASSERT(c->ic_flags != 0, ("channel with no flags"));
+		KASSERT(c->ic_ieee < IEEE80211_CHAN_MAX,
+			("channel with bogus ieee number %u", c->ic_ieee));
+		/* make sure only valid 2.4G or 5G channels are set as available */
+                if (((c->ic_ieee >= QTN_2G_FIRST_OPERATING_CHAN) && (c->ic_ieee <= QTN_2G_LAST_OPERATING_CHAN)) ||
+                    ((c->ic_ieee >= QTN_5G_FIRST_OPERATING_CHAN) && (c->ic_ieee <= QTN_5G_LAST_OPERATING_CHAN))) {
+                        setbit(ic->ic_chan_avail, c->ic_ieee);
+                }
+
+		/*
+		 * Identify mode capabilities.
+		 */
+		if (IEEE80211_IS_CHAN_A(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11A;
+		if (IEEE80211_IS_CHAN_B(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11B;
+		if (IEEE80211_IS_CHAN_PUREG(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11G;
+		if (IEEE80211_IS_CHAN_FHSS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_FH;
+		if (IEEE80211_IS_CHAN_108A(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_TURBO_A;
+		if (IEEE80211_IS_CHAN_108G(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_TURBO_G;
+		if (IEEE80211_IS_CHAN_11NG(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11NG;
+		if (IEEE80211_IS_CHAN_11NA(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11NA;
+		if (IEEE80211_IS_CHAN_11NG_HT40PLUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11NG_HT40PM;
+		if (IEEE80211_IS_CHAN_11NG_HT40MINUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11NG_HT40PM;
+		if (IEEE80211_IS_CHAN_11NA_HT40PLUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11NA_HT40PM;
+		if (IEEE80211_IS_CHAN_11NA_HT40MINUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11NA_HT40PM;
+		if (IEEE80211_IS_CHAN_11AC(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11AC_VHT20PM;
+		if (IEEE80211_IS_CHAN_11AC_VHT40PLUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11AC_VHT40PM;
+		if (IEEE80211_IS_CHAN_11AC_VHT40MINUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11AC_VHT40PM;
+		if (IEEE80211_IS_CHAN_11AC_VHT80_EDGEPLUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11AC_VHT80PM;
+		if (IEEE80211_IS_CHAN_11AC_VHT80_CNTRPLUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11AC_VHT80PM;
+		if (IEEE80211_IS_CHAN_11AC_VHT80_CNTRMINUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11AC_VHT80PM;
+		if (IEEE80211_IS_CHAN_11AC_VHT80_EDGEMINUS(c))
+			ic->ic_modecaps |= 1<<IEEE80211_MODE_11AC_VHT80PM;
+	}
+	/* initialize candidate channels to all available */
+	memcpy(ic->ic_chan_active, ic->ic_chan_avail,
+		sizeof(ic->ic_chan_avail));
+
+	memset(ic->ic_chan_availability_status, IEEE80211_CHANNEL_STATUS_AVAILABLE, sizeof(ic->ic_chan_availability_status));
+	/* validate ic->ic_curmode */
+	if ((ic->ic_modecaps & (1<<ic->ic_curmode)) == 0)
+		ic->ic_curmode = IEEE80211_MODE_AUTO;
+	/*
+	 * When 11g is supported, force the rate set to
+	 * include basic rates suitable for a mixed b/g bss.
+	 */
+	if (ic->ic_modecaps & (1<<IEEE80211_MODE_11G))
+		ieee80211_set11gbasicrates(
+			&ic->ic_sup_rates[IEEE80211_MODE_11G],
+			IEEE80211_MODE_11G);
+
+	/* 11n also checks for 11g basic rates for capabilities */
+	if (ic->ic_modecaps & (1<<IEEE80211_MODE_11NG))
+		ieee80211_set11gbasicrates(
+			&ic->ic_sup_rates[IEEE80211_MODE_11NG],
+			IEEE80211_MODE_11G);
+
+	/* setup initial channel settings */
+	ic->ic_bsschan = IEEE80211_CHAN_ANYC;
+	ic->ic_des_chan = IEEE80211_CHAN_ANYC;
+
+	/* arbitrarily pick the first channel
+	 * NOTE(review): index 1, not 0, despite the comment — confirm
+	 * whether ic_channels[0] is deliberately skipped. */
+	ic->ic_curchan = &ic->ic_channels[1];
+	ic->ic_prevchan = ic->ic_curchan;
+
+	/* Enable marking of dfs by default */
+	ic->ic_flags_ext |= IEEE80211_FEXT_MARKDFS;
+
+	/* Phytype OFDM. FIXME: this may change with RFIC5 */
+	ic->ic_phytype = IEEE80211_T_OFDM;
+
+	/* Enable LDPC by default */
+	ic->ldpc_enabled = 1;
+
+	ic->ic_gi_select_enable = QTN_GLOBAL_INIT_SELECT_GI_ENABLE;
+	ic->ic_pppc_select_enable = QTN_GLOBAL_INIT_SELECT_PPPC_ENABLE;
+
+	ic->ic_def_matrix = QTN_GLOBAL_INIT_DEF_MATRIX;
+
+	/*
+	 * Enable WME by default if we're capable.
+	 */
+	if (ic->ic_caps & IEEE80211_C_WME)
+		ic->ic_flags |= IEEE80211_F_WME;
+	(void) ieee80211_setmode(ic, ic->ic_curmode);
+
+	if (ic->ic_lintval == 0) {
+		ic->ic_lintval = IEEE80211_BINTVAL_DEFAULT;
+		ic->ic_lintval_backup = IEEE80211_BINTVAL_DEFAULT;
+	}
+	if (ic->ic_bcn_hang_timeout == 0)
+		ic->ic_bcn_hang_timeout = IEEE80211_BEACON_HANG_TIMEOUT_DFLT;
+	ic->ic_bmisstimeout = 7 * ic->ic_lintval;	/* default 7 beacons */
+	IEEE80211_LOCK_INIT(ic, "ieee80211com");
+	IEEE80211_VAPS_LOCK_INIT(ic, "ieee80211com_vaps");
+	TAILQ_INIT(&ic->ic_vaps);
+
+	/* NOTE(review): ic_txpowlimit is assigned MAX and then immediately
+	 * overwritten with MIN — the first assignment is dead; one of these
+	 * likely targets a different field.  Flagged, not changed. */
+	ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX;
+	ic->ic_txpowlimit = IEEE80211_TXPOWER_MIN;
+	ic->ic_newtxpowlimit = IEEE80211_TXPOWER_MAX;
+
+	ic->ic_extender_role = IEEE80211_EXTENDER_ROLE_NONE;
+	ic->ic_extender_mbs_best_rssi = IEEE80211_EXTENDER_DEFAULT_MBS_BEST_RSSI;
+	ic->ic_extender_rbs_best_rssi = IEEE80211_EXTENDER_DEFAULT_RBS_BEST_RSSI;
+	ic->ic_extender_mbs_wgt = IEEE80211_EXTENDER_DEFAULT_MBS_WGT;
+	ic->ic_extender_rbs_wgt = IEEE80211_EXTENDER_DEFAULT_RBS_WGT;
+	init_timer(&ic->ic_extender_scan_timer);
+	ic->ic_extender_scan_timer.function = ieee80211_extender_start_scan;
+	ic->ic_extender_scan_timer.data = (unsigned long)ic;
+	ic->ic_extender_mbs_detected_jiffies = jiffies;
+	ic->ic_extender_rssi_continue = 0;
+	ic->ic_scan_opchan_enable = 0;
+	ic->ic_extender_bgscanintvl = IEEE80211_BGSCAN_INTVAL_DEFAULT * HZ;
+	ic->ic_extender_mbs_rssi_margin = IEEE80211_EXTENDER_DEFAULT_MBS_RSSI_MARGIN;
+	ic->ic_scan_tbl_len_max = IEEE80211_SCAN_TBL_LEN_MAX_DFLT;
+	ic->ic_bw_auto_select = 0;
+	ic->ic_max_system_bw = BW_HT80;
+	ic->ic_bss_bw = ic->ic_max_system_bw;
+	/* default to the global ("GB") operating-class table */
+	ic->ic_oper_class_table = &oper_class_table[OPER_CLASS_GB_INDEX];
+	ic->ic_autochan_dbg_level = CHAN_SEL_LOG_ERR;
+
+	ieee80211_crypto_attach(ic);
+	ieee80211_node_attach(ic);
+	ieee80211_power_attach(ic);
+	ieee80211_proto_attach(ic);
+	ieee80211_scan_attach(ic);
+	ieee80211_tpc_query_init(&ic->ic_tpc_query_info, ic, TPC_INTERVAL_DEFAULT);
+	ieee80211_doth_measurement_init(ic);
+
+	ieee80211_media_setup(ic, &ic->ic_media, ic->ic_caps,
+		ieee80211com_media_change, ieee80211com_media_status);
+	ieee80211com_media_status((void *) ic, &imr);
+	ifmedia_set(&ic->ic_media, imr.ifm_active);
+
+	INIT_DELAYED_WORK(&ic->pm_work, ieee80211_update_pm);
+	pm_qos_add_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_WLAN, PM_QOS_DEFAULT_VALUE);
+	init_timer(&ic->ic_pm_period_change);
+	ic->ic_pm_period_change.function = ieee80211_pm_period_change;
+	ic->ic_pm_period_change.data = (unsigned long) ic;
+
+#if defined(QBMPS_ENABLE)
+	init_timer(&ic->ic_bmps_tput_check.tput_timer);
+	ic->ic_bmps_tput_check.tput_timer.function = ieee80211_bmps_tput_check;
+	ic->ic_bmps_tput_check.tput_timer.data = (unsigned long) ic;
+#endif
+
+	ic->ic_offchan_protect.offchan_stop_expire.function = ieee80211_off_channel_timeout;
+	init_timer(&ic->ic_offchan_protect.offchan_stop_expire);
+
+	init_waitqueue_head(&ic->ic_scan_comp);
+
+	init_wowlan_params(&ic->ic_wowlan);
+
+	ic->ic_vap_default_state = IEEE80211_VAP_STATE_ENABLED;
+
+	/* -1: no boot-time CAC duration configured */
+	ic->ic_max_boot_cac_duration = -1;
+
+	ic->ic_boot_cac_end_jiffy = 0;
+	ic->ic_rx_bar_sync = QTN_RX_BAR_SYNC_QTN;
+
+	ic->ic_vopt.state = IEEE80211_VOPT_DISABLED;
+	ic->ic_vopt.cur_state = IEEE80211_VOPT_DISABLED;
+	ic->ic_vopt.bbf =  QTN_GLOBAL_PSEL_MATRIX_ENABLE;
+	ic->ic_vopt.pppc = ic->ic_pppc_select_enable;
+	ic->ic_vopt.airfair = ic->ic_airfair;
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_ifattach);
+
+/*
+ * Tear down an ieee80211com instance: stop all timers and pending PM
+ * work, delete every VAP under the RTNL lock, detach the sub-modules in
+ * reverse of attach order, and drop the module use count.
+ */
+void
+ieee80211_ifdetach(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+
+#if defined(QBMPS_ENABLE)
+	/* del_timer_sync (was del_timer): wait for a concurrently-running
+	 * handler to finish before the ieee80211com goes away */
+	del_timer_sync(&ic->ic_bmps_tput_check.tput_timer);
+#endif
+	del_timer_sync(&ic->ic_offchan_protect.offchan_stop_expire);
+	/* Bug fix: these timers are armed in ieee80211_ifattach()/at runtime
+	 * but were never deleted here, letting their callbacks fire into a
+	 * freed ieee80211com (use-after-free). */
+	del_timer_sync(&ic->ic_pm_period_change);
+	del_timer_sync(&ic->ic_extender_scan_timer);
+
+	pm_flush_work(&ic->pm_work);
+	pm_qos_remove_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_WLAN);
+
+	rtnl_lock();
+	while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL)
+		ic->ic_vap_delete(vap);
+	rtnl_unlock();
+
+	ieee80211_clean_chanset_values(ic);
+	ieee80211_scs_free_tdls_stats_list(ic);
+	ieee80211_doth_measurement_deinit(ic);
+	ieee80211_tpc_query_deinit(&ic->ic_tpc_query_info);
+	ieee80211_scan_detach(ic);
+	ieee80211_proto_detach(ic);
+	ieee80211_crypto_detach(ic);
+	ieee80211_power_detach(ic);
+	ieee80211_node_detach(ic);
+	ifmedia_removeall(&ic->ic_media);
+
+	IEEE80211_VAPS_LOCK_DESTROY(ic);
+	IEEE80211_LOCK_DESTROY(ic);
+
+	_MOD_DEC_USE(THIS_MODULE);
+}
+EXPORT_SYMBOL(ieee80211_ifdetach);
+
+/*
+ * Initialize TDLS state for a STA-mode VAP (no-op for other modes):
+ * TDLS starts prohibited, with all tunables set to their DEFAULT_TDLS_*
+ * values and the power-save lock initialized.
+ */
+static void ieee80211_vap_init_tdls(struct ieee80211vap *vap)
+{
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		/* AP support tdls and STA disable tdls by default */
+		vap->iv_flags_ext |= IEEE80211_FEXT_TDLS_PROHIB;
+		vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_CS_PROHIB;
+		vap->iv_flags_ext &= ~IEEE80211_FEXT_AP_TDLS_PROHIB;
+		vap->tdls_discovery_interval = DEFAULT_TDLS_DISCOVER_INTERVAL;
+		vap->tdls_node_life_cycle = DEFAULT_TDLS_LIFE_CYCLE;
+		vap->tdls_path_sel_prohibited = DEFAULT_TDLS_PATH_SEL_MODE;
+		vap->tdls_timeout_time = DEFAULT_TDLS_TIMEOUT_TIME;
+		vap->tdls_path_sel_weight = DEFAULT_TDLS_LINK_WEIGHT;
+		vap->tdls_training_pkt_cnt = DEFAULT_TDLS_RATE_DETECTION_PKT_CNT;
+		vap->tdls_uapsd_indicat_wnd = DEFAULT_TDLS_UAPSD_INDICATION_WND;
+		vap->tdls_path_sel_pps_thrshld = DEFAULT_TDLS_PATH_SEL_PPS_THRSHLD;
+		vap->tdls_path_sel_rate_thrshld = DEFAULT_TDLS_PATH_SEL_RATE_THRSHLD;
+		vap->tdls_verbose = DEFAULT_TDLS_VERBOSE;
+		vap->tdls_min_valid_rssi = DEFAULT_TDLS_MIN_RSSI;
+		vap->tdls_switch_ints = DEFAULT_TDLS_LINK_SWITCH_INV;
+		vap->tdls_phy_rate_wgt = DEFAULT_TDLS_PHY_RATE_WEIGHT;
+		vap->tdls_fixed_off_chan = DEFAULT_TDLS_FIXED_OFF_CHAN;
+		vap->tdls_fixed_off_chan_bw = BW_INVALID;
+		vap->tdls_chan_switching = 0;
+		vap->tdls_cs_disassoc_pending = 0;
+		vap->tdls_cs_node = NULL;
+		spin_lock_init(&vap->tdls_ps_lock);
+	}
+}
+
+int
+ieee80211_vap_setup(struct ieee80211com *ic, struct net_device *dev,
+	const char *name, int unit, int opmode, int flags)
+{
+#define	IEEE80211_C_OPMODE \
+	(IEEE80211_C_IBSS | IEEE80211_C_HOSTAP | IEEE80211_C_AHDEMO | \
+	 IEEE80211_C_MONITOR)
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct net_device_ops *pndo = (struct net_device_ops *)dev->netdev_ops;
+	int err;
+
+	if (name != NULL) {
+		if (strchr(name, '%')) {
+			if ((err = dev_alloc_name(dev, name)) < 0) {
+				printk(KERN_ERR "can't alloc name %s\n", name);
+				return err;
+			}
+		} else {
+			strncpy(dev->name, name, sizeof(dev->name));
+		}
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	pndo->ndo_get_stats64 = ieee80211_getstats64;
+#else
+	pndo->ndo_get_stats = ieee80211_getstats;
+#endif
+	pndo->ndo_open = ieee80211_open;
+	pndo->ndo_stop = ieee80211_stop;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	pndo->ndo_set_rx_mode = ieee80211_set_multicast_list;
+#else
+	pndo->ndo_set_multicast_list = ieee80211_set_multicast_list;
+#endif
+	pndo->ndo_change_mtu = ieee80211_change_mtu;
+	dev->tx_queue_len = QTN_BUFS_WMAC_TX_QDISC;
+
+	/*
+	 * The caller is assumed to allocate the device with
+	 * alloc_etherdev or similar so we arrange for the
+	 * space to be reclaimed accordingly.
+	 */
+	dev->destructor = free_netdev;
+
+	vap->iv_ic = ic;
+	vap->iv_dev = dev;			/* back pointer */
+	vap->iv_unit = unit;
+	vap->iv_flags = ic->ic_flags;		/* propagate common flags */
+	vap->iv_flags_ext = ic->ic_flags_ext;
+	vap->iv_xrvap = NULL;
+	vap->iv_ath_cap = ic->ic_ath_cap;
+	/* Default Multicast traffic to lowest rate of 1000 Kbps*/
+	vap->iv_mcast_rate = 1000;
+
+	vap->iv_caps = ic->ic_caps &~ IEEE80211_C_OPMODE;
+
+	/* Enabling short GI by default. This may be right place to set it */
+	vap->iv_ht_flags |= IEEE80211_HTF_SHORTGI_ENABLED;
+	vap->iv_ht_flags |= IEEE80211_HTF_LDPC_ENABLED;
+
+	/* Initialize vht capability flags  */
+	vap->iv_vht_flags = ic->ic_vhtcap.cap_flags;
+
+	/* Disable STBC by default  */
+	vap->iv_ht_flags &= ~(IEEE80211_HTCAP_C_TXSTBC | IEEE80211_HTCAP_C_RXSTBC);
+	vap->iv_vht_flags &= ~(IEEE80211_VHTCAP_C_TX_STBC);
+
+	vap->iv_rx_amsdu_enable = QTN_RX_AMSDU_DYNAMIC;
+	vap->iv_rx_amsdu_threshold_cca = IEEE80211_RX_AMSDU_THRESHOLD_CCA;
+	vap->iv_rx_amsdu_threshold_pmbl = IEEE80211_RX_AMSDU_THRESHOLD_PMBL;
+	vap->iv_rx_amsdu_pmbl_wf_sp = IEEE80211_RX_AMSDU_PMBL_WF_SP;
+	vap->iv_rx_amsdu_pmbl_wf_lp = IEEE80211_RX_AMSDU_PMBL_WF_LP;
+
+	switch (opmode) {
+	case IEEE80211_M_STA:
+		/* WDS/Repeater */
+		if (flags & IEEE80211_NO_STABEACONS)
+		{
+			vap->iv_flags_ext |= IEEE80211_FEXT_SWBMISS;
+			vap->iv_link_loss_enabled = 1;
+			vap->iv_bcn_miss_thr = 0;
+		}
+		vap->iv_caps |= IEEE80211_C_WDS;
+		vap->iv_flags_ext |= IEEE80211_FEXT_WDS;
+		/* do DBS specific initialization, keep it open for all chipset,
+		 * as we don't want chip specific execution here, for RF with no
+		 * dual band support, these initiazation won't be referenced.
+		 */
+		vap->iv_pref_band = IEEE80211_5Ghz;
+		/* 2,4ghz specific station profile */
+		vap->iv_2_4ghz_prof.phy_mode = IEEE80211_MODE_11NG_HT40PM;
+		vap->iv_2_4ghz_prof.vht = 0;
+		vap->iv_2_4ghz_prof.bw = 40;
+
+		/* 5ghz specific station profile */
+		vap->iv_5ghz_prof.phy_mode = IEEE80211_MODE_11AC_VHT80PM;
+		vap->iv_5ghz_prof.vht = 1;
+		vap->iv_5ghz_prof.bw = 80;
+		break;
+	case IEEE80211_M_IBSS:
+		vap->iv_caps |= IEEE80211_C_IBSS;
+		vap->iv_ath_cap &= ~IEEE80211_ATHC_XR;
+		break;
+	case IEEE80211_M_AHDEMO:
+		vap->iv_caps |= IEEE80211_C_AHDEMO;
+		vap->iv_ath_cap &= ~IEEE80211_ATHC_XR;
+		break;
+	case IEEE80211_M_HOSTAP:
+		vap->iv_caps |= IEEE80211_C_HOSTAP;
+		vap->iv_ath_cap &= ~IEEE80211_ATHC_TURBOP;
+		if ((vap->iv_flags & IEEE80211_VAP_XR) == 0)
+			vap->iv_ath_cap &= ~IEEE80211_ATHC_XR;
+		vap->iv_caps |= IEEE80211_C_WDS;
+		vap->iv_flags_ext |= IEEE80211_FEXT_WDS;
+		vap->iv_flags |= IEEE80211_F_DROPUNENC;
+		vap->iv_flags_ext2 = IEEE80211_FEXT_SYNC_CONFIG;
+		break;
+	case IEEE80211_M_MONITOR:
+		vap->iv_caps |= IEEE80211_C_MONITOR;
+		vap->iv_ath_cap &= ~(IEEE80211_ATHC_XR | IEEE80211_ATHC_TURBOP);
+		break;
+	case IEEE80211_M_WDS:
+		vap->iv_caps |= IEEE80211_C_WDS;
+		vap->iv_ath_cap &= ~(IEEE80211_ATHC_XR | IEEE80211_ATHC_TURBOP);
+		vap->iv_flags_ext |= IEEE80211_FEXT_WDS;
+		/* Set WDS according to Extender Role */
+		ieee80211_vap_wds_mode_change(vap);
+		break;
+	}
+	vap->iv_opmode = opmode;
+	IEEE80211_INIT_TQUEUE(&vap->iv_stajoin1tq, ieee80211_sta_join1_tasklet, vap);
+
+	vap->iv_chanchange_count = 0;
+
+	/*
+	 * Enable various functionality by default if we're capable.
+	 */
+	if (vap->iv_caps & IEEE80211_C_WME)
+		vap->iv_flags |= IEEE80211_F_WME;
+	if (vap->iv_caps & IEEE80211_C_FF)
+		vap->iv_flags |= IEEE80211_F_FF;
+
+	vap->iv_dtim_period = IEEE80211_DTIM_DEFAULT;
+
+	vap->iv_monitor_crc_errors = 0;
+	vap->iv_monitor_phy_errors = 0;
+
+	/* Defaults for implicit BA and global BA mask */
+	vap->iv_ba_control = 0xFFFF;
+	vap->iv_implicit_ba = 0x1;
+	vap->iv_max_ba_win_size = IEEE80211_DEFAULT_BA_WINSIZE;
+
+	vap->iv_mcs_config = IEEE80211_MCS_AUTO_RATE_ENABLE;
+
+	/* initialize TDLS Function */
+	ieee80211_vap_init_tdls(vap);
+
+	/* Only need the peer entry in AID table for WDS mode VAP */
+	if (opmode == IEEE80211_M_WDS)
+		vap->iv_max_aid = 1;
+
+	IEEE80211_ADDR_COPY(vap->iv_myaddr, dev->dev_addr);
+	/* NB: defer setting dev_addr so driver can override */
+
+	vap->iv_blacklist_timeout = msecs_to_jiffies(IEEE80211_BLACKLIST_TIMEOUT * MSEC_PER_SEC);
+#define IEEE80211_RATE_TRAINING_COUNT_DEFAULT		300
+#define IEEE80211_RATE_TRAINING_BURST_COUNT_DEFAULT	32
+	vap->iv_rate_training_count = IEEE80211_RATE_TRAINING_COUNT_DEFAULT;
+	vap->iv_rate_training_burst_count = IEEE80211_RATE_TRAINING_BURST_COUNT_DEFAULT;
+	vap->iv_mc_to_uc = IEEE80211_QTN_MC_TO_UC_LEGACY;
+	vap->iv_reliable_bcst = 1;
+	vap->iv_ap_fwd_lncb = 1;
+	vap->iv_tx_amsdu = 1;
+	vap->iv_tx_amsdu_11n = 1;
+	vap->iv_tx_max_amsdu = IEEE80211_VHTCAP_MAX_MPDU_11454;
+	vap->allow_tkip_for_vht = 0;
+	vap->is_block_all_assoc = 0;
+	vap->iv_vap_state = IEEE80211_VAP_STATE_ENABLED;
+
+	ieee80211_crypto_vattach(vap);
+	ieee80211_node_vattach(vap);
+	ieee80211_power_vattach(vap);
+	ieee80211_proto_vattach(vap);
+	ieee80211_scan_vattach(vap);
+	ieee80211_vlan_vattach(vap);
+	ieee80211_ioctl_vattach(vap);
+	ieee80211_sysctl_vattach(vap);
+
+	return 1;
+#undef IEEE80211_C_OPMODE
+}
+EXPORT_SYMBOL(ieee80211_vap_setup);
+
+/*
+ * Attach a vap: finish late per-module init, build the ifmedia list,
+ * link the vap onto ic->ic_vaps and register its net_device.
+ * Returns 1 on success, 0 if register_netdevice() fails.
+ */
+int
+ieee80211_vap_attach(struct ieee80211vap *vap,
+	ifm_change_cb_t media_change, ifm_stat_cb_t media_status)
+{
+	struct net_device *dev = vap->iv_dev;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifmediareq imr;
+
+	ieee80211_node_latevattach(vap);	/* XXX move into vattach */
+	ieee80211_power_latevattach(vap);	/* XXX move into vattach */
+
+	memset(vap->wds_mac, 0x00, IEEE80211_ADDR_LEN);
+
+	/* Populate the media word list, then mark the active media word */
+	(void) ieee80211_media_setup(ic, &vap->iv_media,
+		vap->iv_caps, media_change, media_status);
+	ieee80211_media_status((void *) vap, &imr);
+	ifmedia_set(&vap->iv_media, imr.ifm_active);
+
+	IEEE80211_LOCK_IRQ(ic);
+	TAILQ_INSERT_TAIL(&ic->ic_vaps, vap, iv_next);
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	IEEE80211_ADDR_COPY(dev->dev_addr, vap->iv_myaddr);
+
+	ieee80211_scanner_get(vap->iv_opmode, 1);
+
+        ic->ic_pm_reason = IEEE80211_PM_LEVEL_VAP_ATTACH;
+
+	ieee80211_pm_queue_work(ic);
+
+	ieee80211_wme_initparams(vap);
+
+	/* Fix issue that tx power will be abnormal when dynamically switch from station mode to AP mode*/
+	ieee80211_pwr_adjust(vap, 0);
+
+	ieee80211_tdls_vattach(vap);
+
+	INIT_LIST_HEAD(&vap->sample_sta_list);
+	spin_lock_init(&vap->sample_sta_lock);
+	vap->sample_sta_count = 0;
+
+	/* NB: rtnl is held on entry so don't use register_netdev */
+	/* NOTE(review): on failure the vap stays linked on ic_vaps and the
+	 * state set up above is not unwound -- confirm callers clean up. */
+	if (register_netdevice(dev)) {
+		printk(KERN_ERR "%s: unable to register device\n", dev->name);
+		return 0;
+	} else {
+		return 1;
+	}
+}
+EXPORT_SYMBOL(ieee80211_vap_attach);
+
+/*
+ * Detach a vap: cancel deferred join work, unlink it from ic->ic_vaps
+ * and tear down per-module vap state.  The net_device itself is
+ * unregistered separately by ieee80211_vap_detach_late().
+ */
+void
+ieee80211_vap_detach(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	IEEE80211_CANCEL_TQUEUE(&vap->iv_stajoin1tq);
+	IEEE80211_LOCK_IRQ(ic);
+	TAILQ_REMOVE(&ic->ic_vaps, vap, iv_next);
+	if (TAILQ_EMPTY(&ic->ic_vaps))		/* reset to supported mode */
+		ic->ic_opmode = IEEE80211_M_STA;
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	/*
+	 * Change state to 'INIT' to disassociate WDS peer node
+	 */
+	if (vap->iv_opmode == IEEE80211_M_WDS)
+		ieee80211_new_state(vap, IEEE80211_S_INIT, 0);
+
+	ifmedia_removeall(&vap->iv_media);
+
+	/* Per-module teardown, roughly the reverse of the vattach order */
+	sample_rel_client_data(vap);
+	ieee80211_mac_acl(vap, IEEE80211_MACCMD_DETACH);
+	ieee80211_sysctl_vdetach(vap);
+	ieee80211_proc_cleanup(vap);
+	ieee80211_ioctl_vdetach(vap);
+	ieee80211_vlan_vdetach(vap);
+	ieee80211_scan_vdetach(vap);
+	ieee80211_proto_vdetach(vap);
+	ieee80211_crypto_vdetach(vap);
+	ieee80211_power_vdetach(vap);
+	ieee80211_tdls_vdetach(vap);
+	ieee80211_node_vdetach(vap);
+	ieee80211_vap_remove_ie(vap);
+	ieee80211_extender_vdetach(vap);
+
+        ic->ic_pm_reason = IEEE80211_PM_LEVEL_VAP_DETACH;
+
+	ieee80211_pm_queue_work(ic);
+
+}
+EXPORT_SYMBOL(ieee80211_vap_detach);
+
+/*
+ * Final stage of vap teardown: drop the net_device registration.
+ * Split out from ieee80211_vap_detach() so drivers can sequence it.
+ */
+void
+ieee80211_vap_detach_late(struct ieee80211vap *vap)
+{
+	/* NB: rtnl is held on entry so don't use unregister_netdev */
+	unregister_netdevice(vap->iv_dev);
+}
+EXPORT_SYMBOL(ieee80211_vap_detach_late);
+
+/*
+ * Convert MHz frequency to IEEE channel number.
+ */
+u_int
+ieee80211_mhz2ieee(u_int freq, u_int flags)
+{
+	if (flags & IEEE80211_CHAN_2GHZ) {	/* 2GHz band */
+		if (freq == 2484)		/* Japan */
+			return 14;
+		if ((freq >= 2412) && (freq < 2484)) /* don't number non-IEEE channels */
+			return (freq - 2407) / 5;
+		return 0;
+	} else if (flags & IEEE80211_CHAN_5GHZ)	{	/* 5Ghz band */
+		if ((freq >= 5150) && (freq <= 5845))	/* don't number non-IEEE channels */
+			return (freq - 5000) / 5;
+		return 0;
+	} else {
+		/* something is fishy, don't do anything */
+		return 0;
+	}
+}
+EXPORT_SYMBOL(ieee80211_mhz2ieee);
+
+/*
+ * Convert channel to IEEE channel number.
+ */
+u_int
+ieee80211_chan2ieee(struct ieee80211com *ic, const struct ieee80211_channel *c)
+{
+	if (c == NULL) {
+		printk("invalid channel (NULL)\n");
+		return 0;
+	}
+	return (c == IEEE80211_CHAN_ANYC ?  IEEE80211_CHAN_ANY : c->ic_ieee);
+}
+EXPORT_SYMBOL(ieee80211_chan2ieee);
+
+/*
+ * Convert IEEE channel number to MHz frequency.
+ */
+u_int
+ieee80211_ieee2mhz(u_int chan, u_int flags)
+{
+	if (flags & IEEE80211_CHAN_2GHZ) {	/* 2GHz band */
+		if (chan == 14)
+			return 2484;
+		if (chan < 14)
+			return 2407 + chan * 5;
+		else
+			return 2512 + ((chan - 15) * 20);
+	} else if (flags & IEEE80211_CHAN_5GHZ) {	/* 5Ghz band */
+		return 5000 + (chan * 5);
+	} else {					/* either, guess */
+		if (chan == 14)
+			return 2484;
+		if (chan < 14)			/* 0-13 */
+			return 2407 + chan * 5;
+		if (chan < 27)			/* 15-26 */
+			return 2512 + ((chan - 15) * 20);
+		return 5000 + (chan * 5);
+	}
+}
+EXPORT_SYMBOL(ieee80211_ieee2mhz);
+
+/*
+ * Locate a channel given a frequency+flags.  We cache
+ * the previous lookup to optimize swithing between two
+ * channels--as happens with dynamic turbo.
+ */
+struct ieee80211_channel *
+ieee80211_find_channel(struct ieee80211com *ic, int freq, int flags)
+{
+	struct ieee80211_channel *c;
+	int i;
+
+	flags &= IEEE80211_CHAN_ALLTURBO;
+	c = ic->ic_prevchan;
+	if (c != NULL && c->ic_freq == freq &&
+	    (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
+		return c;
+	/* brute force search */
+	for (i = 0; i < ic->ic_nchans; i++) {
+		c = &ic->ic_channels[i];
+		if (c->ic_freq == freq) /* &&
+		    (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) */
+			return c;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(ieee80211_find_channel);
+
+/*
+ * Setup the media data structures according to the channel and
+ * rate tables.  This must be called by the driver after
+ * ieee80211_attach and before most anything else.
+ */
+int
+ieee80211_media_setup(struct ieee80211com *ic,
+	struct ifmedia *media, u_int32_t caps,
+	ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
+{
+#define	ADD(_media, _s, _o) \
+	ifmedia_add(_media, IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL)
+	int i, j, mode, rate, maxrate, mword, mopt, r;
+	struct ieee80211_rateset *rs;
+	struct ieee80211_rateset allrates;
+
+	/*
+	 * Fill in media characteristics.
+	 */
+	ifmedia_init(media, 0, media_change, media_stat);
+	maxrate = 0;
+	memset(&allrates, 0, sizeof(allrates));
+	for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_MAX; mode++) {
+		static const u_int mopts[] = { 
+			IFM_AUTO,
+			IFM_IEEE80211_11A,
+			IFM_IEEE80211_11B,
+			IFM_IEEE80211_11G,
+			IFM_IEEE80211_FH,
+			IFM_IEEE80211_11A | IFM_IEEE80211_TURBO,
+			IFM_IEEE80211_11G | IFM_IEEE80211_TURBO,
+			IFM_IEEE80211_11NA,
+			IFM_IEEE80211_11NG,
+			IFM_IEEE80211_11NG_HT40PM,
+			IFM_IEEE80211_11NA_HT40PM,
+			IFM_IEEE80211_11AC_VHT20PM,
+			IFM_IEEE80211_11AC_VHT40PM,
+			IFM_IEEE80211_11AC_VHT80PM,
+			IFM_IEEE80211_11AC_VHT160PM,
+		};
+		if ((ic->ic_modecaps & (1<<mode)) == 0)
+			continue;
+		mopt = mopts[mode];
+		ADD(media, IFM_AUTO, mopt);	/* e.g. 11a auto */
+		if (caps & IEEE80211_C_IBSS)
+			ADD(media, IFM_AUTO, mopt | IFM_IEEE80211_ADHOC);
+		if (caps & IEEE80211_C_HOSTAP)
+			ADD(media, IFM_AUTO, mopt | IFM_IEEE80211_HOSTAP);
+		if (caps & IEEE80211_C_AHDEMO)
+			ADD(media, IFM_AUTO, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0);
+		if (caps & IEEE80211_C_MONITOR)
+			ADD(media, IFM_AUTO, mopt | IFM_IEEE80211_MONITOR);
+		if (caps & IEEE80211_C_WDS)
+			ADD(media, IFM_AUTO, mopt | IFM_IEEE80211_WDS);
+		if (mode == IEEE80211_MODE_AUTO)
+			continue;
+		rs = &ic->ic_sup_rates[mode];
+		for (i = 0; i < rs->rs_nrates; i++) {
+			rate = rs->rs_rates[i];
+			if(mode < IEEE80211_MODE_11NA)
+				mword = ieee80211_rate2media(ic, rate, mode);
+			else
+			{
+				/* This may contain both legacy and 11n rates */
+				if(i < IEEE80211_RATE_SIZE) // 8 Legacy rates
+					rate = rate & IEEE80211_RATE_VAL;
+
+				mword = ieee80211_mcs2media(ic, rate, mode);
+			}
+
+			if (mword == 0)
+				continue;
+			ADD(media, mword, mopt);
+			if (caps & IEEE80211_C_IBSS)
+				ADD(media, mword, mopt | IFM_IEEE80211_ADHOC);
+			if (caps & IEEE80211_C_HOSTAP)
+				ADD(media, mword, mopt | IFM_IEEE80211_HOSTAP);
+			if (caps & IEEE80211_C_AHDEMO)
+				ADD(media, mword, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0);
+			if (caps & IEEE80211_C_MONITOR)
+				ADD(media, mword, mopt | IFM_IEEE80211_MONITOR);
+			if (caps & IEEE80211_C_WDS)
+				ADD(media, mword, mopt | IFM_IEEE80211_WDS);
+			/*
+			 * Add rate to the collection of all rates.
+			 */
+			r = rate & IEEE80211_RATE_VAL;
+			for (j = 0; j < allrates.rs_nrates; j++)
+				if (allrates.rs_rates[j] == r)
+					break;
+			if (j == allrates.rs_nrates) {
+				/* unique, add to the set */
+				allrates.rs_rates[j] = r;
+				allrates.rs_nrates++;
+			}
+			rate = (rate & IEEE80211_RATE_VAL) / 2;
+			if (rate > maxrate)
+				maxrate = rate;
+		}
+	}
+	for (i = 0; i < allrates.rs_nrates; i++) {
+		if(mode < IEEE80211_MODE_11NA)
+			mword = ieee80211_rate2media(ic, allrates.rs_rates[i],
+					IEEE80211_MODE_AUTO);
+		else
+			mword = ieee80211_mcs2media(ic, allrates.rs_rates[i],
+					IEEE80211_MODE_AUTO);
+		if (mword == 0)
+			continue;
+		mword = IFM_SUBTYPE(mword);	/* remove media options */
+		ADD(media, mword, 0);
+		if (caps & IEEE80211_C_IBSS)
+			ADD(media, mword, IFM_IEEE80211_ADHOC);
+		if (caps & IEEE80211_C_HOSTAP)
+			ADD(media, mword, IFM_IEEE80211_HOSTAP);
+		if (caps & IEEE80211_C_AHDEMO)
+			ADD(media, mword, IFM_IEEE80211_ADHOC | IFM_FLAG0);
+		if (caps & IEEE80211_C_MONITOR)
+			ADD(media, mword, IFM_IEEE80211_MONITOR);
+		if (caps & IEEE80211_C_WDS)
+			ADD(media, mword, IFM_IEEE80211_WDS);
+	}
+	return maxrate;
+#undef ADD
+}
+
+/*
+ * Print the supported rates for each supported phy mode and the
+ * hardware crypto capabilities to the kernel log.
+ * NOTE(review): mixes printk and printf -- presumably printf is mapped
+ * to printk in this build; confirm against the compat headers.
+ */
+void
+ieee80211_announce(struct ieee80211com *ic)
+{
+	int i, mode, rate, mword;
+	struct ieee80211_rateset *rs;
+
+	for (mode = IEEE80211_MODE_11A; mode < IEEE80211_MODE_MAX; mode++) {
+		if ((ic->ic_modecaps & (1<<mode)) == 0)
+			continue;
+		printk("%s rates: ", ieee80211_phymode_name[mode]);
+		rs = &ic->ic_sup_rates[mode];
+		for (i = 0; i < rs->rs_nrates; i++) {
+			rate = rs->rs_rates[i];
+			mword = ieee80211_rate2media(ic, rate, mode);
+			if (mword == 0)
+				continue;
+			/* rates are in 0.5 Mbps units; low bit selects ".5" */
+			printf("%s%d%sMbps", (i != 0 ? " " : ""),
+			    (rate & IEEE80211_RATE_VAL) / 2,
+			    ((rate & 0x1) != 0 ? ".5" : ""));
+		}
+		printf("\n");
+	}
+
+	printk("H/W encryption support:");
+
+	if (ic->ic_caps & IEEE80211_C_WEP)
+		printk(" WEP");
+	if (ic->ic_caps & IEEE80211_C_AES)
+		printk(" AES");
+	if (ic->ic_caps & IEEE80211_C_AES_CCM)
+		printk(" AES_CCM");
+	if (ic->ic_caps & IEEE80211_C_CKIP)
+		printk(" CKIP");
+	if (ic->ic_caps & IEEE80211_C_TKIP)
+		printk(" TKIP");
+	printk("\n");
+}
+EXPORT_SYMBOL(ieee80211_announce);
+
+/*
+ * Dump the channel table: one line per channel with a single-letter
+ * type code (first matching classification wins) and power limits.
+ */
+void
+ieee80211_announce_channels(struct ieee80211com *ic)
+{
+	const struct ieee80211_channel *c;
+	char type;
+	int i;
+
+	printf("Chan  Freq  RegPwr  MinPwr  MaxPwr\n");
+	for (i = 0; i < ic->ic_nchans; i++) {
+		c = &ic->ic_channels[i];
+		if (IEEE80211_IS_CHAN_ST(c))
+			type = 'S';
+		else if (IEEE80211_IS_CHAN_108A(c))
+			type = 'T';
+		else if (IEEE80211_IS_CHAN_108G(c))
+			type = 'G';
+		else if (IEEE80211_IS_CHAN_A(c))
+			type = 'a';
+		else if (IEEE80211_IS_CHAN_11NG(c))
+			type = 'n';
+		else if (IEEE80211_IS_CHAN_11NA(c))
+			type = 'n';
+		else if (IEEE80211_IS_CHAN_ANYG(c))
+			type = 'g';
+		else if (IEEE80211_IS_CHAN_B(c))
+			type = 'b';
+		else
+			type = 'f';	/* FHSS / anything unclassified */
+		printf("%4d  %4d%c %6d  %6d  %6d\n"
+			, c->ic_ieee, c->ic_freq, type
+			, c->ic_maxregpower
+			, c->ic_minpower, c->ic_maxpower
+		);
+	}
+}
+EXPORT_SYMBOL(ieee80211_announce_channels);
+
+/*
+ * Common code to calculate the media status word
+ */
+static int
+media_status(enum ieee80211_opmode opmode, u_int16_t mode)
+{
+	int status;
+
+	status = IFM_IEEE80211;
+	switch (opmode) {
+	case IEEE80211_M_STA:
+		break;
+	case IEEE80211_M_AHDEMO:
+		status |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
+		break;
+	case IEEE80211_M_IBSS:
+		status |= IFM_IEEE80211_ADHOC;
+		break;
+	case IEEE80211_M_HOSTAP:
+		status |= IFM_IEEE80211_HOSTAP;
+		break;
+	case IEEE80211_M_MONITOR:
+		status |= IFM_IEEE80211_MONITOR;
+		break;
+	case IEEE80211_M_WDS:
+		status |= IFM_IEEE80211_WDS;
+		break;
+	}
+
+	status |= IFM_MAKEMODE(mode);
+
+	return status;
+}
+
+/*
+ * Handle a media requests on the base interface.
+ */
+static void
+ieee80211com_media_status(void *data, struct ifmediareq *imr)
+{
+	struct ieee80211com *ic = (struct ieee80211com *) data;
+
+	imr->ifm_status = IFM_AVALID;
+	if (!TAILQ_EMPTY(&ic->ic_vaps))
+		imr->ifm_status |= IFM_ACTIVE;
+	imr->ifm_active = media_status(ic->ic_opmode, 0);
+}
+
+/*
+ * Convert a media specification to an 802.11 phy mode.
+ */
+static int
+media2mode(const struct ifmedia_entry *ime, enum ieee80211_phymode *mode)
+{
+
+	switch (IFM_MODE(ime->ifm_media)) {
+	case IFM_IEEE80211_11A:
+		*mode = IEEE80211_MODE_11A;
+		break;
+	case IFM_IEEE80211_11B:
+		*mode = IEEE80211_MODE_11B;
+		break;
+	case IFM_IEEE80211_11G:
+		*mode = IEEE80211_MODE_11G;
+		break;
+	case IFM_IEEE80211_11NG:
+		*mode = IEEE80211_MODE_11NG;
+		break;
+	case IFM_IEEE80211_11NA:
+		*mode = IEEE80211_MODE_11NA;
+		break;
+	case IFM_IEEE80211_11NG_HT40PM:
+		*mode = IEEE80211_MODE_11NG_HT40PM;
+		break;
+	case IFM_IEEE80211_11NA_HT40PM:
+		*mode = IEEE80211_MODE_11NA_HT40PM;
+		break;
+	case IFM_IEEE80211_FH:
+		*mode = IEEE80211_MODE_FH;
+		break;
+	case IFM_IEEE80211_11AC_VHT20PM:
+		*mode = IEEE80211_MODE_11AC_VHT20PM;
+		break;
+	case IFM_IEEE80211_11AC_VHT40PM:
+		*mode = IEEE80211_MODE_11AC_VHT40PM;
+		break;
+	case IFM_IEEE80211_11AC_VHT80PM:
+		*mode = IEEE80211_MODE_11AC_VHT80PM;
+		break;
+	case IFM_IEEE80211_11AC_VHT160PM:
+		*mode = IEEE80211_MODE_11AC_VHT160PM;
+		break;
+	case IFM_AUTO:
+		*mode = IEEE80211_MODE_AUTO;
+		break;
+	default:
+		return 0;
+	}
+	/*
+	 * Turbo mode is an ``option''.  
+	 * XXX: Turbo currently does not apply to AUTO
+	 */
+	if (ime->ifm_media & IFM_IEEE80211_TURBO) {
+		if (*mode == IEEE80211_MODE_11A)
+			*mode = IEEE80211_MODE_TURBO_A;
+		else if (*mode == IEEE80211_MODE_11G)
+			*mode = IEEE80211_MODE_TURBO_G;
+		else
+			return 0;
+	}
+	return 1;
+}
+
+/*
+ * ifmedia change callback for the base (com) interface: derive the new
+ * phy mode from the selected media word and, if it differs from the
+ * current mode, switch modes and reset WME/rate state on every vap.
+ * Returns 0 if nothing changed, -ENETRESET when the mode changed,
+ * or a negative error from media2mode/ieee80211_setmode.
+ */
+static int
+ieee80211com_media_change(void *data)
+{
+	struct ieee80211com *ic = (struct ieee80211com *) data;
+	struct ieee80211vap *vap;
+	struct ifmedia_entry *ime = ic->ic_media.ifm_cur;
+	enum ieee80211_phymode newphymode;
+	int j, error = 0;
+
+	/* XXX is rtnl held here? */
+	/*
+	 * First, identify the phy mode.
+	 */
+	if (!media2mode(ime, &newphymode))
+		return -EINVAL;
+	/* NB: mode must be supported, no need to check */
+	/*
+	 * Autoselect doesn't make sense when operating as an AP.
+	 * If no phy mode has been selected, pick one and lock it
+	 * down so rate tables can be used in forming beacon frames
+	 * and the like.
+	 */
+
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
+	    newphymode == IEEE80211_MODE_AUTO) {
+		/* first supported mode wins */
+		for (j = IEEE80211_MODE_11A; j < IEEE80211_MODE_MAX; j++)
+			if (ic->ic_modecaps & (1 << j)) {
+				newphymode = j;
+				break;
+			}
+	}
+
+	/*
+	 * Handle phy mode change.
+	 */
+
+	IEEE80211_LOCK_IRQ(ic);
+	if (ic->ic_curmode != newphymode) {		/* change phy mode */
+		error = ieee80211_setmode(ic, newphymode);
+		if (error != 0) {
+			IEEE80211_UNLOCK_IRQ_EARLY(ic);
+			return error;
+		}
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			/* reset WME state */
+			ieee80211_wme_initparams_locked(vap);
+			ieee80211_adjust_wme_by_vappri(ic);
+			/*
+			 * Setup an initial rate set according to the
+			 * current/default channel selected above.  This
+			 * will be changed when scanning but must exist
+			 * now so drivers have a consistent state.
+			 */
+			KASSERT(vap->iv_bss != NULL, ("no bss node"));
+			vap->iv_bss->ni_rates = ic->ic_sup_rates[newphymode];
+		}
+		error = -ENETRESET;
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+
+#ifdef notdef
+	if (error == 0)
+		ifp->if_baudrate = ifmedia_baudrate(ime->ifm_media);
+#endif
+	return error;
+}
+
+/*
+ * Find the index of `rate` in the rate set for `mode`.
+ * The first IEEE80211_RATE_SIZE entries are legacy rates and are
+ * compared with the basic-rate bit masked off; later (11n/MCS) entries
+ * are compared on the full byte.  Returns -1 if not found.
+ */
+static int
+findrate(struct ieee80211com *ic, enum ieee80211_phymode mode, int rate)
+{
+#define	IEEERATE(_ic,_m,_i) \
+	((_ic)->ic_sup_rates[_m].rs_rates[_i] & IEEE80211_RATE_VAL)
+#define	IEEE11NRATE(_ic,_m,_i) \
+	((_ic)->ic_sup_rates[_m].rs_rates[_i])
+	int i, nrates = ic->ic_sup_rates[mode].rs_nrates;
+	for (i = 0; i < nrates; i++)
+	{
+		if(i < IEEE80211_RATE_SIZE)
+		{
+			/* Legacy Rates */
+			if (IEEERATE(ic, mode, i) == rate)
+				return i;
+		}
+		else
+		{
+			/* 11n rates */
+			if (IEEE11NRATE(ic, mode, i) == rate)
+				return i;
+		}
+	}
+	return -1;
+#undef IEEERATE
+#undef IEEE11NRATE
+}
+
+/*
+ * Convert a media specification to a rate index and possibly a mode
+ * (if the rate is fixed and the mode is specified as ``auto'' then
+ * we need to lock down the mode so the index is meaningful).
+ */
+static int
+checkrate(struct ieee80211com *ic, enum ieee80211_phymode mode, int rate)
+{
+
+	/*
+	 * Check the rate table for the specified/current phy.
+	 */
+	if (mode == IEEE80211_MODE_AUTO) {
+		int i;
+		/*
+		 * In autoselect mode search for the rate.
+		 */
+		for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++) {
+			if ((ic->ic_modecaps & (1 << i)) &&
+			    findrate(ic, i, rate) != -1)
+				return 1;
+		}
+		return 0;
+	} else {
+		/*
+		 * Mode is fixed, check for rate.
+		 */
+		return (findrate(ic, mode, rate) != -1);
+	}
+}
+
+/*
+ * Handle a media change request; the only per-vap
+ * information that is meaningful is the fixed rate
+ * and desired phy mode.
+ */
+int
+ieee80211_media_change(void *data)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) data;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifmedia_entry *ime = vap->iv_media.ifm_cur;
+	enum ieee80211_phymode newmode;
+	int newrate, error;
+
+	/*
+	 * First, identify the desired phy mode.
+	 */
+	if (!media2mode(ime, &newmode)) {
+		return -EINVAL;
+	}
+
+	/*
+	 * Check for fixed/variable rate.
+	 */
+	if (IFM_SUBTYPE(ime->ifm_media) != IFM_AUTO) {
+		/*
+		 * Convert media subtype to rate and potentially
+		 * lock down the mode.
+		 */
+
+		if(newmode >= IEEE80211_MODE_11NA)
+			newrate = ieee80211_media2mcs(ime->ifm_media);
+		else
+			newrate = ieee80211_media2rate(ime->ifm_media);
+
+		if (newrate == 0 || !checkrate(ic, newmode, newrate))
+		{
+			return -EINVAL;
+		}
+	} else
+		newrate = IEEE80211_FIXED_RATE_NONE;
+
+	/*
+	 * Install the rate+mode settings.
+	 */
+	error = 0;
+	if (vap->iv_fixed_rate != newrate ||
+		newrate == IEEE80211_FIXED_RATE_NONE) {
+		vap->iv_fixed_rate = newrate;		/* fixed tx rate */
+		error = -ENETRESET;
+
+		if (newrate == IEEE80211_FIXED_RATE_NONE)
+			newrate = 0x90; // To put MuC in Auto Rate
+
+		/* Forward these parameters to the driver and MuC */
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_FIXED_TX_RATE, newrate, NULL, 0);
+	}
+
+	if (ic->ic_des_mode != newmode) {
+		ic->ic_des_mode = newmode;		/* desired phymode */
+		error = -ENETRESET;
+	}
+	return error;
+}
+EXPORT_SYMBOL(ieee80211_media_change);
+
+/*
+ * ifmedia status callback for a vap: report ACTIVE when running and
+ * fill in the active media word (opmode + desired phy mode + rate).
+ */
+void
+ieee80211_media_status(void *data, struct ifmediareq *imr)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) data;
+	struct ieee80211com *ic = vap->iv_ic;
+	enum ieee80211_phymode mode;
+	int mediarate = IFM_AUTO;
+
+	imr->ifm_status = IFM_AVALID;
+	/*
+	 * NB: use the current channel's mode to lock down a xmit
+	 * rate only when running; otherwise we may have a mismatch
+	 * in which case the rate will not be convertible.
+	 */
+	if (vap->iv_state == IEEE80211_S_RUN) {
+		imr->ifm_status |= IFM_ACTIVE;
+		mode = ic->ic_curmode;
+	} else {
+		mode = IEEE80211_MODE_AUTO;
+	}
+
+	/*
+	 * FIXME: Bug #2324
+	 * Assumption that QTN devices support 5Ghz N channels so we
+	 * calculate the IFM based on desired mode only
+	 */
+	imr->ifm_active = media_status(vap->iv_opmode, ic->ic_des_mode);
+
+	/*
+	 * Calculate a current rate if possible.
+	 */
+	if (vap->iv_state == IEEE80211_S_RUN) {
+		if (vap->iv_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
+			/*
+			 * A fixed rate is set, report that.
+			 */
+			if (mode < IEEE80211_MODE_11NA) {
+				/* Legacy mode */
+				imr->ifm_active |= ieee80211_rate2media(ic,
+					vap->iv_fixed_rate, mode);
+			} else {
+				/* 11n mode */
+				mediarate |= ieee80211_mcs2media(ic,
+					vap->iv_fixed_rate, mode);
+				if (IFM_AUTO == mediarate)
+					SCSDBG(SCSLOG_INFO, "Couldn't find compatible mediarate\n");
+				imr->ifm_active |= mediarate;
+			}
+		} else {
+			imr->ifm_active |= IFM_AUTO;
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_media_status);
+
+/*
+ * Set the current phy mode.
+ */
+int
+ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode)
+{
+	if (ic->ic_des_mode != IEEE80211_MODE_11B &&
+		ic->ic_des_mode != IEEE80211_MODE_11A) {
+		ieee80211_reset_erp(ic, mode);	/* reset ERP state */
+	}
+
+	ic->ic_curmode = mode;		/* NB: must do post reset_erp */
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_setmode);
+
+/*
+ * Return the phy mode for with the specified channel.
+ */
+enum ieee80211_phymode
+ieee80211_chan2mode(const struct ieee80211_channel *chan)
+{
+	/*
+	 * Callers should handle this case properly, rather than
+	 * just relying that this function returns a sane value.
+	 * XXX Probably needs to be revised.
+	 */
+	KASSERT(chan != IEEE80211_CHAN_ANYC, ("channel not setup"));
+
+	if (IEEE80211_IS_CHAN_11AC_VHT80_EDGEPLUS(chan))
+		return IEEE80211_MODE_11AC_VHT80PM;
+	else if (IEEE80211_IS_CHAN_11AC_VHT80_CNTRPLUS(chan))
+		return IEEE80211_MODE_11AC_VHT80PM;
+	else if (IEEE80211_IS_CHAN_11AC_VHT80_CNTRMINUS(chan))
+		return IEEE80211_MODE_11AC_VHT80PM;
+	else if (IEEE80211_IS_CHAN_11AC_VHT80_EDGEMINUS(chan))
+		return IEEE80211_MODE_11AC_VHT80PM;
+	if (IEEE80211_IS_CHAN_11AC_VHT40PLUS(chan))
+		return IEEE80211_MODE_11AC_VHT40PM;
+	else if (IEEE80211_IS_CHAN_11AC_VHT40MINUS(chan))
+		return IEEE80211_MODE_11AC_VHT40PM;
+	if (IEEE80211_IS_CHAN_11AC(chan))
+		return IEEE80211_MODE_11AC_VHT20PM;
+	if (IEEE80211_IS_CHAN_11NG_HT40PLUS(chan))
+		return IEEE80211_MODE_11NG_HT40PM;
+	else if (IEEE80211_IS_CHAN_11NG_HT40MINUS(chan))
+		return IEEE80211_MODE_11NG_HT40PM;
+	if (IEEE80211_IS_CHAN_11NA_HT40PLUS(chan))
+		return IEEE80211_MODE_11NA_HT40PM;
+	else if (IEEE80211_IS_CHAN_11NA_HT40MINUS(chan))
+		return IEEE80211_MODE_11NA_HT40PM;
+	if (IEEE80211_IS_CHAN_11NG(chan))
+		return IEEE80211_MODE_11NG;
+	else if (IEEE80211_IS_CHAN_11NA(chan))
+		return IEEE80211_MODE_11NA;
+	else if (IEEE80211_IS_CHAN_108G(chan))
+		return IEEE80211_MODE_TURBO_G;
+	else if (IEEE80211_IS_CHAN_TURBO(chan))
+		return IEEE80211_MODE_TURBO_A;
+	else if (IEEE80211_IS_CHAN_A(chan))
+		return IEEE80211_MODE_11A;
+	else if (IEEE80211_IS_CHAN_ANYG(chan))
+		return IEEE80211_MODE_11G;
+	else if (IEEE80211_IS_CHAN_B(chan))
+		return IEEE80211_MODE_11B;
+	else if (IEEE80211_IS_CHAN_FHSS(chan))
+		return IEEE80211_MODE_FH;
+
+	/* NB: should not get here */
+	printk("%s: cannot map channel to mode; freq %u flags 0x%x\n",
+		__func__, chan->ic_freq, chan->ic_flags);
+	return IEEE80211_MODE_11B;
+}
+EXPORT_SYMBOL(ieee80211_chan2mode);
+
+/*
+ * convert IEEE80211 rate value to ifmedia subtype.
+ * ieee80211 rate is in unit of 0.5Mbps.
+ */
+int
+ieee80211_rate2media(struct ieee80211com *ic, int rate, enum ieee80211_phymode mode)
+{
+#define	N(a)	(sizeof(a) / sizeof(a[0]))
+	static const struct {
+		u_int	m;	/* rate + mode */
+		u_int	r;	/* if_media rate */
+	} rates[] = {
+		{   2 | IFM_IEEE80211_FH, IFM_IEEE80211_FH1 },
+		{   4 | IFM_IEEE80211_FH, IFM_IEEE80211_FH2 },
+		{   2 | IFM_IEEE80211_11B, IFM_IEEE80211_DS1 },
+		{   4 | IFM_IEEE80211_11B, IFM_IEEE80211_DS2 },
+		{  11 | IFM_IEEE80211_11B, IFM_IEEE80211_DS5 },
+		{  22 | IFM_IEEE80211_11B, IFM_IEEE80211_DS11 },
+		{  44 | IFM_IEEE80211_11B, IFM_IEEE80211_DS22 },
+		{   3 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM1_50 },
+		{   4 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM2_25 },
+		{   6 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM3 },
+		{   9 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM4_50 },
+		{  12 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM6 },
+		{  18 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM9 },
+		{  24 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM12 },
+		{  27 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM13_5 },
+		{  36 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM18 },
+		{  48 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM24 },
+		{  54 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM27 },
+		{  72 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM36 },
+		{  96 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM48 },
+		{ 108 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM54 },
+		{   2 | IFM_IEEE80211_11G, IFM_IEEE80211_DS1 },
+		{   4 | IFM_IEEE80211_11G, IFM_IEEE80211_DS2 },
+		{  11 | IFM_IEEE80211_11G, IFM_IEEE80211_DS5 },
+		{  22 | IFM_IEEE80211_11G, IFM_IEEE80211_DS11 },
+		{  12 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM6 },
+		{  18 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM9 },
+		{  24 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM12 },
+		{  36 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM18 },
+		{  48 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM24 },
+		{  72 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM36 },
+		{  96 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM48 },
+		{ 108 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM54 },
+		/* NB: OFDM72 doesn't really exist so we don't handle it */
+	};
+	u_int mask, i;
+
+	mask = rate & IEEE80211_RATE_VAL;
+	switch (mode) {
+	case IEEE80211_MODE_11A:
+	case IEEE80211_MODE_TURBO_A:
+		mask |= IFM_IEEE80211_11A;
+		break;
+	case IEEE80211_MODE_11B:
+		mask |= IFM_IEEE80211_11B;
+		break;
+	case IEEE80211_MODE_FH:
+		mask |= IFM_IEEE80211_FH;
+		break;
+	case IEEE80211_MODE_AUTO:
+		/* NB: ic may be NULL for some drivers */
+		if (ic && ic->ic_phytype == IEEE80211_T_FH) {
+			mask |= IFM_IEEE80211_FH;
+			break;
+		}
+		/* NB: hack, 11g matches both 11b+11a rates */
+		/* fall thru... */
+	case IEEE80211_MODE_11G:
+	case IEEE80211_MODE_TURBO_G:
+		mask |= IFM_IEEE80211_11G;
+		break;
+	default:
+		break;
+	}
+	for (i = 0; i < N(rates); i++)
+		if (rates[i].m == mask)
+			return rates[i].r;
+	return IFM_AUTO;
+#undef N
+}
+EXPORT_SYMBOL(ieee80211_rate2media);
+
+int
+ieee80211_mcs2media(struct ieee80211com *ic, int mcs, enum ieee80211_phymode mode)
+{
+#define	N(a)	(sizeof(a) / sizeof(a[0]))
+	static const struct {
+		u_int	m;	/* rate + mode */
+		u_int	r;	/* if_media rate */
+	} rates[] = {
+
+		/* Only MCS0-MCS15 (2 streams) are supported */
+		{  12 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_6 },
+		{  18 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_9 },
+		{  24 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_12 },
+		{  36 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_18 },
+		{  48 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_24 },
+		{  72 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_36 },
+		{  96 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_48 },
+		{ 108 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_LEG_54 },
+		{  12 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_6 },
+		{  18 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_9 },
+		{  24 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_12 },
+		{  36 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_18 },
+		{  48 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_24 },
+		{  72 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_36 },
+		{  96 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_48 },
+		{ 108 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_LEG_54 },
+
+		{  12 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_6 },
+		{  18 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_9 },
+		{  24 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_12 },
+		{  36 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_18 },
+		{  48 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_24 },
+		{  72 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_36 },
+		{  96 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_48 },
+		{ 108 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_54 },
+		{  12 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_6 },
+		{  18 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_9 },
+		{  24 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_12 },
+		{  36 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_18 },
+		{  48 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_24 },
+		{  72 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_36 },
+		{  96 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_48 },
+		{ 108 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_LEG_54 },
+#if 0
+		{  13  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_0 },
+		{  26  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_1 },
+		{  39  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_2 },
+		{  52  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_3 },
+		{  78  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_4 },
+		{  104 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_5 },
+		{  117 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_6 },
+		{  130 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_7 },
+		{  26  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_8 },
+		{  52  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_9 },
+		{  78  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_10 },
+		{  104 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_11 },
+		{  156 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_12 },
+		{  208 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_13 },
+		{  234 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_14 },
+		{  260 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_15 },
+		{  13  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_0 },
+		{  26  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_1 },
+		{  39  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_2 },
+		{  52  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_3 },
+		{  78  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_4 },
+		{  104 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_5 },
+		{  117 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_6 },
+		{  130 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_7 },
+		{  26  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_8 },
+		{  52  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_9 },
+		{  78  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_10 },
+		{  104 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_11 },
+		{  156 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_12 },
+		{  208 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_13 },
+		{  234 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_14 },
+		{  260 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_15 },
+#else
+		{  0x80  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_0 },
+		{  0x81  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_1 },
+		{  0x82  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_2 },
+		{  0x83  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_3 },
+		{  0x84  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_4 },
+		{  0x85 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_5 },
+		{  0x86 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_6 },
+		{  0x87 | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_7 },
+		{  0x88  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_8 },
+		{  0x89  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_9 },
+		{  0x8A  | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_10 },
+		{  0x8B | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_11 },
+		{  0x8C | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_12 },
+		{  0x8D | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_13 },
+		{  0x8E | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_14 },
+		{  0x8F | IFM_IEEE80211_11NA, IFM_IEEE80211_OFDM_HT_15 },
+		{  0x80  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_0 },
+		{  0x81  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_1 },
+		{  0x82  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_2 },
+		{  0x83  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_3 },
+		{  0x84  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_4 },
+		{  0x85 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_5 },
+		{  0x86 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_6 },
+		{  0x87 | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_7 },
+		{  0x88  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_8 },
+		{  0x89  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_9 },
+		{  0x8A  | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_10 },
+		{  0x8B | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_11 },
+		{  0x8C | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_12 },
+		{  0x8D | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_13 },
+		{  0x8E | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_14 },
+		{  0x8F | IFM_IEEE80211_11NG, IFM_IEEE80211_OFDM_HT_15 },
+
+		{  0x80  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_0 },
+		{  0x81  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_1 },
+		{  0x82  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_2 },
+		{  0x83  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_3 },
+		{  0x84  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_4 },
+		{  0x85 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_5 },
+		{  0x86 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_6 },
+		{  0x87 | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_7 },
+		{  0x88  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_8 },
+		{  0x89  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_9 },
+		{  0x8A  | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_10 },
+		{  0x8B | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_11 },
+		{  0x8C | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_12 },
+		{  0x8D | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_13 },
+		{  0x8E | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_14 },
+		{  0x8F | IFM_IEEE80211_11NA_HT40PM, IFM_IEEE80211_OFDM_HT_15 },
+		{  0x80  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_0 },
+		{  0x81  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_1 },
+		{  0x82  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_2 },
+		{  0x83  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_3 },
+		{  0x84  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_4 },
+		{  0x85 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_5 },
+		{  0x86 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_6 },
+		{  0x87 | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_7 },
+		{  0x88  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_8 },
+		{  0x89  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_9 },
+		{  0x8A  | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_10 },
+		{  0x8B | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_11 },
+		{  0x8C | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_12 },
+		{  0x8D | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_13 },
+		{  0x8E | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_14 },
+		{  0x8F | IFM_IEEE80211_11NG_HT40PM, IFM_IEEE80211_OFDM_HT_15 },
+#endif
+
+	};
+	u_int mask, i;
+
+	mask = mcs;
+	switch (mode) {
+	case IEEE80211_MODE_11NA:
+		mask |= IFM_IEEE80211_11NA;
+		break;
+	case IEEE80211_MODE_11NG:
+		mask |= IFM_IEEE80211_11NG;
+		break;
+	case IEEE80211_MODE_11NA_HT40PM:
+		mask |= IFM_IEEE80211_11NA_HT40PM;
+		break;
+	case IEEE80211_MODE_11NG_HT40PM:
+		mask |= IFM_IEEE80211_11NG_HT40PM;
+		break;
+	case IEEE80211_MODE_11AC_VHT20PM:
+		mask |= IFM_IEEE80211_11AC_VHT20PM;
+		break;
+	case IEEE80211_MODE_11AC_VHT40PM:
+		mask |= IFM_IEEE80211_11AC_VHT40PM;
+		break;
+	case IEEE80211_MODE_11AC_VHT80PM:
+		mask |= IFM_IEEE80211_11AC_VHT80PM;
+		break;
+	default:
+		break;
+	}
+	for (i = 0; i < N(rates); i++)
+		if (rates[i].m == mask)
+			return rates[i].r;
+	return IFM_AUTO;
+#undef N
+}
+EXPORT_SYMBOL(ieee80211_mcs2media);
+
int
ieee80211_mcs2rate(int mcs, int mode, int sgi, int vht)
{
/*
 * Convert an MCS index to a PHY rate in units of 500kbit/s.
 *
 * mcs:  MCS index (HT: 0..76, including the MCS32+UEQM entries;
 *       VHT: 0..9)
 * mode: bandwidth selector — HT: 0 = 20MHz, 1 = 40MHz;
 *       VHT: 0 = 80MHz, 1 = 160/80+80MHz
 * sgi:  0 = long guard interval, 1 = short guard interval
 * vht:  non-zero selects the VHT (11ac) tables
 *
 * Returns the rate, or -1 if any argument is out of range.
 *
 * N counts the entries of the innermost dimension only: the previous
 * form sizeof(a[0])/sizeof(a[0][0][0]) multiplied in the middle
 * dimension (giving 154 for rates, 20 for vht_rates) and let the mcs
 * bound check pass out-of-range indices through to the table read.
 */
#define N(a)    (sizeof(a[0][0]) / sizeof(a[0][0][0]))
	/* static const: ~1.3KB of tables; do not rebuild them on the
	 * stack on every call. Indexed as [sgi][mode][mcs]. */
	static const u_int32_t rates[2][2][77] = {{{

			  /* LGI & 20 MHz */
			  /* MCS0-MC31 (4 streams) are supported */
			  13, 26, 39, 52, 78, 104, 117, 130,
			  26, 52, 78, 104, 156, 208, 234, 260,
			  39, 78, 117, 156, 234, 312, 351, 390,
			  52, 104, 156, 208, 312, 416, 468, 520,

			  12, 78, 104, 130, 117, 156, 195, 104, /* UEQM */
			  130, 130, 156, 182, 182, 208, 156, 195,
			  195, 234, 273, 273, 312, 130, 156, 182,
			  156, 182, 208, 234, 208, 234, 260, 260,
			  286, 195, 234, 273, 234, 273, 312, 351,
			  312, 351, 390, 390, 429},
		  {

			  /* LGI & 40 MHz */
			  /* MCS0-MCS31 (4 streams) are supported */
			  27, 54, 81, 108, 162, 216, 243, 270,
			  54, 108, 162, 216, 324, 432, 486, 540,
			  81, 162, 243, 324, 486, 648, 729, 810,
			  108, 216, 324, 432, 648, 864, 972, 1080,

			  12, 162, 216, 270, 243, 324, 405, 216, /* UEQM */
			  270, 270, 324, 378, 378, 432, 324, 405,
			  405, 486, 567, 567, 648, 270, 324, 378,
			  324, 378, 432, 486, 432, 486, 540, 540,
			  594, 405, 486, 567, 486, 567, 648, 729,
			  648, 729, 810, 810, 891}},
		  {{

			   /* SGI & 20 MHz */
			   /* MCS0-MC31 (4 streams) are supported */
			   14, 28, 42, 56, 86, 114, 130, 144,
			   28, 56, 86, 114, 172, 230, 260, 288,
			   42, 86, 130, 172, 260, 346, 390, 432,
			   56, 114, 172, 230, 346, 462, 520, 576,

			   12, 86, 114, 144, 130, 172, 216, 86,	/* UEQM */
			   114, 114, 172, 202, 202, 230, 172, 216,
			   216, 260, 302, 302, 346, 144, 172, 202,
			   172, 202, 230, 260, 230, 260, 288, 288,
			   316, 216, 260, 302, 260, 302, 346, 390,
			   346, 390, 432, 432, 476},
		  {

			  /* SGI * 40 MHz */
			  /* MCS0-MC31 (4 streams) are supported */
			  30, 60, 90, 120, 180, 240, 270, 300,
			  60, 120, 180, 240, 360, 480, 540, 600,
			  90, 180, 270, 360, 540, 720, 810, 900,
			  120, 240, 360, 480, 720, 960, 1080,1200,

			  12, 180, 240, 300, 270, 360, 450, 240, /* UEQM */
			  300, 300, 360, 420, 420, 480, 360, 450,
			  450, 540, 630, 630, 720, 300, 360, 420,
			  360, 420, 480, 540, 480, 540, 600, 600,
			  660, 450, 540, 630, 540, 630, 720, 810,
			  720, 810, 900, 900, 990}}
		  };

	static const u_int32_t vht_rates[2][2][10] = {
		{{
			  /* LGI & 80 MHz */
			  /* MCS0-MC9 */
			  59, 117, 176, 234, 351, 468, 527, 585, 702, 780
		 },
		 {
			  /* LGI & 160/80+80 MHz */
			  /* MCS0-MC9 */
			  117, 234, 351, 468, 702, 936, 1053, 1170, 1404, 1560
		 }},
		{{
			  /* SGI & 80 MHz */
			  /* MCS0-MC9 */
			  65, 130, 195, 260, 390, 520, 585, 650, 780, 867
		 },
		 {
			  /* SGI & 160 MHz */
			  /* MCS0-MC9 */
			  130, 260, 390, 520, 780, 1040, 1170, 1300, 1560, 1733
		 }}};

	/* Reject out-of-range table indices instead of reading wild memory. */
	if (sgi < 0 || sgi > 1 || mode < 0 || mode > 1)
		return -1;

	if (vht) {
		if (mcs < 0 || mcs >= N(vht_rates))
			return -1;
		return vht_rates[sgi][mode][mcs];
	}

	if (mcs < 0 || mcs >= N(rates))
		return -1;
	return rates[sgi][mode][mcs];
#undef N
}
+EXPORT_SYMBOL(ieee80211_mcs2rate);
+
+int
+ieee80211_rate2mcs(int rate, int mode, int sgi)
+{
+#define N(a)    (sizeof(a[0]) / sizeof(a[0][0][0]))
+	static const struct {
+		u_int   r;      /* rate */
+		u_int   m;      /* mcs */
+	} rates[2][2][16] = {{{
+
+			/* Only MCS0-MCS15 (2 streams) are supported */
+			{13,          0x80 },
+			{26,          0x81 },
+			{39,          0x82 },
+			{52,          0x83 },
+			{78,          0x84 },
+			{104,         0x85 },
+			{117,         0x86 },
+			{130,         0x87 },
+			{26,          0x88 },
+			{52,          0x89 },
+			{78,          0x8A },
+			{104,         0x8B },
+			{156,         0x8C },
+			{208,         0x8D },
+			{234,         0x8E },
+			{260,         0x8F },
+		},
+		{
+
+			/* Only MCS0-MCS15 (2 streams) are supported */
+			{27,          0x80 },
+			{54,          0x81 },
+			{81,          0x82 },
+			{108,         0x83 },
+			{162,         0x84 },
+			{216,         0x85 },
+			{243,         0x86 },
+			{270,         0x87 },
+			{54,          0x88 },
+			{108,         0x89 },
+			{162,         0x8A },
+			{216,         0x8B },
+			{324,         0x8C },
+			{432,         0x8D },
+			{486,         0x8E },
+			{540,         0x8F },
+		}},
+		{{
+
+			 /* Only MCS0-MCS15 (2 streams) are supported */
+			 {14,          0x80 },
+			 {28,          0x81 },
+			 {42,          0x82 },
+			 {56,          0x83 },
+			 {86,          0x84 },
+			 {114,         0x85 },
+			 {130,         0x86 },
+			 {144,         0x87 },
+			 {28,          0x88 },
+			 {56,          0x89 },
+			 {86,          0x8A },
+			 {114,         0x8B },
+			 {172,         0x8C },
+			 {230,         0x8D },
+			 {260,         0x8E },
+			 {288,         0x8F },
+		 },
+		{
+
+			/* Only MCS0-MCS15 (2 streams) are supported */
+			{30,          0x80 },
+			{60,          0x81 },
+			{90,          0x82 },
+			{120,         0x83 },
+			{180,         0x84 },
+			{240,         0x85 },
+			{270,         0x86 },
+			{300,         0x87 },
+			{60,          0x88 },
+			{120,         0x89 },
+			{180,         0x8A },
+			{240,         0x8B },
+			{360,         0x8C },
+			{480,         0x8D },
+			{540,         0x8E },
+			{600,         0x8F },
+		}}};
+	int i;
+
+	for (i = 0; i < N(rates); i++)
+	{
+		if (rates[sgi][mode][i].r == rate)
+			return (rates[sgi][mode][i].m);
+	}
+	return -1;
+
+#undef N
+}
+EXPORT_SYMBOL(ieee80211_rate2mcs);
+
int
ieee80211_media2rate(int mword)
{
	/*
	 * Map an ifmedia word's subtype to its rate in units of
	 * 500kbit/s.  Returns -1 for IFM_AUTO and 0 for subtypes
	 * with no rate (manual/none) or unknown subtypes.
	 */
	static const int ieeerates[] = {
		-1,		/* IFM_AUTO */
		0,		/* IFM_MANUAL */
		0,		/* IFM_NONE */
		2,		/* IFM_IEEE80211_FH1 */
		4,		/* IFM_IEEE80211_FH2 */
		2,		/* IFM_IEEE80211_DS1 */
		4,		/* IFM_IEEE80211_DS2 */
		11,		/* IFM_IEEE80211_DS5 */
		22,		/* IFM_IEEE80211_DS11 */
		44,		/* IFM_IEEE80211_DS22 */
		3,		/* IFM_IEEE80211_OFDM1_50 */
		4,		/* IFM_IEEE80211_OFDM2_25 */
		6,		/* IFM_IEEE80211_OFDM3 */
		9,		/* IFM_IEEE80211_OFDM4_50 */
		12,		/* IFM_IEEE80211_OFDM6 */
		18,		/* IFM_IEEE80211_OFDM9 */
		24,		/* IFM_IEEE80211_OFDM12 */
		27,		/* IFM_IEEE80211_OFDM13_5 */
		36,		/* IFM_IEEE80211_OFDM18 */
		48,		/* IFM_IEEE80211_OFDM24 */
		54,		/* IFM_IEEE80211_OFDM27 */
		72,		/* IFM_IEEE80211_OFDM36 */
		96,		/* IFM_IEEE80211_OFDM48 */
		108,		/* IFM_IEEE80211_OFDM54 */
		144,		/* IFM_IEEE80211_OFDM72 */
	};
	const unsigned int subtype = IFM_SUBTYPE(mword);

	if (subtype >= sizeof(ieeerates) / sizeof(ieeerates[0]))
		return 0;

	return ieeerates[subtype];
}
+EXPORT_SYMBOL(ieee80211_media2rate);
+
int
ieee80211_media2mcs(int mword)
{
	/*
	 * Map an ifmedia word's subtype to an HT rate code: legacy
	 * subtypes yield their rate in 500kbit/s units, HT subtypes
	 * yield the encoded MCS (0x80 | mcs).  Returns -1 for
	 * IFM_AUTO and 0 for unknown subtypes.
	 */
	static const int ieee11nrates[] = {
		-1,		/* IFM_AUTO */
		12,		/* IFM_IEEE80211_OFDM_HT_LEG6 */
		18,		/* IFM_IEEE80211_OFDM_HT_LEG9 */
		24,		/* IFM_IEEE80211_OFDM_HT_LEG12 */
		36,		/* IFM_IEEE80211_OFDM_HT_LEG18 */
		48,		/* IFM_IEEE80211_OFDM_HT_LEG24 */
		72,		/* IFM_IEEE80211_OFDM_HT_LEG36 */
		96,		/* IFM_IEEE80211_OFDM_HT_LEG48 */
		108,		/* IFM_IEEE80211_OFDM_HT_LEG54 */
		0x80,		/* IFM_MCS_0 */
		0x81,		/* IFM_MCS_1 */
		0x82,		/* IFM_MCS_2 */
		0x83,		/* IFM_MCS_3 */
		0x84,		/* IFM_MCS_4 */
		0x85,		/* IFM_MCS_5 */
		0x86,		/* IFM_MCS_6 */
		0x87,		/* IFM_MCS_7 */
		0x88,		/* IFM_MCS_8 */
		0x89,		/* IFM_MCS_9 */
		0x8A,		/* IFM_MCS_10 */
		0x8B,		/* IFM_MCS_11 */
		0x8C,		/* IFM_MCS_12 */
		0x8D,		/* IFM_MCS_13 */
		0x8E,		/* IFM_MCS_14 */
		0x8F,		/* IFM_MCS_15 */
	};
	const unsigned int subtype = IFM_SUBTYPE(mword);

	if (subtype >= sizeof(ieee11nrates) / sizeof(ieee11nrates[0]))
		return 0;

	return ieee11nrates[subtype];
}
+EXPORT_SYMBOL(ieee80211_media2mcs);
/*
 * Return netdevice statistics.
 *
 * Folds the 802.11-private counters (vap->iv_stats) into the generic
 * netdev error/drop counters, then zeroes the private counters that were
 * folded so they are not added again on the next call.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
static struct rtnl_link_stats64 *
ieee80211_getstats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
#else
static struct net_device_stats *
ieee80211_getstats(struct net_device *dev)
#endif
{
	struct ieee80211vap *vap = netdev_priv(dev);
	/* Kernels >= 4.7 report 64-bit stats; iv_devstats matches the build. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
	struct rtnl_link_stats64 *stats = &vap->iv_devstats;
#else
	struct net_device_stats *stats = &vap->iv_devstats;
#endif
	uint32_t extra_tx_errors, extra_tx_dropped, extra_rx_errors;
	/* NOTE(review): presumably refreshes iv_devstats from a shared area;
	 * on failure the cached counters are returned as-is — confirm. */
	if (vap->iv_ic->ic_get_shared_vap_stats && (vap->iv_ic->ic_get_shared_vap_stats(vap)) < 0)
		return stats;

	/* Sum the private counters mapped onto each netdev category.
	 * NOTE(review): is_tx_nodefkey is counted in both the error and
	 * the dropped totals below — verify that double-count is intended. */
	extra_tx_errors = vap->iv_stats.is_tx_nodefkey
				 + vap->iv_stats.is_tx_noheadroom
				 + vap->iv_stats.is_crypto_enmicfail;
	extra_tx_dropped = vap->iv_stats.is_tx_nobuf
				  + vap->iv_stats.is_tx_nonode
				  + vap->iv_stats.is_tx_unknownmgt
				  + vap->iv_stats.is_tx_badcipher
				  + vap->iv_stats.is_tx_nodefkey;
	extra_rx_errors = vap->iv_stats.is_rx_tooshort
				  + vap->iv_stats.is_rx_wepfail
				  + vap->iv_stats.is_rx_decap
				  + vap->iv_stats.is_rx_nobuf
				  + vap->iv_stats.is_rx_decryptcrc
				  + vap->iv_stats.is_rx_ccmpmic
				  + vap->iv_stats.is_rx_tkipmic
				  + vap->iv_stats.is_rx_tkipicv;

	/* Clear everything just accumulated so subsequent calls only add
	 * the deltas that occurred since this call. */
	vap->iv_stats.is_tx_nodefkey = 0;
	vap->iv_stats.is_tx_noheadroom = 0;
	vap->iv_stats.is_crypto_enmicfail = 0;

	vap->iv_stats.is_tx_nobuf = 0;
	vap->iv_stats.is_tx_nonode = 0;
	vap->iv_stats.is_tx_unknownmgt = 0;
	vap->iv_stats.is_tx_badcipher = 0;
	vap->iv_stats.is_tx_nodefkey = 0;

	vap->iv_stats.is_rx_tooshort = 0;
	vap->iv_stats.is_rx_wepfail = 0;
	vap->iv_stats.is_rx_decap = 0;
	vap->iv_stats.is_rx_nobuf = 0;
	vap->iv_stats.is_rx_decryptcrc = 0;
	vap->iv_stats.is_rx_ccmpmic = 0;
	vap->iv_stats.is_rx_tkipmic = 0;
	vap->iv_stats.is_rx_tkipicv = 0;

	/* XXX total guess as to what to count where */
	/* update according to private statistics */
	stats->tx_errors += extra_tx_errors;
	stats->tx_dropped += extra_tx_dropped;
	stats->rx_errors += extra_rx_errors;
	stats->rx_crc_errors = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
	/* >= 4.7: copy the accumulated totals into the caller's buffer. */
	memcpy(stats64, stats, sizeof(*stats));
	return stats64;
#else
	return stats;
#endif
}
+
+static int
+ieee80211_change_mtu(struct net_device *dev, int mtu)
+{
+	if (!(IEEE80211_MTU_MIN < mtu && mtu <= IEEE80211_MTU_MAX))
+		return -EINVAL;
+	dev->mtu = mtu;
+	/* XXX coordinate with parent device */
+	return 0;
+}
+
+static void
+ieee80211_set_multicast_list(struct net_device *dev)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	IEEE80211_LOCK_IRQ(ic);
+	if (dev->flags & IFF_PROMISC) {
+		if ((vap->iv_flags & IEEE80211_F_PROMISC) == 0) {
+			vap->iv_flags |= IEEE80211_F_PROMISC;
+			ic->ic_promisc++;
+		}
+	} else {
+		if (vap->iv_flags & IEEE80211_F_PROMISC) {
+			vap->iv_flags &= ~IEEE80211_F_PROMISC;
+			ic->ic_promisc--;
+		}
+	}
+	if (dev->flags & IFF_ALLMULTI) {
+		if ((vap->iv_flags & IEEE80211_F_ALLMULTI) == 0) {
+			vap->iv_flags |= IEEE80211_F_ALLMULTI;
+			ic->ic_allmulti++;
+		}
+	} else {
+		if (vap->iv_flags & IEEE80211_F_ALLMULTI) {
+			vap->iv_flags &= ~IEEE80211_F_ALLMULTI;
+			ic->ic_allmulti--;
+		}
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+
+}
+
+/*
+ * 0: OK
+ * <0: NOK, with value derived from errno vals.
+ */
+
+#define	N(a)	(sizeof (a) / sizeof (a[0]))
+
+int
+ieee80211_country_string_to_countryid( const char *input_str, u_int16_t *p_iso_code )
+{
+	int	retval = -EINVAL;
+	int	iter;
+
+	if (strnlen( input_str, 3 ) >= 3) {
+		return( -E2BIG );
+	}
+
+	for (iter = 0; iter < N(country_strings) && retval < 0; iter++) {
+		if (strcasecmp( country_strings[iter].iso_name, input_str ) == 0) {
+			*p_iso_code = country_strings[iter].iso_code;
+			retval = 0;
+		}
+	}
+
+	return( retval );
+}
+
+int
+ieee80211_countryid_to_country_string( const u_int16_t iso_code, char *output_str )
+{
+	int	retval = -EINVAL;
+	int	iter;
+
+	for (iter = 0; iter < N(country_strings) && retval < 0; iter++) {
+		if (iso_code == country_strings[iter].iso_code) {
+			strncpy( output_str, country_strings[iter].iso_name, 2 );
+			output_str[ 2 ] = '\0';
+			retval = 0;
+		}
+	}
+
+	return( retval );
+}
+
+int
+ieee80211_region_to_operating_class(struct ieee80211com *ic, char *region_str)
+{
+	int retval = -EINVAL;
+	int i;
+	int j;
+
+	for (i = 0; i < ARRAY_SIZE(oper_class_table); i++) {
+		if (strcasecmp(oper_class_table[i].region_name, region_str) == 0) {
+			for (j = 0; j < oper_class_table[i].class_num_5g; j++)
+				setbit(ic->ic_oper_class, oper_class_table[i].classes_5g[j]);
+
+			if (ic->ic_rf_chipid == CHIPID_DUAL) {
+				for (j = 0; j < oper_class_table[i].class_num_24g; j++)
+					setbit(ic->ic_oper_class, oper_class_table[i].classes_24g[j]);
+			}
+
+			ic->ic_oper_class_table = &oper_class_table[i];
+
+			retval = 0;
+			break;
+		}
+	}
+
+	if (retval < 0) {
+		for (j = 0; j < oper_class_table[OPER_CLASS_GB_INDEX].class_num_5g; j++)
+				setbit(ic->ic_oper_class, oper_class_table[OPER_CLASS_GB_INDEX].classes_5g[j]);
+
+		if (ic->ic_rf_chipid == CHIPID_DUAL) {
+			for (j = 0; j < oper_class_table[OPER_CLASS_GB_INDEX].class_num_24g; j++)
+				setbit(ic->ic_oper_class, oper_class_table[OPER_CLASS_GB_INDEX].classes_24g[j]);
+		}
+
+		ic->ic_oper_class_table = &oper_class_table[OPER_CLASS_GB_INDEX];
+
+		retval = 0;
+	}
+
+	return retval;
+}
+
+void
+ieee80211_get_prichan_list_by_operating_class(struct ieee80211com *ic,
+			int bw,
+			uint8_t *chan_list,
+			uint32_t flag)
+{
+	int i;
+	int j;
+	uint32_t table_size = ic->ic_oper_class_table->class_num_5g +
+				ic->ic_oper_class_table->class_num_24g;
+
+	KASSERT(ic->ic_oper_class_table, ("Uninitialized operating table"));
+
+	for (i = 0; i < table_size; i++) {
+		if (ic->ic_oper_class_table->class_table[i].bandwidth == bw &&
+				(ic->ic_oper_class_table->class_table[i].behavior & flag)) {
+			for (j = 0; j < ARRAY_SIZE(ic->ic_oper_class_table->class_table[i].chan_set) &&
+					ic->ic_oper_class_table->class_table[i].chan_set[j]; j++) {
+				setbit(chan_list, ic->ic_oper_class_table->class_table[i].chan_set[j]);
+			}
+		}
+	}
+}
+
+int
+ieee80211_get_current_operating_class(uint16_t iso_code, int chan, int bw)
+{
+	int i;
+	int j;
+
+	switch (iso_code) {
+	case CTRY_UNITED_STATES:
+		for (i = 0; i < ARRAY_SIZE(us_oper_class_table); i++) {
+			if (us_oper_class_table[i].bandwidth == bw) {
+				for (j = 0; j < sizeof(us_oper_class_table[i].chan_set); j++) {
+					if (us_oper_class_table[i].chan_set[j] == chan)
+						return us_oper_class_table[i].index;
+				}
+			}
+		}
+		break;
+	case CTRY_EUROPE:
+		for (i = 0; i < ARRAY_SIZE(eu_oper_class_table); i++) {
+			if (eu_oper_class_table[i].bandwidth == bw) {
+				for (j = 0; j < sizeof(eu_oper_class_table[i].chan_set); j++) {
+					if (eu_oper_class_table[i].chan_set[j] == chan)
+						return eu_oper_class_table[i].index;
+				}
+			}
+		}
+		break;
+	case CTRY_JAPAN:
+		for (i = 0; i < ARRAY_SIZE(jp_oper_class_table); i++) {
+			if (jp_oper_class_table[i].bandwidth == bw) {
+				for (j = 0; j < sizeof(jp_oper_class_table[i].chan_set); j++) {
+					if (jp_oper_class_table[i].chan_set[j] == chan)
+						return jp_oper_class_table[i].index;
+				}
+			}
+		}
+		break;
+	default:
+		for (i = 0; i < ARRAY_SIZE(gb_oper_class_table); i++) {
+			if (gb_oper_class_table[i].bandwidth == bw) {
+				for (j = 0; j < sizeof(gb_oper_class_table[i].chan_set); j++) {
+					if (gb_oper_class_table[i].chan_set[j] == chan)
+						return gb_oper_class_table[i].index;
+				}
+			}
+		}
+		break;
+	}
+
+	return 0;
+}
+
/*
 * Build the country information element (IEEE80211_ELEMID_COUNTRY) in
 * ic->ic_country_ie: the 3-character country string followed either by
 * regulatory-extension triplets (when IEEE80211_FEXT_REGCLASS is set) or
 * by run-length-encoded (first chan, count, max tx power) triplets
 * covering the active channels of the current band.
 */
void
ieee80211_build_countryie(struct ieee80211com *ic)
{
	int i, j, chanflags, found;
	struct ieee80211_channel *c;
	u_int8_t chanlist[IEEE80211_CHAN_MAX + 1];	/* channels already reported */
	u_int8_t chancnt = 0;				/* entries used in chanlist */
	/* cursors into country_triplet: one 3-byte triplet at a time */
	u_int8_t *cur_runlen, *cur_chan, *cur_pow, prevchan;

	/*
	 * Fill in country IE.
	 */
	memset(&ic->ic_country_ie, 0, sizeof(ic->ic_country_ie));
	ic->ic_country_ie.country_id = IEEE80211_ELEMID_COUNTRY;
	ic->ic_country_ie.country_len = 0; /* init needed by following code */

	/* initialize country IE: prefer an explicitly configured country
	 * code over the regulatory one */
	found = -EINVAL;
	if (ic->ic_spec_country_code != CTRY_DEFAULT) {
		found = ieee80211_countryid_to_country_string(ic->ic_spec_country_code,
					(char *)ic->ic_country_ie.country_str);
	} else {
		found = ieee80211_countryid_to_country_string(ic->ic_country_code,
					(char *)ic->ic_country_ie.country_str);
	}
	if (found < 0) {
		/* unknown code: advertise a blank country string */
		printk("bad country string ignored: %d\n",
			ic->ic_country_code);
		ic->ic_country_ie.country_str[0] = ' ';
		ic->ic_country_ie.country_str[1] = ' ';
	}

	/*
	 * indoor/outdoor portion in country string.
	 * It should be one of:
	 *     'I' indoor only
	 *     'O' outdoor only
	 *     ' ' all environments
	 *  Default: we currently support both indoor and outdoor.
	 *  If we need support other options later,
	 *  use 'ic->ic_country_outdoor' to control it.
	 */
	ic->ic_country_ie.country_str[2] = ' ';

	ic->ic_country_ie.country_len += 3;	/* Country string - 3 characters added in */

	/*
	 * runlength encoded channel max tx power info.
	 */
	cur_runlen = &ic->ic_country_ie.country_triplet[1];
	cur_chan = &ic->ic_country_ie.country_triplet[0];
	cur_pow = &ic->ic_country_ie.country_triplet[2];
	prevchan = 0;

	if ((ic->ic_flags_ext & IEEE80211_FEXT_REGCLASS) && ic->ic_nregclass) {
		/*
		 * Add regulatory triplets.
		 * chan/no_of_chans/tx power triplet is overridden as
		 * as follows:
		 * cur_chan == REGULATORY EXTENSION ID.
		 * cur_runlen = Regulatory class.
		 * cur_pow = coverage class.
		 */
		for (i=0; i < ic->ic_nregclass; i++) {
			*cur_chan = IEEE80211_REG_EXT_ID;
			*cur_runlen = ic->ic_regclassids[i];
			*cur_pow = ic->ic_coverageclass;

			cur_runlen +=3;
			cur_chan += 3;
			cur_pow += 3;
			ic->ic_country_ie.country_len += 3;
		}
	} else if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
		/* only report channels of the band we operate in */
		if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
			chanflags = IEEE80211_CHAN_5GHZ;
		else
			chanflags = IEEE80211_CHAN_2GHZ;

		memset(&chanlist[0], 0, sizeof(chanlist));
		/* XXX not right due to duplicate entries */
		for (i = 0; i < ic->ic_nchans; i++) {
			c = &ic->ic_channels[i];

			if (c == NULL || isclr(ic->ic_chan_active, c->ic_ieee))
				continue;

			/* Does channel belong to current operation mode */
			if (!(c->ic_flags & chanflags))
				continue;

			/* Skip previously reported channels */
			for (j = 0; j < chancnt; j++)
				if (c->ic_ieee == chanlist[j])
					break;

			if (j != chancnt) /* found a match */
				continue;

			chanlist[chancnt] = c->ic_ieee;
			chancnt++;

			/* Skip turbo channels */
			if (IEEE80211_IS_CHAN_TURBO(c))
				continue;

			/* Skip half/quarter rate channels */
			if (IEEE80211_IS_CHAN_HALF(c) ||
			    IEEE80211_IS_CHAN_QUARTER(c))
				continue;

			if (*cur_runlen == 0) {
				/* current triplet unused: start a new run */
				(*cur_runlen)++;
				*cur_pow = c->ic_maxregpower;
				*cur_chan = c->ic_ieee;
				prevchan = c->ic_ieee;
				ic->ic_country_ie.country_len += 3;
			} else if (*cur_pow == c->ic_maxregpower &&
			    c->ic_ieee == prevchan + 1) {
				/* same power, consecutive channel: extend run */
				(*cur_runlen)++;
				prevchan = c->ic_ieee;
			} else {
				/* close current run, open the next triplet */
				cur_runlen +=3;
				cur_chan += 3;
				cur_pow += 3;
				(*cur_runlen)++;
				*cur_pow = c->ic_maxregpower;
				*cur_chan = c->ic_ieee;
				prevchan = c->ic_ieee;
				ic->ic_country_ie.country_len += 3;
			}
		}
	}

	/* pad to an even IE payload length */
	if (ic->ic_country_ie.country_len & 1)
		ic->ic_country_ie.country_len++;

#undef N
}
+
+u_int
+ieee80211_get_chanflags(enum ieee80211_phymode mode)
+{
+	KASSERT(mode < ARRAY_SIZE(ieee80211_chanflags), ("Unexpected mode %u", mode));
+	return ieee80211_chanflags[mode];
+}
+EXPORT_SYMBOL(ieee80211_get_chanflags);
+
+/*
+ * Change the WDS mode of a specified WDS VAP, called in following circumstances:
+ * WDS VAP initialization
+ * WDS Extender Role changing
+ */
+int
+ieee80211_vap_wds_mode_change(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	switch (ic->ic_extender_role) {
+	case IEEE80211_EXTENDER_ROLE_MBS:
+		IEEE80211_VAP_WDS_SET_MBS(vap);
+		break;
+	case IEEE80211_EXTENDER_ROLE_RBS:
+		IEEE80211_VAP_WDS_SET_RBS(vap);
+		break;
+	case IEEE80211_EXTENDER_ROLE_NONE:
+		IEEE80211_VAP_WDS_SET_NONE(vap);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int
+ieee80211_dual_sec_chan_supported(struct ieee80211com *ic, int chan)
+{
+	int max_chan;
+
+	if (ic->ic_country_code == CTRY_UNITED_STATES)
+		max_chan = IEEE80211_MAX_DUAL_EXT_CHAN_24G_US;
+	else
+		max_chan = IEEE80211_MAX_DUAL_EXT_CHAN_24G;
+
+	if ((chan >= IEEE80211_MIN_DUAL_EXT_CHAN_24G) &&
+			(chan <= max_chan))
+		return 1;
+
+	return 0;
+}
+
+void
+ieee80211_update_sec_chan_offset(struct ieee80211_channel *chan, int offset)
+{
+	if (offset == IEEE80211_HTINFO_CHOFF_SCA) {
+		chan->ic_flags |= IEEE80211_CHAN_HT40U;
+		chan->ic_flags &= ~IEEE80211_CHAN_HT40D;
+		chan->ic_center_f_40MHz = chan->ic_ieee + IEEE80211_40M_CENT_FREQ_OFFSET;
+	} else if (offset == IEEE80211_HTINFO_CHOFF_SCB) {
+		chan->ic_flags |= IEEE80211_CHAN_HT40D;
+		chan->ic_flags &= ~IEEE80211_CHAN_HT40U;
+		chan->ic_center_f_40MHz = chan->ic_ieee - IEEE80211_40M_CENT_FREQ_OFFSET;
+	}
+}
+
+int
+ieee80211_get_ap_sec_chan_offset(const struct ieee80211_scan_entry *se)
+{
+	struct ieee80211_ie_htinfo *htinfo =
+		(struct ieee80211_ie_htinfo *)se->se_htinfo_ie;
+	int sec20_offset;
+
+	if (!htinfo)
+		sec20_offset = IEEE80211_HTINFO_CHOFF_SCN;
+	else
+		sec20_offset = IEEE80211_HTINFO_B1_EXT_CHOFFSET(htinfo);
+
+	return sec20_offset;
+}
+
+int
+ieee80211_get_max_ap_bw(const struct ieee80211_scan_entry *se)
+{
+	struct ieee80211_ie_htcap *htcap =
+		(struct ieee80211_ie_htcap *)se->se_htcap_ie;
+	struct ieee80211_ie_htinfo *htinfo =
+		(struct ieee80211_ie_htinfo *)se->se_htinfo_ie;
+	struct ieee80211_ie_vhtcap *vhtcap =
+		(struct ieee80211_ie_vhtcap *)se->se_vhtcap_ie;
+	struct ieee80211_ie_vhtop *vhtop =
+		(struct ieee80211_ie_vhtop *)se->se_vhtop_ie;
+	int max_bw = BW_HT20;
+
+	if (htinfo) {
+		if (htinfo->hi_byte1 & IEEE80211_HTINFO_B1_SEC_CHAN_OFFSET)
+			max_bw = BW_HT40;
+	} else if (htcap) {
+		if (htcap->hc_cap[0] & IEEE80211_HTCAP_C_CHWIDTH40)
+			max_bw = BW_HT40;
+	}
+
+	if (vhtop) {
+		int vhtop_bw = IEEE80211_VHTOP_GET_CHANWIDTH(vhtop);
+		if ((vhtop_bw == IEEE80211_VHTOP_CHAN_WIDTH_160MHZ) ||
+				(vhtop_bw == IEEE80211_VHTOP_CHAN_WIDTH_80PLUS80MHZ))
+			max_bw = BW_HT160;
+		else if (vhtop_bw == IEEE80211_VHTOP_CHAN_WIDTH_80MHZ)
+			max_bw = BW_HT80;
+	} else if (vhtcap) {
+		int vhtcap_bw = IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap);
+		if (vhtcap_bw == IEEE80211_VHTCAP_CW_80M_ONLY)
+			max_bw = BW_HT80;
+		else if ((vhtcap_bw == IEEE80211_VHTCAP_CW_160M) ||
+				(vhtcap_bw == IEEE80211_VHTCAP_CW_160_AND_80P80M))
+			max_bw = BW_HT160;
+	}
+
+	return max_bw;
+}
+
+int
+ieee80211_get_max_node_bw(struct ieee80211_node *ni)
+{
+	int max_bw = BW_HT20;
+
+	if (IEEE80211_NODE_IS_HT(ni)) {
+		if (ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40)
+			max_bw = BW_HT40;
+	}
+
+	if (IEEE80211_NODE_IS_VHT(ni)) {
+		if (ni->ni_vhtcap.chanwidth == IEEE80211_VHTCAP_CW_80M_ONLY)
+			max_bw = BW_HT80;
+		else if ((ni->ni_vhtcap.chanwidth == IEEE80211_VHTCAP_CW_160M) ||
+				(ni->ni_vhtcap.chanwidth == IEEE80211_VHTCAP_CW_160_AND_80P80M))
+			max_bw = BW_HT160;
+	}
+
+	return max_bw;
+}
+
+int
+ieee80211_get_max_system_bw(struct ieee80211com *ic)
+{
+	int max_bw = ic->ic_max_system_bw;
+
+	if (max_bw >= BW_HT80) {
+		if (!IS_IEEE80211_VHT_ENABLED(ic) ||
+				!ieee80211_swfeat_is_supported(SWFEAT_ID_VHT, 1))
+			max_bw = BW_HT40;
+	}
+
+	if (max_bw >= BW_HT40) {
+		if (ic->ic_curmode <= IEEE80211_MODE_11NG)
+			max_bw = BW_HT20;
+	}
+
+	return max_bw;
+}
+
+int
+ieee80211_get_max_channel_bw(struct ieee80211com *ic, int channel)
+{
+	if (isset(ic->ic_chan_active_80, channel))
+		return BW_HT80;
+	else if (isset(ic->ic_chan_active_40, channel))
+		return BW_HT40;
+	else
+		return BW_HT20;
+}
+
+int
+ieee80211_get_max_bw(struct ieee80211vap *vap,
+		struct ieee80211_node *ni, uint32_t chan)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int max_bw = ieee80211_get_max_node_bw(ni);
+	int ic_bw = ieee80211_get_max_system_bw(ic);
+	int chan_bw = ieee80211_get_max_channel_bw(ic, chan);
+
+	max_bw = MIN(max_bw, ic_bw);
+	max_bw = MIN(max_bw, chan_bw);
+
+	return max_bw;
+}
+
diff --git a/drivers/qtn/wlan/ieee80211_acl.c b/drivers/qtn/wlan/ieee80211_acl.c
new file mode 100644
index 0000000..534cfab
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_acl.c
@@ -0,0 +1,338 @@
+/*-
+ * Copyright (c) 2004-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_acl.c 1721 2006-09-20 08:45:13Z mentor $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 MAC ACL support.
+ *
+ * When this module is loaded the sender address of each received
+ * frame is passed to the iac_check method and the module indicates
+ * if the frame should be accepted or rejected.  If the policy is
+ * set to ACL_POLICY_OPEN then all frames are accepted w/o checking
+ * the address.  Otherwise, the address is looked up in the database
+ * and if found the frame is either accepted (ACL_POLICY_ALLOW)
+ * or rejected (ACL_POLICY_DENY).
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+
+enum {
+	ACL_POLICY_OPEN		= 0,	/* open, don't check ACLs */
+	ACL_POLICY_ALLOW	= 1,	/* allow traffic from MAC */
+	ACL_POLICY_DENY		= 2,	/* deny traffic from MAC */
+};
+
+#define	ACL_HASHSIZE	32
+
+struct acl {
+	TAILQ_ENTRY(acl) acl_list;	/* linkage on aclstate as_list */
+	LIST_ENTRY(acl) acl_hash;	/* linkage on aclstate hash chain */
+	u_int8_t acl_macaddr[IEEE80211_ADDR_LEN];	/* station address this entry matches */
+};
+
+struct aclstate {
+	acl_lock_t as_lock;	/* protects list, hash and policy */
+	int as_policy;	/* one of ACL_POLICY_* above */
+	TAILQ_HEAD(, acl) as_list;	/* list of all ACLs */
+	ATH_LIST_HEAD(, acl) as_hash[ACL_HASHSIZE];	/* lookup by last address octet */
+};
+
+/* simple hash is enough for variation of macaddr */
+#define	ACL_HASH(addr)	\
+	(((const u_int8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % ACL_HASHSIZE)
+
+MALLOC_DEFINE(M_80211_ACL, "acl", "802.11 station acl");
+
+static void acl_free_all_locked(struct aclstate *);
+
+static int
+acl_attach(struct ieee80211vap *vap)	/* allocate per-vap ACL state; 1 = success, 0 = failure */
+{
+	struct aclstate *as;
+
+	_MOD_INC_USE(THIS_MODULE, return 0);
+
+	MALLOC(as, struct aclstate *, sizeof(struct aclstate),
+		M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (as == NULL) {
+		_MOD_DEC_USE(THIS_MODULE);	/* undo the module refcount on allocation failure */
+		return 0;
+	}
+	ACL_LOCK_INIT(as, "acl");
+	TAILQ_INIT(&as->as_list);
+	as->as_policy = ACL_POLICY_OPEN;	/* default: accept all stations */
+	vap->iv_as = as;
+	return 1;
+}
+
+static void
+acl_detach(struct ieee80211vap *vap)	/* tear down the state created by acl_attach */
+{
+	struct aclstate *as = vap->iv_as;
+
+	if (as == NULL)
+		return;	/* never attached (or already detached) */
+
+	ACL_LOCK(as);
+	acl_free_all_locked(as);
+	ACL_UNLOCK(as);
+	vap->iv_as = NULL;
+	ACL_LOCK_DESTROY(as);
+	FREE(as, M_DEVBUF);	/* matches the M_DEVBUF MALLOC in acl_attach */
+
+	_MOD_DEC_USE(THIS_MODULE);
+}
+
+static __inline struct acl *
+_find_acl(struct aclstate *as, const u_int8_t *macaddr)	/* hash-chain lookup; NULL if absent */
+{
+	struct acl *acl;
+	int hash;
+
+	hash = ACL_HASH(macaddr);
+	LIST_FOREACH(acl, &as->as_hash[hash], acl_hash) {
+		if (IEEE80211_ADDR_EQ(acl->acl_macaddr, macaddr))
+			return acl;
+	}
+
+	return NULL;	/* not in the database */
+}
+
+static void
+_acl_free(struct aclstate *as, struct acl *acl)	/* unlink from list and hash, then free; caller holds lock */
+{
+
+	TAILQ_REMOVE(&as->as_list, acl, acl_list);
+	LIST_REMOVE(acl, acl_hash);
+	FREE(acl, M_80211_ACL);
+}
+
+static int
+acl_check(struct ieee80211vap *vap, const u_int8_t mac[IEEE80211_ADDR_LEN])	/* 1 = accept frame, 0 = reject */
+{
+	struct aclstate *as = vap->iv_as;	/* NOTE(review): lookup runs without ACL_LOCK — confirm intended */
+
+	switch (as->as_policy) {
+	case ACL_POLICY_OPEN:
+		return 1;	/* no filtering */
+	case ACL_POLICY_ALLOW:
+		return _find_acl(as, mac) != NULL;	/* whitelist semantics */
+	case ACL_POLICY_DENY:
+		return _find_acl(as, mac) == NULL;	/* blacklist semantics */
+	}
+
+	return 0;		/* should not happen */
+}
+
+static int
+acl_add(struct ieee80211vap *vap, const u_int8_t mac[IEEE80211_ADDR_LEN])	/* 0, -ENOMEM or -EEXIST */
+{
+	struct aclstate *as = vap->iv_as;
+	struct acl *acl, *new;
+	int hash;
+
+	MALLOC(new, struct acl *, sizeof(struct acl), M_80211_ACL, M_NOWAIT | M_ZERO);	/* allocate before taking the lock */
+	if (new == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+			"ACL: add %s failed, no memory\n", ether_sprintf(mac));
+		/* XXX statistic */
+		return -ENOMEM;
+	}
+
+	ACL_LOCK(as);
+	hash = ACL_HASH(mac);
+	LIST_FOREACH(acl, &as->as_hash[hash], acl_hash) {	/* reject duplicates */
+		if (IEEE80211_ADDR_EQ(acl->acl_macaddr, mac)) {
+			ACL_UNLOCK(as);
+			FREE(new, M_80211_ACL);
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+				"ACL: add %s failed, already present\n",
+				ether_sprintf(mac));
+			return -EEXIST;
+		}
+	}
+	IEEE80211_ADDR_COPY(new->acl_macaddr, mac);
+	TAILQ_INSERT_TAIL(&as->as_list, new, acl_list);
+	LIST_INSERT_HEAD(&as->as_hash[hash], new, acl_hash);
+	ACL_UNLOCK(as);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+		"ACL: add %s\n", ether_sprintf(mac));
+
+	return 0;
+}
+
+static int
+acl_remove(struct ieee80211vap *vap, const u_int8_t mac[IEEE80211_ADDR_LEN])	/* drop one entry */
+{
+	struct aclstate *as = vap->iv_as;
+	struct acl *acl;
+
+	ACL_LOCK(as);
+	acl = _find_acl(as, mac);
+	if (acl != NULL)
+		_acl_free(as, acl);
+	ACL_UNLOCK(as);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+		"ACL: remove %s%s\n", ether_sprintf(mac),
+		acl == NULL ? ", not present" : "");	/* acl used only as a found-flag here, never dereferenced */
+
+	return (acl == NULL ? ENOENT : 0);	/* NOTE(review): positive ENOENT, unlike acl_add's negative errno — confirm callers */
+}
+
+static void
+acl_free_all_locked(struct aclstate *as)	/* release every entry; caller must hold the ACL lock */
+{
+	struct acl *acl;
+
+	ACL_LOCK_ASSERT(as);
+
+	while ((acl = TAILQ_FIRST(&as->as_list)) != NULL)
+		_acl_free(as, acl);
+}
+
+static int
+acl_free_all(struct ieee80211vap *vap)	/* flush the whole ACL database; always returns 0 */
+{
+	struct aclstate *as = vap->iv_as;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL, "ACL: %s\n", "free all");
+
+	ACL_LOCK(as);
+	acl_free_all_locked(vap->iv_as);
+	ACL_UNLOCK(as);
+
+	return 0;
+}
+
+static int
+acl_add_mac_list(struct ieee80211vap *vap, int num_macs, struct ieee80211_mac_addr *mac_list)	/* bulk acl_add */
+{
+	int i = 0;
+	int ret = 0;
+
+	while (i < num_macs) {
+		ret = acl_add(vap, mac_list[i].addr);
+		if (ret < 0 && ret != -EEXIST)	/* duplicates are tolerated; other errors abort */
+			return ret;
+		i++;
+	}
+
+	return ret;	/* NOTE(review): returns -EEXIST if the last entry was a duplicate — confirm intended */
+}
+
+static int
+acl_setpolicy(struct ieee80211vap *vap, int policy)	/* map IEEE80211_MACCMD_POLICY_* onto internal policy */
+{
+	struct aclstate *as = vap->iv_as;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+		"ACL: set policy to %u\n", policy);
+
+	switch (policy) {
+	case IEEE80211_MACCMD_POLICY_OPEN:
+		as->as_policy = ACL_POLICY_OPEN;
+		/* Flush silently to reset */
+		acl_free_all(vap);
+		break;
+	case IEEE80211_MACCMD_POLICY_ALLOW:
+		as->as_policy = ACL_POLICY_ALLOW;
+		break;
+	case IEEE80211_MACCMD_POLICY_DENY:
+		as->as_policy = ACL_POLICY_DENY;
+		break;
+	default:
+		return -EINVAL;	/* unknown policy value */
+	}
+
+	return 0;
+}
+
+static int
+acl_getpolicy(struct ieee80211vap *vap)	/* current ACL_POLICY_* value */
+{
+	struct aclstate *as = vap->iv_as;
+
+	return as->as_policy;
+}
+
+/*
+ * Module glue.
+ */
+
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless support: MAC-based ACL policy");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+static const struct ieee80211_aclator mac = {	/* aclator ops vector registered with net80211 */
+	.iac_name	= "mac",
+	.iac_attach	= acl_attach,
+	.iac_detach	= acl_detach,
+	.iac_check	= acl_check,
+	.iac_add	= acl_add,
+	.iac_remove	= acl_remove,
+	.iac_flush	= acl_free_all,
+	.iac_setpolicy	= acl_setpolicy,
+	.iac_getpolicy	= acl_getpolicy,
+	.iac_add_mac_list = acl_add_mac_list,
+};
+
+static int __init
+init_ieee80211_acl(void)	/* register the "mac" aclator on module load */
+{
+	ieee80211_aclator_register(&mac);
+	return 0;
+}
+module_init(init_ieee80211_acl);
+
+static void __exit
+exit_ieee80211_acl(void)	/* unregister the aclator on module unload */
+{
+	ieee80211_aclator_unregister(&mac);
+}
+module_exit(exit_ieee80211_acl);
diff --git a/drivers/qtn/wlan/ieee80211_beacon.c b/drivers/qtn/wlan/ieee80211_beacon.c
new file mode 100644
index 0000000..370ff71
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_beacon.c
@@ -0,0 +1,635 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_beacon.c 2029 2007-01-30 04:01:29Z proski $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 beacon handling routines
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include "net80211/if_media.h"
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_beacon_desc.h"
+
+/*
+ * Add the Epigram IE to a frame
+ */
+static const u_int8_t ieee80211_epigram[] = {0x00, 0x90, 0x4c, 0x03, 0x00, 0x01};
+
+uint8_t * ieee80211_add_epigram_ie(uint8_t *frm)	/* append the Epigram vendor IE; returns advanced frm */
+{
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = sizeof(ieee80211_epigram);	/* IE length */
+	memcpy(frm, ieee80211_epigram, sizeof(ieee80211_epigram));
+	frm += sizeof(ieee80211_epigram);
+	return frm;
+}
+
+__inline__
+uint8_t ieee80211_wband_chanswitch_ie_len(uint32_t bw)	/* IE is present only for >= 80MHz operation */
+{
+	return ((bw >= BW_HT80) ? IEEE80211_WBAND_CHANSWITCH_IE_LEN : 0);
+}
+
+uint8_t *ieee80211_add_beacon_header(struct ieee80211_node *ni, uint8_t *frm)	/* fill the 802.11 MAC header for a beacon */
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_frame *wh = (struct ieee80211_frame *)frm;
+
+	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
+		IEEE80211_FC0_SUBTYPE_BEACON;
+	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+	wh->i_dur[0] = 0;
+	wh->i_dur[1] = 0;
+	IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_dev->broadcast);	/* DA: broadcast */
+	IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);	/* SA: our address */
+	IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid);	/* BSSID */
+	*(u_int16_t *)wh->i_seq = 0;
+	frm += sizeof (struct ieee80211_frame);
+	return frm;
+}
+
+uint8_t *ieee80211_add_mandatory_field(struct ieee80211_node *ni, uint8_t *frm,
+		struct ieee80211_beacon_offsets *bo)	/* timestamp, interval, capability and SSID */
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	u_int16_t capinfo;
+
+	/* XXX timestamp is set by hardware/driver */
+	memset(frm, 0, 8);
+	frm += 8;
+	/* beacon interval */
+	*(__le16 *)frm = htole16(ni->ni_intval);
+	frm += 2;
+
+	/* capability information */
+	if (vap->iv_opmode == IEEE80211_M_IBSS)
+		capinfo = IEEE80211_CAPINFO_IBSS;
+	else
+		capinfo = IEEE80211_CAPINFO_ESS;
+	if (vap->iv_flags & IEEE80211_F_PRIVACY)
+		capinfo |= IEEE80211_CAPINFO_PRIVACY;
+	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+	    IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
+		capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+	if (ic->ic_flags & IEEE80211_F_SHSLOT)
+		capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+	if (ic->ic_flags & IEEE80211_F_DOTH)
+		capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+	if (IEEE80211_COM_NEIGHREPORT_ENABLED(ic))
+		capinfo |= IEEE80211_CAPINFO_RM;
+	bo->bo_caps = (__le16 *)frm;	/* remember location for later dynamic updates */
+	*(__le16 *)frm = htole16(capinfo);
+	frm += 2;
+
+	/* ssid */
+	*frm++ = IEEE80211_ELEMID_SSID;
+	if ((vap->iv_flags & IEEE80211_F_HIDESSID) == 0) {
+		*frm++ = ni->ni_esslen;
+		memcpy(frm, ni->ni_essid, ni->ni_esslen);
+		frm += ni->ni_esslen;
+	} else
+		*frm++ = 0;	/* hidden SSID: zero-length element */
+	return frm;
+}
+
+static uint8_t *
+ieee80211_beacon_init(struct ieee80211_node *ni, struct ieee80211_beacon_offsets *bo,
+	uint8_t *frm)	/* lay out all beacon IEs and record their offsets in bo */
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	u_int8_t add_erp = 1;
+	int16_t htinfo_channel_width = 0;
+	int16_t htinfo_2nd_channel_offset = 0;
+	int ap_pure_tkip = 0;
+	uint32_t bw;
+	uint8_t wband_chanswitch_ie_len;
+
+	KASSERT(ic->ic_bsschan != IEEE80211_CHAN_ANYC, ("no bss chan"));
+
+	if (vap->iv_bss && !vap->allow_tkip_for_vht)
+	      ap_pure_tkip = (vap->iv_bss->ni_rsn.rsn_ucastcipherset == IEEE80211_C_TKIP);
+
+	/*
+	 * Which mode is the AP operating in: 20 MHz or 20/40 MHz?
+	 * Needed to put the correct channel number in the beacon.
+	 */
+	ieee80211_get_channel_bw_offset(ic, &htinfo_channel_width, &htinfo_2nd_channel_offset);
+
+	frm = ieee80211_add_beacon_desc_mandatory_fields(ni, frm, bo);
+
+	/* supported rates */
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_RATES, frm);
+
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_DSPARMS, frm);
+	bo->bo_tim = frm;
+	/* adding ELEMENTID_IBSSPARAMS or ELEMENTID_TIM here */
+		/* IBSS/TIM */
+	if (vap->iv_opmode == IEEE80211_M_IBSS) {
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_IBSSPARMS, frm);
+		bo->bo_tim_len = 0;
+	} else {
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_TIM, frm);
+		bo->bo_tim_len = sizeof(((struct ieee80211_tim_ie *)0)->tim_bitmap);
+	}
+
+	bo->bo_tim_trailer = frm;
+	/*
+	 * Tight coupling between Country IE and Power Constraint IE
+	 * Both using IEEE80211_FEXT_COUNTRYIE to optional enable them.
+	 */
+	/* country */
+	if ((ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE) || ((ic->ic_flags & IEEE80211_F_DOTH)
+				&& (ic->ic_flags_ext & IEEE80211_FEXT_TPC)))
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_COUNTRY, frm);
+
+	if (IS_IEEE80211_11NG(ic)) {	/* 2.4GHz coexistence elements */
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_20_40_BSS_COEX, frm);
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_OBSS_SCAN, frm);
+	}
+
+	/* BSS load element */
+	bo->bo_bss_load = frm;
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_BSS_LOAD, frm);
+
+	/* RRM enabled element */
+	bo->bo_rrm_enabled = frm;
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_RRM_ENABLED, frm);
+
+	if (vap->iv_mdid)
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_MOBILITY_DOMAIN, frm);
+	/* Power constraint */
+	if (((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE))
+			|| ((ic->ic_flags & IEEE80211_F_DOTH) &&
+				(ic->ic_flags_ext & IEEE80211_FEXT_TPC)))
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_PWRCNSTR, frm);
+
+	/* Transmit power envelope */
+	if (IS_IEEE80211_VHT_ENABLED(ic) && (ic->ic_flags & IEEE80211_F_DOTH) &&
+			!(ic->ic_flags_ext & IEEE80211_FEXT_TPC))
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VHTXMTPWRENVLP, frm);
+
+	/*TPC Report*/
+	if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+		bo->bo_tpc_rep = frm;
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_TPCREP, frm);
+	}
+
+	/* Channel Switch Announcement */
+	if ((ic->ic_flags & IEEE80211_F_CHANSWITCH) && ic->ic_csa_count > 0) {
+		bo->bo_chanswitch = frm;
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_CHANSWITCHANN, frm);
+	}
+
+	if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) ||
+		(IEEE80211_IS_CHAN_11N(ic->ic_bsschan))) {
+		if (ic->ic_curmode == IEEE80211_MODE_11A ||
+			ic->ic_curmode == IEEE80211_MODE_11B) {
+			add_erp = 0;	/* ERP IE is meaningless for pure 11a/11b */
+		}
+		if (add_erp) {
+			bo->bo_erp = frm;
+			frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_ERP, frm);
+		}
+	}
+
+	bo->bo_htinfo = frm;
+	if (IEEE80211_IS_CHAN_ANYN(ic->ic_bsschan) &&
+		(ic->ic_curmode >= IEEE80211_MODE_11NA) && !ap_pure_tkip) {
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_HTCAP, frm);
+		bo->bo_htinfo = frm;	/* HT Info follows HT Cap */
+		ic->ic_htinfo.ctrlchannel = ieee80211_chan2ieee(ic, ic->ic_bsschan);
+		ic->ic_htinfo.byte1 = (htinfo_channel_width ?
+					(ic->ic_htinfo.byte1 | IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40) :
+					(ic->ic_htinfo.byte1 & ~IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40));
+		ic->ic_htinfo.choffset = htinfo_2nd_channel_offset;
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_HTINFO, frm);
+	}
+
+	/* Secondary Channel Offset */
+	if (bo->bo_chanswitch)	/* set above only during CSA; assumes bo was zeroed by caller — TODO confirm */
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_SEC_CHAN_OFF, frm);
+
+	/* Ext. Supp. Rates */
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_XRATES, frm);
+
+	/* WME */
+	bo->bo_wme = frm;
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_WME, frm);
+	vap->iv_flags &= ~IEEE80211_F_WMEUPDATE;
+
+	/* WPA 1+2 */
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_WPA, frm);
+
+	/* Can have VHT mode with 40MHz bandwidth */
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VHTCAP, frm);
+
+	if (bo->bo_chanswitch) {
+		/* Wide Bandwidth Channel Switch */
+		bw = ieee80211_get_bw(ic);
+		wband_chanswitch_ie_len = ieee80211_wband_chanswitch_ie_len(bw);
+		if (wband_chanswitch_ie_len)
+			frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_WBWCHANSWITCH, frm);
+
+		/* Channel Switch Wrapper */
+		if (IS_IEEE80211_VHT_ENABLED(ic))
+			frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_CHANSWITCHWRP, frm);
+	}
+
+	/* athAdvCaps */
+	bo->bo_ath_caps = frm;
+	if (vap->iv_bss && vap->iv_bss->ni_ath_flags)
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_ATH, frm);
+
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_QTN, frm);
+	/* Extender IE */
+	if (!IEEE80211_COM_WDS_IS_NONE(ic) && (vap == TAILQ_FIRST(&ic->ic_vaps))) {
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_EXT_ROLE, frm);
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_EXT_BSSID, frm);
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_EXT_STATE, frm);
+	}
+
+#ifdef CONFIG_QVSP
+	/* QTN WME IE */
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_QTN_WME, frm);
+#endif
+
+	/* Add epigram IE to address interop issue with Gen 1 (other vendor) STB */
+	frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_EPIGRAM, frm);
+
+	/* The QTN OCAC State IE prevents APs in the neighbourhood performing OCAC at the same time */
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_OCAC, 0)) {
+		bo->bo_ocac_state = frm;
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_QTN_OCAC_STATE, frm);
+	}
+
+	if (vap->qtn_pairing_ie.ie) {
+		frm = ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_PPS2, frm);
+	}
+
+	/* XR */
+	bo->bo_xr = frm;
+
+	bo->bo_cca = frm; /* CCA info */
+	bo->bo_appie_buf = frm;	/* application-supplied IEs appended here */
+	bo->bo_appie_buf_len = 0;
+
+	bo->bo_tim_trailerlen = frm - bo->bo_tim_trailer;
+
+	return frm;
+}
+
+/*
+ * Allocate a beacon frame and fill in the appropriate bits.
+ */
+struct sk_buff *
+ieee80211_beacon_alloc(struct ieee80211_node *ni,
+	struct ieee80211_beacon_offsets *bo)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_frame *wh;
+	struct sk_buff *skb;
+	int pktlen;
+	u_int8_t *frm;
+	struct ieee80211_rateset *rs;
+
+	/*
+	 * beacon frame format
+	 *	[8] time stamp
+	 *	[2] beacon interval
+	 *	[2] capability information
+	 *	[tlv] ssid
+	 *	[tlv] supported rates
+	 *	[7] FH/DS parameter set
+	 *	[tlv] IBSS/TIM parameter set
+	 *	[tlv] country code
+	 *	[3] power constraint
+	 *	[5] channel switch announcement
+	 *	[4] TPC Report
+	 *	[3] extended rate phy (ERP)
+	 *	[tlv] extended supported rates
+	 *	[tlv] WME parameters
+	 *	[tlv] WPA/RSN parameters
+	 *	[tlv] HT Capabilities
+	 *	[tlv] HT Information
+	 *	[tlv] Atheros Advanced Capabilities
+	 *	[tlv] AtherosXR parameters
+	 *	[tlv] Quantenna flags
+	 *	[tlv] epigram
+	 *	[tlv] RRM enabled
+	 *	[tlv] Mobility domain
+	 * NB: we allocate the max space required for the TIM bitmap.
+	 */
+	rs = &ni->ni_rates;
+	pktlen = 8					/* time stamp */
+		 + sizeof(u_int16_t)			/* beacon interval */
+		 + sizeof(u_int16_t)			/* capability information */
+		 + 2 + ni->ni_esslen			/* ssid */
+		 + 2 + IEEE80211_RATE_SIZE		/* supported rates */
+		 + 7					/* FH/DS parameters max(7,3) */
+		 + 2 + 4 + vap->iv_tim_len		/* IBSS/TIM parameter set*/
+		 + ic->ic_country_ie.country_len + 2	/* country code */
+		 + 7					/* BSS load */
+		 + 3					/* power constraint */
+		 + 4					/* tpc report */
+		 + 5					/* channel switch announcement */
+		 + 3					/* ERP */
+		 + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) /* Ext. Supp. Rates */
+		 + sizeof(struct ieee80211_wme_param)
+		 + (vap->iv_caps & IEEE80211_C_WPA ?	/* WPA 1+2 */
+			2 * sizeof(struct ieee80211_ie_wpa) : 0)
+		 + sizeof(struct ieee80211_ie_athAdvCap)
+		 +	((ic->ic_curmode >= IEEE80211_MODE_11NA) ?
+				 (sizeof(struct ieee80211_ie_htcap) +
+					 sizeof(struct ieee80211_ie_htinfo)):0)
+		 + sizeof(struct ieee80211_ie_qtn)
+		 + (vap->qtn_pairing_ie.ie ? sizeof(struct ieee80211_ie_qtn_pairing) : 0)
+		 + ((IS_IEEE80211_11NG(ic)) ? sizeof(struct ieee80211_20_40_coex_param)
+				+ sizeof(struct ieee80211_obss_scan_ie) : 0)
+#ifdef CONFIG_QVSP
+		 + ((ic->ic_wme.wme_throt_add_qwme_ie && (vap->iv_flags & IEEE80211_F_WME)) ?
+				 sizeof(struct ieee80211_ie_qtn_wme) : 0)
+#endif
+		 + ((ic->ic_vendor_fix & VENDOR_FIX_BRCM_DHCP) ? (2 + sizeof(ieee80211_epigram)) : 0)
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+		 + (ic->ic_flags & IEEE80211_F_CCA ?
+				sizeof(struct ieee80211_ie_measreq) +
+				sizeof(struct ieee80211_ie_measure_comm) : 0)
+#else
+		 + (ic->ic_flags & IEEE80211_F_CCA ? sizeof(struct ieee80211_ie_measreq) : 0)
+#endif
+		 + (IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) ?
+				(sizeof(struct ieee80211_ie_vhtcap) +
+				sizeof(struct ieee80211_ie_vhtop) +
+				sizeof(struct ieee80211_ie_chsw_wrapper) +
+				sizeof(struct ieee80211_ie_wbchansw) +
+				sizeof(struct ieee80211_ie_vtxpwren)): 0)
+		 + (ic->ic_extender_role ?
+				(sizeof(struct ieee80211_qtn_ext_role) +
+				 sizeof(struct ieee80211_qtn_ext_bssid)) : 0)
+		 + sizeof(struct ieee80211_ie_rrm)
+		 + (vap->iv_mdid ? sizeof(struct ieee80211_md_ie) : 0)
+		;
+
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_OCAC, 0))
+		pktlen += sizeof(struct ieee80211_ie_qtn_ocac_state);
+
+	skb = ieee80211_getmgtframe(&frm, pktlen);
+	if (skb == NULL) {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"%s: cannot get buf; size %u", __func__, pktlen);
+		vap->iv_stats.is_tx_nobuf++;
+		return NULL;
+	}
+
+	frm = ieee80211_beacon_init(ni, bo, frm);	/* lay out the IEs; frm ends past the last one */
+
+	skb_trim(skb, frm - skb->data);	/* shrink to actual payload size */
+
+	wh = (struct ieee80211_frame *)
+		skb_push(skb, sizeof(struct ieee80211_frame));
+
+	ieee80211_add_beacon_desc_header(ni, (uint8_t *) wh);	/* prepend the MAC header */
+
+	return skb;
+}
+EXPORT_SYMBOL(ieee80211_beacon_alloc);
+
+u_int32_t
+get_chansw_ie_len(struct ieee80211com *ic)	/* total bytes needed for CSA and related VHT IEs */
+{
+	u_int32_t length = IEEE80211_CHANSWITCHANN_BYTES;
+
+	if (IS_IEEE80211_VHT_ENABLED(ic)) {
+		length += sizeof(struct ieee80211_ie_chsw_wrapper);
+		if (ieee80211_get_bw(ic) > BW_HT20) {
+			length += sizeof(struct ieee80211_ie_wbchansw);	/* wide bandwidth channel switch */
+		}
+		if ((ic->ic_flags & IEEE80211_F_DOTH) &&
+		    (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			length += sizeof(struct ieee80211_ie_vtxpwren);	/* VHT transmit power envelope */
+		}
+	}
+	return length;
+}
+
+__inline__
+uint8_t ieee80211_sec_chan_off_ie_len(void)	/* fixed size of the Secondary Channel Offset IE */
+{
+	return IEEE80211_SEC_CHAN_OFF_IE_LEN;
+}
+
+/*
+ * Update the dynamic parts of a beacon frame based on the current state.
+ */
+void
+ieee80211_beacon_update(struct ieee80211_node *ni,
+	struct ieee80211_beacon_offsets *bo, struct sk_buff *skb, int mcast)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	u_int16_t capinfo;
+
+	IEEE80211_LOCK(ic);
+
+	/* XXX faster to recalculate entirely or just changes? */
+	if (vap->iv_opmode == IEEE80211_M_IBSS)
+		capinfo = IEEE80211_CAPINFO_IBSS;
+	else
+		capinfo = IEEE80211_CAPINFO_ESS;
+	if (vap->iv_flags & IEEE80211_F_PRIVACY)
+		capinfo |= IEEE80211_CAPINFO_PRIVACY;
+	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+	    IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
+		capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+	if (ic->ic_flags & IEEE80211_F_SHSLOT)
+		capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+	if (ic->ic_flags & IEEE80211_F_DOTH)
+		capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+	if (IEEE80211_COM_NEIGHREPORT_ENABLED(ic))
+		capinfo |= IEEE80211_CAPINFO_RM;
+
+	*bo->bo_caps = htole16(capinfo);	/* rewrite capability field in place */
+
+	if (vap->iv_flags & IEEE80211_F_WME) {
+		struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+
+		/*
+		 * Check for aggressive mode change.  When there is
+		 * significant high priority traffic in the BSS
+		 * throttle back BE traffic by using conservative
+		 * parameters.  Otherwise BE uses aggressive params
+		 * to optimize performance of legacy/non-QoS traffic.
+		 */
+		if (wme->wme_flags & WME_F_AGGRMODE) {
+			if (wme->wme_hipri_traffic >
+			    wme->wme_hipri_switch_thresh) {
+				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
+					"%s: traffic %u, disable aggressive mode",
+					__func__, wme->wme_hipri_traffic);
+				wme->wme_flags &= ~WME_F_AGGRMODE;
+				ieee80211_wme_updateparams_locked(vap);
+				wme->wme_hipri_traffic =
+					wme->wme_hipri_switch_hysteresis;
+			} else
+				wme->wme_hipri_traffic = 0;
+		} else {
+			if (wme->wme_hipri_traffic <=
+			    wme->wme_hipri_switch_thresh) {
+				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
+					"%s: traffic %u, enable aggressive mode",
+					__func__, wme->wme_hipri_traffic);
+				wme->wme_flags |= WME_F_AGGRMODE;
+				ieee80211_wme_updateparams_locked(vap);
+				wme->wme_hipri_traffic = 0;
+			} else
+				wme->wme_hipri_traffic =
+					wme->wme_hipri_switch_hysteresis;
+		}
+		/* XXX multi-bss */
+		if (vap->iv_flags & IEEE80211_F_WMEUPDATE) {
+			/*
+			 * Only update the content without changing length, descriptor is
+			 * already queued
+			 */
+			ieee80211_add_wme_param(bo->bo_wme, wme, IEEE80211_VAP_UAPSD_ENABLED(vap), 0);
+			vap->iv_flags &= ~IEEE80211_F_WMEUPDATE;
+		}
+	}
+
+	/* Only update the content without changing length, descriptor is already queued */
+	if (IEEE80211_IS_CHAN_ANYN(ic->ic_bsschan) && (ic->ic_curmode >= IEEE80211_MODE_11NA)) {
+		struct ieee80211_ie_htinfo *htinfo =
+			(struct ieee80211_ie_htinfo *)(void *)bo->bo_htinfo;
+		if (vap->iv_ht_flags & IEEE80211_HTF_HTINFOUPDATE) {
+			if (vap->iv_bss->ni_rsn.rsn_ucastcipherset != IEEE80211_C_TKIP) {
+				ieee80211_add_htinfo(ni, (u_int8_t *)htinfo, &ic->ic_htinfo);
+				vap->iv_ht_flags &= ~IEEE80211_HTF_HTINFOUPDATE;
+			}
+		}
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {	/* NB: no IBSS support*/
+		struct ieee80211_tim_ie *tie =
+			(struct ieee80211_tim_ie *) bo->bo_tim;
+		BUG_ON(bo->bo_tim == NULL);
+		/*
+		 * TIM IE is programmed in QTN FW hence code to manipulate TIM is removed
+		 */
+		tie->tim_bitctl = 0;
+
+		ieee80211_add_bss_load(bo->bo_bss_load, vap);	/* refresh BSS load IE in place */
+
+		if (ic->ic_flags & IEEE80211_F_CCA) {
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+			size_t chan_cca_ie_bytes = sizeof(struct ieee80211_ie_measreq) + sizeof(struct ieee80211_ie_measure_comm);
+#else
+			size_t chan_cca_ie_bytes = sizeof(struct ieee80211_ie_measreq);
+#endif
+			/* queue cca IE at first place */
+			ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_MEASREQ, bo->bo_cca);
+			bo->bo_xr += chan_cca_ie_bytes;/* FIXME ADM: not used, should remove them. */
+			bo->bo_appie_buf += chan_cca_ie_bytes;
+			bo->bo_tim_trailerlen += chan_cca_ie_bytes;
+			skb_put(skb, chan_cca_ie_bytes);	/* grow skb for the inserted IE */
+		}
+		ieee80211_add_rrm_enabled(bo->bo_rrm_enabled, vap);
+
+		if ((ic->ic_flags_ext & IEEE80211_FEXT_ERPUPDATE) &&
+		    bo->bo_erp) {
+			(void) ieee80211_add_erp(bo->bo_erp, ic);
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_ERPUPDATE;
+		}
+	}
+	/* if it is a mode change beacon for dynamic turbo case. */
+	if (((ic->ic_ath_cap & IEEE80211_ATHC_BOOST) != 0) ^
+			IEEE80211_IS_CHAN_TURBO(ic->ic_curchan))
+		/* Already queued, update the content only so no need to add descriptor */
+		ieee80211_add_athAdvCap(bo->bo_ath_caps, vap->iv_bss->ni_ath_flags,
+			vap->iv_bss->ni_ath_defkeyindex);
+
+	if ((vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length != 0) &&
+	    (vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].ie != NULL)) {
+		/* adjust the buffer size if the size is changed */
+		if (vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length != bo->bo_appie_buf_len) {
+			int diff_len;
+			diff_len = vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length - bo->bo_appie_buf_len;
+
+			if (diff_len > 0)
+				skb_put(skb, diff_len);	/* grow for a larger app IE */
+			else
+				skb_trim(skb, skb->len + diff_len);	/* shrink (diff_len is negative) */
+
+			bo->bo_appie_buf_len = vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length;
+			/* update the trailer lens */
+			bo->bo_tim_trailerlen += diff_len;
+
+		}
+		ieee80211_add_beacon_desc_ie(ni, IEEE80211_ELEMID_VENDOR_APP, bo->bo_appie_buf);
+
+		vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
+	}
+
+	IEEE80211_UNLOCK(ic);
+
+	return;
+}
+EXPORT_SYMBOL(ieee80211_beacon_update);
+
+void ieee80211_beacon_update_all(struct ieee80211com *ic)	/* refresh beacons on every running AP vap */
+{
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;	/* AP vaps only */
+		if (vap->iv_state != IEEE80211_S_RUN)
+			continue;	/* skip vaps that are not running yet */
+
+		ic->ic_beacon_update(vap);
+	}
+}
+EXPORT_SYMBOL(ieee80211_beacon_update_all);
diff --git a/drivers/qtn/wlan/ieee80211_beacon_desc.c b/drivers/qtn/wlan/ieee80211_beacon_desc.c
new file mode 100644
index 0000000..8f0488a
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_beacon_desc.c
@@ -0,0 +1,439 @@
+/**
+  Copyright (c) 2015 Quantenna Communications Inc
+  All Rights Reserved
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License
+  as published by the Free Software Foundation; either version 2
+  of the License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+**/
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <ruby/plat_dma_addr.h>
+#endif
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_beacon_desc.h"
+#include "qdrv/qdrv_vap.h"
+
+/* This function allocates a beacon ie and associates the beacon ie buffer. */
+static void ieee80211_beacon_associate_ie(struct beacon_shared_ie_t *ie, uint8_t *frm, uint8_t size)
+{
+	if (ie == NULL || frm == NULL)
+		return;
+	ie->size = size;
+	/* convert to bus address for MuC access only */
+	ie->buf = plat_kernel_addr_to_dma(NULL, frm);
+
+	/* to avoid memory remap, keep lhost original buffer address for debug purpose */
+	ie->lhost_buf = frm;
+	ie->next = NULL;
+	ie->next_muc_addr = 0;
+}
+
+static struct beacon_shared_ie_t *ieee80211_beacon_alloc_ie(struct ieee80211_beacon_param_t *param)
+{
+	struct beacon_shared_ie_t *ie;
+
+	if (param == NULL)
+		return NULL;
+
+	if ((param->curr + sizeof(*ie) - (uint32_t)param->buf) >
+			param->size) {
+		panic("%s: allocated space %d is not enough for adding more ie descriptors\n",
+				__func__, param->size);
+	}
+	ie = (struct beacon_shared_ie_t *)param->curr;
+	param->curr += sizeof(*ie);
+	return ie;
+}
+
+/*
+ * This function allocates an ie descriptor, constructs it with the ie payload buffer and
+ * queue it to the list
+ */
+static void ieee80211_add_beacon_ie_desc(struct ieee80211_beacon_param_t *param, uint8_t *frm,
+		uint16_t size)
+{
+	struct beacon_shared_ie_t *new_ie;
+
+	if (param == NULL || frm == NULL || size == 0)
+		return;
+	new_ie = ieee80211_beacon_alloc_ie(param);
+	if (new_ie == NULL)
+		return;
+	ieee80211_beacon_associate_ie(new_ie, frm, size);
+
+	if (param->head == NULL) {
+		param->head = new_ie;
+		param->tail = new_ie;
+	} else {
+		param->tail->next = new_ie;
+		param->tail->next_muc_addr = plat_kernel_addr_to_dma(NULL, new_ie);
+		param->tail = new_ie;
+	}
+}
+
+/* This function is the same as above but always appends the descriptor at the head. */
+static void ieee80211_add_beacon_ie_desc_head(struct ieee80211_beacon_param_t *param, uint8_t *frm,
+		uint16_t size)
+{
+	struct beacon_shared_ie_t *new_ie, *temp;
+
+	if (param == NULL || frm == NULL || size == 0)
+		return;
+	new_ie = ieee80211_beacon_alloc_ie(param);
+	if (new_ie == NULL)
+		return;
+	ieee80211_beacon_associate_ie(new_ie, frm, size);
+
+	temp = param->head;
+	new_ie->next = temp;
+	new_ie->next_muc_addr = plat_kernel_addr_to_dma(NULL, temp);
+	param->head = new_ie;
+	/* if the head wasn't set, then we need to set up the tail the same as the head */
+	if (temp == NULL)
+		param->tail = new_ie;
+}
+
+int ieee80211_beacon_create_param(struct ieee80211vap *vap)
+{
+	if (vap == NULL)
+		return -EINVAL;
+	vap->param = kmalloc(sizeof(struct ieee80211_beacon_param_t), GFP_KERNEL);
+	if (vap->param == NULL) {
+		printk("Error, %s failed to allocate beacon param %d bytes\n", __func__,
+				sizeof(struct ieee80211_beacon_param_t));
+		return -ENOMEM;
+	}
+
+	vap->param->size = BEACON_PARAM_SIZE;
+	vap->param->curr = (uint32_t)&vap->param->buf[0];
+	vap->param->head = NULL;
+	vap->param->tail = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_beacon_create_param);
+
+void ieee80211_beacon_flush_param(struct ieee80211_beacon_param_t *param)
+{
+	if (param == NULL)
+		return;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	dma_cache_wback_inv((unsigned long)param, sizeof(struct ieee80211_beacon_param_t));
+#else
+	/* Flush the linear memory so that MuC can rebuild the beacon ie via the linked list */
+	flush_dcache_range((uint32_t)param,
+			(uint32_t)param + sizeof(struct ieee80211_beacon_param_t));
+#endif
+	return;
+}
+EXPORT_SYMBOL(ieee80211_beacon_flush_param);
+
+/* This frees the descriptor memory and resets the parameters */
+void ieee80211_beacon_destroy_param(struct ieee80211vap *vap)
+{
+	kfree(vap->param);
+	vap->param = NULL;
+}
+EXPORT_SYMBOL(ieee80211_beacon_destroy_param);
+
+uint8_t *ieee80211_add_beacon_desc_header(struct ieee80211_node *ni, uint8_t *frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_beacon_param_t *param = vap->param;
+	uint8_t *post_frm;
+
+	post_frm = ieee80211_add_beacon_header(ni, frm);
+	/*
+	 * Always queue the beacon frame header on the list header position so that MuC
+	 * can compose the frame easily.
+	 */
+	ieee80211_add_beacon_ie_desc_head(param, frm, post_frm - frm);
+	return post_frm;
+}
+EXPORT_SYMBOL(ieee80211_add_beacon_desc_header);
+
+uint8_t *ieee80211_add_beacon_desc_mandatory_fields(struct ieee80211_node *ni, uint8_t *frm,
+		struct ieee80211_beacon_offsets *bo)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_beacon_param_t *param = vap->param;
+	uint8_t *post_frm;
+
+	post_frm = ieee80211_add_mandatory_field(ni, frm, bo);
+	ieee80211_add_beacon_ie_desc(param, frm, post_frm - frm);
+	return post_frm;
+}
+EXPORT_SYMBOL(ieee80211_add_beacon_desc_mandatory_fields);
+
+/*
+ * This function provides a common interface to add ie field, ext_ie_id is extended to
+ * support same IE id in different descriptors.
+ */
+uint8_t *ieee80211_add_beacon_desc_ie(struct ieee80211_node *ni, uint16_t ext_ie_id, uint8_t *frm)
+{
+	uint8_t *pre_frm = frm, *post_frm = frm;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_beacon_param_t *param = vap->param;
+	struct ieee80211_rateset *rs = &ic->ic_sup_rates[ic->ic_curmode];
+	struct ieee80211_tim_ie *tie;
+	int ap_pure_tkip = 0;
+
+	if (vap->iv_bss && !vap->allow_tkip_for_vht)
+		ap_pure_tkip = (vap->iv_bss->ni_rsn.rsn_ucastcipherset == IEEE80211_C_TKIP);
+
+
+	if (param == NULL)
+		return 0;
+
+	switch (ext_ie_id) {
+	case IEEE80211_ELEMID_RATES:
+		/* supported rates */
+		frm = ieee80211_add_rates(frm, rs);
+		break;
+	case IEEE80211_ELEMID_DSPARMS:
+		/* XXX: better way to check this? */
+		/* XXX: how about DS ? */
+		if (!IEEE80211_IS_CHAN_FHSS(ic->ic_bsschan)) {
+			*frm++ = IEEE80211_ELEMID_DSPARMS;
+			*frm++ = 1;
+			*frm++ = ieee80211_chan2ieee(ic, ic->ic_bsschan);
+		}
+		break;
+	case IEEE80211_ELEMID_IBSSPARMS:
+		*frm++ = IEEE80211_ELEMID_IBSSPARMS;
+		*frm++ = 2;
+		*frm++ = 0;
+		*frm++ = 0;		/* TODO: ATIM window */
+		break;
+	case IEEE80211_ELEMID_TIM:
+			/* IBSS/TIM */
+		tie = (struct ieee80211_tim_ie *) frm;
+
+		tie->tim_ie = IEEE80211_ELEMID_TIM;
+		/* tim length */
+		tie->tim_len = sizeof(*tie) - sizeof(tie->tim_len) - sizeof(tie->tim_ie);
+		tie->tim_count = 0;	/* DTIM count */
+		tie->tim_period = vap->iv_dtim_period;	/* DTIM period */
+		tie->tim_bitctl = 0;	/* bitmap control */
+		/* Partial virtual bitmap */
+		memset(&tie->tim_bitmap[0], 0, sizeof(tie->tim_bitmap));
+		frm += sizeof(struct ieee80211_tim_ie);
+		break;
+	case IEEE80211_ELEMID_COUNTRY:
+		frm = ieee80211_add_country(frm, ic);
+		break;
+	case IEEE80211_ELEMID_20_40_BSS_COEX:
+		frm = ieee80211_add_20_40_bss_coex_ie(frm, vap->iv_coex);
+		break;
+	case IEEE80211_ELEMID_OBSS_SCAN:
+		frm = ieee80211_add_obss_scan_ie(frm, &ic->ic_obss_ie);
+		break;
+	case IEEE80211_ELEMID_BSS_LOAD:
+		frm = ieee80211_add_bss_load(frm, vap);
+		break;
+	case IEEE80211_ELEMID_PWRCNSTR:
+		*frm++ = IEEE80211_ELEMID_PWRCNSTR;
+		*frm++ = 1;
+		*frm++ = IEEE80211_PWRCONSTRAINT_VAL(ic);
+		break;
+	case IEEE80211_ELEMID_VHTXMTPWRENVLP:
+		frm = ieee80211_add_vhttxpwr_envelope(frm, ic);
+		break;
+	case IEEE80211_ELEMID_TPCREP:
+		*frm++ = IEEE80211_ELEMID_TPCREP;
+		*frm++ = 2;
+		*frm++ = 0;	/* tx power would be updated in macfw */
+		*frm++ = 0;	/* link margin is always 0 in beacon*/
+		break;
+	case IEEE80211_ELEMID_CHANSWITCHANN:
+		frm = ieee80211_add_csa(frm, ic->ic_csa_mode,
+				ic->ic_csa_chan->ic_ieee, ic->ic_csa_count);
+		break;
+	case IEEE80211_ELEMID_ERP:
+		frm = ieee80211_add_erp(frm, ic);
+		break;
+	case IEEE80211_ELEMID_HTCAP:
+		frm = ieee80211_add_htcap(ni, frm, &ic->ic_htcap, IEEE80211_FC0_SUBTYPE_BEACON);
+		break;
+	case IEEE80211_ELEMID_HTINFO:
+		frm = ieee80211_add_htinfo(ni, frm, &ic->ic_htinfo);
+		break;
+	case IEEE80211_ELEMID_SEC_CHAN_OFF:
+		ieee80211_add_sec_chan_off(&frm, ic, ic->ic_csa_chan->ic_ieee);
+		break;
+	case IEEE80211_ELEMID_XRATES:
+		frm = ieee80211_add_xrates(frm, rs);
+		break;
+	case IEEE80211_ELEMID_VENDOR_WME:
+		if (vap->iv_flags & IEEE80211_F_WME) {
+			struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+			frm = ieee80211_add_wme_param(frm, wme, IEEE80211_VAP_UAPSD_ENABLED(vap), 0);
+		}
+		break;
+	case IEEE80211_ELEMID_VENDOR_WPA:
+		if (!vap->iv_osen && (vap->iv_flags & IEEE80211_F_WPA))
+			frm = ieee80211_add_wpa(frm, vap);
+		break;
+	case IEEE80211_ELEMID_VHTCAP:
+		if (IS_IEEE80211_VHT_ENABLED(ic) && !ap_pure_tkip) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+					"%s: VHT is Enabled in network\n", __func__);
+			/* VHT capability */
+			frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap, IEEE80211_FC0_SUBTYPE_BEACON);
+
+			/* VHT Operation element */
+			if ((IEEE80211_IS_VHT_40(ic)) || (IEEE80211_IS_VHT_20(ic))) {
+				ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+				ic->ic_vhtop.centerfreq0 = 0;
+			} else if (IEEE80211_IS_VHT_80(ic)) {
+				ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+				ic->ic_vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_80MHz;
+			} else {
+				ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_160MHZ;
+				ic->ic_vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_160MHz;
+			}
+			frm = ieee80211_add_vhtop(ni, frm, &ic->ic_vhtop);
+
+			if (SIGMA_TESTBED_SUPPORT &&
+					ic->ic_vht_opmode_notif != IEEE80211_VHT_OPMODE_NOTIF_DEFAULT) {
+				frm = ieee80211_add_vhtop_notif(ni, frm, ic, 0);
+			}
+		} else if (IS_IEEE80211_11NG_VHT_ENABLED(ic) && !ap_pure_tkip) {
+			/* QTN 2.4G band VHT IE */
+			frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap_24g, IEEE80211_FC0_SUBTYPE_BEACON);
+			frm = ieee80211_add_vhtop(ni, frm, &ic->ic_vhtop_24g);
+		} else {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+					"%s: VHT is disabled in network\n", __func__);
+		}
+		break;
+	case IEEE80211_ELEMID_WBWCHANSWITCH:
+		frm = ieee80211_add_wband_chanswitch(frm, ic);
+		break;
+	case IEEE80211_ELEMID_CHANSWITCHWRP:
+		frm = ieee80211_add_chansw_wrap(frm, ic);
+		break;
+	case IEEE80211_ELEMID_VENDOR_ATH:
+		frm = ieee80211_add_athAdvCap(frm, vap->iv_bss->ni_ath_flags,
+				vap->iv_bss->ni_ath_defkeyindex);
+		break;
+	case IEEE80211_ELEMID_VENDOR_QTN:
+		frm = ieee80211_add_qtn_ie(frm, ic,
+				(vap->iv_flags_ext & IEEE80211_FEXT_WDS ? IEEE80211_QTN_BRIDGEMODE : 0),
+				(vap->iv_flags_ext & IEEE80211_FEXT_WDS ?
+					(IEEE80211_QTN_BRIDGEMODE | IEEE80211_QTN_LNCB) : 0),
+				0, 0, 0);
+		break;
+
+	case IEEE80211_ELEMID_VENDOR_EXT_ROLE:
+		frm = ieee80211_add_qtn_extender_role_ie(frm, ic->ic_extender_role);
+		break;
+	case IEEE80211_ELEMID_VENDOR_EXT_BSSID:
+		frm = ieee80211_add_qtn_extender_bssid_ie(vap, frm);
+		break;
+	case IEEE80211_ELEMID_VENDOR_EXT_STATE:
+		frm = ieee80211_add_qtn_extender_state_ie(frm, !!ic->ic_ocac.ocac_cfg.ocac_enable);
+		break;
+	case IEEE80211_ELEMID_VENDOR_QTN_WME:
+		if (ic->ic_wme.wme_throt_add_qwme_ie
+				&& (vap->iv_flags & IEEE80211_F_WME))
+			frm = ieee80211_add_qtn_wme_param(vap, frm);
+		break;
+	case IEEE80211_ELEMID_VENDOR_EPIGRAM:
+		if (ic->ic_vendor_fix & VENDOR_FIX_BRCM_DHCP)
+			frm = ieee80211_add_epigram_ie(frm);
+		break;
+	case IEEE80211_ELEMID_VENDOR_APP:
+		ieee80211_update_bss_tm((u_int8_t *)vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].ie,
+					vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length, ic, vap);
+		memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].ie,
+			vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length);
+		frm += vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length;
+		break;
+	case IEEE80211_ELEMID_MEASREQ:
+		{
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+		size_t chan_cca_ie_bytes = sizeof(struct ieee80211_ie_measreq) + sizeof(struct ieee80211_ie_measure_comm);
+		struct ieee80211_ie_measure_comm *ie_comm = (struct ieee80211_ie_measure_comm *)frm;
+		struct ieee80211_ie_measreq *ie = (struct ieee80211_ie_measreq *) ie_comm->data;
+
+		ie_comm->id = IEEE80211_ELEMID_MEASREQ;
+		ie_comm->len = chan_cca_ie_bytes - 2;
+		ie_comm->token = ic->ic_cca_token;
+		ie_comm->mode = IEEE80211_CCA_REQMODE_ENABLE | IEEE80211_CCA_REQMODE_REQUEST;
+		ie_comm->type = IEEE80211_CCA_MEASTYPE_CCA;
+#else
+		size_t chan_cca_ie_bytes = sizeof(struct ieee80211_ie_measreq);
+		struct ieee80211_ie_measreq *ie = (struct ieee80211_ie_measreq *) frm;
+
+		ie->id = IEEE80211_ELEMID_MEASREQ;
+		ie->len = sizeof(struct ieee80211_ie_measreq) - 2;
+		ie->meas_token = ic->ic_cca_token;
+		ie->req_mode = IEEE80211_CCA_REQMODE_ENABLE | IEEE80211_CCA_REQMODE_REQUEST;
+		ie->meas_type = IEEE80211_CCA_MEASTYPE_CCA;
+#endif
+		ie->chan_num = ic->ic_cca_chan;
+		ie->start_tsf = htonll(ic->ic_cca_start_tsf);
+		ie->duration_tu = htons(ic->ic_cca_duration_tu);
+
+		frm += chan_cca_ie_bytes;
+		break;
+		}
+	case IEEE80211_ELEMID_VENDOR_QTN_OCAC_STATE:
+		frm = ieee80211_add_qtn_ocac_state_ie(frm);
+		break;
+	case IEEE80211_ELEMID_RRM_ENABLED:
+		frm = ieee80211_add_rrm_enabled(frm, vap);
+		break;
+	case IEEE80211_ELEMID_MOBILITY_DOMAIN:
+		frm = ieee80211_add_mdie(frm, vap);
+		break;
+	default:
+		break;
+	}
+	post_frm = frm;
+	if (post_frm != pre_frm)
+		ieee80211_add_beacon_ie_desc(param, pre_frm, post_frm - pre_frm);
+
+	return post_frm;
+}
+EXPORT_SYMBOL(ieee80211_add_beacon_desc_ie);
+
+void ieee80211_dump_beacon_desc_ie(struct ieee80211_beacon_param_t *param)
+{
+	struct beacon_shared_ie_t  *desc;
+	int index = 0;
+
+	desc = param->head;
+	while (desc) {
+		printk("LHOST Dump Beacon IE %d\n", ++index);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_ADDRESS,
+			16, 1, desc->lhost_buf, desc->size, false);
+		desc = desc->next;
+	}
+
+}
+EXPORT_SYMBOL(ieee80211_dump_beacon_desc_ie);
diff --git a/drivers/qtn/wlan/ieee80211_chan_select.c b/drivers/qtn/wlan/ieee80211_chan_select.c
new file mode 100755
index 0000000..29ce0a9
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_chan_select.c
@@ -0,0 +1,1424 @@
+/*-
+ * Copyright (c) 2016 Quantenna
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_chan_select.h"
+
+
+static const struct autochan_ranking_params g_ranking_params_2g_scsoff = {
+	0, 0, 100, 0, 10, 0, -80, 2, 5,
+};
+
+static const struct autochan_ranking_params g_ranking_params_5g_scsoff = {
+	10, 5, 100, 20, 10, -30, -80, 2, 5,
+};
+
+static const struct autochan_ranking_params g_ranking_params_5g_scson = {
+	100, 20, 10, 5, 10, -30, -80, 2, 5,
+};
+
+static const struct chan_aci_params g_aci_params[CHAN_NUMACIBINS] = {
+	{-30, 80, 5},
+	{-62, 20, 1},
+};
+
+static struct ieee80211_chanset g_chansets_2g_bw20[] = {
+	{ 1, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  1, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 2, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  2, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 3, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  3, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 4, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  4, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 5, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  5, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 6, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  6, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 7, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  7, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 8, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  8, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 9, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  9, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{10, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 10, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{11, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 11, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{12, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 12, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{13, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 13, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{14, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 14, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+static struct ieee80211_chanset g_chansets_2g_bw40[] = {
+	{ 1, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  3, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 2, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  4, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 3, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  5, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 4, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  6, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 5, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  7, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 6, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  8, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 7, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  9, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 8, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 10, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 9, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 11, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 5, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  3, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 6, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  4, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 7, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  5, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 8, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  6, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 9, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  7, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{10, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  8, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{11, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  9, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{12, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 10, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{13, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 11, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+static struct ieee80211_chanset g_chansets_5g_bw20[] = {
+	{ 36, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  36, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 40, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  40, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 44, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  44, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 48, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  48, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 52, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  52, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 56, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  56, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 60, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  60, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 64, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20,  64, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{100, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 100, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{104, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 104, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{108, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 108, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{112, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 112, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{116, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 116, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{120, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 120, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{124, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 124, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{128, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 128, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{132, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 132, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{136, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 136, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{140, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 140, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{144, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 144, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{149, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 149, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{153, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 153, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{157, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 157, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{161, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 161, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{165, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 165, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{169, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 169, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{184, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 184, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{188, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 188, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{192, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 192, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{196, IEEE80211_HTINFO_CHOFF_SCN, BW_HT20, 196, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+static struct ieee80211_chanset g_chansets_5g_bw40[] = {
+	{ 36, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  38, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 40, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  38, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 44, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  46, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 48, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  46, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 52, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  54, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 56, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  54, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 60, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40,  62, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 64, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40,  62, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{100, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 102, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{104, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 102, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{108, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 110, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{112, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 110, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{116, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 118, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{120, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 118, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{124, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 126, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{128, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 126, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{132, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 134, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{136, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 134, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{140, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 142, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{144, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 142, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{149, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 151, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{153, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 151, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{157, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 159, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{161, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 159, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{184, IEEE80211_HTINFO_CHOFF_SCA, BW_HT40, 186, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{188, IEEE80211_HTINFO_CHOFF_SCB, BW_HT40, 186, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{192, IEEE80211_HTINFO_CHOFF_SCA, BW_HT20, 194, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{196, IEEE80211_HTINFO_CHOFF_SCB, BW_HT20, 194, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+static struct ieee80211_chanset g_chansets_5g_bw80[] = {
+	{ 36, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80,  42, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 40, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80,  42, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 44, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80,  42, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 48, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80,  42, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 52, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80,  58, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 56, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80,  58, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 60, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80,  58, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 64, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80,  58, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{100, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 106, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{104, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 106, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{108, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 106, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{112, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 106, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{116, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 122, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{120, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 122, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{124, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 122, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{128, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 122, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{132, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 138, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{136, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 138, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{140, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 138, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{144, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 138, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{149, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 155, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{153, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 155, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{157, IEEE80211_HTINFO_CHOFF_SCA, BW_HT80, 155, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{161, IEEE80211_HTINFO_CHOFF_SCB, BW_HT80, 155, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+static struct ieee80211_chanset g_chansets_5g_bw160[] = {
+	{ 36, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 40, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 44, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 48, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 52, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 56, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 60, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{ 64, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160,  50, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{100, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{104, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{108, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{112, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{116, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{120, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{124, IEEE80211_HTINFO_CHOFF_SCA, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+	{128, IEEE80211_HTINFO_CHOFF_SCB, BW_HT160, 114, 0, 0, {0}, {0}, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+
+__inline int
+ieee80211_chan_selection_allowed(struct ieee80211com *ic)
+{
+	if ((ic->ic_opmode == IEEE80211_M_HOSTAP) &&
+		IS_IEEE80211_24G_BAND(ic))
+		return 1;
+	else
+		return 0;
+}
+
+static struct ieee80211_chanset *
+ieee80211_find_chan_table(int band, int bw, int *table_size)
+{
+	struct ieee80211_chanset *table = NULL;
+	*table_size = 0;
+
+	if (band == IEEE80211_2_4Ghz) {
+		switch (bw) {
+		case BW_HT20:
+			table = g_chansets_2g_bw20;
+			*table_size = ARRAY_SIZE(g_chansets_2g_bw20);
+			break;
+		case BW_HT40:
+			table = g_chansets_2g_bw40;
+			*table_size = ARRAY_SIZE(g_chansets_2g_bw40);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (bw) {
+		case BW_HT20:
+			table = g_chansets_5g_bw20;
+			*table_size = ARRAY_SIZE(g_chansets_5g_bw20);
+			break;
+		case BW_HT40:
+			table = g_chansets_5g_bw40;
+			*table_size = ARRAY_SIZE(g_chansets_5g_bw40);
+			break;
+		case BW_HT80:
+			table = g_chansets_5g_bw80;
+			*table_size = ARRAY_SIZE(g_chansets_5g_bw80);
+			break;
+		case BW_HT160:
+			table = g_chansets_5g_bw160;
+			*table_size = ARRAY_SIZE(g_chansets_5g_bw160);
+			break;
+		default:
+			break;
+		}
+	}
+
+	return table;
+}
+
+static struct ieee80211_chanset *
+ieee80211_find_chanset(struct ieee80211_chanset_table *table,
+	int chan, int bw, int sec_chan)
+{
+	struct ieee80211_chanset *chanset = NULL;
+	int i;
+
+	if (!table)
+		return NULL;
+
+	for (i = 0; i < table->num; i++) {
+		if ((chan == table->chanset[i].pri_chan) &&
+			(bw == table->chanset[i].bw) &&
+			(sec_chan == table->chanset[i].sec20_offset)) {
+			chanset = &table->chanset[i];
+			break;
+		}
+	}
+
+	return chanset;
+}
+
+static struct ieee80211_chanset *
+ieee80211_get_beacon_chanset(int chan, int bw, int sec_off)
+{
+	struct ieee80211_chanset_table chanset_table;
+	struct ieee80211_chanset *chanset = NULL;
+	int table_size;
+	int band;
+
+	if (chan <= QTN_2G_LAST_OPERATING_CHAN)
+		band = IEEE80211_2_4Ghz;
+	else
+		band = IEEE80211_5Ghz;
+
+	chanset = ieee80211_find_chan_table(band, bw, &table_size);
+	if (!chanset)
+		return NULL;
+
+	chanset_table.chanset = chanset;
+	chanset_table.num = table_size;
+
+	return ieee80211_find_chanset(&chanset_table, chan, bw, sec_off);
+}
+
+
+/*
+ * Return the upper frequency edge (MHz) of @chanset's co-channel
+ * interference span: band start + center channel * channel spacing
+ * + half the bandwidth.  On 2.4G in a very dense neighbourhood the
+ * half-bandwidth span is replaced by the tunable dense_cci_span.
+ */
+static int
+ieee80211_get_chanset_cci_high_edge(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset)
+{
+	int start_freq;
+	int neighbor_type;
+	int cci_span = chanset->bw / 2;
+
+	if (IS_IEEE80211_24G_BAND(ic)) {
+		neighbor_type = ieee80211_get_type_of_neighborhood(ic);
+		if (neighbor_type == IEEE80211_NEIGHBORHOOD_TYPE_VERY_DENSE)
+			cci_span = ic->ic_autochan_ranking_params.dense_cci_span;
+	}
+
+	/* Select the band base frequency from the primary channel number */
+	if (chanset->pri_chan < QTN_5G_FIRST_OPERATING_CHAN)
+		start_freq = IEEE80211_2GBAND_START_FREQ;
+	else if (chanset->pri_chan >= QTN_4G_FIRST_OPERATING_CHAN)
+		start_freq = IEEE80211_4GBAND_START_FREQ;
+	else
+		start_freq = IEEE80211_5GBAND_START_FREQ;
+
+	return start_freq + chanset->center_chan *
+			IEEE80211_CHAN_SPACE + cci_span;
+}
+
+/*
+ * Return the lower frequency edge (MHz) of @chanset's co-channel
+ * interference span.  Mirror of ieee80211_get_chanset_cci_high_edge():
+ * same base-frequency and span selection, but the span is subtracted
+ * from the center frequency instead of added.
+ */
+static int
+ieee80211_get_chanset_cci_low_edge(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset)
+{
+	int start_freq;
+	int neighbor_type;
+	int cci_span = chanset->bw / 2;
+
+	if (IS_IEEE80211_24G_BAND(ic)) {
+		neighbor_type = ieee80211_get_type_of_neighborhood(ic);
+		if (neighbor_type == IEEE80211_NEIGHBORHOOD_TYPE_VERY_DENSE)
+			cci_span = ic->ic_autochan_ranking_params.dense_cci_span;
+	}
+
+	if (chanset->pri_chan < QTN_5G_FIRST_OPERATING_CHAN)
+		start_freq = IEEE80211_2GBAND_START_FREQ;
+	else if (chanset->pri_chan >= QTN_4G_FIRST_OPERATING_CHAN)
+		start_freq = IEEE80211_4GBAND_START_FREQ;
+	else
+		start_freq = IEEE80211_5GBAND_START_FREQ;
+
+	return start_freq + chanset->center_chan *
+			IEEE80211_CHAN_SPACE - cci_span;
+}
+
+/*
+ * Zero all per-scan measurement and ranking fields of every entry in
+ * @table, so a fresh channel-selection run starts from a clean slate.
+ * The static identity fields (pri_chan, bw, center_chan, sec20_offset)
+ * are left untouched.
+ */
+static void
+ieee80211_reset_chan_table_values(struct ieee80211com *ic,
+	struct ieee80211_chanset_table *table)
+{
+	struct ieee80211_chanset *chanset;
+	int i;
+
+	for (i = 0; i < table->num; i++) {
+		chanset = &table->chanset[i];
+		chanset->invalid = 0;
+		chanset->inactive = 0;
+		memset(chanset->cca_array, 0, sizeof(chanset->cca_array));
+		memset(chanset->cca_pri, 0, sizeof(chanset->cca_pri));
+		chanset->cca_intf = 0;
+		chanset->cci_instnt = 0;
+		chanset->aci_instnt = 0;
+		chanset->cci_longterm = 0;
+		chanset->aci_longterm = 0;
+		chanset->range_cost = 0;
+		chanset->is_dfs = 0;
+		chanset->cost = 0;
+	}
+}
+
+/*
+ * Mark chanset entries as invalid when they cannot legally be used:
+ * the primary channel is not in the bandwidth-specific active list, is
+ * explicitly configured primary-inactive, is administratively disabled,
+ * or (for >20MHz chansets) one of the required secondary channels is
+ * not active.  Always returns 0.
+ */
+static int
+ieee80211_udpate_chan_table_invalid_flag(struct ieee80211com *ic,
+	struct ieee80211_chanset *table, int chanset_size)
+{
+	struct ieee80211_chanset *chan;
+	struct ieee80211_channel *ch;
+	uint8_t *active_list = ic->ic_chan_active;
+	int sec20;
+	int sec40u;
+	int sec40l;
+	int i;
+
+	for (i = 0; i < chanset_size; i++) {
+		chan = &table[i];
+
+		/* Pick the active-channel bitmap that matches this bandwidth */
+		switch (chan->bw) {
+		case BW_HT160:
+			/* TODO: 160Mhz support here */
+			/* NOTE(review): no assignment here, so active_list keeps
+			 * the value from the previous iteration (or the initial
+			 * ic_chan_active) — confirm this is intended until 160MHz
+			 * support is added. */
+			break;
+		case BW_HT80:
+			active_list = ic->ic_chan_active_80;
+			break;
+		case BW_HT40:
+			active_list = ic->ic_chan_active_40;
+			break;
+		case BW_HT20:
+			active_list = ic->ic_chan_active_20;
+			break;
+		default:
+			active_list = ic->ic_chan_active;
+			break;
+		}
+
+		if (isclr(active_list, chan->pri_chan)) {
+			chan->invalid = 1;
+			continue;
+		}
+
+		if (isset(ic->ic_chan_pri_inactive, chan->pri_chan)) {
+			chan->invalid = 1;
+			continue;
+		}
+
+		if (ieee80211_is_channel_disabled(ic, chan->pri_chan, chan->bw)) {
+			chan->invalid = 1;
+			continue;
+		}
+
+		/* For 40MHz and above, the secondary 20MHz channel must be
+		 * active as well */
+		if (chan->bw > BW_HT20) {
+			if (chan->sec20_offset == IEEE80211_HTINFO_CHOFF_SCA)
+				sec20 = chan->pri_chan + IEEE80211_CHAN_SEC_SHIFT;
+			else if (chan->sec20_offset == IEEE80211_HTINFO_CHOFF_SCB)
+				sec20 = chan->pri_chan - IEEE80211_CHAN_SEC_SHIFT;
+			else
+				sec20 = chan->pri_chan;
+
+			if (isclr(active_list, sec20)) {
+				chan->invalid = 1;
+				continue;
+			}
+
+			/* For 80MHz+, both secondary-40 halves must be active */
+			if (chan->bw > BW_HT40) {
+				ch = findchannel_any(ic, chan->pri_chan, ic->ic_des_mode);
+				if (!is_ieee80211_chan_valid(ch)) {
+					IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+						"%s: fail to find channel %d\n",
+						__func__, chan->pri_chan);
+					continue;
+				}
+
+				sec40u = ieee80211_find_sec40u_chan(ch);
+				sec40l = ieee80211_find_sec40l_chan(ch);
+
+				if (isclr(active_list, sec40u) ||
+						isclr(active_list, sec40l)) {
+					chan->invalid = 1;
+					continue;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Copy the scaled per-channel CCA scan results into the matching
+ * entries of ic->ic_autochan_table.  For each channel number the
+ * secondary-channel offset is derived from the channel flags (only
+ * relevant when the scan bandwidth is above 20MHz).  Returns -1 if
+ * channel selection is not currently allowed, 0 otherwise.
+ */
+static int
+ieee80211_update_scan_cca_info(struct ieee80211com *ic)
+{
+	struct qtn_scs_scan_info scan_info;
+	struct ieee80211_chanset *chanset;
+	struct ieee80211_channel *chan;
+	int sec20_offset;
+	int ret;
+	int i;
+
+	if (!ieee80211_chan_selection_allowed(ic))
+		return -1;
+
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		chan = findchannel(ic, i, ic->ic_des_mode);
+		if (!is_ieee80211_chan_valid(chan))
+			continue;
+
+		ret = ieee80211_scs_get_scaled_scan_info(ic, i, &scan_info);
+		if (ret != 0)
+			continue;
+
+		if (scan_info.bw_sel == BW_HT20) {
+			sec20_offset = IEEE80211_HTINFO_CHOFF_SCN;
+		} else {
+			if (chan->ic_flags & IEEE80211_CHAN_HT40U)
+				sec20_offset = IEEE80211_HTINFO_CHOFF_SCA;
+			else if (chan->ic_flags & IEEE80211_CHAN_HT40D)
+				sec20_offset = IEEE80211_HTINFO_CHOFF_SCB;
+			else
+				sec20_offset = IEEE80211_HTINFO_CHOFF_SCN;
+		}
+
+		chanset = ieee80211_find_chanset(&ic->ic_autochan_table,
+				i, scan_info.bw_sel, sec20_offset);
+		if (!chanset) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_WARN,
+				"%s: don't find chanset for channel %d"
+				" bw %d and sec_chan_offset %d\n", __func__,
+				i, scan_info.bw_sel, sec20_offset);
+			continue;
+		}
+
+		chanset->cca_intf = scan_info.cca_intf;
+		chanset->cca_array[0] = scan_info.cca_pri;
+		chanset->cca_array[1] = scan_info.cca_sec20;
+		chanset->cca_array[2] = scan_info.cca_sec40;
+
+		/*
+		 * variables cca_pri are designed to store CCA levels in different
+		 * RSSI strength, while currently hardware doesn't support this.
+		 * So only store the total CCA level in the last entry.
+		 */
+		chanset->cca_pri[0] = 0;
+		chanset->cca_pri[1] = scan_info.cca_pri;
+	}
+
+	return 0;
+}
+
+/*
+ * Instantaneous co-channel interference is taken directly from the
+ * measured CCA interference of each chanset.  Always returns 0.
+ */
+static int
+ieee80211_update_chan_table_cci_instnt(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	int i;
+
+	for (i = 0; i < chanset_size; i++)
+		chanset[i].cci_instnt = chanset[i].cca_intf;
+
+	return 0;
+}
+
+/*
+ * Accumulate instantaneous adjacent-channel interference: for every
+ * pair (c1, c2) of chansets and every ACI RSSI bin, if c2's CCI span
+ * falls within the bin's frequency reach just outside c1's span
+ * (on either side), add c2's per-bin primary CCA into c1->aci_instnt.
+ * Always returns 0.
+ */
+static int
+ieee80211_udpate_chan_table_aci_instnt(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	struct ieee80211_chanset *c1;
+	struct ieee80211_chanset *c2;
+	int c1_upperedge;
+	int c1_loweredge;
+	int c2_upperedge;
+	int c2_loweredge;
+	int i, j, k;
+
+	for (i = 0; i < chanset_size; i++) {
+		c1 = &chanset[i];
+		c1_upperedge = ieee80211_get_chanset_cci_high_edge(ic, c1);
+		c1_loweredge = ieee80211_get_chanset_cci_low_edge(ic, c1);
+
+		for (j = 0; j < chanset_size; j++) {
+			c2 = &chanset[j];
+			c2_upperedge = ieee80211_get_chanset_cci_high_edge(ic, c2);
+			c2_loweredge = ieee80211_get_chanset_cci_low_edge(ic, c2);
+
+			for (k = 0; k < CHAN_NUMACIBINS; k++) {
+				/* c2 sits just below c1's lower edge */
+				if ((c2_upperedge > (c1_loweredge - g_aci_params[k].bw)) &&
+					(c2_upperedge <= c1_loweredge))
+					c1->aci_instnt += c2->cca_pri[k];
+
+				/* c2 sits just above c1's upper edge */
+				if ((c2_loweredge < (c1_upperedge + g_aci_params[k].bw)) &&
+					(c2_loweredge >= c1_upperedge))
+					c1->aci_instnt += c2->cca_pri[k];
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Accumulate long-term CCI/ACI contributions from every beacon in the
+ * scan cache.  For each beacon: determine its chanset and CCI span,
+ * classify its (pseudo-adjusted) RSSI into an ACI bin, then for every
+ * candidate chanset bump cci_longterm on span overlap (when the beacon
+ * is strong enough) and aci_longterm when the beacon lies just outside
+ * the candidate's span within the bin's reach.
+ *
+ * Returns -1 unless operating in AP mode, 0 otherwise.
+ *
+ * Fix: @aci is now re-initialized for every beacon.  Previously it was
+ * set once before the loops, so a beacon whose RSSI matched no bin
+ * silently inherited the ACI bin of the previous beacon instead of the
+ * intended weakest-bin default (CHAN_NUMACIBINS - 1).
+ */
+static int
+ieee80211_udpate_chan_table_cci_aci_longterm(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	struct ap_state *as = ic->ic_scan->ss_priv;
+	struct ap_scan_entry *se, *next;
+	struct ieee80211_scan_entry *ise;
+	struct ieee80211_chanset *chan;
+	char ssid[IEEE80211_NWID_LEN + 1];
+	int aci;
+	int b_sec_offset;
+	int b_bw;
+	int rssi;
+	int b_upperedge;
+	int b_loweredge;
+	int c_upperedge;
+	int c_loweredge;
+	int i, j;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return -1;
+
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		TAILQ_FOREACH_SAFE(se, &as->as_scan_list[i].asl_head, ase_list, next) {
+			ise = &se->base;
+
+			b_bw = ieee80211_get_max_ap_bw(ise);
+			b_sec_offset = ieee80211_get_ap_sec_chan_offset(ise);
+			chan = ieee80211_get_beacon_chanset(i, b_bw, b_sec_offset);
+			if (!chan) {
+				memset(ssid, 0, sizeof(ssid));
+				memcpy(ssid, &ise->se_ssid[2],
+					MIN(IEEE80211_NWID_LEN, ise->se_ssid[1]));
+
+				IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+					"%s: fail to find chanset for beacon %s "
+					"(channel %d bw %d sec20_offset %d)\n", __func__,
+					ssid, i, b_bw, b_sec_offset);
+				continue;
+			}
+			b_upperedge = ieee80211_get_chanset_cci_high_edge(ic, chan);
+			b_loweredge = ieee80211_get_chanset_cci_low_edge(ic, chan);
+
+			rssi = ise->se_rssi - IEEE80211_PSEUDO_RSSI_TRANSITON_FACTOR;
+
+			/* Classify this beacon's RSSI into an ACI bin; default to
+			 * the weakest bin when no threshold is met */
+			aci = CHAN_NUMACIBINS - 1;
+			for (j = 0; j < CHAN_NUMACIBINS; j++) {
+				if (rssi >= g_aci_params[j].rssi) {
+					aci = j;
+					break;
+				}
+			}
+
+			for (j = 0; j < chanset_size; j++) {
+				chan = &chanset[j];
+				c_upperedge = ieee80211_get_chanset_cci_high_edge(ic, chan);
+				c_loweredge = ieee80211_get_chanset_cci_low_edge(ic, chan);
+
+				/* Span overlap + sufficient strength => co-channel hit */
+				if ((rssi > ic->ic_autochan_ranking_params.min_cochan_rssi) &&
+						(b_upperedge > c_loweredge) &&
+						(b_loweredge < c_upperedge))
+					chan->cci_longterm++;
+				/* Beacon just outside the span on either side => ACI hit */
+				if (((b_upperedge > (c_loweredge - g_aci_params[aci].bw)) &&
+						(b_upperedge <= c_loweredge)) ||
+					((b_loweredge < (c_upperedge + g_aci_params[aci].bw)) &&
+						(b_loweredge >= c_upperedge)))
+					chan->aci_longterm += g_aci_params[aci].weight;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Compute the range cost of each chanset: the shortfall between the
+ * regulatory maximum power and the device's actual maximum power
+ * (beamforming off, 1 spatial stream) at the chanset's bandwidth.
+ * Larger values mean more coverage is sacrificed.  Always returns 0.
+ */
+static int
+ieee80211_update_chan_table_range_cost(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	struct ieee80211_channel *chan;
+	int maxpower_chan;
+	int maxpower_reg;
+	int i;
+
+	for (i = 0; i < chanset_size; i++) {
+		chan = findchannel_any(ic, chanset[i].pri_chan, ic->ic_des_mode);
+		if (!is_ieee80211_chan_valid(chan)) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_WARN,
+				"%s: fail to find channel %d\n", __func__,
+				chanset[i].pri_chan);
+			continue;
+		}
+
+		maxpower_chan = chan->ic_maxpower_table
+			[PWR_IDX_BF_OFF][PWR_IDX_1SS][chanset[i].bw];
+		maxpower_reg = chan->ic_maxregpower;
+
+		chanset[i].range_cost =	maxpower_reg - maxpower_chan;
+	}
+
+	return 0;
+}
+
+/*
+ * Set each chanset's is_dfs flag from the primary channel's
+ * IEEE80211_CHAN_DFS flag.  Entries whose channel cannot be resolved
+ * are skipped (leaving is_dfs at its previous value).
+ * Always returns 0.
+ */
+static int
+ieee80211_update_chan_table_dfs_flag(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	struct ieee80211_channel *chan;
+	int i;
+
+	for (i = 0; i < chanset_size; i++) {
+		chan = findchannel_any(ic, chanset[i].pri_chan, ic->ic_des_mode);
+		if (!is_ieee80211_chan_valid(chan)) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_WARN,
+				"%s: fail to find channel %d\n", __func__,
+				chanset[i].pri_chan);
+			continue;
+		}
+
+		if (chan->ic_flags & IEEE80211_CHAN_DFS)
+			chanset[i].is_dfs = 1;
+		else
+			chanset[i].is_dfs = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Mark >20MHz chansets inactive when 20/40MHz coexistence rules forbid
+ * them, based on every beacon found during scan: for each beacon,
+ * derive its primary/secondary channels, then test each candidate
+ * chanset with ieee80211_20_40_operation_permitted().
+ *
+ * Returns -1 unless operating in AP mode, 0 otherwise.
+ *
+ * Fix: the SSID buffer is now filled in once per beacon, before any
+ * use.  Previously it was only populated inside the "chanset not
+ * found" branch, so the later "fail to find channel %d for beacon %s"
+ * log could print an uninitialized (first beacon) or stale stack
+ * buffer.
+ */
+static int
+ieee80211_udpate_chan_table_inactive_flag(struct ieee80211com *ic,
+	struct ieee80211_chanset *table, int chanset_size)
+{
+	struct ap_state *as = ic->ic_scan->ss_priv;
+	struct ap_scan_entry *se, *next;
+	struct ieee80211_scan_entry *ise;
+	char ssid[IEEE80211_NWID_LEN + 1];
+	struct ieee80211_chanset *chanset;
+	struct ieee80211_channel chan;
+	struct ieee80211_channel *ch;
+	int b_bw;
+	int b_pri_chan;
+	int b_sec_offset;
+	int b_sec20_chan;
+	int i, j;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return -1;
+
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		TAILQ_FOREACH_SAFE(se, &as->as_scan_list[i].asl_head, ase_list, next) {
+			ise = &se->base;
+
+			/* Capture the SSID up front; it is used by more than one
+			 * error path below */
+			memset(ssid, 0, sizeof(ssid));
+			memcpy(ssid, &ise->se_ssid[2],
+				MIN(IEEE80211_NWID_LEN, ise->se_ssid[1]));
+
+			b_bw = ieee80211_get_max_ap_bw(ise);
+			b_sec_offset = ieee80211_get_ap_sec_chan_offset(ise);
+			chanset = ieee80211_get_beacon_chanset(i, b_bw, b_sec_offset);
+			if (!chanset) {
+				IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+					"%s: fail to find chanset for beacon %s "
+					"(channel %d bw %d sec20_offset %d)\n", __func__,
+					ssid, i, b_bw, b_sec_offset);
+				continue;
+			}
+
+			ch = findchannel_any(ic, i, ic->ic_des_mode);
+			if (!is_ieee80211_chan_valid(ch)) {
+				IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+					"%s: fail to find channel %d for beacon %s\n",
+					__func__, i, ssid);
+				continue;
+			}
+
+			/* Work out the beacon's secondary-20 channel (0 if none) */
+			b_pri_chan = i;
+			if (b_sec_offset == IEEE80211_HTINFO_CHOFF_SCA)
+				b_sec20_chan = b_pri_chan + IEEE80211_SEC_CHAN_OFFSET;
+			else if (b_sec_offset == IEEE80211_HTINFO_CHOFF_SCB)
+				b_sec20_chan = b_pri_chan - IEEE80211_SEC_CHAN_OFFSET;
+			else
+				b_sec20_chan = 0;
+
+			for (j = 0; j < chanset_size; j++) {
+				chanset = &table[j];
+
+				if (chanset->inactive == 1)
+					continue;
+
+				/* Coexistence rules only restrict >20MHz operation */
+				if (chanset->bw == BW_HT20)
+					continue;
+
+				ch = findchannel_any(ic, chanset->pri_chan, ic->ic_des_mode);
+				if (!is_ieee80211_chan_valid(ch)) {
+					IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+						"%s: fail to find channel %d\n",
+						__func__, chanset->pri_chan);
+					continue;
+				}
+
+				/* Test on a local copy so the shared channel entry's
+				 * sec-offset is not clobbered */
+				memcpy(&chan, ch, sizeof(chan));
+				if (ieee80211_dual_sec_chan_supported(ic, chanset->pri_chan))
+					ieee80211_update_sec_chan_offset(&chan, chanset->sec20_offset);
+
+				chanset->inactive = !ieee80211_20_40_operation_permitted(ic,
+						&chan, b_pri_chan, b_sec20_chan);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Combine all per-chanset metrics into a single scalar cost using the
+ * configured ranking weights.  Lower cost is better.  The trailing
+ * "- dfs_factor" rebases the DFS term so non-DFS channels contribute a
+ * negative (favourable) amount and DFS channels contribute zero.
+ * Always returns 0.
+ */
+static int
+ieee80211_udpate_chan_table_cost(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	int i;
+	struct autochan_ranking_params *rank_params = &ic->ic_autochan_ranking_params;
+
+	for (i = 0; i < chanset_size; i++) {
+		chanset[i].cost =
+			rank_params->cci_instnt_factor * chanset[i].cci_instnt +
+			rank_params->aci_instnt_factor * chanset[i].aci_instnt +
+			rank_params->cci_longterm_factor * chanset[i].cci_longterm +
+			rank_params->aci_longterm_factor * chanset[i].aci_longterm +
+			rank_params->range_factor * chanset[i].range_cost +
+			rank_params->dfs_factor * chanset[i].is_dfs -
+			rank_params->dfs_factor;
+	}
+
+	return 0;
+}
+
+/*
+ * Run the full chanset-table evaluation pipeline in order: validity,
+ * instantaneous CCI/ACI, long-term CCI/ACI, range cost, DFS flag,
+ * 20/40-coexistence inactivity, and finally the combined cost.
+ * Returns -1 if channel selection is not allowed, 0 otherwise.
+ */
+static int
+ieee80211_udpate_chan_table_values(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	if (!ieee80211_chan_selection_allowed(ic))
+		return -1;
+
+	ieee80211_udpate_chan_table_invalid_flag(ic, chanset, chanset_size);
+	ieee80211_update_chan_table_cci_instnt(ic, chanset, chanset_size);
+	ieee80211_udpate_chan_table_aci_instnt(ic, chanset, chanset_size);
+	ieee80211_udpate_chan_table_cci_aci_longterm(ic, chanset, chanset_size);
+	ieee80211_update_chan_table_range_cost(ic, chanset, chanset_size);
+	ieee80211_update_chan_table_dfs_flag(ic, chanset, chanset_size);
+	ieee80211_udpate_chan_table_inactive_flag(ic, chanset, chanset_size);
+	ieee80211_udpate_chan_table_cost(ic, chanset, chanset_size);
+
+	return 0;
+}
+
+/*
+ * Debug dump of every beacon in the scan cache: SSID, channel,
+ * bandwidth, secondary offset, CCI span edges, adjusted RSSI and the
+ * ACI bin span the beacon was classified into.
+ *
+ * Fix: @aci is now re-initialized for every beacon.  Previously it was
+ * set once before the loops, so a beacon whose RSSI matched no bin
+ * printed the ACI span inherited from the previous beacon instead of
+ * the weakest-bin default (mirrors the same fix in
+ * ieee80211_udpate_chan_table_cci_aci_longterm()).
+ */
+static void
+ieee80211_dump_neighbor_beacon_info(struct ieee80211com *ic)
+{
+	struct ap_state *as = ic->ic_scan->ss_priv;
+	struct ap_scan_entry *se, *next;
+	struct ieee80211_scan_entry *ise;
+	struct ieee80211_chanset *chan;
+	char ssid[IEEE80211_NWID_LEN + 1];
+	int aci;
+	int b_sec_offset;
+	int b_bw;
+	int rssi;
+	int b_upperedge;
+	int b_loweredge;
+	int neighbor_type;
+	int i, j;
+
+	neighbor_type = ieee80211_get_type_of_neighborhood(ic);
+	IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "%d BSSes are found and the environment is %s\n",
+		ic->ic_neighbor_count, ieee80211_neighborhood_type2str(neighbor_type));
+	IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "%-32s %-7s %-9s %-9s %-9s"
+		" %-9s %-4s %-8s\n", "Beacon", "Channel", "Bandwidth",
+		"Sec20_off", "CCI_edge-", "CCI_edge+", "RSSI", "ACI_span");
+
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		TAILQ_FOREACH_SAFE(se, &as->as_scan_list[i].asl_head, ase_list, next) {
+			ise = &se->base;
+
+			memset(ssid, 0, sizeof(ssid));
+			memcpy(ssid, &ise->se_ssid[2], MIN(IEEE80211_NWID_LEN, ise->se_ssid[1]));
+
+			b_bw = ieee80211_get_max_ap_bw(ise);
+			b_sec_offset = ieee80211_get_ap_sec_chan_offset(ise);
+			chan = ieee80211_get_beacon_chanset(i, b_bw, b_sec_offset);
+			if (!chan) {
+				IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+					"%s: fail to find chanset for beacon %s "
+					"(channel %d bw %d sec20_offset %d)\n", __func__,
+					ssid, i, b_bw, b_sec_offset);
+				continue;
+			}
+			b_upperedge = ieee80211_get_chanset_cci_high_edge(ic, chan);
+			b_loweredge = ieee80211_get_chanset_cci_low_edge(ic, chan);
+
+			rssi = ise->se_rssi - IEEE80211_PSEUDO_RSSI_TRANSITON_FACTOR;
+
+			/* Classify the beacon's RSSI; default to the weakest bin */
+			aci = CHAN_NUMACIBINS - 1;
+			for (j = 0; j < CHAN_NUMACIBINS; j++) {
+				if (rssi >= g_aci_params[j].rssi) {
+					aci = j;
+					break;
+				}
+			}
+
+			IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+				"%-32s %-7d %-9s %-9d %-9d %-9d %-4d %-8s\n",
+				ssid, i, ieee80211_bw2str(b_bw), b_sec_offset,
+				b_loweredge, b_upperedge, rssi,
+				ieee80211_bw2str(g_aci_params[aci].bw));
+		}
+	}
+}
+
+/*
+ * Debug dump of the whole chanset table: one header line, then one row
+ * per chanset with its identity, CCI span edges, flags, raw CCA
+ * measurements and all ranking terms.
+ */
+static void
+ieee80211_dump_chan_table_values(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	struct ieee80211_chanset *chan;
+	int c_upperedge;
+	int c_loweredge;
+	int i, j;
+
+	IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "Dump Chanset table info:\n");
+	IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "%-7s %-8s %-9s %-11s %-9s %-9s %-7s"
+			" %-8s %-19s %-13s %-10s %-10s %-12s %-12s %-10s %-6s %-5s\n",
+			"Chanset", "Pri_chan", "Bandwidth", "Center_chan", "CCI_edge-",
+			"CCI_edge+", "Invalid", "Inactive", "CCA_Array[0~3]", "CCA_Pri[0~1]",
+			"CCI_instnt", "ACI_instnt", "CCI_longterm", "ACI_longterm",
+			"Range_cost", "Is_dfs", "Cost");
+
+	for (i = 0; i < chanset_size; i++) {
+		chan = &chanset[i];
+
+		c_upperedge = ieee80211_get_chanset_cci_high_edge(ic, chan);
+		c_loweredge = ieee80211_get_chanset_cci_low_edge(ic, chan);
+
+		IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,  "%-7d %-8d %-9s %-11d %-9d %-9d"
+			" %-7d %-8d ", i, chan->pri_chan, ieee80211_bw2str(chan->bw),
+			chan->center_chan, c_loweredge, c_upperedge, chan->invalid,
+			chan->inactive);
+
+		for (j = 0; j < ARRAY_SIZE(chan->cca_array); j++)
+			IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+					"%-4d ", chan->cca_array[j]);
+		for (j = 0; j < ARRAY_SIZE(chan->cca_pri); j++)
+			IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "%-6d ", chan->cca_pri[j]);
+
+		IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "%-10d %-10d %-12d %-12d %-10d %-6d"
+			" %-5d\n", chan->cci_instnt, chan->aci_instnt, chan->cci_longterm,
+			chan->aci_longterm, chan->range_cost, chan->is_dfs, chan->cost);
+	}
+}
+
+/*
+ * Pick the lowest-cost valid+active chanset.  A per-bandwidth best is
+ * tracked (only bandwidths between the last scan bandwidth and the
+ * system maximum are eligible), and the widest bandwidth with any
+ * candidate wins.  When automatic bandwidth selection is enabled, the
+ * widest choice is demoted one step (40->20 on 2.4G, 160->80 on 5G) if
+ * its cost exceeds maxbw_minbenefit times the narrower one's cost.
+ * Returns NULL when no eligible chanset exists.
+ */
+static struct ieee80211_chanset *
+ieee80211_get_best_chanset(struct ieee80211com *ic,
+	struct ieee80211_chanset *chanset, int chanset_size)
+{
+	struct ieee80211_chanset *best_160 = NULL;
+	struct ieee80211_chanset *best_80 = NULL;
+	struct ieee80211_chanset *best_40 = NULL;
+	struct ieee80211_chanset *best_20 = NULL;
+	struct ieee80211_chanset *best = NULL;
+	int i;
+
+	for (i = 0; i < chanset_size; i++) {
+		if ((chanset[i].bw == BW_HT160) &&
+				(ic->ic_max_system_bw >= BW_HT160) &&
+				(ic->ic_autochan_last_scan_bw <= BW_HT160) &&
+				(chanset[i].invalid == 0) &&
+				(chanset[i].inactive == 0) &&
+				(!best_160 ||
+					(chanset[i].cost < best_160->cost)))
+			best_160 = &chanset[i];
+		else if ((chanset[i].bw == BW_HT80) &&
+				(ic->ic_max_system_bw >= BW_HT80) &&
+				(ic->ic_autochan_last_scan_bw <= BW_HT80) &&
+				(chanset[i].invalid == 0) &&
+				(chanset[i].inactive == 0) &&
+				(!best_80 ||
+					(chanset[i].cost < best_80->cost)))
+			best_80 = &chanset[i];
+		else if ((chanset[i].bw == BW_HT40) &&
+				(ic->ic_max_system_bw >= BW_HT40) &&
+				(ic->ic_autochan_last_scan_bw <= BW_HT40) &&
+				(chanset[i].invalid == 0) &&
+				(chanset[i].inactive == 0) &&
+				(!best_40 ||
+					(chanset[i].cost < best_40->cost)))
+			best_40 = &chanset[i];
+		else if ((chanset[i].bw == BW_HT20) &&
+				(ic->ic_max_system_bw >= BW_HT20) &&
+				(ic->ic_autochan_last_scan_bw <= BW_HT20) &&
+				(chanset[i].invalid == 0) &&
+				(chanset[i].inactive == 0) &&
+				(!best_20 ||
+					(chanset[i].cost < best_20->cost)))
+			best_20 = &chanset[i];
+	}
+
+	/* Prefer the widest bandwidth that produced any candidate */
+	best = best_160 ? best_160 : (best_80 ? best_80 : (best_40 ? best_40 : best_20));
+
+	if (ic->ic_bw_auto_select) {
+		if (IS_IEEE80211_24G_BAND(ic)) {
+			if (best_40 && best_20 && (best_40->cost >
+					ic->ic_autochan_ranking_params.maxbw_minbenefit
+						* best_20->cost))
+				best = best_20;
+		} else {
+			if (best_160 && best_80 && (best_160->cost >
+					ic->ic_autochan_ranking_params.maxbw_minbenefit
+						* best_80->cost))
+				best = best_80;
+		}
+	}
+
+	return best;
+}
+
+/*
+ * Install the ranking-parameter set matching the current band and SCS
+ * state (2.4G has a single set; 5G has SCS-on and SCS-off variants),
+ * then log the chosen values.
+ */
+void
+ieee80211_init_chanset_ranking_params(struct ieee80211com *ic)
+{
+	if (IS_IEEE80211_24G_BAND(ic)) {
+		ic->ic_autochan_ranking_params = g_ranking_params_2g_scsoff;
+	} else {
+		if (!ic->ic_scs.scs_enable)
+			ic->ic_autochan_ranking_params = g_ranking_params_5g_scsoff;
+		else
+			ic->ic_autochan_ranking_params = g_ranking_params_5g_scson;
+	}
+
+	IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+			"Chanset ranking params:\n"
+			"cci_instnt_factor\t%d\n"
+			"aci_instnt_factor\t%d\n"
+			"cci_longterm_factor\t%d\n"
+			"aci_longterm_factor\t%d\n"
+			"range_factor\t\t%d\n"
+			"dfs_factor\t\t%d\n"
+			"min_cochan_rssi\t\t%d\n"
+			"maxbw_minbenefit\t\t%d\n"
+			"dense_cci_span\t\t%dMHz\n",
+			ic->ic_autochan_ranking_params.cci_instnt_factor,
+			ic->ic_autochan_ranking_params.aci_instnt_factor,
+			ic->ic_autochan_ranking_params.cci_longterm_factor,
+			ic->ic_autochan_ranking_params.aci_longterm_factor,
+			ic->ic_autochan_ranking_params.range_factor,
+			ic->ic_autochan_ranking_params.dfs_factor,
+			ic->ic_autochan_ranking_params.min_cochan_rssi,
+			ic->ic_autochan_ranking_params.maxbw_minbenefit,
+			ic->ic_autochan_ranking_params.dense_cci_span);
+}
+
+/*
+ * Sanity check: verify every available channel appears as a primary
+ * channel in at least one of the static 20MHz chanset tables (2.4G or
+ * 5G).  Missing channels indicate the static tables are out of date;
+ * an error is logged but nothing is changed.
+ */
+static void
+ieee80211_check_chanset_table(struct ieee80211com *ic)
+{
+	int found;
+	int i, j;
+
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		if (isclr(ic->ic_chan_avail, i))
+			continue;
+
+		found = 0;
+		for (j = 0; j < ARRAY_SIZE(g_chansets_2g_bw20); j++) {
+			if (g_chansets_2g_bw20[j].pri_chan == i) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (found)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(g_chansets_5g_bw20); j++) {
+			if (g_chansets_5g_bw20[j].pri_chan == i) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found)
+			IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+				"%s: fail to find channel %d in chanset table,"
+				" please update chanset table\n", __func__, i);
+	}
+}
+
+/*
+ * (Re)build ic->ic_autochan_table for the current band: allocate one
+ * array large enough for every static chanset table from the current
+ * bandwidth down to 20MHz, then copy those tables in, widest first.
+ * Any previous table is freed.  All measurement fields are zeroed at
+ * the end.  Returns 0 on success, -1 on allocation/lookup failure
+ * (with the table left empty).
+ */
+static int
+ieee80211_init_chanset_table(struct ieee80211com *ic)
+{
+	struct ieee80211_chanset *chanset = NULL;
+	int bw = ieee80211_get_bw(ic);
+	int total_size = 0;
+	int table_size = 0;
+	int offset = 0;
+	int band;
+
+	ieee80211_check_chanset_table(ic);
+
+	/* Drop any table left over from a previous selection run */
+	if (ic->ic_autochan_table.num) {
+		ieee80211_free(ic->ic_autochan_table.chanset);
+		ic->ic_autochan_table.chanset = NULL;
+		ic->ic_autochan_table.num = 0;
+	}
+
+	/* Size the allocation: one slot per entry of every eligible
+	 * static table (current bandwidth down to 20MHz) */
+	if (IS_IEEE80211_24G_BAND(ic)) {
+		band = IEEE80211_2_4Ghz;
+
+		total_size = ARRAY_SIZE(g_chansets_2g_bw20);
+		if (bw >= BW_HT40)
+			total_size += ARRAY_SIZE(g_chansets_2g_bw40);
+	} else {
+		band = IEEE80211_5Ghz;
+
+		total_size = ARRAY_SIZE(g_chansets_5g_bw20);
+		if (bw >= BW_HT40)
+			total_size += ARRAY_SIZE(g_chansets_5g_bw40);
+		if (bw >= BW_HT80)
+			total_size += ARRAY_SIZE(g_chansets_5g_bw80);
+		if (bw >= BW_HT160)
+			total_size += ARRAY_SIZE(g_chansets_5g_bw160);
+	}
+
+	ic->ic_autochan_table.chanset =
+		ieee80211_malloc(total_size * sizeof(*chanset), GFP_KERNEL);
+	if (!ic->ic_autochan_table.chanset) {
+		IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+			"%s: fail to allocate channel table\n", __func__);
+		return -1;
+	}
+	ic->ic_autochan_table.num = total_size;
+
+	/* Copy each static table in, halving the bandwidth each pass */
+	while (bw >= BW_HT20) {
+		chanset = ieee80211_find_chan_table(band, bw, &table_size);
+		if (!chanset) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+				"%s: fail to find channel table for band %s"
+				" and bandwidth %d\n", __func__,
+				IS_IEEE80211_24G_BAND(ic) ? "2.4G" : "5G", bw);
+			goto fail;
+		}
+
+		if ((offset + table_size) > total_size) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+				"%s: channel set table overflow for %s %s\n",
+				__func__, IS_IEEE80211_24G_BAND(ic) ? "2.4G" : "5G",
+				ieee80211_bw2str(bw));
+			goto fail;
+		}
+
+		memcpy(&ic->ic_autochan_table.chanset[offset],
+				chanset, table_size * sizeof(*chanset));
+
+		IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+			"%s: initialize chanset table for band %s bandwith %s\n",
+			__func__, IS_IEEE80211_24G_BAND(ic) ? "2.4G" : "5G",
+			ieee80211_bw2str(bw));
+
+		offset += table_size;
+		bw = bw >> 1;
+	}
+
+	ieee80211_reset_chan_table_values(ic, &ic->ic_autochan_table);
+
+	return 0;
+
+fail:
+	ieee80211_free(ic->ic_autochan_table.chanset);
+	ic->ic_autochan_table.chanset = NULL;
+	ic->ic_autochan_table.num = 0;
+
+	return -1;
+}
+
+/*
+ * Queue the scan type(s) implied by bandwidth @bw into the first free
+ * (INVALID) slot of ic->ic_autochan_scan_type, and record @bw as the
+ * last scan bandwidth.  2.4G 40MHz needs two passes (secondary above,
+ * then below) and therefore consumes two slots.  Returns -1 only when
+ * the 2.4G/40MHz case lacks two free slots, 0 otherwise.
+ *
+ * NOTE(review): if no INVALID slot exists, @index stays 0 and slot 0
+ * is overwritten — presumably callers never exceed
+ * CHAN_SELECT_SCAN_MAX entries; confirm.
+ */
+static int
+ieee80211_add_chanset_scan_type(struct ieee80211com *ic, int bw)
+{
+	char *type_str;
+	int index = 0;
+	int i;
+
+	/* Find the first free slot in the scan-type queue */
+	for (i = 0; i < CHAN_SELECT_SCAN_MAX; i++) {
+		if (ic->ic_autochan_scan_type[i] == CHAN_SELECT_SCAN_INVALID) {
+			index = i;
+			break;
+		}
+	}
+
+	ic->ic_autochan_last_scan_bw = bw;
+
+	switch (bw) {
+	case BW_HT20:
+		ic->ic_autochan_scan_type[index] = CHAN_SELECT_SCAN_BW20;
+		type_str = "BW20";
+		break;
+	case BW_HT40:
+		if (IS_IEEE80211_24G_BAND(ic)) {
+			/* 2.4G 40MHz scans twice: secondary above and below */
+			if (index > CHAN_SELECT_SCAN_MAX - 2) {
+				IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+					"%s: incorrect scan type index %d\n",
+					__func__, index);
+				return -1;
+			}
+
+			ic->ic_autochan_scan_type[index] = CHAN_SELECT_SCAN_BW40_ABOVE;
+			ic->ic_autochan_scan_type[index+1] = CHAN_SELECT_SCAN_BW40_BELOW;
+			type_str = "BW40_ABOVE BW40_BELOW";
+		} else {
+			ic->ic_autochan_scan_type[index] = CHAN_SELECT_SCAN_BW40;
+			type_str = "BW40";
+		}
+		break;
+	case BW_HT80:
+		ic->ic_autochan_scan_type[index] = CHAN_SELECT_SCAN_BW80;
+		type_str = "BW80";
+		break;
+	case BW_HT160:
+		ic->ic_autochan_scan_type[index] = CHAN_SELECT_SCAN_BW160;
+		type_str = "BW160";
+		break;
+	default:
+		type_str = "INVALID";
+		break;
+	}
+
+	IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+		"%s: add chanset scan type %s\n", __func__, type_str);
+
+	return 0;
+}
+
+/*
+ * Force the secondary-channel offset @sec_offset on every 2.4G channel
+ * that supports both secondary positions, ahead of a directional
+ * (above/below) 40MHz scan pass.  Channels without dual-secondary
+ * support or that cannot be resolved are skipped.
+ */
+static void
+ieee80211_set_chanset_sec_chan(struct ieee80211com *ic, int sec_offset)
+{
+	struct ieee80211_channel *chan;
+	int i;
+
+	for (i = 1; i <= QTN_2G_LAST_OPERATING_CHAN; i++) {
+		if (!ieee80211_dual_sec_chan_supported(ic, i))
+			continue;
+
+		chan = findchannel_any(ic, i, ic->ic_des_mode);
+		if (!is_ieee80211_chan_valid(chan)) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+				"%s: fail to find channel %d\n",
+				__func__, i);
+			continue;
+		}
+
+		IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+			"%s: set sec_chan_offset to %d for channel %d\n",
+			__func__, sec_offset, i);
+
+		ieee80211_update_sec_chan_offset(chan, sec_offset);
+	}
+}
+
+/*
+ * Return 1 when no further scan passes are queued: always on 5G (a
+ * single pass suffices), or on 2.4G when the scan-type queue is empty;
+ * 0 otherwise.
+ */
+__inline int
+ieee80211_chanset_scan_finished(struct ieee80211com *ic)
+{
+	if ((IS_IEEE80211_5G_BAND(ic)) ||
+		(ic->ic_autochan_scan_type[0] == CHAN_SELECT_SCAN_INVALID))
+		return 1;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(ieee80211_chanset_scan_finished);
+
+/*
+ * Kick off the next queued channel-selection scan pass: translate the
+ * head scan type into scan bandwidth flags, pre-set the secondary
+ * channel direction for the 2.4G above/below passes, then start the
+ * scan via ieee80211_check_scan().  Returns its result.
+ */
+int
+ieee80211_start_chanset_scan(struct ieee80211vap *vap, int scan_flags)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	char *bw_str = IEEE80211_BWSTR_20;
+
+	if ((ic->ic_autochan_scan_type[0] == CHAN_SELECT_SCAN_BW40) ||
+		(ic->ic_autochan_scan_type[0] == CHAN_SELECT_SCAN_BW40_ABOVE) ||
+		(ic->ic_autochan_scan_type[0] == CHAN_SELECT_SCAN_BW40_BELOW)) {
+		scan_flags |= IEEE80211_SCAN_BW40;
+		bw_str = IEEE80211_BWSTR_40;
+	} else if (ic->ic_autochan_scan_type[0] == CHAN_SELECT_SCAN_BW80) {
+		scan_flags |= IEEE80211_SCAN_BW80;
+		bw_str = IEEE80211_BWSTR_80;
+	}
+
+	if (ic->ic_autochan_scan_type[0] == CHAN_SELECT_SCAN_BW40_ABOVE)
+		ieee80211_set_chanset_sec_chan(ic, IEEE80211_HTINFO_CHOFF_SCA);
+	else if (ic->ic_autochan_scan_type[0] == CHAN_SELECT_SCAN_BW40_BELOW)
+		ieee80211_set_chanset_sec_chan(ic, IEEE80211_HTINFO_CHOFF_SCB);
+
+	IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "%s: Start scan with bandwidth %s\n",
+		__func__, bw_str);
+
+	return ieee80211_check_scan(vap, scan_flags, IEEE80211_SCAN_FOREVER,
+			vap->iv_des_nssid, vap->iv_des_ssid, NULL);
+}
+EXPORT_SYMBOL(ieee80211_start_chanset_scan);
+
+/*
+ * Entry point for a full channel-selection run: reset the scan-type
+ * queue, rebuild the chanset table, queue the scan pass(es) — the
+ * current bandwidth, plus one halved pass when auto-bandwidth is on,
+ * or just a 20MHz pass when both instantaneous factors are 0 — and
+ * start the first scan.  Returns 0/scan result on success, negative
+ * on setup failure.
+ */
+int
+ieee80211_start_chanset_selection(struct ieee80211vap *vap, int scan_flags)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int bw = ieee80211_get_bw(ic);
+	int ret;
+
+	ic->ic_autochan_scan_flags = scan_flags;
+	memset(ic->ic_autochan_scan_type, CHAN_SELECT_SCAN_INVALID,
+		sizeof(ic->ic_autochan_scan_type));
+
+	ret = ieee80211_init_chanset_table(ic);
+	if (ret < 0) {
+		IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+			"%s: fail to initilize channel table\n", __func__);
+		return ret;
+	}
+
+	/*
+	 * Only once scan with 20M bandwidth can gather enough information
+	 * if instantaneous factors are all 0
+	 */
+	if ((ic->ic_autochan_ranking_params.cci_instnt_factor == 0) &&
+			(ic->ic_autochan_ranking_params.aci_instnt_factor == 0))
+		bw = BW_HT20;
+
+	ret = ieee80211_add_chanset_scan_type(ic, bw);
+	if (ret < 0) {
+		IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+			"%s: fail to add scan bandwidth %s\n",
+			__func__, ieee80211_bw2str(bw));
+		return ret;
+	}
+
+	/* With auto bandwidth selection, also scan one step narrower */
+	if ((ic->ic_bw_auto_select) && (bw > BW_HT20)) {
+		ret = ieee80211_add_chanset_scan_type(ic, bw/2);
+		if (ret < 0) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+				"%s: fail to add scan bandwidth %s\n",
+				__func__, ieee80211_bw2str(bw/2));
+			return ret;
+		}
+	}
+
+	return ieee80211_start_chanset_scan(vap, scan_flags);
+}
+
+/*
+ * Pop the head of the scan-type queue: shift every entry one slot
+ * forward and mark the last slot INVALID.
+ */
+__inline void
+ieee80211_chanset_shift_scan_type(struct ieee80211com *ic)
+{
+	int i;
+
+	for (i = 0; i < CHAN_SELECT_SCAN_MAX - 1; i++)
+		ic->ic_autochan_scan_type[i] = ic->ic_autochan_scan_type[i + 1];
+	ic->ic_autochan_scan_type[CHAN_SELECT_SCAN_MAX - 1] = CHAN_SELECT_SCAN_INVALID;
+}
+
+/*
+ * Called when a scan pass completes: fold the pass's CCA results into
+ * the table and advance the scan-type queue.  If the BSS channel is
+ * already fixed, return it unchanged.  If more passes remain, return
+ * NULL so the caller starts the next scan.  Otherwise evaluate the
+ * table, pick the best chanset, apply its secondary offset and (if
+ * needed) switch bandwidth, and return the chosen channel.  When no
+ * candidate exists at >20MHz, queue a rescan at half bandwidth and
+ * return NULL.
+ */
+struct ieee80211_channel *
+ieee80211_chanset_pick_channel(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_chanset *best = NULL;
+	struct ieee80211_channel *chan = NULL;
+	int cur_bw = ieee80211_get_bw(ic);
+	int bw;
+	int ret;
+
+	ieee80211_update_scan_cca_info(ic);
+	ieee80211_chanset_shift_scan_type(ic);
+
+	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
+		IEEE80211_CSDBG(CHAN_SEL_LOG_WARN,
+			"%s: BSS channel is already configured"
+			" and bypass channel selection\n", __func__);
+		return ic->ic_bsschan;
+	}
+
+	if (!ieee80211_chanset_scan_finished(ic)) {
+		IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+			"%s: Channel selection not finished,"
+			" start next scan\n", __func__);
+		return NULL;
+	}
+
+	ieee80211_udpate_chan_table_values(ic,
+		ic->ic_autochan_table.chanset, ic->ic_autochan_table.num);
+
+	ieee80211_dump_neighbor_beacon_info(ic);
+
+	ieee80211_dump_chan_table_values(ic,
+		ic->ic_autochan_table.chanset, ic->ic_autochan_table.num);
+
+	best = ieee80211_get_best_chanset(ic,
+		ic->ic_autochan_table.chanset, ic->ic_autochan_table.num);
+
+	if (!best) {
+		/* No usable chanset: retry at half bandwidth if possible */
+		if (ic->ic_autochan_last_scan_bw > BW_HT20) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+				"%s: all candidate channels are inactive,"
+				"try to halve bandwidth and rescan\n", __func__);
+
+			bw = ic->ic_autochan_last_scan_bw >> 1;
+			ret = ieee80211_add_chanset_scan_type(ic, bw);
+			if (ret < 0) {
+				IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+					"%s: failed to add new scan tyep for "
+					"bandwidth %s\n", __func__,
+					ieee80211_bw2str(bw));
+				return NULL;
+			}
+		}
+	} else {
+		IEEE80211_CSDBG(CHAN_SEL_LOG_INFO,
+			"%s: candidate channel %d bandwidth %s sec20_offsest %d\n",
+			__func__, best->pri_chan, ieee80211_bw2str(best->bw),
+			best->sec20_offset);
+
+		chan = findchannel_any(ic, best->pri_chan, ic->ic_des_mode);
+		if (!is_ieee80211_chan_valid(chan)) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_ERR,
+				"%s: fail to find candidate channel %d\n",
+				__func__, best->pri_chan);
+			return NULL;
+		}
+
+		if (ieee80211_dual_sec_chan_supported(ic, best->pri_chan))
+			ieee80211_update_sec_chan_offset(chan, best->sec20_offset);
+
+		/* Adopt the winning chanset's bandwidth if it differs */
+		if (cur_bw != best->bw) {
+			IEEE80211_CSDBG(CHAN_SEL_LOG_INFO, "%s: change bandwidth to %s\n",
+				__func__, ieee80211_bw2str(best->bw));
+			ieee80211_change_bw(vap, best->bw, 0);
+		}
+	}
+
+	return chan;
+}
+EXPORT_SYMBOL(ieee80211_chanset_pick_channel);
+
+/*
+ * Tear down channel-selection state: free the chanset table, empty
+ * the scan-type queue and clear the saved scan flags.
+ */
+void
+ieee80211_clean_chanset_values(struct ieee80211com *ic)
+{
+	ieee80211_free(ic->ic_autochan_table.chanset);
+	ic->ic_autochan_table.chanset = NULL;
+	ic->ic_autochan_table.num = 0;
+
+	memset(ic->ic_autochan_scan_type, CHAN_SELECT_SCAN_INVALID,
+			sizeof(ic->ic_autochan_scan_type));
+	ic->ic_autochan_scan_flags = 0;
+}
+
diff --git a/drivers/qtn/wlan/ieee80211_crypto.c b/drivers/qtn/wlan/ieee80211_crypto.c
new file mode 100644
index 0000000..6c9adba
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_crypto.c
@@ -0,0 +1,634 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_crypto.c 1732 2006-09-24 21:06:25Z mentor $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 generic crypto support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+
+#include "net80211/ieee80211.h"
+#include "net80211/if_ethersubr.h"		/* XXX ETHER_HDR_LEN */
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+
+/*
+ * Table of registered cipher modules.
+ */
+static const struct ieee80211_cipher *ciphers[IEEE80211_CIPHER_MAX];
+
+static int _ieee80211_crypto_delkey(struct ieee80211vap *,
+	struct ieee80211_key *, struct ieee80211_node *);
+
+/*
+ * Default "null" key management routines.
+ */
+static int
+null_key_alloc(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+	return IEEE80211_KEYIX_NONE;
+}
+static int
+null_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k,
+	const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	ic->ic_delkey(vap, k, mac);
+	return 1;
+}
+
+/* 
+ * Use default routines to transport key to Muc
+ */
+static int
+null_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
+	const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	ic->ic_setkey(vap, k, mac);
+	return 1;
+}
+
+static void null_key_update(struct ieee80211vap *vap)
+{
+}
+
+/*
+ * Write-arounds for common operations.
+ */
+static __inline void
+cipher_detach(struct ieee80211_key *key)
+{
+	key->wk_cipher->ic_detach(key);
+}
+
+static __inline void *
+cipher_attach(struct ieee80211vap *vap, struct ieee80211_key *key)
+{
+	return key->wk_cipher->ic_attach(vap, key);
+}
+
+/* 
+ * Wrappers for driver key management methods.
+ */
+static __inline int
+dev_key_alloc(struct ieee80211vap *vap, const struct ieee80211_key *key)
+{
+	return vap->iv_key_alloc(vap, key);
+}
+
+static __inline int
+dev_key_delete(struct ieee80211vap *vap,	const struct ieee80211_key *key,
+	struct ieee80211_node *ni)
+{
+	return vap->iv_key_delete(vap, key, ni ? ni->ni_macaddr : NULL);
+}
+
+static __inline int
+dev_key_set(struct ieee80211vap *vap, const struct ieee80211_key *key,
+	const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+	return vap->iv_key_set(vap, key, mac);
+}
+
+/*
+ * Setup crypto support for a device/shared instance.
+ */
+void
+ieee80211_crypto_attach(struct ieee80211com *ic)
+{
+	/* NB: we assume everything is pre-zero'd */
+	ciphers[IEEE80211_CIPHER_NONE] = &ieee80211_cipher_none;
+}
+EXPORT_SYMBOL(ieee80211_crypto_attach);
+
+/*
+ * Teardown crypto support.
+ */
+void
+ieee80211_crypto_detach(struct ieee80211com *ic)
+{
+}
+EXPORT_SYMBOL(ieee80211_crypto_detach);
+
+/*
+ * Setup crypto support for a vap.
+ */
+void
+ieee80211_crypto_vattach(struct ieee80211vap *vap)
+{
+	int i;
+
+	/* NB: we assume everything is pre-zero'd */
+	vap->iv_def_txkey = IEEE80211_KEYIX_NONE;
+	for (i = 0; i < IEEE80211_WEP_NKID; i++)
+		ieee80211_crypto_resetkey(vap, &vap->iv_nw_keys[i],
+			IEEE80211_KEYIX_NONE);
+	ieee80211_crypto_resetkey(vap, &vap->iv_wds_peer_key,
+			IEEE80211_KEYIX_NONE);
+	/*
+	 * Initialize the driver key support routines to noop entries.
+	 * This is useful especially for the cipher test modules.
+	 */
+	vap->iv_key_alloc = null_key_alloc;
+	vap->iv_key_set = null_key_set;
+	vap->iv_key_delete = null_key_delete;
+	vap->iv_key_update_begin = null_key_update;
+	vap->iv_key_update_end = null_key_update;
+}
+EXPORT_SYMBOL(ieee80211_crypto_vattach);
+
+/*
+ * Teardown crypto support for a vap.
+ */
+void
+ieee80211_crypto_vdetach(struct ieee80211vap *vap)
+{
+	ieee80211_crypto_delglobalkeys(vap);
+}
+EXPORT_SYMBOL(ieee80211_crypto_vdetach);
+
+/*
+ * Register a crypto cipher module.
+ */
+void
+ieee80211_crypto_register(const struct ieee80211_cipher *cip)
+{
+	if (cip->ic_cipher >= IEEE80211_CIPHER_MAX) {
+		printf("%s: cipher %s has an invalid cipher index %u\n",
+			__func__, cip->ic_name, cip->ic_cipher);
+		return;
+	}
+	if (ciphers[cip->ic_cipher] != NULL && ciphers[cip->ic_cipher] != cip) {
+		printf("%s: cipher %s registered with a different template\n",
+			__func__, cip->ic_name);
+		return;
+	}
+	ciphers[cip->ic_cipher] = cip;
+}
+EXPORT_SYMBOL(ieee80211_crypto_register);
+
+/*
+ * Unregister a crypto cipher module.
+ */
+void
+ieee80211_crypto_unregister(const struct ieee80211_cipher *cip)
+{
+	if (cip->ic_cipher >= IEEE80211_CIPHER_MAX) {
+		printf("%s: cipher %s has an invalid cipher index %u\n",
+			__func__, cip->ic_name, cip->ic_cipher);
+		return;
+	}
+	if (ciphers[cip->ic_cipher] != NULL && ciphers[cip->ic_cipher] != cip) {
+		printf("%s: cipher %s registered with a different template\n",
+			__func__, cip->ic_name);
+		return;
+	}
+	/* NB: don't complain about not being registered */
+	/* XXX disallow if references */
+	ciphers[cip->ic_cipher] = NULL;
+}
+EXPORT_SYMBOL(ieee80211_crypto_unregister);
+
+int
+ieee80211_crypto_available(u_int cipher)
+{
+	return cipher < IEEE80211_CIPHER_MAX && ciphers[cipher] != NULL;
+}
+EXPORT_SYMBOL(ieee80211_crypto_available);
+
+/* XXX well-known names! */
+static const char *cipher_modnames[] = {
+	"wlan_wep",	/* IEEE80211_CIPHER_WEP */
+	"wlan_tkip",	/* IEEE80211_CIPHER_TKIP */
+	"wlan_aes_ocb",	/* IEEE80211_CIPHER_AES_OCB */
+	"wlan_ccmp",	/* IEEE80211_CIPHER_AES_CCM */
+	"wlan_ckip",	/* IEEE80211_CIPHER_CKIP */
+};
+
+/*
+ * Establish a relationship between the specified key and cipher
+ * and, if necessary, allocate a hardware index from the driver.
+ * Note that when a fixed key index is required it must be specified
+ * and we blindly assign it w/o consulting the driver (XXX).
+ *
+ * This must be the first call applied to a key; all the other key
+ * routines assume wk_cipher is setup.
+ *
+ * Locking must be handled by the caller using:
+ *	ieee80211_key_update_begin(vap);
+ *	ieee80211_key_update_end(vap);
+ */
+int
+ieee80211_crypto_newkey(struct ieee80211vap *vap,
+	int cipher, int flags, struct ieee80211_key *key)
+{
+#define	N(a)	(sizeof(a) / sizeof(a[0]))
+	const struct ieee80211_cipher *cip;
+	void *keyctx;
+	int oflags;
+
+	/*
+	 * Validate cipher and set reference to cipher routines.
+	 */
+	if (cipher >= IEEE80211_CIPHER_MAX) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+			"%s: invalid cipher %u\n", __func__, cipher);
+		vap->iv_stats.is_crypto_badcipher++;
+		return 0;
+	}
+	cip = ciphers[cipher];
+	if (cip == NULL) {
+		/*
+		 * Auto-load cipher module if we have a well-known name
+		 * for it.  It might be better to use string names rather
+		 * than numbers and craft a module name based on the cipher
+		 * name; e.g. wlan_cipher_<cipher-name>.
+		 */
+		if (cipher < N(cipher_modnames)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+				"%s: unregistered cipher %u, load module %s\n",
+				__func__, cipher, cipher_modnames[cipher]);
+			ieee80211_load_module(cipher_modnames[cipher]);
+			/*
+			 * If cipher module loaded it should immediately
+			 * call ieee80211_crypto_register which will fill
+			 * in the entry in the ciphers array.
+			 */
+			cip = ciphers[cipher];
+		}
+		if (cip == NULL) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+				"%s: unable to load cipher %u, module %s\n",
+				__func__, cipher,
+				cipher < N(cipher_modnames) ?
+					cipher_modnames[cipher] : "<unknown>");
+			vap->iv_stats.is_crypto_nocipher++;
+			return 0;
+		}
+	}
+
+	oflags = key->wk_flags;
+	flags &= IEEE80211_KEY_COMMON;
+	/*
+	 * If the hardware does not support the cipher then
+	 * fallback to a host-based implementation.
+	 */
+	if ((vap->iv_caps & (1<<cipher)) == 0) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+		    "%s: no h/w support for cipher %s, falling back to s/w\n",
+		    __func__, cip->ic_name);
+		flags |= IEEE80211_KEY_SWCRYPT;
+	}
+	/*
+	 * Hardware TKIP with software MIC is an important
+	 * combination; we handle it by flagging each key,
+	 * the cipher modules honor it.
+	 */
+	if (cipher == IEEE80211_CIPHER_TKIP) {
+		if ((vap->iv_caps & IEEE80211_C_TKIPMIC) == 0) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+				"%s: no h/w support for TKIP MIC, falling back to s/w\n",
+				__func__);        
+			flags |= IEEE80211_KEY_SWMIC;
+		}
+	}
+
+	/*
+	 * Bind cipher to key instance.  Note we do this
+	 * after checking the device capabilities so the
+	 * cipher module can optimize space usage based on
+	 * whether or not it needs to do the cipher work.
+	 */
+	if (key->wk_cipher != cip || key->wk_flags != flags) {
+again:
+		/*
+		 * Fill in the flags so cipher modules can see s/w
+		 * crypto requirements and potentially allocate
+		 * different state and/or attach different method
+		 * pointers.
+		 *
+		 * XXX this is not right when s/w crypto fallback
+		 *     fails and we try to restore previous state.
+		 */
+		key->wk_flags = flags;
+		keyctx = cip->ic_attach(vap, key);
+		if (keyctx == NULL) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+				"%s: unable to attach cipher %s\n",
+				__func__, cip->ic_name);
+			key->wk_flags = oflags;	/* restore old flags */
+			vap->iv_stats.is_crypto_attachfail++;
+			return 0;
+		}
+		cipher_detach(key);
+		key->wk_cipher = cip;		/* XXX refcnt? */
+		key->wk_private = keyctx;
+	}
+	/*
+	 * Commit to requested usage so driver can see the flags.
+	 */
+	key->wk_flags = flags;
+
+	/*
+	 * Ask the driver for a key index if we don't have one.
+	 * Note that entries in the global key table always have
+	 * an index; this means it's safe to call this routine
+	 * for these entries just to setup the reference to the
+	 * cipher template.  Note also that when using software
+	 * crypto we also call the driver to give us a key index.
+	 */
+	if (key->wk_keyix == IEEE80211_KEYIX_NONE) {
+		key->wk_keyix = dev_key_alloc(vap, key);
+		if (key->wk_keyix == IEEE80211_KEYIX_NONE) {
+			/*
+			 * Driver has no room; fallback to doing crypto
+			 * in the host.  We change the flags and start the
+			 * procedure over.  If we get back here then there's
+			 * no hope and we bail.  Note that this can leave
+			 * the key in a inconsistent state if the caller
+			 * continues to use it.
+			 */
+			if ((key->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
+				vap->iv_stats.is_crypto_swfallback++;
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+					"%s: no h/w resources for cipher %s, "
+					"falling back to s/w\n",
+					__func__, cip->ic_name);
+				oflags = key->wk_flags;
+				flags |= IEEE80211_KEY_SWCRYPT;
+				if (cipher == IEEE80211_CIPHER_TKIP)
+					flags |= IEEE80211_KEY_SWMIC;
+				goto again;
+			}
+			vap->iv_stats.is_crypto_keyfail++;
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+				"%s: unable to setup cipher %s\n",
+				__func__, cip->ic_name);
+			return 0;
+		}
+	}
+	return 1;
+#undef N
+}
+EXPORT_SYMBOL(ieee80211_crypto_newkey);
+
+/*
+ * Remove the key (no locking, for internal use).
+ */
+static int
+_ieee80211_crypto_delkey(struct ieee80211vap *vap, struct ieee80211_key *key,
+	struct ieee80211_node *ni)
+{
+	u_int16_t keyix;
+
+	KASSERT(key->wk_cipher != NULL, ("No cipher!"));
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+		"%s: %s keyix %u flags 0x%x tsc %llu len %u\n",
+		__func__, key->wk_cipher->ic_name,
+		key->wk_keyix, key->wk_flags,
+		key->wk_keytsc, key->wk_keylen);
+
+	keyix = key->wk_keyix;
+	if (keyix != IEEE80211_KEYIX_NONE) {
+		/*
+		 * Remove hardware entry.
+		 */
+		/* XXX key cache */
+		if (!dev_key_delete(vap, key, ni)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+				"%s: driver did not delete key index %u\n",
+				__func__, keyix);
+			vap->iv_stats.is_crypto_delkey++;
+			/* XXX recovery? */
+		}
+	}
+
+	cipher_detach(key);
+	memset(key, 0, sizeof(*key));
+	ieee80211_crypto_resetkey(vap, key, IEEE80211_KEYIX_NONE);
+	return 1;
+}
+
+/*
+ * Remove the specified key.
+ */
+int
+ieee80211_crypto_delkey(struct ieee80211vap *vap, struct ieee80211_key *key,
+	struct ieee80211_node *ni)
+{
+	int status;
+
+	ieee80211_key_update_begin(vap);
+	status = _ieee80211_crypto_delkey(vap, key, ni);
+	ieee80211_key_update_end(vap);
+
+	return status;
+}
+EXPORT_SYMBOL(ieee80211_crypto_delkey);
+
+/*
+ * Clear the global key table.
+ */
+void
+ieee80211_crypto_delglobalkeys(struct ieee80211vap *vap)
+{
+	int i;
+
+	ieee80211_key_update_begin(vap);
+	for (i = 0; i < IEEE80211_WEP_NKID; i++)
+		(void) _ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[i], NULL);
+	_ieee80211_crypto_delkey(vap, &vap->iv_wds_peer_key, NULL);
+	ieee80211_key_update_end(vap);
+}
+EXPORT_SYMBOL(ieee80211_crypto_delglobalkeys);
+
+/*
+ * Set the contents of the specified key.
+ *
+ * Locking must be handled by the caller using:
+ *	ieee80211_key_update_begin(vap);
+ *	ieee80211_key_update_end(vap);
+ */
+int
+ieee80211_crypto_setkey(struct ieee80211vap *vap, struct ieee80211_key *key,
+	const u_int8_t macaddr[IEEE80211_ADDR_LEN],
+	struct ieee80211_node *ni)
+{
+	const struct ieee80211_cipher *cip = key->wk_cipher;
+	int ret;
+
+	KASSERT(cip != NULL, ("No cipher!"));
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+		"%s: %s keyix %u flags 0x%x mac %s  tsc %llu len %u\n",
+		__func__, cip->ic_name, key->wk_keyix,
+		key->wk_flags, ether_sprintf(macaddr),
+		key->wk_keytsc, key->wk_keylen);
+
+	/*
+	 * Give cipher a chance to validate key contents.
+	 * XXX should happen before modifying state.
+	 */
+	if (!cip->ic_setkey(key)) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+			"%s: cipher %s rejected key index %u len %u flags 0x%x\n",
+			__func__, cip->ic_name, key->wk_keyix,
+			key->wk_keylen, key->wk_flags);
+		vap->iv_stats.is_crypto_setkey_cipher++;
+		return 0;
+	}
+	if (key->wk_keyix == IEEE80211_KEYIX_NONE) {
+		/* XXX nothing allocated, should not happen */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+			"%s: no key index; should not happen!\n", __func__);
+		vap->iv_stats.is_crypto_setkey_nokey++;
+		return 0;
+	}
+	ret = dev_key_set(vap, key, macaddr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_crypto_setkey);
+
+/*
+ * Add privacy headers appropriate for the specified key.
+ */
+struct ieee80211_key *
+ieee80211_crypto_encap(struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_key *k;
+	struct ieee80211_frame *wh;
+	const struct ieee80211_cipher *cip;
+	u_int8_t keyid;
+
+	/*
+	 * Multicast traffic always uses the multicast key.
+	 * Otherwise if a unicast key is set we use that and
+	 * it is always key index 0.  When no unicast key is
+	 * set we fall back to the default transmit key.
+	 */
+	wh = (struct ieee80211_frame *)skb->data;
+	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+	    ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
+		if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+				wh->i_addr1,
+				"no default transmit key (%s) deftxkey %u",
+				__func__, vap->iv_def_txkey);
+			vap->iv_stats.is_tx_nodefkey++;
+			return NULL;
+		}
+		keyid = vap->iv_def_txkey;
+		k = &vap->iv_nw_keys[vap->iv_def_txkey];
+	} else {
+		keyid = 0;
+		k = &ni->ni_ucastkey;
+	}
+	cip = k->wk_cipher;
+	if (skb_headroom(skb) < cip->ic_header) {
+		/*
+		 * Should not happen; ieee80211_skbhdr_adjust should
+		 * have allocated enough space for all headers.
+		 */
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr1,
+			"%s: malformed packet for cipher %s; headroom %u",
+			__func__, cip->ic_name, skb_headroom(skb));
+		vap->iv_stats.is_tx_noheadroom++;
+		return NULL;
+	}
+	return (cip->ic_encap(k, skb, keyid << 6) ? k : NULL);
+}
+EXPORT_SYMBOL(ieee80211_crypto_encap);
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame that has the Protected Frame bit set.
+ */
+struct ieee80211_key *
+ieee80211_crypto_decap(struct ieee80211_node *ni, struct sk_buff *skb, int hdrlen)
+{
+#define	IEEE80211_WEP_HDRLEN	(IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN)
+#define	IEEE80211_WEP_MINLEN \
+	(sizeof(struct ieee80211_frame) + \
+	IEEE80211_WEP_HDRLEN + IEEE80211_WEP_CRCLEN)
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_key *k;
+	struct ieee80211_frame *wh;
+	const struct ieee80211_cipher *cip;
+	const u_int8_t *ivp;
+	u_int8_t keyid;
+
+	/* NB: this minimum size data frame could be bigger */
+	if (skb->len < IEEE80211_WEP_MINLEN) {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"%s: WEP data frame too short, len %u",
+			__func__, skb->len);
+		vap->iv_stats.is_rx_tooshort++;	/* XXX need unique stat? */
+		return NULL;
+	}
+	/*
+	 * Locate the key. If unicast and there is no unicast
+	 * key then we fall back to the key id in the header.
+	 * This assumes unicast keys are only configured when
+	 * the key id in the header is meaningless (typically 0).
+	 */
+	wh = (struct ieee80211_frame *) skb->data;
+	ivp = skb->data + hdrlen;
+	keyid = ivp[IEEE80211_WEP_IVLEN];
+	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+	    ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none)
+		k = &vap->iv_nw_keys[keyid >> 6];
+	else
+		k = &ni->ni_ucastkey;
+
+	cip = k->wk_cipher;
+	return (cip->ic_decap(k, skb, hdrlen) ? k : NULL);
+#undef IEEE80211_WEP_MINLEN
+#undef IEEE80211_WEP_HDRLEN
+}
+EXPORT_SYMBOL(ieee80211_crypto_decap);
diff --git a/drivers/qtn/wlan/ieee80211_crypto_ccmp.c b/drivers/qtn/wlan/ieee80211_crypto_ccmp.c
new file mode 100644
index 0000000..f513e0d
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_crypto_ccmp.c
@@ -0,0 +1,661 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_crypto_ccmp.c 2144 2007-02-21 20:58:48Z proski $
+ */
+
+/*
+ * IEEE 802.11i AES-CCMP crypto support.
+ *
+ * Part of this module is derived from similar code in the Host
+ * AP driver. The code is used with the consent of the author and
+ * it's license is included below.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/init.h>
+
+#include <linux/crypto.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include <linux/scatterlist.h>
+#else
+#include <asm/scatterlist.h>
+#endif
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+
+#define AES_BLOCK_LEN 16
+
+struct ccmp_ctx {
+	struct ieee80211vap *cc_vap;	/* for diagnostics + statistics */
+	struct ieee80211com *cc_ic;
+	struct crypto_cipher *cc_tfm;
+};
+
+static void *ccmp_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void ccmp_detach(struct ieee80211_key *);
+static int ccmp_setkey(struct ieee80211_key *);
+static int ccmp_encap(struct ieee80211_key *, struct sk_buff *, u_int8_t);
+static int ccmp_decap(struct ieee80211_key *, struct sk_buff *, int);
+static int ccmp_enmic(struct ieee80211_key *, struct sk_buff *, int);
+static int ccmp_demic(struct ieee80211_key *, struct sk_buff *, int);
+
+static const struct ieee80211_cipher ccmp = {
+	.ic_name	= "AES-CCM",
+	.ic_cipher	= IEEE80211_CIPHER_AES_CCM,
+	.ic_header	= IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
+			  IEEE80211_WEP_EXTIVLEN,
+	.ic_trailer	= IEEE80211_WEP_MICLEN,
+	.ic_miclen	= 0,
+	.ic_attach	= ccmp_attach,
+	.ic_detach	= ccmp_detach,
+	.ic_setkey	= ccmp_setkey,
+	.ic_encap	= ccmp_encap,
+	.ic_decap	= ccmp_decap,
+	.ic_enmic	= ccmp_enmic,
+	.ic_demic	= ccmp_demic,
+};
+
+static int ccmp_encrypt(struct ieee80211_key *, struct sk_buff *, int);
+static int ccmp_decrypt(struct ieee80211_key *, u_int64_t, struct sk_buff *, int);
+
+static void *
+ccmp_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+	struct ccmp_ctx *ctx;
+
+	_MOD_INC_USE(THIS_MODULE, return NULL);
+
+	MALLOC(ctx, struct ccmp_ctx *, sizeof(struct ccmp_ctx),
+		M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (ctx == NULL) {
+		vap->iv_stats.is_crypto_nomem++;
+		_MOD_DEC_USE(THIS_MODULE);
+		return NULL;
+	}
+
+	ctx->cc_vap = vap;
+	ctx->cc_ic = vap->iv_ic;
+	return ctx;
+}
+
+static void
+ccmp_detach(struct ieee80211_key *k)
+{
+	struct ccmp_ctx *ctx = k->wk_private;
+
+	if (ctx->cc_tfm != NULL)
+		crypto_free_cipher(ctx->cc_tfm);
+	FREE(ctx, M_DEVBUF);
+
+	_MOD_DEC_USE(THIS_MODULE);
+}
+
+static int
+ccmp_setkey(struct ieee80211_key *k)
+{
+	struct ccmp_ctx *ctx = k->wk_private;
+
+	if (k->wk_keylen != (128 / NBBY)) {
+		IEEE80211_DPRINTF(ctx->cc_vap, IEEE80211_MSG_CRYPTO,
+			"%s: Invalid key length %u, expecting %u\n",
+			__func__, k->wk_keylen, 128 / NBBY);
+		return 0;
+	}
+	
+	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
+		if (ctx->cc_tfm == NULL)
+			ctx->cc_tfm = crypto_alloc_cipher("aes", 0,
+							  CRYPTO_ALG_ASYNC);
+		
+		if (ctx->cc_tfm == NULL) {
+			IEEE80211_DPRINTF(ctx->cc_vap, IEEE80211_MSG_CRYPTO,
+				"%s: Tried to add a software crypto key, but software crypto not available\n",
+				__func__);
+			return 0;
+		}
+		
+		crypto_cipher_setkey(ctx->cc_tfm, k->wk_key, k->wk_keylen);
+	}
+	return 1;
+}
+
+/*
+ * Add privacy headers appropriate for the specified key.
+ */
+static int
+ccmp_encap(struct ieee80211_key *k, struct sk_buff *skb, u_int8_t keyid)
+{
+	struct ccmp_ctx *ctx = k->wk_private;
+	struct ieee80211com *ic = ctx->cc_ic;
+	u_int8_t *ivp;
+	int hdrlen;
+
+	hdrlen = ieee80211_hdrspace(ic, skb->data);
+
+	/*
+	 * Copy down 802.11 header and add the IV, KeyID, and ExtIV.
+	 */
+	ivp = skb_push(skb, ccmp.ic_header);
+	memmove(ivp, ivp + ccmp.ic_header, hdrlen);
+	ivp += hdrlen;
+
+	k->wk_keytsc++;		/* XXX wrap at 48 bits */
+	ivp[0] = k->wk_keytsc >> 0;		/* PN0 */
+	ivp[1] = k->wk_keytsc >> 8;		/* PN1 */
+	ivp[2] = 0;				/* Reserved */
+	ivp[3] = keyid | IEEE80211_WEP_EXTIV;	/* KeyID | ExtIV */
+	ivp[4] = k->wk_keytsc >> 16;		/* PN2 */
+	ivp[5] = k->wk_keytsc >> 24;		/* PN3 */
+	ivp[6] = k->wk_keytsc >> 32;		/* PN4 */
+	ivp[7] = k->wk_keytsc >> 40;		/* PN5 */
+
+	/*
+	 * Finally, do software encrypt if needed.
+	 */
+	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) &&
+	    !ccmp_encrypt(k, skb, hdrlen))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Add MIC to the frame as needed.
+ */
+static int
+ccmp_enmic(struct ieee80211_key *k, struct sk_buff *skb, int force)
+{
+	return 1;
+}
+
+static __inline uint64_t
+READ_6(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5)
+{
+	uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24);
+	uint16_t iv16 = (b4 << 0) | (b5 << 8);
+	return (((uint64_t)iv16) << 32) | iv32;
+}
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame. The specified key should be correct but
+ * is also verified.
+ */
+static int
+ccmp_decap(struct ieee80211_key *k, struct sk_buff *skb, int hdrlen)
+{
+	struct ccmp_ctx *ctx = k->wk_private;
+	struct ieee80211vap *vap = ctx->cc_vap;
+	struct ieee80211_frame *wh;
+	uint8_t *ivp;
+	uint64_t pn;
+	u_int8_t tid;
+
+	/*
+	 * Header should have extended IV and sequence number;
+	 * verify the former and validate the latter.
+	 */
+	wh = (struct ieee80211_frame *)skb->data;
+	ivp = skb->data + hdrlen;
+	if ((ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV) == 0) {
+		/*
+		 * No extended IV; discard frame.
+		 */
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+			"%s", "Missing ExtIV for AES-CCM cipher");
+		vap->iv_stats.is_rx_ccmpformat++;
+		return 0;
+	}
+	tid = 0;
+	if (IEEE80211_QOS_HAS_SEQ(wh)) 
+		tid = ((struct ieee80211_qosframe *)wh)->i_qos[0] & IEEE80211_QOS_TID;
+	/* NB: assume IEEE80211_WEP_MINLEN covers the extended IV */ 
+	pn = READ_6(ivp[0], ivp[1], ivp[4], ivp[5], ivp[6], ivp[7]);
+	if (pn <= k->wk_keyrsc[tid]) {
+		/*
+		 * Replay violation.
+		 */
+		ieee80211_notify_replay_failure(vap, wh, k, pn);
+		vap->iv_stats.is_rx_ccmpreplay++;
+		return 0;
+	}
+
+	/*
+	 * Check if the device handled the decrypt in hardware.
+	 * If so we just strip the header; otherwise we need to
+	 * handle the decrypt in software.  Note that for the
+	 * latter we leave the header in place for use in the
+	 * decryption work.
+	 */
+	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) &&
+	    !ccmp_decrypt(k, pn, skb, hdrlen))
+		return 0;
+
+	/*
+	 * Copy up 802.11 header and strip crypto bits.
+	 */
+	memmove(skb->data + ccmp.ic_header, skb->data, hdrlen);
+	skb_pull(skb, ccmp.ic_header);
+	while (skb->next != NULL)
+		skb = skb->next;
+	skb_trim(skb, skb->len - ccmp.ic_trailer);
+
+	/*
+	 * Ok to update rsc now.
+	 */
+	k->wk_keyrsc[tid] = pn;
+
+	return 1;
+}
+
+/*
+ * Verify and strip MIC from the frame.
+ */
+static int
+ccmp_demic(struct ieee80211_key *k, struct sk_buff *skb, int hdrlen)
+{
+	return 1;
+}
+
+static __inline void
+xor_block(u8 *b, const u8 *a, size_t len)
+{
+	int i;
+	for (i = 0; i < len; i++)
+		b[i] ^= a[i];
+}
+
+static void
+rijndael_encrypt(struct crypto_cipher *tfm, const void *src, void *dst)
+{
+	crypto_cipher_encrypt_one(tfm, dst, src);
+}
+
+/*
+ * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ */
+
+static void
+ccmp_init_blocks(struct crypto_cipher *tfm, struct ieee80211_frame *wh,
+	u_int64_t pn, size_t dlen,
+	uint8_t b0[AES_BLOCK_LEN], uint8_t aad[2 * AES_BLOCK_LEN],
+	uint8_t auth[AES_BLOCK_LEN], uint8_t s0[AES_BLOCK_LEN])
+{
+#define	IS_QOS_DATA(wh)	IEEE80211_QOS_HAS_SEQ(wh)
+
+	/* CCM Initial Block:
+	 * Flag (Include authentication header, M=3 (8-octet MIC),
+	 *       L=1 (2-octet Dlen))
+	 * Nonce: 0x00 | A2 | PN
+	 * Dlen */
+	b0[0] = 0x59;
+	/* NB: b0[1] set below */
+	IEEE80211_ADDR_COPY(b0 + 2, wh->i_addr2);
+	b0[8] = pn >> 40;
+	b0[9] = pn >> 32;
+	b0[10] = pn >> 24;
+	b0[11] = pn >> 16;
+	b0[12] = pn >> 8;
+	b0[13] = pn >> 0;
+	b0[14] = (dlen >> 8) & 0xff;
+	b0[15] = dlen & 0xff;
+
+	/* AAD:
+	 * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
+	 * A1 | A2 | A3
+	 * SC with bits 4..15 (seq#) masked to zero
+	 * A4 (if present)
+	 * QC (if present)
+	 */
+	aad[0] = 0;	/* AAD length >> 8 */
+	/* NB: aad[1] set below */
+	aad[2] = wh->i_fc[0] & 0x8f;	/* XXX magic #s */
+	aad[3] = wh->i_fc[1] & 0xc7;	/* XXX magic #s */
+	/* NB: we know 3 addresses are contiguous */
+	memcpy(aad + 4, wh->i_addr1, 3 * IEEE80211_ADDR_LEN);
+	aad[22] = wh->i_seq[0] & IEEE80211_SEQ_FRAG_MASK;
+	aad[23] = 0; /* all bits masked */
+	/*
+	 * Construct variable-length portion of AAD based
+	 * on whether this is a 4-address frame/QOS frame.
+	 * We always zero-pad to 32 bytes before running it
+	 * through the cipher.
+	 *
+	 * We also fill in the priority bits of the CCM
+	 * initial block as we know whether or not we have
+	 * a QOS frame.
+	 */
+	if (IEEE80211_IS_4ADDRESS(wh)) {
+		IEEE80211_ADDR_COPY(aad + 24,
+			((struct ieee80211_frame_addr4 *)wh)->i_addr4);
+		if (IS_QOS_DATA(wh)) {
+			struct ieee80211_qosframe_addr4 *qwh4 =
+				(struct ieee80211_qosframe_addr4 *) wh;
+			aad[30] = qwh4->i_qos[0] & 0x0f;/* just priority bits */
+			aad[31] = 0;
+			b0[1] = aad[30];
+			aad[1] = 22 + IEEE80211_ADDR_LEN + 2;
+		} else {
+			*(u_int16_t *)&aad[30] = 0;
+			b0[1] = 0;
+			aad[1] = 22 + IEEE80211_ADDR_LEN;
+		}
+	} else {
+		if (IS_QOS_DATA(wh)) {
+			struct ieee80211_qosframe *qwh =
+				(struct ieee80211_qosframe*) wh;
+			aad[24] = qwh->i_qos[0] & 0x0f;	/* just priority bits */
+			aad[25] = 0;
+			b0[1] = aad[24];
+			aad[1] = 22 + 2;
+		} else {
+			*(u_int16_t *)&aad[24] = 0;
+			b0[1] = 0;
+			aad[1] = 22;
+		}
+		*(u_int16_t *)&aad[26] = 0;
+		*(u_int32_t *)&aad[28] = 0;
+	}
+
+	/* Start with the first block and AAD */
+	rijndael_encrypt(tfm, b0, auth);
+	xor_block(auth, aad, AES_BLOCK_LEN);
+	rijndael_encrypt(tfm, auth, auth);
+	xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
+	rijndael_encrypt(tfm, auth, auth);
+	b0[0] &= 0x07;
+	b0[14] = b0[15] = 0;
+	rijndael_encrypt(tfm, b0, s0);
+#undef	IS_QOS_DATA
+}
+
+#define	CCMP_ENCRYPT(_i, _b, _b0, _pos, _e, _len) do {	\
+	/* Authentication */				\
+	xor_block(_b, _pos, _len);			\
+	rijndael_encrypt(ctx->cc_tfm, _b, _b);		\
+	/* Encryption, with counter */			\
+	_b0[14] = (_i >> 8) & 0xff;			\
+	_b0[15] = _i & 0xff;				\
+	rijndael_encrypt(ctx->cc_tfm, _b0, _e);		\
+	xor_block(_pos, _e, _len);			\
+} while (0)
+
+/*
+ * Perform software AES-CCM encryption of an skb chain.  The 802.11
+ * header (hdrlen bytes) and the CCMP header are excluded; the
+ * remaining payload is run through CCM block by block, including
+ * blocks that straddle two adjacent buffers, and the truncated MIC
+ * is appended to the last buffer in the chain.
+ * Returns 1 on success, 0 if the tail buffer has no room for the MIC.
+ */
+static int
+ccmp_encrypt(struct ieee80211_key *key, struct sk_buff *skb0, int hdrlen)
+{
+	struct ccmp_ctx *ctx = key->wk_private;
+	struct ieee80211_frame *wh = (struct ieee80211_frame *) skb0->data;
+	struct sk_buff *skb;
+	int data_len, i;
+	uint8_t aad[2 * AES_BLOCK_LEN], b0[AES_BLOCK_LEN], b[AES_BLOCK_LEN];
+	uint8_t e[AES_BLOCK_LEN], s0[AES_BLOCK_LEN];
+	uint8_t *mic, *pos;
+	u_int space;
+
+	ctx->cc_vap->iv_stats.is_crypto_ccmp++;
+
+	/* Sum the payload length over the whole chain; skb is left
+	 * pointing at the last buffer (where the MIC will go). */
+	skb = skb0;
+	data_len = skb->len;
+	while (skb->next != NULL) {
+		skb = skb->next;
+		data_len += skb->len;
+	}
+	data_len -= hdrlen + ccmp.ic_header;
+	if (skb_tailroom(skb) < ccmp.ic_trailer) {
+		/* NB: should not happen */
+		IEEE80211_NOTE_MAC(ctx->cc_vap, IEEE80211_MSG_CRYPTO,
+			wh->i_addr1, "No room for %s MIC, tailroom %u",
+			ccmp.ic_name, skb_tailroom(skb));
+		/* XXX statistic */
+		return 0;
+	}
+	/* Build counter block b0, AAD, initial CBC-MAC state b and
+	 * the S0 keystream block used to mask the MIC. */
+	ccmp_init_blocks(ctx->cc_tfm, wh, key->wk_keytsc,
+		data_len, b0, aad, b, s0);
+
+	/* CTR counter starts at 1; counter 0 produced S0 above. */
+	i = 1;
+	skb = skb0;
+	pos = skb->data + hdrlen + ccmp.ic_header;
+	/* NB: assumes header is entirely in first skbuf */
+	space = skb->len - (hdrlen + ccmp.ic_header);
+	for (;;) {
+		if (space > data_len)
+			space = data_len;
+		/*
+		 * Do full blocks.
+		 */
+		while (space >= AES_BLOCK_LEN) {
+			CCMP_ENCRYPT(i, b, b0, pos, e, AES_BLOCK_LEN);
+			pos += AES_BLOCK_LEN, space -= AES_BLOCK_LEN;
+			data_len -= AES_BLOCK_LEN;
+			i++;
+		}
+		if (data_len <= 0)		/* no more data */
+			break;
+		if (skb->next == NULL) {	/* last buffer */
+			if (space != 0) {
+				/*
+				 * Short last block.
+				 */
+				CCMP_ENCRYPT(i, b, b0, pos, e, space);
+			}
+			break;
+		}
+		skb = skb->next;
+		if (space != 0) {
+			uint8_t *pos_next;
+			u_int space_next;
+			u_int len;
+
+			/*
+			 * Block straddles buffers, split references.  We
+			 * do not handle splits that require >2 buffers.
+			 */
+			pos_next = skb->data;
+			len = min(data_len, AES_BLOCK_LEN);
+			space_next = len > space ? len - space : 0;
+			KASSERT(skb->len >= space_next,
+				("not enough data in following buffer, "
+				"skb len %u need %u\n", skb->len, space_next));
+
+			/* Fold the second fragment into the MAC first, then
+			 * run the split block: CCMP_ENCRYPT handles the part
+			 * in this buffer, and the leftover keystream bytes in
+			 * e (offset space) encrypt the part in the next. */
+			xor_block(b + space, pos_next, space_next);
+			CCMP_ENCRYPT(i, b, b0, pos, e, space);
+			xor_block(pos_next, e + space, space_next);
+			data_len -= len;
+			/* XXX could check for data_len <= 0 */
+			i++;
+
+			pos = pos_next + space_next;
+			space = skb->len - space_next;
+		} else {
+			/*
+			 * Setup for next buffer.
+			 */
+			pos = skb->data;
+			space = skb->len;
+		}
+	}
+	/* tack on MIC: CBC-MAC value masked with the S0 keystream */
+	mic = skb_put(skb, ccmp.ic_trailer);
+	for (i = 0; i < ccmp.ic_trailer; i++)
+		mic[i] = b[i] ^ s0[i];
+	return 1;
+}
+#undef CCMP_ENCRYPT
+
+/*
+ * Process one chunk of _len bytes at _pos for decryption:
+ * XOR the ciphertext in place with the AES-CTR keystream for
+ * counter _i (big-endian in bytes 14-15 of _b0), then fold the
+ * recovered plaintext into the running CBC-MAC accumulator _a.
+ */
+#define	CCMP_DECRYPT(_i, _b, _b0, _pos, _a, _len) do {	\
+	/* Decrypt, with counter */			\
+	_b0[14] = (_i >> 8) & 0xff;			\
+	_b0[15] = _i & 0xff;				\
+	rijndael_encrypt(ctx->cc_tfm, _b0, _b);		\
+	xor_block(_pos, _b, _len);			\
+	/* Authentication */				\
+	xor_block(_a, _pos, _len);			\
+	rijndael_encrypt(ctx->cc_tfm, _a, _a);		\
+} while (0)
+
+/*
+ * Perform software AES-CCM decryption of an skb chain using the
+ * packet number pn extracted from the CCMP header.  The payload is
+ * decrypted in place (split blocks across buffer boundaries are
+ * handled) and the CBC-MAC is recomputed and compared against the
+ * received MIC.  Returns 1 on success, 0 on MIC mismatch.
+ */
+static int
+ccmp_decrypt(struct ieee80211_key *key, u_int64_t pn, struct sk_buff *skb0, int hdrlen)
+{
+	struct ccmp_ctx *ctx = key->wk_private;
+	struct ieee80211_frame *wh = (struct ieee80211_frame *) skb0->data;
+	struct sk_buff *skb;
+	uint8_t aad[2 * AES_BLOCK_LEN];
+	uint8_t b0[AES_BLOCK_LEN], b[AES_BLOCK_LEN], a[AES_BLOCK_LEN];
+	size_t data_len;
+	int i;
+	uint8_t *pos, *mic;
+	u_int space;
+
+	ctx->cc_vap->iv_stats.is_crypto_ccmp++;
+
+	/* Sum the payload length over the whole chain. */
+	skb = skb0;
+	data_len = skb->len;
+	while (skb->next != NULL) {
+		skb = skb->next;
+		data_len += skb->len;
+	}
+	data_len -= hdrlen + ccmp.ic_header + ccmp.ic_trailer;
+	/* NB: skb left pointing at last in chain */
+	/* NB: b receives the S0 keystream block here (last argument). */
+	ccmp_init_blocks(ctx->cc_tfm, wh, pn, data_len, b0, aad, a, b);
+	/* NB: this is the last in the chain */
+	/* XXX assert skb->len >= ccmp.ic_trailer */
+	/* Unmask the received MIC with S0 so it can be compared with
+	 * the CBC-MAC value accumulated in a below. */
+	mic = skb->data + skb->len - ccmp.ic_trailer;
+	xor_block(mic, b, ccmp.ic_trailer);
+
+	/* CTR counter starts at 1; counter 0 produced S0 above. */
+	i = 1;
+	skb = skb0;
+	pos = skb->data + hdrlen + ccmp.ic_header;
+	space = skb->len - (hdrlen + ccmp.ic_header);
+	for (;;) {
+		if (space > data_len)
+			space = data_len;
+		while (space >= AES_BLOCK_LEN) {
+			CCMP_DECRYPT(i, b, b0, pos, a, AES_BLOCK_LEN);
+			pos += AES_BLOCK_LEN;
+			space -= AES_BLOCK_LEN;
+			data_len -= AES_BLOCK_LEN;
+			i++;
+		}
+		if (data_len <= 0)		/* no more data */
+			break;
+		skb = skb->next;
+		if (skb == NULL) {		/* last buffer */
+			if (space != 0)		/* short last block */
+				CCMP_DECRYPT(i, b, b0, pos, a, space);
+			break;
+		}
+		if (space != 0) {
+			uint8_t *pos_next;
+			u_int space_next;
+			u_int len;
+
+			/*
+			 * Block straddles buffers, split references.  We
+			 * do not handle splits that require >2 buffers.
+			 */
+			pos_next = skb->data;
+			len = min(data_len, (size_t) AES_BLOCK_LEN);
+			space_next = len > space ? len - space : 0;
+			KASSERT(skb->len >= space_next,
+				("not enough data in following buffer, "
+				"skb len %u need %u\n", skb->len, space_next));
+
+			/* Pre-fold keystream over the second fragment, run
+			 * the split block over the first, then decrypt the
+			 * second fragment with the leftover keystream bytes
+			 * at offset space in b. */
+			xor_block(b+space, pos_next, space_next);
+			CCMP_DECRYPT(i, b, b0, pos, a, space);
+			xor_block(pos_next, b+space, space_next);
+			data_len -= len;
+			i++;
+
+			pos = pos_next + space_next;
+			space = skb->len - space_next;
+		} else {
+			/*
+			 * Setup for next buffer.
+			 */
+			pos = skb->data;
+			space = skb->len;
+		}
+	}
+
+	/* Constant-time comparison is not used here; mismatch means
+	 * the frame is discarded and the failure counted. */
+	if (memcmp(mic, a, ccmp.ic_trailer) != 0) {
+		IEEE80211_NOTE_MAC(ctx->cc_vap, IEEE80211_MSG_CRYPTO,
+			wh->i_addr2,
+			"AES-CCM decrypt failed; MIC mismatch (keyix %u, rsc %llu)",
+			key->wk_keyix, pn);
+		ctx->cc_vap->iv_stats.is_rx_ccmpmic++;
+		return 0;
+	}
+	return 1;
+}
+#undef CCMP_DECRYPT
+
+/*
+ * Module glue.
+ */
+
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless support: AES-CCM cipher");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+/* Register the AES-CCM cipher with the net80211 crypto framework
+ * on module load. */
+static int __init
+init_crypto_ccmp(void)
+{
+	ieee80211_crypto_register(&ccmp);
+	return 0;
+}
+module_init(init_crypto_ccmp);
+
+/* Unregister the cipher on module unload. */
+static void __exit
+exit_crypto_ccmp(void)
+{
+	ieee80211_crypto_unregister(&ccmp);
+}
+module_exit(exit_crypto_ccmp);
diff --git a/drivers/qtn/wlan/ieee80211_crypto_none.c b/drivers/qtn/wlan/ieee80211_crypto_none.c
new file mode 100644
index 0000000..99b16c1
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_crypto_none.c
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_crypto_none.c 1721 2006-09-20 08:45:13Z mentor $
+ */
+
+/*
+ * IEEE 802.11 NULL crypto support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+
+static void *none_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void none_detach(struct ieee80211_key *);
+static int none_setkey(struct ieee80211_key *);
+static int none_encap(struct ieee80211_key *, struct sk_buff *, u_int8_t);
+static int none_decap(struct ieee80211_key *, struct sk_buff *, int);
+static int none_enmic(struct ieee80211_key *, struct sk_buff *, int);
+static int none_demic(struct ieee80211_key *, struct sk_buff *, int);
+
+/*
+ * NULL cipher dispatch table.  Used as the fallback when no real
+ * cipher is configured for a key: no header, trailer or MIC bytes
+ * are added, and the encap/decap/mic handlers always fail.
+ */
+const struct ieee80211_cipher ieee80211_cipher_none = {
+	.ic_name	= "NONE",
+	.ic_cipher	= IEEE80211_CIPHER_NONE,
+	.ic_header	= 0,
+	.ic_trailer	= 0,
+	.ic_miclen	= 0,
+	.ic_attach	= none_attach,
+	.ic_detach	= none_detach,
+	.ic_setkey	= none_setkey,
+	.ic_encap	= none_encap,
+	.ic_decap	= none_decap,
+	.ic_enmic	= none_enmic,
+	.ic_demic	= none_demic,
+};
+EXPORT_SYMBOL(ieee80211_cipher_none);
+
+/*
+ * Attach the NULL cipher to a key.  The "context" is just the vap
+ * pointer, kept so the handlers below can bump per-vap statistics.
+ */
+static void *
+none_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+	return vap;		/* for diagnostics+stats */
+}
+
+/* Nothing to release; no state was allocated in none_attach. */
+static void
+none_detach(struct ieee80211_key *k)
+{
+	(void) k;
+}
+
+/* Accept any key; the NULL cipher carries no key state. */
+static int
+none_setkey(struct ieee80211_key *k)
+{
+	(void) k;
+	return 1;
+}
+
+/*
+ * Always fails: reaching the NULL cipher's encap handler means the
+ * requested key was never properly configured.  Counts the event
+ * and returns 0 so the frame is dropped.
+ * NOTE(review): wh is declared only under IEEE80211_DEBUG but used
+ * in IEEE80211_NOTE_MAC below — presumably that macro compiles away
+ * when IEEE80211_DEBUG is unset; confirm against ieee80211_var.h.
+ */
+static int
+none_encap(struct ieee80211_key *k, struct sk_buff *skb, u_int8_t keyid)
+{
+	struct ieee80211vap *vap = k->wk_private;
+#ifdef IEEE80211_DEBUG
+	struct ieee80211_frame *wh = (struct ieee80211_frame *)skb->data;
+#endif
+
+	/*
+	 * The specified key is not setup; this can
+	 * happen, at least, when changing keys.
+	 */
+	IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr1,
+		"key id %u is not set (encap)", keyid>>6);
+	vap->iv_stats.is_tx_badcipher++;
+	return 0;
+}
+
+/*
+ * Always fails: an encrypted frame arrived for a key that is not
+ * set up.  Counts a bad-keyid event and returns 0 so the frame is
+ * discarded.  The key id logged is taken from the top bits of the
+ * fourth IV byte following the 802.11 header.
+ */
+static int
+none_decap(struct ieee80211_key *k, struct sk_buff *skb, int hdrlen)
+{
+	struct ieee80211vap *vap = k->wk_private;
+#ifdef IEEE80211_DEBUG
+	struct ieee80211_frame *wh = (struct ieee80211_frame *)skb->data;
+	const u_int8_t *ivp = (const u_int8_t *)&wh[1];
+#endif
+
+	/*
+	 * The specified key is not setup; this can
+	 * happen, at least, when changing keys.
+	 */
+	/* XXX useful to know dst too */
+	IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+		"key id %u is not set (decap)", ivp[IEEE80211_WEP_IVLEN] >> 6);
+	vap->iv_stats.is_rx_badkeyid++;
+	return 0;
+}
+
+/* No MIC support in the NULL cipher; count the event and fail. */
+static int
+none_enmic(struct ieee80211_key *k, struct sk_buff *skb, int force)
+{
+	struct ieee80211vap *vap = k->wk_private;
+
+	vap->iv_stats.is_tx_badcipher++;
+	return 0;
+}
+
+/* No MIC support in the NULL cipher; count the event and fail. */
+static int
+none_demic(struct ieee80211_key *k, struct sk_buff *skb, int hdrlen)
+{
+	struct ieee80211vap *vap = k->wk_private;
+
+	vap->iv_stats.is_rx_badkeyid++;
+	return 0;
+}
diff --git a/drivers/qtn/wlan/ieee80211_crypto_tkip.c b/drivers/qtn/wlan/ieee80211_crypto_tkip.c
new file mode 100644
index 0000000..4067384
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_crypto_tkip.c
@@ -0,0 +1,1070 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_crypto_tkip.c 2028 2007-01-30 03:51:52Z proski $
+ */
+
+/*
+ * IEEE 802.11i TKIP crypto support.
+ *
+ * Part of this module is derived from similar code in the Host
+ * AP driver. The code is used with the consent of the author and
+ * its license is included below.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+
+static void *tkip_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void tkip_detach(struct ieee80211_key *);
+static int tkip_setkey(struct ieee80211_key *);
+static int tkip_encap(struct ieee80211_key *, struct sk_buff *, u_int8_t);
+static int tkip_enmic(struct ieee80211_key *, struct sk_buff *, int);
+static int tkip_decap(struct ieee80211_key *, struct sk_buff *, int);
+static int tkip_demic(struct ieee80211_key *, struct sk_buff *, int);
+
+/*
+ * TKIP cipher dispatch table registered with net80211.  The header
+ * is the WEP IV + key id byte + extended IV; the trailer is the WEP
+ * ICV (CRC-32).  The Michael MIC length is reported separately via
+ * ic_miclen and handled by the enmic/demic entry points.
+ */
+static const struct ieee80211_cipher tkip  = {
+	.ic_name	= "TKIP",
+	.ic_cipher	= IEEE80211_CIPHER_TKIP,
+	.ic_header	= IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
+			  IEEE80211_WEP_EXTIVLEN,
+	.ic_trailer	= IEEE80211_WEP_CRCLEN,
+	.ic_miclen	= IEEE80211_WEP_MICLEN,
+	.ic_attach	= tkip_attach,
+	.ic_detach	= tkip_detach,
+	.ic_setkey	= tkip_setkey,
+	.ic_encap	= tkip_encap,
+	.ic_decap	= tkip_decap,
+	.ic_enmic	= tkip_enmic,
+	.ic_demic	= tkip_demic,
+};
+
+/*
+ * Per-key TKIP software state: separate phase-1 mixing caches for
+ * transmit and receive (phase 1 only needs redoing when the upper
+ * 32 bits of the TSC change), plus the receive sequence counter
+ * held back until the Michael MIC has been verified.
+ */
+struct tkip_ctx {
+	struct ieee80211vap *tc_vap;	/* for diagnostics + statistics */
+	struct ieee80211com *tc_ic;
+
+	u16	tx_ttak[5];		/* cached tx phase-1 output (TTAK) */
+	int	tx_phase1_done;		/* nonzero when tx_ttak is valid */
+	u8	tx_rc4key[16];		/* XXX for test module; make locals? */
+
+	u16	rx_ttak[5];		/* cached rx phase-1 output (TTAK) */
+	int	rx_phase1_done;		/* nonzero when rx_ttak is valid */
+	u8	rx_rc4key[16];		/* XXX for test module; make locals? */
+	uint64_t rx_rsc;		/* held until MIC verified */
+};
+
+static void michael_mic(struct tkip_ctx *, const u8 *,
+	struct sk_buff *, u_int, size_t,
+	u8 mic[IEEE80211_WEP_MICLEN]);
+static int tkip_encrypt(struct tkip_ctx *, struct ieee80211_key *,
+	struct sk_buff *, int);
+static int tkip_decrypt(struct tkip_ctx *, struct ieee80211_key *,
+	struct sk_buff *, int);
+
+/*
+ * Allocate and zero a per-key TKIP context and take a module
+ * reference.  Returns NULL (and counts is_crypto_nomem) on
+ * allocation failure, releasing the module reference.
+ */
+static void *
+tkip_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+	struct tkip_ctx *ctx;
+
+	_MOD_INC_USE(THIS_MODULE, return NULL);
+
+	MALLOC(ctx, struct tkip_ctx *, sizeof(struct tkip_ctx),
+		M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (ctx == NULL) {
+		vap->iv_stats.is_crypto_nomem++;
+		_MOD_DEC_USE(THIS_MODULE);
+		return NULL;
+	}
+
+	ctx->tc_vap = vap;
+	ctx->tc_ic = vap->iv_ic;
+	return ctx;
+}
+
+/* Free the per-key context and drop the module reference. */
+static void
+tkip_detach(struct ieee80211_key *k)
+{
+	struct tkip_ctx *ctx = k->wk_private;
+
+	FREE(ctx, M_DEVBUF);
+
+	_MOD_DEC_USE(THIS_MODULE);
+}
+
+/*
+ * Validate a new TKIP key: only 128-bit temporal keys are accepted.
+ * On success the transmit sequence counter is reset to its initial
+ * value of 1.  Returns 1 on success, 0 on bad key length.
+ */
+static int
+tkip_setkey(struct ieee80211_key *k)
+{
+	struct tkip_ctx *ctx = k->wk_private;
+
+	if (k->wk_keylen != (128 / NBBY)) {
+		(void) ctx;		/* XXX */
+		IEEE80211_DPRINTF(ctx->tc_vap, IEEE80211_MSG_CRYPTO,
+			"%s: Invalid key length %u, expecting %u\n",
+			__func__, k->wk_keylen, 128 / NBBY);
+		return 0;
+	}
+	k->wk_keytsc = 1;		/* TSC starts at 1 */
+	return 1;
+}
+
+/*
+ * Add privacy headers and do any s/w encryption required.
+ * Inserts the 8-byte TKIP IV/ExtIV after the 802.11 header, then
+ * either software-encrypts the frame or leaves it for hardware and
+ * advances the TSC.  Returns 1 on success, 0 if the frame must be
+ * discarded (countermeasures active or encryption failure).
+ */
+static int
+tkip_encap(struct ieee80211_key *k, struct sk_buff *skb, u_int8_t keyid)
+{
+	struct tkip_ctx *ctx = k->wk_private;
+	struct ieee80211vap *vap = ctx->tc_vap;
+	struct ieee80211com *ic = ctx->tc_ic;
+	u_int8_t *ivp;
+	int hdrlen;
+
+	/*
+	 * Handle TKIP counter measures requirement.
+	 */
+	if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+#ifdef IEEE80211_DEBUG
+		struct ieee80211_frame *wh =
+			(struct ieee80211_frame *) skb->data;
+#endif
+
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+			"Discard frame due to countermeasures (%s)", __func__);
+		vap->iv_stats.is_crypto_tkipcm++;
+		return 0;
+	}
+	hdrlen = ieee80211_hdrspace(ic, skb->data);
+
+	/*
+	 * Copy down 802.11 header and add the IV, KeyID, and ExtIV.
+	 */
+	ivp = skb_push(skb, tkip.ic_header);
+	memmove(ivp, ivp + tkip.ic_header, hdrlen);
+	ivp += hdrlen;
+
+	/* TKIP IV layout: TSC1, WEP seed, TSC0, keyid|ExtIV, then
+	 * TSC2..TSC5.  The seed byte masks out known-weak RC4 key
+	 * patterns per the TKIP specification. */
+	ivp[0] = k->wk_keytsc >> 8;		/* TSC1 */
+	ivp[1] = (ivp[0] | 0x20) & 0x7f;	/* WEP seed */
+	ivp[2] = k->wk_keytsc >> 0;		/* TSC0 */
+	ivp[3] = keyid | IEEE80211_WEP_EXTIV;	/* KeyID | ExtID */
+	ivp[4] = k->wk_keytsc >> 16;		/* TSC2 */
+	ivp[5] = k->wk_keytsc >> 24;		/* TSC3 */
+	ivp[6] = k->wk_keytsc >> 32;		/* TSC4 */
+	ivp[7] = k->wk_keytsc >> 40;		/* TSC5 */
+
+	/*
+	 * Finally, do software encrypt if needed.
+	 */
+	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
+		if (!tkip_encrypt(ctx, k, skb, hdrlen))
+			return 0;
+		/* NB: tkip_encrypt handles wk_keytsc */
+	} else
+		k->wk_keytsc++;
+
+	return 1;
+}
+
+/*
+ * Add MIC to the frame as needed.
+ * Computes the Michael MIC over the whole payload of the skb chain
+ * (software MIC only, or when force is set) and appends it to the
+ * last buffer.  Returns 1 on success, 0 if the tail buffer lacks
+ * room for the MIC.
+ */
+static int
+tkip_enmic(struct ieee80211_key *k, struct sk_buff *skb0, int force)
+{
+	struct tkip_ctx *ctx = k->wk_private;
+
+	if (force || (k->wk_flags & IEEE80211_KEY_SWMIC)) {
+		struct ieee80211_frame *wh =
+			(struct ieee80211_frame *) skb0->data;
+		struct ieee80211vap *vap = ctx->tc_vap;
+		struct ieee80211com *ic = ctx->tc_ic;
+		int hdrlen;
+		struct sk_buff *skb;
+		size_t data_len;
+		uint8_t mic[IEEE80211_WEP_MICLEN];
+
+		vap->iv_stats.is_crypto_tkipenmic++;
+
+		/* Sum payload over the chain; skb ends at the tail
+		 * buffer where the MIC is appended. */
+		skb = skb0;
+		data_len = skb->len;
+		while (skb->next != NULL) {
+			skb = skb->next;
+			data_len += skb->len;
+		}
+		if (skb_tailroom(skb) < tkip.ic_miclen) {
+			/* NB: should not happen */
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+				wh->i_addr1,
+				"No room for Michael MIC, tailroom %u",
+				skb_tailroom(skb));
+			/* XXX statistic */
+			return 0;
+		}
+
+		hdrlen = ieee80211_hdrspace(ic, wh);
+		michael_mic(ctx, k->wk_txmic,
+			skb0, hdrlen, data_len - hdrlen, mic);
+		memcpy(skb_put(skb, tkip.ic_miclen), mic, tkip.ic_miclen);
+	}
+	return 1;
+}
+
+/* Assemble a 48-bit sequence counter from six bytes supplied in
+ * little-endian order (b0 is the least-significant byte). */
+static __inline uint64_t
+READ_6(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5)
+{
+	uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24);
+	uint16_t iv16 = (b4 << 0) | (b5 << 8);
+	return (((uint64_t)iv16) << 32) | iv32;
+}
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame.  If necessary, decrypt the frame using
+ * the specified key.
+ * Checks the ExtIV bit, enforces countermeasures, performs the
+ * per-TID replay check, optionally software-decrypts, then strips
+ * the TKIP header and ICV trailer.  Returns 1 on success, 0 if the
+ * frame must be discarded.
+ */
+static int
+tkip_decap(struct ieee80211_key *k, struct sk_buff *skb, int hdrlen)
+{
+	struct tkip_ctx *ctx = k->wk_private;
+	struct ieee80211vap *vap = ctx->tc_vap;
+	struct ieee80211_frame *wh;
+	uint8_t *ivp;
+	u_int8_t tid;
+
+	/*
+	 * Header should have extended IV and sequence number;
+	 * verify the former and validate the latter.
+	 */
+	wh = (struct ieee80211_frame *)skb->data;
+
+	ivp = skb->data + hdrlen;
+	if ((ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV) == 0) {
+		/*
+		 * No extended IV; discard frame.
+		 */
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+			"%s", "missing ExtIV for TKIP cipher");
+		vap->iv_stats.is_rx_tkipformat++;
+		return 0;
+	}
+	/*
+	 * Handle TKIP counter measures requirement.
+	 */
+	if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+			"discard frame due to countermeasures (%s)", __func__);
+		vap->iv_stats.is_crypto_tkipcm++;
+		return 0;
+	}
+
+	tid = 0;
+	if (IEEE80211_QOS_HAS_SEQ(wh)) 
+		tid = ((struct ieee80211_qosframe *)wh)->i_qos[0] & IEEE80211_QOS_TID;
+
+	/* NB: TSC0 is at ivp[2] and TSC1 at ivp[0] in the TKIP IV
+	 * layout (ivp[1] is the WEP seed byte, skipped here). */
+	ctx->rx_rsc = READ_6(ivp[2], ivp[0], ivp[4], ivp[5], ivp[6], ivp[7]);
+	if (ctx->rx_rsc <= k->wk_keyrsc[tid]) {
+		/*
+		 * Replay violation; notify upper layer.
+		 */
+		ieee80211_notify_replay_failure(vap, wh, k, ctx->rx_rsc);
+		vap->iv_stats.is_rx_tkipreplay++;
+		return 0;
+	}
+	/*
+	 * NB: We can't update the rsc in the key until MIC is verified.
+	 *
+	 * We assume we are not preempted between doing the check above
+	 * and updating wk_keyrsc when stripping the MIC in tkip_demic.
+	 * Otherwise we might process another packet and discard it as
+	 * a replay.
+	 */
+
+	/*
+	 * Check if the device handled the decrypt in hardware.
+	 * If so we just strip the header; otherwise we need to
+	 * handle the decrypt in software.
+	 */
+	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) &&
+	    !tkip_decrypt(ctx, k, skb, hdrlen))
+		return 0;
+
+	/*
+	 * Copy up 802.11 header and strip crypto bits.
+	 */
+	memmove(skb->data + tkip.ic_header, skb->data, hdrlen);
+	skb_pull(skb, tkip.ic_header);
+	while (skb->next != NULL)
+		skb = skb->next;
+	skb_trim(skb, skb->len - tkip.ic_trailer);
+
+	return 1;
+}
+
+/*
+ * Verify and strip MIC from the frame.
+ * Recomputes the Michael MIC over the payload (software MIC only),
+ * compares it with the received MIC at the tail of the chain, and
+ * on success strips the MIC and commits the receive sequence
+ * counter saved by tkip_decap.  Returns 1 on success, 0 on MIC
+ * failure (after notifying the 802.11 layer).
+ */
+static int
+tkip_demic(struct ieee80211_key *k, struct sk_buff *skb0, int hdrlen)
+{
+	struct tkip_ctx *ctx = k->wk_private;
+	struct sk_buff *skb;
+	size_t pktlen;
+	struct ieee80211_frame *wh ;
+	u_int8_t tid;
+
+	/* Sum the total length; skb ends at the tail buffer that
+	 * carries the MIC. */
+	skb = skb0;
+	pktlen = skb->len;
+	while (skb->next != NULL) {
+		skb = skb->next;
+		pktlen += skb->len;
+	}
+	wh = (struct ieee80211_frame *) skb0->data;
+	/* NB: skb left pointing at last in chain */
+	if (k->wk_flags & IEEE80211_KEY_SWMIC) {
+		struct ieee80211vap *vap = ctx->tc_vap;
+		u8 mic[IEEE80211_WEP_MICLEN];
+		u8 mic0[IEEE80211_WEP_MICLEN];
+
+		vap->iv_stats.is_crypto_tkipdemic++;
+
+		michael_mic(ctx, k->wk_rxmic, 
+			skb0, hdrlen, pktlen - (hdrlen + tkip.ic_miclen),
+			mic);
+		/* XXX assert skb->len >= tkip.ic_miclen */
+		memcpy(mic0, skb->data + skb->len - tkip.ic_miclen,
+			tkip.ic_miclen);
+		if (memcmp(mic, mic0, tkip.ic_miclen)) {
+			/* NB: 802.11 layer handles statistic and debug msg */
+			ieee80211_notify_michael_failure(vap, wh, k->wk_keyix);
+			return 0;
+		}
+	}
+	/*
+	 * Strip MIC from the tail.
+	 */
+	skb_trim(skb, skb->len - tkip.ic_miclen);
+
+	/*
+	 * Ok to update rsc now that MIC has been verified.
+	 */
+	tid = 0;
+	if (IEEE80211_QOS_HAS_SEQ(wh)) 
+		tid = ((struct ieee80211_qosframe *)wh)->i_qos[0] & IEEE80211_QOS_TID;
+	k->wk_keyrsc[tid] = ctx->rx_rsc;
+
+	return 1;
+}
+
+/*
+ * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ */
+
+/* Byte-at-a-time CRC-32 lookup table (reflected 802.3 polynomial)
+ * used to compute the WEP ICV in wep_encrypt()/wep_decrypt(). */
+static const __u32 crc32_table[256] = {
+	0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
+	0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
+	0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+	0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+	0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+	0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+	0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+	0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+	0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+	0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+	0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+	0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+	0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+	0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
+	0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
+	0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
+	0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+	0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+	0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+	0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+	0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+	0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+	0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+	0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+	0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+	0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+	0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
+	0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
+	0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
+	0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+	0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+	0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+	0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+	0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+	0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+	0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+	0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+	0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+	0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+	0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
+	0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
+	0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+	0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+	0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+	0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+	0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+	0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+	0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+	0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+	0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+	0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+	0x2d02ef8dL
+};
+
+/* 16-bit rotate right by one bit. */
+static __inline u16
+RotR1(u16 val)
+{
+	return (val >> 1) | (val << 15);
+}
+
+/* Low byte of a 16-bit value. */
+static __inline u8
+Lo8(u16 val)
+{
+	return val & 0xff;
+}
+
+/* High byte of a 16-bit value. */
+static __inline u8
+Hi8(u16 val)
+{
+	return val >> 8;
+}
+
+/* Low 16 bits of a 32-bit value. */
+static __inline u16
+Lo16(u32 val)
+{
+	return val & 0xffff;
+}
+
+/* High 16 bits of a 32-bit value. */
+static __inline u16
+Hi16(u32 val)
+{
+	return val >> 16;
+}
+
+/* Combine two bytes into a 16-bit value (hi is the high byte). */
+static __inline u16
+Mk16(u8 hi, u8 lo)
+{
+	return lo | (((u16) hi) << 8);
+}
+
+/* Load a little-endian 16-bit value into host order. */
+static __inline u16
+Mk16_le(const __le16 *v)
+{
+	return le16_to_cpu(*v);
+}
+
+/* TKIP key-mixing S-box table; _S_() below combines two lookups,
+ * byte-swapping the result of the high-byte lookup. */
+static const u16 Sbox[256] = {
+	0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+	0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+	0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+	0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+	0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+	0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+	0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+	0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+	0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+	0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+	0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+	0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+	0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+	0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+	0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+	0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+	0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+	0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+	0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+	0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+	0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+	0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+	0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+	0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+	0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+	0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+	0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+	0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+	0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+	0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+	0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+	0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+};
+
+/* TKIP S-box lookup: XOR of the low-byte entry with the
+ * byte-swapped high-byte entry. */
+static __inline u16
+_S_(u16 v)
+{
+	u16 t = Sbox[Hi8(v)];
+	return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
+}
+
+#define PHASE1_LOOP_COUNT 8
+
+/*
+ * TKIP phase 1 key mixing: derive the 80-bit TTAK from the temporal
+ * key TK, the transmitter address TA and the upper 32 bits of the
+ * TSC (IV32).  The result is cached by callers and only recomputed
+ * when IV32 changes.
+ */
+static void
+tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
+{
+	int i, j;
+
+	/* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
+	TTAK[0] = Lo16(IV32);
+	TTAK[1] = Hi16(IV32);
+	TTAK[2] = Mk16(TA[1], TA[0]);
+	TTAK[3] = Mk16(TA[3], TA[2]);
+	TTAK[4] = Mk16(TA[5], TA[4]);
+
+	for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
+		/* j alternates 0/2 to select different TK byte pairs */
+		j = 2 * (i & 1);
+		TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
+		TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
+		TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
+		TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
+		TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
+	}
+}
+
+#ifndef _BYTE_ORDER
+#error "Don't know native byte order"
+#endif
+
+/*
+ * TKIP phase 2 key mixing: derive the 128-bit per-packet RC4 key
+ * (WEPSeed) from the phase-1 TTAK, the temporal key TK and the low
+ * 16 bits of the TSC (IV16).  WEPSeed[0..2] doubles as the WEP IV
+ * transmitted on the air.
+ */
+static void
+tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK, u16 IV16)
+{
+	/* Make temporary area overlap WEP seed so that the final copy can be
+	 * avoided on little endian hosts. */
+	u16 *PPK = (u16 *) &WEPSeed[4];
+
+	/* Step 1 - make copy of TTAK and bring in TSC */
+	PPK[0] = TTAK[0];
+	PPK[1] = TTAK[1];
+	PPK[2] = TTAK[2];
+	PPK[3] = TTAK[3];
+	PPK[4] = TTAK[4];
+	PPK[5] = TTAK[4] + IV16;
+
+	/* Step 2 - 96-bit bijective mixing using S-box */
+	PPK[0] += _S_(PPK[5] ^ Mk16_le((const __le16 *) &TK[0]));
+	PPK[1] += _S_(PPK[0] ^ Mk16_le((const __le16 *) &TK[2]));
+	PPK[2] += _S_(PPK[1] ^ Mk16_le((const __le16 *) &TK[4]));
+	PPK[3] += _S_(PPK[2] ^ Mk16_le((const __le16 *) &TK[6]));
+	PPK[4] += _S_(PPK[3] ^ Mk16_le((const __le16 *) &TK[8]));
+	PPK[5] += _S_(PPK[4] ^ Mk16_le((const __le16 *) &TK[10]));
+
+	PPK[0] += RotR1(PPK[5] ^ Mk16_le((const __le16 *) &TK[12]));
+	PPK[1] += RotR1(PPK[0] ^ Mk16_le((const __le16 *) &TK[14]));
+	PPK[2] += RotR1(PPK[1]);
+	PPK[3] += RotR1(PPK[2]);
+	PPK[4] += RotR1(PPK[3]);
+	PPK[5] += RotR1(PPK[4]);
+
+	/* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
+	 * WEPSeed[0..2] is transmitted as WEP IV */
+	WEPSeed[0] = Hi8(IV16);
+	WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
+	WEPSeed[2] = Lo8(IV16);
+	WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((const __le16 *) &TK[0])) >> 1);
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+	/* The overlap trick above assumed little-endian 16-bit stores;
+	 * byte-swap the PPK words in place on big-endian hosts. */
+	{
+		int i;
+		for (i = 0; i < 6; i++)
+			PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
+	}
+#endif
+}
+
+/*
+ * RC4-encrypt data_len payload bytes starting at offset off in the
+ * skb chain, using the 16-byte per-packet key.  CRC-32 is computed
+ * over the plaintext as it is encrypted and the encrypted ICV is
+ * appended to the last buffer in the chain.
+ */
+static void
+wep_encrypt(u8 *key, struct sk_buff *skb0, u_int off, size_t data_len)
+{
+#define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0)
+	struct sk_buff *skb = skb0;
+	uint32_t i, j, k, crc;
+	size_t buflen;
+	uint8_t S[256];
+	uint8_t *pos, *icv;
+
+	/* Setup RC4 state */
+	for (i = 0; i < 256; i++)
+		S[i] = i;
+	j = 0;
+	for (i = 0; i < 256; i++) {
+		j = (j + S[i] + key[i & 0x0f]) & 0xff;
+		S_SWAP(i, j);
+	}
+
+	/* Compute CRC32 over unencrypted data and apply RC4 to data */
+	crc = ~0;
+	i = j = 0;
+	pos = skb->data + off;
+	buflen = skb->len - off;
+	for (;;) {
+		if (buflen > data_len)
+			buflen = data_len;
+		data_len -= buflen;
+		for (k = 0; k < buflen; k++) {
+			crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+			i = (i + 1) & 0xff;
+			j = (j + S[i]) & 0xff;
+			S_SWAP(i, j);
+			*pos++ ^= S[(S[i] + S[j]) & 0xff];
+		}
+		if (skb->next == NULL) {
+			KASSERT(data_len == 0,
+				("missing data, data_len %u", (int)data_len));
+			break;
+		}
+		skb = skb->next;
+		pos = skb->data;
+		buflen = skb->len;
+	}
+	crc = ~crc;
+
+	icv = skb_put(skb, tkip.ic_trailer);
+	/* Append little-endian CRC32 and encrypt it to produce ICV */
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+	for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) {
+		i = (i + 1) & 0xff;
+		j = (j + S[i]) & 0xff;
+		S_SWAP(i, j);
+		icv[k] ^= S[(S[i] + S[j]) & 0xff];
+	}
+#undef S_SWAP
+}
+
+/*
+ * RC4-decrypt the frame payload in place and verify the trailing ICV.
+ * 'key' is the 16-byte per-packet RC4 seed; 'off' is the payload
+ * offset in the first buffer and 'data_len' the payload length
+ * excluding the ICV; the payload may span the skb->next chain.
+ * Returns 0 on success, -1 on short data or ICV mismatch.
+ * NB: the 4-byte ICV is assumed contiguous in the final buffer.
+ */
+static int
+wep_decrypt(u8 *key, struct sk_buff *skb, u_int off, size_t data_len)
+{
+#define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0)
+	u32 i, j, k, crc;
+	u8 S[256];
+	u8 *pos, icv[4];
+	size_t buflen;
+
+	/* Setup RC4 state (key-scheduling with the 16-byte seed) */
+	for (i = 0; i < 256; i++)
+		S[i] = i;
+	j = 0;
+	for (i = 0; i < 256; i++) {
+		j = (j + S[i] + key[i & 0x0f]) & 0xff;
+		S_SWAP(i, j);
+	}
+
+	/* Apply RC4 to data and compute CRC32 over decrypted data */
+	crc = ~0;
+	i = j = 0;
+	pos = skb->data + off;
+	buflen = skb->len - off;
+	for (;;) {
+		/* Clamp to the payload bytes remaining overall. */
+		if (buflen > data_len)
+			buflen = data_len;
+		data_len -= buflen;
+		for (k = 0; k < buflen; k++) {
+			i = (i + 1) & 0xff;
+			j = (j + S[i]) & 0xff;
+			S_SWAP(i, j);
+			/* decrypt first, then fold the plaintext into the CRC */
+			*pos ^= S[(S[i] + S[j]) & 0xff];
+			crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+			pos++;
+		}
+		if (skb->next == NULL) {
+			if (data_len != 0) {
+				/* XXX msg? stat? cannot happen? */
+				return -1;
+			}
+			break;
+		}
+		skb = skb->next;
+		pos = skb->data;
+		buflen = skb->len;
+	}
+	crc = ~crc;
+
+	/* Encrypt little-endian CRC32 and verify that it matches with the
+	 * received ICV */
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+	for (k = 0; k < 4; k++) {
+		i = (i + 1) & 0xff;
+		j = (j + S[i]) & 0xff;
+		S_SWAP(i, j);
+		if ((icv[k] ^ S[(S[i] + S[j]) & 0xff]) != *pos++) {
+			/* ICV mismatch - drop frame */
+			return -1;
+		}
+	}
+	return 0;
+#undef S_SWAP
+}
+
+
+/*
+ * Rotate a 32-bit value left by 'bits' (callers use 3 and 17).
+ * The right-shift count is masked so that bits == 0 does not shift a
+ * 32-bit value by 32, which is undefined behaviour in C; with the mask
+ * a zero rotation correctly yields 'val'.
+ */
+static __inline u32
+rotl(u32 val, int bits)
+{
+	return (val << bits) | (val >> ((32 - bits) & 31));
+}
+
+
+/*
+ * Rotate a 32-bit value right by 'bits' (callers use 2).
+ * The left-shift count is masked so that bits == 0 does not shift a
+ * 32-bit value by 32, which is undefined behaviour in C; with the mask
+ * a zero rotation correctly yields 'val'.
+ */
+static __inline u32
+rotr(u32 val, int bits)
+{
+	return (val >> bits) | (val << ((32 - bits) & 31));
+}
+
+
+/*
+ * Swap the two bytes within each 16-bit half of a 32-bit word
+ * (b0 b1 b2 b3 -> b1 b0 b3 b2), as used by the Michael block function.
+ */
+static __inline u32
+xswap(u32 val)
+{
+	return ((val << 8) & 0xff00ff00) | ((val >> 8) & 0x00ff00ff);
+}
+
+
+/*
+ * One round of the Michael MIC block function: mixes the 64-bit state
+ * (l, r) with rotates, a byte swap within halfwords (xswap) and
+ * additions.  Written as a macro so both halves are updated in place.
+ */
+#define michael_block(l, r)	\
+do {				\
+	r ^= rotl(l, 17);	\
+	l += r;			\
+	r ^= xswap(l);		\
+	l += r;			\
+	r ^= rotl(l, 3);	\
+	l += r;			\
+	r ^= rotr(l, 2);	\
+	l += r;			\
+} while (0)
+
+
+/*
+ * Assemble four bytes (b0 = least significant) into a little-endian
+ * 32-bit value.  The casts keep the shifts in unsigned arithmetic:
+ * without them b3 is promoted to int and 'b3 << 24' can shift into the
+ * sign bit (undefined behaviour) whenever b3 >= 0x80.
+ */
+static __inline u32
+get_le32_split(u8 b0, u8 b1, u8 b2, u8 b3)
+{
+	return (u32)b0 | ((u32)b1 << 8) | ((u32)b2 << 16) | ((u32)b3 << 24);
+}
+
+/* Load a little-endian 32-bit quantity from an arbitrarily aligned buffer. */
+static __inline u32
+get_le32(const u8 *p)
+{
+	return get_le32_split(*p, *(p + 1), *(p + 2), *(p + 3));
+}
+
+
+/* Store a 32-bit quantity little-endian at an arbitrarily aligned buffer. */
+static __inline void
+put_le32(u8 *p, u32 v)
+{
+	int n;
+
+	for (n = 0; n < 4; n++)
+		p[n] = v >> (8 * n);
+}
+
+/*
+ * Craft pseudo header used to calculate the MIC.
+ * Layout is DA (6 bytes), SA (6 bytes), priority (1 byte) and three
+ * reserved zero bytes; DA/SA are selected from the appropriate address
+ * fields according to the frame's ToDS/FromDS bits.
+ */
+static void
+michael_mic_hdr(const struct ieee80211_frame *wh0, u8 hdr[16])
+{
+	const struct ieee80211_frame_addr4 *wh =
+		(const struct ieee80211_frame_addr4 *) wh0;
+
+	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+	case IEEE80211_FC1_DIR_NODS:
+		IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+		IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_TODS:
+		IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+		IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_FROMDS:
+		IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+		IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr3);
+		break;
+	case IEEE80211_FC1_DIR_DSTODS:
+		IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+		IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr4);
+		break;
+	}
+
+	/* Priority byte: the frame's TID for QoS frames, 0 otherwise. */
+	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+		const struct ieee80211_qosframe *qwh =
+			(const struct ieee80211_qosframe *) wh;
+		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
+	} else
+		hdr[12] = 0;
+	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
+}
+
+/*
+ * Compute the Michael MIC over the 16-byte pseudo header (built by
+ * michael_mic_hdr) followed by 'data_len' payload bytes starting at
+ * 'off' in the first buffer.  The payload may span the chain of
+ * buffers linked through skb->next, and 32-bit blocks may straddle a
+ * buffer boundary.  The 8-byte MIC is written to 'mic'.
+ * NB: 'ctx' is not referenced by this implementation.
+ */
+static void
+michael_mic(struct tkip_ctx *ctx, const u8 *key,
+	struct sk_buff *skb, u_int off, size_t data_len,
+	u8 mic[IEEE80211_WEP_MICLEN])
+{
+	uint8_t hdr[16];
+	u32 l, r;
+	const uint8_t *data;
+	u_int space;
+
+	michael_mic_hdr((struct ieee80211_frame *) skb->data, hdr);
+
+	/* Initialize the 64-bit state from the 8-byte MIC key. */
+	l = get_le32(key);
+	r = get_le32(key + 4);
+
+	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
+	l ^= get_le32(hdr);
+	michael_block(l, r);
+	l ^= get_le32(&hdr[4]);
+	michael_block(l, r);
+	l ^= get_le32(&hdr[8]);
+	michael_block(l, r);
+	l ^= get_le32(&hdr[12]);
+	michael_block(l, r);
+
+	/* first buffer has special handling */
+	data = skb->data + off;
+	space = skb->len - off;
+	for (;;) {
+		/* Never consume beyond the overall payload length. */
+		if (space > data_len)
+			space = data_len;
+		/* collect 32-bit blocks from current buffer */
+		while (space >= sizeof(uint32_t)) {
+			l ^= get_le32(data);
+			michael_block(l, r);
+			data += sizeof(uint32_t), space -= sizeof(uint32_t);
+			data_len -= sizeof(uint32_t);
+		}
+		/* Fewer than 4 bytes left overall: fall through to padding. */
+		if (data_len < sizeof(uint32_t))
+			break;
+		skb = skb->next;
+		if (skb == NULL) {
+			KASSERT(0, ("out of data, data_len %lu\n",
+				    (unsigned long)data_len));
+			break;
+		}
+		if (space != 0) {
+			const uint8_t *data_next;
+			/*
+			 * Block straddles buffers, split references.
+			 */
+			data_next = skb->data;
+			KASSERT(skb->len >= sizeof(uint32_t) - space,
+				("not enough data in following buffer, "
+				"skb len %u need %u\n", skb->len,
+				(int)sizeof(uint32_t) - space));
+			/* 'space' leftover bytes in the old buffer combine
+			 * with the first bytes of the new one. */
+			switch (space) {
+			case 1:
+				l ^= get_le32_split(data[0], data_next[0],
+					data_next[1], data_next[2]);
+				data = data_next + 3;
+				space = skb->len - 3;
+				break;
+			case 2:
+				l ^= get_le32_split(data[0], data[1],
+					data_next[0], data_next[1]);
+				data = data_next + 2;
+				space = skb->len - 2;
+				break;
+			case 3:
+				l ^= get_le32_split(data[0], data[1],
+					data[2], data_next[0]);
+				data = data_next + 1;
+				space = skb->len - 1;
+				break;
+			}
+			michael_block(l, r);
+			data_len -= sizeof(uint32_t);
+		} else {
+			/*
+			 * Setup for next buffer.
+			 */
+			data = skb->data;
+			space = skb->len;
+		}
+	}
+	/* Last block and padding (0x5a, 4..7 x 0) */
+	switch (data_len) {
+	case 0:
+		l ^= get_le32_split(0x5a, 0, 0, 0);
+		break;
+	case 1:
+		l ^= get_le32_split(data[0], 0x5a, 0, 0);
+		break;
+	case 2:
+		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
+		break;
+	case 3:
+		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
+		break;
+	}
+	michael_block(l, r);
+	/* l ^= 0; */
+	michael_block(l, r);
+
+	/* Emit the final 64-bit state as the little-endian MIC. */
+	put_le32(mic, l);
+	put_le32(mic + 4, r);
+}
+
+/*
+ * TKIP-encrypt an outbound frame: derive the per-packet RC4 seed
+ * (reusing the cached Phase 1 result while the high 32 bits of the TSC
+ * are unchanged), encrypt the payload in place across the buffer
+ * chain, and advance the TSC.  Returns 1 on success, 0 on failure
+ * (insufficient tailroom for the ICV).
+ */
+static int
+tkip_encrypt(struct tkip_ctx *ctx, struct ieee80211_key *key,
+	struct sk_buff *skb0, int hdrlen)
+{
+	struct ieee80211_frame *wh = (struct ieee80211_frame *) skb0->data;
+	struct ieee80211vap *vap = ctx->tc_vap;
+	struct sk_buff *skb;
+	size_t pktlen;
+
+	vap->iv_stats.is_crypto_tkip++;
+
+	/* Total the frame length over the buffer chain; 'skb' is left
+	 * pointing at the last buffer, which receives the ICV. */
+	skb = skb0;
+	pktlen = skb->len;
+	while (skb->next != NULL) {
+		skb = skb->next;
+		pktlen += skb->len;
+	}
+	if (skb_tailroom(skb) < tkip.ic_trailer) {
+		/* NB: should not happen */
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+			wh->i_addr1, "No room for TKIP CRC, tailroom %u",
+			skb_tailroom(skb));
+		/* XXX statistic */
+		return 0;
+	}
+
+	/* Phase 1 depends only on TA and the high 32 TSC bits, so its
+	 * result is cached until the low 16 bits wrap. */
+	if (!ctx->tx_phase1_done) {
+		tkip_mixing_phase1(ctx->tx_ttak, key->wk_key, wh->i_addr2,
+			(u32)(key->wk_keytsc >> 16));
+		ctx->tx_phase1_done = 1;
+	}
+	tkip_mixing_phase2(ctx->tx_rc4key, key->wk_key, ctx->tx_ttak,
+		(u16) key->wk_keytsc);
+
+	wep_encrypt(ctx->tx_rc4key,
+		skb0, hdrlen + tkip.ic_header,
+		pktlen - (hdrlen + tkip.ic_header));
+
+	key->wk_keytsc++;
+	/* IV16 wrapped: IV32 changed, so Phase 1 must be redone. */
+	if ((u16)(key->wk_keytsc) == 0)
+		ctx->tx_phase1_done = 0;
+	return 1;
+}
+
+/*
+ * TKIP-decrypt an inbound frame: rebuild the per-packet RC4 seed from
+ * the sequence counter left in ctx->rx_rsc by tkip_decap (recomputing
+ * Phase 1 only when IV32 differs from the last replay counter for the
+ * frame's TID), then decrypt in place and verify the ICV.  Returns 1
+ * on success, 0 on ICV mismatch.
+ */
+static int
+tkip_decrypt(struct tkip_ctx *ctx, struct ieee80211_key *key,
+	struct sk_buff *skb0, int hdrlen)
+{
+	struct ieee80211_frame *wh = (struct ieee80211_frame *) skb0->data;
+	struct ieee80211vap *vap = ctx->tc_vap;
+	struct sk_buff *skb;
+	size_t pktlen;
+	u32 iv32;
+	u16 iv16;
+	u_int8_t tid;
+
+	vap->iv_stats.is_crypto_tkip++;
+
+	/* Total the frame length over the buffer chain. */
+	skb = skb0;
+	pktlen = skb->len;
+	while (skb->next != NULL) {
+		skb = skb->next;
+		pktlen += skb->len;
+	}
+	/* NB: tkip_decap already verified header and left seq in rx_rsc */
+	iv16 = (u16) ctx->rx_rsc;
+	iv32 = (u32) (ctx->rx_rsc >> 16);
+
+	wh = (struct ieee80211_frame *) skb0->data;
+	tid = 0;
+	if (IEEE80211_QOS_HAS_SEQ(wh)) 
+		tid = ((struct ieee80211_qosframe *)wh)->i_qos[0] & IEEE80211_QOS_TID;
+	if (iv32 != (u32)(key->wk_keyrsc[tid] >> 16) || !ctx->rx_phase1_done) {
+		tkip_mixing_phase1(ctx->rx_ttak, key->wk_key,
+			wh->i_addr2, iv32);
+		ctx->rx_phase1_done = 1;
+	}
+	tkip_mixing_phase2(ctx->rx_rc4key, key->wk_key, ctx->rx_ttak, iv16);
+
+	/* NB: skb is unstripped; deduct headers + ICV to get payload */
+	if (wep_decrypt(ctx->rx_rc4key,
+	    skb0, hdrlen + tkip.ic_header,
+	    pktlen - (hdrlen + tkip.ic_header + tkip.ic_trailer))) {
+		if (iv32 != (u32)(key->wk_keyrsc[tid] >> 16)) {
+			/* Previously cached Phase1 result was already lost, so
+			 * it needs to be recalculated for the next packet. */
+			ctx->rx_phase1_done = 0;
+		}
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+			"[%s] TKIP ICV mismatch on decrypt (keyix %d, rsc %llu)\n",
+			ether_sprintf(wh->i_addr2), key->wk_keyix, ctx->rx_rsc);
+		vap->iv_stats.is_rx_tkipicv++;
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * Module glue.
+ */
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless support: TKIP cipher");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+/* Register the TKIP cipher with the net80211 crypto framework. */
+static int __init
+init_crypto_tkip(void)
+{
+	ieee80211_crypto_register(&tkip);
+	return 0;
+}
+module_init(init_crypto_tkip);
+
+/* Unregister the TKIP cipher on module unload. */
+static void __exit
+exit_crypto_tkip(void)
+{
+	ieee80211_crypto_unregister(&tkip);
+}
+module_exit(exit_crypto_tkip);
diff --git a/drivers/qtn/wlan/ieee80211_crypto_wep.c b/drivers/qtn/wlan/ieee80211_crypto_wep.c
new file mode 100644
index 0000000..9cbec26
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_crypto_wep.c
@@ -0,0 +1,519 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_crypto_wep.c 1721 2006-09-20 08:45:13Z mentor $
+ */
+
+/*
+ * IEEE 802.11 WEP crypto support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/init.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+
+static void *wep_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void wep_detach(struct ieee80211_key *);
+static int wep_setkey(struct ieee80211_key *);
+static int wep_encap(struct ieee80211_key *, struct sk_buff *, u_int8_t);
+static int wep_decap(struct ieee80211_key *, struct sk_buff *, int);
+static int wep_enmic(struct ieee80211_key *, struct sk_buff *, int);
+static int wep_demic(struct ieee80211_key *, struct sk_buff *, int);
+
+/*
+ * WEP cipher dispatch table, registered with the net80211 crypto
+ * framework at module load.  ic_header covers the 3-byte IV plus the
+ * key-id byte; ic_trailer is the 4-byte ICV; WEP has no MIC.
+ */
+static const struct ieee80211_cipher wep = {
+	.ic_name	= "WEP",
+	.ic_cipher	= IEEE80211_CIPHER_WEP,
+	.ic_header	= IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
+	.ic_trailer	= IEEE80211_WEP_CRCLEN,
+	.ic_miclen	= 0,
+	.ic_attach	= wep_attach,
+	.ic_detach	= wep_detach,
+	.ic_setkey	= wep_setkey,
+	.ic_encap	= wep_encap,
+	.ic_decap	= wep_decap,
+	.ic_enmic	= wep_enmic,
+	.ic_demic	= wep_demic,
+};
+
+static int wep_encrypt(struct ieee80211_key *, struct sk_buff *, int);
+static int wep_decrypt(struct ieee80211_key *, struct sk_buff *, int);
+
+/* Per-key private state allocated by wep_attach. */
+struct wep_ctx {
+	struct ieee80211vap *wc_vap;	/* for diagnostics + statistics */
+	struct ieee80211com *wc_ic;	/* for diagnostics */
+	u_int32_t wc_iv;			/* initial vector for crypto */
+};
+
+/*
+ * Allocate per-key cipher state and seed the IV counter with random
+ * bytes.  Returns the context, or NULL on allocation failure (in which
+ * case is_crypto_nomem is bumped and the module refcount released).
+ */
+static void *
+wep_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+	struct wep_ctx *ctx;
+
+	_MOD_INC_USE(THIS_MODULE, return NULL);
+
+	MALLOC(ctx, struct wep_ctx *, sizeof(struct wep_ctx),
+		M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (ctx == NULL) {
+		vap->iv_stats.is_crypto_nomem++;
+		_MOD_DEC_USE(THIS_MODULE);
+		return NULL;
+	}
+
+	ctx->wc_vap = vap;
+	ctx->wc_ic = vap->iv_ic;
+	get_random_bytes(&ctx->wc_iv, sizeof(ctx->wc_iv));
+	return ctx;
+}
+
+/* Release the per-key cipher state allocated by wep_attach. */
+static void
+wep_detach(struct ieee80211_key *k)
+{
+	struct wep_ctx *ctx = k->wk_private;
+
+	FREE(ctx, M_DEVBUF);
+
+	_MOD_DEC_USE(THIS_MODULE);
+}
+
+/*
+ * Validate the key: WEP requires a key of at least 40 bits
+ * (wk_keylen is in bytes; NBBY bits per byte).
+ */
+static int
+wep_setkey(struct ieee80211_key *k)
+{
+	if (k->wk_keylen < 40 / NBBY)
+		return 0;
+	return 1;
+}
+
+#ifndef _BYTE_ORDER
+#error "Don't know native byte order"
+#endif
+
+/*
+ * Add privacy headers appropriate for the specified key.
+ * Shifts the 802.11 header down to open a 4-byte gap, writes the
+ * 24-bit IV plus key-id byte, then software-encrypts if the key
+ * requires it.  Returns 1 on success, 0 if software encrypt failed.
+ */
+static int
+wep_encap(struct ieee80211_key *k, struct sk_buff *skb, u_int8_t keyid)
+{
+	struct wep_ctx *ctx = k->wk_private;
+	struct ieee80211com *ic = ctx->wc_ic;
+	u_int32_t iv;
+	u_int8_t *ivp;
+	int hdrlen;
+
+	hdrlen = ieee80211_hdrspace(ic, skb->data);
+
+	/*
+	 * Copy down 802.11 header and add the IV + KeyID.
+	 */
+	ivp = skb_push(skb, wep.ic_header);
+	memmove(ivp, ivp + wep.ic_header, hdrlen);
+	ivp += hdrlen;
+
+	/*
+	 * XXX
+	 * IV must not duplicate during the lifetime of the key.
+	 * But no mechanism to renew keys is defined in IEEE 802.11
+	 * for WEP.  And the IV may be duplicated at other stations
+	 * because the session key itself is shared.  So we use a
+	 * pseudo random IV for now, though it is not the right way.
+	 *
+	 * NB: Rather than use a strictly random IV we select a
+	 * random one to start and then increment the value for
+	 * each frame.  This is an explicit tradeoff between
+	 * overhead and security.  Given the basic insecurity of
+	 * WEP this seems worthwhile.
+	 */
+
+	/*
+	 * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
+	 * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
+	 */
+	iv = ctx->wc_iv;
+	if ((iv & 0xff00) == 0xff00) {
+		int B = (iv & 0xff0000) >> 16;
+		if (3 <= B && B < 16)
+			iv += 0x0100;
+	}
+	ctx->wc_iv = iv + 1;
+
+	/*
+	 * NB: Preserve byte order of IV for packet
+	 *     sniffers; it doesn't matter otherwise.
+	 */
+#if _BYTE_ORDER == _BIG_ENDIAN
+	ivp[0] = iv >> 0;
+	ivp[1] = iv >> 8;
+	ivp[2] = iv >> 16;
+#else
+	ivp[2] = iv >> 0;
+	ivp[1] = iv >> 8;
+	ivp[0] = iv >> 16;
+#endif
+	ivp[3] = keyid;
+
+	/*
+	 * Finally, do software encrypt if needed.
+	 */
+	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) &&
+	    !wep_encrypt(k, skb, hdrlen))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Add MIC to the frame as needed.
+ * WEP has no MIC (ic_miclen is 0), so this is a no-op that always
+ * reports success.
+ */
+static int
+wep_enmic(struct ieee80211_key *k, struct sk_buff *skb, int force)
+{
+	return 1;
+}
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame.  If necessary, decrypt the frame using
+ * the specified key.
+ * Returns 1 on success, 0 on ICV mismatch (frame should be dropped).
+ */
+static int
+wep_decap(struct ieee80211_key *k, struct sk_buff *skb, int hdrlen)
+{
+	struct wep_ctx *ctx = k->wk_private;
+	struct ieee80211vap *vap = ctx->wc_vap;
+	struct ieee80211_frame *wh;
+
+	wh = (struct ieee80211_frame *)skb->data;
+
+	/*
+	 * Check if the device handled the decrypt in hardware.
+	 * If so we just strip the header; otherwise we need to
+	 * handle the decrypt in software.
+	 */
+	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) &&
+	    !wep_decrypt(k, skb, hdrlen)) {
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+			"%s", "WEP ICV mismatch on decrypt");
+		vap->iv_stats.is_rx_wepfail++;
+		return 0;
+	}
+
+	/*
+	 * Copy up 802.11 header and strip crypto bits.
+	 */
+	memmove(skb->data + wep.ic_header, skb->data, hdrlen);
+	skb_pull(skb, wep.ic_header);
+	skb_trim(skb, skb->len - wep.ic_trailer);
+
+	return 1;
+}
+
+/*
+ * Verify and strip MIC from the frame.
+ * WEP has no MIC, so this is a no-op that always reports success.
+ */
+static int
+wep_demic(struct ieee80211_key *k, struct sk_buff *skb, int hdrlen)
+{
+	return 1;
+}
+
+/*
+ * Byte-at-a-time CRC-32 lookup table (reflected polynomial 0xedb88320,
+ * as used by IEEE 802.3); used to compute and verify the WEP ICV.
+ */
+static const uint32_t crc32_table[256] = {
+	0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
+	0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
+	0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+	0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+	0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+	0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+	0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+	0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+	0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+	0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+	0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+	0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+	0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+	0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
+	0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
+	0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
+	0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+	0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+	0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+	0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+	0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+	0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+	0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+	0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+	0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+	0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+	0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
+	0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
+	0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
+	0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+	0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+	0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+	0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+	0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+	0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+	0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+	0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+	0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+	0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+	0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
+	0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
+	0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+	0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+	0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+	0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+	0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+	0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+	0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+	0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+	0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+	0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+	0x2d02ef8dL
+};
+
+/*
+ * Software WEP encrypt: build the RC4 key from the 3-byte IV (already
+ * written into the frame by wep_encap) concatenated with the WEP key,
+ * encrypt the payload in place across the buffer chain, and append the
+ * encrypted CRC-32 (ICV) to the final buffer.  Returns 1 on success,
+ * 0 on short data or missing tailroom.
+ */
+static int
+wep_encrypt(struct ieee80211_key *key, struct sk_buff *skb0, int hdrlen)
+{
+#define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0)
+	struct wep_ctx *ctx = key->wk_private;
+	struct ieee80211vap *vap = ctx->wc_vap;
+	struct sk_buff *skb = skb0;
+	u_int8_t rc4key[IEEE80211_WEP_IVLEN + IEEE80211_KEYBUF_SIZE];
+	uint8_t *icv;
+	uint32_t i, j, k, crc;
+	size_t buflen, data_len;
+	uint8_t S[256];
+	uint8_t *pos;
+	u_int off, keylen;
+
+	vap->iv_stats.is_crypto_wep++;
+
+	/* NB: this assumes the header was pulled up */
+	memcpy(rc4key, skb->data + hdrlen, IEEE80211_WEP_IVLEN);
+	memcpy(rc4key + IEEE80211_WEP_IVLEN, key->wk_key, key->wk_keylen);
+
+	/* Setup RC4 state */
+	for (i = 0; i < 256; i++)
+		S[i] = i;
+	j = 0;
+	keylen = key->wk_keylen + IEEE80211_WEP_IVLEN;
+	for (i = 0; i < 256; i++) {
+		j = (j + S[i] + rc4key[i % keylen]) & 0xff;
+		S_SWAP(i, j);
+	}
+
+	off = hdrlen + wep.ic_header;
+	data_len = skb->len - off;
+
+	/* Compute CRC32 over unencrypted data and apply RC4 to data */
+	crc = ~0;
+	i = j = 0;
+	pos = skb->data + off;
+	buflen = skb->len - off;
+	for (;;) {
+		/* Clamp to the payload bytes remaining overall. */
+		if (buflen > data_len)
+			buflen = data_len;
+		data_len -= buflen;
+		for (k = 0; k < buflen; k++) {
+			crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+			i = (i + 1) & 0xff;
+			j = (j + S[i]) & 0xff;
+			S_SWAP(i, j);
+			*pos++ ^= S[(S[i] + S[j]) & 0xff];
+		}
+		if (skb->next == NULL) {
+			if (data_len != 0) {		/* out of data */
+#ifdef IEEE80211_DEBUG
+				const struct ieee80211_frame *wh =
+				    (const struct ieee80211_frame *) skb0->data;
+#endif
+				/* NB: 'wh' exists only under IEEE80211_DEBUG;
+				 * presumably IEEE80211_NOTE_MAC compiles away
+				 * otherwise — confirm against its definition. */
+				IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+					wh->i_addr2,
+					"out of data for WEP (data_len %lu)",
+					(unsigned long) data_len);
+				return 0;
+			}
+			break;
+		}
+		skb = skb->next;
+		pos = skb->data;
+		buflen = skb->len;
+	}
+	crc = ~crc;
+
+	if (skb_tailroom(skb) < wep.ic_trailer) {
+#ifdef IEEE80211_DEBUG
+		const struct ieee80211_frame *wh =
+			(const struct ieee80211_frame *) skb0->data;
+#endif
+		/* NB: should not happen */
+		IEEE80211_NOTE_MAC(ctx->wc_vap, IEEE80211_MSG_CRYPTO,
+			wh->i_addr1, "no room for %s ICV, tailroom %u",
+			wep.ic_name, skb_tailroom(skb));
+		/* XXX statistic */
+		return 0;
+	}
+	/* Append little-endian CRC32 and encrypt it to produce ICV */
+	icv = skb_put(skb, IEEE80211_WEP_CRCLEN);
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+	for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) {
+		i = (i + 1) & 0xff;
+		j = (j + S[i]) & 0xff;
+		S_SWAP(i, j);
+		icv[k] ^= S[(S[i] + S[j]) & 0xff];
+	}
+	return 1;
+#undef S_SWAP
+}
+
+/*
+ * Software WEP decrypt: build the RC4 key from the received 3-byte IV
+ * concatenated with the WEP key, decrypt the payload in place across
+ * the buffer chain, and verify the trailing ICV against the CRC-32 of
+ * the recovered plaintext.  Returns 1 on success, 0 on short data or
+ * ICV mismatch (frame should be dropped).
+ */
+static int
+wep_decrypt(struct ieee80211_key *key, struct sk_buff *skb0, int hdrlen)
+{
+#define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0)
+	struct wep_ctx *ctx = key->wk_private;
+	struct ieee80211vap *vap = ctx->wc_vap;
+	struct sk_buff *skb = skb0;
+	u_int8_t rc4key[IEEE80211_WEP_IVLEN + IEEE80211_KEYBUF_SIZE];
+	uint8_t icv[IEEE80211_WEP_CRCLEN];
+	uint32_t i, j, k, crc;
+	size_t buflen, data_len;
+	uint8_t S[256];
+	uint8_t *pos;
+	u_int off, keylen;
+
+	vap->iv_stats.is_crypto_wep++;
+
+	/* NB: this assumes the header was pulled up */
+	memcpy(rc4key, skb->data + hdrlen, IEEE80211_WEP_IVLEN);
+	memcpy(rc4key + IEEE80211_WEP_IVLEN, key->wk_key, key->wk_keylen);
+
+	/* Setup RC4 state */
+	for (i = 0; i < 256; i++)
+		S[i] = i;
+	j = 0;
+	keylen = key->wk_keylen + IEEE80211_WEP_IVLEN;
+	for (i = 0; i < 256; i++) {
+		j = (j + S[i] + rc4key[i % keylen]) & 0xff;
+		S_SWAP(i, j);
+	}
+
+	/* Payload length excludes the IV/keyid header and the ICV.
+	 * NB: this statement previously ended with a comma, silently
+	 * chaining into the 'crc = ~0' below via the comma operator;
+	 * the behavior was the same but the code was fragile. */
+	off = hdrlen + wep.ic_header;
+	data_len = skb->len - (off + wep.ic_trailer);
+
+	/* Compute CRC32 over unencrypted data and apply RC4 to data */
+	crc = ~0;
+	i = j = 0;
+	pos = skb->data + off;
+	buflen = skb->len - off;
+	for (;;) {
+		/* Clamp to the payload bytes remaining overall. */
+		if (buflen > data_len)
+			buflen = data_len;
+		data_len -= buflen;
+		for (k = 0; k < buflen; k++) {
+			i = (i + 1) & 0xff;
+			j = (j + S[i]) & 0xff;
+			S_SWAP(i, j);
+			/* decrypt first, then fold plaintext into the CRC */
+			*pos ^= S[(S[i] + S[j]) & 0xff];
+			crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+			pos++;
+		}
+		skb = skb->next;
+		if (skb == NULL) {
+			if (data_len != 0) {		/* out of data */
+#ifdef IEEE80211_DEBUG
+				const struct ieee80211_frame *wh =
+					(const struct ieee80211_frame *) skb0->data;
+#endif
+				/* NB: 'wh' exists only under IEEE80211_DEBUG;
+				 * presumably IEEE80211_NOTE_MAC compiles away
+				 * otherwise — confirm against its definition. */
+				IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+					wh->i_addr2,
+					"out of data for WEP (data_len %lu)",
+					(unsigned long) data_len);
+				return 0;
+			}
+			break;
+		}
+		pos = skb->data;
+		buflen = skb->len;
+	}
+	crc = ~crc;
+
+	/* Encrypt little-endian CRC32 and verify that it matches with
+	 * received ICV */
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+	for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) {
+		i = (i + 1) & 0xff;
+		j = (j + S[i]) & 0xff;
+		S_SWAP(i, j);
+		/* XXX assumes ICV is contiguous in sk_buf */
+		if ((icv[k] ^ S[(S[i] + S[j]) & 0xff]) != *pos++) {
+			/* ICV mismatch - drop frame */
+			return 0;
+		}
+	}
+	return 1;
+#undef S_SWAP
+}
+
+/*
+ * Module glue.
+ */
+
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless support: WEP cipher");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+/* Register the WEP cipher with the net80211 crypto framework. */
+static int __init
+init_crypto_wep(void)
+{
+	ieee80211_crypto_register(&wep);
+	return 0;
+}
+module_init(init_crypto_wep);
+
+/* Unregister the WEP cipher on module unload. */
+static void __exit
+exit_crypto_wep(void)
+{
+	ieee80211_crypto_unregister(&wep);
+}
+module_exit(exit_crypto_wep);
diff --git a/drivers/qtn/wlan/ieee80211_input.c b/drivers/qtn/wlan/ieee80211_input.c
new file mode 100644
index 0000000..250c5d8
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_input.c
@@ -0,0 +1,12471 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_input.c 2610 2007-07-25 15:26:38Z mrenzmann $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 input handling.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/if_vlan.h>
+#include <net/iw_handler.h> /* wireless_send_event(..) */
+#include <linux/wireless.h> /* SIOCGIWTHRSPY */
+#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <linux/jiffies.h>
+
+#include "net80211/if_llc.h"
+#include "net80211/if_ethersubr.h"
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_linux.h"
+#include "net80211/ieee80211_dot11_msg.h"
+#include "net80211/ieee80211_tpc.h"
+#include "net80211/ieee80211_tdls.h"
+#include "net80211/ieee80211_mlme_statistics.h"
+
+#include "qtn/wlan_ioctl.h"
+
+#include "qtn/qtn_global.h"
+#include "qtn_logging.h"
+
+#include <qdrv/qdrv_debug.h>
+#include <qtn/shared_params.h>
+#include <qtn/hardware_revision.h>
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#include <linux/if_bridge.h>
+#include <linux/net/bridge/br_public.h>
+#endif
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+#include "net80211/ieee80211_bsa.h"
+#endif
+
+extern u_int16_t ht_rate_table_20MHz_800[];
+extern u_int16_t ht_rate_table_40MHz_800[];
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Decide if a received management frame should be
+ * printed when debugging is enabled.  This filters some
+ * of the less interesting frames that come frequently
+ * (e.g. beacons).
+ */
+static __inline int
+doprint(struct ieee80211vap *vap, int subtype)
+{
+	switch (subtype) {
+	case IEEE80211_FC0_SUBTYPE_BEACON:
+		return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN);
+	case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+		return (vap->iv_opmode == IEEE80211_M_IBSS);
+	}
+	return 1;
+}
+
+/*
+ * Emit a debug message about discarding a frame or information
+ * element.  One format is for extracting the mac address from
+ * the frame header; the other is for when a header is not
+ * available or otherwise appropriate.
+ */
+#define	IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...) do {		\
+	if ((_vap)->iv_debug & (_m))					\
+		ieee80211_discard_frame(_vap, _wh, _type, _fmt, __VA_ARGS__);\
+} while (0)
+#define	IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...) do {	\
+	if ((_vap)->iv_debug & (_m))					\
+		ieee80211_discard_ie(_vap, _wh, _type, _fmt, __VA_ARGS__);\
+} while (0)
+#define	IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...) do {	\
+	if ((_vap)->iv_debug & (_m))					\
+		ieee80211_discard_mac(_vap, _mac, _type, _fmt, __VA_ARGS__);\
+} while (0)
+
+static const u_int8_t *ieee80211_getbssid(struct ieee80211vap *,
+	const struct ieee80211_frame *);
+static void ieee80211_discard_frame(struct ieee80211vap *,
+	const struct ieee80211_frame *, const char *, const char *, ...);
+static void ieee80211_discard_ie(struct ieee80211vap *,
+	const struct ieee80211_frame *, const char *, const char *, ...);
+static void ieee80211_discard_mac(struct ieee80211vap *,
+	const u_int8_t mac[IEEE80211_ADDR_LEN], const char *,
+	const char *, ...);
+#else
+#define	IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...)
+#define	IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...)
+#define	IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...)
+#endif /* IEEE80211_DEBUG */
+
+static struct sk_buff *ieee80211_defrag(struct ieee80211_node *,
+	struct sk_buff *, int);
+static void ieee80211_deliver_data(struct ieee80211_node *, struct sk_buff *);
+static struct sk_buff *ieee80211_decap(struct ieee80211vap *,
+	struct sk_buff *, int);
+static void ieee80211_send_error(struct ieee80211_node *, const u_int8_t *,
+	int, int);
+static void ieee80211_recv_pspoll(struct ieee80211_node *, struct sk_buff *);
+static int accept_data_frame(struct ieee80211vap *, struct ieee80211_node *,
+	struct ieee80211_key *, struct sk_buff *, struct ether_header *);
+static void forward_mgmt_to_app(struct ieee80211vap *vap, int subtype, struct sk_buff *skb,
+	struct ieee80211_frame *wh);
+static void forward_mgmt_to_app_for_further_processing(struct ieee80211vap *vap,
+	int subtype, struct sk_buff *skb, struct ieee80211_frame *wh);
+#ifdef USE_HEADERLEN_RESV
+static __be16 ath_eth_type_trans(struct sk_buff *, struct net_device *);
+#endif
+
+static void ieee80211_recv_action_tdls(struct ieee80211_node *ni, struct sk_buff *skb,
+	struct ieee80211_action *ia, int ieee80211_header, int rssi);
+
+static void ieee80211_recv_action_vht(struct ieee80211_node *ni,
+				      struct ieee80211_action *ia,
+				      int subtype,
+				      struct ieee80211_frame *wh,
+				      u_int8_t *frm,
+				      u_int8_t *efrm);
+static void ieee80211_recv_action_wnm(struct ieee80211_node *ni,
+				      struct ieee80211_action *ia,
+				      int subtype,
+				      struct ieee80211_frame *wh,
+				      u_int8_t *efrm);
+
+/**
+ * Given a node and the RSSI value of a just received frame from the node, this
+ * function checks if to raise an iwspy event because we iwspy the node and RSSI
+ * exceeds threshold (if active).
+ *
+ * @param vap: VAP
+ * @param ni: sender node
+ * @param rssi: RSSI value of received frame
+ */
+static void
+iwspy_event(struct ieee80211vap *vap, struct ieee80211_node *ni, u_int rssi)
+{
+	if (vap->iv_spy.thr_low && vap->iv_spy.num && ni && (rssi <
+		vap->iv_spy.thr_low || rssi > vap->iv_spy.thr_high)) {
+		int i;
+		for (i = 0; i < vap->iv_spy.num; i++) {
+			if (IEEE80211_ADDR_EQ(ni->ni_macaddr,
+				&(vap->iv_spy.mac[i * IEEE80211_ADDR_LEN]))) {
+
+				union iwreq_data wrq;
+				struct iw_thrspy thr;
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+					"%s: we spy %s, threshold is active "
+					"and rssi exceeds it -> raise an iwspy"
+					" event\n", __func__, ether_sprintf(
+					 ni->ni_macaddr));
+				memset(&wrq, 0, sizeof(wrq));
+				wrq.data.length = 1;
+				memset(&thr, 0, sizeof(struct iw_thrspy));
+				memcpy(thr.addr.sa_data, ni->ni_macaddr,
+					IEEE80211_ADDR_LEN);
+				thr.addr.sa_family = ARPHRD_ETHER;
+				set_quality(&thr.qual, rssi, vap->iv_ic->ic_channoise);
+				set_quality(&thr.low, vap->iv_spy.thr_low, vap->iv_ic->ic_channoise);
+				set_quality(&thr.high, vap->iv_spy.thr_high, vap->iv_ic->ic_channoise);
+				wireless_send_event(vap->iv_dev,
+					SIOCGIWTHRSPY, &wrq, (char*) &thr);
+				break;
+			}
+		}
+	}
+}
+
+static inline int
+ieee80211_tdls_status_mismatch(struct ieee80211_node *ni)
+{
+	if (IEEE80211_NODE_IS_TDLS_INACTIVE(ni) ||
+			IEEE80211_NODE_IS_TDLS_IDLE(ni))
+		return 1;
+
+	return 0;
+}
+
+int ieee80211_tdls_tqe_path_check(struct ieee80211_node *ni,
+	struct sk_buff *skb, int rssi, uint16_t ether_type)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_action *ia;
+	uint8_t *payload_type;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+
+	if (ether_type == __constant_htons(ETHERTYPE_80211MGT)) {
+		payload_type = (uint8_t*)(eh + 1);
+		if ( *payload_type == IEEE80211_SNAP_TYPE_TDLS) {
+			if (vap->iv_opmode == IEEE80211_M_STA) {
+				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+						"TDLS %s: got 802.11 management over data, type=%u ptr=%p (%p)\n",
+						__func__, *payload_type, payload_type, eh);
+				ia = (struct ieee80211_action *)(payload_type + 1);
+				ieee80211_recv_action_tdls(ni, skb, ia, 0, rssi);
+			}
+
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP && (vap->hs20_enable || g_l2_ext_filter)) {
+				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+						"%s Dropping TDLS frame due to HS2.0 enabled\n", __func__);
+				return 1;
+			}
+		} else {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: unsupported type %u\n",
+				__func__, *payload_type);
+			vap->iv_stats.is_rx_mgtdiscard++;
+		}
+	} else if (ieee80211_tdls_status_mismatch(ni)) {
+		enum ieee80211_tdls_operation operation;
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: data not allowed before tdls link is ready, peer status: %u\n",
+				__func__, ni->tdls_status);
+		vap->iv_stats.is_rx_tdls_stsmismatch++;
+		operation = IEEE80211_TDLS_TEARDOWN;
+		ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation);
+
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_tdls_tqe_path_check);
+
+static int ieee80211_action_frame_check(struct ieee80211vap *vap,
+	struct sk_buff *skb, struct llc *llc, int min_len)
+{
+	int ret = 0;
+	if ((vap->iv_opmode == IEEE80211_M_STA) &&
+		(skb->len >= min_len) &&
+		(llc->llc_dsap == LLC_SNAP_LSAP) &&
+		(llc->llc_ssap == LLC_SNAP_LSAP) &&
+		(llc->llc_control == LLC_UI) &&
+		(llc->llc_snap.org_code[0] == 0) &&
+		(llc->llc_snap.org_code[1] == 0) &&
+		(llc->llc_snap.org_code[2] == 0) &&
+		(llc->llc_un.type_snap.ether_type ==
+			htons(ETHERTYPE_80211MGT))) {
+			ret = 1;
+
+	}
+	return ret;
+}
+
+static void ieee80211_tdls_mailbox_path_check(struct ieee80211_node *ni,
+	struct sk_buff *skb, struct llc *llc, int rssi, int min_len)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_action *ia;
+	uint8_t *payload_type;
+
+	min_len += sizeof(*payload_type) + sizeof(*ia);
+	if (ieee80211_action_frame_check(vap, skb, llc, min_len)) {
+		if (unlikely(ieee80211_msg(vap, IEEE80211_MSG_TDLS) &&
+				ieee80211_tdls_msg(vap, IEEE80211_TDLS_MSG_DBG))) {
+			ieee80211_dump_pkt(vap->iv_ic, skb->data, min_len, -1, rssi);
+		}
+		payload_type = (u_int8_t *)llc + LLC_SNAPFRAMELEN;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: got 802.11 management over data, type=%u ptr=%p (%p)\n",
+			__func__, *payload_type, payload_type, llc);
+
+		if (*payload_type == IEEE80211_SNAP_TYPE_TDLS) {
+			ia = (struct ieee80211_action *)(payload_type + 1);
+			ieee80211_recv_action_tdls(ni, skb, ia, 1, rssi);
+		} else {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: unsupported type %u\n",
+				__func__, *payload_type);
+			vap->iv_stats.is_rx_mgtdiscard++;
+		}
+	} else if (ieee80211_tdls_status_mismatch(ni)) {
+		enum ieee80211_tdls_operation operation;
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: data not allowed before tdls link is ready, peer status: %u\n",
+				__func__, ni->tdls_status);
+		vap->iv_stats.is_rx_tdls_stsmismatch++;
+		operation = IEEE80211_TDLS_TEARDOWN;
+		ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation);
+	}
+}
+
+static int
+ieee80211_is_tdls_disc_resp(struct sk_buff *skb, int hdrlen)
+{
+	struct ieee80211_action *ia = (struct ieee80211_action *)(skb->data + hdrlen);
+
+	if (skb->len < (hdrlen + sizeof(struct ieee80211_action)))
+		return 0;
+
+	if ((ia->ia_category == IEEE80211_ACTION_CAT_PUBLIC) &&
+			(ia->ia_action == IEEE80211_ACTION_PUB_TDLS_DISC_RESP))
+		return 1;
+	else
+		return 0;
+}
+
+
+static int
+ieee80211_is_tdls_action_frame(struct sk_buff *skb, int hdrlen)
+{
+	static const uint8_t snap_e_header_pref[] = {LLC_SNAP_LSAP, LLC_SNAP_LSAP, LLC_UI, 0x00, 0x00};
+	uint8_t *data = &skb->data[hdrlen];
+	uint16_t ether_type = get_unaligned((uint16_t*)&data[6]);
+	int32_t snap_encap_pref = !memcmp(data, snap_e_header_pref, sizeof(snap_e_header_pref));
+
+	return (snap_encap_pref && (ether_type == htons(ETHERTYPE_80211MGT)));
+}
+
+static __inline int
+ieee80211_tdls_frame_should_accept(struct sk_buff *skb, int type, int hdrlen)
+{
+	return (type == IEEE80211_FC0_TYPE_DATA && ieee80211_is_tdls_action_frame(skb, hdrlen)) ||
+			(type == IEEE80211_FC0_TYPE_MGT && ieee80211_is_tdls_disc_resp(skb, hdrlen));
+}
+
+static int ieee80211_input_should_drop(struct ieee80211_node *ni, uint8_t *bssid,
+					struct ieee80211_frame *wh, uint8_t type,
+					uint8_t subtype, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+
+	if (dir == IEEE80211_FC1_DIR_DSTODS)
+		return 0;
+
+#ifdef QTN_BG_SCAN
+	if ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) &&
+			(type == IEEE80211_FC0_TYPE_MGT) &&
+			(subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
+				subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
+		return 0;
+	}
+#endif
+
+	if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR)
+		return 0;
+	if ((type != IEEE80211_FC0_TYPE_CTL) && vap->tdls_over_qhop_en
+			&& ieee80211_tdls_frame_should_accept(skb, type, ieee80211_hdrspace(ic, wh)))
+	      return 0;
+
+	/* PS-POLL frame in State 1 */
+	if (IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_myaddr) &&
+			(subtype == IEEE80211_FC0_SUBTYPE_PS_POLL)) {
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+			bssid, NULL, "%s", "ps-poll in unauth state");
+
+		vap->iv_stats.is_rx_ps_unauth++;
+
+		ieee80211_send_error(ni, wh->i_addr2,
+				IEEE80211_FC0_SUBTYPE_DEAUTH,
+				IEEE80211_REASON_NOT_AUTHED);
+		return 1;
+	}
+
+	/* Packet from unknown source - send deauth. */
+	if (ni == vap->iv_bss && !ieee80211_is_bcst(wh->i_addr1)) {
+		if (type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_DEAUTH) {
+			/*
+			 * Corner case
+			 * AP may have changed mode to STA but we are still unconscious.
+			 * If Deauthentication frames from AP are dropped here, we have no chance
+			 * to disconnect with AP.
+			 */
+			if (IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid))
+				return 0;
+		}
+
+		/*
+		 * intended for Repeater AP but slip into STA interface
+		 * sliently discard
+		 */
+		if (vap->iv_opmode != IEEE80211_M_STA ||
+				type != IEEE80211_FC0_TYPE_MGT ||
+				(subtype != IEEE80211_FC0_SUBTYPE_PROBE_REQ &&
+#if defined(PLATFORM_QFDR)
+				subtype != IEEE80211_FC0_SUBTYPE_AUTH &&
+				subtype != IEEE80211_FC0_SUBTYPE_DEAUTH &&
+#endif
+				subtype != IEEE80211_FC0_SUBTYPE_ASSOC_REQ)) {
+			ieee80211_send_error(ni, wh->i_addr2,
+					IEEE80211_FC0_SUBTYPE_DEAUTH,
+					IEEE80211_REASON_NOT_AUTHED);
+		}
+	}
+
+	IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+		bssid, NULL, "not from bss %pM", ni->ni_bssid);
+	vap->iv_stats.is_rx_wrongbss++;
+
+	return 1;
+}
+
+void ieee80211_update_current_mode(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+
+	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
+		if (IEEE80211_NODE_IS_VHT(ni)) {
+			ni->ni_wifi_mode = IEEE80211_WIFI_MODE_AC;
+		} else if (IEEE80211_NODE_IS_HT(ni)) {
+			ni->ni_wifi_mode = IEEE80211_WIFI_MODE_NA;
+		} else {
+			ni->ni_wifi_mode = IEEE80211_WIFI_MODE_A;
+		}
+	} else {
+		if (IEEE80211_NODE_IS_HT(ni)) {
+			ni->ni_wifi_mode = IEEE80211_WIFI_MODE_NG;
+		} else {
+			/* Check the last rate since the list was sorted */
+			if ((ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates - 1]
+				& IEEE80211_RATE_VAL) > IEEE80211_RATE_11MBPS) {
+				ni->ni_wifi_mode = IEEE80211_WIFI_MODE_G;
+			} else {
+				ni->ni_wifi_mode = IEEE80211_WIFI_MODE_B;
+			}
+		}
+	}
+}
+
+static int ieee80211_input_pmf_should_drop(struct ieee80211vap *vap,
+				struct ieee80211_node *ni, struct ieee80211_frame *wh,
+				struct sk_buff *skb, u_int8_t subtype)
+{
+	if (!ni->ni_associd || !RSN_IS_MFP(ni->ni_rsn.rsn_caps))
+		return 0;
+
+	if (wh->i_fc[1] & IEEE80211_FC1_PROT) {
+		wh->i_fc[1] &= ~IEEE80211_FC1_PROT;
+		return 0;
+	}
+	if ((vap->iv_opmode == IEEE80211_M_STA)) {
+		if (!ni->ni_sa_query_timeout &&
+			(subtype == IEEE80211_FC0_SUBTYPE_DEAUTH ||
+				subtype == IEEE80211_FC0_SUBTYPE_DISASSOC)) {
+			if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+				forward_mgmt_to_app(vap, subtype, skb, wh);
+				return 1;
+			} else {
+				ieee80211_send_sa_query(ni, IEEE80211_ACTION_W_SA_QUERY_REQ,
+							++ni->ni_sa_query_tid);
+				return 1;
+			}
+		}
+	}
+	if ((subtype == IEEE80211_FC0_SUBTYPE_AUTH) &&
+			ieee80211_node_is_authorized(ni)) {
+		ieee80211_send_sa_query(ni, IEEE80211_ACTION_W_SA_QUERY_REQ,
+					++ni->ni_sa_query_tid);
+		return 1;
+	}
+	if (ieee80211_mgmt_is_robust(wh))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Process a received frame.  The node associated with the sender
+ * should be supplied.  If nothing was found in the node table then
+ * the caller is assumed to supply a reference to ic_bss instead.
+ * The RSSI and a timestamp are also supplied.  The RSSI data is used
+ * during AP scanning to select an AP to associate with; it can have
+ * any units so long as values have consistent units and higher values
+ * mean ``better signal''.  The receive timestamp is currently not used
+ * by the 802.11 layer.
+ *
+ * Context: softIRQ (tasklet)
+ */
+int
+ieee80211_input(struct ieee80211_node *ni,
+	struct sk_buff *skb, int rssi, u_int32_t rstamp)
+{
+#define	HAS_SEQ(type)	((type & 0x4) == 0)
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct net_device *dev = vap->iv_dev;
+	struct ieee80211_frame *wh;
+	struct ieee80211_key *key;
+	struct ether_header *eh;
+	struct llc *llc;
+	int hdrspace;
+	u_int8_t dir, type = -1, subtype;
+	u_int8_t *bssid;
+	u_int16_t rxseq;
+	/* Variable to track whether the node inactive timer should be reset */
+	int node_reference_held = 0;
+	struct qtn_wds_ext_event_data extender_event_data;
+
+	KASSERT(ni != NULL, ("null node"));
+
+	KASSERT(skb->len >= sizeof(struct ieee80211_frame_min),
+		("frame length too short: %u", skb->len));
+
+	/* XXX adjust device in sk_buff? */
+
+	type = -1;			/* undefined */
+	/*
+	 * In monitor mode, send everything directly to bpf.
+	 * Also do not process frames w/o i_addr2 any further.
+	 * XXX may want to include the CRC
+	 */
+	if (vap->iv_opmode == IEEE80211_M_MONITOR)
+		goto out;
+
+	if (skb->len < sizeof(struct ieee80211_frame_min)) {
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+			ni->ni_macaddr, NULL,
+			"too short (1): len %u", skb->len);
+		vap->iv_stats.is_rx_tooshort++;
+		goto out;
+	}
+
+	if ((vap->iv_opmode != IEEE80211_M_STA) || IEEE80211_NODE_IS_TDLS_ACTIVE(ni))
+		ni->ni_inact = ni->ni_inact_reload;
+
+	/*
+	 * Bit of a cheat here, we use a pointer for a 3-address
+	 * frame format but don't reference fields past outside
+	 * ieee80211_frame_min w/o first validating the data is
+	 * present.
+	 */
+	wh = (struct ieee80211_frame *)skb->data;
+
+	if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
+	    IEEE80211_FC0_VERSION_0) {
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+			ni->ni_macaddr, NULL, "wrong version %x", wh->i_fc[0]);
+		vap->iv_stats.is_rx_badversion++;
+		goto err;
+	}
+
+	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+	if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			if (dir == IEEE80211_FC1_DIR_NODS)
+				bssid = wh->i_addr3;
+			else
+				bssid = wh->i_addr2;
+			if (!IEEE80211_ADDR_EQ(bssid, ni->ni_bssid)) {
+				if (ieee80211_input_should_drop(ni, bssid, wh, type, subtype, skb)) {
+					goto out;
+				}
+			}
+			iwspy_event(vap, ni, rssi);
+			break;
+		case IEEE80211_M_IBSS:
+		case IEEE80211_M_AHDEMO:
+			if (dir != IEEE80211_FC1_DIR_NODS)
+				bssid = wh->i_addr1;
+			else if (type == IEEE80211_FC0_TYPE_CTL)
+				bssid = wh->i_addr1;
+			else {
+				if (skb->len < sizeof(struct ieee80211_frame)) {
+					IEEE80211_DISCARD_MAC(vap,
+						IEEE80211_MSG_INPUT, ni->ni_macaddr,
+						NULL, "too short (2): len %u",
+						skb->len);
+					vap->iv_stats.is_rx_tooshort++;
+					goto out;
+				}
+				bssid = wh->i_addr3;
+			}
+			/* Do not try to find a node reference if the packet really did come from the BSS */
+			if (type == IEEE80211_FC0_TYPE_DATA && ni == vap->iv_bss &&
+					!IEEE80211_ADDR_EQ(vap->iv_bss->ni_macaddr, wh->i_addr2)) {
+				/* Try to find sender in local node table. */
+				ni = ieee80211_find_node(vap->iv_bss->ni_table, wh->i_addr2);
+				if (ni == NULL) {
+					/*
+					 * Fake up a node for this newly discovered
+					 * member of the IBSS.  This should probably be
+					 * done after an ACL check.
+					 */
+					ni = ieee80211_fakeup_adhoc_node(vap,
+							wh->i_addr2);
+					if (ni == NULL) {
+						/* NB: stat kept for alloc failure */
+						goto err;
+					}
+				}
+				node_reference_held = 1;
+			}
+			iwspy_event(vap, ni, rssi);
+			break;
+		case IEEE80211_M_HOSTAP:
+			if (dir != IEEE80211_FC1_DIR_NODS)
+				bssid = wh->i_addr1;
+			else if (type == IEEE80211_FC0_TYPE_CTL)
+				bssid = wh->i_addr1;
+			else {
+				if (skb->len < sizeof(struct ieee80211_frame)) {
+					IEEE80211_DISCARD_MAC(vap,
+						IEEE80211_MSG_INPUT, ni->ni_macaddr,
+						NULL, "too short (2): len %u",
+						skb->len);
+					vap->iv_stats.is_rx_tooshort++;
+					goto out;
+				}
+				bssid = wh->i_addr3;
+			}
+
+			/*
+			 * Validate the bssid.
+			 */
+			if (!IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) &&
+			    !IEEE80211_ADDR_EQ(bssid, dev->broadcast)) {
+				/* It can be a beacon from other network. Required for certification. */
+				vap->iv_stats.is_rx_wrongbss++;
+				if (!((type == IEEE80211_FC0_TYPE_MGT) && ((subtype == IEEE80211_FC0_SUBTYPE_BEACON)
+#ifdef QTN_BG_SCAN
+						|| ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+								&& (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP))
+#endif /* QTN_BG_SCAN */
+				))) {
+					IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+						bssid, NULL, "%s %02X %02X", "not to bss", type, subtype);
+					goto out;
+				}
+			}
+			break;
+		case IEEE80211_M_WDS:
+			if (skb->len < sizeof(struct ieee80211_frame_addr4)) {
+				IEEE80211_DISCARD_MAC(vap,
+					IEEE80211_MSG_INPUT, ni->ni_macaddr,
+					NULL, "too short (3): len %u",
+					skb->len);
+				vap->iv_stats.is_rx_tooshort++;
+				goto out;
+			}
+			bssid = wh->i_addr1;
+			if (!IEEE80211_ADDR_EQ(bssid, vap->iv_myaddr) &&
+			    !IEEE80211_ADDR_EQ(bssid, dev->broadcast)) {
+				/* not interested in */
+				IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+					bssid, NULL, "%s", "not to bss");
+				vap->iv_stats.is_rx_wrongbss++;
+				goto out;
+			}
+			if (!IEEE80211_ADDR_EQ(wh->i_addr2, vap->wds_mac)) {
+				/* not interested in */
+				IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+					wh->i_addr2, NULL, "%s", "not from DS");
+				vap->iv_stats.is_rx_wrongbss++;
+				goto out;
+			}
+			break;
+		default:
+			/* XXX catch bad values */
+			goto out;
+		}
+		ni->ni_rstamp = rstamp;
+		ni->ni_last_rx = jiffies;
+		if (HAS_SEQ(type)) {
+			u_int8_t tid;
+			if (IEEE80211_QOS_HAS_SEQ(wh)) {
+				tid = ((struct ieee80211_qosframe *)wh)->
+					i_qos[0] & IEEE80211_QOS_TID;
+				if (TID_TO_WME_AC(tid) >= WME_AC_VI)
+					ic->ic_wme.wme_hipri_traffic++;
+				tid++;
+			} else
+				tid = 0;
+			rxseq = le16toh(*(__le16 *)wh->i_seq);
+			if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+				IEEE80211_SEQ_EQ(rxseq, ni->ni_rxseqs[tid]) &&
+				!((type == IEEE80211_FC0_TYPE_MGT) &&
+				    (subtype == IEEE80211_FC0_SUBTYPE_AUTH))
+#ifdef QTN_BG_SCAN
+				&& !((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+					&& (type == IEEE80211_FC0_TYPE_MGT)
+					&& (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP ||
+						subtype == IEEE80211_FC0_SUBTYPE_BEACON))
+#endif /* QTN_BG_SCAN */
+			    ) {
+				/* duplicate, discard */
+				IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+					bssid, "duplicate",
+					"seqno <%u,%u> fragno <%u,%u> tid %u",
+					rxseq >> IEEE80211_SEQ_SEQ_SHIFT,
+					ni->ni_rxseqs[tid] >>
+						IEEE80211_SEQ_SEQ_SHIFT,
+					rxseq & IEEE80211_SEQ_FRAG_MASK,
+					ni->ni_rxseqs[tid] &
+						IEEE80211_SEQ_FRAG_MASK,
+					tid);
+				vap->iv_stats.is_rx_dup++;
+				IEEE80211_NODE_STAT(ni, rx_dup);
+				goto out;
+			}
+			ni->ni_rxseqs[tid] = rxseq;
+		}
+		if (node_reference_held) {
+			ieee80211_free_node(ni);
+		}
+	}
+
+	switch (type) {
+	case IEEE80211_FC0_TYPE_DATA:
+		hdrspace = ieee80211_hdrspace(ic, wh);
+		if (skb->len < hdrspace) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "data", "too short: len %u, expecting %u",
+				skb->len, hdrspace);
+			vap->iv_stats.is_rx_tooshort++;
+			goto out;		/* XXX */
+		}
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			if ((dir != IEEE80211_FC1_DIR_FROMDS) &&
+					(dir != IEEE80211_FC1_DIR_NODS) &&
+					(!((vap->iv_flags_ext & IEEE80211_FEXT_WDS) &&
+					(dir == IEEE80211_FC1_DIR_DSTODS)))) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "data", "invalid dir 0x%x", dir);
+				vap->iv_stats.is_rx_wrongdir++;
+				goto out;
+			}
+			if ((dev->flags & IFF_MULTICAST) &&
+			    IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+				if (IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_myaddr)) {
+					/*
+					 * In IEEE802.11 network, multicast packet
+					 * sent from me is broadcasted from AP.
+					 * It should be silently discarded for
+					 * SIMPLEX interface.
+					 *
+					 * NB: Linux has no IFF_ flag to indicate
+					 *     if an interface is SIMPLEX or not;
+					 *     so we always assume it to be true.
+					 */
+					IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+						wh, NULL, "%s", "multicast echo");
+					vap->iv_stats.is_rx_mcastecho++;
+					goto out;
+				}
+				/*
+				 * if it is broadcast by me on behalf of
+				 * a station behind me, drop it.
+				 */
+				if (vap->iv_flags_ext & IEEE80211_FEXT_WDS) {
+					struct ieee80211_node_table *nt;
+					struct ieee80211_node *ni_wds;
+					nt = &ic->ic_sta;
+					ni_wds = ieee80211_find_wds_node(nt, wh->i_addr3);
+					if (ni_wds) {
+						ieee80211_free_node(ni_wds); /* Decr ref count */
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+							wh, NULL, "%s",
+							"multicast echo originated from node behind me");
+						vap->iv_stats.is_rx_mcastecho++;
+						goto out;
+					}
+				}
+			}
+			break;
+		case IEEE80211_M_IBSS:
+		case IEEE80211_M_AHDEMO:
+			if (dir != IEEE80211_FC1_DIR_NODS) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "data", "invalid dir 0x%x", dir);
+				vap->iv_stats.is_rx_wrongdir++;
+				goto out;
+			}
+			/* XXX no power-save support */
+			break;
+		case IEEE80211_M_HOSTAP:
+			/*
+			 * FIXME - QOS Null check added because Quantenna image
+			 * currently doesn't set the to/from DS bits.
+			 */
+			if ((dir != IEEE80211_FC1_DIR_TODS) &&
+			    (dir != IEEE80211_FC1_DIR_DSTODS) &&
+			    (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "data", "invalid dir 0x%x", dir);
+				vap->iv_stats.is_rx_wrongdir++;
+				goto out;
+			}
+			/* check if source STA is associated */
+			if (ni == vap->iv_bss) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "data", "%s", "unknown src");
+				/* NB: caller deals with reference */
+				if (vap->iv_state == IEEE80211_S_RUN) {
+					if ((dir == IEEE80211_FC1_DIR_DSTODS) &&
+						(IEEE80211_IS_MULTICAST(wh->i_addr1))) {
+						/*
+						 * Some 3rd party wds ap sends wds pkts with receiver
+						 * addr as bcast/mcast which will be received by our ap
+						 * and lead to a lot of deauth. But they just ignore our
+						 * deauth frame. To avoid too much deauth messages, We can
+						 * safely ignore them.
+						 */
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+							wh, "data", "%s", "mcast wds pkt");
+					} else {
+						ieee80211_send_error(ni, wh->i_addr2,
+							IEEE80211_FC0_SUBTYPE_DEAUTH,
+							IEEE80211_REASON_NOT_AUTHED);
+					}
+				}
+				vap->iv_stats.is_rx_notassoc++;
+				goto err;
+			}
+			if (ni->ni_associd == 0) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "data", "%s", "unassoc src");
+				IEEE80211_SEND_MGMT(ni,
+					IEEE80211_FC0_SUBTYPE_DISASSOC,
+					IEEE80211_REASON_NOT_ASSOCED);
+				vap->iv_stats.is_rx_notassoc++;
+				goto err;
+			}
+			/*
+			 * If we're a 4 address packet, make sure we have an entry in
+			 * the node table for the packet source address (addr4).
+			 * If not, add one.
+			 */
+			if (dir == IEEE80211_FC1_DIR_DSTODS) {
+				struct ieee80211_node_table *nt;
+				struct ieee80211_frame_addr4 *wh4;
+				struct ieee80211_node *ni_wds;
+				if (!(vap->iv_flags_ext & IEEE80211_FEXT_WDS)) {
+					IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+						wh, "data", "%s", "4 addr not allowed");
+					goto err;
+				}
+				wh4 = (struct ieee80211_frame_addr4 *)skb->data;
+				nt = &ic->ic_sta;
+				ni_wds = ieee80211_find_wds_node(nt, wh4->i_addr4);
+				/* Last call increments ref count if !NULL */
+				if ((ni_wds != NULL) && (ni_wds != ni)) {
+					/*
+					 * node with source address (addr4) moved
+					 * to another WDS capable station.
+					 */
+					 (void) ieee80211_remove_wds_addr(nt, wh4->i_addr4);
+					 ieee80211_add_wds_addr(nt, ni, wh4->i_addr4, 0);
+				}
+				if (ni_wds == NULL)
+					ieee80211_add_wds_addr(nt, ni, wh4->i_addr4, 0);
+				else
+					ieee80211_free_node(ni_wds);
+			}
+
+			/*
+			 * Check for power save state change.
+			 */
+			if (!(ni->ni_flags & IEEE80211_NODE_UAPSD)) {
+				if ((wh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^
+				    (ni->ni_flags & IEEE80211_NODE_PWR_MGT))
+					ieee80211_node_pwrsave(ni, wh->i_fc[1] & IEEE80211_FC1_PWR_MGT);
+			} else if (ni->ni_flags & IEEE80211_NODE_PS_CHANGED) {
+				int pwr_save_changed = 0;
+				IEEE80211_LOCK_IRQ(ic);
+				if ((*(__le16 *)(&wh->i_seq[0])) == ni->ni_pschangeseq) {
+					ni->ni_flags &= ~IEEE80211_NODE_PS_CHANGED;
+					pwr_save_changed = 1;
+				}
+				IEEE80211_UNLOCK_IRQ(ic);
+				if (pwr_save_changed)
+					ieee80211_node_pwrsave(ni, wh->i_fc[1] & IEEE80211_FC1_PWR_MGT);
+			}
+			break;
+		case IEEE80211_M_WDS:
+			if (dir != IEEE80211_FC1_DIR_DSTODS) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "data", "invalid dir 0x%x", dir);
+				vap->iv_stats.is_rx_wrongdir++;
+				goto out;
+			}
+			break;
+		default:
+			/* XXX here to keep compiler happy */
+			goto out;
+		}
+
+		/*
+		 * Handle privacy requirements.  Note that we
+		 * must not be preempted from here until after
+		 * we (potentially) call ieee80211_crypto_demic;
+		 * otherwise we may violate assumptions in the
+		 * crypto cipher modules used to do delayed update
+		 * of replay sequence numbers.
+		 */
+		if (wh->i_fc[1] & IEEE80211_FC1_PROT) {
+			if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+				/*
+				 * Discard encrypted frames when privacy is off.
+				 */
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "WEP", "%s", "PRIVACY off");
+				vap->iv_stats.is_rx_noprivacy++;
+				IEEE80211_NODE_STAT(ni, rx_noprivacy);
+				goto out;
+			}
+			key = ieee80211_crypto_decap(ni, skb, hdrspace);
+			if (key == NULL) {
+				/* NB: stats+msgs handled in crypto_decap */
+				IEEE80211_NODE_STAT(ni, rx_wepfail);
+				//FIXME: This MUST be re-enabled - it could present a security hole.
+				//Needs more thought.
+				//
+				//RK-2009-11-24: this was commented out to allow WPA2 AES fragments
+				//to pass through the slow driver path.
+
+				//goto out;
+			}
+			wh = (struct ieee80211_frame *)skb->data;
+			wh->i_fc[1] &= ~IEEE80211_FC1_PROT;
+		} else
+			key = NULL;
+
+		/*
+		 * Next up, any fragmentation.
+		 */
+		if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+			skb = ieee80211_defrag(ni, skb, hdrspace);
+			if (skb == NULL) {
+				/* Fragment dropped or frame not complete yet */
+				goto out;
+			}
+		}
+		/*
+		 * Next strip any MSDU crypto bits.
+		 */
+		if (key != NULL &&
+		    !ieee80211_crypto_demic(vap, key, skb, hdrspace)) {
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+				ni->ni_macaddr, "data", "%s", "demic error");
+			IEEE80211_NODE_STAT(ni, rx_demicfail);
+			goto out;
+		}
+
+		/* TDLS data encapsulated management frame */
+		llc = (struct llc *) (skb->data + hdrspace);
+		ieee80211_tdls_mailbox_path_check(ni, skb, llc, rssi,
+					hdrspace + LLC_SNAPFRAMELEN);
+
+		/*
+		 * Finally, strip the 802.11 header.
+		 */
+		wh = NULL;		/* no longer valid, catch any uses */
+		skb = ieee80211_decap(vap, skb, hdrspace);
+		if (skb == NULL) {
+			/* don't count Null data frames as errors */
+			if (subtype == IEEE80211_FC0_SUBTYPE_NODATA)
+				goto out;
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+				ni->ni_macaddr, "data", "%s", "decap error");
+			vap->iv_stats.is_rx_decap++;
+			IEEE80211_NODE_STAT(ni, rx_decap);
+			goto err;
+		}
+		eh = (struct ether_header *) skb->data;
+
+		if (! accept_data_frame(vap, ni, key, skb, eh))
+			goto out;
+
+		vap->iv_devstats.rx_packets++;
+		vap->iv_devstats.rx_bytes += skb->len;
+		IEEE80211_NODE_STAT(ni, rx_data);
+		IEEE80211_NODE_STAT_ADD(ni, rx_bytes, skb->len);
+		ic->ic_lastdata = jiffies;
+
+		/* if sub type is NULL DATA or QOS NULL DATA, don't send to linux protocol stack */
+		if ((subtype == IEEE80211_FC0_SUBTYPE_NODATA) || (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT,
+						"%s: NULL or QOS NULL DATA: don't deliver to linux protocol stack\n", __func__);
+			goto out;
+		}
+
+		ieee80211_deliver_data(ni, skb);
+
+		return IEEE80211_FC0_TYPE_DATA;
+
+	case IEEE80211_FC0_TYPE_MGT:
+		/* Only accept action frames and peer beacons for WDS */
+		if (vap->iv_opmode == IEEE80211_M_WDS &&
+				subtype != IEEE80211_FC0_SUBTYPE_ACTION_NOACK &&
+				subtype != IEEE80211_FC0_SUBTYPE_ACTION &&
+				subtype != IEEE80211_FC0_SUBTYPE_BEACON) {
+			struct ieee80211vap *pri_vap = TAILQ_FIRST(&ic->ic_vaps);
+			if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS) &&
+					ieee80211_extender_find_peer_wds_info(ic, wh->i_addr2)) {
+				IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_WARN,
+						"QHop: unexpected frame 0x%x from peer %pM\n",
+						subtype, wh->i_addr2);
+				extender_event_data_prepare(ic, NULL,
+						&extender_event_data,
+						WDS_EXT_LINK_STATUS_UPDATE,
+						wh->i_addr2);
+
+				ieee80211_extender_send_event(pri_vap, &extender_event_data, NULL);
+				ieee80211_extender_remove_peer_wds_info(ic, wh->i_addr2);
+			}
+			vap->iv_stats.is_rx_mgtdiscard++;
+			goto out;
+		}
+		IEEE80211_NODE_STAT(ni, rx_mgmt);
+
+		if (dir != IEEE80211_FC1_DIR_NODS) {
+			vap->iv_stats.is_rx_wrongdir++;
+			goto err;
+		}
+		if (skb->len < sizeof(struct ieee80211_frame)) {
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+				ni->ni_macaddr, "mgt", "too short: len %u",
+				skb->len);
+			vap->iv_stats.is_rx_tooshort++;
+			goto out;
+		}
+#ifdef IEEE80211_DEBUG
+		if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) ||
+		    ieee80211_msg_dumppkts(vap)) {
+			ieee80211_note(vap, "received %s from %s rssi %d\n",
+				ieee80211_mgt_subtype_name[subtype >>
+				IEEE80211_FC0_SUBTYPE_SHIFT],
+				ether_sprintf(wh->i_addr2), rssi);
+		}
+#endif
+
+		if (vap->iv_pmf) {
+			if (ieee80211_input_pmf_should_drop(vap, ni, wh, skb, subtype))
+				goto out;
+		}
+
+		if (wh->i_fc[1] & IEEE80211_FC1_PROT) {
+
+			if (subtype != IEEE80211_FC0_SUBTYPE_AUTH) {
+				/*
+				 * Only shared key auth frames with a challenge
+				 * should be encrypted, discard all others.
+				 */
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, ieee80211_mgt_subtype_name[subtype >>
+					IEEE80211_FC0_SUBTYPE_SHIFT],
+					"%s", "WEP set but not permitted");
+				vap->iv_stats.is_rx_mgtdiscard++; /* XXX */
+				goto out;
+			}
+			if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+				/*
+				 * Discard encrypted frames when privacy is off.
+				 */
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "WEP set but PRIVACY off");
+				vap->iv_stats.is_rx_noprivacy++;
+				goto out;
+			}
+			hdrspace = ieee80211_hdrspace(ic, wh);
+			key = ieee80211_crypto_decap(ni, skb, hdrspace);
+			if (key == NULL) {
+				/* NB: stats+msgs handled in crypto_decap */
+				goto out;
+			}
+			wh = (struct ieee80211_frame *)skb->data;
+			wh->i_fc[1] &= ~IEEE80211_FC1_PROT;
+		}
+		ic->ic_recv_mgmt(ni, skb, subtype, rssi, rstamp);
+
+		goto out;
+
+	case IEEE80211_FC0_TYPE_CTL: {
+		u_int8_t reason;
+		IEEE80211_NODE_STAT(ni, rx_ctrl);
+		vap->iv_stats.is_rx_ctl++;
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+			if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL)
+				ieee80211_recv_pspoll(ni, skb);
+
+		/*if a sta receive a PS-POLL, a deauth should be sent*/
+		if (vap->iv_opmode == IEEE80211_M_STA &&
+		    subtype == IEEE80211_FC0_SUBTYPE_PS_POLL &&
+		    vap->iv_state < IEEE80211_S_RUN) {
+			if (vap->iv_state <= IEEE80211_S_AUTH) {
+				reason = IEEE80211_REASON_NOT_AUTHED;
+			} else {
+				reason = IEEE80211_REASON_NOT_ASSOCED;
+			}
+
+			IEEE80211_DISCARD(vap,
+				IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG,
+				wh, "receive ps-poll", "state-%d, send deauth",
+				((reason == IEEE80211_REASON_NOT_AUTHED) ? 1:2));
+
+			vap->iv_stats.is_ps_unassoc++;
+			ieee80211_send_error(ni, wh->i_addr2,
+					IEEE80211_FC0_SUBTYPE_DEAUTH, reason);
+		}
+
+		goto out;
+	}
+
+	default:
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, NULL, "bad frame type 0x%x", type);
+		/* should not come here */
+		break;
+	}
+err:
+	vap->iv_devstats.rx_errors++;
+out:
+
+	if (skb != NULL)
+		dev_kfree_skb(skb);
+	return type;
+#undef HAS_SEQ
+}
+EXPORT_SYMBOL(ieee80211_input);
+
+
+/*
+ * Determines whether a frame should be accepted, based on information
+ * about the frame's origin and encryption, and policy for this vap.
+ */
+static int accept_data_frame(struct ieee80211vap *vap,
+			struct ieee80211_node *ni, struct ieee80211_key *key,
+			struct sk_buff *skb, struct ether_header *eh)
+{
+#define IS_EAPOL(eh) ((eh)->ether_type == __constant_htons(ETH_P_PAE))
+#define PAIRWISE_SET(vap) ((vap)->iv_nw_keys[0].wk_cipher != &ieee80211_cipher_none)
+	if (IS_EAPOL(eh)) {
+		/*
+		 * EAPOL frames are accepted when encrypted, or when any
+		 * policy knob permits cleartext EAPOL:
+		 *  - no pairwise key has been plumbed yet,
+		 *  - the vap is configured to accept unencrypted EAPOL,
+		 *  - unencrypted frames are accepted in general.
+		 * The checks run in the same order as the policy above.
+		 */
+		if (key != NULL ||
+		    !PAIRWISE_SET(vap) ||
+		    !IEEE80211_VAP_DROPUNENC_EAPOL(vap) ||
+		    !(vap->iv_flags & IEEE80211_F_DROPUNENC))
+			return 1;
+		/* Policy forbids this EAPOL frame - account and reject. */
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+			eh->ether_shost, "data",
+			"unauthorized port: ether type 0x%x len %u",
+			ntohs(eh->ether_type), skb->len);
+		vap->iv_stats.is_rx_unauth++;
+		vap->iv_devstats.rx_errors++;
+		IEEE80211_NODE_STAT(ni, rx_unauth);
+		return 0;
+	}
+
+	/*
+	 * Any non-EAPOL traffic requires an authorized port.  For
+	 * open/shared-key authentication the port is marked authorized
+	 * when authentication completes; for 802.1x the authenticator
+	 * marks it authorized only after the handshake has finished.
+	 */
+	if (!ieee80211_node_is_authorized(ni)) {
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+			eh->ether_shost, "data",
+			"unauthorized port: ether type 0x%x len %u",
+			ntohs(eh->ether_type), skb->len);
+		vap->iv_stats.is_rx_unauth++;
+		vap->iv_devstats.rx_errors++;
+		IEEE80211_NODE_STAT(ni, rx_unauth);
+		return 0;
+	}
+
+	return 1;
+#undef IS_EAPOL
+#undef PAIRWISE_SET
+}
+
+/*
+ * Context: softIRQ (tasklet)
+ */
+/*
+ * Dispatch one received frame to every non-WDS vap on this device.
+ * Every vap except the last processed one gets its own skb_copy();
+ * the last vap consumes the original skb, so the caller must not
+ * touch @skb after this returns.  Returns the frame type reported by
+ * the last ieee80211_input() call, or -1 if no vap took the frame.
+ */
+int
+ieee80211_input_all(struct ieee80211com *ic,
+	struct sk_buff *skb, int rssi, u_int32_t rstamp)
+{
+	struct ieee80211vap *vap;
+	int type = -1;
+	struct sk_buff *skb1;
+	struct ieee80211_node *ni;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode == IEEE80211_M_WDS) {
+			/* Discard input from non-peer */
+			continue;
+		}
+
+		if (TAILQ_NEXT(vap, iv_next) != NULL) {
+			/* More vaps follow: give this one a private copy. */
+			skb1 = skb_copy(skb, GFP_ATOMIC);
+			if (skb1 == NULL) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT,
+					"%s: SKB copy failed\n", __func__);
+				continue;
+			}
+		} else {
+			/* Last vap: hand over the original skb. */
+			skb1 = skb;
+			skb = NULL;
+		}
+
+		/* Hold a reference on the bss node across input processing. */
+		ni = vap->iv_bss;
+		ieee80211_ref_node(ni);
+		type = ieee80211_input(ni, skb1, rssi, rstamp);
+		ieee80211_free_node(ni);
+	}
+
+	/* No more vaps, reclaim skb */
+	if (skb != NULL)
+		dev_kfree_skb(skb);
+
+	return type;
+}
+EXPORT_SYMBOL(ieee80211_input_all);
+
+/*
+ * This function reassemble fragments using the skb of the 1st fragment,
+ * if large enough. If not, a new skb is allocated to hold incoming
+ * fragments.
+ *
+ * Fragments are copied at the end of the previous fragment.  A different
+ * strategy could have been used, where a non-linear skb is allocated and
+ * fragments attached to that skb.
+ */
+static struct sk_buff *
+ieee80211_defrag(struct ieee80211_node *ni, struct sk_buff *skb, int hdrlen)
+{
+	/*
+	 * Return contract: the fully reassembled frame once the last
+	 * fragment arrives; NULL while more fragments are outstanding
+	 * or on error (in which case the skb has been freed here).
+	 */
+	struct ieee80211_frame *wh = (struct ieee80211_frame *) skb->data;
+	u_int16_t rxseq, last_rxseq;
+	u_int8_t fragno, last_fragno;
+	u_int8_t more_frag = wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
+
+	rxseq = le16_to_cpu(*(__le16 *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
+	fragno = le16_to_cpu(*(__le16 *)wh->i_seq) & IEEE80211_SEQ_FRAG_MASK;
+
+	/* Quick way out, if there's nothing to defragment */
+	if (!more_frag && fragno == 0 && ni->ni_rxfrag == NULL)
+		return skb;
+
+	ni->ni_stats.ns_rx_fragment_pkts++;
+
+	/*
+	 * Remove frag to ensure it doesn't get reaped by timer.
+	 */
+	if (ni->ni_table == NULL) {
+		/*
+		 * Should never happen.  If the node is orphaned (not in
+		 * the table) then input packets should not reach here.
+		 * Otherwise, a concurrent request that yanks the table
+		 * should be blocked by other interlocking and/or by first
+		 * shutting the driver down.  Regardless, be defensive
+		 * here and just bail
+		 */
+		/* XXX need msg+stat */
+		dev_kfree_skb(skb);
+		return NULL;
+	}
+
+	/*
+	 * Use this lock to make sure ni->ni_rxfrag is
+	 * not freed by the timer process while we use it.
+	 * XXX bogus
+	 */
+	IEEE80211_NODE_LOCK_IRQ(ni->ni_table);
+
+	/*
+	 * Update the time stamp.  As a side effect, it
+	 * also makes sure that the timer will not change
+	 * ni->ni_rxfrag for at least 1 second, or in
+	 * other words, for the remaining of this function.
+	 */
+	ni->ni_rxfragstamp = jiffies;
+
+	IEEE80211_NODE_UNLOCK_IRQ(ni->ni_table);
+
+	/*
+	 * Validate that fragment is in order and
+	 * related to the previous ones.
+	 */
+	if (ni->ni_rxfrag) {
+		struct ieee80211_frame *lwh;
+
+		lwh = (struct ieee80211_frame *) ni->ni_rxfrag->data;
+		last_rxseq = le16_to_cpu(*(__le16 *)lwh->i_seq) >>
+			IEEE80211_SEQ_SEQ_SHIFT;
+		last_fragno = le16_to_cpu(*(__le16 *)lwh->i_seq) &
+			IEEE80211_SEQ_FRAG_MASK;
+		if (rxseq != last_rxseq
+		    || fragno != last_fragno + 1
+		    || (!IEEE80211_ADDR_EQ(wh->i_addr1, lwh->i_addr1))
+		    || (!IEEE80211_ADDR_EQ(wh->i_addr2, lwh->i_addr2))
+		    || (ni->ni_rxfrag->end - ni->ni_rxfrag->tail <
+			skb->len)) {
+			/*
+			 * Unrelated fragment or no space for it,
+			 * clear current fragments
+			 */
+			dev_kfree_skb(ni->ni_rxfrag);
+			ni->ni_rxfrag = NULL;
+		}
+	}
+
+	/* If this is the first fragment */
+	if (ni->ni_rxfrag == NULL && fragno == 0) {
+		ni->ni_rxfrag = skb;
+		/* If more frags are coming */
+		if (more_frag) {
+			if (skb_is_nonlinear(skb)) {
+				/*
+				 * We need a continous buffer to
+				 * assemble fragments
+				 */
+				/*
+				 * NOTE(review): skb_copy()/skb_copy_expand()
+				 * below may fail under memory pressure; then
+				 * ni_rxfrag ends up NULL and the whole
+				 * fragment chain is silently dropped.
+				 */
+				ni->ni_rxfrag = skb_copy(skb, GFP_ATOMIC);
+				dev_kfree_skb(skb);
+			}
+			/*
+			 * Check that we have enough space to hold
+			 * incoming fragments
+			 * 1. Don't assume MTU is the RX frame size limit.
+			 * 2. Don't assume original packet starts from skb->head, in case
+			 * kernel reserve some bytes at headroom.
+			 */
+			else if ((skb_end_pointer(skb) - skb->data) <
+				 (IEEE80211_MAX_LEN  + hdrlen)) {
+				ni->ni_rxfrag = skb_copy_expand(skb, 0,
+					(IEEE80211_MAX_LEN + hdrlen - skb->len),
+					GFP_ATOMIC);
+				dev_kfree_skb(skb);
+			}
+		}
+	} else {
+		if (ni->ni_rxfrag) {
+			struct ieee80211_frame *lwh = (struct ieee80211_frame *)
+				ni->ni_rxfrag->data;
+
+			/*
+			 * We know we have enough space to copy,
+			 * we've verified that before
+			 */
+			/* Copy current fragment at end of previous one */
+			memcpy(skb_tail_pointer(ni->ni_rxfrag),
+			       skb->data + hdrlen, skb->len - hdrlen);
+			/* Update tail and length */
+			skb_put(ni->ni_rxfrag, skb->len - hdrlen);
+			/* Keep a copy of last sequence and fragno */
+			*(__le16 *) lwh->i_seq = *(__le16 *) wh->i_seq;
+		}
+		/* we're done with the fragment */
+		dev_kfree_skb(skb);
+	}
+
+	if (more_frag) {
+		/* More to come */
+		skb = NULL;
+	} else {
+		/* Last fragment received, we're done! */
+		skb = ni->ni_rxfrag;
+		ni->ni_rxfrag = NULL;
+	}
+	return skb;
+}
+
+/*
+ * Deliver a decapsulated (802.3-framed) frame to the network stack.
+ * In AP mode the frame may additionally (or instead) be bridged back
+ * out within the same vap; the skb is consumed in all paths.
+ */
+static void
+ieee80211_deliver_data(struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct net_device *dev = vap->iv_dev;
+	struct ether_header *eh = (struct ether_header *) skb->data;
+
+	/*
+	 * External L2 filter: in AP mode divert all frames to the filter
+	 * except EAPOL frames addressed to this vap itself.
+	 */
+	if (unlikely(g_l2_ext_filter)) {
+		if (!skb->ext_l2_filter && vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (!skb->dev)
+				skb->dev = dev;
+
+#ifdef USE_HEADERLEN_RESV
+			skb->protocol = ath_eth_type_trans(skb, skb->dev);
+#else
+			skb->protocol = eth_type_trans(skb, skb->dev);
+#endif
+			if (!(skb->protocol == __constant_htons(ETH_P_PAE) &&
+					IEEE80211_ADDR_EQ(eh->ether_dhost, vap->iv_myaddr))) {
+				vap->iv_ic->ic_send_to_l2_ext_filter(vap, skb);
+				return ;
+			}
+		}
+	}
+
+	/*
+	 * perform as a bridge within the vap
+	 * - intra-vap bridging only
+	 */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
+	    (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0) {
+		struct sk_buff *skb1 = NULL;
+
+		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+			/* Multicast: bridge a copy and also deliver locally. */
+			skb1 = skb_copy(skb, GFP_ATOMIC);
+		} else {
+			/*
+			 * Check if destination is associated with the
+			 * same vap and authorized to receive traffic.
+			 * Beware of traffic destined for the vap itself;
+			 * sending it will not work; just let it be
+			 * delivered normally.
+			 */
+			struct ieee80211_node *ni1 = ieee80211_find_node(
+				&vap->iv_ic->ic_sta, eh->ether_dhost);
+			if (ni1 != NULL) {
+				if (ni1->ni_vap == vap &&
+				    ieee80211_node_is_authorized(ni1) &&
+				    ni1 != vap->iv_bss) {
+					/* Unicast to a peer STA: bridge only. */
+					skb1 = skb;
+					skb = NULL;
+				}
+				/* XXX statistic? */
+				ieee80211_free_node(ni1);
+			}
+		}
+		if (skb1 != NULL) {
+			skb1->dev = dev;
+
+			skb_reset_mac_header(skb1);
+			skb_set_network_header(skb1, sizeof(struct ether_header));
+
+			skb1->protocol = __constant_htons(ETH_P_802_2);
+			/* XXX insert vlan tag before queue it? */
+			dev_queue_xmit(skb1);
+		}
+	}
+
+	/* Local delivery (skb may have been handed to the bridge above). */
+	if (skb != NULL) {
+		if (!skb->dev)
+			skb->dev = dev;
+
+#ifdef USE_HEADERLEN_RESV
+		skb->protocol = ath_eth_type_trans(skb, skb->dev);
+#else
+		skb->protocol = eth_type_trans(skb, skb->dev);
+#endif
+		if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
+		/* TODO: There is no equivalent function in 4.7. For now lets
+		 * just pass this skb to upper layer
+		 */
+#ifdef QTN_ENABLE_BRIDGE
+			/* attach vlan tag */
+			vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
+#else
+			netif_rx(skb);
+#endif
+		} else {
+			netif_rx(skb);
+		}
+		dev->last_rx = jiffies;
+	}
+}
+
+/*
+ * Strip the 802.11 header (hdrlen bytes) and LLC/SNAP encapsulation
+ * from @skb and synthesize an Ethernet header in their place, copying
+ * the source/destination addresses according to the frame's DS bits.
+ * Returns the rewritten skb (possibly a realigned copy), or NULL if a
+ * needed copy failed; the original skb is always consumed.
+ */
+static struct sk_buff *
+ieee80211_decap(struct ieee80211vap *vap, struct sk_buff *skb, int hdrlen)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_qosframe_addr4 wh;	/* Max size address frames */
+	struct ether_header *eh;
+	struct llc *llc;
+	__be16 ether_type = 0;
+
+	memcpy(&wh, skb->data, hdrlen);	/* Only copy hdrlen over */
+	llc = (struct llc *) skb_pull(skb, hdrlen);
+	/* Recognize RFC 1042 SNAP (OUI 00:00:00) and unwrap it. */
+	if (skb->len >= LLC_SNAPFRAMELEN &&
+	    llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP &&
+	    llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 &&
+	    llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0) {
+		ether_type = llc->llc_un.type_snap.ether_type;
+		skb_pull(skb, LLC_SNAPFRAMELEN);
+		llc = NULL;
+	}
+	eh = (struct ether_header *) skb_push(skb, sizeof(struct ether_header));
+	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+	case IEEE80211_FC1_DIR_NODS:
+		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
+		/*
+		 * for TDLS Function, TDLS link with third-party station
+		 * which is 3-address mode.
+		 */
+		ic->ic_bridge_set_dest_addr(skb, (void *)eh);
+		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_TODS:
+		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
+		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_FROMDS:
+		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
+		ic->ic_bridge_set_dest_addr(skb, (void *)eh);
+		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr3);
+		break;
+	case IEEE80211_FC1_DIR_DSTODS:
+		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
+		/*
+		 * for TDLS Function, associate with third-party AP
+		 * which is 3-address mode.
+		 */
+		if (IEEE80211_ADDR_EQ(wh.i_addr1, wh.i_addr3))
+			ic->ic_bridge_set_dest_addr(skb, (void *)eh);
+		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr4);
+		break;
+	}
+	/* Re-copy the frame if the payload ended up misaligned. */
+	if (!ALIGNED_POINTER(skb->data + sizeof(*eh), u_int32_t)) {
+		struct sk_buff *n;
+
+		/* XXX does this always work? */
+		n = skb_copy(skb, GFP_ATOMIC);
+		dev_kfree_skb(skb);
+		if (n == NULL)
+			return NULL;
+		skb = n;
+		eh = (struct ether_header *) skb->data;
+	}
+	/* No SNAP: treat as 802.3 with a length field instead of a type. */
+	if (llc != NULL)
+		eh->ether_type = htons(skb->len - sizeof(*eh));
+	else
+		eh->ether_type = ether_type;
+	return skb;
+}
+
+/*
+ * Parse the supported-rates (and optional extended-rates) elements of
+ * a received frame into the node's rate set.  Always returns 1.
+ */
+int
+ieee80211_parse_rates(struct ieee80211_node *ni,
+	const u_int8_t *rates, const u_int8_t *xrates)
+{
+	struct ieee80211_rateset *rs = &ni->ni_rates;
+	u_int8_t nrates = rates[1];
+
+	memset(rs, 0, sizeof(*rs));
+	/*
+	 * The rate count comes straight from the received element;
+	 * clamp it so the memcpy below cannot overflow rs_rates
+	 * (which holds at most IEEE80211_RATE_MAXSIZE entries) on a
+	 * malformed or hostile frame.  The extended-rates path below
+	 * already applies the same bound.
+	 */
+	if (nrates > IEEE80211_RATE_MAXSIZE) {
+		IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_XRATE, ni,
+			"supported rate set too large;"
+			" only using %u of %u rates",
+			IEEE80211_RATE_MAXSIZE, nrates);
+		ni->ni_vap->iv_stats.is_rx_rstoobig++;
+		nrates = IEEE80211_RATE_MAXSIZE;
+	}
+	rs->rs_nrates = nrates;
+	rs->rs_legacy_nrates = nrates;
+	memcpy(rs->rs_rates, rates + 2, rs->rs_nrates);
+	if (xrates != NULL) {
+		u_int8_t nxrates = 0;
+		/*
+		 * Tack on 11g extended supported rate element.
+		 */
+		nxrates = xrates[1];
+		if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) {
+			struct ieee80211vap *vap = ni->ni_vap;
+
+			nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates;
+			IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni,
+				"extended rate set too large;"
+				" only using %u of %u rates",
+				nxrates, xrates[1]);
+			vap->iv_stats.is_rx_rstoobig++;
+		}
+		memcpy(rs->rs_rates + rs->rs_nrates, xrates+2, nxrates);
+		rs->rs_nrates += nxrates;
+		rs->rs_legacy_nrates += nxrates;
+	}
+
+	return 1;
+}
+
+/*
+ * Install received rate set information in the node's state block.
+ */
+int
+ieee80211_setup_rates(struct ieee80211_node *ni,
+	const u_int8_t *rates, const u_int8_t *xrates, int flags)
+{
+	struct ieee80211_rateset *rs = &ni->ni_rates;
+	u_int8_t nrates = rates[1];
+
+	memset(rs, 0, sizeof(*rs));
+	/*
+	 * The rate count comes straight from the received element;
+	 * clamp it so the memcpy below cannot overflow rs_rates
+	 * (which holds at most IEEE80211_RATE_MAXSIZE entries) on a
+	 * malformed or hostile frame.  The extended-rates path below
+	 * already applies the same bound.
+	 */
+	if (nrates > IEEE80211_RATE_MAXSIZE) {
+		IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_XRATE, ni,
+			"supported rate set too large;"
+			" only using %u of %u rates",
+			IEEE80211_RATE_MAXSIZE, nrates);
+		ni->ni_vap->iv_stats.is_rx_rstoobig++;
+		nrates = IEEE80211_RATE_MAXSIZE;
+	}
+	rs->rs_nrates = nrates;
+	rs->rs_legacy_nrates = nrates;
+	memcpy(rs->rs_rates, rates + 2, rs->rs_nrates);
+	if (xrates != NULL) {
+		u_int8_t nxrates = 0;
+		/*
+		 * Tack on 11g extended supported rate element.
+		 */
+		nxrates = xrates[1];
+		if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) {
+			struct ieee80211vap *vap = ni->ni_vap;
+
+			nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates;
+			IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni,
+				"extended rate set too large;"
+				" only using %u of %u rates",
+				nxrates, xrates[1]);
+			vap->iv_stats.is_rx_rstoobig++;
+		}
+		memcpy(rs->rs_rates + rs->rs_nrates, xrates+2, nxrates);
+		rs->rs_nrates += nxrates;
+		rs->rs_legacy_nrates += nxrates;
+	}
+	/* Reconcile the parsed set against local capabilities. */
+	return ieee80211_fix_rate(ni, flags);
+}
+
+/*
+ * Process an open-system authentication frame for all operating modes.
+ * seq is the authentication sequence number, status the peer-reported
+ * status code (meaningful only in the STA response path).
+ */
+static void
+ieee80211_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh,
+	int rssi, u_int32_t rstamp, u_int16_t seq, u_int16_t status)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	int node_reference_held = 0;
+
+	/*
+	 * Station insists on shared-key auth while we are doing open:
+	 * as an AP, reject with IEEE80211_STATUS_ALG.
+	 */
+	if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+			ni->ni_macaddr, "open auth",
+			"bad sta auth mode %u", ni->ni_authmode);
+		vap->iv_stats.is_rx_bad_auth++;	/* XXX maybe a unique error? */
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			/*
+			 * To send the frame to the requesting STA we have to create a node
+			 * for the station that we're going to reject.
+			 */
+			if (ni == vap->iv_bss) {
+				ni = ieee80211_tmp_node(vap, wh->i_addr2);
+				if (ni == NULL) {
+					return;
+				}
+				node_reference_held = 1;
+			}
+
+			IEEE80211_SEND_MGMT(ni,	IEEE80211_FC0_SUBTYPE_AUTH,
+				(seq + 1) | (IEEE80211_STATUS_ALG << 16));
+
+			if (node_reference_held) {
+				ieee80211_free_node(ni);
+			}
+			return;
+		}
+	}
+	switch (vap->iv_opmode) {
+	case IEEE80211_M_IBSS:
+		if (vap->iv_state != IEEE80211_S_RUN ||
+		    seq != IEEE80211_AUTH_OPEN_REQUEST) {
+			vap->iv_stats.is_rx_bad_auth++;
+			return;
+		}
+		ieee80211_new_state(vap, IEEE80211_S_AUTH,
+			wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
+		break;
+
+	case IEEE80211_M_AHDEMO:
+	case IEEE80211_M_WDS:
+		/* should not come here */
+		break;
+
+	case IEEE80211_M_HOSTAP:
+		if (vap->iv_state != IEEE80211_S_RUN ||
+		    seq != IEEE80211_AUTH_OPEN_REQUEST) {
+			vap->iv_stats.is_rx_bad_auth++;
+			mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH_FAILS, 1);
+			return;
+		}
+		/* always accept open authentication requests */
+		if (ni == vap->iv_bss) {
+			/* First contact: create a real node for the peer. */
+			ni = ieee80211_dup_bss(vap, wh->i_addr2);
+			if (ni == NULL) {
+				mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH_FAILS, 1);
+				return;
+			}
+			ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
+			node_reference_held = 1;
+		}
+
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
+		IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
+			ni, "station authenticated (%s)", "open");
+
+		if (node_reference_held) {
+			ieee80211_free_node(ni);
+		}
+		mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH, 1);
+		break;
+
+	case IEEE80211_M_STA:
+		/* STA side: only a response in AUTH state is meaningful. */
+		if (vap->iv_state != IEEE80211_S_AUTH ||
+		    seq != IEEE80211_AUTH_OPEN_RESPONSE) {
+			vap->iv_stats.is_rx_bad_auth++;
+			return;
+		}
+		if (status != 0) {
+			IEEE80211_NOTE(vap,
+				IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
+				"open auth failed (reason %d)", status);
+			vap->iv_stats.is_rx_auth_fail++;
+			ieee80211_new_state(vap, IEEE80211_S_SCAN,
+				IEEE80211_SCAN_FAIL_STATUS);
+		} else
+			ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+		break;
+	case IEEE80211_M_MONITOR:
+		break;
+	}
+}
+
+/*
+ * Send a management frame error response to the specified
+ * station.  If ni is associated with the station then use
+ * it; otherwise allocate a temporary node suitable for
+ * transmitting the frame and then free the reference so
+ * it will go away as soon as the frame has been transmitted.
+ */
+static void
+ieee80211_send_error(struct ieee80211_node *ni,
+	const u_int8_t *mac, int subtype, int arg)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_node *tmp_node = NULL;
+
+	/*
+	 * When invoked with the bss node, allocate a temporary node for
+	 * the destination station so the frame can actually be sent;
+	 * the reference is dropped again once the frame is queued.
+	 */
+	if (ni == vap->iv_bss) {
+		tmp_node = (vap->iv_opmode == IEEE80211_M_STA) ?
+			_ieee80211_tmp_node(vap, mac, mac) :
+			ieee80211_tmp_node(vap, mac);
+		if (tmp_node == NULL)
+			return;
+		ni = tmp_node;
+	}
+
+	IEEE80211_SEND_MGMT(ni, subtype, arg);
+
+	if (tmp_node != NULL)
+		ieee80211_free_node(tmp_node);
+}
+
+static int
+alloc_challenge(struct ieee80211_node *ni)
+{
+	/*
+	 * Lazily allocate the shared-key challenge buffer.  Returns
+	 * non-zero when a buffer is available, zero on allocation failure.
+	 */
+	if (ni->ni_challenge == NULL) {
+		MALLOC(ni->ni_challenge, u_int32_t*, IEEE80211_CHALLENGE_LEN,
+			M_DEVBUF, M_NOWAIT);
+		if (ni->ni_challenge == NULL) {
+			IEEE80211_NOTE(ni->ni_vap,
+				IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
+				"%s", "shared key challenge alloc failed");
+			/* XXX statistic */
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/* XXX TODO: add statistics */
+/*
+ * Process a shared-key authentication frame (AP and STA roles).
+ * frm/efrm delimit the frame's information elements, seq is the auth
+ * sequence number and status the peer-reported status code.  On error
+ * an AP replies with an error status code; a STA kicks its state
+ * machine back to SCAN.
+ */
+static void
+ieee80211_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
+	u_int8_t *frm, u_int8_t *efrm, int rssi, u_int32_t rstamp,
+	u_int16_t seq, u_int16_t status)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	u_int8_t *challenge;
+	int node_reference_held = 0;
+	int estatus;
+
+	/*
+	 * NB: this can happen as we allow pre-shared key
+	 * authentication to be enabled w/o wep being turned
+	 * on so that configuration of these can be done
+	 * in any order.  It may be better to enforce the
+	 * ordering in which case this check would just be
+	 * for sanity/consistency.
+	 */
+	estatus = 0;			/* NB: silence compiler */
+	if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+			ni->ni_macaddr, "shared key auth",
+			"%s", " PRIVACY is disabled");
+		estatus = IEEE80211_STATUS_ALG;
+		goto bad;
+	}
+	/*
+	 * Pre-shared key authentication is evil; accept
+	 * it only if explicitly configured (it is supported
+	 * mainly for compatibility with clients like OS X).
+	 */
+	if (ni->ni_authmode != IEEE80211_AUTH_AUTO &&
+	    ni->ni_authmode != IEEE80211_AUTH_SHARED) {
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+			ni->ni_macaddr, "shared key auth",
+			"bad sta auth mode %u", ni->ni_authmode);
+		vap->iv_stats.is_rx_bad_auth++;	/* XXX maybe a unique error? */
+		estatus = IEEE80211_STATUS_ALG;
+		goto bad;
+	}
+
+	/* Locate the (single) challenge IE, with a bounds check. */
+	challenge = NULL;
+	if (frm + 1 < efrm) {
+		if ((frm[1] + 2) > (efrm - frm)) {
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+				ni->ni_macaddr, "shared key auth",
+				"ie %d/%d too long",
+				frm[0], (frm[1] + 2) - (efrm - frm));
+			vap->iv_stats.is_rx_bad_auth++;
+			estatus = IEEE80211_STATUS_CHALLENGE;
+			goto bad;
+		}
+		if (*frm == IEEE80211_ELEMID_CHALLENGE)
+			challenge = frm;
+		frm += frm[1] + 2;
+	}
+	/* Sequences 2 and 3 must carry a valid-length challenge. */
+	switch (seq) {
+	case IEEE80211_AUTH_SHARED_CHALLENGE:
+	case IEEE80211_AUTH_SHARED_RESPONSE:
+		if (challenge == NULL) {
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+				ni->ni_macaddr, "shared key auth",
+				"%s", "no challenge");
+			vap->iv_stats.is_rx_bad_auth++;
+			estatus = IEEE80211_STATUS_CHALLENGE;
+			goto bad;
+		}
+		if (challenge[1] != IEEE80211_CHALLENGE_LEN) {
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+				ni->ni_macaddr, "shared key auth",
+				"bad challenge len %d", challenge[1]);
+			vap->iv_stats.is_rx_bad_auth++;
+			estatus = IEEE80211_STATUS_CHALLENGE;
+			goto bad;
+		}
+		/* FALLTHROUGH */
+	default:
+		break;
+	}
+	switch (vap->iv_opmode) {
+	case IEEE80211_M_MONITOR:
+	case IEEE80211_M_AHDEMO:
+	case IEEE80211_M_IBSS:
+	case IEEE80211_M_WDS:
+		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+			ni->ni_macaddr, "shared key auth",
+			"bad operating mode %u", vap->iv_opmode);
+		return;
+	case IEEE80211_M_HOSTAP:
+		if (vap->iv_state != IEEE80211_S_RUN) {
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+				ni->ni_macaddr, "shared key auth",
+				"bad state %u", vap->iv_state);
+			estatus = IEEE80211_STATUS_ALG;	/* XXX */
+			goto bad;
+		}
+
+		switch (seq) {
+		case IEEE80211_AUTH_SHARED_REQUEST:
+			/* First contact: create a real node for the peer. */
+			if (ni == vap->iv_bss) {
+				ni = ieee80211_dup_bss(vap, wh->i_addr2);
+				if (ni == NULL) {
+					return;
+				}
+				ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
+				node_reference_held = 1;
+			}
+			ni->ni_rssi = rssi;
+			ni->ni_rstamp = rstamp;
+			ni->ni_last_rx = jiffies;
+			if (!alloc_challenge(ni)) {
+				/* NB: don't return error so they rexmit */
+				if (node_reference_held) {
+					ieee80211_free_node(ni);
+				}
+				return;
+			}
+			get_random_bytes(ni->ni_challenge,
+				IEEE80211_CHALLENGE_LEN);
+			IEEE80211_NOTE(vap,
+				IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
+				"shared key %sauth request", node_reference_held ? "" : "re");
+			break;
+		case IEEE80211_AUTH_SHARED_RESPONSE:
+			if (ni == vap->iv_bss) {
+				IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+					ni->ni_macaddr, "shared key response",
+					"%s", "unknown station");
+				/* NB: don't send a response */
+				return;
+			}
+			if (ni->ni_challenge == NULL) {
+				IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+					ni->ni_macaddr, "shared key response",
+					"%s", "no challenge recorded");
+				vap->iv_stats.is_rx_bad_auth++;
+				estatus = IEEE80211_STATUS_CHALLENGE;
+				goto bad;
+			}
+			if (memcmp(ni->ni_challenge, &challenge[2],
+			    challenge[1]) != 0) {
+				IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+					ni->ni_macaddr, "shared key response",
+					"%s", "challenge mismatch");
+				vap->iv_stats.is_rx_auth_fail++;
+				estatus = IEEE80211_STATUS_CHALLENGE;
+				goto bad;
+			}
+			IEEE80211_NOTE(vap,
+				IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
+				"station authenticated (%s)", "shared key");
+			ieee80211_node_authorize(ni);
+			break;
+		default:
+			IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+				ni->ni_macaddr, "shared key auth",
+				"bad seq %d", seq);
+			vap->iv_stats.is_rx_bad_auth++;
+			estatus = IEEE80211_STATUS_SEQUENCE;
+			goto bad;
+		}
+
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
+
+		if (node_reference_held) {
+			ieee80211_free_node(ni);
+		}
+
+		break;
+
+	case IEEE80211_M_STA:
+		if (vap->iv_state != IEEE80211_S_AUTH)
+			return;
+		switch (seq) {
+		case IEEE80211_AUTH_SHARED_PASS:
+			if (ni->ni_challenge != NULL) {
+				FREE(ni->ni_challenge, M_DEVBUF);
+				ni->ni_challenge = NULL;
+			}
+			if (status != 0) {
+				IEEE80211_NOTE_MAC(vap,
+					IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
+					ieee80211_getbssid(vap, wh),
+					"shared key auth failed (reason %d)",
+					status);
+				vap->iv_stats.is_rx_auth_fail++;
+				/* XXX IEEE80211_SCAN_FAIL_STATUS */
+				goto bad;
+			}
+			ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+			break;
+		case IEEE80211_AUTH_SHARED_CHALLENGE:
+			if (!alloc_challenge(ni))
+				goto bad;
+			/* XXX could optimize by passing recvd challenge */
+			memcpy(ni->ni_challenge, &challenge[2], challenge[1]);
+			IEEE80211_SEND_MGMT(ni,
+				IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
+			break;
+		default:
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH,
+				wh, "shared key auth", "bad seq %d", seq);
+			vap->iv_stats.is_rx_bad_auth++;
+			goto bad;
+		}
+		break;
+	}
+	if(vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH, 1);
+	}
+	return;
+bad:
+	/*
+	 * Send an error response; but only when operating as an AP.
+	 */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		/* XXX hack to workaround calling convention */
+		ieee80211_send_error(ni, wh->i_addr2,
+			IEEE80211_FC0_SUBTYPE_AUTH,
+			(seq + 1) | (estatus<<16));
+		mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH_FAILS, 1);
+
+	} else if (vap->iv_opmode == IEEE80211_M_STA) {
+		/*
+		 * Kick the state machine.  This short-circuits
+		 * using the mgt frame timeout to trigger the
+		 * state transition.
+		 */
+		if (vap->iv_state == IEEE80211_S_AUTH)
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	}
+}
+
+/*
+ * Verify the existence and length of __elem or get out.
+ * NB: expands to a bare `return` on failure, so it is only usable inside
+ * the mgmt-input handlers, which have `vap`, `wh` and `subtype` in scope.
+ */
+#define IEEE80211_VERIFY_ELEMENT(__elem, __maxlen) do {			\
+	if ((__elem) == NULL) {						\
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID,		\
+			wh, ieee80211_mgt_subtype_name[subtype >>	\
+				IEEE80211_FC0_SUBTYPE_SHIFT],		\
+			"%s", "no " #__elem );				\
+		vap->iv_stats.is_rx_elem_missing++;			\
+		return;							\
+	}								\
+	if ((__elem)[1] > (__maxlen)) {					\
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID,		\
+			wh, ieee80211_mgt_subtype_name[subtype >>	\
+				IEEE80211_FC0_SUBTYPE_SHIFT],		\
+			"bad " #__elem " len %d", (__elem)[1]);		\
+		vap->iv_stats.is_rx_elem_toobig++;			\
+		return;							\
+	}								\
+} while (0)
+
+/*
+ * Verify that at least _minlen bytes of IE payload remain, else discard
+ * the frame and `return` from the enclosing handler (needs local `vap`,
+ * `wh`, `subtype`).
+ */
+#define	IEEE80211_VERIFY_LENGTH(_len, _minlen) do {			\
+	if ((_len) < (_minlen)) {					\
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID,		\
+			wh, ieee80211_mgt_subtype_name[subtype >>	\
+				IEEE80211_FC0_SUBTYPE_SHIFT],		\
+			"%s", "ie too short");				\
+		vap->iv_stats.is_rx_elem_toosmall++;			\
+		return;							\
+	}								\
+} while (0)
+
+/*
+ * TDLS variant of the length check; logs via the TDLS debug channel
+ * instead of discarding with frame context.
+ * NOTE(review): the format string is "%s" but two trailing args are
+ * passed ("ie too short" and __FUNCTION__) — whether __FUNCTION__ is
+ * consumed depends on IEEE80211_TDLS_DPRINTF's definition; confirm.
+ */
+#define	IEEE80211_VERIFY_TDLS_LENGTH(_len, _minlen) do {			\
+		if ((_len) < (_minlen)) {					\
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS,		\
+			IEEE80211_TDLS_MSG_DBG, "%s", "ie too short", __FUNCTION__);				\
+			vap->iv_stats.is_rx_elem_toosmall++;			\
+			return;							\
+		}								\
+} while (0)
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Debug-only helper: log that a frame from `mac` was discarded because
+ * its SSID element (`ssid`: [id, len, bytes...]) did not match ours.
+ */
+static void
+ieee80211_ssid_mismatch(struct ieee80211vap *vap, const char *tag,
+	u_int8_t mac[IEEE80211_ADDR_LEN], u_int8_t *ssid)
+{
+	printf("[%s] discard %s frame, ssid mismatch: ",
+		ether_sprintf(mac), tag);
+	ieee80211_print_essid(ssid + 2, ssid[1]);
+	printf("\n");
+}
+#endif
+
+/* Action the caller of ieee80211_verify_ssid() must take. */
+enum ieee80211_verify_ssid_action {
+	IEEE80211_VERIFY_SSID_ACTION_NO = 0,			/* SSID OK, continue */
+	IEEE80211_VERIFY_SSID_ACTION_RETURN = 1,		/* drop the frame */
+	IEEE80211_VERIFY_SSID_ACTION_NODE_DEL_AND_RETURN = 2	/* drop and delete node */
+};
+
+/*
+ * Check the SSID element of a received management frame against this
+ * vap's BSS.  Returns one of IEEE80211_VERIFY_SSID_ACTION_*.
+ */
+static int ieee80211_verify_ssid(struct ieee80211vap *vap,
+		struct ieee80211_node *ni,
+		struct ieee80211_frame *wh,
+		u_int8_t *ssid,
+		int subtype)
+{
+	struct ieee80211_node *bss = vap->iv_bss;
+	int is_assoc_req = (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
+			(subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ);
+	int mismatch = (ssid[1] != 0) &&
+			((ssid[1] != bss->ni_esslen) ||
+			(memcmp(ssid + 2, bss->ni_essid, ssid[1]) != 0));
+
+	if (mismatch) {
+#ifdef IEEE80211_DEBUG
+		if (ieee80211_msg_input(vap) &&
+		    subtype != IEEE80211_FC0_SUBTYPE_PROBE_REQ) {
+			ieee80211_ssid_mismatch(vap,
+			    ieee80211_mgt_subtype_name[subtype >>
+				IEEE80211_FC0_SUBTYPE_SHIFT],
+				wh->i_addr2, ssid);
+		}
+#endif
+		vap->iv_stats.is_rx_ssidmismatch++;
+		/* (Re)assoc requests from a real node also drop the node */
+		if ((ni != bss) && is_assoc_req)
+			return IEEE80211_VERIFY_SSID_ACTION_NODE_DEL_AND_RETURN;
+		return IEEE80211_VERIFY_SSID_ACTION_RETURN;
+	}
+
+	/* Hidden SSID: ignore broadcast-SSID probe requests */
+	if ((ssid[1] == 0) && (vap->iv_flags & IEEE80211_F_HIDESSID) &&
+	    (subtype == IEEE80211_FC0_SUBTYPE_PROBE_REQ))
+		return IEEE80211_VERIFY_SSID_ACTION_RETURN;
+
+	/* Reject empty ssid in association requests */
+	if ((ssid[1] == 0) && is_assoc_req &&
+	    (vap->iv_qtn_options & IEEE80211_QTN_NO_SSID_ASSOC_DISABLED))
+		return IEEE80211_VERIFY_SSID_ACTION_RETURN;
+
+	return IEEE80211_VERIFY_SSID_ACTION_NO;
+}
+
+/* unaligned little endian access */
+#define LE_READ_2(p)					\
+	((u_int16_t)					\
+	 ((((const u_int8_t *)(p))[0]      ) |		\
+	  (((const u_int8_t *)(p))[1] <<  8)))
+/* 24-bit little-endian read (used for 3-byte vendor OUIs) */
+#define LE_READ_3(p)					\
+	((u_int32_t)					\
+	 ((((const u_int8_t *)(p))[0]      ) |		\
+	  (((const u_int8_t *)(p))[1] <<  8) |		\
+	  (((const u_int8_t *)(p))[2] << 16) | 0))
+#define LE_READ_4(p)					\
+	((u_int32_t)					\
+	 ((((const u_int8_t *)(p))[0]      ) |		\
+	  (((const u_int8_t *)(p))[1] <<  8) |		\
+	  (((const u_int8_t *)(p))[2] << 16) |		\
+	  (((const u_int8_t *)(p))[3] << 24)))
+
+/* unaligned big endian access */
+#define BE_READ_2(p)					\
+	((u_int16_t)					\
+	 ((((const u_int8_t *)(p))[1]      ) |		\
+	  (((const u_int8_t *)(p))[0] <<  8)))
+#define BE_READ_4(p)					\
+	((u_int32_t)					\
+	 ((((const u_int8_t *)(p))[3]      ) |		\
+	  (((const u_int8_t *)(p))[2] <<  8) |		\
+	  (((const u_int8_t *)(p))[1] << 16) |		\
+	  (((const u_int8_t *)(p))[0] << 24)))
+
+/* Vendor IE check: WPA OUI with the WPA/RSN OUI type byte. */
+static __inline int
+iswpaoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return LE_READ_4(frm + 2) == ((WPA_RSN_OUI_TYPE << 24) | WPA_OUI);
+}
+
+/* Vendor IE check: WME OUI + WME OUI type. */
+static __inline int
+iswmeoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return LE_READ_4(frm + 2) == ((WME_OUI_TYPE << 24) | WME_OUI);
+}
+
+/* Vendor IE check: WME Parameter element (WME OUI + PARAM subtype). */
+static __inline int
+iswmeparam(const u_int8_t *frm)
+{
+	if (frm[1] <= 5)
+		return 0;
+	if (LE_READ_4(frm + 2) != ((WME_OUI_TYPE << 24) | WME_OUI))
+		return 0;
+	return frm[6] == WME_PARAM_OUI_SUBTYPE;
+}
+
+/* Vendor IE check: WME Information element (WME OUI + INFO subtype). */
+static __inline int
+iswmeinfo(const u_int8_t *frm)
+{
+	if (frm[1] <= 5)
+		return 0;
+	if (LE_READ_4(frm + 2) != ((WME_OUI_TYPE << 24) | WME_OUI))
+		return 0;
+	return frm[6] == WME_INFO_OUI_SUBTYPE;
+}
+
+/* Vendor IE check: WSC (WPS) element — WPA OUI with the WSC type. */
+static __inline int
+iswscoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return LE_READ_4(frm + 2) == ((WSC_OUI_TYPE << 24) | WPA_OUI);
+}
+
+/* Vendor IE check: Atheros OUI + Atheros type. */
+static __inline int
+isatherosoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return LE_READ_4(frm + 2) == ((ATH_OUI_TYPE << 24) | ATH_OUI);
+}
+
+/* Vendor IE check: Quantenna OUI with the CFG subtype. */
+static __inline int
+isqtnie(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_CFG;
+}
+
+/* Vendor IE check: WFA OUI with the OSEN type. */
+static __inline int
+isosenie(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if (LE_READ_3(frm + 2) != WFA_OUI)
+		return 0;
+	return frm[5] == WFA_TYPE_OSEN;
+}
+
+/*
+ * Heuristic guess that a VHT peer is a Marvell device: it carries none of
+ * the Ralink/Broadcom/Realtek/Quantenna vendor IEs, is not detected as
+ * Intel, is VHT, advertises SU beamformee (but not beamformer) capability
+ * with 4 beamformee STS, and has no 3-stream RX VHT MCS.
+ * NOTE(review): the unlikely() wrapper closes after the BFSTSCAP term, so
+ * the !IEEE80211_VHT_HAS_3SS term sits outside the hint — confirm this
+ * grouping (and the hint placement) is intended.
+ */
+static __inline int
+is_peer_mrvl( u_int8_t *rlnk, void *bcmie, void *rtkie, struct ieee80211_ie_qtn *qtnie,
+                struct ieee80211_ie_vhtcap *vhtcap, struct ieee80211_node *ni)
+{
+        if (unlikely(!bcmie && !qtnie && !rlnk && !rtkie && !ieee80211_node_is_intel(ni) &&
+                 (ni->ni_flags & IEEE80211_NODE_VHT) &&
+                !IEEE80211_VHTCAP_GET_SU_BEAMFORMER((struct ieee80211_ie_vhtcap *)vhtcap) &&
+                 IEEE80211_VHTCAP_GET_SU_BEAMFORMEE((struct ieee80211_ie_vhtcap *)vhtcap) &&
+                (IEEE80211_VHTCAP_GET_BFSTSCAP((struct ieee80211_ie_vhtcap *)vhtcap) == IEEE80211_VHTCAP_RX_STS_4)) &&
+                !IEEE80211_VHT_HAS_3SS(ni->ni_vhtcap.rxmcsmap)) {
+                        return 1;
+                }
+                return 0;
+}
+
+#ifdef CONFIG_QVSP
+/* Vendor IE check: Quantenna OUI with the VSP control subtype. */
+static __inline int
+isvspie(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_VSP_CTRL;
+}
+
+/* Vendor IE check: Quantenna OUI with the QWME subtype. */
+static __inline int
+isqtnwmeie(const uint8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_QWME;
+}
+#endif
+
+/* Vendor IE check: Quantenna OUI with the SCS subtype. */
+static __inline int
+is_qtn_scs_oui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_SCS;
+}
+
+/* Vendor IE check: Broadcom OUI (any subtype). */
+static __inline int
+isbroadcomoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return (LE_READ_4(frm + 2) & 0x00ffffff) == BCM_OUI;
+}
+
+/* Vendor IE check: Broadcom's alternate OUI (any subtype). */
+static __inline int
+isbroadcomoui2(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return (LE_READ_4(frm + 2) & 0x00ffffff) == BCM_OUI_2;
+}
+
+/* Vendor IE check: Quantenna OUI with the pairing subtype. */
+static __inline int
+isqtnpairoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_PAIRING;
+}
+
+/* Vendor IE check: Quantenna OUI with the TDLS bridge-MACs subtype. */
+static __inline int
+is_qtn_oui_tdls_brmacs(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_TDLS_BRMACS;
+}
+
+/* Vendor IE check: Quantenna OUI with the TDLS station-info subtype. */
+static __inline int
+is_qtn_oui_tdls_sta_info(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_TDLS;
+}
+
+/* Vendor IE check: Ralink OUI (any subtype). */
+static __inline int
+isrlnkoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return (LE_READ_4(frm + 2) & 0x00ffffff) == RLNK_OUI;
+}
+
+/* Vendor IE check: Quantenna OUI with the extender-role subtype. */
+static __inline int
+is_qtn_ext_role_oui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_EXTENDER_ROLE;
+}
+
+/* Vendor IE check: Quantenna OUI with the extender-BSSID subtype. */
+static __inline int
+is_qtn_ext_bssid_oui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_EXTENDER_BSSID;
+}
+
+/* Vendor IE check: Quantenna OUI with the extender-state subtype. */
+static __inline int
+is_qtn_ext_state_oui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	if ((LE_READ_4(frm + 2) & 0x00ffffff) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_EXTENDER_STATE;
+}
+
+/* Vendor IE check: Realtek OUI (any subtype). */
+static __inline int
+isrealtekoui(const u_int8_t *frm)
+{
+	if (frm[1] <= 3)
+		return 0;
+	return (LE_READ_4(frm + 2) & 0x00ffffff) == RTK_OUI;
+}
+
+/* Vendor IE check: Quantenna measurement-response element. */
+static __inline int
+isqtnmrespoui(const u_int8_t *frm)
+{
+	if (frm[0] != IEEE80211_ELEMID_VENDOR || frm[1] < 5)
+		return 0;
+	if (LE_READ_3(&frm[2]) != QTN_OUI)
+		return 0;
+	return (frm[6] == QTN_OUI_RM_SPCIAL) || (frm[6] == QTN_OUI_RM_ALL);
+}
+
+/* Vendor IE check: Broadcom vendor-specific VHT element. */
+static __inline int
+isbrcmvhtoui(const u_int8_t *frm)
+{
+	if (frm[0] != IEEE80211_ELEMID_VENDOR || frm[1] < 5)
+		return 0;
+	if (LE_READ_3(&frm[2]) != BCM_OUI)
+		return 0;
+	return LE_READ_2(&frm[5]) == BCM_OUI_VHT_TYPE;
+}
+
+/* Vendor IE check: Quantenna OCAC-state element (fixed length). */
+static __inline int
+is_qtn_ocac_state_ie(const u_int8_t *frm)
+{
+	if (frm[0] != IEEE80211_ELEMID_VENDOR || frm[1] != OCAC_STATE_IE_LEN)
+		return 0;
+	if (LE_READ_3(&frm[2]) != QTN_OUI)
+		return 0;
+	return frm[5] == QTN_OUI_OCAC_STATE;
+}
+
+/*
+ * Convert a WPA cipher selector OUI to an internal
+ * cipher algorithm.  Where appropriate we also
+ * record any key length.
+ * `sel` points at the 4-byte suite selector (3-byte OUI + type).
+ */
+static int
+wpa_cipher(u_int8_t *sel, u_int8_t *keylen)
+{
+#define	WPA_SEL(x)	(((x) << 24) | WPA_OUI)
+	u_int32_t w = LE_READ_4(sel);
+
+	switch (w) {
+	case WPA_SEL(WPA_CSE_NULL):
+		return IEEE80211_CIPHER_NONE;
+	case WPA_SEL(WPA_CSE_WEP40):
+		if (keylen)
+			*keylen = 40 / NBBY;
+		return IEEE80211_CIPHER_WEP;
+	case WPA_SEL(WPA_CSE_WEP104):
+		if (keylen)
+			*keylen = 104 / NBBY;
+		return IEEE80211_CIPHER_WEP;
+	case WPA_SEL(WPA_CSE_TKIP):
+		return IEEE80211_CIPHER_TKIP;
+	case WPA_SEL(WPA_CSE_CCMP):
+		return IEEE80211_CIPHER_AES_CCM;
+	}
+	/* NOTE(review): callers do `w |= 1 << wpa_cipher(...)`; shifting a
+	 * 32-bit int by 32 is undefined behaviour in C — the "discarded"
+	 * sentinel relies on the compiler producing 0 here. */
+	return 32;		/* NB: so 1<< is discarded */
+#undef WPA_SEL
+}
+
+/*
+ * Convert a WPA key management/authentication algorithm
+ * to an internal code.
+ * `sel` points at the 4-byte AKM suite selector; returns the matching
+ * WPA_ASE_* bit value, or 0 for an unknown suite (OR'd away by callers).
+ */
+static int
+wpa_keymgmt(u_int8_t *sel)
+{
+#define	WPA_SEL(x)	(((x)<<24)|WPA_OUI)
+	u_int32_t w = LE_READ_4(sel);
+
+	switch (w) {
+	case WPA_SEL(WPA_ASE_8021X_UNSPEC):
+		return WPA_ASE_8021X_UNSPEC;
+	case WPA_SEL(WPA_ASE_8021X_PSK):
+		return WPA_ASE_8021X_PSK;
+	case WPA_SEL(WPA_ASE_NONE):
+		return WPA_ASE_NONE;
+	}
+	return 0;		/* NB: so is discarded */
+#undef WPA_SEL
+}
+
+/*
+ * Parse a WPA information element to collect parameters
+ * and validate the parameters against what has been
+ * configured for the system.
+ *
+ * Returns 0 when the IE is acceptable, otherwise an IEEE80211_REASON_*
+ * code describing why it was rejected.
+ */
+static int
+ieee80211_parse_wpa(struct ieee80211vap *vap, u_int8_t *frm,
+	struct ieee80211_rsnparms *rsn_parm, const struct ieee80211_frame *wh)
+{
+	u_int8_t len = frm[1];
+	u_int32_t w;
+	int n;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/*
+	 * Check the length once for fixed parts: OUI, type,
+	 * version, mcast cipher, and 2 selector counts.
+	 * Other, variable-length data, must be checked separately.
+	 */
+	if (!(vap->iv_flags & IEEE80211_F_WPA1)) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "vap not WPA, flags 0x%x", vap->iv_flags);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	if (len < 14) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "too short, len %u", len);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	/* skip IE header (2) + OUI/type (4); len excludes the header */
+	frm += 6, len -= 4;		/* NB: len is payload only */
+	/* NB: iswpaoui already validated the OUI and type */
+	w = LE_READ_2(frm);
+	if (w != WPA_VERSION) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "bad version %u", w);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	frm += 2;
+	len -= 2;
+
+	/* multicast/group cipher */
+	w = wpa_cipher(frm, &rsn_parm->rsn_mcastkeylen);
+	if (w != rsn_parm->rsn_mcastcipher) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "mcast cipher mismatch; got %u, expected %u",
+			w, rsn_parm->rsn_mcastcipher);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	if (!IEEE80211_IS_TKIP_ALLOWED(ic)) {
+		if (w == IEEE80211_CIPHER_TKIP)
+			return IEEE80211_REASON_STA_CIPHER_NOT_SUPP;
+	}
+	frm += 4;
+	len -= 4;
+
+	/* unicast ciphers */
+	n = LE_READ_2(frm);
+	frm += 2;
+	len -= 2;
+	/* need n 4-byte selectors plus the 2-byte key mgmt count */
+	if (len < n*4+2) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "ucast cipher data too short; len %u, n %u",
+			len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	w = 0;
+	for (; n > 0; n--) {
+		w |= 1 << wpa_cipher(frm, &rsn_parm->rsn_ucastkeylen);
+		frm += 4;
+		len -= 4;
+	}
+	w &= rsn_parm->rsn_ucastcipherset;
+	if (w == 0) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "%s", "ucast cipher set empty");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	if (w & (1 << IEEE80211_CIPHER_TKIP)) {
+		if (!IEEE80211_IS_TKIP_ALLOWED(ic))
+			return IEEE80211_REASON_STA_CIPHER_NOT_SUPP;
+		else
+			rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_TKIP;
+	} else {
+		rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM;
+	}
+
+	/* key management algorithms */
+	n = LE_READ_2(frm);
+	frm += 2;
+	len -= 2;
+	if (len < n * 4) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "key mgmt alg data too short; len %u, n %u",
+			len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	w = 0;
+	for (; n > 0; n--) {
+		w |= wpa_keymgmt(frm);
+		frm += 4;
+		len -= 4;
+	}
+	w &= rsn_parm->rsn_keymgmtset;
+	if (w == 0) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "WPA", "%s", "no acceptable key mgmt alg");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	if (w & WPA_ASE_8021X_UNSPEC)
+		rsn_parm->rsn_keymgmt = WPA_ASE_8021X_UNSPEC;
+	else
+		rsn_parm->rsn_keymgmt = WPA_ASE_8021X_PSK;
+
+	/*
+	 * Optional capabilities: the field is exactly 2 octets, so a
+	 * remaining length of 2 must be accepted.  (Was `len > 2`, which
+	 * skipped a trailing capabilities field; ieee80211_parse_rsn
+	 * already uses `>=`.)
+	 */
+	if (len >= 2)
+		rsn_parm->rsn_caps = LE_READ_2(frm);
+
+	return 0;
+}
+
+/*
+ * Convert an RSN cipher selector OUI to an internal
+ * cipher algorithm.  Where appropriate we also
+ * record any key length.
+ * `sel` points at the 4-byte suite selector (3-byte OUI + type).
+ */
+static int
+rsn_cipher(u_int8_t *sel, u_int8_t *keylen)
+{
+#define	RSN_SEL(x)	(((x) << 24) | RSN_OUI)
+	u_int32_t w = LE_READ_4(sel);
+
+	switch (w) {
+	case RSN_SEL(RSN_CSE_NULL):
+		return IEEE80211_CIPHER_NONE;
+	case RSN_SEL(RSN_CSE_WEP40):
+		if (keylen)
+			*keylen = 40 / NBBY;
+		return IEEE80211_CIPHER_WEP;
+	case RSN_SEL(RSN_CSE_WEP104):
+		if (keylen)
+			*keylen = 104 / NBBY;
+		return IEEE80211_CIPHER_WEP;
+	case RSN_SEL(RSN_CSE_TKIP):
+		return IEEE80211_CIPHER_TKIP;
+	case RSN_SEL(RSN_CSE_CCMP):
+		return IEEE80211_CIPHER_AES_CCM;
+	case RSN_SEL(RSN_CSE_WRAP):
+		return IEEE80211_CIPHER_AES_OCB;
+	}
+	/* NOTE(review): callers shift `1 << rsn_cipher(...)`; a shift by 32
+	 * is undefined behaviour on a 32-bit int (see wpa_cipher). */
+	return 32;		/* NB: so 1<< is discarded */
+#undef RSN_SEL
+}
+
+/*
+ * Convert an RSN key management/authentication algorithm
+ * to an internal code.
+ * Returns the matching RSN_ASE_* bit value, or 0 for an unknown suite
+ * (OR'd away by callers).
+ */
+static int
+rsn_keymgmt(u_int8_t *sel)
+{
+#define	RSN_SEL(x)	(((x) << 24) | RSN_OUI)
+	u_int32_t w = LE_READ_4(sel);
+
+	switch (w) {
+	case RSN_SEL(RSN_ASE_8021X_UNSPEC):
+		return RSN_ASE_8021X_UNSPEC;
+	case RSN_SEL(RSN_ASE_8021X_PSK):
+		return RSN_ASE_8021X_PSK;
+	case RSN_SEL(RSN_ASE_FT_PSK):
+		return RSN_ASE_FT_PSK;
+	case RSN_SEL(RSN_ASE_FT_8021X):
+		return RSN_ASE_FT_8021X;
+	case RSN_SEL(RSN_ASE_8021X_SHA256):
+		return RSN_ASE_8021X_SHA256;
+	case RSN_SEL(RSN_ASE_8021X_PSK_SHA256):
+		return RSN_ASE_8021X_PSK_SHA256;
+	case RSN_SEL(RSN_ASE_NONE):
+		return RSN_ASE_NONE;
+	}
+	return 0;		/* NB: so is discarded */
+#undef RSN_SEL
+}
+
+/*
+ * Parse a WPA/RSN information element to collect parameters
+ * and populate the rsn parameters in struct.
+ *
+ * Unlike ieee80211_parse_rsn() this records what the peer advertised
+ * rather than validating against the local configuration.
+ * Returns 0 on success or IEEE80211_REASON_IE_INVALID.
+ */
+int
+ieee80211_get_rsn_from_ie(struct ieee80211vap *vap, u_int8_t *frm,
+	struct ieee80211_rsnparms *rsn_parm)
+{
+	u_int8_t len = frm[1];
+	u_int32_t w;
+	int n;
+
+	/*
+	 * Check the length once for fixed parts:
+	 * version, mcast cipher, and 2 selector counts.
+	 * Other, variable-length data, must be checked separately.
+	 */
+	if (!(vap->iv_flags & IEEE80211_F_WPA2)) {
+		printk( "vap not RSN, flags 0x%x", vap->iv_flags);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	if (len < 10) {
+		printk( "too short, len %u", len);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	frm += 2;	/* skip element ID and length */
+	w = LE_READ_2(frm);
+	if (w != RSN_VERSION) {
+		printk( "bad version %u", w);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	frm += 2;
+	len -= 2;
+
+	/* multicast/group cipher */
+	w = rsn_cipher(frm, &rsn_parm->rsn_mcastkeylen);
+	rsn_parm->rsn_mcastcipher = w;
+	frm += 4;
+	len -= 4;
+
+	/* unicast ciphers */
+	n = LE_READ_2(frm);
+	frm += 2;
+	len -= 2;
+	if (len < n * 4 + 2) {
+		printk("ucast cipher data too short; len %u, n %u",
+			len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	w = 0;
+	for (; n > 0; n--) {
+		w |= 1 << rsn_cipher(frm, &rsn_parm->rsn_ucastkeylen);
+		frm += 4;
+		len -= 4;
+	}
+
+	if (w == 0) {
+		printk( "%s", "ucast cipher set empty");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	rsn_parm->rsn_ucastcipherset = w;
+	if (w & (1<<IEEE80211_CIPHER_TKIP))
+		rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_TKIP;
+	else
+		rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM;
+
+	/* key management algorithms */
+	n = LE_READ_2(frm);
+	frm += 2;
+	len -= 2;
+	if (len < n * 4) {
+		printk( "key mgmt alg data too short; len %u, n %u",
+			len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	w = 0;
+	for (; n > 0; n--) {
+		w |= rsn_keymgmt(frm);
+		frm += 4;
+		len -= 4;
+	}
+
+	if (w == 0) {
+		printk("%s", "no acceptable key mgmt alg");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	rsn_parm->rsn_keymgmtset = w;
+	/* NB: sequential ifs — a later match overrides an earlier one */
+	if (w & RSN_ASE_8021X_UNSPEC)
+		rsn_parm->rsn_keymgmt = RSN_ASE_8021X_UNSPEC;
+	if (w & RSN_ASE_8021X_PSK)
+		rsn_parm->rsn_keymgmt = RSN_ASE_8021X_PSK;	/* was UNSPEC: copy/paste bug */
+	if (w & RSN_ASE_FT_PSK)
+		rsn_parm->rsn_keymgmt = RSN_ASE_FT_PSK;
+	if (w & RSN_ASE_FT_8021X)
+		rsn_parm->rsn_keymgmt = RSN_ASE_FT_8021X;
+
+	/* optional RSN capabilities */
+	if (len >= 2)
+		rsn_parm->rsn_caps = LE_READ_2(frm);
+	/* XXXPMKID */
+	return 0;
+}
+
+/*
+ * Parse a WPA/RSN information element to collect parameters
+ * and validate the parameters against what has been
+ * configured for the system.
+ * Returns 0 when the IE is acceptable, otherwise an IEEE80211_REASON_*
+ * code for the caller to report.
+ */
+static int
+ieee80211_parse_rsn(struct ieee80211vap *vap, u_int8_t *frm,
+	struct ieee80211_rsnparms *rsn_parm, const struct ieee80211_frame *wh)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	u_int8_t len = frm[1];
+	u_int32_t w;
+	int n;
+
+	/*
+	 * Check the length once for fixed parts:
+	 * version, mcast cipher, and 2 selector counts.
+	 * Other, variable-length data, must be checked separately.
+	 */
+	if (!(vap->iv_flags & IEEE80211_F_WPA2)) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "vap not RSN, flags 0x%x", vap->iv_flags);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	if (len < 10) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "too short, len %u", len);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	/* skip element ID and length header */
+	frm += 2;
+	w = LE_READ_2(frm);
+	if (w != RSN_VERSION) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "bad version %u", w);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	frm += 2;
+	len -= 2;
+
+	/* multicast/group cipher */
+	w = rsn_cipher(frm, &rsn_parm->rsn_mcastkeylen);
+	if (w != rsn_parm->rsn_mcastcipher) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "mcast cipher mismatch; got %u, expected %u",
+			w, rsn_parm->rsn_mcastcipher);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	if (!IEEE80211_IS_TKIP_ALLOWED(ic)) {
+		if (w == IEEE80211_CIPHER_TKIP) {
+			return IEEE80211_REASON_STA_CIPHER_NOT_SUPP;
+		}
+	}
+	frm += 4;
+	len -= 4;
+
+	/* unicast ciphers */
+	n = LE_READ_2(frm);
+	frm += 2;
+	len -= 2;
+	/* need n 4-byte selectors plus the 2-byte key mgmt count */
+	if (len < n * 4 + 2) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "ucast cipher data too short; len %u, n %u",
+			len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	w = 0;
+	for (; n > 0; n--) {
+		w |= 1 << rsn_cipher(frm, &rsn_parm->rsn_ucastkeylen);
+		frm += 4;
+		len -= 4;
+	}
+
+	/* keep only ciphers we are configured to accept */
+	w &= rsn_parm->rsn_ucastcipherset;
+	if (w == 0) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "%s", "ucast cipher set empty");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+        if (w & (1 << IEEE80211_CIPHER_TKIP)) {
+		if (!IEEE80211_IS_TKIP_ALLOWED(ic))
+                        return IEEE80211_REASON_STA_CIPHER_NOT_SUPP;
+                else
+                        rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_TKIP;
+        } else {
+                rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM;
+        }
+
+	/* key management algorithms */
+	n = LE_READ_2(frm);
+	frm += 2;
+	len -= 2;
+	if (len < n * 4) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "key mgmt alg data too short; len %u, n %u",
+			len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	w = 0;
+	for (; n > 0; n--) {
+		w |= rsn_keymgmt(frm);
+		frm += 4;
+		len -= 4;
+	}
+	/* NB: unlike parse_wpa, w is not masked with rsn_keymgmtset here */
+	if (w & RSN_ASE_8021X_UNSPEC)
+		rsn_parm->rsn_keymgmt = RSN_ASE_8021X_UNSPEC;
+	else if (w & RSN_ASE_8021X_PSK)
+		rsn_parm->rsn_keymgmt = RSN_ASE_8021X_PSK;
+	else if (w & RSN_ASE_FT_PSK)
+		rsn_parm->rsn_keymgmt = WPA_KEY_MGMT_FT_PSK;
+	else if (w &  RSN_ASE_FT_8021X)
+		rsn_parm->rsn_keymgmt = WPA_KEY_MGMT_FT_IEEE8021X;
+	else {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "RSN", "%s", "no acceptable key mgmt alg");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	/* optional RSN capabilities */
+	if (len >= 2)
+		rsn_parm->rsn_caps = LE_READ_2(frm);
+	/* XXXPMKID */
+
+	return 0;
+}
+
+
+#define IEEE80211_OSEN_IE_HEAD_LEN		4
+#define IEEE80211_OSEN_IE_SUITE_LEN		4
+#define IEEE80211_OSEN_IE_SUITE_COUNT_LEN	2
+#define IEEE80211_IE_HEAD_LEN			2
+#define IEEE80211_OSEN_IE_RSN_CAPS_LEN		2
+#define IEEE80211_OSEN_IE_MIN_LEN		(IEEE80211_OSEN_IE_HEAD_LEN + \
+						IEEE80211_OSEN_IE_SUITE_LEN + \
+						IEEE80211_OSEN_IE_SUITE_COUNT_LEN)
+
+/*
+ * Parse a WFA OSEN vendor IE: the group cipher must be "group not
+ * allowed" and the AKM must be the WFA OSEN suite.
+ * Returns 0 on success or an IEEE80211_REASON_* code.
+ */
+static int
+ieee80211_parse_osen(struct ieee80211vap *vap, u_int8_t *frm,
+		struct ieee80211_rsnparms *rsn_parm, const struct ieee80211_frame *wh)
+{
+	uint8_t len = frm[1];
+	uint32_t w;
+	int n;
+
+	if (len < IEEE80211_OSEN_IE_MIN_LEN) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "OSEN", "too short, len %u", len);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	if (!vap->iv_osen) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+			wh, "OSEN", "%s", "vap not OSEN");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	/* skip IE header plus OUI/type */
+	frm += IEEE80211_IE_HEAD_LEN + IEEE80211_OSEN_IE_HEAD_LEN;
+	len -= IEEE80211_OSEN_IE_HEAD_LEN;
+
+	/* group (mcast) cipher must be "group traffic not allowed" */
+	w = LE_READ_4(frm);
+	if (w != ((RSN_CSE_GROUP_NOT_ALLOW << 24) | RSN_OUI)) {
+		IEEE80211_DISCARD_IE(vap,
+				IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+				wh, "OSEN", "mcast cipher mismatch; got %u, expected %u",
+				w, RSN_CSE_GROUP_NOT_ALLOW);
+			return IEEE80211_REASON_IE_INVALID;
+	}
+
+	frm += IEEE80211_OSEN_IE_SUITE_LEN;
+	len -= IEEE80211_OSEN_IE_SUITE_LEN;
+
+	/* unicast ciphers */
+	n = LE_READ_2(frm);
+	frm += IEEE80211_OSEN_IE_SUITE_COUNT_LEN;
+	len -= IEEE80211_OSEN_IE_SUITE_COUNT_LEN;
+	if (len < n * IEEE80211_OSEN_IE_SUITE_LEN + IEEE80211_OSEN_IE_SUITE_COUNT_LEN) {
+		IEEE80211_DISCARD_IE(vap,
+				IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+				wh, "OSEN", "ucast cipher data too short; len %u, n %u",
+				len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	w = 0;
+	for (; n > 0; n--) {
+		w |= 1 << rsn_cipher(frm, &rsn_parm->rsn_ucastkeylen);
+		frm += IEEE80211_OSEN_IE_SUITE_LEN;
+		len -= IEEE80211_OSEN_IE_SUITE_LEN;
+	}
+
+	w &= rsn_parm->rsn_ucastcipherset;
+	if (w == 0) {
+		IEEE80211_DISCARD_IE(vap,
+				IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+				wh, "OSEN", "%s", "ucast cipher set empty");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+	if (w & (1 << IEEE80211_CIPHER_TKIP))
+		rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_TKIP;
+	else
+		rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM;
+
+	/* key management algorithms */
+	n = LE_READ_2(frm);
+	frm += IEEE80211_OSEN_IE_SUITE_COUNT_LEN;
+	len -= IEEE80211_OSEN_IE_SUITE_COUNT_LEN;
+	if (len < n * IEEE80211_OSEN_IE_SUITE_LEN) {
+		IEEE80211_DISCARD_IE(vap,
+				IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+				wh, "OSEN", "key mgmt alg data too short; len %u, n %u",
+				len, n);
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	/* only the first AKM suite is validated; it must be WFA OSEN */
+	if (LE_READ_4(frm) != ((WFA_AKM_TYPE_OSEN << 24) | WFA_OUI)) {
+		IEEE80211_DISCARD_IE(vap,
+				IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+				wh, "OSEN", "%s", "no acceptable key mgmt alg");
+		return IEEE80211_REASON_IE_INVALID;
+	}
+
+	rsn_parm->rsn_keymgmt = RSN_ASE_8021X_UNSPEC;
+
+	/*
+	 * Advance past the AKM suite list before reading the optional RSN
+	 * capabilities field; previously the capabilities were read from
+	 * the start of the AKM list itself and len was never decremented.
+	 */
+	frm += n * IEEE80211_OSEN_IE_SUITE_LEN;
+	len -= n * IEEE80211_OSEN_IE_SUITE_LEN;
+
+	/* optional RSN capabilities */
+	if (len >= IEEE80211_OSEN_IE_RSN_CAPS_LEN)
+		rsn_parm->rsn_caps = LE_READ_2(frm);
+
+	return 0;
+}
+
+/*
+ * Save a private copy of information element `ie` in *iep.
+ *
+ * - *iep == NULL, ie != NULL: allocate and copy.
+ * - *iep != NULL: free and reallocate when the stored element length
+ *   differs, or just free when ie == NULL.
+ * The stored buffer holds the whole element: 2-byte header + ie[1] bytes.
+ */
+void
+ieee80211_saveie(u_int8_t **iep, const u_int8_t *ie)
+{
+	if (*iep == NULL) {
+		if (ie != NULL)
+			MALLOC(*iep, void*, ie[1] + 2, M_DEVBUF, M_ZERO);
+	} else {
+		/*
+		 * Test ie for NULL before touching ie[1]: the original
+		 * ordering dereferenced ie ahead of the NULL check.
+		 */
+		if ((ie == NULL) || ((*iep)[1] != ie[1])) {
+			FREE(*iep, M_DEVBUF);
+			*iep = NULL;
+			if (ie != NULL)
+				MALLOC(*iep, void*, ie[1] + 2, M_DEVBUF, M_ZERO);
+		}
+	}
+
+	/* MALLOC may fail (or ie may be NULL); copy only when both exist */
+	if ((*iep != NULL) && (ie != NULL))
+		memcpy(*iep, ie, ie[1] + 2);
+}
+EXPORT_SYMBOL(ieee80211_saveie);
+
+/*
+ * Parse the WME information element from a station and record its
+ * U-APSD settings on the node.  Returns 1 on success, -1 when the
+ * element length is not the expected 7 bytes.
+ */
+static int
+ieee80211_parse_wmeie(u_int8_t *frm, const struct ieee80211_frame *wh,
+					  struct ieee80211_node *ni)
+{
+	u_int ie_len = frm[1];
+
+	if (ie_len != 7) {
+		IEEE80211_DISCARD_IE(ni->ni_vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WME,
+			wh, "WME IE", "too short, len %u", ie_len);
+		return -1;
+	}
+
+	ni->ni_uapsd = frm[WME_CAPINFO_IE_OFFSET];
+	if (ni->ni_uapsd != 0)
+		ni->ni_flags |= IEEE80211_NODE_UAPSD;
+
+	IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_POWER, ni,
+		"UAPSD bit settings from STA: %02x", ni->ni_uapsd);
+	return 1;
+}
+/*
+ * This function is only used on STA and the purpose is to save the cipher
+ * info filled in ASSOC_REQ and apply it to node allocation once association
+ * response received.
+ * If TKIP, need two entries; If others, only need one entry
+ *
+ * NOTE(review): only the FIRST pairwise suite is recorded; additional
+ * suites are reported via IEEE80211_MSG_CRYPTO but not parsed.  The walk
+ * assumes well-formed element lengths — a corrupt frm[1] could step past
+ * `len`; confirm callers validate the buffer.
+ */
+void
+ieee80211_parse_cipher_key(struct ieee80211vap *vap, void *ie, uint16_t len)
+{
+	struct ieee80211_rsnparms *ni_rsn = &(vap->iv_bss->ni_rsn);
+	uint8_t type;
+	uint8_t count;
+	int16_t length = len;	/* signed: decremented as elements are consumed */
+	uint8_t *frm = ie;
+
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return;
+
+	/* default to "no cipher" until an RSN/WPA IE is found */
+	vap->iv_bss->ni_rsn.rsn_ucastcipher = IEEE80211_CIPHER_NONE;
+	vap->iv_bss->ni_rsn.rsn_mcastcipher = IEEE80211_CIPHER_NONE;
+
+	while(length > 0) {
+		type = *frm;
+		/* account for the 2-byte element header */
+		length -= 2;
+		if (likely (type == IEEE80211_ELEMID_RSN) &&
+				(length >= 10)) {
+			/*
+			 * fixed part for RSN IE. version, mcast cipher, and 2 selector counts.
+			 * Other, variable-length data, must be checked separately.
+			 */
+			frm += 4;
+			ni_rsn->rsn_mcastcipher = rsn_cipher(frm, NULL);
+			frm += 4;
+			count = LE_READ_2(frm);
+			if (count != 1) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+					"%s: more than one unicast cipher in optie RSN\n",
+					__func__);
+			}
+			frm += 2;
+			ni_rsn->rsn_ucastcipher = rsn_cipher(frm, NULL);
+			return;
+		} else if ((type == IEEE80211_ELEMID_VENDOR) && iswpaoui(frm) &&
+			(length >= 14)) {
+			/*
+			 * Check the length once for fixed parts: OUI, type,
+			 * version, mcast cipher, and 2 selector counts.
+			 * Other, variable-length data, must be checked separately.
+			 */
+			frm += 8;
+			ni_rsn->rsn_mcastcipher = wpa_cipher(frm, NULL);
+			frm += 4;
+			count = LE_READ_2(frm);
+			if (count != 1) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+					"%s: more than one unicast cipher in optie WPA\n",
+					__func__);
+			}
+			frm += 2;
+			ni_rsn->rsn_ucastcipher = wpa_cipher(frm, NULL);
+			return;
+		} else {
+			/* skip this element: payload bytes plus the header */
+			length -= *(frm + 1);
+			frm += (*(frm + 1) + 2);
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+				"%s: No WPA/RSN IE\n",
+				__func__);
+		}
+	}
+
+	return;
+}
+
+/*
+ * Parse a WME Parameter element and update the channel WME parameters.
+ * Returns -1 on a malformed element, 0 when the parameter-set count is
+ * unchanged (nothing to do), 1 when parameters were updated.
+ * *qosinfo receives the raw QoS-info byte.
+ */
+static int
+ieee80211_parse_wmeparams(struct ieee80211vap *vap, u_int8_t *frm,
+	const struct ieee80211_frame *wh, u_int8_t *qosinfo)
+{
+	struct ieee80211_wme_state *wme = &vap->iv_ic->ic_wme;
+	u_int len = frm[1], qosinfo_count;
+	int i;
+
+	*qosinfo = 0;
+
+	/* -2: the struct includes the 2-byte IE header, len does not */
+	if (len < sizeof(struct ieee80211_wme_param)-2) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_WME,
+			wh, "WME", "too short, len %u", len);
+		return -1;
+	}
+	*qosinfo = frm[__offsetof(struct ieee80211_wme_param, param_qosInfo)];
+	qosinfo_count = *qosinfo & WME_QOSINFO_COUNT;
+	/* XXX do proper check for wraparound */
+	/* same parameter-set count as last time: skip the update */
+	if (qosinfo_count == wme->wme_wmeChanParams.cap_info_count) {
+		return 0;
+	}
+	frm += __offsetof(struct ieee80211_wme_param, params_acParams);
+	/* one 4-byte AC record per access category */
+	for (i = 0; i < WME_NUM_AC; i++) {
+		struct wmm_params *wmep =
+			&wme->wme_wmeChanParams.cap_wmeParams[i];
+		/* NB: ACI not used */
+		wmep->wmm_acm = MS(frm[0], WME_PARAM_ACM);
+		wmep->wmm_aifsn = MS(frm[0], WME_PARAM_AIFSN);
+		wmep->wmm_logcwmin = MS(frm[1], WME_PARAM_LOGCWMIN);
+		wmep->wmm_logcwmax = MS(frm[1], WME_PARAM_LOGCWMAX);
+		wmep->wmm_txopLimit = LE_READ_2(frm + 2);
+		frm += 4;
+	}
+	wme->wme_wmeChanParams.cap_info_count = qosinfo_count;
+	return 1;
+}
+
+/*
+ * Record the Atheros advanced-capability bits from the vendor IE on the
+ * node; when compression is advertised, also record the default key index.
+ */
+static void
+ieee80211_parse_athParams(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	struct ieee80211_ie_athAdvCap *adv_cap =
+		(struct ieee80211_ie_athAdvCap *) ie;
+
+	ni->ni_ath_flags = adv_cap->athAdvCap_capability;
+	if (ni->ni_ath_flags & IEEE80211_ATHC_COMP)
+		ni->ni_ath_defkeyindex = LE_READ_2(&adv_cap->athAdvCap_defKeyIndex);
+}
+
+/*
+ * Attribute `skb` to the right net device: when bridge support is built
+ * in and `dev` is enslaved to a bridge, use the bridge device; otherwise
+ * use `dev` itself.
+ */
+static void
+ieee80211_skb_dev_set(struct net_device *dev, struct sk_buff *skb)
+{
+	struct net_bridge_port *br_port = get_br_port(dev);
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	if (br_port && br_port->br)
+		skb->dev = br_port->br->dev;
+	else
+		skb->dev = dev;
+#else
+	skb->dev = dev;
+#endif
+}
+/*
+ * Clone a received management frame and inject the copy into the local
+ * network stack as a raw 802.11 packet so user space can process it.
+ */
+static void
+forward_mgmt_to_app_for_further_processing(struct ieee80211vap *vap, int subtype, struct sk_buff *skb,
+	struct ieee80211_frame *wh)
+{
+	struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);
+
+	if (copy == NULL)
+		return;
+
+	ieee80211_skb_dev_set(vap->iv_dev, copy);
+	skb_reset_mac_header(copy);
+	copy->ip_summed = CHECKSUM_NONE;
+	copy->pkt_type = PACKET_OTHERHOST;
+	copy->protocol = __constant_htons(0x0019);  /* ETH_P_80211_RAW */
+	netif_rx(copy);
+}
+
+/*
+ * Forward a received management frame to user space when the vap's
+ * app_filter has the matching IEEE80211_FILTER_TYPE_* bit set.
+ * The copy/inject tail duplicated forward_mgmt_to_app_for_further_
+ * processing() verbatim; call that helper instead.
+ */
+static void
+forward_mgmt_to_app(struct ieee80211vap *vap, int subtype, struct sk_buff *skb,
+	struct ieee80211_frame *wh)
+{
+	int filter_type = 0;
+
+	/* map the 802.11 mgmt subtype to its filter bit */
+	switch (subtype) {
+	case IEEE80211_FC0_SUBTYPE_BEACON:
+		filter_type = IEEE80211_FILTER_TYPE_BEACON;
+		break;
+	case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+		filter_type = IEEE80211_FILTER_TYPE_PROBE_REQ;
+		break;
+	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+		filter_type = IEEE80211_FILTER_TYPE_PROBE_RESP;
+		break;
+	case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+	case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
+		filter_type = IEEE80211_FILTER_TYPE_ASSOC_REQ;
+		break;
+	case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+	case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
+		filter_type = IEEE80211_FILTER_TYPE_ASSOC_RESP;
+		break;
+	case IEEE80211_FC0_SUBTYPE_AUTH:
+		filter_type = IEEE80211_FILTER_TYPE_AUTH;
+		break;
+	case IEEE80211_FC0_SUBTYPE_DEAUTH:
+		filter_type = IEEE80211_FILTER_TYPE_DEAUTH;
+		break;
+	case IEEE80211_FC0_SUBTYPE_DISASSOC:
+		filter_type = IEEE80211_FILTER_TYPE_DISASSOC;
+		break;
+	case IEEE80211_FC0_SUBTYPE_ACTION:
+		filter_type = IEEE80211_FILTER_TYPE_ACTION;
+		break;
+	default:
+		break;
+	}
+
+	if (filter_type && ((vap->app_filter & filter_type) == filter_type))
+		forward_mgmt_to_app_for_further_processing(vap, subtype, skb, wh);
+}
+
+/*
+ * Cache an Atheros vendor IE on the node: apply its advanced-capability
+ * fields and keep a private copy of the raw element.
+ */
+void
+ieee80211_saveath(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	const struct ieee80211_ie_athAdvCap *adv_cap =
+		(const struct ieee80211_ie_athAdvCap *) ie;
+
+	ni->ni_ath_flags = adv_cap->athAdvCap_capability;
+	if (ni->ni_ath_flags & IEEE80211_ATHC_COMP)
+		ni->ni_ath_defkeyindex = LE_READ_2(&adv_cap->athAdvCap_defKeyIndex);
+
+	ieee80211_saveie(&ni->ni_ath_ie, ie);
+}
+
+/*
+ * Map an 802.11h channel number to a channel structure, preferring one
+ * with the current BSS channel's flags (preserves turbo), falling back
+ * to any channel on that frequency.  Returns NULL if none exists.
+ */
+struct ieee80211_channel *
+ieee80211_doth_findchan(struct ieee80211vap *vap, u_int8_t chan)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *found;
+	int bss_flags;
+	int freq_mhz;
+
+	KASSERT(ic->ic_bsschan != IEEE80211_CHAN_ANYC, ("BSS channel not set up"));
+
+	bss_flags = ic->ic_bsschan->ic_flags & IEEE80211_CHAN_ALL;
+	freq_mhz = ieee80211_ieee2mhz(chan, 0);
+
+	found = ieee80211_find_channel(ic, freq_mhz, bss_flags);
+	if (found != NULL)
+		return found;
+	return ieee80211_find_channel(ic, freq_mhz, 0);
+}
+
+/*
+ * Cancel any pending deferred channel switch for this VAP's device
+ * by issuing the CANCEL flag to the deferred-set-channel hook.
+ */
+static void
+ieee80211_doth_cancel_cs(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	/* attempt a cancel */
+	ic->ic_set_channel_deferred(ic, 0, IEEE80211_SET_CHANNEL_DEFERRED_CANCEL);
+}
+
+/*
+ * Parse a peer's HT Capabilities IE and merge it with our own (ic) HT
+ * capabilities into ni->ni_htcap. The raw IE is cached in ni->ni_ie_htcap.
+ *
+ * Returns 0 when the IE is byte-identical to the cached copy (no work done),
+ * 1 after the node's HT parameters have been (re)computed.
+ */
+int
+ieee80211_parse_htcap(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_ie_htcap *htcap = (struct ieee80211_ie_htcap *)ie;
+	u_int16_t peer_cap = IEEE80211_HTCAP_CAPABILITIES(htcap);
+	/* Capabilities both sides support. */
+	u_int16_t merged_cap = peer_cap & ic->ic_htcap.cap;
+
+	/* Compare with stored ie, return 0 if unchanged */
+	if (memcmp(htcap, &ni->ni_ie_htcap, sizeof(struct ieee80211_ie_htcap)) == 0) {
+		return 0;
+	}
+
+	memcpy(&ni->ni_ie_htcap, htcap, sizeof(ni->ni_ie_htcap));
+
+	/* Take the combination of IC and STA parameters */
+	/* set HT capabilities */
+	ni->ni_htcap.cap = 0;
+
+	/* set channel width (peer's 40 MHz support, taken as advertised) */
+	ni->ni_htcap.cap |= peer_cap & IEEE80211_HTCAP_C_CHWIDTH40;
+
+	/* Set power save mode - STA determines this, not the AP. */
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		ni->ni_htcap.pwrsave = ic->ic_htcap.pwrsave;
+	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		u_int8_t smps = IEEE80211_HTCAP_PWRSAVE_MODE(htcap);
+		ni->ni_htcap.pwrsave = smps;
+	} else if (vap->iv_opmode == IEEE80211_M_WDS) {
+		/* WDS power save unsupported */
+		ni->ni_htcap.pwrsave = IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE;
+	}
+
+	/* Default invalid PS mode to NONE */
+	if (ni->ni_htcap.pwrsave == IEEE80211_HTCAP_C_MIMOPWRSAVE_NA)
+		ni->ni_htcap.pwrsave = IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE;
+
+	/* set SHORT GI options (only if both sides support them) */
+	ni->ni_htcap.cap |= merged_cap & IEEE80211_HTCAP_C_SHORTGI20;
+	ni->ni_htcap.cap |= merged_cap & IEEE80211_HTCAP_C_SHORTGI40;
+
+	/* Set STBC options: we can TX STBC when we support it and the peer
+	 * can receive STBC.
+	 * NOTE(review): the statement below IS the body of this if — the
+	 * indentation is misleading but the logic is as intended. */
+	if ((ic->ic_htcap.cap & IEEE80211_HTCAP_C_TXSTBC)
+			&& (IEEE80211_HTCAP_RX_STBC_MODE(htcap)))
+	ni->ni_htcap.cap |= IEEE80211_HTCAP_C_TXSTBC;
+
+	if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_TXSTBC)
+		ni->ni_htcap.numrxstbcstr = IEEE80211_HTCAP_RX_STBC_MODE(htcap);
+	else
+		ni->ni_htcap.numrxstbcstr = 0;
+
+	/* delayed block ack */
+	ni->ni_htcap.cap |= peer_cap & IEEE80211_HTCAP_C_DELAYEDBLKACK;
+
+	/* Maximum A-MSDU size (7935 or 3839 octets per 802.11n) */
+	if (peer_cap & IEEE80211_HTCAP_C_MAXAMSDUSIZE_8K)
+		ni->ni_htcap.maxmsdu = 7935;
+	else
+		ni->ni_htcap.maxmsdu = 3839;
+
+	/* DSSS/CCK mode in 40 MHz */
+	ni->ni_htcap.cap |= peer_cap & IEEE80211_HTCAP_C_DSSSCCK40;
+
+	/* PSMP support (only if AP supports) */
+	ni->ni_htcap.cap |= peer_cap & IEEE80211_HTCAP_C_PSMP;
+
+	/* set 40 MHz intolerant */
+	ni->ni_htcap.cap |= peer_cap & IEEE80211_HTCAP_C_40_INTOLERANT;
+
+	/* set L-SIG TXOP support */
+	ni->ni_htcap.cap |= peer_cap & IEEE80211_HTCAP_C_LSIGTXOPPROT;
+
+	/* set maximum A-MPDU size */
+	ni->ni_htcap.maxampdu = IEEE80211_HTCAP_MAX_AMPDU_LEN(htcap);
+
+	/* set maximum MPDU spacing */
+	ni->ni_htcap.mpduspacing = IEEE80211_HTCAP_MIN_AMPDU_SPACING(htcap);
+
+	/* set MCS rate indexes (copied verbatim from the peer's MCS set) */
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_NSS1);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_NSS2);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_NSS3);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS4] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_NSS4);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_UEQM1);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_UEQM2);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_UEQM3);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_UEQM4);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_UEQM5);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap, IEEE80211_HT_MCSSET_20_40_UEQM6);
+
+	/* set maximum data rate */
+	ni->ni_htcap.maxdatarate = IEEE80211_HTCAP_HIGHEST_DATA_RATE(htcap);
+
+	/* set MCS parameters */
+	ni->ni_htcap.mcsparams = 0;
+
+	if (IEEE80211_HTCAP_MCS_PARAMS(htcap) & IEEE80211_HTCAP_MCS_TX_SET_DEFINED) {
+		ni->ni_htcap.mcsparams |= IEEE80211_HTCAP_MCS_TX_SET_DEFINED;
+
+		/* set number of Tx spatial streams */
+		if(IEEE80211_HTCAP_MCS_PARAMS(htcap) & IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ) {
+			ni->ni_htcap.numtxspstr = IEEE80211_HTCAP_MCS_STREAMS(htcap);
+			ni->ni_htcap.mcsparams |= IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ |
+				(IEEE80211_HTCAP_MCS_PARAMS(htcap) & IEEE80211_HTCAP_MCS_TX_UNEQ_MOD);
+		} else {
+			ni->ni_htcap.numtxspstr = 0;
+		}
+	} else {
+		ni->ni_htcap.numtxspstr = 0;
+	}
+
+	/* LDPC is honored for Quantenna peers, or for any peer when the
+	 * VAP explicitly allows LDPC with non-Quantenna peers. */
+	if (ni->ni_qtn_assoc_ie || (vap->iv_ht_flags & IEEE80211_HTF_LDPC_ALLOW_NON_QTN)) {
+		ni->ni_htcap.cap |= peer_cap & IEEE80211_HTCAP_C_LDPCCODING;
+	}
+
+	/* TX beamforming: intersection of our and the peer's TxBF bytes. */
+	ni->ni_htcap.hc_txbf[0] = ic->ic_htcap.hc_txbf[0] & htcap->hc_txbf[0];
+	ni->ni_htcap.hc_txbf[1] = ic->ic_htcap.hc_txbf[1] & htcap->hc_txbf[1];
+	ni->ni_htcap.hc_txbf[2] = ic->ic_htcap.hc_txbf[2] & htcap->hc_txbf[2];
+	ni->ni_htcap.hc_txbf[3] = ic->ic_htcap.hc_txbf[3] & htcap->hc_txbf[3];
+
+	return 1;
+}
+/*
+ * Merge two VHT MCS maps (local and peer) into one.
+ *
+ * A VHT MCS map packs one 2-bit "max MCS supported" field per spatial
+ * stream (NSS1 in bits 0-1 up to NSS8 in bits 14-15; value 3 = stream
+ * not supported). For each stream, the result is "not supported" if
+ * either side does not support the stream, otherwise the minimum of the
+ * two 2-bit values.
+ */
+static u_int16_t
+ieee80211_merge_vhtmcs(u_int16_t local_vhtmcs, u_int16_t far_vhtmcs)
+{
+	/* Spatial stream from 1-8 = 3 (not supported) */
+	u_int16_t merge_vhtmcsmap = IEEE80211_VHTMCS_ALL_DISABLE;
+
+	enum ieee80211_vht_nss vhtnss;
+	enum ieee80211_vht_mcs_supported vhtmcs = IEEE80211_VHT_MCS_NA;
+
+	for (vhtnss = IEEE80211_VHT_NSS1; vhtnss <= IEEE80211_VHT_NSS8; vhtnss++) {
+
+		/* Check if particular stream is not supported by peer */
+		if (((local_vhtmcs & 0x0003) == IEEE80211_VHT_MCS_NA) ||
+			((far_vhtmcs & 0x0003) == IEEE80211_VHT_MCS_NA)) {
+			vhtmcs = IEEE80211_VHT_MCS_NA;
+		} else {
+			vhtmcs = min((local_vhtmcs & 0x0003), (far_vhtmcs & 0x0003));
+		}
+
+		/* Clear this stream's 2-bit field, then insert the merged value. */
+		switch(vhtnss) {
+		case IEEE80211_VHT_NSS1:
+			merge_vhtmcsmap &= 0xFFFC;
+			merge_vhtmcsmap |= vhtmcs;
+			break;
+		case IEEE80211_VHT_NSS2:
+			merge_vhtmcsmap &= 0xFFF3;
+			merge_vhtmcsmap |= (vhtmcs << 2);
+			break;
+		case IEEE80211_VHT_NSS3:
+			merge_vhtmcsmap &= 0xFFCF;
+			merge_vhtmcsmap |= (vhtmcs << 4);
+			break;
+		case IEEE80211_VHT_NSS4:
+			merge_vhtmcsmap &= 0xFF3F;
+			merge_vhtmcsmap |= (vhtmcs << 6);
+			break;
+		case IEEE80211_VHT_NSS5:
+			merge_vhtmcsmap &= 0xFCFF;
+			merge_vhtmcsmap |= (vhtmcs << 8);
+			break;
+		case IEEE80211_VHT_NSS6:
+			merge_vhtmcsmap &= 0xF3FF;
+			merge_vhtmcsmap |= (vhtmcs << 10);
+			break;
+		case IEEE80211_VHT_NSS7:
+			merge_vhtmcsmap &= 0xCFFF;
+			merge_vhtmcsmap |= (vhtmcs << 12);
+			break;
+		case IEEE80211_VHT_NSS8:
+			merge_vhtmcsmap &= 0x3FFF;
+			merge_vhtmcsmap |= (vhtmcs << 14);
+			break;
+		}
+		/* Advance both maps to the next stream's 2-bit field. */
+		local_vhtmcs = local_vhtmcs >> 2;
+		far_vhtmcs = far_vhtmcs >> 2;
+	}
+
+	return (merge_vhtmcsmap);
+}
+
+
+/*
+ * Parse a peer's VHT Capabilities IE and merge it with our own VHT
+ * capabilities (2.4 GHz or 5 GHz set, chosen by the current channel)
+ * into ni->ni_vhtcap. The raw IE is cached in ni->ni_ie_vhtcap.
+ * Beamformer/beamformee bits are cross-merged: we become a (SU/MU)
+ * beamformer toward this node only when we can beamform and the peer can
+ * act as beamformee, and vice versa. Logs all merged values when
+ * IEEE80211_MSG_VHT debugging is enabled.
+ */
+void
+ieee80211_parse_vhtcap(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_ie_vhtcap *rvhtcap = (struct ieee80211_ie_vhtcap *)ie;
+	u_int32_t peer_vhtcap = IEEE80211_VHTCAP_GET_CAPFLAGS(rvhtcap);
+	struct ieee80211_vhtcap *ic_vhtcap = NULL;
+	u_int32_t merged_cap;
+
+	/* Pick the band-specific local capability set. */
+	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+	      ic_vhtcap = &ic->ic_vhtcap_24g;
+	else
+	      ic_vhtcap = &ic->ic_vhtcap;
+
+	merged_cap = peer_vhtcap & ic_vhtcap->cap_flags;
+
+	/* Following BF related fields require cross merging hence clear them first */
+	merged_cap &= ~(IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP |
+				IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP |
+				IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP |
+				IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP);
+
+	memcpy(&ni->ni_ie_vhtcap, rvhtcap, sizeof(ni->ni_ie_vhtcap));
+
+	/* Take the combination of IC and STA parameters */
+	/* Set VHT capabilities in the node structure */
+	ni->ni_vhtcap.cap_flags = merged_cap;
+
+	ni->ni_vhtcap.maxmpdu = min(ic_vhtcap->maxmpdu,
+					IEEE80211_VHTCAP_GET_MAXMPDU(rvhtcap));
+	ni->ni_vhtcap.chanwidth = min(ic_vhtcap->chanwidth,
+					IEEE80211_VHTCAP_GET_CHANWIDTH(rvhtcap));
+
+	ni->ni_vhtcap.rxstbc = IEEE80211_VHTCAP_GET_RXSTBC(rvhtcap);
+	ni->ni_vhtcap.bfstscap = IEEE80211_VHTCAP_GET_BFSTSCAP(rvhtcap);
+	ni->ni_vhtcap.numsounding = IEEE80211_VHTCAP_GET_NUMSOUND(rvhtcap);
+	ni->ni_vhtcap.maxampduexp = min(ic_vhtcap->maxampduexp,
+					IEEE80211_VHTCAP_GET_MAXAMPDUEXP(rvhtcap));
+
+	ni->ni_vhtcap.lnkadptcap = min(ic_vhtcap->lnkadptcap,
+					IEEE80211_VHTCAP_GET_LNKADPTCAP(rvhtcap));
+
+	/* We may TX STBC only if the VAP enables it and the peer can RX STBC. */
+	if ((vap->iv_vht_flags & IEEE80211_VHTCAP_C_TX_STBC) &&
+		(ni->ni_vhtcap.rxstbc)) {
+		ni->ni_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_TX_STBC;
+	}
+
+	/* Cross-merge of SU/MU beamforming roles.
+	 * NOTE(review): these checks read ic->ic_vhtcap directly rather than
+	 * the band-selected ic_vhtcap used above — on 2.4 GHz this merges
+	 * against the 5 GHz flags; confirm whether that is intentional. */
+	if ((ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP) &&
+		IEEE80211_VHTCAP_GET_SU_BEAMFORMER(rvhtcap)) {
+		ni->ni_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP;
+	}
+
+	if ((ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_SU_BEAM_FORMER_CAP) &&
+		IEEE80211_VHTCAP_GET_SU_BEAMFORMEE(rvhtcap)) {
+		ni->ni_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_SU_BEAM_FORMEE_CAP;
+	}
+
+	if ((ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP) &&
+		IEEE80211_VHTCAP_GET_MU_BEAMFORMER(rvhtcap)) {
+		ni->ni_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP;
+	}
+
+	if ((ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP) &&
+		IEEE80211_VHTCAP_GET_MU_BEAMFORMEE(rvhtcap)) {
+		ni->ni_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP;
+	}
+
+	/* Tx/Rx rates and MCS maps cross-merge: our RX side limits the
+	 * node's TX side and vice versa. */
+	ni->ni_vhtcap.txlgimaxrate = min(ic_vhtcap->rxlgimaxrate,
+					IEEE80211_VHTCAP_GET_TX_LGIMAXRATE(rvhtcap));
+	ni->ni_vhtcap.rxlgimaxrate = min(ic_vhtcap->txlgimaxrate,
+					IEEE80211_VHTCAP_GET_RX_LGIMAXRATE(rvhtcap));
+	ni->ni_vhtcap.txmcsmap = ieee80211_merge_vhtmcs(ic_vhtcap->rxmcsmap,
+					IEEE80211_VHTCAP_GET_TX_MCS_NSS(rvhtcap));
+	ni->ni_vhtcap.rxmcsmap = ieee80211_merge_vhtmcs(ic_vhtcap->txmcsmap,
+					IEEE80211_VHTCAP_GET_RX_MCS_NSS(rvhtcap));
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"flags: local(0x%08x) remote(0x%08x) >> node(0x%08x)\n",
+				ic_vhtcap->cap_flags,
+				IEEE80211_VHTCAP_GET_CAPFLAGS(rvhtcap),
+				ni->ni_vhtcap.cap_flags);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"maxmpdu: local(%d) remote(%d) >> node(%d)\n",
+				ic_vhtcap->maxmpdu,
+				IEEE80211_VHTCAP_GET_MAXMPDU(rvhtcap),
+				ni->ni_vhtcap.maxmpdu);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"chanwidth: local(%d) remote(%d) >> node(%d)\n",
+				ic_vhtcap->chanwidth,
+				IEEE80211_VHTCAP_GET_CHANWIDTH(rvhtcap),
+				ni->ni_vhtcap.chanwidth);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"rxstbc: local(%d) remote(%d) >> node(%d)\n",
+				ic_vhtcap->rxstbc,
+				IEEE80211_VHTCAP_GET_RXSTBC(rvhtcap),
+				ni->ni_vhtcap.rxstbc);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"bfstscap: local(%d) remote(%d) >> node(%d)\n",
+				ic_vhtcap->bfstscap,
+				IEEE80211_VHTCAP_GET_BFSTSCAP(rvhtcap),
+				ni->ni_vhtcap.bfstscap);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"numsounding: local(%d) remote(%d) >> node(%d)\n",
+				ic_vhtcap->numsounding,
+				IEEE80211_VHTCAP_GET_NUMSOUND(rvhtcap),
+				ni->ni_vhtcap.numsounding);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"maxampduexp: local(%d) remote(%d) >> node(%d)\n",
+				ic_vhtcap->maxampduexp,
+				IEEE80211_VHTCAP_GET_MAXAMPDUEXP(rvhtcap),
+				ni->ni_vhtcap.maxampduexp);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"lnkadptcap: local(%d) remote(%d) >> node(%d)\n",
+				ic_vhtcap->lnkadptcap,
+				IEEE80211_VHTCAP_GET_LNKADPTCAP(rvhtcap),
+				ni->ni_vhtcap.lnkadptcap);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"rxlgimaxrate: local(%d) remote-tx(%d) >> node(%d)\n",
+				ic_vhtcap->rxlgimaxrate,
+				IEEE80211_VHTCAP_GET_TX_LGIMAXRATE(rvhtcap),
+				ni->ni_vhtcap.rxlgimaxrate);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"txlgimaxrate: local(%d) remote-rx(%d) >> node(%d)\n",
+				ic_vhtcap->txlgimaxrate,
+				IEEE80211_VHTCAP_GET_RX_LGIMAXRATE(rvhtcap),
+				ni->ni_vhtcap.txlgimaxrate);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"rxmcsmap: local(0x%08x) remote-tx(0x%08x) >> node(0x%08x)\n",
+				ic_vhtcap->rxmcsmap,
+				IEEE80211_VHTCAP_GET_TX_MCS_NSS(rvhtcap),
+				ni->ni_vhtcap.rxmcsmap);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"txmcsmap: local(0x%08x) remote-rx(0x%08x) >> node(0x%08x)\n",
+				ic_vhtcap->txmcsmap,
+				IEEE80211_VHTCAP_GET_RX_MCS_NSS(rvhtcap),
+				ni->ni_vhtcap.txmcsmap);
+}
+
+/*
+ * Parse the peer's VHT Capabilities IE only when it differs from the
+ * copy cached on the node. Returns 0 if unchanged, 1 after reparsing.
+ */
+int
+ieee80211_check_and_parse_vhtcap(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	struct ieee80211_ie_vhtcap *rvhtcap = (struct ieee80211_ie_vhtcap *)ie;
+
+	/* Compare with stored ie, return 0 if unchanged */
+	if (memcmp(rvhtcap, &ni->ni_ie_vhtcap, sizeof(struct ieee80211_ie_vhtcap)) == 0) {
+		return 0;
+	}
+
+	ieee80211_parse_vhtcap(ni, ie);
+	return 1;
+}
+
+/*
+ * Scan a Broadcom vendor VHT IE's embedded sub-IEs and return a pointer
+ * to the first VHT Capabilities sub-element, or NULL if none is present.
+ * Unknown sub-IEs are counted in is_rx_elem_unknown and skipped.
+ * NOTE(review): 'end' is computed as ie + ie[1] (IE length without the
+ * 2-byte id/len header), and sub-IE lengths are not bounds-checked
+ * against 'end' — confirm the early cut-off is intentional.
+ */
+static uint8_t *
+ieee80211_get_vhtcap_from_brcmvht(struct ieee80211_node *ni, uint8_t *ie)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_ie_brcm_vht *brcm_vht = (struct ieee80211_ie_brcm_vht *)ie;
+	uint8_t *vhtie = brcm_vht->vht_ies;
+	uint8_t *end = ie + ie[1];
+	uint8_t *vhtcap = NULL;
+
+	while (vhtie < end) {
+		switch (*vhtie) {
+		case IEEE80211_ELEMID_VHTCAP:
+			vhtcap = vhtie;
+			return vhtcap;
+		default:
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+					"unhandled id %u, len %u", *vhtie, vhtie[1]);
+			vap->iv_stats.is_rx_elem_unknown++;
+			break;
+		}
+		/* Step over this sub-IE: 2-byte header plus payload. */
+		vhtie += vhtie[1] + 2;
+	}
+
+	return vhtcap;
+}
+
+/*
+ * Scan a Broadcom vendor VHT IE's embedded sub-IEs and return a pointer
+ * to the first VHT Operation sub-element, or NULL if none is present.
+ * Mirrors ieee80211_get_vhtcap_from_brcmvht, including the same
+ * 'end = ie + ie[1]' bound (see the note there).
+ */
+static uint8_t *
+ieee80211_get_vhtop_from_brcmvht(struct ieee80211_node *ni, uint8_t *ie)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_ie_brcm_vht *brcm_vht = (struct ieee80211_ie_brcm_vht *)ie;
+	uint8_t *vhtie = brcm_vht->vht_ies;
+	uint8_t *end = ie + ie[1];
+	uint8_t *vhtop = NULL;
+
+	while (vhtie < end) {
+		switch (*vhtie) {
+		case IEEE80211_ELEMID_VHTOP:
+			vhtop = vhtie;
+			return vhtop;
+		default:
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+					"unhandled id %u, len %u", *vhtie, vhtie[1]);
+			vap->iv_stats.is_rx_elem_unknown++;
+			break;
+		}
+		/* Step over this sub-IE: 2-byte header plus payload. */
+		vhtie += vhtie[1] + 2;
+	}
+
+	return vhtop;
+}
+
+
+/*
+ * Handle a CCA measurement request IE: if no CCA measurement is already in
+ * progress (IEEE80211_F_CCA clear) and the request token is newer than the
+ * last one seen, start a CCA measurement on the requested channel at the
+ * given TSF for the given duration (TUs converted to ms).
+ * The two compile-time variants differ only in where the token and request
+ * fields live (wrapped in a common measurement IE when 802.11k support is on).
+ * NOTE(review): the token comparison uses subtraction to tolerate wraparound
+ * — confirm the token field widths make this behave as a serial-number
+ * comparison.
+ */
+void
+ieee80211_parse_measinfo(struct ieee80211_node *ni, u_int8_t *ie)
+{
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_ie_measure_comm *ie_comm = (struct ieee80211_ie_measure_comm *)(void*) ie;
+	struct ieee80211_ie_measreq *measinfo = (struct ieee80211_ie_measreq *)(void*) ie_comm->data;
+	struct ieee80211_channel *ch;
+	u_int32_t duration;
+	u_int64_t tsf;
+
+	if ( !(ic->ic_flags & IEEE80211_F_CCA) ) {
+		ic->ic_flags |= IEEE80211_F_CCA;
+
+		ch = findchannel(ic, measinfo->chan_num, ic->ic_des_mode);
+
+		if (ie_comm->token - ic->ic_cca_token > 0) {
+			ic->ic_cca_token = ie_comm->token;
+			tsf = ntohll(measinfo->start_tsf);
+			duration = ntohs(measinfo->duration_tu);
+			duration = IEEE80211_TU_TO_MS(duration);
+			ic->ic_set_start_cca_measurement(ic, ch,
+							 tsf, duration);
+		}
+	}
+#else
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_ie_measreq *measinfo = (struct ieee80211_ie_measreq *)(void*) ie;
+	struct ieee80211_channel *ch;
+	u_int32_t duration;
+	u_int64_t tsf;
+
+	if ( !(ic->ic_flags & IEEE80211_F_CCA) ) {
+		ic->ic_flags |= IEEE80211_F_CCA;
+
+		ch = findchannel(ic, measinfo->chan_num, ic->ic_des_mode);
+
+		if (measinfo->meas_token - ic->ic_cca_token > 0) {
+			ic->ic_cca_token = measinfo->meas_token;
+			tsf = ntohll(measinfo->start_tsf);
+			duration = ntohs(measinfo->duration_tu);
+			duration = IEEE80211_TU_TO_MS(duration);
+			ic->ic_set_start_cca_measurement(ic, ch,
+							 tsf, duration);
+		}
+	}
+#endif
+}
+
+/*
+ * Parse a peer's HT Information (HT Operation) IE into ni->ni_htinfo and
+ * toggle local RTS/CTS protection based on the advertised HT opmode.
+ * The raw IE is cached in ni->ni_ie_htinfo.
+ *
+ * Returns 0 when the IE is byte-identical to the cached copy, 1 otherwise.
+ */
+int
+ieee80211_parse_htinfo(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_ie_htinfo *htinfo = (struct ieee80211_ie_htinfo *)ie;
+
+	/* Compare with stored ie, return 0 if unchanged */
+	if (memcmp(htinfo, &ni->ni_ie_htinfo, sizeof(struct ieee80211_ie_htinfo)) == 0)
+		return 0;
+
+	memcpy(&ni->ni_ie_htinfo, htinfo, sizeof(ni->ni_ie_htinfo));
+
+	/* set primary channel */
+	ni->ni_htinfo.ctrlchannel = IEEE80211_HTINFO_PRIMARY_CHANNEL(htinfo);
+
+	/* set byte 1 values */
+	ni->ni_htinfo.byte1 = 0;
+
+	/* set the channel width and secondary channel offset
+	 * (40 MHz only honored when we also support 40 MHz) */
+	if ((IEEE80211_HTINFO_BYTE_ONE(htinfo) & IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40) &&
+			(ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40)) {
+		ni->ni_htinfo.choffset = IEEE80211_HTINFO_B1_EXT_CHOFFSET(htinfo);
+		ni->ni_htinfo.byte1 |= IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40;
+	} else {
+		ni->ni_htinfo.choffset = 0;
+	}
+
+	/* force 20MHz bw if secondary channel offset is unknown */
+	if ((ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) && !ni->ni_htinfo.choffset)
+		ni->ni_htcap.cap &= ~IEEE80211_HTCAP_C_CHWIDTH40;
+
+	/* XXX set the S-PSMP support */
+	if (ni->ni_htcap.cap & IEEE80211_HTCAP_C_PSMP)
+	{
+		if (IEEE80211_HTINFO_BYTE_ONE(htinfo) & IEEE80211_HTINFO_B1_CONTROLLED_ACCESS)
+			ni->ni_htinfo.byte1 |= (ic->ic_htinfo.byte1 & IEEE80211_HTINFO_B1_CONTROLLED_ACCESS);
+	}
+
+	/* service level granularity */
+	if (ni->ni_htinfo.byte1 & IEEE80211_HTINFO_B1_CONTROLLED_ACCESS)
+		ni->ni_htinfo.sigranularity = ic->ic_htinfo.sigranularity;
+
+	/* set byte 2 values */
+	ni->ni_htinfo.byte2 = 0;
+
+	ni->ni_htinfo.opmode = IEEE80211_HTINFO_B2_OP_MODE(htinfo);
+	ni->ni_htinfo.byte2 |= IEEE80211_HTINFO_BYTE_TWO(htinfo) & IEEE80211_HTINFO_B2_NON_GF_PRESENT;
+	ni->ni_htinfo.byte2 |= IEEE80211_HTINFO_BYTE_TWO(htinfo) & IEEE80211_HTINFO_B2_OBSS_PROT;
+
+	/* set byte 3 values */
+	ni->ni_htinfo.byte3 = 0;
+
+	/* set byte 4 values */
+	ni->ni_htinfo.byte4 = 0;
+
+	ni->ni_htinfo.byte4 |= IEEE80211_HTINFO_BYTE_FOUR(htinfo) & IEEE80211_HTINFO_B4_DUAL_BEACON;
+	ni->ni_htinfo.byte4 |= IEEE80211_HTINFO_BYTE_FOUR(htinfo) & IEEE80211_HTINFO_B4_DUAL_CTS;
+
+	/* set byte 5 values */
+	ni->ni_htinfo.byte5 = 0;
+
+	ni->ni_htinfo.byte5 |= IEEE80211_HTINFO_BYTE_FIVE(htinfo) & IEEE80211_HTINFO_B5_STBC_BEACON;
+	ni->ni_htinfo.byte5 |= IEEE80211_HTINFO_BYTE_FIVE(htinfo) & IEEE80211_HTINFO_B5_LSIGTXOPPROT;
+	ni->ni_htinfo.byte5 |= IEEE80211_HTINFO_BYTE_FIVE(htinfo) & IEEE80211_HTINFO_B5_PCO_ACTIVE;
+	ni->ni_htinfo.byte5 |= IEEE80211_HTINFO_BYTE_FIVE(htinfo) & IEEE80211_HTINFO_B5_40MHZPHASE;
+
+	/* set basic rates */
+	/* CBW = 20/40 MHz, Nss = 1, Nes = 1, EQM/ No EQM */
+	ni->ni_htinfo.basicmcsset[IEEE80211_HT_MCSSET_20_40_NSS1] =
+					IEEE80211_HTINFO_BASIC_MCS_VALUE(htinfo, IEEE80211_HT_MCSSET_20_40_NSS1);
+
+	/* CBW = 20/40 MHz, Nss = 2, Nes = 1, EQM */
+	ni->ni_htinfo.basicmcsset[IEEE80211_HT_MCSSET_20_40_NSS2] =
+					IEEE80211_HTINFO_BASIC_MCS_VALUE(htinfo, IEEE80211_HT_MCSSET_20_40_NSS2);
+
+	/* Enable RTS-CTS if HT-protection bit is set */
+	if (IEEE80211_11N_PROTECT_ENABLED(ic) &&
+		ni->ni_htinfo.opmode && !ic->ic_local_rts &&
+		(ni->ni_vap->iv_opmode == IEEE80211_M_STA ||
+		ni->ni_vap->iv_opmode == IEEE80211_M_WDS)) {
+		/* RTS-CTS can be enabled only when Rev B and later is used */
+		if (get_hardware_revision() != HARDWARE_REVISION_RUBY_A) {
+			ic->ic_local_rts = 1;
+			ic->ic_use_rtscts(ic);
+		}
+	}
+	/* Disable RTS-CTS if HT-protection bit is not set	*
+	 * and if RTS-CTS is in use currently		*/
+	if (IEEE80211_11N_PROTECT_ENABLED(ic) &&
+		ic->ic_local_rts && !(ni->ni_htinfo.opmode) &&
+		(ni->ni_vap->iv_opmode == IEEE80211_M_STA ||
+		ni->ni_vap->iv_opmode == IEEE80211_M_WDS)) {
+		/* RTS-CTS can be enabled only when Rev B and later is used */
+		if (get_hardware_revision() != HARDWARE_REVISION_RUBY_A) {
+			ic->ic_local_rts = 0;
+			ic->ic_use_rtscts(ic);
+		}
+	}
+	return 1;
+}
+
+/*
+ * Parse a peer's VHT Operation IE into ni->ni_vhtop, clamping the channel
+ * width to our own (band-specific) VHT operation width. The raw IE is
+ * cached in ni->ni_ie_vhtop.
+ *
+ * Returns 0 when the IE is byte-identical to the cached copy, 1 otherwise.
+ */
+int
+ieee80211_parse_vhtop(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	struct ieee80211_ie_vhtop *rvhtop = (struct ieee80211_ie_vhtop *)ie;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_vhtop *ic_vhtop = NULL;
+
+	/* Compare with stored ie, return 0 if unchanged */
+	if (memcmp(rvhtop, &ni->ni_ie_vhtop, sizeof(struct ieee80211_ie_vhtop)) == 0)
+		return 0;
+
+	/* Pick the band-specific local VHT operation parameters. */
+	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+	      ic_vhtop = &ic->ic_vhtop_24g;
+	else
+	      ic_vhtop = &ic->ic_vhtop;
+
+	memcpy(&ni->ni_ie_vhtop, rvhtop, sizeof(ni->ni_ie_vhtop));
+
+	ni->ni_vhtop.chanwidth = MIN(IEEE80211_VHTOP_GET_CHANWIDTH(rvhtop), ic_vhtop->chanwidth);
+	ni->ni_vhtop.centerfreq0 = IEEE80211_VHTOP_GET_CENTERFREQ0(rvhtop);
+	ni->ni_vhtop.centerfreq1 = IEEE80211_VHTOP_GET_CENTERFREQ1(rvhtop);
+	ni->ni_vhtop.basicvhtmcsnssset = IEEE80211_VHTOP_GET_BASIC_MCS_NSS(rvhtop);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"vht op: chan width: %d\n", ni->ni_vhtop.chanwidth);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"vht op: center freq0: %d\n", ni->ni_vhtop.centerfreq0);
+
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"vht op: center freq1: %d\n", ni->ni_vhtop.centerfreq1);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"vht op: basicvhtmcsnssset: 0x%08x\n", ni->ni_vhtop.basicvhtmcsnssset);
+
+	return 1;
+}
+
+/*
+static int
+ieee80211_setup_vhtcap(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	return 1;
+}
+*/
+
+/*
+ * Validate and merge a peer's HT Capabilities IE against this VAP's
+ * HT policy flags at association time (AP side: pwrsave is taken from
+ * the client as advertised). The raw IE is cached in ni->ni_ie_htcap and
+ * ni->ni_htcap is populated with the merged parameters.
+ *
+ * Returns 0 on success, otherwise a bitmask of policy violations:
+ *   0x0001 no 40 MHz, 0x0002 no SGI20, 0x0004 no SGI40, 0x0008 no RX STBC,
+ *   0x0010 no TX STBC, 0x0020 no DSSS/CCK-40, 0x0040 no PSMP,
+ *   0x0080 no L-SIG TXOP, 0x0100 fewer than 2 RX STBC streams.
+ */
+static int
+ieee80211_setup_htcap(struct ieee80211_node *ni, u_int8_t *ie)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_ie_htcap *htcap = (struct ieee80211_ie_htcap *)(void*) ie;
+	struct ieee80211vap *vap = ni->ni_vap;
+	int error = 0;
+	u_int32_t flags = vap->iv_ht_flags;
+	u_int16_t peer_cap = IEEE80211_HTCAP_CAPABILITIES(htcap);
+	u_int16_t merged_cap = peer_cap & ic->ic_htcap.cap;
+
+	/* save the original HT CAP IE */
+	memcpy(&ni->ni_ie_htcap, htcap, sizeof(ni->ni_ie_htcap));
+	/* Take the combination of IC and STA parameters */
+	/* set HT capabilities */
+	ni->ni_htcap.cap = 0;
+
+	/* set channel width */
+	if ((flags & IEEE80211_HTF_CBW_40MHZ_ONLY)
+			&& !(peer_cap & IEEE80211_HTCAP_C_CHWIDTH40))
+		error |= 0x0001;
+	else
+		ni->ni_htcap.cap |= (peer_cap & IEEE80211_HTCAP_C_CHWIDTH40);
+
+	/* set power save mode */
+	/* NOTE: only ever called when we're running as an AP - take the SM power save as
+	 * being what the client advertises, NOT what the AP wants.
+	 */
+	{
+		int pwrsave = IEEE80211_HTCAP_PWRSAVE_MODE(htcap);
+
+		if (pwrsave == IEEE80211_HTCAP_C_MIMOPWRSAVE_NA)
+		{
+			/* Default to Dynamic powersave if invalid value passed in the (re)association request */
+			pwrsave = IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE;
+		}
+		ni->ni_htcap.pwrsave = pwrsave;
+	}
+
+	/* set SHORT GI options */
+	if ((flags & IEEE80211_HTF_SHORTGI20_ONLY) &&
+			!(peer_cap & IEEE80211_HTCAP_C_SHORTGI20))
+		error |= 0x0002;
+	else
+		ni->ni_htcap.cap |= (merged_cap & IEEE80211_HTCAP_C_SHORTGI20);
+
+	if ((flags & IEEE80211_HTF_SHORTGI40_ONLY) &&
+				!(peer_cap & IEEE80211_HTCAP_C_SHORTGI40))
+		error |= 0x0004;
+	else
+		ni->ni_htcap.cap |= (merged_cap & IEEE80211_HTCAP_C_SHORTGI40);
+
+	/* Set STBC options */
+	if ((flags & IEEE80211_HTF_TXSTBC_ONLY) &&
+				!(IEEE80211_HTCAP_RX_STBC_MODE(htcap)))
+		error |= 0x0008;
+	else {
+		/* We may TX STBC when STBC is enabled and the peer can RX it. */
+		if ((vap->iv_ht_flags & IEEE80211_HTF_STBC_ENABLED) &&
+								IEEE80211_HTCAP_RX_STBC_MODE(htcap))
+			ni->ni_htcap.cap |= IEEE80211_HTCAP_C_TXSTBC;
+	}
+
+	if ((flags & IEEE80211_HTF_RXSTBC_ONLY) &&
+					!(peer_cap & IEEE80211_HTCAP_C_TXSTBC))
+		error |= 0x0010;
+	else {
+		if (vap->iv_ht_flags & IEEE80211_HTF_STBC_ENABLED) {
+			ni->ni_htcap.numrxstbcstr = IEEE80211_HTCAP_RX_STBC_MODE(htcap);
+			/* If STA is capable of receive more streams then we support for tx,
+			   then limit our transmission to what we support */
+			ni->ni_htcap.numrxstbcstr = (ni->ni_htcap.numrxstbcstr > IEEE80211_MAX_TX_STBC_SS) ?
+						IEEE80211_MAX_TX_STBC_SS : ni->ni_htcap.numrxstbcstr;
+		}
+		else
+			ni->ni_htcap.numrxstbcstr = 0;
+	}
+
+	if (IEEE80211_HTCAP_RX_STBC_MODE(htcap)) {
+		ni->ni_htcap.cap |= IEEE80211_HTCAP_C_RXSTBC;
+	}
+
+	/* delayed block ack */
+	ni->ni_htcap.cap |= (peer_cap & IEEE80211_HTCAP_C_DELAYEDBLKACK);
+
+	/* Maximum A-MSDU size (7935 or 3839 octets per 802.11n) */
+	if (peer_cap & IEEE80211_HTCAP_C_MAXAMSDUSIZE_8K)
+		ni->ni_htcap.maxmsdu = 7935;
+	else
+		ni->ni_htcap.maxmsdu = 3839;
+
+	/* DSSS/CCK mode in 40 MHz */
+	if (flags & IEEE80211_HTF_DSSS_40MHZ_ONLY) {
+		if (!(peer_cap & IEEE80211_HTCAP_C_DSSSCCK40))
+			error |= 0x0020;
+	}
+	ni->ni_htcap.cap |= (peer_cap & IEEE80211_HTCAP_C_DSSSCCK40);
+
+	/* PSMP support (only if AP supports) */
+	if ((flags & IEEE80211_HTF_PSMP_SUPPORT_ONLY)
+				&& !(peer_cap & IEEE80211_HTCAP_C_PSMP))
+		error |= 0x0040;
+	else
+		ni->ni_htcap.cap |= (peer_cap & IEEE80211_HTCAP_C_PSMP);
+
+	/* set 40 MHz intolerant */
+	ni->ni_htcap.cap |= (peer_cap & IEEE80211_HTCAP_C_40_INTOLERANT);
+
+	/* set L-SIG TXOP support */
+	if ((flags & IEEE80211_HTF_LSIG_TXOP_ONLY)
+					&& !(peer_cap & IEEE80211_HTCAP_C_LSIGTXOPPROT))
+		error |= 0x0080;
+	else
+		ni->ni_htcap.cap |= (peer_cap & IEEE80211_HTCAP_C_LSIGTXOPPROT);
+
+	/* set maximum A-MPDU size */
+	ni->ni_htcap.maxampdu = IEEE80211_HTCAP_MAX_AMPDU_LEN(htcap);
+
+	/* set maximum MPDU spacing */
+	ni->ni_htcap.mpduspacing = IEEE80211_HTCAP_MIN_AMPDU_SPACING(htcap);
+
+	/* set MCS rate indexes (copied verbatim from the peer's MCS set) */
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_NSS1);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_NSS2);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_NSS3);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS4] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_NSS4);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_UEQM1);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_UEQM2);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_UEQM3);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_UEQM4);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_UEQM5);
+	ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6] =
+		IEEE80211_HTCAP_MCS_VALUE(htcap,IEEE80211_HT_MCSSET_20_40_UEQM6);
+
+	/* set maximum data rate */
+	ni->ni_htcap.maxdatarate = IEEE80211_HTCAP_HIGHEST_DATA_RATE(htcap);
+
+	/* set MCS parameters */
+	ni->ni_htcap.mcsparams = 0;
+
+	if (IEEE80211_HTCAP_MCS_PARAMS(htcap) & IEEE80211_HTCAP_MCS_TX_SET_DEFINED)
+	{
+		ni->ni_htcap.mcsparams |= IEEE80211_HTCAP_MCS_TX_SET_DEFINED;
+
+		/* set number of Tx spatial streams */
+		if(IEEE80211_HTCAP_MCS_PARAMS(htcap) & IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ)
+		{
+			ni->ni_htcap.numtxspstr = IEEE80211_HTCAP_MCS_STREAMS(htcap);
+			ni->ni_htcap.mcsparams |= IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ |
+				(IEEE80211_HTCAP_MCS_PARAMS(htcap) & IEEE80211_HTCAP_MCS_TX_UNEQ_MOD);
+		}
+		else
+			ni->ni_htcap.numtxspstr = 0;
+	}
+	else
+		ni->ni_htcap.numtxspstr = 0;
+
+	if ((flags & IEEE80211_HTF_LDPC_ENABLED) &&
+			(peer_cap & IEEE80211_HTCAP_C_LDPCCODING)) {
+		ni->ni_htcap.cap |= IEEE80211_HTCAP_C_LDPCCODING;
+	}
+
+	if ((flags & IEEE80211_HTF_NSS_2_ONLY) && (ni->ni_htcap.numrxstbcstr < 2))
+		error |= 0x0100;
+
+	/* TxBF merge skipped for Ralink peers (vendor workaround). */
+	if (ni->ni_vendor != PEER_VENDOR_RLNK) {
+		ni->ni_htcap.hc_txbf[0] = ic->ic_htcap.hc_txbf[0] & htcap->hc_txbf[0];
+		ni->ni_htcap.hc_txbf[1] = ic->ic_htcap.hc_txbf[1] & htcap->hc_txbf[1];
+		ni->ni_htcap.hc_txbf[2] = ic->ic_htcap.hc_txbf[2] & htcap->hc_txbf[2];
+		ni->ni_htcap.hc_txbf[3] = ic->ic_htcap.hc_txbf[3] & htcap->hc_txbf[3];
+	}
+
+	return error;
+}
+
+/*
+ * Compare the VAP's first desired SSID with the SSID IE from a scan result
+ * (p_scan->ssid points at the raw IE: [0]=id, [1]=len, [2..]=data).
+ * Returns non-zero when the SSIDs differ, 0 when they match.
+ */
+static inline int ieee80211_ssid_compare(struct ieee80211vap *vap, struct ieee80211_scanparams *p_scan)
+{
+	return ((vap->iv_des_ssid[0].len != p_scan->ssid[1]) ||
+		(memcmp(vap->iv_des_ssid[0].ssid, p_scan->ssid + 2, p_scan->ssid[1]) != 0));
+}
+
+/*
+ * Populate a QTN-WDS-EXT event structure: zero it, tag it with the fixed
+ * name, and fill in the command, local extender role/bandwidth, and the
+ * optional peer MAC and scan channel (either may be NULL).
+ */
+void extender_event_data_prepare(struct ieee80211com *ic,
+			struct ieee80211_scanparams *p_scan,
+			struct qtn_wds_ext_event_data *data,
+			uint8_t cmd,
+			uint8_t *peer_mac)
+{
+	memset(data, 0, sizeof(struct qtn_wds_ext_event_data));
+	strncpy(data->name, "QTN-WDS-EXT", sizeof(data->name) - 1);
+	data->cmd = cmd;
+	data->extender_role = ic->ic_extender_role;
+	data->bandwidth = ic->ic_extender_rbs_bw;
+	if (peer_mac)
+		memcpy(data->mac, peer_mac, IEEE80211_ADDR_LEN);
+	if (p_scan)
+		data->channel = p_scan->bchan;
+}
+
+/*
+ * Deliver a QTN-WDS-EXT event to userspace as an IWEVCUSTOM wireless event.
+ * The event payload is the qtn_wds_ext_event_data, optionally followed by a
+ * raw IE (id/len/data) for commands other than WDS_EXT_LINK_STATUS_UPDATE.
+ * Returns 0 always; oversize payloads are silently dropped.
+ */
+int ieee80211_extender_send_event(
+	struct ieee80211vap *vap,
+	const struct qtn_wds_ext_event_data *p_data, uint8_t *ie)
+{
+	struct qtn_wds_ext_event_data *wds_event_data;
+	uint8_t	event_data[IEEE80211_MAX_EXT_EVENT_DATA_LEN];
+	union iwreq_data wreq;
+
+	memset(event_data, 0, sizeof(event_data));
+	wds_event_data = (struct qtn_wds_ext_event_data *)event_data;
+
+	/* Guard against the struct alone exceeding the event buffer. */
+	if (sizeof(*p_data) > sizeof(event_data))
+		return 0;
+	memcpy(wds_event_data, p_data, sizeof(*p_data));
+
+	if ((wds_event_data->cmd != WDS_EXT_LINK_STATUS_UPDATE) && ie) {
+		/* Append the full IE (2-byte header + payload), bounds-checked. */
+		wds_event_data->ie_len = ie[1] + 2;
+		if ((sizeof(*p_data) + wds_event_data->ie_len) > sizeof(event_data))
+			return 0;
+		memcpy(event_data + sizeof(*p_data), ie, wds_event_data->ie_len);
+	}
+
+	memset(&wreq, 0, sizeof(wreq));
+	wreq.data.length = sizeof(*wds_event_data) + (ie ? wds_event_data->ie_len : 0);
+	wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, (char *)&event_data);
+
+	return 0;
+}
+
+/*
+ * Look up an extender WDS peer entry by MAC address in the primary VAP's
+ * hash table. Returns the entry, or NULL when not found.
+ * NOTE(review): the pointer is returned after the lock is dropped, so the
+ * caller must ensure the entry cannot be freed concurrently (e.g. by
+ * ieee80211_extender_remove_peer_wds_info) — confirm the locking contract.
+ */
+struct ieee80211_extender_wds_info *
+ieee80211_extender_find_peer_wds_info(struct ieee80211com *ic, uint8_t *mac_addr)
+{
+	struct ieee80211_extender_wds_info *peer_wds = NULL;
+	struct ieee80211vap *primary_vap = TAILQ_FIRST(&ic->ic_vaps);
+	unsigned long flags;
+	int hash;
+
+	hash = IEEE80211_NODE_HASH(mac_addr);
+	spin_lock_irqsave(&primary_vap->iv_extender_wds_lock, flags);
+	LIST_FOREACH(peer_wds, &primary_vap->iv_extender_wds_hash[hash], peer_wds_hash) {
+		if (IEEE80211_ADDR_EQ(mac_addr, peer_wds->peer_addr)) {
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&primary_vap->iv_extender_wds_lock, flags);
+
+	return peer_wds;
+}
+
+/*
+ * Allocate and register an extender WDS peer entry for mac_addr on the
+ * primary VAP. For an MBS, also record the peer as an RBS BSSID (in the
+ * first free slot) and recount ic_extender_rbs_num; for an RBS, record the
+ * peer as the MBS BSSID. The beacon is updated when the RBS list changed.
+ * Returns the new entry, or NULL when allocation fails.
+ * Note the M_WAITOK allocation happens before the spinlock is taken.
+ */
+static struct ieee80211_extender_wds_info*
+ieee80211_extender_create_peer_wds_info(struct ieee80211com *ic, uint8_t *mac_addr)
+{
+	struct ieee80211_extender_wds_info *peer_wds = NULL;
+	struct ieee80211vap *primary_vap = TAILQ_FIRST(&ic->ic_vaps);
+	unsigned long flags;
+	int update_beacon = 0;
+	int rbs_num = 0;
+	int hash;
+	int i;
+
+	MALLOC(peer_wds, struct ieee80211_extender_wds_info *, sizeof(*peer_wds),
+		M_DEVBUF, M_WAITOK);
+	if (peer_wds) {
+		hash = IEEE80211_NODE_HASH(mac_addr);
+		memcpy(peer_wds->peer_addr, mac_addr, IEEE80211_ADDR_LEN);
+		spin_lock_irqsave(&primary_vap->iv_extender_wds_lock, flags);
+		LIST_INSERT_HEAD(&primary_vap->iv_extender_wds_hash[hash], peer_wds, peer_wds_hash);
+		if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS) &&
+				(ic->ic_extender_rbs_num < QTN_MAX_RBS_NUM)) {
+			/* Store the new RBS in the first free BSSID slot. */
+			for(i=0; i<QTN_MAX_RBS_NUM; i++) {
+				if (is_zero_ether_addr(ic->ic_extender_rbs_bssid[i])) {
+					IEEE80211_ADDR_COPY(ic->ic_extender_rbs_bssid[i], mac_addr);
+					update_beacon = 1;
+					break;
+				}
+			}
+
+			/* Recount the populated RBS slots. */
+			for(i=0; i<QTN_MAX_RBS_NUM; i++) {
+				if (!is_zero_ether_addr(ic->ic_extender_rbs_bssid[i]))
+				      rbs_num++;
+			}
+			ic->ic_extender_rbs_num = rbs_num;
+		}
+		spin_unlock_irqrestore(&primary_vap->iv_extender_wds_lock, flags);
+
+		if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS)
+			IEEE80211_ADDR_COPY(ic->ic_extender_mbs_bssid, mac_addr);
+
+		IEEE80211_EXTENDER_DPRINTF(primary_vap, IEEE80211_EXTENDER_MSG_DBG,
+				"EXTENDER %s: add wds peer [%pM]\n", __func__,
+				peer_wds->peer_addr);
+
+		if (update_beacon)
+			ic->ic_beacon_update(primary_vap);
+	}
+	return peer_wds;
+}
+
+/*
+ * Unlink and free the WDS peer record for mac_addr, and undo the
+ * role-specific bookkeeping done when the peer was created: on an MBS the
+ * peer's slot in the advertised RBS BSSID list is cleared; on an RBS all
+ * extender state (MBS BSSID, RBS list, count) is reset.  A beacon update
+ * is triggered after the lock is dropped when the advertised IEs changed.
+ * Always returns 0.
+ */
+int
+ieee80211_extender_remove_peer_wds_info(struct ieee80211com *ic,
+	uint8_t *mac_addr)
+{
+	struct ieee80211_extender_wds_info *peer_wds = NULL;
+	struct ieee80211vap *primary_vap = TAILQ_FIRST(&ic->ic_vaps);
+	unsigned long flags;
+	int update_beacon = 0;
+	int rbs_num = 0;
+	int hash;
+	int i;
+
+	hash = IEEE80211_NODE_HASH(mac_addr);
+	spin_lock_irqsave(&primary_vap->iv_extender_wds_lock, flags);
+	LIST_FOREACH(peer_wds, &primary_vap->iv_extender_wds_hash[hash], peer_wds_hash) {
+		if (IEEE80211_ADDR_EQ(mac_addr, peer_wds->peer_addr)) {
+			/* Safe despite the FREE: the loop breaks before advancing */
+			LIST_REMOVE(peer_wds, peer_wds_hash);
+			FREE(peer_wds, M_DEVBUF);
+
+			if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS) &&
+					(ic->ic_extender_rbs_num > 0)) {
+				/* Clear this peer's advertised RBS slot */
+				for(i=0; i<QTN_MAX_RBS_NUM; i++) {
+					if (IEEE80211_ADDR_EQ(ic->ic_extender_rbs_bssid[i], mac_addr)) {
+						IEEE80211_ADDR_SET_NULL(ic->ic_extender_rbs_bssid[i]);
+						update_beacon = 1;
+						break;
+					}
+				}
+
+				/* Recount remaining occupied slots */
+				for(i=0; i<QTN_MAX_RBS_NUM; i++) {
+					if (!is_zero_ether_addr(ic->ic_extender_rbs_bssid[i]))
+					      rbs_num++;
+				}
+				ic->ic_extender_rbs_num = rbs_num;
+			} else if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS) {
+				/* Losing the MBS invalidates all extender state */
+				IEEE80211_ADDR_SET_NULL(ic->ic_extender_mbs_bssid);
+				ic->ic_extender_rbs_num = 0;
+				for(i=0; i<QTN_MAX_RBS_NUM; i++)
+					IEEE80211_ADDR_SET_NULL(ic->ic_extender_rbs_bssid[i]);
+				update_beacon = 1;
+			}
+
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&primary_vap->iv_extender_wds_lock, flags);
+
+	if (update_beacon)
+		ic->ic_beacon_update(primary_vap);
+
+	return 0;
+}
+
+/*
+ * Push a WDS_EXT_STA_UPDATE_EXT_INFO event to userspace describing the
+ * peer's current extender role and, when present, its extender BSSID IE.
+ */
+void
+ieee80211_extender_notify_ext_role(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct qtn_wds_ext_event_data extender_event_data;
+	struct ieee80211_qtn_ext_bssid *ext_bssid_ie =
+		(struct ieee80211_qtn_ext_bssid *)ni->ni_ext_bssid_ie;
+	int i;
+
+	memset(&extender_event_data, 0, sizeof(extender_event_data));
+	extender_event_data.cmd = WDS_EXT_STA_UPDATE_EXT_INFO;
+	extender_event_data.extender_role = ni->ni_ext_role;
+	/*
+	 * Copy at most sizeof(name) - 1 bytes so the field is guaranteed to
+	 * stay NUL-terminated (the terminator comes from the memset above).
+	 * The original strncpy() with the full size would leave the field
+	 * unterminated if the literal filled the buffer.
+	 */
+	strncpy(extender_event_data.name, "QTN-WDS-EXT",
+		sizeof(extender_event_data.name) - 1);
+	memcpy(extender_event_data.mac, ni->ni_bssid, IEEE80211_ADDR_LEN);
+
+	if (ext_bssid_ie) {
+		/* Dump the IE contents for debugging (fixed "upate" typo) */
+		IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_DBG,
+			"EXTENDER %s: trigger role info update event, extender role [%u], "
+			"mbs address [%pM], rbs num %u, rbs address: ", __func__, ni->ni_ext_role,
+			ext_bssid_ie->mbs_bssid, ext_bssid_ie->rbs_num);
+		for (i = 0; i < QTN_MAX_RBS_NUM; i++) {
+			if (!is_zero_ether_addr(ext_bssid_ie->rbs_bssid[i])) {
+				IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_DBG, "%pM\n",
+						ext_bssid_ie->rbs_bssid[i]);
+			}
+		}
+	} else {
+		IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_DBG, "EXTENDER %s: "
+			"trigger role info update event, extender role: none\n", __func__);
+	}
+
+	ieee80211_extender_send_event(vap, &extender_event_data, (uint8_t *)ext_bssid_ie);
+}
+
+/*
+ * STA side: track the BSS peer's extender role and extender BSSID IE, and
+ * fire a role-update notification whenever either one changes.  Only the
+ * authorized BSS node is considered; all other nodes are ignored.
+ */
+void
+ieee80211_extender_sta_update_info(struct ieee80211_node *ni,
+		const struct ieee80211_qtn_ext_role *ie_role,
+		const struct ieee80211_qtn_ext_bssid *ie_bssid)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	uint8_t new_role;
+	int changed = 0;
+
+	if ((ni != vap->iv_bss) || !(ni->ni_flags & IEEE80211_NODE_AUTH))
+		return;
+
+	/* Missing role IE means the peer is not an extender */
+	new_role = ie_role ? ie_role->role : IEEE80211_EXTENDER_ROLE_NONE;
+	if (ni->ni_ext_role != new_role) {
+		ni->ni_ext_role = new_role;
+		changed = 1;
+	}
+
+	if (ie_bssid) {
+		/* Cache the IE when it is new or differs from the stored copy */
+		if (!ni->ni_ext_bssid_ie ||
+				memcmp(ie_bssid, ni->ni_ext_bssid_ie, sizeof(*ie_bssid))) {
+			ieee80211_saveie(&ni->ni_ext_bssid_ie, (uint8_t *)ie_bssid);
+			changed = 1;
+		}
+	} else if (ni->ni_ext_bssid_ie != NULL) {
+		/* Peer stopped advertising the IE: drop the cached copy */
+		FREE(ni->ni_ext_bssid_ie, M_DEVBUF);
+		ni->ni_ext_bssid_ie = NULL;
+		changed = 1;
+	}
+
+	if (changed)
+		ieee80211_extender_notify_ext_role(ni);
+}
+
+/*
+ * Free every extender WDS peer record attached to this VAP.
+ */
+void
+ieee80211_extender_vdetach(struct ieee80211vap *vap)
+{
+	int i;
+	unsigned long flags;
+	struct ieee80211_extender_wds_info *peer_wds;
+
+	/*
+	 * Drain each hash bucket with a pop-the-head loop.  The original
+	 * used LIST_FOREACH and freed the current element inside the loop,
+	 * which makes the iterator advance by dereferencing freed memory
+	 * (use-after-free).
+	 */
+	spin_lock_irqsave(&vap->iv_extender_wds_lock, flags);
+	for (i = 0; i < IEEE80211_NODE_HASHSIZE; i++) {
+		while ((peer_wds = LIST_FIRST(&vap->iv_extender_wds_hash[i])) != NULL) {
+			LIST_REMOVE(peer_wds, peer_wds_hash);
+			FREE(peer_wds, M_DEVBUF);
+		}
+	}
+	spin_unlock_irqrestore(&vap->iv_extender_wds_lock, flags);
+}
+
+/* Map an extender role to the userspace event code: MBS role maps to the
+ * MBS-IE event, every other role to the RBS-IE event. */
+static int extender_role_to_event_cmd(uint8_t role)
+{
+	return (role == IEEE80211_EXTENDER_ROLE_MBS) ?
+			WDS_EXT_RECEIVED_MBS_IE : WDS_EXT_RECEIVED_RBS_IE;
+}
+
+/*
+ * Tear down every extender WDS peer on this VAP: send userspace a
+ * WDS_EXT_CLEANUP_WDS_LINK event for each peer, then unlink and free the
+ * record.
+ */
+void ieee80211_extender_cleanup_wds_link(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211vap *pri_vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct ieee80211_extender_wds_info *peer_wds;
+	struct qtn_wds_ext_event_data event_data;
+	unsigned long flags;
+	int i;
+
+	/*
+	 * Pop-the-head loop instead of LIST_FOREACH: the original freed the
+	 * current element and then advanced the iterator from it, a
+	 * use-after-free.  NOTE(review): the event is still sent while the
+	 * spinlock is held with IRQs off, as before — confirm
+	 * ieee80211_extender_send_event() is safe in atomic context.
+	 */
+	spin_lock_irqsave(&vap->iv_extender_wds_lock, flags);
+	for (i = 0; i < IEEE80211_NODE_HASHSIZE; i++) {
+		while ((peer_wds = LIST_FIRST(&vap->iv_extender_wds_hash[i])) != NULL) {
+			extender_event_data_prepare(ic, NULL,
+					&event_data,
+					WDS_EXT_CLEANUP_WDS_LINK,
+					peer_wds->peer_addr);
+			ieee80211_extender_send_event(pri_vap, &event_data, NULL);
+			LIST_REMOVE(peer_wds, peer_wds_hash);
+			FREE(peer_wds, M_DEVBUF);
+		}
+	}
+	spin_unlock_irqrestore(&vap->iv_extender_wds_lock, flags);
+}
+
+/*
+ * Debounced RSSI threshold check.  Returns 1 only after the peer's RSSI
+ * has been outside the acceptable range for more than
+ * QTN_EXTENDER_RSSI_MAX_COUNT consecutive samples; any in-range sample
+ * resets the streak counter.  "In range" means at-or-below the best-RSSI
+ * threshold in STA mode, or at-or-above (best - margin) otherwise.
+ */
+static int extender_check_rssi_change(struct ieee80211com *ic,
+		struct ieee80211vap *vap, int rssi)
+{
+	int in_range;
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		in_range = (rssi <= ic->ic_extender_mbs_best_rssi);
+	else
+		in_range = (rssi >= ic->ic_extender_mbs_best_rssi -
+				ic->ic_extender_mbs_rssi_margin);
+
+	if (in_range) {
+		ic->ic_extender_rssi_continue = 0;
+		return 0;
+	}
+
+	/* Out of range: only report once the streak exceeds the limit */
+	if (ic->ic_extender_rssi_continue++ < QTN_EXTENDER_RSSI_MAX_COUNT)
+		return 0;
+
+	ic->ic_extender_rssi_continue = 0;
+	return 1;
+}
+
+/*
+ * Report a newly seen (or changed) extender role IE to userspace.  A known
+ * peer whose stored IE is unchanged is ignored; an unknown peer is first
+ * admitted (in STA mode only after the debounced RSSI check passes) and a
+ * peer record is created.  The frame's SSID, when present, is copied into
+ * the event (truncated to the event buffer if too long).
+ * Always returns 0.
+ */
+static int ieee80211_trigger_extender_event(
+	struct ieee80211vap *vap,
+	const struct ieee80211_qtn_ext_role *ie,
+	struct ieee80211_scanparams *p_scan,
+	struct ieee80211_frame *wh,
+	int rssi)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_extender_wds_info *peer_wds = NULL;
+	struct qtn_wds_ext_event_data extender_event_data;
+
+	peer_wds = ieee80211_extender_find_peer_wds_info(ic, wh->i_addr2);
+	if (peer_wds) {
+		/* Known peer, identical IE: nothing new to report */
+		if (memcmp(&peer_wds->extender_ie, ie, sizeof(*ie)) == 0)
+			return 0;
+	} else {
+		if (vap->iv_opmode == IEEE80211_M_STA &&
+				extender_check_rssi_change(ic, vap, rssi) == 0)
+			return 0;
+
+		peer_wds = ieee80211_extender_create_peer_wds_info(ic, wh->i_addr2);
+		if (!peer_wds)
+			return 0;
+	}
+
+	/* Remember the IE so repeated frames do not re-trigger the event */
+	memcpy(&peer_wds->extender_ie, ie, sizeof(*ie));
+	extender_event_data_prepare(ic, p_scan,
+				&extender_event_data,
+				extender_role_to_event_cmd(ie->role),
+				wh->i_addr2);
+
+	/* p_scan->ssid points at the raw element: [id][len][ssid bytes] */
+	if ((p_scan->ssid != NULL) && (p_scan->ssid[1] > 0)) {
+		if (p_scan->ssid[1] < sizeof(extender_event_data.ssid)) {
+			memcpy(extender_event_data.ssid, p_scan->ssid + 2, p_scan->ssid[1]);
+		} else {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ELEMID, "%s : ssid is too long\n", __func__);
+			memcpy(extender_event_data.ssid, p_scan->ssid + 2, sizeof(extender_event_data.ssid) - 1);
+		}
+	}
+
+	ieee80211_extender_send_event(vap, &extender_event_data, (uint8_t *)ie);
+	return 0;
+}
+
+/*
+ * RBS side: mirror the MBS's advertised extender BSSID list (MBS BSSID,
+ * RBS BSSIDs, RBS count) and its OCAC state into our own ic state.  A
+ * plain beacon update is triggered when only the BSSID list changed; a
+ * full all-VAP beacon update when the OCAC state changed.
+ * No-op unless we are an RBS and a sufficiently long BSSID IE is present.
+ */
+static void ieee80211_extender_update_rbs_macs(struct ieee80211com *ic,
+		const struct ieee80211_qtn_ext_role *ie_role,
+		const struct ieee80211_qtn_ext_bssid *ie_bssid,
+		const struct ieee80211_qtn_ext_state *ie_state)
+{
+	struct ieee80211vap *primary_vap = TAILQ_FIRST(&ic->ic_vaps);
+	uint8_t ext_role = ie_role->role;
+	int update_beacon = 0;
+
+	if (ic->ic_extender_role != IEEE80211_EXTENDER_ROLE_RBS)
+		return;
+
+	/* IE length excludes the 2-byte element header */
+	if (!ie_bssid || (ie_bssid->len < (sizeof(struct ieee80211_qtn_ext_bssid) - 2)))
+		return;
+
+	if (ext_role == IEEE80211_EXTENDER_ROLE_MBS) {
+		if (memcmp(ic->ic_extender_mbs_bssid,
+				ie_bssid->mbs_bssid, sizeof(ic->ic_extender_mbs_bssid))) {
+			memcpy(ic->ic_extender_mbs_bssid,
+				ie_bssid->mbs_bssid, sizeof(ic->ic_extender_mbs_bssid));
+			update_beacon = 1;
+		}
+
+		/* [0] with sizeof(whole array): compares/copies all RBS entries,
+		 * which are laid out contiguously */
+		if ((ic->ic_extender_rbs_num != ie_bssid->rbs_num) ||
+				memcmp(ic->ic_extender_rbs_bssid[0],
+					ie_bssid->rbs_bssid[0], sizeof(ic->ic_extender_rbs_bssid))) {
+			ic->ic_extender_rbs_num = ie_bssid->rbs_num;
+			memcpy(ic->ic_extender_rbs_bssid[0],
+				ie_bssid->rbs_bssid[0], sizeof(ic->ic_extender_rbs_bssid));
+			update_beacon = 1;
+		}
+
+		/* OCAC change outranks a plain BSSID change (2 beats 1 below) */
+		if (ie_state && !!(ie_state->state1 & QTN_EXT_MBS_OCAC) != ic->ic_extender_mbs_ocac) {
+			ic->ic_extender_mbs_ocac = !!(ie_state->state1 & QTN_EXT_MBS_OCAC);
+			update_beacon = 2;
+		}
+	}
+
+	if (update_beacon == 1)
+		ic->ic_beacon_update(primary_vap);
+	else if (update_beacon == 2)
+		ieee80211_beacon_update_all(ic);
+}
+
+/*
+ * Effective maximum bandwidth usable on a channel: the lesser of the
+ * system-wide bandwidth limit and the channel's own limit.
+ */
+static int
+ieee80211_get_max_bandwidth(struct ieee80211com *ic, uint8_t channel)
+{
+	return MIN(ieee80211_get_max_system_bw(ic),
+			ieee80211_get_max_channel_bw(ic, channel));
+}
+
+/*
+ * Derive a peer's operating bandwidth from its raw VHT-operation and
+ * HT-information elements: a VHT channel-width field set means 80 MHz;
+ * otherwise an HT secondary-channel offset means 40 MHz; otherwise
+ * 20 MHz.  Either pointer may be NULL.
+ */
+static int
+ieee80211_parse_peer_bandwidth(u_int8_t *vhtop, u_int8_t *htinfo)
+{
+	struct ieee80211_ie_vhtop *vht_op = (struct ieee80211_ie_vhtop *)vhtop;
+	struct ieee80211_ie_htinfo *ht_info = (struct ieee80211_ie_htinfo *)htinfo;
+
+	if (vht_op != NULL && IEEE80211_VHTOP_GET_CHANWIDTH(vht_op))
+		return BW_HT80;
+
+	if (ht_info != NULL && IEEE80211_HTINFO_B1_EXT_CHOFFSET(ht_info))
+		return BW_HT40;
+
+	return BW_HT20;
+}
+
+/*
+ * Process a Quantenna extender-role IE received in a beacon or probe
+ * response, dispatching on our own operating mode:
+ *  - STA (we are an RBS): accept only frames from our own authorized BSS
+ *    with our SSID, then compute the usable WDS bandwidth and raise the
+ *    extender event.
+ *  - HOSTAP: accept complementary-role peers on our SSID, reject frames
+ *    from foreign QHop networks, and (as RBS) mirror the MBS's BSSID IE.
+ *  - WDS (RBS link to the MBS): refresh peer state, follow the MBS's
+ *    channel, and drop the link when RSSI stays out of range.
+ * Returns -1 on a malformed (short) IE, 0 otherwise.
+ */
+static int
+ieee80211_parse_qtn_extender_ie(
+	struct ieee80211_node *ni,
+	const struct ieee80211_qtn_ext_role *ie,
+	const struct ieee80211_qtn_ext_bssid *ext_bssid_ie,
+	const struct ieee80211_qtn_ext_state *ie_state,
+	struct ieee80211_scanparams *p_scan,
+	struct ieee80211_frame *wh,
+	int rssi)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211vap *primary_vap;
+	struct ieee80211_extender_wds_info *peer_wds = NULL;
+	struct qtn_wds_ext_event_data event_data;
+
+	uint32_t mbs_bw;
+	uint32_t rbs_bw;
+
+	/* IE length field excludes the 2-byte element header */
+	if (ie->len < (sizeof(*ie) - 2))
+		return -1;
+
+	p_scan->extender_role = ie->role;
+
+	switch(vap->iv_opmode) {
+	case IEEE80211_M_STA:
+		if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS) &&
+				(ie->role == IEEE80211_EXTENDER_ROLE_MBS)) {
+			/* Only our own, fully authorized BSS counts */
+			if ((ni->ni_associd == 0) ||
+					(vap->iv_state != IEEE80211_S_RUN) ||
+					!(ni->ni_flags & IEEE80211_NODE_AUTH) ||
+					!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid))
+				return 0;
+			/* need to verify the essid in the frame is the same to our */
+			if ((p_scan->ssid == NULL) || (p_scan->ssid[1] == 0)) {
+				return 0;
+			} else {
+				if (ieee80211_ssid_compare(vap, p_scan))
+				return 0;
+			}
+
+			/* WDS bandwidth is the lesser of MBS's and our own */
+			mbs_bw = ieee80211_parse_peer_bandwidth(p_scan->vhtop, p_scan->htinfo);
+			rbs_bw = ieee80211_get_max_bandwidth(ic, p_scan->bchan);
+
+			ic->ic_extender_rbs_bw = MIN(mbs_bw, rbs_bw);
+			ieee80211_trigger_extender_event(vap, ie, p_scan, wh, rssi);
+			ic->ic_extender_mbs_detected_jiffies = jiffies;
+		}
+		break;
+	case IEEE80211_M_HOSTAP:
+		if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_NONE))
+			return 0;
+
+		/* need to verify the essid in the frame is the same to our */
+		if ((p_scan->ssid == NULL) || (p_scan->ssid[1] == 0)) {
+			return 0;
+		} else {
+			if (ieee80211_ssid_compare(vap, p_scan))
+				return 0;
+		}
+
+		/* Only complementary roles (MBS<->RBS) interact */
+		if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS &&
+				ie->role == IEEE80211_EXTENDER_ROLE_RBS) ||
+				(ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS &&
+				ie->role == IEEE80211_EXTENDER_ROLE_MBS)) {
+			/* An RBS must not stay associated as a plain STA */
+			if ((ni->ni_node_type == IEEE80211_NODE_TYPE_STA) &&
+					(ie->role == IEEE80211_EXTENDER_ROLE_RBS)) {
+				IEEE80211_EXTENDER_DPRINTF(vap,
+						IEEE80211_EXTENDER_MSG_WARN,
+						"QHop: peer %pM should disassociate first\n",
+						wh->i_addr2);
+				ieee80211_node_leave(ni);
+			}
+			/* ignore Beacon and Probe Response frames from other QHop networks */
+			if (ext_bssid_ie != NULL) {
+				if (IEEE80211_COM_WDS_IS_MBS(ic)) {
+					/* ignore if we are NOT the announced MBS, or else we
+					 * create WDS link for sender unexpectedly
+					 */
+					if (!IEEE80211_ADDR_EQ(ext_bssid_ie->mbs_bssid,
+							vap->iv_myaddr))
+						break;
+				} else {
+					/* ignore if we already use another MBS, or else we
+					 * update MBS BSSID unexpectedly
+					 */
+					if (!IEEE80211_ADDR_EQ(ext_bssid_ie->mbs_bssid,
+							ic->ic_extender_mbs_bssid)) {
+						/* make an exception for RBS boots up as AP */
+						if (!IEEE80211_ADDR_NULL(ic->ic_extender_mbs_bssid))
+							break;
+					}
+				}
+			}
+			ieee80211_trigger_extender_event(vap, ie, p_scan, wh, 0);
+			ic->ic_extender_mbs_detected_jiffies = jiffies;
+
+			/* update extender bssid ie */
+			if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS) &&
+					(ie->role == IEEE80211_EXTENDER_ROLE_MBS))
+				ieee80211_extender_update_rbs_macs(ic, ie, ext_bssid_ie, ie_state);
+		}
+		break;
+	case IEEE80211_M_WDS:
+		/* Only the configured MBS peer is relevant on a WDS VAP */
+		if (ic->ic_extender_role != IEEE80211_EXTENDER_ROLE_RBS ||
+			ie->role != IEEE80211_EXTENDER_ROLE_MBS ||
+			!IEEE80211_ADDR_EQ(wh->i_addr2, vap->wds_mac)) {
+			return 0;
+		}
+		primary_vap = TAILQ_FIRST(&ic->ic_vaps);
+		/* need to verify the essid in the frame is the same to our */
+
+		if ((p_scan->ssid == NULL) || (p_scan->ssid[1] == 0)) {
+			return 0;
+		} else {
+			if (ieee80211_ssid_compare(primary_vap, p_scan))
+				return 0;
+		}
+
+		ic->ic_extender_mbs_detected_jiffies = jiffies;
+		peer_wds = ieee80211_extender_find_peer_wds_info(ic, wh->i_addr2);
+		if (!peer_wds) {
+			peer_wds = ieee80211_extender_create_peer_wds_info(ic, wh->i_addr2);
+			if (peer_wds)
+				memcpy(&peer_wds->extender_ie, ie, sizeof(*ie));
+		}
+
+		/* update extender bssid ie */
+		if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS) &&
+				(ie->role == IEEE80211_EXTENDER_ROLE_MBS))
+			ieee80211_extender_update_rbs_macs(ic, ie, ext_bssid_ie, ie_state);
+
+		/* Follow the MBS if it is no longer on our BSS channel */
+		if (unlikely(ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+				ic->ic_bsschan != ic->ic_curchan)) {
+			extender_event_data_prepare(ic, p_scan,
+					&event_data,
+					WDS_EXT_RBS_SET_CHANNEL,
+					wh->i_addr2);
+			ieee80211_extender_send_event(primary_vap, &event_data, NULL);
+			return 0;
+		}
+
+		/* Drop the WDS link when RSSI stays out of the acceptable range */
+		if (extender_check_rssi_change(ic, primary_vap, rssi)) {
+			IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_WARN,
+					"QHop: MBS %pM rssi %d is out of BRR %u\n",
+					wh->i_addr2, rssi, ic->ic_extender_mbs_best_rssi);
+			extender_event_data_prepare(ic, NULL,
+					&event_data,
+					WDS_EXT_RBS_OUT_OF_BRR,
+					wh->i_addr2);
+			ieee80211_extender_send_event(primary_vap, &event_data, NULL);
+			ieee80211_extender_remove_peer_wds_info(ic, wh->i_addr2);
+			return 0;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Entry point for extender IE handling on a received beacon/probe
+ * response.  When a role IE is present it is parsed; when it is absent
+ * and we (as AP or WDS) still hold a peer record for the sender, the
+ * peer is reported to userspace and its record dropped — the peer has
+ * stopped acting as an extender.  In STA mode the BSS node's cached
+ * role/BSSID info is refreshed afterwards.
+ * Always returns 0.
+ */
+static int ieee80211_extender_process(
+	struct ieee80211_node *ni,
+	const struct ieee80211_qtn_ext_role *ie_role,
+	const struct ieee80211_qtn_ext_bssid *ie_bssid,
+	const struct ieee80211_qtn_ext_state *ie_state,
+	struct ieee80211_scanparams *p_scan,
+	struct ieee80211_frame *wh,
+	int rssi)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211vap *primary_vap = NULL;
+	struct ieee80211_extender_wds_info *wds_info = NULL;
+	struct qtn_wds_ext_event_data extender_event_data;
+
+	/* Make the BSSID IE visible to later scan processing */
+	p_scan->ext_bssid_ie = (uint8_t *)ie_bssid;
+
+	if (ie_role) {
+		ieee80211_parse_qtn_extender_ie(ni, ie_role, ie_bssid, ie_state, p_scan, wh, rssi);
+	} else {
+		p_scan->extender_role = IEEE80211_EXTENDER_ROLE_NONE;
+		if ((vap->iv_opmode == IEEE80211_M_HOSTAP) || (vap->iv_opmode == IEEE80211_M_WDS)) {
+			primary_vap = TAILQ_FIRST(&ic->ic_vaps);
+			wds_info = ieee80211_extender_find_peer_wds_info(ic, wh->i_addr2);
+			if (wds_info) {
+				/* Known peer stopped advertising the IE: tear it down */
+				IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_WARN,
+						"QHop: Extender IE of peer %pM is missing\n",
+						wh->i_addr2);
+				extender_event_data_prepare(ic, NULL,
+						&extender_event_data,
+						WDS_EXT_LINK_STATUS_UPDATE,
+						wh->i_addr2);
+				ieee80211_extender_send_event(primary_vap, &extender_event_data, NULL);
+				ieee80211_extender_remove_peer_wds_info(ic, wh->i_addr2);
+			}
+		}
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		ieee80211_extender_sta_update_info(ni, ie_role, ie_bssid);
+
+	return 0;
+}
+
+/*
+ * Finalize a channel switch: commit the CSA target channel as the current
+ * and BSS channel, then for each AP VAP rebind its self-node to the new
+ * channel and strip the CSA IE from its beacons.
+ */
+void ieee80211_channel_switch_post(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+
+	ic->ic_prevchan = ic->ic_curchan;
+	ic->ic_curchan = ic->ic_csa_chan;
+	ic->ic_bsschan = ic->ic_csa_chan;
+
+	if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR)
+		printk("switched to chan %u\n", ic->ic_csa_chan->ic_ieee);
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+				"%s: switched to %d\n",
+				__FUNCTION__, ic->ic_csa_chan->ic_ieee);
+
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			struct ieee80211_node *ni;
+			ni = ieee80211_find_node(&ic->ic_sta, vap->iv_myaddr);
+			if (ni == NULL) {
+				/* NOTE(review): this return aborts processing of any
+				 * remaining VAPs in the list — confirm intentional */
+				IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_DOTH,
+					vap->iv_myaddr, "Node not found %d\n", 1);
+				return;
+			}
+
+			ieee80211_node_set_chan(ic, ni);
+			/* Drop the reference taken by ieee80211_find_node() */
+			ieee80211_free_node(ni);
+
+			/* Remove the CSA IE from beacons */
+			vap->iv_flags &= ~IEEE80211_F_CHANSWITCH;
+			ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
+			ic->ic_beacon_update(vap);
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_channel_switch_post);
+
+/*
+ * Re-announce a received CSA downstream.  The CSA is always relayed via
+ * action frames; the IE is additionally carried in beacons only while a
+ * non-zero countdown remains.
+ */
+static void
+ieee80211_relay_csaie(struct ieee80211com *ic,
+		struct ieee80211_channel* new_chan, uint8_t csa_count)
+{
+	uint32_t relay_flags = csa_count ?
+			(IEEE80211_CSA_F_ACTION | IEEE80211_CSA_F_BEACON) :
+			IEEE80211_CSA_F_ACTION;
+
+	ieee80211_enter_csa(ic, new_chan, NULL, IEEE80211_CSW_REASON_CSA,
+			csa_count, IEEE80211_CSA_MUST_STOP_TX, relay_flags);
+}
+
+#ifdef CONFIG_QHOP
+/* CSA-completion callback: finish the pending channel switch and queue
+ * the DFS-action power-management work. */
+static void ieee80211_dfs_trigger_channel_switch(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+
+	ieee80211_finish_csa((unsigned long)ic);
+	ic->ic_pm_reason = IEEE80211_PM_LEVEL_CSA_DFS_ACTION;
+	ieee80211_pm_queue_work_custom(ic, BOARD_PM_WLAN_IDLE_TIMEOUT);
+}
+
+/*
+ * Announce a DFS-triggered switch to IEEE channel new_chan via beacon CSA
+ * IEs and CSA action frames; the switch itself happens in the callback
+ * above once the countdown expires.  No-op if the channel is unknown.
+ */
+void
+ieee80211_dfs_send_csa(struct ieee80211vap *vap, uint8_t new_chan)
+{
+	struct ieee80211_channel *chan;
+	uint32_t csa_flags = IEEE80211_CSA_F_ACTION | IEEE80211_CSA_F_BEACON;
+
+	chan = ieee80211_doth_findchan(vap, new_chan);
+	if (chan == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+			"%s: channel %u lookup failed " "\n", __func__, new_chan);
+		return;
+	}
+
+	ieee80211_enter_csa(vap->iv_ic, chan,
+		ieee80211_dfs_trigger_channel_switch, IEEE80211_CSW_REASON_DFS,
+		vap->iv_ic->ic_dfs_csa_cnt, IEEE80211_CSA_MUST_STOP_TX, csa_flags);
+}
+EXPORT_SYMBOL(ieee80211_dfs_send_csa);
+#endif
+
+/*
+ * A received CSA IE is relayed when we are an RBS AP (either the whole
+ * device or this VAP holds the RBS role) or when operating as a repeater.
+ */
+static inline int
+ieee80211_should_relay_csaie(struct ieee80211com *ic, struct ieee80211vap *vap)
+{
+	int rbs_ap = (ic->ic_opmode == IEEE80211_M_HOSTAP) &&
+			(IEEE80211_COM_WDS_IS_RBS(ic) ||
+				IEEE80211_VAP_WDS_IS_RBS(vap));
+
+	return (rbs_ap || ieee80211_is_repeater(ic)) ? true : false;
+}
+
+#if defined(PLATFORM_QFDR)
+/* One-shot timer armed when the AP announces a switch to the other band. */
+static DEFINE_TIMER(ieee80211_csa_otherband_timer, NULL, 0, 0);
+
+/*
+ * Timer callback: the CSA countdown for a cross-band switch has expired.
+ * Clear the countdown and drive the STA back to INIT, which disassociates
+ * it from the RootAP so it can re-associate on the new band.
+ */
+static void ieee80211_csa_otherband_notify(unsigned long data)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *)data;
+	struct ieee80211com *ic = vap->iv_ic;
+	ic->ic_csa_count = 0;
+	/* disassociate STA from RootAP */
+	ieee80211_new_state(vap, IEEE80211_S_INIT, 0);
+}
+#endif
+
+#define QTN_CSAIE_ERR_INVALID_IE	(-1)
+#define QTN_CSAIE_ERR_CHAN_NOT_SUPP	(-2)
+/*
+ * Handle a CSA IE (and optional Quantenna CSA-TSF IE) received in a
+ * beacon or action frame from ni.  Validates the IE and target channel,
+ * tracks an in-progress CSA countdown across repeated announcements,
+ * relays the CSA downstream when acting as RBS/repeater, and on a STA
+ * schedules the deferred channel switch at the announced (or estimated)
+ * TSF.  On PLATFORM_QFDR a switch into a channel of the other band is
+ * handled by notifying userspace and arming a disassociation timer.
+ * Returns 0 on success/ignore, QTN_CSAIE_ERR_* on a bad IE or channel.
+ */
+static int
+ieee80211_parse_csaie(struct ieee80211_node *ni, u_int8_t *csa_frm, u_int8_t *csa_tsf_frm,
+	const struct ieee80211_frame *wh)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *c;
+	struct ieee80211_ie_csa *csa_ie = (struct ieee80211_ie_csa *)csa_frm;
+	struct ieee80211_ie_qtn_csa_tsf *csa_tsf_ie = (struct ieee80211_ie_qtn_csa_tsf *)csa_tsf_frm;
+	int subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+#if defined(PLATFORM_QFDR)
+	int otherband = 0;
+#endif
+
+	/* Only a STA, or an RBS AP, acts on received CSA IEs */
+	if (!(ic->ic_opmode == IEEE80211_M_STA ||
+			(ic->ic_opmode == IEEE80211_M_HOSTAP &&
+				 IEEE80211_VAP_WDS_IS_RBS(vap)))) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+				"%s: incorrect operation mode - ic %d vap %d, "
+				"role %d, flags_ext 0x%x\n", __func__,
+				ic->ic_opmode, vap->iv_opmode,
+				ic->ic_extender_role, ic->ic_flags_ext);
+		return 0;
+	}
+
+	if (!csa_ie) {
+		if (ic->ic_csa_count) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+					"%s: channel switch is scheduled, but we got "
+					"Beacon without CSA IE!\n", __func__);
+		}
+		return 0;
+	}
+
+	/* Statistics: count CSA IEs per carrying frame type */
+	ic->ic_csa_frame[(subtype == IEEE80211_FC0_SUBTYPE_BEACON)
+		? IEEE80211_CSA_FRM_BEACON : IEEE80211_CSA_FRM_ACTION]++;
+
+	/* CSA IE body is fixed-size: mode, channel, count */
+	if (csa_ie->csa_len != 3) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH,
+			wh, "channel switch", "invalid length %u",
+			csa_ie->csa_len);
+		return QTN_CSAIE_ERR_INVALID_IE;
+	}
+
+	if (isclr(ic->ic_chan_avail, csa_ie->csa_chan)) {
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH,
+			wh, "channel switch", "invalid channel %u",
+			csa_ie->csa_chan);
+		return QTN_CSAIE_ERR_CHAN_NOT_SUPP;
+	}
+
+	if (isclr(ic->ic_chan_active, csa_ie->csa_chan)) {
+#if defined(PLATFORM_QFDR)
+		/* channel may switched into other band */
+		otherband = 1;
+#else
+		IEEE80211_DISCARD_IE(vap,
+			IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH,
+			wh, "channel switch", "disabled channel %u",
+			csa_ie->csa_chan);
+		return QTN_CSAIE_ERR_CHAN_NOT_SUPP;
+#endif
+	}
+
+	c = ieee80211_doth_findchan(vap, csa_ie->csa_chan);
+	if (!c) {
+		IEEE80211_DISCARD_IE(vap,
+				IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH,
+				wh, "channel switch",
+				"channel %u lookup failed", csa_ie->csa_chan);
+		return QTN_CSAIE_ERR_CHAN_NOT_SUPP;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+		"%s: channel switch to %u in %u tbtt (mode %u) announced, trigger_frame=0x%x\n",
+		__func__, csa_ie->csa_chan, csa_ie->csa_count,
+		csa_ie->csa_mode, subtype);
+
+	if (ic->ic_csa_count) {
+		/* CSA was received recently */
+		if (c != ic->ic_csa_chan) {
+			/* XXX abuse? */
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+					"%s: channel switch channel "
+					"changed from %u to %u!\n", __func__,
+					ic->ic_csa_chan->ic_ieee,
+					csa_ie->csa_chan);
+#if defined(PLATFORM_QFDR)
+			if (isclr(ic->ic_chan_active, ic->ic_csa_chan->ic_ieee)) {
+				/* channel was switching to other band */
+				del_timer(&ieee80211_csa_otherband_timer);
+				ic->ic_csa_count = 0;
+				return 0;
+			}
+#endif
+			ieee80211_doth_cancel_cs(vap);
+			return 0;
+		}
+
+		if (csa_ie->csa_mode != ic->ic_csa_mode) {
+			/* Can be abused, but with no (to little) impact. */
+
+			/* CS mode change has no influence on our actions since
+			 * we don't respect cs modes at all (yet). Complain and
+			 * forget. */
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+					"%s: channel switch mode changed from "
+					"%u to %u!\n", __func__,
+					ic->ic_csa_mode, csa_ie->csa_mode);
+		}
+
+		ic->ic_csa_count = csa_ie->csa_count;
+		if (ic->ic_csa_count == 0) {
+			/* keep ic_csa_count unzero to avoid 2nd csa trigger */
+			ic->ic_csa_count = 1;
+		}
+	} else {
+		/* CSA wasn't received recently, so this is the first one in
+		 * the sequence. */
+		uint64_t tsf = 0;
+
+		ic->ic_csa_mode = csa_ie->csa_mode;
+		ic->ic_csa_count = csa_ie->csa_count;
+		ic->ic_csa_chan = c;
+
+#if defined(PLATFORM_QFDR)
+		if (ieee80211_should_relay_csaie(ic, vap) && !otherband)
+			ieee80211_relay_csaie(ic, c, ic->ic_csa_count);
+#else
+		if (ieee80211_should_relay_csaie(ic, vap))
+			ieee80211_relay_csaie(ic, c, ic->ic_csa_count);
+#endif
+
+		if (ic->ic_csa_count == 0) {
+			/* keep ic_csa_count unzero to avoid 2nd csa trigger */
+			ic->ic_csa_count = 1;
+		}
+
+		if (ic->ic_opmode == IEEE80211_M_STA) {
+			/* Prefer the exact switch TSF from the Quantenna IE;
+			 * otherwise estimate it from the beacon interval */
+			if (csa_tsf_ie && csa_tsf_ie->id == IEEE80211_ELEMID_VENDOR && isqtnie((u_int8_t *)csa_tsf_ie)) {
+				tsf = ntohll(csa_tsf_ie->tsf);
+			} else {
+				ic->ic_get_tsf(&tsf);
+				tsf += IEEE80211_MS_TO_USEC(ic->ic_csa_count * ni->ni_intval);
+			}
+			if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR)
+				printk("%s: switching to chan=%u\n", __func__,
+					ic->ic_csa_chan->ic_ieee);
+#if defined(PLATFORM_QFDR)
+			if (otherband) {
+				/* Cross-band switch: tell userspace and arm the
+				 * disassociation timer instead of switching here */
+				ieee80211_eventf(vap->iv_dev,
+					"%s[CSA-OTHER-BAND] FREQ=%u DELAY=%u MAC=%pM",
+					QEVT_COMMON_PREFIX,
+					ic->ic_csa_chan->ic_freq,
+					csa_ie->csa_count * ni->ni_intval,
+					wh->i_addr2);
+
+				init_timer(&ieee80211_csa_otherband_timer);
+				ieee80211_csa_otherband_timer.function = ieee80211_csa_otherband_notify;
+				ieee80211_csa_otherband_timer.data = (unsigned long)vap;
+				ieee80211_csa_otherband_timer.expires = jiffies +
+					IEEE80211_MS_TO_JIFFIES(csa_ie->csa_count * ni->ni_intval);
+				add_timer(&ieee80211_csa_otherband_timer);
+
+				return 0;
+			}
+#endif
+			ieee80211_eventf(vap->iv_dev, "%s[CSA] switch to %u in %u TBTT, MAC: %pM", QEVT_COMMON_PREFIX,
+				ic->ic_csa_chan->ic_ieee, csa_ie->csa_count, wh->i_addr2);
+			ic->ic_set_channel_deferred(ic, tsf, 0);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * True (1) when the CSA target channel can be used at a bandwidth below
+ * the current one: 80 or 40 MHz operation can narrow to 20 MHz whenever
+ * the target channel is active for 20 MHz use.
+ */
+static int
+ieee80211_narrower_bw_supported(struct ieee80211_node *ni, u_int8_t *csa_frm, int cur_bw)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	struct ieee80211_ie_csa *csa_ie = (struct ieee80211_ie_csa *)csa_frm;
+
+	if ((cur_bw == BW_HT80 || cur_bw == BW_HT40) &&
+			isset(ic->ic_chan_active_20, csa_ie->csa_chan))
+		return 1;
+
+	return 0;
+}
+/*
+ * True (1) when the CSA target channel supports the next bandwidth step
+ * up from cur_bw (20->40, 40->80) and the system-wide maximum bandwidth
+ * allows going wider at all.
+ */
+static int
+ieee80211_wider_bw_supported(struct ieee80211_node *ni, u_int8_t *csa_frm, int cur_bw)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	struct ieee80211_ie_csa *csa_ie = (struct ieee80211_ie_csa *)csa_frm;
+
+	if (ic->ic_max_system_bw <= cur_bw)
+		return 0;
+
+	if (cur_bw == BW_HT20)
+		return isset(ic->ic_chan_active_40, csa_ie->csa_chan) ? 1 : 0;
+
+	if (cur_bw == BW_HT40)
+		return isset(ic->ic_chan_active_80, csa_ie->csa_chan) ? 1 : 0;
+
+	return 0;
+}
+
+/* XXX. Not the right place for such a definition */
+/*
+ * 802.2 LLC XID frame ("L2 update frame") broadcast on behalf of a
+ * station so that intervening bridges relearn its port.
+ */
+struct l2_update_frame {
+	struct ether_header eh;	/* broadcast dst, station src */
+	u8 dsap;		/* LLC DSAP */
+	u8 ssap;		/* LLC SSAP */
+	u8 control;		/* LLC control field (XID) */
+	u8 xid[3];		/* XID information field */
+}  __packed;
+
+/*
+ * Build and inject an L2 update frame (802.2 LLC XID) sourced from ni's
+ * MAC address and sent to the broadcast address, so bridges on the wired
+ * side relearn the station's location after (re)association.  Silently
+ * returns if no skb can be allocated.
+ */
+static void
+ieee80211_deliver_l2uf(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct net_device *dev = vap->iv_dev;
+	struct sk_buff *skb;
+	struct l2_update_frame *l2uf;
+	struct ether_header *eh;
+
+	skb = dev_alloc_skb(sizeof(*l2uf));
+	if (!skb) {
+		printk(KERN_INFO "ieee80211_deliver_l2uf: no buf available\n");
+		return;
+	}
+	skb_put(skb, sizeof(*l2uf));
+	l2uf = (struct l2_update_frame *)(skb->data);
+	eh = &l2uf->eh;
+	/* dst: Broadcast address */
+	IEEE80211_ADDR_COPY(eh->ether_dhost, dev->broadcast);
+	/* src: associated STA */
+	IEEE80211_ADDR_COPY(eh->ether_shost, ni->ni_macaddr);
+	/* 802.3 length field: LLC payload length, not an ethertype */
+	eh->ether_type = htons(skb->len - sizeof(*eh));
+
+	/* LLC XID: null SAPs, XID command, standard XID information */
+	l2uf->dsap = 0;
+	l2uf->ssap = 0;
+	l2uf->control = 0xf5;
+	l2uf->xid[0] = 0x81;
+	l2uf->xid[1] = 0x80;
+	l2uf->xid[2] = 0x00;
+
+	/* Classify via eth_type_trans, then restore the full frame so the
+	 * delivery path sees the Ethernet header again */
+	ieee80211_skb_dev_set(dev, skb);
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+
+	ieee80211_deliver_data(ni, skb);
+	return;
+}
+
+/*
+ * Continue an in-progress background scan only when the feature flag is
+ * set and the data path has been idle for at least iv_bgscanidle jiffies.
+ */
+static __inline int
+contbgscan(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (!(ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN))
+		return 0;
+
+	return time_after(jiffies, ic->ic_lastdata + vap->iv_bgscanidle);
+}
+
+/*
+ * Decide whether a new background scan may start.  In all cases the
+ * channel must not be dynamic-turbo and the scan interval must have
+ * elapsed; when operating-channel scanning is disabled, the BGSCAN flag
+ * must also be set and the data path idle for iv_bgscanidle jiffies.
+ */
+static __inline int
+startbgscan(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int interval_ok = !IEEE80211_IS_CHAN_DTURBO(ic->ic_curchan) &&
+			time_after(jiffies, ic->ic_lastscan + vap->iv_bgscanintvl);
+
+	if (ic->ic_scan_opchan_enable)
+		return interval_ok;
+
+	return (vap->iv_flags & IEEE80211_F_BGSCAN) && interval_ok &&
+		time_after(jiffies, ic->ic_lastdata + vap->iv_bgscanidle);
+}
+
+/*
+ * Process beacon/probe response frames in:
+ *    o AP mode, to check for non-HT APs on same channel
+ *    o station mode when associated, to collect state updates such as 802.11g slot time
+ *    o monitor/sniffer node, to handle CSA events
+ *    o WDS mode, to set peer node capabilities and rates
+ *    o adhoc mode, to discover neighbors
+ *    o any mode, when scanning
+ */
+static int
+ieee80211_beacon_should_discard(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	int keep = 0;
+
+	/*
+	 * Returns 1 when a beacon/probe-response from ni carries nothing of
+	 * interest for this VAP and may be dropped, 0 when it must be
+	 * processed.
+	 */
+	switch (vap->iv_opmode) {
+	case IEEE80211_M_HOSTAP:
+	case IEEE80211_M_IBSS:
+		/* AP and adhoc modes always consume beacons */
+		keep = 1;
+		break;
+	case IEEE80211_M_STA:
+		/* Associated STA, or monitor-style operation */
+		keep = (ni->ni_associd != 0) ||
+			(ic->ic_flags_qtn & IEEE80211_QTN_MONITOR);
+		break;
+	case IEEE80211_M_WDS:
+		/* Only frames from the configured WDS peer matter */
+		keep = IEEE80211_ADDR_EQ(ni->ni_macaddr, vap->wds_mac);
+		break;
+	default:
+		break;
+	}
+
+	/* Any mode keeps beacons while a (background) scan is running */
+	if (!keep)
+		keep = (ic->ic_flags & IEEE80211_F_SCAN) ||
+			(ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN);
+
+	return keep ? 0 : 1;
+}
+
+/*
+ * FIXME
+ * Ignore changes in Primary Channel and Secondary channel offset to skip node update
+ * caused by channel switch.
+ */
+static int
+ieee80211_wds_compare_htinfo(struct ieee80211_ie_htinfo *old,
+		struct ieee80211_ie_htinfo *new)
+{
+	/*
+	 * Compare two HT information elements while treating the primary
+	 * channel and the secondary-channel-offset bits as equal — those
+	 * fields change on every channel switch and must not force a node
+	 * update.  NOTE: "old" is modified in place to neutralize them.
+	 * Returns nonzero when the elements otherwise differ.
+	 */
+	old->hi_ctrlchannel = new->hi_ctrlchannel;
+	old->hi_byte1 = (old->hi_byte1 & ~IEEE80211_HTINFO_B1_SEC_CHAN_OFFSET) |
+			(new->hi_byte1 & IEEE80211_HTINFO_B1_SEC_CHAN_OFFSET);
+
+	return memcmp(old, new, sizeof(struct ieee80211_ie_htinfo)) != 0;
+}
+
+/*
+ * FIXME
+ * Ignore changes in Channel Center Segment 0/1 to skip node update
+ * caused by channel switch.
+ */
+static int
+ieee80211_wds_compare_vhtop(struct ieee80211_ie_vhtop *old,
+		struct ieee80211_ie_vhtop *new)
+{
+	/*
+	 * Compare two VHT operation elements while ignoring the channel
+	 * center segment 0/1 fields, which change on every channel switch.
+	 * NOTE: "old" is modified in place to neutralize those fields.
+	 * Returns nonzero when the elements otherwise differ.
+	 */
+	old->vhtop_info[1] = new->vhtop_info[1];
+	old->vhtop_info[2] = new->vhtop_info[2];
+
+	return memcmp(old, new, sizeof(struct ieee80211_ie_vhtop)) != 0;
+}
+
+/*
+ * Refresh a WDS peer node's capabilities from a received beacon/probe
+ * response: QTN association IE, HT/VHT capability and operation IEs.
+ * When anything material changed, renegotiate rates, re-register the node
+ * with the MAC (ic_newassoc), refresh the WDS peer key and clear block-ack
+ * state.  A pure channel-switch-induced HT/VHT-op change on a QTN peer
+ * with an established BA session is deliberately a no-op.
+ */
+static void
+ieee80211_update_wds_peer_node(struct ieee80211_node *ni,
+		struct ieee80211_scanparams *scan)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	int ni_update_required = 0;
+	int htinfo_update_required = 0;
+	int vhtcap_update_required = 0;
+	int htcap_update_required = 0;
+	int vhtop_update_required = 0;
+	struct ieee80211_ie_htinfo lhtinfo;
+
+	/* First sighting of the QTN association IE */
+	if (scan->qtn && ni->ni_qtn_assoc_ie == NULL) {
+		ieee80211_saveie(&ni->ni_qtn_assoc_ie, scan->qtn);
+		ni_update_required = 1;
+	}
+
+	if (scan->htcap && ieee80211_parse_htcap(ni, scan->htcap)) {
+		htcap_update_required = 1;
+		ni_update_required = 1;
+	}
+
+	/* Snapshot the old HT info before parse overwrites it, so the
+	 * comparison can ignore channel-switch-only field changes */
+	memcpy(&lhtinfo, &ni->ni_ie_htinfo, sizeof(struct ieee80211_ie_htinfo));
+
+	if (scan->htinfo && ieee80211_parse_htinfo(ni, scan->htinfo)) {
+		htinfo_update_required = ieee80211_wds_compare_htinfo(&lhtinfo,
+					(struct ieee80211_ie_htinfo *)scan->htinfo);
+		ni_update_required = 1;
+	}
+
+	/* 802.11ac */
+	if (scan->vhtcap && IS_IEEE80211_VHT_ENABLED(ic)) {
+		ni->ni_flags |= IEEE80211_NODE_VHT;
+		if (ieee80211_check_and_parse_vhtcap(ni, scan->vhtcap)) {
+			vhtcap_update_required = 1;
+			ni_update_required = 1;
+		}
+	}
+	if (scan->vhtop && IS_IEEE80211_VHT_ENABLED(ic)) {
+		struct ieee80211_ie_vhtop lvhtop;
+
+		memcpy(&lvhtop, &ni->ni_ie_vhtop, sizeof(struct ieee80211_ie_vhtop));
+
+		if (ieee80211_parse_vhtop(ni, scan->vhtop)) {
+			vhtop_update_required = ieee80211_wds_compare_vhtop(&lvhtop,
+						(struct ieee80211_ie_vhtop *)scan->vhtop);
+			ni_update_required = 1;
+		}
+	}
+
+	/* PMP mode: force RTS once enough QTN STAs are associated */
+	if ((ic->ic_peer_rts_mode == IEEE80211_PEER_RTS_PMP) &&
+		((ic->ic_sta_assoc - ic->ic_nonqtn_sta) >= IEEE80211_MAX_STA_CCA_ENABLED)) {
+
+		ic->ic_peer_rts = 1;
+	}
+
+	if (ni_update_required) {
+		struct ieee80211_rateset old_ni_rates;	/* negotiated rate set */
+		struct ieee80211_ht_rateset old_ni_htrates;	/* negotiated ht rate set */
+		int ba_established = (ni->ni_ba_tx[IEEE80211_WDS_LINK_MAINTAIN_BA_TID].state == IEEE80211_BA_ESTABLISHED);
+
+		old_ni_rates = ni->ni_rates;
+		old_ni_htrates = ni->ni_htrates;
+
+		ieee80211_fix_rate(ni, IEEE80211_F_DOXSECT | IEEE80211_F_DODEL);
+		ieee80211_fix_ht_rate(ni, IEEE80211_F_DOXSECT | IEEE80211_F_DODEL);
+
+		/* QTN peer, BA up, rates unchanged, no VHT change: leave the
+		 * link alone rather than resetting BA state */
+		if (ba_established &&
+				ieee80211_node_is_qtn(ni) &&
+				(memcmp(&ni->ni_rates, &old_ni_rates, sizeof(old_ni_rates)) == 0) &&
+				(memcmp(&ni->ni_htrates, &old_ni_htrates, sizeof(old_ni_htrates)) == 0) &&
+				!vhtcap_update_required && !vhtop_update_required) {
+			return;
+		}
+
+		if (ic->ic_newassoc != NULL) {
+			ic->ic_newassoc(ni, 0);
+
+			/* update key peer WDS */
+			if (vap->iv_wds_peer_key.wk_keylen != 0) {
+				ieee80211_key_update_begin(vap);
+				vap->iv_key_set(vap, &vap->iv_wds_peer_key, ni->ni_macaddr);
+				ieee80211_key_update_end(vap);
+			}
+		}
+
+		/* Peer capabilities changed: BA sessions must renegotiate */
+		ieee80211_node_ba_state_clear(ni);
+	}
+}
+
+/*
+ * Record the peer's next (DTIM) target beacon transmission time,
+ * projected forward from the current TSF by the beacon interval.
+ */
+void
+ieee80211_update_tbtt(struct ieee80211vap *vap,
+		struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint64_t tsf_now;
+
+	ic->ic_get_tsf(&tsf_now);
+
+	/* Next TBTT is one beacon interval away */
+	ni->ni_tbtt = tsf_now + IEEE80211_TU_TO_USEC(ni->ni_intval);
+
+	/* On a DTIM beacon, also project the next DTIM TBTT */
+	if (vap->iv_dtim_count == 0)
+		ni->ni_dtim_tbtt = tsf_now +
+			(uint32_t)vap->iv_dtim_period * IEEE80211_TU_TO_USEC(ni->ni_intval);
+}
+
+/*
+ * Look up the channel-number step of the band containing 'chan'.
+ * Walks every known band's channel list (first channel + count + step)
+ * and returns that band's step when 'chan' is found, or -1 when the
+ * channel belongs to no known band.
+ */
+static int
+ieee80211_get_band_chan_step(int chan)
+{
+	int band_idx;
+	int i;
+
+	for (band_idx = 0; band_idx < IEEE80211_BAND_IDX_MAX; band_idx++) {
+		struct ieee80211_band_info *band;
+		int cur;
+
+		band = ieee80211_get_band_info(band_idx);
+		if (band == NULL)
+			continue;
+
+		/* Enumerate the band's channels: first, first+step, ... */
+		cur = band->band_first_chan;
+		for (i = 0; i < band->band_chan_cnt; i++) {
+			if (cur == chan)
+				return band->band_chan_step;
+			cur += band->band_chan_step;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * Parse a Supported Channels element into ni->ni_supp_chans / ni_chan_num.
+ *
+ * The element body is a list of (first channel, channel count) tuples;
+ * ie[1] is the element length.  A first validation pass rejects the whole
+ * element if any tuple's first channel is in no known band; only then is
+ * the node's previous channel set discarded.
+ *
+ * Fixed: the expansion pass previously decremented *chan_len and advanced
+ * *chan in place, destructively modifying the received IE buffer (and thus
+ * the frame data seen by any later consumer).  Local copies are used now.
+ *
+ * Returns 0 on success, -1 on a NULL argument or an invalid channel.
+ */
+int
+ieee80211_parse_supp_chan(struct ieee80211_node *ni, uint8_t *ie)
+{
+	int chan_tuples;
+	uint8_t first_chan;
+	uint8_t chan_cnt;
+	uint8_t chan_step;
+	int i;
+
+	if (!ni || !ie)
+		return -1;
+
+	/* Pass 1: validate every tuple before touching node state */
+	chan_tuples = ie[1] / 2;
+	for (i = 0; i < chan_tuples; i++) {
+		first_chan = ie[2 + i * 2];
+		if (ieee80211_get_band_chan_step(first_chan) < 0) {
+			IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ELEMID,
+					"%s: Invalid channel: %u\n",
+					__func__, first_chan);
+			return -1;
+		}
+	}
+
+	memset(ni->ni_supp_chans, 0, sizeof(ni->ni_supp_chans));
+	ni->ni_chan_num = 0;
+
+	/* Pass 2: expand each tuple into the channel bitmap */
+	for (i = 0; i < chan_tuples; i++) {
+		first_chan = ie[2 + i * 2];
+		chan_cnt = ie[3 + i * 2];
+		chan_step = ieee80211_get_band_chan_step(first_chan);
+		while (chan_cnt--) {
+			setbit(ni->ni_supp_chans, first_chan);
+			ni->ni_chan_num++;
+			first_chan += chan_step;
+		}
+	}
+
+	return 0;
+}
+/*
+ * Parse the Extended Capabilities element carried by an 802.11v-capable
+ * peer and latch its BSS Transition Management support bit into
+ * ni->ni_ext_flags.  Returns 0 on success or IEEE80211_REASON_IE_INVALID
+ * when the vap is gone or the element length is out of range.
+ */
+static int ieee80211_parse_80211_v(struct ieee80211_node *ni, uint8_t *frm)
+{
+	uint32_t extcap[IEEE8211_EXTCAP_LENGTH / 4] = {0};
+	struct ieee80211vap *vap = ni->ni_vap;
+	uint8_t *data;
+	uint8_t ie_len;
+	uint8_t idx;
+
+	if (vap == NULL)
+		return IEEE80211_REASON_IE_INVALID;
+
+	ie_len = frm[1];
+	if ((ie_len == 0) || (ie_len > IEEE8211_EXTCAP_LENGTH))
+		return IEEE80211_REASON_IE_INVALID;
+
+	data = frm + 2;
+	/* Pack the little-endian capability octets into 32-bit words */
+	for (idx = 0; idx < ie_len; idx++)
+		extcap[idx / 4] |= ((uint32_t)data[idx]) << ((idx % 4) * 8);
+
+	if (extcap[0] & IEEE80211_EXTCAP1_BSS_TRANSITION)
+		ni->ni_ext_flags |= IEEE80211_NODE_BSS_TRANSITION;
+	else
+		ni->ni_ext_flags &= ~IEEE80211_NODE_BSS_TRANSITION;
+
+	return 0;
+}
+
+/*
+ * Parse an Extended Capabilities element received from 'bssid' and apply
+ * its side effects on the node and vap:
+ *  - latch the peer's BTM capability bit;
+ *  - as an associated STA, track the AP's TDLS-prohibited /
+ *    TDLS-channel-switch-prohibited policy (tearing down or restarting
+ *    TDLS machinery on transitions);
+ *  - as an AP, latch the client's Operating Mode Notification support.
+ * Returns 0 on success or IEEE80211_REASON_IE_INVALID on a bad element.
+ */
+static int ieee80211_parse_extcap(struct ieee80211_node *ni, uint8_t *frm, uint8_t *bssid)
+{
+	uint32_t extcap[IEEE8211_EXTCAP_LENGTH / 4] = {0};
+	uint32_t temp_extcap = 0;
+	uint8_t len = 0;
+	uint8_t *ie = NULL;
+	uint8_t i = 0;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (vap == NULL)
+		return IEEE80211_REASON_IE_INVALID;
+
+	ie = frm;
+	len = ie[1];
+	if ((len == 0) || (len > IEEE8211_EXTCAP_LENGTH))
+		return IEEE80211_REASON_IE_INVALID;
+
+	/* skip the element ID and length octets */
+	ie += 2;
+
+	/* check BSS transition management capbility and set node info */
+	if ((len >= 3) && ie[2] & IEEE80211_EXTCAP_BTM)
+		ni->ni_wnm_capability |= IEEE80211_NODE_WNM_BTM_CAPABLE;
+
+	for (i = 0; i < len; i++) {
+		temp_extcap = (uint32_t)ie[i];
+		/* 4 bytes compose an extcap value, lower byte is on lower 8-bit of extcap value */
+		temp_extcap <<= (i % 4) * 8;
+		extcap[i / 4] |= temp_extcap;
+	}
+
+	/* TDLS policy changes only apply for our own AP while associated */
+	if ((vap->iv_opmode == IEEE80211_M_STA) &&
+			(vap->iv_state == IEEE80211_S_RUN) &&
+			(IEEE80211_ADDR_EQ(vap->iv_bss->ni_bssid, bssid))) {
+		if (extcap[1] & IEEE80211_EXTCAP2_TDLS_PROHIB) {
+			/* AP newly prohibits TDLS: tear down unless already prohibited locally */
+			if ((vap->iv_flags_ext & IEEE80211_FEXT_AP_TDLS_PROHIB) == 0) {
+				if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_PROHIB) == 0) {
+					/* teardown the link and clear timer */
+					ieee80211_tdls_teardown_all_link(vap);
+					ieee80211_tdls_clear_disc_timer(vap);
+					ieee80211_tdls_clear_node_expire_timer(vap);
+				}
+				vap->iv_flags_ext |= IEEE80211_FEXT_AP_TDLS_PROHIB;
+			}
+		} else {
+			/* AP no longer prohibits TDLS: resume discovery if enabled */
+			if ((vap->iv_flags_ext & IEEE80211_FEXT_AP_TDLS_PROHIB) != 0) {
+				vap->iv_flags_ext &= ~IEEE80211_FEXT_AP_TDLS_PROHIB;
+				if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0) {
+					ieee80211_tdls_start_disc_timer(vap);
+					ieee80211_tdls_start_node_expire_timer(vap);
+				}
+			}
+		}
+
+		if (extcap[1] & IEEE80211_EXTCAP2_TDLS_CS_PROHIB)
+			vap->iv_flags_ext |= IEEE80211_FEXT_TDLS_CS_PROHIB;
+		else
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_CS_PROHIB;
+	}
+
+	/* As an AP, record whether this client supports OpMode Notification */
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+		IEEE80211_ADDR_EQ(ni->ni_macaddr, bssid)) {
+		if (extcap[1] & IEEE80211_EXTCAP2_OP_MODE_NOTI)
+			ni->ni_ext_flags |= IEEE80211_NODE_OP_MODE_NOTI;
+		else
+			ni->ni_ext_flags &= ~IEEE80211_NODE_OP_MODE_NOTI;
+	}
+
+	return 0;
+}
+
+/*
+ * Extract the DA/SA pointers for a TDLS frame from its 802.11 header,
+ * according to the To-DS/From-DS direction bits.
+ */
+static void
+ieee80211_parse_tdls_hdr(struct ieee80211vap *vap,
+	struct ieee80211_tdls_params *tdls, struct ieee80211_frame *wh)
+{
+	struct ieee80211_frame_addr4 *wh4;
+	uint8_t dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+
+	if (dir == IEEE80211_FC1_DIR_NODS) {
+		tdls->da = wh->i_addr1;
+		tdls->sa = wh->i_addr2;
+	} else if (dir == IEEE80211_FC1_DIR_TODS) {
+		tdls->sa = wh->i_addr2;
+		tdls->da = wh->i_addr3;
+	} else if (dir == IEEE80211_FC1_DIR_FROMDS) {
+		tdls->da = wh->i_addr1;
+		tdls->sa = wh->i_addr3;
+	} else if (dir == IEEE80211_FC1_DIR_DSTODS) {
+		/* 4-address frame: DA/SA live in addr3/addr4 */
+		wh4 = (struct ieee80211_frame_addr4 *)wh;
+		tdls->da = wh4->i_addr3;
+		tdls->sa = wh4->i_addr4;
+	}
+}
+
+/*
+ * Parse the TLVs in a TDLS action frame or public TDLS action frame,
+ * recording a pointer to each recognized element in 'tdls'.
+ * On success *frm_p is advanced past the parsed elements.
+ *
+ * Fixed: the remaining-length 'size' was computed once before the loop
+ * and never refreshed, so every bounds check compared element lengths
+ * against the original total instead of the bytes actually left; frm[1]
+ * could also be read with fewer than 2 bytes remaining.  The remaining
+ * length is now recomputed per element (as a pointer difference, avoiding
+ * the old 32-bit pointer-cast truncation on 64-bit builds).
+ *
+ * Returns 0 if successful, else 1.
+ */
+static int
+ieee80211_parse_tdls_tlvs(struct ieee80211vap *vap,
+		struct ieee80211_tdls_params *tdls, uint8_t **frm_p, uint8_t *efrm,
+	uint8_t ia_action)
+{
+	uint8_t *frm = *frm_p;
+	uint32_t size;
+	char *elem_type = "none";
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG, "TDLS %s: parse TLVs\n", __func__);
+	/* Parse TLVs */
+	while (frm < efrm) {
+		/* Bytes remaining from the current element's first octet */
+		size = (uint32_t)(efrm - frm);
+		/* Need the 2-byte ID/length header plus the declared payload */
+		if (size < 2 || (uint32_t)frm[1] + 2 > size)
+			goto error;
+		switch (*frm) {
+		case IEEE80211_ELEMID_RATES:
+			elem_type = "rates";
+			tdls->rates = frm;
+			break;
+		case IEEE80211_ELEMID_COUNTRY:
+			elem_type = "country";
+			tdls->country = frm;
+			break;
+		case IEEE80211_ELEMID_XRATES:
+			elem_type = "xrates";
+			tdls->xrates = frm;
+			break;
+		case IEEE80211_ELEMID_SUPPCHAN:
+			elem_type = "suppchan";
+			tdls->supp_chan = frm;
+			break;
+		case IEEE80211_ELEMID_SEC_CHAN_OFF:
+			elem_type = "sec_chan_off";
+			tdls->sec_chan_off = frm;
+			break;
+		case IEEE80211_ELEMID_RSN:
+			elem_type = "rsn";
+			tdls->rsn = frm;
+			break;
+		case IEEE80211_ELEMID_EXTCAP:
+			elem_type = "ext cap";
+			tdls->ext_cap = frm;
+			break;
+		case IEEE80211_ELEMID_EDCA:
+			elem_type = "edca";
+			tdls->edca = frm;
+			break;
+		case IEEE80211_ELEMID_QOSCAP:
+			elem_type = "qoscap";
+			tdls->qos_cap = frm;
+			break;
+		case IEEE80211_ELEMID_FTIE:
+			elem_type = "ftie";
+			tdls->ftie = frm;
+			break;
+		case IEEE80211_ELEMID_TIMEOUT_INT:
+			elem_type = "timeout_int";
+			tdls->tpk_timeout = frm;
+			break;
+		case IEEE80211_ELEMID_REG_CLASSES:
+			elem_type = "reg_classes";
+			tdls->sup_reg_class = frm;
+			break;
+		case IEEE80211_ELEMID_HTCAP:
+			elem_type = "htcap";
+			tdls->htcap = frm;
+			break;
+		case IEEE80211_ELEMID_HTINFO:
+			elem_type = "htinfo";
+			tdls->htinfo = frm;
+			break;
+		case IEEE80211_ELEMID_VHTCAP:
+			elem_type = "vhtcap";
+			tdls->vhtcap = frm;
+			break;
+		case IEEE80211_ELEMID_VHTOP:
+			elem_type = "vhtop";
+			tdls->vhtop = frm;
+			break;
+		case IEEE80211_ELEMID_20_40_BSS_COEX:
+			elem_type = "20/40 bss coex";
+			tdls->bss_2040_coex = frm;
+			break;
+		/*
+		 * Fixed-layout elements: make sure the whole structure
+		 * (including its 2-byte header) fits in the remaining bytes
+		 * before taking a typed pointer to it.
+		 */
+		case IEEE80211_ELEMID_AID:
+			elem_type = "aid";
+			if (size < sizeof(*tdls->aid))
+				goto error;
+			tdls->aid = (struct ieee80211_ie_aid *)frm;
+			break;
+		case IEEE80211_ELEMID_TDLS_LINK_ID:
+			elem_type = "link id";
+			if (size < sizeof(*tdls->link_id))
+				goto error;
+			tdls->link_id = (struct ieee80211_tdls_link_id *)frm;
+			break;
+		case IEEE80211_ELEMID_TDLS_WKUP_SCHED:
+			elem_type = "wkup sched";
+			if (size < sizeof(*tdls->wkup_sched))
+				goto error;
+			tdls->wkup_sched = (struct ieee80211_tdls_wkup_sched *)frm;
+			break;
+		case IEEE80211_ELEMID_TDLS_CS_TIMING:
+			elem_type = "cs timing";
+			if (size < sizeof(*tdls->cs_timing))
+				goto error;
+			tdls->cs_timing = (struct ieee80211_tdls_cs_timing *)frm;
+			break;
+		case IEEE80211_ELEMID_TDLS_PTI_CTRL:
+			elem_type = "pti ctrl";
+			if (size < sizeof(*tdls->pti_ctrl))
+				goto error;
+			tdls->pti_ctrl = (struct ieee80211_tdls_pti_ctrl *)frm;
+			break;
+		case IEEE80211_ELEMID_TDLS_PU_BUF_STAT:
+			elem_type = "pu buf stat";
+			if (size < sizeof(*tdls->pu_buf_stat))
+				goto error;
+			tdls->pu_buf_stat = (struct ieee80211_tdls_pu_buf_stat *)frm;
+			break;
+		case IEEE80211_ELEMID_WBWCHANSWITCH:
+			elem_type = "wide_bw_cs";
+			if (size < sizeof(*tdls->wide_bw_cs))
+				goto error;
+			tdls->wide_bw_cs = (struct ieee80211_ie_wbchansw *)frm;
+			break;
+		case IEEE80211_ELEMID_VHTXMTPWRENVLP:
+			elem_type = "vht_tx_pw_envelope";
+			if (size < sizeof(*tdls->vht_tx_pw_env))
+				goto error;
+			tdls->vht_tx_pw_env = (struct ieee80211_ie_vtxpwren *)frm;
+			break;
+		case IEEE80211_ELEMID_VENDOR:
+			elem_type = "vendor";
+			/* Unsupported vendor IEs are silently ignored*/
+			if (isqtnie(frm)) {
+				tdls->qtn_info = frm;
+			} else if (is_qtn_oui_tdls_brmacs(frm)) {
+				tdls->qtn_brmacs = frm;
+			} else {
+				/* TDLS debugging */
+				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"unhandled id %u, len %u", *frm, frm[1]);
+				vap->iv_stats.is_rx_elem_unknown++;
+			}
+			break;
+		default:
+			elem_type = "unknown";
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"unhandled id %u, len %u", *frm, frm[1]);
+			vap->iv_stats.is_rx_elem_unknown++;
+			break;
+		}
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG, "TDLS %s: got %s pos=%p "
+			"id=%u len=%u\n", __func__, elem_type, frm, *frm, frm[1]);
+
+		frm += frm[1] + 2;
+	}
+
+	if (frm > efrm) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN, "TDLS %s: "
+			"frame len invalid frm=%p efrm=%p\n", __func__, frm, efrm);
+		vap->iv_stats.is_rx_elem_toobig++;
+		return 1;
+	}
+
+	*frm_p = frm;
+
+	return 0;
+
+error:
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: ie (%s) too short", __func__, elem_type);
+	vap->iv_stats.is_rx_elem_toosmall++;
+
+	return 1;
+}
+
+/*
+ * Process a public TDLS action frame
+ */
+static void
+ieee80211_recv_action_public_tdls(struct ieee80211_node *ni, struct sk_buff *skb,
+	struct ieee80211_frame *wh, struct ieee80211_action *ia, int rssi)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_tdls_params tdls;
+	uint8_t *frm = (u_int8_t *)(ia + 1);	/* first octet past the action header */
+	uint8_t *efrm = skb->data + skb->len;	/* one past the end of the frame */
+	uint32_t dump_len = sizeof(struct ieee80211_ht_qosframe) + LLC_SNAPFRAMELEN;
+	int subtype = IEEE80211_FC0_SUBTYPE_ACTION; /* for validation scripts */
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG, "TDLS %s: "
+		"got TDLS type %u\n", __func__, ia->ia_action);
+
+	/* When TDLS debug is on, hex-dump the frame header (clamped to skb->len) */
+	if (unlikely(ieee80211_msg(vap, IEEE80211_MSG_TDLS) && ieee80211_tdls_msg(vap, IEEE80211_TDLS_MSG_DBG))) {
+		if (unlikely(skb->len < dump_len))
+			dump_len = skb->len;
+		ieee80211_dump_pkt(vap->iv_ic, skb->data, dump_len, -1, rssi);
+	}
+
+	/* TDLS discovery responses are only meaningful on a STA vap */
+	if (vap->iv_opmode != IEEE80211_M_STA) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN, "TDLS %s: "
+			"Ignoring public TDLS action frame - not STA\n", __func__);
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, ieee80211_tdls_action_name_get(ia->ia_action),
+			"%s: Ignoring public TDLS action frame - not STA\n",
+			ia->ia_action);
+		vap->iv_stats.is_rx_mgtdiscard++;
+		return;
+	}
+	/* Discovery Response is the only public TDLS action handled here */
+	if (ia->ia_action != IEEE80211_ACTION_PUB_TDLS_DISC_RESP) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+			wh, ieee80211_mgt_subtype_name[
+			IEEE80211_FC0_SUBTYPE_ACTION >> IEEE80211_FC0_SUBTYPE_SHIFT],
+			"unsupported TDLS public action %u", ia->ia_action);
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN, "TDLS %s: "
+			"unsupported TDLS public action\n", __func__);
+		vap->iv_stats.is_rx_badsubtype++;
+		return;
+	}
+
+	memset(&tdls, 0, sizeof(tdls));
+
+	ieee80211_parse_tdls_hdr(vap, &tdls, wh);
+
+	/* NB: macro returns from this function on a short frame */
+	IEEE80211_VERIFY_LENGTH(efrm - frm,
+		sizeof(tdls.diag_token) +
+		sizeof(tdls.caps));
+	tdls.diag_token = *frm;
+	frm += sizeof(tdls.diag_token);
+	tdls.caps = le16toh(*(__le16 *)frm);
+	frm += sizeof(tdls.caps);
+
+	if (ieee80211_parse_tdls_tlvs(vap, &tdls, &frm, efrm, ia->ia_action)) {
+		return;
+	}
+
+	ieee80211_tdls_recv_disc_resp(ni, skb, rssi, &tdls);
+}
+
+/*
+ * Derive the primary and secondary channel numbers of a scanned BSS from
+ * its HT Information element.  Without an HT info IE the BSS is treated as
+ * 20MHz-only on the scanned channel (*sec_chan = 0).
+ *
+ * Fixed: the mismatch debug print passed the 'pri_chan' pointer as a
+ * surplus vararg with no matching conversion specifier; it now prints the
+ * dereferenced value.  *sec_chan is also zeroed on an invalid extension
+ * channel offset instead of being left uninitialized.
+ */
+void
+ieee80211_find_ht_pri_sec_chan(struct ieee80211vap *vap,
+		const struct ieee80211_scan_entry *se,
+		uint8_t *pri_chan, uint8_t *sec_chan)
+{
+	struct ieee80211_ie_htinfo *htinfo =
+			(struct ieee80211_ie_htinfo *)se->se_htinfo_ie;
+	uint8_t choff;
+
+	if (!htinfo) {
+		*pri_chan = se->se_chan->ic_ieee;
+		*sec_chan = 0;
+		return;
+	}
+
+	*pri_chan = IEEE80211_HTINFO_PRIMARY_CHANNEL(htinfo);
+	choff = IEEE80211_HTINFO_B1_EXT_CHOFFSET(htinfo);
+	if (choff == IEEE80211_HTINFO_EXTOFFSET_ABOVE) {
+		*sec_chan = *pri_chan + IEEE80211_CHAN_SEC_SHIFT;
+	} else if (choff == IEEE80211_HTINFO_EXTOFFSET_BELOW) {
+		*sec_chan = *pri_chan - IEEE80211_CHAN_SEC_SHIFT;
+	} else if (choff == IEEE80211_HTINFO_EXTOFFSET_NA) {
+		*sec_chan = 0;
+	} else {
+		/* Invalid offset: report it and fall back to 20MHz-only */
+		*sec_chan = 0;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: wrong channel offset %u in htinfo IE\n",
+			__func__, choff);
+	}
+
+	if (*pri_chan != se->se_chan->ic_ieee)
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: error - scan channel %u is different"
+			" from primary channel %u in htinfo\n", __func__,
+			se->se_chan->ic_ieee, *pri_chan);
+}
+
+/*
+ * Return the center channel of a scanned BSS: the midpoint of the
+ * primary/secondary pair for a 40MHz BSS, or the primary channel itself
+ * for a 20MHz-only BSS.
+ */
+uint8_t
+ieee80211_find_ht_center_chan(struct ieee80211vap *vap,
+		const struct ieee80211_scan_entry *se)
+{
+	uint8_t pri_chan;
+	uint8_t sec_chan;
+
+	ieee80211_find_ht_pri_sec_chan(vap, se, &pri_chan, &sec_chan);
+
+	if (sec_chan == 0)
+		return pri_chan;
+
+	return (pri_chan + sec_chan) / 2;
+}
+
+/*
+ * Decide whether our 20/40MHz operation on 'chan' can coexist with a
+ * scanned BSS whose primary/secondary channels are se_pri_chan/se_sec_chan
+ * (se_sec_chan == 0 means a 20MHz-only BSS).
+ *
+ * Returns 1 when 40MHz operation is permitted, 0 otherwise.  A scanned
+ * BSS entirely outside our affected frequency range never blocks us; one
+ * inside the range is tolerated only if it exactly shares our primary
+ * (20MHz BSS) or our primary and secondary (40MHz BSS) channels.
+ */
+int
+ieee80211_20_40_operation_permitted(struct ieee80211com *ic,
+	struct ieee80211_channel *chan, uint8_t se_pri_chan, uint8_t se_sec_chan)
+{
+	uint16_t affected_start;
+	uint16_t affected_end;
+	uint16_t center;
+	uint8_t pri_chan = chan->ic_ieee;
+	uint8_t sec_chan;
+	uint16_t pri_freq = chan->ic_freq;
+	uint16_t sec_freq;
+	uint16_t se_pri_freq;
+	uint16_t se_sec_freq;
+
+	/* Only an HT40 channel (secondary above or below) is constrained */
+	if (chan->ic_flags & IEEE80211_CHAN_HT40U) {
+		sec_chan = pri_chan + IEEE80211_SEC_CHAN_OFFSET;
+		sec_freq = pri_freq + IEEE80211_SEC_CHAN_OFFSET * IEEE80211_CHAN_SPACE;
+	} else if (chan->ic_flags & IEEE80211_CHAN_HT40D) {
+		sec_chan = pri_chan - IEEE80211_SEC_CHAN_OFFSET;
+		sec_freq = pri_freq - IEEE80211_SEC_CHAN_OFFSET * IEEE80211_CHAN_SPACE;
+	} else {
+		return 0;
+	}
+
+	/* Frequency range affected by our 40MHz operation */
+	center = (pri_freq + sec_freq) >> 1;
+	affected_start = center - IEEE80211_BW_RANGE;
+	affected_end = center + IEEE80211_BW_RANGE;
+
+	se_pri_freq = ieee80211_ieee2mhz(se_pri_chan, 0);
+	se_sec_freq = (se_sec_chan != 0) ? ieee80211_ieee2mhz(se_sec_chan, 0) : 0;
+
+	/* A BSS with neither channel inside the affected range is harmless */
+	if ((se_pri_freq <= affected_start || se_pri_freq >= affected_end) &&
+			(se_sec_freq <= affected_start || se_sec_freq >= affected_end))
+		return 1;
+
+	/* 20MHz BSS sitting exactly on our primary channel */
+	if ((pri_chan == se_pri_chan) && (se_sec_chan == 0))
+		return 1;
+
+	/* 40MHz BSS sharing both our primary and secondary channels */
+	if ((pri_chan == se_pri_chan) && (sec_chan == se_sec_chan))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Check whether a channel reported in a 20/40 intolerant channel report
+ * permits us to keep operating at 40MHz: returns 1 when 'channel' lies
+ * outside the frequency range affected by our current 40MHz channel, 0
+ * when it falls inside that range or when we are not on an HT40 channel.
+ */
+static int
+ieee80211_is_40_allowed(struct ieee80211vap *vap, int channel)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint16_t ch_freq;
+	uint16_t affected_start;
+	uint16_t affected_end;
+	uint16_t pri_freq;
+	uint16_t sec_freq;
+
+	ch_freq = ieee80211_ieee2mhz(channel, 0);
+	pri_freq = ic->ic_curchan->ic_freq;
+
+	/*
+	 * Use the same secondary-channel spacing expression as
+	 * ieee80211_20_40_operation_permitted() rather than a bare 20.
+	 */
+	if (ic->ic_curchan->ic_flags & IEEE80211_CHAN_HT40U)
+		sec_freq = pri_freq + IEEE80211_SEC_CHAN_OFFSET * IEEE80211_CHAN_SPACE;
+	else if (ic->ic_curchan->ic_flags & IEEE80211_CHAN_HT40D)
+		sec_freq = pri_freq - IEEE80211_SEC_CHAN_OFFSET * IEEE80211_CHAN_SPACE;
+	else
+		return 0;
+
+	/* Finding the frequency range */
+	affected_start = ((pri_freq + sec_freq) >> 1) - IEEE80211_BW_RANGE;
+	affected_end = ((pri_freq + sec_freq) >> 1) + IEEE80211_BW_RANGE;
+
+	if (ch_freq < affected_start || ch_freq > affected_end)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Walk the OBSS scan results and, if any scanned BSS forbids 20/40MHz
+ * coexistence on the current channel, fall back to 20MHz operation
+ * (when 11ng 40MHz and the coexistence feature are enabled).
+ *
+ * Improvement: the scan now stops at the first entry that forbids 40MHz
+ * operation; the old code only broke out of the per-channel list and
+ * kept iterating all remaining channels after the verdict was known.
+ *
+ * Returns 0, or -EINVAL when not operating as a hostap.
+ */
+int
+ieee80211_check_40_bw_allowed(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	struct ap_scan_entry *se;
+	struct ap_state *as = ss->ss_priv;
+	uint8_t se_pri_chan = 0;
+	uint8_t se_sec_chan = 0;
+	int change_bw = 0;
+	int i;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		return -EINVAL;
+	}
+	/* Stop as soon as one scan entry rules out 40MHz operation */
+	for (i = 0; i < IEEE80211_CHAN_MAX && !change_bw; i++) {
+		TAILQ_FOREACH(se, &as->as_scan_list[i].asl_head, ase_list) {
+			ieee80211_find_ht_pri_sec_chan(vap, &se->base,
+						&se_pri_chan, &se_sec_chan);
+			if (!ieee80211_20_40_operation_permitted(ic,
+					ic->ic_curchan, se_pri_chan, se_sec_chan)) {
+				change_bw = 1;
+				break;
+			}
+		}
+	}
+	if (change_bw && IEEE80211_IS_11NG_40(ic) && (ic->ic_20_40_coex_enable)) {
+		ieee80211_change_bw(vap, BW_HT20, 0);
+		ic->ic_coex_stats_update(ic, WLAN_COEX_STATS_BW_SCAN);
+	}
+	ic->ic_obss_scan_count = 1;
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_check_40_bw_allowed);
+
+/*
+ * Decide whether to grant an OBSS-scan exemption request from 'ni':
+ * granted (WLAN_20_40_BSS_COEX_OBSS_EXEMPT_GRNT) iff some *other*
+ * associated AP-mode node currently has an OBSS scan running, else 0.
+ */
+static int
+is_ieee80211_obss_grant(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *peer;
+	uint8_t retval = 0;
+
+	IEEE80211_NODE_LOCK_BH(nt);
+	TAILQ_FOREACH(peer, &nt->nt_node, ni_list) {
+		/* skip the requester itself and unassociated nodes */
+		if (peer == ni || peer->ni_associd == 0)
+			continue;
+		if (peer->ni_vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+		if (peer->ni_obss_scan & IEEE80211_NODE_OBSS_RUNNING)
+			retval = WLAN_20_40_BSS_COEX_OBSS_EXEMPT_GRNT;
+	}
+	IEEE80211_NODE_UNLOCK_BH(nt);
+
+	return retval;
+}
+
+/*
+ * Process a 20/40MHz BSS Coexistence public action frame.
+ * As a STA: answer information requests and honor an OBSS-scan exemption
+ * grant.  As an AP: fall back to 20MHz on an intolerant report and answer
+ * OBSS-scan exemption requests.
+ */
+static void
+ieee80211_recv_action_public_coex(struct ieee80211_node *ni, struct sk_buff *skb,
+	struct ieee80211_frame *wh, struct ieee80211_action *ia, int rssi)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t *frm = (u_int8_t *)(ia + 1);	/* first octet past the action header */
+	uint8_t *efrm = skb->data + skb->len;	/* one past the end of the frame */
+	struct ieee80211_20_40_coex_param *coex_ie;
+	struct ieee80211_20_40_in_ch_rep *ch_rep_ie;
+	int change_bw = 0;
+	int i;
+
+	/* The mandatory coexistence element must fit in the frame */
+	if ((efrm - (u_int8_t *)ia) < sizeof(struct ieee80211_20_40_coex_param)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "20/40 coexistence action frame header too small");
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return;
+	}
+	coex_ie = (struct ieee80211_20_40_coex_param *)frm;
+	if (coex_ie->param_id != IEEE80211_ELEMID_20_40_BSS_COEX) {
+		vap->iv_stats.is_rx_elem_unknown++;
+		return;
+	}
+	ni->ni_coex =  coex_ie->coex_param;
+	frm = (u_int8_t *)(coex_ie + 1);
+
+
+	/* STA side: reply to info requests, honor exemption grants, done */
+	if (ic->ic_opmode == IEEE80211_M_STA) {
+		if (coex_ie->coex_param & WLAN_20_40_BSS_COEX_INFO_REQ) {
+			u_int8_t coex = vap->iv_coex;
+			struct ieee80211_action_data action_data;
+			action_data.cat = IEEE80211_ACTION_CAT_PUBLIC;
+			action_data.action = IEEE80211_ACTION_PUB_20_40_COEX;
+			action_data.params = &coex;
+
+			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+		}
+		if (coex_ie->coex_param & WLAN_20_40_BSS_COEX_OBSS_EXEMPT_GRNT)
+			del_timer_sync(&ic->ic_obss_timer);
+		return;
+	}
+
+	/* For element id and length */
+	if (efrm - frm > 3) {
+		/* Optional Intolerant Channel Report: any listed channel in our
+		 * affected range forces a bandwidth change */
+		if (*frm == IEEE80211_ELEMID_20_40_IT_CH_REP) {
+			ch_rep_ie = (struct ieee80211_20_40_in_ch_rep *)frm;
+			for (i = 0;i < ch_rep_ie->param_len - 1;i++) {
+				if (!ieee80211_is_40_allowed(vap, ch_rep_ie->chan[i])) {
+					change_bw = 1;
+					break;
+				}
+			}
+		}
+	}
+
+	if (coex_ie->coex_param &
+		(WLAN_20_40_BSS_COEX_40MHZ_INTOL | WLAN_20_40_BSS_COEX_20MHZ_WIDTH_REQ)) {
+		change_bw = 1;
+	}
+
+	if (change_bw && IEEE80211_IS_11NG_40(ic) && (ic->ic_20_40_coex_enable)) {
+		ieee80211_change_bw(vap, BW_HT20, 0);
+		ic->ic_coex_stats_update(ic, WLAN_COEX_STATS_BW_ACTION);
+	}
+
+	/* OBSS-scan exemption request: answer with grant/deny */
+	if (coex_ie->coex_param & WLAN_20_40_BSS_COEX_OBSS_EXEMPT_REQ) {
+		struct ieee80211_action_data action_data;
+		uint8_t coex_value = 0;
+		action_data.cat = IEEE80211_ACTION_CAT_PUBLIC;
+		action_data.action = IEEE80211_ACTION_PUB_20_40_COEX;
+
+		coex_value |= is_ieee80211_obss_grant(ic, ni);
+		action_data.params = &coex_value;
+
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+	}
+}
+
+/*
+ * Dispatch a received public action frame to the matching handler;
+ * unknown types are counted and discarded.
+ */
+static void
+ieee80211_recv_action_public(struct ieee80211_node *ni, struct sk_buff *skb,
+	struct ieee80211_frame *wh, struct ieee80211_action *ia, int rssi)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (ia->ia_action == IEEE80211_ACTION_PUB_20_40_COEX) {
+		ieee80211_recv_action_public_coex(ni, skb, wh, ia, rssi);
+	} else if (ia->ia_action == IEEE80211_ACTION_PUB_TDLS_DISC_RESP) {
+		ieee80211_recv_action_public_tdls(ni, skb, wh, ia, rssi);
+	} else {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN, "TDLS %s: "
+			"Received unsupported public action frame type %u\n",
+			__func__, ia->ia_action);
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s: Received unsupported public action frame type %u\n",
+			ia->ia_action);
+		vap->iv_stats.is_rx_mgtdiscard++;
+	}
+}
+
+/*
+ * Process a TDLS Action Frame.
+ * Note: these are management frames encapsulated in data frames
+ * (ieee80211_header selects whether skb->data starts with an 802.11
+ * header or an Ethernet header).  The fixed-length fields of the action
+ * are parsed first, then the trailing TLVs, then the frame is handed to
+ * the per-action handler.
+ */
+static void
+ieee80211_recv_action_tdls(struct ieee80211_node *ni, struct sk_buff *skb,
+	struct ieee80211_action *ia, int ieee80211_header, int rssi)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_tdls_params tdls;
+	uint8_t *frm = (uint8_t *)(ia + 1);	/* first octet past the action header */
+	uint8_t *efrm = skb->data + skb->len;	/* one past the end of the frame */
+	struct ether_header *eh;
+	struct ieee80211_frame *wh;
+
+	if (ia->ia_category != IEEE80211_ACTION_CAT_TDLS) {
+		vap->iv_stats.is_rx_mgtdiscard++;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN, "TDLS %s: "
+			"invalid category %d!\n", __func__, ia->ia_category);
+		return;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG, "TDLS %s: "
+		"got TDLS type %u\n", __func__, ia->ia_action);
+
+	memset(&tdls, 0, sizeof(tdls));
+
+	/* Recover DA/SA from whichever encapsulation was used */
+	if (ieee80211_header) {
+		wh = (struct ieee80211_frame *)skb->data;
+		ieee80211_parse_tdls_hdr(vap, &tdls, wh);
+	} else {
+		eh = (struct ether_header *)skb->data;
+		tdls.da = eh->ether_dhost;
+		tdls.sa = eh->ether_shost;
+	}
+
+	tdls.act = ia->ia_action;
+
+	/*
+	 * Parse fixed length fields.
+	 * NB: IEEE80211_VERIFY_TDLS_LENGTH returns from this function
+	 * when the frame is too short for the listed fields.
+	 */
+	switch (ia->ia_action) {
+	case IEEE80211_ACTION_TDLS_SETUP_REQ:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.diag_token) +
+			sizeof(tdls.caps));
+		tdls.diag_token = *frm;
+		frm += sizeof(tdls.diag_token);
+		tdls.caps = le16toh(*(__le16 *)frm);
+		frm += sizeof(tdls.caps);
+		break;
+	case IEEE80211_ACTION_TDLS_SETUP_RESP:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.status) +
+			sizeof(tdls.diag_token));
+		tdls.status = le16toh(*(__le16 *)frm);
+		frm += sizeof(tdls.status);
+		tdls.diag_token = *frm;
+		frm += sizeof(tdls.diag_token);
+		/* capabilities are only present on a successful response */
+		if (tdls.status == IEEE80211_STATUS_SUCCESS) {
+			tdls.caps = le16toh(*(__le16 *)frm);
+			frm += sizeof(tdls.caps);
+		}
+		break;
+	case IEEE80211_ACTION_TDLS_SETUP_CONFIRM:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.status) +
+			sizeof(tdls.diag_token));
+		tdls.status = le16toh(*(__le16 *)frm);
+		frm += sizeof(tdls.status);
+		tdls.diag_token = *frm;
+		frm += sizeof(tdls.diag_token);
+		break;
+	case IEEE80211_ACTION_TDLS_TEARDOWN:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.reason));
+		tdls.reason = le16toh(*(__le16 *)frm);
+		frm += sizeof(tdls.reason);
+		break;
+	case IEEE80211_ACTION_TDLS_PTI:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.diag_token));
+		tdls.diag_token = *frm;
+		frm += sizeof(tdls.diag_token);
+		break;
+	case IEEE80211_ACTION_TDLS_CS_REQ:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.target_chan) +
+			sizeof(tdls.reg_class));
+		tdls.target_chan = *frm;
+		frm += sizeof(tdls.target_chan);
+		tdls.reg_class = *frm;
+		frm += sizeof(tdls.reg_class);
+		break;
+	case IEEE80211_ACTION_TDLS_CS_RESP:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.status));
+		tdls.status = le16toh(*(__le16 *)frm);
+		frm += sizeof(tdls.status);
+		break;
+	case IEEE80211_ACTION_TDLS_PEER_PSM_REQ:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.diag_token));
+		tdls.diag_token = *frm;
+		frm += sizeof(tdls.diag_token);
+		break;
+	case IEEE80211_ACTION_TDLS_PEER_PSM_RESP:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.diag_token) +
+			sizeof(tdls.status));
+		tdls.diag_token = *frm;
+		frm += sizeof(tdls.diag_token);
+		tdls.status = le16toh(*(__le16 *)frm);
+		frm += sizeof(tdls.status);
+		break;
+	case IEEE80211_ACTION_TDLS_PEER_TRAF_RESP:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.diag_token));
+		tdls.diag_token = *frm;
+		frm += sizeof(tdls.diag_token);
+		break;
+	case IEEE80211_ACTION_TDLS_DISC_REQ:
+		IEEE80211_VERIFY_TDLS_LENGTH(efrm - frm,
+			sizeof(tdls.diag_token));
+		tdls.diag_token = *frm;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG, "TDLS %s: "
+			"disc req - diag_token=%u\n", __func__, tdls.diag_token);
+		frm += sizeof(tdls.diag_token);
+		break;
+	default:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN, "TDLS %s: "
+			"unsupported TDLS action %u\n", __func__, ia->ia_action);
+		vap->iv_stats.is_rx_badsubtype++;
+		return;
+	}
+
+	if (ieee80211_parse_tdls_tlvs(vap, &tdls, &frm, efrm, ia->ia_action)) {
+		return;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: process frame %u\n", __func__, ia->ia_action);
+	/* Process the frame */
+	switch (ia->ia_action) {
+	case IEEE80211_ACTION_TDLS_SETUP_REQ:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: process setup_req\n", __func__);
+		ieee80211_tdls_recv_setup_req(ni, skb, rssi, &tdls);
+		break;
+	case IEEE80211_ACTION_TDLS_SETUP_RESP:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: process setup_resp\n", __func__);
+		ieee80211_tdls_recv_setup_resp(ni, skb, rssi, &tdls);
+		break;
+	case IEEE80211_ACTION_TDLS_SETUP_CONFIRM:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: process setup_confirm\n", __func__);
+		ieee80211_tdls_recv_setup_confirm(ni, skb, rssi, &tdls);
+		break;
+	case IEEE80211_ACTION_TDLS_TEARDOWN:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: process teardown\n", __func__);
+		ieee80211_tdls_recv_teardown(ni, skb, rssi, &tdls);
+		break;
+	case IEEE80211_ACTION_TDLS_PTI:
+		break;
+	case IEEE80211_ACTION_TDLS_CS_REQ:
+		ieee80211_tdls_recv_chan_switch_req(ni, skb, rssi, &tdls);
+		break;
+	case IEEE80211_ACTION_TDLS_CS_RESP:
+		ieee80211_tdls_recv_chan_switch_resp(ni, skb, rssi, &tdls);
+		break;
+	/* PSM and peer-traffic responses are parsed but not acted upon */
+	case IEEE80211_ACTION_TDLS_PEER_PSM_REQ:
+		break;
+	case IEEE80211_ACTION_TDLS_PEER_PSM_RESP:
+		break;
+	case IEEE80211_ACTION_TDLS_PEER_TRAF_RESP:
+		break;
+	case IEEE80211_ACTION_TDLS_DISC_REQ:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: process disc_req\n", __func__);
+		ieee80211_tdls_recv_disc_req(ni, skb, rssi, &tdls);
+		break;
+	default:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: unsupported TDLS action %u\n", __func__, ia->ia_action);
+		vap->iv_stats.is_rx_badsubtype++;
+		return;
+	}
+
+	vap->iv_stats.is_rx_tdls++;
+	IEEE80211_NODE_STAT(ni, rx_tdls_action);
+}
+
+/*
+ * Handle a basic measurement report: a radar indication on our BSS
+ * channel triggers the local radar-detected handling.
+ */
+void ieee80211_recv_meas_basic_report(struct ieee80211_node *ni,
+		struct ieee80211_ie_measrep_basic *meas_rep_basic)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+
+	if ((meas_rep_basic->basic_report & IEEE80211_MEASURE_BASIC_REPORT_RADAR) &&
+			(meas_rep_basic->chan_num == ieee80211_chan2ieee(ic, ic->ic_bsschan)))
+		ic->ic_radar_detected(ic, 0);
+}
+/*
+ * Extract the version-dependent fields of a received Quantenna vendor IE
+ * into the node: implicit BA parameters (v3+, HT modes only), VSP version
+ * (v4+) and peer software/hardware version info (v5+, zeroed otherwise).
+ */
+static void
+ieee80211_input_qtnie_common(struct ieee80211_node *ni, struct ieee80211_ie_qtn *qtnie)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	ni->ni_vsp_version = IEEE80211_QTN_VSP_V_NONE;
+
+	if (IEEE80211_QTN_IE_GE_V3(qtnie) && (ic->ic_curmode >= IEEE80211_MODE_11NA)) {
+		ni->ni_implicit_ba = qtnie->qtn_ie_implicit_ba_tid_h;
+		ni->ni_implicit_ba_size = qtnie->qtn_ie_implicit_ba_size;
+		/* IE carries the BA size pre-shifted down; restore it */
+		ni->ni_implicit_ba_size = ni->ni_implicit_ba_size << IEEE80211_QTN_IE_BA_SIZE_SH;
+	}
+
+	if (IEEE80211_QTN_IE_GE_V4(qtnie)) {
+		ni->ni_vsp_version = qtnie->qtn_ie_vsp_version;
+	}
+
+	/* v5+ carries peer version info; fields may be unaligned in the IE */
+	if (IEEE80211_QTN_IE_GE_V5(qtnie)) {
+		ni->ni_ver_sw = ntohl(get_unaligned(&qtnie->qtn_ie_ver_sw));
+		ni->ni_ver_hw = ntohs(get_unaligned(&qtnie->qtn_ie_ver_hw));
+		ni->ni_ver_platform_id = ntohs(get_unaligned(&qtnie->qtn_ie_ver_platform_id));
+		ni->ni_ver_timestamp = ntohl(get_unaligned(&qtnie->qtn_ie_ver_timestamp));
+		ni->ni_ver_flags = ntohl(get_unaligned(&qtnie->qtn_ie_ver_flags));
+	} else {
+		ni->ni_ver_sw = 0;
+		ni->ni_ver_hw = 0;
+		ni->ni_ver_platform_id = 0;
+		ni->ni_ver_timestamp = 0;
+		ni->ni_ver_flags = 0;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+		"[%pM] QTN IE flags 0x%x ba %u sw " DBGFMT_BYTEFLD4_P
+		" hw 0x%x plat %u ts %u ver_flags 0x%08x\n",
+		ni->ni_macaddr,
+		qtnie->qtn_ie_my_flags,
+		ni->ni_implicit_ba_size,
+		DBGFMT_BYTEFLD4_V(ni->ni_ver_sw),
+		ni->ni_ver_hw,
+		ni->ni_ver_platform_id,
+		ni->ni_ver_timestamp,
+		ni->ni_ver_flags);
+}
+
+/*
+ * Handle the Quantenna vendor IE (or its absence) in an association request
+ * on the AP side.  Saves the IE on the node, enforces bridge-mode
+ * consistency (kicking the STA if it requested bridge mode we do not
+ * advertise), then extracts implicit-BA and 4-address LNCB state.
+ */
+static void
+ieee80211_input_assoc_req_qtnie(struct ieee80211_node *ni, struct ieee80211vap *vap,
+				struct ieee80211_ie_qtn *qtnie)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t ver = 0;
+
+	if (qtnie == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"%s: No QTN IE in assoc req\n", __func__);
+		/* Flush any state from a previous association */
+		/* NOTE(review): assumes FREE() tolerates a NULL pointer - confirm */
+		FREE(ni->ni_qtn_assoc_ie, M_DEVBUF);
+		ni->ni_qtn_assoc_ie = NULL;
+		return;
+	}
+
+	/* Work out the IE version purely for the debug log below */
+	if (vap->iv_debug & IEEE80211_MSG_ASSOC) {
+		if (IEEE80211_QTN_IE_GE_V5(qtnie))
+			ver = 5;
+		else if (IEEE80211_QTN_IE_GE_V4(qtnie))
+			ver = 4;
+		else if (IEEE80211_QTN_IE_GE_V3(qtnie))
+			ver = 3;
+		else if (IEEE80211_QTN_IE_GE_V2(qtnie))
+			ver = 2;
+		else
+			ver = 1;
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"%s: Received QTN IE v%u in assoc req, flags=%02x %02x\n",
+			__func__, ver, qtnie->qtn_ie_flags,
+			IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie) ?  0x0 : qtnie->qtn_ie_my_flags);
+	}
+
+	/* Keep a copy of the IE on the node for later reference */
+	ieee80211_saveie(&ni->ni_qtn_assoc_ie, (u_int8_t *)qtnie);
+
+	/*
+	 * If the station requested bridge mode but it is not advertised,
+	 * restart.  This could happen if the client is using a stale
+	 * beacon.
+	 */
+	if ((qtnie->qtn_ie_flags & IEEE80211_QTN_BRIDGEMODE) &&
+		!(vap->iv_flags_ext & IEEE80211_FEXT_WDS)) {
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"%s: Bridge mode mismatch - restarting, flags=%02x\n", __func__,
+			qtnie->qtn_ie_flags);
+		ieee80211_node_leave(ni);
+		vap->iv_stats.is_rx_assoc_capmismatch++;
+		return;
+	}
+
+	ni->ni_implicit_ba = 0;
+
+	/* Implicit BA flags for the STA */
+	if (IEEE80211_QTN_IE_GE_V2(qtnie) && (ic->ic_curmode >= IEEE80211_MODE_11NA)) {
+		ni->ni_implicit_ba_valid = 1;
+		ni->ni_implicit_ba = qtnie->qtn_ie_implicit_ba_tid;
+	}
+	ni->ni_lncb_4addr = 0;
+
+	/* Legacy Envy IEs carry none of the remaining fields */
+	if (IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie)) {
+		return;
+	}
+
+	/* See whether to do 4 address LNCB encapsulation */
+	if (qtnie->qtn_ie_my_flags & IEEE80211_QTN_LNCB) {
+		ni->ni_lncb_4addr = 1;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"Client " DBGMACVAR " supports 4 addr LNCB\n", DBGMACFMT(ni->ni_macaddr));
+	}
+	/* 4-address delivery needs this AP to be a bridge (WDS) */
+	if (!(vap->iv_flags_ext & IEEE80211_FEXT_WDS)) {
+		ni->ni_lncb_4addr = 0;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"Client " DBGMACVAR " 4 addr flag cleared - I'm not a bridge\n",
+			DBGMACFMT(ni->ni_macaddr));
+	}
+
+	ieee80211_input_qtnie_common(ni, qtnie);
+}
+
+/*
+ * STA side: handle the Quantenna vendor IE in an association response.
+ * Works out whether 4-address LNCB encapsulation may be used with this AP
+ * and then applies the common version-dependent IE parsing.
+ */
+static void
+ieee80211_input_assoc_resp_qtnie(struct ieee80211_node *ni, struct ieee80211vap *vap,
+				struct ieee80211_ie_qtn *qtnie)
+{
+	uint8_t peer_flags;
+
+	if (qtnie == NULL)
+		return;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+		"%s: Received QTN IE v%u in assoc resp, flags=%02x %02x\n",
+		__func__,
+		IEEE80211_QTN_TYPE_ENVY(qtnie) ? 1 : 2,
+		qtnie->qtn_ie_flags,
+		IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie) ? 0x0 : qtnie->qtn_ie_my_flags);
+
+	/* Default: no 4-address LNCB until proven otherwise */
+	ni->ni_lncb_4addr = 0;
+
+	/* Legacy Envy IEs carry none of the remaining fields */
+	if (IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie))
+		return;
+
+	peer_flags = qtnie->qtn_ie_my_flags;
+
+	/* 4-address LNCB requires the AP to both support it ... */
+	if (peer_flags & IEEE80211_QTN_LNCB) {
+		ni->ni_lncb_4addr = 1;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"AP " DBGMACVAR " supports 4 addr LNCB\n",
+			DBGMACFMT(ni->ni_macaddr));
+	}
+	/* ... and to be operating as a bridge */
+	if (!(peer_flags & IEEE80211_QTN_BRIDGEMODE)) {
+		ni->ni_lncb_4addr = 0;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"AP " DBGMACVAR " is not a bridge - clearing LNCB flag\n",
+			DBGMACFMT(ni->ni_macaddr));
+	}
+
+	ieee80211_input_qtnie_common(ni, qtnie);
+}
+
+/*
+ * Handle the Quantenna vendor IE carried in a TDLS action frame.
+ *
+ * Returns 1 when nothing beyond the basic fields could be parsed (no IE
+ * present, or a legacy Envy peer); returns 0 after the full common IE
+ * parse has been applied to the node.
+ */
+int
+ieee80211_input_tdls_qtnie(struct ieee80211_node *ni, struct ieee80211vap *vap,
+				struct ieee80211_ie_qtn *qtnie)
+{
+	if (qtnie == NULL) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: No QTN IE in TDLS action\n", __func__);
+		/* Flush any state from a previous association */
+		/* NOTE(review): assumes FREE() tolerates a NULL pointer - confirm */
+		FREE(ni->ni_qtn_assoc_ie, M_DEVBUF);
+		ni->ni_qtn_assoc_ie = NULL;
+		return 1;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"%s: Received QTN IE v%u in TDLS action, flags=%02x %02x\n",
+		__func__,
+		IEEE80211_QTN_TYPE_ENVY(qtnie) ?  1 : 2,
+		qtnie->qtn_ie_flags,
+		IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie) ?  0x0 : qtnie->qtn_ie_my_flags);
+
+	/* Keep a copy of the IE on the node for later reference */
+	ieee80211_saveie(&ni->ni_qtn_assoc_ie, (u_int8_t *)qtnie);
+
+	ni->ni_implicit_ba = 0;
+	/* Implicit BA flags for the STA */
+	if (IEEE80211_QTN_IE_GE_V2(qtnie)) {
+		ni->ni_implicit_ba_valid = 1;
+		ni->ni_implicit_ba = qtnie->qtn_ie_implicit_ba_tid;
+	}
+
+	/* Legacy Envy IEs carry none of the remaining fields */
+	if (IEEE80211_QTN_TYPE_ENVY_LEGACY(qtnie)) {
+		return 1;
+	}
+
+	ieee80211_input_qtnie_common(ni, qtnie);
+
+	return 0;
+}
+
+#ifdef CONFIG_QVSP
+/*
+ * Apply the VSP configuration items carried in an association response
+ * VSP IE.  @vspie may be NULL (IE absent); @efrm points one past the last
+ * valid byte of the frame and bounds the variable-length item list.
+ *
+ * Fixes vs. original: the item pointer is only derived from vspie after
+ * the NULL check, and the bounds check now requires each item to lie
+ * *entirely* inside the frame (the old "(uint8_t *)item_p > efrm" test
+ * only checked the item's start address, so the last item could straddle
+ * the end of the frame and be read past efrm in the apply loop).
+ */
+static void
+ieee80211_input_assoc_resp_vspie(struct ieee80211vap *vap, struct ieee80211_ie_vsp *vspie,
+				uint8_t *efrm)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_ie_vsp_item *item_p;
+	int i;
+
+	if ((vspie == NULL) || !ic->ic_vsp_configure) {
+		return;
+	}
+
+	/* Validate the whole list before applying anything (all-or-nothing) */
+	item_p = &vspie->item[0];
+	for (i = 0; i < vspie->item_cnt; i++) {
+		if ((uint8_t *)(item_p + 1) > efrm) {
+			printk(KERN_INFO "VSP: invalid count in assoc resp IE\n");
+			return;
+		}
+		item_p++;
+	}
+
+	/* All items verified - apply them.
+	 * NOTE(review): index is passed raw while value gets ntohl() -
+	 * presumably index is a single octet; confirm against the IE layout. */
+	item_p = &vspie->item[0];
+	for (i = 0; i < vspie->item_cnt; i++) {
+		ic->ic_vsp_configure(ic, item_p->index, ntohl(item_p->value));
+		item_p++;
+	}
+}
+
+/*
+ * Dispatch a received Quantenna VSP vendor-specific action frame.
+ * @frm points at the VSP action header, @efrm one past the last valid byte.
+ *
+ * Fixes vs. original: the default case logged qa->type (always
+ * QVSP_ACTION_TYPE_VSP here) instead of the unknown qa->action; the
+ * per-item bound checks used ">= efrm" on the item start, letting an item
+ * that begins just before efrm be read partially out of bounds; the
+ * VSP_CTRL case had a statement before its declarations (invalid C90);
+ * and two DPRINTF calls passed a stray trailing 0 with no format specifier.
+ */
+static void ieee80211_recv_action_vsp(struct ieee80211_node *ni, uint8_t *frm, uint8_t *efrm)
+{
+	static const u_int8_t q_oui[3] =
+		{QTN_OUI & 0xff, (QTN_OUI >> 8) & 0xff, (QTN_OUI >> 16) & 0xff};
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_qvsp_act_header_s *qa = (struct ieee80211_qvsp_act_header_s *)frm;
+
+	/* Only handle frames carrying the Quantenna OUI and the VSP type */
+	if (memcmp(q_oui, qa->oui, sizeof(q_oui)) || qa->type != QVSP_ACTION_TYPE_VSP) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+			"Not Quantenna VSP action frame (%02x%02x%02x %u %u)\n",
+			qa->oui[0], qa->oui[1], qa->oui[2], qa->type, qa->action);
+		return;
+	}
+
+	switch (qa->action) {
+	case QVSP_ACTION_STRM_CTRL: {
+		struct ieee80211_qvsp_act_strm_ctrl_s *qsc =
+				(struct ieee80211_qvsp_act_strm_ctrl_s *)qa;
+		struct ieee80211_qvsp_strm_id *qsci = &qsc->strm_items[0];
+		struct ieee80211_qvsp_strm_dis_attr attr;
+		int i;
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION, "VSP: received strm ctrl frame\n");
+		/* NOTE(review): this only rejects an empty body; it does not
+		 * verify the fixed header itself fits before efrm - confirm
+		 * callers guarantee a minimum action frame length. */
+		if (frm >= efrm) {
+			printk("VSP: strm ctrl frame overflow");
+			return;
+		}
+		if (qsc->count == 0) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+				"VSP: invalid stream count (%u)\n", qsc->count);
+			return;
+		}
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+			"VSP: set state to %u for %u streams\n",
+			qsc->strm_state, qsc->count);
+
+		if (!ic->ic_vsp_strm_state_set) {
+			return;
+		}
+
+		attr.throt_policy = qsc->dis_attr.throt_policy;
+		attr.throt_rate = qsc->dis_attr.throt_rate;
+		attr.demote_rule = qsc->dis_attr.demote_rule;
+		attr.demote_state = qsc->dis_attr.demote_state;
+		for (i = 0; i < qsc->count; i++) {
+			/* Each stream item must lie entirely within the frame */
+			if ((uint8_t *)(qsci + 1) > efrm) {
+				printk(KERN_WARNING "VSP: Frame overflow on input - discarding\n");
+				return;
+			}
+			ic->ic_vsp_strm_state_set(ic, qsc->strm_state, qsci, &attr);
+			qsci++;
+		}
+		break;
+	}
+	case QVSP_ACTION_VSP_CTRL: {
+		/* Declarations first - a statement before them is not valid C90 */
+		struct ieee80211_qvsp_act_vsp_ctrl_s *qsc =
+					(struct ieee80211_qvsp_act_vsp_ctrl_s *)qa;
+		struct ieee80211_qvsp_act_vsp_ctrl_item_s *qsci = &qsc->ctrl_items[0];
+		int i;
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+			"VSP: Received VSP_ACTION_VSP_CTRL frame\n");
+
+		if (frm >= efrm) {
+			printk("VSP: ctrl frame overflow");
+			return;
+		}
+
+		if (qsc->count) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+				"VSP: %u config items\n", qsc->count);
+		}
+
+		if (!ic->ic_vsp_configure) {
+			return;
+		}
+
+		for (i = 0; i < qsc->count; i++) {
+			/* Each config item must lie entirely within the frame */
+			if ((uint8_t *)(qsci + 1) > efrm) {
+				printk(KERN_WARNING "VSP: Frame overflow on input - discarding\n");
+				return;
+			}
+			ic->ic_vsp_configure(ic, ntohl(qsci->index), ntohl(qsci->value));
+			qsci++;
+		}
+		break;
+	}
+	default:
+		/* Log the unknown action code (was mistakenly logging qa->type) */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+			"VSP: Unsupported VSP action type: %d\n", qa->action);
+		break;
+	}
+}
+#endif
+
+/*
+ * Handle an incoming 802.11w SA Query action frame.
+ * A request is answered immediately with a response echoing the
+ * transaction id; a response either matches our outstanding query
+ * (clearing the timeout) or restarts the timeout window.
+ */
+void ieee80211_recv_action_sa_query(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211_action_sa_query *sa_query = (struct ieee80211_action_sa_query *)ia;
+	uint16_t tid = ntohs(sa_query->at_tid);
+
+	if (ia->ia_action == IEEE80211_ACTION_W_SA_QUERY_REQ) {
+		/* Echo the transaction id back in a response frame */
+		ieee80211_send_sa_query(ni, IEEE80211_ACTION_W_SA_QUERY_RESP, tid);
+	} else if (ia->ia_action == IEEE80211_ACTION_W_SA_QUERY_RESP) {
+		/* A response older than one second is treated as stale */
+		int stale = (ni->ni_sa_query_timeout &&
+			time_after(jiffies, (ni->ni_sa_query_timeout + HZ)));
+
+		if (tid != ni->ni_sa_query_tid || stale)
+			ni->ni_sa_query_timeout = jiffies;
+		else
+			ni->ni_sa_query_timeout = 0;
+	}
+	/* All other SA Query action codes are ignored */
+}
+
+/*
+ * Handle an HT Action frame: channel width notification, SM power save
+ * mode changes, and non-compressed beamforming.  frm/efrm bound the
+ * action body.  NOTE(review): IEEE80211_VERIFY_LENGTH presumably bails
+ * out of this function on a short frame - macro defined elsewhere.
+ */
+void
+ieee80211_action_ht(struct ieee80211_node *ni, struct sk_buff *skb,
+			struct ieee80211_frame *wh, int subtype,
+			struct ieee80211_action *ia, u_int8_t *frm, u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_action_ht_txchwidth *iachwidth;
+	enum ieee80211_cwm_width  chwidth;
+	u_int8_t new_val;
+	int new_mode;
+
+	switch (ia->ia_action) {
+	case IEEE80211_ACTION_HT_TXCHWIDTH:
+		IEEE80211_VERIFY_LENGTH(efrm - frm,
+			sizeof(struct ieee80211_action_ht_txchwidth));
+
+		iachwidth = (struct ieee80211_action_ht_txchwidth *) (void*)frm;
+		chwidth = (iachwidth->at_chwidth == IEEE80211_A_HT_TXCHWIDTH_2040) ?
+				IEEE80211_CWM_WIDTH40 : IEEE80211_CWM_WIDTH20;
+
+		/* Check for channel width change */
+		if (chwidth != ni->ni_chwidth) {
+			ni->ni_newchwidth = 1;
+		}
+
+		/* update node's recommended tx channel width */
+		ni->ni_chwidth = chwidth;
+		break;
+	case IEEE80211_ACTION_HT_MIMOPWRSAVE:
+		/*
+		 * Parsing of the input SM PS action frame. This moves the station in and out
+		 * of SM Power Save and also changes the mode when enabled (dynamic, static).
+		 */
+		new_mode = IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE;
+		IEEE80211_VERIFY_LENGTH(efrm - frm,
+			sizeof(struct ieee80211_action_ht_mimopowersave));
+		frm += 2; /* action type, action category */
+		new_val = *frm;
+		/* Bit 0 - enabled/disabled */
+		if (new_val & 0x1) {
+			/* Bit 1 - static/dynamic */
+			if (new_val & 0x2) {
+				new_mode = IEEE80211_HTCAP_C_MIMOPWRSAVE_DYNAMIC;
+			} else {
+				new_mode = IEEE80211_HTCAP_C_MIMOPWRSAVE_STATIC;
+			}
+		} else {
+			/* Disabled - don't care what the bit 1 value says */
+			new_mode = IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE;
+		}
+		/* Change the mode with the MUC if different from current mode */
+		if (new_mode != ni->ni_htcap.pwrsave) {
+			/* Inform the MUC */
+			if (ic->ic_smps != NULL) {
+				(*ic->ic_smps)(ni, new_mode);
+			}
+			ni->ni_htcap.pwrsave = new_mode;
+		}
+		break;
+	case IEEE80211_ACTION_HT_NCBEAMFORMING:
+		IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ht_bf));
+
+		/* Call the driver to do some stuff if it wants to */
+		if (ic->ic_ncbeamforming != NULL) {
+			(*ic->ic_ncbeamforming)(ni, skb);
+		}
+		break;
+	default:
+		/* Unknown HT action - count it and ignore */
+		vap->iv_stats.is_rx_mgtdiscard++;
+		break;
+	}
+}
+
+/*
+ * True if the station is probably an Intel 5100 or 5300 device:
+ * identified as Intel, but without RX STBC support in its HT caps.
+ */
+static inline int
+ieee80211_is_intel_old(struct ieee80211_node *ni, uint16_t peer_cap)
+{
+	if (!ieee80211_node_is_intel(ni))
+		return 0;
+
+	return (peer_cap & IEEE80211_HTCAP_C_RXSTBC) == 0;
+}
+
+/*
+ * Intel client type identification:
+ * 620x:
+ *   2x2, MCS 32, no TX STBC support, but support RX STBC: Action: send HT20 Channel Width Notification
+ * 5100/5300:
+ *   2x2(5100) or 3x3 (5300), MCS 32, no TX STBC support and no RX STBC support: Action: send HT20 Channel Width Notification,
+ *   restrict to use 2 TX Chains, and use LGI for TX.
+ * 6300:
+ *   support 3x3, MCS 32, no TX STBC and SUPPORT RX STBC: treat it as normal client, Action: none
+ *
+ * Apply per-vendor interoperability workarounds when a TX BA is being
+ * set up with this node: trim advertised MCS sets, veto LDPC/A-MSDU/BBF,
+ * and reject or restrict the BA itself for known-problematic peers.
+ */
+static void
+ieee80211_blacklist_ba(struct ieee80211_node *ni, u_int8_t tid)
+{
+	struct shared_params *params = qtn_mproc_sync_shared_params_get();
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	u_int16_t peer_cap = IEEE80211_HTCAP_CAPABILITIES(&ni->ni_ie_htcap);
+
+	/* Strip 3SS MCS rates for Broadcom / Ralink peers when so tweaked */
+	if ((ni->ni_qtn_flags & QTN_IS_BCM_NODE) && (params->iot_tweaks & QTN_IOT_BCM_NO_3SS_MCS_TWEAK)) {
+		ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] = 0x00;
+	} else if ((ni->ni_vendor == PEER_VENDOR_RLNK) && (params->iot_tweaks & QTN_IOT_RLNK_NO_3SS_MCS_TWEAK) && !(IEEE80211_NODE_IS_VHT(ni))) {
+		ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] = 0x00;
+	}
+	/* WAR: 11n LDPC in IOT mode is allowed only if:
+	        iwpriv wifi0 set_ldpc_non_qtn 1
+		Node is BRCM
+		BRCM node indicated support for LDPC  */
+	if (!(vap->iv_ht_flags & IEEE80211_HTF_LDPC_ALLOW_NON_QTN) &&
+		(ni->ni_qtn_assoc_ie == NULL) &&
+		!(ni->ni_qtn_flags & QTN_IS_BCM_NODE) &&
+		(ni->ni_htcap.cap & IEEE80211_HTCAP_C_LDPCCODING)) {
+		/* Keep LDPC disabled for non-QTN, non-BRCM devices for now */
+		ni->ni_htcap.cap &= ~IEEE80211_HTCAP_C_LDPCCODING;
+	}
+
+	/* BCM tweak: refuse the TX BA outright and block further attempts */
+	if ((ni->ni_qtn_flags & QTN_IS_BCM_NODE) && (params->iot_tweaks & QTN_IOT_BCM_TWEAK)) {
+		ieee80211_note(ni->ni_vap, "TX BA rejected for BCM client %s\n",
+				ether_sprintf(ni->ni_macaddr));
+		ieee80211_send_delba(ni, tid, 0, IEEE80211_REASON_STA_NOT_USE);
+		ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_BLOCKED, 0);
+		return;
+	}
+
+	/*
+	 * For Realtek devices, disable A-MSDU since we got better performance without A-MSDU,
+	 * but this condition may be changed in future
+	 */
+	if ((ni->ni_qtn_flags & QTN_IS_REALTEK_NODE) && (params->iot_tweaks & QTN_IOT_RTK_NO_AMSDU_TWEAK)) {
+		ni->ni_ba_tx[tid].flags &= ~QTN_BA_ARGS_F_AMSDU;
+	}
+
+	if (ieee80211_is_intel_old(ni, peer_cap)) {
+		/*
+		 * Old Intel devices do not Ack BBF QoS null frames. This can
+		 * cause unstability and disconnections.
+		 */
+		ni->ni_bbf_disallowed = 1;
+	}
+
+	/* Try to identify problem peers - may cause other peer stations to be blacklisted */
+	if ((((params->iot_tweaks & QTN_IOT_INTEL5100_TWEAK) && !(ni->ni_htcap.cap & IEEE80211_HTCAP_C_RXSTBC)) ||
+			((params->iot_tweaks & QTN_IOT_INTEL6200_TWEAK) && (ni->ni_htcap.cap & IEEE80211_HTCAP_C_RXSTBC))) &&
+			(ni->ni_qtn_assoc_ie == NULL) && !(ni->ni_qtn_flags & QTN_IS_BCM_NODE) &&
+			(ni->ni_htcap.mpduspacing == 5) &&
+			!(peer_cap & IEEE80211_HTCAP_C_TXSTBC) &&
+			(peer_cap & IEEE80211_HTCAP_C_SHORTGI20) &&
+			((ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] & 0x1) == 0x01) && /* Some Intel firmware versions support MCS 32 */
+			(ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2] == 0) &&
+			(ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] == 0) &&
+			(ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4] == 0) &&
+			(ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5] == 0) &&
+			(ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6] == 0)) {
+		/* Currently disabling BBF to intel. With BBF, it periodically sends delba every 30 seconds */
+		ni->ni_bbf_disallowed = 1;
+		/* TX BA does not work with Intel 5100 when running on HT40 mode */
+		if (params->iot_tweaks & QTN_IOT_INTEL_SEND_NCW_ACTION) {
+			ieee80211_note(ni->ni_vap, "TX Notify Chan Width Action to STA %s\n",
+				       ether_sprintf(ni->ni_macaddr));
+			ic->ic_send_notify_chan_width_action(ni->ni_vap, ni, 0);
+		} else if (get_hardware_revision() <= HARDWARE_REVISION_RUBY_D) {
+			ieee80211_note(ni->ni_vap, "TX BA rejected for incompatible peer %s\n",
+				       ether_sprintf(ni->ni_macaddr));
+			ieee80211_send_delba(ni, tid, 0, IEEE80211_REASON_STA_NOT_USE);
+			ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_BLOCKED, 0);
+		}
+
+		/* No RX STBC: distinguish 5100 (no 3SS MCS) from 5300 */
+		if (!(peer_cap & IEEE80211_HTCAP_C_RXSTBC)) {
+			if (ni->ni_associd && (ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] == 0)) {
+				ni->ni_qtn_flags |= QTN_IS_INTEL_5100_NODE;
+			} else {
+				ni->ni_qtn_flags |= QTN_IS_INTEL_5300_NODE;
+			}
+
+			/* tell Muc only to use two 2 TX chain for Intel 5x00 client
+			 * and turn off SGI for TX to them
+			 */
+			if (params->iot_tweaks & QTN_IOT_INTEL_NOAGG2TXCHAIN_TWEAK) {
+				ni->ni_flags |= IEEE80211_NODE_2_TX_CHAINS;
+				ieee80211_note(ni->ni_vap, "STA %s is Intel %s: disable TX side of SGI and restrict to 2 TX chains\n",
+					       ether_sprintf(ni->ni_macaddr),
+					       (ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] == 0xFF) ? "5300" : "5100");
+				/* disable 5100 aggregation */
+				if ((ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] == 0) && ni->ni_associd) {
+					ieee80211_note(ni->ni_vap, "TX BA rejected for Intel 5100 %s\n",
+						       ether_sprintf(ni->ni_macaddr));
+					ieee80211_send_delba(ni, tid, 0, IEEE80211_REASON_STA_NOT_USE);
+					ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_BLOCKED, 0);
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Return the largest power of two that is <= bsize; 0 maps to 0.
+ * Used to round a peer's advertised BA buffer size down to a power of 2.
+ */
+static inline u_int16_t lower_power_of_2(u_int16_t bsize)
+{
+	u_int16_t pow2;
+
+	if (bsize == 0)
+		return 0;
+
+	/* Grow a single set bit until doubling it would exceed bsize */
+	pow2 = 1;
+	while ((pow2 << 1) <= bsize)
+		pow2 <<= 1;
+
+	return pow2;
+}
+
+/*
+ * Decide whether an incoming ADDBA request for this TID may be accepted.
+ * Returns 1 to accept, 0 to decline.
+ */
+static __inline int ieee80211_action_ba_permitted(struct ieee80211_node *ni, u_int8_t tid)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	int intel_5x00 = (ni->ni_qtn_flags &
+			(QTN_IS_INTEL_5300_NODE | QTN_IS_INTEL_5100_NODE)) != 0;
+
+	/* Administratively declined, TID masked out, or not immediate BA */
+	if (vap->rx_ba_decline ||
+			!(vap->iv_ba_control & (1 << tid)) ||
+			ni->ni_ba_rx[tid].type != IEEE80211_BA_IMMEDIATE)
+		return 0;
+
+	/* Intel 5x00 clients are unreliable with BA on early Ruby silicon */
+	if (intel_5x00 && (get_hardware_revision() <= HARDWARE_REVISION_RUBY_D))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Identify the client during BA setup.
+ * Some IOT clients (e.g. certain Intel 6205 firmware) cannot be identified
+ * at association time but can be from BA setup, so push the updated node
+ * information to the MuC/AuC here if the vendor changed.
+ */
+static void
+ieee80211_node_identify_ba(struct ieee80211com *ic, struct ieee80211_node *ni, uint8_t tid)
+{
+	int vendor_updated = 0;
+
+	if (ieee80211_node_is_intel(ni) &&
+			!(ni->ni_qtn_flags & QTN_IS_INTEL_NODE)) {
+		ni->ni_vendor = PEER_VENDOR_INTEL;
+		vendor_updated = 1;
+	}
+
+	/* Let it decide whether to use any tweaks such as disabling BBF */
+	ieee80211_blacklist_ba(ni, tid);
+
+	/* Update into MuC/AuC if the vendor changed */
+	if (vendor_updated)
+		ic->ic_node_update(ni);
+}
+
+/*
+ * Handle a Block Acknowledgement Action frame (ADDBA request/response,
+ * DELBA).  frm/efrm bound the action body; LE_READ_2 pulls little-endian
+ * fields straight out of the frame as frm is walked forward.
+ * NOTE(review): IEEE80211_VERIFY_LENGTH presumably bails out on a short
+ * frame - macro defined elsewhere.
+ */
+static void
+ieee80211_action_ba(struct ieee80211_node *ni, struct ieee80211_frame *wh, int subtype,
+			struct ieee80211_action *ia, u_int8_t *frm, u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	u_int8_t tid;
+	u_int8_t initiator;
+	u_int16_t reason;
+	u_int16_t temp16;
+	struct ba_action_resp ba_req_resp;
+	struct ieee80211_action_ba_addba_resp *ba_resp;
+	struct ieee80211_action_data act;
+#ifdef CONFIG_QVSP
+	struct ieee80211_ba_throt *ba_throt;
+#endif
+
+	switch (ia->ia_action) {
+	case IEEE80211_ACTION_BA_ADDBA_REQ:
+		IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ba_addba_req));
+		frm += 2; /* action type = 1 octet and category = 1 octet */
+
+		temp16 = LE_READ_2((frm + 1)); /* ba parameter field is 1 octet ahead */
+		tid = ((temp16) & IEEE80211_A_BA_TID_M) >> IEEE80211_A_BA_TID_S;
+#ifdef CONFIG_QVSP
+		/* VSP rate-limiting: drop ADDBA REQs arriving within the
+		 * throttle interval of the last accepted setup for this TID */
+		ba_throt = &ni->ni_ba_rx[tid].ba_throt;
+		if (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (ba_throt->throt_intv &&
+				ba_throt->last_setup_jiffies &&
+				time_before(jiffies, (ba_throt->last_setup_jiffies +
+						      msecs_to_jiffies(ba_throt->throt_intv)))) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_VSP,
+					"VSP: discard ADDBA REQ from node %u tid %d\n",
+					IEEE80211_AID(ni->ni_associd), tid);
+				break;
+			}
+		}
+#endif
+		/* Record the requested RX BA parameters on the node */
+		ni->ni_ba_rx[tid].dlg_in = *frm;
+		ni->ni_ba_rx[tid].type = (temp16 & IEEE80211_A_BA_IMMEDIATE) ? IEEE80211_BA_IMMEDIATE :
+						IEEE80211_BA_DELAYED;
+		ni->ni_ba_rx[tid].flags = (temp16 & IEEE80211_A_BA_AMSDU_SUPPORTED) ? QTN_BA_ARGS_F_AMSDU : 0;
+		/* Round the requested window down to a power of two */
+		ni->ni_ba_rx[tid].buff_size =
+			lower_power_of_2((((temp16) & IEEE80211_A_BA_BUFF_SIZE_M) >> IEEE80211_A_BA_BUFF_SIZE_S));
+#ifdef CONFIG_QVSP
+		ni->ni_ba_rx[tid].ba_throt.unthroted_win_size = ni->ni_ba_rx[tid].buff_size;
+#endif
+		if (ni->ni_vap->iv_opmode == IEEE80211_M_WDS && tid == IEEE80211_WDS_LINK_MAINTAIN_BA_TID &&
+				ni->ni_qtn_assoc_ie) {
+			/* For WDS, use larger buffer size for TID 0 to get more throughput */
+			if ((ni->ni_ba_rx[tid].buff_size == 0) ||
+					(ni->ni_ba_rx[tid].buff_size > IEEE80211_DEFAULT_BA_WINSIZE_H)) {
+				ni->ni_ba_rx[tid].buff_size = IEEE80211_DEFAULT_BA_WINSIZE_H;
+			}
+		} else {
+			/* Clamp to the vap limit (0 means "peer has no preference") */
+			if ((ni->ni_ba_rx[tid].buff_size == 0) ||
+					(ni->ni_ba_rx[tid].buff_size > vap->iv_max_ba_win_size)) {
+				ni->ni_ba_rx[tid].buff_size = vap->iv_max_ba_win_size;
+			}
+#ifdef CONFIG_QVSP
+			if (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				if (ba_throt->throt_win_size &&
+					(ba_throt->throt_win_size < ni->ni_ba_rx[tid].buff_size)) {
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_VSP,
+						"VSP: limit node %u tid %d BA winsize from %u to %u\n",
+						IEEE80211_AID(ni->ni_associd), tid, ni->ni_ba_rx[tid].buff_size,
+						ba_throt->throt_win_size);
+					ni->ni_ba_rx[tid].buff_size = ba_throt->throt_win_size;
+				}
+			}
+#endif
+		}
+
+		frm += 3; /* dialog = 1 octet and ba parameters = 2 octets */
+		temp16 = LE_READ_2((frm));
+		/* BA timeout is always forced to 0 (no inactivity timeout) */
+		ni->ni_ba_rx[tid].timeout = temp16;
+		if(ni->ni_ba_rx[tid].timeout != 0) {
+			ni->ni_ba_rx[tid].timeout = 0;
+		}
+
+		frm += 2; /* timeout = 2 octets */
+		temp16 = LE_READ_2((frm));
+		ni->ni_ba_rx[tid].frag = (temp16 & IEEE80211_A_BA_FRAG_M);
+		ni->ni_ba_rx[tid].seq = (temp16 & IEEE80211_A_BA_SEQ_M) >> IEEE80211_A_BA_SEQ_S;
+
+		/* Accept or decline, then answer with an ADDBA RESP below */
+		if (ieee80211_action_ba_permitted(ni, tid)) {
+			ni->ni_ba_rx[tid].state = IEEE80211_BA_ESTABLISHED;
+			ba_req_resp.reason = IEEE80211_STATUS_SUCCESS;
+			IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni,
+				"block ack requested by peer tid accepted: %u size %u seq %u",
+				tid, ni->ni_ba_rx[tid].buff_size, ni->ni_ba_rx[tid].seq);
+			if (ni->ni_vap->iv_opmode == IEEE80211_M_WDS) {
+		                ic->ic_pm_reason = IEEE80211_PM_LEVEL_RCVD_ADDBA_REQ;
+				ieee80211_pm_queue_work(ic);
+			}
+		} else {
+			ni->ni_ba_rx[tid].state = IEEE80211_BA_BLOCKED;
+			ba_req_resp.reason = IEEE80211_STATUS_PEER_MECHANISM_REJECT;
+			IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni,
+				"block ack requested by peer tid denied: %u size %u seq %u",
+				tid, ni->ni_ba_rx[tid].buff_size, ni->ni_ba_rx[tid].seq);
+		}
+
+#ifdef CONFIG_QVSP
+		ba_throt->last_setup_jiffies = jiffies;
+#endif
+
+		/* Call the driver to inform the MuC */
+		if (ic->ic_htaddba != NULL) {
+			(*ic->ic_htaddba)(ni, tid, 0);
+		}
+
+		/* send the response */
+		act.cat = IEEE80211_ACTION_CAT_BA;
+		act.action = IEEE80211_ACTION_BA_ADDBA_RESP;
+		ba_req_resp.type = ni->ni_ba_rx[tid].type;
+		ba_req_resp.tid = tid;
+		ba_req_resp.seq = ni->ni_ba_rx[tid].seq;
+		ba_req_resp.frag = ni->ni_ba_rx[tid].frag;
+		ba_req_resp.timeout = ni->ni_ba_rx[tid].timeout;
+		ba_req_resp.buff_size = ni->ni_ba_rx[tid].buff_size;
+
+		/* act is smuggled through the mgmt arg as an int (32-bit target) */
+		act.params = (void *)&ba_req_resp;
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&act);
+		break;
+	case IEEE80211_ACTION_BA_ADDBA_RESP:
+		IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ba_addba_resp));
+		ba_resp  = (struct ieee80211_action_ba_addba_resp *)(void*)frm;
+		frm += 2; /* action type = 1 octet and category = 1 octet */
+
+		temp16 = LE_READ_2((frm + 3)); /* parameter field is 3 octet ahead */
+		tid = ((temp16) & IEEE80211_A_BA_TID_M) >> IEEE80211_A_BA_TID_S;
+
+		/* Dialog token must match our outstanding ADDBA REQ */
+		if (ni->ni_ba_tx[tid].dlg_out != (*frm)) {
+			vap->iv_stats.is_rx_mgtdiscard++;
+			break;
+		}
+
+		frm += 1; /* dialog = 1 octet */
+
+		/* Status field: 0 = success, otherwise a refusal code */
+		temp16 = LE_READ_2(frm);
+		if (temp16 == 0) {
+			ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_ESTABLISHED, 0);
+			if (ni->ni_vap->iv_opmode == IEEE80211_M_WDS) {
+		                ic->ic_pm_reason = IEEE80211_PM_LEVEL_RCVD_ADDBA_RESP;
+				ieee80211_pm_queue_work(ic);
+			}
+		} else {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+				"block ack not allowed by peer due to %d",temp16);
+			/* Hard reject blocks the TID; other failures retry later */
+			if (temp16 == IEEE80211_STATUS_PEER_MECHANISM_REJECT) {
+				ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_BLOCKED, 0);
+			} else if (ni->ni_vap->iv_ba_control & (1 << tid)) {
+				ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_FAILED,
+					IEEE80211_TX_BA_REQUEST_RETRY_TIMEOUT);
+			} else {
+				ieee80211_node_tx_ba_set_state(ni, tid, IEEE80211_BA_BLOCKED, 0);
+			}
+			if (ic->ic_htaddba != NULL) {
+				(*ic->ic_htaddba)(ni, tid, 1);
+			}
+			break;
+		}
+
+		frm += 2; /* status = 2 octets */
+
+		temp16 = LE_READ_2(frm);
+		ni->ni_ba_tx[tid].type = (temp16 & IEEE80211_A_BA_IMMEDIATE)?1:0;
+		ni->ni_ba_tx[tid].buff_size =
+			lower_power_of_2((((temp16) & IEEE80211_A_BA_BUFF_SIZE_M) >> IEEE80211_A_BA_BUFF_SIZE_S));
+		ni->ni_ba_tx[tid].flags = (temp16 & IEEE80211_A_BA_AMSDU_SUPPORTED) ? QTN_BA_ARGS_F_AMSDU : 0;
+
+		if ((ni->ni_qtn_flags & QTN_IS_BCM_NODE) && !IEEE80211_NODE_IS_VHT(ni) &&
+			!ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3]) {
+			/*
+			 * IOT WAR: 1SS/2SS BCM devices that advertise window size as 32 have trouble
+			 * handling SW retries in between next AMPDU. Based on experiments, the results
+			 * are smooth if window size is set to 16. Scoreboard size in AuC should also be
+			 * 16 for these devices. The fix is mainly targeted to BCM 2x2 11n based ipads.
+			 */
+			ni->ni_ba_tx[tid].buff_size = MIN(ni->ni_ba_tx[tid].buff_size, 16);
+		}
+
+		if (ni->ni_qtn_flags & QTN_IS_GALAXY_NOTE_4_NODE) {
+			/* Disable AMSDU for these phones */
+			ni->ni_ba_tx[tid].flags &= ~QTN_BA_ARGS_F_AMSDU;
+		}
+
+		frm += 2; /* ba parameter = 2 octets */
+		temp16 = LE_READ_2(frm);
+		ni->ni_ba_tx[tid].timeout = temp16;
+
+		ieee80211_note_mac(vap,  wh->i_addr2,
+			"block ack allowed by peer tid: %d size %d type 0x%x flags 0x%x to %d",
+			 tid, ni->ni_ba_tx[tid].buff_size,
+			 ni->ni_ba_tx[tid].type, ni->ni_ba_tx[tid].flags, ni->ni_ba_tx[tid].timeout);
+
+		/* Late vendor identification + per-vendor tweaks */
+		ieee80211_node_identify_ba(ic, ni, tid);
+
+		/* Call the driver to do some stuff if it wants to */
+		if (ic->ic_htaddba != NULL) {
+			(*ic->ic_htaddba)(ni, tid, 1);
+		}
+		break;
+	case IEEE80211_ACTION_BA_DELBA:
+		IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action_ba_delba));
+		frm += 2; /* action type = 1 octet and category = 1 octet */
+
+		temp16 = LE_READ_2(frm);
+		tid = MS(temp16, IEEE80211_A_BA_DELBA_TID);
+		initiator = MS(temp16, IEEE80211_A_BA_INITIATOR);
+		frm += 2;
+
+		reason = LE_READ_2(frm);
+
+		/* Tear down the TX or RX agreement depending on who initiated */
+		if (tid < WME_NUM_TID) {
+			ieee80211_node_ba_del(ni, tid, !initiator, reason);
+		}
+
+		break;
+	}
+}
+
+void ieee80211_parse_measure_request(struct ieee80211_node *ni,
+		struct ieee80211_frame *wh,
+		u_int8_t category,
+		u_int8_t frame_token,
+		u_int8_t *frm,
+		u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_ie_measure_comm *meas_comm;
+	struct ieee80211_global_measure_info *meas_info = &ic->ic_measure_info;
+
+	if (sizeof(struct ieee80211_ie_measure_comm) > (efrm - frm)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "no enough data for measurement common field");
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return;
+	}
+	meas_comm = (struct ieee80211_ie_measure_comm*)frm;
+	frm += sizeof(*meas_comm);
+
+	if (meas_comm->id != IEEE80211_ELEMID_MEASREQ) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+				"mgt", "%s", "measurement request ID mismatch\n");
+		vap->iv_stats.is_rx_action++;
+		return;
+	}
+
+	switch (meas_comm->type) {
+	case IEEE80211_CCA_MEASTYPE_BASIC:
+	{
+		struct ieee80211_ie_measreq *meas_request;
+
+		if (sizeof(struct ieee80211_ie_measreq) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for measuremet request ie common field\n");
+			vap->iv_stats.is_rx_elem_toosmall++;
+			return;
+		}
+		meas_request = (struct ieee80211_ie_measreq *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq);
+
+		/* check current state */
+		if (meas_info->status != MEAS_STATUS_IDLE) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+				return;
+		}
+
+		/* send autonomous response if enable=1 report=1, which is not against the standard */
+		if ((meas_comm->mode & IEEE80211_CCA_REQMODE_ENABLE) &&
+				(meas_comm->mode & IEEE80211_CCA_REQMODE_REPORT))
+			meas_info->frame_token = 0;
+		else
+			meas_info->frame_token = frame_token;
+
+		meas_info->ni = ni;
+		meas_info->type = meas_comm->type;
+
+		meas_info->param.basic.channel = meas_request->chan_num;
+		*((u_int32_t *)&meas_info->param.basic.tsf) = BE_READ_4((u_int8_t *)&meas_request->start_tsf);
+		*((u_int32_t *)&meas_info->param.basic.tsf + 1) = BE_READ_4((u_int8_t *)&meas_request->start_tsf + 4);
+		meas_info->param.basic.duration_tu = ntohs(meas_request->duration_tu);
+
+		if (ieee80211_action_trigger_measurement(ic) != 0) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+		}
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_CCA:
+	{
+		/* Quantenna CCA Extension */
+		if (category == IEEE80211_ACTION_CAT_RM) {
+			if (ic->ic_scs.scs_stats_on) {
+				struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+				struct qtn_scs_info_set *scs_info_lh = sp->scs_info_lhost;
+				uint64_t tsf = 0;
+				uint16_t cca_try;
+				uint16_t cca_intf;
+
+				ic->ic_get_tsf(&tsf);
+
+				cca_try = (uint16_t)scs_info_lh->scs_info[scs_info_lh->valid_index].cca_try;
+				if (cca_try == 0) {
+					break;
+				}
+
+				cca_intf = (uint16_t)scs_info_lh->scs_info[scs_info_lh->valid_index].cca_interference;
+				/* scale before sending */
+				cca_intf = cca_intf * IEEE80211_SCS_CCA_INTF_SCALE / cca_try;
+				ieee80211_send_action_cca_report(ni, frame_token, cca_intf,
+						tsf, cca_try,
+						ic->ic_scs.scs_sp_err_smthed,
+						ic->ic_scs.scs_lp_err_smthed,
+						0, NULL, 0);	/*TODO: Do we need to send others time?  */
+				ieee80211_send_action_dfs_report(ni);
+			}
+		} else {
+			struct ieee80211_ie_measreq *meas_request;
+
+			if (sizeof(struct ieee80211_ie_measreq) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+						"mgmt", "%s", "no enough data for measuremet request ie common field\n");
+				vap->iv_stats.is_rx_elem_toosmall++;
+				return;
+			}
+			meas_request = (struct ieee80211_ie_measreq *)frm;
+			frm += sizeof(struct ieee80211_ie_measreq);
+
+			/* check current state */
+			if (meas_info->status != MEAS_STATUS_IDLE) {
+				ieee80211_action_measurement_report_fail(ni, meas_comm->type,
+					IEEE80211_CCA_REPMODE_REFUSE, frame_token, meas_comm->token);
+					return;
+			}
+
+			/* send autonomous response if enable=1 report=1, which is not against the standard */
+			if ((meas_comm->mode & IEEE80211_CCA_REQMODE_ENABLE) &&
+					(meas_comm->mode & IEEE80211_CCA_REQMODE_REPORT))
+				meas_info->frame_token = 0;
+			else
+				meas_info->frame_token = frame_token;
+
+			meas_info->ni = ni;
+			meas_info->type = meas_comm->type;
+
+			meas_info->param.cca.channel = meas_request->chan_num;
+			*((u_int32_t *)&meas_info->param.cca.tsf) = BE_READ_4((u_int8_t *)&meas_request->start_tsf);
+			*((u_int32_t *)&meas_info->param.cca.tsf + 1) = BE_READ_4((u_int8_t *)&meas_request->start_tsf + 4);
+			meas_info->param.cca.duration_tu = ntohs(meas_request->duration_tu);
+
+			if (ieee80211_action_trigger_measurement(ic) != 0) {
+				ieee80211_action_measurement_report_fail(ni,
+					meas_comm->type,
+					IEEE80211_CCA_REPMODE_REFUSE,
+					frame_token,
+					meas_comm->token);
+			}
+		}
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_RPI:
+	{
+		struct ieee80211_ie_measreq *meas_request;
+
+		if (sizeof(struct ieee80211_ie_measreq) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for measuremet request ie common field\n");
+			vap->iv_stats.is_rx_elem_toosmall++;
+			return;
+		}
+		meas_request = (struct ieee80211_ie_measreq *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq);
+
+		/* check current state */
+		if (meas_info->status != MEAS_STATUS_IDLE) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+				return;
+		}
+
+		/* send autonomous response if enable=1 report=1, which is not against the standard */
+		if ((meas_comm->mode & IEEE80211_CCA_REQMODE_ENABLE) &&
+				(meas_comm->mode & IEEE80211_CCA_REQMODE_REPORT))
+			meas_info->frame_token = 0;
+		else
+			meas_info->frame_token = frame_token;
+
+		meas_info->ni = ni;
+		meas_info->type = meas_comm->type;
+
+		meas_info->param.rpi.channel = meas_request->chan_num;
+		*((u_int32_t *)&meas_info->param.rpi.tsf) = BE_READ_4((u_int8_t *)&meas_request->start_tsf);
+		*((u_int32_t *)&meas_info->param.rpi.tsf + 1) = BE_READ_4((u_int8_t *)&meas_request->start_tsf + 4);
+		meas_info->param.rpi.duration_tu = ntohs(meas_request->duration_tu);
+
+		if (ieee80211_action_trigger_measurement(ic) != 0) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_STA:
+	{
+		struct ieee80211_ie_measreq_sta_stat *sta_stats_request;
+		u_int8_t duration;
+		u_int8_t group_id;
+		u_int8_t measure_token;
+		ieee80211_11k_sub_element_head se_head;
+		ieee80211_11k_sub_element *p_se;
+
+		measure_token = meas_comm->token;
+		/* sta statistics request field */
+		if (sizeof(struct ieee80211_ie_measreq_sta_stat) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for sta stats request field\n");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		sta_stats_request = (struct ieee80211_ie_measreq_sta_stat*)frm;
+		frm += sizeof(struct ieee80211_ie_measreq_sta_stat);
+
+		if (memcmp(vap->iv_myaddr, sta_stats_request->peer_mac, IEEE80211_ADDR_LEN)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "mgt", "%s-mac:%s", "Peer mac address un-match.",
+				ether_sprintf(sta_stats_request->peer_mac));
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+
+		duration = sta_stats_request->duration_tu;
+		group_id = sta_stats_request->group_id;
+
+		SLIST_INIT(&se_head);
+		/* sub element */
+		while (frm < efrm) {
+			switch (frm[0]) {
+			case IEEE80211_ELEMID_VENDOR:
+			{
+				struct ieee80211_ie_qtn_rm_measure_sta *qtn_ie;
+				u_int8_t sequence;
+				u_int32_t vendor_flags;
+				int32_t tlv_cnt, i;
+				u_int8_t *tlv_frm;
+
+				qtn_ie = (struct ieee80211_ie_qtn_rm_measure_sta *)frm;
+				if (!isqtnmrespoui(frm)) {
+					IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+						wh, "RM", "%s: id %u type %u OUI %02x:%02x:%02x",
+						"specific IE incorrect",
+						qtn_ie->id, qtn_ie->type, qtn_ie->qtn_ie_oui[0],
+						qtn_ie->qtn_ie_oui[1], qtn_ie->qtn_ie_oui[2]);
+					vap->iv_stats.is_rx_action++;
+					break;
+				}
+
+				sequence = qtn_ie->seq;
+				if (qtn_ie->type == QTN_OUI_RM_ALL) {
+					vendor_flags = BIT(RM_QTN_MAX + 1) - 1;
+				} else {
+					if ((qtn_ie->len != (qtn_ie->data[0] * 2)
+							+ sizeof(struct ieee80211_ie_qtn_rm_measure_sta) - 1)){
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+							wh, "RM", "%s: id %u type %u len %u cnt %u",
+							"specific IE length-element count mis-match",
+							qtn_ie->id, qtn_ie->type, qtn_ie->len, qtn_ie->data[0]);
+						vap->iv_stats.is_rx_action++;
+						break;
+					}
+
+					tlv_cnt = qtn_ie->data[0];
+					tlv_frm = (u_int8_t *)&qtn_ie->data[1];
+					vendor_flags = 0;
+
+					for (i = 0; i < tlv_cnt; i++) {
+						switch (tlv_frm[0]) {
+						case RM_QTN_TX_STATS:
+						case RM_QTN_RX_STATS:
+						case RM_QTN_MAX_QUEUED:
+						case RM_QTN_LINK_QUALITY:
+						case RM_QTN_RSSI_DBM:
+						case RM_QTN_BANDWIDTH:
+						case RM_QTN_SNR:
+						case RM_QTN_TX_PHY_RATE:
+						case RM_QTN_RX_PHY_RATE:
+						case RM_QTN_CCA:
+						case RM_QTN_BR_IP:
+						case RM_QTN_RSSI:
+						case RM_QTN_HW_NOISE:
+						case RM_QTN_SOC_MACADDR:
+						case RM_QTN_SOC_IPADDR:
+						case RM_QTN_RESET_CNTS:
+						case RM_QTN_RESET_QUEUED:
+						{
+							vendor_flags |= BIT(tlv_frm[0]);
+							tlv_frm += 2;
+							break;
+						}
+						default:
+							tlv_frm += 2;
+							break;
+						}
+					}
+				}
+
+				p_se = (ieee80211_11k_sub_element *)kmalloc(sizeof(*p_se) + sizeof(struct stastats_subele_vendor), GFP_ATOMIC);
+				if (p_se != NULL) {
+					struct stastats_subele_vendor *vendor;
+
+					p_se->sub_id = IEEE80211_ELEMID_VENDOR;
+					vendor = (struct stastats_subele_vendor *)p_se->data;
+					vendor->flags = vendor_flags;
+					vendor->sequence = sequence;
+					SLIST_INSERT_HEAD(&se_head, p_se, next);
+				}
+				break;
+			}
+			default:
+				break;
+			}
+			frm += 2 + frm[1];
+		}
+
+		ieee80211_send_rm_rep_stastats(ni,
+				0,
+				frame_token,
+				meas_comm->token,
+				sta_stats_request->group_id,
+				ntohs(sta_stats_request->duration_tu),
+				(void *)&se_head);
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CH_LOAD:
+	{
+		struct ieee80211_ie_measreq_chan_load *cl;
+
+		if (sizeof(struct ieee80211_ie_measreq_chan_load) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for channel load request field\n");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		cl = (struct ieee80211_ie_measreq_chan_load *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq_chan_load);
+
+		/* check current state */
+		if (meas_info->status != MEAS_STATUS_IDLE) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+				return;
+		}
+
+		/* send autonomous response if enable=1 report=1, which is not against the standard */
+		if ((meas_comm->mode & IEEE80211_CCA_REQMODE_ENABLE) &&
+				(meas_comm->mode & IEEE80211_CCA_REQMODE_REPORT))
+			meas_info->frame_token = 0;
+		else
+			meas_info->frame_token = frame_token;
+
+		meas_info->ni = ni;
+		meas_info->type = meas_comm->type;
+
+		meas_info->param.chan_load.op_class = cl->operating_class;
+		meas_info->param.chan_load.channel = cl->channel_num;
+		meas_info->param.chan_load.duration_tu = ntohs(cl->duration_tu);
+		meas_info->param.chan_load.upper_interval = ntohs(cl->random_interval_tu);
+
+		if (ieee80211_action_trigger_measurement(ic) != 0) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_NOISE:
+	{
+		struct ieee80211_ie_measreq_noise_his *nh;
+
+		if (sizeof(struct ieee80211_ie_measreq_noise_his) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for noise hisgram request field\n");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		nh = (struct ieee80211_ie_measreq_noise_his *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq_noise_his);
+
+		/* check current state */
+		if (meas_info->status != MEAS_STATUS_IDLE) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+				return;
+		}
+
+		/* send autonomous response if enable=1 report=1, which is not against the standard */
+		if ((meas_comm->mode & IEEE80211_CCA_REQMODE_ENABLE) &&
+				(meas_comm->mode & IEEE80211_CCA_REQMODE_REPORT))
+			meas_info->frame_token = 0;
+		else
+			meas_info->frame_token = frame_token;
+
+		meas_info->ni = ni;
+		meas_info->type = meas_comm->type;
+
+		meas_info->param.noise_his.op_class = nh->operating_class;
+		meas_info->param.noise_his.duration_tu = ntohs(nh->duration_tu);
+		meas_info->param.noise_his.upper_interval= ntohs(nh->random_interval_tu);
+		meas_info->param.noise_his.channel = nh->channel_num;
+
+		if (ieee80211_action_trigger_measurement(ic) != 0) {
+			ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type,
+				IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token,
+				meas_comm->token);
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_BEACON:
+	{
+		struct ieee80211_ie_measreq_beacon *beacon;
+		u_int8_t wildcard_bssid[IEEE80211_ADDR_LEN] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
+		u_int8_t parent_tsf[4] = {0};
+
+		if (sizeof(struct ieee80211_ie_measreq_beacon) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for beacon report request field\n");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		beacon = (struct ieee80211_ie_measreq_beacon *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq_beacon);
+
+		/*
+		 * as described in 10.11.9.1:
+		 * if the STA has no beacon information avaiilable then the STA may
+		 * either refuse the request or send an empty Beacon Report.
+		 * here we choose to refuse this request if there's no candidates
+		 * */
+		if (memcmp(beacon->bssid, wildcard_bssid, IEEE80211_ADDR_LEN) == 0) {
+			/* if request wildcard bssid, at least one BSS could be reported */
+			ieee80211_send_rm_rep_beacon(ni, 0, frame_token,
+					meas_comm->token, beacon->operating_class,
+					beacon->channel_num, beacon->duration_tu,
+					0, 0, 0, ni->ni_vap->iv_bss->ni_bssid, 255, parent_tsf);
+		} else {
+			ieee80211_action_measurement_report_fail(ni, meas_comm->type,
+					IEEE80211_CCA_REPMODE_REFUSE,
+					frame_token, meas_comm->token);
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_FRAME:
+	{
+		struct ieee80211_ie_measreq_frame *frame;
+		struct frame_report_subele_frame_count *entry;
+		ieee80211_11k_sub_element_head se_head;
+		ieee80211_11k_sub_element *p_se;
+
+		if (sizeof(struct ieee80211_ie_measreq_frame) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for frame report request field\n");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		frame = (struct ieee80211_ie_measreq_frame *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq_frame);
+
+		/* currently only frame count report is required */
+		if (frame->frame_request_type != FRAME_COUNT_REPORT) {
+			ieee80211_action_measurement_report_fail(ni,
+					meas_comm->type, IEEE80211_CCA_REPMODE_REFUSE,
+					frame_token, meas_comm->token);
+			break;
+		}
+
+		SLIST_INIT(&se_head);
+		p_se = (ieee80211_11k_sub_element *)kmalloc(sizeof(ieee80211_11k_sub_element) +
+				sizeof(struct frame_report_subele_frame_count), GFP_ATOMIC);
+		if (p_se != NULL) {
+			p_se->sub_id = IEEE80211_FRAME_REPORT_SUBELE_FRAME_COUNT_REPORT;
+			entry = (struct frame_report_subele_frame_count *)p_se->data;
+			if (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				memcpy(entry->ta, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+			} else {
+				memcpy(entry->ta, ni->ni_vap->iv_bss->ni_macaddr, IEEE80211_ADDR_LEN);
+			}
+			memcpy(entry->bssid, ni->ni_vap->iv_bss->ni_macaddr, IEEE80211_ADDR_LEN);
+			entry->phy_type = 0;
+			entry->avg_rcpi = 0;
+			entry->last_rsni = 0;
+			entry->last_rcpi = 0;
+			entry->antenna_id = 255;
+			entry->frame_count = 1;
+			SLIST_INSERT_HEAD(&se_head, p_se, next);
+		}
+
+		ieee80211_send_rm_rep_frame(ni, 0,
+				frame_token, meas_comm->token,
+				0, frame->channel_num,
+				frame->duration_tu,
+				(void *)&se_head);
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CATEGORY:
+	{
+		struct ieee80211_ie_measreq_trans_stream_cat *cat;
+		struct ieee80211_meas_report_ctrl ctrl;
+		struct ieee80211_action_data action_data;
+		int32_t i;
+
+		if (sizeof(struct ieee80211_ie_measreq_trans_stream_cat) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for transmit stream/category request field\n");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		cat = (struct ieee80211_ie_measreq_trans_stream_cat *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq_trans_stream_cat);
+
+		memset(&ctrl, 0, sizeof(ctrl));
+		ctrl.meas_type = IEEE80211_RM_MEASTYPE_CATEGORY;
+		ctrl.report_mode = 0;
+		ctrl.token = frame_token;
+		ctrl.meas_token = meas_comm->token;
+		ctrl.autonomous = 0;
+
+		ctrl.u.tran_stream_cat.duration_tu = cat->duration_tu;
+		memcpy(ctrl.u.tran_stream_cat.peer_sta, cat->peer_sta_addr, IEEE80211_ADDR_LEN);
+		ctrl.u.tran_stream_cat.tid = cat->tid;
+		ctrl.u.tran_stream_cat.reason = 0;
+		ctrl.u.tran_stream_cat.tran_msdu_cnt = 0;
+		ctrl.u.tran_stream_cat.msdu_discard_cnt = 0;
+		ctrl.u.tran_stream_cat.msdu_fail_cnt = 0;
+		ctrl.u.tran_stream_cat.msdu_mul_retry_cnt = 0;
+		ctrl.u.tran_stream_cat.qos_lost_cnt = 0;
+		ctrl.u.tran_stream_cat.avg_queue_delay = 0;
+		ctrl.u.tran_stream_cat.avg_tran_delay = 0;
+		ctrl.u.tran_stream_cat.bin0_range = cat->bin0_range;
+
+		for (i = 0; i < ARRAY_SIZE(ctrl.u.tran_stream_cat.bins); i++)
+			ctrl.u.tran_stream_cat.bins[i] = 0;
+
+		action_data.cat = IEEE80211_ACTION_CAT_RM;
+		action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+		action_data.params = &ctrl;
+
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_MUL_DIAG:
+	{
+		struct ieee80211_ie_measreq_multicast_diag *mul_diag;
+
+		if (sizeof(struct ieee80211_ie_measreq_multicast_diag) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+					"mgmt", "%s", "no enough data for multicast diagnostics request field\n");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		mul_diag = (struct ieee80211_ie_measreq_multicast_diag *)frm;
+		frm += sizeof(struct ieee80211_ie_measreq_multicast_diag);
+
+		ieee80211_send_rm_rep_multicast_diag(ni, 0, frame_token, meas_comm->token,
+				mul_diag->duration_tu, mul_diag->group_mac_addr,
+				0, 0, 0, 0, 0);
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_LCI:
+	case IEEE80211_RM_MEASTYPE_LOC_CIVIC:
+	case IEEE80211_RM_MEASTYPE_LOC_ID:
+		ieee80211_action_measurement_report_fail(ni,
+				meas_comm->type, IEEE80211_CCA_REPMODE_REFUSE,
+				frame_token, meas_comm->token);
+		break;
+	default:
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+				"mgmt", "%s", "unsupported type\n");
+		vap->iv_stats.is_rx_action++;
+		break;
+	}
+}
+
+/* Quantenna CCA Extension */
+/*
+ * Parse an autonomous (token == 0) CCA measurement report carrying the
+ * Quantenna extension data and update the per-node SCS interference state.
+ *
+ * ni        - node (STA) the report was received from
+ * meas_comm - measurement report common IE; its data starts with a
+ *             struct cca_rm_rep_data, optionally followed by a QTN SCS
+ *             vendor IE (interference / DFS / free-airtime report)
+ * efrm      - end of the received frame payload; used for bounds checks
+ */
+void ieee80211_parse_qtn_measure_report(struct ieee80211_node *ni,
+		struct ieee80211_ie_measure_comm *meas_comm,
+		u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct cca_rm_rep_data *cca_report;
+	uint8_t *qtn_extra_ie;
+	uint8_t *qtn_extra_ie_end;
+	struct ieee80211_ie_qtn_scs *qtn_scs_ie;
+	int scs_ie_type = -1;	/* -1: SCS vendor IE absent, truncated or invalid */
+
+	if (!ic->ic_scs.scs_stats_on)
+		return;
+
+	cca_report = (struct cca_rm_rep_data*)meas_comm->data;
+	/* rescale the 802.11k busy fraction to the internal SCS scale */
+	ni->ni_recent_cca_intf = (cca_report->busy_frac * IEEE80211_SCS_CCA_INTF_SCALE / IEEE80211_11K_CCA_INTF_SCALE);
+	ni->ni_recent_cca_intf_jiffies = jiffies;
+	qtn_scs_ie = (struct ieee80211_ie_qtn_scs*)(cca_report + 1);
+	/*
+	 * Only dereference the optional SCS vendor IE after validating that it
+	 * is fully contained in the frame. Fix: the debug switch at the bottom
+	 * previously read qtn_scs_ie->scs_ie_type even when these checks
+	 * failed, which is an out-of-bounds read on frames that end right
+	 * after the CCA report data; the type is now latched into the local
+	 * scs_ie_type only once the IE has been validated.
+	 */
+	if (((efrm - (uint8_t*)qtn_scs_ie) >= QTN_SCS_IE_LEN_MIN) &&
+				(qtn_scs_ie->id == IEEE80211_ELEMID_VENDOR) &&
+				(qtn_scs_ie->len >= QTN_SCS_IE_LEN_MIN - 2) &&
+				(is_qtn_scs_oui((uint8_t*)qtn_scs_ie))) {
+		scs_ie_type = qtn_scs_ie->scs_ie_type;
+		if ((qtn_scs_ie->scs_ie_type == QTN_SCS_IE_TYPE_STA_INTF_RPT) &&
+					(qtn_scs_ie->len >= (QTN_SCS_IE_STA_INTF_RPT_LEN_MIN - 2))) {
+			/* interference report: record preamble failures and others_time */
+			ic->ic_sta_cc = 1;
+			ni->ni_recent_sp_fail = le32toh(qtn_scs_ie->u.cca_info.sp_fail);
+			ni->ni_recent_lp_fail = le32toh(qtn_scs_ie->u.cca_info.lp_fail);
+			ni->ni_recent_others_time = le16toh(qtn_scs_ie->u.cca_info.others_time);
+			/* track peaks immediately, decay via smoothing otherwise */
+			if (ni->ni_recent_others_time > ni->ni_recent_others_time_smth) {
+				ni->ni_recent_others_time_smth = ni->ni_recent_others_time;
+			} else {
+				ni->ni_recent_others_time_smth = IEEE80211_SCS_SMOOTH(
+							ni->ni_recent_others_time_smth,
+							ni->ni_recent_others_time,
+							IEEE80211_SCS_SMTH_RBS_TIME);
+			}
+
+			/* optional TDLS stats ride in the extra IE area; bounds-checked against the IE length */
+			if ((qtn_scs_ie->extra_ie_len != 0) &&
+					((qtn_scs_ie->extra_ie_len + sizeof(struct ieee80211_ie_qtn_scs)) <=
+						(qtn_scs_ie->len + 2))) {
+				qtn_extra_ie = qtn_scs_ie->extra_ie;
+				qtn_extra_ie_end = qtn_scs_ie->extra_ie + qtn_scs_ie->extra_ie_len;
+
+				while (qtn_extra_ie < qtn_extra_ie_end) {
+					ieee80211_scs_update_tdls_stats(ic, (struct ieee80211_tdls_scs_stats *)qtn_extra_ie);
+					qtn_extra_ie += sizeof(struct ieee80211_tdls_scs_stats);
+				}
+			}
+		} else if ((qtn_scs_ie->scs_ie_type == QTN_SCS_IE_TYPE_STA_DFS_RPT) &&
+					(qtn_scs_ie->len >= (QTN_SCS_IE_STA_DFS_RPT_LEN_MIN - 2))) {
+			/* let AP ignore the interference from DFS report */
+			ni->ni_recent_cca_intf = SCS_CCA_INTF_INVALID;
+			ni->ni_qtn_dfs_enabled = le16toh(qtn_scs_ie->u.dfs_info.dfs_enabled);
+			ni->ni_txpower = le16toh(qtn_scs_ie->u.dfs_info.max_txpower);
+		} else if ((qtn_scs_ie->scs_ie_type == QTN_SCS_IE_TYPE_STA_FAT_RPT) &&
+					(qtn_scs_ie->len >= (QTN_SCS_IE_STA_FAT_RPT_LEN_MIN - 2))) {
+			/* free-airtime report */
+			ni->ni_recent_cca_idle = le16toh(qtn_scs_ie->u.fat_info.free_airtime);
+		}
+	}
+
+	/* debug-only reporting; scs_ie_type is -1 when no valid SCS IE was found */
+	switch (scs_ie_type) {
+	case QTN_SCS_IE_TYPE_STA_INTF_RPT:
+		SCSDBG(SCSLOG_NOTICE, "CCA: SCS IE type %u: rx cca_intf %u "
+					"with busy_fraction %u report from STA 0x%x, "
+					"pmbl_error=%u %u "
+					"others_time=%u "
+					"others_time_smth=%u\n",
+					scs_ie_type, ni->ni_recent_cca_intf,
+					cca_report->busy_frac, ni->ni_associd,
+					ni->ni_recent_sp_fail, ni->ni_recent_lp_fail,
+					ni->ni_recent_others_time, ni->ni_recent_others_time_smth);
+		break;
+	case QTN_SCS_IE_TYPE_STA_DFS_RPT:
+		SCSDBG(SCSLOG_NOTICE, "DFS: SCS IE type %u, DFS %s, TX power %d, report from STA 0x%x\n",
+					scs_ie_type,
+					ni->ni_qtn_dfs_enabled ? "enabled" : "disabled",
+					ni->ni_txpower,
+					ni->ni_associd);
+		break;
+	case QTN_SCS_IE_TYPE_STA_FAT_RPT:
+		SCSDBG(SCSLOG_NOTICE, "CCA: SCS IE type %u: rx cca_intf %u "
+					"with busy_fraction %u report from STA 0x%x, "
+					"cca_idle=%d\n",
+					scs_ie_type, ni->ni_recent_cca_intf,
+					cca_report->busy_frac, ni->ni_associd,
+					ni->ni_recent_cca_idle);
+		break;
+	default:
+		SCSDBG(SCSLOG_NOTICE, "CCA: received cca_intf %u with busy_fraction %u report from STA 0x%x, "
+					"scs IE not present or invalid (%d)\n",
+					ni->ni_recent_cca_intf, cca_report->busy_frac,
+					ni->ni_associd, scs_ie_type);
+		break;
+	}
+}
+
+void ieee80211_parse_measure_report(struct ieee80211_node *ni,
+		struct ieee80211_frame *wh,
+		u_int8_t category,
+		u_int8_t frame_token,
+		u_int8_t *frm,
+		u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_ie_measure_comm *meas_comm;
+
+#define QTN_MREPORT_SUBIE_LEN(dlen)	\
+			((uint8_t)(offsetof(struct ieee80211_ie_qtn_rm_measure_sta, data) - \
+			offsetof(struct ieee80211_ie_qtn_rm_measure_sta, qtn_ie_oui) + (dlen)))
+
+	if (sizeof(struct ieee80211_ie_measure_comm) > (efrm - frm)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "no enough data for measurement common field");
+		/* this counter is used incorrectly throughout the module, but not currently in use */
+		vap->iv_stats.is_rx_action++;
+		return;
+	}
+	meas_comm = (struct ieee80211_ie_measure_comm*)frm;
+	frm += sizeof(*meas_comm);
+
+	if (meas_comm->id != IEEE80211_ELEMID_MEASREP) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+				"mgt", "%s", "measurement report ID mismatch\n");
+		vap->iv_stats.is_rx_action++;
+		return;
+	}
+
+	/* autonomous report frame should be handled seperately */
+	if (meas_comm->token == 0) {
+		switch (meas_comm->type) {
+		case IEEE80211_CCA_MEASTYPE_BASIC:
+		{
+			struct ieee80211_ie_measrep_basic *meas_rep_basic;
+
+			if (meas_comm->mode == 0) {
+				if (sizeof(struct ieee80211_ie_measrep_basic) > (efrm - frm)) {
+					IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+						wh, "mgt", "%s", "no enough data for measurement report field");
+					vap->iv_stats.is_rx_action++;
+					return;
+				}
+				meas_rep_basic = (struct ieee80211_ie_measrep_basic *)(frm);
+				frm += sizeof(struct ieee80211_ie_measrep_basic);
+				ieee80211_recv_meas_basic_report(ni, meas_rep_basic);
+			}
+			break;
+		}
+		case IEEE80211_CCA_MEASTYPE_CCA: {
+			if (category == IEEE80211_ACTION_CAT_RM)
+				ieee80211_parse_qtn_measure_report(ni, meas_comm, efrm);
+			break;
+		}
+		default:
+			printk("unsupport autonomous report, type=%d\n", meas_comm->type);
+			break;
+		}
+
+		return;
+	}
+
+	ni->ni_meas_info.ni_meas_rep_mode = meas_comm->mode;
+	ni->ni_meas_info.ni_meas_rep_time = jiffies;
+
+	/* normal measurement report */
+	switch (meas_comm->type) {
+	case IEEE80211_CCA_MEASTYPE_BASIC:
+	{
+		struct ieee80211_ie_measrep_basic *meas_rep_basic;
+
+		if (meas_comm->mode == 0) {
+			if (sizeof(struct ieee80211_ie_measrep_basic) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for basic report field");
+				vap->iv_stats.is_rx_elem_toosmall++;
+				return;
+			}
+			meas_rep_basic = (struct ieee80211_ie_measrep_basic *)(frm);
+			frm += sizeof(struct ieee80211_ie_measrep_basic);
+
+			ni->ni_meas_info.rep.basic = meas_rep_basic->basic_report;
+
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DOTH,
+				"receive MEAS Report:type=basic, report_mode=%u, channel=%d, tsf=%llu, duration=%u, data=%u\n",
+					ni->ni_meas_info.ni_meas_rep_mode,
+					meas_rep_basic->chan_num,
+					meas_rep_basic->start_tsf,
+					meas_rep_basic->duration_tu,
+					ni->ni_meas_info.rep.basic);
+		}
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_CCA:
+	{
+		struct ieee80211_ie_measrep_cca *meas_rep_cca;
+
+		if (meas_comm->mode == 0) {
+			if (sizeof(struct ieee80211_ie_measrep_cca) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for cca report field");
+				vap->iv_stats.is_rx_elem_toosmall++;
+				return;
+			}
+			meas_rep_cca = (struct ieee80211_ie_measrep_cca *)(frm);
+			frm += sizeof(struct ieee80211_ie_measrep_cca);
+
+			ni->ni_meas_info.rep.cca = meas_rep_cca->cca_report;
+
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DOTH,
+				"receive MEAS Report:type=cca, report_mode=%u, channel=%d, tsf=%llu, duration=%u, data=%u\n",
+					ni->ni_meas_info.ni_meas_rep_mode,
+					meas_rep_cca->chan_num,
+					meas_rep_cca->start_tsf,
+					meas_rep_cca->duration_tu,
+					ni->ni_meas_info.rep.cca);
+		}
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_RPI:
+	{
+		struct ieee80211_ie_measrep_rpi *meas_rep_rpi;
+
+		if (meas_comm->mode == 0) {
+			if (sizeof(struct ieee80211_ie_measrep_rpi) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for rpi report field");
+				vap->iv_stats.is_rx_elem_toosmall++;
+				return;
+			}
+			meas_rep_rpi = (struct ieee80211_ie_measrep_rpi *)(frm);
+			frm += sizeof(struct ieee80211_ie_measrep_rpi);
+
+			memcpy(ni->ni_meas_info.rep.rpi, meas_rep_rpi->rpi_report, sizeof(ni->ni_meas_info.rep.rpi));
+
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DOTH,
+				"receive MEAS Report:type=rpi,	report_mode=%u, "
+				"channel=%d, tsf=%llu, duration=%u, data=%u %u %u %u %u %u %u %u\n",
+					ni->ni_meas_info.ni_meas_rep_mode,
+					meas_rep_rpi->chan_num,
+					meas_rep_rpi->start_tsf,
+					meas_rep_rpi->duration_tu,
+					ni->ni_meas_info.rep.rpi[0],
+					ni->ni_meas_info.rep.rpi[1],
+					ni->ni_meas_info.rep.rpi[2],
+					ni->ni_meas_info.rep.rpi[3],
+					ni->ni_meas_info.rep.rpi[4],
+					ni->ni_meas_info.rep.rpi[5],
+					ni->ni_meas_info.rep.rpi[6],
+					ni->ni_meas_info.rep.rpi[7]);
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_STA:
+	{
+		const int32_t sta_stats_groupid_len[] = {
+			[0] = sizeof(struct ieee80211_rm_sta_stats_group0),
+			[1] = sizeof(struct ieee80211_rm_sta_stats_group1),
+			[2 ... 9] = sizeof(struct ieee80211_rm_sta_stats_group2to9),
+			[10] = sizeof(struct ieee80211_rm_sta_stats_group10),
+			[11] = sizeof(struct ieee80211_rm_sta_stats_group11),
+			[12] = sizeof(struct ieee80211_rm_sta_stats_group12),
+			[13] = sizeof(struct ieee80211_rm_sta_stats_group13),
+			[14] = sizeof(struct ieee80211_rm_sta_stats_group14),
+			[15] = sizeof(struct ieee80211_rm_sta_stats_group15),
+			[16] = sizeof(struct ieee80211_rm_sta_stats_group16),
+		};
+		struct ieee80211_ie_measrep_sta_stat *report = NULL;
+		int status = 0;
+
+		if (sizeof(struct ieee80211_ie_measrep_sta_stat) > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "RM", "insufficient data for %s", "sta stats report common field");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		report = (struct ieee80211_ie_measrep_sta_stat *)frm;
+		frm += sizeof(struct ieee80211_ie_measrep_sta_stat);
+
+		/* handle with group ID used for Cisco */
+		if (report->group_id == 221) {
+			if ((&report->data[0] + sizeof(ni->ni_rm_sta_grp221)) > efrm) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "RM", "insufficient data for %s", "group 221 STA stats");
+				vap->iv_stats.is_rx_action++;
+			} else {
+				memcpy(&ni->ni_rm_sta_grp221, &report->data[0], sizeof(ni->ni_rm_sta_grp221));
+			}
+			return;
+		}
+
+		if (report->group_id >= ARRAY_SIZE(sta_stats_groupid_len)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "RM", "invalid group id %d", report->group_id);
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+
+		if (sta_stats_groupid_len[report->group_id] > (efrm - frm)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "RM", "insufficient data for %s", "sta stats group result field");
+			vap->iv_stats.is_rx_action++;
+			return;
+		}
+		frm += sta_stats_groupid_len[report->group_id];
+
+		/* optional sub element */
+		while ((frm + IEEE80211_RM_MEAS_SUBTYPE_LEN_MIN) <= efrm) {
+			switch (frm[0]) {
+			case IEEE80211_ELEMID_VENDOR:
+			{
+				struct ieee80211_ie_qtn_rm_measure_sta *qtn_comm;
+				struct ieee80211_ie_qtn_rm_sta_all *remote;
+				int i, cnt;
+
+				if ((frm + sizeof(*qtn_comm)) > efrm) {
+					IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+						wh, "RM", "insufficient data for %s", "vendor subelement");
+					vap->iv_stats.is_rx_action++;
+					return;
+				}
+
+				qtn_comm = (struct ieee80211_ie_qtn_rm_measure_sta*)frm;
+				if (!isqtnmrespoui(frm)) {
+					IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+						wh, "RM", "%s: id %u type %u OUI %02x:%02x:%02x",
+						"specific IE incorrect",
+						qtn_comm->id, qtn_comm->type, qtn_comm->qtn_ie_oui[0],
+						qtn_comm->qtn_ie_oui[1], qtn_comm->qtn_ie_oui[2]);
+					vap->iv_stats.is_rx_action++;
+					break;
+				}
+
+				remote = &ni->ni_qtn_rm_sta_all;	/* record the remote statistics to node */
+				if (qtn_comm->type == QTN_OUI_RM_ALL) {
+					if ((qtn_comm->data + sizeof(*remote)) > efrm ||
+							qtn_comm->len != QTN_MREPORT_SUBIE_LEN(sizeof(*remote))) {
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+							wh, "RM", "%s", "QTN all group IE bad len");
+						vap->iv_stats.is_rx_action++;
+						break;
+					}
+
+					memcpy(remote, qtn_comm->data, sizeof(*remote));
+
+					remote->tx_stats.tx_bytes = ntohll(remote->tx_stats.tx_bytes);
+					remote->tx_stats.tx_pkts = ntohl(remote->tx_stats.tx_pkts);
+					remote->tx_stats.tx_discard = ntohl(remote->tx_stats.tx_discard);
+					remote->tx_stats.tx_err = ntohl(remote->tx_stats.tx_err);
+					remote->tx_stats.tx_ucast = ntohl(remote->tx_stats.tx_ucast);
+					remote->tx_stats.tx_mcast = ntohl(remote->tx_stats.tx_mcast);
+					remote->tx_stats.tx_bcast = ntohl(remote->tx_stats.tx_bcast);
+
+					remote->rx_stats.rx_bytes = ntohll(remote->rx_stats.rx_bytes);
+					remote->rx_stats.rx_pkts = ntohl(remote->rx_stats.rx_pkts);
+					remote->rx_stats.rx_discard = ntohl(remote->rx_stats.rx_discard);
+					remote->rx_stats.rx_err = ntohl(remote->rx_stats.rx_err);
+					remote->rx_stats.rx_ucast = ntohl(remote->rx_stats.rx_ucast);
+					remote->rx_stats.rx_mcast = ntohl(remote->rx_stats.rx_mcast);
+					remote->rx_stats.rx_bcast = ntohl(remote->rx_stats.rx_bcast);
+
+					remote->max_queued = ntohl(remote->max_queued);
+					remote->link_quality = ntohl(remote->link_quality);
+					remote->rssi_dbm = ntohl(remote->rssi_dbm);
+					remote->bandwidth = ntohl(remote->bandwidth);
+					remote->snr = ntohl(remote->snr);
+					remote->tx_phy_rate = ntohl(remote->tx_phy_rate);
+					remote->rx_phy_rate = ntohl(remote->rx_phy_rate);
+					remote->cca = ntohl(remote->cca);
+					remote->br_ip = ntohl(remote->br_ip);
+
+					for (i = 0; i <= RM_QTN_MAX; i++ ) {
+						ni->ni_last_update[i] = jiffies;
+					}
+				} else {
+					u_int8_t *vendor_frm;
+					uint8_t sie_type;
+					uint8_t sie_len;
+
+					vendor_frm = (u_int8_t*)qtn_comm->data;
+					cnt = *vendor_frm++;
+
+					if (cnt == 0) {
+						status = -EPROTONOSUPPORT;
+					} else if (cnt > (RM_QTN_CTRL_END + 1)) {
+						cnt = RM_QTN_CTRL_END + 1;
+					}
+
+					for (i = 0; i < cnt; i++) {
+						if ((vendor_frm + IEEE80211_RM_MEAS_SUBTYPE_LEN_MIN) > efrm) {
+							IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+								wh, "RM", "insufficient data for %s", "QTN spcial group IE");
+							vap->iv_stats.is_rx_action++;
+							break;
+						}
+
+						sie_type = *vendor_frm++;
+						sie_len = *vendor_frm++;
+
+						if ((sie_type <= RM_QTN_CTRL_END && sie_type != RM_QTN_UNKNOWN &&
+							sie_len != ieee80211_meas_sta_qtn_report_subtype_len[sie_type]) ||
+									(vendor_frm + sie_len) > efrm) {
+							/* Skip the whole IE in case a single bad sub-IE encountered */
+							IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+								wh, "RM", "%s: %d bytes (must be %d)",
+								"QTN spcial group IE bad len", sie_len,
+								ieee80211_meas_sta_qtn_report_subtype_len[sie_type]);
+							vap->iv_stats.is_rx_action++;
+							break;
+						}
+
+						switch (sie_type) {
+						case RM_QTN_TX_STATS:
+						{
+							memcpy(&remote->tx_stats, vendor_frm, sie_len);
+							remote->tx_stats.tx_bytes = ntohll(remote->tx_stats.tx_bytes);
+							remote->tx_stats.tx_pkts = ntohl(remote->tx_stats.tx_pkts);
+							remote->tx_stats.tx_discard = ntohl(remote->tx_stats.tx_discard);
+							remote->tx_stats.tx_err = ntohl(remote->tx_stats.tx_err);
+							remote->tx_stats.tx_ucast = ntohl(remote->tx_stats.tx_ucast);
+							remote->tx_stats.tx_mcast = ntohl(remote->tx_stats.tx_mcast);
+							remote->tx_stats.tx_bcast = ntohl(remote->tx_stats.tx_bcast);
+							ni->ni_last_update[RM_QTN_TX_STATS] = jiffies;
+							break;
+						}
+						case RM_QTN_RX_STATS:
+						{
+							memcpy(&remote->rx_stats, vendor_frm, sie_len);
+							remote->rx_stats.rx_bytes = ntohll(remote->rx_stats.rx_bytes);
+							remote->rx_stats.rx_pkts = ntohl(remote->rx_stats.rx_pkts);
+							remote->rx_stats.rx_discard = ntohl(remote->rx_stats.rx_discard);
+							remote->rx_stats.rx_err = ntohl(remote->rx_stats.rx_err);
+							remote->rx_stats.rx_ucast = ntohl(remote->rx_stats.rx_ucast);
+							remote->rx_stats.rx_mcast = ntohl(remote->rx_stats.rx_mcast);
+							remote->rx_stats.rx_bcast = ntohl(remote->rx_stats.rx_bcast);
+							ni->ni_last_update[RM_QTN_RX_STATS] = jiffies;
+							break;
+						}
+						case RM_QTN_MAX_QUEUED:
+						{
+							memcpy(&remote->max_queued, vendor_frm, sie_len);
+							remote->max_queued = ntohl(remote->max_queued);
+							break;
+						}
+						case RM_QTN_LINK_QUALITY:
+						{
+							memcpy(&remote->link_quality, vendor_frm, sie_len);
+							remote->link_quality = ntohl(remote->link_quality);
+							break;
+						}
+						case RM_QTN_RSSI_DBM:
+						{
+							memcpy(&remote->rssi_dbm, vendor_frm, sie_len);
+							remote->rssi_dbm = ntohl(remote->rssi_dbm);
+							break;
+						}
+						case RM_QTN_BANDWIDTH:
+						{
+							memcpy(&remote->bandwidth, vendor_frm, sie_len);
+							remote->bandwidth = ntohl(remote->bandwidth);
+							break;
+						}
+						case RM_QTN_SNR:
+						{
+							memcpy(&remote->snr, vendor_frm, sie_len);
+							remote->snr = ntohl(remote->snr);
+							break;
+						}
+						case RM_QTN_TX_PHY_RATE:
+						{
+							memcpy(&remote->tx_phy_rate, vendor_frm, sie_len);
+							remote->tx_phy_rate = ntohl(remote->tx_phy_rate);
+							break;
+						}
+						case RM_QTN_RX_PHY_RATE:
+						{
+							memcpy(&remote->rx_phy_rate, vendor_frm, sie_len);
+							remote->rx_phy_rate = ntohl(remote->rx_phy_rate);
+							break;
+						}
+						case RM_QTN_CCA:
+						{
+							memcpy(&remote->cca, vendor_frm, sie_len);
+							remote->cca = ntohl(remote->cca);
+							break;
+						}
+						case RM_QTN_BR_IP:
+						{
+							memcpy(&remote->br_ip, vendor_frm, sie_len);
+							remote->br_ip = ntohl(remote->br_ip);
+							break;
+						}
+						case RM_QTN_RSSI:
+						{
+							memcpy(&remote->rssi, vendor_frm, sie_len);
+							remote->rssi = ntohl(remote->rssi);
+							break;
+						}
+						case RM_QTN_HW_NOISE:
+						{
+							memcpy(&remote->hw_noise, vendor_frm, sie_len);
+							remote->hw_noise = ntohl(remote->hw_noise);
+							break;
+						}
+						case RM_QTN_SOC_MACADDR:
+						{
+							memcpy(&remote->soc_macaddr, vendor_frm, sie_len);
+							break;
+						}
+						case RM_QTN_SOC_IPADDR:
+						{
+							memcpy(&remote->soc_ipaddr, vendor_frm, sie_len);
+							remote->soc_ipaddr = ntohl(remote->soc_ipaddr);
+							break;
+						}
+						/* for control IE below */
+						case RM_QTN_RESET_CNTS:
+						{
+							memcpy(&status, vendor_frm, sie_len);
+							status = ntohl(status);
+							break;
+						}
+						default:
+							/* Just skip unknown subelement types - for compatibility reasons */
+							break;
+						}
+						vendor_frm += sie_len;
+					}
+				}
+				break;
+			}
+			default:
+				break;
+			}
+			frm += 2 + frm[1];
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CH_LOAD:
+	{
+		struct ieee80211_ie_measrep_chan_load *cl;
+
+		if (meas_comm->mode == 0) {
+			if (sizeof(struct ieee80211_ie_measrep_chan_load) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for channel load report field");
+				vap->iv_stats.is_rx_action++;
+				return;
+			}
+			cl = (struct ieee80211_ie_measrep_chan_load *)frm;
+			frm += sizeof(struct ieee80211_ie_measrep_chan_load);
+
+			ni->ni_meas_info.rep.chan_load = cl->channel_load;
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_NOISE:
+	{
+		struct ieee80211_ie_measrep_noise_his *nh;
+
+		if (meas_comm->mode == 0) {
+			if (sizeof(struct ieee80211_ie_measrep_noise_his) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for noise histogram report field");
+				vap->iv_stats.is_rx_action++;
+				return;
+			}
+			nh = (struct ieee80211_ie_measrep_noise_his *)frm;
+			frm += sizeof(struct ieee80211_ie_measrep_noise_his);
+
+			ni->ni_meas_info.rep.noise_his.antenna_id = nh->antenna_id;
+			ni->ni_meas_info.rep.noise_his.anpi = nh->anpi;
+			memcpy(ni->ni_meas_info.rep.noise_his.ipi, nh->ipi, sizeof(ni->ni_meas_info.rep.noise_his.ipi));
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_BEACON:
+	{
+		struct ieee80211_ie_measrep_beacon *beacon;
+
+		if (meas_comm->mode == 0) {
+			if (sizeof(struct ieee80211_ie_measrep_beacon) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for beacon report response field");
+				vap->iv_stats.is_rx_action++;
+				return;
+			}
+			beacon = (struct ieee80211_ie_measrep_beacon *)frm;
+			frm += sizeof(struct ieee80211_ie_measrep_beacon);
+
+			ni->ni_meas_info.rep.beacon.reported_frame_info = beacon->reported_frame_info;
+			ni->ni_meas_info.rep.beacon.rcpi = beacon->rcpi;
+			ni->ni_meas_info.rep.beacon.rsni = beacon->rsni;
+			memcpy(ni->ni_meas_info.rep.beacon.bssid, beacon->bssid, IEEE80211_ADDR_LEN);
+			ni->ni_meas_info.rep.beacon.antenna_id = beacon->antenna_id;
+			ni->ni_meas_info.rep.beacon.parent_tsf = BE_READ_4(beacon->parent_tsf);
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_FRAME:
+	{
+		if (meas_comm->mode == 0) {
+			if (sizeof(struct ieee80211_ie_measrep_frame) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for frame report response field");
+				vap->iv_stats.is_rx_action++;
+				return;
+			}
+			frm += sizeof(struct ieee80211_ie_measrep_frame);
+
+			ni->ni_meas_info.rep.frame_count.sub_ele_flag = 0;
+			if ((efrm - frm) >= sizeof(struct ieee80211_subie_section_frame_entry)) {
+				struct ieee80211_subie_section_frame_entry *entry;
+
+				entry = (struct ieee80211_subie_section_frame_entry *)frm;
+				frm += sizeof(struct ieee80211_subie_section_frame_entry);
+				ni->ni_meas_info.rep.frame_count.sub_ele_flag = 1;
+				memcpy(ni->ni_meas_info.rep.frame_count.ta, entry->transmit_address, IEEE80211_ADDR_LEN);
+				memcpy(ni->ni_meas_info.rep.frame_count.bssid, entry->bssid, IEEE80211_ADDR_LEN);
+				ni->ni_meas_info.rep.frame_count.phy_type = entry->phy_type;
+				ni->ni_meas_info.rep.frame_count.avg_rcpi = entry->avg_rcpi;
+				ni->ni_meas_info.rep.frame_count.last_rsni = entry->last_rsni;
+				ni->ni_meas_info.rep.frame_count.last_rcpi = entry->last_rcpi;
+				ni->ni_meas_info.rep.frame_count.antenna_id = entry->anntenna_id;
+				ni->ni_meas_info.rep.frame_count.frame_count = ntohs(entry->frame_cnt);
+			}
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CATEGORY:
+	{
+		if (meas_comm->mode == 0) {
+			struct ieee80211_ie_measrep_trans_stream_cat *cat;
+
+			if (sizeof(struct ieee80211_ie_measrep_trans_stream_cat) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for transmit stream category response field");
+				vap->iv_stats.is_rx_action++;
+				return;
+			}
+			cat = (struct ieee80211_ie_measrep_trans_stream_cat *)frm;
+			frm += sizeof(struct ieee80211_ie_measrep_trans_stream_cat);
+
+			ni->ni_meas_info.rep.tran_stream_cat.reason = cat->reason;
+			ni->ni_meas_info.rep.tran_stream_cat.tran_msdu_cnt = ntohl(cat->tran_msdu_cnt);
+			ni->ni_meas_info.rep.tran_stream_cat.msdu_discard_cnt = ntohl(cat->msdu_discarded_cnt);
+			ni->ni_meas_info.rep.tran_stream_cat.msdu_fail_cnt = ntohl(cat->msdu_failed_cnt);
+			ni->ni_meas_info.rep.tran_stream_cat.msdu_mul_retry_cnt = ntohl(cat->msdu_mul_retry_cnt);
+			ni->ni_meas_info.rep.tran_stream_cat.qos_lost_cnt= ntohl(cat->qos_cf_lost_cnt);
+			ni->ni_meas_info.rep.tran_stream_cat.avg_queue_delay= ntohl(cat->avg_queue_delay);
+			ni->ni_meas_info.rep.tran_stream_cat.avg_tran_delay= ntohl(cat->avg_trans_delay);
+			ni->ni_meas_info.rep.tran_stream_cat.bin0_range= cat->bin0_range;
+			ni->ni_meas_info.rep.tran_stream_cat.bins[0]= ntohl(cat->bin0);
+			ni->ni_meas_info.rep.tran_stream_cat.bins[1]= ntohl(cat->bin1);
+			ni->ni_meas_info.rep.tran_stream_cat.bins[2]= ntohl(cat->bin2);
+			ni->ni_meas_info.rep.tran_stream_cat.bins[3]= ntohl(cat->bin3);
+			ni->ni_meas_info.rep.tran_stream_cat.bins[4]= ntohl(cat->bin4);
+			ni->ni_meas_info.rep.tran_stream_cat.bins[5]= ntohl(cat->bin5);
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_MUL_DIAG:
+	{
+		if (meas_comm->mode == 0) {
+			struct ieee80211_ie_measrep_multicast_diag *mul_diag;
+
+			if (sizeof(struct ieee80211_ie_measrep_multicast_diag) > (efrm - frm)) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					wh, "mgt", "%s", "no enough data for multicast diagnostics response field");
+				vap->iv_stats.is_rx_action++;
+				return;
+			}
+			mul_diag = (struct ieee80211_ie_measrep_multicast_diag *)frm;
+			frm += sizeof(struct ieee80211_ie_measrep_multicast_diag);
+
+			ni->ni_meas_info.rep.multicast_diag.reason = mul_diag->reason;
+			ni->ni_meas_info.rep.multicast_diag.mul_rec_msdu_cnt = ntohl(mul_diag->mul_rx_msdu_cnt);
+			ni->ni_meas_info.rep.multicast_diag.first_seq_num = ntohs(mul_diag->first_seq_num);
+			ni->ni_meas_info.rep.multicast_diag.last_seq_num = ntohs(mul_diag->last_seq_num);
+			ni->ni_meas_info.rep.multicast_diag.mul_rate= ntohs(mul_diag->mul_rate);
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_LCI:
+	case IEEE80211_RM_MEASTYPE_LOC_CIVIC:
+	case IEEE80211_RM_MEASTYPE_LOC_ID:
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "unsupported measurement frame, drop it");
+		vap->iv_stats.is_rx_action++;
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Handle a received 802.11h (Spectrum Management) measurement action frame.
+ *
+ * Validates that the frame holds at least the fixed spectrum-management
+ * action header, then walks the measurement IEs that follow, dispatching
+ * each to the request or report parser depending on the action code.  A
+ * report additionally completes the matching pending request (keyed by
+ * category/action/dialog token) in the vap's pending-post queue.
+ *
+ * NOTE(review): the IE walk advances by meas_ie_frm[1] + 2 without checking
+ * that the whole IE lies before efrm; presumably the called parsers bound
+ * themselves against the end pointer they are given -- verify.
+ */
+void ieee80211_recv_action_measure_11h(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	u_int8_t *meas_ie_frm;
+	struct ieee80211_action_sm_measurement_header *sm_header;
+
+	/* Frame must contain at least the fixed action header */
+	if ((efrm - (u_int8_t *)ia) < sizeof(struct ieee80211_action_sm_measurement_header)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "11h measurement frame header too small");
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return;
+	}
+	sm_header = (struct ieee80211_action_sm_measurement_header *)ia;
+	meas_ie_frm = (u_int8_t *)(sm_header + 1);	/* IEs start right after the header */
+
+	if (sm_header->ia_action == IEEE80211_ACTION_S_MEASUREMENT_REQUEST) {
+		/* One measurement request IE per iteration (2 = ID + length octets) */
+		while (meas_ie_frm < efrm) {
+			ieee80211_parse_measure_request(ni,
+					wh,
+					sm_header->ia_category,
+					sm_header->am_token,
+					meas_ie_frm,
+					meas_ie_frm + meas_ie_frm[1] + 2);
+			meas_ie_frm += meas_ie_frm[1] + 2;
+		}
+	} else {	/* 11h measurement report */
+		while (meas_ie_frm < efrm) {
+			ieee80211_parse_measure_report(ni,
+					wh,
+					sm_header->ia_category,
+					sm_header->am_token,
+					meas_ie_frm,
+					meas_ie_frm + meas_ie_frm[1] + 2);
+			meas_ie_frm += meas_ie_frm[1] + 2;
+		}
+
+		/* A report answers an outstanding request: drop it from the
+		 * pending-post queue. */
+		ieee80211_ppqueue_remove_with_response(&ni->ni_vap->iv_ppqueue,
+				ni,
+				ia->ia_category,
+				ia->ia_action,
+				sm_header->am_token);
+	}
+}
+
+/*
+ * Handle a received 802.11k Radio Measurement request or report action
+ * frame.  After validating that the fixed request/report header fits in
+ * the frame, each measurement IE that follows is handed to the matching
+ * parser.  A report additionally completes the pending request with the
+ * same category/action/dialog token in the vap's pending-post queue.
+ *
+ * NOTE(review): as in the 11h path, the IE walk trusts meas_ie_frm[1]
+ * without checking that the full IE fits before efrm -- presumably the
+ * parsers bound-check against the end pointer passed to them; verify.
+ */
+void ieee80211_recv_action_measure_11k(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	u_int8_t *meas_ie_frm;
+	struct ieee80211_action_radio_measure_request *rm_request;
+	struct ieee80211_action_radio_measure_report *rm_report;
+
+	if (ia->ia_action == IEEE80211_ACTION_R_MEASUREMENT_REQUEST) {
+		if ((efrm - (u_int8_t *)ia) < sizeof(struct ieee80211_action_radio_measure_request)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "mgt", "%s", "11k measurement request frame header too small");
+			vap->iv_stats.is_rx_elem_toosmall++;
+			return;
+		}
+		rm_request = (struct ieee80211_action_radio_measure_request *)ia;
+		meas_ie_frm = (u_int8_t *)rm_request->am_data;
+
+		/* repeat one time */
+		while (meas_ie_frm < efrm) {
+			ieee80211_parse_measure_request(ni,
+					wh,
+					ia->ia_category,
+					rm_request->am_token,
+					meas_ie_frm,
+					meas_ie_frm + meas_ie_frm[1] + 2);
+			meas_ie_frm += meas_ie_frm[1] + 2;
+		}
+	} else {
+		if ((efrm - (u_int8_t *)ia) < sizeof(struct ieee80211_action_radio_measure_report)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "mgt", "%s", "11k measurement report frame header too small");
+			vap->iv_stats.is_rx_elem_toosmall++;
+			return;
+		}
+		rm_report = (struct ieee80211_action_radio_measure_report *)ia;
+		meas_ie_frm = (u_int8_t *)rm_report->am_data;
+
+		while (meas_ie_frm < efrm) {
+			ieee80211_parse_measure_report(ni,
+					wh,
+					ia->ia_category,
+					rm_report->am_token,
+					meas_ie_frm,
+					meas_ie_frm + meas_ie_frm[1] + 2);
+			meas_ie_frm += meas_ie_frm[1] + 2;
+		}
+		/* A report completes the matching outstanding request */
+		ieee80211_ppqueue_remove_with_response(&ni->ni_vap->iv_ppqueue,
+				ni,
+				ia->ia_category,
+				ia->ia_action,
+				rm_report->am_token);
+	}
+}
+
+/*
+ * Handle a received 802.11k Link Measurement Request: collect the local
+ * transmit power and link margin and immediately answer with a Link
+ * Measurement Report action frame echoing the requester's dialog token.
+ */
+void ieee80211_recv_action_link_measure_request(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211_action_rm_link_measure_request *request;
+	struct ieee80211_action_data action_data;
+	struct ieee80211_link_measure_report report;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if ((efrm - (u_int8_t *)ia) < sizeof(struct ieee80211_action_rm_link_measure_request)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "link measurement request too small");
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return;
+	}
+	request = (struct ieee80211_action_rm_link_measure_request *)ia;
+
+	report.token = request->token;	/* echo the requester's dialog token */
+	report.tpc_report.tx_power = ni->ni_ic->ic_get_local_txpow(ni->ni_ic);
+	ni->ni_ic->ic_get_local_link_margin(ni, &report.tpc_report.link_margin);
+	/* 255 presumably means "antenna id unknown"; rcpi/rsni not measured
+	 * here -- TODO confirm against the 802.11k field semantics */
+	report.recv_antenna_id = 255;
+	report.tran_antenna_id = 255;
+	report.rcpi = 0;
+	report.rsni = 0;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_LINKMEASURE_REPORT;
+	action_data.params = &report;
+
+	/* Pointer is smuggled through an int argument per the
+	 * IEEE80211_SEND_MGMT convention; report is stack-local, so the send
+	 * path presumably copies it before returning -- verify. */
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+
+/*
+ * Handle a received 802.11k Link Measurement Report: cache the peer's TPC
+ * report, antenna ids, RCPI and RSNI on the node and complete the pending
+ * link measurement request carrying the same dialog token.
+ */
+void ieee80211_recv_action_link_measure_report(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211_action_rm_link_measure_report *report;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if ((efrm - (u_int8_t *)ia) < sizeof(struct ieee80211_action_rm_link_measure_report)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "link measurement report too small");
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return;
+	}
+	report = (struct ieee80211_action_rm_link_measure_report *)ia;
+
+	/* Record the peer's view of the link on the node */
+	ni->ni_lm.tpc_report.link_margin = report->tpc_report.link_margin;
+	ni->ni_lm.tpc_report.tx_power = report->tpc_report.tran_power;
+	ni->ni_lm.recv_antenna_id = report->recv_antenna_id;
+	ni->ni_lm.tran_antenna_id = report->tran_antenna_id;
+	ni->ni_lm.rcpi = report->rcpi;
+	ni->ni_lm.rsni = report->rsni;
+
+	/* Complete the outstanding request with this token */
+	ieee80211_ppqueue_remove_with_response(&ni->ni_vap->iv_ppqueue,
+			ni,
+			ia->ia_category,
+			ia->ia_action,
+			report->token);
+}
+
+/* Candidate neighbor gathered from the scan cache, together with the BSS
+ * channel utilization used to rank candidates (lower is better). */
+struct ieee80211_nbr_entry {
+	struct ieee80211_neighbor_report_request_item item;
+	uint8_t channel_util;	/* from the BSS Load IE; 0 when the IE is absent */
+	TAILQ_ENTRY(ieee80211_nbr_entry) next;
+};
+
+/* Accumulator handed to the scan-cache iterator callback */
+struct neighbor_list_head {
+	struct ieee80211vap *vap;	/* vap whose desired SSID filters candidates */
+	TAILQ_HEAD(, ieee80211_nbr_entry) list;
+};
+
+/*
+ * Scan-cache iterator callback: for every scanned BSS whose SSID matches
+ * the vap's desired SSID, allocate a neighbor candidate entry carrying the
+ * BSSID, channel, PHY type, capability-derived BSSID Information flags and
+ * the BSS Load channel utilization, and append it to the candidate list.
+ * Always returns 0 so iteration continues over the whole cache.
+ */
+static int
+neighbor_scanentry_cb(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct ieee80211_nbr_entry *entry = NULL;
+	struct neighbor_list_head *list = (struct neighbor_list_head *)arg;
+	struct ieee80211vap *vap = list->vap;
+	struct ieee80211_scan_ssid *vssid = &vap->iv_des_ssid[0];
+
+	/* NOTE(review): compares a full IEEE80211_NWID_LEN window rather than
+	 * the SSID IE's actual length byte -- assumes both buffers are
+	 * zero-padded past the SSID; confirm. */
+	if (!memcmp(&se->se_ssid[2], vssid->ssid, IEEE80211_NWID_LEN) ) {
+		entry = (struct ieee80211_nbr_entry *)kmalloc(sizeof(*entry), GFP_ATOMIC);
+		if (entry != NULL) {
+			memcpy(entry->item.bssid, se->se_bssid, IEEE80211_ADDR_LEN);
+			entry->item.channel = se->se_chan->ic_ieee;
+			entry->item.phy_type = vap->iv_ic->ic_phytype;
+			entry->item.operating_class = 0;
+			entry->item.bssid_info = (BSSID_INFO_AP_UNKNOWN
+						| BSSID_INFO_SECURITY_COPY		/* not sure */
+						| BSSID_INFO_KEY_SCOPE_COPY);		/* not sure */
+
+			/* Translate advertised capabilities / IE presence into
+			 * BSSID Information flags */
+			if (se->se_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT)
+				entry->item.bssid_info |= BSSID_INFO_CAP_SPECTRUM_MANAGEMENT;
+			if (se->se_capinfo & IEEE80211_CAPINFO_WME)
+				entry->item.bssid_info |= BSSID_INFO_CAP_QOS;
+			if (se->se_capinfo & IEEE80211_CAPINFO_APSD)
+				entry->item.bssid_info |= BSSID_INFO_CAP_APSD;
+			if (se->se_capinfo & IEEE80211_CAPINFO_RM)
+				entry->item.bssid_info |= BSSID_INFO_CAP_RADIO_MEASUREMENT;
+			if (se->se_capinfo & IEEE80211_CAPINFO_DELAYED_BA)
+				entry->item.bssid_info |= BSSID_INFO_CAP_DELAYED_BA;
+			if (se->se_capinfo & IEEE80211_CAPINFO_IMMEDIATE_BA)
+				entry->item.bssid_info |= BSSID_INFO_CAP_IMMEDIATE_BA;
+			if (se->se_md_ie)
+				entry->item.bssid_info |= BSSID_INFO_MOBILITY_DOMAIN;
+			if (se->se_htcap_ie)
+				entry->item.bssid_info |= BSSID_INFO_HIGH_THROUGHPUT;
+			if (se->se_vhtcap_ie)
+				entry->item.bssid_info |= BSSID_INFO_VERY_HIGH_THROUGHPUT;
+			/* NOTE(review): BSSID_INFO_MOBILITY_DOMAIN is already set
+			 * unconditionally above whenever se_md_ie is present, which
+			 * makes this MDID comparison redundant -- possibly only the
+			 * matching-MDID case was intended; confirm. */
+			if (se->se_md_ie) {
+				struct ieee80211_md_ie *md = (struct ieee80211_md_ie *)se->se_md_ie;
+				if (vap->iv_mdid == md->md_info)
+					entry->item.bssid_info |= BSSID_INFO_MOBILITY_DOMAIN;
+			}
+			/* Per the debug print below, byte +2 of the BSS Load IE is
+			 * the station count (low byte) and +4 the channel utilization */
+			entry->channel_util = se->se_bss_load_ie ? *(se->se_bss_load_ie + 4) : 0;
+			if (se->se_bss_load_ie) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+					"sta count %d chutil %d bssid info %x bssid: %pM\n",
+					*(se->se_bss_load_ie + 2), *(se->se_bss_load_ie + 4),
+					entry->item.bssid_info, entry->item.bssid);
+			}
+			TAILQ_INSERT_TAIL(&list->list, entry, next);
+		}
+	}
+
+	return 0;
+}
+
+/* Populate @list with neighbor candidates by walking the scan cache */
+static void
+ieee80211_parse_scan_cache_and_generate_neighbor_report_list(struct ieee80211com *ic,
+		struct neighbor_list_head *list)
+{
+	ieee80211_scan_iterate(ic, neighbor_scanentry_cb, list);
+}
+
+/*
+ * Build an array of up to @num_items neighbor report items for @ni from
+ * the scan cache, ordered by ascending BSS channel utilization (the
+ * least-loaded candidates first).
+ *
+ * On return *item_table points to a kmalloc'ed array the caller must
+ * kfree(), or NULL when the allocation fails.  Returns the number of
+ * entries filled in.  @sort_on_chan_util is currently unused; results are
+ * always ranked by channel utilization.
+ */
+int
+ieee80211_create_neighbor_reports(struct ieee80211_node *ni,
+	struct ieee80211_neighbor_report_request_item **item_table,
+	int num_items,
+	int sort_on_chan_util)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct neighbor_list_head list;
+	struct ieee80211_nbr_entry *entry = NULL;
+	struct ieee80211_nbr_entry *tentry = NULL;
+	uint8_t bss_num = 0;
+	int i;
+	struct ieee80211_neighbor_report_request_item *cache = NULL;
+	struct ieee80211_neighbor_report_request_item *tcache = NULL;
+
+	*item_table = NULL;
+	cache = kmalloc(sizeof(struct ieee80211_neighbor_report_request_item) * num_items,
+				GFP_ATOMIC);
+	if (!cache) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION, "Failed to alloc size %d\n", num_items);
+		return 0;
+	}
+	TAILQ_INIT(&list.list);
+	list.vap = vap;
+	ieee80211_parse_scan_cache_and_generate_neighbor_report_list(ic, &list);
+
+	bss_num = 0;
+	/* Selection sort: repeatedly extract the least-utilized candidate */
+	for (i = 0; i < num_items; i++) {
+		uint8_t util = 255;
+
+		tentry = NULL;
+		TAILQ_FOREACH(entry, &list.list, next) {
+			if (entry->channel_util < util) {
+				tentry = entry;
+				util = entry->channel_util;
+			}
+		}
+		if (!tentry)
+			break;
+		TAILQ_REMOVE(&list.list, tentry, next);
+		if (bss_num < num_items) {
+			tcache = cache + bss_num;
+			memcpy(tcache->bssid, tentry->item.bssid, IEEE80211_ADDR_LEN);
+			/* bssid_info is carried in network byte order on the wire */
+			tcache->bssid_info = htonl(tentry->item.bssid_info);
+			tcache->channel = tentry->item.channel;
+			tcache->operating_class = tentry->item.operating_class;
+			tcache->phy_type = tentry->item.phy_type;
+			bss_num++;
+		}
+		/* Bug fix: selected entries were removed from the list but never
+		 * freed, leaking one allocation per candidate copied out. */
+		kfree(tentry);
+	}
+	*item_table = cache;
+
+	/* Free the candidates that did not make the cut */
+	entry = TAILQ_FIRST(&list.list);
+	while (entry != NULL) {
+		tentry = TAILQ_NEXT(entry, next);
+		kfree(entry);
+		entry = tentry;
+	}
+
+	return bss_num;
+}
+
+/* Maximum number of neighbor report items built for one response */
+#define NEIGH_REPORTS_MAX	32
+
+/*
+ * Completion callback run when a beacon report solicited from @ctx (a
+ * struct ieee80211_node) has been received.  Builds a neighbor report
+ * list from the scan cache, marks the entry matching the reported BSSID
+ * as reachable, and sends the neighbor report response for the pending
+ * request token.
+ */
+void
+ieee80211_beacon_request_callback_success(void *ctx)
+{
+	struct ieee80211_node *ni = ctx;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_neighbor_report_request_item *item_cache, *item_table[NEIGH_REPORTS_MAX];
+	int num_of_items = 0;
+	int i;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION, "beacon report received for sta: %pM\n",
+				ni->ni_macaddr);
+	/* Bug fix: the original format string had only two conversions for the
+	 * three arguments (rsni had no %d) */
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION, "beacon report rcpi %d rsni %d BSSID: %pM\n",
+			ni->ni_meas_info.rep.beacon.rcpi,
+			ni->ni_meas_info.rep.beacon.rsni,
+			ni->ni_meas_info.rep.beacon.bssid );
+	num_of_items = ieee80211_create_neighbor_reports(ni, &item_cache, NEIGH_REPORTS_MAX, 1);
+	for (i = 0; i < num_of_items; i++) {
+		/* mark reachability flag on the entry matching the beacon report */
+		/* NOTE(review): bssid_info was stored in network byte order by
+		 * ieee80211_create_neighbor_reports(); OR-ing the host-order
+		 * AP_REACHABLE bit mirrors the original code but looks
+		 * endian-suspect -- confirm. */
+		if (!memcmp(ni->ni_meas_info.rep.beacon.bssid, item_cache[i].bssid,
+			IEEE80211_ADDR_LEN))
+			item_cache[i].bssid_info |= BSSID_INFO_AP_REACHABLE;
+		item_table[i] = &item_cache[i];
+	}
+	ieee80211_send_neighbor_report_response(ni, ni->pending_beacon_req_token, num_of_items,
+							item_table);
+	/* Bug fix: free the base pointer returned by
+	 * ieee80211_create_neighbor_reports().  The original advanced
+	 * item_cache inside the loop and then freed the advanced pointer,
+	 * which is not the address that was allocated. */
+	if (item_cache) {
+		kfree(item_cache);
+	}
+}
+
+/*
+ * Completion callback run when a solicited beacon report was not received
+ * (timeout/failure).  A neighbor report response is still sent for the
+ * pending request token, built purely from the scan cache.
+ */
+void
+ieee80211_beacon_request_callback_fail(void *ctx)
+{
+	struct ieee80211_node *ni = ctx;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_neighbor_report_request_item *item_cache, *item_table[NEIGH_REPORTS_MAX];
+	int num_of_items = 0;
+	int i;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION, "beacon report failed for sta: %pM\n",
+					ni->ni_macaddr);
+	num_of_items = ieee80211_create_neighbor_reports(ni, &item_cache, NEIGH_REPORTS_MAX, 1);
+	for (i = 0; i < num_of_items; i++)
+		item_table[i] = &item_cache[i];
+	ieee80211_send_neighbor_report_response(ni, ni->pending_beacon_req_token, num_of_items,
+							item_table);
+	/* Bug fix: free the base pointer; the original freed item_cache after
+	 * advancing it past the end of the allocation. */
+	if (item_cache) {
+		kfree(item_cache);
+	}
+}
+
+/* Beacon measurement request parameters used for the 802.11k beacon report */
+#define BEACON_MEASURE_MODE_PASSIVE	0
+#define BEACON_MEASURE_MODE_ACTIVE	1
+#define BEACON_MEASURE_MODE_TABLE	2
+#define BEACON_MEASURE_TIME		20 /* 20 msec */
+#define BEACON_MEASURE_REQUEST_TIMEOUT	40 /* 40 msec */
+
+/*
+ * Answer a neighbor report request from a STA that supports beacon table
+ * reports: save the request token on the node and solicit a beacon report
+ * in table mode.  The neighbor report response itself is sent later from
+ * the beacon request success/fail callbacks, using the saved token.
+ */
+void
+ieee80211_create_and_send_neighbor_report_using_beacon_report(struct ieee80211_node *ni,
+									uint8_t token)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_scan_ssid *vssid = &vap->iv_des_ssid[0];
+
+	ni->pending_beacon_req_token = token;
+	/* send beacon report request - beacon table mode */
+	ieee80211_send_rm_req_beacon(ni, 0, 0, BEACON_MEASURE_TIME, BEACON_MEASURE_MODE_TABLE, NULL,
+		&vssid->ssid[0], vssid->len, BEACON_MEASURE_REQUEST_TIMEOUT,
+		ieee80211_beacon_request_callback_success,
+		ieee80211_beacon_request_callback_fail);
+
+}
+
+/*
+ * Handle a received 802.11k Neighbor Report Request action frame (AP
+ * only).  For a STA that can produce beacon table reports the response is
+ * deferred until a beacon report arrives; otherwise the response is built
+ * immediately from the scan cache.
+ */
+void
+ieee80211_recv_action_neighbor_report_request(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211_action_rm_neighbor_report_request *request;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	/* Only an AP answers neighbor report requests */
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "wrong mode");
+		vap->iv_stats.is_rx_mgtdiscard++;
+		return;
+	}
+
+	if ((efrm - (u_int8_t *)ia) < sizeof(struct ieee80211_action_rm_neighbor_report_request)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "neighbor report request too small");
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return;
+	}
+	request = (struct ieee80211_action_rm_neighbor_report_request *)ia;
+
+	if (ni->ni_rrm_capability & IEEE80211_NODE_BEACON_TABLE_REPORT_CAPABLE) {
+		/* Response is sent from the beacon report callbacks */
+		ieee80211_create_and_send_neighbor_report_using_beacon_report(ni, request->token);
+	} else {
+		struct ieee80211_neighbor_report_request_item *item_cache = NULL;
+		struct ieee80211_neighbor_report_request_item *item_table[NEIGH_REPORTS_MAX];
+		int num_of_items = 0;
+		int i;
+
+		num_of_items = ieee80211_create_neighbor_reports(ni, &item_cache,
+					NEIGH_REPORTS_MAX, 1);
+		for (i = 0; i < num_of_items; i++)
+			item_table[i] = &item_cache[i];
+		ieee80211_send_neighbor_report_response(ni, request->token,
+			num_of_items, item_table);
+		/* Bug fix: free the base allocation; the original freed the
+		 * pointer advanced past the end of the array. */
+		if (item_cache) {
+			kfree(item_cache);
+		}
+	}
+}
+
+/*
+ * Handle a received 802.11k Neighbor Report Response.  Any previously
+ * cached neighbor items on the node are released, then up to
+ * IEEE80211_RM_NEIGHBOR_REPORT_ITEM_MAX neighbor report IEs are copied
+ * from the frame into ni->ni_neighbor.item_table.  Finally the pending
+ * neighbor request carrying this dialog token is completed.
+ *
+ * NOTE(review): ie->len is read while only one byte is guaranteed to
+ * remain before efrm (frm < efrm); a truncated 1-byte tail would read one
+ * byte past the buffer -- confirm a minimum frame size is guaranteed by
+ * the caller.
+ */
+void ieee80211_recv_action_neighbor_report_response(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211_action_rm_neighbor_report_response *response;
+	struct ieee80211_ie_neighbor_report *ie;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_neighbor_report_item *item;
+	u_int8_t *frm = (u_int8_t *)ia;
+	u_int8_t i;
+
+	if ((efrm - frm) < sizeof(struct ieee80211_action_rm_neighbor_report_response)) {
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "%s", "neighbor report response too small");
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return;
+	}
+	response = (struct ieee80211_action_rm_neighbor_report_response *)frm;
+	frm += sizeof(struct ieee80211_action_rm_neighbor_report_response);
+
+	/* Drop any previously cached neighbor list for this node */
+	if (ni->ni_neighbor.report_count != 0) {
+		for (i = 0; i < ni->ni_neighbor.report_count; i++) {
+			kfree(ni->ni_neighbor.item_table[i]);
+			ni->ni_neighbor.item_table[i] = NULL;
+		}
+		ni->ni_neighbor.report_count = 0;
+	}
+
+	/* One neighbor report IE per iteration; stop on truncation or when
+	 * the node's item table is full */
+	while ((frm < efrm) && (ni->ni_neighbor.report_count < IEEE80211_RM_NEIGHBOR_REPORT_ITEM_MAX)) {
+		ie = (struct ieee80211_ie_neighbor_report *)frm;
+		if ((efrm - frm) < (ie->len + 2))
+			break;
+
+		item = (struct ieee80211_neighbor_report_item *)kmalloc(sizeof(*item), GFP_ATOMIC);
+		if (item == NULL)
+			break;
+
+		memcpy(item->bssid, ie->bssid, IEEE80211_ADDR_LEN);
+		item->bssid_info = ntohl(ie->bssid_info);	/* wire format is network order */
+		item->operating_class = ie->operating_class;
+		item->channel = ie->channel;
+		item->phy_type = ie->phy_type;
+		ni->ni_neighbor.item_table[ni->ni_neighbor.report_count++] = item;
+		frm += ie->len + 2;
+	}
+
+	/* Complete the outstanding neighbor request with this token */
+	ieee80211_ppqueue_remove_with_response(&ni->ni_vap->iv_ppqueue,
+			ni,
+			ia->ia_category,
+			ia->ia_action,
+			response->token);
+}
+
+/*
+ * Dispatch a received 802.11k (Radio Measurement category) action frame to
+ * the handler for its action code; unknown actions are counted as
+ * discarded management frames.
+ */
+void ieee80211_recv_action_11k(struct ieee80211_node *ni,
+		struct ieee80211_action *ia,
+		struct ieee80211_frame *wh,
+		u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (ia->ia_action == IEEE80211_ACTION_R_MEASUREMENT_REQUEST ||
+			ia->ia_action == IEEE80211_ACTION_R_MEASUREMENT_REPORT)
+		ieee80211_recv_action_measure_11k(ni, ia, wh, efrm);
+	else if (ia->ia_action == IEEE80211_ACTION_R_LINKMEASURE_REQUEST)
+		ieee80211_recv_action_link_measure_request(ni, ia, wh, efrm);
+	else if (ia->ia_action == IEEE80211_ACTION_R_LINKMEASURE_REPORT)
+		ieee80211_recv_action_link_measure_report(ni, ia, wh, efrm);
+	else if (ia->ia_action == IEEE80211_ACTION_R_NEIGHBOR_REQUEST)
+		ieee80211_recv_action_neighbor_report_request(ni, ia, wh, efrm);
+	else if (ia->ia_action == IEEE80211_ACTION_R_NEIGHBOR_REPORT)
+		ieee80211_recv_action_neighbor_report_response(ni, ia, wh, efrm);
+	else
+		vap->iv_stats.is_rx_mgtdiscard++;
+}
+
+/*
+ * Decide whether non-ERP (802.11b) protection is needed based on a beacon
+ * received from an overlapping BSS.
+ * @scan: parsed OBSS beacon data
+ * @return TRUE if non-ERP protection is required, FALSE if not required
+ */
+static __inline int is_non_erp_prot_required(struct ieee80211_scanparams *scan)
+{
+	int idx;
+
+#define IS_B_RATE(x) ( (((x) & ~0x80) == 0x02) || (((x) & ~0x80) == 0x04) || \
+		       (((x) & ~0x80) == 0x0b) || (((x) & ~0x80) == 0x16) )
+
+	/* OBSS advertises the presence of a non-ERP station */
+	if (scan->erp & IEEE80211_ERP_NON_ERP_PRESENT)
+		return 1;
+
+	/*
+	 * A b-only AP advertises no extended rates and at most four
+	 * supported rates (1, 2, 5.5, 11 Mbps).  Protection is required
+	 * only when every advertised rate is a B rate.
+	 */
+	if (!scan->xrates && (scan->rates[1] <= 4)) {
+		for (idx = 0; idx < scan->rates[1]; idx++) {
+			if (!IS_B_RATE(scan->rates[idx + 2]))
+				return 0;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Set or clear the station's VHT (802.11ac) mode based on the IEs of a
+ * received association response.  When the peer AP is non-VHT, TX AMSDU
+ * may additionally be disabled as a workaround for strict APs.
+ */
+static void
+ieee80211_input_sta_vht_set(struct ieee80211_node *ni,
+			struct ieee80211vap *vap, uint8_t *vhtcap,
+			uint8_t *vhtop, int vht_is_allowed)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* Non-VHT peer (or VHT not enabled/allowed): clear VHT mode */
+	if (!vhtcap || !IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) || !vht_is_allowed) {
+		ni->ni_flags &= ~IEEE80211_NODE_VHT;
+		/* WAR: Livebox AP is stricter in handling TX AMSDU packets */
+		if (ic->ic_flags_qtn & QTN_NODE_11N_TXAMSDU_OFF)
+			ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_TX_AMSDU, QTN_TX_AMSDU_DISABLED, NULL, 0);
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+				"VHT Disabled: ni_flags = 0x%04x\n",
+				ni->ni_flags);
+		return;
+	}
+
+	/* 802.11ac: record the peer's VHT capability/operation IEs */
+	ni->ni_flags |= IEEE80211_NODE_VHT;
+	ieee80211_parse_vhtcap(ni, vhtcap);
+	if (vhtop)
+		ieee80211_parse_vhtop(ni, vhtop);
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_TX_AMSDU, QTN_TX_AMSDU_ADAPTIVE, NULL, 0);
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+			"VHT Enabled: ni_flags = 0x%04x\n",
+			ni->ni_flags);
+}
+
+/*
+ * Check whether admitting one more station would exceed the association
+ * limits, honouring per-SSID-group reservations held by other groups.
+ * Returns 1 when the limit is reached, 0 otherwise.
+ */
+static int is_assoc_limit_reached(struct ieee80211com *ic, struct ieee80211vap *vap)
+{
+	int grp = vap->iv_ssid_group;
+	int assoc_cnt = ic->ic_sta_assoc - ic->ic_wds_links;
+	int reserved = 0;
+	int spare;
+	int i;
+
+	/* Hard global limit, or this group's own limit, already reached */
+	if (assoc_cnt >= ic->ic_sta_assoc_limit ||
+			ic->ic_ssid_grp[grp].assocs >= ic->ic_ssid_grp[grp].limit)
+		return 1;
+
+	/* Sum the unused reservations held by all other groups */
+	for (i = 0; i < IEEE80211_MAX_BSS_GROUP; i++) {
+		if (i == grp)
+			continue;
+		spare = ic->ic_ssid_grp[i].reserve - ic->ic_ssid_grp[i].assocs;
+		if (spare > 0)
+			reserved += spare;
+	}
+
+	/* Only the capacity not reserved for other groups is available */
+	return assoc_cnt >= (ic->ic_sta_assoc_limit - reserved);
+}
+
+/*
+ * This function is used to verify the HESSID and Access Network Type.
+ * Probe Response is sent only if these parameters are matched or is Wildcard.
+ * Returns 0 on match (respond), -1 on mismatch (do not respond).
+ */
+static int
+ieee80211_verify_interworking(struct ieee80211vap *vap, u_int8_t *interw)
+{
+	struct ieee80211_ie *ie = (struct ieee80211_ie *)interw;
+	u_int8_t interworking_len = ie->len;
+	const u_int8_t *hessid;
+	u_int8_t an_type; /* Access Network Type */
+
+	/*
+	 * Interworking Element
+	 * El.ID | Length | AccessNetworkOpt | VenueInfo     | HESSID
+	 * 1Byte | 1Byte  | 1 Byte	     | 2B (Optional) | 6B (Optional)
+	 */
+
+#define INTERWORKING_ANT_WILDCARD 15
+	/* Low nibble of the Access Network Options octet is the network type */
+	if (interworking_len >= 1) {
+		an_type = ie->info[0] & 0x0f;
+		if (an_type != INTERWORKING_ANT_WILDCARD &&
+				an_type != vap->interw_info.an_type) {
+			return -1;
+		}
+	}
+
+	/* len 7: ANO(1) + HESSID(6); len 9: ANO(1) + VenueInfo(2) + HESSID(6) */
+	if (interworking_len == 7 || interworking_len == 9) {
+		if (interworking_len == 7)
+			hessid = &ie->info[1];
+		else
+			hessid = &ie->info[3];
+
+		/* Compare against the configured HESSID when set; otherwise
+		 * fall back to comparing against our own BSSID.  A broadcast
+		 * HESSID is a wildcard and always matches. */
+		if (!IEEE80211_ADDR_NULL(vap->interw_info.hessid)) {
+			if (!IEEE80211_ADDR_BCAST(hessid) &&
+					!IEEE80211_ADDR_EQ(hessid, vap->interw_info.hessid)) {
+				return -1;
+			}
+		} else if (!IEEE80211_ADDR_EQ(hessid, vap->iv_bss->ni_bssid) &&
+				!IEEE80211_ADDR_BCAST(hessid)) {
+			return -1;
+		}
+	}
+#undef INTERWORKING_ANT_WILDCARD
+
+	return 0;
+}
+
+/*
+ * Reject frames whose transmitter address is a reserved MAC: a deauth with
+ * reason UNSPECIFIED is sent back and 1 is returned; otherwise 0.
+ */
+static int ieee80211_input_mac_reserved(struct ieee80211vap *vap, struct ieee80211com *ic,
+					struct ieee80211_node *ni, struct ieee80211_frame *wh)
+{
+	if (!ic->ic_mac_reserved(wh->i_addr2))
+		return 0;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+		"%s: reject auth req from reserved mac %pM\n", __func__,
+		wh->i_addr2);
+	ieee80211_send_error(ni, wh->i_addr2,
+			IEEE80211_FC0_SUBTYPE_DEAUTH,
+			IEEE80211_REASON_UNSPECIFIED);
+	return 1;
+}
+/*
+ * Extender/repeater CAC shortcut: when a frame is received from the MBS
+ * (for an RBS vap/device) or from the upstream BSS of a repeater whose
+ * primary vap is in RUN/AUTH/ASSOC state, complete the ongoing CAC early
+ * via ic_complete_cac().
+ */
+static void
+ieee80211_extdr_cac_check(struct ieee80211com *ic, struct ieee80211vap *vap, struct ieee80211_frame *wh)
+{
+	struct ieee80211vap *tmp_vap;
+
+	if (IEEE80211_VAP_WDS_IS_RBS(vap) || IEEE80211_COM_WDS_IS_RBS(ic)) {
+		/* RBS: transmitter must be the known MBS */
+		if (IEEE80211_ADDR_EQ(ic->ic_extender_mbs_bssid, wh->i_addr2))
+			ic->ic_complete_cac();
+	} else if (ieee80211_is_repeater(ic)) {
+		/* Repeater: check against the first (STA) vap's BSS */
+		tmp_vap = TAILQ_FIRST(&ic->ic_vaps);
+		if (!tmp_vap)
+			return;
+
+		if (tmp_vap->iv_state == IEEE80211_S_RUN ||
+				tmp_vap->iv_state == IEEE80211_S_AUTH ||
+				tmp_vap->iv_state == IEEE80211_S_ASSOC) {
+			if (IEEE80211_ADDR_EQ(wh->i_addr2, tmp_vap->iv_bss->ni_macaddr))
+				ic->ic_complete_cac();
+		}
+	}
+}
+/*
+ * This function is used to check VHT and HT capabilities presence against
+ * the vap's 11ac/11n-only policy.
+ * returns 1 if the required capability IE is present (mode allowed).
+ * returns 0 if it is absent (mode not allowed).
+ */
+uint8_t
+ieee80211_phy_mode_allowed(struct ieee80211vap *vap, uint8_t *vhtcap, uint8_t *htcap)
+{
+	if (vap->iv_11ac_and_11n_flag == IEEE80211_11AC_ONLY)
+		return (vhtcap != NULL) ? 1 : 0;
+
+	if (vap->iv_11ac_and_11n_flag == IEEE80211_11N_ONLY)
+		return (htcap != NULL) ? 1 : 0;
+
+	/* No restriction configured */
+	return 1;
+}
+
+/*
+ * In case ocac_rx_state is either in BACKOFF or ONGOING state and has not been updated for at least
+ * 1 sec, we should reset it.
+ *
+ * One reason could be that we are no longer able to hear the AP that sent that beacon. We may or
+ * may not hear again from that AP and hence we should forget about that AP and hence reset
+ * ocac_rx_state
+ */
+static void iee80211_check_ocac_rx_state(struct ieee80211com *ic)
+{
+	uint64_t delta, now = jiffies;
+
+	/* Only APs track the off-channel CAC state of neighbours */
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return;
+
+	spin_lock(&ic->ic_ocac.ocac_lock);
+
+	if (ic->ic_ocac.ocac_rx_state.state == OCAC_STATE_NONE) {
+		spin_unlock(&ic->ic_ocac.ocac_lock);
+		return;
+	}
+
+	/* To handle (one time) overflow */
+	/* NOTE(review): on a jiffies wrap the else branch yields the small
+	 * negated difference, not the true elapsed time; the kernel's
+	 * time_after()/time_before() helpers handle wrap exactly -- confirm
+	 * whether this approximation is intentional. */
+	if (now >= ic->ic_ocac.ocac_rx_state.timestamp)
+		delta = now - ic->ic_ocac.ocac_rx_state.timestamp;
+	else
+		delta = ic->ic_ocac.ocac_rx_state.timestamp - now;
+
+	/* Stale for >= 1 second: forget the remembered OCAC state */
+	if (delta >= HZ)
+		memset(&ic->ic_ocac.ocac_rx_state, 0, sizeof(ic->ic_ocac.ocac_rx_state));
+
+	spin_unlock(&ic->ic_ocac.ocac_lock);
+}
+
+/*
+ * Used to save the OCAC State IE from a received beacon frame, if relevant.
+ *
+ * Should be called only if we are an AP.
+ *
+ * ocac_rx_state is reset:
+ * - on channel change
+ * - when we are in the BACKOFF or ONGOING state and no beacon updating
+ *   ocac_rx_state has been received for at least 1 second
+ * - TBD: is there any other case?
+ */
+static void ieee80211_save_rx_ocac_state_ie(struct ieee80211com *ic, uint8_t *ta, uint8_t state,
+					uint8_t param)
+{
+/* True if the two transmitter addresses (TAs) are identical */
+#define QTN_IS_EQ_TA(ta1, ta2)	!memcmp((ta1), (ta2), IEEE80211_ADDR_LEN)
+
+/* Forget the currently recorded OCAC state */
+#define QTN_RESET_RX_STATE	memset(old, 0, sizeof(*old))
+
+/* Record the state just received as the current OCAC state */
+#define QTN_SAVE_RX_STATE	memcpy(old, &new, sizeof(*old))
+
+	struct ieee80211_ocac_rx_state new;
+	struct ieee80211_ocac_rx_state *old = &ic->ic_ocac.ocac_rx_state;
+
+	/* Only an AP tracks OCAC state advertised by neighbouring APs */
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return;
+
+	/* Build the candidate record from the received IE fields */
+	memcpy(new.ta, ta, IEEE80211_ADDR_LEN);
+	new.state = state;
+	new.param = param;
+	new.timestamp = jiffies;
+
+	spin_lock(&ic->ic_ocac.ocac_lock);
+
+	/* State machine keyed on (recorded state, received state) */
+	switch (old->state) {
+	case OCAC_STATE_NONE:
+		switch (new.state) {
+		case OCAC_STATE_NONE:
+			break;
+
+		case OCAC_STATE_BACKOFF:
+		case OCAC_STATE_ONGOING:
+			/* First report of a backoff/ongoing CAC: record it */
+			QTN_SAVE_RX_STATE;
+			break;
+		}
+
+		break;
+
+	case OCAC_STATE_BACKOFF:
+		switch (new.state) {
+		case OCAC_STATE_NONE:
+			/* The AP being tracked reported NONE: forget it */
+			if (QTN_IS_EQ_TA(new.ta, old->ta))
+				QTN_RESET_RX_STATE;
+			break;
+
+		case OCAC_STATE_BACKOFF:
+			/* Keep the report with the smaller backoff parameter */
+			if (new.param < old->param)
+				QTN_SAVE_RX_STATE;
+			break;
+
+		case OCAC_STATE_ONGOING:
+			/* An ongoing CAC takes precedence over a backoff */
+			QTN_SAVE_RX_STATE;
+			break;
+		}
+
+		break;
+
+	case OCAC_STATE_ONGOING:
+		switch (new.state) {
+		case OCAC_STATE_NONE:
+			/* The AP being tracked reported NONE: forget it */
+			if (QTN_IS_EQ_TA(new.ta, old->ta))
+				QTN_RESET_RX_STATE;
+			break;
+
+		case OCAC_STATE_BACKOFF:
+			/* Same AP moved back to backoff: track its new state */
+			if (QTN_IS_EQ_TA(new.ta, old->ta))
+				QTN_SAVE_RX_STATE;
+			break;
+
+		case OCAC_STATE_ONGOING:
+			/* Already tracking an ongoing CAC: ignore others */
+			break;
+		}
+
+		break;
+	}
+
+	spin_unlock(&ic->ic_ocac.ocac_lock);
+}
+
+/*
+ * Handle a CSA IE that announced a channel this device cannot use.
+ *
+ * Returns 0 when the frame came from the BSS this associated non-AP node
+ * belongs to and the VAP has been moved to INIT (callers should stop
+ * processing the frame); returns -1 when no action was taken.
+ */
+int ieee80211_handle_csa_invalid_channel(struct ieee80211_node *ni, struct ieee80211_frame *wh)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	/* An AP does not follow another BSS's channel switch */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		return -1;
+	/* Only react if we are associated and the frame is from our own BSS */
+	if (!ni->ni_associd)
+		return -1;
+	if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid))
+		return -1;
+
+	/* Drop to INIT: disassociate due to the unsupported channel */
+	ieee80211_new_state(vap, IEEE80211_S_INIT, IEEE80211_REASON_DISASSOC_BAD_SUPP_CHAN);
+
+	return 0;
+}
+
+/*
+ * Context: SoftIRQ
+ */
+void
+ieee80211_recv_mgmt(struct ieee80211_node *ni, struct sk_buff *skb,
+	int subtype, int rssi, u_int32_t rstamp)
+{
+#define	ISPROBE(_st)	((_st) == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+#define	ISREASSOC(_st)	((_st) == IEEE80211_FC0_SUBTYPE_REASSOC_RESP)
+#define IEEE80211_OPMODE_NOTIFY_INVALID 0xFF
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_frame *wh;
+	u_int8_t *frm, *efrm;
+	u_int8_t *ssid, *rates, *xrates, *wpa, *rsn, *osen, *wme, *ath, *htcap = NULL, *htinfo = NULL;
+	u_int8_t *mdie = NULL;
+	u_int8_t *ftie = NULL;
+	u_int8_t *rlnk = NULL;
+	u_int8_t *vhtcap = NULL, *vhtop = NULL;
+	u_int8_t *extcap = NULL;
+	u_int8_t *wscie = NULL;
+	uint8_t *opmode_notif_ie = NULL;
+	u_int8_t *interw = NULL;
+	u_int8_t *obss_scan = NULL;
+	struct ieee80211_ie_qtn *qtnie = NULL;
+#ifdef CONFIG_QVSP
+	struct ieee80211_ie_vsp *vspie = NULL;
+#endif
+	u_int8_t *rrm_enabled = NULL;
+	struct ieee80211_ie_qtn_pairing *qtn_pairing_ie = NULL;
+	struct ieee80211_qtn_ext_role *qtn_ext_role = NULL;
+	struct ieee80211_qtn_ext_bssid *qtn_ext_bssid = NULL;
+	struct ieee80211_qtn_ext_state *qtn_ext_state = NULL;
+
+	u_int8_t rate;
+	int reassoc;
+	int resp;
+	int node_reference_held = 0;
+	u_int8_t qosinfo;
+	u_int8_t beacon_update_required = 0;
+	void *bcmie = NULL;
+	void *rtkie = NULL;
+	struct ieee80211_ie_power_capability *pwr_cap;
+	uint8_t *supp_chan_ie = NULL;
+	int8_t	min_txpwr, max_txpwr;
+	int8_t	local_max_txpwr;
+	int arg;
+	int8_t non_erp_present = 0;
+	int sta_pure_tkip = 0;
+
+	wh = (struct ieee80211_frame *) skb->data;
+	frm = (u_int8_t *)&wh[1];
+	efrm = skb->data + skb->len;
+
+	/* forward management frame to application */
+	if (vap->iv_opmode != IEEE80211_M_MONITOR)
+		forward_mgmt_to_app(vap, subtype, skb, wh);
+	if (vap->iv_bss)
+	      sta_pure_tkip = (vap->iv_bss->ni_rsn.rsn_ucastcipher == IEEE80211_CIPHER_TKIP);
+
+	switch (subtype) {
+	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+	case IEEE80211_FC0_SUBTYPE_BEACON: {
+		struct ieee80211_scanparams scan;
+
+		/* Check if we need to reset ocac_rx_state */
+		iee80211_check_ocac_rx_state(ic);
+
+		/*
+		 * When STA disconnects and boots up as AP, the DEAUTH/DISASSOC frame sent may get
+		 * lost and AP can't create a WDS link with it since it's still "associated".
+		 * This may be recovered by force leaving "STA" once we detect it became AP.
+		 */
+		if ((ni->ni_associd != 0) && (ni->ni_node_type == IEEE80211_NODE_TYPE_STA) &&
+				ieee80211_node_is_qtn(ni)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+				"force leave STA %pM as it became AP\n", ni->ni_macaddr);
+			ieee80211_node_leave(ni);
+			return;
+		}
+
+		if (ieee80211_beacon_should_discard(ni)) {
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+
+		/*
+		 * beacon/probe response frame format
+		 *	[8] time stamp
+		 *	[2] beacon interval
+		 *	[2] capability information
+		 *	[tlv] ssid
+		 *	[tlv] supported rates
+		 *	[tlv] country information
+		 *	[tlv] parameter set (FH/DS)
+		 *	[tlv] erp information
+		 *	[tlv] extended supported rates
+		 *	[tlv] power constraint
+		 *	[tlv] WME
+		 *	[tlv] WPA or RSN
+		 *	[tlv] Atheros Advanced Capabilities
+		 *	[tlv] Quantenna flags
+		 */
+		IEEE80211_VERIFY_LENGTH(efrm - frm, 12);
+		memset(&scan, 0, sizeof(scan));
+		scan.tstamp  = frm;
+		frm += 8;
+		scan.bintval = le16toh(*(__le16 *)frm);
+		frm += 2;
+		scan.capinfo = le16toh(*(__le16 *)frm);
+		frm += 2;
+		scan.bchan = ieee80211_chan2ieee(ic, ic->ic_curchan);
+
+		ni->ni_flags &= ~IEEE80211_NODE_TPC;
+		while (frm < efrm) {
+			/* Agere element in beacon */
+			if ((*frm == IEEE80211_ELEMID_AGERE1) ||
+			    (*frm == IEEE80211_ELEMID_AGERE2)) {
+				frm = efrm;
+				continue;
+			}
+
+			IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1]);
+			switch (*frm) {
+			case IEEE80211_ELEMID_SSID:
+				scan.ssid = frm;
+				break;
+			case IEEE80211_ELEMID_RATES:
+				scan.rates = frm;
+				break;
+			case IEEE80211_ELEMID_COUNTRY:
+				scan.country = frm;
+				break;
+			case IEEE80211_ELEMID_PWRCNSTR:
+				scan.pwr_constraint = frm;
+				ni->ni_flags |= IEEE80211_NODE_TPC;
+				break;
+			case IEEE80211_ELEMID_FHPARMS:
+				if (ic->ic_phytype == IEEE80211_T_FH) {
+					scan.fhdwell = LE_READ_2(&frm[2]);
+					scan.chan = IEEE80211_FH_CHAN(frm[4], frm[5]);
+					scan.fhindex = frm[6];
+				}
+				break;
+			case IEEE80211_ELEMID_DSPARMS:
+				/*
+				 * XXX hack this since depending on phytype
+				 * is problematic for multi-mode devices.
+				 */
+				if (ic->ic_phytype != IEEE80211_T_FH)
+					scan.chan = frm[2];
+				break;
+			case IEEE80211_ELEMID_TIM:
+				/* XXX ATIM? */
+				scan.tim = frm;
+				scan.timoff = frm - skb->data;
+				break;
+			case IEEE80211_ELEMID_IBSSPARMS:
+				break;
+			case IEEE80211_ELEMID_XRATES:
+				scan.xrates = frm;
+				break;
+			case IEEE80211_ELEMID_ERP:
+				if (frm[1] != 1) {
+					IEEE80211_DISCARD_IE(vap,
+						IEEE80211_MSG_ELEMID, wh, "ERP",
+						"bad len %u", frm[1]);
+					vap->iv_stats.is_rx_elem_toobig++;
+					break;
+				}
+				scan.erp = frm[2];
+				break;
+			case IEEE80211_ELEMID_RSN:
+				scan.rsn = frm;
+				break;
+			case IEEE80211_ELEMID_OPMOD_NOTIF:
+				opmode_notif_ie = frm;
+				break;
+			case IEEE80211_ELEMID_OBSS_SCAN:
+				scan.obss_scan = frm;
+				break;
+			case IEEE80211_ELEMID_BSS_LOAD:
+				scan.bssload = frm;
+				break;
+			case IEEE80211_ELEMID_VENDOR:
+				if (iswpaoui(frm))
+					scan.wpa = frm;
+				else if (iswmeparam(frm) || iswmeinfo(frm))
+					scan.wme = frm;
+				else if (iswscoui(frm))
+					scan.wsc = frm;
+				else if (isatherosoui(frm))
+					scan.ath = frm;
+				else if (isqtnie(frm))
+					scan.qtn = frm;
+				else if (is_qtn_ext_role_oui(frm))
+					qtn_ext_role = (struct ieee80211_qtn_ext_role *)frm;
+				else if (is_qtn_ext_bssid_oui(frm))
+					qtn_ext_bssid = (struct ieee80211_qtn_ext_bssid *)frm;
+				else if (is_qtn_ext_state_oui(frm))
+					qtn_ext_state = (struct ieee80211_qtn_ext_state *)frm;
+				else if (isqtnpairoui(frm))
+					scan.pairing_ie = frm;
+				else if (isbrcmvhtoui(frm)) {
+					scan.vhtcap = ieee80211_get_vhtcap_from_brcmvht(ni, frm);
+					scan.vhtop = ieee80211_get_vhtop_from_brcmvht(ni, frm);
+				}
+#ifdef CONFIG_QVSP
+				else if (isqtnwmeie(frm)) {
+					/* override standard WME IE */
+					struct ieee80211_ie_qtn_wme *qwme = (struct ieee80211_ie_qtn_wme *)frm;
+					IEEE80211_NOTE(vap, IEEE80211_MSG_WME | IEEE80211_MSG_ELEMID, ni,
+							"%s: found QTN WME IE, version %u\n",
+							__func__, qwme->qtn_wme_ie_version);
+					scan.wme = (uint8_t *)&qwme->qtn_wme_ie;
+				}
+#endif
+				/* Extract the OCAC State IE, if present */
+				else if (is_qtn_ocac_state_ie(frm))
+					ieee80211_save_rx_ocac_state_ie(ic, wh->i_addr3, frm[6],
+						frm[7]);
+				break;
+			case IEEE80211_ELEMID_CHANSWITCHANN:
+				scan.csa = frm;
+				break;
+			case IEEE80211_ELEMID_MEASREQ:
+				scan.measreq = frm;
+				break;
+			case IEEE80211_ELEMID_HTCAP:
+				scan.htcap = frm;
+				break;
+			case IEEE80211_ELEMID_HTINFO:
+				scan.htinfo = frm;
+				// As DS_PARAM IE is optional for 5 GHZ, double check here
+				scan.chan = frm[2];
+				break;
+			case IEEE80211_ELEMID_VHTCAP:
+				scan.vhtcap = frm;
+				break;
+			case IEEE80211_ELEMID_VHTOP:
+				scan.vhtop = frm;
+				break;
+			/* Explicitly ignore some unhandled elements */
+			case IEEE80211_ELEMID_TPCREP:
+				break;
+			case IEEE80211_ELEMID_EXTCAP:
+				extcap = frm;
+				break;
+			case IEEE80211_ELEMID_MOBILITY_DOMAIN:
+				scan.mdie = frm;
+				break;
+			default:
+				IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID,
+					wh, "unhandled",
+					"id %u, len %u", *frm, frm[1]);
+				vap->iv_stats.is_rx_elem_unknown++;
+				break;
+			}
+			frm += frm[1] + 2;
+		}
+		if (frm > efrm)
+			return;
+		IEEE80211_VERIFY_ELEMENT(scan.rates, IEEE80211_RATE_MAXSIZE);
+		IEEE80211_VERIFY_ELEMENT(scan.ssid, IEEE80211_NWID_LEN);
+#if IEEE80211_CHAN_MAX < 255
+		if (scan.chan > IEEE80211_CHAN_MAX) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID,
+				wh, ieee80211_mgt_subtype_name[subtype >>
+					IEEE80211_FC0_SUBTYPE_SHIFT],
+				"invalid channel %u", scan.chan);
+			vap->iv_stats.is_rx_badchan++;
+			return;
+		}
+#endif
+
+		/* beacon channel could be different with current channel,  recorrect it */
+		if (is_channel_valid(scan.chan)) {
+			scan.rxchan = findchannel_any(ic, scan.chan, ic->ic_des_mode);
+			if (!is_ieee80211_chan_valid(scan.rxchan))
+				scan.rxchan = ic->ic_curchan;
+		} else {
+			scan.rxchan = ic->ic_curchan;
+		}
+
+		/* Pure legacy 5GHz APs should not have a channel check. */
+		/* Exception to this is 'BG' APs. */
+		if ((ic->ic_phytype == IEEE80211_T_OFDM) &&
+		    (scan.htcap == NULL) && (scan.htinfo == NULL)
+				&& !(scan.chan)) {
+			scan.chan = scan.bchan = 0;
+		}
+
+		if (scan.chan != scan.bchan &&
+		    ic->ic_phytype != IEEE80211_T_FH) {
+
+			/* The frame may have been received on the previous channel if the
+			 * RX channel has been changed recently.
+			 */
+			u_int8_t older_chan = scan.bchan;
+			scan.rxchan = ic->ic_prevchan;
+			scan.bchan = ieee80211_chan2ieee(ic, ic->ic_prevchan);
+#ifdef QTN_BG_SCAN
+			if (scan.chan != scan.bchan) {
+				if ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) && ic->ic_scanchan) {
+					scan.rxchan = ic->ic_scanchan;
+					scan.bchan = ieee80211_chan2ieee(ic, ic->ic_scanchan);
+				}
+			}
+#endif /* QTN_BG_SCAN */
+#ifdef QSCS_ENABLED
+			if (scan.chan != scan.bchan) {
+				if (ic->ic_scs.scs_smpl_enable) {
+					scan.rxchan = &ic->ic_channels[ic->ic_scs.scs_des_smpl_chan];
+					scan.bchan = ieee80211_chan2ieee(ic, scan.rxchan);
+				}
+			}
+#endif /* QSCS_ENABLED */
+			if (scan.chan != scan.bchan) {
+				/*
+				 * Frame was received on a channel different from the
+				 * one indicated in the DS params element id;
+				 * silently discard it.
+				 *
+				 * NB: this can happen due to signal leakage.
+				 *     But we should take it for FH phy because
+				 *     the rssi value should be correct even for
+				 *     different hop pattern in FH.
+				 */
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID,
+						wh, ieee80211_mgt_subtype_name[subtype >>
+						IEEE80211_FC0_SUBTYPE_SHIFT],
+						"for off-channel (cur chan:%u, bcn chan:%u last chan:%u)\n",
+						older_chan, scan.chan, scan.bchan);
+				vap->iv_stats.is_rx_chanmismatch++;
+				return;
+			} else {
+				IEEE80211_DPRINTF(vap,
+						IEEE80211_MSG_ELEMID,
+						"accepted late bcn (cur chan:%u, bcn chan:%u last chan:%u)\n",
+						older_chan, scan.chan, scan.bchan);
+			}
+
+		}
+
+		if ((vap->iv_opmode == IEEE80211_M_STA) && (scan.rxchan == ic->ic_curchan)) {
+			del_timer_sync(&ic->sta_dfs_info.sta_silence_timer);
+			ic->ic_enable_xmit(ic);
+		}
+
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC) &&
+				(scan.country != NULL) && (scan.pwr_constraint != NULL)) {
+			ieee80211_parse_local_max_txpwr(vap, &scan);
+		}
+
+		ieee80211_extender_process(ni, qtn_ext_role, qtn_ext_bssid, qtn_ext_state, &scan, wh, rssi);
+
+		if (IEEE80211_IS_CHAN_CAC_IN_PROGRESS(ic->ic_curchan) && scan.csa == NULL)
+			ieee80211_extdr_cac_check(ic, vap, wh);
+
+		/* IEEE802.11 does not specify the allowed range for
+		 * beacon interval. We discard any beacons with a
+		 * beacon interval outside of an arbitrary range in
+		 * order to protect against attack.
+		 *
+		 * NB: Discarding beacon directly maybe not a good solution.
+		 * It will lead to some IOT issues with AP whose beacon interval is not in this range,
+		 * although most of AP will not set beacon interval out of this range.
+		 *
+		 */
+		if (!(IEEE80211_BINTVAL_MIN <= scan.bintval &&
+		     scan.bintval <= IEEE80211_BINTVAL_MAX)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_SCAN,
+				wh, "beacon", "invalid beacon interval (%u)",
+				scan.bintval);
+			return;
+		}
+
+		/*
+		 * Count frame now that we know it's to be processed.
+		 */
+		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
+			IEEE80211_NODE_STAT(ni, rx_beacons);
+		else
+			IEEE80211_NODE_STAT(ni, rx_proberesp);
+
+		if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR) {
+			ni->ni_intval = scan.bintval;
+			if (scan.csa) {
+				ieee80211_parse_csaie(ni, scan.csa, scan.csa_tsf, wh);
+			}
+			return;
+		}
+
+		/*
+		 * When operating in station mode, check for state updates.
+		 * Be careful to ignore beacons received while doing a
+		 * background scan.  We consider only 11g/WMM stuff right now.
+		 */
+		if (vap->iv_opmode == IEEE80211_M_STA &&
+		    ni->ni_associd != 0 &&
+		    IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid)) {
+			/* record tsf of last beacon */
+			memcpy(ni->ni_tstamp.data, scan.tstamp,
+				sizeof(ni->ni_tstamp));
+			if (ni->ni_intval != scan.bintval) {
+				IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+						"beacon interval divergence: was %u, now %u",
+						ni->ni_intval, scan.bintval);
+				if (!ni->ni_intval_end) {
+					int msecs = 0; /* silence compiler */
+					ni->ni_intval_cnt = 0;
+					ni->ni_intval_old = ni->ni_intval;
+					msecs = (ni->ni_intval_old * 1024 * 10) / 1000;
+					ni->ni_intval_end = jiffies + msecs_to_jiffies(msecs);
+					IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+							"scheduling beacon interval measurement for %u msecs",
+							msecs);
+				}
+				if (scan.bintval > ni->ni_intval) {
+					ni->ni_intval = scan.bintval;
+					vap->iv_flags_ext |= IEEE80211_FEXT_APPIE_UPDATE;
+				}
+				/* XXX statistic */
+			}
+			if (ni->ni_intval_end) {
+				if (scan.bintval == ni->ni_intval_old)
+					ni->ni_intval_cnt++;
+				if (!time_before(jiffies, ni->ni_intval_end)) {
+					IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+							"beacon interval measurement finished, old value repeated: %u times",
+							ni->ni_intval_cnt);
+					ni->ni_intval_end = 0;
+					if (ni->ni_intval_cnt == 0) {
+						IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+								"reprogramming bmiss timer from %u to %u",
+								ni->ni_intval_old, scan.bintval);
+						ni->ni_intval = scan.bintval;
+						vap->iv_flags_ext |= IEEE80211_FEXT_APPIE_UPDATE;
+					} else {
+						IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+								"ignoring the divergence (maybe someone tried to spoof the AP?)", 0);
+					}
+				}
+				/* XXX statistic */
+			}
+
+
+			/* update transmit power if necessary */
+			if ((ic->ic_flags & IEEE80211_F_DOTH) &&
+					(ic->ic_flags_ext & IEEE80211_FEXT_TPC) &&
+					(scan.pwr_constraint != NULL) &&
+					(vap->iv_local_max_txpow != scan.local_max_txpwr)) {
+				if ((scan.local_max_txpwr >= ni->ni_chan->ic_maxpower_normal) &&
+						(vap->iv_local_max_txpow < ni->ni_chan->ic_maxpower_normal)) {
+					vap->iv_local_max_txpow = ni->ni_chan->ic_maxpower_normal;
+				} else if ((scan.local_max_txpwr <= ni->ni_chan->ic_minpower_normal) &&
+						(vap->iv_local_max_txpow > ni->ni_chan->ic_minpower_normal)) {
+					vap->iv_local_max_txpow = ni->ni_chan->ic_minpower_normal;
+				} else if (scan.local_max_txpwr < ni->ni_chan->ic_maxpower_normal &&
+						(scan.local_max_txpwr > ni->ni_chan->ic_minpower_normal)){
+					vap->iv_local_max_txpow = scan.local_max_txpwr;
+				} else {
+					/* do nothing */
+				}
+				ieee80211_update_tx_power(ic, vap->iv_local_max_txpow);
+			}
+
+			if (ni->ni_erp != scan.erp) {
+				IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+					"erp change: was 0x%x, now 0x%x",
+					ni->ni_erp, scan.erp);
+				if (IEEE80211_BG_PROTECT_ENABLED(ic) && (scan.erp & IEEE80211_ERP_USE_PROTECTION)) {
+					ic->ic_flags |= IEEE80211_F_USEPROT;
+					/* tell Muc to use ERP cts-to-self mechanism now */
+					ic->ic_set_11g_erp(vap, 1);
+				} else {
+					ic->ic_flags &= ~IEEE80211_F_USEPROT;
+					/* tell Muc to turn off ERP now */
+					ic->ic_set_11g_erp(vap, 0);
+				}
+				ni->ni_erp = scan.erp;
+				/* XXX statistic */
+			}
+			if ((ni->ni_capinfo ^ scan.capinfo) & IEEE80211_CAPINFO_SHORT_SLOTTIME) {
+				IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+					"capabilities change: was 0x%x, now 0x%x",
+					ni->ni_capinfo, scan.capinfo);
+				/*
+				 * NB: we assume short preamble doesn't
+				 *     change dynamically
+				 */
+				ieee80211_set_shortslottime(ic,
+					IEEE80211_IS_CHAN_A(ic->ic_bsschan) ||
+					(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME));
+				ni->ni_capinfo = scan.capinfo;
+				/* XXX statistic */
+			}
+			if (scan.wme != NULL &&
+			    (ni->ni_flags & IEEE80211_NODE_QOS)) {
+				int _retval;
+				if ((_retval = ieee80211_parse_wmeparams(vap, scan.wme, wh, &qosinfo)) >= 0) {
+					if (qosinfo & WME_CAPINFO_UAPSD_EN)
+						ni->ni_flags |= IEEE80211_NODE_UAPSD;
+					if (_retval > 0)
+						ieee80211_wme_updateparams(vap, 0);
+				}
+			} else {
+				ni->ni_flags &= ~IEEE80211_NODE_UAPSD;
+			}
+			if (scan.ath != NULL)
+				ieee80211_parse_athParams(ni, scan.ath);
+			if (scan.csa != NULL) {
+				if (QTN_CSAIE_ERR_CHAN_NOT_SUPP == ieee80211_parse_csaie(ni, scan.csa, scan.csa_tsf, wh))
+					if (!ieee80211_handle_csa_invalid_channel(ni, wh))
+						return;
+			}
+			/* 11n */
+			if (scan.htcap) {
+				ieee80211_parse_htcap(ni, scan.htcap);
+			}
+			if (scan.htinfo) {
+				ieee80211_parse_htinfo(ni, scan.htinfo);
+				if ((ic->ic_opmode == IEEE80211_M_STA) &&
+					(ic->ic_20_40_coex_enable) &&
+					IEEE80211_IS_11NG_40(ic) && vap->iv_bss &&
+					IEEE80211_ADDR_EQ(ni->ni_macaddr, vap->iv_bss->ni_macaddr) &&
+					(!ni->ni_htinfo.choffset)) {
+					ieee80211_change_bw(vap, BW_HT20, 0);
+					ic->ic_coex_stats_update(ic, WLAN_COEX_STATS_BW_SCAN);
+				}
+			}
+			/* 802.11ac */
+			if (scan.vhtcap && IS_IEEE80211_DUALBAND_VHT_ENABLED(ic)) {
+				ieee80211_check_and_parse_vhtcap(ni, scan.vhtcap);
+			}
+			if (scan.vhtop && IS_IEEE80211_DUALBAND_VHT_ENABLED(ic)) {
+				ieee80211_parse_vhtop(ni, scan.vhtop);
+			}
+			if (scan.measreq) {
+				ieee80211_parse_measinfo(ni, scan.measreq);
+			}
+
+			if (scan.obss_scan) {
+				memset(&ni->ni_obss_ie, 0, sizeof(struct ieee80211_obss_scan_ie));
+				memcpy(&ni->ni_obss_ie, scan.obss_scan,
+						sizeof(struct ieee80211_obss_scan_ie));
+			}
+
+			if (scan.tim != NULL) {
+				/*
+				 * Check the TIM. For now we drop out of
+				 * power save mode for any reason.
+				 */
+				struct ieee80211_tim_ie *tim =
+				    (struct ieee80211_tim_ie *) scan.tim;
+				int aid = IEEE80211_AID(ni->ni_associd);
+				int ix = aid / NBBY;
+				int min = tim->tim_bitctl & ~1;
+				int max = tim->tim_len + min - 4;
+				if (min <= ix && ix <= max &&
+						isset(tim->tim_bitmap - min, aid)) {
+					ieee80211_sta_pwrsave(vap, 0);
+					vap->iv_ap_buffered = 1;
+				} else {
+					vap->iv_ap_buffered = 0;
+				}
+				vap->iv_dtim_count = tim->tim_count;
+			}
+
+			ieee80211_update_tbtt(vap, ni);
+
+			/* WDS/Repeater: re-schedule software beacon timer for STA */
+			if (vap->iv_state == IEEE80211_S_RUN &&
+			    vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) {
+#if defined(QBMPS_ENABLE)
+				if (vap->iv_swbmiss_bmps_warning) {
+					/* if previously BMPS detects swbmiss */
+					/* it will disable power-saving temporary */
+					/* to help beacon RX */
+					/* now it is time to reenable power-saving */
+					vap->iv_swbmiss_bmps_warning = 0;
+			                ic->ic_pm_reason = IEEE80211_PM_LEVEL_SWBCN_MISS;
+					ieee80211_pm_queue_work(ic);
+				}
+#endif
+				vap->iv_swbmiss_warnings = IEEE80211_SWBMISS_WARNINGS;
+				mod_timer(&vap->iv_swbmiss, jiffies + vap->iv_swbmiss_period);
+			}
+
+			if (opmode_notif_ie) {
+				struct ieee80211_ie_vhtop_notif *ie =
+						(struct ieee80211_ie_vhtop_notif *)opmode_notif_ie;
+				int cur_opnode;
+				uint8_t opmode;
+				
+				ieee80211_param_from_qdrv(vap, IEEE80211_PARAM_NODE_OPMODE, &cur_opnode, NULL, NULL);
+				opmode = recalc_opmode(ni, ie->vhtop_notif_mode);
+				if (cur_opnode != opmode) {
+					ieee80211_param_to_qdrv(ni->ni_vap, IEEE80211_PARAM_NODE_OPMODE,
+							opmode, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+					ni->ni_vhtop_notif_mode = ie->vhtop_notif_mode;
+				}
+			}
+
+			/*
+			 * If scanning, pass the info to the scan module.
+			 * Otherwise, check if it's the right time to do
+			 * a background scan.  Background scanning must
+			 * be enabled and we must not be operating in the
+			 * turbo phase of dynamic turbo mode.  Then,
+			 * it's been a while since the last background
+			 * scan and if no data frames have come through
+			 * recently, kick off a scan.  Note that this
+			 * is the mechanism by which a background scan
+			 * is started _and_ continued each time we
+			 * return on-channel to receive a beacon from
+			 * our ap.
+			 */
+			if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+			|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+			) {
+				ieee80211_add_scan(vap, &scan, wh,
+					subtype, rssi, rstamp);
+			} else if (contbgscan(vap) || startbgscan(vap)) {
+				ieee80211_bg_scan(vap);
+			}
+			if (extcap != NULL)
+				ieee80211_parse_extcap(ni, extcap, wh->i_addr3);
+			return;
+		}
+
+		/*
+		 * If scanning, pass information to the scan module.
+		 */
+		if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+			|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+		) {
+			/* In two cases station scan list will be updated.
+			 * 1. when 11ac_and 11n flag is set and only vht or ht capabilities
+			 *	are present in Beacon/Probe response.
+			 * 2. When 11ac_and_11n flag is not set.
+			 */
+			if (ieee80211_phy_mode_allowed(vap, scan.vhtcap, scan.htcap)) {
+				ieee80211_add_scan(vap, &scan, wh, subtype, rssi, rstamp);
+			}
+
+			return;
+		}
+
+#ifdef QSCS_ENABLED
+		if (ic->ic_scs.scs_smpl_enable)
+			ieee80211_add_scs_off_chan(vap, &scan, wh, subtype, rssi, rstamp);
+#endif
+
+		if (vap->iv_opmode == IEEE80211_M_WDS) {
+			int return_val = 0;
+
+			ieee80211_update_wds_peer_node(ni, &scan);
+
+			if (unlikely(scan.csa != NULL) && IEEE80211_VAP_WDS_IS_RBS(vap))
+				return_val = ieee80211_parse_csaie(ni, scan.csa, scan.csa_tsf, wh);
+			if (QTN_CSAIE_ERR_CHAN_NOT_SUPP == return_val)
+				if (!ieee80211_handle_csa_invalid_channel(ni, wh))
+					return;
+
+			return;
+		}
+
+		/* check beacon for non-ERP and non-HT non member protection mode
+		 * non-ERP protection: OBSS non-ERP protection is set when
+		 * a) non-ERP present bit is set in the OBSS AP
+		 * b) B only AP is present
+		 * non-HT non member protection is set when there is non HT BSS.
+		 * Timer is used to reset the protection parameters after last
+		 * non-ERP AP or non-HT BSS goes away.
+		 */
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			uint8_t nonht_obss;
+			struct ieee80211_ie_htinfo *ht = (struct ieee80211_ie_htinfo *) scan.htinfo;
+			nonht_obss = ((scan.htcap == NULL) || (ht == NULL) ||
+				((ht->hi_byte2 & IEEE80211_HTINFO_OPMODE_HT_PROT_MIXED) ==
+				IEEE80211_HTINFO_OPMODE_HT_PROT_MIXED));
+
+			if (nonht_obss) {
+				if ((ic->ic_curmode == IEEE80211_MODE_11NG_HT40PM) &&
+						(ic->ic_20_40_coex_enable)) {
+					ieee80211_change_bw(vap, BW_HT20, 0);
+					ic->ic_coex_stats_update(ic, WLAN_COEX_STATS_BW_SCAN);
+				}
+
+				/*Legacy AP is present */
+				if (ic->ic_non_ht_non_member == 0) {
+					/* First non-HT AP beacon received */
+					beacon_update_required = 1;
+					ic->ic_non_ht_non_member = 1;
+					vap->iv_ht_flags |= IEEE80211_HTF_HTINFOUPDATE;
+				}
+			}
+
+			/* Check non-ERP protection in 2 GHz band */
+			if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
+				non_erp_present = is_non_erp_prot_required(&scan);
+
+				if (IEEE80211_BG_PROTECT_ENABLED(ic)
+						&& non_erp_present
+						&& !(ic->ic_flags & IEEE80211_F_USEPROT)) {
+					/* First OBSS non-ERP AP beacon received */
+					/* Set Use_Protection in ERP IE */
+					ic->ic_flags |= IEEE80211_F_USEPROT;
+
+					/* To call ieee80211_add_erp function */
+					ic->ic_flags_ext |= IEEE80211_FEXT_ERPUPDATE;
+					beacon_update_required = 1;
+					/* tell Muc to use ERP cts-to-self mechanism now */
+					ic->ic_set_11g_erp(vap, 1);
+				}
+			}
+
+			if (vap->iv_state == IEEE80211_S_RUN) {
+				/* Update beacon */
+				if(beacon_update_required)
+					ic->ic_beacon_update(vap);
+
+				if (nonht_obss) {
+					mod_timer(&vap->iv_swbmiss,
+						jiffies + vap->iv_swbmiss_period);
+				}
+
+				if (non_erp_present) {
+					mod_timer(&vap->iv_swberp,
+						jiffies + vap->iv_swberp_period);
+				}
+			}
+		}
+
+		if ((vap->iv_opmode == IEEE80211_M_IBSS) &&
+				(scan.capinfo & IEEE80211_CAPINFO_IBSS)) {
+			if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) {
+				/* Create a new entry in the neighbor table. */
+				ni = ieee80211_add_neighbor(vap, wh, &scan);
+				node_reference_held = 1;
+			} else {
+				/*
+				 * Copy data from beacon to neighbor table.
+				 * Some of this information might change after
+				 * ieee80211_add_neighbor(), so we just copy
+				 * everything over to be safe.
+				 */
+				ni->ni_esslen = scan.ssid[1];
+				memcpy(ni->ni_essid, scan.ssid + 2, scan.ssid[1]);
+				IEEE80211_ADDR_COPY(ni->ni_bssid, wh->i_addr3);
+				memcpy(ni->ni_tstamp.data, scan.tstamp,
+					sizeof(ni->ni_tstamp));
+				ni->ni_intval = IEEE80211_BINTVAL_SANITISE(scan.bintval);
+				ni->ni_capinfo = scan.capinfo;
+				ni->ni_chan = ic->ic_curchan;
+				ni->ni_fhdwell = scan.fhdwell;
+				ni->ni_fhindex = scan.fhindex;
+				ni->ni_erp = scan.erp;
+				ni->ni_timoff = scan.timoff;
+				if (scan.wme != NULL)
+					ieee80211_saveie(&ni->ni_wme_ie, scan.wme);
+				if (scan.wpa != NULL)
+					ieee80211_saveie(&ni->ni_wpa_ie, scan.wpa);
+				if (scan.rsn != NULL)
+					ieee80211_saveie(&ni->ni_rsn_ie, scan.rsn);
+				if (scan.wsc != NULL)
+					ieee80211_saveie(&ni->ni_wsc_ie, scan.wsc);
+				if (scan.ath != NULL)
+					ieee80211_saveath(ni, scan.ath);
+
+				/* NB: must be after ni_chan is setup */
+				ieee80211_setup_rates(ni, scan.rates,
+					scan.xrates, IEEE80211_F_DOSORT);
+			}
+			if (ni != NULL) {
+				ni->ni_rssi = rssi;
+				ni->ni_rstamp = rstamp;
+				ni->ni_last_rx = jiffies;
+				if (node_reference_held) {
+					ieee80211_free_node(ni);
+				}
+			}
+		}
+		break;
+	}
+
+	case IEEE80211_FC0_SUBTYPE_PROBE_REQ: {
+		if (vap->iv_opmode == IEEE80211_M_STA ||
+		    vap->iv_opmode == IEEE80211_M_AHDEMO ||
+		    vap->iv_opmode == IEEE80211_M_WDS ||
+		    vap->iv_state != IEEE80211_S_RUN ||
+		    vap->is_block_all_assoc) {
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+
+#if defined(PLATFORM_QFDR)
+		if (ic->ic_reject_auth & QFDR_F_IGNORE_PROBE_REQ)
+			return;
+#endif
+
+		if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh->i_addr2)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL,
+					wh, "Probe Req", "%s", "disallowed by ACL");
+			vap->iv_stats.is_rx_acl++;
+			return;
+		}
+
+		/*
+		 * prreq frame format
+		 *	[tlv] ssid
+		 *	[tlv] supported rates
+		 *	[tlv] extended supported rates
+		 *      [tlv] Atheros Advanced Capabilities
+		 */
+		ssid = rates = xrates = ath = NULL;
+		while (frm < efrm) {
+			IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1]);
+			switch (*frm) {
+			case IEEE80211_ELEMID_SSID:
+				/* WAR: Null-paddings are interpreted as null SSID IEs */
+				ssid = ssid ? ssid : frm;
+				break;
+			case IEEE80211_ELEMID_RATES:
+				rates = frm;
+				break;
+			case IEEE80211_ELEMID_HTCAP:
+				htcap = frm;
+				break;
+			case IEEE80211_ELEMID_VHTCAP:
+				vhtcap = frm;
+				break;
+			case IEEE80211_ELEMID_XRATES:
+				xrates = frm;
+				break;
+			case IEEE80211_ELEMID_INTERWORKING:
+				interw = frm;
+				break;
+			case IEEE80211_ELEMID_VENDOR:
+				if (isatherosoui(frm))
+					ath = frm;
+				/* XXX Atheros OUI support */
+				break;
+			}
+			frm += frm[1] + 2;
+		}
+		if (frm > efrm)
+			return;
+
+
+		IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE);
+		IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN);
+		if (ieee80211_verify_ssid(vap, ni, wh, ssid, subtype) ==
+				IEEE80211_VERIFY_SSID_ACTION_RETURN) {
+			return;
+		}
+
+		if (vap->interworking && interw != NULL) {
+			if (ieee80211_verify_interworking(vap, interw))
+				return;
+		}
+
+		if (ni == vap->iv_bss) {
+			if (vap->iv_opmode == IEEE80211_M_IBSS) {
+				/*
+				 * XXX Cannot tell if the sender is operating
+				 * in ibss mode.  But we need a new node to
+				 * send the response so blindly add them to the
+				 * neighbor table.
+				 */
+				ni = ieee80211_fakeup_adhoc_node(vap,
+					wh->i_addr2);
+			} else {
+				ni = ieee80211_tmp_node(vap, wh->i_addr2);
+			}
+			if (ni == NULL) {
+				return;
+			}
+			node_reference_held = 1;
+		}
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+			"%s", "recv probe req");
+		ni->ni_rssi = rssi;
+		ni->ni_rstamp = rstamp;
+		ni->ni_last_rx = jiffies;
+		rate = ieee80211_setup_rates(ni, rates, xrates,
+			IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE |
+			IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
+		if (rate & IEEE80211_RATE_BASIC) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_XRATE,
+				wh, ieee80211_mgt_subtype_name[subtype >>
+					IEEE80211_FC0_SUBTYPE_SHIFT],
+				"%s", "recv'd rate set invalid");
+		} else {
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+			if (vap->bsa_status == BSA_STATUS_ACTIVE) {
+				ieee80211_bsa_probe_event_send(vap, skb, wh->i_addr3,wh->i_addr2,
+								rssi);
+				if (ieee80211_bsa_macfilter_check(vap, wh->i_addr2))
+					return;
+			}
+#endif
+			IEEE80211_SEND_MGMT(ni,
+				IEEE80211_FC0_SUBTYPE_PROBE_RESP,
+				ssid[1] == 0);
+		}
+		if (node_reference_held) {
+			ieee80211_free_node(ni);
+		} else if (ath != NULL)
+			ieee80211_saveath(ni, ath);
+		break;
+	}
+
+	case IEEE80211_FC0_SUBTYPE_AUTH: {
+		u_int16_t algo, seq, status;
+		/*
+		 * auth frame format
+		 *	[2] algorithm
+		 *	[2] sequence
+		 *	[2] status
+		 *	[tlv*] challenge
+		 */
+		IEEE80211_VERIFY_LENGTH(efrm - frm, 6);
+		algo   = le16toh(*(__le16 *)frm);
+		seq    = le16toh(*(__le16 *)(frm + 2));
+		status = le16toh(*(__le16 *)(frm + 4));
+
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2,
+			"recv auth frame with algorithm %d seq %d", algo, seq);
+
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)
+					&& (ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS)
+					&& (!IEEE80211_IS_CHAN_CACDONE(ic->ic_curchan))) {
+				IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2,
+					"During DFS CAC period(channel %3d), reject auth frame", ic->ic_curchan->ic_ieee);
+				return;
+			}
+			if (unlikely(ieee80211_input_mac_reserved(vap, ic, ni, wh)))
+				return;
+			if (vap->is_block_all_assoc) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH,
+					wh, "auth", "%s", "Dropped due to BSS is set to block all assoc");
+				IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_STATUS_DENIED);
+				return;
+			}
+
+#if defined(PLATFORM_QFDR)
+			if (ic->ic_reject_auth & QFDR_F_REJECT_AUTH) {
+				IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH,
+					wh, "auth", "%s", "Rejected auth frame");
+				ieee80211_send_error(ni, wh->i_addr2,
+					IEEE80211_FC0_SUBTYPE_AUTH,
+					(seq+1) | (IEEE80211_STATUS_TOOMANY << 16));
+				return;
+			}
+#endif /* PLATFORM_QFDR */
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+			if (vap->bsa_status == BSA_STATUS_ACTIVE) {
+				if (ieee80211_bsa_macfilter_check(vap, wh->i_addr2)) {
+					IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_STATUS_DENIED);
+					return;
+				}
+			}
+#endif
+		}
+
+		/* Consult the ACL policy module if set up */
+		if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh->i_addr2)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL,
+				wh, "auth", "%s", "disallowed by ACL");
+			vap->iv_stats.is_rx_acl++;
+			ieee80211_eventf(vap->iv_dev, "%s[WLAN access denied] from MAC: %pM", QEVT_ACL_PREFIX, wh->i_addr2);
+			return;
+		} else {
+			ieee80211_eventf(vap->iv_dev, "%s[WLAN access allowed] from MAC: %pM", QEVT_ACL_PREFIX, wh->i_addr2);
+		}
+		if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+			IEEE80211_DISCARD(vap,
+				IEEE80211_MSG_AUTH | IEEE80211_MSG_CRYPTO,
+				wh, "auth", "%s", "TKIP countermeasures enabled");
+			vap->iv_stats.is_rx_auth_countermeasures++;
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				/* This will include broadcast deauth frame queued on BSS node */
+				IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_MIC_FAILURE);
+			}
+			return;
+		}
+
+		if (algo == IEEE80211_AUTH_ALG_SHARED)
+			ieee80211_auth_shared(ni, wh, frm + 6, efrm, rssi,
+				rstamp, seq, status);
+		else if (algo == IEEE80211_AUTH_ALG_OPEN) {
+			ieee80211_auth_open(ni, wh, rssi, rstamp, seq, status);
+			if (vap->iv_mdid)
+				forward_mgmt_to_app_for_further_processing(vap, subtype, skb, wh);
+		} else if (algo == IEEE80211_AUTH_ALG_FT) {
+			uint8_t cap = 0;
+			uint16_t mdid = 0;
+			frm += 6;
+			/* Parse the auth req frame */
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+				"[%pM] FT auth request\n", wh->i_addr2);
+			while (frm < efrm) {
+				IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1]);
+				switch (*frm) {
+				case IEEE80211_ELEMID_MOBILITY_DOMAIN:
+					if (frm[1] != IEEE80211_MDIE_LEN) {
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH, wh,
+							"auth", "wrong len of MDID in auth req %d",
+							frm[1]);
+						return;
+					}
+					mdid = le16toh(*(u_int16_t *)(&frm[2]));
+					if (mdid != ni->ni_vap->iv_mdid) {
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH, wh,
+							"wrong", "mdid in auth %d, expected %d",
+							mdid, ni->ni_vap->iv_mdid);
+
+						return;
+					}
+					/* extract the ft policy and ft capability */
+					cap = frm[4];
+					ni->ni_ft_capability = cap;
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+						"[%pM] MDIE in the auth with the cap as %d, mdid %d \n",
+						wh->i_addr2, cap, mdid);
+					break;
+				default:
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,"[%pM] Received IE %d\n",
+						wh->i_addr2,*frm);
+					break;
+				}
+				frm += frm[1] + 2;
+			}
+			if (mdid == ni->ni_vap->iv_mdid) {
+				if (ni == vap->iv_bss) {
+					ni = ieee80211_dup_bss(vap, wh->i_addr2);
+					ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
+					ieee80211_free_node(ni);
+				}
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+					"[%pM] FT auth request sending up\n", wh->i_addr2);
+				forward_mgmt_to_app_for_further_processing(vap, subtype, skb, wh);
+			}
+		} else {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, "auth", "unsupported alg %d", algo);
+			vap->iv_stats.is_rx_auth_unsupported++;
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				/* XXX not right */
+				ieee80211_send_error(ni, wh->i_addr2,
+					IEEE80211_FC0_SUBTYPE_AUTH,
+					(seq+1) | (IEEE80211_STATUS_ALG << 16));
+			}
+			return;
+		}
+		ieee80211_off_channel_suspend(vap, IEEE80211_OFFCHAN_TIMEOUT_AUTH);
+		ni->ni_used_auth_algo = algo;
+		break;
+	}
+
+	case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+	case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: {
+		uint16_t capinfo;
+		uint16_t bintval;
+		struct ieee80211_rsnparms rsn_parm;
+		uint8_t reason;
+		int error = 0;
+		uint8_t interworking_ie_present = 0;
+		struct ieee80211_20_40_coex_param *coex = NULL;
+
+		enum ieee80211_verify_ssid_action ssid_verify_action;
+
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP ||
+		    vap->iv_state != IEEE80211_S_RUN) {
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+		if (subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
+			reassoc = 1;
+			resp = IEEE80211_FC0_SUBTYPE_REASSOC_RESP;
+		} else {
+			reassoc = 0;
+			resp = IEEE80211_FC0_SUBTYPE_ASSOC_RESP;
+		}
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+		if (vap->bsa_status == BSA_STATUS_ACTIVE) {
+			if (ieee80211_bsa_macfilter_check(vap, wh->i_addr2)) {
+				IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_DENIED);
+				return;
+			}
+		}
+#endif
+
+		if (vap->is_block_all_assoc) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, ieee80211_mgt_subtype_name[subtype >>
+					IEEE80211_FC0_SUBTYPE_SHIFT],
+				"%s", "BSS is blocked for all association request");
+			vap->iv_stats.is_rx_assoc_bss++;
+			IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_DENIED);
+			return;
+		}
+		/*
+		 * asreq frame format
+		 *	[2] capability information
+		 *	[2] listen interval
+		 *	[6*] current AP address (reassoc only)
+		 *	[tlv] ssid
+		 *	[tlv] supported rates
+		 *	[tlv] extended supported rates
+		 *	[tlv] power capability
+		 *	[tlv] supported channels
+		 *	[tlv] wpa or RSN
+		 *	[tlv] WME
+		 *	[tlv] Atheros Advanced Capabilities
+		 *	[tlv] Quantenna flags
+		 */
+		IEEE80211_VERIFY_LENGTH(efrm - frm, (reassoc ? 10 : 4));
+		if (!IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_bss->ni_bssid)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+				wh, ieee80211_mgt_subtype_name[subtype >>
+					IEEE80211_FC0_SUBTYPE_SHIFT],
+				"%s", "wrong bssid");
+			vap->iv_stats.is_rx_assoc_bss++;
+			mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+			return;
+		}
+		if (vap->iv_pmf &&
+			RSN_IS_MFP(ni->ni_rsn.rsn_caps) &&
+			(ni->ni_associd) &&
+			(!ni->ni_sa_query_timeout)) {
+
+			IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_PMF_REJECT_RETRY);
+			ieee80211_send_sa_query(ni, IEEE80211_ACTION_W_SA_QUERY_REQ, ++ni->ni_sa_query_tid);
+			return;
+		}
+		capinfo = le16toh(*(__le16 *)frm);
+		frm += 2;
+		bintval = le16toh(*(__le16 *)frm);
+		frm += 2;
+		if (reassoc)
+			frm += 6;	/* ignore current AP info */
+		ssid = rates = xrates = wpa = rsn = osen = wme = ath = NULL;
+		pwr_cap = NULL;
+		ni->ni_flags &= ~IEEE80211_NODE_TPC;
+		while (frm < efrm) {
+			IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1]);
+			switch (*frm) {
+			case IEEE80211_ELEMID_SSID:
+				ssid = frm;
+				break;
+			case IEEE80211_ELEMID_RATES:
+				rates = frm;
+				break;
+			case IEEE80211_ELEMID_XRATES:
+				xrates = frm;
+				break;
+			case IEEE80211_ELEMID_PWRCAP:
+				pwr_cap = (struct ieee80211_ie_power_capability *)frm;
+				ni->ni_flags |= IEEE80211_NODE_TPC;
+				break;
+			case IEEE80211_ELEMID_SUPPCHAN:
+				supp_chan_ie = frm;
+				break;
+			case IEEE80211_ELEMID_INTERWORKING:
+				interworking_ie_present = 1;
+				break;
+			/* XXX verify only one of RSN and WPA ie's? */
+			case IEEE80211_ELEMID_RSN:
+				if (vap->iv_flags & IEEE80211_F_WPA2)
+					rsn = frm;
+				else
+					IEEE80211_DPRINTF(vap,
+						IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA,
+						"[%s] ignoring RSN IE in association request\n",
+						ether_sprintf(wh->i_addr2));
+				break;
+			case IEEE80211_ELEMID_HTCAP:
+				htcap = frm;
+				break;
+			case IEEE80211_ELEMID_VHTCAP:
+				vhtcap = frm;
+				break;
+			case IEEE80211_ELEMID_VHTOP:
+				vhtop = frm;
+				break;
+			case IEEE80211_ELEMID_OPMOD_NOTIF:
+				opmode_notif_ie = frm;
+				break;
+			case IEEE80211_ELEMID_20_40_BSS_COEX:
+				coex = (struct ieee80211_20_40_coex_param *)frm;
+				break;
+			case IEEE80211_ELEMID_VENDOR:
+				/* don't override RSN element
+				 * XXX: actually the driver should report both WPA versions,
+				 * so wpa_supplicant can choose and also detect downgrade attacks
+                                 */
+				if (iswpaoui(frm) && !wpa) {
+					if (vap->iv_flags & IEEE80211_F_WPA1)
+						wpa = frm;
+					else
+						IEEE80211_DPRINTF(vap,
+							IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA,
+							"[%s] ignoring WPA IE in association request\n",
+							ether_sprintf(wh->i_addr2));
+				} else if (isosenie(frm)) {
+					osen = frm;
+				} else if (iswmeinfo(frm)) {
+					wme = frm;
+				} else if (isatherosoui(frm)) {
+					ath = frm;
+				} else if (isbroadcomoui(frm)) {
+					bcmie = frm;
+					if (isbrcmvhtoui(frm))
+						vhtcap = ieee80211_get_vhtcap_from_brcmvht(ni, frm);
+				} else if (isbroadcomoui2(frm)) {
+					bcmie = frm;
+				} else if (isrealtekoui(frm)) {
+					rtkie = frm;
+				} else if (isqtnie(frm)) {
+					qtnie = (struct ieee80211_ie_qtn *)frm;
+				/* For now just get the first WSC IE until we can handle multiple of these */
+				} else if (iswscoui(frm) && !wscie) {
+					wscie = frm;
+				} else if (isqtnpairoui(frm)) {
+					qtn_pairing_ie = (struct ieee80211_ie_qtn_pairing *)frm;
+				} else if (isrlnkoui(frm)) {
+					rlnk = frm;
+				}
+				break;
+			case IEEE80211_ELEMID_EXTCAP:
+				extcap = frm;
+				if (extcap != NULL) {
+					ieee80211_parse_extcap(ni, extcap, wh->i_addr2);
+					ieee80211_parse_80211_v(ni, extcap);
+				}
+				break;
+			case IEEE80211_ELEMID_RRM_ENABLED:
+				rrm_enabled = frm;
+				break;
+			case IEEE80211_ELEMID_MOBILITY_DOMAIN:
+				mdie = frm;
+				break;
+			case IEEE80211_ELEMID_FTIE:
+				ftie = frm;
+				break;
+			}
+			frm += frm[1] + 2;
+		}
+		if (frm > efrm)
+			return;
+		IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE);
+		IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN);
+
+		ssid_verify_action = ieee80211_verify_ssid(vap, ni, wh, ssid, subtype);
+		switch (ssid_verify_action) {
+		case IEEE80211_VERIFY_SSID_ACTION_NODE_DEL_AND_RETURN:
+			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_IE_INVALID);
+			ieee80211_node_leave(ni);
+			return;
+		case IEEE80211_VERIFY_SSID_ACTION_RETURN:
+			return;
+		case IEEE80211_VERIFY_SSID_ACTION_NO:
+		default:
+			break;
+		}
+
+		if (ni == vap->iv_bss) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_ASSOC, wh->i_addr2,
+				"deny %s request, sta not authenticated",
+				reassoc ? "reassoc" : "assoc");
+			ieee80211_send_error(ni, wh->i_addr2,
+				IEEE80211_FC0_SUBTYPE_DEAUTH,
+				IEEE80211_REASON_ASSOC_NOT_AUTHED);
+			vap->iv_stats.is_rx_assoc_notauth++;
+			mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+			return;
+		}
+
+		if (is_assoc_limit_reached(ic, vap)) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_ASSOC, wh->i_addr2,
+					"%s request denied, assoc limit %d reached, sta cnt %d",
+					reassoc ? "reassoc" : "assoc",
+					ic->ic_ssid_grp[vap->iv_ssid_group].limit,
+					ic->ic_ssid_grp[vap->iv_ssid_group].assocs);
+			IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_TOOMANY);
+			ieee80211_node_leave(ni);
+			vap->iv_stats.is_rx_assoc_toomany++;
+			mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+
+			return;
+		}
+
+#if defined(PLATFORM_QFDR)
+		if (ic->ic_reject_auth & QFDR_F_REJECT_AUTH) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT | IEEE80211_MSG_ASSOC, wh->i_addr2,
+					"%s request rejected",
+					reassoc ? "reassoc" : "assoc");
+					IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_TOOMANY);
+			ieee80211_node_leave(ni);
+			/* no need to update statistics */
+			return;
+		}
+#endif /* PLATFORM_QFDR */
+
+		memset((u_int8_t*)&rsn_parm, 0, sizeof(rsn_parm));
+
+		/* Validate power capability */
+		/* power capability:ID|LEN|MIN TX CAP|MAX TX CAP */
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC) &&
+				pwr_cap != NULL) {
+			TPC_DBG(vap, "[AP]channel=%d/regulatory power=%d/power constraint=%d/local max tx power=%d\n",
+					ic->ic_bsschan->ic_ieee,
+					ic->ic_bsschan->ic_maxregpower,
+					ic->ic_pwr_constraint,
+					ic->ic_bsschan->ic_maxregpower - ic->ic_pwr_constraint);
+
+			/* check Power Cap IE Length */
+			if (pwr_cap->len != 2) {
+				TPC_DBG(vap, "[%s] invalid power capability, Discard it!\n",
+						  ether_sprintf(wh->i_addr2));
+				IEEE80211_SEND_MGMT(ni,
+						IEEE80211_FC0_SUBTYPE_DEAUTH,
+						IEEE80211_REASON_IE_INVALID);
+				ieee80211_node_leave(ni);
+				mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+				return;
+			}
+			else {
+				local_max_txpwr = ic->ic_bsschan->ic_maxregpower - ic->ic_pwr_constraint;
+				min_txpwr = pwr_cap->min_txpwr;
+				max_txpwr = pwr_cap->max_txpwr;
+				TPC_DBG(vap, "[RECV ASSOC REQ]min power(%d) max power(%d) for mac(%s)\n",
+						min_txpwr, max_txpwr, ether_sprintf(wh->i_addr2));
+				if (min_txpwr > max_txpwr) {
+					TPC_DBG(vap, "[AP] Warning,sta min power(%d) larger than max power(%d)!\n",
+							min_txpwr,
+							max_txpwr);
+				}
+				if (min_txpwr > local_max_txpwr) {
+					TPC_DBG(vap, "[AP] power capability unacceptable(min tx power=%d max tx power=%d), discard it!\n",
+							min_txpwr,
+							max_txpwr);
+					IEEE80211_SEND_MGMT(ni,
+							IEEE80211_FC0_SUBTYPE_DEAUTH,
+							IEEE80211_REASON_DISASSOC_BAD_POWER);
+					ieee80211_node_leave(ni);
+					vap->iv_stats.is_rx_assoc_capmismatch++;
+					mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+					return;
+				}
+				else {
+					ni->ni_tpc_info.tpc_sta_cap.min_txpow = min_txpwr;
+					ni->ni_tpc_info.tpc_sta_cap.max_txpow = max_txpwr;
+				}
+			}
+		}
+
+		/* Validate association security credentials */
+		if ((rsn != NULL) || (wpa != NULL) || (osen != NULL)) {
+			/*
+			 * Parse WPA information element.  Note that
+			 * we initialize the param block from the node
+			 * state so that information in the IE overrides
+			 * our defaults.  The resulting parameters are
+			 * installed below after the association is assured.
+			 */
+
+			rsn_parm = ni->ni_rsn;
+
+			if (rsn != NULL) {
+				reason = ieee80211_parse_rsn(vap, rsn, &rsn_parm, wh);
+			} else {
+				if (wpa != NULL)
+					reason = ieee80211_parse_wpa(vap, wpa, &rsn_parm, wh);
+				else
+					reason = ieee80211_parse_osen(vap, osen, &rsn_parm, wh);
+			}
+
+			if (reason != 0) {
+				IEEE80211_DPRINTF(vap,
+						  IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA,
+						  "[%s] WPA/RSN IE mismatch in association request\n",
+						  ether_sprintf(wh->i_addr2));
+				IEEE80211_SEND_MGMT(ni,
+					IEEE80211_FC0_SUBTYPE_DEAUTH, reason);
+				ieee80211_node_leave(ni);
+				/* XXX distinguish WPA/RSN? */
+				vap->iv_stats.is_rx_assoc_badwpaie++;
+				mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+				return;
+			}
+			IEEE80211_NOTE_MAC(vap,
+				IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA,
+				wh->i_addr2,
+				"%s ie: mc %u/%u uc %u/%u key %u caps 0x%x",
+				rsn ?  "RSN" :
+				wpa ?  "WPA" : "OSEN",
+				rsn_parm.rsn_mcastcipher, rsn_parm.rsn_mcastkeylen,
+				rsn_parm.rsn_ucastcipher, rsn_parm.rsn_ucastkeylen,
+				rsn_parm.rsn_keymgmt, rsn_parm.rsn_caps);
+
+			/* Reject association if using TKIP and HT */
+			if ((rsn_parm.rsn_ucastcipher == IEEE80211_CIPHER_TKIP) && (htcap != NULL)) {
+				IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+					"deny %s request, TKIP and HT rates requested",
+					reassoc ? "reassoc" : "assoc");
+				IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_OTHER);
+				ieee80211_node_leave(ni);
+				vap->iv_stats.is_rx_assoc_tkiphtreject++;
+				mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+				return;
+			}
+		}
+
+		/* discard challenge after association */
+		if (ni->ni_challenge != NULL) {
+			FREE(ni->ni_challenge, M_DEVBUF);
+			ni->ni_challenge = NULL;
+		}
+		/* 802.11 spec says to ignore station's privacy bit */
+		if ((capinfo & IEEE80211_CAPINFO_ESS) == 0) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+				"deny %s request, capability mismatch 0x%x",
+				reassoc ? "reassoc" : "assoc", capinfo);
+			IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_CAPINFO);
+			ieee80211_node_leave(ni);
+			vap->iv_stats.is_rx_assoc_capmismatch++;
+			mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+			return;
+		}
+		rate = ieee80211_setup_rates(ni, rates, xrates,
+			IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE |
+			IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
+
+		/*
+		 * If constrained to 11g-only stations reject an
+		 * 11b-only station.  We cheat a bit here by looking
+		 * at the max negotiated xmit rate and assuming anyone
+		 * with a best rate <24Mb/s is an 11b station.
+		 */
+		if ((rate & IEEE80211_RATE_BASIC) ||
+		    ((vap->iv_flags & IEEE80211_F_PUREG) && rate < 48)) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+				"deny %s request, rate set mismatch",
+				reassoc ? "reassoc" : "assoc");
+			IEEE80211_SEND_MGMT(ni, resp,
+				IEEE80211_STATUS_BASIC_RATE);
+			ieee80211_node_leave(ni);
+			vap->iv_stats.is_rx_assoc_norate++;
+			mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+			return;
+		}
+
+		if (ni->ni_associd != 0 &&
+		    IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan)) {
+			if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME)
+			    != (capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME)) {
+				IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT,
+					wh->i_addr2,
+					"deny %s request, short slot time "
+					"capability mismatch 0x%x",
+					reassoc ? "reassoc" : "assoc", capinfo);
+				IEEE80211_SEND_MGMT(ni, resp,
+					IEEE80211_STATUS_CAPINFO);
+				ieee80211_node_leave(ni);
+				vap->iv_stats.is_rx_assoc_capmismatch++;
+				mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+				return;
+			}
+		}
+
+		ni->ni_vendor = PEER_VENDOR_NONE;
+		if (qtnie != NULL) {
+			ni->ni_vendor = PEER_VENDOR_QTN;
+		}
+		if (bcmie != NULL) {
+			ni->ni_vendor = PEER_VENDOR_BRCM;
+		}
+		if (rlnk != NULL) {
+			ni->ni_vendor = PEER_VENDOR_RLNK;
+		}
+		if (rtkie != NULL) {
+			ni->ni_vendor = PEER_VENDOR_RTK;
+		}
+
+		ieee80211_input_assoc_req_qtnie(ni, vap, qtnie);
+
+		if (IEEE80211_IS_CHAN_ANYN(ic->ic_curchan)) {
+			/*
+			 * Assoc request ignored when 11ac_and_11n flag is set and
+			 * vht or ht capabilities are not-present in Assoc req/reassoc req packet.
+			 */
+			if (!ieee80211_phy_mode_allowed(vap, vhtcap, htcap)) {
+				IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+				"deny %s request, VHT or HT capability IE are not present = %d",
+				reassoc ? "reassoc" : "assoc", error);
+				IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_OTHER);
+				ieee80211_node_leave(ni);
+				mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+				return;
+			}
+
+			if (htcap != NULL) {
+				ni->ni_flags |= IEEE80211_NODE_HT;
+
+				/* record capabilities, mark node as capable of HT */
+				error = ieee80211_setup_htcap(ni, htcap);
+				if (error) {
+					IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+							   "deny %s request, ht capability mismatch error = %d",
+							   reassoc ? "reassoc" : "assoc", error);
+					IEEE80211_SEND_MGMT(ni, resp,
+							    IEEE80211_STATUS_HT_FEATURE);
+					ieee80211_node_leave(ni);
+					vap->iv_stats.is_rx_assoc_nohtcap++;
+					mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC_FAILS, 1);
+					return;
+				}
+
+				if (vhtcap && IS_IEEE80211_DUALBAND_VHT_ENABLED(ic)) {
+					ieee80211_parse_vhtcap(ni, vhtcap);
+					ni->ni_flags |= IEEE80211_NODE_VHT;
+
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+						"VHT Enabled: ni_flags = 0x%04x\n",
+						ni->ni_flags);
+				} else {
+					ni->ni_flags &= ~IEEE80211_NODE_VHT;
+
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+						"VHT Disabled: ni_flags = 0x%04x\n",
+						ni->ni_flags);
+				}
+			} else {
+				/*
+				 * Flush any state from a previous association.
+				 */
+				memset(&ni->ni_htcap, 0, sizeof(struct ieee80211_htcap));
+				memset(&ni->ni_htinfo, 0, sizeof(struct ieee80211_htinfo));
+
+				ni->ni_flags &= ~IEEE80211_NODE_HT;
+				ni->ni_flags &= ~IEEE80211_NODE_VHT;
+			}
+		}
+
+		ni->ni_rstamp = rstamp;
+		ni->ni_last_rx = jiffies;
+		ieee80211_scs_node_clean_stats(IEEE80211_SCS_STATE_INIT, ni);
+		ni->ni_raw_bintval = bintval;
+		ni->ni_intval = IEEE80211_BINTVAL_SANITISE(bintval);
+		ni->ni_capinfo = capinfo;
+		ni->ni_chan = ic->ic_curchan;
+		ni->ni_fhdwell = vap->iv_bss->ni_fhdwell;
+		ni->ni_fhindex = vap->iv_bss->ni_fhindex;
+		ni->ni_rsn = rsn_parm;
+
+		if (wpa != NULL) {
+			/*
+			 * Record WPA/RSN parameters for station, mark
+			 * node as using WPA and record information element
+			 * for applications that require it.
+			 */
+			ieee80211_saveie(&ni->ni_wpa_ie, wpa);
+		} else if (ni->ni_wpa_ie != NULL) {
+			/*
+			 * Flush any state from a previous association.
+			 */
+			FREE(ni->ni_wpa_ie, M_DEVBUF);
+			ni->ni_wpa_ie = NULL;
+		}
+		if (rsn != NULL) {
+			/*
+			 * Record WPA/RSN parameters for station, mark
+			 * node as using WPA and record information element
+			 * for applications that require it.
+			 */
+			ieee80211_saveie(&ni->ni_rsn_ie, rsn);
+		} else if (ni->ni_rsn_ie != NULL) {
+			/*
+			 * Flush any state from a previous association.
+			 */
+			FREE(ni->ni_rsn_ie, M_DEVBUF);
+			ni->ni_rsn_ie = NULL;
+		}
+		if (osen != NULL) {
+			/*
+			 * Record OSEN parameters for station, mark
+			 * node as using OSEN and record information element
+			 * for applications that require it.
+			 */
+			ieee80211_saveie(&ni->ni_osen_ie, osen);
+		} else if (ni->ni_osen_ie != NULL) {
+			/*
+			 * Flush any state from a previous association.
+			 */
+			FREE(ni->ni_osen_ie, M_DEVBUF);
+			ni->ni_osen_ie = NULL;
+		}
+		if (ni->ni_rx_md_ie != NULL) {
+			/*
+			 * Flush any state from a previous association.
+			 */
+			FREE(ni->ni_rx_md_ie, M_DEVBUF);
+			ni->ni_rx_md_ie = NULL;
+		}
+		if (mdie != NULL) {
+			ieee80211_saveie(&ni->ni_rx_md_ie, mdie);
+		}
+
+		if (coex != NULL) {
+			ni->ni_coex = coex->coex_param;
+		}
+
+		if ((ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) &&
+					IEEE80211_IS_11NG_40(ic))
+			ni->ni_obss_scan = IEEE80211_NODE_OBSS_CAPABLE |
+						IEEE80211_NODE_OBSS_RUNNING;
+
+		if (wme != NULL) {
+			/*
+			 * Record WME parameters for station, mark node
+			 * as capable of QoS and record information
+			 * element for applications that require it.
+			 */
+			ieee80211_saveie(&ni->ni_wme_ie, wme);
+			if (ieee80211_parse_wmeie(wme, wh, ni) > 0)
+				ni->ni_flags |= IEEE80211_NODE_QOS;
+		} else if (ni->ni_wme_ie != NULL) {
+			/*
+			 * Flush any state from a previous association.
+			 */
+			FREE(ni->ni_wme_ie, M_DEVBUF);
+			ni->ni_wme_ie = NULL;
+			ni->ni_flags &= ~IEEE80211_NODE_QOS;
+		}
+		if (ath != NULL) {
+			ieee80211_saveath(ni, ath);
+			ni->ni_vendor = PEER_VENDOR_ATH;
+		} else if (ni->ni_ath_ie != NULL) {
+			/*
+			 * Flush any state from a previous association.
+			 */
+			FREE(ni->ni_ath_ie, M_DEVBUF);
+			ni->ni_ath_ie = NULL;
+			ni->ni_ath_flags = 0;
+		}
+		if (bcmie != NULL) {
+			ni->ni_brcm_flags = 1;
+		} else if (ni->ni_brcm_flags != 0) {
+			ni->ni_brcm_flags = 0;
+		}
+
+		if (wscie != NULL) {
+			ieee80211_saveie(&ni->ni_wsc_ie, wscie);
+		} else if (ni->ni_wsc_ie != NULL) {
+			FREE(ni->ni_wsc_ie, M_DEVBUF);
+			ni->ni_wsc_ie = NULL;
+		}
+
+		if (qtn_pairing_ie != NULL) {
+			ieee80211_saveie(&ni->ni_qtn_pairing_ie, (u_int8_t *)qtn_pairing_ie);
+		} else if (ni->ni_qtn_pairing_ie != NULL) {
+			FREE(ni->ni_qtn_pairing_ie, M_DEVBUF);
+			ni->ni_qtn_pairing_ie = NULL;
+		}
+		if (ni->ni_rx_ft_ie != NULL) {
+			FREE(ni->ni_rx_ft_ie, M_DEVBUF);
+			ni->ni_rx_ft_ie = NULL;
+		}
+		if (ftie != NULL) {
+			ieee80211_saveie(&ni->ni_rx_ft_ie, ftie);
+		}
+
+		if (supp_chan_ie != NULL)
+			ieee80211_parse_supp_chan(ni, supp_chan_ie);
+
+		/* Send TGf L2UF frame on behalf of newly associated station */
+		ieee80211_deliver_l2uf(ni);
+
+		/* Clean up old block ack state */
+		ieee80211_node_ba_state_clear(ni);
+
+		/* Add the local implicit BA values */
+		if (qtnie && ni->ni_implicit_ba_valid) {
+			ieee80211_node_implicit_ba_setup(ni);
+		}
+
+		/* Quantenna peers are 4 address capable */
+		if (!qtnie) {
+			ni->ni_qtn_flags |= QTN_IS_NOT_4ADDR_CAPABLE_NODE;
+		}
+
+		/* Check if it is a broadcom station */
+		if (unlikely(bcmie)) {
+			ni->ni_qtn_flags |= QTN_IS_BCM_NODE;
+			if (interworking_ie_present &&
+				(ni->ni_flags & IEEE80211_NODE_VHT)) {
+				ni->ni_qtn_flags |= QTN_IS_GALAXY_NOTE_4_NODE;
+			}
+		}
+
+		if (unlikely(ieee80211_node_is_realtek(ni))) {
+			ni->ni_qtn_flags |= QTN_IS_REALTEK_NODE;
+		}
+
+		if (unlikely(ieee80211_node_is_opti_node(ni))) {
+			ni->ni_qtn_flags |= QTN_OPTI_NODE;
+		}
+
+		/* If it is an Intel 5x00/620x client, we need to mark the flag to use only 2 tx chains
+		 * before informing the MuC
+		 */
+		if ((ni->ni_flags & IEEE80211_NODE_HT) || (ni->ni_flags & IEEE80211_NODE_VHT)) {
+			ieee80211_blacklist_ba(ni, 0);
+		} else {
+			/* No BBF for legacy clients */
+			ni->ni_bbf_disallowed = 1;
+		}
+
+		/* TODO: Probably we need to filter the vendor sign out of ni_qtn_flags and move it to ni_vendor
+		 * to remove duplicate vendor indications.
+		 */
+		if (unlikely((ni->ni_qtn_flags & QTN_IS_INTEL_NODE ||
+		    ni->ni_qtn_flags & QTN_IS_INTEL_5100_NODE ||
+		    ni->ni_qtn_flags & QTN_IS_INTEL_5300_NODE) &&
+		    ni->ni_vendor == PEER_VENDOR_NONE)) {
+
+			ni->ni_vendor = PEER_VENDOR_INTEL;
+		}
+
+		if (vhtcap) {
+			enum ieee80211_vhtop_chanwidth assoc_vhtop_bw;
+
+			switch (IEEE80211_VHTCAP_GET_CHANWIDTH((struct ieee80211_ie_vhtcap*)vhtcap)) {
+			case IEEE80211_VHTCAP_CW_160M:
+				assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_160MHZ;
+				break;
+
+			case IEEE80211_VHTCAP_CW_160_AND_80P80M:
+				assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_80PLUS80MHZ;
+				break;
+
+			case IEEE80211_VHTCAP_CW_80M_ONLY:
+			default:
+				assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+				break;
+			}
+			ni->ni_vhtop.chanwidth = min(ic->ic_vhtop.chanwidth, assoc_vhtop_bw);
+
+			if (IS_IEEE80211_11NG_VHT_ENABLED(ic)) {
+				assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+				ni->ni_vhtop.chanwidth = min(ic->ic_vhtop_24g.chanwidth, assoc_vhtop_bw);
+			}
+		}
+
+		if (unlikely(rrm_enabled)) {
+			uint8_t rrm_enabled_byte0 = *(rrm_enabled + 2);
+			if (rrm_enabled_byte0 & IEEE80211_RM_NEIGH_REPORT_CAP)
+				ni->ni_rrm_capability |= IEEE80211_NODE_NEIGHBOR_REPORT_CAPABLE;
+			if (rrm_enabled_byte0 & IEEE80211_RM_BEACON_PASSIVE_REPORT_CAP)
+				ni->ni_rrm_capability |= IEEE80211_NODE_BEACON_PASSIVE_REPORT_CAPABLE;
+			if (rrm_enabled_byte0 & IEEE80211_RM_BEACON_ACTIVE_REPORT_CAP)
+				ni->ni_rrm_capability |= IEEE80211_NODE_BEACON_ACTIVE_REPORT_CAPABLE;
+			if (rrm_enabled_byte0 & IEEE80211_RM_BEACON_TABLE_REPORT_CAP)
+				ni->ni_rrm_capability |= IEEE80211_NODE_BEACON_TABLE_REPORT_CAPABLE;
+		}
+
+		if (((ni->ni_coex & WLAN_20_40_BSS_COEX_40MHZ_INTOL) ||
+		    (ni->ni_coex & WLAN_20_40_BSS_COEX_20MHZ_WIDTH_REQ) ||
+		    (ni->ni_htcap.cap & IEEE80211_HTCAP_C_40_INTOLERANT)) &&
+					    (ic->ic_20_40_coex_enable)) {
+			ieee80211_change_bw(vap, BW_HT20, 0);
+			ic->ic_coex_stats_update(ic, WLAN_COEX_STATS_BW_ASSOC);
+		}
+		/* Mobility domain present, then let hostapd handle rest of association process */
+		if (vap->iv_mdid) {
+			/* FT assoc/reassoc request */
+			if (ni->ni_rx_md_ie != NULL) {
+				uint16_t mdid = *((uint16_t *)(ni->ni_rx_md_ie + 2));
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+					"%pM %s request mdid 0x%x\n", wh->i_addr2,
+					reassoc ? "reassoc" : "assoc", mdid);
+				if (mdid == vap->iv_mdid) {
+					forward_mgmt_to_app_for_further_processing(vap, subtype,
+											skb, wh);
+					return;
+				} else {
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+						"%pM %s request mdid mismatch request 0x%x "
+						"vap's mdid 0x%x\n",
+						wh->i_addr2, reassoc ? "reassoc" : "assoc",
+						mdid, vap->iv_mdid);
+					IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, 13);
+					ieee80211_node_leave(ni);
+					vap->iv_stats.is_rx_assoc_capmismatch++;
+					mlme_stats_delayed_update(wh->i_addr2,
+							MLME_STAT_ASSOC_FAILS, 1);
+					return;
+				}
+			} else {
+				/* non FT assoc/reassoc request , let hostapd handle rest of association*/
+				forward_mgmt_to_app_for_further_processing(vap, subtype, skb, wh);
+				return;
+			}
+		}
+
+		ieee80211_node_join(ni, resp);
+
+		if (opmode_notif_ie) {
+			struct ieee80211_ie_vhtop_notif *ie =
+						(struct ieee80211_ie_vhtop_notif *)opmode_notif_ie;
+						
+			uint8_t opmode = recalc_opmode(ni, ie->vhtop_notif_mode);
+			ieee80211_param_to_qdrv(ni->ni_vap, IEEE80211_PARAM_NODE_OPMODE,
+					opmode, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+			ni->ni_vhtop_notif_mode = ie->vhtop_notif_mode;
+		}
+
+		ieee80211_update_current_mode(ni);
+
+		mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_ASSOC, 1);
+		break;
+	}
+
+	case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+	case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: {
+		uint16_t capinfo;
+		uint16_t associd;
+		uint16_t status;
+		uint8_t ridx;
+
+		if (vap->iv_opmode != IEEE80211_M_STA || vap->iv_state != IEEE80211_S_ASSOC) {
+			if (vap->iv_state < IEEE80211_S_ASSOC) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_ASSOC,
+					"%s: Received %sassoc resp when not authed - send deauth\n",
+					__func__,
+					subtype == IEEE80211_FC0_SUBTYPE_REASSOC_RESP ?  "re" : "");
+				ieee80211_send_error(ni, wh->i_addr2,
+						IEEE80211_FC0_SUBTYPE_DEAUTH,
+						IEEE80211_REASON_NOT_AUTHED);
+			}
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+
+		/*
+		 * asresp frame format
+		 *	[2] capability information
+		 *	[2] status
+		 *	[2] association ID
+		 *	[tlv] supported rates
+		 *	[tlv] extended supported rates
+		 *	[tlv] WME
+		 */
+		IEEE80211_VERIFY_LENGTH(efrm - frm, 6);
+		ni = vap->iv_bss;
+		capinfo = le16toh(*(__le16 *)frm);
+		frm += 2;
+		status = le16toh(*(__le16 *)frm);
+		frm += 2;
+		if (status != 0) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC,
+				wh->i_addr2,
+				"%sassoc failed (reason %d)",
+				ISREASSOC(subtype) ?  "re" : "", status);
+			vap->iv_stats.is_rx_auth_fail++;	/* XXX */
+			ieee80211_new_state(vap, IEEE80211_S_SCAN,
+				IEEE80211_SCAN_FAIL_STATUS);
+			return;
+		}
+		associd = le16toh(*(__le16 *)frm);
+		if ((IEEE80211_AID(associd) == 0) || (IEEE80211_AID(associd) > IEEE80211_AID_MAX)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+				"%s: invalid associd %u\n", __func__, IEEE80211_AID(associd));
+			IEEE80211_SEND_MGMT(ni,
+				IEEE80211_FC0_SUBTYPE_DISASSOC,
+				IEEE80211_REASON_UNSPECIFIED);
+			return;
+		}
+		frm += 2;
+
+		rates = xrates = wme = NULL;
+		while (frm < efrm) {
+			/*
+			 * Do not discard frames containing proprietary Agere
+			 * elements 128 and 129, as the reported element length
+			 * is often wrong. Skip rest of the frame, since we can
+			 * not rely on the given element length making it impossible
+			 * to know where the next element starts.
+			 */
+			if ((*frm == IEEE80211_ELEMID_AGERE1) ||
+			    (*frm == IEEE80211_ELEMID_AGERE2)) {
+				frm = efrm;
+				continue;
+			}
+
+			IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1]);
+			switch (*frm) {
+			case IEEE80211_ELEMID_RATES:
+				rates = frm;
+				break;
+			case IEEE80211_ELEMID_HTCAP:
+				htcap = frm;
+				break;
+			case IEEE80211_ELEMID_HTINFO:
+				htinfo = frm;
+				break;
+			case IEEE80211_ELEMID_VHTCAP:
+				vhtcap = frm;
+				break;
+			case IEEE80211_ELEMID_VHTOP:
+				vhtop = frm;
+				break;
+			case IEEE80211_ELEMID_XRATES:
+				xrates = frm;
+				break;
+			case IEEE80211_ELEMID_OPMOD_NOTIF:
+				opmode_notif_ie = frm;
+				break;
+			case IEEE80211_ELEMID_OBSS_SCAN:
+				obss_scan = frm;
+				break;
+			case IEEE80211_ELEMID_VENDOR:
+				if (iswmeoui(frm)) {
+					wme = frm;
+				} else if (isqtnie(frm)) {
+					qtnie = (struct ieee80211_ie_qtn *)frm;
+				} else if (isbroadcomoui(frm)) {
+					bcmie = frm;
+					if (isbrcmvhtoui(frm)) {
+						vhtcap = ieee80211_get_vhtcap_from_brcmvht(ni, frm);
+						vhtop = ieee80211_get_vhtop_from_brcmvht(ni, frm);
+					}
+				} else if (isbroadcomoui2(frm)) {
+					bcmie = frm;
+				} else if (isqtnpairoui(frm)) {
+					qtn_pairing_ie = (struct ieee80211_ie_qtn_pairing *)frm;
+#ifdef CONFIG_QVSP
+				} else if (isvspie(frm)) {
+					vspie = (struct ieee80211_ie_vsp *)frm;
+				} else if (isqtnwmeie(frm)) {
+					/* override standard WME IE */
+					struct ieee80211_ie_qtn_wme *qwme = (struct ieee80211_ie_qtn_wme *)frm;
+					IEEE80211_NOTE_MAC(vap,
+						IEEE80211_MSG_ASSOC | IEEE80211_MSG_WME | IEEE80211_MSG_VSP,
+						wh->i_addr2, "%s: found QTN WME IE, version %u\n",
+						__func__, qwme->qtn_wme_ie_version);
+					wme = (uint8_t *)&qwme->qtn_wme_ie;
+#endif
+				}
+				break;
+			case IEEE80211_ELEMID_EXTCAP:
+				extcap = frm;
+				if (extcap != NULL)
+					ieee80211_parse_extcap(ni, extcap, wh->i_addr3);
+				break;
+			}
+			frm += frm[1] + 2;
+		}
+		if (frm > efrm)
+			return;
+
+		ni->ni_vendor = PEER_VENDOR_NONE;
+		if (qtnie != NULL) {
+			ni->ni_vendor = PEER_VENDOR_QTN;
+		}
+		if (bcmie != NULL) {
+			ni->ni_vendor = PEER_VENDOR_BRCM;
+			ni->ni_qtn_flags |= QTN_IS_BCM_NODE;
+		}
+
+		ieee80211_input_assoc_resp_qtnie(ni, vap, qtnie);
+#ifdef CONFIG_QVSP
+		ieee80211_input_assoc_resp_vspie(vap, vspie, efrm);
+#endif
+
+		IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE);
+		rate = ieee80211_setup_rates(ni, rates, xrates,
+			IEEE80211_F_DOSORT |
+			IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
+		if (rate & IEEE80211_RATE_BASIC) {
+			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC,
+				wh->i_addr2,
+				"%sassoc failed (rate set mismatch)",
+				ISREASSOC(subtype) ?  "re" : "");
+			vap->iv_stats.is_rx_assoc_norate++;
+			ieee80211_new_state(vap, IEEE80211_S_SCAN,
+				IEEE80211_SCAN_FAIL_STATUS);
+			return;
+		}
+
+		/* check to see if need to set up ERP flag */
+		if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) &&
+			ieee80211_iserp_rateset(ic, &ni->ni_rates)) {
+			IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+				"%s STA is joing an ERP AP, set the ERP flag\n", __func__);
+			ni->ni_flags |= IEEE80211_NODE_ERP;
+		}
+
+		ni->ni_capinfo = capinfo;
+		ni->ni_associd = associd;
+		if (wme != NULL &&
+		    ieee80211_parse_wmeparams(vap, wme, wh, &qosinfo) >= 0) {
+			ni->ni_flags |= IEEE80211_NODE_QOS;
+			ieee80211_wme_updateparams(vap, 0);
+		} else
+			ni->ni_flags &= ~IEEE80211_NODE_QOS;
+		/*
+		 * Configure state now that we are associated.
+		 *
+		 * XXX may need different/additional driver callbacks?
+		 */
+		if (IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+		    ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) &&
+		    (ic->ic_caps & IEEE80211_C_SHPREAMBLE))) {
+			ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+			ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+		} else {
+			ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+			ic->ic_flags |= IEEE80211_F_USEBARKER;
+		}
+		ieee80211_set_shortslottime(ic,
+			IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+				(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME));
+		/*
+		 * Honor ERP protection.
+		 *
+		 * NB: ni_erp should zero for non-11g operation
+		 *     but we check the channel characteristics
+		 *     just in case.
+		 */
+		if (IEEE80211_BG_PROTECT_ENABLED(ic)
+				&& (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)
+				|| IEEE80211_IS_CHAN_11NG(ic->ic_curchan))
+				&& (ni->ni_erp & IEEE80211_ERP_USE_PROTECTION)) {
+			ic->ic_flags |= IEEE80211_F_USEPROT;
+			/* tell Muc to use ERP cts-to-self mechanism now */
+			ic->ic_set_11g_erp(vap, 1);
+		} else {
+			ic->ic_flags &= ~IEEE80211_F_USEPROT;
+			/* tell Muc to turn off ERP now */
+			ic->ic_set_11g_erp(vap, 0);
+		}
+
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC, wh->i_addr2,
+			"%sassoc success: %s preamble, %s slot time ampdu density %d %s%s%s%s%s%s%s",
+			ISREASSOC(subtype) ? "re" : "",
+			(ic->ic_flags&IEEE80211_F_SHPREAMBLE) &&
+			(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) ? "short" : "long",
+			ic->ic_flags&IEEE80211_F_SHSLOT ? "short" : "long",
+			ni->ni_htcap.mpduspacing,
+			ic->ic_flags&IEEE80211_F_USEPROT ? ", protection" : "",
+			ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "",
+			IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_TURBOP) ?
+				", turbo" : "",
+			IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_COMP) ?
+				", compression" : "",
+			IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF) ?
+				", fast-frames" : "",
+			IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_XR) ?
+				", XR" : "",
+			IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_AR) ?
+				", AR" : ""
+		);
+
+		/* Sanity check - make sure bridge mode advertisement is as expected */
+		if (vap->iv_is_qtn_dev) {
+			if (qtnie) {
+				ieee80211_saveie(&ni->ni_qtn_assoc_ie, (u_int8_t *)qtnie);
+				if (((qtnie->qtn_ie_flags & IEEE80211_QTN_BRIDGEMODE)
+						== IEEE80211_QTN_BRIDGEMODE) !=
+					((vap->iv_flags_ext & IEEE80211_FEXT_WDS) == IEEE80211_FEXT_WDS)) {
+
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+						"%s: QTN IE in assoc resp is invalid, %02x/%08x\n", __func__,
+						qtnie->qtn_ie_flags, vap->iv_flags_ext);
+				}
+				ni->ni_implicit_ba = 0;
+				/* Implicit BA flags for the AP */
+				if (IEEE80211_QTN_IE_GE_V2(qtnie) &&
+					(ni->ni_flags & IEEE80211_NODE_HT) && !sta_pure_tkip) {
+					ni->ni_implicit_ba = qtnie->qtn_ie_implicit_ba_tid;
+					ni->ni_implicit_ba_valid = 1;
+				}
+			} else {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+					"%s: QTN IE in assoc resp is missing\n", __func__);
+			}
+		} else if (qtnie) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+				"%s: Unexpected QTN IE in assoc resp\n", __func__);
+		}
+
+		if (qtn_pairing_ie != NULL) {
+			ieee80211_saveie(&ni->ni_qtn_pairing_ie, (u_int8_t *)qtn_pairing_ie);
+		} else if (ni->ni_qtn_pairing_ie != NULL) {
+			FREE(ni->ni_qtn_pairing_ie, M_DEVBUF);
+			ni->ni_qtn_pairing_ie = NULL;
+		}
+
+		/* 11n */
+		/* Both Q-Station and AP should support HT */
+		if ((ic->ic_curmode >= IEEE80211_MODE_11NA) && (htcap != NULL) && !sta_pure_tkip) {
+			ni->ni_flags |= IEEE80211_NODE_HT;
+			ieee80211_parse_htcap(ni, htcap);
+			ieee80211_parse_htinfo(ni, htinfo);
+			/* keep only the rates supported by both parties */
+			ridx = ieee80211_fix_ht_rate(ni, IEEE80211_F_DOFRATE|IEEE80211_F_DODEL|IEEE80211_F_DOXSECT);
+		} else {
+			/*
+			 * Flush any state from a previous association.
+			 */
+			memset(&ni->ni_htcap, 0, sizeof(struct ieee80211_htcap));
+			memset(&ni->ni_htinfo, 0, sizeof(struct ieee80211_htinfo));
+			ni->ni_flags &= ~IEEE80211_NODE_HT;
+		}
+
+		if (obss_scan) {
+			memset(&ni->ni_obss_ie, 0, sizeof(struct ieee80211_obss_scan_ie));
+			memcpy(&ni->ni_obss_ie, obss_scan, sizeof(struct ieee80211_obss_scan_ie));
+		}
+
+		ieee80211_input_sta_vht_set(ni, vap, vhtcap, vhtop, !sta_pure_tkip);
+		ieee80211_node_ba_state_clear(ni);
+		/* Apply the implicit BA parameters to the local structure.
+		 * Prevents the sending of addba request to the other side of the link.
+		 */
+		if (ni->ni_implicit_ba_valid) {
+			ieee80211_node_implicit_ba_setup(ni);
+		}
+
+		if ((ic->ic_flags & IEEE80211_F_DOTH) &&
+				(ic->ic_flags_ext & IEEE80211_FEXT_TPC) &&
+					(ni->ni_flags & IEEE80211_NODE_TPC)) {
+			if (vap->iv_local_max_txpow >= ic->ic_curchan->ic_maxpower_normal) {
+				vap->iv_local_max_txpow = ic->ic_curchan->ic_maxpower_normal;
+			} else if (vap->iv_local_max_txpow < ic->ic_curchan->ic_minpower_normal) {
+				vap->iv_local_max_txpow = ic->ic_curchan->ic_minpower_normal;
+			} else {
+				ieee80211_update_tx_power(ic, vap->iv_local_max_txpow);
+			}
+		}
+
+		ni->ni_vhtop_notif_mode = IEEE80211_OPMODE_NOTIFY_INVALID;
+		if (opmode_notif_ie) {
+			struct ieee80211_ie_vhtop_notif *ie =
+						(struct ieee80211_ie_vhtop_notif *)opmode_notif_ie;
+						
+			uint8_t opmode = recalc_opmode(ni, ie->vhtop_notif_mode);
+			ieee80211_param_to_qdrv(ni->ni_vap, IEEE80211_PARAM_NODE_OPMODE,
+					opmode, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+			ni->ni_vhtop_notif_mode = ie->vhtop_notif_mode;
+		}
+
+		ieee80211_new_state(vap, IEEE80211_S_RUN, subtype);
+
+		ieee80211_update_current_mode(ni);
+
+		/*For STA mode, record the start time of association with AP*/
+		ni->ni_start_time_assoc = get_jiffies_64();
+		if (IEEE80211_IS_11NG_40(ic) && ic->ic_obss_scan_enable &&
+				(ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) &&
+				(ni->ni_obss_ie.param_id)) {
+			ic->ic_obss_timer.function = ieee80211_obss_scan_timer;
+			mod_timer(&ic->ic_obss_timer,
+					jiffies + ni->ni_obss_ie.obss_trigger_interval * HZ);
+			ic->ic_obss_timer.data = (unsigned long)vap;
+		}
+		break;
+	}
+
+	case IEEE80211_FC0_SUBTYPE_DEAUTH: {
+		u_int16_t reason;
+
+		if (vap->iv_state == IEEE80211_S_SCAN) {
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+		/*
+		 * deauth frame format
+		 *	[2] reason
+		 */
+		IEEE80211_VERIFY_LENGTH(efrm - frm, 2);
+		reason = le16toh(*(__le16 *)frm);
+		vap->iv_stats.is_rx_deauth++;
+		IEEE80211_NODE_STAT(ni, rx_deauth);
+
+		IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
+			"recv deauthenticate (reason %d) for %s", reason, ether_sprintf(wh->i_addr1));
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			arg = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+
+			if (!IEEE80211_ADDR_EQ(vap->iv_bss->ni_bssid, wh->i_addr2)) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+					"drop deauthenticate (reason %d) from unknown addr %pM",
+					reason, wh->i_addr2);
+				break;
+			}
+
+			/*
+			 * Don't keep retrying the auth if we receive a deauth.
+			 * Let wpa_supplicant choose the next action.
+			 */
+			if (!ieee80211_tdls_pend_disassociation(vap, IEEE80211_S_INIT, arg))
+			      ieee80211_new_state(vap, IEEE80211_S_INIT, arg);
+			ieee80211_dot11_msg_send(ni->ni_vap, (char *)ni->ni_macaddr,
+					d11_m[IEEE80211_DOT11_MSG_AP_DISCONNECTED],
+					d11_c[IEEE80211_DOT11_MSG_REASON_DEAUTHENTICATED],
+					reason,
+					(reason < DOT11_MAX_REASON_CODE) ? d11_r[reason] : "Reserved",
+					NULL,
+					NULL);
+			break;
+		case IEEE80211_M_HOSTAP:
+			if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_bss->ni_bssid)) {
+				vap->iv_stats.is_rx_mgtdiscard++;
+				return;
+			}
+			if (ni != vap->iv_bss) {
+				mlme_stats_delayed_update(wh->i_addr2,
+								MLME_STAT_DEAUTH, 1);
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+				if (vap->bsa_status == BSA_STATUS_ACTIVE)
+					ieee80211_bsa_disconnect_event_send(vap, ni,
+						reason, IEEE80211_FC0_SUBTYPE_DEAUTH,
+						BSA_DISCONNECT_PEER_GENERATED);
+#endif
+				/* Message to indicate STA sent the deauth */
+				ieee80211_dot11_msg_send(ni->ni_vap,
+					(char *)ni->ni_macaddr,
+					d11_m[IEEE80211_DOT11_MSG_CLIENT_DISCONNECTED],
+					d11_c[IEEE80211_DOT11_MSG_REASON_CLIENT_SENT_DEAUTH],
+					reason,
+					(reason < DOT11_MAX_REASON_CODE) ?
+						d11_r[reason] : "Reserved",
+					NULL,
+					NULL);
+				ieee80211_nofity_sta_require_leave(ni);
+				ieee80211_node_leave(ni);
+			} else {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+					"Receive deauthenticate (reason %d)"
+					" from unknown addr %pM",
+					reason, wh->i_addr2);
+			}
+			break;
+		default:
+			vap->iv_stats.is_rx_mgtdiscard++;
+			break;
+		}
+		break;
+	}
+
+	case IEEE80211_FC0_SUBTYPE_DISASSOC: {
+		u_int16_t reason;
+
+		/* if a disassoc request is received in the un-auth state
+		 * a deauth should be sent by the sta */
+		if (vap->iv_state < IEEE80211_S_ASSOC && vap->iv_opmode == IEEE80211_M_STA) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_ASSOC,
+					"%s: receive disassoc in state-unauthed, send deauth \n", __func__);
+			ieee80211_send_error(ni, wh->i_addr2,
+					IEEE80211_FC0_SUBTYPE_DEAUTH,
+					IEEE80211_REASON_NOT_AUTHED);
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+
+		if (vap->iv_state != IEEE80211_S_RUN &&
+		    vap->iv_state != IEEE80211_S_ASSOC &&
+		    vap->iv_state != IEEE80211_S_AUTH) {
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+		/*
+		 * disassoc frame format
+		 *	[2] reason
+		 */
+		IEEE80211_VERIFY_LENGTH(efrm - frm, 2);
+		reason = le16toh(*(__le16 *)frm);
+		vap->iv_stats.is_rx_disassoc++;
+		IEEE80211_NODE_STAT(ni, rx_disassoc);
+
+		vap->iv_disassoc_reason = reason;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+			"recv disassociate (reason %d)", reason);
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			if (!IEEE80211_ADDR_EQ(vap->iv_bss->ni_bssid, wh->i_addr2)) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+					"drop disassociate (reason %d) from unknown addr %pM",
+					reason, wh->i_addr2);
+				break;
+			}
+
+			if (!ieee80211_tdls_pend_disassociation(vap, IEEE80211_S_ASSOC, 0))
+				ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+
+			ieee80211_dot11_msg_send(ni->ni_vap, (char *)ni->ni_macaddr,
+					d11_m[IEEE80211_DOT11_MSG_AP_DISCONNECTED],
+					d11_c[IEEE80211_DOT11_MSG_REASON_DISASSOCIATED],
+					reason,
+					(reason < DOT11_MAX_REASON_CODE) ? d11_r[reason] : "Reserved",
+					NULL,
+					NULL);
+			break;
+		case IEEE80211_M_HOSTAP:
+			if (ni != vap->iv_bss) {
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+				if (vap->bsa_status == BSA_STATUS_ACTIVE)
+					ieee80211_bsa_disconnect_event_send(vap, ni,
+						reason, IEEE80211_FC0_SUBTYPE_DISASSOC,
+						BSA_DISCONNECT_PEER_GENERATED);
+#endif
+
+				ieee80211_dot11_msg_send(ni->ni_vap, (char *)wh->i_addr2,
+					d11_m[IEEE80211_DOT11_MSG_CLIENT_DISCONNECTED],
+					d11_c[IEEE80211_DOT11_MSG_REASON_CLIENT_SENT_DISASSOC],
+					reason,
+					(reason < DOT11_MAX_REASON_CODE) ?
+						d11_r[reason] : "Reserved",
+					NULL,
+					NULL);
+
+				ieee80211_nofity_sta_require_leave(ni);
+				ieee80211_node_leave(ni);
+				mlme_stats_delayed_update(wh->i_addr2,
+							MLME_STAT_DIASSOC, 1);
+			} else {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH,
+					"Receive disassociate (reason %d) from unknown addr %pM",
+					reason, wh->i_addr2);
+			}
+			break;
+		default:
+			vap->iv_stats.is_rx_mgtdiscard++;
+			break;
+		}
+		break;
+	}
+	case IEEE80211_FC0_SUBTYPE_ACTION: {
+		struct ieee80211_action *ia;
+
+		IEEE80211_VERIFY_LENGTH(efrm - frm, sizeof(struct ieee80211_action));
+		ia = (struct ieee80211_action *) (void*)frm;
+
+		if (vap->iv_state != IEEE80211_S_RUN &&
+				vap->iv_state != IEEE80211_S_ASSOC &&
+				vap->iv_state != IEEE80211_S_AUTH &&
+				!(ic->ic_flags_qtn & IEEE80211_QTN_MONITOR)) {
+			vap->iv_stats.is_rx_mgtdiscard++;
+			return;
+		}
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP && ni == vap->iv_bss &&
+		    !(IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
+			(ia->ia_category != IEEE80211_ACTION_CAT_PUBLIC)) {
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+					  wh, "mgt", "%s", "src same as bss");
+			if (vap->iv_state == IEEE80211_S_RUN)
+				ieee80211_send_error(ni, wh->i_addr2,
+						     IEEE80211_FC0_SUBTYPE_DEAUTH,
+						     IEEE80211_REASON_NOT_AUTHED);
+			vap->iv_stats.is_rx_notassoc++;
+			return;
+		}
+
+		memcpy(&ni->ni_action, ia, sizeof(struct ieee80211_action));
+
+		vap->iv_stats.is_rx_action++;
+		IEEE80211_NODE_STAT(ni, rx_action);
+
+		switch (ia->ia_category) {
+		case IEEE80211_ACTION_CAT_PUBLIC:
+			ieee80211_recv_action_public(ni, skb, wh, ia, rssi);
+			break;
+		case IEEE80211_ACTION_CAT_TDLS:
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: Ignoring TDLS MGMT frame\n", __func__);
+			IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+					wh, "mgt", "%s frame not allowed", "TDLS Action");
+			vap->iv_stats.is_rx_mgtdiscard++;
+			break;
+		case IEEE80211_ACTION_CAT_SPEC_MGMT:
+			switch (ia->ia_action) {
+			case IEEE80211_ACTION_S_CHANSWITCHANN:
+			{
+				u_int8_t *csa_tsf_ie = NULL;
+				int return_val = 0;
+				int cur_bw;
+
+				if ((efrm - frm) >= (sizeof(struct ieee80211_action) +
+							sizeof(struct ieee80211_ie_csa) +
+							sizeof(struct ieee80211_ie_qtn_csa_tsf))) {
+					csa_tsf_ie = frm + sizeof(struct ieee80211_action) + sizeof(struct ieee80211_ie_csa);
+				}
+				
+				ieee80211_param_from_qdrv(ni->ni_vap, IEEE80211_PARAM_BW_SEL_MUC, &cur_bw, NULL, 0);
+				
+				return_val = ieee80211_parse_csaie(ni, frm + sizeof(struct ieee80211_action), csa_tsf_ie, wh);
+				if (QTN_CSAIE_ERR_CHAN_NOT_SUPP == return_val) {
+					if (ieee80211_narrower_bw_supported(ni, frm + sizeof(struct ieee80211_action), cur_bw)) {
+						/* reassociate */
+						ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+						return;
+					} else if (!ieee80211_handle_csa_invalid_channel(ni, wh)) {
+						return;
+						
+					}
+				} else if (ieee80211_wider_bw_supported(ni, frm + sizeof(struct ieee80211_action), cur_bw)) {
+					/* reassociate */
+					ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+					return;
+				}
+				break;
+			}
+			case IEEE80211_ACTION_S_TPC_REQUEST:
+			{
+				struct ieee80211_action_tpc_report	tpc_report;
+				struct ieee80211_action_data		action_data;
+
+				if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+					frm = frm + sizeof(struct ieee80211_action);
+					tpc_report.rx_token = *frm++;
+					if (*frm == IEEE80211_ELEMID_TPCREQ) {
+						tpc_report.tx_power		= ic->ic_get_local_txpow(ic);
+						ic->ic_get_local_link_margin(ni, &tpc_report.link_margin);
+						action_data.cat			= IEEE80211_ACTION_CAT_SPEC_MGMT;
+						action_data.action		= IEEE80211_ACTION_S_TPC_REPORT;
+						action_data.params		= &tpc_report;
+						IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+					} else {
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+								wh, "mgt", "%s", "Missing TPC REQUEST Element in TPC Request action frame\n");
+						vap->iv_stats.is_rx_elem_missing++;
+					}
+				} else {
+					TPC_DBG(vap, "TPC Request: frame not supported, drop it\n");
+					vap->iv_stats.is_rx_mgtdiscard++;
+				}
+
+				break;
+			}
+			case IEEE80211_ACTION_S_TPC_REPORT:
+			{
+				u_int8_t	tpc_report_token;
+				if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+					frm = frm + sizeof(struct ieee80211_action);
+					tpc_report_token = *frm++;
+					IEEE80211_VERIFY_LENGTH(efrm - frm, 4);
+					if ((frm[0] == IEEE80211_ELEMID_TPCREP) && (frm[1] == 2)) {
+						frm += 2;
+						ni->ni_tpc_info.tpc_report.node_txpow = *frm++;
+						ni->ni_tpc_info.tpc_report.node_link_margin = *frm++;
+
+						ieee80211_ppqueue_remove_with_response(&ni->ni_vap->iv_ppqueue,
+								ni,
+								IEEE80211_ACTION_CAT_SPEC_MGMT,
+								IEEE80211_ACTION_S_TPC_REPORT,
+								tpc_report_token);
+					} else {
+						IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+								wh, "mgt", "%s", "Missing TPC REPORT Element in TPC Report action frame\n");
+						vap->iv_stats.is_rx_elem_missing++;
+					}
+				} else {
+					TPC_DBG(vap, "TPC Report: frame not supported, drop it\n");
+					vap->iv_stats.is_rx_mgtdiscard++;
+				}
+				break;
+			}
+			case IEEE80211_ACTION_S_MEASUREMENT_REPORT:
+			case IEEE80211_ACTION_S_MEASUREMENT_REQUEST:
+				ieee80211_recv_action_measure_11h(ni, ia, wh, efrm);
+				break;
+			default:
+				vap->iv_stats.is_rx_mgtdiscard++;
+			}
+			break;
+		case IEEE80211_ACTION_CAT_QOS:
+			vap->iv_stats.is_rx_mgtdiscard++;
+			break;
+		case IEEE80211_ACTION_CAT_HT:
+			ieee80211_action_ht(ni, skb, wh, subtype, ia, frm, efrm);
+			break;
+		case IEEE80211_ACTION_CAT_BA:
+			ieee80211_action_ba(ni, wh, subtype, ia, frm, efrm);
+			break;
+		case IEEE80211_ACTION_CAT_RM:
+			ieee80211_recv_action_11k(ni, ia, wh, efrm);
+			break;
+		case IEEE80211_ACTION_CAT_VENDOR: {
+			struct qdrv_vendor_action_header *va;
+			va = (struct qdrv_vendor_action_header *) (void*)frm;
+#ifdef CONFIG_QHOP
+			if (va->type == QDRV_ACTION_TYPE_QHOP) {
+				if (va->action == QDRV_ACTION_QHOP_DFS_REPORT) {
+					if (IEEE80211_VAP_WDS_IS_MBS(vap))
+						ic->ic_radar_detected(ic, 0);
+				}
+			}
+#endif
+#ifdef CONFIG_QVSP
+			if (va->type != QDRV_ACTION_TYPE_QHOP) {
+				ieee80211_recv_action_vsp(ni, frm, efrm);
+			}
+#endif
+			break;
+		}
+		case IEEE80211_ACTION_CAT_SA_QUERY:
+			ieee80211_recv_action_sa_query(ni, ia, wh, efrm);
+			break;
+		case IEEE80211_ACTION_CAT_VHT:
+			ieee80211_recv_action_vht(ni, ia, subtype, wh, frm, efrm);
+			break;
+		case IEEE80211_ACTION_CAT_WNM:
+			ieee80211_recv_action_wnm(ni, ia, subtype, wh, frm);
+			break;
+		default:
+			vap->iv_stats.is_rx_mgtdiscard++;
+			break;
+		}
+		break;
+	}
+	default:
+		IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+			wh, "mgt", "subtype 0x%x not handled", subtype);
+		vap->iv_stats.is_rx_badsubtype++;
+		break;
+	}
+#undef ISREASSOC
+#undef ISPROBE
+}
+
+#undef IEEE80211_VERIFY_ELEMENT
+
+/*
+ * Process a received ps-poll frame from a power-save station.
+ *
+ * The sender must be associated and the AID carried in the poll must match
+ * the node's AID; otherwise the frame is discarded and a deauth is sent.
+ * On success exactly one buffered frame is released (or a null-data frame
+ * when the queue is empty) and the TIM bit is updated accordingly.
+ */
+static void
+ieee80211_recv_pspoll(struct ieee80211_node *ni, struct sk_buff *skb0)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_frame_min *wh = (struct ieee80211_frame_min *)skb0->data;
+	struct sk_buff *resp;
+	u_int16_t poll_aid;
+	int remaining;
+
+	/* A station must be associated before it may poll for traffic */
+	if (ni->ni_associd == 0) {
+		IEEE80211_DISCARD(vap,
+			IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG,
+			(struct ieee80211_frame *) wh, "ps-poll",
+			"%s", "unassociated station");
+		vap->iv_stats.is_ps_unassoc++;
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+			IEEE80211_REASON_NOT_ASSOCED);
+		return;
+	}
+
+	/* In a ps-poll the duration field carries the AID */
+	poll_aid = le16toh(*(__le16 *)wh->i_dur);
+	if (poll_aid != ni->ni_associd) {
+		IEEE80211_DISCARD(vap,
+			IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG,
+			(struct ieee80211_frame *) wh, "ps-poll",
+			"aid mismatch: sta aid 0x%x poll aid 0x%x",
+			ni->ni_associd, poll_aid);
+		vap->iv_stats.is_ps_badaid++;
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+			IEEE80211_REASON_NOT_ASSOCED);
+		return;
+	}
+
+	/* Take exactly one queued frame, under the save-queue lock */
+	IEEE80211_NODE_SAVEQ_LOCK(ni);
+	IEEE80211_NODE_SAVEQ_DEQUEUE(ni, resp, remaining);
+	IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+
+	if (resp == NULL) {
+		/* Nothing buffered: answer the poll with a null-data frame */
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2,
+			"%s", "recv ps-poll, but queue empty");
+		ieee80211_ref_node(ni);
+		ieee80211_send_nulldata(ni);
+		vap->iv_stats.is_ps_qempty++;	/* XXX node stat */
+		if (vap->iv_set_tim != NULL)
+			vap->iv_set_tim(ni, 0);		/* just in case */
+		return;
+	}
+
+	/*
+	 * If more frames remain, the more-data bit is set for the
+	 * dispatched frame during encap; otherwise clear the TIM bit.
+	 */
+	if (remaining == 0) {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+			"%s", "recv ps-poll, send packet, queue empty");
+		if (vap->iv_set_tim != NULL)
+			vap->iv_set_tim(ni, 0);
+	} else {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+			"recv ps-poll, send packet, %u still queued", remaining);
+	}
+	M_PWR_SAV_SET(resp);		/* ensure MORE_DATA bit is set correctly */
+
+	ieee80211_parent_queue_xmit(resp);	/* Submit to parent device, including updating stats */
+}
+
+#ifdef USE_HEADERLEN_RESV
+/*
+ * The kernel version of this function alters the skb in a manner
+ * inconsistent with dev->hard_header_len header reservation. This
+ * is a rewrite of the portion of eth_type_trans() that we need.
+ */
+static __be16
+ath_eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ethhdr *eth;
+
+	skb_reset_mac_header(skb);
+	skb_pull(skb, ETH_HLEN);
+	/*
+	 * NB: mac.ethernet is replaced in 2.6.9 by eth_hdr but
+	 *     since that's an inline and not a define there's
+	 *     no easy way to do this cleanly.
+	 */
+	eth = (struct ethhdr *)skb_mac_header(skb);
+
+	if (*eth->h_dest & 1) {
+		/* Group address: broadcast if it matches the device's
+		 * broadcast address, multicast otherwise. */
+		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	} else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN) != 0) {
+		/* Unicast destination that is not our own MAC */
+		skb->pkt_type = PACKET_OTHERHOST;
+	}
+
+	return eth->h_proto;
+}
+#endif
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Debugging support.
+ */
+
+/*
+ * Return the BSSID of a received frame.
+ */
+static const u_int8_t *
+ieee80211_getbssid(struct ieee80211vap *vap, const struct ieee80211_frame *wh)
+{
+	/* As a station, the sender (addr2) is always the BSSID */
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		return wh->i_addr2;
+	/* addr1 carries the BSSID when the direction is not NODS,
+	 * and for ps-poll control frames */
+	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_NODS ||
+	    (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PS_POLL)
+		return wh->i_addr1;
+	return wh->i_addr3;
+}
+
+/* used for send formatted string custom event IWEVCUSTOM */
+int ieee80211_eventf(struct net_device *dev, const char *fmt, ...)
+{
+	union iwreq_data wreq;
+	char buffer[IW_CUSTOM_MAX];
+	va_list args;
+	int written;
+
+	if (dev == NULL)
+		return 0;
+
+	/* Format the custom wireless event */
+	memset(&wreq, 0, sizeof(wreq));
+
+	va_start(args, fmt);
+	written = vsnprintf(buffer, IW_CUSTOM_MAX, fmt, args);
+	va_end(args);
+
+	/* Report the length of the (possibly truncated) message */
+	wreq.data.length = strnlen(buffer, IW_CUSTOM_MAX);
+	wireless_send_event(dev, IWEVCUSTOM, &wreq, buffer);
+	return written;
+}
+EXPORT_SYMBOL(ieee80211_eventf);
+
+/* Log a formatted note tagged with the vap's device name */
+void
+ieee80211_note(struct ieee80211vap *vap, const char *fmt, ...)
+{
+	va_list ap;
+	char msg[128];		/* XXX fixed-size scratch buffer */
+
+	va_start(ap, fmt);
+	vsnprintf(msg, sizeof(msg), fmt, ap);
+	va_end(ap);
+
+	printk("%s: %s", vap->iv_dev->name, msg);	/* NB: no \n */
+}
+EXPORT_SYMBOL(ieee80211_note);
+
+/* Log a formatted note tagged with the device name and the frame's BSSID */
+void
+ieee80211_note_frame(struct ieee80211vap *vap, const struct ieee80211_frame *wh,
+	const char *fmt, ...)
+{
+	va_list ap;
+	char msg[128];		/* XXX fixed-size scratch buffer */
+
+	va_start(ap, fmt);
+	vsnprintf(msg, sizeof(msg), fmt, ap);
+	va_end(ap);
+
+	printk("%s: [%s] %s\n", vap->iv_dev->name,
+		ether_sprintf(ieee80211_getbssid(vap, wh)), msg);
+}
+EXPORT_SYMBOL(ieee80211_note_frame);
+
+/* Log a formatted note tagged with the device name and a MAC address */
+void
+ieee80211_note_mac(struct ieee80211vap *vap, const u_int8_t mac[IEEE80211_ADDR_LEN],
+	const char *fmt, ...)
+{
+	va_list ap;
+	char msg[128];		/* XXX fixed-size scratch buffer */
+
+	va_start(ap, fmt);
+	vsnprintf(msg, sizeof(msg), fmt, ap);
+	va_end(ap);
+
+	printk("%s: [%s] %s\n", vap->iv_dev->name, ether_sprintf(mac), msg);
+}
+EXPORT_SYMBOL(ieee80211_note_mac);
+
+/* Log a discarded frame; 'type' optionally names the frame class */
+static void
+ieee80211_discard_frame(struct ieee80211vap *vap, const struct ieee80211_frame *wh,
+	const char *type, const char *fmt, ...)
+{
+	va_list ap;
+	char msg[128];		/* XXX fixed-size scratch buffer */
+
+	va_start(ap, fmt);
+	vsnprintf(msg, sizeof(msg), fmt, ap);
+	va_end(ap);
+	if (type == NULL)
+		printk("[%s:%s] discard frame, %s\n", vap->iv_dev->name,
+			ether_sprintf(ieee80211_getbssid(vap, wh)), msg);
+	else
+		printk("[%s:%s] discard %s frame, %s\n", vap->iv_dev->name,
+			ether_sprintf(ieee80211_getbssid(vap, wh)), type, msg);
+}
+
+/* Log a discarded information element; 'type' optionally names the IE */
+static void
+ieee80211_discard_ie(struct ieee80211vap *vap, const struct ieee80211_frame *wh,
+	const char *type, const char *fmt, ...)
+{
+	va_list ap;
+	char msg[128];		/* XXX fixed-size scratch buffer */
+
+	va_start(ap, fmt);
+	vsnprintf(msg, sizeof(msg), fmt, ap);
+	va_end(ap);
+	if (type == NULL)
+		printk("[%s:%s] discard information element, %s\n",
+			vap->iv_dev->name,
+			ether_sprintf(ieee80211_getbssid(vap, wh)), msg);
+	else
+		printk("[%s:%s] discard %s information element, %s\n",
+			vap->iv_dev->name,
+			ether_sprintf(ieee80211_getbssid(vap, wh)), type, msg);
+}
+
+/* Log a discarded frame identified by sender MAC rather than BSSID */
+static void
+ieee80211_discard_mac(struct ieee80211vap *vap, const u_int8_t mac[IEEE80211_ADDR_LEN],
+	const char *type, const char *fmt, ...)
+{
+	va_list ap;
+	char msg[128];		/* XXX fixed-size scratch buffer */
+
+	va_start(ap, fmt);
+	vsnprintf(msg, sizeof(msg), fmt, ap);
+	va_end(ap);
+	if (type == NULL)
+		printk("[%s:%s] discard frame, %s\n", vap->iv_dev->name,
+			ether_sprintf(mac), msg);
+	else
+		printk("[%s:%s] discard %s frame, %s\n", vap->iv_dev->name,
+			ether_sprintf(mac), type, msg);
+}
+#endif /* IEEE80211_DEBUG */
+
+/*
+ * Handle a received VHT-category action frame.
+ *
+ * Supported actions: operating-mode notification (pushed down via
+ * ieee80211_param_to_qdrv) and MU group ID assignment (copied into the
+ * bss node and pushed to the MuC/HW).  Compressed beamforming reports
+ * and unknown actions are counted as discarded management frames.
+ */
+static void ieee80211_recv_action_vht(struct ieee80211_node *ni,
+				      struct ieee80211_action *ia,
+				      int subtype,
+				      struct ieee80211_frame *wh,
+				      u_int8_t *frm,
+				      u_int8_t *efrm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_vht_mu_grp *mu_grp;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_action_vht_opmode_notification *iaopmode;
+
+	switch (ia->ia_action) {
+	case IEEE80211_ACTION_VHT_CBEAMFORMING:
+		/*
+		 * Fix: the message was previously split with a backslash
+		 * line continuation, which embedded the next source line's
+		 * leading tabs into the logged string.  String literal
+		 * concatenation yields the intended single-space message.
+		 */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+				  "VHT: Not handling compressed beamforming frame "
+				  "from station %pM\n", ni->ni_macaddr);
+		vap->iv_stats.is_rx_mgtdiscard++;
+		break;
+	case IEEE80211_ACTION_VHT_OPMODE_NOTIFICATION:
+		/* Bounds-check before dereferencing (macro returns on failure) */
+		IEEE80211_VERIFY_LENGTH(efrm - frm,
+					sizeof(struct ieee80211_action_vht_opmode_notification));
+
+		iaopmode = (struct ieee80211_action_vht_opmode_notification *)frm;
+
+		{
+			/* Recompute the node's operating mode and push it down */
+			uint8_t opmode = recalc_opmode(ni, iaopmode->am_opmode);
+			ieee80211_param_to_qdrv(ni->ni_vap, IEEE80211_PARAM_NODE_OPMODE,
+					opmode, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+		}
+		break;
+	case IEEE80211_ACTION_VHT_MU_GRP_ID:
+		/*
+		 * Fix: verify the frame actually contains a full MU group
+		 * structure after the action header before reading it; the
+		 * payload is untrusted over-the-air input (macro returns on
+		 * failure, consistent with the opmode case above).
+		 */
+		IEEE80211_VERIFY_LENGTH(efrm - frm,
+					sizeof(struct ieee80211_action) +
+						sizeof(struct ieee80211_vht_mu_grp));
+		if ((vap->iv_opmode == IEEE80211_M_STA) &&
+				ieee80211_swfeat_is_supported(SWFEAT_ID_MU_MIMO, 0)) {
+			/* AP sends me my MU group and position arrays, push it down to Muc/HW */
+			mu_grp = (struct ieee80211_vht_mu_grp *)(ia + 1);
+			memcpy(&vap->iv_bss->ni_mu_grp, mu_grp, sizeof(struct ieee80211_vht_mu_grp));
+			ic->ic_setparam(ni, IEEE80211_PARAM_UPDATE_MU_GRP, 1,
+				(unsigned char *)&vap->iv_bss->ni_mu_grp,
+				sizeof(struct ieee80211_vht_mu_grp));
+		}
+		break;
+	default:
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+				  "VHT: Invalid action frame(%d) from station %pM\n",
+				  ia->ia_action, ni->ni_macaddr);
+		vap->iv_stats.is_rx_mgtdiscard++;
+		break;
+	}
+}
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+/* Count the spatial streams advertised as supported in a VHT MCS map */
+uint32_t ieee80211_wlan_vht_mcs_streams(uint16_t mcsmap)
+{
+	uint32_t nss;
+
+	/*
+	 * The top two bits of the map describe the highest stream count;
+	 * each left shift by two moves the next lower stream count into
+	 * that position.  Stop at the first stream marked as supported.
+	 */
+	for (nss = 8; nss > 0; nss--) {
+		if ((mcsmap & BSA_VHT_MCSMAP_MASK) != BSA_VHT_MCSMAP_NOT_SUPPORT)
+			break;
+		mcsmap <<= 2;
+	}
+	return nss;
+}
+
+/* Return the RX spatial stream count advertised in a VHT capability IE */
+uint32_t ieee80211_wlan_vht_rxstreams(struct ieee80211_ie_vhtcap *vhtcap)
+{
+	return ieee80211_wlan_vht_mcs_streams(
+			(u_int16_t)IEEE80211_VHTCAP_GET_RX_MCS_NSS(vhtcap));
+}
+EXPORT_SYMBOL(ieee80211_wlan_vht_rxstreams);
+
+/*
+ * Return the maximum VHT RX PHY rate (in Mbps) advertised by a VHT
+ * capability IE, or 0 when no capability is given or no stream is
+ * marked supported.  Scans the RX MCS map from the highest stream
+ * count downwards and stops at the first supported entry.
+ */
+uint32_t ieee80211_wlan_vht_rx_maxrate(struct ieee80211_ie_vhtcap *vhtcap)
+{
+	uint32_t max_rate = 0;
+	uint16_t mask = BSA_VHT_MCSMAP_MASK;
+	u_int16_t mcsmap;
+	int chan_mode = 0;
+	uint8_t sgi;
+	int k;
+
+	if (vhtcap == NULL)
+		return 0;
+
+	/* 80+80 or 160 Mhz */
+	if (IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap)) {
+		chan_mode = 1;
+		sgi = IEEE80211_VHTCAP_GET_SGI_160MHZ(vhtcap);
+	} else {
+		sgi = IEEE80211_VHTCAP_GET_SGI_80MHZ(vhtcap);
+	}
+
+	mcsmap = (u_int16_t)IEEE80211_VHTCAP_GET_RX_MCS_NSS(vhtcap);
+	for (k = 8; k > 0; k--) {
+		/* A field equal to the mask means "not supported" */
+		if ((mcsmap & mask) != mask) {
+			int val = (mcsmap & mask) >> ((k - 1) * 2);
+			/* 0 -> MCS0-7, 1 -> MCS0-8, 2 -> MCS0-9 */
+			int mcs = (val == 2) ? 9 : (val == 1) ? 8 : 7;
+			int rate = ieee80211_mcs2rate(mcs, chan_mode, sgi, 1);
+			if (rate >= 0) {
+				/* half-rate units -> Mbps, scaled by streams */
+				rate = (rate / 2) * k;
+				if (max_rate < rate)
+					max_rate = rate;
+				break;
+			}
+		}
+		mask >>= 2;
+	}
+
+	return max_rate;
+}
+EXPORT_SYMBOL(ieee80211_wlan_vht_rx_maxrate);
+
+/*
+ * Return the maximum HT RX PHY rate (in Mbps) advertised by an HT
+ * capability IE, or 0 when no capability is given.  Also writes the
+ * highest stream-set index with any MCS bit set into *rx_ss (left
+ * untouched when htcap is NULL or no MCS bit is set).
+ */
+uint32_t ieee80211_wlan_ht_rx_maxrate(struct ieee80211_ie_htcap *htcap, uint32_t *rx_ss)
+{
+	uint32_t best = 0;
+	int chan_mode = 0;
+	uint8_t sgi = 0;
+	int mcs_idx = 0;
+	int set_idx;
+	int bit;
+
+	if (htcap) {
+		if (htcap->hc_cap[0] & IEEE80211_HTCAP_C_CHWIDTH40) {
+			chan_mode = 1;
+			sgi = (htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI40) ? 1 : 0;
+		} else {
+			sgi = (htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI20) ? 1 : 0;
+		}
+		/* Walk every MCS bit across the NSS1..NSS4 set bytes */
+		for (set_idx = IEEE80211_HT_MCSSET_20_40_NSS1;
+				set_idx <= IEEE80211_HT_MCSSET_20_40_NSS4; set_idx++) {
+			for (bit = 0; bit < 8; bit++, mcs_idx++) {
+				int rate;
+
+				if (!(htcap->hc_mcsset[set_idx] & (1 << bit)))
+					continue;
+				/* half-rate units -> Mbps */
+				rate = ieee80211_mcs2rate(mcs_idx, chan_mode, sgi, 0) / 2;
+				if (rate >= 0 && best < rate)
+					best = rate;
+				*rx_ss = set_idx + 1;
+			}
+		}
+	}
+	return best;
+}
+EXPORT_SYMBOL(ieee80211_wlan_ht_rx_maxrate);
+
+/*
+ * Forward a received probe request to the BSA (band-steering) daemon as a
+ * "BSA-PEER-EVENT" custom wireless event.
+ *
+ * The frame in @skb is scanned for HT/VHT/extended-capability IEs, from
+ * which per-station capability summaries (max PHY rate, spatial streams,
+ * bandwidth bits, 11v BTM support, MU-MIMO bits) are derived and packed
+ * after a qtn_bsa_peer_event_data header.  Always returns 0.
+ */
+int ieee80211_bsa_probe_event_send(struct ieee80211vap *vap,struct sk_buff *skb, uint8_t *bssid,
+									uint8_t *sta_mac, int rssi)
+{
+	uint8_t event_data[IEEE80211_MAX_EVENT_DATA_LEN];
+	struct ieee80211_frame *wh;
+	uint8_t *frm;
+	uint8_t *efrm;
+	uint8_t *htcap = NULL;
+	uint8_t *htinfo = NULL;
+	uint8_t *vhtcap = NULL;
+	uint8_t *vhtop = NULL;
+	/* NOTE(review): chan is parsed from FH/DS params below but never
+	 * reported in the event -- dead result; confirm intent. */
+	uint8_t chan=0;
+	union iwreq_data wreq;
+	struct ieee80211_ie_htcap *htcap_ie = NULL;
+	struct ieee80211_ie_vhtcap *vhtcap_ie = NULL;
+	uint8_t *extcap = NULL;
+	struct qtn_bsa_peer_event_data *p_data;
+	struct qtn_bsa_probe_event_info *pevent;
+	uint32_t max_ht_phy_rate = 54;	/* legacy fallback when no HT IE */
+	uint32_t max_vht_phy_rate = 0;
+	uint32_t max_ht_ss = 1;
+	uint32_t max_vht_ss = 0;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint16_t bandwidth = 1;
+
+	memset(&event_data, 0, IEEE80211_MAX_EVENT_DATA_LEN);
+
+	/* Event layout: header followed immediately by the probe info. */
+	p_data = (void *)event_data;
+	pevent = (void *)(event_data + sizeof(struct qtn_bsa_peer_event_data));
+
+	wh = (struct ieee80211_frame *)skb->data;
+	frm = (u_int8_t *)&wh[1];
+	efrm = skb->data + skb->len;
+
+	/* NOTE(review): not NUL-terminated if the tag exactly fills
+	 * bsa_name; the buffer was zeroed above, so this is only an issue
+	 * when sizeof(bsa_name) <= 14 -- confirm the field size. */
+	strncpy(p_data->bsa_name, "BSA-PEER-EVENT", sizeof(p_data->bsa_name));
+
+	put_unaligned(BSA_PROBE_EVENT_REQ, &p_data->bsa_event_id);
+	memcpy(p_data->bsa_bssid, bssid, IEEE80211_ADDR_LEN);
+	put_unaligned(sizeof(struct qtn_bsa_peer_event_data), &p_data->offset);
+
+	if (sta_mac) {
+		memcpy(pevent->bsa_sta_mac, sta_mac, IEEE80211_ADDR_LEN);
+	}
+
+	/* Walk the IEs following the 802.11 header, remembering the ones we
+	 * summarise below.
+	 * NOTE(review): frm[1] is not validated against efrm, so a
+	 * truncated/malformed IE can push frm past the buffer and the last
+	 * iteration may read beyond efrm -- confirm callers sanitise
+	 * element lengths before this point. */
+	while (frm < efrm) {
+		switch (*frm) {
+		case IEEE80211_ELEMID_FHPARMS:
+			chan = IEEE80211_FH_CHAN(frm[4], frm[5]);
+			break;
+		case IEEE80211_ELEMID_DSPARMS:
+			chan = frm[2];
+			break;
+		case IEEE80211_ELEMID_HTCAP:
+			htcap = frm;
+			break;
+		case IEEE80211_ELEMID_VHTCAP:
+			vhtcap = frm;
+			break;
+		case IEEE80211_ELEMID_VHTOP:
+			vhtop = frm;
+			break;
+		case IEEE80211_ELEMID_HTINFO:
+			htinfo = frm;
+			break;
+		case IEEE80211_ELEMID_EXTCAP:
+			extcap = frm;
+			break;
+		default:
+			break;
+		}
+
+		frm += frm[1] + 2;
+	}
+
+	put_unaligned(rssi, &pevent->bsa_rssi);
+	put_unaligned(0, &pevent->bsa_band_width);
+	pevent->bsa_mu_mimo_capab = 0;
+	pevent->bsa_vht_capab = 0;
+	put_unaligned(0, &pevent->bsa_bss_transition);
+	/* Extended Capabilities, octet 3 bit 3: BSS Transition (11v BTM). */
+	if (extcap) {
+		uint8_t value;
+		value = extcap[1];
+		if (value >= 3) {
+			value = extcap[4];
+			if (value & 0x8)
+				put_unaligned(1, &pevent->bsa_bss_transition);
+		}
+	}
+
+	if (htcap != NULL) {
+		htcap_ie = (struct ieee80211_ie_htcap *) htcap;
+		max_ht_phy_rate = ieee80211_wlan_ht_rx_maxrate(htcap_ie, &max_ht_ss);
+		/* hc_cap[0] bit 1: 40 MHz channel-width support. */
+		bandwidth = ((htcap_ie->hc_cap[0] & 0x2) >> 1);
+		pevent->bsa_vht_capab |= BSA_HT_SUPPORTED;
+	}
+
+	if (vhtcap != NULL) {
+		vhtcap_ie = (struct ieee80211_ie_vhtcap * ) vhtcap;
+		max_vht_phy_rate = ieee80211_wlan_vht_rx_maxrate(vhtcap_ie);
+		pevent->bsa_vht_capab |= BSA_VHT_SUPPORTED;
+		/* vht_cap[2] bits 3-4: MU beamformer/beamformee capable. */
+		pevent->bsa_mu_mimo_capab = ((vhtcap_ie->vht_cap[2] & 0x18)>>3);
+		max_vht_ss = ieee80211_wlan_vht_rxstreams(vhtcap_ie);
+		/* vht_cap[0] bits 2-3: supported channel width set. */
+		bandwidth |= ((vhtcap_ie->vht_cap[0] & 0xc) >> 1);
+	}
+	put_unaligned(bandwidth, &pevent->bsa_band_width);
+	/* Report the better of the HT and VHT derived ceilings. */
+	put_unaligned((max_vht_phy_rate > max_ht_phy_rate)? max_vht_phy_rate : max_ht_phy_rate,
+		&pevent->bsa_max_phy_rate);
+	put_unaligned((max_ht_ss > max_vht_ss) ? max_ht_ss : max_vht_ss,
+		&pevent->bsa_nss);
+
+	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
+		put_unaligned(BSA_OPER_BAND_5G, &pevent->bsa_curr_band);
+	else
+		put_unaligned(BSA_OPER_BAND_2G, &pevent->bsa_curr_band);
+
+	put_unaligned(ic->ic_curchan->ic_ieee, &pevent->bsa_channel);
+
+	/* NOTE(review): cookie points into the live skb and is only valid
+	 * while the skb exists -- confirm the consumer copies the frame
+	 * synchronously during wireless_send_event(). */
+	put_unaligned(skb->len, &pevent->cookie_len);
+	pevent->cookie = skb->data;
+
+	memset(&wreq, 0, sizeof(wreq));
+	/* Length covers header + event info only; the cookie payload is
+	 * referenced by pointer, not appended. */
+	wreq.data.length = sizeof(*pevent) + sizeof(*p_data);
+	wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, (char *)&event_data);
+
+	return 0;
+}
+#endif
+
+/* BSS transition management query reasons: IEEE 802.11-2012, Table 8-138.
+ * Indexed by the received reason code after clamping to the final
+ * "Reserved" slot (see ieee80211_handle_btm_query). */
+char *btm_query_reason[] = {
+	"Unspecified",
+	"Excessive frame loss rates and/or poor conditions",
+	"Excessive delay for current traffic streams",
+	"Insufficient QoS capacity for current traffic streams (TSPEC rejected)",
+	"First association to ESS (the association initiated by an Association "
+		"Request message instead of a Reassociation Request message)",
+	"Load balancing",
+	"Better AP found",
+	"Deauthenticated or Disassociated from the previous AP",
+	"AP failed IEEE 802.1X EAP Authentication",
+	"AP failed 4-Way Handshake",
+	"Received too many replay counter failures",
+	"Received too many data MIC failures",
+	"Exceeded maximum number of retransmissions",
+	"Received too many broadcast disassociations",
+	"Received too many broadcast deauthentications",
+	"Previous transition failed",
+	"Low RSSI",
+	"Roam from a non IEEE 802.11 system",
+	"Transition due to received BSS Transition Request frame",
+	"Preferred BSS transition candidate list included",
+	"Leaving ESS",
+	"Reserved"
+};
+/* Table size; out-of-range codes are clamped to the last entry. */
+#define WNM_BTM_QUERY_REASON_CODE_MAX		ARRAY_SIZE(btm_query_reason)
+
+/* BTM response codes */
+char *btm_resp_status_codes[] = {
+	"Accept",
+	"Reject - Unspecified reject reason",
+	"Reject - Insufficient Beacon or Probe Response frames received from all candidates",
+	"Reject - Insufficient available capacity from all candidates",
+	"Reject - BSS termination undesired",
+	"Reject - BSS Termination delay requested",
+	"Reject - STA BSS Transition Candidate List provided",
+	"Reject - No suitable BSS transition candidates",
+	"Reject - Leaving ESS",
+	"Reserved"
+};
+#define WNM_BTM_RESPONSE_STATUS_CODE_MAX	ARRAY_SIZE(btm_resp_status_codes)
+
+/*
+ * Build a BTM preferred-candidate list (Neighbor Report elements, each
+ * with a Candidate Preference subelement) for a solicited BTM request.
+ *
+ * On success *list points to a kmalloc'd buffer the caller must kfree()
+ * and the buffer size in bytes is returned.  On failure (no neighbor
+ * reports, or allocation failure) *list is NULL and 0 is returned --
+ * never a non-zero size without a buffer.
+ */
+int
+ieee80211_wnm_btm_create_pref_candidate_list(struct ieee80211_node *ni, uint8_t **list)
+{
+	struct ieee80211_neighbor_report_request_item *item_cache = NULL;
+	struct ieee80211_neighbor_report_request_item *item;
+	uint8_t *pos;
+	int i;
+	int num_items;
+	int neigh_repo_size;
+	uint8_t pref = 255;	/* earlier candidates rank higher */
+
+	*list = NULL;
+
+	num_items = ieee80211_create_neighbor_reports(ni, &item_cache,
+				NEIGH_REPORTS_MAX, 1);
+	if (num_items <= 0) {
+		kfree(item_cache);
+		return 0;
+	}
+
+	neigh_repo_size = (sizeof(struct ieee80211_ie_neighbor_report)
+			+ sizeof(struct ieee80211_subie_pref)) * num_items;
+	*list = kmalloc(neigh_repo_size, GFP_ATOMIC);
+	if (*list == NULL) {
+		/* Report no data rather than a size the caller cannot use. */
+		kfree(item_cache);
+		return 0;
+	}
+
+	pos = *list;
+	/* Iterate with a cursor so item_cache still points at the original
+	 * allocation when it is freed below. */
+	for (i = 0, item = item_cache; i < num_items; i++, item++) {
+		*pos++ = IEEE80211_ELEMID_NEIGHBOR_REP;
+		*pos++ = sizeof(struct ieee80211_ie_neighbor_report)
+				+ sizeof(struct ieee80211_subie_pref)
+				- IEEE80211_IE_ID_LEN_SIZE;
+		memcpy(pos, item->bssid, IEEE80211_ADDR_LEN);
+		pos += IEEE80211_ADDR_LEN;
+		ADDINT32(pos, item->bssid_info);
+		*pos++ = item->operating_class;
+		*pos++ = item->channel;
+		*pos++ = item->phy_type;
+		/* fill preference sub element */
+		*pos++ = WNM_NEIGHBOR_BTM_CANDIDATE_PREFERENCE;
+		*pos++ = 1;
+		*pos++ = pref--;
+	}
+
+	kfree(item_cache);
+
+	return neigh_repo_size;
+}
+
+/*
+ * Handle a received BSS Transition Management Query: log the reason and
+ * answer with a solicited BTM request, attaching a preferred-candidate
+ * list when one can be built.
+ */
+static void
+ieee80211_handle_btm_query(struct ieee80211_node *ni, struct ieee80211_action_btm_query *query)
+{
+	uint8_t rcode;
+	uint8_t mode = BTM_REQ_PREF_CAND_LIST_INCLUDED | BTM_REQ_ABRIDGED;
+	int nsize;
+	uint8_t *neigh_repos = NULL;
+
+	/* Clamp out-of-range reason codes to the final "Reserved" entry. */
+	rcode = (query->btm_query_param.reason < WNM_BTM_QUERY_REASON_CODE_MAX) ?
+			query->btm_query_param.reason :
+			(WNM_BTM_QUERY_REASON_CODE_MAX - 1);
+	IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ACTION,
+			"WNM: Received BSS Transition management query from station %pM token: %u reason %s\n",
+			ni->ni_macaddr, query->btm_query_param.dialog_token,
+			btm_query_reason[rcode]);
+	nsize = ieee80211_wnm_btm_create_pref_candidate_list(ni, &neigh_repos);
+
+	/* Without a candidate list (including allocation failure, which can
+	 * leave neigh_repos NULL) do not advertise one in the request. */
+	if (nsize == 0 || neigh_repos == NULL) {
+		nsize = 0;
+		mode &= ~BTM_REQ_PREF_CAND_LIST_INCLUDED;
+	}
+
+	if (ieee80211_send_wnm_bss_tm_solicited_req(ni, mode, 0,
+						WNM_BTM_DEFAULT_VAL_INTVAL,
+						NULL, NULL, neigh_repos,
+						nsize,
+						query->btm_query_param.dialog_token))
+		IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ACTION, "WNM: Failed to send BTM request %pM\n",
+						ni->ni_macaddr);
+	kfree(neigh_repos);
+}
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+/*
+ * Forward a station's BTM response status to the BSA daemon as a
+ * "BSA-PEER-EVENT" custom wireless event.  Always returns 0.
+ */
+static int ieee80211_bsa_btm_resp_event(struct ieee80211vap *vap,struct ieee80211_node *ni, uint8_t status)
+{
+	struct qtn_bsa_peer_event_data *p_data;
+	struct ieee80211_bsa_btm_resp_event *pevent;
+	uint8_t event_data[IEEE80211_MAX_EVENT_DATA_LEN];
+	union iwreq_data wreq;
+
+	/* Zero the whole buffer so no uninitialised stack bytes reach
+	 * userspace (matches ieee80211_bsa_probe_event_send). */
+	memset(event_data, 0, sizeof(event_data));
+
+	p_data = (void *)event_data;
+	pevent = (void *)(event_data + sizeof(struct qtn_bsa_peer_event_data));
+	strncpy(p_data->bsa_name, "BSA-PEER-EVENT", sizeof(p_data->bsa_name));
+	put_unaligned(BSA_EVENT_BSS_TRANS_STATUS, &p_data->bsa_event_id);
+	memcpy(p_data->bsa_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	put_unaligned(sizeof(struct qtn_bsa_peer_event_data), &p_data->offset);
+	memcpy(pevent->bsa_sta_mac, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	put_unaligned(status, &pevent->bsa_btm_resp_status);
+	memset(&wreq, 0, sizeof(wreq));
+	wreq.data.length = sizeof(*pevent) + sizeof(*p_data);
+	wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, (char *)&event_data);
+
+	return 0;
+}
+#endif
+
+/*
+ * Handle a received BSS Transition Management Response: log it and clear
+ * the matching pending BTM request.
+ * NOTE(review): "hanle" is a typo for "handle"; kept because the name is
+ * referenced by ieee80211_recv_action_wnm() in this file.
+ */
+static void
+ieee80211_hanle_btm_resp(struct ieee80211_node *ni, struct ieee80211_action_btm_rsp *rsp)
+{
+	uint8_t scode;
+
+	/* Clamp out-of-range status codes to the final "Reserved" entry. */
+	if (rsp->btm_rsp_param.status_code >= WNM_BTM_RESPONSE_STATUS_CODE_MAX)
+		scode = WNM_BTM_RESPONSE_STATUS_CODE_MAX - 1;
+	else
+		scode = rsp->btm_rsp_param.status_code;
+	IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ACTION,
+			"WNM: Received BSS Transition management response from station %pM token: %u status code %s\n",
+			ni->ni_macaddr, rsp->btm_rsp_param.dialog_token, btm_resp_status_codes[scode]);
+	/*
+	 * TBD: we don't have full WNM upper layer information or management
+	 * control for now; just log and clear the pending BTM request.
+	 */
+	if ((ni->ni_btm_req != 0) && (ni->ni_btm_req == rsp->btm_rsp_param.dialog_token)) {
+		del_timer_sync(&ni->ni_btm_resp_wait_timer);
+		ni->ni_btm_req = 0;
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+		ieee80211_bsa_btm_resp_event(ni->ni_vap, ni, scode);
+#endif
+	}
+}
+
+/*
+ * Dispatch received WNM category action frames.  Unknown actions are
+ * logged and counted as discarded management frames.
+ */
+static void
+ieee80211_recv_action_wnm(struct ieee80211_node *ni,
+			struct ieee80211_action *ia,
+			int subtype,
+			struct ieee80211_frame *wh,
+			u_int8_t *frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	switch (ia->ia_action) {
+	case IEEE80211_WNM_BSS_TRANS_MGMT_QUERY:
+		ieee80211_handle_btm_query(ni,
+				(struct ieee80211_action_btm_query *)frm);
+		break;
+	case IEEE80211_WNM_BSS_TRANS_MGMT_RESP:
+		ieee80211_hanle_btm_resp(ni,
+				(struct ieee80211_action_btm_rsp *)frm);
+		break;
+	default:
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION, "WNM: Invalid action frame(%d) from station %pM\n",
+					ia->ia_action, ni->ni_macaddr);
+		vap->iv_stats.is_rx_mgtdiscard++;
+		break;
+	}
+}
diff --git a/drivers/qtn/wlan/ieee80211_linux.c b/drivers/qtn/wlan/ieee80211_linux.c
new file mode 100644
index 0000000..1be8c64
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_linux.c
@@ -0,0 +1,1128 @@
+/*-
+ * Copyright (c) 2003-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_linux.c 2759 2007-10-17 21:48:20Z kelmo $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 support (Linux-specific code)
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/sysctl.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+
+#include <net/iw_handler.h>
+#include <linux/wireless.h>
+#include <linux/if_arp.h>		/* XXX for ARPHRD_* */
+
+#include <asm/uaccess.h>
+
+#include "qtn/qtn_global.h"
+
+#include "net80211/if_media.h"
+#include "net80211/if_ethersubr.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_monitor.h"
+
+#define proc_net init_net.proc_net
+
+/*
+ * printk() helper that prefixes the message with the device name.
+ */
+void
+if_printf(struct net_device *dev, const char *fmt, ...)
+{
+	char msg[512];		/* XXX fixed-size scratch buffer */
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(msg, sizeof(msg), fmt, args);
+	va_end(args);
+
+	printk("%s: %s", dev->name, msg);
+}
+
+/*
+ * Allocate a data frame
+ * Returns the sk_buff and a pointer (via *frm) to the start of the
+ * reserved contiguous payload area of payload_len bytes; headroom is
+ * reserved for a QoS or plain 802.11 header depending on @qos.
+ * Returns NULL (and bumps is_tx_nobuf) on allocation failure.
+ */
+struct sk_buff *
+ieee80211_getdataframe(struct ieee80211vap *vap, uint8_t **frm, uint8_t qos, uint32_t payload_len)
+{
+	struct sk_buff *skb;
+	uint32_t hdrlen;
+
+	if (qos)
+		hdrlen = sizeof(struct ieee80211_qosframe);
+	else
+		hdrlen = sizeof(struct ieee80211_frame);
+
+	skb = dev_alloc_skb(hdrlen + payload_len);
+	if (!skb) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY,
+			"%s: cannot get buf of size %u", __func__,
+			hdrlen + payload_len);
+		vap->iv_stats.is_tx_nobuf++;
+		return NULL;
+	}
+
+	skb_reserve(skb, hdrlen);
+	*frm = skb_put(skb, payload_len);
+
+	return skb;
+}
+/* NOTE(review): this exports ieee80211_getmgtframe (defined below), not
+ * the function above -- possibly a copy/paste slip; confirm whether
+ * ieee80211_getdataframe was meant to be exported instead/as well. */
+EXPORT_SYMBOL(ieee80211_getmgtframe);
+
+/*
+ * Allocate a management frame.
+ * Returns the sk_buff and, via *frm, the start of the reserved contiguous
+ * payload area.  The buffer is sized to a 4-byte multiple and the payload
+ * is pushed to a cache-line boundary, mainly so beacon frames (which need
+ * this) can use the same interface.
+ */
+struct sk_buff *
+ieee80211_getmgtframe(uint8_t **frm, uint32_t payload_len)
+{
+	struct sk_buff *skb;
+	uint32_t total, misalign;
+
+	total = roundup(sizeof(struct ieee80211_frame) + payload_len, 4);
+	/* Over-allocate so the data pointer can be cache-aligned below. */
+	skb = dev_alloc_skb(total + ARC_DCACHE_LINE_LEN - 1);
+	if (skb == NULL)
+		return NULL;
+
+	misalign = (unsigned int)(skb->data) & (ARC_DCACHE_LINE_LEN - 1);
+	if (misalign)
+		skb_reserve(skb, ARC_DCACHE_LINE_LEN - misalign);
+
+	skb_reserve(skb, sizeof(struct ieee80211_frame));
+	*frm = skb_put(skb, payload_len);
+
+	return skb;
+}
+
+#if IEEE80211_VLAN_TAG_USED
+/*
+ * VLAN support.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+/*
+ * Register a vlan group.
+ *
+ * Stores the vlan group on the vap so the vid add/kill callbacks can
+ * use it.  NOTE(review): compiled for kernels < 4.7 but only hooked up
+ * in ieee80211_vlan_vattach() for kernels < 3.14.24 -- in between it is
+ * defined yet unused; confirm the intended version boundary.
+ */
+static void
+ieee80211_vlan_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	vap->iv_vlgrp = grp;
+}
+#endif
+
+/*
+ * Add an rx vlan identifier.
+ *
+ * Records the vid on the BSS node when a vlan group has been registered.
+ * Kernels >= 4.7 use the 3-argument ndo prototype and must return an int
+ * status; older kernels use the void 2-argument form.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static int
+ieee80211_vlan_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+#else
+static void
+ieee80211_vlan_add_vid(struct net_device *dev, unsigned short vid)
+#endif
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	if (vap->iv_vlgrp != NULL)
+		vap->iv_bss->ni_vlan = vid;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	/* Success is reported unconditionally, even without a vlan group. */
+	return 0;
+#endif
+
+}
+
+/*
+ * Kill (i.e. delete) a vlan identifier.
+ *
+ * Detaches the device for @vid from the registered vlan group, if any.
+ * Kernels >= 4.7 use the 3-argument ndo prototype returning int; older
+ * kernels use the void 2-argument form, so the return statement must be
+ * compiled out there (an unconditional "return 0;" would not compile in
+ * a void function).
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+static int
+ieee80211_vlan_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+#else
+static void
+ieee80211_vlan_kill_vid(struct net_device *dev, unsigned short vid)
+#endif
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	if (vap->iv_vlgrp != NULL)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		vlan_group_set_device(vap->iv_vlgrp, proto, vid, NULL);
+#else
+		vlan_group_set_device(vap->iv_vlgrp, vid, NULL);
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	return 0;
+#endif
+}
+#endif /* IEEE80211_VLAN_TAG_USED */
+
+/*
+ * Hook the vap's vlan callbacks into its net_device and advertise
+ * hardware vlan features.  Counterpart: ieee80211_vlan_vdetach().
+ */
+void
+ieee80211_vlan_vattach(struct ieee80211vap *vap)
+{
+#if IEEE80211_VLAN_TAG_USED
+	struct net_device *dev = vap->iv_dev;
+	/* NOTE(review): casts away const on netdev_ops to patch in the
+	 * vlan callbacks -- assumes the ops table is writable; confirm. */
+	struct net_device_ops *pndo = (struct net_device_ops *)dev->netdev_ops;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+			NETIF_F_HW_VLAN_FILTER;
+#else
+	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+			 NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+
+	/* NOTE(review): guard is < 3.14.24 while ieee80211_vlan_register
+	 * above is compiled for < 4.7 -- mismatched boundaries; confirm. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,24)
+	pndo->ndo_vlan_rx_register = ieee80211_vlan_register;
+#endif
+	pndo->ndo_vlan_rx_add_vid = ieee80211_vlan_add_vid;
+	pndo->ndo_vlan_rx_kill_vid = ieee80211_vlan_kill_vid;
+#endif /* IEEE80211_VLAN_TAG_USED */
+}
+
+/* Teardown counterpart of ieee80211_vlan_vattach(); nothing to undo. */
+void
+ieee80211_vlan_vdetach(struct ieee80211vap *vap)
+{
+}
+
+/*
+ * Push wireless events announcing a (re)association.  For our own BSS
+ * node this raises carrier (on a fresh association) and reports the new
+ * BSSID via SIOCGIWAP; for a peer station it emits IWEVREGISTERED.
+ */
+void
+ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct net_device *dev = vap->iv_dev;
+	union iwreq_data wreq;
+
+	memset(&wreq, 0, sizeof(wreq));
+	wreq.addr.sa_family = ARPHRD_ETHER;
+
+	if (ni != vap->iv_bss) {
+		/* A peer station registered with us. */
+		IEEE80211_ADDR_COPY(wreq.addr.sa_data, ni->ni_macaddr);
+		wireless_send_event(dev, IWEVREGISTERED, &wreq, NULL);
+		return;
+	}
+
+	if (newassoc)
+		netif_carrier_on(dev);
+	IEEE80211_ADDR_COPY(wreq.addr.sa_data, ni->ni_bssid);
+	wireless_send_event(dev, SIOCGIWAP, &wreq, NULL);
+
+	ieee80211_extender_notify_ext_role(ni);
+}
+
+/*
+ * Push wireless events announcing a disassociation.  Losing our own BSS
+ * reports an all-zero BSSID via SIOCGIWAP; a departing peer station is
+ * reported with IWEVEXPIRED.
+ */
+void
+ieee80211_notify_node_leave(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct net_device *dev = vap->iv_dev;
+	union iwreq_data wreq;
+
+	/* Zero the whole union so no stack garbage reaches userspace
+	 * (the BSS branch previously cleared only ap_addr.sa_data;
+	 * matches ieee80211_notify_node_join). */
+	memset(&wreq, 0, sizeof(wreq));
+
+	if (ni == vap->iv_bss) {
+		/* All-zero AP address signals "disconnected". */
+		wreq.ap_addr.sa_family = ARPHRD_ETHER;
+		wireless_send_event(dev, SIOCGIWAP, &wreq, NULL);
+	} else {
+		/* fire off wireless event station leaving */
+		IEEE80211_ADDR_COPY(wreq.addr.sa_data, ni->ni_macaddr);
+		wreq.addr.sa_family = ARPHRD_ETHER;
+		wireless_send_event(dev, IWEVEXPIRED, &wreq, NULL);
+	}
+}
+
+/*
+ * Emit a STA-TRAFFIC-STAT custom event carrying the node's rx/tx packet
+ * and byte counters.
+ */
+void
+ieee80211_notify_sta_stats(struct ieee80211_node *ni)
+{
+	static const char *tag = "STA-TRAFFIC-STAT";
+	struct net_device *dev = ni->ni_vap->iv_dev;
+
+	ieee80211_eventf(dev, "%s\nmac=%s\nrx_packets=%u\nrx_bytes=%llu\n"
+			"tx_packets=%u\ntx_bytes=%llu\n", tag,
+			ether_sprintf(ni->ni_macaddr), ni->ni_stats.ns_rx_data,
+			ni->ni_stats.ns_rx_bytes, ni->ni_stats.ns_tx_data,
+			ni->ni_stats.ns_tx_bytes);
+}
+
+/*
+ * Dispatch the SIOCGIWSCAN wireless event telling userspace a scan has
+ * completed.
+ */
+void
+ieee80211_notify_scan_done(struct ieee80211vap *vap)
+{
+	struct net_device *dev = vap->iv_dev;
+	union iwreq_data wreq;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
+
+	/* dispatch wireless event indicating scan completed */
+	/* Zero the whole union: only data.length/flags were previously set,
+	 * leaving the rest of the stack union uninitialised. */
+	memset(&wreq, 0, sizeof(wreq));
+	wreq.data.length = 0;
+	wreq.data.flags = 0;
+	wireless_send_event(dev, SIOCGIWSCAN, &wreq, NULL);
+}
+
+/*
+ * Log a crypto replay failure and raise the corresponding MLME
+ * indication event for userspace supplicants.
+ */
+void
+ieee80211_notify_replay_failure(struct ieee80211vap *vap,
+	const struct ieee80211_frame *wh, const struct ieee80211_key *k,
+	u_int64_t rsc)
+{
+	static const char *tag = "MLME-REPLAYFAILURE.indication";
+	struct net_device *dev = vap->iv_dev;
+	const char *cast = IEEE80211_IS_MULTICAST(wh->i_addr1) ? "broad" : "uni";
+
+	IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+		"%s replay detected <keyix %d, rsc %llu >",
+		k->wk_cipher->ic_name, k->wk_keyix, rsc );
+
+	/* TODO: needed parameters: count, keyid, key type, src address, TSC */
+	ieee80211_eventf(dev, "%s(keyid=%d %scast addr=%s)", tag,
+		k->wk_keyix, cast, ether_sprintf(wh->i_addr1));
+}
+EXPORT_SYMBOL(ieee80211_notify_replay_failure);
+
+/*
+ * Emit a STA-REQUIRE-LEAVE event for an associated station on an AP vap.
+ * NOTE(review): "nofity" is a typo for "notify"; the symbol name is part
+ * of the driver's interface, so it is kept as-is.
+ */
+void
+ieee80211_nofity_sta_require_leave(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	const char *indicator = "STA-REQUIRE-LEAVE";
+
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP || ni->ni_associd == 0)
+		return;
+
+	ieee80211_eventf(vap->iv_dev, "%s=%s", indicator,
+			ether_sprintf(ni->ni_macaddr));
+}
+
+/**
+ * Entry point for reporting of MIC failure (via ic pointer).
+ *
+ * At most two reports are forwarded, since two within the window is
+ * already enough to trigger TKIP countermeasures.
+ */
+void ieee80211_tkip_mic_failure(struct ieee80211vap *vap, int count)
+{
+	struct ieee80211_frame wh;
+	int reports;
+	int i;
+
+	reports = (count > 2) ? 2 : count;
+
+	/* Format a frame header appropriately for a MIC report: our own
+	 * address as receiver, the BSSID as transmitter. */
+	memset(&wh, 0, sizeof(wh));
+	memcpy(&wh.i_addr1, vap->iv_bss->ni_macaddr, IEEE80211_ADDR_LEN);
+	memcpy(&wh.i_addr2, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN);
+
+	for (i = 0; i < reports; i++)
+		ieee80211_notify_michael_failure(vap, &wh, 0);
+}
+
+/*
+ * Log a Michael MIC verification failure, bump the rx_tkipmic counter
+ * and raise the MLME indication event consumed by hostapd.
+ */
+void
+ieee80211_notify_michael_failure(struct ieee80211vap *vap,
+	const struct ieee80211_frame *wh, u_int keyix)
+{
+	static const char *tag = "MLME-MICHAELMICFAILURE.indication";
+	struct net_device *dev = vap->iv_dev;
+	const char *cast = IEEE80211_IS_MULTICAST(wh->i_addr1) ? "broad" : "uni";
+
+	IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+		"Michael MIC verification failed <keyix %d>", keyix);
+	vap->iv_stats.is_rx_tkipmic++;
+
+	/* TODO: needed parameters: count, keyid, key type, src address, TSC */
+	/* qtn=1 is used (in conjunction with userspace hostapd change) to ensure that the
+	 * MIC failure report is tallied, regardless of the source address.
+	 */
+	ieee80211_eventf(dev, "%s(keyid=%d %scast addr=%s qtn=1)", tag,
+		keyix, cast, ether_sprintf(wh->i_addr1));
+}
+EXPORT_SYMBOL(ieee80211_notify_michael_failure);
+
+/*
+ * Request that the kernel load a module by name.
+ *
+ * A zero return only means the load request succeeded; the module may
+ * still have unloaded itself on an internal error, so callers must check
+ * that the service they requested is actually available:
+ * http://kernelnewbies.org/documents/kdoc/kernel-api/r7338.html
+ */
+int
+ieee80211_load_module(const char *modname)
+{
+#if defined(CONFIG_KMOD) || defined(CONFIG_MODULES)
+	int rv = request_module(modname);
+
+	if (rv < 0)
+		printk(KERN_ERR "failed to automatically load module: %s; " \
+			"errno: %d\n", modname, rv);
+	return rv;
+#else /* CONFIG_KMOD || CONFIG_MODULES */
+	printk(KERN_ERR "Unable to load needed module: %s; no support for " \
+			"automatic module loading", modname );
+	return -ENOSYS;
+#endif /* CONFIG_KMOD || CONFIG_MODULES */
+}
+
+
+/* /proc/net/madwifi root directory and its registered-entry count. */
+static struct proc_dir_entry *proc_madwifi;
+static int proc_madwifi_count = 0;
+
+/**
+ * Return a string representing the MIMO power save mode passed in.
+ */
+static char *
+ieee80211_smps_to_string(u_int8_t pwrsave)
+{
+	/* Every case returns directly; the original break-after-return and
+	 * trailing return were unreachable dead code. */
+	switch (pwrsave) {
+	case IEEE80211_HTCAP_C_MIMOPWRSAVE_STATIC:
+		return "Static";
+	case IEEE80211_HTCAP_C_MIMOPWRSAVE_DYNAMIC:
+		return "Dynamic";
+	case IEEE80211_HTCAP_C_MIMOPWRSAVE_NA:
+		return "INVALID";
+	case IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE:
+		return "None";
+	default:
+		return "Unknown";
+	}
+}
+
+/**
+ * Return a STATIC buffer containing a dump of the HT capability info field.
+ * NOTE(review): not re-entrant -- concurrent callers share htcapinfobuf.
+ */
+static char *
+ieee80211_htcapinfo_to_string(struct ieee80211vap *vap, struct ieee80211_htcap *ni_htcap)
+{
+	static char htcapinfobuf[1024];
+	int printed;
+
+	printed = snprintf(htcapinfobuf, sizeof(htcapinfobuf), "MIMO power save:%s",
+				ieee80211_smps_to_string(ni_htcap->pwrsave));
+	/* snprintf returns the WOULD-BE length; clamp it so the append
+	 * below can never index past the buffer or feed a wrapped size_t
+	 * to snprintf. */
+	if (printed < 0 || printed >= (int)sizeof(htcapinfobuf))
+		printed = sizeof(htcapinfobuf) - 1;
+
+	/* Bit 15 of iv_smps_force flags a manual SMPS override. */
+	if (vap->iv_smps_force & 0x8000)
+	{
+		snprintf(htcapinfobuf + printed, sizeof(htcapinfobuf) - printed,
+				" Overridden to:%s (%04X)",
+				ieee80211_smps_to_string((u_int8_t)(vap->iv_smps_force & 0xFF)),
+				vap->iv_smps_force);
+	}
+	/* FIXME: decode other parts of the capability IE here */
+	return htcapinfobuf;
+}
+
+/* Decide whether a node appears in the /proc/net/madwifi/wifiN/
+ * associated_sta output: it must belong to this vap, not be our own
+ * address, hold a non-zero association id, not be blacklisted, and be
+ * authorized. */
+static int
+ieee80211_node_should_print(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	if (ni->ni_vap != vap)
+		return 0;
+	if (memcmp(vap->iv_myaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN) == 0)
+		return 0;
+	if (!ni->ni_associd || ni->ni_blacklist_timeout != 0)
+		return 0;
+	return ieee80211_node_is_authorized(ni) ? 1 : 0;
+}
+
+/*
+ * Render the station table of @vap into @buf (at most @space bytes) for
+ * the associated_sta proc entry; returns the number of bytes written.
+ */
+static int
+proc_read_nodes(struct ieee80211vap *vap, char *buf, int space)
+{
+	char *wp = buf;
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt =
+		(struct ieee80211_node_table *)&vap->iv_ic->ic_sta;
+
+	/* Don't print anything out on the STA side if we're not connected. */
+	if ((vap->iv_opmode != IEEE80211_M_HOSTAP) &&
+			(vap->iv_state != IEEE80211_S_RUN))
+		return 0;
+
+	/* NOTE(review): the node-table lock was deliberately left out in
+	 * the original (commented-out IEEE80211_NODE_LOCK) -- confirm
+	 * traversal without it is safe here. */
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		/* Assume each node needs 500 bytes */
+		if (buf + space < wp + 500)
+			break;
+
+		/* Nodes associated with OUR VAP, not with OUR MAC address,
+		 * and with NON-ZERO association ID (ie no temporary nodes) */
+		if (ieee80211_node_should_print(vap, ni)) {
+			struct timespec t;
+
+			jiffies_to_timespec(jiffies - ni->ni_last_rx, &t);
+			wp += sprintf(wp, "macaddr: <%s>\n", ether_sprintf(ni->ni_macaddr));
+			wp += sprintf(wp, " rssi %d\n", ni->ni_rssi);
+			wp += sprintf(wp, " last_rx %ld.%06ld %d\n",
+					t.tv_sec, t.tv_nsec / 1000, ni->ni_inact);
+			wp += sprintf(wp, " HT CAP: %s\n",
+					ieee80211_htcapinfo_to_string(vap, &ni->ni_htcap));
+		}
+	}
+	return (wp - buf);
+}
+
+/*
+ * read() for the per-vap proc entry: copy out of the snapshot buffer
+ * that was filled at open() time.
+ */
+static ssize_t
+proc_ieee80211_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
+{
+	struct proc_ieee80211_priv *pv = file->private_data;
+	loff_t pos = *offset;
+
+	if (pv->rbuf == NULL || pos < 0)
+		return -EINVAL;
+	if (pos > pv->rlen)
+		return -EFAULT;
+	if (len > pv->rlen - pos)
+		len = pv->rlen - pos;
+	if (copy_to_user(buf, pv->rbuf + pos, len))
+		return -EFAULT;
+
+	*offset = pos + len;
+	return len;
+}
+
+/*
+ * open() for the per-vap proc entry: allocate the private read/write
+ * buffers and snapshot the node list into the read buffer.
+ */
+static int
+proc_ieee80211_open(struct inode *inode, struct file *file)
+{
+	struct proc_ieee80211_priv *pv;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct proc_dir_entry *dp = PDE(inode);
+	struct ieee80211vap *vap = dp->data;
+#else
+	struct ieee80211vap *vap = PDE_DATA(inode);
+#endif
+
+	pv = kmalloc(sizeof(struct proc_ieee80211_priv), GFP_KERNEL);
+	file->private_data = pv;
+	if (pv == NULL)
+		return -ENOMEM;
+	memset(pv, 0, sizeof(struct proc_ieee80211_priv));
+
+	/* Allocate both the read snapshot and the write scratch buffer. */
+	pv->rbuf = vmalloc(MAX_PROC_IEEE80211_SIZE);
+	if (pv->rbuf == NULL)
+		goto err_free_pv;
+	pv->wbuf = vmalloc(MAX_PROC_IEEE80211_SIZE);
+	if (pv->wbuf == NULL)
+		goto err_free_rbuf;
+
+	memset(pv->wbuf, 0, MAX_PROC_IEEE80211_SIZE);
+	memset(pv->rbuf, 0, MAX_PROC_IEEE80211_SIZE);
+	pv->max_wlen = MAX_PROC_IEEE80211_SIZE;
+	pv->max_rlen = MAX_PROC_IEEE80211_SIZE;
+
+	/* now read the data into the buffer */
+	pv->rlen = proc_read_nodes(vap, pv->rbuf, MAX_PROC_IEEE80211_SIZE);
+	return 0;
+
+err_free_rbuf:
+	vfree(pv->rbuf);
+err_free_pv:
+	kfree(pv);
+	return -ENOMEM;
+}
+
+/*
+ * write() for the per-vap proc entry: accumulate user data in the write
+ * scratch buffer, growing pv->wlen to the highest byte written.
+ */
+static ssize_t
+proc_ieee80211_write(struct file *file, const char __user *buf, size_t len, loff_t *offset)
+{
+	struct proc_ieee80211_priv *pv = file->private_data;
+	loff_t pos = *offset;
+
+	if (pv->wbuf == NULL || pos < 0)
+		return -EINVAL;
+	if (pos >= pv->max_wlen)
+		return 0;
+	if (len > pv->max_wlen - pos)
+		len = pv->max_wlen - pos;
+	if (copy_from_user(pv->wbuf + pos, buf, len))
+		return -EFAULT;
+
+	if (pos + len > pv->wlen)
+		pv->wlen = pos + len;
+	*offset = pos + len;
+	return len;
+}
+
+/*
+ * release() for the per-vap proc entry: free the private buffers.
+ */
+static int
+proc_ieee80211_close(struct inode *inode, struct file *file)
+{
+	struct proc_ieee80211_priv *pv = file->private_data;
+
+	if (pv->rbuf)
+		vfree(pv->rbuf);
+	if (pv->wbuf)
+		vfree(pv->wbuf);
+	kfree(pv);
+	return 0;
+}
+
+/* File hooks for the per-vap associated_sta proc entry. */
+static struct file_operations proc_ieee80211_ops = {
+	.open = proc_ieee80211_open,
+	.read = proc_ieee80211_read,
+	.write = proc_ieee80211_write,
+	.release = proc_ieee80211_close,
+};
+
+#ifdef IEEE80211_DEBUG
+/* sysctl handler: read/write the vap's debug message mask (iv_debug). */
+static int
+IEEE80211_SYSCTL_DECL(ieee80211_sysctl_debug, ctl, write, filp, buffer,
+	lenp, ppos)
+{
+	struct ieee80211vap *vap = ctl->extra1;
+	u_int val;
+	int ret;
+
+	/* Point the ctl_table at a local scratch int for dointvec. */
+	ctl->data = &val;
+	ctl->maxlen = sizeof(val);
+	if (write) {
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+		if (ret == 0)
+			vap->iv_debug = val;
+	} else {
+		val = vap->iv_debug;
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+	}
+	return ret;
+}
+#endif /* IEEE80211_DEBUG */
+
+/* sysctl handler: read/write the net_device ARPHRD type.  Writes are
+ * honoured only for monitor-mode vaps and only for the recognised
+ * 802.11 capture encapsulations. */
+static int
+IEEE80211_SYSCTL_DECL(ieee80211_sysctl_dev_type, ctl, write, filp, buffer,
+	lenp, ppos)
+{
+	struct ieee80211vap *vap = ctl->extra1;
+	u_int val;
+	int ret;
+
+	/* Point the ctl_table at a local scratch int for dointvec. */
+	ctl->data = &val;
+	ctl->maxlen = sizeof(val);
+	if (write) {
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+		if (ret == 0 && vap->iv_opmode == IEEE80211_M_MONITOR) {
+			if (val == ARPHRD_IEEE80211_RADIOTAP ||
+			    val == ARPHRD_IEEE80211 ||
+			    val == ARPHRD_IEEE80211_PRISM ||
+			    val == ARPHRD_IEEE80211_ATHDESC) {
+				vap->iv_dev->type = val;
+			}
+		}
+	} else {
+		val = vap->iv_dev->type;
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+	}
+	return ret;
+}
+/* sysctl handler: read/write iv_monitor_nods_only (monitor mode:
+ * capture only frames with to-DS/from-DS clear). */
+static int
+IEEE80211_SYSCTL_DECL(ieee80211_sysctl_monitor_nods_only, ctl, write, filp, buffer,
+	lenp, ppos)
+{
+	struct ieee80211vap *vap = ctl->extra1;
+	u_int val;
+	int ret;
+
+	ctl->data = &val;
+	ctl->maxlen = sizeof(val);
+	if (write) {
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+		if (ret == 0)
+			vap->iv_monitor_nods_only = val;
+	} else {
+		val = vap->iv_monitor_nods_only;
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+	}
+	return ret;
+}
+/* sysctl handler: read/write iv_monitor_txf_len (monitor-mode transmit
+ * filter length). */
+static int
+IEEE80211_SYSCTL_DECL(ieee80211_sysctl_monitor_txf_len, ctl, write, filp, buffer,
+	lenp, ppos)
+{
+	struct ieee80211vap *vap = ctl->extra1;
+	u_int val;
+	int ret;
+
+	ctl->data = &val;
+	ctl->maxlen = sizeof(val);
+	if (write) {
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+		if (ret == 0)
+			vap->iv_monitor_txf_len = val;
+	} else {
+		val = vap->iv_monitor_txf_len;
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+	}
+	return ret;
+}
+/* sysctl handler: read/write iv_monitor_phy_errors (whether monitor
+ * mode passes up frames with PHY errors). */
+static int
+IEEE80211_SYSCTL_DECL(ieee80211_sysctl_monitor_phy_errors, ctl, write, filp, buffer,
+	lenp, ppos)
+{
+	struct ieee80211vap *vap = ctl->extra1;
+	u_int val;
+	int ret;
+
+	ctl->data = &val;
+	ctl->maxlen = sizeof(val);
+	if (write) {
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+		if (ret == 0)
+			vap->iv_monitor_phy_errors = val;
+	} else {
+		val = vap->iv_monitor_phy_errors;
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+	}
+	return ret;
+}
+
+/* sysctl handler for "monitor_crc_errors": proxies the integer
+ * vap->iv_monitor_crc_errors through a local copy for proc_dointvec. */
+static int
+IEEE80211_SYSCTL_DECL(ieee80211_sysctl_monitor_crc_errors, ctl, write, filp, buffer,
+	lenp, ppos)
+{
+	struct ieee80211vap *vap = ctl->extra1;
+	u_int val;
+	int ret;
+
+	ctl->data = &val;
+	ctl->maxlen = sizeof(val);
+	if (write) {
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+		if (ret == 0)
+			vap->iv_monitor_crc_errors = val;
+	} else {
+		val = vap->iv_monitor_crc_errors;
+		ret = IEEE80211_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
+			lenp, ppos);
+	}
+	return ret;
+}
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#define	CTL_AUTO	-2	/* cannot be CTL_ANY or CTL_NONE */
+#define INIT_CTL_NAME(value)  .ctl_name = (value),
+#else
+/* ctl_name no longer exists in struct ctl_table on newer kernels;
+ * the initializer expands to nothing there. */
+#define INIT_CTL_NAME(value)
+#endif
+/*
+ * Per-VAP sysctl entries.  This template is copied into a freshly
+ * allocated per-VAP table by ieee80211_sysctl_vattach(), which also
+ * fills in .extra1 with the vap pointer for each handler.
+ */
+static const struct ctl_table ieee80211_sysctl_template[] = {
+#ifdef IEEE80211_DEBUG
+	{
+	   INIT_CTL_NAME(CTL_AUTO)
+	  .procname	= "debug",
+	  .mode		= 0644,
+	  .proc_handler	= ieee80211_sysctl_debug
+	},
+#endif
+	{
+	  INIT_CTL_NAME(CTL_AUTO)
+	  .procname	= "dev_type",
+	  .mode		= 0644,
+	  .proc_handler	= ieee80211_sysctl_dev_type
+	},
+	{
+	  INIT_CTL_NAME(CTL_AUTO)
+	  .procname	= "monitor_nods_only",
+	  .mode		= 0644,
+	  .proc_handler	= ieee80211_sysctl_monitor_nods_only
+	},
+	{
+	  INIT_CTL_NAME(CTL_AUTO)
+	  .procname	= "monitor_txf_len",
+	  .mode		= 0644,
+	  .proc_handler	= ieee80211_sysctl_monitor_txf_len
+	},
+	{ INIT_CTL_NAME(CTL_AUTO)
+	  .procname	= "monitor_phy_errors",
+	  .mode		= 0644,
+	  .proc_handler = ieee80211_sysctl_monitor_phy_errors
+	},
+	{
+	  INIT_CTL_NAME(CTL_AUTO)
+	  .procname	= "monitor_crc_errors",
+	  .mode		= 0644,
+	  .proc_handler = ieee80211_sysctl_monitor_crc_errors
+	},
+	/* NB: must be last entry before NULL */
+	{
+	  INIT_CTL_NAME(CTL_AUTO)
+	  .procname	= "%parent",
+	  .maxlen	= IFNAMSIZ,
+	  .mode		= 0444,
+	  .proc_handler	= proc_dostring
+	},
+	{ 0 }
+};
+
+/*
+ * Register the per-VAP sysctl tree (net/<ifname>/...) and create the
+ * /proc/net/madwifi/<ifname>/ directory with its entries.  Allocates a
+ * private copy of ieee80211_sysctl_template plus four extra ctl_table
+ * slots for the "net" and "<ifname>" directory levels.
+ */
+void
+ieee80211_sysctl_vattach(struct ieee80211vap *vap)
+{
+	int i, space;
+	char *devname = NULL;
+	struct ieee80211_proc_entry *tmp=NULL;
+
+	/* 5 extra slots: [0]="net" dir, [1]=NULL, [2]="<ifname>" dir,
+	 * [3]=NULL, then the template copy starting at [4]. */
+	space = 5 * sizeof(struct ctl_table) + sizeof(ieee80211_sysctl_template);
+	vap->iv_sysctls = kmalloc(space, GFP_KERNEL);
+	if (vap->iv_sysctls == NULL) {
+		printk("%s: no memory for sysctl table!\n", __func__);
+		return;
+	}
+
+	/*
+	 * Reserve space for the device name outside the net_device structure
+	 * so that if the name changes we know what it used to be.
+	 */
+	/* NOTE(review): kstrdup() would replace this kmalloc+strncpy pair. */
+	devname = kmalloc((strlen(vap->iv_dev->name) + 1) * sizeof(char), GFP_KERNEL);
+	if (devname == NULL) {
+		printk("%s: no memory for VAP name!\n", __func__);
+		kfree(vap->iv_sysctls);
+		vap->iv_sysctls = NULL;
+		return;
+	}
+	strncpy(devname, vap->iv_dev->name, strlen(vap->iv_dev->name) + 1);
+
+	/* setup the table */
+	memset(vap->iv_sysctls, 0, space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+	vap->iv_sysctls[0].ctl_name = CTL_NET;
+#endif
+	vap->iv_sysctls[0].procname = "net";
+	vap->iv_sysctls[0].mode = 0555;
+	vap->iv_sysctls[0].child = &vap->iv_sysctls[2];
+	/* [1] is NULL terminator */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+	vap->iv_sysctls[2].ctl_name = CTL_AUTO;
+#endif
+	vap->iv_sysctls[2].procname = devname; /* XXX bad idea? */
+	vap->iv_sysctls[2].mode = 0555;
+	vap->iv_sysctls[2].child = &vap->iv_sysctls[4];
+	/* [3] is NULL terminator */
+	/* copy in pre-defined data */
+	memcpy(&vap->iv_sysctls[4], ieee80211_sysctl_template,
+		sizeof(ieee80211_sysctl_template));
+
+	/* add in dynamic data references */
+	for (i = 4; vap->iv_sysctls[i].procname; i++)
+
+		if (vap->iv_sysctls[i].extra1 == NULL)
+			vap->iv_sysctls[i].extra1 = vap;
+
+	/* i now indexes the { 0 } sentinel; [i-1] is the trailing "%parent"
+	 * entry, whose string data is pointed at an empty string. */
+	vap->iv_sysctls[i-1].data = "";	/* XXX? */
+
+	/* and register everything */
+	vap->iv_sysctl_header = ATH_REGISTER_SYSCTL_TABLE(vap->iv_sysctls);
+	if (!vap->iv_sysctl_header) {
+		printk("%s: failed to register sysctls!\n", vap->iv_dev->name);
+		/* NOTE(review): devname (stored in iv_sysctls[2].procname) is
+		 * leaked here - only the table itself is freed - and the
+		 * function keeps going to create the proc entries anyway. */
+		kfree(vap->iv_sysctls);
+		vap->iv_sysctls = NULL;
+	}
+
+	vap->iv_disconn_cnt = 0;
+	vap->iv_disconn_seq = 0;
+
+	/* Ensure the base madwifi directory exists */
+	if (!proc_madwifi && proc_net != NULL) {
+		proc_madwifi = proc_mkdir("madwifi", proc_net);
+		if (!proc_madwifi)
+			printk(KERN_WARNING "Failed to mkdir /proc/net/madwifi\n");
+	}
+
+	/* Create a proc directory named after the VAP */
+	if (proc_madwifi) {
+		proc_madwifi_count++;
+		vap->iv_proc = proc_mkdir(vap->iv_dev->name, proc_madwifi);
+	}
+
+	/* Create a proc entry listing the associated stations */
+	ieee80211_proc_vcreate(vap, &proc_ieee80211_ops, "associated_sta");
+
+	/* Recreate any other proc entries that have been registered */
+		if (vap->iv_proc) {
+		tmp = vap->iv_proc_entries;
+		while (tmp) {
+			if (!tmp->entry) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+				tmp->entry = create_proc_entry(tmp->name,
+				PROC_IEEE80211_PERM, vap->iv_proc);
+				tmp->entry->data = vap;
+				tmp->entry->proc_fops = tmp->fileops;
+#else
+				tmp->entry = proc_create_data(tmp->name, PROC_IEEE80211_PERM,
+							      vap->iv_proc, tmp->fileops, vap);
+#endif
+			}
+			tmp = tmp->next;
+		}
+	}
+}
+
+/* Frees all memory used for the list of proc entries.  Only the list
+ * bookkeeping is freed; the procfs entries themselves are expected to
+ * have been removed already (see ieee80211_sysctl_vdetach()). */
+void
+ieee80211_proc_cleanup(struct ieee80211vap *vap)
+{
+	struct ieee80211_proc_entry *tmp=vap->iv_proc_entries;
+	struct ieee80211_proc_entry *next = NULL;
+	while (tmp) {
+		next = tmp->next;
+		kfree(tmp);
+		tmp = next;
+	}
+}
+
+/* Called by other modules to register a proc entry under the vap directory.
+ * Returns 0 on success, -1 if the name is already registered or on
+ * allocation failure.  The entry is remembered on vap->iv_proc_entries so
+ * it can be recreated after a rename (see ieee80211_sysctl_vattach()). */
+int
+ieee80211_proc_vcreate(struct ieee80211vap *vap,
+		struct file_operations *fileops, char *name)
+{
+	struct ieee80211_proc_entry *entry;
+	struct ieee80211_proc_entry *tmp=NULL;
+
+	/* Ignore if already in the list */
+	if (vap->iv_proc_entries) {
+		tmp = vap->iv_proc_entries;
+		do {
+			if (strcmp(tmp->name, name)==0)
+				return -1;
+			/* Check for end of list */
+			if (!tmp->next)
+				break;
+			/* Otherwise move on */
+			tmp = tmp->next;
+		} while (1);
+	}
+
+	/* Create an item in our list for the new entry */
+	entry = kmalloc(sizeof(struct ieee80211_proc_entry), GFP_KERNEL);
+	if (entry == NULL) {
+		printk("%s: no memory for new proc entry (%s)!\n", __func__,
+				name);
+		return -1;
+	}
+
+	/* Replace null fileops pointers with our standard functions */
+	/* NOTE(review): this mutates the caller's file_operations struct in
+	 * place; if callers pass a shared/static fops the defaults become
+	 * visible to every user of that struct - confirm intended. */
+	if (!fileops->open)
+		fileops->open = proc_ieee80211_open;
+	if (!fileops->release)
+		fileops->release = proc_ieee80211_close;
+	if (!fileops->read)
+		fileops->read = proc_ieee80211_read;
+	if (!fileops->write)
+		fileops->write = proc_ieee80211_write;
+
+	/* Create the entry record */
+	entry->name = name;
+	entry->fileops = fileops;
+	entry->next = NULL;
+	entry->entry = NULL;
+
+	/* Create the actual proc entry */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	if (vap->iv_proc) {
+		/* NOTE(review): create_proc_entry() may return NULL; the two
+		 * assignments below would then dereference a NULL pointer. */
+		entry->entry = create_proc_entry(entry->name,
+				PROC_IEEE80211_PERM, vap->iv_proc);
+		entry->entry->data = vap;
+		entry->entry->proc_fops = entry->fileops;
+	}
+#else
+	if (vap->iv_proc)
+		entry->entry = proc_create_data(entry->name, PROC_IEEE80211_PERM,
+						vap->iv_proc, entry->fileops, vap);
+#endif
+
+	/* Add it to the list */
+	if (!tmp) {
+		/* Add to the start */
+		vap->iv_proc_entries = entry;
+	} else {
+		/* Add to the end */
+		tmp->next = entry;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_proc_vcreate);
+
+/*
+ * Tear down everything ieee80211_sysctl_vattach() created: unregister the
+ * sysctl tree, remove the per-VAP proc entries and directory (the list
+ * records are kept so a later re-attach can recreate them), and free the
+ * sysctl table plus the duplicated device name.
+ */
+void
+ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
+{
+	struct ieee80211_proc_entry *tmp=NULL;
+
+	if (vap->iv_sysctl_header) {
+		unregister_sysctl_table(vap->iv_sysctl_header);
+		vap->iv_sysctl_header = NULL;
+	}
+
+	if (vap->iv_proc) {
+		/* Remove child proc entries but leave them in the list */
+		tmp = vap->iv_proc_entries;
+		while (tmp) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+			if (tmp->entry) {
+				remove_proc_entry(tmp->name, vap->iv_proc);
+				tmp->entry = NULL;
+			}
+#else
+			/* proc_remove() on the directory below drops children */
+			tmp->entry = NULL;
+#endif
+			tmp = tmp->next;
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		remove_proc_entry(vap->iv_proc->name, proc_madwifi);
+#else
+		proc_remove(vap->iv_proc);
+#endif
+		/* Last VAP out removes /proc/net/madwifi itself.
+		 * NOTE(review): proc_madwifi_count is not protected by any
+		 * lock - confirm attach/detach are serialized. */
+		if (proc_madwifi_count == 1) {
+			remove_proc_entry("madwifi", proc_net);
+			proc_madwifi = NULL;
+		}
+		proc_madwifi_count--;
+	}
+
+	if (vap->iv_sysctls) {
+		/* [2].procname is the kmalloc'd devname from vattach */
+		if (vap->iv_sysctls[2].procname) {
+			kfree(vap->iv_sysctls[2].procname);
+			vap->iv_sysctls[2].procname = NULL;
+		}
+		kfree(vap->iv_sysctls);
+		vap->iv_sysctls = NULL;
+	}
+}
+
+/*
+ * Format an Ethernet MAC for printing.
+ * NB: returns a single static buffer, so it is not reentrant and two
+ * concurrent callers (or two uses in one printk) will clobber each other.
+ */
+const char*
+ether_sprintf(const u_int8_t *mac)
+{
+	static char etherbuf[18]; 	/* XXX */
+	snprintf(etherbuf, sizeof(etherbuf), "%02x:%02x:%02x:%02x:%02x:%02x",
+		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+	return etherbuf;
+}
+EXPORT_SYMBOL(ether_sprintf);		/* XXX */
+
+/* Function to handle the device event notifications.
+ * If the event is a NETDEV_CHANGENAME, and is for an interface
+ * we are taking care of, then we want to remove its existing
+ * proc entries (which now have the wrong names) and add
+ * new, correct, entries.
+ */
+static int
+ieee80211_rcv_dev_event(struct notifier_block *this, unsigned long event,
+	void *ptr)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	struct net_device *dev = (struct net_device *) ptr;
+#else
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+#endif
+
+	/* Only react to our own devices, identified by their ndo_open hook.
+	 * NOTE(review): dev->netdev_ops is dereferenced without a NULL
+	 * check - confirm every notified device has ops on target kernels. */
+	if (!dev || dev->netdev_ops->ndo_open != &ieee80211_open)
+		return 0;
+
+        switch (event) {
+        case NETDEV_CHANGENAME:
+		/* Re-register sysctl/proc entries under the new name. */
+		ieee80211_sysctl_vdetach(netdev_priv(dev));
+		ieee80211_sysctl_vattach(netdev_priv(dev));
+		return NOTIFY_DONE;
+	default:
+		break;
+        }
+        return 0;
+}
+
+static struct notifier_block ieee80211_event_block = {
+        .notifier_call = ieee80211_rcv_dev_event
+};
+
+/*
+ * Module glue: version banner, module metadata, and init/exit hooks that
+ * (un)register the netdevice notifier used to track interface renames.
+ */
+#include "version.h"
+#include "release.h"
+static char *version = WLAN_VERSION " (" RELEASE_VERSION ")";
+static char *dev_info = "wlan";
+
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless LAN protocol support");
+#ifdef MODULE_VERSION
+MODULE_VERSION(RELEASE_VERSION);
+#endif
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+extern	void ieee80211_auth_setup(void);
+
+static int __init
+init_wlan(void)
+{
+  	register_netdevice_notifier(&ieee80211_event_block);
+	printk(KERN_INFO "%s: %s\n", dev_info, version);
+	return 0;
+}
+module_init(init_wlan);
+
+static void __exit
+exit_wlan(void)
+{
+  	unregister_netdevice_notifier(&ieee80211_event_block);
+	printk(KERN_INFO "%s: driver unloaded\n", dev_info);
+}
+module_exit(exit_wlan);
diff --git a/drivers/qtn/wlan/ieee80211_mlme_statistics.c b/drivers/qtn/wlan/ieee80211_mlme_statistics.c
new file mode 100644
index 0000000..b8134e4
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_mlme_statistics.c
@@ -0,0 +1,640 @@
+/*-
+ * Copyright (c) 2014 Quantenna
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_mlme_statistics.c 1 2014-01-17 12:00:00Z vsaiapin $
+ */
+
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/jhash.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#if defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS)
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif /* defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS) */
+#include <linux/cdev.h>
+#include <linux/workqueue.h>
+
+#include "net80211/ieee80211_mlme_statistics.h"
+#include "net80211/if_media.h"
+#include "net80211/ieee80211_var.h"
+
+#ifdef MLME_STATS_DEBUG
+/* Small tables in debug builds so eviction and hash collisions are easy
+ * to exercise with test_func(). */
+#define MLME_STATS_MAC_HASH_SIZE			8
+#define MLME_STATS_MAX_CLIENTS				25
+#else /* MLME_STATS_DEBUG */
+/* NB: MLME_STATS_MAC_HASH_SIZE must remain a power of two -
+ * mlme_stats_mac_hash() masks with (MLME_STATS_MAC_HASH_SIZE - 1). */
+#define MLME_STATS_MAC_HASH_SIZE			128
+#define MLME_STATS_MAX_CLIENTS				128
+#endif /* MLME_STATS_DEBUG */
+
+#ifdef MLME_STATS_PROCFS
+#define MLME_STATS_PROC_FILENAME			"mlmestats"
+#endif /* MLME_STATS_PROCFS */
+
+#ifdef MLME_STATS_DEVFS
+#define MLME_STATS_DEVFS_NAME				"mlmestats"
+/* NOTE(review): statically chosen char-device major - confirm 0xEF does
+ * not collide with other drivers on the target platform. */
+#define MLME_STATS_DEVFS_MAJOR				0xEF
+#endif /* MLME_STATS_DEVFS */
+
+/* One deferred statistics increment, executed from the workqueue by
+ * perform_delayed_update(). */
+struct mlme_delayed_update_task {
+	unsigned char mac_addr[IEEE80211_ADDR_LEN];	/* client MAC */
+	unsigned int statistics_entry;	/* which counter to bump */
+	unsigned int incrementor;	/* amount to add */
+	struct work_struct work;
+};
+
+/* Per-client statistics record, linked on both the LRU list and the MAC
+ * hash table. */
+struct mlme_stats_node {
+	struct mlme_stats_record stats;
+	struct list_head lru_link;	/* most-recently-used first */
+	struct hlist_node hash_link;	/* chain in mac_hash_table bucket */
+};
+
+/* Global statistics state, guarded by access_lock. */
+struct mlme_stats_factory {
+	struct hlist_head mac_hash_table[MLME_STATS_MAC_HASH_SIZE];
+	unsigned int nodes_count;	/* allocated nodes, capped at MLME_STATS_MAX_CLIENTS */
+
+	/* This 'lru_list_head' node will be used for anonymous statistics */
+	struct mlme_stats_node lru_list_head;
+	spinlock_t access_lock;	/* bh-disabling; see mlme_stats_lock() */
+	struct workqueue_struct *delayed_update_wq;
+};
+
+/* Set to 1 in mlme_stats_init(), 0 in mlme_stats_exit(); updates are
+ * dropped while inactive. */
+static atomic_t mlme_active = ATOMIC_INIT(0);
+static struct mlme_stats_factory mlme_statistics;
+
+/* Lock/unlock the statistics factory.  This is a bh-disabling spinlock:
+ * nothing that may sleep (GFP_KERNEL allocation, copy_to_user, ...) should
+ * run while it is held. */
+static void mlme_stats_lock(void)
+{
+	spin_lock_bh(&mlme_statistics.access_lock);
+}
+
+static void mlme_stats_unlock(void)
+{
+	spin_unlock_bh(&mlme_statistics.access_lock);
+}
+
+/* Hash a MAC address into a bucket index; relies on
+ * MLME_STATS_MAC_HASH_SIZE being a power of two. */
+static int mlme_stats_mac_hash(unsigned char *mac_addr)
+{
+	return jhash(mac_addr, IEEE80211_ADDR_LEN, 0) & (MLME_STATS_MAC_HASH_SIZE - 1);
+}
+
+/**
+ * mlme_stats_update_node - increment required stat in the node
+ * @stat_record: client's node
+ * @statistics_entry: entry of stats needs to be updated
+ * @incrementor: value which will be added to the stat entry
+ *
+ * Update client's node according parameters.
+ * Assumes that factory is locked.
+ */
+static void mlme_stats_update_node(struct mlme_stats_node *stat_node, unsigned int statistics_entry, unsigned int incrementor)
+{
+	/* NOTE(review): treats the counters as a flat array beginning at
+	 * .auth - requires all counters in mlme_stats_record to be
+	 * contiguous unsigned ints with no padding; confirm against the
+	 * struct definition in the header. */
+	unsigned int *node_stats_array = (unsigned int*)&stat_node->stats.auth;
+
+	if (statistics_entry >= MLME_STAT_MAX) {
+#ifdef MLME_STATS_DEBUG
+		/* NOTE(review): message lacks a trailing newline. */
+		printk(KERN_WARNING "Wrong statistics entry, ignoring it");
+#endif /* MLME_STATS_DEBUG */
+		return;
+	}
+
+	node_stats_array[statistics_entry] += incrementor;
+}
+
+/**
+ * mlme_stats_locate_node - search a node for client
+ * @mac_addr: client's mac address
+ * @mac_hash: hash of client's mac address
+ *
+ * Search a node for the specified client in its hash-table bucket
+ * (not the LRU list).  Returns NULL if the node isn't found.
+ * Assumes that factory is locked.
+ */
+static struct mlme_stats_node* mlme_stats_locate_node(unsigned char *mac_addr, int mac_hash)
+{
+	struct hlist_head *hash_head = &mlme_statistics.mac_hash_table[mac_hash];
+	struct mlme_stats_node *node_iterator = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	/* Older kernels use the 3-argument hlist_for_each_entry form. */
+	struct hlist_node *loop_cursor = NULL;
+	hlist_for_each_entry (node_iterator, loop_cursor, hash_head,hash_link) {
+#else
+	hlist_for_each_entry (node_iterator, hash_head,hash_link) {
+#endif
+		if (IEEE80211_ADDR_EQ(node_iterator->stats.mac_addr, mac_addr)) {
+			return node_iterator;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * mlme_stats_add_node - add new node
+ * @mac_addr: mac address of new client
+ * @mac_hash: hash of the mac address
+ *
+ * Add new node, put it to the head of LRU list and
+ * add it to the hash table.  At capacity, the least-recently-used
+ * node is evicted and reused for the new client.
+ * Assumes that factory is locked.
+ */
+static struct mlme_stats_node* mlme_stats_add_node(unsigned char *mac_addr, int mac_hash)
+{
+	struct mlme_stats_node* new_stats_node = NULL;
+	if (mlme_statistics.nodes_count == MLME_STATS_MAX_CLIENTS) {
+		/* Evict the LRU tail and recycle its node.
+		 * NOTE(review): the recycled node's counters are NOT zeroed,
+		 * so the new client inherits the evicted client's
+		 * statistics - confirm this is intended. */
+		new_stats_node = list_entry(mlme_statistics.lru_list_head.lru_link.prev,
+												struct mlme_stats_node, lru_link);
+		hlist_del(&new_stats_node->hash_link);
+		list_del(&new_stats_node->lru_link);
+	} else {
+#ifdef MLME_STATS_DEBUG
+		if (in_atomic()) {
+			printk("kzalloc in atomic context\n");
+		}
+		if (in_softirq()) {
+			printk("kzalloc in softirq context\n");
+		}
+#endif /* MLME_STATS_DEBUG */
+		/* NOTE(review): GFP_KERNEL allocation, yet the caller
+		 * (mlme_stats_update) holds the bh spinlock - may sleep under
+		 * the lock; the debug checks above hint at this concern. */
+		new_stats_node = kzalloc(sizeof(*new_stats_node), GFP_KERNEL);
+		if (new_stats_node == NULL) {
+#ifdef MLME_STATS_DEBUG
+			/* NOTE(review): typo "emory" in the message below
+			 * (runtime string, left untouched here). */
+			printk(KERN_ERR "Failed to allocate emory for new stats node\n");
+#endif /* MLME_STATS_DEBUG */
+			return NULL;
+		}
+		++mlme_statistics.nodes_count;
+	}
+	memcpy(new_stats_node->stats.mac_addr, mac_addr, IEEE80211_ADDR_LEN);
+
+	INIT_LIST_HEAD(&new_stats_node->lru_link);
+	list_add(&new_stats_node->lru_link, &mlme_statistics.lru_list_head.lru_link);
+	hlist_add_head(&new_stats_node->hash_link, &mlme_statistics.mac_hash_table[mac_hash]);
+	return new_stats_node;
+}
+
+#if defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS)
+/* seq_file iteration over the LRU list.  start() takes the factory lock
+ * and stop() releases it, so the lock is held across the whole dump.
+ * SEQ_START_TOKEN stands for the anonymous (all-zero MAC) record. */
+void *mlme_stats_seq_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t off;
+	struct mlme_stats_node *iterator, *iterator_tmp;
+
+	mlme_stats_lock();
+
+	if (!*pos) {
+		return SEQ_START_TOKEN;
+	}
+
+	off = 1;
+	list_for_each_entry_safe (iterator, iterator_tmp, &mlme_statistics.lru_list_head.lru_link, lru_link) {
+		if (off++ == *pos) {
+			return iterator;
+		}
+	}
+
+	return NULL;
+}
+
+void *mlme_stats_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct mlme_stats_node *next_iterator;
+
+	if (++*pos > mlme_statistics.nodes_count) {
+		return NULL;
+	}
+
+	if (v == SEQ_START_TOKEN) {
+		next_iterator = list_entry(mlme_statistics.lru_list_head.lru_link.next,
+								   struct mlme_stats_node, lru_link);
+	} else {
+		next_iterator = list_entry(((struct mlme_stats_node *)v)->lru_link.next,
+								   struct mlme_stats_node, lru_link);
+	}
+
+	return next_iterator;
+}
+/* Print the column header plus the anonymous record (for the start token)
+ * or one per-client row.  NOTE(review): "diassoc" in the header is a typo
+ * for "disassoc" (runtime string, left untouched here). */
+int mlme_stats_seq_show(struct seq_file *m, void *v)
+{
+	int i;
+	struct mlme_stats_node *current_node;
+	unsigned int *node_stats_array;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(m, "                  %11s%11s%11s%11s%11s%11s\n",
+				   "auth", "auth_fail", "assoc", "assoc_fail", "deauth", "diassoc");
+		node_stats_array = (unsigned int*)&mlme_statistics.lru_list_head.stats.auth;
+		seq_printf(m, "00:00:00:00:00:00 ");
+	} else {
+		current_node = (struct mlme_stats_node *)v;
+		node_stats_array = (unsigned int*)&current_node->stats.auth;
+		seq_printf(m, "%pM ", current_node->stats.mac_addr);
+	}
+
+	for (i = 0;i < MLME_STAT_MAX; ++i) {
+		seq_printf(m, "%11u", node_stats_array[i]);
+	}
+	seq_putc(m, '\n');
+	return 0;
+}
+void mlme_stats_seq_stop(struct seq_file *m, void *v)
+{
+	mlme_stats_unlock();
+}
+
+/* NB: despite the "_fops" name this is a seq_operations table. */
+struct seq_operations mlme_stats_seq_fops = {
+	.start   = mlme_stats_seq_start,
+	.stop    = mlme_stats_seq_stop,
+	.next    = mlme_stats_seq_next,
+	.show    = mlme_stats_seq_show,
+};
+static int mlme_stats_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &mlme_stats_seq_fops);
+};
+
+static const struct file_operations mlme_stats_proc_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mlme_stats_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+#endif /* defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS) */
+
+#ifdef MLME_STATS_DEVFS
+/*
+ * ioctl interface to the statistics: query capacity, current client count,
+ * all known MACs, or one client's full record (looked up by the MAC the
+ * caller passes in the same buffer).
+ *
+ * NOTE(review): kmalloc(GFP_KERNEL), copy_to_user() and copy_from_user()
+ * are all called while holding the bh spinlock taken by mlme_stats_lock();
+ * each of these may sleep, which is not allowed under a spinlock.
+ */
+static long mlme_stats_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+	int ret = 0;
+	unsigned int max_clients = MLME_STATS_MAX_CLIENTS;
+	struct mlme_stats_node *iterator = NULL, *iterator_tmp = NULL;
+	unsigned char *macs_buffer = NULL, *macs_ptr = NULL;
+
+	if (!arg) {
+		return -EFAULT;
+	}
+
+	mlme_stats_lock();
+	switch (cmd) {
+		case MLME_STATS_IOC_GET_MAX_CLIENTS:
+			if (copy_to_user((unsigned int __user*)arg, &max_clients, sizeof(max_clients)) != 0) {
+				ret = -EFAULT;
+			}
+			break;
+		case MLME_STATS_IOC_GET_CUR_CLIENTS:
+			if (copy_to_user((unsigned int __user*)arg, &mlme_statistics.nodes_count, sizeof(mlme_statistics.nodes_count)) != 0) {
+				ret = -EFAULT;
+			}
+			break;
+		case MLME_STATS_IOC_GET_ALL_MACS:
+			/* When below capacity, an extra all-0xFF MAC terminates
+			 * the list for the caller. */
+			if (mlme_statistics.nodes_count < MLME_STATS_MAX_CLIENTS) {
+				max_clients = mlme_statistics.nodes_count + 1;
+			}
+			macs_buffer = kmalloc(IEEE80211_ADDR_LEN * max_clients, GFP_KERNEL);
+			if (macs_buffer != NULL) {
+				macs_ptr = macs_buffer;
+				list_for_each_entry_safe (iterator, iterator_tmp, &mlme_statistics.lru_list_head.lru_link, lru_link) {
+					memcpy(macs_ptr, iterator->stats.mac_addr, IEEE80211_ADDR_LEN);
+					macs_ptr += IEEE80211_ADDR_LEN;
+				}
+				if (mlme_statistics.nodes_count < MLME_STATS_MAX_CLIENTS) {
+					memset(macs_ptr, 0xFF, IEEE80211_ADDR_LEN);
+				}
+				if (copy_to_user((unsigned char __user*)arg, macs_buffer, max_clients * IEEE80211_ADDR_LEN) != 0) {
+					ret = -EFAULT;
+				}
+				kfree(macs_buffer);
+			} else {
+				ret = -ENOMEM;
+			}
+			break;
+		case MLME_STATS_IOC_GET_CLIENT_STATS:
+			macs_buffer = kmalloc(IEEE80211_ADDR_LEN, GFP_KERNEL);
+			/* NOTE(review): no else branch - on kmalloc failure ret
+			 * stays 0, silently reporting success; should be -ENOMEM
+			 * as in the GET_ALL_MACS case above. */
+			if (macs_buffer != NULL) {
+				if (copy_from_user(macs_buffer, (unsigned char __user*)arg, IEEE80211_ADDR_LEN) != 0) {
+					ret = -EFAULT;
+				} else {
+					/* All-zero MAC selects the anonymous record. */
+					if (memcmp(macs_buffer, mlme_statistics.lru_list_head.stats.mac_addr, IEEE80211_ADDR_LEN) == 0) {
+						if (copy_to_user((struct mlme_stats_record __user*)arg, &mlme_statistics.lru_list_head.stats,
+										 sizeof(struct mlme_stats_record))) {
+							ret = -EFAULT;
+						}
+					} else {
+						iterator = mlme_stats_locate_node(macs_buffer, mlme_stats_mac_hash(macs_buffer));
+						if (iterator != NULL) {
+							if (copy_to_user((struct mlme_stats_record __user*)arg, &iterator->stats,
+											 sizeof(struct mlme_stats_record))) {
+								ret = -EFAULT;
+							}
+						} else {
+							ret = -ENXIO;
+						}
+					}
+				}
+				kfree(macs_buffer);
+			}
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+	}
+	mlme_stats_unlock();
+
+	return ret;
+}
+
+/*
+ * read() interface: streams the anonymous record followed by every
+ * client record as raw struct mlme_stats_record bytes, honoring *ppos
+ * for partial/resumed reads.  Returns -ESPIPE if *ppos does not land
+ * on data that still exists.
+ *
+ * NOTE(review): copy_to_user() return values are ignored here (faults go
+ * undetected), and copy_to_user() is called while the bh spinlock from
+ * mlme_stats_lock() is held - it may sleep, which is invalid under a
+ * spinlock.
+ */
+static ssize_t mlme_stats_read(struct file *file, char __user *buf,
+							   size_t count, loff_t *ppos)
+{
+	unsigned char __user* user_ptr = (unsigned char __user*)buf;
+	struct mlme_stats_node *iterator,*iterator_tmp;
+	/* Decompose *ppos into whole records to skip plus a byte offset
+	 * into the first record actually copied. */
+	unsigned int skip_records = (unsigned int)*ppos /
+								(unsigned int)sizeof(struct mlme_stats_record);
+	unsigned int record_offset = (unsigned int)*ppos %
+								 (unsigned int)sizeof(struct mlme_stats_record);
+	size_t bytes_to_copy;
+	unsigned int remain_to_copy = count;
+
+	mlme_stats_lock();
+
+	if (skip_records == 0) {
+		// Anonymous record should requested as well
+		bytes_to_copy = sizeof(struct mlme_stats_record) - record_offset;
+		bytes_to_copy = min(remain_to_copy, bytes_to_copy);
+		copy_to_user(user_ptr, (unsigned char*)&mlme_statistics.lru_list_head.stats + record_offset, bytes_to_copy);
+		user_ptr += bytes_to_copy;
+		*ppos += bytes_to_copy;
+		remain_to_copy -= bytes_to_copy;
+		if (record_offset > 0) {
+			record_offset = 0;
+		}
+	} else {
+		--skip_records;
+	}
+	if (!remain_to_copy) {
+		goto all_read;
+	}
+    list_for_each_entry_safe (iterator, iterator_tmp, &mlme_statistics.lru_list_head.lru_link, lru_link) {
+		if (skip_records > 0) {
+			--skip_records;
+			continue;
+		}
+		bytes_to_copy = sizeof(struct mlme_stats_record) - record_offset;
+		bytes_to_copy = min(remain_to_copy, bytes_to_copy);
+		copy_to_user(user_ptr, (unsigned char*)&iterator->stats + record_offset, bytes_to_copy);
+		user_ptr += bytes_to_copy;
+		*ppos += bytes_to_copy;
+		remain_to_copy -= bytes_to_copy;
+		if (record_offset > 0) {
+			record_offset = 0;
+		}
+		if (remain_to_copy == 0) {
+			goto all_read;
+		}
+	}
+	if (skip_records > 0 || record_offset > 0) {
+		mlme_stats_unlock();
+		return -ESPIPE;
+	}
+all_read:
+	mlme_stats_unlock();
+
+	return count - remain_to_copy;
+}
+
+static const struct file_operations mlme_stats_fops = {
+	.owner = THIS_MODULE,
+	.read = mlme_stats_read,
+	.unlocked_ioctl = mlme_stats_ioctl,
+};
+static struct cdev mlme_stats_dev;
+#endif /* MLME_STATS_DEVFS */
+
+/* Workqueue callback: apply one deferred increment in process context
+ * (where blocking in mlme_stats_update() is safe), then free the work
+ * item allocated by mlme_stats_delayed_update(). */
+static void perform_delayed_update(struct work_struct *work)
+{
+	struct mlme_delayed_update_task *owner = container_of(work, struct mlme_delayed_update_task, work);
+	mlme_stats_update(owner->mac_addr, owner->statistics_entry, owner->incrementor);
+	kfree(owner);
+}
+
+
+void mlme_stats_update(unsigned char *mac_addr, unsigned int statistics_entry,
+					   unsigned int incrementor)
+{
+	int mac_hash;
+	struct mlme_stats_node* stats_node;
+
+	if (atomic_read(&mlme_active) != 1) {
+		return;
+	}
+
+	mlme_stats_lock();
+	mac_hash = mlme_stats_mac_hash(mac_addr);
+	stats_node = mlme_stats_locate_node(mac_addr, mac_hash);
+
+	/* Client statistics node already exists, simply update it */
+	if (stats_node != NULL) {
+		mlme_stats_update_node(stats_node, statistics_entry,incrementor);
+		list_move(&stats_node->lru_link, &mlme_statistics.lru_list_head.lru_link);
+		goto unlock_and_exit;
+	}
+
+	/* If client entry doesn't exist and this is not auth request
+	 * than update anonymous statistics
+	 */
+	if (statistics_entry != 0) {
+		mlme_stats_update_node(&mlme_statistics.lru_list_head, statistics_entry, incrementor);
+		goto unlock_and_exit;
+	}
+
+	// Create node for the new client
+	stats_node = mlme_stats_add_node(mac_addr, mac_hash);
+	if (stats_node == NULL) {
+		goto unlock_and_exit;
+	}
+	mlme_stats_update_node(stats_node, statistics_entry, incrementor);
+unlock_and_exit:
+	mlme_stats_unlock();
+}
+EXPORT_SYMBOL(mlme_stats_update);
+
+/*
+ * Atomic-context-safe variant of mlme_stats_update(): queues the increment
+ * onto the module workqueue (GFP_ATOMIC allocation) to be applied later by
+ * perform_delayed_update().  Silently drops the event if the workqueue is
+ * missing, the module is inactive, or the allocation fails.
+ */
+void mlme_stats_delayed_update(unsigned char *mac_addr, unsigned int statistics_entry,
+							   unsigned int incrementor)
+{
+	struct mlme_delayed_update_task *new_work = NULL;
+
+	if (mlme_statistics.delayed_update_wq == NULL) {
+		return;
+	}
+	if (atomic_read(&mlme_active) != 1) {
+		return;
+	}
+	new_work = kmalloc(sizeof(*new_work), GFP_ATOMIC);
+	if (new_work != NULL) {
+		INIT_WORK(&new_work->work, perform_delayed_update);
+		memcpy(new_work->mac_addr, mac_addr, IEEE80211_ADDR_LEN);
+		new_work->statistics_entry = statistics_entry;
+		new_work->incrementor = incrementor;
+		queue_work(mlme_statistics.delayed_update_wq, &new_work->work);
+	}
+}
+EXPORT_SYMBOL(mlme_stats_delayed_update);
+
+#ifdef MLME_STATS_DEBUG
+/* Debug-only: dump the LRU list (MRU first) to the kernel log. */
+static void mlme_stats_dump_lru(void)
+{
+	struct mlme_stats_node *iterator, *iterator_tmp;
+
+	mlme_stats_lock();
+	printk("==== BEGIN OF LRU DUMP ====\n");
+	list_for_each_entry_safe (iterator, iterator_tmp, &mlme_statistics.lru_list_head.lru_link, lru_link) {
+		printk("%pM [%d]\n", iterator->stats.mac_addr, mlme_stats_mac_hash(iterator->stats.mac_addr));
+	}
+	printk("===== END OF LRU DUMP =====\n");
+	mlme_stats_unlock();
+}
+/* Debug-only: dump every hash bucket's chain.
+ * NOTE(review): uses the 3-argument hlist_for_each_entry without the
+ * KERNEL_VERSION guard that mlme_stats_locate_node() has - this will not
+ * compile on kernels using the newer 2-argument form. */
+static void mlme_stats_dump_hash_table(void)
+{
+	struct hlist_head *hash_head = NULL;
+	struct mlme_stats_node *node_iterator = NULL;
+	struct hlist_node *loop_cursor = NULL;
+	int i;
+	mlme_stats_lock();
+	printk("==== BEGIN OF HASH TABLE DUMP ====\n");
+	for (i = 0;i < MLME_STATS_MAC_HASH_SIZE; ++i) {
+		hash_head = &mlme_statistics.mac_hash_table[i];
+		printk("%5d  ",i);
+		hlist_for_each_entry (node_iterator, loop_cursor, hash_head, hash_link) {
+			printk("->%pM", node_iterator->stats.mac_addr);
+		}
+		printk("\n");
+	}
+	printk("===== END OF HASH TABLE DUMP =====\n");
+	mlme_stats_unlock();
+}
+/* Debug-only smoke test: hammer mlme_stats_update() with 1024 varying
+ * MACs/increments to exercise hashing, LRU rotation and eviction, then
+ * dump both structures. */
+void test_func(void)
+{
+	unsigned char mac[] = {0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF};
+	unsigned short *mac_diff = (unsigned short*)mac;
+	int i, j = 1;
+
+
+	for (i = 0;i < 1024; ++i, j = (j + 1) & 0x0F, *mac_diff = *mac_diff + 1) {
+		if (!j) j = 1;
+		mlme_stats_update(mac, MLME_STAT_ASSOC, j);
+	}
+
+	mlme_stats_dump_lru();
+	mlme_stats_dump_hash_table();
+}
+#endif /* MLME_STATS_DEBUG */
+
+/*
+ * Initialize the statistics factory: zero all state, set up the hash
+ * table, lock and LRU head, create the single-threaded update workqueue,
+ * and register the procfs and/or devfs front ends that are configured.
+ * NOTE(review): mlme_active is set to 1 even if workqueue creation failed
+ * (the delayed-update path then silently no-ops).
+ */
+void mlme_stats_init(void)
+{
+	int i = 0;
+	dev_t mlme_stats_dev_id;
+
+	memset(&mlme_statistics, 0x00, sizeof(mlme_statistics));
+	for (;i < MLME_STATS_MAC_HASH_SIZE; ++i) {
+		INIT_HLIST_HEAD(&mlme_statistics.mac_hash_table[i]);
+	}
+	spin_lock_init(&mlme_statistics.access_lock);
+	INIT_LIST_HEAD(&mlme_statistics.lru_list_head.lru_link);
+	mlme_statistics.delayed_update_wq = create_singlethread_workqueue("mlmestats");
+	if (mlme_statistics.delayed_update_wq == NULL) {
+		printk(KERN_WARNING "Unable to create workqueue for mlme stats\n");
+	}
+	atomic_set(&mlme_active, 1);
+
+#if defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	if (!proc_create(MLME_STATS_PROC_FILENAME, 0400, NULL, &mlme_stats_proc_fops)) {
+		printk(KERN_ERR "Unable to create proc file\n");
+	}
+#else
+	/* NOTE(review): procfs registration is unimplemented on >= 4.7
+	 * kernels - the TODO below was never resolved. */
+	//TODO: Avinash: Need to find correct proc_dir_entry and context.
+/*	tmp->entry = proc_create_data(tmp->name, PROC_IEEE80211_PERM,*/
+/*				      vap->iv_proc, &mlme_stats_proc_fops, vap);*/
+#endif
+#endif /* defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS) */
+
+#ifdef MLME_STATS_DEVFS
+	cdev_init(&mlme_stats_dev, &mlme_stats_fops);
+	mlme_stats_dev.owner = THIS_MODULE;
+	mlme_stats_dev_id = MKDEV(MLME_STATS_DEVFS_MAJOR, 0);
+	if (register_chrdev_region(mlme_stats_dev_id, 1, MLME_STATS_DEVFS_NAME) < 0) {
+		printk(KERN_WARNING "Unable to register major number for mlme stats\n");
+	} else {
+		if (cdev_add(&mlme_stats_dev, mlme_stats_dev_id, 1) < 0) {
+			printk(KERN_WARNING "Unable to add character device for mlme stats\n");
+		}
+	}
+#endif /* MLME_STATS_DEVFS */
+
+#ifdef MLME_STATS_DEBUG
+	test_func();
+#endif /* MLME_STATS_DEBUG */
+}
+EXPORT_SYMBOL(mlme_stats_init);
+
+/*
+ * Tear down the statistics factory: unregister the devfs/procfs front
+ * ends, deactivate updates, drain and destroy the workqueue, then free
+ * every client node.  The node list is walked without the lock - assumes
+ * no other users remain at module exit.
+ * NOTE(review): unregister_chrdev_region()/cdev_del() run even if
+ * registration failed in mlme_stats_init().
+ */
+void mlme_stats_exit(void)
+{
+	struct mlme_stats_node *iterator,*iterator_tmp;
+
+#ifdef MLME_STATS_DEVFS
+	unregister_chrdev_region(mlme_stats_dev.dev, mlme_stats_dev.count);
+	cdev_del(&mlme_stats_dev);
+#endif /* MLME_STATS_DEVFS */
+
+#if defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		remove_proc_entry(MLME_STATS_PROC_FILENAME, NULL);
+#else
+/*		proc_remove(vap->iv_proc);*/
+#endif
+#endif /* defined(CONFIG_PROC_FS) && defined(MLME_STATS_PROCFS) */
+
+	/* Stop accepting updates, then flush any queued delayed updates
+	 * before destroying the workqueue. */
+	atomic_set(&mlme_active, 0);
+	if (mlme_statistics.delayed_update_wq != NULL) {
+		flush_workqueue(mlme_statistics.delayed_update_wq);
+		destroy_workqueue(mlme_statistics.delayed_update_wq);
+	}
+
+	list_for_each_entry_safe (iterator, iterator_tmp, &mlme_statistics.lru_list_head.lru_link, lru_link) {
+		list_del(&iterator->lru_link);
+		kfree(iterator);
+	}
+}
+EXPORT_SYMBOL(mlme_stats_exit);
diff --git a/drivers/qtn/wlan/ieee80211_node.c b/drivers/qtn/wlan/ieee80211_node.c
new file mode 100644
index 0000000..d0da7a3
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_node.c
@@ -0,0 +1,4388 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_node.c 2366 2007-05-23 08:43:05Z mrenzmann $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 node handling support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+
+#include <qtn/qdrv_sch.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_node.h"
+#include "net80211/ieee80211_dot11_msg.h"
+#include "net80211/if_ethersubr.h"
+#include "net80211/ieee80211_tpc.h"
+#include "net80211/ieee80211_tdls.h"
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+#include "net80211/ieee80211_bsa.h"
+#endif
+
+#include <qtn/qtn_net_packet.h>
+#include <qtn/skb_recycle.h>
+#include "qdrv_sch_const.h"
+#include <qtn/qtn_pcap.h>
+#include <qtn/shared_params.h>
+#include <qtn/qtn_vlan.h>
+#include "qtn_logging.h"
+
+#include <asm/board/pm.h>
+#include <asm/board/kdump.h>
+
+#define IEEE80211_OBSS_AP_SCAN_INT	25
+#define IEEE80211_BSS_DELETE_DELAY 5
+#define FREQ_2_4_GHZ                    0
+#define FREQ_5_GHZ                      1
+
+#define DBGMAC "%02X:%02X:%02X:%02X:%02X:%02X"
+#define ETHERFMT(a) \
+	        (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+
+static void ieee80211_aid_remove(struct ieee80211_node *);
+static void ieee80211_idx_remove(struct ieee80211_node *);
+static void ieee80211_node_cleanup(struct ieee80211_node *);
+static void ieee80211_node_exit(struct ieee80211_node *);
+static u_int8_t ieee80211_node_getrssi(const struct ieee80211_node *);
+
+static void _ieee80211_free_node(struct ieee80211_node *);
+
+static void ieee80211_node_timeout(unsigned long);
+
+static void ieee80211_node_table_init(struct ieee80211com *,
+	struct ieee80211_node_table *, const char *, int);
+static void ieee80211_node_table_cleanup(struct ieee80211com *, struct ieee80211_node_table *);
+static void ieee80211_node_table_reset(struct ieee80211com *, struct ieee80211_node_table *,
+	struct ieee80211vap *);
+static void ieee80211_node_wds_ageout(unsigned long);
+static void ieee80211_timeout_station_work(struct work_struct *work);
+void ieee80211_wireless_reassoc(struct ieee80211vap *vap, int debug, int rescan);
+static int v_wait = 0;
+#define V_WAIT_REASSOC (10)
+
+#define IEEE80211_REAUTH_ENABLE 1
+MALLOC_DEFINE(M_80211_NODE, "80211node", "802.11 node state");
+
+struct ieee80211_v_cnt {
+	uint32_t total;
+	uint32_t v;
+};
+
+#ifdef IEEE80211_DEBUG_REFCNT
+
+static struct ieee80211_node_table *nt_refdebug = NULL;
+
+/*
+ * Print this node's reference-count debug table: one line per recorded
+ * inc/dec call site (direction, hit count, line, file).  A set upper
+ * half-word in entry->line marks the entry as a decrement (see
+ * ieee80211_node_dbgref_history()).  If the fixed-size table overflowed,
+ * the aggregate inc/dec counters are reported as "unknown" call sites.
+ */
+static void ieee80211_node_show_refdebug_info(struct ieee80211_node *ni)
+{
+	struct node_refdebug_info *info = ni->ni_refdebug_info_p;
+	struct node_refdebug_info_entry *entry;
+	int i;
+
+	if (info) {
+		printk("\nnode %p, MAC %pM, ref count %d, entry count %d:\n",
+				ni,
+				ni->ni_macaddr,
+				ieee80211_node_refcnt(ni),
+				info->entry_count);
+		for (i = 0; i< info->entry_count; i++) {
+			entry = &info->entry[i];
+			/* INC	count	line	filename */
+			printk("%4s %10d %9d %s\n",
+					(entry->line & 0xffff0000) ? "dec" : "inc",
+					entry->count,
+					(entry->line & 0x0000ffff),
+					entry->fname);
+		}
+
+		/* Table filled up: per-site attribution was lost for the
+		 * overflowed events, so report only the totals. */
+		if (info->entry_count == REFDEBUG_ENTRY_MAX) {
+			printk("%4s %10d %9s %s\n",
+					"inc", info->inc_count, "unknown", "unknown");
+			printk("%4s %10d %9s %s\n",
+					"dec", info->dec_count, "unknown", "unknown");
+		}
+	}
+}
+
+#endif
+
+/*
+ * Dump the refcount debug history of every node in the table remembered
+ * by ieee80211_node_attach() (nt_refdebug).  Used as a kdump
+ * troubleshooter hook.  Prints a notice instead when the refcount
+ * debugging build option is off.
+ */
+void ieee80211_node_dbgref_history_dump(void)
+{
+#ifdef IEEE80211_DEBUG_REFCNT
+	struct ieee80211_node *ni, *next;
+
+	if (!nt_refdebug)
+	      return;
+
+	/* hold the table lock so nodes cannot be unlinked mid-walk */
+	IEEE80211_NODE_LOCK_IRQ(nt_refdebug);
+	TAILQ_FOREACH_SAFE(ni, &nt_refdebug->nt_node, ni_list, next) {
+		ieee80211_node_show_refdebug_info(ni);
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt_refdebug);
+#else
+	printk("%s: not enabled\n", __func__);
+#endif
+}
+
+#ifdef IEEE80211_DEBUG_REFCNT
+/*
+ * Record one reference inc/dec event in the node's fixed-size call-site
+ * table.  A decrement is encoded by setting the upper 16 bits of 'line'
+ * (decoded again in ieee80211_node_show_refdebug_info()).  Local IRQs are
+ * disabled around the table update since refs are taken from hwIRQ and
+ * softIRQ context as well.
+ */
+static __sram_text void
+ieee80211_node_dbgref_history(const struct ieee80211_node *ni,
+			const char *filename, int line, int is_increased)
+{
+	unsigned long flags;
+	struct node_refdebug_info *info;
+	struct node_refdebug_info_entry *entry;
+	int i;
+
+	info = ni->ni_refdebug_info_p;
+	if (!info)
+	      return;
+
+	/* tag decrements: non-zero high half-word means "dec" when dumped */
+	line = is_increased ? line : (0xffff0000 | line);
+
+	local_irq_save(flags);
+	for (i = 0; i < REFDEBUG_ENTRY_MAX; i++) {
+		entry = &info->entry[i];
+		if (entry->fname == NULL) {
+			/* first free slot: claim it for this call site */
+			entry->fname = filename;
+			entry->line = line;
+			info->entry_count++;
+			break;
+		}
+		if (entry->line == line &&
+				strcmp(filename, entry->fname) == 0)
+			break;
+	}
+
+	if (unlikely(i == REFDEBUG_ENTRY_MAX)) {
+		/* table full: fall back to aggregate counters only */
+		printk_once("%s: Table is full\n", __FUNCTION__);
+		if (is_increased)
+			info->inc_count++;
+		else
+			info->dec_count++;
+	} else {
+		entry->count++;
+	}
+	local_irq_restore(flags);
+}
+
+/*
+ * Exported wrapper around ieee80211_node_dbgref_history() so other
+ * modules can log node reference changes into the same debug table.
+ */
+void __sram_text
+ieee80211_node_dbgref(const struct ieee80211_node *ni, const char *filename,
+			const int line, int is_increased)
+{
+	ieee80211_node_dbgref_history(ni, filename, line, is_increased);
+}
+EXPORT_SYMBOL(ieee80211_node_dbgref);
+#endif
+
+/*
+ * Unlink a node from its table's list and hash and release its AID and
+ * node-index allocations.  ni->ni_table is cleared as the back-reference
+ * marker that the node is gone from the table.
+ *
+ * Caller must lock the IEEE80211_NODE_LOCK
+ * Context: hwIRQ, softIRQ and process context
+ */
+static void __sram_text
+_ieee80211_remove_node(struct ieee80211_node *ni)
+{
+	struct ieee80211_node_table *nt = ni->ni_table;
+
+	if (nt != NULL) {
+		TAILQ_REMOVE(&nt->nt_node, ni, ni_list);
+		LIST_REMOVE(ni, ni_hash);
+		ni->ni_table = NULL;
+		ieee80211_aid_remove(ni);
+		ieee80211_idx_remove(ni);
+	}
+}
+
+/*
+ * Reclaim a node.  If this is the last reference count then
+ * do the normal free work.  Otherwise remove it from the node
+ * table and mark it gone by clearing the back-reference.
+ *
+ * In IEEE80211_DEBUG_REFCNT builds the macro below redirects all callers
+ * to the _debug variant so the caller's file/line are recorded in the
+ * refcount history.
+ */
+static void
+#ifdef IEEE80211_DEBUG_REFCNT
+#define ieee80211_node_reclaim(_nt, _ni) \
+        ieee80211_node_reclaim_debug(_nt, _ni, __FILE__, __LINE__)
+ieee80211_node_reclaim_debug(struct ieee80211_node_table *nt, struct ieee80211_node *ni,
+			const char *filename, int line)
+#else
+ieee80211_node_reclaim(struct ieee80211_node_table *nt, struct ieee80211_node *ni)
+#endif
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	IEEE80211_LOCK_IRQ(ic);
+	/* dectestref: drop one reference and test whether it was the last */
+	if (!ieee80211_node_dectestref(ni)) {
+		ieee80211_node_dbgref(ni, filename, line, IEEE80211_NODEREF_DECR);
+		/*
+		 * Other references are present, just remove the
+		 * node from the table so it cannot be found.  When
+		 * the references are dropped storage will be
+		 * reclaimed.
+		 */
+		_ieee80211_remove_node(ni);
+	} else {
+		ieee80211_node_dbgref(ni, filename, line, IEEE80211_NODEREF_DECR);
+		_ieee80211_free_node(ni);
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+}
+
+/*
+ * Exported no-op association hook; the body is intentionally empty
+ * (its external callers are not visible in this file).
+ */
+void
+indicate_association(void)
+{
+}
+EXPORT_SYMBOL(indicate_association);
+
+/*
+ * Exported no-op disassociation hook; the body is intentionally empty
+ * (its external callers are not visible in this file).
+ */
+void
+indicate_disassociation(void)
+{
+}
+EXPORT_SYMBOL(indicate_disassociation);
+
+/*
+ * Attach per-device node management state: initialise the station node
+ * table, arm the inactivity timer (only when reauth is compiled in) and
+ * the scan-result expiry timer, and install the node-related callbacks
+ * on the ieee80211com.
+ */
+void
+ieee80211_node_attach(struct ieee80211com *ic)
+{
+	ieee80211_node_table_init(ic, &ic->ic_sta, "station",
+		IEEE80211_INACT_INIT);
+	init_timer(&ic->ic_inact);
+	ic->ic_inact.function = ieee80211_node_timeout;
+	ic->ic_inact.data = (unsigned long) ic;
+	ic->ic_inact.expires = jiffies + IEEE80211_INACT_WAIT * HZ;
+#if IEEE80211_REAUTH_ENABLE
+	add_timer(&ic->ic_inact);
+#endif
+
+	/* copy default check interval from inactivity timer */
+	ic->ic_scan_results_check = IEEE80211_INACT_WAIT;
+	init_timer(&ic->ic_scan_results_expire);
+	ic->ic_scan_results_expire.function = ieee80211_scan_timeout;
+	ic->ic_scan_results_expire.data = (unsigned long) ic;
+	ic->ic_scan_results_expire.expires = jiffies + ic->ic_scan_results_check * HZ;
+	add_timer(&ic->ic_scan_results_expire);
+
+	ic->ic_node_free = ieee80211_node_exit;
+	ic->ic_node_cleanup = ieee80211_node_cleanup;
+	ic->ic_node_getrssi = ieee80211_node_getrssi;
+	ic->ic_iterate_nodes = ieee80211_iterate_nodes;
+	ic->ic_iterate_dev_nodes = ieee80211_iterate_dev_nodes;
+
+#ifdef IEEE80211_DEBUG_REFCNT
+	/* remember the station table so kdump can dump refcount history */
+	kdump_add_troubleshooter(&ieee80211_node_dbgref_history_dump);
+	nt_refdebug = &ic->ic_sta;
+#endif
+}
+
+/*
+ * Inverse of ieee80211_node_attach(): stop the timers armed there and
+ * tear down the station node table.
+ */
+void
+ieee80211_node_detach(struct ieee80211com *ic)
+{
+#if IEEE80211_REAUTH_ENABLE
+	del_timer(&ic->ic_inact);
+#endif
+	del_timer(&ic->ic_scan_results_expire);
+	ieee80211_node_table_cleanup(ic, &ic->ic_sta);
+}
+
+/*
+ * Per-VAP node state setup: seed the station inactivity thresholds.
+ * The "run" (post-association) timeout differs by operating mode.
+ */
+void
+ieee80211_node_vattach(struct ieee80211vap *vap)
+{
+	/* default station inactivity timer setings */
+	vap->iv_inact_init = IEEE80211_INACT_INIT;
+	vap->iv_inact_auth = IEEE80211_INACT_AUTH;
+	vap->iv_inact_probe = IEEE80211_INACT_PROBE;
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		vap->iv_inact_run = IEEE80211_INACT_RUN_STA;
+	else if (vap->iv_opmode == IEEE80211_M_WDS)
+		vap->iv_inact_run = IEEE80211_INACT_RUN_WDS;
+	else
+		vap->iv_inact_run = IEEE80211_INACT_RUN;
+}
+
+/*
+ * Late per-VAP attach: clamp the AID space, create the BSS node via
+ * ieee80211_reset_bss() and seed the "global" RSN defaults on the BSS
+ * node so newly created station nodes inherit them.  WDS VAPs get no
+ * BSS node - only an authenticator.
+ */
+void
+ieee80211_node_latevattach(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_rsnparms *rsn;
+
+	/*
+	 * Allocate these only if needed. On STA, only needed for TDLS
+	 * connections. Beware that adhoc mode doesn't support ATIM yet
+	 */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+			vap->iv_opmode == IEEE80211_M_STA ||
+			vap->iv_opmode == IEEE80211_M_WDS) {
+		if (vap->iv_max_aid == 0)
+			vap->iv_max_aid = IEEE80211_AID_DEF;
+		else if (vap->iv_max_aid > IEEE80211_AID_MAX)
+			vap->iv_max_aid = IEEE80211_AID_MAX;
+	}
+
+	ieee80211_reset_bss(vap);
+
+	if (vap->iv_opmode == IEEE80211_M_WDS) {
+		vap->iv_bss = NULL;
+		vap->iv_auth = ieee80211_authenticator_get(IEEE80211_AUTH_AUTO);
+		return;
+	}
+
+	/*
+	 * Set up "global settings" in the bss node so that
+	 * each new station automatically inherits them.
+	 */
+	rsn = &vap->iv_bss->ni_rsn;
+	rsn->rsn_ucastcipherset |= 1 << IEEE80211_CIPHER_AES_CCM;
+	rsn->rsn_ucastcipherset |= 1 << IEEE80211_CIPHER_WEP;
+	if (IEEE80211_IS_TKIP_ALLOWED(ic))
+		rsn->rsn_ucastcipherset |= 1 << IEEE80211_CIPHER_TKIP;
+	if (ic->ic_caps & IEEE80211_C_AES)
+		rsn->rsn_ucastcipherset |= 1 << IEEE80211_CIPHER_AES_OCB;
+	if (ic->ic_caps & IEEE80211_C_CKIP)
+		rsn->rsn_ucastcipherset |= 1 << IEEE80211_CIPHER_CKIP;
+	/*
+	 * Default unicast cipher to WEP for 802.1x use.  If
+	 * WPA is enabled the management code will set these
+	 * values to reflect.
+	 */
+	rsn->rsn_ucastcipher = IEEE80211_CIPHER_WEP;
+	rsn->rsn_ucastkeylen = 104 / NBBY;
+	/* Initialise with the lowest allowed mcast cipher */
+	if (!IEEE80211_IS_TKIP_ALLOWED(ic))
+		rsn->rsn_mcastcipher = IEEE80211_CIPHER_AES_CCM;
+	else
+		rsn->rsn_mcastcipher = IEEE80211_CIPHER_TKIP;
+	rsn->rsn_mcastkeylen = 128 / NBBY;
+	/*
+	 * We support both WPA-PSK and 802.1x; the one used
+	 * is determined by the authentication mode and the
+	 * setting of the PSK state.
+	 */
+	rsn->rsn_keymgmtset = WPA_ASE_8021X_UNSPEC | WPA_ASE_8021X_PSK;
+	rsn->rsn_keymgmt = WPA_ASE_8021X_PSK;
+
+	vap->iv_auth = ieee80211_authenticator_get(vap->iv_bss->ni_authmode);
+}
+
+/*
+ * Per-VAP node state teardown.  The BSS node is temporarily referenced
+ * so the table reset cannot free it out from under us, then released
+ * and cleared.
+ */
+void
+ieee80211_node_vdetach(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (vap->iv_bss != NULL)
+		ieee80211_ref_node(vap->iv_bss);
+
+	ieee80211_node_table_reset(ic, &ic->ic_sta, vap);
+	if (vap->iv_bss != NULL) {
+		ieee80211_free_node(vap->iv_bss);
+		vap->iv_bss = NULL;
+	}
+}
+
+/*
+ * Empty data frame used to do rate training. Goes through the normal
+ * data path, to trigger the rate adaptation algorithm to find the optimal rate
+ * before we send traffic.
+ *
+ * OUI extended Ethertype frame for Quantenna OUI is used for rate training
+ * packets. The bridge at the other end of the link will discard these frames
+ * as they are destined for the address of the WiFi interface, and there is
+ * no handler installed for this frame type.
+ *
+ * The frame is built as Ethernet header + LLC/SNAP (org code 0, type
+ * ETHERTYPE_802A) + Quantenna OUI element, padded with the training
+ * fill byte, then classified to the BK queue and sent via
+ * dev_queue_xmit().  A node reference is taken for the in-flight skb.
+ */
+static void ieee80211_send_dummy_data(struct ieee80211_node *ni,
+		struct ieee80211vap *vap, int skb_flags)
+{
+	struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+	struct sk_buff *skb = qtn_skb_recycle_list_pop(recycle_list, &recycle_list->stats_qdrv);
+	struct qtn_dummy_frame *df;
+	int total_len = QTN_RATE_TRAIN_DATA_LEN + sizeof(*df) - sizeof(df->eh);
+	uint32_t *payload;
+	uint32_t *data;
+
+	if (!skb) {
+		/* If the recycle list is empty, use a smaller buffer to reduce memory pressure */
+		/* NOTE(review): allocation is QTN_RATE_TRAIN_DATA_LEN * 3 -
+		 * confirm this really is smaller than a recycle-list buffer. */
+		skb = dev_alloc_skb(QTN_RATE_TRAIN_DATA_LEN * 3);
+		if (!skb) {
+			return;
+		}
+	}
+
+	df = (struct qtn_dummy_frame *)skb_put(skb, sizeof(df->eh));
+	payload = (uint32_t *)skb_put(skb, total_len);
+
+	skb_reset_network_header(skb);
+
+	memcpy(df->eh.ether_shost, vap->iv_myaddr, sizeof(df->eh.ether_shost));
+	memcpy(df->eh.ether_dhost, ni->ni_macaddr, sizeof(df->eh.ether_dhost));
+	memset(payload, QTN_RATE_TRAIN_BYTE, total_len);
+	df->llc.llc_dsap = LLC_SNAP_LSAP;
+	df->llc.llc_ssap = LLC_SNAP_LSAP;
+	df->llc.llc_un.type_snap.control = LLC_UI;
+	df->llc.llc_un.type_snap.org_code[0] = 0;
+	df->llc.llc_un.type_snap.org_code[1] = 0;
+	df->llc.llc_un.type_snap.org_code[2] = 0;
+	df->llc.llc_un.type_snap.ether_type = htons(ETHERTYPE_802A);
+	ieee80211_oui_add_qtn(df->ouie.oui);
+	put_unaligned(htons(QTN_OUIE_TYPE_TRAINING), &df->ouie.type);
+	df->eh.ether_type = htons(total_len);
+
+	/* optionally embed the per-node training hash after the header */
+	if (ni->ni_rate_train_hash) {
+		data = (uint32_t *)(df + 1);
+		put_unaligned(htonl(ni->ni_rate_train_hash), data + 1);
+	}
+
+	skb->dev = vap->iv_dev;
+
+	/* force training frames onto the background (BK) queue */
+	qdrv_sch_classify_bk(skb);
+
+	/* reference travels with the skb; released by the xmit path */
+	ieee80211_ref_node(ni);
+	QTN_SKB_CB_NI(skb) = ni;
+
+	M_FLAG_SET(skb, M_RATE_TRAINING);
+	M_FLAG_SET(skb, M_NO_AMSDU);
+	M_FLAG_SET(skb, M_VSP_CHK);
+	if (skb_flags != 0)
+		M_FLAG_SET(skb, (skb_flags));
+
+	dev_queue_xmit(skb);
+}
+
+/*
+ * Rate training - send a bunch of NULL data packets to get the rate
+ * retry algorithm to converge quickly.
+ *
+ * Timer callback (arg is the node pointer).  Re-arms itself every jiffy
+ * until iv_rate_training_count rounds have been sent, then marks
+ * training finished and drops the node reference taken when the timer
+ * was started.
+ */
+static void ieee80211_sta_add_training(unsigned long arg)
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)arg;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	int i;
+
+	spin_lock(&ni->ni_lock);
+	if (ni->ni_training_count < vap->iv_rate_training_count) {
+		ni->ni_training_count++;
+		spin_unlock(&ni->ni_lock);
+		mod_timer(&ni->ni_training_timer, jiffies + 1);
+		for (i = 0; i < vap->iv_rate_training_burst_count; ++i) {
+			ieee80211_send_dummy_data(ni, vap, 0);
+		}
+	} else {
+		ni->ni_training_flag = NI_TRAINING_END;
+		spin_unlock(&ni->ni_lock);
+		printk("%s: [%pM] %s ends\n",
+			vap->iv_dev->name, ni->ni_macaddr,
+			ic->ic_ocac.ocac_running ? "T process" : "training");
+		/* balances the ref taken in ieee80211_node_training_start() */
+		ieee80211_free_node(ni);
+	}
+}
+
+/*
+ * Rate detecting - send a bunch of NULL data packets to get
+ * the current Tx rate.
+ *
+ * Timer callback (arg is the node pointer).  Re-arms every 2 jiffies
+ * until tdls_training_pkt_cnt rounds are done, then marks training
+ * finished and drops the node reference taken by
+ * ieee80211_active_training_timer().
+ */
+static void ieee80211_tdls_rate_detection(unsigned long arg)
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)arg;
+	struct ieee80211vap *vap = ni->ni_vap;
+	int i;
+
+	if (ni->ni_training_count < vap->tdls_training_pkt_cnt) {
+		for (i = 0; i < DEFAULT_TDLS_RATE_DETECTION_BURST_CNT; ++i) {
+			ieee80211_send_dummy_data(ni, vap, 0);
+		}
+		ni->ni_training_count++;
+		mod_timer(&ni->ni_training_timer, jiffies + 2);
+	} else {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"[%pM] rate detecting ends\n", ni->ni_macaddr);
+		spin_lock(&ni->ni_lock);
+		ni->ni_training_flag = NI_TRAINING_END;
+		spin_unlock(&ni->ni_lock);
+		ieee80211_free_node(ni);
+	}
+}
+
+/*
+ * Record an association in the fixed-size association history table.
+ * Three passes: refresh the timestamp when the MAC already has a slot,
+ * otherwise take the first unused slot (timestamp <= 0), otherwise
+ * evict the entry with the oldest timestamp.  Nodes without an AID are
+ * ignored.
+ */
+static void ieee80211_add_assoc_record(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+	struct ieee80211_assoc_history	*ah = &ic->ic_assoc_history;
+	struct timeval time;
+	time_t oldest_time = ah->ah_timestamp[0];
+	int i, index = 0;
+
+	if (ni->ni_associd == 0) {
+		return;
+	}
+
+	do_gettimeofday(&time);
+
+	/* pass 1: MAC already recorded - just refresh its timestamp */
+	for (i = 0; i < IEEE80211_MAX_ASSOC_HISTORY; i++) {
+		if (IEEE80211_ADDR_EQ(ah->ah_macaddr_table[i], ni->ni_macaddr)) {
+			ah->ah_timestamp[i] = time.tv_sec;
+
+			return;
+		}
+	}
+
+	/* pass 2: use the first never-used slot */
+	for (i = 0; i < IEEE80211_MAX_ASSOC_HISTORY; i++) {
+		if (ah->ah_timestamp[i] <= 0) {
+			IEEE80211_ADDR_COPY(ah->ah_macaddr_table[i], ni->ni_macaddr);
+			ah->ah_timestamp[i] = time.tv_sec;
+
+			return;
+		}
+	}
+
+	/* pass 3: table full - evict the oldest entry */
+	for (i = 1; i < IEEE80211_MAX_ASSOC_HISTORY; i++) {
+		if (ah->ah_timestamp[i] < oldest_time) {
+			oldest_time = ah->ah_timestamp[i];
+			index = i;
+		}
+	}
+
+	IEEE80211_ADDR_COPY(ah->ah_macaddr_table[index], ni->ni_macaddr);
+	ah->ah_timestamp[index] = time.tv_sec;
+}
+
+/*
+ * Arm the node's training timer with an arbitrary callback after
+ * 'interval' jiffies.  Takes a node reference for the timer (released by
+ * the callback when training ends) and refuses to restart while a
+ * training run is already in progress.
+ */
+void ieee80211_active_training_timer(struct ieee80211_node *ni,
+		void (*call_back)(unsigned long), unsigned int interval)
+{
+	if ((!ni) || (!call_back)) {
+		printk("%s: Get invalid arguments\n", __func__);
+		return;
+	}
+
+	/* Reference for the timer below */
+	spin_lock_bh(&ni->ni_lock);
+	/* If training already started, don't start new training. */
+	if (ni->ni_training_flag == NI_TRAINING_RUNNING) {
+		printk("%s: Rate training is running\n", __func__);
+		spin_unlock_bh(&ni->ni_lock);
+		return;
+	} else {
+		ieee80211_ref_node(ni);
+	}
+	ni->ni_training_count = 0;
+	ni->ni_training_flag = NI_TRAINING_RUNNING;
+	ni->ni_training_start = jiffies + interval;
+	spin_unlock_bh(&ni->ni_lock);
+
+	ni->ni_training_timer.function = call_back;
+	ni->ni_training_timer.data = (unsigned long)ni;
+
+	/*
+	 * We stagger the start of the training for the node to prevent situations
+	 * where all nodes associate immediately, causing out of resource due to
+	 * allocating too many training packets.
+	 */
+	mod_timer(&ni->ni_training_timer, jiffies + interval);
+}
+
+/*
+ * Decide whether (and which kind of) rate training a node needs.
+ * Returns IEEE80211_NODE_TRAINING_NORMAL_MODE for AP<->STA peers,
+ * IEEE80211_NODE_TRAINING_TDLS_MODE for active TDLS peers on a STA,
+ * or 0 when no training applies (non-Quantenna peer, WDS, FPGA/pcap
+ * builds which skip training entirely).
+ */
+static int ieee80211_node_training_required(const struct ieee80211_node *ni)
+{
+	const int skip_training = TOPAZ_FPGA_PLATFORM || QTN_GENPCAP;
+	struct ieee80211vap *vap = ni->ni_vap;
+	uint32_t ret = 0;
+	/* We only do rate training for Q->Q links */
+	if (!skip_training && (ni->ni_qtn_assoc_ie != NULL)
+		&& (vap->iv_opmode != IEEE80211_M_WDS)) {
+		if (((vap->iv_opmode == IEEE80211_M_HOSTAP) && (ni != vap->iv_bss))
+			|| ((vap->iv_opmode == IEEE80211_M_STA) && (ni == vap->iv_bss))) {
+			ret = IEEE80211_NODE_TRAINING_NORMAL_MODE;
+		} else if (((vap->iv_opmode == IEEE80211_M_STA)
+				&& (vap->tdls_discovery_interval == 0)
+				&& IEEE80211_NODE_IS_TDLS_ACTIVE(ni))) {
+			ret = IEEE80211_NODE_TRAINING_TDLS_MODE;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Kick off AP/STA rate training for a node.  Unless 'immediate' is set,
+ * the start is staggered by (AID % 10) seconds so many stations
+ * associating at once don't all allocate training packets at the same
+ * time.  Takes a node reference for the timer unless a run is already
+ * in progress (see comment below).
+ */
+void ieee80211_node_training_start(struct ieee80211_node *ni, int immediate)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	uint32_t training_delay_secs;
+
+	/*
+	 * We stagger the start of the training for the node to prevent situations
+	 * where all nodes associate immediately, causing out of resource due to
+	 * allocating too many training packets.
+	 */
+	training_delay_secs = 1 + (immediate ? 0 : (IEEE80211_NODE_AID(ni) % 10));
+
+	/* Reference for the timer below */
+	spin_lock_bh(&ni->ni_lock);
+
+	/*
+	 * If training already started, don't reference the node again or
+	 * we could leak the node if it's discarded.
+	 *
+	 * This happens on the STA side as the BSS node is retained longer
+	 * than on the AP side.
+	 */
+	if (ni->ni_training_flag != NI_TRAINING_RUNNING) {
+		ieee80211_ref_node(ni);
+	}
+	ni->ni_training_count = 0;
+	ni->ni_training_flag = NI_TRAINING_RUNNING;
+	spin_unlock_bh(&ni->ni_lock);
+
+	ni->ni_training_timer.function = ieee80211_sta_add_training;
+	ni->ni_training_timer.data = (unsigned long)ni;
+
+	mod_timer(&ni->ni_training_timer, jiffies + (training_delay_secs * HZ));
+	printk("%s: [%pM] %s starts in %d seconds, idx %u\n",
+			ni->ni_vap->iv_dev->name, ni->ni_macaddr,
+			ic->ic_ocac.ocac_running ? "T process" : "training",
+			training_delay_secs,
+			IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx));
+}
+
+/*
+ * Core of node authorization: record the association, set the AUTH
+ * flag, switch the inactivity timeout to the "run" value, notify the
+ * driver of the auth state change and start whichever rate training
+ * mode the node requires (if any).
+ */
+void
+_ieee80211_node_authorize(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap= ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	uint32_t ret;
+
+	ieee80211_add_assoc_record(ic, ni);
+
+	ni->ni_flags |= IEEE80211_NODE_AUTH;
+	ni->ni_inact_reload = vap->iv_inact_run;
+	ic->ic_node_auth_state_change(ni, 1);
+
+	ret = ieee80211_node_training_required(ni);
+	if (ret == IEEE80211_NODE_TRAINING_NORMAL_MODE) {
+		ieee80211_node_training_start(ni, 0);
+	} else if (ret == IEEE80211_NODE_TRAINING_TDLS_MODE) {
+		ieee80211_tdls_add_rate_detection(ni);
+	}
+}
+
+/*
+ * Start TDLS rate detection for a peer, provided the link RSSI is good
+ * enough.  The start time is staggered using both the AP's and the
+ * peer's AID so multiple TDLS links don't begin training together.
+ */
+void
+ieee80211_tdls_add_rate_detection(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap= ni->ni_vap;
+	struct ieee80211_node *ap_ni = vap->iv_bss;
+	unsigned int interval;
+	int smthd_rssi;
+
+	if (ap_ni == NULL)
+		return;
+
+	/* skip peers whose smoothed RSSI is below the configured floor */
+	smthd_rssi = ieee80211_tdls_get_smoothed_rssi(vap, ni);
+	if (smthd_rssi < vap->tdls_min_valid_rssi)
+		return;
+
+	interval = HZ + (HZ * IEEE80211_NODE_AID(ap_ni) % 5) +
+		(HZ / 2 * (IEEE80211_NODE_AID(ni) % 10));
+
+	ieee80211_active_training_timer(ni, ieee80211_tdls_rate_detection, interval);
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+		"TDLS %s: Rate detection starts for %s AID %d at %lu\n", __func__,
+		ether_sprintf(ni->ni_macaddr), IEEE80211_NODE_AID(ni), jiffies + interval);
+}
+EXPORT_SYMBOL(ieee80211_tdls_add_rate_detection);
+
+/*
+ * Port authorize/unauthorize interfaces for use by an authenticator.
+ *
+ * Authorize the node, then emit a "client/AP connected" event for
+ * open/shared authentication modes (WPA-style modes are reported
+ * elsewhere).
+ */
+void
+ieee80211_node_authorize(struct ieee80211_node *ni)
+{
+	int msg = IEEE80211_DOT11_MSG_CLIENT_CONNECTED;
+
+	_ieee80211_node_authorize(ni);
+
+	/* on a STA the peer is the AP, so report the AP-connected message */
+	if (ni->ni_vap->iv_opmode == IEEE80211_M_STA) {
+		msg = IEEE80211_DOT11_MSG_AP_CONNECTED;
+	}
+
+	if (ni->ni_authmode <= IEEE80211_AUTH_SHARED) {
+		ieee80211_eventf(ni->ni_vap->iv_dev, QEVT_COMMON_PREFIX" %s ["DBGMAC"] [%s/%s] SSID %s",
+					d11_m[msg],
+					ETHERFMT(ni->ni_bssid),
+					ni->ni_authmode <= IEEE80211_AUTH_OPEN ? "OPEN" : "SHARED",
+					"NONE",
+					ni->ni_essid);
+	}
+}
+EXPORT_SYMBOL(ieee80211_node_authorize);
+
+/*
+ * Revoke a node's authorization: bump the VAP disconnect counter for
+ * genuine peers (not the VAP's own address), clear the AUTH flag and
+ * notify the driver of the state change.
+ */
+void
+ieee80211_node_unauthorize(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (ieee80211_node_is_authorized(ni) &&
+			!IEEE80211_ADDR_EQ(vap->iv_myaddr, ni->ni_macaddr))
+		vap->iv_disconn_cnt++;
+
+	ni->ni_flags &= ~IEEE80211_NODE_AUTH;
+	ic->ic_node_auth_state_change(ni, 0);
+}
+EXPORT_SYMBOL(ieee80211_node_unauthorize);
+
+/*
+ * Set/change the channel.  The rate set is also updated
+ * to ensure a consistent view by drivers.
+ *
+ * The node inherits ic_bsschan; the rate set is picked from the
+ * half/quarter-rate tables for half/quarter-width channels, otherwise
+ * from the current PHY mode's rate table.
+ */
+void
+ieee80211_node_set_chan(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+	struct ieee80211_channel *chan = ic->ic_bsschan;
+
+	KASSERT(chan != IEEE80211_CHAN_ANYC, ("BSS channel not set up"));
+	ni->ni_chan = chan;
+
+	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) {
+		ni->ni_rates = ic->ic_sup_half_rates;
+	} else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) {
+		ni->ni_rates = ic->ic_sup_quarter_rates;
+	} else {
+		ni->ni_rates = ic->ic_sup_rates[ic->ic_curmode];
+	}
+}
+
+/*
+ * Copy the administrative state worth keeping from an old BSS node to
+ * its replacement (auth mode, Atheros flags, tx power, VLAN, RSN
+ * parameters).
+ */
+static __inline void
+copy_bss(struct ieee80211_node *nbss, const struct ieee80211_node *obss)
+{
+	/* propagate useful state */
+	nbss->ni_authmode = obss->ni_authmode;
+	nbss->ni_ath_flags = obss->ni_ath_flags;
+	nbss->ni_txpower = obss->ni_txpower;
+	nbss->ni_vlan = obss->ni_vlan;
+	nbss->ni_rsn = obss->ni_rsn;
+	/* XXX statistics? */
+}
+
+/*
+ * Create an IBSS or BSS.
+ *
+ * Finds or allocates the VAP's own node, seeds its identity (BSSID,
+ * SSID, capabilities, node type), clears stale DFS CAC state on the
+ * channel being left (non-EU regulatory domains only), then fixes the
+ * BSS channel and joins.  WDS VAPs only backfill ic_bsschan if it is
+ * still unset.
+ */
+void
+ieee80211_create_bss(struct ieee80211vap* vap, struct ieee80211_channel *chan)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211_channel *previous_chann = NULL;
+
+	if (vap->iv_opmode == IEEE80211_M_WDS) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+			"%s: skipping for WDS mode\n",
+			__func__);
+
+		if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
+			ni = ieee80211_find_node(&ic->ic_sta, vap->iv_myaddr);
+			if (!ni)
+				return;
+
+			/* Ensure ic_bsschan is set if WDS has been created first */
+			ic->ic_bsschan = chan;
+			ieee80211_node_set_chan(ic, ni);
+			ieee80211_free_node(ni);
+		}
+		return;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+		"%s: creating on channel %u\n", __func__,
+		ieee80211_chan2ieee(ic, chan));
+
+	/* Check to see if we already have a node for this mac */
+	ni = ieee80211_find_node(&ic->ic_sta, vap->iv_myaddr);
+	if (ni == NULL) {
+		ni = ieee80211_alloc_node(&ic->ic_sta, vap, vap->iv_myaddr, "bss create");
+	}
+
+	KASSERT(ni != NULL, ("unable to find or create BSS node"));
+
+	ni->ni_node_idx = vap->iv_vapnode_idx;
+
+	ni->ni_vhtop.chanwidth = ic->ic_vhtop.chanwidth;
+
+	IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_myaddr);
+	ni->ni_esslen = vap->iv_des_ssid[0].len;
+	memcpy(ni->ni_essid, vap->iv_des_ssid[0].ssid, ni->ni_esslen);
+	if (vap->iv_bss != NULL)
+		copy_bss(ni, vap->iv_bss);
+	ni->ni_intval = ic->ic_lintval;
+
+	if (vap->iv_flags & IEEE80211_F_PRIVACY)
+		ni->ni_capinfo |= IEEE80211_CAPINFO_PRIVACY;
+	if (ic->ic_phytype == IEEE80211_T_FH) {
+		ni->ni_fhdwell = 200;	/* XXX */
+		ni->ni_fhindex = 1;
+	}
+	if (vap->iv_opmode == IEEE80211_M_IBSS) {
+		vap->iv_flags |= IEEE80211_F_SIBSS;
+		ni->ni_capinfo |= IEEE80211_CAPINFO_IBSS;	/* XXX */
+		if (vap->iv_flags & IEEE80211_F_DESBSSID)
+			IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_des_bssid);
+		else
+			ni->ni_bssid[0] |= 0x02;	/* local bit for IBSS */
+	} else if (vap->iv_opmode == IEEE80211_M_AHDEMO) {
+		if (vap->iv_flags & IEEE80211_F_DESBSSID)
+		    IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_des_bssid);
+		else {
+		    ni->ni_bssid[0] = 0x00;
+		    ni->ni_bssid[1] = 0x00;
+		    ni->ni_bssid[2] = 0x00;
+		    ni->ni_bssid[3] = 0x00;
+		    ni->ni_bssid[4] = 0x00;
+		    ni->ni_bssid[5] = 0x00;
+		}
+	}
+
+	ni->ni_vendor = PEER_VENDOR_QTN;
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		ni->ni_node_type = IEEE80211_NODE_TYPE_VAP;
+	else if (vap->iv_opmode == IEEE80211_M_STA)
+		ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
+
+	/* clear DFS CAC state on previous channel */
+	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+			ic->ic_bsschan->ic_freq != chan->ic_freq &&
+			IEEE80211_IS_CHAN_CACDONE(ic->ic_bsschan)) {
+
+		/*
+		 * IEEE80211_CHAN_DFS_CAC_DONE indicates whether or not to do CAC afresh.
+		 * US   : IEEE80211_CHAN_DFS_CAC_DONE shall be cleared whenver we move to
+		 *        a different channel
+		 * ETSI : IEEE80211_CHAN_DFS_CAC_DONE shall be retained; Only event which
+		 *        would mark the channel as unusable is the radar indication
+		 */
+		if (ic->ic_dfs_is_eu_region() == false) {
+			previous_chann = ic->ic_bsschan;
+			previous_chann->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+			if (ic->ic_mark_channel_dfs_cac_status) {
+				ic->ic_mark_channel_dfs_cac_status(ic, previous_chann, IEEE80211_CHAN_DFS_CAC_DONE, false);
+				ic->ic_mark_channel_dfs_cac_status(ic, previous_chann, IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, false);
+			}
+			/* Mark the channel as not_available and ready for cac */
+			if (ic->ic_mark_channel_availability_status) {
+				ic->ic_mark_channel_availability_status(ic, previous_chann,
+						IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED);
+			}
+			printk(KERN_DEBUG "ieee80211_create_bss:"
+					"Clearing CAC_DONE Status for chan %d\n",
+					previous_chann->ic_ieee);
+		}
+	}
+
+	/*
+	 * Fix the channel and related attributes.
+	 * If a channel change was initiated, ieee80211_channel_switch_post will handle it
+	 */
+	ic->ic_bsschan = chan;
+	ieee80211_node_set_chan(ic, ni);
+
+	ieee80211_sta_join1(vap, ni, 0);
+
+	/* drop the find/alloc reference; sta_join1 keeps its own */
+	ieee80211_free_node(ni);
+}
+EXPORT_SYMBOL(ieee80211_create_bss);
+
+/*
+ * Reset bss state on transition to the INIT state.
+ * Clear any stations from the table (they have been
+ * deauth'd) and reset the bss node (clears key, rate,
+ * etc. state).
+ *
+ * The old BSS node is held across the table reset so its useful
+ * settings can be copied into the freshly allocated replacement.
+ * WDS VAPs simply have iv_bss cleared.
+ */
+void
+ieee80211_reset_bss(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *obss = vap->iv_bss;
+	struct ieee80211_node *ni;
+
+	if (vap->iv_opmode == IEEE80211_M_WDS) {
+		vap->iv_bss = NULL;
+		return;
+	}
+
+	/* keep the old BSS node alive across the table reset */
+	if (obss != NULL) {
+		ieee80211_ref_node(obss);
+	}
+
+	ieee80211_node_table_reset(ic, &ic->ic_sta, vap);
+	ieee80211_reset_erp(ic, ic->ic_curmode);
+
+	ni = ieee80211_alloc_node(&ic->ic_sta, vap, vap->iv_myaddr, "bss reset");
+	KASSERT(ni != NULL, ("unable to set up BSS node"));
+
+	ni->ni_vendor = PEER_VENDOR_QTN;
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		ni->ni_node_type = IEEE80211_NODE_TYPE_VAP;
+	else if (vap->iv_opmode == IEEE80211_M_STA)
+		ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
+
+	vap->iv_bss = ni;
+	IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_myaddr);
+
+	if (obss != NULL) {
+		copy_bss(ni, obss);
+		ni->ni_intval = ic->ic_lintval;
+		ieee80211_free_node(obss);
+	}
+
+	ieee80211_free_node(ni);
+}
+
+/*
+ * Return 1 if the node's ESSID exactly matches (length and bytes) any
+ * of the given desired SSIDs, 0 otherwise (including for a NULL node).
+ */
+static int
+match_ssid(const struct ieee80211_node *ni,
+	int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+	int i;
+
+	if (!ni)
+		return 0;
+
+	for (i = 0; i < nssid; i++) {
+		if (ni->ni_esslen == ssids[i].len &&
+		    memcmp(ni->ni_essid, ssids[i].ssid, ni->ni_esslen) == 0)
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Test a node for suitability/compatibility.
+ *
+ * Returns 1 when the candidate BSS is acceptable for this VAP: valid
+ * and active channel, matching ESS/IBSS capability, privacy setting
+ * consistent with our IEEE80211_F_PRIVACY flag, all basic rates
+ * negotiable, and SSID/BSSID matching any configured constraints.
+ * check_bss_debug() below mirrors these tests and must be kept in sync.
+ */
+static int
+check_bss(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	uint8_t rate;
+	unsigned int chan;
+
+	chan = ieee80211_chan2ieee(ic, ni->ni_chan);
+	if (unlikely(chan == IEEE80211_CHAN_ANY))
+		return 0;
+
+	if (isclr(ic->ic_chan_active, chan))
+		return 0;
+	if (vap->iv_opmode == IEEE80211_M_IBSS) {
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
+			return 0;
+	} else {
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0)
+			return 0;
+	}
+	if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
+			return 0;
+	} else {
+		/* Reference: IEEE802.11 7.3.1.4
+		 * This means that the data confidentiality service is required
+		 * for all frames exchanged with this STA  in IBSS and for all
+		 * frames exchanged within the entire BSS otherwise
+		 */
+
+		if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY)
+			return 0;
+	}
+	/* a remaining basic rate we cannot negotiate disqualifies the BSS */
+	rate = ieee80211_fix_rate(ni, IEEE80211_F_DONEGO | IEEE80211_F_DOFRATE);
+	if (rate & IEEE80211_RATE_BASIC)
+		return 0;
+	if (vap->iv_des_nssid != 0 &&
+	    !match_ssid(ni, vap->iv_des_nssid, vap->iv_des_ssid))
+		return 0;
+	if ((vap->iv_flags & IEEE80211_F_DESBSSID) &&
+	    !IEEE80211_ADDR_EQ(vap->iv_des_bssid, ni->ni_bssid))
+		return 0;
+	return 1;
+}
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Display node suitability/compatibility.
+ *
+ * Debug-only mirror of check_bss(): runs the same tests but collects
+ * each failure as a bit in 'fail' (0x01 channel, 0x02 ess/ibss,
+ * 0x04 privacy, 0x08 basic rate, 0x10 SSID, 0x20 BSSID) and prints a
+ * one-line summary with '!' marking each failed field.  Keep the tests
+ * in sync with check_bss().
+ */
+static void
+check_bss_debug(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	uint8_t rate;
+	unsigned int chan;
+	int fail;
+
+	chan = ieee80211_chan2ieee(ic, ni->ni_chan);
+
+	fail = 0;
+	if (chan == IEEE80211_CHAN_ANY || isclr(ic->ic_chan_active, chan))
+		fail |= 0x01;
+	if (vap->iv_opmode == IEEE80211_M_IBSS) {
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
+			fail |= 0x02;
+	} else {
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0)
+			fail |= 0x02;
+	}
+	if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
+			fail |= 0x04;
+	} else {
+		/* This means that the data confidentiality service is required
+		 * for all frames exchanged within this BSS. (IEEE802.11 7.3.1.4)
+		 */
+		if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY)
+			fail |= 0x04;
+	}
+	rate = ieee80211_fix_rate(ni, IEEE80211_F_DONEGO | IEEE80211_F_DOFRATE);
+	if (rate & IEEE80211_RATE_BASIC)
+		fail |= 0x08;
+	if (vap->iv_des_nssid != 0 &&
+	    !match_ssid(ni, vap->iv_des_nssid, vap->iv_des_ssid))
+		fail |= 0x10;
+	if ((vap->iv_flags & IEEE80211_F_DESBSSID) &&
+	    !IEEE80211_ADDR_EQ(vap->iv_des_bssid, ni->ni_bssid))
+		fail |= 0x20;
+
+	printk(" %c %s", fail ? '-' : '+', ether_sprintf(ni->ni_macaddr));
+	printk(" %s%c", ether_sprintf(ni->ni_bssid), fail & 0x20 ? '!' : ' ');
+	printk(" %3d%c",
+		chan, fail & 0x01 ? '!' : ' ');
+	printk(" %+4d", ni->ni_rssi);
+	printk(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2,
+		fail & 0x08 ? '!' : ' ');
+	printk(" %4s%c",
+		(ni->ni_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" :
+			(ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" :
+				"????",
+			fail & 0x02 ? '!' : ' ');
+	printk(" %3s%c ",
+		(ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) ?  "wep" : "no",
+		fail & 0x04 ? '!' : ' ');
+	ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
+	printk("%s\n", fail & 0x10 ? "!" : "");
+}
+#endif /* IEEE80211_DEBUG */
+
+/*
+ * Handle 802.11 ad hoc network merge.  The
+ * convention, set by the Wireless Ethernet Compatibility Alliance
+ * (WECA), is that an 802.11 station will change its BSSID to match
+ * the "oldest" 802.11 ad hoc network, on the same channel, that
+ * has the station's desired SSID.  The "oldest" 802.11 network
+ * sends beacons with the greatest TSF timestamp.
+ *
+ * The caller is assumed to validate TSF's before attempting a merge.
+ *
+ * Return !0 if the BSSID changed, 0 otherwise.
+ */
+int
+ieee80211_ibss_merge(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+
+	/* Already our bss node, or same BSSID: nothing to merge */
+	if (ni == vap->iv_bss)
+		return 0;
+	if (IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid))
+		return 0;
+
+	if (!check_bss(vap, ni)) {
+		/* capabilities mismatch */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+		    "%s: merge failed, capabilities mismatch\n", __func__);
+#ifdef IEEE80211_DEBUG
+		if (ieee80211_msg_assoc(vap))
+			check_bss_debug(vap, ni);
+#endif
+		vap->iv_stats.is_ibss_capmismatch++;
+		return 0;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+		"%s: new bssid %s: %s preamble, %s slot time%s\n", __func__,
+		ether_sprintf(ni->ni_bssid),
+		ic->ic_flags & IEEE80211_F_SHPREAMBLE ? "short" : "long",
+		ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long",
+		ic->ic_flags & IEEE80211_F_USEPROT ? ", protection" : "");
+
+	/* sta_join1 assumes a reference is held for the iv_bss assignment */
+	ieee80211_ref_node(ni);
+
+	ieee80211_sta_join1(vap, ni, 0);
+
+	return 1;
+}
+EXPORT_SYMBOL(ieee80211_ibss_merge);
+
+/* Two nodes carry the same SSID iff both lengths and bytes match. */
+static __inline int
+ssid_equal(const struct ieee80211_node *a, const struct ieee80211_node *b)
+{
+	if (a == NULL || b == NULL)
+		return 0;
+	if (a->ni_esslen != b->ni_esslen)
+		return 0;
+	return memcmp(a->ni_essid, b->ni_essid, a->ni_esslen) == 0;
+}
+
+/*
+ * Load the active channel list from the per-bandwidth list matching
+ * the given bandwidth; anything other than 80/40 falls back to the
+ * 20 MHz list.
+ */
+void ieee80211_update_active_chanlist(struct ieee80211com *ic, int bw)
+{
+	const void *src;
+
+	COMPILE_TIME_ASSERT(sizeof(ic->ic_chan_active) == sizeof(ic->ic_chan_active_80));
+	COMPILE_TIME_ASSERT(sizeof(ic->ic_chan_active) == sizeof(ic->ic_chan_active_40));
+	COMPILE_TIME_ASSERT(sizeof(ic->ic_chan_active) == sizeof(ic->ic_chan_active_20));
+
+	if (bw == BW_HT80)
+		src = ic->ic_chan_active_80;
+	else if (bw == BW_HT40)
+		src = ic->ic_chan_active_40;
+	else
+		src = ic->ic_chan_active_20;
+
+	memcpy(ic->ic_chan_active, src, sizeof(ic->ic_chan_active));
+}
+
+/*
+ * Account for a newly joined non-Quantenna station: bump the per-vap
+ * and per-device counters and re-evaluate control frame reception.
+ * Quantenna peers are not counted.
+ */
+void ieee80211_nonqtn_sta_join(struct ieee80211vap *vap,
+		struct ieee80211_node *ni, const char *caller)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (ieee80211_node_is_qtn(ni))
+		return;
+
+	vap->iv_non_qtn_sta_assoc++;
+	ic->ic_nonqtn_sta++;
+	ieee80211_set_recv_ctrlpkts(vap);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+		"%s - increases counter ic_nonqtn_sta %u, iv_non_qtn_sta_assoc %u\n",
+		caller, ic->ic_nonqtn_sta, vap->iv_non_qtn_sta_assoc);
+}
+
+/*
+ * Account for a departing non-Quantenna station: decrement the
+ * per-vap and per-device counters (warning on underflow) and
+ * re-evaluate control frame reception.  Quantenna peers are ignored.
+ */
+void ieee80211_nonqtn_sta_leave(struct ieee80211vap *vap,
+		struct ieee80211_node *ni, const char *caller)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (ieee80211_node_is_qtn(ni))
+		return;
+
+	/* Counters should never underflow; warn if already zero */
+	WARN_ON(ic->ic_nonqtn_sta == 0);
+	WARN_ON(vap->iv_non_qtn_sta_assoc == 0);
+
+	if (ic->ic_nonqtn_sta > 0)
+		ic->ic_nonqtn_sta--;
+	if (vap->iv_non_qtn_sta_assoc > 0)
+		vap->iv_non_qtn_sta_assoc--;
+	ieee80211_set_recv_ctrlpkts(vap);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+		"%s - reduces counter ic_nonqtn_sta %u, iv_non_qtn_sta_assoc %u\n",
+		caller, ic->ic_nonqtn_sta, vap->iv_non_qtn_sta_assoc);
+}
+
+/*
+ * Clamp the operating bandwidth ahead of (re)association on a STA vap.
+ *
+ * The target bandwidth starts as the minimum of the system maximum,
+ * the channel maximum and the BSS bandwidth, then is further reduced
+ * by what the current phy mode supports.  If the result differs from
+ * the current capability bandwidth it is applied via
+ * ieee80211_change_bw().  No-op for non-STA vaps.
+ */
+static void
+ieee80211_reset_bw(struct ieee80211vap *vap, struct ieee80211com *ic, int channel, int delay_ch_switch)
+{
+	int bw;
+	int max_channel_bw;
+	int cur_bw = ieee80211_get_cap_bw(ic);
+
+	if (vap->iv_opmode != IEEE80211_M_STA) {
+		return;
+	}
+
+	max_channel_bw = ieee80211_get_max_channel_bw(ic, channel);
+	bw = MIN(MIN(ic->ic_max_system_bw, max_channel_bw), ic->ic_bss_bw);
+
+	/* 80 MHz requires VHT to be enabled */
+	if (bw >= BW_HT80 && !IS_IEEE80211_VHT_ENABLED(ic)) {
+		bw = BW_HT40;
+	}
+
+	/* ...and the VHT software feature to be supported on this device */
+	if ((bw > BW_HT40) && !ieee80211_swfeat_is_supported(SWFEAT_ID_VHT, 1)) {
+		printk("BW %d is not supported on this device\n", bw);
+		bw = BW_HT40;
+	}
+
+	/* Legacy 11a is limited to 20 MHz */
+	if ((bw >= BW_HT40) && (ic->ic_curmode == IEEE80211_MODE_11A)) {
+		bw = BW_HT20;
+	}
+
+	/* 2.4 GHz legacy and plain 11ng modes are limited to 20 MHz */
+	if ((ic->ic_curmode == IEEE80211_MODE_11B) ||
+			(ic->ic_curmode == IEEE80211_MODE_11G) ||
+				(ic->ic_curmode == IEEE80211_MODE_11NG)) {
+		bw = BW_HT20;
+	}
+
+	/* 11ng HT40+/- is pinned at 40 MHz regardless of earlier clamps */
+	if (ic->ic_curmode == IEEE80211_MODE_11NG_HT40PM) {
+		bw = BW_HT40;
+	}
+
+	if (bw == cur_bw)
+		return;
+
+	ieee80211_change_bw(vap, bw, delay_ch_switch);
+}
+
+/*
+ * Restore bandwidth to initial state.
+ *
+ * Starts from the system maximum bandwidth, clamps it to what the
+ * current phy mode can support (no 80 MHz without VHT, 20 MHz for
+ * legacy and plain 11n modes), records the result as the BSS
+ * bandwidth and applies it when it differs from the current setting.
+ * Only meaningful for STA vaps.
+ */
+void
+ieee80211_restore_bw(struct ieee80211vap *vap, struct ieee80211com *ic)
+{
+	int cur_bw = ieee80211_get_cap_bw(ic);
+	int bw = ic->ic_max_system_bw;
+
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return;
+
+	/* 80 MHz needs both VHT enabled and the VHT software feature */
+	if ((bw >= BW_HT80) &&
+			(!IS_IEEE80211_VHT_ENABLED(ic) ||
+			!ieee80211_swfeat_is_supported(SWFEAT_ID_VHT, 1)))
+		bw = BW_HT40;
+
+	if (bw >= BW_HT40) {
+		switch (ic->ic_curmode) {
+		case IEEE80211_MODE_11A:
+		case IEEE80211_MODE_11B:
+		case IEEE80211_MODE_11G:
+		case IEEE80211_MODE_11NG:
+		case IEEE80211_MODE_11NA:
+			/* Legacy and 20 MHz-only 11n modes */
+			bw = BW_HT20;
+			break;
+		default:
+			break;
+		}
+	}
+
+	ic->ic_bss_bw = bw;
+
+	if (bw != cur_bw)
+		ieee80211_change_bw(vap, bw, 0);
+}
+
+/*
+ * Set phymode, bw and vht params specific to band
+ *
+ * Applies the per-band profile (5 GHz vs 2.4 GHz, chosen from the
+ * channel of the BSS being joined) to the device: desired/current
+ * phy mode, bandwidth and - for 5 GHz - the VHT flag, then pushes
+ * the settings to qdrv and re-enables AMSDU and aggregation.
+ */
+void
+configure_phy_mode(struct ieee80211vap *vap, struct ieee80211_node *selbs)
+{
+	struct ieee80211com *ic = selbs->ni_ic;
+	int aggr = 1;	/* AMSDU/aggregation re-enabled at the end */
+
+	if (selbs->ni_chan->ic_ieee > QTN_2G_LAST_OPERATING_CHAN) {
+		/* 5 GHz: map the profile's phy mode to mode/bw/vht */
+		if (vap->iv_5ghz_prof.phy_mode == IEEE80211_MODE_11NA) {
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11NA;
+			vap->iv_5ghz_prof.bw = BW_HT20;
+			vap->iv_5ghz_prof.vht = 0;
+		} else if (vap->iv_5ghz_prof.phy_mode == IEEE80211_MODE_11NA_HT40PM) {
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11NA_HT40PM;
+			vap->iv_5ghz_prof.bw = BW_HT40;
+			vap->iv_5ghz_prof.vht = 0;
+		} else if (vap->iv_5ghz_prof.phy_mode == IEEE80211_MODE_11AC_VHT20PM) {
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11AC_VHT20PM;
+			vap->iv_5ghz_prof.bw = BW_HT20;
+			vap->iv_5ghz_prof.vht = 1;
+		} else if (vap->iv_5ghz_prof.phy_mode == IEEE80211_MODE_11AC_VHT40PM) {
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11AC_VHT40PM;
+			vap->iv_5ghz_prof.bw = BW_HT40;
+			vap->iv_5ghz_prof.vht = 1;
+		} else {
+			/* Default 5 GHz profile: 11ac VHT80 */
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11AC_VHT80PM;
+			vap->iv_5ghz_prof.bw = BW_HT80;
+			vap->iv_5ghz_prof.vht = 1;
+		}
+
+		ieee80211_setmode(ic, ic->ic_des_mode);
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_CONFIG;
+		ieee80211_change_bw(vap, vap->iv_5ghz_prof.bw, 0);
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_MODE, vap->iv_5ghz_prof.vht, NULL, 0);
+	} else {
+		/* 2.4 GHz: no VHT; pick mode/bw from the 2.4 GHz profile */
+		if (vap->iv_2_4ghz_prof.phy_mode == IEEE80211_MODE_11B) {
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11B;
+			vap->iv_2_4ghz_prof.bw = BW_HT20;
+		} else if (vap->iv_2_4ghz_prof.phy_mode == IEEE80211_MODE_11G) {
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11G;
+			vap->iv_2_4ghz_prof.bw = BW_HT20;
+		} else  if (vap->iv_2_4ghz_prof.phy_mode == IEEE80211_MODE_11NG) {
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11NG;
+			vap->iv_2_4ghz_prof.bw = BW_HT20;
+		} else {
+			/* Default 2.4 GHz profile: 11ng HT40+/- */
+			ic->ic_des_mode = ic->ic_phymode = IEEE80211_MODE_11NG_HT40PM;
+			vap->iv_2_4ghz_prof.bw = BW_HT40;
+		}
+
+		ieee80211_setmode(ic, ic->ic_des_mode);
+		ieee80211_start_obss_scan_timer(vap);
+
+		ieee80211_change_bw(vap, vap->iv_2_4ghz_prof.bw, 0);
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PHY_MODE, ic->ic_phymode, NULL, 0);
+	}
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_TX_AMSDU, aggr, NULL, 0);
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_AGGREGATION, aggr, NULL, 0);
+}
+
+/*
+ * CSA completion hook for a repeater STA interface: finish the
+ * channel switch, then resume the join state machine that
+ * ieee80211_sta_join1() left pending while the CSA was running.
+ */
+static void ieee80211_repeater_csa_finish(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	struct ieee80211vap *first_vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	KASSERT((first_vap->iv_opmode == IEEE80211_M_STA
+		&& (ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)), ("Should only be called for repeater STA interface"));
+
+	ieee80211_finish_csa(arg);
+
+	/* finish the undone job left by ieee80211_sta_join1 */
+	IEEE80211_SCHEDULE_TQUEUE(&first_vap->iv_stajoin1tq);
+}
+
+/*
+ * Join the specified IBSS/BSS network.  The node is assumed to
+ * be passed in with a reference already held for use in assigning
+ * to iv_bss.
+ *
+ * Tears down any previous BSS association, installs the selected
+ * node as the new bss, switches channel (via CSA when operating as a
+ * repeater STA) and finally selects and schedules the next state
+ * machine transition (AUTH/ASSOC/RUN) on the stajoin1 task queue.
+ */
+void
+ieee80211_sta_join1(struct ieee80211vap *vap, struct ieee80211_node *selbs, int reauth)
+{
+	struct ieee80211com *ic = selbs->ni_ic;
+	struct ieee80211_node *obss = vap->iv_bss;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211com *ic_sta = vap->iv_ic;
+	int canreassoc;
+	int cur_band, new_band;
+	u_int8_t bridge_mode_changed;
+	/* Repeater STA announces the channel change via CSA first */
+	int start_csa = (vap->iv_opmode == IEEE80211_M_STA
+			&& (ic->ic_flags_ext & IEEE80211_FEXT_REPEATER));
+
+	cur_band = new_band = 0;
+	if (vap->iv_opmode == IEEE80211_M_IBSS) {
+		/*
+		 * Delete unusable rates; we've already checked
+		 * that the negotiated rate set is acceptable.
+		 */
+		ieee80211_fix_rate(selbs, IEEE80211_F_DODEL);
+		ieee80211_fix_ht_rate(selbs, IEEE80211_F_DODEL);
+	}
+
+	/* Set bridge mode according to AP's capabilities and local config */
+	bridge_mode_changed = ieee80211_bridgemode_set(vap, 0);
+
+	/*
+	 * Check if old+new node have the same ssid in which
+	 * case we can reassociate when operating in sta mode.
+	 */
+	canreassoc = (obss != NULL &&
+			vap->iv_state >= IEEE80211_S_ASSOC &&
+			ssid_equal(obss, selbs) &&
+			!bridge_mode_changed);
+
+	/* Politely leave the old BSS before switching over */
+	if ((obss != NULL) && (obss != selbs) &&
+			(vap->iv_state >= IEEE80211_S_ASSOC)) {
+#if defined(QBMPS_ENABLE)
+		if ((ic->ic_flags_qtn & IEEE80211_QTN_BMPS) &&
+		    (vap->iv_opmode == IEEE80211_M_STA)) {
+			/* exit power-saving */
+	                ic->ic_pm_reason = IEEE80211_PM_LEVEL_JOIN_BSS;
+			ieee80211_pm_queue_work(ic);
+		}
+#endif
+		IEEE80211_SEND_MGMT(obss,
+				IEEE80211_FC0_SUBTYPE_DISASSOC,
+				IEEE80211_REASON_ASSOC_LEAVE);
+		/*
+		 * Ensure that the deauth/disassoc frame is sent
+		 * before the node is deleted.
+		 */
+		ieee80211_safe_wait_ms(50, !in_interrupt());
+	}
+
+	vap->iv_bss = selbs;
+	/* Reclaim the old bss node now that iv_bss points elsewhere */
+	if ((obss != NULL) && (obss != selbs)) {
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			ic->ic_new_assoc(selbs);
+		}
+		ic->ic_disassoc(obss);
+		IEEE80211_NODE_LOCK_IRQ(nt);
+		ieee80211_node_reclaim(nt, obss);
+		IEEE80211_NODE_UNLOCK_IRQ(nt);
+	}
+
+	/* Update secondary channel offset base on htop */
+	if (selbs->ni_chan->ic_ieee <= QTN_2G_LAST_OPERATING_CHAN)
+		ieee80211_update_sec_chan_offset(selbs->ni_chan, selbs->ni_htinfo.choffset);
+
+	/* check if required to update phymode and bandwidth */
+	if (ic->ic_rf_chipid == CHIPID_DUAL) {
+		/* Dual-band RF: reconfigure the phy mode on a band change */
+		if (selbs->ni_chan->ic_ieee > QTN_2G_LAST_OPERATING_CHAN)
+			new_band = FREQ_5_GHZ;
+		else
+			new_band = FREQ_2_4_GHZ;
+		if (ic_sta->ic_curchan->ic_ieee > QTN_2G_LAST_OPERATING_CHAN)
+			cur_band = FREQ_5_GHZ;
+		else
+			cur_band = FREQ_2_4_GHZ;
+
+		if (cur_band != new_band)
+			configure_phy_mode(vap, selbs);
+	}
+
+	ic->ic_bsschan = selbs->ni_chan;
+	ic->ic_curchan = selbs->ni_chan;
+	ic->ic_fast_reass_chan = selbs->ni_chan;
+	ic->ic_fast_reass_scan_cnt = 0;
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE | IEEE80211_MSG_SCAN,
+		"Fast reassoc chan=%u fast=%u state=%u\n",
+		(ic->ic_fast_reass_chan && (ic->ic_fast_reass_chan != IEEE80211_CHAN_ANYC)) ?
+			ic->ic_fast_reass_chan->ic_ieee : 0,
+			canreassoc,
+			vap->iv_state);
+
+	if (start_csa) {
+		/* Repeater: announce the switch via CSA; the completion
+		 * hook (ieee80211_repeater_csa_finish) resumes the join. */
+		ieee80211_cancel_scan_no_wait(vap);
+		ieee80211_enter_csa(ic, ic->ic_curchan,
+			ieee80211_repeater_csa_finish,
+			IEEE80211_CSW_REASON_SCAN,
+			IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT,
+			IEEE80211_CSA_MUST_STOP_TX,
+			IEEE80211_CSA_F_BEACON | IEEE80211_CSA_F_ACTION);
+	} else {
+		ic->ic_set_channel(ic);
+	}
+
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP) ||
+			(vap->iv_opmode == IEEE80211_M_STA))
+		ieee80211_build_countryie(ic);
+
+	/*
+	 * Set the erp state (mostly the slot time) to deal with
+	 * the auto-select case; this should be redundant if the
+	 * mode is locked.
+	 */
+	if (ic->ic_curmode != IEEE80211_MODE_11B) {
+		ieee80211_reset_erp(ic, ic->ic_curmode);
+	}
+
+	if (reauth && vap->iv_opmode == IEEE80211_M_STA) {
+		/*
+		 * Reset the bw before association to minimum of
+		 * max_system_bw and max_channel_bw.
+		 */
+		ieee80211_reset_bw(vap, ic, selbs->ni_chan->ic_ieee, start_csa);
+
+		/*
+		 * Act as if we received a DEAUTH frame in case we are
+		 * invoked from the RUN state.  This will cause us to try
+		 * to re-authenticate if we are operating as a station.
+		 */
+		if (canreassoc) {
+			vap->iv_nsparams.newstate = IEEE80211_S_ASSOC;
+			vap->iv_nsparams.arg = 0;
+		} else {
+			vap->iv_nsparams.newstate = IEEE80211_S_AUTH;
+			vap->iv_nsparams.arg = IEEE80211_FC0_SUBTYPE_DEAUTH;
+		}
+	} else {
+		vap->iv_nsparams.newstate = IEEE80211_S_RUN;
+		vap->iv_nsparams.arg = -1;
+	}
+
+	/* With a CSA pending, the tasklet is scheduled by the CSA
+	 * completion hook instead. */
+	if (!start_csa)
+		IEEE80211_SCHEDULE_TQUEUE(&vap->iv_stajoin1tq);
+}
+
+/*
+ * After an AP vap comes up on a repeater, kick the primary (first)
+ * vap - the STA interface - into SCAN if it is still in INIT so it
+ * can look for an uplink.  Logs when a scan is already in progress.
+ */
+static void ieee80211_bringup_repeater_sta(struct ieee80211com *ic, struct ieee80211vap *ap_vap)
+{
+	struct ieee80211vap *sta_vap;
+
+	if (ap_vap->iv_opmode != IEEE80211_M_HOSTAP)
+		return;
+
+	sta_vap = TAILQ_FIRST(&ic->ic_vaps);
+	if (sta_vap->iv_state != IEEE80211_S_INIT)
+		return;
+	if (!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER))
+		return;
+
+	if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+			|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif
+			)
+		IEEE80211_DPRINTF(sta_vap, IEEE80211_MSG_STATE,
+			"%s:Scan in progress\n",__func__);
+
+	ieee80211_new_state(sta_vap, IEEE80211_S_SCAN, 0);
+}
+
+/*
+ * Deferred half of ieee80211_sta_join1(): drive the state machine to
+ * the state chosen there, re-enable transmission (DFS slave) and, on
+ * a repeater, bring the STA interface up if needed.
+ */
+void
+ieee80211_sta_join1_tasklet(IEEE80211_TQUEUE_ARG data)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *)data;
+
+	vap->iv_nsparams.result = ieee80211_new_state(vap,
+			vap->iv_nsparams.newstate, vap->iv_nsparams.arg);
+	vap->iv_nsdone = 1;
+
+	/* Enable system xmit for DFS slave */
+	vap->iv_ic->ic_sta_set_xmit(true);
+
+	ieee80211_bringup_repeater_sta(vap->iv_ic, vap);
+}
+EXPORT_SYMBOL(ieee80211_sta_join1_tasklet);
+
+extern int
+ieee80211_get_rsn_from_ie(struct ieee80211vap *vap, u_int8_t *frm,
+	struct ieee80211_rsnparms *rsn_parm);
+
+
+/*
+ * Join the BSS described by a scan entry: find or allocate a node
+ * for the AP, populate it from the scan entry (SSID, capabilities,
+ * channel, HT/VHT/WPA/RSN/WME and vendor IEs, rates), derive the
+ * BSS bandwidth, then hand off to ieee80211_sta_join1() to perform
+ * the actual join.  Returns 1 on success, 0 on failure.
+ */
+int
+ieee80211_sta_join(struct ieee80211vap *vap,
+	const struct ieee80211_scan_entry *se)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *bss_ni = vap->iv_bss;
+	struct ieee80211_node *ni;
+	uint8_t bss_bw;
+
+	/* Fall back to the first vap's bss node when this vap has none */
+	if (!bss_ni) {
+		bss_ni = TAILQ_FIRST(&ic->ic_vaps)->iv_bss;
+		if (!bss_ni) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+				"%s: no bss node\n", __func__);
+			return 0;
+		}
+	}
+
+	ni = ieee80211_find_node(&ic->ic_sta, se->se_macaddr);
+	if (ni == NULL) {
+		ni = ieee80211_alloc_node(&ic->ic_sta, vap, se->se_macaddr, "join");
+		if (ni == NULL) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+				"%s: failed to alloc node\n", __func__);
+			return 0;
+		}
+	}
+
+	/* Inherit some params from BSS */
+	ni->ni_authmode = bss_ni->ni_authmode;
+	ni->ni_rsn = bss_ni->ni_rsn;
+
+	/* Expand scan state into node's format */
+	IEEE80211_ADDR_COPY(ni->ni_bssid, se->se_bssid);
+	ni->ni_esslen = se->se_ssid[1];
+	memcpy(ni->ni_essid, se->se_ssid + 2, ni->ni_esslen);
+
+	ni->ni_rstamp = se->se_rstamp;
+	ni->ni_tstamp.tsf = se->se_tstamp.tsf;
+	ni->ni_intval = IEEE80211_BINTVAL_SANITISE(se->se_intval);
+	ni->ni_capinfo = se->se_capinfo;
+	ni->ni_chan = se->se_chan;
+	ni->ni_timoff = se->se_timoff;
+	ni->ni_fhdwell = se->se_fhdwell;
+	ni->ni_fhindex = se->se_fhindex;
+	ni->ni_erp = se->se_erp;
+	ni->ni_rssi = se->se_rssi;
+	ni->ni_node_type = IEEE80211_NODE_TYPE_VAP;
+	ni->tdls_status = IEEE80211_TDLS_NODE_STATUS_NONE;
+	/* Parse HT/VHT capability and operation IEs from the scan entry */
+	if (se->se_htcap_ie != NULL) {
+		ieee80211_parse_htcap(ni, se->se_htcap_ie);
+		ni->ni_flags |= IEEE80211_NODE_HT;
+	} else {
+		ni->ni_flags &= ~IEEE80211_NODE_HT;
+	}
+	if (se->se_htinfo_ie != NULL)
+		ieee80211_parse_htinfo(ni, se->se_htinfo_ie);
+	if (se->se_vhtcap_ie != NULL)
+		ieee80211_parse_vhtcap(ni, se->se_vhtcap_ie);
+	if (se->se_vhtop_ie != NULL)
+		ieee80211_parse_vhtop(ni, se->se_vhtop_ie);
+	/* Preserve security/QoS/vendor IEs on the node for later use */
+	if (se->se_wpa_ie != NULL)
+		ieee80211_saveie(&ni->ni_wpa_ie, se->se_wpa_ie);
+	if (se->se_rsn_ie != NULL)
+		ieee80211_saveie(&ni->ni_rsn_ie, se->se_rsn_ie);
+	if (se->se_wme_ie != NULL)
+		ieee80211_saveie(&ni->ni_wme_ie, se->se_wme_ie);
+	if (se->se_qtn_ie != NULL)
+		ieee80211_saveie(&ni->ni_qtn_assoc_ie, se->se_qtn_ie);
+	if (se->se_ath_ie != NULL)
+		ieee80211_saveath(ni, se->se_ath_ie);
+	ni->ni_ext_role = se->se_ext_role;
+	if (se->se_ext_bssid_ie != NULL)
+		ieee80211_saveie(&ni->ni_ext_bssid_ie, se->se_ext_bssid_ie);
+
+	/*
+	 * Parse and fill the rsn_cap here only. The remaining of rsn params is gotten from bss above
+	 */
+	if(ni->ni_rsn_ie) {
+		struct ieee80211_rsnparms rsn_param;
+		u_int8_t res = 0;
+		memset((u_int8_t*)&rsn_param, 0, sizeof(rsn_param));
+		res = ieee80211_get_rsn_from_ie(vap, ni->ni_rsn_ie, &rsn_param);
+		if(res == 0) {
+			ni->ni_rsn.rsn_caps = rsn_param.rsn_caps;
+			/* Strip MFP bits when PMF is disabled on this vap */
+			if (!vap->iv_pmf) {
+				ni->ni_rsn.rsn_caps &= ~(RSN_CAP_MFP_REQ | RSN_CAP_MFP_CAP);
+			}
+		}
+	}
+
+	vap->iv_qtn_ap_cap = se->se_qtn_ie_flags;
+	vap->iv_is_qtn_dev = se->se_is_qtn_dev;
+	vap->iv_dtim_period = se->se_dtimperiod;
+	vap->iv_dtim_count = 0;
+	vap->iv_local_max_txpow = se->local_max_txpwr;
+
+	/* NB: must be after ni_chan is set up */
+	ieee80211_setup_rates(ni, se->se_rates, se->se_xrates,
+		IEEE80211_F_DOSORT | IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
+
+	/* Derive the BSS bandwidth from the AP's VHT/HT operation IEs */
+	if (IEEE80211_VHTOP_GET_CHANWIDTH(&ni->ni_ie_vhtop)
+			>= IEEE80211_VHTOP_CHAN_WIDTH_80MHZ)
+		bss_bw = BW_HT80;
+	else if (ni->ni_ie_htinfo.hi_byte1 & IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40)
+		bss_bw = BW_HT40;
+	else
+		bss_bw = BW_HT20;
+	ic->ic_bss_bw = MIN(bss_bw, ic->ic_max_system_bw);
+
+	/* Strict-mode STA DFS: mark the target channel as available */
+	if (ic->sta_dfs_info.sta_dfs_strict_mode &&
+		ieee80211_is_chan_not_available(ni->ni_chan) &&
+			ic->ic_mark_channel_availability_status) {
+		ic->ic_mark_channel_availability_status(ic, ni->ni_chan,
+					IEEE80211_CHANNEL_STATUS_AVAILABLE);
+	}
+
+	/* sta_join1 takes over the node; drop our find/alloc reference */
+	ieee80211_sta_join1(vap, ni, 1);
+
+	ieee80211_free_node(ni);
+
+	return 1;
+}
+EXPORT_SYMBOL(ieee80211_sta_join);
+
+/*
+ * Clear a station's AID from the association bitmap; AIDs outside
+ * the LHost node table range have no bitmap slot and are ignored.
+ */
+static __inline void
+ieee80211_clear_aid_bitmap(struct ieee80211com *ic, uint16_t aid)
+{
+	if (IEEE80211_AID(aid) >= QTN_NODE_TBL_SIZE_LHOST)
+		return;
+
+	IEEE80211_AID_CLR(ic, aid);
+}
+
+/*
+ * Release a node's association id: clear it from the AID bitmap,
+ * notify the auth state machine and zero the stored associd.
+ */
+static void
+ieee80211_aid_remove(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+
+	if (ni->ni_associd == 0)
+		return;
+
+	ieee80211_clear_aid_bitmap(ic, ni->ni_associd);
+	ic->ic_node_auth_state_change(ni, 0);
+	ni->ni_associd = 0;
+}
+
+/*
+ * Release a node's device index: drop the reverse-lookup table entry
+ * (only if it still points at this node), unregister the node from
+ * the lower layer and zero the stored index.
+ */
+static void
+ieee80211_idx_remove(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	unsigned int slot;
+
+	if (!ni->ni_node_idx)
+		return;
+
+	slot = IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx);
+	if (ic->ic_node_idx_ni[slot] == ni)
+		ic->ic_node_idx_ni[slot] = NULL;
+
+	ic->ic_unregister_node(ni);
+	ni->ni_node_idx = 0;
+}
+
+/*
+ * Leave the specified IBSS/BSS network.  The node is assumed to
+ * be passed in with a held reference.
+ *
+ * Cancels per-node activity (pending probe queue entries, running
+ * measurements, software beacon-miss timer, training timer, BTM
+ * response timer), removes the node index, cleans up driver state
+ * and notifies listeners that the node has left.
+ */
+void
+ieee80211_sta_leave(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	/* Extra reference balanced by the free at the end of this function */
+	ieee80211_ref_node(ni);
+
+	ieee80211_ppqueue_remove_node_leave(&vap->iv_ppqueue, ni);
+	/* Abort a measurement that is still running against this node */
+	if ((ic->ic_measure_info.ni == ni) &&
+			(ic->ic_measure_info.status == MEAS_STATUS_RUNNING)) {
+		 ic->ic_measure_info.status = MEAS_STATUS_DISCRAD;
+	}
+
+#if defined(QBMPS_ENABLE)
+	if (ic->ic_flags_qtn & IEEE80211_QTN_BMPS) {
+		/* exit power-saving */
+                ic->ic_pm_reason = IEEE80211_PM_LEVEL_LEAVE_BSS;
+		ieee80211_pm_queue_work(ic);
+	}
+#endif
+	/* WDS/Repeater: Stop software beacon timer for STA */
+	if (vap->iv_opmode == IEEE80211_M_STA &&
+	    vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) {
+		del_timer(&vap->iv_swbmiss);
+	}
+	/* Clear up the training timer */
+	if (vap->iv_opmode != IEEE80211_M_WDS) {
+		/* NOTE(review): ni was already dereferenced above, so this
+		 * NULL check can never fail here - it is redundant. */
+		if (ni) {
+			int free_node = 0;
+			del_timer_sync(&ni->ni_training_timer);
+			spin_lock(&ni->ni_lock);
+			if (ni->ni_training_flag == NI_TRAINING_RUNNING) {
+				/* Training held its own reference; drop it below */
+				free_node = 1;
+				ni->ni_training_flag = NI_TRAINING_END;
+			}
+			spin_unlock(&ni->ni_lock);
+			if (free_node) {
+				printk("%s: [%pM] %s stopped\n",
+						vap->iv_dev->name, ni->ni_macaddr,
+						ic->ic_ocac.ocac_running ? "T process" : "training");
+				ieee80211_free_node(ni);
+			}
+		}
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PWR_ADJUST_AUTO, 0, NULL, 0);
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		del_timer_sync(&ni->ni_btm_resp_wait_timer);
+
+	/*
+	 * Make sure the node index is removed after the state change, which is below. Otherwise, fdb will not be cleared.
+	 */
+	ic->ic_node_auth_state_change(ni, 0);
+
+	/*
+	 * Make sure the node index is removed before deleting the MuC/AuC node structure, else
+	 * packets will be dropped.
+	 */
+	ieee80211_idx_remove(ni);
+
+	ic->ic_node_cleanup(ni);
+	ic->ic_disassoc(ni);
+
+	ieee80211_notify_node_leave(ni);
+	ieee80211_free_node(ni);
+}
+
+/*
+ * Node table support.
+ */
+/*
+ * Initialise a node table: locks, the node list, bookkeeping fields
+ * and the periodic WDS address ageing timer.
+ */
+static void
+ieee80211_node_table_init(struct ieee80211com *ic,
+	struct ieee80211_node_table *nt,	const char *name, int inact)
+{
+	TAILQ_INIT(&nt->nt_node);
+
+	nt->nt_ic = ic;
+	nt->nt_name = name;
+	nt->nt_scangen = 1;
+	nt->nt_inact_init = inact;
+
+	IEEE80211_NODE_LOCK_INIT(nt, "");
+	IEEE80211_SCAN_LOCK_INIT(nt, "");
+
+	/* Periodically age out dynamically learned WDS addresses */
+	init_timer(&nt->nt_wds_aging_timer);
+	nt->nt_wds_aging_timer.function = ieee80211_node_wds_ageout;
+	nt->nt_wds_aging_timer.data = (unsigned long) nt;
+	mod_timer(&nt->nt_wds_aging_timer, jiffies + HZ * WDS_AGING_TIMER_VAL);
+}
+
+/*
+ * Reclaim any resources in a node and reset any critical
+ * state.  Typically nodes are free'd immediately after,
+ * but in some cases the storage may be reused so we need
+ * to ensure consistent state (should probably fix that).
+ *
+ * Context: hwIRQ, softIRQ and process context
+ */
+static void
+ieee80211_node_cleanup(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	IEEE80211_LOCK_IRQ(ni->ni_ic);
+	/* NB: preserve ni_table */
+	/* Node leaves power-save: fix up vap/device PS bookkeeping */
+	if (ni->ni_flags & IEEE80211_NODE_PWR_MGT) {
+		if (vap->iv_opmode != IEEE80211_M_STA)
+			vap->iv_ps_sta--;
+
+		if (WMM_UAPSD_NODE_IS_PWR_MGT(ni)) {
+			ni->ni_ic->ic_uapsdmaxtriggers--;
+		}
+
+		ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+			"power save mode off, %u sta's in ps mode",
+			vap->iv_ps_sta);
+	}
+
+	ni->ni_flags &= ~IEEE80211_NODE_HT;
+
+	/* Drop any frames still queued for this node */
+	ieee80211_node_saveq_drain(ni);
+
+	if (vap->iv_set_tim != NULL) {
+		vap->iv_set_tim(ni, 0);
+	}
+
+	ieee80211_aid_remove(ni);
+
+	memset(&ni->ni_stats, 0, sizeof(ni->ni_stats));
+
+	if (ni->ni_challenge != NULL) {
+		FREE(ni->ni_challenge, M_DEVBUF);
+		ni->ni_challenge = NULL;
+	}
+
+	if (ni->ni_qtn_brmacs != NULL) {
+		FREE(ni->ni_qtn_brmacs, M_DEVBUF);
+		ni->ni_qtn_brmacs = NULL;
+	}
+	/*
+	 * Preserve SSID, WPA, and WME ie's so the bss node is
+	 * reusable during a re-auth/re-assoc state transition.
+	 * If we remove these data they will not be recreated
+	 * because they come from a probe-response or beacon frame
+	 * which cannot be expected prior to the association-response.
+	 * This should not be an issue when operating in other modes
+	 * as stations leaving always go through a full state transition
+	 * which will rebuild this state.
+	 *
+	 * XXX does this leave us open to inheriting old state?
+	 */
+
+	if (ni->ni_rxfrag != NULL) {
+		dev_kfree_skb_any(ni->ni_rxfrag);
+		ni->ni_rxfrag = NULL;
+	}
+
+	/*
+	 * If there are related frames have been pushed to HW, clear the security bit in Node Cache.
+         * Don't delete the key here, disassoc will release the node cache later.
+         */
+#if 0
+	ieee80211_crypto_delkey(vap, &ni->ni_ucastkey, ni);
+	ni->ni_rxkeyoff = 0;
+#endif
+	/* Deauthorize - remove from various linked lists. */
+	ieee80211_node_unauthorize(ni);
+	IEEE80211_UNLOCK_IRQ(ni->ni_ic);
+}
+
+static void
+ieee80211_node_exit(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+
+	ic->ic_node_count--;
+
+	ic->ic_node_cleanup(ni);
+
+	if (ni->ni_wpa_ie != NULL) {
+		FREE(ni->ni_wpa_ie, M_DEVBUF);
+		ni->ni_wpa_ie = NULL;
+	}
+	if (ni->ni_rsn_ie != NULL) {
+		FREE(ni->ni_rsn_ie, M_DEVBUF);
+		ni->ni_rsn_ie = NULL;
+	}
+	if (ni->ni_osen_ie != NULL) {
+		FREE(ni->ni_osen_ie, M_DEVBUF);
+		ni->ni_osen_ie = NULL;
+	}
+	if (ni->ni_wme_ie != NULL) {
+		FREE(ni->ni_wme_ie, M_DEVBUF);
+		ni->ni_wme_ie = NULL;
+	}
+	if (ni->ni_wsc_ie != NULL) {
+		FREE(ni->ni_wsc_ie, M_DEVBUF);
+		ni->ni_wsc_ie = NULL;
+	}
+	if (ni->ni_ath_ie != NULL) {
+		FREE(ni->ni_ath_ie, M_DEVBUF);
+		ni->ni_ath_ie = NULL;
+	}
+	if (ni->ni_qtn_assoc_ie != NULL) {
+		FREE(ni->ni_qtn_assoc_ie, M_DEVBUF);
+		ni->ni_qtn_assoc_ie = NULL;
+	}
+	if (ni->ni_qtn_pairing_ie != NULL) {
+		FREE(ni->ni_qtn_pairing_ie, M_DEVBUF);
+		ni->ni_qtn_pairing_ie = NULL;
+	}
+	if (ni->ni_qtn_brmacs != NULL) {
+		FREE(ni->ni_qtn_brmacs, M_DEVBUF);
+		ni->ni_qtn_brmacs = NULL;
+	}
+	if (ni->ni_ext_bssid_ie != NULL) {
+		FREE(ni->ni_ext_bssid_ie, M_DEVBUF);
+		ni->ni_ext_bssid_ie = NULL;
+	}
+	if (ni->ni_tx_md_ie != NULL) {
+		FREE(ni->ni_tx_md_ie, M_DEVBUF);
+		ni->ni_tx_md_ie = NULL;
+	}
+	if (ni->ni_rx_md_ie != NULL) {
+		FREE(ni->ni_rx_md_ie, M_DEVBUF);
+		ni->ni_rx_md_ie = NULL;
+	}
+	if (ni->ni_tx_ft_ie != NULL) {
+		FREE(ni->ni_tx_ft_ie, M_DEVBUF);
+		ni->ni_tx_ft_ie = NULL;
+	}
+	if (ni->ni_rx_ft_ie != NULL) {
+		FREE(ni->ni_rx_ft_ie, M_DEVBUF);
+		ni->ni_rx_ft_ie = NULL;
+	}
+	if (ni->ni_tx_rsn_ie != NULL) {
+		FREE(ni->ni_tx_rsn_ie, M_DEVBUF);
+		ni->ni_tx_rsn_ie = NULL;
+	}
+
+	ieee80211_scs_free_node_tdls_stats(ic, ni);
+
+	ic->ic_qdrv_node_free(ni);
+
+	IEEE80211_NODE_SAVEQ_DESTROY(ni);
+	FREE(ni, M_80211_NODE);
+}
+
+/* Report the last recorded RSSI for a node. */
+static u_int8_t
+ieee80211_node_getrssi(const struct ieee80211_node *node)
+{
+	return node->ni_rssi;
+}
+
+/*
+ * Configure block ack agreements for a node.
+ *
+ * For every TID whose TX BA state is REQUESTED, push the sequence
+ * control, window size and timeout parameters down and issue the
+ * ADDBA.  Consumes one node reference on every path.
+ */
+static void ieee80211_tx_addba(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	int tid;
+
+	if (ni->ni_associd == 0) {
+		/* Not associated: just drop the caller's reference */
+		ieee80211_free_node(ni);
+		return;
+	}
+
+	/* NOTE(review): loop bound is 7, not WME_NUM_TID (used elsewhere
+	 * in this file), so the highest TID is never configured here -
+	 * confirm whether that is intentional. */
+	for (tid = 0; tid < 7; tid++) {
+		int qtn_wds_link;
+
+		if (ni->ni_ba_tx[tid].state != IEEE80211_BA_REQUESTED)
+			continue;
+
+		ic->ic_setparam(ni, IEEE80211_PARAM_HTBA_SEQ_CTRL,
+				tid << 16, 0, 0);
+
+		qtn_wds_link = (vap->iv_opmode == IEEE80211_M_WDS &&
+				tid == IEEE80211_WDS_LINK_MAINTAIN_BA_TID &&
+				ni->ni_qtn_assoc_ie != NULL);
+		if (qtn_wds_link) {
+			/* For Q WDS peer, use larger buffer size for TID 0 to get more throughput */
+			ic->ic_setparam(ni, IEEE80211_PARAM_HTBA_SIZE_CTRL,
+				tid << 16 | IEEE80211_DEFAULT_BA_WINSIZE_H, 0, 0);
+		} else {
+			ic->ic_setparam(ni, IEEE80211_PARAM_HTBA_SIZE_CTRL,
+					tid << 16 | vap->iv_max_ba_win_size, 0, 0);
+		}
+		ic->ic_setparam(ni, IEEE80211_PARAM_HTBA_TIME_CTRL,
+				tid << 16 | 0x0, 0, 0);
+		ic->ic_setparam(ni, IEEE80211_PARAM_HT_ADDBA,
+				tid, 0, 0);
+	}
+
+	ieee80211_free_node(ni);
+}
+
+/*
+ * Configure block agreements for a node, workqueue version.
+ * The node structure must be locked before scheduling this workqueue.
+ */
+static void ieee80211_tx_addba_work(struct work_struct *work)
+{
+	struct ieee80211_node *node;
+
+	node = container_of(work, struct ieee80211_node, ni_tx_addba_task);
+	ieee80211_tx_addba(node);
+}
+
+/*
+ * Create an entry in the specified node table.  The node
+ * is set up with the mac address, an initial reference count,
+ * and some basic parameters obtained from global state.
+ * This interface is not intended for general use, it is
+ * used by the routines below to create entries with a
+ * specific purpose.
+ *
+ * Returns a node holding two references (table + caller), or NULL
+ * when the driver allocator fails even after expiring TDLS nodes.
+ */
+struct ieee80211_node *
+ieee80211_alloc_node(struct ieee80211_node_table *nt,
+	struct ieee80211vap *vap, const u_int8_t *macaddr, const char *caller)
+{
+	struct ieee80211com *ic = nt->nt_ic;
+	struct ieee80211_node *ni;
+	int hash;
+	int i;
+
+	ni = ic->ic_node_alloc(nt, vap, macaddr, 0);
+	if (ni == NULL) {
+		/*
+		 * Free the expired tdls node first and then try to
+		 * allocate new node again
+		 */
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			ieee80211_tdls_node_expire((unsigned long)vap);
+			ni = ic->ic_node_alloc(nt, vap, macaddr, 0);
+		}
+		if (ni == NULL) {
+			printk(KERN_WARNING "Failed to allocate node for %s\n", caller);
+			vap->iv_stats.is_rx_nodealloc++;
+			return NULL;
+		}
+	}
+
+	IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr);
+	hash = IEEE80211_NODE_HASH(macaddr);
+	ni->ni_vap = vap;
+	ni->ni_ic = ic;
+#ifdef IEEE80211_DEBUG_REFCNT
+	/* Optional per-node reference-count debugging state */
+	ni->ni_refdebug_info_p = kmalloc(sizeof(*ni->ni_refdebug_info_p), GFP_ATOMIC);
+	if (ni->ni_refdebug_info_p)
+		memset(ni->ni_refdebug_info_p, 0, sizeof(*ni->ni_refdebug_info_p));
+#endif
+	/* One reference for the table, one for the caller */
+	ieee80211_node_initref(ni);
+	ieee80211_ref_node(ni);
+
+	ni->ni_tbtt = 0;
+	ni->ni_dtim_tbtt = 0;
+	ni->ni_flags |= (IS_IEEE80211_VHT_ENABLED(ic) ? IEEE80211_NODE_VHT:IEEE80211_NODE_HT);
+	ni->ni_htcap = ic->ic_htcap;
+	ni->ni_vhtcap = ic->ic_vhtcap;
+	ni->ni_chan = IEEE80211_CHAN_ANYC;
+	ni->ni_authmode = IEEE80211_AUTH_OPEN;
+	ni->ni_txpower = ic->ic_txpowlimit;	/* max power */
+	ieee80211_crypto_resetkey(vap, &ni->ni_ucastkey, IEEE80211_KEYIX_NONE);
+	ni->ni_inact_reload = nt->nt_inact_init;
+	ni->ni_inact = ni->ni_inact_reload;
+	ni->ni_ath_defkeyindex = IEEE80211_INVAL_DEFKEY;
+	ni->ni_rxkeyoff = 0;
+	ni->ni_recent_cca_idle = SCS_CCA_IDLE_INVALID;
+	INIT_WORK(&ni->ni_tx_addba_task, ieee80211_tx_addba_work);
+	INIT_WORK(&ni->ni_inact_work, ieee80211_timeout_station_work);
+
+	/* Rate-training and TDLS state starts idle */
+	init_timer(&ni->ni_training_timer);
+	ni->ni_training_flag = NI_TRAINING_INIT;
+	ni->ni_node_type = IEEE80211_NODE_TYPE_NONE;
+	spin_lock_init(&ni->ni_lock);
+	ni->ni_training_count = 0;
+	ni->ni_training_start = 0;
+	ni->tdls_initiator = 0;
+	ni->last_tx_phy_rate = -1;
+	ni->tdls_path_sel_num = 0;
+	ni->tdls_last_path_sel = 0;
+	ni->tdls_no_send_cs_resp = 0;
+	ni->tdls_send_cs_req = 0;
+	ni->ni_chan_num = 0;
+	memset(ni->ni_supp_chans, 0, sizeof(ni->ni_supp_chans));
+	memset(&ni->ni_obss_ie, 0, sizeof(struct ieee80211_obss_scan_ie));
+	ni->ni_ext_role = IEEE80211_EXTENDER_ROLE_NONE;
+	ni->ni_ext_bssid_ie = NULL;
+	get_random_bytes(&ni->ni_rate_train, sizeof(ni->ni_rate_train));
+
+	init_timer(&ni->ni_btm_resp_wait_timer);
+	ni->ni_btm_req = 0;
+
+	ni->tdls_peer_associd = 0;
+	ni->tdls_status = IEEE80211_TDLS_NODE_STATUS_NONE;
+
+	IEEE80211_NODE_SAVEQ_INIT(ni, "unknown");
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	init_waitqueue_head(&ni->ni_dotk_waitq);
+#endif
+	init_waitqueue_head(&ni->ni_meas_info.meas_waitq);
+	init_waitqueue_head(&ni->ni_tpc_info.tpc_wait_info.tpc_waitq);
+
+	/* Block-ack state starts unestablished for every TID */
+	for (i = 0; i < WME_NUM_TID; ++i) {
+		ni->ni_ba_rx[i].state = IEEE80211_BA_NOT_ESTABLISHED;
+		seqlock_init(&ni->ni_ba_rx[i].state_lock);
+		ni->ni_ba_tx[i].state = IEEE80211_BA_NOT_ESTABLISHED;
+		seqlock_init(&ni->ni_ba_tx[i].state_lock);
+	}
+
+	/* Insert into the table's list and hash under the table lock */
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	ni->ni_table = nt;
+	TAILQ_INSERT_TAIL(&nt->nt_node, ni, ni_list);
+	LIST_INSERT_HEAD(&nt->nt_hash[hash], ni, ni_hash);
+	ni->ni_rxfrag = NULL;
+	ni->ni_challenge = NULL;
+	ni->ni_implicit_ba_size = IEEE80211_DEFAULT_BA_WINSIZE;
+	ni->ni_tx_md_ie = NULL;
+	ni->ni_rx_md_ie = NULL;
+	ni->ni_tx_ft_ie = NULL;
+	ni->ni_rx_ft_ie = NULL;
+	ni->ni_rsn_ie = NULL;
+	ni->ni_tx_rsn_ie = NULL;
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	WME_UAPSD_NODE_TRIGSEQINIT(ni);
+
+	return ni;
+}
+EXPORT_SYMBOL(ieee80211_alloc_node);
+
+/*
+ * Insert a WDS MAC-address-to-node mapping into the table's WDS hash.
+ * Static entries never age out; dynamic ones start with a full aging
+ * budget.  Returns 0 on success, 1 if the entry could not be allocated.
+ */
+int
+ieee80211_add_wds_addr(struct ieee80211_node_table *nt,
+	struct ieee80211_node *ni, const u_int8_t *macaddr, u_int8_t wds_static)
+{
+	struct ieee80211_wds_addr *entry;
+	int bucket;
+
+	MALLOC(entry, struct ieee80211_wds_addr *, sizeof(struct ieee80211_wds_addr),
+	       M_80211_WDS, M_NOWAIT | M_ZERO);
+	if (entry == NULL) {
+		/* XXX msg */
+		return 1;
+	}
+
+	entry->wds_agingcount = wds_static ? WDS_AGING_STATIC : WDS_AGING_COUNT;
+	IEEE80211_ADDR_COPY(entry->wds_macaddr, macaddr);
+	entry->wds_ni = ni;
+
+	bucket = IEEE80211_NODE_HASH(macaddr);
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	LIST_INSERT_HEAD(&nt->nt_wds_hash[bucket], entry, wds_hash);
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_add_wds_addr);
+
+/*
+ * Remove (and free) the WDS hash entry for 'macaddr', if one exists.
+ * At most one entry per MAC is expected, so the scan stops at the
+ * first match.
+ */
+void
+ieee80211_remove_wds_addr(struct ieee80211_node_table *nt, const u_int8_t *macaddr)
+{
+	struct ieee80211_wds_addr *entry;
+	int bucket = IEEE80211_NODE_HASH(macaddr);
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	LIST_FOREACH(entry, &nt->nt_wds_hash[bucket], wds_hash) {
+		if (!IEEE80211_ADDR_EQ(entry->wds_macaddr, macaddr))
+			continue;
+		LIST_REMOVE(entry, wds_hash);
+		FREE(entry, M_80211_WDS);
+		break;
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+}
+EXPORT_SYMBOL(ieee80211_remove_wds_addr);
+
+
+/*
+ * Remove every WDS hash entry that references node 'ni'.
+ *
+ * Fix: the original iterated with LIST_FOREACH while calling
+ * LIST_REMOVE()+FREE() on the current element; the loop then advanced
+ * through the freed element's link field (use-after-free).  Fetch the
+ * successor before removal instead.
+ */
+void
+ieee80211_del_wds_node(struct ieee80211_node_table *nt, struct ieee80211_node *ni)
+{
+	int hash;
+	struct ieee80211_wds_addr *wds;
+	struct ieee80211_wds_addr *next_wds;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	for (hash = 0; hash < IEEE80211_NODE_HASHSIZE; hash++) {
+		for (wds = LIST_FIRST(&nt->nt_wds_hash[hash]); wds != NULL; wds = next_wds) {
+			/* grab successor before a possible free */
+			next_wds = LIST_NEXT(wds, wds_hash);
+			if (wds->wds_ni == ni) {
+				LIST_REMOVE(wds, wds_hash);
+				FREE(wds, M_80211_WDS);
+			}
+		}
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+}
+EXPORT_SYMBOL(ieee80211_del_wds_node);
+
+/*
+ * Periodic WDS table ager (timer callback; 'data' is the node table).
+ * Non-static entries count down once per period and are freed when
+ * their count reaches zero; static entries are never aged out.  The
+ * timer re-arms itself at the end.
+ *
+ * Fix: the original used LIST_FOREACH while freeing the current entry,
+ * advancing through the freed element's link field (use-after-free).
+ * Fetch the successor before removal instead.
+ */
+static void
+ieee80211_node_wds_ageout(unsigned long data)
+{
+	struct ieee80211_node_table *nt = (struct ieee80211_node_table *)data;
+	int hash;
+	struct ieee80211_wds_addr *wds;
+	struct ieee80211_wds_addr *next_wds;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	for (hash = 0; hash < IEEE80211_NODE_HASHSIZE; hash++) {
+		for (wds = LIST_FIRST(&nt->nt_wds_hash[hash]); wds != NULL; wds = next_wds) {
+			/* grab successor before a possible free */
+			next_wds = LIST_NEXT(wds, wds_hash);
+			if (wds->wds_agingcount == WDS_AGING_STATIC)
+				continue;
+			if (wds->wds_agingcount == 0) {
+				LIST_REMOVE(wds, wds_hash);
+				FREE(wds, M_80211_WDS);
+			} else {
+				wds->wds_agingcount--;
+			}
+		}
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+	mod_timer(&nt->nt_wds_aging_timer, jiffies + HZ * WDS_AGING_TIMER_VAL);
+}
+
+/*
+ * Craft a temporary node suitable for sending a management frame
+ * to the specified station.  We craft only as much state as we
+ * need to do the work since the node will be immediately reclaimed
+ * once the send completes.
+ *
+ * Returns a referenced node that is NOT inserted into any node table
+ * (ni_table stays NULL, so ieee80211_free_node() frees it directly),
+ * or NULL on allocation failure.
+ */
+struct ieee80211_node *
+_ieee80211_tmp_node(struct ieee80211vap *vap, const u_int8_t *macaddr,
+		const u_int8_t *bssid)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211_node *bss_ni = vap->iv_bss;
+
+	/*
+	 * Fall back to the first vap's bss when this vap has none.
+	 * NOTE(review): assumes ic_vaps is non-empty here — TAILQ_FIRST()
+	 * is dereferenced unchecked; confirm against callers.
+	 */
+	if (!bss_ni) {
+		bss_ni = TAILQ_FIRST(&ic->ic_vaps)->iv_bss;
+		if (!bss_ni)
+			return NULL;
+	}
+
+	ni = ic->ic_node_alloc(&ic->ic_sta, vap, macaddr, 1);
+	if (ni == NULL) {
+		vap->iv_stats.is_rx_nodealloc++;
+		return NULL;
+	}
+
+#ifdef IEEE80211_DEBUG_REFCNT
+	ni->ni_refdebug_info_p = NULL;
+#endif
+
+	IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr);
+	IEEE80211_ADDR_COPY(ni->ni_bssid, bssid);
+
+
+	ni->ni_txpower = bss_ni->ni_txpower;
+	ni->ni_vap = vap;
+	ni->ni_node_idx = vap->iv_vapnode_idx;
+
+	ieee80211_node_initref(ni);
+
+	/* NB: required by ieee80211_fix_rate */
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		ni->ni_rates = ic->ic_sup_rates[ic->ic_curmode];
+	} else {
+		ieee80211_node_set_chan(ic, ni);
+	}
+	ieee80211_crypto_resetkey(vap, &ni->ni_ucastkey,
+		IEEE80211_KEYIX_NONE);
+	/* XXX optimize away */
+	IEEE80211_NODE_SAVEQ_INIT(ni, "unknown");
+
+	/* deliberately left out of every table/hash */
+	ni->ni_table = NULL;
+	ni->ni_ic = ic;
+	ni->ni_rxfrag = NULL;
+	ni->ni_challenge = NULL;
+
+	return ni;
+}
+EXPORT_SYMBOL(_ieee80211_tmp_node);
+
+/*
+ * Convenience wrapper for _ieee80211_tmp_node() that uses this vap's
+ * bss as the bssid source.  Returns NULL on failure.
+ *
+ * Fix: guard against a NULL iv_bss (e.g. during vap teardown) —
+ * _ieee80211_tmp_node() itself tolerates a missing bss, but the
+ * unconditional vap->iv_bss->ni_bssid dereference here did not.
+ */
+struct ieee80211_node *
+ieee80211_tmp_node(struct ieee80211vap *vap, const u_int8_t *macaddr)
+{
+	if (vap->iv_bss == NULL)
+		return NULL;
+
+	return _ieee80211_tmp_node(vap, macaddr, vap->iv_bss->ni_bssid);
+}
+EXPORT_SYMBOL(ieee80211_tmp_node);
+
+/*
+ * Add the specified station to the station table.
+ * The node is locked and must be freed by the caller.
+ *
+ * The new node inherits auth mode, tx power, vlan, bssid, channel,
+ * RSN state, association id and node index from this vap's bss node.
+ * Returns NULL when the vap has no bss or allocation fails.
+ */
+struct ieee80211_node *
+ieee80211_dup_bss(struct ieee80211vap *vap, const u_int8_t *macaddr)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+
+	if (NULL == vap->iv_bss)
+		return NULL;
+
+	ni = ieee80211_alloc_node(&ic->ic_sta, vap, macaddr, "bss dup");
+	if (ni != NULL) {
+		/*
+		 * Inherit from iv_bss.
+		 */
+		ni->ni_authmode = vap->iv_bss->ni_authmode;
+		ni->ni_txpower = vap->iv_bss->ni_txpower;
+		ni->ni_vlan = vap->iv_bss->ni_vlan;	/* XXX?? */
+		IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_bss->ni_bssid);
+		ieee80211_node_set_chan(ic, ni);
+		ni->ni_rsn = vap->iv_bss->ni_rsn;
+		ni->ni_rxfrag = NULL;
+		ni->ni_associd = vap->iv_bss->ni_associd;
+
+		ni->ni_node_idx = vap->iv_bss->ni_node_idx;
+	}
+	return ni;
+}
+
+/*
+ * Locked lookup of a WDS peer by MAC address.  Caller must hold the
+ * node table lock.  On a hit the entry's aging budget is refreshed
+ * (unless static) and a referenced node is returned; otherwise NULL.
+ */
+static struct ieee80211_node *
+_ieee80211_find_wds_node(struct ieee80211_node_table *nt, const u_int8_t *macaddr)
+{
+	struct ieee80211_wds_addr *entry;
+	int bucket;
+
+	IEEE80211_NODE_LOCK_ASSERT(nt);
+
+	bucket = IEEE80211_NODE_HASH(macaddr);
+	LIST_FOREACH(entry, &nt->nt_wds_hash[bucket], wds_hash) {
+		if (!IEEE80211_ADDR_EQ(entry->wds_macaddr, macaddr))
+			continue;
+		if (entry->wds_agingcount != WDS_AGING_STATIC)
+			entry->wds_agingcount = WDS_AGING_COUNT; /* reset the aging count */
+		ieee80211_ref_node(entry->wds_ni);
+		return entry->wds_ni;
+	}
+	return NULL;
+}
+
+/*
+ * Locked lookup by MAC address: first in the station hash, then in the
+ * 4-address (WDS) hash.  Caller must hold the node table lock; the
+ * returned node is referenced.  Unlike _ieee80211_find_wds_node(), a
+ * WDS hit here does not refresh the entry's aging counter.
+ *
+ * NOTE(review): ieee80211_ref_node_debug() is used with filename/line in
+ * the non-debug build too — assumed to be a macro that drops those args
+ * when IEEE80211_DEBUG_REFCNT is off; confirm in the header.
+ */
+static struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+_ieee80211_find_node_debug(struct ieee80211_node_table *nt, const u_int8_t *macaddr,
+				const char *filename,
+				int line)
+#else
+_ieee80211_find_node(struct ieee80211_node_table *nt, const u_int8_t *macaddr)
+#endif
+{
+	struct ieee80211_node *ni;
+	int hash;
+	struct ieee80211_wds_addr *wds;
+
+	IEEE80211_NODE_LOCK_ASSERT(nt);
+
+	hash = IEEE80211_NODE_HASH(macaddr);
+	LIST_FOREACH(ni, &nt->nt_hash[hash], ni_hash) {
+		if (IEEE80211_ADDR_EQ(ni->ni_macaddr, macaddr)) {
+			ieee80211_ref_node_debug(ni, filename, line);
+			return ni;
+		}
+	}
+
+	/* Now, we look for the desired mac address in the 4 address nodes */
+	LIST_FOREACH(wds, &nt->nt_wds_hash[hash], wds_hash) {
+		if (IEEE80211_ADDR_EQ(wds->wds_macaddr, macaddr)) {
+			ni = wds->wds_ni;
+			ieee80211_ref_node_debug(ni, filename, line);
+			return ni;
+		}
+	}
+	return NULL;
+}
+/*
+ * With refcount debugging the non-debug name is remapped so in-file
+ * callers transparently forward their own filename/line parameters.
+ */
+#ifdef IEEE80211_DEBUG_REFCNT
+#define	_ieee80211_find_node(nt, mac) \
+	_ieee80211_find_node_debug(nt, mac, filename, line)
+#endif
+
+/* Locking wrapper around _ieee80211_find_wds_node(); returns a
+ * referenced node or NULL. */
+struct ieee80211_node *
+ieee80211_find_wds_node(struct ieee80211_node_table *nt, const u_int8_t *macaddr)
+{
+	struct ieee80211_node *found;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	found = _ieee80211_find_wds_node(nt, macaddr);
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return found;
+}
+EXPORT_SYMBOL(ieee80211_find_wds_node);
+
+/*
+ * Public MAC-address lookup: takes the table lock and delegates to
+ * _ieee80211_find_node().  Returns a referenced node or NULL.
+ */
+struct ieee80211_node * __sram_text
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_node_debug(struct ieee80211_node_table *nt, const u_int8_t *macaddr,
+				const char *filename, int line)
+#else
+ieee80211_find_node(struct ieee80211_node_table *nt, const u_int8_t *macaddr)
+#endif
+{
+	struct ieee80211_node *ni;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	ni = _ieee80211_find_node(nt, macaddr);
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+	return ni;
+}
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_find_node_debug);
+#else
+EXPORT_SYMBOL(ieee80211_find_node);
+#endif
+
+#ifdef IEEE80211_DEBUG_REFCNT
+#define	ieee80211_find_node_by_ip_addr \
+	ieee80211_find_node_by_ip_addr_debug
+#endif
+
+/*
+ * Find the station (bound to this vap's net device) holding the given
+ * IPv4 address.  Returns a referenced node, or NULL when no match.
+ *
+ * Fix: the IEEE80211_DEBUG_REFCNT variant declared its first parameter
+ * as a node table ('nt') while the body derives the table from 'vap';
+ * that redeclared 'nt' and left 'vap' undeclared, so the debug build
+ * could not compile.  The debug signature now takes the vap, matching
+ * the non-debug variant and the body.
+ */
+struct ieee80211_node * __sram_text
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_node_by_ip_addr_debug(struct ieee80211vap *vap, uint32_t ip_addr,
+			const char *filename,
+			int line)
+#else
+ieee80211_find_node_by_ip_addr(struct ieee80211vap *vap, uint32_t ip_addr)
+#endif
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni = NULL;
+
+	IEEE80211_NODE_LOCK(nt);
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni->ni_vap->iv_dev != vap->iv_dev) {
+			continue;
+		}
+		/* ni_ip_addr == 0 means "unknown"; never match on it */
+		if (ni->ni_ip_addr && (ip_addr == ni->ni_ip_addr)) {
+			ieee80211_ref_node_debug(ni, filename, line);
+			break;
+		}
+	}
+
+	IEEE80211_NODE_UNLOCK(nt);
+
+	return ni;
+}
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_find_node_by_ip_addr_debug);
+#else
+EXPORT_SYMBOL(ieee80211_find_node_by_ip_addr);
+#endif
+
+#ifdef CONFIG_IPV6
+#ifdef IEEE80211_DEBUG_REFCNT
+#define	ieee80211_find_node_by_ipv6_addr \
+	ieee80211_find_node_by_ipv6_addr_debug
+#endif
+
+/*
+ * Find the station (bound to this vap's net device) whose recorded IPv6
+ * link-local address matches 'ipv6_addr'.  Returns a referenced node,
+ * or NULL when no match.
+ *
+ * Fix: as with the IPv4 variant above, the IEEE80211_DEBUG_REFCNT
+ * signature declared a node table ('nt') while the body uses 'vap';
+ * the debug build could not compile.  The debug signature now takes
+ * the vap, matching the non-debug variant and the body.
+ */
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_node_by_ipv6_addr_debug(struct ieee80211vap *vap, struct in6_addr *ipv6_addr,
+			const char *filename,
+			int line)
+#else
+ieee80211_find_node_by_ipv6_addr(struct ieee80211vap *vap, struct in6_addr *ipv6_addr)
+#endif
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni = NULL;
+
+	IEEE80211_NODE_LOCK(nt);
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni->ni_vap->iv_dev != vap->iv_dev) {
+			continue;
+		}
+		/* only the stored link-local address is compared */
+		if (!memcmp(ni->ipv6_llocal.s6_addr, ipv6_addr, sizeof(struct in6_addr))) {
+			ieee80211_ref_node_debug(ni, filename, line);
+			break;
+		}
+	}
+
+	IEEE80211_NODE_UNLOCK(nt);
+
+	return ni;
+}
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_find_node_by_ipv6_addr_debug);
+#else
+EXPORT_SYMBOL(ieee80211_find_node_by_ipv6_addr);
+#endif
+#endif
+
+/**
+ * Look up the AID by MAC address.
+ * refcnt is not incremented.
+ *
+ * Returns 0 when the MAC is unknown (0 is never a valid AID).
+ */
+u_int16_t /*__sram_text*/
+ieee80211_find_aid_by_mac_addr(struct ieee80211_node_table *nt, const u_int8_t *macaddr)
+{
+	struct ieee80211_node *node;
+	u_int16_t found_aid = 0;
+	int bucket;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+
+	IEEE80211_NODE_LOCK_ASSERT(nt);
+
+	bucket = IEEE80211_NODE_HASH(macaddr);
+	LIST_FOREACH(node, &nt->nt_hash[bucket], ni_hash) {
+		if (IEEE80211_ADDR_EQ(node->ni_macaddr, macaddr)) {
+			found_aid = IEEE80211_AID(node->ni_associd);
+			break;
+		}
+	}
+
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+	return found_aid;
+}
+EXPORT_SYMBOL(ieee80211_find_aid_by_mac_addr);
+
+/*
+ * Look up a node by its node-cache index on this vap's device.
+ * Returns a referenced node or NULL.
+ *
+ * Fix: index the ic_node_idx_ni[] array only after bound-checking the
+ * unmapped index against QTN_NCIDX_MAX — the sibling
+ * ieee80211_find_node_by_idx() does this, but this path did not,
+ * allowing an out-of-bounds read for a corrupt or stale index.
+ */
+struct ieee80211_node * __sram_text
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_node_by_node_idx_debug(struct ieee80211vap *vap, uint16_t node_idx,
+			const char *filename,
+			int line)
+#else
+ieee80211_find_node_by_node_idx(struct ieee80211vap *vap, uint16_t node_idx)
+#endif
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni = NULL;
+
+	if (IEEE80211_NODE_IDX_UNMAP(node_idx) >= QTN_NCIDX_MAX)
+		return NULL;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	ni = vap->iv_ic->ic_node_idx_ni[IEEE80211_NODE_IDX_UNMAP(node_idx)];
+	if (ni) {
+		ieee80211_ref_node_debug(ni, filename, line);
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return ni;
+}
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_find_node_by_node_idx_debug);
+#else
+EXPORT_SYMBOL(ieee80211_find_node_by_node_idx);
+#endif
+
+/*
+ * Look up a node by node-cache index, optionally restricted to one vap
+ * (pass vap == NULL to match any).  Returns a referenced node or NULL;
+ * out-of-range indices never match.
+ */
+struct ieee80211_node * __sram_text
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_node_by_idx_debug(struct ieee80211com *ic,
+			struct ieee80211vap *vap,
+			uint16_t node_idx,
+			const char *filename,
+			int line)
+#else
+ieee80211_find_node_by_idx(struct ieee80211com *ic, struct ieee80211vap *vap, uint16_t node_idx)
+#endif
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *found = NULL;
+
+	if (IEEE80211_NODE_IDX_UNMAP(node_idx) >= QTN_NCIDX_MAX)
+		return NULL;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	found = ic->ic_node_idx_ni[IEEE80211_NODE_IDX_UNMAP(node_idx)];
+	if (found != NULL) {
+		if (vap != NULL && found->ni_vap != vap)
+			found = NULL;	/* belongs to another vap: no match */
+		else
+			ieee80211_ref_node_debug(found, filename, line);
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return found;
+}
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_find_node_by_idx_debug);
+#else
+EXPORT_SYMBOL(ieee80211_find_node_by_idx);
+#endif
+
+/*
+ * Fake up a node; this handles node discovery in adhoc mode.
+ * The driver is told about it as if it were an association, so it can
+ * set up its private state.  The returned node is referenced and must
+ * be freed by the caller; NULL on failure or when the vap has no bss.
+ */
+struct ieee80211_node *
+ieee80211_fakeup_adhoc_node(struct ieee80211vap *vap, const u_int8_t *macaddr)
+{
+	struct ieee80211_node *peer;
+
+	if (vap->iv_bss == NULL)
+		return NULL;
+
+	peer = ieee80211_dup_bss(vap, macaddr);
+	if (peer == NULL)
+		return NULL;
+
+	/* XXX no rate negotiation; just dup */
+	peer->ni_rates = vap->iv_bss->ni_rates;
+	if (vap->iv_ic->ic_newassoc != NULL)
+		vap->iv_ic->ic_newassoc(peer, 1);
+	/* XXX not right for 802.1x/WPA */
+	ieee80211_node_authorize(peer);
+
+	return peer;
+}
+
+/*
+ * Do node discovery in adhoc mode on receipt of a beacon or probe response frame.
+ * Note that for the driver's benefit we treat this like an association so the driver has an
+ * opportunity to set up its private state.
+ * The node is locked and must be freed by the caller.
+ *
+ * Scan parameters ('sp') are copied into the new node: ssid, bssid,
+ * timestamp, beacon interval, capabilities, FH params, ERP, TIM offset
+ * and (when present) WME/WPA/RSN/Atheros IEs.
+ */
+struct ieee80211_node *
+ieee80211_add_neighbor(struct ieee80211vap *vap,	const struct ieee80211_frame *wh,
+	const struct ieee80211_scanparams *sp)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+
+	ni = ieee80211_dup_bss(vap, wh->i_addr2);	/* XXX alloc_node? */
+	/* TODO: not really putting itself in a table */
+	if (ni != NULL) {
+		/*
+		 * NOTE(review): sp->ssid[1] (IE length byte) is used
+		 * unchecked — assumed already clamped to the ni_essid size
+		 * by the scan parser; confirm.
+		 */
+		ni->ni_esslen = sp->ssid[1];
+		memcpy(ni->ni_essid, sp->ssid + 2, sp->ssid[1]);
+
+		IEEE80211_ADDR_COPY(ni->ni_bssid, wh->i_addr3);
+		memcpy(ni->ni_tstamp.data, sp->tstamp, sizeof(ni->ni_tstamp));
+		ni->ni_intval = IEEE80211_BINTVAL_SANITISE(sp->bintval);
+		ni->ni_capinfo = sp->capinfo;
+		ni->ni_chan = ic->ic_curchan;
+		ni->ni_fhdwell = sp->fhdwell;
+		ni->ni_fhindex = sp->fhindex;
+		ni->ni_erp = sp->erp;
+		ni->ni_timoff = sp->timoff;
+		if (sp->wme != NULL)
+			ieee80211_saveie(&ni->ni_wme_ie, sp->wme);
+		if (sp->wpa != NULL)
+			ieee80211_saveie(&ni->ni_wpa_ie, sp->wpa);
+		if (sp->rsn != NULL)
+			ieee80211_saveie(&ni->ni_rsn_ie, sp->rsn);
+		if (sp->ath != NULL)
+			ieee80211_saveath(ni, sp->ath);
+
+		/* NB: must be after ni_chan is set up */
+		ieee80211_setup_rates(ni, sp->rates, sp->xrates, IEEE80211_F_DOSORT);
+
+		if (ic->ic_newassoc != NULL)
+			ic->ic_newassoc(ni, 1);
+		/* XXX not right for 802.1x/WPA */
+		ieee80211_node_authorize(ni);
+		if (vap->iv_opmode == IEEE80211_M_AHDEMO) {
+			/*
+			 * Blindly propagate capabilities based on the
+			 * local configuration.  In particular this permits
+			 * us to use QoS to disable ACK's and to use short
+			 * preamble on 2.4G channels.
+			 */
+			if (vap->iv_flags & IEEE80211_F_WME)
+				ni->ni_flags |= IEEE80211_NODE_QOS;
+			if (vap->iv_flags & IEEE80211_F_SHPREAMBLE)
+				ni->ni_capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+		}
+	}
+
+	return ni;
+}
+
+/*
+ * Locate the node for sender, track state, and then pass the
+ * (referenced) node up to the 802.11 layer for its use.  We
+ * return NULL when the sender is unknown; the driver is required
+ * locate the appropriate virtual ap in that case; possibly
+ * sending it to all (using ieee80211_input_all).
+ */
+struct ieee80211_node * __sram_text
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_rxnode_debug(struct ieee80211com *ic,
+			const struct ieee80211_frame_min *wh,
+			const char *filename,
+			int line)
+#else
+ieee80211_find_rxnode(struct ieee80211com *ic, const struct ieee80211_frame_min *wh)
+#endif
+{
+#define	IS_CTL(wh) \
+	((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL)
+#define	IS_PSPOLL(wh) \
+	((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PS_POLL)
+	struct ieee80211_node_table *nt;
+	struct ieee80211_node *ni;
+
+	/* XXX check ic_bss first in station mode */
+	/* XXX 4-address frames? */
+	nt = &ic->ic_sta;
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	/*
+	 * Control frames other than PS-POLL carry no addr2 (TA), so the
+	 * lookup must use addr1 (RA) for those.
+	 */
+	if (IS_CTL(wh) && !IS_PSPOLL(wh) /*&& !IS_RTS(ah)*/)
+		ni = _ieee80211_find_node(nt, wh->i_addr1);
+	else
+		ni = _ieee80211_find_node(nt, wh->i_addr2);
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return ni;
+#undef IS_PSPOLL
+#undef IS_CTL
+}
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_find_rxnode_debug);
+#else
+EXPORT_SYMBOL(ieee80211_find_rxnode);
+#endif
+
+/*
+ * Return a reference to the appropriate node for sending
+ * a data frame.  This handles node discovery in adhoc networks.
+ *
+ * In station mode, or for multicast/broadcast destinations, the vap's
+ * bss node is returned (referenced when present).  Otherwise the node
+ * table is searched; in IBSS/ad-hoc demo modes an unknown destination
+ * is faked up on the fly.  Returns NULL when no node can be found or
+ * created — note the "no node, discard frame" diagnostics are
+ * deliberately disabled below.
+ */
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_txnode_debug(struct ieee80211vap *vap,
+			const u_int8_t *mac,
+			const char *filename,
+			int line)
+#else
+ieee80211_find_txnode(struct ieee80211vap *vap, const u_int8_t *mac)
+#endif
+{
+	struct ieee80211_node_table *nt;
+	struct ieee80211_node *ni;
+
+	/*
+	 * The destination address should be in the node table
+	 * unless we are operating in station mode or this is a
+	 * multicast/broadcast frame.
+	 */
+	if (vap->iv_opmode == IEEE80211_M_STA || IEEE80211_IS_MULTICAST(mac)) {
+		if (vap->iv_bss)
+			ieee80211_ref_node_debug(vap->iv_bss, filename, line);
+		return vap->iv_bss;
+	}
+
+	/* XXX can't hold lock across dup_bss due to recursive locking */
+	nt = &vap->iv_ic->ic_sta;
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	ni = _ieee80211_find_node(nt, mac);
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	if (ni == NULL) {
+		if (vap->iv_opmode == IEEE80211_M_IBSS ||
+		    vap->iv_opmode == IEEE80211_M_AHDEMO) {
+
+			ni = ieee80211_fakeup_adhoc_node(vap, mac);
+		} else {
+//			IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, mac,
+//				"no node, discard frame (%s)", __func__);
+//			vap->iv_stats.is_tx_nonode++;
+		}
+	}
+	return ni;
+}
+
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_find_txnode_debug);
+#else
+EXPORT_SYMBOL(ieee80211_find_txnode);
+#endif
+
+/*
+ * Move 'ni' to node-cache index 'new_idx': drop any current mapping,
+ * register the node with the driver and record it in ic_node_idx_ni[].
+ * A WDS vap also adopts the new index as its vap node index.
+ * No-op when the node already sits at new_idx.
+ *
+ * NOTE(review): an out-of-range new_idx is rejected only after
+ * ieee80211_idx_remove() has already run, so the node is left unmapped
+ * in that case — presumably intentional; confirm.
+ */
+void
+ieee80211_idx_add(struct ieee80211_node *ni, uint16_t new_idx)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (new_idx == IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx)) {
+		return;
+	}
+
+	IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+		"aid=%u change idx from %u to %u\n",
+		IEEE80211_NODE_AID(ni), IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx), new_idx);
+
+	ieee80211_idx_remove(ni);
+
+	if ((new_idx == 0) || (new_idx >= QTN_NCIDX_MAX)) {
+		printk("%s: [%pM] invalid idx %u\n", __func__, ni->ni_macaddr, new_idx);
+		return;
+	}
+
+	ni->ni_node_idx = IEEE80211_NODE_IDX_MAP(new_idx);
+	ic->ic_register_node(ni);
+	ic->ic_node_idx_ni[new_idx] = ni;
+	if (vap->iv_opmode == IEEE80211_M_WDS) {
+		vap->iv_vapnode_idx = ni->ni_node_idx;
+	}
+}
+EXPORT_SYMBOL(ieee80211_idx_add);
+
+/* Caller must lock the IEEE80211_NODE_LOCK
+ *
+ * Context: hwIRQ, softIRQ and process context
+ *
+ * Unlinks the node from its table (via _ieee80211_remove_node) and
+ * hands the memory back to the driver through ic_node_free.  Also
+ * releases the refcount-debug bookkeeping when it was allocated.
+ */
+static void __sram_text
+_ieee80211_free_node(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+#ifdef IEEE80211_DEBUG_REFCNT
+	if (ni->ni_refdebug_info_p) {
+		ieee80211_node_show_refdebug_info(ni);
+		kfree(ni->ni_refdebug_info_p);
+		ni->ni_refdebug_info_p = NULL;
+	} else if (ni->ni_table != NULL) {
+		printk("%s:%d: freeing node %p\n", __func__, __LINE__, ni);
+	}
+#endif
+	_ieee80211_remove_node(ni);
+
+	vap->iv_ic->ic_node_free(ni);
+}
+
+/*
+ * Drop one reference to 'ni'; when the count reaches zero the node is
+ * unlinked and freed.  Interrupts are locked out across the
+ * test-and-free window (see the race note below) so the rx path cannot
+ * re-reference a node that is being torn down.
+ */
+void __sram_text
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_free_node_debug(struct ieee80211_node *ni, const char *filename, int line)
+#else
+ieee80211_free_node(struct ieee80211_node *ni)
+#endif
+{
+	struct ieee80211_node_table *nt = ni->ni_table;
+	struct ieee80211com *ic = ni->ni_ic;
+
+	/*
+	 * XXX: may need to lock out the following race. we dectestref
+	 *      and determine it's time to free the node. between the if()
+	 *      and lock, we take an rx intr to receive a frame from this
+	 *      node. the rx path (tasklet or intr) bumps this node's
+	 *      refcnt and xmits a response frame. eventually that response
+	 *      will get reaped, and the reaping code will attempt to use
+	 *      the node. the code below will delete the node prior
+	 *      to the reap and we could get a crash.
+	 *
+	 *      as a stopgap before delving deeper, lock intrs to
+	 *      prevent this case.
+	 */
+	IEEE80211_LOCK_IRQ(ic);
+	if (ieee80211_node_dectestref(ni)) {
+		ieee80211_node_dbgref(ni, filename, line, IEEE80211_NODEREF_DECR);
+		/*
+		 * Beware; if the node is marked gone then it's already
+		 * been removed from the table and we cannot assume the
+		 * table still exists.  Regardless, there's no need to lock
+		 * the table.
+		 */
+		if (ni->ni_table != NULL) {
+			IEEE80211_NODE_LOCK(nt);
+			_ieee80211_free_node(ni);
+			IEEE80211_NODE_UNLOCK(nt);
+		} else
+			_ieee80211_free_node(ni);
+	} else {
+		ieee80211_node_dbgref(ni, filename, line, IEEE80211_NODEREF_DECR);
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+}
+#ifdef IEEE80211_DEBUG_REFCNT
+EXPORT_SYMBOL(ieee80211_free_node_debug);
+#else
+EXPORT_SYMBOL(ieee80211_free_node);
+#endif
+
+/*
+ * Bump the association counters — device-wide, per-vap and per-SSID
+ * group — when a station associates.  'caller' is used only in the
+ * debug trace.
+ */
+void
+ieee80211_sta_assocs_inc(struct ieee80211vap *vap, const char *caller)
+{
+	struct ieee80211com *com = vap->iv_ic;
+
+	com->ic_sta_assoc++;
+	vap->iv_sta_assoc++;
+	com->ic_ssid_grp[vap->iv_ssid_group].assocs++;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+		"%s - increases counter ic_sta_assoc %u, iv_sta_assoc %u\n",
+		caller, com->ic_sta_assoc, vap->iv_sta_assoc);
+}
+
+/*
+ * Decrement the association counters — device-wide, per-vap and
+ * per-SSID group — when a station leaves.  Each counter is clamped at
+ * zero and a WARN_ON fires if an underflow was about to happen.
+ */
+void
+ieee80211_sta_assocs_dec(struct ieee80211vap *vap, const char *caller)
+{
+	struct ieee80211com *com = vap->iv_ic;
+
+	WARN_ON(com->ic_sta_assoc == 0);
+	WARN_ON(vap->iv_sta_assoc == 0);
+	WARN_ON(com->ic_ssid_grp[vap->iv_ssid_group].assocs == 0);
+
+	if (com->ic_sta_assoc > 0)
+		com->ic_sta_assoc--;
+	if (vap->iv_sta_assoc > 0)
+		vap->iv_sta_assoc--;
+	if (com->ic_ssid_grp[vap->iv_ssid_group].assocs > 0)
+		com->ic_ssid_grp[vap->iv_ssid_group].assocs--;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+		"%s - reduces counter ic_sta_assoc %u, iv_sta_assoc %u\n",
+		caller, com->ic_sta_assoc, vap->iv_sta_assoc);
+}
+
+/*
+ * Reclaim every node in 'nt' that belongs to vap 'match' (or to any vap
+ * when match is NULL).  Associated stations are de-registered first:
+ * auth-module leave hook, AID bitmap clear, association counters and
+ * non-QTN station accounting.  Runs under the node table lock.
+ */
+static void
+ieee80211_node_table_reset(struct ieee80211com *ic, struct ieee80211_node_table *nt,
+	struct ieee80211vap *match)
+{
+	struct ieee80211_node *ni, *next;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) {
+		if (match != NULL && ni->ni_vap != match)
+			continue;
+		if (ni->ni_associd != 0) {
+			struct ieee80211vap *vap = ni->ni_vap;
+
+			if (vap->iv_auth->ia_node_leave != NULL)
+				vap->iv_auth->ia_node_leave(ni);
+			ieee80211_clear_aid_bitmap(ic, ni->ni_associd);
+			ieee80211_sta_assocs_dec(vap, __func__);
+			ieee80211_nonqtn_sta_leave(vap, ni, __func__);
+		}
+		ieee80211_node_reclaim(nt, ni);
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+}
+
+/*
+ * Final teardown of a node table: reclaim all remaining nodes, stop the
+ * WDS aging timer and destroy the table locks.
+ *
+ * NOTE(review): the walk is performed without taking the table lock —
+ * presumably safe only because this runs at detach time when no other
+ * context can touch the table; confirm against callers.
+ */
+static void
+ieee80211_node_table_cleanup(struct ieee80211com *ic, struct ieee80211_node_table *nt)
+{
+	struct ieee80211_node *ni, *next;
+
+	TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) {
+		if (ni->ni_associd != 0) {
+			struct ieee80211vap *vap = ni->ni_vap;
+
+			if (vap->iv_auth->ia_node_leave != NULL)
+				vap->iv_auth->ia_node_leave(ni);
+			ieee80211_clear_aid_bitmap(ic, ni->ni_associd);
+		}
+		ieee80211_node_reclaim(nt, ni);
+	}
+	del_timer(&nt->nt_wds_aging_timer);
+	IEEE80211_SCAN_LOCK_DESTROY(nt);
+	IEEE80211_NODE_LOCK_DESTROY(nt);
+}
+
+/*
+ * Tear down block-ack state for (ni, tid) in one direction.
+ *
+ * TX: new state is NOT_ESTABLISHED when the vap's iv_ba_control bit for
+ * the tid is set, otherwise BLOCKED; a NEW_ATTEMPT_TIMEOUT hint is
+ * passed when the peer declined with REASON_STA_NOT_USE.
+ * RX: state is reset to NOT_ESTABLISHED; under CONFIG_QVSP the BA
+ * throttling sub-state is preserved across the delete.
+ *
+ * When anything actually changed, the driver's ic_htdelba hook is
+ * invoked to mirror the teardown in hardware/firmware.
+ */
+void
+ieee80211_node_ba_del(struct ieee80211_node *ni, uint8_t tid, uint8_t is_tx, uint16_t reason)
+{
+	enum ieee80211_ba_state	new_state;
+	int state_changed = 0;
+
+	if (is_tx) {
+		if (ni->ni_vap->iv_ba_control & (1 << tid)) {
+			new_state = IEEE80211_BA_NOT_ESTABLISHED;
+		} else {
+			new_state = IEEE80211_BA_BLOCKED;
+		}
+		if (ni->ni_ba_tx[tid].state != new_state) {
+			state_changed = 1;
+			memset(&ni->ni_ba_tx[tid], 0, sizeof(ni->ni_ba_tx[tid]));
+			ieee80211_node_tx_ba_set_state(ni, tid, new_state,
+				reason == IEEE80211_REASON_STA_NOT_USE ?
+					IEEE80211_TX_BA_REQUEST_NEW_ATTEMPT_TIMEOUT : 0);
+		}
+	} else {
+		if (ni->ni_ba_rx[tid].state != IEEE80211_BA_NOT_ESTABLISHED) {
+#ifdef CONFIG_QVSP
+			struct ieee80211_ba_throt ba_throt;
+			/* backup and restore BA throt state across BA delete */
+			memcpy(&ba_throt, &ni->ni_ba_rx[tid].ba_throt, sizeof(struct ieee80211_ba_throt));
+#endif
+			state_changed = 1;
+			memset(&ni->ni_ba_rx[tid], 0, sizeof(ni->ni_ba_rx[tid]));
+			ni->ni_ba_rx[tid].state = IEEE80211_BA_NOT_ESTABLISHED;
+#ifdef CONFIG_QVSP
+			memcpy(&ni->ni_ba_rx[tid].ba_throt, &ba_throt, sizeof(struct ieee80211_ba_throt));
+#endif
+		}
+	}
+
+	if (state_changed && ni->ni_ic->ic_htdelba) {
+		IEEE80211_NOTE(ni->ni_vap,
+				IEEE80211_MSG_NODE | IEEE80211_MSG_INPUT | IEEE80211_MSG_OUTPUT, ni,
+			       "block ack del tid=%d is_tx=%d reason=%d",
+			       (int)tid, (int)is_tx, (int)reason);
+		(*ni->ni_ic->ic_htdelba)(ni, tid, is_tx);
+	}
+}
+EXPORT_SYMBOL(ieee80211_node_ba_del);
+
+#define IEEE80211_MAX_WDS_BA_ATTEMPTS 3
+
+/*
+ * Inactivity work handler for a WDS peer node.  WDS peers are never
+ * deleted on timeout; instead this handler:
+ *  - reacts to total silence (not even beacons) by clearing block-ack
+ *    state, kicking power-management, and — on extender (QHop) setups —
+ *    notifying userspace that the link went down;
+ *  - retries block-ack establishment on the WDS maintenance TID,
+ *    sending a DELBA after IEEE80211_MAX_WDS_BA_ATTEMPTS failed rounds
+ *    so the exchange can restart cleanly.
+ *
+ * Fix: normalized a space-indented statement to tabs and moved a
+ * closing brace that was fused onto a statement line onto its own line,
+ * matching the file's style.  No functional change.
+ */
+static void
+ieee80211_timeout_station_work_wds(struct work_struct *work)
+{
+	struct ieee80211_node *ni = container_of(work, struct ieee80211_node, ni_inact_work);
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct qtn_wds_ext_event_data event_data;
+	struct ieee80211vap *primary_vap = NULL;
+
+	if (ni->ni_inact == 0) {
+		/* Nothing received (including beacons) */
+		IEEE80211_NOTE(ni->ni_vap,
+			IEEE80211_MSG_INACT | IEEE80211_MSG_NODE, ni,
+			"%s", "no beacons received from WDS peer");
+
+		ieee80211_node_ba_state_clear(ni);
+		ni->ni_inact = ni->ni_inact_reload;
+		ic->ic_pm_reason = IEEE80211_PM_LEVEL_INACTIVITY_IN_WDS;
+		ieee80211_pm_queue_work(ic);
+
+		if ((ic->ic_extender_role != IEEE80211_EXTENDER_ROLE_NONE) &&
+				!IEEE80211_VAP_WDS_BASIC(vap)) {
+			IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_WARN,
+					"QHop: no beacons received from peer %pM\n",
+					ni->ni_macaddr);
+			primary_vap = TAILQ_FIRST(&ic->ic_vaps);
+			extender_event_data_prepare(ic, NULL,
+					&event_data,
+					WDS_EXT_LINK_STATUS_UPDATE,
+					ni->ni_macaddr);
+			ieee80211_extender_send_event(primary_vap, &event_data, NULL);
+			ieee80211_extender_remove_peer_wds_info(ic, ni->ni_macaddr);
+		}
+	}
+
+	if (!IEEE80211_BA_IS_COMPLETE(ni->ni_ba_rx[IEEE80211_WDS_LINK_MAINTAIN_BA_TID].state) ||
+		!IEEE80211_BA_IS_COMPLETE(ni->ni_ba_tx[IEEE80211_WDS_LINK_MAINTAIN_BA_TID].state)) {
+
+		if (ni->ni_wds_ba_attempts++ >= IEEE80211_MAX_WDS_BA_ATTEMPTS) {
+			/* Still not fully established, send DELBA to peer */
+			ni->ni_wds_ba_attempts = 0;
+			ieee80211_node_ba_state_clear(ni);
+			ic->ic_setparam(ni, IEEE80211_PARAM_HT_DELBA,
+					IEEE80211_WDS_LINK_MAINTAIN_BA_TID, 0, 0);
+		}
+	} else {
+		ni->ni_wds_ba_attempts = 0;
+	}
+
+	if (ni->ni_ba_tx[IEEE80211_WDS_LINK_MAINTAIN_BA_TID].state == IEEE80211_BA_NOT_ESTABLISHED) {
+		ieee80211_node_tx_ba_set_state(ni, IEEE80211_WDS_LINK_MAINTAIN_BA_TID,
+						IEEE80211_BA_REQUESTED, 0);
+	}
+
+	ieee80211_tx_addba(ni);
+}
+
+/*
+ * This workqueue is called for any station that requires timeout processing.
+ * A node reference is taken before calling to prevent premature deletion.
+ * If the inact timer has not yet expired the station is probed with a null
+ * data probe (this can happen multiple times).  If the inact timer has expired, the node
+ * is removed.
+ *
+ * NOTE: every return path must balance the reference taken by
+ * ieee80211_timeout_station_sched() — either via ieee80211_free_node()
+ * here, or via the send paths that consume the reference.
+ */
+static void
+ieee80211_timeout_station_work(struct work_struct *work)
+{
+	struct ieee80211_node *ni = container_of(work, struct ieee80211_node, ni_inact_work);
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	/* VLAN group pseudo-nodes get no timeout handling at all */
+	if (qtn_vlan_is_group_addr(ni->ni_macaddr)) {
+		ieee80211_free_node(ni);
+		return;
+	}
+
+	/* Send dummy data packets to check if ACK can be received */
+	if ((IEEE80211_NODE_IS_TDLS_ACTIVE(ni) || IEEE80211_NODE_IS_TDLS_IDLE(ni)) &&
+				(ni->ni_inact <= IEEE80211_INACT_SEND_PKT_THRSH))
+		ieee80211_send_dummy_data(ni, vap, M_TX_DONE_IMM_INT);
+	/* WDS peers have their own handler and are never removed */
+	if (vap->iv_opmode == IEEE80211_M_WDS) {
+		ieee80211_timeout_station_work_wds(work);
+		return;
+	}
+
+
+	if (ni->ni_inact > 0) {
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			IEEE80211_NOTE(vap, IEEE80211_MSG_INACT | IEEE80211_MSG_NODE | IEEE80211_MSG_POWER,
+			ni, "%s", "probe station due to inactivity");
+			/* Either of these frees the node reference */
+			if (ni->ni_flags & IEEE80211_NODE_QOS) {
+				/* non-QoS frames to 3rd party QoS node (Intel) can cause a BA teardown */
+				ieee80211_send_qosnulldata(ni, WMM_AC_BK);
+			} else {
+				ieee80211_send_nulldata(ni);
+			}
+		} else {
+			ieee80211_free_node(ni);
+		}
+		return;
+	}
+
+	/* ni_inact hit zero: the station is considered gone */
+	IEEE80211_NOTE(ni->ni_vap,
+		IEEE80211_MSG_INACT | IEEE80211_MSG_NODE, ni,
+		"station timed out due to inactivity (refcnt %u)",
+		ieee80211_node_refcnt(ni));
+
+	ni->ni_vap->iv_stats.is_node_timeout++;
+
+	if (ni->ni_associd != 0 && (vap->iv_opmode != IEEE80211_M_STA ||
+			IEEE80211_NODE_IS_NONE_TDLS(ni))) {
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC, IEEE80211_REASON_ASSOC_EXPIRE);
+		ieee80211_dot11_msg_send(ni->ni_vap, (char *)ni->ni_macaddr,
+			d11_m[IEEE80211_DOT11_MSG_CLIENT_REMOVED],
+			d11_c[IEEE80211_DOT11_MSG_REASON_CLIENT_TIMEOUT],
+			-1, NULL, NULL, NULL);
+	}
+
+	/* TDLS peers go through their own teardown paths */
+	if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni)) {
+			enum ieee80211_tdls_operation operation = IEEE80211_TDLS_TEARDOWN;
+			if (ieee80211_tdls_send_event(ni, IEEE80211_EVENT_STATION_LOW_ACK, &operation))
+				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS %s: Send event %d failed\n", __func__, operation);
+	} else if (IEEE80211_NODE_IS_TDLS_IDLE(ni)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS %s: TDLS peer timed out due to inactivity (tdls_status %d)\n",
+				__func__, ni->tdls_status);
+		ieee80211_tdls_node_leave(vap, ni);
+	} else {
+		ieee80211_node_leave(ni);
+	}
+	ieee80211_free_node(ni);
+}
+
+/*
+ * Queue inactivity processing for 'ni' on the system workqueue.  A node
+ * reference is taken for the work item; if the work was already pending
+ * the extra reference is dropped immediately.
+ */
+static void
+ieee80211_timeout_station_sched(struct ieee80211_node *ni)
+{
+	ieee80211_ref_node(ni);
+	/* schedule_work() returns 0 when the item was already queued */
+	if (schedule_work(&ni->ni_inact_work) != 0)
+		return;
+	ieee80211_free_node(ni);
+}
+
+/* Returns 1 while CAC is still running on a DFS bss channel, else 0. */
+static int __inline ieee80211_wds_node_cac_check(struct ieee80211com *ic)
+{
+	struct ieee80211_channel *chan = ic->ic_bsschan;
+
+	if (chan == IEEE80211_CHAN_ANYC)
+		return 0;
+	if ((chan->ic_flags & IEEE80211_CHAN_DFS) == 0)
+		return 0;
+	return IEEE80211_IS_CHAN_CAC_IN_PROGRESS(chan) ? 1 : 0;
+}
+
+/*
+ * Per-node inactivity tick, called from ieee80211_timeout_stations()
+ * with the table lock held — all heavyweight work is therefore deferred
+ * through ieee80211_timeout_station_sched().  Skips the bss node, the
+ * vap's own address and blacklisted peers.  Activity is detected via
+ * the shared rx-packet/tx-ack counters; ni_inact is reloaded on
+ * activity and decremented otherwise.
+ */
+static void
+ieee80211_timeout_station(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+	int is_adhoc;
+	int su = STATS_SU;
+
+	if ((ni == ni->ni_vap->iv_bss) ||
+			IEEE80211_ADDR_EQ(ni->ni_vap->iv_myaddr, ni->ni_macaddr) ||
+			ieee80211_blacklist_check(ni)) {
+		return;
+	}
+
+	/*
+	 * Free fragment if not needed anymore (last fragment older than 1s).
+	 * XXX doesn't belong here
+	 */
+	if (ni->ni_rxfrag != NULL &&
+			time_after(jiffies, ni->ni_rxfragstamp + HZ)) {
+		dev_kfree_skb(ni->ni_rxfrag);
+		ni->ni_rxfrag = NULL;
+	}
+
+	IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_INACT | IEEE80211_MSG_NODE, ni,
+		"sta ni_inact=%u rx=%u/%u/%u ack=%u/%u/%u",
+		ni->ni_inact,
+		ni->ni_shared_stats->rx[su].pkts_cum, ni->rx_pkts,
+		(ni->ni_shared_stats->rx[su].pkts_cum - ni->rx_pkts),
+		ni->ni_shared_stats->tx[su].acks, ni->tx_acks,
+		(ni->ni_shared_stats->tx[su].acks - ni->tx_acks));
+
+	/*
+	 * WDS nodes are never timed out or removed, but this timeout station work mechanism is used
+	 * to establish a block ack if needed.
+	 */
+	if (ni->ni_vap->iv_opmode == IEEE80211_M_WDS &&
+			IEEE80211_ADDR_EQ(ni->ni_macaddr, ni->ni_vap->wds_mac)) {
+		/* hold the countdown while CAC keeps the channel silent */
+		if ((ni->ni_inact > 0) && !ieee80211_wds_node_cac_check(ic))
+			ni->ni_inact--;
+		ieee80211_timeout_station_sched(ni);
+		return;
+	}
+
+	if ((ni->ni_shared_stats->rx[su].pkts_cum != ni->rx_pkts) ||
+			(ni->ni_shared_stats->tx[su].acks != ni->tx_acks)) {
+		ni->ni_inact = ni->ni_inact_reload;
+	} else {
+		if (ni->ni_inact > 0)
+			ni->ni_inact--;
+	}
+	/* always reset the local counters in case the shared counters wrap */
+	ni->rx_pkts = ni->ni_shared_stats->rx[su].pkts_cum;
+	ni->tx_acks = ni->ni_shared_stats->tx[su].acks;
+
+	if (ni->ni_inact > 0) {
+		is_adhoc = (ic->ic_opmode == IEEE80211_M_IBSS ||
+				ic->ic_opmode == IEEE80211_M_AHDEMO);
+		/* probe stations nearing expiry (or always during OCAC) */
+		if ((ni->ni_associd != 0 || is_adhoc) &&
+				(ni->ni_inact <= ni->ni_vap->iv_inact_probe ||
+					ic->ic_ocac.ocac_running)) {
+			ieee80211_timeout_station_sched(ni);
+		}
+	} else {
+		ieee80211_timeout_station_sched(ni);
+	}
+}
+
+/*
+ * Process inactive stations and do related housekeeping.
+ * Actual processing is offloaded to a workqueue because many of the called functions
+ * cannot run at interrupt level.  This also keeps table parsing simple because nodes
+ * are not removed during the loop.
+ */
+static void
+ieee80211_timeout_stations(struct ieee80211_node_table *nt)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211com *ic = nt->nt_ic;
+
+	/* hold both scan and node locks so the list is stable for the walk */
+	IEEE80211_SCAN_LOCK_IRQ(nt);
+	IEEE80211_NODE_LOCK_IRQ(nt);
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		ieee80211_timeout_station(ic, ni);
+	}
+
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+	IEEE80211_SCAN_UNLOCK_IRQ(nt);
+}
+
+/*
+ * Send out null frame from stations periodically
+ * to check the validity of the connection.
+ * For every STA-mode vap in RUN state, age the AP (iv_bss) node's
+ * inactivity counter based on observed tx acks and transmit a null
+ * data frame to elicit fresh acks.
+ */
+static void
+ieee80211_timeout_ap(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap = NULL;
+	struct ieee80211_node *ni;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		KASSERT(vap != NULL, ("timeout ap, vap is invalid.\n"));
+		if ((vap->iv_opmode == IEEE80211_M_STA) &&
+		    (vap->iv_state == IEEE80211_S_RUN)) {
+			ni = vap->iv_bss;
+			/* NOTE(review): this reference does not appear to be
+			 * released in this function — presumably dropped by
+			 * ieee80211_send_nulldata() or its completion path;
+			 * confirm against that implementation */
+			ieee80211_ref_node(ni);
+
+			IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_INACT | IEEE80211_MSG_NODE, ni,
+				"ap ni_inact=%u ack=%u/%u/%u",
+				ni->ni_inact,
+				ni->ni_shared_stats->tx[STATS_SU].acks, ni->tx_acks,
+				(ni->ni_shared_stats->tx[STATS_SU].acks - ni->tx_acks));
+
+			/* new acks since last tick mean the AP link is alive */
+			if (ni->ni_shared_stats->tx[STATS_SU].acks != ni->tx_acks) {
+				ni->ni_inact = ni->ni_inact_reload;
+			} else if (ni->ni_inact > 0) {
+				ni->ni_inact--;
+			}
+			ni->tx_acks = ni->ni_shared_stats->tx[STATS_SU].acks;
+
+			ieee80211_send_nulldata(ni);
+		}
+	}
+}
+
+/*
+ * Decide whether SCS (self channel selection) may run in the current mode.
+ * Returns 0 (disable) for any 2.4GHz mode on dual-band (RFIC6) chips,
+ * 1 (allow) otherwise.
+ */
+int
+ieee80211_should_disable_scs(struct ieee80211com *ic)
+{
+	/* SCS doesn't support 2.4GHz on dual band (RFIC6) boards */
+	if (ic->ic_rf_chipid == CHIPID_DUAL) {
+		if ((ic->ic_curmode == IEEE80211_MODE_11B) ||
+				(ic->ic_curmode == IEEE80211_MODE_11G) ||
+				(ic->ic_curmode == IEEE80211_MODE_FH) ||
+				(ic->ic_curmode == IEEE80211_MODE_TURBO_G) ||
+				(ic->ic_curmode == IEEE80211_MODE_11NG) ||
+				(ic->ic_curmode == IEEE80211_MODE_11NG_HT40PM)) {
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * Enable or disable tx beamforming via qdrv parameters on the first vap.
+ * On enable, restores the saved sounding period (ic_vopt.bf) and sets the
+ * expansion matrix selector to 0x11; on disable, selector 0x10 and period 0.
+ */
+static void
+ieee80211_change_bf(struct ieee80211com *ic, bool enable)
+{
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	if (enable) {
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_TXBF_PERIOD, ic->ic_vopt.bf, NULL, 0);
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_EXP_MAT_SEL, 0x11, NULL, 0);
+	} else {
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_EXP_MAT_SEL, 0x10, NULL, 0);
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_TXBF_PERIOD, 0, NULL, 0);
+	}
+}
+
+/*
+ * Switch the "veriwave optimization" (vopt) mode on or off.
+ * Enabling turns off features that interfere with veriwave testing
+ * (beamforming, PPPC, BBF, airtime fairness, SCS, OCAC, QTM) and forces
+ * NON_HT_DUPLICATE format for MU NDPA; disabling restores the values
+ * previously saved in ic->ic_vopt.  All affected stations are reassociated
+ * at the end.  Always returns 0.
+ */
+static int
+ieee80211_enable_vopt(struct ieee80211com *ic, bool enable)
+{
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct net_device *dev = vap->iv_dev;
+	uint32_t value = 0;
+
+	ic->ic_vopt.cur_state = enable ?
+		IEEE80211_VOPT_ENABLED : IEEE80211_VOPT_DISABLED;
+
+	if (enable) {
+		/* disable bf */
+		ieee80211_change_bf(ic, false);
+
+		/* disable pppc (unless 11h+TPC owns power control) */
+		if (!((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)))
+			ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PPPC_SELECT, 0, NULL, 0);
+		ic->ic_pppc_select_enable = 0;
+
+		/* disable bbf */
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PS_CMD, 0, NULL, 0);
+
+		/* disable Airfair time */
+		ic->ic_airfair = 0;
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_AIRFAIR, 0, NULL, 0);
+
+		/* disable SCS due to fake CCA statistics under veriwave test */
+		value = IEEE80211_SCS_SET_ENABLE << 16 | 0;
+		if (ieee80211_param_scs_set(dev, vap, value) == 0)
+			ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_SCS, value, NULL, 0);
+
+		/* disable OCAC due to the same reason above */
+		value = IEEE80211_OCAC_SET_DISABLE << 16;
+		ieee80211_param_ocac_set(dev, vap, value);
+
+		/* disable QTM due to the same reason above, saving its state for restore */
+		if (ic->ic_vsp_get && ic->ic_vsp_set && ic->ic_vsp_get(ic, QVSP_CFG_ENABLED, &value) == 0) {
+			ic->ic_vopt.qtm = value;
+			ic->ic_vsp_set(ic, QVSP_CFG_ENABLED, 0);
+		}
+
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_VMODE, 0, NULL, 0);
+
+		/* use NON_HT_DUPLICATE format for MU NDPA */
+		ic->use_non_ht_duplicate_for_mu = 3;
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_USE_NON_HT_DUPLICATE_MU,
+						ic->use_non_ht_duplicate_for_mu, NULL, 0);
+	} else {
+		/* Restore config settings */
+		if (ic->ic_vopt.bf){
+			ieee80211_change_bf(ic, true);
+		}
+
+		if (ic->ic_vopt.pppc) {
+			ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PPPC_SELECT,
+					ic->ic_vopt.pppc, NULL, 0);
+			ic->ic_pppc_select_enable = ic->ic_vopt.pppc;
+		}
+
+		if (ic->ic_vopt.scs) {
+			value = IEEE80211_SCS_SET_ENABLE << 16 | ic->ic_vopt.scs;
+
+			if (ieee80211_param_scs_set(dev, vap, value) == 0)
+				ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_SCS, value, NULL, 0);
+		}
+
+		if (ic->ic_vopt.ocac) {
+			value = IEEE80211_OCAC_SET_ENABLE << 16 | ic->ic_ocac.ocac_cfg.ocac_chan_ieee;
+			ieee80211_param_ocac_set(dev, vap, value);
+		}
+
+		if (ic->ic_vopt.qtm && ic->ic_vsp_set) {
+			ic->ic_vsp_set(ic, QVSP_CFG_ENABLED, 1);
+		}
+
+		if (ic->ic_vopt.bbf) {
+			ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PS_CMD, ic->ic_vopt.bbf, NULL, 0);
+		}
+
+		if (ic->ic_vopt.airfair) {
+			ic->ic_airfair = ic->ic_vopt.airfair;
+			ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_AIRFAIR, ic->ic_vopt.airfair, NULL, 0);
+		}
+
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_VMODE,  ic->ic_curchan->ic_maxpower_normal, NULL, 0);
+
+		/* use VHT format for MU NDPA */
+		ic->use_non_ht_duplicate_for_mu = 0;
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_USE_NON_HT_DUPLICATE_MU,
+						ic->use_non_ht_duplicate_for_mu, NULL, 0);
+	}
+	/* kick all stations so they re-join under the new settings */
+	ieee80211_wireless_reassoc(vap, 0, 0);
+
+	return 0;
+}
+
+/*
+ * Node iterator callback: count associated stations, and how many of
+ * them are flagged QTN_OPTI_NODE, into the ieee80211_v_cnt passed as arg.
+ * The BSS node and our own address are excluded from the totals.
+ */
+static void
+ieee80211_v_node(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211_v_cnt *sta_cnt = arg;
+
+	if ((ni == ni->ni_vap->iv_bss) ||
+			IEEE80211_ADDR_EQ(ni->ni_vap->iv_myaddr, ni->ni_macaddr))
+	       return;
+
+	sta_cnt->total++;
+
+	if (ni->ni_qtn_flags & QTN_OPTI_NODE)
+		sta_cnt->v++;
+}
+
+/*
+ * Reconcile the configured vopt state (ic_vopt.state) with the current
+ * one (ic_vopt.cur_state).  In AUTO mode, vopt is enabled only when every
+ * associated station is an "opti" node; v_wait throttles re-enabling
+ * after a forced disable triggered reassociation.
+ */
+static void
+ieee80211_vopt(struct ieee80211com *ic)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_v_cnt sta_cnt;
+	uint8_t cur_state = ic->ic_vopt.cur_state;
+
+	/* nothing to do when desired and current state already agree */
+	if (cur_state == ic->ic_vopt.state)
+		return;
+
+	sta_cnt.total = 0;
+	sta_cnt.v = 0;
+	ieee80211_iterate_nodes(nt,
+			ieee80211_v_node, &sta_cnt, 0);
+
+	switch (ic->ic_vopt.state) {
+	case IEEE80211_VOPT_DISABLED:
+		ieee80211_enable_vopt(ic, false);
+		v_wait = 0;
+		break;
+	case IEEE80211_VOPT_ENABLED:
+		ieee80211_enable_vopt(ic, true);
+		v_wait = 0;
+		break;
+	case IEEE80211_VOPT_AUTO:
+		if (!v_wait) {
+			/* all stations are opti nodes: turn vopt on */
+			if ((sta_cnt.total == sta_cnt.v) &&
+					(cur_state != IEEE80211_VOPT_ENABLED))
+				ieee80211_enable_vopt(ic, true);
+			/* mixed population: turn vopt off and hold off re-enabling */
+			if ((sta_cnt.total != sta_cnt.v) &&
+					(cur_state != IEEE80211_VOPT_DISABLED)) {
+				ieee80211_enable_vopt(ic, false);
+				v_wait = V_WAIT_REASSOC;
+			}
+		} else
+			v_wait --;
+		break;
+	}
+}
+
+/*
+ * Per-ieee80211com inactivity timer callback.
+ * Runs station inactivity checks, AP keepalive, and vopt reconciliation,
+ * then re-arms itself IEEE80211_INACT_WAIT seconds in the future.
+ */
+static void
+ieee80211_node_timeout(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *) arg;
+
+	ieee80211_timeout_stations(&ic->ic_sta);
+	ieee80211_timeout_ap(ic);
+	ieee80211_vopt(ic);
+
+	ic->ic_inact.expires = jiffies + IEEE80211_INACT_WAIT * HZ;
+	add_timer(&ic->ic_inact);
+}
+
+/*
+ * Apply f(arg, ni) to every node in the table, regardless of owning device.
+ * Thin wrapper over ieee80211_iterate_dev_nodes() with dev == NULL.
+ */
+void
+ieee80211_iterate_nodes(struct ieee80211_node_table *nt, ieee80211_iter_func *f,
+			void *arg, int ignore_blacklisted)
+{
+	ieee80211_iterate_dev_nodes(NULL, nt, f, arg, ignore_blacklisted);
+}
+EXPORT_SYMBOL(ieee80211_iterate_nodes);
+
+/*
+ * Apply f(arg, ni) to every node in the table belonging to dev (or to any
+ * device when dev is NULL), optionally skipping blacklisted nodes.
+ * The node lock is dropped around each callback (the node is held by a
+ * reference instead), so the walk restarts from the head after every call;
+ * ni_scangen marks nodes already visited in this pass to guarantee each
+ * is processed at most once.
+ */
+void
+ieee80211_iterate_dev_nodes(struct net_device *dev, struct ieee80211_node_table *nt,
+			    ieee80211_iter_func *f, void *arg, int ignore_blacklisted)
+{
+	struct ieee80211_node *ni;
+	u_int gen;
+
+	IEEE80211_SCAN_LOCK_BH(nt);
+	gen = ++nt->nt_scangen;
+restart:
+	IEEE80211_NODE_LOCK(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if ((dev != NULL && ni->ni_vap->iv_dev != dev) ||
+		    (ignore_blacklisted && (ni->ni_blacklist_timeout > 0))) {
+			continue;
+		}
+
+		if (ni->ni_scangen != gen) {
+			ni->ni_scangen = gen;
+			ieee80211_ref_node(ni);
+			IEEE80211_NODE_UNLOCK(nt);
+			/* callback runs without the node lock held */
+			(*f)(arg, ni);
+			ieee80211_free_node(ni);
+			goto restart;
+		}
+	}
+	IEEE80211_NODE_UNLOCK(nt);
+
+	IEEE80211_SCAN_UNLOCK_BH(nt);
+}
+EXPORT_SYMBOL(ieee80211_iterate_dev_nodes);
+
+/*
+ * Print a human-readable dump of one node's state (addresses, sequence
+ * numbers, channel, rates, TDLS status) to the console.  Debug aid only.
+ * The nt parameter is unused; it exists so this matches ieee80211_iter_func.
+ */
+void
+ieee80211_dump_node(struct ieee80211_node_table *nt, struct ieee80211_node *ni)
+{
+	int i;
+
+	printf("Node 0x%p: mac %s refcnt %d\n", ni,
+		ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni));
+	printf("    scangen %u authmode %u flags 0x%x\n",
+		ni->ni_scangen, ni->ni_authmode, ni->ni_flags);
+	printf("    associd 0x%x txpower %u vlan %u\n",
+		ni->ni_associd, ni->ni_txpower, ni->ni_vlan);
+	printf ("    rxfragstamp %lu\n", ni->ni_rxfragstamp);
+	/* one tx/rx sequence line per TID slot */
+	for (i = 0; i < 17; i++) {
+		printf("    %d: txseq %u rxseq %u fragno %u\n", i,
+		       ni->ni_txseqs[i],
+		       ni->ni_rxseqs[i] >> IEEE80211_SEQ_SEQ_SHIFT,
+		       ni->ni_rxseqs[i] & IEEE80211_SEQ_FRAG_MASK);
+	}
+	printf("    rstamp %u rssi %u intval %u capinfo 0x%x\n",
+		ni->ni_rstamp, ni->ni_rssi, ni->ni_intval, ni->ni_capinfo);
+	printf("    bssid %s essid \"%.*s\" channel %u:0x%x\n",
+		ether_sprintf(ni->ni_bssid),
+		ni->ni_esslen, ni->ni_essid,
+		ni->ni_chan != IEEE80211_CHAN_ANYC ?
+			ni->ni_chan->ic_freq : IEEE80211_CHAN_ANY,
+		ni->ni_chan != IEEE80211_CHAN_ANYC ? ni->ni_chan->ic_flags : 0);
+	printf("    inact %u txrate %u tdls %d\n",
+		ni->ni_inact, ni->ni_txrate, ni->tdls_status);
+
+}
+
+/*
+ * Dump every non-blacklisted node in the table via ieee80211_dump_node().
+ * Debug aid only.
+ */
+void
+ieee80211_dump_nodes(struct ieee80211_node_table *nt)
+{
+	ieee80211_iterate_nodes(nt,
+		(ieee80211_iter_func *) ieee80211_dump_node, nt, 1);
+}
+EXPORT_SYMBOL(ieee80211_dump_nodes);
+
+#ifdef CONFIG_QVSP
+/*
+ * Node iterator callback: send a VSP action frame to each associated
+ * Quantenna peer (those carrying a QTN assoc IE).  arg encodes the
+ * action argument passed through to ic_send_mgmt as an int.
+ */
+void
+ieee80211_node_vsp_send_action(void *arg, struct ieee80211_node *ni)
+{
+	if ((ni->ni_associd != 0) && ni->ni_qtn_assoc_ie) {
+		ni->ni_ic->ic_send_mgmt(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)arg);
+	}
+}
+EXPORT_SYMBOL(ieee80211_node_vsp_send_action);
+#endif
+
+/*
+ * Handle a station joining an 11g network.
+ * Maintains the long-slot and non-ERP station counts, enables protection
+ * and Barker preamble as required, and schedules an ERP element update
+ * when the first non-ERP station appears.  Caller must hold the ic lock.
+ */
+static void
+ieee80211_node_join_11g(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	IEEE80211_LOCK_ASSERT(ic);
+
+	KASSERT(IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan),
+	     ("not in 11g, bss %u:0x%x, curmode %u", ic->ic_bsschan->ic_freq,
+	      ic->ic_bsschan->ic_flags, ic->ic_curmode));
+
+	/*
+	 * If STA isn't capable of short slot time, bump
+	 * the count of long slot time stations.
+	 */
+	if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) == 0) {
+		ic->ic_longslotsta++;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+			"station needs long slot time, count %d",
+			ic->ic_longslotsta);
+	}
+
+	/*
+	 * If the new station is not an ERP station
+	 * then bump the counter and enable protection
+	 * if configured.
+	 */
+	if (!ieee80211_iserp_rateset(ic, &ni->ni_rates)) {
+		ic->ic_nonerpsta++;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+			"station is !ERP, %d non-ERP stations associated",
+			ic->ic_nonerpsta);
+		/*
+		 * If protection is configured, enable it.
+		 */
+		if (IEEE80211_BG_PROTECT_ENABLED(ic)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+				"%s: enable use of protection\n", __func__);
+			ic->ic_flags |= IEEE80211_F_USEPROT;
+		}
+		/*
+		 * If station does not support short preamble
+		 * then we must enable use of Barker preamble.
+		 */
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) == 0) {
+			IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+				"%s", "station needs long preamble");
+			ic->ic_flags |= IEEE80211_F_USEBARKER;
+			ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+		}
+
+		/* Update ERP element if this is first non ERP station */
+		if (IEEE80211_BG_PROTECT_ENABLED(ic) && ic->ic_nonerpsta == 1) {
+			ic->ic_flags_ext |= IEEE80211_FEXT_ERPUPDATE;
+
+			/* tell Muc to use cts-to-self mechanism */
+			ic->ic_set_11g_erp(vap, 1);
+		}
+	} else {
+		ni->ni_flags |= IEEE80211_NODE_ERP;
+	}
+
+	/*
+	 * Disable use of short slot time in following condition.
+	 * Note that the actual switch over to long slot time use may not
+	 * occur until the next beacon transmission (per sec. 7.3.1.4 of 11g).
+	 */
+	if ((ic->ic_longslotsta > 0) || (ic->ic_nonerpsta > 0)) {
+		/* XXX vap's w/ conflicting needs won't work */
+		if (!IEEE80211_IS_CHAN_108G(ic->ic_bsschan)) {
+			/*
+			 * Don't force slot time when switched to turbo
+			 * mode as non-ERP stations won't be present; this
+			 * need only be done when on the normal G channel.
+			 */
+			ieee80211_set_shortslottime(ic, 0);
+		}
+	}
+}
+
+/*
+ * Tear down both TX (dir 0) and RX (dir 1) block-ack agreements for
+ * TIDs 0..6 on the node.
+ * NOTE(review): the loop stops at TID 6; TID 7 is untouched — confirm
+ * whether that is intentional.
+ */
+void
+ieee80211_node_ba_state_clear(struct ieee80211_node *ni)
+{
+	uint8_t i = 0;
+	for (i = 0; i < 7; i++) {
+		ieee80211_node_ba_del(ni, i, 0, IEEE80211_REASON_UNSPECIFIED);
+		ieee80211_node_ba_del(ni, i, 1, IEEE80211_REASON_UNSPECIFIED);
+	}
+}
+
+/*
+ * Establish implicit (non-negotiated) block-ack agreements with a peer.
+ * For each TID 0..6, the bit in our vap's iv_implicit_ba mask creates a
+ * TX-side BA and the bit in the peer's advertised mask creates an RX-side
+ * BA, both marked ESTABLISHED with the peer's advertised window size.
+ */
+void
+ieee80211_node_implicit_ba_setup(struct ieee80211_node *ni)
+{
+	int i = 0;
+	u_int8_t our_ba = ni->ni_vap->iv_implicit_ba;
+	u_int8_t their_ba = ni->ni_implicit_ba;
+	u_int16_t size = ni->ni_implicit_ba_size;
+
+	for (i = 0; i < 7; i++) {
+		if ((our_ba >> i) & 0x1) {
+			/* Add the BA for TX */
+			ni->ni_ba_tx[i].state = IEEE80211_BA_ESTABLISHED;
+			ni->ni_ba_tx[i].type = 1;
+			ni->ni_ba_tx[i].buff_size = size;
+			ni->ni_ba_tx[i].timeout = 0;
+			ni->ni_ba_tx[i].frag = 0;
+			ni->ni_ba_tx[i].seq = 0;
+			printk(KERN_WARNING "%s: [%s] implicit TX BA for TID %d, win_size=%d\n",
+				ni->ni_vap->iv_dev->name, ether_sprintf(ni->ni_macaddr), i, (int)size);
+		}
+		if ((their_ba >> i) & 0x1) {
+			/* Add the BA for RX */
+			ni->ni_ba_rx[i].state = IEEE80211_BA_ESTABLISHED;
+			ni->ni_ba_rx[i].type = 1;
+			ni->ni_ba_rx[i].buff_size = size;
+			ni->ni_ba_rx[i].timeout = 0;
+			ni->ni_ba_rx[i].frag = 0;
+			ni->ni_ba_rx[i].seq = 0;
+			printk(KERN_WARNING "%s: [%s] implicit RX BA for TID %d, win_size=%d\n",
+				ni->ni_vap->iv_dev->name, ether_sprintf(ni->ni_macaddr), i, (int)size);
+		}
+	}
+}
+
+/*
+ * Decide whether to turn off/on reception of control packets.
+ * If third party peers are present: Allow control frames
+ */
+void ieee80211_set_recv_ctrlpkts(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* non-Quantenna stations present: pass control frames up (filter=1) */
+	if (ic->ic_nonqtn_sta) {
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_RX_CTRL_FILTER,
+					1, NULL, 0);
+	} else {
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_RX_CTRL_FILTER,
+					0, NULL, 0);
+	}
+}
+
+/*
+ * Allocate a free association ID for the node and store it (with the two
+ * top bits set, per the 802.11 AID encoding) in ni->ni_associd.
+ * Search starts after the last AID handed out so IDs rotate rather than
+ * being immediately reused.  Returns 0 on success, -1 when the AID space
+ * is exhausted.
+ */
+int ieee80211_aid_acquire(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+	/* rotation point shared across all calls; protected by the ic lock */
+	static uint16_t last_used_aid = 0;
+	int aid;
+	int ret = 0;
+
+	IEEE80211_LOCK_IRQ(ic);
+
+	for (aid = (last_used_aid + 1) % QTN_NODE_TBL_SIZE_LHOST;
+			aid != last_used_aid;
+			aid = ((aid + 1) % QTN_NODE_TBL_SIZE_LHOST)) {
+
+		/* AID 0 is reserved for the BSS */
+		if (aid == 0) {
+			continue;
+		}
+
+		/* in STA mode, never collide with the AID our AP assigned us */
+		if ((ni->ni_vap->iv_opmode == IEEE80211_M_STA) &&
+			(aid == IEEE80211_AID(ni->ni_vap->iv_bss->ni_associd)))
+			continue;
+
+		if (!IEEE80211_AID_ISSET(ic, aid)) {
+			break;
+		}
+	}
+
+	if (aid == last_used_aid) {
+		/* wrapped all the way around without finding a free AID */
+		ret = -1;
+	} else {
+		last_used_aid = aid;
+		ni->ni_associd = aid | 0xc000;
+		IEEE80211_AID_SET(ic, ni->ni_associd);
+	}
+
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_aid_acquire);
+
+/*
+ * Complete a station's (re)association: allocate an AID if needed, update
+ * association counters and 11g bookkeeping, send the association response
+ * (resp), notify the authenticator and listeners, and refresh beacons
+ * whose contents depend on the station population.
+ */
+void
+ieee80211_node_join(struct ieee80211_node *ni, int resp)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap, *tmp_vap;
+	int newassoc;
+	u_int8_t beacon_update_required = 0;
+
+	if (ni->ni_associd == 0) {
+		/* no AID yet: this is a brand-new association */
+		if (ieee80211_aid_acquire(ic, ni)) {
+			/* AID space exhausted: refuse and tear the node down */
+			IEEE80211_SEND_MGMT(ni, resp,
+				IEEE80211_REASON_ASSOC_TOOMANY);
+			ieee80211_node_leave(ni);
+			ieee80211_off_channel_resume(vap);
+			return;
+		}
+
+		IEEE80211_LOCK_IRQ(ic);
+		ieee80211_sta_assocs_inc(vap, __func__);
+		ieee80211_nonqtn_sta_join(vap, ni, __func__);
+
+		if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_ATHC_TURBOP))
+			ic->ic_dt_sta_assoc++;
+
+		if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan))
+			ieee80211_node_join_11g(ni);
+		IEEE80211_UNLOCK_IRQ(ic);
+
+		newassoc = 1;
+	} else {
+		newassoc = 0;
+	}
+
+	printk(KERN_WARNING "%s: %s %s, tot=%u/%u\n",
+		vap->iv_dev->name, ether_sprintf(ni->ni_macaddr),
+		ic->ic_ocac.ocac_running ? "connected" : "associated",
+		ic->ic_sta_assoc, ic->ic_nonqtn_sta);
+
+	/* 802.1X stations stay unauthorized until the authenticator says so */
+	if (ni->ni_authmode != IEEE80211_AUTH_8021X) {
+		ieee80211_node_authorize(ni);
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+		if (vap->bsa_status == BSA_STATUS_ACTIVE)
+			ieee80211_bsa_connect_complete_event_send(vap,ni);
+#endif
+	}
+
+	IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, ni,
+		"station %sassociated at aid %d: %s preamble, %s slot time"
+		"%s%s%s%s%s%s%s",
+		newassoc ? "" : "re",
+		IEEE80211_NODE_AID(ni),
+		(ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+		(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) ? "short" : "long",
+		ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long",
+		ic->ic_flags & IEEE80211_F_USEPROT ? ", protection" : "",
+		ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "",
+		IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_TURBOP) ?
+			", turbo" : "",
+		IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_COMP) ?
+			", compression" : "",
+		IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF) ?
+			", fast-frames" : "",
+		IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_XR) ? ", XR" : "",
+		IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_AR) ? ", AR" : ""
+	);
+
+	if(vap->iv_pmf) {
+		ni->ni_sa_query_timeout = 0;
+	} else {
+		/* vap is not PMF capable, reset any flags */
+		ni->ni_rsn.rsn_caps &= ~(RSN_CAP_MFP_REQ | RSN_CAP_MFP_CAP);
+	}
+	/* give driver a chance to set up state like ni_txrate */
+	if (ic->ic_newassoc != NULL)
+		ic->ic_newassoc(ni, newassoc);
+	ni->ni_inact_reload = vap->iv_inact_auth;
+	ni->ni_inact = ni->ni_inact_reload;
+
+	IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_SUCCESS);
+	/* tell the authenticator about new station */
+	if (vap->iv_auth->ia_node_join != NULL)
+		vap->iv_auth->ia_node_join(ni);
+	indicate_association();
+
+	ieee80211_notify_node_join(ni, newassoc);
+
+	/* For AP mode, record the start time of association with STA */
+	ni->ni_start_time_assoc = get_jiffies_64();
+
+	/* Update beacon if required */
+	if (newassoc) {
+		beacon_update_required = 1;
+
+		/* first non-HT station (or pending ERP update) affects beacon content */
+		if ((ni->ni_flags & IEEE80211_NODE_HT) != IEEE80211_NODE_HT) {
+			if (ic->ic_non_ht_sta == 0 ||
+				(ic->ic_flags_ext & IEEE80211_FEXT_ERPUPDATE))
+				beacon_update_required = 1;
+
+			ic->ic_non_ht_sta++;
+		} else {
+			/* track HT stations limited to 20MHz when we support 40MHz */
+			if ((ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) &&
+				!(ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40)) {
+
+				if(ic->ic_ht_20mhz_only_sta == 0)
+					beacon_update_required = 1;
+
+				ic->ic_ht_20mhz_only_sta++;
+			}
+		}
+
+		/* enough Quantenna stations: enable peer RTS and refresh all AP beacons */
+		if ((ic->ic_peer_rts_mode == IEEE80211_PEER_RTS_PMP) &&
+			((ic->ic_sta_assoc - ic->ic_nonqtn_sta) >= IEEE80211_MAX_STA_CCA_ENABLED)) {
+
+			ic->ic_peer_rts = 1;
+			TAILQ_FOREACH(tmp_vap, &ic->ic_vaps, iv_next) {
+				if (tmp_vap->iv_opmode != IEEE80211_M_HOSTAP)
+					continue;
+
+				if (tmp_vap->iv_state != IEEE80211_S_RUN)
+					continue;
+
+				if (tmp_vap == vap) {
+					/* own vap is updated once, below */
+					beacon_update_required = 1;
+					continue;
+				}
+
+				ic->ic_beacon_update(tmp_vap);
+			}
+		}
+		/* Update beacon */
+		if(beacon_update_required) {
+			ic->ic_beacon_update(vap);
+		}
+
+                ic->ic_pm_reason = IEEE80211_PM_LEVEL_NODE_JOIN;
+
+		ieee80211_pm_queue_work(ic);
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+		{
+			/* seed the bridge forwarding table with the new station's MAC */
+			struct net_bridge_port *br_port = get_br_port(vap->iv_dev);
+			if (br_fdb_update_const_hook && br_port) {
+				br_fdb_update_const_hook(br_port->br, br_port,
+					ni->ni_macaddr, 0, 0, IEEE80211_NODE_IDX_MAP(ni->ni_node_idx));
+			}
+		}
+#endif
+	}
+}
+
+/*
+ * Handle a station leaving an 11g network.
+ * Reverses the bookkeeping done in ieee80211_node_join_11g(): decrements
+ * the long-slot and non-ERP counters and, when the last such station is
+ * gone, disables protection, restores short preamble/slot time, and marks
+ * the ERP element for update.  Caller must hold the ic lock.
+ */
+static void
+ieee80211_node_leave_11g(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	IEEE80211_LOCK_ASSERT(ic);
+
+	KASSERT(IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan),
+		("not in 11g, bss %u:0x%x, curmode %u",
+		ic->ic_bsschan->ic_freq, ic->ic_bsschan->ic_flags,
+		ic->ic_curmode));
+
+	/*
+	 * If a long slot station do the slot time bookkeeping.
+	 */
+	if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) == 0) {
+		/* this can be 0 on mode changes from B -> G */
+		if (ic->ic_longslotsta > 0)
+			ic->ic_longslotsta--;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+			"long slot time station leaves, count now %d",
+			ic->ic_longslotsta);
+	}
+	/*
+	 * If a non-ERP station do the protection-related bookkeeping.
+	 */
+	if ((ni->ni_flags & IEEE80211_NODE_ERP) == 0) {
+		/* this can be 0 on mode changes from B -> G */
+		if (ic->ic_nonerpsta > 0)
+			ic->ic_nonerpsta--;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+			"non-ERP station leaves, count now %d", ic->ic_nonerpsta);
+		if (IEEE80211_BG_PROTECT_ENABLED(ic) && ic->ic_nonerpsta == 0) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+				"%s: disable use of protection\n", __func__);
+			ic->ic_flags &= ~IEEE80211_F_USEPROT;
+			/* XXX verify mode? */
+			if (ic->ic_caps & IEEE80211_C_SHPREAMBLE) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+					"%s: re-enable use of short preamble\n",
+					__func__);
+				ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+				ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+			}
+			ic->ic_set_11g_erp(vap, 0);
+			ic->ic_flags_ext |= IEEE80211_FEXT_ERPUPDATE;
+		}
+	}
+
+	if (!ic->ic_longslotsta && !ic->ic_nonerpsta) {
+		/*
+		 * Re-enable use of short slot time if supported
+		 * and not operating in IBSS mode (per spec).
+		 */
+		if ((ic->ic_caps & IEEE80211_C_SHSLOT) &&
+		    vap->iv_opmode != IEEE80211_M_IBSS) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+				"%s: re-enable use of short slot time\n",
+				__func__);
+			ieee80211_set_shortslottime(ic, 1);
+		}
+	}
+}
+
+/*
+ * Undo the association-time state for a departing station: notify the
+ * authenticator, release the AID and node index, fix up association and
+ * HT/20MHz-only/11g counters, re-evaluate peer-RTS, and clean up station
+ * crypto/BA state via ieee80211_sta_leave().
+ * Returns non-zero when the caller should refresh the beacon.
+ */
+static u_int8_t
+ieee80211_node_assoc_cleanup(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	u_int8_t beacon_update_required = 0;
+	struct ieee80211vap *tmp_vap;
+
+	/*
+	 * Tell the authenticator the station is leaving.
+	 * Note that we must do this before yanking the
+	 * association id as the authenticator uses the
+	 * associd to locate its state block.
+	 */
+	if (vap->iv_auth->ia_node_leave != NULL) {
+		vap->iv_auth->ia_node_leave(ni);
+	}
+
+	ieee80211_notify_sta_stats(ni);
+
+	IEEE80211_LOCK_IRQ(ic);
+	if (IEEE80211_AID(ni->ni_associd) != 0) {
+		ieee80211_aid_remove(ni);
+		ieee80211_idx_remove(ni);
+		if (IEEE80211_NODE_IS_NONE_TDLS(ni)) {
+			ieee80211_sta_assocs_dec(vap, __func__);
+			if (ic->ic_sta_assoc == 0)
+				indicate_disassociation();
+		}
+		if (vap->iv_opmode == IEEE80211_M_WDS) {
+			vap->iv_vapnode_idx = 0;
+			ic->ic_wds_links--;
+		}
+	}
+
+	if ((vap->iv_opmode != IEEE80211_M_WDS) &&
+			IEEE80211_NODE_IS_NONE_TDLS(ni)) {
+		ieee80211_nonqtn_sta_leave(vap, ni, __func__);
+	}
+
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP) ||
+			(vap->iv_opmode == IEEE80211_M_IBSS)) {
+		beacon_update_required = 1;
+
+		/* mirror the join-side accounting for non-HT / 20MHz-only stations */
+		if ((ni->ni_flags & IEEE80211_NODE_HT) != IEEE80211_NODE_HT) {
+			ic->ic_non_ht_sta--;
+			if (ic->ic_non_ht_sta == 0) {
+				beacon_update_required = 1;
+			}
+		} else {
+			if ((ic->ic_htcap.cap  & IEEE80211_HTCAP_C_CHWIDTH40) &&
+				!(ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40)) {
+				ic->ic_ht_20mhz_only_sta--;
+
+				if (ic->ic_ht_20mhz_only_sta == 0) {
+					beacon_update_required = 1;
+				}
+			}
+		}
+	}
+
+	/* too few Quantenna stations remain: turn peer RTS back off */
+	if ((ic->ic_peer_rts_mode == IEEE80211_PEER_RTS_PMP) &&
+		((ic->ic_sta_assoc - ic->ic_nonqtn_sta) < IEEE80211_MAX_STA_CCA_ENABLED)) {
+
+		ic->ic_peer_rts = 0;
+		TAILQ_FOREACH(tmp_vap, &ic->ic_vaps, iv_next) {
+			if (tmp_vap->iv_opmode != IEEE80211_M_HOSTAP)
+				continue;
+
+			if (tmp_vap->iv_state != IEEE80211_S_RUN)
+				continue;
+
+			if (tmp_vap == vap) {
+				/* own vap's beacon is refreshed by the caller */
+				beacon_update_required = 1;
+				continue;
+			}
+
+			ic->ic_beacon_update(tmp_vap);
+		}
+	}
+	if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_ATHC_TURBOP))
+		ic->ic_dt_sta_assoc--;
+
+	if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan)) {
+		ieee80211_node_leave_11g(ni);
+	}
+
+	/* A non-ERP station leaving is accounted in ieee80211_node_leave_11g(),
+	   which sets IEEE80211_FEXT_ERPUPDATE; propagate it into a beacon update */
+	if (ic->ic_flags_ext & IEEE80211_FEXT_ERPUPDATE) {
+		beacon_update_required = 1;
+	}
+
+
+	IEEE80211_UNLOCK_IRQ(ic);
+	/*
+	 * Cleanup station state.  In particular clear various
+	 * state that might otherwise be reused if the node
+	 * is reused before the reference count goes to zero
+	 * (and memory is reclaimed).
+	 */
+	ieee80211_sta_leave(ni);
+
+	return beacon_update_required;
+}
+
+/*
+ * Remove the node from node table on Blacklist timeout.
+ * If the node is still in a table, drop its WDS address entry and reclaim
+ * it under the node lock; otherwise just release the reference.
+ */
+void ieee80211_remove_node_blacklist_timeout(struct ieee80211_node *ni)
+{
+	struct ieee80211_node_table *nt = ni->ni_table;
+
+	if (nt != NULL) {
+		ieee80211_remove_wds_addr(nt,ni->ni_macaddr);
+		IEEE80211_NODE_LOCK_IRQ(nt);
+		ieee80211_node_reclaim(nt, ni);
+		IEEE80211_NODE_UNLOCK_IRQ(nt);
+	} else {
+		ieee80211_free_node(ni);
+	}
+}
+EXPORT_SYMBOL(ieee80211_remove_node_blacklist_timeout);
+
+/*
+ * Handle bookkeeping for a station/neighbor leaving
+ * the bss when operating in ap, wds or adhoc modes.
+ * Cancels pending per-node work (power-save queue, measurements, mgmt
+ * retries), runs association cleanup when the node held an AID, removes
+ * the node from its table (unless blacklisted), and refreshes the beacon
+ * if the departure changed its contents.
+ */
+void
+ieee80211_node_leave(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	u_int8_t beacon_update_required = 0;
+
+	/* remove pending entry in queue */
+	ieee80211_ppqueue_remove_node_leave(&vap->iv_ppqueue, ni);
+	/* abort any in-flight measurement that targeted this node */
+	if ((ic->ic_measure_info.ni == ni) && (ic->ic_measure_info.status == MEAS_STATUS_RUNNING)) {
+		 ic->ic_measure_info.status = MEAS_STATUS_DISCRAD;
+	}
+
+	/* Tell the qdrv */
+	ic->ic_node_auth_state_change(ni, 0);
+
+	ieee80211_scs_clean_stats(ic, IEEE80211_SCS_STATE_MEASUREMENT_CHANGE_CLEAN, 0);
+
+	IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, ni,
+		"station with aid %d leaves (refcnt %u)",
+		IEEE80211_NODE_AID(ni), ieee80211_node_refcnt(ni));
+
+	if (ni->ni_associd != 0 && ((vap->iv_opmode != IEEE80211_M_STA) ||
+			!IEEE80211_NODE_IS_NONE_TDLS(ni))) {
+		beacon_update_required = ieee80211_node_assoc_cleanup(ni);
+	}
+
+	printk(KERN_WARNING "%s: %s %s, tot=%u/%u\n",
+		vap->iv_dev->name, ether_sprintf(ni->ni_macaddr),
+		ic->ic_ocac.ocac_running ? "disconnected" : "disassociated",
+		ic->ic_sta_assoc, ic->ic_nonqtn_sta);
+
+	/* drop any management-frame retry state aimed at this node */
+	if (ni == vap->iv_mgmt_retry_ni) {
+		vap->iv_mgmt_retry_ni = NULL;
+		vap->iv_mgmt_retry_cnt = 0;
+	}
+
+	/*
+	 * Remove the node from any table it's recorded in and
+	 * drop the caller's reference.  Removal from the table
+	 * is important to ensure the node is not reprocessed
+	 * for inactivity.
+	 */
+	if (ni->ni_blacklist_timeout == 0) {
+		ieee80211_remove_node_blacklist_timeout(ni);
+	}
+
+	/*
+	 * Update beacon
+	 * Anyway, check mode before beacon update is good
+	 */
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) &&
+		beacon_update_required &&
+		(vap->iv_state == IEEE80211_S_RUN)) {
+		ic->ic_beacon_update(vap);
+	}
+
+        ic->ic_pm_reason = IEEE80211_PM_LEVEL_NODE_LEFT;
+
+	ieee80211_pm_queue_work(ic);
+}
+EXPORT_SYMBOL(ieee80211_node_leave);
+
+/*
+ * Return an average RSSI for the interface, with the sample set chosen by
+ * operating mode: IBSS neighbors, all ad-hoc demo neighbors, all associated
+ * stations (AP mode), or each vap's BSS node (STA/monitor/default).
+ * NZ() guards against division by zero when no samples were collected.
+ */
+u_int8_t
+ieee80211_getrssi(struct ieee80211com *ic)
+{
+#define	NZ(x)	((x) == 0 ? 1 : (x))
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211vap *vap;
+	u_int32_t rssi_samples, rssi_total;
+	struct ieee80211_node *ni;
+
+	rssi_total = 0;
+	rssi_samples = 0;
+	switch (ic->ic_opmode) {
+	case IEEE80211_M_IBSS:		/* average of all ibss neighbors */
+		/* XXX locking */
+		TAILQ_FOREACH(ni, &nt->nt_node, ni_list)
+			if (ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) {
+				rssi_samples++;
+				rssi_total += ic->ic_node_getrssi(ni);
+			}
+		break;
+	case IEEE80211_M_AHDEMO:	/* average of all neighbors */
+		/* XXX locking */
+		TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+			/* skip our own node */
+			if (memcmp(ni->ni_vap->iv_myaddr, ni->ni_macaddr,
+						IEEE80211_ADDR_LEN)!=0) {
+				rssi_samples++;
+				rssi_total += ic->ic_node_getrssi(ni);
+			}
+		}
+		break;
+	case IEEE80211_M_HOSTAP:	/* average of all associated stations */
+		/* XXX locking */
+		TAILQ_FOREACH(ni, &nt->nt_node, ni_list)
+			if (IEEE80211_AID(ni->ni_associd) != 0) {
+				rssi_samples++;
+				rssi_total += ic->ic_node_getrssi(ni);
+			}
+		break;
+	case IEEE80211_M_MONITOR:	/* XXX */
+	case IEEE80211_M_STA:		/* use stats from associated ap */
+	default:
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+			if (vap->iv_bss != NULL) {
+				rssi_samples++;
+				rssi_total += ic->ic_node_getrssi(vap->iv_bss);
+			}
+		break;
+	}
+	return rssi_total / NZ(rssi_samples);
+#undef NZ
+}
+EXPORT_SYMBOL(ieee80211_getrssi);
+
+/*
+ * Set the TX block-ack state for one TID under the BA seqlock.
+ * A non-zero delay arms state_deadline at jiffies + delay; zero disarms
+ * it.  A deadline that lands exactly on 0 is bumped by one tick because
+ * 0 is the "no deadline" sentinel.
+ */
+void ieee80211_node_tx_ba_set_state(struct ieee80211_node *ni, uint8_t tid, enum ieee80211_ba_state state, unsigned delay)
+{
+	struct ieee80211_ba_tid *ba = &ni->ni_ba_tx[tid];
+
+	write_seqlock_bh(&ba->state_lock);
+
+	ba->state = state;
+	ba->state_deadline = 0;
+	if (delay) {
+		ba->state_deadline = jiffies + delay;
+		if (ba->state_deadline == 0) {
+			++ba->state_deadline;
+		}
+	}
+
+	write_sequnlock_bh(&ba->state_lock);
+}
+EXPORT_SYMBOL(ieee80211_node_tx_ba_set_state);
+
+/*
+ * Heuristically detect Intel peers (returns 1 if Intel, 0 otherwise).
+ * Once detected, QTN_IS_INTEL_NODE is cached in ni_qtn_flags so later
+ * calls short-circuit.  Detection uses BA timeout behavior plus HT/VHT
+ * capability fingerprints of known Intel chipsets.
+ */
+int ieee80211_node_is_intel(struct ieee80211_node *ni)
+{
+	int i;
+	uint16_t peer_cap;
+	struct ieee80211_ie_vhtcap *vhtcap;
+
+	if (ni->ni_qtn_flags & QTN_IS_INTEL_NODE) {
+		return (1);
+	}
+
+	/* check the established TID TX BA timeout, if any one is non-zero, it is Intel */
+	for (i = 0; i < WME_NUM_TID; i++) {
+		if ((ni->ni_ba_tx[i].state == IEEE80211_BA_ESTABLISHED) &&
+		    (ni->ni_ba_tx[i].timeout != 0)) {
+			ni->ni_qtn_flags |= QTN_IS_INTEL_NODE;
+			return (1);
+		}
+	}
+
+	/* Try to identify problem peers - may cause other peer stations to be blacklisted */
+	peer_cap = IEEE80211_HTCAP_CAPABILITIES(&ni->ni_ie_htcap);
+	if ((ni->ni_qtn_assoc_ie == NULL) && !(ni->ni_qtn_flags & QTN_IS_BCM_NODE) &&
+			!(IEEE80211_NODE_IS_VHT(ni)) &&
+			(ni->ni_htcap.mpduspacing == 5) &&
+			!(peer_cap & IEEE80211_HTCAP_C_TXSTBC) &&
+			(peer_cap & IEEE80211_HTCAP_C_SHORTGI20) &&
+			((ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] & 0x1) == 0x01)) {
+		ni->ni_qtn_flags |= QTN_IS_INTEL_NODE;
+		return (1);
+	}
+
+	/* VHT-capable Intel fingerprint: not a known other vendor, and a
+	 * characteristic absence of LDPC rx / tx STBC / HTC-VHT / rx antenna
+	 * pattern consistency bits */
+	vhtcap = &ni->ni_ie_vhtcap;
+	if ((ni->ni_qtn_assoc_ie == NULL) && (IEEE80211_NODE_IS_VHT(ni)) &&
+	    (ni->ni_vendor != PEER_VENDOR_BRCM) && (ni->ni_vendor != PEER_VENDOR_ATH) &&
+	    (ni->ni_vendor != PEER_VENDOR_RLNK) && !(IEEE80211_VHTCAP_GET_RXLDPC(vhtcap)) &&
+	    !(IEEE80211_VHTCAP_GET_TXSTBC(vhtcap)) &&
+	    !(IEEE80211_VHTCAP_GET_HTC_VHT(vhtcap)) &&
+	    !(IEEE80211_VHTCAP_GET_RXANTPAT(vhtcap))) {
+
+		ni->ni_qtn_flags |= QTN_IS_INTEL_NODE;
+		return (1);
+	}
+
+	return (0);
+}
+EXPORT_SYMBOL(ieee80211_node_is_intel);
+
+/*
+ * Heuristically detect Realtek peers (returns 1 if Realtek, 0 otherwise).
+ * Uses the parsed vendor IE or cached flag when available; otherwise falls
+ * back to a capability fingerprint for vendor-IE-less Realtek devices.
+ */
+int ieee80211_node_is_realtek(struct ieee80211_node *ni)
+{
+	int ret = 0;
+
+	if ((ni->ni_vendor == PEER_VENDOR_RTK) ||
+			(ni->ni_qtn_flags & QTN_IS_REALTEK_NODE)) {
+		ret = 1;
+	} else if (ni->ni_vendor == PEER_VENDOR_NONE) {
+		/*
+		 * It is likely that the association request frame from Realtek station
+		 * doesn't have vendor IE, so we have to detect Realtek station by its
+		 * some properties.
+		 * The below judgement logic is for Edimax AC-1200, which is Realtek
+		 * 2*2 11ac chip.
+		 */
+		struct ieee80211_ie_vhtcap *vhtcap = &ni->ni_ie_vhtcap;
+		/* OUI built from the first three MAC octets (low byte first) */
+		uint32_t ni_oui = (ni->ni_macaddr[2] << 16) | (ni->ni_macaddr[1] << 8) | ni->ni_macaddr[0];
+
+		/* Edimax AC-1200 detection */
+		if ((ni_oui == EDIMAX_OUI) &&
+				IEEE80211_NODE_IS_VHT(ni) &&
+				(ni->ni_htcap.mpduspacing == 0) &&
+				(ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS2]) &&
+				(ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] == 0) &&
+				!IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap) &&
+				IEEE80211_VHTCAP_GET_SGI_80MHZ(vhtcap) &&
+				IEEE80211_VHTCAP_GET_HTC_VHT(vhtcap) &&
+				!IEEE80211_VHTCAP_GET_RXANTPAT(vhtcap) &&
+				!IEEE80211_VHTCAP_GET_TXANTPAT(vhtcap)) {
+			ret = 1;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_node_is_realtek);
+
+/*
+ * Heuristically classify a peer as an "opti" (optimization-eligible) node
+ * for vopt counting: a non-Quantenna VHT peer of no known vendor lacking
+ * LDPC/STBC/beamforming capabilities, or an HT-only peer of unknown vendor
+ * with a 10 TU raw beacon interval and no operating-mode notification.
+ * Returns 1 when matched, 0 otherwise.
+ */
+int ieee80211_node_is_opti_node(struct ieee80211_node *ni)
+{
+	struct ieee80211_ie_vhtcap *vhtcap = &ni->ni_ie_vhtcap;
+	int ret = 0;
+
+	if ((ni->ni_qtn_assoc_ie == NULL) && (IEEE80211_NODE_IS_VHT(ni)) &&
+			(ni->ni_vendor != PEER_VENDOR_BRCM) && (ni->ni_vendor != PEER_VENDOR_ATH) &&
+			(ni->ni_vendor != PEER_VENDOR_RLNK) &&
+			!(ni->ni_qtn_flags & QTN_IS_REALTEK_NODE) &&
+			!(IEEE80211_VHTCAP_GET_RXLDPC(vhtcap)) &&
+			!(IEEE80211_VHTCAP_GET_RXSTBC(vhtcap)) &&
+			!(IEEE80211_VHTCAP_GET_SU_BEAMFORMER(vhtcap)) &&
+			!(IEEE80211_VHTCAP_GET_SU_BEAMFORMEE(vhtcap)))
+		ret = 1;
+
+	if ((ni->ni_qtn_assoc_ie == NULL) && (IEEE80211_NODE_IS_HT(ni)) &&
+			!IEEE80211_NODE_IS_VHT(ni) &&
+			!(ni->ni_vendor & PEER_VENDOR_MASK) &&
+			!(ni->ni_qtn_flags & QTN_IS_REALTEK_NODE) &&
+			!(ni->ni_ext_flags & IEEE80211_NODE_OP_MODE_NOTI) &&
+			(ni->ni_raw_bintval == 0xa))
+		ret = 1;
+
+	return ret;
+}
+EXPORT_SYMBOL(ieee80211_node_is_opti_node);
+
+/*
+ * Look up a node by association ID (the low 14 bits of ni_associd).
+ * Returns a referenced node on success — caller must release it with
+ * ieee80211_free_node() — or NULL when no node carries that AID.
+ */
+struct ieee80211_node *ieee80211_find_node_by_aid(struct ieee80211com *ic, uint8_t aid)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+	int found = 0;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		/* mask off the two AID marker bits before comparing */
+		if ((ni->ni_associd & 0x3FFF) == aid) {
+			found = 1;
+			break;
+		}
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	if (found) {
+		ieee80211_ref_node(ni);
+		return (ni);
+	}
+	return (NULL);
+}
+EXPORT_SYMBOL(ieee80211_find_node_by_aid);
diff --git a/drivers/qtn/wlan/ieee80211_output.c b/drivers/qtn/wlan/ieee80211_output.c
new file mode 100644
index 0000000..3f83e91
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_output.c
@@ -0,0 +1,6857 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_output.c 2606 2007-07-25 15:14:52Z mrenzmann $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 output handling.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <linux/if_arp.h>		/* for ARP proxy */
+#include <linux/ip.h>			/* XXX for TOS */
+#include <net/iw_handler.h>		/* wireless_send_event(..) */
+
+#include "qtn/qtn_global.h"
+#include "qtn/shared_params.h"
+#include "qtn/hardware_revision.h"
+
+#include "net80211/if_llc.h"
+#include "net80211/if_ethersubr.h"
+#include "net80211/if_media.h"
+#include "net80211/ieee80211.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_dot11_msg.h"
+#include "net80211/ieee80211_monitor.h"
+#include "net80211/ieee80211_tdls.h"
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+#include "net80211/ieee80211_bsa.h"
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+#include "asm-generic/unaligned.h"
+#endif
+
+#include "qtn_logging.h"
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Filter for outbound management frame debug logging.  Frequent,
+ * uninteresting traffic is suppressed: probe responses are printed
+ * only when the VAP runs in IBSS mode; every other subtype is
+ * always printed.
+ */
+static __inline int
+doprint(struct ieee80211vap *vap, int subtype)
+{
+	if (subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+		return 1;
+	return (vap->iv_opmode == IEEE80211_M_IBSS);
+}
+#endif
+
+#define	senderr(_x, _v)	do { vap->iv_stats._v++; ret = (_x); goto bad; } while (0)
+
+/*
+ * Write the 3-byte Quantenna OUI into oui[0..2], least-significant
+ * byte of QTN_OUI first, and return the number of bytes written.
+ */
+uint8_t
+ieee80211_oui_add_qtn(uint8_t *oui)
+{
+	int i;
+
+	for (i = 0; i < 3; i++)
+		oui[i] = (QTN_OUI >> (8 * i)) & 0xff;
+
+	return IEEE80211_OUI_LEN;
+}
+EXPORT_SYMBOL(ieee80211_oui_add_qtn);
+
+/*
+ * Requeue a frame through the VAP's own net_device.
+ *
+ * NOTE(review): assumes skb->dev is a device whose netdev_priv() is an
+ * ieee80211vap — confirm against callers.
+ */
+void ieee80211_parent_queue_xmit(struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = netdev_priv(skb->dev);
+
+	/* Redirect the frame to the VAP device and push it back through
+	 * the normal qdisc transmit path. */
+	skb->dev = vap->iv_dev;
+
+	dev_queue_xmit(skb);
+}
+
+/*
+ * Initialise an 802.11 header.
+ * This should be called early on in constructing a frame as it sets i_fc[1]. Other bits can then be
+ * OR'd in.
+ *
+ * For data frames the ToDS/FromDS direction and the addr1..addr3 (and
+ * addr4 for WDS) layout are chosen from the VAP operating mode, per the
+ * 802.11 address-field table.  Non-data frames always use NODS with
+ * addr1=DA, addr2=SA, addr3=BSSID.  Duration is zeroed; a sequence
+ * number is assigned from ni_txseqs[0] only for non-QoS subtypes
+ * (QoS frames get per-TID sequencing elsewhere).
+ */
+static void
+ieee80211_send_setup(struct ieee80211vap *vap, struct ieee80211_node *ni,
+			struct ieee80211_frame *wh,
+			const uint8_t type, const uint8_t subtype,
+			const uint8_t *sa, const uint8_t *da, const uint8_t *bssid)
+{
+#define	WH4(wh)	((struct ieee80211_frame_addr4 *)wh)
+
+	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type | subtype;
+	if (type == IEEE80211_FC0_TYPE_DATA) {
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			/* ToDS: addr1=BSSID, addr2=SA, addr3=DA */
+			wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
+			IEEE80211_ADDR_COPY(wh->i_addr1, bssid);
+			IEEE80211_ADDR_COPY(wh->i_addr2, sa);
+			IEEE80211_ADDR_COPY(wh->i_addr3, da);
+			break;
+		case IEEE80211_M_IBSS:
+		case IEEE80211_M_AHDEMO:
+			/* NODS: addr1=DA, addr2=SA, addr3=BSSID */
+			wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+			IEEE80211_ADDR_COPY(wh->i_addr1, da);
+			IEEE80211_ADDR_COPY(wh->i_addr2, sa);
+			IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
+			break;
+		case IEEE80211_M_HOSTAP:
+			/* FromDS: addr1=DA, addr2=BSSID, addr3=SA */
+			wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+			IEEE80211_ADDR_COPY(wh->i_addr1, da);
+			IEEE80211_ADDR_COPY(wh->i_addr2, bssid);
+			IEEE80211_ADDR_COPY(wh->i_addr3, sa);
+			break;
+		case IEEE80211_M_WDS:
+			/* 4-address frame: RA, TA, DA, SA */
+			wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
+			IEEE80211_ADDR_COPY(wh->i_addr1, bssid); /* bssid holds RA */
+			IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+			IEEE80211_ADDR_COPY(wh->i_addr3, da);
+			IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa);
+			break;
+		case IEEE80211_M_MONITOR:
+			/* monitor mode never originates data frames */
+			break;
+		}
+	} else {
+		wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+		IEEE80211_ADDR_COPY(wh->i_addr1, da);
+		IEEE80211_ADDR_COPY(wh->i_addr2, sa);
+		IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
+	}
+	wh->i_dur[0] = 0;
+	wh->i_dur[1] = 0;
+
+	/* Non-QoS subtypes draw from the shared sequence counter */
+	if (!(subtype & IEEE80211_FC0_SUBTYPE_QOS)) {
+		*(__le16 *)&wh->i_seq[0] = htole16(ni->ni_txseqs[0] << IEEE80211_SEQ_SEQ_SHIFT);
+		ni->ni_txseqs[0]++;
+	}
+#undef WH4
+}
+
+/*
+ * Send an EAPOL frame to the specified node.
+ * Use the MGMT frame path to ensure that EAPOL frames are high priority.
+ *
+ * eap_msg must begin with an Ethernet header; that header is stripped
+ * and replaced with LLC/SNAP (ethertype ETH_P_PAE) plus an 802.11
+ * header — a QoS header when the peer negotiated QoS — and the frame
+ * is handed to the driver on the voice queue.
+ */
+void
+ieee80211_eap_output(struct net_device *dev, const void *const eap_msg, const int eap_msg_len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211_frame *wh;
+	struct ieee80211_qosframe *qwh;
+	const struct ether_header *const eh = eap_msg;
+	uint8_t *frm;
+	struct sk_buff *skb;
+	struct llc *llc;
+	int headerlen;
+	uint8_t subtype;
+
+	/*
+	 * Must contain at least an Ethernet header plus one payload byte.
+	 * Cast sizeof() to int so a negative length stays on the signed
+	 * comparison path instead of being promoted to a huge size_t.
+	 */
+	if (eap_msg_len <= (int)sizeof(*eh)) {
+		return;
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_WDS) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DOT1X,
+			"[%pM] eap send failed - WDS not supported\n",
+			eh->ether_dhost);
+		return;
+	}
+
+	ni = ieee80211_find_node(&ic->ic_sta, eh->ether_dhost);
+	if (!ni) {
+		/* Fixed: the original format string carried an extra %s
+		 * conversion with no matching argument. */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DOT1X,
+			"[%pM] eap send failed - node not found\n",
+			eh->ether_dhost);
+		return;
+	}
+
+	if (ni->ni_flags & IEEE80211_NODE_QOS) {
+		headerlen = sizeof(*qwh);
+		subtype = IEEE80211_FC0_SUBTYPE_QOS;
+	} else {
+		headerlen = sizeof(*wh);
+		subtype = IEEE80211_FC0_SUBTYPE_DATA;
+	}
+
+	skb = ieee80211_getdataframe(vap, &frm, 1, eap_msg_len + LLC_SNAPFRAMELEN);
+	if (!skb) {
+		/* Fixed: ni_macaddr is a raw 6-byte MAC, not a C string —
+		 * print it with %pM, not %s (which over-read the array). */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DOT1X,
+			"[%pM] eap send failed - skb alloc\n",
+			ni->ni_macaddr);
+		ieee80211_free_node(ni);
+		return;
+	}
+
+	memcpy(frm, eap_msg, eap_msg_len);
+
+	/* Replace the ethernet header with SNAP and 802.11 headers */
+	skb_pull(skb, sizeof(*eh));
+
+	llc = (struct llc *) skb_push(skb, LLC_SNAPFRAMELEN);
+	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
+	llc->llc_control = LLC_UI;
+	llc->llc_snap.org_code[0] = 0;
+	llc->llc_snap.org_code[1] = 0;
+	llc->llc_snap.org_code[2] = 0;
+	llc->llc_snap.ether_type = htons(ETH_P_PAE);
+
+	wh = (struct ieee80211_frame *) skb_push(skb, headerlen);
+
+	ieee80211_send_setup(vap, ni, wh,
+		IEEE80211_FC0_TYPE_DATA, subtype,
+		vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
+
+	skb_trim(skb, eap_msg_len - sizeof(*eh) + headerlen + LLC_SNAPFRAMELEN);
+
+	if (ni->ni_flags & IEEE80211_NODE_QOS) {
+		qwh = (struct ieee80211_qosframe *) wh;
+		qwh->i_qos[0] = QTN_TID_WLAN;
+		qwh->i_qos[1] = 0;
+	}
+
+	if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
+		wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DOT1X,
+		"[%pM] send eapol frame on channel %u\n",
+		ni->ni_macaddr, ieee80211_chan2ieee(ic, ic->ic_curchan));
+
+	IEEE80211_NODE_STAT(ni, tx_data);
+
+	/* Keep the radio on-channel long enough for the EAPOL exchange */
+	ieee80211_off_channel_suspend(vap, IEEE80211_OFFCHAN_TIMEOUT_EAPOL);
+
+	ic->ic_send_80211(ic, ni, skb, WME_AC_VO, 0);
+}
+EXPORT_SYMBOL(ieee80211_eap_output);
+
+/*
+ * Send a management frame to the specified node.  The node pointer
+ * must have a reference as the pointer will be passed to the driver
+ * and potentially held for a long time.  If the frame is successfully
+ * dispatched to the driver, then it is responsible for freeing the
+ * reference (and potentially freeing up any associated storage).
+ */
+void
+ieee80211_mgmt_output(struct ieee80211_node *ni, struct sk_buff *skb, int subtype,
+			const u_int8_t da[IEEE80211_ADDR_LEN])
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_frame *wh;
+
+	/* NOTE(review): ni is already dereferenced above, so this
+	 * assertion cannot catch a NULL node; kept as-is. */
+	KASSERT(ni != NULL, ("null node"));
+
+	wh = (struct ieee80211_frame *) skb_push(skb, sizeof(struct ieee80211_frame));
+	ieee80211_send_setup(vap, ni, wh, IEEE80211_FC0_TYPE_MGT, subtype,
+		vap->iv_myaddr, da, ni->ni_bssid);
+
+	/* FIXME power management */
+
+	/* M_LINK0 flags a payload that was encrypted (shared-key auth,
+	 * per the AUTH log message): set the protected bit. */
+	if (M_FLAG_ISSET(skb, M_LINK0) && ni->ni_challenge != NULL) {
+		M_FLAG_CLR(skb, M_LINK0);
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1,
+			"encrypting frame (%s)", __func__);
+		wh->i_fc[1] |= IEEE80211_FC1_PROT;
+	}
+
+	if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
+		wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+
+#ifdef IEEE80211_DEBUG
+	if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) ||
+	    ieee80211_msg_dumppkts(vap)) {
+		printf("[%pM] send %s on channel %u\n",
+			wh->i_addr1,
+			ieee80211_mgt_subtype_name[
+				(subtype & IEEE80211_FC0_SUBTYPE_MASK) >>
+					IEEE80211_FC0_SUBTYPE_SHIFT],
+			ieee80211_chan2ieee(ic, ic->ic_curchan));
+	}
+#endif
+	IEEE80211_NODE_STAT(ni, tx_mgmt);
+
+	/* Voice queue, high-priority flag set */
+	ic->ic_send_80211(ic, ni, skb, WME_AC_VO, 1);
+}
+EXPORT_SYMBOL(ieee80211_mgmt_output);
+
+/*
+ * Send a TDLS management frame.  Unlike ieee80211_mgmt_output() the
+ * frame type, destination and BSSID are all caller-supplied, allowing
+ * direct-link (peer-to-peer) addressing.  The caller must hold a node
+ * reference, which is handed to the driver.
+ */
+void
+ieee80211_tdls_mgmt_output(struct ieee80211_node *ni,
+	struct sk_buff *skb, const uint8_t type,
+	const uint8_t subtype, const uint8_t *da,
+	const uint8_t *bssid)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_frame *wh;
+
+	/* NOTE(review): ni is already dereferenced above, so this
+	 * assertion cannot catch a NULL node; kept as-is. */
+	KASSERT(ni != NULL, ("null node"));
+
+	wh = (struct ieee80211_frame *)
+		skb_push(skb, sizeof(struct ieee80211_frame));
+	ieee80211_send_setup(vap, ni, wh,
+		type, subtype, vap->iv_myaddr, da, bssid);
+
+	/* M_LINK0 flags a payload that was encrypted: set protected bit */
+	if (M_FLAG_ISSET(skb, M_LINK0) && ni->ni_challenge != NULL) {
+		M_FLAG_CLR(skb, M_LINK0);
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1,
+			"encrypting frame (%s)", __func__);
+		wh->i_fc[1] |= IEEE80211_FC1_PROT;
+	}
+
+	/* XXX power management */
+	if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
+		wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+
+#ifdef IEEE80211_DEBUG
+	if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) ||
+	    ieee80211_msg_dumppkts(vap)) {
+		printf("[%s] send %s on channel %u\n",
+			ether_sprintf(wh->i_addr1),
+			ieee80211_mgt_subtype_name[
+				(subtype & IEEE80211_FC0_SUBTYPE_MASK) >>
+					IEEE80211_FC0_SUBTYPE_SHIFT],
+			ieee80211_chan2ieee(ic, ic->ic_curchan));
+	}
+#endif
+	IEEE80211_NODE_STAT(ni, tx_mgmt);
+
+	ic->ic_send_80211(ic, ni, skb, WME_AC_VO, 1);
+}
+EXPORT_SYMBOL(ieee80211_tdls_mgmt_output);
+
+/*
+ * Send a null data frame to the specified node.
+ *
+ * NB: the caller is assumed to have setup a node reference
+ *     for use; this is necessary to deal with a race condition
+ *     when probing for inactive stations.
+ *
+ * Returns 0 on success or -ENOMEM if the skb cannot be allocated
+ * (in which case the node reference is released here).
+ */
+int
+ieee80211_send_nulldata(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct sk_buff *skb;
+	struct ieee80211_frame *wh;
+	uint8_t *frm;
+
+	skb = ieee80211_getdataframe(vap, &frm, 0, 0);
+	if (skb == NULL) {
+		/* drop the caller's reference on failure */
+		ieee80211_free_node(ni);
+		return -ENOMEM;
+	}
+
+	wh = (struct ieee80211_frame *) skb_push(skb, sizeof(struct ieee80211_frame));
+	ieee80211_send_setup(vap, ni, wh,
+		IEEE80211_FC0_TYPE_DATA,
+		IEEE80211_FC0_SUBTYPE_NODATA,
+		vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
+
+	/* NB: power management bit is never sent by an AP */
+	if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
+			vap->iv_opmode != IEEE80211_M_HOSTAP &&
+			vap->iv_opmode != IEEE80211_M_WDS)
+		wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+
+	IEEE80211_NODE_STAT(ni, tx_data);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+		"[%s] send null data frame on channel %u, pwr mgt %s\n",
+		ether_sprintf(ni->ni_macaddr),
+		ieee80211_chan2ieee(ic, ic->ic_curchan),
+		wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis");
+
+	ic->ic_send_80211(ic, ni, skb, WME_AC_VO, 0);
+
+	return 0;
+}
+
+/*
+ * Send some tuning data packets for low level to do
+ * power adjustment.
+ *
+ * Builds a broadcast FromDS null-data frame by hand (not via
+ * ieee80211_send_setup) and queues it at best-effort priority.
+ * A node reference is taken here before handing ni to the driver.
+ */
+int
+ieee80211_send_tuning_data(struct ieee80211_node *ni)
+{
+	struct ieee80211com	*ic = ni->ni_ic;
+	struct ieee80211vap	*vap = ni->ni_vap;
+	struct sk_buff		*skb;
+	struct ieee80211_frame	*wh;
+	uint8_t			*frm;
+	const uint8_t		da[IEEE80211_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+	skb = ieee80211_getdataframe(vap, &frm, 0, 0);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	/* Fill up the frame header */
+	wh = (struct ieee80211_frame *)skb_push(skb, sizeof(struct ieee80211_frame));
+
+	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_NODATA;
+	wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+
+	/* addr1 = broadcast, addr2 = BSSID, addr3 = node MAC */
+	IEEE80211_ADDR_COPY(wh->i_addr1, da);
+	IEEE80211_ADDR_COPY(wh->i_addr2, ni->ni_bssid);
+	IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_macaddr);
+
+	wh->i_dur[0] = 0;
+	wh->i_dur[1] = 0;
+
+	ieee80211_ref_node(ni);
+
+	/* No need to send these at high priority */
+	(void)ic->ic_send_80211(ic, ni, skb, WME_AC_BE, 0);
+
+	return 0;
+}
+/* NB: this exports ieee80211_send_nulldata() defined above — it does
+ * not belong to ieee80211_send_tuning_data() despite its placement. */
+EXPORT_SYMBOL(ieee80211_send_nulldata);
+
+/*
+ * Build (but do not transmit) a null data frame addressed to the node.
+ *
+ * The caller is assumed to have taken a node reference.  Returns the
+ * prepared skb, or NULL on allocation failure.
+ */
+struct sk_buff *
+ieee80211_get_nulldata(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_frame *wh;
+	struct sk_buff *skb;
+	uint8_t *frm;
+
+	skb = ieee80211_getdataframe(vap, &frm, 0, 0);
+	if (!skb)
+		return NULL;
+
+	wh = (struct ieee80211_frame *)skb_push(skb, sizeof(*wh));
+	ieee80211_send_setup(vap, ni, wh,
+		IEEE80211_FC0_TYPE_DATA,
+		IEEE80211_FC0_SUBTYPE_NODATA,
+		vap->iv_myaddr,
+		ni->ni_macaddr,
+		ni->ni_bssid);
+
+	/* Reflect our own power-save state towards the peer */
+	if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
+		wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+
+	IEEE80211_NODE_STAT(ni, tx_data);
+
+	return skb;
+}
+EXPORT_SYMBOL(ieee80211_get_nulldata);
+
+/*
+ * Get QoS null data on a particular AC to a node.
+ *
+ * The caller is assumed to have taken a node reference.
+ * Builds the frame with TID fixed to QTN_TID_WLAN, copies the WMM
+ * no-ack policy for the AC into the QoS control field, and marks the
+ * skb M_UAPSD when the AC is U-APSD trigger-enabled for the node.
+ * Returns the skb, or NULL on allocation failure.
+ */
+struct sk_buff *
+ieee80211_get_qosnulldata(struct ieee80211_node *ni, int ac)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct sk_buff *skb;
+	struct ieee80211_qosframe *qwh;
+	uint8_t *frm;
+	int tid;
+
+	skb = ieee80211_getdataframe(vap, &frm, 1, 0);
+	if (skb == NULL)
+		return NULL;
+
+	/* carry the AC in skb->priority for the queueing layers */
+	skb->priority = ac;
+	qwh = (struct ieee80211_qosframe *)skb_push(skb, sizeof(struct ieee80211_qosframe));
+
+	ieee80211_send_setup(vap, ni, (struct ieee80211_frame *)qwh,
+		IEEE80211_FC0_TYPE_DATA,
+		IEEE80211_FC0_SUBTYPE_QOS_NULL,
+		vap->iv_myaddr,
+		ni->ni_macaddr,
+		ni->ni_bssid);
+
+	if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
+		qwh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+
+	tid = QTN_TID_WLAN;
+	qwh->i_qos[0] = tid & IEEE80211_QOS_TID;
+	/* mirror the channel's WMM no-ack policy for this AC */
+	if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmm_noackPolicy)
+		qwh->i_qos[0] |= (1 << IEEE80211_QOS_ACKPOLICY_S) & IEEE80211_QOS_ACKPOLICY;
+	qwh->i_qos[1] = 0;
+
+	IEEE80211_NODE_STAT(ni, tx_data);
+
+	if (WME_UAPSD_AC_CAN_TRIGGER(skb->priority, ni)) {
+		/* U-APSD power save queue */
+		/* XXXAPSD: assuming triggerable means deliverable */
+		M_FLAG_SET(skb, M_UAPSD);
+	}
+
+	return skb;
+}
+EXPORT_SYMBOL(ieee80211_get_qosnulldata);
+
+/*
+ * Send QoS null data on a particular AC to a node.
+ *
+ * The caller is assumed to have taken a node reference.
+ * Thin wrapper around ieee80211_get_qosnulldata(): on allocation
+ * failure the node reference is released and -ENOMEM returned;
+ * otherwise the frame is handed to the driver on the given AC.
+ */
+int
+ieee80211_send_qosnulldata(struct ieee80211_node *ni, int ac)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct sk_buff *skb;
+
+	skb = ieee80211_get_qosnulldata(ni, ac);
+	if (skb == NULL) {
+		ieee80211_free_node(ni);
+		return -ENOMEM;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+			"[%s] send qos null data frame on channel %u\n",
+			ether_sprintf(ni->ni_macaddr),
+			ieee80211_chan2ieee(ic, ic->ic_curchan));
+	ic->ic_send_80211(ic, ni, skb, ac, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_send_qosnulldata);
+
+/*
+ * Look up a node by MAC address and send it a QoS null data frame on
+ * the background AC, optionally with the power-management bit set.
+ * Returns -EINVAL if the node is unknown, -ENOMEM on allocation
+ * failure, 0 otherwise.
+ *
+ * NOTE(review): the WH4 macro below is never used in this function and
+ * is not #undef'd — it stays defined for the rest of the file; confirm
+ * whether later code relies on it.
+ */
+int
+ieee80211_send_qosnulldata_ext(struct ieee80211com *ic, uint8_t *mac_addr, int pwr_mgt)
+{
+#define	WH4(wh)	((struct ieee80211_frame_addr4 *)wh)
+	struct ieee80211vap *vap;
+	struct ieee80211_node *ni;
+	struct sk_buff *skb;
+	uint8_t *frm;
+	struct ieee80211_qosframe *qwh;
+	int ac;
+	int tid;
+
+	ni = ieee80211_find_node(&ic->ic_sta, mac_addr);
+	if (!ni)
+	      return -EINVAL;
+
+	ac = WMM_AC_BK;
+	vap = ni->ni_vap;
+	skb = ieee80211_getdataframe(vap, &frm, 1, 0);
+	if (skb == NULL) {
+		ieee80211_free_node(ni);
+		return -ENOMEM;
+	}
+
+	skb->priority = ac;
+	/* NOTE(review): pushes a 4-address QoS header's worth of space but
+	 * fills it via the 3-address ieee80211_qosframe layout — confirm
+	 * this padding is intentional. */
+	qwh = (struct ieee80211_qosframe *)skb_push(skb, sizeof(struct ieee80211_qosframe_addr4));
+
+	/* Setup is done with TYPE_MGT (NODS addressing); the frame control
+	 * byte is then rewritten below to a QoS-null data frame. */
+	ieee80211_send_setup(vap, ni, (struct ieee80211_frame *)qwh,
+		IEEE80211_FC0_TYPE_MGT,
+		IEEE80211_FC0_SUBTYPE_QOS_NULL,
+		vap->iv_myaddr,
+		ni->ni_macaddr,
+		ni->ni_bssid);
+	qwh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS_NULL;
+
+	if (pwr_mgt)
+		qwh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+
+	tid = QTN_TID_WLAN;
+	qwh->i_qos[0] = tid & IEEE80211_QOS_TID;
+	if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmm_noackPolicy)
+		qwh->i_qos[0] |= (1 << IEEE80211_QOS_ACKPOLICY_S) & IEEE80211_QOS_ACKPOLICY;
+	qwh->i_qos[1] = 0;
+
+	IEEE80211_NODE_STAT(ni, tx_data);
+
+	if (WME_UAPSD_AC_CAN_TRIGGER(skb->priority, ni)) {
+		/* U-APSD power save queue */
+		/* XXXAPSD: assuming triggerable means deliverable */
+		M_FLAG_SET(skb, M_UAPSD);
+	}
+	IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+			"[%s] send pwr_mgt(%d) data frame on channel %u\n",
+			ether_sprintf(ni->ni_macaddr), pwr_mgt,
+			ieee80211_chan2ieee(ic, ic->ic_curchan));
+
+	ic->ic_send_80211(ic, ni, skb, ac, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_send_qosnulldata_ext);
+
+/*
+ * Add transmit power envelope information element
+ *
+ * The max-tx-power count is derived from the current bandwidth and the
+ * per-bandwidth power fields are all set to the desired channel's
+ * regulatory maximum minus the local power constraint.  If no desired
+ * channel is configured (IEEE80211_CHAN_ANYC) the element is skipped
+ * and frm is returned unchanged.
+ *
+ * NOTE(review): ic_des_chan is only compared against CHAN_ANYC, not
+ * NULL — confirm callers guarantee it is set.
+ */
+u_int8_t *
+ieee80211_add_vhttxpwr_envelope(u_int8_t *frm, struct ieee80211com *ic)
+{
+	u_int32_t bw = ieee80211_get_bw(ic);
+	struct ieee80211_ie_vtxpwren *ie = (struct ieee80211_ie_vtxpwren *)frm;
+	u_int8_t local_max_tx_pwrcnt = 0;
+	struct ieee80211_channel *des_chan = ic->ic_des_chan;
+
+	if (des_chan == IEEE80211_CHAN_ANYC)
+		return frm;
+
+	switch (bw) {
+		case BW_HT20:
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_20MHZ;
+			break;
+		case BW_HT40:
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_40MHZ;
+			break;
+		case BW_HT80:
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_80MHZ;
+			break;
+		default:
+			/* treat anything wider/unknown as 80MHz */
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_80MHZ;
+	}
+
+	ie->vtxpwren_id = IEEE80211_ELEMID_VHTXMTPWRENVLP;
+	ie->vtxpwren_len = sizeof(struct ieee80211_ie_vtxpwren) - 2;
+
+	ie->vtxpwren_txpwr_info = local_max_tx_pwrcnt;
+	/* same power value advertised for 20/40/80; 160MHz not supported */
+	ie->vtxpwren_tp20 = des_chan->ic_maxregpower - ic->ic_pwr_constraint;
+	ie->vtxpwren_tp40 = des_chan->ic_maxregpower - ic->ic_pwr_constraint;
+	ie->vtxpwren_tp80 = des_chan->ic_maxregpower - ic->ic_pwr_constraint;
+	ie->vtxpwren_tp160 = 0;
+
+	frm += sizeof(struct ieee80211_ie_vtxpwren);
+	return frm;
+}
+
+/*
+ * Add wide-bandwidth Channel switch wrapper information element
+ *
+ * Describes the CSA target channel's width and center frequencies.
+ * Returns frm unchanged (element not emitted) when no CSA channel is
+ * pending.
+ */
+u_int8_t *
+ieee80211_add_wband_chanswitch(u_int8_t *frm, struct ieee80211com *ic)
+{
+	u_int32_t bw = ieee80211_get_bw(ic);
+	struct ieee80211_ie_wbchansw *ie = (struct ieee80211_ie_wbchansw *)frm;
+	struct ieee80211_channel *des_chan = ic->ic_csa_chan;
+	u_int32_t chwidth = 0;
+
+	if (!des_chan || (des_chan == IEEE80211_CHAN_ANYC))
+		return frm;
+
+	ie->wbcs_id = IEEE80211_ELEMID_WBWCHANSWITCH;
+	ie->wbcs_len = sizeof(struct ieee80211_ie_wbchansw) - 2;
+	switch (bw) {
+		case BW_HT20:
+		case BW_HT40:
+			chwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+			break;
+		case BW_HT80:
+			chwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+			break;
+		default:
+			/* anything wider/unknown reported as 80MHz */
+			chwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+	}
+
+	ie->wbcs_newchanw = chwidth;
+	/* center frequency segment 0 depends on width; segment 1 unused */
+	if (bw == BW_HT40) {
+		ie->wbcs_newchancf0 = des_chan->ic_center_f_40MHz;
+		ie->wbcs_newchancf1 = 0;
+	} else if (bw == BW_HT80) {
+		ie->wbcs_newchancf0 = des_chan->ic_center_f_80MHz;
+		ie->wbcs_newchancf1 = 0;
+	} else {
+		ie->wbcs_newchancf0 = 0;
+		ie->wbcs_newchancf1 = 0;
+	}
+
+	frm += sizeof(struct ieee80211_ie_wbchansw);
+	return frm;
+}
+
+/*
+ * Add Channel switch wrapper information element
+ *
+ * Emits the wrapper header, then appends the wide-bandwidth channel
+ * switch sub-element (for >20MHz) and, when 802.11h/TPC is enabled,
+ * the VHT transmit power envelope, accumulating their sizes in the
+ * wrapper's length.
+ *
+ * NOTE(review): the length is added BEFORE each sub-element builder
+ * runs, but both builders can return without writing anything (no CSA
+ * or desired channel) — in that case chsw_len overstates the element.
+ * Confirm callers only invoke this with a valid CSA channel pending.
+ */
+u_int8_t *
+ieee80211_add_chansw_wrap(u_int8_t *frm, struct ieee80211com *ic)
+{
+	struct ieee80211_ie_chsw_wrapper *ie = (struct ieee80211_ie_chsw_wrapper *) frm;
+	u_int32_t bw = ieee80211_get_bw(ic);
+	ie->chsw_id = IEEE80211_ELEMID_CHANSWITCHWRP;
+	ie->chsw_len = 0;
+	frm += sizeof(struct ieee80211_ie_chsw_wrapper);
+
+	/* Wide bandwidth channel switch element */
+	if (bw > BW_HT20) {
+		ie->chsw_len += sizeof(struct ieee80211_ie_wbchansw);
+		frm = ieee80211_add_wband_chanswitch(frm, ic);
+	}
+	/* VHT transmit power envelope */
+	if ((ic->ic_flags & IEEE80211_F_DOTH) &&
+	    (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+		ie->chsw_len += sizeof(struct ieee80211_ie_vtxpwren);
+		frm = ieee80211_add_vhttxpwr_envelope(frm, ic);
+	}
+	return frm;
+}
+
+/*
+ * Append a Supported Rates element, clamped to the basic rate-set
+ * size (IEEE80211_RATE_SIZE entries); remaining rates go into the
+ * Extended Supported Rates element.
+ */
+u_int8_t *
+ieee80211_add_rates(u_int8_t *frm, const struct ieee80211_rateset *rs)
+{
+	int n = rs->rs_nrates;
+
+	if (n > IEEE80211_RATE_SIZE)
+		n = IEEE80211_RATE_SIZE;
+
+	frm[0] = IEEE80211_ELEMID_RATES;
+	frm[1] = n;
+	memcpy(frm + 2, rs->rs_rates, n);
+
+	return frm + 2 + n;
+}
+
+/*
+ * Add a Supported Channels element id to a frame.
+ *
+ * For each frequency band the active-channel bitmap (chosen by the
+ * current bandwidth) is run-length encoded as (first channel, count)
+ * pairs appended to the element; the length octet is patched up as
+ * pairs are written.
+ */
+uint8_t*
+ieee80211_add_supported_chans(uint8_t *frm, struct ieee80211com *ic)
+{
+	int band_idx;
+	int first_chan;
+	int temp_chan;
+	int chan_cnt;
+	int active_chan_cnt;
+	uint8_t *ie_len;
+	int cur_bw;
+	uint8_t *chan_active;
+	struct ieee80211_band_info *band;
+
+
+	*frm++ = IEEE80211_ELEMID_SUPPCHAN;
+	/* remember the length octet; patched as pairs are emitted */
+	ie_len = frm++;
+	*ie_len = 0;
+
+	/* pick the channel bitmap matching the operating bandwidth */
+	cur_bw = ieee80211_get_bw(ic);
+	if (cur_bw == BW_HT20)
+		chan_active = &ic->ic_chan_active_20[0];
+	else if (cur_bw == BW_HT40)
+		chan_active = &ic->ic_chan_active_40[0];
+	else if (cur_bw == BW_HT80)
+		chan_active = &ic->ic_chan_active_80[0];
+	else
+		chan_active = &ic->ic_chan_active[0];
+
+	for (band_idx = 0; band_idx < IEEE80211_BAND_IDX_MAX; band_idx++) {
+		band = ieee80211_get_band_info(band_idx);
+		if (band == NULL)
+			continue;
+
+		first_chan = band->band_first_chan;
+		chan_cnt = band->band_chan_cnt;
+		active_chan_cnt = 0;
+		/* NB: the loop runs one extra iteration (chan_cnt == 0);
+		 * the "chan_cnt > 0" guard then fails, flushing any run
+		 * still open at the end of the band. */
+		for (temp_chan = first_chan; chan_cnt >= 0; chan_cnt--){
+			if (isset(chan_active, temp_chan) && chan_cnt > 0) {
+				active_chan_cnt++;
+			} else if (active_chan_cnt) {
+				/* end of a run: emit (first chan, count) */
+				*frm++ = first_chan;
+				*frm++ = active_chan_cnt;
+				*ie_len += 2;
+				active_chan_cnt = 0;
+			}
+
+			/* a run just started: record its first channel */
+			if (active_chan_cnt == 1)
+				first_chan = temp_chan;
+
+			temp_chan += band->band_chan_step;
+		}
+	}
+
+	return frm;
+}
+
+
+/*
+ * Add an extended capabilities element id to a frame.
+ * All capability octets are zeroed except ext_cap[7], which advertises
+ * Operating Mode Notification support.
+ */
+u_int8_t *
+ieee80211_add_extcap(u_int8_t *frm)
+{
+	struct ieee80211_extcap_param *ie  = (struct ieee80211_extcap_param *)frm;
+
+	memset(ie, 0, sizeof(struct ieee80211_extcap_param));
+
+	ie->param_id = IEEE80211_ELEMID_EXTCAP;
+	ie->param_len =	sizeof(struct ieee80211_extcap_param) - 2;
+	/* advertise Operating Mode Notification in octet 7 */
+	ie->ext_cap[7] = IEEE80211_EXTCAP_OPMODE_NOTIFICATION;
+
+	return frm + sizeof(struct ieee80211_extcap_param);
+}
+
+/*
+ * Walk an application-supplied IE buffer and synchronise the BSS
+ * Transition Management (BTM) capability bit in the first Extended
+ * Capabilities element found with the current ic setting.
+ *
+ * The buffer is parsed as (id, len, payload) triplets; parsing stops
+ * at the first truncated element.
+ */
+void
+ieee80211_update_bss_tm(uint8_t *appie, int len, struct ieee80211com *ic, struct ieee80211vap *vap)
+{
+	size_t left;
+	uint8_t *pos = appie;
+
+	/* Guard NULL/negative input: assigning a negative len to the
+	 * unsigned counter would make it huge and run off the buffer. */
+	if (appie == NULL || len < 0)
+		return;
+	left = len;
+
+	while (left >= 2) {
+		uint8_t id;
+		uint8_t elen;
+
+		id = *pos++;
+		elen = *pos++;
+		left -= 2;
+
+		if (elen > left) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ELEMID,
+				"IEEE 802.11 element parse failed (id=%u elen=%u left=%u)",
+				id, elen, (unsigned int)left);
+			return;
+		}
+		if (id == IEEE80211_ELEMID_EXTCAP) {
+			/* The BTM bit lives in the third capability octet;
+			 * don't write past the end of a short element. */
+			if (elen < 3)
+				return;
+			if (IEEE80211_COM_BTM_ENABLED(ic)) {
+				*(pos + 2) |= IEEE80211_EXTCAP_BTM;
+			} else {
+				*(pos + 2) &= ~IEEE80211_EXTCAP_BTM;
+			}
+			return;
+		} else {
+			left -= elen;
+			pos += elen;
+		}
+	}
+}
+
+/*
+ * Add an extended supported rates element id to a frame.
+ *
+ * Only legacy (11g) rates beyond the first IEEE80211_RATE_SIZE entries
+ * are emitted; 11n rates are advertised via the HT capabilities IE.
+ * If no overflow rates exist, nothing is written.
+ */
+u_int8_t *
+ieee80211_add_xrates(u_int8_t *frm, const struct ieee80211_rateset *rs)
+{
+//FIXME
+#if 1
+	/*
+	 * Add an extended supported rates element if operating in 11g/n mode.
+	 * Only 11g rates are added. 11n Rates are published via ht cap */
+	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+		int nrates = rs->rs_legacy_nrates - IEEE80211_RATE_SIZE;
+		if(nrates)
+		{
+			*frm++ = IEEE80211_ELEMID_XRATES;
+			*frm++ = nrates;
+			memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
+			frm += nrates;
+		}
+	}
+#else
+	/* Add BSS membership selector (HT == 0x7F)*/
+	*frm++ = IEEE80211_ELEMID_XRATES;
+	*frm++ = 1;
+	*frm++ = 0x7F;
+#endif
+	return frm;
+}
+
+/*
+ * Append an SSID element (id, length, payload) and return the
+ * pointer advanced past it.
+ */
+static u_int8_t *
+ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len)
+{
+	frm[0] = IEEE80211_ELEMID_SSID;
+	frm[1] = len;
+	memcpy(&frm[2], ssid, len);
+	return &frm[2 + len];
+}
+
+/*
+ * Append a Channel Switch Announcement element (fixed 3-byte body:
+ * switch mode, target channel, countdown) and return the pointer
+ * advanced past it.
+ */
+u_int8_t *
+ieee80211_add_csa(u_int8_t *frm,
+		u_int8_t csa_mode,
+		u_int8_t csa_chan,
+		u_int8_t csa_count)
+{
+	frm[0] = IEEE80211_ELEMID_CHANSWITCHANN;
+	frm[1] = 3;
+	frm[2] = csa_mode;
+	frm[3] = csa_chan;
+	frm[4] = csa_count;
+
+	return frm + 5;
+}
+
+/*
+ * Append a Secondary Channel Offset element for the CSA target channel
+ * and advance *frm past it.  The offset is "not applicable" unless the
+ * current bandwidth is at least 40MHz and the target channel carries an
+ * upper or lower HT40 extension.
+ */
+void ieee80211_add_sec_chan_off(u_int8_t **frm,
+		struct ieee80211com *ic,
+		uint8_t csa_chan)
+{
+	struct ieee80211_ie_sec_chan_off *sco;
+	struct ieee80211_channel *chan;
+	uint8_t off = IEEE80211_HTINFO_EXTOFFSET_NA;
+
+	chan = ieee80211_find_channel_by_ieee(ic, csa_chan);
+	if (chan && (ieee80211_get_bw(ic) >= BW_HT40)) {
+		if (chan->ic_flags & IEEE80211_CHAN_HT40D)
+			off = IEEE80211_HTINFO_EXTOFFSET_BELOW;
+		else if (chan->ic_flags & IEEE80211_CHAN_HT40U)
+			off = IEEE80211_HTINFO_EXTOFFSET_ABOVE;
+	}
+
+	sco = (struct ieee80211_ie_sec_chan_off *)(*frm);
+	sco->sco_id = IEEE80211_ELEMID_SEC_CHAN_OFF;
+	sco->sco_len = 1;
+	sco->sco_off = off;
+
+	*frm += sizeof(struct ieee80211_ie_sec_chan_off);
+}
+
+/*
+ * Append an ERP Information element reflecting the current protection
+ * state of the BSS (non-ERP stations present, protection in use,
+ * long preamble required).
+ */
+u_int8_t *
+ieee80211_add_erp(u_int8_t *frm, struct ieee80211com *ic)
+{
+	u_int8_t bits = 0;
+
+	if (ic->ic_nonerpsta != 0)
+		bits |= IEEE80211_ERP_NON_ERP_PRESENT;
+	if (ic->ic_flags & IEEE80211_F_USEPROT)
+		bits |= IEEE80211_ERP_USE_PROTECTION;
+	if (ic->ic_flags & IEEE80211_F_USEBARKER)
+		bits |= IEEE80211_ERP_LONG_PREAMBLE;
+
+	frm[0] = IEEE80211_ELEMID_ERP;
+	frm[1] = 1;
+	frm[2] = bits;
+
+	return frm + 3;
+}
+
+/*
+ * Append the pre-built country information element (id + length octet
+ * + country_len payload bytes) stored in ic_country_ie.
+ */
+u_int8_t *
+ieee80211_add_country(u_int8_t *frm, struct ieee80211com *ic)
+{
+	size_t ie_size = ic->ic_country_ie.country_len + 2;
+
+	memcpy(frm, (u_int8_t *)&ic->ic_country_ie, ie_size);
+	return frm + ie_size;
+}
+
+/*
+ * Add BSS load element to frame
+ *
+ * Body: station count (LE16), channel utilisation from the shared
+ * params block, and a placeholder admission capacity of 0xffff
+ * (endian-neutral, so the raw 16-bit store is safe).
+ */
+u_int8_t *
+ieee80211_add_bss_load(u_int8_t *frm, struct ieee80211vap *vap)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+
+	*frm++ = IEEE80211_ELEMID_BSS_LOAD;
+	*frm++ = 5;
+	ADDINT16LE(frm, vap->iv_sta_assoc);
+	*frm++ = sp->chan_util;
+	/* TODO: Available Admission Capacity
+	 * parameters need to be updated with correct values */
+	*(__le16 *)frm = 0xffff;
+	frm += 2;
+
+	return frm;
+}
+
+/*
+ * Append a Radio Resource Management (RRM) Enabled Capabilities
+ * element.  All capability bits are cleared except the neighbor-report
+ * bit, which mirrors the ic-level setting.
+ */
+u_int8_t *
+ieee80211_add_rrm_enabled(u_int8_t *frm, struct ieee80211vap *vap)
+{
+	struct ieee80211_ie_rrm *ie = (struct ieee80211_ie_rrm *)frm;
+
+	memset(ie, 0, sizeof(*ie));
+	ie->id = IEEE80211_ELEMID_RRM_ENABLED;
+	ie->len = (uint8_t)sizeof(*ie) - IEEE80211_IE_ID_LEN_SIZE;
+
+	/* memset already zeroed every bit; only set what we support */
+	if (IEEE80211_COM_NEIGHREPORT_ENABLED(vap->iv_ic))
+		ie->cap[0] |= IEEE80211_RM_NEIGH_REPORT_CAP;
+
+	return frm + sizeof(*ie);
+}
+
+
+/*
+ * Build a WPA (vendor-specific) information element into ie from the
+ * BSS node's RSN parameters: multicast cipher, unicast cipher list,
+ * key-management (AKM) list and optional capabilities.  TKIP entries
+ * are substituted with CCMP when TKIP is administratively disallowed.
+ * Returns a pointer just past the element; the length octet is patched
+ * at the end and sanity-checked against sizeof(ieee80211_ie_wpa).
+ */
+static u_int8_t *
+ieee80211_setup_wpa_ie(struct ieee80211vap *vap, u_int8_t *ie)
+{
+#define	WPA_OUI_BYTES		0x00, 0x50, 0xf2
+#define	ADDSHORT(frm, v) do {			\
+	frm[0] = (v) & 0xff;			\
+	frm[1] = (v) >> 8;			\
+	frm += 2;				\
+} while (0)
+#define	ADDSELECTOR(frm, sel) do {		\
+	memcpy(frm, sel, 4);			\
+	frm += 4;				\
+} while (0)
+	static const u_int8_t oui[4] = { WPA_OUI_BYTES, WPA_RSN_OUI_TYPE };
+	/* indexed by IEEE80211_CIPHER_* values */
+	static const u_int8_t cipher_suite[][4] = {
+		{ WPA_OUI_BYTES, WPA_CSE_WEP40 },	/* NB: 40-bit */
+		{ WPA_OUI_BYTES, WPA_CSE_TKIP },
+		{ 0x00, 0x00, 0x00, 0x00 },		/* XXX WRAP */
+		{ WPA_OUI_BYTES, WPA_CSE_CCMP },
+		{ 0x00, 0x00, 0x00, 0x00 },		/* XXX CKIP */
+		{ WPA_OUI_BYTES, WPA_CSE_NULL },
+	};
+	static const u_int8_t wep104_suite[4] =
+		{ WPA_OUI_BYTES, WPA_CSE_WEP104 };
+	static const u_int8_t key_mgt_unspec[4] =
+		{ WPA_OUI_BYTES, WPA_ASE_8021X_UNSPEC };
+	static const u_int8_t key_mgt_psk[4] =
+		{ WPA_OUI_BYTES, WPA_ASE_8021X_PSK };
+	const struct ieee80211_rsnparms *rsn = &vap->iv_bss->ni_rsn;
+	u_int8_t *frm = ie;
+	u_int8_t *selcnt;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = 0;				/* length filled in below */
+	memcpy(frm, oui, sizeof(oui));		/* WPA OUI */
+	frm += sizeof(oui);
+	ADDSHORT(frm, WPA_VERSION);
+
+	/* XXX filter out CKIP */
+
+	/* multicast cipher */
+	if (rsn->rsn_mcastcipher == IEEE80211_CIPHER_WEP &&
+	    rsn->rsn_mcastkeylen >= 13)
+		ADDSELECTOR(frm, wep104_suite);
+	else if (!(IEEE80211_IS_TKIP_ALLOWED(ic)) &&
+			(rsn->rsn_mcastcipher == IEEE80211_CIPHER_TKIP))	/* remove TKIP functionality */
+		ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_AES_CCM]);
+	else
+		ADDSELECTOR(frm, cipher_suite[rsn->rsn_mcastcipher]);
+
+	/* unicast cipher list */
+	selcnt = frm;
+	ADDSHORT(frm, 0);			/* selector count */
+	if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_AES_CCM)) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_AES_CCM]);
+	}
+	if (IEEE80211_IS_TKIP_ALLOWED(ic)) {
+		if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_TKIP)) {
+			selcnt[0]++;
+			ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_TKIP]);
+		}
+	}
+	/* authenticator selector list */
+	selcnt = frm;
+	ADDSHORT(frm, 0);			/* selector count */
+	if (rsn->rsn_keymgmtset & WPA_ASE_8021X_UNSPEC) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_unspec);
+	}
+	if (rsn->rsn_keymgmtset & WPA_ASE_8021X_PSK) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_psk);
+	}
+
+	/* optional capabilities */
+	if ((rsn->rsn_caps != 0) && (rsn->rsn_caps != RSN_CAP_PREAUTH))
+		ADDSHORT(frm, rsn->rsn_caps);
+
+	/* calculate element length */
+	ie[1] = frm - ie - 2;
+	KASSERT(ie[1] + 2 <= sizeof(struct ieee80211_ie_wpa),
+		("WPA IE too big, %u > %u",
+		ie[1] + 2, (int)sizeof(struct ieee80211_ie_wpa)));
+	return frm;
+#undef ADDSHORT
+#undef ADDSELECTOR
+#undef WPA_OUI_BYTES
+}
+
+/*
+ * Build an RSN (WPA2) information element describing the BSS security
+ * policy at 'ie' and return a pointer just past the element.
+ * Layout written: id, len, version, group cipher, pairwise cipher list,
+ * AKM list, RSN capabilities, and (when PMF is on) a zero PMKID count
+ * plus the group management cipher suite.
+ */
+static u_int8_t *
+ieee80211_setup_rsn_ie(struct ieee80211vap *vap, u_int8_t *ie)
+{
+#define	RSN_OUI_BYTES		0x00, 0x0f, 0xac
+#define	ADDSHORT(frm, v) do {			\
+	frm[0] = (v) & 0xff;			\
+	frm[1] = (v) >> 8;			\
+	frm += 2;				\
+} while (0)
+#define	ADDSELECTOR(frm, sel) do {		\
+	memcpy(frm, sel, 4);			\
+	frm += 4;				\
+} while (0)
+	/* Cipher suite selectors, indexed by IEEE80211_CIPHER_* value. */
+	static const u_int8_t cipher_suite[][4] = {
+		{ RSN_OUI_BYTES, RSN_CSE_WEP40 },	/* NB: 40-bit */
+		{ RSN_OUI_BYTES, RSN_CSE_TKIP },
+		{ RSN_OUI_BYTES, RSN_CSE_WRAP },
+		{ RSN_OUI_BYTES, RSN_CSE_CCMP },
+		{ 0x00, 0x00, 0x00, 0x00 },		/* XXX CKIP */
+		{ RSN_OUI_BYTES, RSN_CSE_NULL },
+	};
+	static const u_int8_t wep104_suite[4] =
+		{ RSN_OUI_BYTES, RSN_CSE_WEP104 };
+	static const u_int8_t key_mgt_unspec[4] =
+		{ RSN_OUI_BYTES, RSN_ASE_8021X_UNSPEC };
+	static const u_int8_t key_mgt_psk[4] =
+		{ RSN_OUI_BYTES, RSN_ASE_8021X_PSK };
+	static const u_int8_t key_mgt_dot1x_sha256[4] =
+		{ RSN_OUI_BYTES, RSN_ASE_8021X_SHA256 };
+	static const u_int8_t key_mgt_psk_sha256[4] =
+		{ RSN_OUI_BYTES, RSN_ASE_8021X_PSK_SHA256 };
+	static const u_int8_t key_mgt_bip[4] =
+		{ RSN_OUI_BYTES, RSN_CSE_BIP };
+	static const u_int8_t key_mgt_ft_8021x[4] =
+		{ RSN_OUI_BYTES, RSN_ASE_FT_8021X };
+	static const u_int8_t key_mgt_ft_psk[4] =
+		{ RSN_OUI_BYTES, RSN_ASE_FT_PSK };
+	const struct ieee80211_rsnparms *rsn = &vap->iv_bss->ni_rsn;
+	u_int8_t *frm = ie;
+	u_int8_t *selcnt;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	*frm++ = IEEE80211_ELEMID_RSN;
+	*frm++ = 0;				/* length filled in below */
+	ADDSHORT(frm, RSN_VERSION);
+
+	/* XXX filter out CKIP */
+
+	/* multicast cipher */
+	if (rsn->rsn_mcastcipher == IEEE80211_CIPHER_WEP &&
+	    rsn->rsn_mcastkeylen >= 13)
+		ADDSELECTOR(frm, wep104_suite);
+	else if (!(IEEE80211_IS_TKIP_ALLOWED(ic)) &&
+			(rsn->rsn_mcastcipher == IEEE80211_CIPHER_TKIP))	/* remove TKIP functionality */
+		ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_AES_CCM]);
+	else
+		ADDSELECTOR(frm, cipher_suite[rsn->rsn_mcastcipher]);
+
+	/* unicast cipher list */
+	selcnt = frm;
+	ADDSHORT(frm, 0);			/* selector count */
+	if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_AES_CCM)) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_AES_CCM]);
+	}
+	if (IEEE80211_IS_TKIP_ALLOWED(ic)) {
+		if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_TKIP)) {
+			selcnt[0]++;
+			ADDSELECTOR(frm, cipher_suite[IEEE80211_CIPHER_TKIP]);
+		}
+	}
+	/* authenticator selector list */
+	/*
+	 * NB: unlike the WPA IE builder above, the key-management set is
+	 * matched exactly (==) rather than tested as a bitmask; AKM
+	 * combinations other than those enumerated here emit no selector.
+	 */
+	selcnt = frm;
+	ADDSHORT(frm, 0);			/* selector count */
+	if (rsn->rsn_keymgmtset == RSN_ASE_8021X_UNSPEC) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_unspec);
+	}
+	if (rsn->rsn_keymgmtset == RSN_ASE_8021X_PSK) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_psk);
+	}
+	if (rsn->rsn_keymgmtset == (RSN_ASE_8021X_PSK | WPA_KEY_MGMT_FT_PSK)) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_psk);
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_ft_psk);
+	}
+	if (rsn->rsn_keymgmtset == (RSN_ASE_8021X_UNSPEC | WPA_KEY_MGMT_FT_IEEE8021X)) {
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_unspec);
+		selcnt[0]++;
+		ADDSELECTOR(frm, key_mgt_ft_8021x);
+	}
+
+	if (vap->iv_pmf) {
+		if ((rsn->rsn_keymgmtset == RSN_ASE_8021X_PSK_SHA256)) {
+			selcnt[0]++;
+			ADDSELECTOR(frm, key_mgt_psk_sha256);
+		} else if ((rsn->rsn_keymgmtset == RSN_ASE_8021X_SHA256)) {
+			selcnt[0]++;
+			ADDSELECTOR(frm, key_mgt_dot1x_sha256);
+		}
+	}
+
+	/* capabilities */
+	/* iv_pmf << 6 presumably sets the 802.11w MFP capability bits - TODO confirm */
+	ADDSHORT(frm, (rsn->rsn_caps | (vap->iv_pmf << 6)));
+	/* XXX PMKID */
+	if (vap->iv_pmf) {
+		/* PMKID here: We dont support PMKID list  */
+		ADDSHORT(frm, 0);
+
+		/* 802.11w Group Management Cipher suite */
+		/*
+		 * NOTE(review): 'selcnt[0]++' below increments the byte at
+		 * 'frm', which ADDSELECTOR() immediately overwrites; the
+		 * group management cipher field has no count octet, so the
+		 * increment is a (confusing but harmless) no-op.
+		 */
+		selcnt = frm;
+		if (rsn->rsn_ucastcipherset & (1 << IEEE80211_CIPHER_AES_CCM)) {
+			selcnt[0]++;
+			ADDSELECTOR(frm, key_mgt_bip);
+		}
+	}
+	/* calculate element length */
+	ie[1] = frm - ie - 2;
+	KASSERT(ie[1] + 2 <= sizeof(struct ieee80211_ie_wpa),
+		("RSN IE too big, %u > %u",
+		ie[1] + 2, (int)sizeof(struct ieee80211_ie_wpa)));
+	return frm;
+#undef ADDSELECTOR
+#undef ADDSHORT
+#undef RSN_OUI_BYTES
+}
+
+/*
+ * Add a WPA/RSN element to a frame.
+ * Requires the vap to have WPA/RSN enabled; appends the RSN (WPA2) IE
+ * first, then the vendor WPA1 IE (both in mixed mode), and returns a
+ * pointer past whatever was written.
+ */
+u_int8_t *
+ieee80211_add_wpa(u_int8_t *frm, struct ieee80211vap *vap)
+{
+
+	KASSERT(vap->iv_flags & IEEE80211_F_WPA, ("no WPA/RSN!"));
+	if (vap->iv_flags & IEEE80211_F_WPA2)
+		frm = ieee80211_setup_rsn_ie(vap, frm);
+	if (vap->iv_flags & IEEE80211_F_WPA1)
+		frm = ieee80211_setup_wpa_ie(vap, frm);
+	return frm;
+}
+
+/*
+ * Append an 802.11r Mobility Domain element (fixed length) carrying the
+ * vap's MDID and the FT-over-DS capability bit; return a pointer just
+ * past the element.
+ */
+uint8_t *
+ieee80211_add_mdie(uint8_t *frm, struct ieee80211vap *vap)
+{
+	struct ieee80211_md_ie *md = (struct ieee80211_md_ie *) frm;
+
+	md->md_id = IEEE80211_ELEMID_MOBILITY_DOMAIN;
+	md->md_len = IEEE80211_MDIE_LEN;
+	md->md_info = vap->iv_mdid;
+	md->md_cap = vap->iv_ft_over_ds ? 1 : 0;
+
+	return frm + sizeof(struct ieee80211_md_ie);
+}
+
+#define	WME_OUI_BYTES		0x00, 0x50, 0xf2
+/*
+ * Add a WME Info element to a frame.
+ * Writes the vendor id/len header, WME OUI, subtype and version, then a
+ * one-octet QoS Info field whose content depends on the operating mode;
+ * returns a pointer past the element.
+ */
+static u_int8_t *
+ieee80211_add_wme(u_int8_t *frm, struct ieee80211_node *ni)
+{
+	static const u_int8_t oui[4] = { WME_OUI_BYTES, WME_OUI_TYPE };
+	struct ieee80211_ie_wme *ie = (struct ieee80211_ie_wme *) frm;
+	struct ieee80211_wme_state *wme = &ni->ni_ic->ic_wme;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = 0;				/* length filled in below */
+	memcpy(frm, oui, sizeof(oui));		/* WME OUI */
+	frm += sizeof(oui);
+	*frm++ = WME_INFO_OUI_SUBTYPE;		/* OUI subtype */
+	*frm++ = WME_VERSION;			/* protocol version */
+	/* QoS Info field depends on operating mode */
+	switch (vap->iv_opmode) {
+	case IEEE80211_M_HOSTAP:
+		/* AP: parameter-set count, plus the U-APSD bit when enabled */
+		*frm = wme->wme_bssChanParams.cap_info_count;
+		if (IEEE80211_VAP_UAPSD_ENABLED(vap))
+			*frm |= WME_CAPINFO_UAPSD_EN;
+		frm++;
+		break;
+	case IEEE80211_M_STA:
+		/* STA: advertise the configured U-APSD flags */
+		*frm++ = vap->iv_uapsdinfo;
+		break;
+	default:
+		*frm++ = 0;
+	}
+
+	/* element length covers everything after the id/len header */
+	ie->wme_len = frm - &ie->wme_oui[0];
+
+	return frm;
+}
+
+/*
+ * Add a WME Parameter element to a frame.
+ * Emits the vendor header, QoS Info octet (with U-APSD bit when
+ * 'uapsd_enable' is set), a reserved octet, and one AC record
+ * (ACI/ACM/AIFSN, CWmin/CWmax exponents, TXOP limit) per access
+ * category; returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_wme_param(u_int8_t *frm, struct ieee80211_wme_state *wme,
+	int uapsd_enable, int is_qtn_wme)
+{
+#define	SM(_v, _f)	(((_v) << _f##_S) & _f)
+#define	ADDSHORT(frm, v) do {			\
+	frm[0] = (v) & 0xff;			\
+	frm[1] = (v) >> 8;			\
+	frm += 2;				\
+} while (0)
+	static const u_int8_t oui[4] = { WME_OUI_BYTES, WME_OUI_TYPE };
+	struct ieee80211_wme_param *ie = (struct ieee80211_wme_param *) frm;
+	int i;
+
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = 0;				/* length filled in below */
+	memcpy(frm, oui, sizeof(oui));		/* WME OUI */
+	frm += sizeof(oui);
+	*frm++ = WME_PARAM_OUI_SUBTYPE;		/* OUI subtype */
+	*frm++ = WME_VERSION;			/* protocol version */
+	*frm = wme->wme_bssChanParams.cap_info_count;
+	if (uapsd_enable)
+		*frm |= WME_CAPINFO_UAPSD_EN;
+	frm++;
+	*frm++ = 0;                             /* reserved field */
+	for (i = 0; i < WME_NUM_AC; i++) {
+		const struct wmm_params *ac;
+#ifdef CONFIG_QVSP
+		/*
+		 * Non-QTN peers get the throttled parameter set for ACs
+		 * currently under VSP throttling; the QTN vendor copy of
+		 * this IE always carries the unthrottled parameters.
+		 */
+		if (!is_qtn_wme && (wme->wme_throt_bm & BIT(i))) {
+			ac = &wme->wme_throt_bssChanParams.cap_wmeParams[i];
+		} else
+#endif
+		{
+			ac = &wme->wme_bssChanParams.cap_wmeParams[i];
+		}
+		*frm++ = SM(i, WME_PARAM_ACI) |
+			SM(ac->wmm_acm, WME_PARAM_ACM) |
+			SM(ac->wmm_aifsn, WME_PARAM_AIFSN);
+		*frm++ = SM(ac->wmm_logcwmax, WME_PARAM_LOGCWMAX) |
+			SM(ac->wmm_logcwmin, WME_PARAM_LOGCWMIN);
+		ADDSHORT(frm, ac->wmm_txopLimit);
+	}
+
+	/* element length covers everything after the id/len header */
+	ie->param_len = frm - &ie->param_oui[0];
+
+	return frm;
+#undef ADDSHORT
+}
+#undef WME_OUI_BYTES
+
+/*
+ * Add an Atheros Advanaced Capability element to a frame
+ * (vendor id/len, Atheros OUI + type/subtype/version, capability octet
+ * and the default key index in little-endian order); returns a pointer
+ * past the element.
+ */
+u_int8_t *
+ieee80211_add_athAdvCap(u_int8_t *frm, u_int8_t capability, u_int16_t defaultKey)
+{
+	static const u_int8_t oui[6] = {(ATH_OUI & 0xff), ((ATH_OUI >>8) & 0xff),
+		((ATH_OUI >> 16) & 0xff), ATH_OUI_TYPE,
+		ATH_OUI_SUBTYPE, ATH_OUI_VERSION};
+	struct ieee80211_ie_athAdvCap *ie = (struct ieee80211_ie_athAdvCap *) frm;
+
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = 0;				/* Length filled in below */
+	memcpy(frm, oui, sizeof(oui));		/* Atheros OUI, type, subtype, and version for adv capabilities */
+	frm += sizeof(oui);
+	*frm++ = capability;
+
+	/* Setup default key index in little endian byte order */
+	*frm++ = (defaultKey & 0xff);
+	*frm++ = ((defaultKey >> 8) & 0xff);
+	ie->athAdvCap_len = frm - &ie->athAdvCap_oui[0];
+
+	return frm;
+}
+
+/*
+ * Add the Quantenna IE to a frame
+ * - all existing fields must be backwards compatible with previous verions.
+ * Fills the fixed-size ieee80211_ie_qtn structure in place (multi-byte
+ * version/rate fields in network byte order via put_unaligned) and
+ * returns a pointer past it.
+ */
+uint8_t *
+ieee80211_add_qtn_ie(uint8_t *frm, struct ieee80211com *ic, uint8_t flags, uint8_t my_flags,
+			uint8_t implicit_ba, uint16_t implicit_ba_size, uint32_t rate_train)
+{
+	struct ieee80211_ie_qtn *ie = (struct ieee80211_ie_qtn *)frm;
+
+	ie->qtn_ie_id = IEEE80211_ELEMID_VENDOR;
+	ie->qtn_ie_len = (uint8_t)sizeof(*ie) - IEEE80211_IE_ID_LEN_SIZE;
+	ieee80211_oui_add_qtn(ie->qtn_ie_oui);
+	ie->qtn_ie_type = QTN_OUI_CFG;
+	/* legacy (Envy) flag byte: force defaults, mask to the Envy bits */
+	ie->qtn_ie_flags = (flags | IEEE80211_QTN_FLAGS_ENVY_DFLT) & IEEE80211_QTN_FLAGS_ENVY;
+	ie->qtn_ie_implicit_ba_tid = implicit_ba;
+	/* advertise defaults but never the v1 beamforming capability */
+	ie->qtn_ie_my_flags = (my_flags | IEEE80211_QTN_CAPS_DFLT) & ~IEEE80211_QTN_BF_VER1;
+	ie->qtn_ie_implicit_ba_tid_h = implicit_ba;
+	ie->qtn_ie_implicit_ba_size = (implicit_ba_size >> IEEE80211_QTN_IE_BA_SIZE_SH);
+	ie->qtn_ie_vsp_version = IEEE80211_QTN_VSP_VERSION;
+
+	put_unaligned(htonl(ic->ic_ver_sw), &ie->qtn_ie_ver_sw);
+	put_unaligned(htons(ic->ic_ver_hw), &ie->qtn_ie_ver_hw);
+	put_unaligned(htons(ic->ic_ver_platform_id), &ie->qtn_ie_ver_platform_id);
+	put_unaligned(htonl(ic->ic_ver_timestamp), &ie->qtn_ie_ver_timestamp);
+	put_unaligned(htonl(rate_train), &ie->qtn_ie_rate_train);
+	put_unaligned(htonl(ic->ic_ver_flags), &ie->qtn_ie_ver_flags);
+
+	return frm + sizeof(*ie);
+}
+
+#ifdef CONFIG_QVSP
+/* Worst-case VSP IE size: header plus one item per configurable entry. */
+static __inline int
+ieee80211_vsp_ie_max_len(struct ieee80211com *ic)
+{
+	int item_bytes = sizeof(struct ieee80211_ie_vsp_item) * ARRAY_SIZE(ic->vsp_cfg);
+
+	return sizeof(struct ieee80211_ie_vsp) + item_bytes;
+}
+
+/*
+ * Add the Quantenna VSP configuration IE to a frame
+ * One ieee80211_ie_vsp_item is emitted per configured (set != 0) entry;
+ * 'end' bounds the output buffer.  Returns a pointer past the IE, or
+ * 'start' unchanged when the items would not fit.
+ */
+static uint8_t *
+ieee80211_add_vsp_ie(struct ieee80211vap *vap, void *start, void *end)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_ie_vsp *vsp_ie = start;
+	struct ieee80211_ie_vsp_item *item_p = &vsp_ie->item[0];
+	int i;
+
+	vsp_ie->id = IEEE80211_ELEMID_VENDOR;
+	ieee80211_oui_add_qtn(vsp_ie->oui);
+	vsp_ie->type = QTN_OUI_VSP_CTRL;
+	vsp_ie->item_cnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ic->vsp_cfg); i++) {
+		if (ic->vsp_cfg[i].set == 0)
+			continue;
+		/*
+		 * Bounds-check BEFORE writing: the original checked after
+		 * put_unaligned()/item_p++, so the final item could be
+		 * written past 'end' before the overflow was detected.
+		 */
+		if ((void *)(item_p + 1) > end) {
+			printk(KERN_INFO "VSP: not adding IE to assoc resp - too long\n");
+			return start;
+		}
+		item_p->index = i;
+		put_unaligned(htonl(ic->vsp_cfg[i].value), &item_p->value);
+		item_p++;
+		vsp_ie->item_cnt++;
+	}
+	/* element length covers everything after the id/len header */
+	vsp_ie->len = (uint8_t *)item_p - &vsp_ie->oui[0];
+
+	return (uint8_t *)item_p;
+}
+
+/*
+ * Append an 802.11w Timeout Interval element advertising the
+ * association comeback time (little-endian, default 1000 TUs).
+ */
+static uint8_t *
+ieee80211_add_timeout_ie(u_int8_t *frm)
+{
+	struct ieee80211_timout_int_ie *ie = (struct ieee80211_timout_int_ie *) frm;
+
+	ie->timout_int_ie = IEEE80211_ELEMID_TIMEOUT_INT;
+	ie->timout_int_len = 5;
+	ie->timout_int_type = IEEE80211_TIMEOUT_ASSOC_COMEBACK;	/* timeout value type */
+	ie->timout_int_value = htole32(IEEE80211_W_ASSOC_COMEBACK_TO);	/* default value is 1000tus */
+
+	return frm + sizeof(struct ieee80211_timout_int_ie);
+}
+
+/*
+ * Add the Quantenna vendor wrapper around a WME Parameter element:
+ * vendor id/len, QTN OUI, QWME type and version, then the standard WME
+ * parameter body (is_qtn_wme=1 so throttled params are not substituted).
+ * Returns a pointer past the element.
+ */
+uint8_t *
+ieee80211_add_qtn_wme_param(struct ieee80211vap *vap, u_int8_t *frm)
+{
+	struct ieee80211_ie_qtn_wme *qwme_ie = (struct ieee80211_ie_qtn_wme *)frm;
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+
+	qwme_ie->qtn_ie_id = IEEE80211_ELEMID_VENDOR;
+	qwme_ie->qtn_ie_len = sizeof(struct ieee80211_ie_qtn_wme) - 2;
+	ieee80211_oui_add_qtn(qwme_ie->qtn_ie_oui);
+	qwme_ie->qtn_ie_type = QTN_OUI_QWME;
+	qwme_ie->qtn_wme_ie_version = QTN_QWME_IE_VERSION;
+
+	return ieee80211_add_wme_param((uint8_t*)&qwme_ie->qtn_wme_ie, wme, IEEE80211_VAP_UAPSD_ENABLED(vap), 1);
+}
+#endif
+
+/*
+ * Add Quantenna pairing hash to a frame
+ * The pairing TLV (type, hash copied from pairing_ie->ie, length) is
+ * assembled on the stack and copied in after the vendor id/len/OUI
+ * header; returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_qtn_pairing_ie(u_int8_t *frm, struct ieee80211_app_ie_t *pairing_ie)
+{
+	struct ieee80211_ie_qtn_pairing_tlv tlv_ie;
+	struct ieee80211_ie_qtn_pairing *ie = (struct ieee80211_ie_qtn_pairing *) frm;
+
+	tlv_ie.qtn_pairing_tlv_type = QTN_OUI_PAIRING;
+	memcpy(tlv_ie.qtn_pairing_tlv_hash, pairing_ie->ie, QTN_PAIRING_TLV_HASH_LEN);
+	tlv_ie.qtn_pairing_tlv_len = htole16(sizeof(struct ieee80211_ie_qtn_pairing_tlv));
+
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = 0;				/* length filled in below */
+	frm += ieee80211_oui_add_qtn(frm);
+	memcpy(frm, &tlv_ie, sizeof(struct ieee80211_ie_qtn_pairing_tlv));
+	frm += sizeof(struct ieee80211_ie_qtn_pairing_tlv);
+	/* element length covers everything after the id/len header */
+	ie->qtn_pairing_ie_len = frm - &ie->qtn_pairing_ie_oui[0];
+
+	return frm;
+}
+/*
+ * Add Quantenna specific 802.11h information elements to a frame.
+ * Emits a vendor IE carrying the channel-switch TSF in network byte
+ * order (written through the overlaid struct while 'frm' is advanced
+ * manually past it); returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_qtn_csatsf_ie(u_int8_t *frm, u_int64_t tsf)
+{
+	struct ieee80211_ie_qtn_csa_tsf *ie = (struct ieee80211_ie_qtn_csa_tsf *)frm;
+
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = 0;		/* Length is filled in below */
+	frm += ieee80211_oui_add_qtn(frm);
+	*frm++ = QTN_OUI_CFG;
+	ie->tsf = htonll(tsf);
+	frm += sizeof(tsf);
+	/* element length covers everything after the id/len header */
+	ie->len = frm - &ie->qtn_ie_oui[0];
+
+	return frm;
+}
+
+/*
+ * Add 802.11h information elements to a frame.
+ * Currently only the Power Capability IE is emitted, and only when TPC
+ * is enabled; returns a pointer past whatever was written.
+ */
+static u_int8_t *
+ieee80211_add_doth(u_int8_t *frm, struct ieee80211com *ic)
+{
+	/* XXX ie structures */
+	if (ic->ic_flags_ext & IEEE80211_FEXT_TPC) {
+		frm[0] = IEEE80211_ELEMID_PWRCAP;
+		frm[1] = 2;	/* min and max transmit power, one octet each */
+		frm[2] = ic->ic_bsschan->ic_minpower;
+		frm[3] = ic->ic_bsschan->ic_maxpower;
+		frm += 4;
+	}
+
+	return frm;
+}
+
+/*
+ * Add 802.11n HT MCS
+ * Fills the supported-MCS set of the HT capabilities IE.  On an AP
+ * building an (re)assoc response the local MCS masks are intersected
+ * with the peer's advertised set; in every other case the local masks
+ * are copied verbatim.
+ */
+static void
+ieee80211_mcs_populate(struct ieee80211_node *ni, struct ieee80211_ie_htcap *ie, struct ieee80211_htcap *htcap, int subtype) {
+
+	/* Update the supported MCS on Assoc response based on intersection of AP and client capability */
+	if ((ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+		(subtype == IEEE80211_FC0_SUBTYPE_ASSOC_RESP || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_RESP)) {
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS1,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS1] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS1]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS2,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS2] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS2]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS3,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS3] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS3]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS4,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS4] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_NSS4]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM1,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM2,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM3,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM4,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM5,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM6,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6] & ni->ni_htcap.mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6]);
+	} else {
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS1,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS1]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS2,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS2]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS3,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS3]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_NSS4,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_NSS4]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM1,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM1]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM2,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM2]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM3,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM3]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM4,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM4]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM5,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM5]);
+		IEEE80211_HTCAP_SET_MCS_VALUE(ie, IEEE80211_HT_MCSSET_20_40_UEQM6,
+						  htcap->mcsset[IEEE80211_HT_MCSSET_20_40_UEQM6]);
+	}
+
+}
+
+/*
+ * Add 802.11n HT Capabilities IE
+ * NOTE(review): this MUTATES the caller's 'htcap' (cap LDPC/STBC bits
+ * are rewritten from the vap flags) in addition to filling the IE;
+ * callers relying on htcap->cap being stable should be aware.
+ * Returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_htcap(struct ieee80211_node *ni, u_int8_t *frm, struct ieee80211_htcap *htcap, int subtype)
+{
+	struct ieee80211_ie_htcap *ie = (struct ieee80211_ie_htcap *)(void*) frm;
+
+	memset(ie, 0, sizeof(struct ieee80211_ie_htcap));
+
+	ie->hc_id = IEEE80211_ELEMID_HTCAP;
+	ie->hc_len = sizeof(struct ieee80211_ie_htcap) - 2;
+
+	/* Update the LDPC capability based on the setting */
+	if (ni->ni_vap->iv_ht_flags & IEEE80211_HTF_LDPC_ENABLED) {
+		htcap->cap |= IEEE80211_HTCAP_C_LDPCCODING;
+	} else {
+		htcap->cap &= ~IEEE80211_HTCAP_C_LDPCCODING;
+	}
+
+	/* Update the STBC capability based on the setting */
+	if (ni->ni_vap->iv_ht_flags & IEEE80211_HTF_STBC_ENABLED) {
+		htcap->cap |= (IEEE80211_HTCAP_C_TXSTBC | IEEE80211_HTCAP_C_RXSTBC);
+	} else {
+		htcap->cap &= ~(IEEE80211_HTCAP_C_TXSTBC | IEEE80211_HTCAP_C_RXSTBC);
+	}
+
+	IEEE80211_HTCAP_SET_CAPABILITIES(ie,htcap->cap);
+
+	/* bit 15 of iv_smps_force marks a forced SM power-save override */
+	if (ni->ni_vap->iv_smps_force & 0x8000) {
+		IEEE80211_HTCAP_SET_PWRSAVE_MODE(ie, ni->ni_vap->iv_smps_force & 0xF);
+	} else {
+		IEEE80211_HTCAP_SET_PWRSAVE_MODE(ie,htcap->pwrsave);
+	}
+
+	IEEE80211_HTCAP_SET_AMPDU_LEN(ie,htcap->maxampdu);
+	IEEE80211_HTCAP_SET_AMPDU_SPACING(ie,htcap->mpduspacing);
+
+	ieee80211_mcs_populate(ni, ie, htcap, subtype);
+
+	IEEE80211_HTCAP_SET_HIGHEST_DATA_RATE(ie,htcap->maxdatarate);
+	IEEE80211_HTCAP_SET_MCS_PARAMS(ie,htcap->mcsparams);
+	IEEE80211_HTCAP_SET_MCS_STREAMS(ie,htcap->numtxspstr);
+
+	/* transmit beamforming capability bytes are copied verbatim */
+	ie->hc_txbf[0] = htcap->hc_txbf[0];
+	ie->hc_txbf[1] = htcap->hc_txbf[1];
+	ie->hc_txbf[2] = htcap->hc_txbf[2];
+	ie->hc_txbf[3] = htcap->hc_txbf[3];
+
+	return frm + sizeof(struct ieee80211_ie_htcap);
+}
+
+
+/*
+ * Add 802.11n HT Information IE
+ * Fills the HT Operation element byte by byte (primary channel, bytes
+ * 1-5, basic MCS set).  On an AP the HT protection opmode is computed
+ * from the observed station mix, with a QTN-specific override when no
+ * legacy/20MHz/other-BSS stations are present; also writes the chosen
+ * opmode back into htinfo->opmode.  Returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_htinfo(struct ieee80211_node *ni, u_int8_t *frm, struct ieee80211_htinfo *htinfo)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic  = vap->iv_ic;		/* back ptr to common state */
+	u_int8_t byteval = 0;
+	struct ieee80211_ie_htinfo *ie = (struct ieee80211_ie_htinfo *)(void*) frm;
+	memset(ie, 0, sizeof(struct ieee80211_ie_htinfo));
+	ie->hi_id = IEEE80211_ELEMID_HTINFO;
+	ie->hi_len = sizeof(struct ieee80211_ie_htinfo) - 2;
+	IEEE80211_HTINFO_SET_PRIMARY_CHANNEL(ie,htinfo->ctrlchannel);
+
+	/* set byte 1 */
+	byteval = 0;
+
+	/* set channel width */
+	byteval |= (htinfo->byte1 & IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40);
+
+	/*
+	 * Std 802.11ac-2013, 10.39.1 'Basic VHT BSS functionality': A VHT
+	 * AP shall set the RIFS Mode field in the HT Operation element to 0.
+	 */
+	if (!IS_IEEE80211_VHT_ENABLED(ic) ||
+			(vap->iv_opmode != IEEE80211_M_HOSTAP)) {
+		/* Rx RIFS is supported */
+		byteval |= IEEE80211_HTINFO_B1_RIFS_MODE;
+	}
+
+	/* set S-PSMP support */
+	/* Deprecated in current draft 11.0 */
+// 	byteval |= (htinfo->byte1 & IEEE80211_HTINFO_B1_CONTROLLED_ACCESS);
+
+	IEEE80211_HTINFO_SET_BYTE_ONE(ie,byteval);
+
+	/* set service level granularity and secondary channel offset */
+	/* Deprecated in current draft 11.0 */
+	//IEEE80211_HTINFO_B1_SET_SIGRANULARITY(ie,htinfo->sigranularity);
+	//
+	IEEE80211_HTINFO_B1_SET_EXT_CHOFFSET(ie,htinfo->choffset);
+
+	/* set byte 2 */
+	byteval = 0;
+
+	/* set op mode */
+	if (IEEE80211_11N_PROTECT_ENABLED(ic) &&
+			(vap->iv_opmode != IEEE80211_M_IBSS)) {
+		if (vap->iv_non_gf_sta_present)
+			byteval |= IEEE80211_HTINFO_B2_NON_GF_PRESENT;
+
+		if (ic->ic_non_ht_non_member || ic->ic_non_ht_sta)
+			byteval |= IEEE80211_HTINFO_B2_OBSS_PROT;
+	}
+
+	/* set OBSS */
+	IEEE80211_HTINFO_SET_BYTE_TWO(ie,byteval);
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		u_int8_t opmode = 0;
+		/* standard 802.11n protection selection from the station mix */
+		if (ic->ic_non_ht_sta != 0) {
+			opmode = IEEE80211_HTINFO_OPMODE_HT_PROT_MIXED;
+		} else {
+			if (ic->ic_non_ht_non_member != 0) {
+				opmode = IEEE80211_HTINFO_OPMODE_HT_PROT_NON_MEM;
+			} else {
+				if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) { /* 20/40 MHZ mode */
+					if (ic->ic_ht_20mhz_only_sta != 0) /* 20 MHZ only HT STA is present */
+						opmode = IEEE80211_HTINFO_OPMODE_HT_PROT_20_ONLY;
+					else
+						opmode = IEEE80211_HTINFO_OPMODE_NO_PROT;
+				} else {
+					opmode = IEEE80211_HTINFO_OPMODE_NO_PROT;
+				}
+			}
+		}
+
+		/*
+		 * If nonHT, 20MHz, 'nonHT in other BSS' stations counts are all 0, then we have
+		 * a QTN specific usage of HT protection field. If any of those counters are non-zero,
+		 * then ht protection field is set as per standard. Otherwise WFA test cases fail.
+		*/
+		if (!ic->ic_non_ht_sta && !ic->ic_ht_20mhz_only_sta && !ic->ic_non_ht_non_member) {
+			/* QTN specific settings */
+			if ((!IEEE80211_COM_WDS_IS_RBS(ic) || !ic->ic_extender_mbs_ocac) && !ic->ic_peer_rts) {
+				opmode = IEEE80211_HTINFO_OPMODE_NO_PROT;
+			} else {
+				opmode = IEEE80211_HTINFO_OPMODE_HT_PROT_NON_MEM;
+			}
+
+		}
+
+		if (!IEEE80211_11N_PROTECT_ENABLED(ic))
+			opmode = IEEE80211_HTINFO_OPMODE_NO_PROT;
+
+		/* record the advertised opmode back into the caller's htinfo */
+		htinfo->opmode = opmode;
+		IEEE80211_HTINFO_B2_SET_OP_MODE(ie, htinfo->opmode);
+	}
+
+	/* set byte 3 */
+	IEEE80211_HTINFO_SET_BYTE_THREE(ie,0);
+
+	/* set byte 4 */
+	byteval = 0;
+
+	if (vap->iv_opmode != IEEE80211_M_IBSS)
+	{
+		/* set dual beacon */
+		byteval |= (htinfo->byte4 & IEEE80211_HTINFO_B4_DUAL_BEACON);
+
+		/* set DUAL CTS requirement */
+		if (vap->iv_dual_cts_required)
+				byteval |= (IEEE80211_HTINFO_B4_DUAL_CTS);
+	}
+
+	IEEE80211_HTINFO_SET_BYTE_FOUR(ie,byteval);
+
+	/* set byte 5 */
+	byteval = 0;
+	if (vap->iv_opmode != IEEE80211_M_IBSS) {
+		/* set STBC beacon */
+		if (vap->iv_stbc_beacon)
+			byteval |= (IEEE80211_HTINFO_B5_STBC_BEACON);
+
+		/* set LSIG TXOP support */
+		if (vap->iv_lsig_txop_ok)
+			byteval |= (IEEE80211_HTINFO_B5_LSIGTXOPPROT);
+	}
+
+	IEEE80211_HTINFO_SET_BYTE_FIVE(ie,byteval);
+
+	IEEE80211_HTINFO_SET_BASIC_MCS_VALUE(ie,IEEE80211_HT_MCSSET_20_40_NSS1,htinfo->basicmcsset[0]);
+	IEEE80211_HTINFO_SET_BASIC_MCS_VALUE(ie,IEEE80211_HT_MCSSET_20_40_NSS2,htinfo->basicmcsset[1]);
+
+	return frm + sizeof(struct ieee80211_ie_htinfo);
+}
+
+
+/*
+ * Add 802.11ac VHT Capabilities IE
+ * Capability flags are taken from the supplied 'vhtcap' with LDPC/STBC
+ * bits overridden from the vap flags and MU beamformer/beamformee bits
+ * from the device capabilities; 'subtype' selects the beacon-only
+ * beamformee-STS interop workaround.  Returns a pointer past the IE.
+ */
+u_int8_t *
+ieee80211_add_vhtcap(struct ieee80211_node *ni, u_int8_t *frm, struct ieee80211_vhtcap *vhtcap, uint8_t subtype)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_ie_vhtcap *ie = (struct ieee80211_ie_vhtcap *)(void *) frm;
+	u_int32_t vhtcap_flags = vhtcap->cap_flags;
+	uint32_t bfstscap;
+
+	memset(ie, 0, sizeof(struct ieee80211_ie_vhtcap));
+
+	ie->vht_id = IEEE80211_ELEMID_VHTCAP;
+	ie->vht_len = sizeof(struct ieee80211_ie_vhtcap) - 2;
+
+	/* override RX LDPC / TX STBC bits from the per-vap configuration */
+	if (vap->iv_vht_flags & IEEE80211_VHTCAP_C_RX_LDPC) {
+		vhtcap_flags |= IEEE80211_VHTCAP_C_RX_LDPC;
+	} else {
+		vhtcap_flags &= ~IEEE80211_VHTCAP_C_RX_LDPC;
+	}
+	if (vap->iv_vht_flags & IEEE80211_VHTCAP_C_TX_STBC) {
+		vhtcap_flags |= IEEE80211_VHTCAP_C_TX_STBC;
+	} else {
+		vhtcap_flags &= ~IEEE80211_VHTCAP_C_TX_STBC;
+	}
+
+	/* MU beamformer/beamformee bits mirror the device capabilities */
+	if (ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP) {
+		vhtcap_flags |= IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP;
+	} else {
+		vhtcap_flags &= ~IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP;
+	}
+
+	if (ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP) {
+		vhtcap_flags |= IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP;
+	} else {
+		vhtcap_flags &= ~IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP;
+	}
+
+	IEEE80211_VHTCAP_SET_CAPFLAGS(ie, vhtcap_flags);
+
+	IEEE80211_VHTCAP_SET_MAXMPDU(ie, vhtcap->maxmpdu);
+	IEEE80211_VHTCAP_SET_CHANWIDTH(ie, vhtcap->chanwidth);
+	/* RX STBC advertised only when TX STBC is enabled on the vap */
+	if (ni->ni_vap->iv_vht_flags & IEEE80211_VHTCAP_C_TX_STBC) {
+		IEEE80211_VHTCAP_SET_RXSTBC(ie, vhtcap->rxstbc);
+	}
+
+	/* FIXME: IOT Workaround for BRCM to set beamformee STS to 3 in beacons
+	 * BRCM sound with us assuming 2 antenna  when STS is set to our max value which is 4
+	 * However when beacons advertise it as 3, the sounding is done in 3 antenna.
+	 * Intel, QCA, MRVL takes the STS value from probe resp / assoc resp.
+	 * So having STS 4 in those frames can make use of 4 antenna in V matrix.
+	 */
+	bfstscap = ((subtype == IEEE80211_FC0_SUBTYPE_BEACON) && (vap->enable_iot_sts_war))
+				? MIN(vhtcap->bfstscap, IEEE80211_VHTCAP_RX_STS_3)
+				: vhtcap->bfstscap;
+
+	IEEE80211_VHTCAP_SET_BFSTSCAP(ie, bfstscap);
+	IEEE80211_VHTCAP_SET_NUMSOUND(ie, vhtcap->numsounding);
+	IEEE80211_VHTCAP_SET_MAXAMPDUEXP(ie, vhtcap->maxampduexp);
+	IEEE80211_VHTCAP_SET_LNKADPTCAP(ie, vhtcap->lnkadptcap);
+
+	IEEE80211_VHTCAP_SET_RX_MCS_NSS(ie, vhtcap->rxmcsmap);
+
+	IEEE80211_VHTCAP_SET_TX_MCS_NSS(ie, vhtcap->txmcsmap);
+
+	IEEE80211_VHTCAP_SET_RX_LGIMAXRATE(ie, vhtcap->rxlgimaxrate);
+	IEEE80211_VHTCAP_SET_TX_LGIMAXRATE(ie, vhtcap->txlgimaxrate);
+
+	return frm + sizeof(struct ieee80211_ie_vhtcap);
+}
+
+/*
+ * 802.11ac VHT Operation IE
+ * Writes channel width, the two center-frequency segment indices and
+ * the basic VHT MCS/NSS set; returns a pointer past the element.
+ */
+uint8_t *ieee80211_add_vhtop(struct ieee80211_node *ni, uint8_t *frm, struct ieee80211_vhtop *vhtop)
+{
+	struct ieee80211_ie_vhtop *ie = (struct ieee80211_ie_vhtop *)frm;
+
+	memset (ie, 0, sizeof(struct ieee80211_ie_vhtop));
+
+	ie->vhtop_id = IEEE80211_ELEMID_VHTOP;
+	ie->vhtop_len = sizeof(struct ieee80211_ie_vhtop) - 2;
+
+	IEEE80211_VHTOP_SET_CHANWIDTH(ie, vhtop->chanwidth);
+	IEEE80211_VHTOP_SET_CENTERFREQ0(ie, vhtop->centerfreq0);
+	IEEE80211_VHTOP_SET_CENTERFREQ1(ie, vhtop->centerfreq1);
+
+	IEEE80211_VHTOP_SET_BASIC_MCS_NSS(ie, vhtop->basicvhtmcsnssset);
+
+	return frm + sizeof(struct ieee80211_ie_vhtop);
+}
+
+/*
+ * Map the peer's advertised VHT TX MCS map to a zero-based spatial
+ * stream count (0 = 1SS ... 3 = 4SS).
+ */
+u_int8_t ieee80211_get_peer_nss(struct ieee80211_node *ni)
+{
+	if (IEEE80211_VHT_HAS_4SS(ni->ni_vhtcap.txmcsmap))
+		return 3;
+	if (IEEE80211_VHT_HAS_3SS(ni->ni_vhtcap.txmcsmap))
+		return 2;
+	if (IEEE80211_VHT_HAS_2SS(ni->ni_vhtcap.txmcsmap))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Add 802.11ac VHT Operating Mode Notification IE
+ * Channel width is derived from the maximum system bandwidth (capped at
+ * 40MHz on 2.4GHz); RX NSS is zero-based (NSS-1) and, on a STA, further
+ * limited to the peer's stream count.  A non-default
+ * ic_vht_opmode_notif value overrides the computed byte entirely.
+ */
+uint8_t *ieee80211_add_vhtop_notif(struct ieee80211_node *ni, uint8_t *frm, struct ieee80211com *ic, int band_24g)
+{
+	struct ieee80211_ie_vhtop_notif *ie = (struct ieee80211_ie_vhtop_notif *)frm;
+	uint8_t chwidth;
+	uint8_t vht_nss_cap = (band_24g ? ic->ic_vht_nss_cap_24g : ic->ic_vht_nss_cap);
+	uint8_t rxnss = min(vht_nss_cap - 1, QTN_GLOBAL_RATE_NSS_MAX - 1);
+	uint8_t rxnss_type = 0;	/* 0: no beamformee-STS-derived NSS type */
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	switch (ic->ic_max_system_bw) {
+	case BW_HT40:
+		chwidth = IEEE80211_CWM_WIDTH40;
+		break;
+	case BW_HT20:
+		chwidth = IEEE80211_CWM_WIDTH20;
+		break;
+	case BW_HT80:
+	default:
+		chwidth = IEEE80211_CWM_WIDTH80;
+		break;
+	}
+
+	if (band_24g && chwidth == IEEE80211_CWM_WIDTH80)
+		chwidth = IEEE80211_CWM_WIDTH40;
+
+	memset(ie, 0, sizeof(struct ieee80211_ie_vhtop_notif));
+
+	ie->id = IEEE80211_ELEMID_OPMOD_NOTIF;
+	ie->len = sizeof(*ie) - 2;
+
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		rxnss = min((uint8_t)(vht_nss_cap - 1), (uint8_t)ieee80211_get_peer_nss(ni));
+	}
+
+	if (ic->ic_vht_opmode_notif == IEEE80211_VHT_OPMODE_NOTIF_DEFAULT) {
+		/* by default, use current configured channel width and nss */
+		ie->vhtop_notif_mode = SM(chwidth, IEEE80211_VHT_OPMODE_CHWIDTH) |
+					SM(rxnss, IEEE80211_VHT_OPMODE_RXNSS) |
+					SM(rxnss_type, IEEE80211_VHT_OPMODE_RXNSS_TYPE);
+	} else {
+		/* operator-forced override: low byte is used verbatim */
+		ie->vhtop_notif_mode = (uint8_t) (ic->ic_vht_opmode_notif & 0x00ff);
+	}
+
+	return frm + sizeof(*ie);
+}
+
+/*
+ * Append a 20/40 BSS Coexistence element carrying the one-octet
+ * coexistence bitmap; return a pointer just past it.
+ */
+u_int8_t *
+ieee80211_add_20_40_bss_coex_ie(u_int8_t *frm, u_int8_t coex)
+{
+	frm[0] = IEEE80211_ELEMID_20_40_BSS_COEX;
+	frm[1] = 1;	/* information field is a single octet */
+	frm[2] = coex;
+
+	return frm + 3;
+}
+
+/*
+ * Build a bitmap (bit N = channel N) of 2.4GHz channels, taken from the
+ * scan cache, on which an observed BSS makes 20/40MHz operation
+ * impermissible on the current channel.  Our own BSS is skipped.
+ * Result is stored through 'pp_ch_list'.
+ * NB: uint16_t holds bits 1..14, sufficient for 2.4GHz channel numbers.
+ */
+void
+ieee80211_get_20_40_bss_into_chan_list(struct ieee80211com *ic,
+		struct ieee80211vap *vap, u_int16_t *pp_ch_list)
+{
+	struct sta_table *st = ic->ic_scan->ss_priv;
+	struct sta_entry *se, *next;
+	uint16_t ch_list = 0;
+	uint8_t se_pri_chan = 0;
+	uint8_t se_sec_chan = 0;
+	uint8_t unallowed = 0;
+
+	TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+		if (!IEEE80211_ADDR_EQ(se->base.se_macaddr, vap->iv_bss->ni_macaddr) &&
+				(se->base.se_chan->ic_ieee <= QTN_2G_LAST_OPERATING_CHAN)) {
+			ieee80211_find_ht_pri_sec_chan(vap, &se->base,
+						&se_pri_chan, &se_sec_chan);
+			unallowed = !ieee80211_20_40_operation_permitted(ic,
+				ic->ic_curchan, se_pri_chan, se_sec_chan);
+		} else {
+			unallowed = 0;
+		}
+
+		if (unallowed)
+			ch_list |= 1 << se->base.se_chan->ic_ieee;
+	}
+	*pp_ch_list = ch_list;
+}
+
+/* Population count of the channel bitmap. */
+static uint8_t
+ieee80211_count_channels(uint16_t ch_list)
+{
+	uint8_t chan_count = 0;
+
+	while (ch_list != 0) {
+		chan_count += ch_list & 1;
+		ch_list >>= 1;
+	}
+
+	return chan_count;
+}
+
+/*
+ * Add 20/40 BSS channel report
+ * Emits a 20/40 BSS Intolerant Channel Report element: operating class
+ * (looked up for 2.4GHz, falling back to global class 81) followed by
+ * every channel set in 'ch_list'.  IE length is channel count + 1 for
+ * the operating class octet.  Returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_20_40_bss_into_ch_rep(u_int8_t *frm, struct ieee80211com *ic, u_int16_t ch_list)
+{
+#define IEEE80211_24GHZ_BAND 25
+#define IEEE80211_GLOBAL_24GHZ_OPER_CLASS 81
+#define bitsz_var(var) (sizeof(var) * 8)
+	int i;
+	uint8_t cur_reg_class = 0;
+
+	/* resolve the operating class once, iff any channel is reported */
+	for (i = 1; i < bitsz_var(ch_list); i++) {
+		if (ch_list & (1 << i)) {
+			cur_reg_class = ieee80211_get_current_operating_class(ic->ic_country_code,
+						ic->ic_bsschan->ic_ieee,
+						IEEE80211_24GHZ_BAND);
+			if (!cur_reg_class)
+				cur_reg_class = IEEE80211_GLOBAL_24GHZ_OPER_CLASS;
+			break;
+		}
+	}
+
+	*frm++ = IEEE80211_ELEMID_20_40_IT_CH_REP;
+	*frm++ = ieee80211_count_channels(ch_list) + 1;
+	*frm++ = cur_reg_class;
+	for (i = 1; i < bitsz_var(ch_list); i++) {
+		if (ch_list & (1 << i)) {
+			*frm++ = i;
+		}
+	}
+
+	return frm;
+}
+
+/*
+ * Append an Overlapping BSS Scan Parameters element by copying the
+ * parameter fields (everything after the two-octet id/len header) out
+ * of 'obss_ie'; return a pointer just past the element.
+ */
+u_int8_t *
+ieee80211_add_obss_scan_ie(u_int8_t *frm, struct ieee80211_obss_scan_ie *obss_ie)
+{
+	const size_t payload = sizeof(struct ieee80211_obss_scan_ie) - 2;
+
+	*frm++ = IEEE80211_ELEMID_OBSS_SCAN;
+	*frm++ = payload;
+	memcpy(frm, &obss_ie->obss_passive_dwell, payload);
+
+	return frm + payload;
+}
+
+/*
+* Add Extender Role IE
+* Vendor IE: QTN OUI, extender-role type, one role octet.
+*/
+u_int8_t *
+ieee80211_add_qtn_extender_role_ie(uint8_t *frm, uint8_t role)
+{
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = sizeof(struct ieee80211_qtn_ext_role) - 2;
+	frm += ieee80211_oui_add_qtn(frm);
+	*frm++ = QTN_OUI_EXTENDER_ROLE;
+	*frm++ = role;
+	return frm;
+}
+
+/*
+ * Add the Extender BSSID vendor IE: the MBS BSSID, the RBS count, and
+ * a fixed-size array of QTN_MAX_RBS_NUM RBS BSSIDs (unused slots are
+ * emitted as-is).  Returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_qtn_extender_bssid_ie(struct ieee80211vap *vap, uint8_t *frm)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int i;
+
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = sizeof(struct ieee80211_qtn_ext_bssid) - 2;
+	frm += ieee80211_oui_add_qtn(frm);
+	*frm++ = QTN_OUI_EXTENDER_BSSID;
+
+	memcpy(frm, ic->ic_extender_mbs_bssid, IEEE80211_ADDR_LEN);
+	frm = frm + IEEE80211_ADDR_LEN;
+	*frm++ = ic->ic_extender_rbs_num;
+	for (i = 0; i < QTN_MAX_RBS_NUM; i++) {
+		memcpy(frm, ic->ic_extender_rbs_bssid[i], IEEE80211_ADDR_LEN);
+		frm = frm + IEEE80211_ADDR_LEN;
+	}
+
+	return frm;
+}
+
+/*
+ * Add the Extender State vendor IE: one flags octet (MBS OCAC active
+ * when 'ocac' is set) followed by three reserved zero octets.
+ * Returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_qtn_extender_state_ie(uint8_t *frm, uint8_t ocac)
+{
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = sizeof(struct ieee80211_qtn_ext_state) - 2;
+	frm += ieee80211_oui_add_qtn(frm);
+	*frm++ = QTN_OUI_EXTENDER_STATE;
+	*frm++ = (ocac ? QTN_EXT_MBS_OCAC : 0);
+	*frm++ = 0;
+	*frm++ = 0;
+	*frm++ = 0;
+
+	return frm;
+}
+
+/*
+ * Add the OCAC State vendor IE with a fixed OCAC_STATE_NONE state and a
+ * zero parameter octet; returns a pointer past the element.
+ */
+u_int8_t *
+ieee80211_add_qtn_ocac_state_ie(uint8_t *frm)
+{
+	*frm++ = IEEE80211_ELEMID_VENDOR;
+	*frm++ = sizeof(struct ieee80211_ie_qtn_ocac_state) - 2;
+	frm += ieee80211_oui_add_qtn(frm);
+	*frm++ = QTN_OUI_OCAC_STATE;
+	*frm++ = OCAC_STATE_NONE;
+	*frm++ = 0;
+
+	return frm;
+}
+
+/**
+ * Step over one information element in a buffer.
+ *
+ * Stores the current IE's body length in *p_this_ie_len and returns a
+ * pointer to the next IE, or NULL when the buffer is exhausted, the
+ * current IE does not fit inside buf_len, or p_buf is NULL (in which
+ * case *p_this_ie_len is left untouched).
+ */
+static void *
+ieee80211_smash_ie(const void *p_buf, int buf_len, int *p_this_ie_len)
+{
+	const struct ieee80211_ie *ie = p_buf;
+	const u_int8_t *next;
+	const u_int8_t *end = (const u_int8_t *)p_buf + buf_len;
+
+	if (p_buf == NULL || buf_len < IEEE80211_IE_ID_LEN_SIZE)
+		return NULL;
+	if (ie->len + IEEE80211_IE_ID_LEN_SIZE > buf_len)
+		return NULL;
+
+	*p_this_ie_len = ie->len;
+	next = (const u_int8_t *)p_buf + ie->len + IEEE80211_IE_ID_LEN_SIZE;
+	if (next < end)
+		return (void *)next;
+	return NULL;
+}
+
+/*
+ * Create a probe request frame with the specified ssid
+ * and any optional information element data.
+ *
+ * Only vendor elements (excluding WPA/RSN) from 'optie' are copied into
+ * the frame; the standard elements are generated locally.
+ *
+ * Returns the new skb, or NULL if no management buffer could be
+ * allocated (is_tx_nobuf is incremented in that case).
+ */
+struct sk_buff *
+ieee80211_get_probereq(struct ieee80211_node *ni,
+	const u_int8_t sa[IEEE80211_ADDR_LEN],
+	const u_int8_t da[IEEE80211_ADDR_LEN],
+	const u_int8_t bssid[IEEE80211_ADDR_LEN],
+	const u_int8_t *ssid, size_t ssidlen,
+	const void *optie, size_t optielen)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	enum ieee80211_phymode mode;
+	struct ieee80211_frame *wh;
+	struct sk_buff *skb;
+	u_int8_t *frm;
+
+	mode = ic->ic_curmode;
+	/*
+	 * prreq frame format
+	 *	[tlv] ssid
+	 *	[tlv] supported rates
+	 *	[tlv] extended supported rates
+	 *	[tlv] HT capabilities
+	 *	[tlv] user-specified ie's
+	 */
+	skb = ieee80211_getmgtframe(&frm, 2 + IEEE80211_NWID_LEN +
+	       2 + IEEE80211_RATE_SIZE +
+	       2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) +
+	       ((ic->ic_curmode >= IEEE80211_MODE_11NA) ?
+			(sizeof(struct ieee80211_ie_htcap) +
+			 sizeof(struct ieee80211_extcap_param)) : 0) +
+	       (optie != NULL ? optielen : 0) +
+	       vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].length +
+	       (IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) ? sizeof(struct ieee80211_ie_vhtcap): 0)
+	       );
+
+	if (skb == NULL) {
+		vap->iv_stats.is_tx_nobuf++;
+		return NULL;
+	}
+
+	frm = ieee80211_add_ssid(frm, ssid, ssidlen);
+	frm = ieee80211_add_rates(frm, &ic->ic_sup_rates[mode]);
+
+	if (ic->ic_curmode >= IEEE80211_MODE_11NA) {
+		frm = ieee80211_add_htcap(ni, frm, &ic->ic_htcap, IEEE80211_FC0_SUBTYPE_PROBE_REQ);
+		/* Ext. Capabilities - For AP mode hostapd adds the extended cap */
+		if (vap->iv_opmode == IEEE80211_M_STA)
+			frm = ieee80211_add_extcap(frm);
+	}
+
+	frm = ieee80211_add_xrates(frm, &ic->ic_sup_rates[mode]);
+
+	if (IS_IEEE80211_VHT_ENABLED(ic)) {
+		frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap, IEEE80211_FC0_SUBTYPE_PROBE_REQ);
+	} else if (IS_IEEE80211_11NG_VHT_ENABLED(ic)) {
+		/* QTN 2.4G VHT IE */
+		frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap_24g, IEEE80211_FC0_SUBTYPE_PROBE_REQ);
+	}
+
+	/*
+	 * Only add in vendor IEs to the probe request frame.
+	 *
+	 * Bug fix: the previous do/while loop used "continue" to skip
+	 * WPA/RSN vendor IEs, which jumped to the loop test without
+	 * advancing p_ie or optielen, so the loop spun forever whenever a
+	 * WPA/RSN IE (or a zero-length IE) appeared in optie.  The loop
+	 * below advances to the next IE on every iteration.
+	 */
+	if (optie != NULL) {
+		struct ieee80211_ie *p_ie = (struct ieee80211_ie *)optie;
+
+		while (p_ie != NULL) {
+			struct ieee80211_ie *p_next_ie;
+			int this_len = 0;
+
+			p_next_ie = ieee80211_smash_ie(p_ie, optielen, &this_len);
+			if (this_len) {
+				/*
+				 * Rules for probe request are that vendor elements can be
+				 * appended, and only a few of the standard IEs.  WPA/RSN
+				 * vendor IEs are never forwarded.
+				 */
+				if (p_ie->id == IEEE80211_ELEMID_VENDOR &&
+						((struct ieee80211_ie_wme *)p_ie)->wme_type != WPA_RSN_OUI_TYPE) {
+					memcpy(frm, p_ie, this_len + IEEE80211_IE_ID_LEN_SIZE);
+					frm += this_len + IEEE80211_IE_ID_LEN_SIZE;
+				}
+				optielen -= this_len + IEEE80211_IE_ID_LEN_SIZE;
+			}
+			p_ie = p_next_ie;
+		}
+	}
+
+	if (vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].ie) {
+		memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].ie,
+			vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].length);
+		frm += vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_REQ].length;
+	}
+
+	skb_trim(skb, frm - skb->data);
+
+	wh = (struct ieee80211_frame *)
+		skb_push(skb, sizeof(struct ieee80211_frame));
+	ieee80211_send_setup(vap, ni, wh,
+		IEEE80211_FC0_TYPE_MGT,
+		IEEE80211_FC0_SUBTYPE_PROBE_REQ,
+		sa, da, bssid);
+
+	/* FIXME power management? */
+
+	IEEE80211_NODE_STAT(ni, tx_probereq);
+	IEEE80211_NODE_STAT(ni, tx_mgmt);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+		"[%s] send probe req on channel %u\n",
+		ether_sprintf(wh->i_addr1),
+		ieee80211_chan2ieee(ic, ic->ic_curchan));
+
+	return skb;
+}
+EXPORT_SYMBOL(ieee80211_get_probereq);
+
+/*
+ * Build and transmit a probe request frame with the specified ssid
+ * and any optional information element data.
+ *
+ * A node reference is taken for the transmit path; it is released
+ * again if no frame could be allocated.
+ *
+ * Returns 0 on success or -ENOMEM when frame allocation fails.
+ */
+int
+ieee80211_send_probereq(struct ieee80211_node *ni,
+	const u_int8_t sa[IEEE80211_ADDR_LEN],
+	const u_int8_t da[IEEE80211_ADDR_LEN],
+	const u_int8_t bssid[IEEE80211_ADDR_LEN],
+	const u_int8_t *ssid, size_t ssidlen,
+	const void *optie, size_t optielen)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct sk_buff *probe_skb;
+
+	ieee80211_ref_node(ni);
+
+	probe_skb = ieee80211_get_probereq(ni, sa, da, bssid,
+			ssid, ssidlen, optie, optielen);
+	if (probe_skb == NULL) {
+		/* allocation failed - drop the reference taken above */
+		ieee80211_free_node(ni);
+		return -ENOMEM;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+			"[%s] send probe req frame on channel %u\n",
+			ether_sprintf(ni->ni_macaddr),
+			ieee80211_chan2ieee(ic, ic->ic_curchan));
+
+	ic->ic_send_80211(ic, ni, probe_skb, WME_AC_BE, 0);
+
+	return 0;
+}
+
+/* Send a broadcast CSA frame, announcing the new channel. References are from
+ * IEEE 802.11h-2003. CSA frame format is an "Action" frame (Type: 00, Subtype:
+ * 1101, see 7.1.3.1.2)
+ *
+ * [1] Category : 0, Spectrum Management, 7.3.1.11
+ * [1] Action : 4, Channel Switch Announcement, 7.4.1 and 7.4.1.5
+ * [1] Element ID : 37, Channel Switch Announcement, 7.3.2
+ * [1] Length : 3, 7.3.2.20
+ * [1] Channel Switch Mode : 1, stop transmission immediately
+ * [1] New Channel Number
+ * [1] Channel Switch Count in TBTT : 0, immediate channel switch
+ *
+ * csa_mode  : IEEE80211_CSA_CAN_STOP_TX / IEEE80211_CSA_MUST_STOP_TX
+ * csa_chan  : new IEEE channel number
+ * csa_count : TBTT until Channel Switch happens
+ * tsf       : when non-zero, appended as a Quantenna CSA-TSF vendor IE
+ */
+void
+ieee80211_send_csa_frame(struct ieee80211vap *vap,
+				u_int8_t csa_mode,
+				u_int8_t csa_chan,
+				u_int8_t csa_count,
+				u_int64_t tsf)
+{
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct ieee80211com *ic = ni->ni_ic;
+	uint32_t bw = ieee80211_get_bw(ic);
+	uint8_t wband_ie_len = ieee80211_wband_chanswitch_ie_len(bw);
+	struct sk_buff *skb;
+	u_int8_t *frm;
+	int frm_len;
+
+	frm_len = IEEE80211_CSA_LEN + ieee80211_sec_chan_off_ie_len() + wband_ie_len;
+	if (tsf != 0)
+		frm_len += sizeof(struct ieee80211_ie_qtn_csa_tsf);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+			"%s: Sending action frame with CSA IE: %u/%u/%u\n",
+			__func__, csa_mode, csa_chan, csa_count);
+
+	skb = ieee80211_getmgtframe(&frm, frm_len);
+	if (skb == NULL) {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"%s: cannot get buf; size %u", __func__, frm_len);
+		vap->iv_stats.is_tx_nobuf++;
+		return;
+	}
+
+	*frm++ = IEEE80211_ACTION_CAT_SPEC_MGMT;  /* Category */
+	*frm++ = IEEE80211_ACTION_S_CHANSWITCHANN;      /* Spectrum Management */
+	frm = ieee80211_add_csa(frm, csa_mode, csa_chan, csa_count);
+	ieee80211_add_sec_chan_off(&frm, ic, csa_chan);
+
+	if (wband_ie_len)
+		frm = ieee80211_add_wband_chanswitch(frm, ic);
+
+	if (tsf != 0)
+		ieee80211_add_qtn_csatsf_ie(frm, tsf);
+
+	/* reference is consumed by the management output path */
+	ieee80211_ref_node(ni);
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		/* AP mode - broadcast the announcement to all stations */
+		ieee80211_mgmt_output(ni, skb, IEEE80211_FC0_SUBTYPE_ACTION,
+					vap->iv_dev->broadcast);
+	} else {
+		/* STA mode - tell AP to change channel */
+		ieee80211_mgmt_output(ni, skb, IEEE80211_FC0_SUBTYPE_ACTION,
+					ni->ni_bssid);
+	}
+}
+EXPORT_SYMBOL(ieee80211_send_csa_frame);
+
+#ifdef CONFIG_QVSP
+/*
+ * Build a Quantenna VSP vendor action frame.
+ *
+ * qvsp_a->type selects the frame body:
+ *   QVSP_ACTION_STRM_CTRL - per-stream state control carrying up to
+ *                           IEEE8021_QVSP_MAX_ACT_ITEMS stream IDs
+ *   QVSP_ACTION_VSP_CTRL  - configuration control carrying up to
+ *                           IEEE8021_QVSP_MAX_ACT_ITEMS index/value pairs
+ *
+ * On success the new skb is stored in *pp_skb and 0 is returned.
+ * Returns -ENOMEM when no management buffer could be allocated and
+ * -EINVAL for an unrecognized action type (skb stays NULL).
+ */
+static int
+ieee80211_compile_action_qvsp_frame(struct ieee80211vap *vap, struct ieee80211_qvsp_act *qvsp_a,
+				struct sk_buff **pp_skb)
+{
+	struct sk_buff *skb = NULL;
+
+	switch (qvsp_a->type) {
+	case QVSP_ACTION_STRM_CTRL: {
+		struct ieee80211_qvsp_act_strm_ctrl *qvsp_asc =
+			(struct ieee80211_qvsp_act_strm_ctrl *)qvsp_a;
+		int total_len;
+		struct ieee80211_qvsp_act_strm_ctrl_s *qa;
+		struct ieee80211_qvsp_strm_id *qai;
+		struct ieee80211_qvsp_strm_id *qvsp_asc_i = &qvsp_asc->strm_items[0];
+		int i;
+		u_int8_t *frm;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+				"VSP: constructing stream ctrl frame\n", 0);
+
+		/* clamp the item count so the frame stays within one MTU */
+		if (qvsp_asc->count > IEEE8021_QVSP_MAX_ACT_ITEMS) {
+			printk(KERN_INFO "VSP: truncating strm ctrl frame - too long\n");
+			qvsp_asc->count = IEEE8021_QVSP_MAX_ACT_ITEMS;
+		}
+
+		total_len = sizeof(*qa) + (qvsp_asc->count * sizeof(*qai));
+		KASSERT(total_len <= IEEE80211_MTU_MAX, ("VSP: strm ctrl frame is too large"));
+		skb = ieee80211_getmgtframe(&frm, total_len);
+		if (!skb) {
+			return -ENOMEM;
+		}
+		/* Common header */
+		qa = (struct ieee80211_qvsp_act_strm_ctrl_s *)frm;
+		qai = &qa->strm_items[0];
+		qa->header.category = IEEE80211_ACTION_CAT_VENDOR;
+		ieee80211_oui_add_qtn(qa->header.oui);
+		qa->header.type = QVSP_ACTION_TYPE_VSP;
+		qa->header.action = qvsp_a->type;
+		qa->strm_state = qvsp_asc->strm_state;
+		qa->dis_attr.throt_policy = qvsp_asc->dis_attr.throt_policy;
+		qa->dis_attr.throt_rate = qvsp_asc->dis_attr.throt_rate;
+		qa->dis_attr.demote_rule = qvsp_asc->dis_attr.demote_rule;
+		qa->dis_attr.demote_state = qvsp_asc->dis_attr.demote_state;
+		qa->count = qvsp_asc->count;
+		/* Set state for one or more streams */
+		for (i = 0; i < qa->count; i++) {
+			*qai++ = *qvsp_asc_i++;
+		}
+		break;
+	}
+	case QVSP_ACTION_VSP_CTRL: {
+		struct ieee80211_qvsp_act_cfg *qvsp_ac = (struct ieee80211_qvsp_act_cfg *)qvsp_a;
+		int total_len;
+		struct ieee80211_qvsp_act_vsp_ctrl_s *qa;
+		struct ieee80211_qvsp_act_vsp_ctrl_item_s *qai;
+		struct ieee80211_qvsp_act_cfg_item *qvsp_ac_i = &qvsp_ac->cfg_items[0];
+		int i;
+		u_int8_t *frm;
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+				"VSP: constructing cfg frame\n", 0);
+
+		/* clamp the item count so the frame stays within one MTU */
+		if (qvsp_ac->count > IEEE8021_QVSP_MAX_ACT_ITEMS) {
+			printk(KERN_INFO "VSP: truncating cfg frame - too long\n");
+			qvsp_ac->count = IEEE8021_QVSP_MAX_ACT_ITEMS;
+		}
+		total_len = sizeof(*qa) + (qvsp_ac->count * sizeof(*qai));
+
+		KASSERT(total_len <= IEEE80211_MTU_MAX, ("VSP: cfg frame is too large"));
+		skb = ieee80211_getmgtframe(&frm, total_len);
+		if (!skb) {
+			return -ENOMEM;
+		}
+		/* Common header */
+		qa = (struct ieee80211_qvsp_act_vsp_ctrl_s *)frm;
+		qai = &qa->ctrl_items[0];
+		qa->header.category = IEEE80211_ACTION_CAT_VENDOR;
+		ieee80211_oui_add_qtn(qa->header.oui);
+		qa->header.type = QVSP_ACTION_TYPE_VSP;
+		qa->header.action = qvsp_a->type;
+		qa->count = qvsp_ac->count;
+		/* Zero or more config index/value pairs. */
+		/* values are carried in network byte order on the air */
+		for (i = 0; i < qa->count; i++) {
+			qai->index = htonl(qvsp_ac_i->index);
+			qai->value = htonl(qvsp_ac_i->value);
+			qai++;
+			qvsp_ac_i++;
+		}
+		break;
+	}
+	default:
+		/* unknown action type: skb stays NULL, reported as -EINVAL below */
+		break;
+	}
+
+	if (skb) {
+		*pp_skb = skb;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+/*
+ * Build a 20/40 BSS coexistence action frame.
+ *
+ * The coexistence value comes from action_data->params when provided,
+ * otherwise from vap->iv_coex.  In STA mode a 20/40 BSS Intolerant
+ * Channel Report is appended when any intolerant channels were found,
+ * and the 20 MHz width request bit is set.
+ *
+ * On success *pp_skb receives the new skb and 0 is returned; returns -1
+ * when no management buffer could be allocated.
+ */
+static int
+ieee80211_compile_action_20_40_coex_frame(struct ieee80211vap *vap, struct ieee80211_action_data *action_data,
+				struct sk_buff **pp_skb, struct ieee80211_node *ni)
+{
+	struct sk_buff *skb;
+	int32_t frame_len;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t *frm;
+	uint8_t *coex_value = (uint8_t *)action_data->params;
+	uint8_t coex = vap->iv_coex;
+	uint16_t ch_list = 0;
+
+	/* an explicit coexistence value in params overrides the vap default */
+	if (coex_value)
+		coex = *coex_value;
+
+	frame_len = sizeof(struct ieee80211_action) + sizeof(struct ieee80211_20_40_coex_param);
+
+	if (ic->ic_opmode == IEEE80211_M_STA) {
+		uint8_t chan_count;
+
+		ieee80211_get_20_40_bss_into_chan_list(ic, vap, &ch_list);
+		chan_count = ieee80211_count_channels(ch_list);
+		if (chan_count) {
+			frame_len += sizeof(struct ieee80211_20_40_in_ch_rep) + chan_count;
+			coex |= WLAN_20_40_BSS_COEX_20MHZ_WIDTH_REQ;
+		}
+	}
+
+	skb = ieee80211_getmgtframe(&frm, frame_len);
+	if (skb == NULL)
+		return -1;
+
+	*frm++ = action_data->cat;
+	*frm++ = action_data->action;
+
+	frm = ieee80211_add_20_40_bss_coex_ie(frm, coex);
+
+	if (ic->ic_opmode == IEEE80211_M_STA && ch_list)
+		frm = ieee80211_add_20_40_bss_into_ch_rep(frm, ic, ch_list);
+
+	ni->ni_coex = 0;
+	skb_trim(skb, frm - skb->data);
+
+	/*
+	 * skb cannot be NULL here (the allocation failure path returned
+	 * above), so assign unconditionally; the old "if (skb)" guard was
+	 * dead code after skb had already been dereferenced.
+	 */
+	*pp_skb = skb;
+
+	return 0;
+}
+
+/*
+ * Build an SA Query action frame carrying the 16-bit transaction
+ * identifier supplied via action_data->params.
+ *
+ * On success *pp_skb receives the new skb and 0 is returned; returns -1
+ * when no management buffer could be allocated.
+ */
+static int
+ieee80211_compile_action_sa_query_frame(struct ieee80211vap *vap, struct ieee80211_action_data *action_data,
+				struct sk_buff **pp_skb)
+{
+	uint16_t *tid = (uint16_t *)action_data->params;
+	int32_t frame_len = sizeof(struct ieee80211_action_sa_query);
+	struct sk_buff *skb;
+	u_int8_t *frm;
+
+	skb = ieee80211_getmgtframe(&frm, frame_len);
+	if (skb == NULL)
+		return -1;
+
+	*frm++ = IEEE80211_ACTION_CAT_SA_QUERY;
+	*frm++ = action_data->action;
+	ADDINT16(frm, *tid);
+
+	skb_trim(skb, frm - skb->data);
+	*pp_skb = skb;
+
+	return 0;
+}
+/*
+ * Return the length in bytes of a measurement request IE described by
+ * mrequest_ctrl, or -1 for an unknown measurement type.  For STA
+ * statistics requests the optional sub-element list is walked (but not
+ * consumed) to size the Quantenna vendor sub-elements.
+ */
+int32_t ieee80211_measure_request_ie_len(struct ieee80211_meas_request_ctrl *mrequest_ctrl)
+{
+	int32_t len;
+
+	switch (mrequest_ctrl->meas_type) {
+	case IEEE80211_CCA_MEASTYPE_BASIC:
+	case IEEE80211_CCA_MEASTYPE_CCA:
+	case IEEE80211_CCA_MEASTYPE_RPI:
+		/* all three spectrum-management requests share one fixed body */
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq);
+		break;
+	case IEEE80211_RM_MEASTYPE_STA:
+	{
+		ieee80211_11k_sub_element *sub;
+		ieee80211_11k_sub_element_head *head;
+		int32_t bit;
+
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq_sta_stat);
+
+		if (mrequest_ctrl->u.sta_stats.sub_item == NULL)
+			break;
+
+		head = (ieee80211_11k_sub_element_head *)mrequest_ctrl->u.sta_stats.sub_item;
+		SLIST_FOREACH(sub, head, next) {
+			struct stastats_subele_vendor *vendor;
+			u_int32_t flags;
+
+			if (sub->sub_id != IEEE80211_ELEMID_VENDOR)
+				continue;
+
+			vendor = (struct stastats_subele_vendor *)sub->data;
+			flags = vendor->flags;
+			len += sizeof(struct ieee80211_ie_qtn_rm_measure_sta);
+
+			if (!IEEE80211_IS_ALL_SET(flags, RM_QTN_MAX)) {
+				len += 1;	/* TLV count octet */
+
+				/* two octets per requested sub-type TLV */
+				for (bit = RM_QTN_TX_STATS; bit <= RM_QTN_MAX; bit++) {
+					if (flags & (BIT(bit)))
+						len += 2;
+				}
+
+				for (bit = RM_QTN_CTRL_START; bit <= RM_QTN_CTRL_END; bit++) {
+					if (flags & (BIT(bit)))
+						len += 2;
+				}
+			}
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_QTN_CCA:
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq);
+		break;
+	case IEEE80211_RM_MEASTYPE_CH_LOAD:
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq_chan_load);
+		break;
+	case IEEE80211_RM_MEASTYPE_NOISE:
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq_noise_his);
+		break;
+	case IEEE80211_RM_MEASTYPE_BEACON:
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq_beacon);
+		break;
+	case IEEE80211_RM_MEASTYPE_FRAME:
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq_frame);
+		break;
+	case IEEE80211_RM_MEASTYPE_CATEGORY:
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq_trans_stream_cat);
+		break;
+	case IEEE80211_RM_MEASTYPE_MUL_DIAG:
+		len = sizeof(struct ieee80211_ie_measure_comm)
+			+ sizeof(struct ieee80211_ie_measreq_multicast_diag);
+		break;
+	default:
+		len = -1;
+		break;
+	}
+
+	return len;
+}
+
+/*
+ * Return the length in bytes of a measurement report IE described by
+ * mreport_ctrl, or -1 for an unknown measurement type.  When the report
+ * mode is non-zero the measurement report field is absent and only the
+ * common header is counted.
+ */
+int32_t ieee80211_measure_report_ie_len(struct ieee80211_meas_report_ctrl *mreport_ctrl)
+{
+	const int32_t comm_len = sizeof(struct ieee80211_ie_measure_comm);
+	const int have_report = (mreport_ctrl->report_mode == 0);
+	int32_t len;
+
+	switch (mreport_ctrl->meas_type) {
+	case IEEE80211_CCA_MEASTYPE_BASIC:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_basic) : 0);
+		break;
+	case IEEE80211_CCA_MEASTYPE_CCA:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_cca) : 0);
+		break;
+	case IEEE80211_CCA_MEASTYPE_RPI:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_rpi) : 0);
+		break;
+	case IEEE80211_RM_MEASTYPE_STA:
+	{
+		int32_t bit;
+
+		len = comm_len;
+		if (have_report) {
+			ieee80211_11k_sub_element *sub;
+			ieee80211_11k_sub_element_head *head;
+
+			len += sizeof(struct ieee80211_ie_measrep_sta_stat);
+
+			/* fixed-size statistics block, selected by group id */
+			switch (mreport_ctrl->u.sta_stats.group_id) {
+			case 0:
+				len += sizeof(struct ieee80211_rm_sta_stats_group0);
+				break;
+			case 1:
+				len += sizeof(struct ieee80211_rm_sta_stats_group1);
+				break;
+			case 2: case 3: case 4: case 5:
+			case 6: case 7: case 8: case 9:
+				len += sizeof(struct ieee80211_rm_sta_stats_group2to9);
+				break;
+			case 10:
+				len += sizeof(struct ieee80211_rm_sta_stats_group10);
+				break;
+			case 11:
+				len += sizeof(struct ieee80211_rm_sta_stats_group11);
+				break;
+			case 12:
+				len += sizeof(struct ieee80211_rm_sta_stats_group12);
+				break;
+			case 13:
+				len += sizeof(struct ieee80211_rm_sta_stats_group13);
+				break;
+			case 14:
+				len += sizeof(struct ieee80211_rm_sta_stats_group14);
+				break;
+			case 15:
+				len += sizeof(struct ieee80211_rm_sta_stats_group15);
+				break;
+			default:
+				len += sizeof(struct ieee80211_rm_sta_stats_group16);
+				break;
+			}
+
+			if (mreport_ctrl->u.sta_stats.sub_item == NULL)
+				break;
+
+			head = (ieee80211_11k_sub_element_head *)mreport_ctrl->u.sta_stats.sub_item;
+			/* optional sub element length */
+			SLIST_FOREACH(sub, head, next) {
+				struct stastats_subele_vendor *vendor;
+				u_int32_t vendor_flags;
+
+				if (sub->sub_id != IEEE80211_ELEMID_VENDOR)
+					continue;
+
+				vendor = (struct stastats_subele_vendor *)sub->data;
+				vendor_flags = vendor->flags;
+
+				len += sizeof(struct ieee80211_ie_qtn_rm_measure_sta);
+				if (IEEE80211_IS_ALL_SET(vendor_flags, RM_QTN_MAX)) {
+					len += sizeof(struct ieee80211_ie_qtn_rm_sta_all);
+				} else {
+					len++;	/* TLV count octet */
+
+					for (bit = RM_QTN_TX_STATS; bit <= RM_QTN_MAX; bit++) {
+						if (vendor_flags & (BIT(bit))) {
+							len += 2;
+							len += ieee80211_meas_sta_qtn_report_subtype_len[bit];
+						}
+					}
+
+					for (bit = RM_QTN_CTRL_START; bit <= RM_QTN_CTRL_END; bit++) {
+						if (vendor_flags & (BIT(bit))) {
+							len += 2;
+							len += ieee80211_meas_sta_qtn_report_subtype_len[bit];
+						}
+					}
+				}
+			}
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_QTN_CCA:
+		len = comm_len
+			+ (have_report ?
+				sizeof(struct cca_rm_rep_data) + sizeof(struct ieee80211_ie_qtn_scs) : 0)
+			+ mreport_ctrl->u.qtn_cca.extra_ie_len;
+		break;
+	case IEEE80211_RM_MEASTYPE_CH_LOAD:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_chan_load) : 0);
+		break;
+	case IEEE80211_RM_MEASTYPE_NOISE:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_noise_his) : 0);
+		break;
+	case IEEE80211_RM_MEASTYPE_BEACON:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_beacon) : 0);
+		break;
+	case IEEE80211_RM_MEASTYPE_FRAME:
+	{
+		len = comm_len;
+
+		if (have_report) {
+			ieee80211_11k_sub_element *sub;
+			ieee80211_11k_sub_element_head *head;
+
+			len += sizeof(struct ieee80211_ie_measrep_frame);
+
+			head = (ieee80211_11k_sub_element_head *)mreport_ctrl->u.frame.sub_item;
+			SLIST_FOREACH(sub, head, next) {
+				if (sub->sub_id == IEEE80211_FRAME_REPORT_SUBELE_FRAME_COUNT_REPORT)
+					len += sizeof(struct ieee80211_subie_section_frame_entry);
+			}
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CATEGORY:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_trans_stream_cat) : 0);
+		break;
+	case IEEE80211_RM_MEASTYPE_MUL_DIAG:
+		len = comm_len + (have_report ? sizeof(struct ieee80211_ie_measrep_multicast_diag) : 0);
+		break;
+	default:
+		len = -1;
+		break;
+	}
+
+	return len;
+}
+
+/*
+ * Generate a measurement request IE (IEEE80211_ELEMID_MEASREQ) for the
+ * measurement type selected by mrequest_ctrl->meas_type.
+ *
+ * For IEEE80211_RM_MEASTYPE_STA requests the optional sub-element list in
+ * mrequest_ctrl->u.sta_stats.sub_item is consumed: every entry is unlinked
+ * and kfree'd after being serialized.
+ *
+ * Returns the advanced frame write pointer; the element length octet is
+ * back-patched before returning.
+ */
+u_int8_t *ieee80211_measure_request_ie_generate(struct ieee80211_node *ni,
+		u_int8_t *frm,
+		struct ieee80211_meas_request_ctrl *mrequest_ctrl)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	u_int8_t *ele_len;
+
+	*frm++ = IEEE80211_ELEMID_MEASREQ;
+	ele_len = frm;
+	*frm++ = 0;	/* will be filled when finished */
+	*frm++ = 1;	/* measurement token */
+	*frm++ = 0;	/* mode */
+	*frm++ = mrequest_ctrl->meas_type;
+
+	switch (mrequest_ctrl->meas_type) {
+	case IEEE80211_CCA_MEASTYPE_BASIC:
+	{
+		*frm++ = mrequest_ctrl->u.basic.channel;
+		ADDINT32(frm, *((u_int32_t *)&mrequest_ctrl->u.basic.start_tsf));
+		ADDINT32(frm, *((u_int32_t *)&mrequest_ctrl->u.basic.start_tsf + 1));
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.basic.duration_ms));
+
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_CCA:
+	{
+		*frm++ = mrequest_ctrl->u.cca.channel;
+		ADDINT32(frm, *((u_int32_t *)&mrequest_ctrl->u.cca.start_tsf));
+		ADDINT32(frm, *((u_int32_t *)&mrequest_ctrl->u.cca.start_tsf + 1));
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.cca.duration_ms));
+
+		break;
+	}
+	case IEEE80211_CCA_MEASTYPE_RPI:
+	{
+		*frm++ = mrequest_ctrl->u.rpi.channel;
+		ADDINT32(frm, *((u_int32_t *)&mrequest_ctrl->u.rpi.start_tsf));
+		/* fixed: previously read u.cca.start_tsf here (copy/paste slip) */
+		ADDINT32(frm, *((u_int32_t *)&mrequest_ctrl->u.rpi.start_tsf + 1));
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.rpi.duration_ms));
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_STA:
+	{
+		u_int16_t random_interval;
+		ieee80211_11k_sub_element *p_se;
+		ieee80211_11k_sub_element_head *se_head;
+
+		memcpy(frm, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+		frm += IEEE80211_ADDR_LEN;
+		/*
+		 * Fixed: request the full 16 bits of randomness; the old code
+		 * asked for only 1 byte, leaving the high byte of the
+		 * randomization interval uninitialized (stack data leaked
+		 * into the frame).
+		 */
+		get_random_bytes(&random_interval, sizeof(random_interval));
+		ADDINT16(frm, random_interval);
+		ADDINT16(frm, mrequest_ctrl->u.sta_stats.duration_tu);
+		*frm++ = mrequest_ctrl->u.sta_stats.group_id;
+
+		/* optional sub element - the list is consumed as it is serialized */
+		if (mrequest_ctrl->u.sta_stats.sub_item != NULL) {
+			se_head = (ieee80211_11k_sub_element_head *)mrequest_ctrl->u.sta_stats.sub_item;
+			while (!SLIST_EMPTY(se_head)) {
+				p_se = SLIST_FIRST(se_head);
+				switch (p_se->sub_id) {
+				case IEEE80211_ELEMID_VENDOR:
+				{
+					struct stastats_subele_vendor *vendor = (struct stastats_subele_vendor *)p_se->data;
+					u_int8_t *vendor_ie_len;
+
+					if (vendor->flags & RM_QTN_MEASURE_MASK) {
+						*frm++ = IEEE80211_ELEMID_VENDOR;
+						vendor_ie_len = frm;	/* length octet, back-patched below */
+						*frm++ = 0;
+						frm += ieee80211_oui_add_qtn(frm);
+						*frm++ = ni->ni_rm_sta_seq++;
+
+						if (IEEE80211_IS_ALL_SET(vendor->flags, RM_QTN_MAX)) {
+							*frm++ = QTN_OUI_RM_ALL;
+						} else {
+							u_int8_t cnt, *p_tlv_cnt;
+
+							*frm++ = QTN_OUI_RM_SPCIAL;
+							p_tlv_cnt = frm;	/* TLV count, incremented per entry */
+							*frm++ = 0;
+
+							for (cnt = RM_QTN_TX_STATS; cnt <= RM_QTN_MAX; cnt++) {
+								if (vendor->flags & (BIT(cnt))) {
+									*frm++ = cnt;
+									*frm++ = 0;
+									*p_tlv_cnt += 1;
+								}
+							}
+
+							for (cnt = RM_QTN_CTRL_START; cnt <= RM_QTN_CTRL_END; cnt++) {
+								if (vendor->flags & (BIT(cnt))) {
+									*frm++ = cnt;
+									*frm++ = 0;
+									*p_tlv_cnt += 1;
+								}
+							}
+						}
+						*vendor_ie_len = frm - vendor_ie_len - 1;
+					}
+					break;
+				}
+				default:
+					break;
+				}
+				SLIST_REMOVE_HEAD(se_head, next);
+				kfree(p_se);
+			}
+		}
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_QTN_CCA:
+	{
+		/* replace with real type */
+		*(frm - 1) = IEEE80211_CCA_MEASTYPE_CCA;
+
+		*frm++ = ic->ic_curchan->ic_ieee;
+		ADDINT32(frm, 0);
+		ADDINT32(frm, 0);
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.qtn_cca.duration_tu));
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CH_LOAD:
+	{
+		*frm++ = 0;	/* TODO: operating class, figure out a correct mapping */
+		*frm++ = mrequest_ctrl->u.chan_load.channel;
+		ADDINT16(frm, 0);
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.chan_load.duration_ms));
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_NOISE:
+	{
+		*frm++ = 0;	/* TODO: operating class, figure out a correct mapping */
+		*frm++ = mrequest_ctrl->u.noise_his.channel;
+		ADDINT16(frm, 0);
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.noise_his.duration_ms));
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_BEACON:
+	{
+		*frm++ = mrequest_ctrl->u.beacon.op_class;
+		*frm++ = mrequest_ctrl->u.beacon.channel;
+		ADDINT16(frm, 0);
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.beacon.duration_ms));
+		*frm++ = mrequest_ctrl->u.beacon.mode;
+		memcpy(frm, mrequest_ctrl->u.beacon.bssid, IEEE80211_ADDR_LEN);
+		frm += IEEE80211_ADDR_LEN;
+		if (mrequest_ctrl->u.beacon.ssid_len) {
+			/* optional SSID sub-element */
+			*frm++ = IEEE80211_ELEMID_SSID;
+			*frm++ = mrequest_ctrl->u.beacon.ssid_len;
+			memcpy(frm, mrequest_ctrl->u.beacon.ssid, mrequest_ctrl->u.beacon.ssid_len);
+			frm += mrequest_ctrl->u.beacon.ssid_len;
+		}
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_FRAME:
+	{
+		*frm++ = mrequest_ctrl->u.frame.op_class;
+		*frm++ = mrequest_ctrl->u.frame.channel;
+		ADDINT16(frm, 0);
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.frame.duration_ms));
+		*frm++ = mrequest_ctrl->u.frame.type;
+		memcpy(frm, mrequest_ctrl->u.frame.mac_address, IEEE80211_ADDR_LEN);
+		frm += IEEE80211_ADDR_LEN;
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_CATEGORY:
+	{
+		ADDINT16(frm, 0);
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.tran_stream_cat.duration_ms));
+		memcpy(frm, mrequest_ctrl->u.tran_stream_cat.peer_sta, IEEE80211_ADDR_LEN);
+		frm += IEEE80211_ADDR_LEN;
+		*frm++ = mrequest_ctrl->u.tran_stream_cat.tid;
+		*frm++ = mrequest_ctrl->u.tran_stream_cat.bin0;
+
+		break;
+	}
+	case IEEE80211_RM_MEASTYPE_MUL_DIAG:
+		ADDINT16(frm, 0);
+		ADDINT16(frm, IEEE80211_MS_TO_TU(mrequest_ctrl->u.multicast_diag.duration_ms));
+		memcpy(frm, mrequest_ctrl->u.multicast_diag.group_mac, IEEE80211_ADDR_LEN);
+		frm += IEEE80211_ADDR_LEN;
+
+		break;
+	default:
+		break;
+	}
+	/* back-patch the element length (ID and length octets excluded) */
+	*ele_len = (frm - ele_len) - 1;
+
+	return frm;
+}
+
+u_int8_t *ieee80211_measure_report_ie_generate(struct ieee80211_node *ni,
+		u_int8_t *frm,
+		struct ieee80211_meas_report_ctrl *mreport_ctrl)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	u_int8_t *ele_len;
+
+	/* common part */
+	*frm++ = IEEE80211_ELEMID_MEASREP;
+	ele_len = frm;
+	*frm++ = 0;
+	if (mreport_ctrl->autonomous)
+		*frm++ = 0;
+	else
+		*frm++ = mreport_ctrl->meas_token;
+	*frm++ = mreport_ctrl->report_mode;
+	*frm++ = mreport_ctrl->meas_type;
+
+	if (mreport_ctrl->report_mode == 0) {
+		switch (mreport_ctrl->meas_type) {
+		case IEEE80211_CCA_MEASTYPE_BASIC:
+		{
+			*frm++ = mreport_ctrl->u.basic.channel;
+			ADDINT32(frm, *((u_int32_t *)&mreport_ctrl->u.basic.start_tsf));
+			ADDINT32(frm, *((u_int32_t *)&mreport_ctrl->u.basic.start_tsf) + 1);
+			ADDINT16(frm, mreport_ctrl->u.basic.duration_tu);
+			*frm++ = mreport_ctrl->u.basic.basic_report;
+
+			break;
+		}
+		case IEEE80211_CCA_MEASTYPE_CCA:
+		{
+			*frm++ = mreport_ctrl->u.cca.channel;
+			ADDINT32(frm, *((u_int32_t *)&mreport_ctrl->u.cca.start_tsf));
+			ADDINT32(frm, *((u_int32_t *)&mreport_ctrl->u.cca.start_tsf) + 1);
+			ADDINT16(frm, mreport_ctrl->u.cca.duration_tu);
+			*frm++ = mreport_ctrl->u.cca.cca_report;
+
+			break;
+		}
+		case IEEE80211_CCA_MEASTYPE_RPI:
+		{
+			*frm++ = mreport_ctrl->u.rpi.channel;
+			ADDINT32(frm, *((u_int32_t *)&mreport_ctrl->u.rpi.start_tsf));
+			ADDINT32(frm, *((u_int32_t *)&mreport_ctrl->u.rpi.start_tsf) + 1);
+			ADDINT16(frm, mreport_ctrl->u.rpi.duration_tu);
+			memcpy(frm, mreport_ctrl->u.rpi.rpi_report, sizeof(mreport_ctrl->u.rpi.rpi_report));
+			frm += sizeof(mreport_ctrl->u.rpi.rpi_report);
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_STA:
+		{
+			u_int8_t group_len;
+			ieee80211_11k_sub_element *p_se;
+			ieee80211_11k_sub_element_head *se_head;
+			struct ieee80211_nodestats *stats;
+			u_int8_t assoc_bw;
+
+			ADDINT16(frm, mreport_ctrl->u.sta_stats.duration_tu);
+			*frm++ = mreport_ctrl->u.sta_stats.group_id;
+
+			switch (mreport_ctrl->u.sta_stats.group_id) {
+			case 0:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group0);
+				break;
+			case 1:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group0);
+				break;
+			case 2:
+			case 3:
+			case 4:
+			case 5:
+			case 6:
+			case 7:
+			case 8:
+			case 9:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group2to9);
+				break;
+			case 10:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group10);
+				break;
+			case 11:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group11);
+				break;
+			case 12:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group12);
+				break;
+			case 13:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group13);
+				break;
+			case 14:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group14);
+				break;
+			case 15:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group15);
+				break;
+			case 16:
+				group_len = sizeof(struct ieee80211_rm_sta_stats_group16);
+				break;
+			default:
+				group_len = sizeof(struct ieee80211_ie_qtn_rm_sta_all);
+				break;
+			}
+			frm += group_len;
+
+			if (mreport_ctrl->u.sta_stats.sub_item != NULL) {
+				se_head = (ieee80211_11k_sub_element_head *)mreport_ctrl->u.sta_stats.sub_item;
+				while (!SLIST_EMPTY(se_head)) {
+					p_se = SLIST_FIRST(se_head);
+					switch (p_se->sub_id) {
+					case IEEE80211_ELEMID_VENDOR:
+					{
+						struct stastats_subele_vendor *vendor = (struct stastats_subele_vendor *)p_se->data;
+						u_int32_t vendor_flags = vendor->flags;
+						u_int8_t sequence = vendor->sequence;
+
+						if (vendor_flags & RM_QTN_MEASURE_MASK) {
+							u_int8_t *vendor_ie_len;
+
+							*frm++ = IEEE80211_ELEMID_VENDOR;
+							vendor_ie_len = frm;
+							*frm++ = 0;
+							frm += ieee80211_oui_add_qtn(frm);
+							*frm++ = sequence;
+
+							stats = &ni->ni_stats;
+
+							if (IS_IEEE80211_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+								 switch (ni->ni_vhtcap.chanwidth) {
+									case IEEE80211_VHTCAP_CW_160M:
+									case IEEE80211_VHTCAP_CW_160_AND_80P80M:
+										assoc_bw = 160;
+										break;
+									case IEEE80211_VHTCAP_CW_80M_ONLY:
+									default:
+										assoc_bw = 80;
+								}
+							} else {
+								if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40 &&
+									ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) {
+									assoc_bw = 40;
+								} else {
+									assoc_bw = 20;
+								}
+							}
+
+							ic->ic_iterate_nodes(&ic->ic_sta, get_node_info, (void *)NULL, 1);
+
+							if (IEEE80211_IS_ALL_SET(vendor_flags, RM_QTN_MAX)) {
+								*frm++ = QTN_OUI_RM_ALL;
+
+								/* fill the content here */
+								/* fill the sta tx statistics */
+								ADDINT32TO64(frm, stats->ns_tx_bytes);
+								ADDINT32(frm, stats->ns_tx_data);
+								ADDINT32(frm, stats->ns_tx_dropped);
+								ADDINT32(frm, stats->ns_tx_errors);
+								ADDINT32(frm, stats->ns_tx_ucast);
+								ADDINT32(frm, stats->ns_tx_mcast);
+								ADDINT32(frm, stats->ns_tx_bcast);
+
+								/* fill the sta rx statistics */
+								ADDINT32TO64(frm, stats->ns_rx_bytes);
+								ADDINT32(frm, stats->ns_rx_data);
+								ADDINT32(frm, stats->ns_rx_dropped);
+								ADDINT32(frm, stats->ns_rx_errors);
+								ADDINT32(frm, stats->ns_rx_ucast);
+								ADDINT32(frm, stats->ns_rx_mcast);
+								ADDINT32(frm, stats->ns_rx_bcast);
+
+								/* fill the sta parameters */
+								ADDINT32(frm, ni->ni_max_queue);
+								ADDINT16TO32(frm, ni->ni_linkqual);
+								ADDINT32(frm, ni->ni_smthd_rssi);
+								ADDINT8TO32(frm, assoc_bw);
+								ADDINT32(frm, ni->ni_snr);
+								ADDINT8TO32(frm, ni->ni_rates.rs_rates[ni->ni_txrate]);
+								ADDINT16TO32(frm, ni->ni_rx_phy_rate);
+							} else {
+								u_int8_t *p_tlv_cnt;
+								u_int8_t i;
+
+								*frm++ = QTN_OUI_RM_SPCIAL;
+								p_tlv_cnt = frm;
+								*frm++ = 0;
+
+								for (i = 0; i <= RM_QTN_MAX; i++) {
+									if (vendor_flags & (BIT(i))) {
+										*p_tlv_cnt += 1;
+										*frm++ = i;
+										switch (i) {
+										case RM_QTN_TX_STATS:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32TO64(frm, stats->ns_tx_bytes);
+											ADDINT32(frm, stats->ns_tx_data);
+											ADDINT32(frm, stats->ns_tx_dropped);
+											ADDINT32(frm, stats->ns_tx_errors);
+											ADDINT32(frm, stats->ns_tx_ucast);
+											ADDINT32(frm, stats->ns_tx_mcast);
+											ADDINT32(frm, stats->ns_tx_bcast);
+											break;
+										}
+										case RM_QTN_RX_STATS:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32TO64(frm, stats->ns_rx_bytes);
+											ADDINT32(frm, stats->ns_rx_data);
+											ADDINT32(frm, stats->ns_rx_dropped);
+											ADDINT32(frm, stats->ns_rx_errors);
+											ADDINT32(frm, stats->ns_rx_ucast);
+											ADDINT32(frm, stats->ns_rx_mcast);
+											ADDINT32(frm, stats->ns_rx_bcast);
+											break;
+										}
+										case RM_QTN_MAX_QUEUED:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32(frm, ni->ni_max_queue);
+											break;
+										}
+										case RM_QTN_LINK_QUALITY:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32(frm, ni->ni_linkqual);
+											break;
+										}
+										case RM_QTN_RSSI_DBM:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32(frm, ni->ni_smthd_rssi);
+											break;
+										}
+										case RM_QTN_BANDWIDTH:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32(frm, assoc_bw);
+											break;
+										}
+										case RM_QTN_SNR:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32(frm, ni->ni_snr);
+											break;
+										}
+										case RM_QTN_TX_PHY_RATE:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32(frm, ni->ni_linkqual);
+											break;
+										}
+										case RM_QTN_RX_PHY_RATE:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											ADDINT32(frm, ni->ni_rx_phy_rate);
+											break;
+										}
+										case RM_QTN_CCA:
+										{
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											/* Reserved for cca */
+											ADDINT32(frm, 0);
+											break;
+										}
+										case RM_QTN_BR_IP:
+										{
+											/* Vendor specific content: tlv length field */
+											__be32 br_ip = 0;
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											if (ic->ic_getparam != NULL) {
+												(*ic->ic_getparam)(ni, IEEE80211_PARAM_BR_IP_ADDR,
+														(int *)&br_ip, NULL, NULL);
+											}
+											ADDINT32(frm, br_ip);
+											break;
+										}
+										case RM_QTN_RSSI:
+										{
+											int32_t local_rssi = 0;
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+
+											if (ic->ic_rssi) {
+												local_rssi = ic->ic_rssi(ni);
+											}
+
+
+											if (local_rssi < -1 && local_rssi > -1200) {
+												local_rssi += 900;
+											}
+
+											if (local_rssi < 0) {
+												local_rssi = 0;
+											}
+
+											ADDINT32(frm, local_rssi);
+											break;
+										}
+										case RM_QTN_HW_NOISE:
+										{
+											int32_t local_noise = 0;
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+
+											local_noise = ic->ic_hw_noise(ni);
+
+											ADDINT32(frm, local_noise);
+
+											break;
+										}
+										case RM_QTN_SOC_MACADDR:
+										{
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+
+											memcpy(frm, ic->soc_addr, IEEE80211_ADDR_LEN);
+											frm += IEEE80211_ADDR_LEN;
+
+											break;
+										}
+										case RM_QTN_SOC_IPADDR:
+										{
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+
+											ADDINT32(frm, ic->ic_soc_ipaddr);
+
+											break;
+										}
+										default:
+											/* Vendor specific content: tlv length field */
+											*frm++ = sizeof(u_int32_t);
+											/* unkown type report 0 */
+											ADDINT32(frm, 0);
+											break;
+										}
+									}
+								}
+
+								for (i = RM_QTN_CTRL_START; i <= RM_QTN_CTRL_END; i++) {
+									if (vendor_flags & (BIT(i))) {
+										*frm++ = i;
+										switch(i) {
+										case RM_QTN_RESET_CNTS:
+										{
+											int32_t		ret;
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											/* reset all counter */
+											ret = ieee80211_rst_dev_stats(vap);
+											ADDINT32(frm, ret);
+											break;
+										}
+										case RM_QTN_RESET_QUEUED:
+											/* Vendor specific content: tlv length field */
+											*frm++ = ieee80211_meas_sta_qtn_report_subtype_len[i];
+											/* reset all counter */
+											ic->ic_queue_reset(ni);
+											ADDINT32(frm, 0);
+											break;
+										default:
+											/* Vendor specific content: tlv length field */
+											*frm++ = sizeof(int32_t);
+											ADDINT32(frm, -1);
+											break;
+										}
+									}
+								}
+							}
+							*vendor_ie_len = frm - vendor_ie_len - 1;
+						}
+						break;
+					}
+					default:
+						printk("unknown STA Statistics sub element, ID = %d\n", p_se->sub_id);
+						break;
+					}
+					SLIST_REMOVE_HEAD(se_head, next);
+					kfree(p_se);
+				}
+			}
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_QTN_CCA:
+		{
+			u_int8_t *vendor_ie_len;
+
+			/* replace with real type */
+			*(frm - 1) = IEEE80211_CCA_MEASTYPE_CCA;
+
+			*frm++ = mreport_ctrl->u.qtn_cca.channel;
+			ADDINT32LE(frm, *((u_int32_t *)&mreport_ctrl->u.qtn_cca.start_tsf));
+			ADDINT32LE(frm, *((u_int32_t *)&mreport_ctrl->u.qtn_cca.start_tsf) + 1);
+			ADDINT16LE(frm, IEEE80211_MS_TO_TU(mreport_ctrl->u.qtn_cca.duration_ms));
+			*frm++ = mreport_ctrl->u.qtn_cca.qtn_cca_report;
+
+			/* qtn SCS IE */
+			*frm++ = IEEE80211_ELEMID_VENDOR;
+			vendor_ie_len = frm;
+			*frm++ = 0;
+			frm += ieee80211_oui_add_qtn(frm);
+			*frm++ = QTN_OUI_SCS;
+			*frm++ = mreport_ctrl->u.qtn_cca.type;
+			if (QTN_SCS_IE_TYPE_STA_INTF_RPT == mreport_ctrl->u.qtn_cca.type) {
+				ADDINT32LE(frm, mreport_ctrl->u.qtn_cca.u.qtn_cca_info.sp_fail);
+				ADDINT32LE(frm, mreport_ctrl->u.qtn_cca.u.qtn_cca_info.lp_fail);
+				ADDINT16LE(frm, mreport_ctrl->u.qtn_cca.u.qtn_cca_info.others_time);
+			} else if (QTN_SCS_IE_TYPE_STA_DFS_RPT == mreport_ctrl->u.qtn_cca.type) {
+				ADDINT16LE(frm, mreport_ctrl->u.qtn_cca.u.qtn_dfs_info.dfs_enabled);
+				*frm++ = mreport_ctrl->u.qtn_cca.u.qtn_dfs_info.max_txpower;
+			} else if (QTN_SCS_IE_TYPE_STA_FAT_RPT == mreport_ctrl->u.qtn_cca.type) {
+				ADDINT16LE(frm, mreport_ctrl->u.qtn_cca.u.qtn_fat_info.free_airtime);
+			}
+			ADDINT16LE(frm, mreport_ctrl->u.qtn_cca.extra_ie_len);
+			memcpy(frm, mreport_ctrl->u.qtn_cca.extra_ie,
+						mreport_ctrl->u.qtn_cca.extra_ie_len);
+			frm += mreport_ctrl->u.qtn_cca.extra_ie_len;
+			*vendor_ie_len = frm - vendor_ie_len - 1;
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_CH_LOAD:
+		{
+			u_int64_t tsf;
+
+			ic->ic_get_tsf(&tsf);
+
+			*frm++ = mreport_ctrl->u.chan_load.op_class;
+			*frm++ = mreport_ctrl->u.chan_load.channel;
+			ADDINT32(frm, *((u_int32_t *)&tsf));
+			ADDINT32(frm, *((u_int32_t *)&tsf + 1));
+			ADDINT16(frm, mreport_ctrl->u.chan_load.duration_tu);
+			*frm++ = mreport_ctrl->u.chan_load.channel_load;
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_NOISE:
+		{
+			u_int64_t tsf;
+
+			ic->ic_get_tsf(&tsf);
+
+			*frm++ = mreport_ctrl->u.noise_his.op_class;
+			*frm++ = mreport_ctrl->u.noise_his.channel;
+			ADDINT32(frm, *((u_int32_t *)&tsf));
+			ADDINT32(frm, *((u_int32_t *)&tsf + 1));
+			ADDINT16(frm, mreport_ctrl->u.noise_his.duration_tu);
+			*frm++ = mreport_ctrl->u.noise_his.antenna_id;
+			*frm++ = mreport_ctrl->u.noise_his.anpi;
+			memcpy(frm, mreport_ctrl->u.noise_his.ipi, sizeof(mreport_ctrl->u.noise_his.ipi));
+			frm += sizeof(mreport_ctrl->u.noise_his.ipi);
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_BEACON:
+		{
+			u_int64_t tsf;
+
+			ic->ic_get_tsf(&tsf);
+
+			*frm++ = mreport_ctrl->u.beacon.op_class;
+			*frm++ = mreport_ctrl->u.beacon.channel;
+			ADDINT32(frm, *((u_int32_t *)&tsf));
+			ADDINT32(frm, *((u_int32_t *)&tsf + 1));
+			ADDINT16(frm, mreport_ctrl->u.beacon.duration_tu);
+			*frm++ = mreport_ctrl->u.beacon.reported_frame_info;
+			*frm++ = mreport_ctrl->u.beacon.rcpi;
+			*frm++ = mreport_ctrl->u.beacon.rsni;
+			memcpy(frm, mreport_ctrl->u.beacon.bssid, IEEE80211_ADDR_LEN);
+			frm += IEEE80211_ADDR_LEN;
+			*frm++ = mreport_ctrl->u.beacon.antenna_id;
+			memcpy(frm, mreport_ctrl->u.beacon.parent_tsf, 4);
+			frm += sizeof(mreport_ctrl->u.beacon.parent_tsf);
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_FRAME:
+		{
+			u_int64_t tsf;
+			ieee80211_11k_sub_element *p_se;
+			ieee80211_11k_sub_element_head *se_head;
+
+			ic->ic_get_tsf(&tsf);
+
+			*frm++ = mreport_ctrl->u.frame.op_class;
+			*frm++ = mreport_ctrl->u.frame.channel;
+			ADDINT32(frm, *((u_int32_t *)&tsf));
+			ADDINT32(frm, *((u_int32_t *)&tsf + 1));
+			ADDINT16(frm, mreport_ctrl->u.frame.duration_tu);
+
+			se_head = (ieee80211_11k_sub_element_head *)mreport_ctrl->u.frame.sub_item;
+			while (!SLIST_EMPTY(se_head)) {
+				p_se = SLIST_FIRST(se_head);
+				switch (p_se->sub_id) {
+				case IEEE80211_FRAME_REPORT_SUBELE_FRAME_COUNT_REPORT:
+				{
+					u_int8_t *sub_ele_len;
+					struct frame_report_subele_frame_count *sub_ele;
+
+					sub_ele = (struct frame_report_subele_frame_count *)p_se->data;
+					*frm++ = p_se->sub_id;
+					sub_ele_len = frm;
+					*frm++ = 0;
+					memcpy(frm, sub_ele->ta, IEEE80211_ADDR_LEN);
+					frm += IEEE80211_ADDR_LEN;
+					memcpy(frm, sub_ele->bssid, IEEE80211_ADDR_LEN);
+					frm += IEEE80211_ADDR_LEN;
+					*frm++ = sub_ele->phy_type;
+					*frm++ = sub_ele->avg_rcpi;
+					*frm++ = sub_ele->last_rsni;
+					*frm++ = sub_ele->last_rcpi;
+					*frm++ = sub_ele->antenna_id;
+					ADDINT16(frm, sub_ele->frame_count);
+
+					*sub_ele_len = frm - sub_ele_len - 1;
+
+					break;
+				}
+				default:
+					break;
+				}
+				SLIST_REMOVE_HEAD(se_head, next);
+				kfree(p_se);
+			}
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_CATEGORY:
+		{
+			u_int64_t tsf;
+
+			ic->ic_get_tsf(&tsf);
+
+			ADDINT32(frm, *((u_int32_t *)&tsf));
+			ADDINT32(frm, *((u_int32_t *)&tsf + 1));
+			ADDINT16(frm, mreport_ctrl->u.tran_stream_cat.duration_tu);
+			memcpy(frm, mreport_ctrl->u.tran_stream_cat.peer_sta, IEEE80211_ADDR_LEN);
+			frm += IEEE80211_ADDR_LEN;
+			*frm++ = mreport_ctrl->u.tran_stream_cat.tid;
+			*frm++ = mreport_ctrl->u.tran_stream_cat.reason;
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.tran_msdu_cnt);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.msdu_discard_cnt);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.msdu_fail_cnt);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.msdu_mul_retry_cnt);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.qos_lost_cnt);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.avg_queue_delay);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.avg_tran_delay);
+			*frm++ = mreport_ctrl->u.tran_stream_cat.bin0_range;
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.bins[0]);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.bins[1]);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.bins[2]);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.bins[3]);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.bins[4]);
+			ADDINT32(frm, mreport_ctrl->u.tran_stream_cat.bins[5]);
+
+			break;
+		}
+		case IEEE80211_RM_MEASTYPE_MUL_DIAG:
+			ADDINT32(frm, 0);
+			ADDINT32(frm, 0);
+			ADDINT16(frm, mreport_ctrl->u.multicast_diag.duration_tu);
+			memcpy(frm, mreport_ctrl->u.multicast_diag.group_mac, IEEE80211_ADDR_LEN);
+			frm += IEEE80211_ADDR_LEN;
+			*frm++ = mreport_ctrl->u.multicast_diag.reason;
+			ADDINT32(frm, mreport_ctrl->u.multicast_diag.mul_rec_msdu_cnt);
+			ADDINT16(frm, mreport_ctrl->u.multicast_diag.first_seq_num);
+			ADDINT16(frm, mreport_ctrl->u.multicast_diag.last_seq_num);
+			ADDINT16(frm, mreport_ctrl->u.multicast_diag.mul_rate);
+
+			break;
+		default:
+			break;
+		}
+	}
+	*ele_len = frm - ele_len - 1;
+
+	return frm;
+}
+
+/*
+ * Build an 802.11h (Spectrum Management category) measurement action
+ * frame, either a request or a report, and return it via p_skb.
+ *
+ * ni     - destination node
+ * ctrl   - ieee80211_meas_request_ctrl for REQUEST, or
+ *          ieee80211_meas_report_ctrl for REPORT (selected by 'action')
+ * action - IEEE80211_ACTION_S_MEASUREMENT_REQUEST or _REPORT
+ * p_skb  - out: the finished management frame
+ *
+ * Returns 0 on success, -1 on invalid action, zero/negative IE length,
+ * or allocation failure.
+ */
+int32_t ieee80211_compile_action_measurement_11h(struct ieee80211_node *ni,
+		void *ctrl,
+		u_int8_t action,
+		struct sk_buff **p_skb)
+{
+	struct sk_buff *skb;
+	u_int8_t *frm;
+	int32_t meas_frame_len = 0;
+	int32_t meas_ie_len = 0;
+	struct ieee80211_meas_request_ctrl *mrequest_ctrl = NULL;
+	struct ieee80211_meas_report_ctrl *mreport_ctrl = NULL;
+	u_int8_t tx_token;
+
+	if ((action != IEEE80211_ACTION_S_MEASUREMENT_REQUEST) && (action != IEEE80211_ACTION_S_MEASUREMENT_REPORT))
+		return -1;
+
+	if (action == IEEE80211_ACTION_S_MEASUREMENT_REQUEST) {
+		mrequest_ctrl = (struct ieee80211_meas_request_ctrl *)ctrl;
+		meas_frame_len = sizeof(struct ieee80211_action_sm_measurement_header);
+
+		meas_ie_len = ieee80211_measure_request_ie_len(mrequest_ctrl);
+		if (meas_ie_len <= 0)
+			return -1;
+		meas_frame_len += meas_ie_len;
+
+		skb = ieee80211_getmgtframe(&frm, meas_frame_len);
+		if (NULL == skb)
+			return -1;
+
+		*frm++ = IEEE80211_ACTION_CAT_SPEC_MGMT;
+		*frm++ = action;
+		/* Skip dialog token 0 when the per-node counter wraps */
+		if (ni->ni_action_token == 0)
+			ni->ni_action_token++;
+		tx_token = ni->ni_action_token++;
+		*frm++ = tx_token;
+		frm = ieee80211_measure_request_ie_generate(ni, frm, mrequest_ctrl);
+		if (mrequest_ctrl->expire != 0) {
+			/*
+			 * Track the expected report in the pairing-pending queue;
+			 * the original skb is kept for retransmission and a clone
+			 * is returned for the caller to transmit now.
+			 */
+			skb = ieee80211_ppqueue_pre_tx(ni,
+					skb,
+					IEEE80211_ACTION_CAT_SPEC_MGMT,
+					IEEE80211_ACTION_S_MEASUREMENT_REPORT,
+					tx_token,
+					mrequest_ctrl->expire,
+					mrequest_ctrl->fn_success,
+					mrequest_ctrl->fn_fail);
+			if (skb == NULL)
+				return -1;
+		}
+	} else {
+		mreport_ctrl = (struct ieee80211_meas_report_ctrl *)ctrl;
+		meas_frame_len = sizeof(struct ieee80211_action_sm_measurement_header);
+
+		meas_ie_len = ieee80211_measure_report_ie_len(mreport_ctrl);
+		if (meas_ie_len <= 0)
+			return -1;
+		meas_frame_len += meas_ie_len;
+
+		skb = ieee80211_getmgtframe(&frm, meas_frame_len);
+		if (NULL == skb)
+			return -1;
+		memset(frm, 0, meas_frame_len);
+
+		*frm++ = IEEE80211_ACTION_CAT_SPEC_MGMT;
+		*frm++ = action;
+		/* Autonomous reports carry dialog token 0; otherwise echo
+		 * the token from the triggering request. */
+		if (mreport_ctrl->autonomous)
+			*frm++ = 0;
+		else
+			*frm++ = mreport_ctrl->token;
+		frm = ieee80211_measure_report_ie_generate(ni, frm, mreport_ctrl);
+	}
+
+	/* Catch buffer overrun: generated frame must fit the allocation */
+	KASSERT(((frm - skb->data) <= meas_frame_len),
+			("ERROR: 11h measure frame gen fail\n"
+			"expected len = %d\n"
+			"start address(0x%x), end address(0x%x), len = %d\n",
+			meas_frame_len,
+			(uint32_t)skb->data,
+			(uint32_t)frm,
+			(uint32_t)(frm - skb->data)));
+
+	skb_trim(skb, frm - skb->data);
+	*p_skb = skb;
+	return 0;
+}
+
+/*
+ * Build an 802.11k (Radio Measurement category) measurement action
+ * frame, either a request or a report, and return it via p_skb.
+ *
+ * ctrl points to an ieee80211_meas_request_ctrl or an
+ * ieee80211_meas_report_ctrl depending on 'action'.
+ *
+ * Returns 0 on success, -1 on invalid action, zero/negative IE length,
+ * or allocation failure.
+ */
+int32_t ieee80211_compile_action_measurement_11k(struct ieee80211_node *ni,
+		void *ctrl,
+		u_int8_t action,
+		struct sk_buff **p_skb)
+{
+	struct sk_buff *skb;
+	u_int8_t *frm;
+	int32_t meas_frame_len = 0;
+	int32_t meas_ie_len = 0;
+	struct ieee80211_meas_request_ctrl *mrequest_ctrl = NULL;
+	struct ieee80211_meas_report_ctrl *mreport_ctrl = NULL;
+	u_int8_t tx_token;
+
+	if ((action != IEEE80211_ACTION_R_MEASUREMENT_REQUEST) && (action != IEEE80211_ACTION_R_MEASUREMENT_REPORT))
+		return -1;
+
+	if (action == IEEE80211_ACTION_R_MEASUREMENT_REQUEST) {
+		mrequest_ctrl = (struct ieee80211_meas_request_ctrl *)ctrl;
+		meas_frame_len = sizeof(struct ieee80211_action_radio_measure_request);
+
+		meas_ie_len = ieee80211_measure_request_ie_len(mrequest_ctrl);
+		if (meas_ie_len <= 0)
+			return -1;
+
+		meas_frame_len += meas_ie_len;
+
+		/* Beacon requests may append an optional SSID sub-element
+		 * (1 byte ID + 1 byte length + SSID) */
+		if ((mrequest_ctrl->meas_type == IEEE80211_RM_MEASTYPE_BEACON)
+			&& (mrequest_ctrl->u.beacon.ssid_len)) {
+			meas_frame_len += mrequest_ctrl->u.beacon.ssid_len + 2;
+		}
+		skb = ieee80211_getmgtframe(&frm, meas_frame_len);
+		if (NULL == skb)
+			return -1;
+
+		*frm++ = IEEE80211_ACTION_CAT_RM;
+		*frm++ = action;
+		/* Skip dialog token 0 when the per-node counter wraps */
+		if (ni->ni_action_token == 0)
+			ni->ni_action_token++;
+		tx_token = ni->ni_action_token++;
+		*frm++ = tx_token;
+		ADDINT16(frm, 0);	/* set number of repetitions to 0 */
+		frm = ieee80211_measure_request_ie_generate(ni, frm, mrequest_ctrl);
+		if (mrequest_ctrl->expire != 0) {
+			/*
+			 * Track the expected report in the pairing-pending queue;
+			 * the original skb is kept for retransmission and a clone
+			 * is returned for the caller to transmit now.
+			 */
+			skb = ieee80211_ppqueue_pre_tx(ni,
+					skb,
+					IEEE80211_ACTION_CAT_RM,
+					IEEE80211_ACTION_R_MEASUREMENT_REPORT,
+					tx_token,
+					mrequest_ctrl->expire,
+					mrequest_ctrl->fn_success,
+					mrequest_ctrl->fn_fail);
+			if (skb == NULL)
+				return -1;
+		}
+	} else {
+		mreport_ctrl = (struct ieee80211_meas_report_ctrl *)ctrl;
+		meas_frame_len = sizeof(struct ieee80211_action_radio_measure_report);
+
+		meas_ie_len = ieee80211_measure_report_ie_len(mreport_ctrl);
+		if (meas_ie_len <= 0)
+			return -1;
+		meas_frame_len += meas_ie_len;
+
+		skb = ieee80211_getmgtframe(&frm, meas_frame_len);
+		if (NULL == skb)
+			return -1;
+
+		*frm++ = IEEE80211_ACTION_CAT_RM;
+		*frm++ = action;
+		/* Autonomous reports carry dialog token 0; otherwise echo
+		 * the token from the triggering request. */
+		if (mreport_ctrl->autonomous)
+			*frm++ = 0;
+		else
+			*frm++ = mreport_ctrl->token;
+		frm = ieee80211_measure_report_ie_generate(ni, frm, mreport_ctrl);
+	}
+
+	/* Catch buffer overrun: generated frame must fit the allocation */
+	KASSERT(((frm - skb->data) <= meas_frame_len),
+			("ERROR: 11k measure frame gen fail\n"
+			"expected len = %d\n"
+			"start address(0x%x), end address(0x%x), len = %d\n",
+			meas_frame_len,
+			(uint32_t)skb->data,
+			(uint32_t)frm,
+			(uint32_t)(frm - skb->data)));
+
+	skb_trim(skb, frm - skb->data);
+	*p_skb = skb;
+	return 0;
+}
+
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+/*
+ * Send an autonomous QTN CCA interference report: a radio-measurement
+ * action frame whose QTN SCS vendor IE carries type STA_INTF_RPT.
+ * 'token' is currently unused here; the report is marked autonomous,
+ * so the frame builder writes dialog token 0.
+ * extra_ie/ie_len are copied verbatim into the vendor IE.
+ */
+void ieee80211_send_action_cca_report(struct ieee80211_node *ni, uint8_t token,
+		uint16_t cca_intf, uint64_t tsf, uint16_t duration, uint32_t sp_fail,
+		uint32_t lp_fail, uint16_t others_time, uint8_t *extra_ie, uint16_t ie_len)
+{
+	struct ieee80211_meas_report_ctrl mreport_ctrl;
+	struct ieee80211_action_data action_data;
+	u_int16_t frac_busy;
+
+	/* Rescale busy fraction from SCS units to the 11k CCA report scale */
+	frac_busy = cca_intf * IEEE80211_11K_CCA_INTF_SCALE / IEEE80211_SCS_CCA_INTF_SCALE;
+	mreport_ctrl.meas_type = IEEE80211_RM_MEASTYPE_QTN_CCA;
+	mreport_ctrl.report_mode = 0;
+	mreport_ctrl.autonomous = 1;
+
+	mreport_ctrl.u.qtn_cca.type = QTN_SCS_IE_TYPE_STA_INTF_RPT;
+
+	mreport_ctrl.u.qtn_cca.channel = ni->ni_chan->ic_ieee;
+	mreport_ctrl.u.qtn_cca.start_tsf = tsf;
+	mreport_ctrl.u.qtn_cca.duration_ms = duration;
+	mreport_ctrl.u.qtn_cca.qtn_cca_report = (u_int8_t)frac_busy;
+
+	mreport_ctrl.u.qtn_cca.u.qtn_cca_info.sp_fail = sp_fail;
+	mreport_ctrl.u.qtn_cca.u.qtn_cca_info.lp_fail = lp_fail;
+	mreport_ctrl.u.qtn_cca.u.qtn_cca_info.others_time = others_time;
+
+	mreport_ctrl.u.qtn_cca.extra_ie = extra_ie;
+	mreport_ctrl.u.qtn_cca.extra_ie_len = ie_len;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &mreport_ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+
+/*
+ * Send an autonomous QTN free-airtime (FAT) report: a radio-measurement
+ * action frame whose QTN SCS vendor IE carries type STA_FAT_RPT with
+ * idle_time as the free-airtime value.  'token' is currently unused;
+ * the report is autonomous, so the frame builder writes dialog token 0.
+ */
+void ieee80211_send_action_fat_report(struct ieee80211_node *ni, uint8_t token,
+		uint16_t cca_intf, uint64_t tsf, uint16_t duration, uint16_t idle_time)
+{
+	struct ieee80211_meas_report_ctrl mreport_ctrl;
+	struct ieee80211_action_data action_data;
+	u_int16_t frac_busy;
+
+	/* Rescale busy fraction from SCS units to the 11k CCA report scale */
+	frac_busy = cca_intf * IEEE80211_11K_CCA_INTF_SCALE / IEEE80211_SCS_CCA_INTF_SCALE;
+	mreport_ctrl.meas_type = IEEE80211_RM_MEASTYPE_QTN_CCA;
+	mreport_ctrl.report_mode = 0;
+	mreport_ctrl.autonomous = 1;
+
+	mreport_ctrl.u.qtn_cca.type = QTN_SCS_IE_TYPE_STA_FAT_RPT;
+
+	mreport_ctrl.u.qtn_cca.channel = ni->ni_chan->ic_ieee;
+	mreport_ctrl.u.qtn_cca.start_tsf = tsf;
+	mreport_ctrl.u.qtn_cca.duration_ms = duration;
+	mreport_ctrl.u.qtn_cca.qtn_cca_report = (u_int8_t)frac_busy;
+
+	mreport_ctrl.u.qtn_cca.u.qtn_fat_info.free_airtime = idle_time;
+
+	/* No extra vendor IE payload for FAT reports */
+	mreport_ctrl.u.qtn_cca.extra_ie_len = 0;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &mreport_ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+
+/*
+ * Send an autonomous QTN DFS status report: a radio-measurement action
+ * frame whose QTN SCS vendor IE carries type STA_DFS_RPT, reporting
+ * whether DFS marking is enabled (IEEE80211_FEXT_MARKDFS flag) and the
+ * current channel's maximum tx power.
+ */
+void ieee80211_send_action_dfs_report(struct ieee80211_node *ni)
+{
+	struct ieee80211_meas_report_ctrl mreport_ctrl;
+	struct ieee80211_action_data action_data;
+	struct ieee80211com *ic = ni->ni_ic;
+
+	mreport_ctrl.meas_type = IEEE80211_RM_MEASTYPE_QTN_CCA;
+	mreport_ctrl.report_mode = 0;
+	mreport_ctrl.autonomous = 1;
+
+	mreport_ctrl.u.qtn_cca.type = QTN_SCS_IE_TYPE_STA_DFS_RPT;
+
+	/* No CCA measurement data in a DFS report: zero the common fields */
+	mreport_ctrl.u.qtn_cca.channel = ni->ni_chan->ic_ieee;
+	mreport_ctrl.u.qtn_cca.start_tsf = 0;
+	mreport_ctrl.u.qtn_cca.duration_ms = 0;
+	mreport_ctrl.u.qtn_cca.qtn_cca_report = 0;
+
+	mreport_ctrl.u.qtn_cca.u.qtn_dfs_info.dfs_enabled = !!(ic->ic_flags_ext & IEEE80211_FEXT_MARKDFS);
+	mreport_ctrl.u.qtn_cca.u.qtn_dfs_info.max_txpower = ic->ic_curchan->ic_maxpower;
+
+	mreport_ctrl.u.qtn_cca.extra_ie_len = 0;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &mreport_ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_action_dfs_report);
+#endif
+
+/*
+ * Free a pairing-pending entry together with the skb it holds.
+ * Safe to call with entry == NULL.
+ */
+__inline void ieee80211_ppqueue_release_entry(struct ieee80211_pairing_pending_entry *entry)
+{
+	if (entry != NULL) {
+		dev_kfree_skb_any(entry->skb);
+		kfree(entry);
+	}
+}
+
+/*
+ * Insert a pairing-pending entry into the queue, which is kept sorted
+ * by ascending next_expire_jiffies.  If the entry becomes the new head
+ * (earliest deadline), the queue timer is re-armed for it.
+ *
+ * Fixes two defects in the original:
+ *  - an entry expiring later than every queued entry was never linked
+ *    (the sorted-insert loop fell off the tail), leaking it and losing
+ *    its callbacks; it is now appended at the tail.
+ *  - an entry recycled from the timeout retry path could carry a stale
+ *    ->next link; it is now cleared up front.
+ */
+void ieee80211_ppqueue_insert_entry(struct ieee80211_pairing_pending_queue *queue,
+				struct ieee80211_pairing_pending_entry *entry)
+{
+	int32_t timer_refresh = 0;
+	unsigned long flags;
+	struct ieee80211_pairing_pending_entry *prev, *cur;
+
+	/* Entries re-inserted by the timeout handler may have a stale link */
+	entry->next = NULL;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	if (queue->next == NULL) {
+		queue->next = entry;
+		queue->next_expire_jiffies = entry->next_expire_jiffies;
+		timer_refresh = 1;
+	} else {
+		if (time_before(entry->next_expire_jiffies, queue->next->next_expire_jiffies)) {
+			/* New earliest deadline: becomes the head */
+			entry->next = queue->next;
+			queue->next = entry;
+			queue->next_expire_jiffies = entry->next_expire_jiffies;
+			timer_refresh = 1;
+		} else {
+			prev = queue->next;
+			cur = prev->next;
+			while (cur != NULL) {
+				if (time_before(entry->next_expire_jiffies, cur->next_expire_jiffies)) {
+					entry->next = cur;
+					prev->next = entry;
+					break;
+				}
+				prev = cur;
+				cur = prev->next;
+			}
+			/* Latest deadline of all: append at the tail */
+			if (cur == NULL)
+				prev->next = entry;
+		}
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	if (timer_refresh) {
+		mod_timer(&queue->timer, queue->next_expire_jiffies);
+	}
+}
+
+/*
+ * Remove the queue entry matched by an incoming response frame (same
+ * node, category, action and dialog token) and run its success
+ * callback.  No-op if nothing matches.
+ *
+ * Fix: the list cursors were previously initialized from queue->next
+ * before the spinlock was taken, racing with concurrent list updates;
+ * they are now set up under the lock.  The callback still runs outside
+ * the lock.
+ */
+void ieee80211_ppqueue_remove_with_response(struct ieee80211_pairing_pending_queue *queue,
+					struct ieee80211_node *ni,
+					u_int8_t category,
+					u_int8_t action,
+					u_int8_t token)
+{
+	struct ieee80211_pairing_pending_entry **prev, *cur, *to_free;
+	unsigned long flags;
+
+	to_free = NULL;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	prev = &queue->next;
+	cur = queue->next;
+	while (cur != NULL) {
+		if ((cur->ni == ni) &&
+			(cur->expected_category == category) &&
+			(cur->expected_action == action) &&
+			(cur->expected_token == token)) {
+			to_free = cur;
+			*prev = cur->next;
+			break;
+		}
+		prev = &cur->next;
+		cur = cur->next;
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	if (to_free != NULL) {
+		if (to_free->fn_success != NULL)
+			to_free->fn_success(to_free->ni);
+
+		ieee80211_ppqueue_release_entry(to_free);
+	}
+}
+
+/*
+ * Drop every pending entry owned by a departing node.  Matching
+ * entries are unlinked onto a private list under the lock; their
+ * failure callbacks (PPQ_FAIL_NODELEAVE) then run outside the lock.
+ *
+ * Fix: the list cursors were previously initialized from queue->next
+ * before the spinlock was taken, racing with concurrent list updates;
+ * they are now set up under the lock.
+ */
+void ieee80211_ppqueue_remove_node_leave(struct ieee80211_pairing_pending_queue *queue,
+				struct ieee80211_node *ni)
+{
+	struct ieee80211_pairing_pending_entry **prev, *cur;
+	struct ieee80211_pairing_pending_entry *ni_drop_list, *to_free;
+	unsigned long flags;
+
+	ni_drop_list = NULL;
+	to_free = NULL;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	prev = &queue->next;
+	cur = queue->next;
+	while (cur != NULL) {
+		if (cur->ni == ni) {
+			to_free = cur;
+			*prev = cur->next;
+			cur = cur->next;
+			REPLACE_PPQ_ENTRY_HEAD(ni_drop_list, to_free);
+			continue;
+		}
+		prev = &cur->next;
+		cur = cur->next;
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	while (ni_drop_list != NULL) {
+		to_free = ni_drop_list;
+		ni_drop_list = ni_drop_list->next;
+
+		if (to_free->fn_fail)
+			to_free->fn_fail(to_free->ni, PPQ_FAIL_NODELEAVE);
+
+		ieee80211_ppqueue_release_entry(to_free);
+	}
+}
+
+/*
+ * Drop every pending entry expecting the given category/action pair.
+ * Matching entries are unlinked onto a private list under the lock;
+ * their failure callbacks (PPQ_FAIL_STOP) then run outside the lock.
+ *
+ * Fix: the list cursors were previously initialized from queue->next
+ * before the spinlock was taken, racing with concurrent list updates;
+ * they are now set up under the lock.
+ */
+void ieee80211_ppqueue_remove_with_cat_action(struct ieee80211_pairing_pending_queue *queue,
+				u_int8_t category,
+				u_int8_t action)
+{
+	struct ieee80211_pairing_pending_entry **prev, *cur;
+	struct ieee80211_pairing_pending_entry *ni_drop_list, *to_free;
+	unsigned long flags;
+
+	ni_drop_list = NULL;
+	to_free = NULL;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	prev = &queue->next;
+	cur = queue->next;
+	while (cur != NULL) {
+		if (cur->expected_category == category &&
+			cur->expected_action == action) {
+			to_free = cur;
+			*prev = cur->next;
+			cur = cur->next;
+			REPLACE_PPQ_ENTRY_HEAD(ni_drop_list, to_free);
+			continue;
+		}
+		prev = &cur->next;
+		cur = cur->next;
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	while (ni_drop_list != NULL) {
+		to_free = ni_drop_list;
+		ni_drop_list = ni_drop_list->next;
+
+		if (to_free->fn_fail)
+			to_free->fn_fail(to_free->ni, PPQ_FAIL_STOP);
+
+		ieee80211_ppqueue_release_entry(to_free);
+	}
+}
+
+/*
+ * Detach the entire pending list under the lock, then run each
+ * entry's failure callback (PPQ_FAIL_STOP) and free it outside the
+ * lock.  The queue timer is left armed; if it later fires it finds
+ * an empty list and does nothing.
+ */
+void ieee80211_ppqueue_flush(struct ieee80211_pairing_pending_queue *queue)
+{
+	struct ieee80211_pairing_pending_entry *flush_list, *to_free;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	flush_list = queue->next;
+	queue->next = NULL;
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	while (flush_list != NULL) {
+		to_free = flush_list;
+		flush_list = flush_list->next;
+
+		if (to_free->fn_fail)
+			to_free->fn_fail(to_free->ni, PPQ_FAIL_STOP);
+
+		ieee80211_ppqueue_release_entry(to_free);
+	}
+}
+
+/*
+ * Queue timer handler.  Collects every entry whose deadline has
+ * passed: entries with retries left are retransmitted (a clone of the
+ * stored skb) and re-queued with a fresh deadline; the rest get their
+ * failure callback (PPQ_FAIL_TIMEOUT) and are freed.
+ *
+ * Fixes relative to the original:
+ *  - the list cursors are now initialized under the spinlock instead
+ *    of before it (racy on SMP);
+ *  - the timer is re-armed for the earliest remaining deadline at the
+ *    end.  Previously only the re-insert path armed the timer, so if
+ *    a round produced no retries the surviving entries never fired.
+ */
+void ieee80211_ppqueue_timeout(unsigned long ctx)
+{
+	struct ieee80211_pairing_pending_queue *queue = (struct ieee80211_pairing_pending_queue *)ctx;
+	struct ieee80211_pairing_pending_entry **prev, *cur, *to_do, *timeout_retry, *timeout_fail;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	to_do = NULL;
+	timeout_retry = NULL;
+	timeout_fail = NULL;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	prev = &queue->next;
+	cur = queue->next;
+	while (cur != NULL) {
+		if (time_before_eq(cur->next_expire_jiffies, jiffies)) {
+			to_do = cur;
+			*prev = cur->next;
+			cur = cur->next;
+			if (to_do->retry_cnt < to_do->max_retry)
+				REPLACE_PPQ_ENTRY_HEAD(timeout_retry, to_do);
+			else
+				REPLACE_PPQ_ENTRY_HEAD(timeout_fail, to_do);
+			continue;
+		}
+		prev = &cur->next;
+		cur = cur->next;
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	while (timeout_retry != NULL) {
+		to_do = timeout_retry;
+		timeout_retry = timeout_retry->next;
+
+		to_do->retry_cnt++;
+		to_do->next_expire_jiffies = jiffies + to_do->expire;
+		skb = skb_clone(to_do->skb, GFP_ATOMIC);
+		if (skb) {
+			ieee80211_ref_node(to_do->ni);
+			ieee80211_mgmt_output(to_do->ni, skb, IEEE80211_FC0_SUBTYPE_ACTION, to_do->ni->ni_macaddr);
+		}
+		/* Re-queue; this also re-arms the timer if it becomes head */
+		ieee80211_ppqueue_insert_entry(queue, to_do);
+	}
+
+	while (timeout_fail != NULL) {
+		to_do = timeout_fail;
+		timeout_fail = timeout_fail->next;
+
+		if (to_do->fn_fail)
+			to_do->fn_fail(to_do->ni, PPQ_FAIL_TIMEOUT);
+
+		ieee80211_ppqueue_release_entry(to_do);
+	}
+
+	/* Ensure the timer covers the earliest entry still pending */
+	spin_lock_irqsave(&queue->lock, flags);
+	if (queue->next != NULL) {
+		queue->next_expire_jiffies = queue->next->next_expire_jiffies;
+		mod_timer(&queue->timer, queue->next_expire_jiffies);
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+/*
+ * Initialize the VAP's pairing-pending queue: spinlock, expiry timer
+ * (handler ieee80211_ppqueue_timeout, data = queue pointer) and an
+ * empty list.  The timer is not armed until the first entry is
+ * inserted.
+ */
+void ieee80211_ppqueue_init(struct ieee80211vap *vap)
+{
+	struct ieee80211_pairing_pending_queue *queue = (struct ieee80211_pairing_pending_queue *)&vap->iv_ppqueue;
+
+	spin_lock_init(&queue->lock);
+	init_timer(&queue->timer);
+	queue->timer.data = (unsigned long)queue;
+	queue->timer.function = ieee80211_ppqueue_timeout;
+	queue->next = NULL;
+	queue->next_expire_jiffies = 0;
+}
+
+/*
+ * Tear down the VAP's pairing-pending queue: stop the expiry timer
+ * and fail out every remaining entry.
+ * NOTE(review): del_timer() does not wait for a running handler on
+ * another CPU -- del_timer_sync() would be safer here; confirm the
+ * teardown context.
+ */
+void ieee80211_ppqueue_deinit(struct ieee80211vap *vap)
+{
+	struct ieee80211_pairing_pending_queue *queue = (struct ieee80211_pairing_pending_queue *)&vap->iv_ppqueue;
+
+	del_timer(&queue->timer);
+	ieee80211_ppqueue_flush(queue);
+}
+
+/*
+ * Register an action frame that expects a paired response.
+ *
+ * The original skb is stored in a new queue entry so the timeout
+ * handler can retransmit it; a clone is returned for the caller to
+ * transmit now.  The entry records the (category, action, token)
+ * triple of the expected response, the per-attempt expiry, and the
+ * success/failure callbacks.
+ *
+ * On allocation failure the input skb is freed and NULL is returned.
+ */
+struct sk_buff *ieee80211_ppqueue_pre_tx(struct ieee80211_node *ni,
+				struct sk_buff *skb,
+				u_int8_t category,
+				u_int8_t action,
+				u_int8_t token,
+				unsigned long expire,
+				ppq_callback_success fn_success,
+				ppq_callback_fail fn_fail)
+{
+	struct sk_buff *cloned_skb = NULL;
+	struct ieee80211_pairing_pending_queue *queue = &ni->ni_vap->iv_ppqueue;
+	struct ieee80211_pairing_pending_entry *entry = NULL;
+
+	entry = (struct ieee80211_pairing_pending_entry *)kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (NULL == entry) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	cloned_skb = skb_clone(skb, GFP_ATOMIC);
+	if (cloned_skb == NULL) {
+		dev_kfree_skb_any(skb);
+		kfree(entry);
+		return NULL;
+	}
+
+	/* Zeroing also clears the ->next link before insertion */
+	memset(entry, 0, sizeof(*entry));
+	entry->skb = skb;
+	entry->ni = ni;
+	entry->expected_category = category;
+	entry->expected_action = action;
+	entry->expected_token = token;
+	entry->expire = expire;
+	entry->next_expire_jiffies = jiffies + expire;
+	entry->max_retry = IEEE80211_PPQ_DEF_MAX_RETRY;
+	entry->retry_cnt = 0;
+	entry->fn_success = fn_success;
+	entry->fn_fail = fn_fail;
+
+	ieee80211_ppqueue_insert_entry(queue, entry);
+	return cloned_skb;
+}
+
+/*
+ * Build an 802.11k link measurement request action frame (category RM)
+ * carrying the local transmit power and max power headroom, and return
+ * it via p_skb.  If ctrl->ppq.expire is set, the frame is registered
+ * in the pairing-pending queue to await the matching report.
+ *
+ * Returns 0 on success, -1 on allocation/queueing failure.
+ */
+int32_t ieee80211_compile_action_link_measure_request(struct ieee80211_node *ni,
+		void *ctrl,
+		struct sk_buff **p_skb)
+{
+	struct sk_buff *skb;
+	int32_t frame_len = 0;
+	struct ieee80211_link_measure_request *request;
+	u_int8_t *frm;
+	u_int8_t tx_token;
+
+	request = (struct ieee80211_link_measure_request *)ctrl;
+	frame_len = sizeof(struct ieee80211_action_rm_link_measure_request);
+
+	skb = ieee80211_getmgtframe(&frm, frame_len);
+	if (skb == NULL)
+		return -1;
+
+	*frm++ = IEEE80211_ACTION_CAT_RM;
+	*frm++ = IEEE80211_ACTION_R_LINKMEASURE_REQUEST;
+
+	/* Skip dialog token 0 when the per-node counter wraps */
+	if (ni->ni_action_token == 0)
+		ni->ni_action_token++;
+	tx_token = ni->ni_action_token++;
+	*frm++ = tx_token;
+	*frm++ = ni->ni_ic->ic_get_local_txpow(ni->ni_ic);
+	*frm++ = ni->ni_ic->ic_curchan->ic_maxpower_normal + 6;	/* 4 anntenna, add 6 db */
+
+	if (request->ppq.expire != 0) {
+		/* Await the paired report; on success a clone to transmit
+		 * is returned, the original is kept for retries. */
+		skb = ieee80211_ppqueue_pre_tx(ni, skb, IEEE80211_ACTION_CAT_RM,
+				IEEE80211_ACTION_R_LINKMEASURE_REPORT,
+				tx_token, request->ppq.expire,
+				request->ppq.fn_success, request->ppq.fn_fail);
+		if (skb == NULL)
+			return -1;
+	}
+
+	skb_trim(skb, frm - skb->data);
+	*p_skb = skb;
+	return 0;
+}
+
+/*
+ * Build an 802.11k Link Measurement Report action frame.
+ *
+ * @ctrl points to a struct ieee80211_link_measure_report; on success the
+ * finished frame is returned through @p_skb and 0 is returned, else -1.
+ *
+ * Frame body: category, action, dialog token, a TPC Report element
+ * (tx power + link margin), receive/transmit antenna IDs, RCPI and RSNI.
+ */
+int32_t ieee80211_compile_action_link_measure_report(struct ieee80211_node *ni,
+		void *ctrl,
+		struct sk_buff **p_skb)
+{
+	struct sk_buff *skb;
+	int32_t frame_len = 0;
+	struct ieee80211_link_measure_report *report;
+	u_int8_t *frm;
+
+	report = (struct ieee80211_link_measure_report *)ctrl;
+	frame_len = sizeof(struct ieee80211_action_rm_link_measure_report);
+
+	skb = ieee80211_getmgtframe(&frm, frame_len);
+	if (skb == NULL)
+		return -1;
+
+	*frm++ = IEEE80211_ACTION_CAT_RM;
+	*frm++ = IEEE80211_ACTION_R_LINKMEASURE_REPORT;
+	*frm++ = report->token;		/* echo the request's dialog token */
+	*frm++ = IEEE80211_ELEMID_TPCREP;
+	*frm++ = 2;			/* TPC Report element length */
+	*frm++ = report->tpc_report.tx_power;
+	*frm++ = report->tpc_report.link_margin;
+	*frm++ = report->recv_antenna_id;
+	*frm++ = report->tran_antenna_id;
+	*frm++ = report->rcpi;
+	*frm++ = report->rsni;
+
+	skb_trim(skb, frm - skb->data);
+	*p_skb = skb;
+	return 0;
+}
+
+/*
+ * Build an 802.11k Neighbor Report Request action frame.
+ *
+ * @ctrl points to a struct ieee80211_neighbor_report_request; on success
+ * the finished frame is returned through @p_skb and 0 is returned,
+ * else -1.  Frame body: category, action, dialog token.
+ */
+int32_t ieee80211_compile_action_neighbor_report_request(struct ieee80211_node *ni,
+		void *ctrl,
+		struct sk_buff **p_skb)
+{
+	struct sk_buff *skb;
+	int32_t frame_len = 0;
+	u_int8_t *frm;
+	u_int8_t tx_token;
+	struct ieee80211_neighbor_report_request *request;
+
+	request = (struct ieee80211_neighbor_report_request *)ctrl;
+	frame_len = sizeof(struct ieee80211_action_rm_neighbor_report_request);
+	skb = ieee80211_getmgtframe(&frm, frame_len);
+	if (skb == NULL)
+		return -1;
+
+	*frm++ = IEEE80211_ACTION_CAT_RM;
+	*frm++ = IEEE80211_ACTION_R_NEIGHBOR_REQUEST;
+	/* Token value 0 is skipped, so a valid dialog token is never 0 */
+	if (ni->ni_action_token == 0)
+		ni->ni_action_token++;
+	tx_token = ni->ni_action_token++;
+	*frm++ = tx_token;
+
+	/* If a response is expected, park the frame on the pending queue;
+	 * the returned clone shares the original data buffer so the trim
+	 * length below stays correct. */
+	if (request->ppq.expire != 0) {
+		skb = ieee80211_ppqueue_pre_tx(ni, skb, IEEE80211_ACTION_CAT_RM,
+				IEEE80211_ACTION_R_NEIGHBOR_REPORT,
+				tx_token, request->ppq.expire,
+				request->ppq.fn_success, request->ppq.fn_fail);
+		if (skb == NULL)
+			return -1;
+	}
+
+	skb_trim(skb, frm - skb->data);
+	*p_skb = skb;
+	return 0;
+}
+
+/*
+ * Build an 802.11k Neighbor Report Response action frame.
+ *
+ * @ctrl points to a struct ieee80211_neighbor_report_response carrying
+ * up to @bss_num neighbor entries; at most 32 are encoded (any excess is
+ * silently dropped).  On success the finished frame is returned through
+ * @p_skb and 0 is returned, else -1.
+ */
+int32_t ieee80211_compile_action_neighbor_report_response(struct ieee80211_node *ni,
+		void *ctrl,
+		struct sk_buff **p_skb)
+{
+	struct sk_buff *skb;
+	int32_t frame_len = 0;
+	u_int8_t *frm;
+	struct ieee80211_neighbor_report_response *response;
+	u_int8_t i;
+	u_int8_t bss_num = 0;
+
+	response = (struct ieee80211_neighbor_report_response *)ctrl;
+	frame_len = sizeof(struct ieee80211_action_rm_neighbor_report_response);
+	if (response->bss_num > 0) {
+		/* Cap the encoded entries at 32 to bound the frame size */
+		bss_num = (response->bss_num > 32 ? 32 : response->bss_num);
+		frame_len += sizeof(struct ieee80211_ie_neighbor_report) * bss_num;
+	}
+	skb = ieee80211_getmgtframe(&frm, frame_len);
+	if (skb == NULL)
+		return -1;
+
+	*frm++ = IEEE80211_ACTION_CAT_RM;
+	*frm++ = IEEE80211_ACTION_R_NEIGHBOR_REPORT;
+	*frm++ = response->token;	/* echo the request's dialog token */
+
+	/* One Neighbor Report element per BSS entry */
+	for (i = 0; i < bss_num; i++) {
+		*frm++ = IEEE80211_ELEMID_NEIGHBOR_REP;
+		/* element length excludes the 2-byte id/len header */
+		*frm++ = sizeof(struct ieee80211_ie_neighbor_report) - 2;
+		memcpy(frm, response->neighbor_report_ptr[i]->bssid, IEEE80211_ADDR_LEN);
+		frm += IEEE80211_ADDR_LEN;
+		ADDINT32(frm, response->neighbor_report_ptr[i]->bssid_info);
+		*frm++ = response->neighbor_report_ptr[i]->operating_class;
+		*frm++ = response->neighbor_report_ptr[i]->channel;
+		*frm++ = response->neighbor_report_ptr[i]->phy_type;
+	}
+
+	skb_trim(skb, frm - skb->data);
+	*p_skb = skb;
+	return 0;
+}
+
+/*
+ * Decide whether RX A-MSDU should be enabled for @ni.
+ * Returns 1 if RX A-MSDU can be enabled, 0 if it should be disabled.
+ *
+ * Disabled outright when the VAP config is QTN_RX_AMSDU_DISABLE.  In
+ * QTN_RX_AMSDU_DYNAMIC mode, for non-Quantenna peers only, the decision
+ * consults live SCS statistics: A-MSDU is disabled when either the CCA
+ * interference on the current channel or the (weighted) preamble error
+ * level exceeds its configured threshold.
+ */
+static int ieee80211_rx_amsdu_allowed(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+
+	if (vap->iv_rx_amsdu_enable == QTN_RX_AMSDU_DISABLE) {
+		return 0;
+	}
+
+	/* Dynamic mode applies the interference checks only to non-QTN peers */
+	if ((vap->iv_rx_amsdu_enable == QTN_RX_AMSDU_DYNAMIC) && !ieee80211_node_is_qtn(ni)) {
+		if (ic->ic_scs.scs_stats_on) {
+			/* CCA interference check, AP mode only (threshold 0 = disabled) */
+			if (vap->iv_rx_amsdu_threshold_cca && (ic->ic_opmode == IEEE80211_M_HOSTAP)) {
+				struct ap_state *as = ic->ic_scan->ss_scs_priv;
+				uint32_t cca_intf = as->as_cca_intf[ic->ic_curchan->ic_ieee];
+				if ((cca_intf != SCS_CCA_INTF_INVALID) &&
+						(cca_intf > vap->iv_rx_amsdu_threshold_cca)) {
+					return 0;
+				}
+			}
+			/* Preamble error check (threshold 0 = disabled).  pmbl_err is a
+			 * percentage-weighted blend of the smoothed short- and
+			 * long-preamble error counters -- presumably wf_sp + wf_lp
+			 * sum to 100; confirm where the weights are configured. */
+			if (vap->iv_rx_amsdu_threshold_pmbl) {
+				uint32_t pmbl_err = (vap->iv_rx_amsdu_pmbl_wf_sp * ic->ic_scs.scs_sp_err_smthed +
+						vap->iv_rx_amsdu_pmbl_wf_lp * ic->ic_scs.scs_lp_err_smthed) / 100;
+
+				if (pmbl_err > vap->iv_rx_amsdu_threshold_pmbl) {
+					return 0;
+				}
+			}
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * Report the BSS channel bandwidth and HT secondary-channel offset.
+ *
+ * *is_40 is set to 1 only when the device advertises 40 MHz capability
+ * and the BSS channel has a usable extension channel; *offset receives
+ * the matching IEEE80211_HTINFO_CHOFF_* value (SCN for 20 MHz operation,
+ * SCA/SCB for an extension channel above/below the primary).
+ */
+void
+ieee80211_get_channel_bw_offset(struct ieee80211com *ic, int16_t *is_40, int16_t *offset)
+{
+	/* 20 MHz unless both the capability and the channel support 40 MHz */
+	if ((ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) == 0 ||
+			(ic->ic_bsschan->ic_flags & IEEE80211_CHAN_HT40_DUAL_EXT) == 0) {
+		*is_40 = 0;
+		*offset = IEEE80211_HTINFO_CHOFF_SCN;
+		return;
+	}
+
+	*is_40 = 1;
+	if (ic->ic_bsschan->ic_flags & IEEE80211_CHAN_HT40U)
+		*offset = IEEE80211_HTINFO_CHOFF_SCA;
+	else
+		*offset = IEEE80211_HTINFO_CHOFF_SCB;
+}
+
+/*
+ * Check whether the peer looks like an 11b-only AP.
+ *
+ * Returns 1 when every rate advertised by @ni (ignoring the basic-rate
+ * flag bit) appears in the local 11b supported-rate set, 0 as soon as
+ * any non-11b rate is found.
+ */
+static int ieee80211_check_11b_ap(const struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_rateset *b_rates = &ic->ic_sup_rates[IEEE80211_MODE_11B];
+	int i, j, found;
+
+	for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
+		found = 0;
+		for (j = 0; j < b_rates->rs_nrates; j++) {
+			if ((ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) ==
+					(b_rates->rs_rates[j] & IEEE80211_RATE_VAL)) {
+				found = 1;
+				break;
+			}
+		}
+
+		/* A single rate outside the 11b set disqualifies the peer */
+		if (!found)
+			return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Build an 802.11v BSS Transition Management (BTM) Request action frame.
+ *
+ * Mandatory body: category, action, dialog token, request mode,
+ * disassociation timer (little-endian), validity interval.  Optional
+ * parts driven by @ctrl: a BSS Termination Duration sub-element (only
+ * when BTM_REQ_BSS_TERMINATION_INCLUDED is set AND a buffer is given),
+ * a session-information URL, and pre-encoded neighbor report IEs copied
+ * verbatim.  The token used is remembered in ni->ni_btm_req so the BTM
+ * response can be matched.
+ *
+ * Returns 0 with the finished frame in *pp_skb, or -1 if no mgmt frame
+ * buffer could be allocated.
+ */
+static int
+ieee80211_compile_action_btm_req(struct ieee80211_node *ni,
+				struct btm_request_params *ctrl,
+				struct sk_buff **pp_skb)
+{
+	struct sk_buff *skb = NULL;
+	uint8_t *frm = NULL;
+	size_t url_len = 0;
+	uint16_t frm_len = 0;
+	uint8_t tx_token = 0;
+
+	/* NOTE(review): url_len counts the NUL terminator; the BTM URL field
+	 * is normally not NUL-terminated -- confirm against spec/peers. */
+	url_len = ctrl->url ? strlen(ctrl->url) + 1 : 0;
+	frm_len = (sizeof(struct ieee80211_action_btm_req)
+		+ (ctrl->bss_term_dur ? sizeof(struct ieee80211_ie_btm_bss_termdur) : 0)
+		+ url_len
+		+ ctrl->neigh_reports_length);
+	skb = ieee80211_getmgtframe(&frm, frm_len);
+	if (skb == NULL)
+		return -1;
+
+	*frm++ = IEEE80211_ACTION_CAT_WNM;
+	*frm++ = IEEE80211_WNM_BSS_TRANS_MGMT_REQ;
+	/* Use the caller-supplied dialog token when given, otherwise draw
+	 * one from the per-node counter (0 is reserved and skipped). */
+	if (ctrl->dialog_token != 0) {
+		*frm++ = ctrl->dialog_token;
+		tx_token = ctrl->dialog_token;
+	} else {
+		if (ni->ni_action_token == 0)
+			ni->ni_action_token++;
+		*frm++ = ni->ni_action_token;
+		tx_token = ni->ni_action_token++;
+	}
+	*frm++ = ctrl->request_mode;
+	ADDINT16LE(frm, ctrl->disassoc_timer);
+	*frm++ = ctrl->validity_interval;
+	ni->ni_btm_req = tx_token;	/* remember token to match the BTM response */
+
+	if ((ctrl->request_mode & BTM_REQ_BSS_TERMINATION_INCLUDED) && ctrl->bss_term_dur) {
+		memcpy(frm, ctrl->bss_term_dur, sizeof(struct ieee80211_ie_btm_bss_termdur));
+		frm += sizeof(struct ieee80211_ie_btm_bss_termdur);
+	}
+
+	/* Session information URL: one length byte, then the URL bytes */
+	if (ctrl->url) {
+		*frm++ = url_len;
+		memcpy(frm, ctrl->url, url_len);
+		frm += url_len;
+	}
+	/* Candidate list: caller supplies fully-encoded neighbor report IEs */
+	if (ctrl->neigh_reports) {
+		memcpy(frm, ctrl->neigh_reports, ctrl->neigh_reports_length);
+		frm += ctrl->neigh_reports_length;
+	}
+
+	skb_trim(skb, frm - skb->data);
+	*pp_skb = skb;
+
+	IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ACTION, "Sending BTM request to %pM token %u\n",
+			ni->ni_macaddr, tx_token);
+
+	return 0;
+}
+
+/*
+ * Send a management frame.  The node is for the destination (or ic_bss
+ * when in station mode).  Nodes other than ic_bss have their reference
+ * count bumped to reflect our use for an indeterminate time.
+ */
+int
+ieee80211_send_mgmt(struct ieee80211_node *ni, int type, int arg)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct sk_buff *skb = NULL;
+	u_int8_t *frm;
+	int16_t htinfo_channel_width = 0;
+	int16_t htinfo_2nd_channel_offset = 0;
+	u_int16_t capinfo, def_keyindex;
+	int has_challenge, is_shared_key, ret, timer, status, is_bcast_probe, expired_timer;
+	enum ieee80211_phymode mode;
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+	int ap_pure_tkip = 0;
+	int sta_pure_tkip = 0;
+	int is_11b_ap;
+
+	KASSERT(ni != NULL, ("null node"));
+
+	if (vap->iv_opmode == IEEE80211_M_STA && ni != vap->iv_bss) {
+		/*
+		 * In Roaming cases, STA may receive null data frames from old AP
+		 * if it's not disassociated properly.
+		 * We add a exception here so that STA can send De-auth to old AP.
+		 */
+		if (type != IEEE80211_FC0_SUBTYPE_ACTION && type != IEEE80211_FC0_SUBTYPE_DEAUTH)
+			return 0;
+	}
+
+	ieee80211_ref_node(ni);
+
+	mode = ic->ic_curmode;
+	timer = 0;
+
+	if (vap->iv_bss && !vap->allow_tkip_for_vht) {
+	      ap_pure_tkip = (vap->iv_bss->ni_rsn.rsn_ucastcipherset == IEEE80211_C_TKIP);
+	      sta_pure_tkip = (vap->iv_bss->ni_rsn.rsn_ucastcipher == IEEE80211_CIPHER_TKIP);
+	}
+
+	switch (type) {
+	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+		/*
+		 * probe response frame format
+	 	 *	[8] time stamp
+		 *	[2] beacon interval
+		 *	[2] capability information
+		 *	[tlv] ssid
+		 *	[tlv] supported rates
+		 *	[7] FH/DS parameter set
+		 *	[tlv] IBSS parameter set
+		 *	[tlv] country code
+		 *	[3] power constraint
+		 *	[4] tpc report
+		 *	[tlv] Channel Switch Announcement
+		 *	[3] extended rate phy (ERP)
+		 *	[tlv] extended supported rates
+		 *	[tlv] WME parameters
+		 *	[tlv] WPA/RSN parameters
+		 *	[tlv] Atheros Advanced Capabilities
+		 *	[tlv] AtherosXR parameters
+		 *	[tlv] Quantenna parameters (probe resp)
+		 *      [tlv] QTN IE
+		 */
+		skb = ieee80211_getmgtframe(&frm,
+			  8	/* time stamp */
+			+ sizeof(u_int16_t)	/* beacon interval */
+			+ sizeof(u_int16_t)	/* capability information */
+			+ 2 + IEEE80211_NWID_LEN	/* ssid */
+			+ 2 + IEEE80211_RATE_SIZE	/* supported rates */
+			+ 7	/* FH/DS parameters max(7,3) */
+			/* XXX allocate max size */
+			+ 4	/* IBSS parameter set*/
+			+ 2 + ic->ic_country_ie.country_len	/* country code */
+			+ ((vap->interworking) ? 7 : 0)	/* BSS load */
+			+ 3	/* power constraint */
+			+ 4	/* tpc report */
+			+ IEEE80211_CHANSWITCHANN_BYTES  /*	CSA	*/
+			+ 3	/* ERP */
+			+ 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+			+ sizeof(struct ieee80211_wme_param)
+			/* XXX !WPA1+WPA2 fits w/o a cluster */
+			+ (vap->iv_flags & IEEE80211_F_WPA ?
+				2 * sizeof(struct ieee80211_ie_wpa) : 0)
+			+ ((ic->ic_curmode >= IEEE80211_MODE_11NA) ?
+					(sizeof(struct ieee80211_ie_htcap) +
+					 sizeof(struct ieee80211_ie_htinfo)) : 0)
+			+ sizeof(struct ieee80211_ie_athAdvCap)
+			+ vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length
+			+ sizeof(struct ieee80211_ie_qtn)
+			+ sizeof(struct ieee80211_qtn_ext_role)
+			+ sizeof(struct ieee80211_qtn_ext_bssid)
+			+ (vap->qtn_pairing_ie.ie ? sizeof(struct ieee80211_ie_qtn_pairing) : 0)
+			+ (IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) ?
+				(sizeof(struct ieee80211_ie_vhtcap) +
+				 sizeof(struct ieee80211_ie_vhtop) +
+				 sizeof(struct ieee80211_ie_vtxpwren)) : 0)
+			+ ((IS_IEEE80211_11NG(ic)) ?
+				(sizeof(struct ieee80211_20_40_coex_param) +
+				sizeof(struct ieee80211_obss_scan_ie)) : 0)
+			+ (IEEE80211_COM_NEIGHREPORT_ENABLED(ic) ? sizeof(struct ieee80211_ie_rrm) : 0)
+			);
+		if (skb == NULL)
+			senderr(ENOMEM, is_tx_nobuf);
+
+		/* timestamp should be filled later */
+		memset(frm, 0, 8);
+		frm += 8;
+
+		/* beacon interval */
+		*(__le16 *)frm = htole16(vap->iv_bss ? vap->iv_bss->ni_intval : IEEE80211_BINTVAL_DEFAULT);
+		frm += 2;
+
+		/* cap. info */
+		if (vap->iv_opmode == IEEE80211_M_IBSS)
+			capinfo = IEEE80211_CAPINFO_IBSS;
+		else
+			capinfo = IEEE80211_CAPINFO_ESS;
+		if (vap->iv_flags & IEEE80211_F_PRIVACY)
+			capinfo |= IEEE80211_CAPINFO_PRIVACY;
+		if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+		    IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+			capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+		if (ic->ic_flags & IEEE80211_F_SHSLOT)
+			capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+		if (ic->ic_flags & IEEE80211_F_DOTH)
+			capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+		*(__le16 *)frm = htole16(capinfo);
+		frm += 2;
+
+		/* ssid */
+		is_bcast_probe = arg;
+		if ((!vap->iv_bss) || ((vap->iv_flags & IEEE80211_F_HIDESSID)
+						 && is_bcast_probe)) {
+			frm = ieee80211_add_ssid(frm, (u_int8_t *)"", 0);
+		} else {
+			frm = ieee80211_add_ssid(frm, vap->iv_bss->ni_essid,
+				vap->iv_bss->ni_esslen);
+		}
+
+		/* supported rates */
+		frm = ieee80211_add_rates(frm, &ic->ic_sup_rates[mode]);
+
+		/* XXX: FH/DS parameter set, correct ? */
+		if (ic->ic_phytype == IEEE80211_T_FH) {
+			*frm++ = IEEE80211_ELEMID_FHPARMS;
+			*frm++ = 5;
+			*frm++ = ni->ni_fhdwell & 0x00ff;
+			*frm++ = (ni->ni_fhdwell >> 8) & 0x00ff;
+			*frm++ = IEEE80211_FH_CHANSET(
+				ieee80211_chan2ieee(ic, ic->ic_curchan));
+			*frm++ = IEEE80211_FH_CHANPAT(
+				ieee80211_chan2ieee(ic, ic->ic_curchan));
+			*frm++ = ni->ni_fhindex;
+		} else {
+			*frm++ = IEEE80211_ELEMID_DSPARMS;
+			*frm++ = 1;
+			*frm++ = ieee80211_chan2ieee(ic, ic->ic_bsschan);
+		}
+
+		if (vap->iv_opmode == IEEE80211_M_IBSS) {
+			*frm++ = IEEE80211_ELEMID_IBSSPARMS;
+			*frm++ = 2;
+			*frm++ = 0;
+			*frm++ = 0;		/* TODO: ATIM window */
+		}
+
+		frm = ieee80211_add_bss_load(frm, vap);
+
+		/*
+		 * Tight coupling between Country IE and Power Constraint IE
+		 * Both using IEEE80211_FEXT_COUNTRYIE to optional enable them.
+		 */
+		/* country code */
+		if ((ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE) ||
+				((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)))
+			frm = ieee80211_add_country(frm, ic);
+
+		/* power constraint */
+		if (((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE)) ||
+				((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext && IEEE80211_FEXT_TPC))) {
+			*frm++ = IEEE80211_ELEMID_PWRCNSTR;
+			*frm++ = 1;
+			*frm++ = IEEE80211_PWRCONSTRAINT_VAL(ic);
+		}
+
+		if (IS_IEEE80211_11NG(ic)) {
+			frm = ieee80211_add_20_40_bss_coex_ie(frm, vap->iv_coex);
+			frm = ieee80211_add_obss_scan_ie(frm, &ic->ic_obss_ie);
+		}
+
+		/* Transmit power envelope */
+		if (IS_IEEE80211_VHT_ENABLED(ic) && (ic->ic_flags & IEEE80211_F_DOTH)) {
+			frm = ieee80211_add_vhttxpwr_envelope(frm, ic);
+		}
+
+		/*TPC Report*/
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			*frm++ = IEEE80211_ELEMID_TPCREP;
+			*frm++ = 2;
+			*frm++ = ic->ic_get_local_txpow(ic);	/* tx power would be updated in macfw */
+			*frm++ = 0;	/* link margin is 0 */
+		}
+
+		/*	CSA	*/
+		if (ic->ic_csa_count) {
+			frm = ieee80211_add_csa(frm, ic->ic_csa_mode,
+					ic->ic_csa_chan->ic_ieee, ic->ic_csa_count);
+			ieee80211_add_sec_chan_off(&frm, ic, ic->ic_csa_chan->ic_ieee);
+		}
+
+		/* ERP */
+		if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
+			((ic->ic_curmode == IEEE80211_MODE_11A) ||
+			(ic->ic_curmode == IEEE80211_MODE_11B))) {
+			frm = ieee80211_add_erp(frm, ic);
+		}
+
+		ieee80211_get_channel_bw_offset(ic, &htinfo_channel_width, &htinfo_2nd_channel_offset);
+
+		/* 802.11n specific IEs */
+		if (IEEE80211_IS_CHAN_ANYN(ic->ic_bsschan) &&
+			(ic->ic_curmode >= IEEE80211_MODE_11NA) && !ap_pure_tkip) {
+			frm = ieee80211_add_htcap(ni, frm, &ic->ic_htcap, type);
+			ic->ic_htinfo.ctrlchannel = ieee80211_chan2ieee(ic, ic->ic_bsschan);
+			ic->ic_htinfo.byte1 |= (htinfo_channel_width ? IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40 : 0x0);
+			ic->ic_htinfo.choffset = htinfo_2nd_channel_offset;
+			frm = ieee80211_add_htinfo(ni, frm, &ic->ic_htinfo);
+		}
+
+		/* Ext. Supp. Rates */
+		frm = ieee80211_add_xrates(frm, &ic->ic_sup_rates[mode]);
+
+		/* WME */
+		if (vap->iv_flags & IEEE80211_F_WME)
+			frm = ieee80211_add_wme_param(frm, wme,
+					IEEE80211_VAP_UAPSD_ENABLED(vap), 0);
+
+		/* WPA */
+		if (!vap->iv_osen && vap->iv_bss && (vap->iv_flags & IEEE80211_F_WPA))
+			frm = ieee80211_add_wpa(frm, vap);
+		/* RRM enabled IE */
+		if (IEEE80211_COM_NEIGHREPORT_ENABLED(ic))
+			frm = ieee80211_add_rrm_enabled(frm, vap);
+		/* Mobility Domain IE */
+		if (vap->iv_mdid) {
+			frm = ieee80211_add_mdie(frm, vap);
+		}
+		/* AthAdvCaps */
+		if (vap->iv_bss && vap->iv_bss->ni_ath_flags)
+			frm = ieee80211_add_athAdvCap(frm, vap->iv_bss->ni_ath_flags,
+					vap->iv_bss->ni_ath_defkeyindex);
+
+		if (vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].ie) {
+			memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].ie,
+					vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length);
+			ieee80211_update_bss_tm(frm,
+				vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length, ic, vap);
+			frm += vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length;
+		}
+
+		if (vap->qtn_pairing_ie.ie) {
+			frm = ieee80211_add_qtn_pairing_ie(frm, &vap->qtn_pairing_ie);
+		}
+
+		if (IS_IEEE80211_VHT_ENABLED(ic) && !ap_pure_tkip) {
+			frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap, type);
+
+			/* VHT Operation element */
+			if ((IEEE80211_IS_VHT_40(ic)) || (IEEE80211_IS_VHT_20(ic))) {
+				ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+				ic->ic_vhtop.centerfreq0 = 0;
+			} else if (IEEE80211_IS_VHT_80(ic)) {
+				ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+				ic->ic_vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_80MHz;
+			} else {
+				ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_160MHZ;
+				ic->ic_vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_160MHz;
+			}
+			frm = ieee80211_add_vhtop(ni, frm, &ic->ic_vhtop);
+		} else if (IS_IEEE80211_11NG_VHT_ENABLED(ic) && !ap_pure_tkip) {
+			/* QTN 2.4G VHT IE */
+			frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap_24g, type);
+			frm = ieee80211_add_vhtop(ni, frm, &ic->ic_vhtop_24g);
+		}
+
+		frm = ieee80211_add_qtn_ie(frm, ic,
+			(vap->iv_flags_ext & IEEE80211_FEXT_WDS ? (IEEE80211_QTN_BRIDGEMODE) : 0),
+			(vap->iv_flags_ext & IEEE80211_FEXT_WDS ?
+				(IEEE80211_QTN_BRIDGEMODE | IEEE80211_QTN_LNCB) : 0),
+			0, 0, 0);
+
+		/* Add Quantenna extender IE */
+		if (!IEEE80211_COM_WDS_IS_NONE(ic) && (vap == TAILQ_FIRST(&ic->ic_vaps))) {
+			frm = ieee80211_add_qtn_extender_role_ie(frm, ic->ic_extender_role);
+			frm = ieee80211_add_qtn_extender_bssid_ie(vap, frm);
+			frm = ieee80211_add_qtn_extender_state_ie(frm, !!ic->ic_ocac.ocac_cfg.ocac_enable);
+		}
+
+#ifdef CONFIG_QVSP
+		/* QTN WME IE */
+		if (ic->ic_wme.wme_throt_bm && ic->ic_wme.wme_throt_add_qwme_ie &&
+				(vap->iv_flags & IEEE80211_F_WME)) {
+			frm = ieee80211_add_qtn_wme_param(vap, frm);
+		}
+#endif
+		skb_trim(skb, frm - skb->data);
+		break;
+
+	case IEEE80211_FC0_SUBTYPE_AUTH:
+		status = arg >> 16;
+		arg &= 0xffff;
+		has_challenge = ((arg == IEEE80211_AUTH_SHARED_CHALLENGE ||
+			arg == IEEE80211_AUTH_SHARED_RESPONSE) &&
+			ni->ni_challenge != NULL);
+		uint8_t len = 0;
+		uint8_t *data = NULL;
+
+		/*
+		 * Deduce whether we're doing open authentication or
+		 * shared key authentication.  We do the latter if
+		 * we're in the middle of a shared key authentication
+		 * handshake or if we're initiating an authentication
+		 * request and configured to use shared key.
+		 */
+		is_shared_key = has_challenge ||
+			arg >= IEEE80211_AUTH_SHARED_RESPONSE ||
+			(arg == IEEE80211_AUTH_SHARED_REQUEST &&
+			vap->iv_bss &&
+			vap->iv_bss->ni_authmode == IEEE80211_AUTH_SHARED);
+
+		len = 3 * sizeof(u_int16_t)
+			+ (has_challenge && status == IEEE80211_STATUS_SUCCESS ?
+				sizeof(u_int16_t)+IEEE80211_CHALLENGE_LEN : 0);
+		if (ni->ni_tx_md_ie && ni->ni_tx_md_ie[1] > 0)
+			len += ni->ni_tx_md_ie[1] + 2;
+		if (ni->ni_tx_ft_ie && ni->ni_tx_ft_ie[1] > 0)
+			len += ni->ni_tx_ft_ie[1] + 2;
+		if (ni->ni_tx_rsn_ie && ni->ni_tx_rsn_ie[1] > 0)
+			len += ni->ni_tx_rsn_ie[1] + 2;
+
+		skb = ieee80211_getmgtframe(&frm, len);
+		if (skb == NULL)
+			senderr(ENOMEM, is_tx_nobuf);
+
+		((__le16 *)frm)[0] =
+			(is_shared_key) ? htole16(IEEE80211_AUTH_ALG_SHARED)
+				: htole16(IEEE80211_AUTH_ALG_OPEN);
+		((__le16 *)frm)[1] = htole16(arg);	/* sequence number */
+		((__le16 *)frm)[2] = htole16(status);	/* status */
+
+		if (has_challenge && status == IEEE80211_STATUS_SUCCESS) {
+			((__le16 *)frm)[3] =
+				htole16((IEEE80211_CHALLENGE_LEN << 8) |
+					IEEE80211_ELEMID_CHALLENGE);
+			memcpy(&((__le16 *)frm)[4], ni->ni_challenge,
+				IEEE80211_CHALLENGE_LEN);
+			if (arg == IEEE80211_AUTH_SHARED_RESPONSE) {
+				IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
+					"request encrypt frame (%s)", __func__);
+				M_FLAG_SET(skb, M_LINK0);
+			}
+		}
+		if ( arg == IEEE80211_AUTH_FT) {
+			((__le16 *)frm)[0] = htole16(IEEE80211_AUTH_ALG_FT);
+			((__le16 *)frm)[1] = htole16(2);	/* sequence number */
+
+			if (status == IEEE80211_STATUS_SUCCESS) {
+				data = &frm[6];
+				if (ni->ni_tx_md_ie) {
+					memcpy(data, ni->ni_tx_md_ie, ni->ni_tx_md_ie[1] + 2);
+					data += ni->ni_tx_md_ie[1] + 2;
+				}
+				if (ni->ni_tx_ft_ie) {
+					memcpy(data, ni->ni_tx_ft_ie, ni->ni_tx_ft_ie[1] + 2);
+					data += ni->ni_tx_ft_ie[1] + 2;
+				}
+				if (ni->ni_tx_rsn_ie) {
+					memcpy(data, ni->ni_tx_rsn_ie, ni->ni_tx_rsn_ie[1] + 2);
+					data += ni->ni_tx_rsn_ie[1] + 2;
+				}
+			}
+		}
+
+		/* XXX not right for shared key */
+		if (status == IEEE80211_STATUS_SUCCESS) {
+			IEEE80211_NODE_STAT(ni, tx_auth);
+			if (arg == IEEE80211_AUTH_OPEN_RESPONSE && vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				char event_string[IW_CUSTOM_MAX]; /* Buffer for IWEVENT message */
+				union iwreq_data wreq;
+				memset(&wreq, 0, sizeof(wreq));
+				snprintf(event_string,IW_CUSTOM_MAX,"%sClient authenticated [%pM]",
+							QEVT_COMMON_PREFIX, ni->ni_macaddr);
+				wreq.data.length = strlen(event_string);
+				wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, event_string);
+			}
+		} else {
+			IEEE80211_NODE_STAT(ni, tx_auth_fail);
+			if (arg == IEEE80211_AUTH_OPEN_RESPONSE && vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				char event_string[IW_CUSTOM_MAX]; /* Buffer for IWEVENT message */
+				union iwreq_data wreq;
+				memset(&wreq, 0, sizeof(wreq));
+				snprintf(event_string,IW_CUSTOM_MAX,"%sClient failed to authenticate [%pM]",
+							QEVT_COMMON_PREFIX, ni->ni_macaddr);
+				wreq.data.length = strlen(event_string);
+				wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, event_string);
+			}
+		}
+
+		if (vap->iv_opmode == IEEE80211_M_STA)
+			timer = IEEE80211_TRANS_WAIT;
+		break;
+
+	case IEEE80211_FC0_SUBTYPE_DEAUTH:
+		IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
+			"send station deauthenticate (reason %d)", arg);
+		skb = ieee80211_getmgtframe(&frm, sizeof(u_int16_t));
+		if (skb == NULL)
+			senderr(ENOMEM, is_tx_nobuf);
+		*(__le16 *)frm = htole16(arg);	/* reason */
+
+		IEEE80211_NODE_STAT(ni, tx_deauth);
+		IEEE80211_NODE_STAT_SET(ni, tx_deauth_code, arg);
+		{
+			int msg = IEEE80211_DOT11_MSG_CLIENT_REMOVED;
+			if (vap->iv_opmode == IEEE80211_M_STA) {
+				msg = IEEE80211_DOT11_MSG_AP_DISCONNECTED;
+			}
+			if (arg == IEEE80211_REASON_AUTH_EXPIRE) {
+				ieee80211_eventf(vap->iv_dev,"%s[WLAN access rejected: incorrect "
+						 "security] from MAC address %pM", QEVT_ACL_PREFIX,
+						 ni->ni_macaddr);
+			}
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+			if ((vap->bsa_status == BSA_STATUS_ACTIVE) &&
+					(vap->iv_opmode == IEEE80211_M_HOSTAP))
+				ieee80211_bsa_disconnect_event_send(vap, ni, arg,
+						IEEE80211_FC0_SUBTYPE_DEAUTH,
+						BSA_DISCONNECT_SELF_GENERATED);
+#endif
+
+			ieee80211_dot11_msg_send(ni->ni_vap,
+					(char *)ni->ni_macaddr,
+					d11_m[msg],
+					d11_c[IEEE80211_DOT11_MSG_REASON_DEAUTHENTICATED],
+					arg,
+					(arg < DOT11_MAX_REASON_CODE) ? d11_r[arg] : "Reserved",
+					NULL,
+					NULL);
+		}
+
+		ieee80211_node_unauthorize(ni);		/* port closed */
+		break;
+
+	case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+	case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
+		/*
+		 * asreq frame format
+		 *	[2] capability information
+		 *	[2] listen interval
+		 *	[6*] current AP address (reassoc only)
+		 *	[tlv] ssid
+		 *	[tlv] supported rates
+		 *	[4] power capability (802.11h)
+		 *	[54] supported channels element (802.11h)
+		 *	[tlv] extended supported rates
+		 *	[tlv] WME [if enabled and AP capable]
+		 *      [tlv] Atheros advanced capabilities
+		 *	[tlv] user-specified ie's
+		 *      [tlv] QTN IE
+		 */
+		skb = ieee80211_getmgtframe(&frm,
+			sizeof(u_int16_t) +
+			sizeof(u_int16_t) +
+			IEEE80211_ADDR_LEN +
+			2 + IEEE80211_NWID_LEN +
+			2 + IEEE80211_RATE_SIZE +
+			4 + (2 + IEEE80211_SUPPCHAN_LEN) +
+			2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) +
+			((ic->ic_curmode >= IEEE80211_MODE_11NA) ?
+					(sizeof(struct ieee80211_ie_htcap) +
+					 sizeof(struct ieee80211_extcap_param)) : 0) +
+			sizeof(struct ieee80211_ie_wme) +
+			sizeof(struct ieee80211_ie_athAdvCap) +
+			(vap->iv_opt_ie != NULL ? vap->iv_opt_ie_len : 0) +
+			vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].length +
+			sizeof(struct ieee80211_ie_qtn) +
+			(vap->qtn_pairing_ie.ie ? sizeof(struct ieee80211_ie_qtn_pairing) : 0) +
+			(IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) ?
+				sizeof(struct ieee80211_ie_vhtcap) +
+				sizeof(struct ieee80211_ie_vhtop_notif) : 0)
+			);
+		if (skb == NULL)
+			senderr(ENOMEM, is_tx_nobuf);
+
+		capinfo = 0;
+		if (vap->iv_opmode == IEEE80211_M_IBSS)
+			capinfo |= IEEE80211_CAPINFO_IBSS;
+		else		/* IEEE80211_M_STA */
+			capinfo |= IEEE80211_CAPINFO_ESS;
+		if (vap->iv_flags & IEEE80211_F_PRIVACY)
+			capinfo |= IEEE80211_CAPINFO_PRIVACY;
+		/*
+		 * NB: Some 11a AP's reject the request when
+		 *     short premable is set.
+		 */
+		/* Capability information */
+		if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+		    IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+			capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+		if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) &&
+		    (ic->ic_caps & IEEE80211_C_SHSLOT))
+			capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && vap->iv_bss &&
+			(vap->iv_bss->ni_flags & IEEE80211_NODE_HT)) {
+			capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+		}
+		*(__le16 *)frm = htole16(capinfo);
+		frm += 2;
+
+		/* listen interval */
+		*(__le16 *)frm = htole16(ic->ic_lintval / ni->ni_intval);
+		frm += 2;
+
+		/* Current AP address */
+		if (type == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
+			if (vap->iv_bss) {
+				IEEE80211_ADDR_COPY(frm, vap->iv_bss->ni_bssid);
+			} else {
+				printk(KERN_ERR "Sending Reassoc Req frame"
+						" with NULL BSSID \n");
+				memset(frm, 0, IEEE80211_ADDR_LEN);
+			}
+			frm += IEEE80211_ADDR_LEN;
+		}
+		/* ssid */
+		frm = ieee80211_add_ssid(frm, ni->ni_essid, ni->ni_esslen);
+
+		is_11b_ap = ieee80211_check_11b_ap(ni);
+		/* supported rates */
+		if (!is_11b_ap) {
+			frm = ieee80211_add_rates(frm, &ic->ic_sup_rates[mode]);
+		} else {
+			frm = ieee80211_add_rates(frm, &ic->ic_sup_rates[IEEE80211_MODE_11B]);
+		}
+
+		if ((ic->ic_curmode >= IEEE80211_MODE_11NA) && vap->iv_bss &&
+			(vap->iv_bss->ni_flags & IEEE80211_NODE_HT) && !sta_pure_tkip) {
+			frm = ieee80211_add_htcap(ni, frm, &ic->ic_htcap, type);
+			/* Ext. Capabilities - For AP mode hostapd adds the extended cap */
+			if (vap->iv_opmode == IEEE80211_M_STA)
+				frm = ieee80211_add_extcap(frm);
+		}
+
+		/* ext. supp. rates */
+		if (!is_11b_ap) {
+			frm = ieee80211_add_xrates(frm, &ic->ic_sup_rates[mode]);
+		}
+
+		/* power capability/supported channels
+		 * in chapter 8.3.3.5, power capability IE is right after extended supported rates
+		 * and before supported channels
+		 * */
+		if (ic->ic_flags & IEEE80211_F_DOTH)
+			frm = ieee80211_add_doth(frm, ic);
+
+		/* Supported Channels */
+		frm = ieee80211_add_supported_chans(frm, ic);
+
+		/* WME */
+		if ((vap->iv_flags & IEEE80211_F_WME) && (ni->ni_wme_ie != NULL))
+			frm = ieee80211_add_wme(frm, ni);
+
+		/* ath adv. cap */
+		if (ni->ni_ath_flags & vap->iv_ath_cap) {
+			IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+				"Adding ath adv cap ie: ni_ath_flags = %02x, "
+				"iv_ath_cap = %02x", ni->ni_ath_flags,
+				vap->iv_ath_cap);
+
+			/* Setup default key index for static wep case */
+			def_keyindex = IEEE80211_INVAL_DEFKEY;
+			if (((vap->iv_flags & IEEE80211_F_WPA) == 0) &&
+			    (ni->ni_authmode != IEEE80211_AUTH_8021X) &&
+			    (vap->iv_def_txkey != IEEE80211_KEYIX_NONE))
+				def_keyindex = vap->iv_def_txkey;
+
+			frm = ieee80211_add_athAdvCap(frm,
+				ni->ni_ath_flags & vap->iv_ath_cap,
+				def_keyindex);
+		}
+
+		/* 802.11ac vht capability */
+		if (IS_IEEE80211_VHT_ENABLED(ic) && !sta_pure_tkip) {
+			frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap, type);
+			frm = ieee80211_add_vhtop_notif(ni, frm, ic, 0);
+		} else if (IS_IEEE80211_11NG_VHT_ENABLED(ic) && !sta_pure_tkip) {
+			/* QTN 2.4G VHT IE */
+			frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap_24g, type);
+			frm = ieee80211_add_vhtop_notif(ni, frm, ic, 1);
+		}
+
+		/* User-spec */
+		if (vap->iv_opt_ie != NULL) {
+			memcpy(frm, vap->iv_opt_ie, vap->iv_opt_ie_len);
+			frm += vap->iv_opt_ie_len;
+		}
+
+		if (vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].ie) {
+			memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].ie,
+				vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].length);
+			frm += vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_REQ].length;
+		}
+
+		frm = ieee80211_add_qtn_ie(frm, ic,
+			(vap->iv_flags_ext & IEEE80211_FEXT_WDS ? (IEEE80211_QTN_BRIDGEMODE) : 0),
+			(vap->iv_flags_ext & IEEE80211_FEXT_WDS ?
+				(IEEE80211_QTN_BRIDGEMODE | IEEE80211_QTN_LNCB) : 0),
+			vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H,
+			ni->ni_rate_train);
+
+		/* Add QTN Pairing IE */
+		if (vap->qtn_pairing_ie.ie) {
+			frm = ieee80211_add_qtn_pairing_ie(frm, &vap->qtn_pairing_ie);
+		}
+
+		skb_trim(skb, frm - skb->data);
+
+		timer = IEEE80211_TRANS_WAIT;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+			       "send station %s",
+			       (type == IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ? "assoc_req" : "reassoc_req");
+		break;
+
+	case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+	case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
+		/*
+		 * asreq frame format
+		 *	[2] capability information
+		 *	[2] status
+		 *	[2] association ID
+		 *	[tlv] supported rates
+		 *	[tlv] extended supported rates
+		 *      [tlv] WME (if enabled and STA enabled)
+		 *      [tlv] Atheros Advanced Capabilities
+		 *      [tlv] QTN IE
+		 *      [tlv] VSP IE
+		 */
+		skb = ieee80211_getmgtframe(&frm,
+			3 * sizeof(u_int16_t) +
+			2 + IEEE80211_RATE_SIZE +
+			2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) +
+			((IEEE80211_IS_CHAN_ANYN(ic->ic_bsschan) &&
+			  (ic->ic_curmode >= IEEE80211_MODE_11NA)) ?
+					(sizeof(struct ieee80211_ie_htcap) +
+					 sizeof(struct ieee80211_ie_htinfo)) : 0) +
+			sizeof(struct ieee80211_wme_param) +
+			(vap->iv_ath_cap ? sizeof(struct ieee80211_ie_athAdvCap) : 0) +
+			vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].length +
+			sizeof(struct ieee80211_ie_qtn) +
+			(vap->qtn_pairing_ie.ie ? sizeof(struct ieee80211_ie_qtn_pairing) : 0)
+#ifdef CONFIG_QVSP
+			+ ieee80211_vsp_ie_max_len(ic)
+#endif
+			+ (IS_IEEE80211_DUALBAND_VHT_ENABLED(ic) ?
+				(sizeof(struct ieee80211_ie_vhtcap) +
+				 sizeof(struct ieee80211_ie_vhtop)) : 0)
+			+ ((IS_IEEE80211_11NG(ic)) ?
+				(sizeof(struct ieee80211_20_40_coex_param) +
+				sizeof(struct ieee80211_obss_scan_ie)) : 0)
+			+ (IEEE80211_COM_NEIGHREPORT_ENABLED(ic) ? sizeof(struct ieee80211_ie_rrm) : 0)
+			+ (ni->ni_tx_md_ie ? IEEE80211_MDIE_LEN + 2 : 0)
+			+ (ni->ni_tx_ft_ie ? ni->ni_tx_ft_ie[1] + 2: 0)
+			+ (ni->ni_rsn_ie ? ni->ni_rsn_ie[1] + 2: 0)
+			);
+		if (skb == NULL)
+			senderr(ENOMEM, is_tx_nobuf);
+
+		/* Capability Information */
+		capinfo = IEEE80211_CAPINFO_ESS;
+		if (vap->iv_flags & IEEE80211_F_PRIVACY)
+			capinfo |= IEEE80211_CAPINFO_PRIVACY;
+		if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+		    IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+			capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+		if (ic->ic_flags & IEEE80211_F_SHSLOT)
+			capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+		if (ic->ic_flags & IEEE80211_F_DOTH)
+			capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+		if (IEEE80211_COM_NEIGHREPORT_ENABLED(ic))
+			capinfo |= IEEE80211_CAPINFO_RM;
+		*(__le16 *)frm = htole16(capinfo);
+		frm += 2;
+
+		/* status */
+		*(__le16 *)frm = htole16(arg);
+		frm += 2;
+
+		/* Assoc ID */
+		if (arg == IEEE80211_STATUS_SUCCESS) {
+			*(__le16 *)frm = htole16(ni->ni_associd);
+			IEEE80211_NODE_STAT(ni, tx_assoc);
+		} else
+			IEEE80211_NODE_STAT(ni, tx_assoc_fail);
+		frm += 2;
+
+		/* supported rates */
+		frm = ieee80211_add_rates(frm, &ic->ic_sup_rates[mode]);
+
+		if (IS_IEEE80211_11NG(ic)) {
+			frm = ieee80211_add_20_40_bss_coex_ie(frm, vap->iv_coex);
+			frm = ieee80211_add_obss_scan_ie(frm, &ic->ic_obss_ie);
+		}
+
+		/* 802.11w / PMF Timeout element */
+		if(vap->iv_pmf && arg == IEEE80211_STATUS_PMF_REJECT_RETRY) {
+			frm = ieee80211_add_timeout_ie(frm);
+		}
+
+		if (IEEE80211_IS_CHAN_ANYN(ic->ic_bsschan) &&
+			(ic->ic_curmode >= IEEE80211_MODE_11NA)) {
+			if (arg == IEEE80211_STATUS_SUCCESS) {
+				if ((ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) == 0) {
+					vap->iv_ht_anomaly_40MHz_present = 1;
+				}
+
+				if ((ni->ni_htcap.cap & IEEE80211_HTCAP_C_GREENFIELD) == 0)
+					vap->iv_non_gf_sta_present = 1;
+
+				if ((ni->ni_htcap.cap & IEEE80211_HTCAP_C_LSIGTXOPPROT) == 0)
+					vap->iv_lsig_txop_ok = 0;
+
+				vap->iv_ht_flags |= IEEE80211_HTF_HTINFOUPDATE;
+
+			}
+			if (!ap_pure_tkip &&
+				(ni->ni_rsn.rsn_ucastcipher != IEEE80211_CIPHER_TKIP) &&
+					(ni->ni_flags >= IEEE80211_NODE_HT)) {
+				frm = ieee80211_add_htcap(ni, frm, &ic->ic_htcap, type);
+				frm = ieee80211_add_htinfo(ni, frm, &ic->ic_htinfo);
+			}
+		}
+
+		/* ext. suppo. rates */
+		frm = ieee80211_add_xrates(frm, &ic->ic_sup_rates[mode]);
+
+		/* WME */
+		if ((vap->iv_flags & IEEE80211_F_WME) && (ni->ni_wme_ie != NULL))
+			frm = ieee80211_add_wme_param(frm, wme,
+						IEEE80211_VAP_UAPSD_ENABLED(vap), 0);
+
+		if (IEEE80211_COM_NEIGHREPORT_ENABLED(ic))
+			frm = ieee80211_add_rrm_enabled(frm, vap);
+
+		/* athAdvCap */
+		if (vap->iv_ath_cap)
+			frm = ieee80211_add_athAdvCap(frm,
+				vap->iv_ath_cap & ni->ni_ath_flags,
+				ni->ni_ath_defkeyindex);
+
+		if (vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].ie) {
+			memcpy(frm, vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].ie,
+				vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].length);
+			ieee80211_update_bss_tm(frm,
+				vap->app_ie[IEEE80211_APPIE_FRAME_PROBE_RESP].length, ic, vap);
+			frm += vap->app_ie[IEEE80211_APPIE_FRAME_ASSOC_RESP].length;
+		}
+
+		if (ni->ni_qtn_assoc_ie) {
+			frm = ieee80211_add_qtn_ie(frm, ic,
+				((struct ieee80211_ie_qtn *)ni->ni_qtn_assoc_ie)->qtn_ie_flags,
+				(vap->iv_flags_ext & IEEE80211_FEXT_WDS ?
+					(IEEE80211_QTN_BRIDGEMODE | IEEE80211_QTN_LNCB) : 0),
+				vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H,
+				ni->ni_rate_train);
+#ifdef CONFIG_QVSP
+			frm = ieee80211_add_vsp_ie(vap, frm, skb->end);
+			/* QTN WME IE */
+			if (ic->ic_wme.wme_throt_bm && ic->ic_wme.wme_throt_add_qwme_ie &&
+					(vap->iv_flags & IEEE80211_F_WME)) {
+				frm = ieee80211_add_qtn_wme_param(vap, frm);
+			}
+#endif
+		}
+		if (!ap_pure_tkip && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+			if (IS_IEEE80211_VHT_ENABLED(ic)) {
+				frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap, type);
+
+				/* VHT Operation element */
+				if ((IEEE80211_IS_VHT_40(ic)) || (IEEE80211_IS_VHT_20(ic))) {
+					ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+					ic->ic_vhtop.centerfreq0 = 0;
+				} else if (IEEE80211_IS_VHT_80(ic)) {
+					ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+					ic->ic_vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_80MHz;
+				} else {
+					ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_160MHZ;
+					ic->ic_vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_160MHz;
+				}
+				frm = ieee80211_add_vhtop(ni, frm, &ic->ic_vhtop);
+			} else if (IS_IEEE80211_11NG_VHT_ENABLED(ic)) {
+				/* QTN 2.4G VHT IE */
+				frm = ieee80211_add_vhtcap(ni, frm, &ic->ic_vhtcap_24g, type);
+				frm = ieee80211_add_vhtop(ni, frm, &ic->ic_vhtop_24g);
+			}
+		}
+
+		/* Add QTN Pairing IE. */
+		if (vap->qtn_pairing_ie.ie) {
+			frm = ieee80211_add_qtn_pairing_ie(frm, &vap->qtn_pairing_ie);
+		}
+		if (ni->ni_tx_md_ie) {
+			memcpy(frm, ni->ni_tx_md_ie, ni->ni_tx_md_ie[1] + 2);
+			frm += 5;
+		}
+		if (ni->ni_tx_ft_ie) {
+			memcpy(frm, ni->ni_tx_ft_ie, ni->ni_tx_ft_ie[1] + 2);
+			frm += ni->ni_tx_ft_ie[1] + 2;
+		}
+		if (type == IEEE80211_FC0_SUBTYPE_REASSOC_RESP) {
+			if (ni->ni_tx_rsn_ie) {
+				memcpy(frm, ni->ni_tx_rsn_ie, ni->ni_tx_rsn_ie[1] + 2);
+				frm += ni->ni_tx_rsn_ie[1] + 2;
+			}
+		}
+
+		skb_trim(skb, frm - skb->data);
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+			       "send station %s (reason %d)",
+			       (type == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) ? "assoc_resp" : "reassoc_resp", arg);
+
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			ieee80211_eventf(vap->iv_dev, "%sClient associated [%pM]",
+					 QEVT_COMMON_PREFIX, ni->ni_macaddr);
+		}
+		break;
+
+	case IEEE80211_FC0_SUBTYPE_DISASSOC:
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+		    "send station disassociate (reason %d)", arg);
+		skb = ieee80211_getmgtframe(&frm, sizeof(u_int16_t));
+		if (skb == NULL)
+			senderr(ENOMEM, is_tx_nobuf);
+		*(__le16 *)frm = htole16(arg);	/* reason */
+
+		{
+			int msg = IEEE80211_DOT11_MSG_CLIENT_REMOVED;
+			if (vap->iv_opmode == IEEE80211_M_STA) {
+				msg = IEEE80211_DOT11_MSG_AP_DISCONNECTED;
+			}
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+			if ((vap->bsa_status == BSA_STATUS_ACTIVE) &&
+					(vap->iv_opmode == IEEE80211_M_HOSTAP))
+				ieee80211_bsa_disconnect_event_send(vap, ni, arg,
+						IEEE80211_FC0_SUBTYPE_DISASSOC,
+						BSA_DISCONNECT_SELF_GENERATED);
+#endif
+
+			ieee80211_dot11_msg_send(ni->ni_vap,
+					(char *)ni->ni_macaddr,
+					d11_m[msg],
+					d11_c[IEEE80211_DOT11_MSG_REASON_DISASSOCIATED],
+					arg,
+					(arg < DOT11_MAX_REASON_CODE) ? d11_r[arg] : "Reserved",
+					NULL,
+					NULL);
+		}
+
+		if (ic->ic_opmode == IEEE80211_M_STA)
+			del_timer_sync(&ic->ic_obss_timer);
+
+		IEEE80211_NODE_STAT(ni, tx_disassoc);
+		IEEE80211_NODE_STAT_SET(ni, tx_disassoc_code, arg);
+		break;
+
+	case IEEE80211_FC0_SUBTYPE_ACTION: {
+		u_int16_t temp16;
+		struct ieee80211_action_data *action_data = (struct ieee80211_action_data *)arg;
+		u_int8_t cat = action_data->cat;
+
+		IEEE80211_NODE_STAT(ni, tx_action);
+
+		switch (cat) {
+		case IEEE80211_ACTION_CAT_SPEC_MGMT:
+			switch (action_data->action) {
+			case IEEE80211_ACTION_S_TPC_REQUEST:
+			{
+				struct ieee80211_action_tpc_request *request;
+				u_int8_t tx_token;
+
+				request = (struct ieee80211_action_tpc_request *)action_data->params;
+				skb = ieee80211_getmgtframe(&frm, sizeof(u_int16_t) +	/* action header */
+									1 +		/* dialog token */
+									2);		/* tpc request ie */
+				if (skb == NULL)
+					senderr(ENOMEM, is_tx_nobuf);
+				*frm++ = IEEE80211_ACTION_CAT_SPEC_MGMT;
+				*frm++ = IEEE80211_ACTION_S_TPC_REQUEST;
+				tx_token = ni->ni_action_token++;
+				*frm++ = tx_token;
+				*frm++ = IEEE80211_ELEMID_TPCREQ;
+				*frm++ = 0;
+
+				if (request->expire != 0) {
+					skb = ieee80211_ppqueue_pre_tx(ni,
+							skb,
+							IEEE80211_ACTION_CAT_SPEC_MGMT,
+							IEEE80211_ACTION_S_TPC_REPORT,
+							tx_token,
+							request->expire,
+							request->fn_success,
+							request->fn_fail);
+
+					if (skb == NULL) {
+						ret = -ENOMEM;
+						goto bad;
+					}
+				}
+				break;
+			}
+			case IEEE80211_ACTION_S_TPC_REPORT: {
+				struct ieee80211_action_tpc_report *tpc_report = (struct ieee80211_action_tpc_report *)action_data->params;
+				skb = ieee80211_getmgtframe(&frm, sizeof(u_int16_t) +	/* action header */
+									1 +		/* dialog token */
+									4);		/* tpc report ie */
+				if (skb == NULL)
+					senderr(ENOMEM, is_tx_nobuf);
+				*frm++ = IEEE80211_ACTION_CAT_SPEC_MGMT;
+				*frm++ = IEEE80211_ACTION_S_TPC_REPORT;
+				*frm++ = tpc_report->rx_token;
+				*frm++ = IEEE80211_ELEMID_TPCREP;
+				*frm++ = 2;
+				*frm++ = tpc_report->tx_power;
+				*frm++ = tpc_report->link_margin;
+				break;
+			}
+			case IEEE80211_ACTION_S_MEASUREMENT_REQUEST:
+			case IEEE80211_ACTION_S_MEASUREMENT_REPORT:
+				ret = ieee80211_compile_action_measurement_11h(ni, action_data->params, action_data->action, &skb);
+				if (ret != 0)
+					goto bad;
+				break;
+			default:
+				break;
+			}
+			break;
+		case IEEE80211_ACTION_CAT_HT: {
+			switch (action_data->action) {
+			case IEEE80211_ACTION_HT_NCBEAMFORMING:
+				IEEE80211_DPRINTF(vap,IEEE80211_MSG_OUTPUT,
+					"Err: Action frame construction not suppported\n",0);
+				break;
+			case IEEE80211_ACTION_HT_MIMOPWRSAVE:
+				{
+				/* Form the HT SM PS frame - change of mode for this client. */
+				/* Single byte argument, which is formatted as per 802.11n d11.0 section 7.3.1.22 */
+				u_int8_t *p_byte = (u_int8_t *)&action_data->params;
+				skb = ieee80211_getmgtframe(&frm, sizeof(u_int16_t) + /* action header */
+								1 /* SMPS state change */ );
+				if (skb == NULL) {
+					senderr(ENOMEM, is_tx_nobuf);
+				}
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_CAT_HT;
+				frm += 1;
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_HT_MIMOPWRSAVE;
+				frm += 1;
+
+				*(u_int8_t *)frm = *p_byte; /* New power save mode */
+				frm += 1;
+				}
+				break;
+			default:
+				break;
+			}
+
+			if(skb != NULL) {
+				skb_trim(skb, frm - skb->data);
+			}
+			break;
+		}
+		case IEEE80211_ACTION_CAT_BA: {
+			switch (action_data->action) {
+			case IEEE80211_ACTION_BA_ADDBA_REQ: {
+				struct ba_action_req *ba = (struct ba_action_req *)action_data->params;
+
+				skb = ieee80211_getmgtframe(&frm,
+							sizeof(u_int16_t) + /* action header */
+							sizeof(u_int8_t) + /* dialog */
+							sizeof(u_int16_t) + /* BA params */
+							sizeof(u_int16_t) + /* BA timeout */
+							sizeof(u_int16_t) /* BA sequence control */
+							);
+				if (skb == NULL) {
+					senderr(ENOMEM, is_tx_nobuf);
+				}
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_CAT_BA;
+				frm += 1;
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_BA_ADDBA_REQ;
+				frm += 1;
+
+				/* fill ba dialog */
+				ni->ni_ba_tx[ba->tid].dlg_out = (ni->ni_ba_tx[ba->tid].dlg_out + 1) % 0xFF;
+				*(u_int8_t *)frm = ni->ni_ba_tx[ba->tid].dlg_out;
+				frm += 1;
+
+				/* fill ba params (non half word aligned) */
+				temp16 = 0;
+				temp16 |= ((ba->type == IEEE80211_BA_DELAYED) ?
+						IEEE80211_A_BA_DELAYED : IEEE80211_A_BA_IMMEDIATE);
+				temp16 |= ((ba->tid) << IEEE80211_A_BA_TID_S);
+				temp16 |= ((ba->buff_size) << IEEE80211_A_BA_BUFF_SIZE_S);
+				if (!ieee80211_tx_amsdu_disabled(ni))
+					temp16 |= IEEE80211_A_BA_AMSDU_SUPPORTED;
+
+				*(u_int8_t *)frm = temp16 & 0xFF;
+				frm += 1;
+
+				*(u_int8_t *)frm = temp16 >> 8;
+				frm += 1;
+
+				/* fill ba timeout (non half word aligned) */
+				*(u_int8_t *)frm = ba->timeout & 0xFF;
+				frm += 1;
+
+				*(u_int8_t *)frm = ba->timeout >> 8;
+				frm += 1;
+
+				/* fill sequence control (non half word aligned) */
+				*(u_int8_t *)frm = ba->frag | ((ba->seq & 0xF) << 4);
+				frm += 1;
+
+				*(u_int8_t *)frm = (ba->seq  & 0xFF0) >> 4;
+				frm += 1;
+				break;
+			}
+			case IEEE80211_ACTION_BA_ADDBA_RESP: {
+				struct ba_action_resp *ba = (struct ba_action_resp *)action_data->params;
+				skb = ieee80211_getmgtframe(&frm,
+							sizeof(u_int16_t) + /* action header */
+							sizeof(u_int8_t) + /* dialog */
+							sizeof(u_int16_t) + /* status */
+							sizeof(u_int16_t) + /* BA params */
+							sizeof(u_int16_t) /* BA timeout */
+							);
+				if (skb == NULL)
+					senderr(ENOMEM, is_tx_nobuf);
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_CAT_BA;
+				frm += 1;
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_BA_ADDBA_RESP;
+				frm += 1;
+
+				/* fill ba dialog */
+				*(u_int8_t *)frm = ni->ni_ba_rx[ba->tid].dlg_in;
+				frm += 1;
+
+				/* fill ba status (non half word aligned) */
+				*(u_int8_t *)frm = ba->reason & 0xFF;
+				frm += 1;
+
+				*(u_int8_t *)frm = ba->reason >> 8;
+				frm += 1;
+
+				/* fill ba params (non half word aligned) */
+				temp16 = 0;
+				temp16 |= ((ba->type == IEEE80211_BA_DELAYED) ?
+						IEEE80211_A_BA_DELAYED : IEEE80211_A_BA_IMMEDIATE);
+				temp16 |= ((ba->tid) << IEEE80211_A_BA_TID_S);
+				temp16 |= ((ba->buff_size) << IEEE80211_A_BA_BUFF_SIZE_S);
+
+				if (!ieee80211_rx_amsdu_allowed(ni)) {
+					IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACTION,
+							"receive AMSDU within AMPDU is not permitted from \
+							station %pM\n",	ni->ni_macaddr);
+					temp16 &= ~IEEE80211_A_BA_AMSDU_SUPPORTED;
+				} else {
+					temp16 |= IEEE80211_A_BA_AMSDU_SUPPORTED;
+				}
+
+				*(u_int8_t *)frm = temp16 & 0xFF;
+				frm += 1;
+
+				*(u_int8_t *)frm = temp16 >> 8;
+				frm += 1;
+
+				/* fill ba timeout (non half word aligned) */
+				*(u_int8_t *)frm = ba->timeout & 0xFF;
+				frm += 1;
+
+				*(u_int8_t *)frm = ba->timeout >> 8;
+				frm += 1;
+				break;
+			}
+			case IEEE80211_ACTION_BA_DELBA: {
+				struct ba_action_del *ba = (struct ba_action_del *)action_data->params;
+				skb = ieee80211_getmgtframe(&frm,
+							sizeof(u_int16_t) + /* action header */
+							sizeof(u_int16_t) + /* DELBA params */
+							sizeof(u_int16_t) /* DELBA reason */
+							);
+				if (skb == NULL)
+					senderr(ENOMEM, is_tx_nobuf);
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_CAT_BA;
+				frm += 1;
+
+				*(u_int8_t *)frm = IEEE80211_ACTION_BA_DELBA;
+				frm += 1;
+
+				/* fill ba params (non half word aligned) */
+				temp16 = 0;
+				temp16 |= SM(ba->tid, IEEE80211_A_BA_DELBA_TID);
+				temp16 |= SM(ba->initiator, IEEE80211_A_BA_INITIATOR);
+
+				*(u_int8_t *)frm = temp16 & 0xFF;
+				frm += 1;
+
+				*(u_int8_t *)frm = temp16 >> 8;
+				frm += 1;
+
+				/* fill ba reason (non half word aligned) */
+				*(u_int8_t *)frm = ba->reason & 0xFF;
+				frm += 1;
+
+				*(u_int8_t *)frm = ba->reason >> 8;
+				frm += 1;
+				break;
+			}
+			default:
+				break;
+			}
+
+			if(skb != NULL) {
+				skb_trim(skb, frm - skb->data);
+			}
+			break;
+		}
+		case IEEE80211_ACTION_CAT_RM:
+		{
+			switch (action_data->action) {
+			case IEEE80211_ACTION_R_MEASUREMENT_REQUEST:
+			case IEEE80211_ACTION_R_MEASUREMENT_REPORT:
+				ret = ieee80211_compile_action_measurement_11k(ni, action_data->params, action_data->action, &skb);
+				break;
+			case IEEE80211_ACTION_R_LINKMEASURE_REQUEST:
+				ret = ieee80211_compile_action_link_measure_request(ni, action_data->params, &skb);
+				break;
+			case IEEE80211_ACTION_R_LINKMEASURE_REPORT:
+				ret = ieee80211_compile_action_link_measure_report(ni, action_data->params, &skb);
+				break;
+			case IEEE80211_ACTION_R_NEIGHBOR_REQUEST:
+				ret = ieee80211_compile_action_neighbor_report_request(ni, action_data->params, &skb);
+				break;
+			case IEEE80211_ACTION_R_NEIGHBOR_REPORT:
+				ret = ieee80211_compile_action_neighbor_report_response(ni, action_data->params, &skb);
+				break;
+			default:
+				ret = -1;
+				break;
+			}
+
+			if (ret != 0)
+				goto bad;
+
+			break;
+		}
+#ifdef CONFIG_QVSP
+		case IEEE80211_ACTION_CAT_VENDOR: {
+			/* FIXME: Work out which vendor specific type to output. */
+			struct ieee80211_qvsp_act *qvsp_a = (struct ieee80211_qvsp_act *)action_data->params;
+			ret = ieee80211_compile_action_qvsp_frame(vap, qvsp_a, &skb);
+			if (ret != 0)
+				goto bad;
+			break;
+		}
+#endif
+		case IEEE80211_ACTION_CAT_SA_QUERY: {
+			ret = ieee80211_compile_action_sa_query_frame(vap, action_data, &skb);
+			if (ret != 0)
+				goto bad;
+			break;
+		}
+		case IEEE80211_ACTION_CAT_PUBLIC:
+			if (action_data->action == IEEE80211_ACTION_PUB_20_40_COEX) {
+				ret = ieee80211_compile_action_20_40_coex_frame(vap,
+							action_data, &skb, ni);
+				if (ret != 0)
+					goto bad;
+				break;
+			}
+		/* fall through if condition is not true */
+
+		case IEEE80211_ACTION_CAT_FBSS:
+		case IEEE80211_ACTION_CAT_QOS: {
+			struct action_frame_payload *frm_payload =
+					(struct action_frame_payload *)action_data->params;
+
+			skb = ieee80211_getmgtframe(&frm, frm_payload->length);
+
+			if (skb == NULL) {
+				senderr(ENOMEM, is_tx_nobuf);
+			} else {
+				memcpy(frm, frm_payload->data, frm_payload->length);
+				frm += frm_payload->length;
+			}
+			break;
+		}
+		case IEEE80211_ACTION_CAT_WNM: {
+			if (action_data->action == IEEE80211_WNM_BSS_TRANS_MGMT_REQ) {
+				struct btm_request_params *ctrl
+					= (struct btm_request_params *)action_data->params;
+				ret = ieee80211_compile_action_btm_req(ni, ctrl, &skb);
+				if (ret != 0)
+					goto bad;
+			} else {
+				struct action_frame_payload *frm_payload =
+						(struct action_frame_payload *)action_data->params;
+
+				skb = ieee80211_getmgtframe(&frm, frm_payload->length);
+
+				if (skb == NULL) {
+					senderr(ENOMEM, is_tx_nobuf);
+				} else {
+					memcpy(frm, frm_payload->data, frm_payload->length);
+					frm += frm_payload->length;
+				}
+			}
+			break;
+		}
+		default:
+			break;
+		}
+		break;
+	}
+
+	default:
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"invalid mgmt frame type %u", type);
+		senderr(EINVAL, is_tx_unknownmgt);
+		/* NOTREACHED */
+	}
+
+	if (skb != NULL) {
+		if (timer) {
+			if (ni != vap->iv_mgmt_retry_ni || type != vap->iv_mgmt_retry_type ||
+					arg != vap->iv_mgmt_retry_arg) {
+				vap->iv_mgmt_retry_ni = ni;
+				vap->iv_mgmt_retry_type = type;
+				vap->iv_mgmt_retry_arg = arg;
+				vap->iv_mgmt_retry_cnt = 0;
+			}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+			expired_timer = (timer * HZ) + (random32() % HZ);
+#else
+			expired_timer = (timer * HZ) + (prandom_u32() % HZ);
+#endif
+			mod_timer(&vap->iv_mgtsend, jiffies + expired_timer);
+		}
+
+		ieee80211_mgmt_output(ni, skb, type, ni->ni_macaddr);
+	} else {
+		ieee80211_free_node(ni);
+	}
+
+	return 0;
+
+bad:
+	ieee80211_free_node(ni);
+	return ret;
+#undef senderr
+}
+
+/*
+ * Send a PS-POLL control frame to the BSS.  Only valid when operating as a STA.
+ */
+void
+ieee80211_send_pspoll(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct sk_buff *skb;
+	struct ieee80211_ctlframe_addr2 *wh;
+
+	skb = dev_alloc_skb(sizeof(struct ieee80211_ctlframe_addr2));
+	if (skb == NULL) {
+		return;	/* best-effort: PS-POLL is simply dropped on allocation failure */
+	}
+	ieee80211_ref_node(ni);	/* take a node reference for the in-flight frame */
+
+	wh = (struct ieee80211_ctlframe_addr2 *) skb_put(skb, sizeof(struct ieee80211_ctlframe_addr2));
+
+	wh->i_aidordur = htole16(0xc000 | IEEE80211_NODE_AID(ni));	/* AID with the two MSBs set, per the PS-Poll frame format */
+	IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_bssid);	/* RA = BSSID */
+	IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);	/* TA = our address */
+	wh->i_fc[0] = 0;	/* NB: immediately overwritten by the assignment below */
+	wh->i_fc[1] = 0;
+	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
+		IEEE80211_FC0_SUBTYPE_PS_POLL;
+	if (IEEE80211_VAP_IS_SLEEPING(ni->ni_vap))
+		wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;	/* keep advertising power-save while the vap sleeps */
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+			"[%s] send ps poll frame on channel %u\n",
+			ether_sprintf(ni->ni_macaddr),
+			ieee80211_chan2ieee(ic, ic->ic_curchan));
+	ic->ic_send_80211(ic, ni, skb, WME_AC_VO, 0);	/* transmit on the voice access category */
+}
+EXPORT_SYMBOL(ieee80211_send_pspoll);
+
+
+/*
+ * Send a DELBA action frame tearing down the block-ack agreement for tid with ni.
+ */
+void ieee80211_send_delba(struct ieee80211_node *ni, int tid, int tx, int reason)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211_action_data act;
+	struct ba_action_del ba_del;
+
+	memset(&ba_del, 0, sizeof(ba_del));
+	ba_del.tid = tid;
+	ba_del.reason = reason;
+	/* NOTE(review): initiator is the inverse of tx -- confirm intended DELBA direction */
+	ba_del.initiator = !tx;
+
+	memset(&act, 0, sizeof(act));
+	act.cat = IEEE80211_ACTION_CAT_BA;
+	act.action = IEEE80211_ACTION_BA_DELBA;
+	act.params = (void *)&ba_del;
+
+	ic->ic_send_mgmt(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&act);	/* act is stack-allocated; the mgmt send path copies it out synchronously */
+}
+EXPORT_SYMBOL(ieee80211_send_delba);
+
+void ieee80211_rm_req_callback_success(void *ctx)	/* ppq success callback: the requested 11k report arrived; ctx is the target node */
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	if (ni->ni_dotk_meas_state.meas_state_sta.pending) {
+		ni->ni_dotk_meas_state.meas_state_sta.pending = 0;	/* clear pending; status keeps its previous value */
+		wake_up_interruptible(&ni->ni_dotk_waitq);	/* wake the thread waiting for the measurement result */
+	}
+}
+
+void ieee80211_rm_req_callback_fail(void *ctx, int32_t reason)	/* ppq failure callback; ctx is the target node */
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	if (ni->ni_dotk_meas_state.meas_state_sta.pending) {
+		ni->ni_dotk_meas_state.meas_state_sta.status = -ETIMEDOUT;	/* reason is ignored: any failure is surfaced to the waiter as a timeout */
+		ni->ni_dotk_meas_state.meas_state_sta.pending = 0;
+		wake_up_interruptible(&ni->ni_dotk_waitq);
+	}
+}
+
+/*
+ * Send a radio measurement request (STA Statistics) to an associated STA; AP mode only.
+ */
+void
+ieee80211_send_rm_req_stastats(struct ieee80211_node *ni, u_int32_t flags)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+	ieee80211_11k_sub_element_head se_head;
+	ieee80211_11k_sub_element *p_se;
+	struct stastats_subele_vendor *vendor = NULL;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_STA;
+	ctrl.u.sta_stats.duration_tu = 0;
+	if (flags & RM_QTN_MEASURE_MASK) {	/* QTN-specific statistics requested via a vendor sub-element */
+		ctrl.u.sta_stats.group_id = 0;
+		SLIST_INIT(&se_head);
+		p_se = (ieee80211_11k_sub_element *)kmalloc(sizeof(*p_se) + sizeof(struct stastats_subele_vendor), GFP_KERNEL);	/* NOTE(review): presumably freed by the request compiler -- confirm ownership */
+		if (p_se != NULL) {	/* on allocation failure the request goes out without the vendor sub-element */
+			p_se->sub_id = IEEE80211_ELEMID_VENDOR;
+			vendor = (struct stastats_subele_vendor *)p_se->data;
+			vendor->flags = flags;
+			SLIST_INSERT_HEAD(&se_head, p_se, next);
+		}
+		ctrl.u.sta_stats.sub_item = &se_head;
+	} else {
+		ctrl.u.sta_stats.group_id = 221;	/* vendor-specific group id */
+	}
+	ctrl.expire = IEEE80211K_RM_MEASURE_STA_TIMEOUT;
+	ctrl.fn_success = ieee80211_rm_req_callback_success;
+	ctrl.fn_fail = ieee80211_rm_req_callback_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_stastats);
+
+int32_t
+ieee80211_send_rm_rep_stastats(struct ieee80211_node *ni,	/* send an 802.11k STA Statistics measurement report to ni */
+		u_int8_t report_mode,	/* 0: success, report body included; non-zero: body omitted */
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t group_id,
+		u_int16_t duration_tu,
+		void *sub_item)
+{
+	struct ieee80211_meas_report_ctrl mreport_ctrl = {0};	/* zero-init: sibling report senders memset() their ctrl; without it the union is stack garbage when report_mode != 0 */
+	struct ieee80211_action_data action_data;
+
+	mreport_ctrl.meas_type = IEEE80211_RM_MEASTYPE_STA;
+	mreport_ctrl.token = token;
+	mreport_ctrl.meas_token = meas_token;
+	mreport_ctrl.report_mode = report_mode;
+	mreport_ctrl.autonomous = 0;
+	if (report_mode == 0) {	/* only a successful measurement carries a report body */
+		mreport_ctrl.u.sta_stats.group_id = group_id;
+		mreport_ctrl.u.sta_stats.duration_tu = duration_tu;
+		mreport_ctrl.u.sta_stats.sub_item = sub_item;
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &mreport_ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_rep_stastats);
+
+/*
+ * Send a radio measurement request (QTN CCA) to a STA; AP mode only.
+ */
+void
+ieee80211_send_rm_req_cca(struct ieee80211_node *ni)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_QTN_CCA;
+	ctrl.u.qtn_cca.duration_tu = 1;	/* minimal one-TU measurement window */
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);	/* expire/callbacks left zero: fire-and-forget */
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_cca);
+
+void
+ieee80211_send_rm_req_stastats_all(struct ieee80211com *ic)	/* request QTN STA statistics from every associated client on AP vaps */
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	/* Fixed structure STA statistics: bits 0..RM_QTN_MAX set to request every QTN field */
+	u_int32_t flags = BIT(RM_QTN_MAX + 1) - 1;
+
+	IEEE80211_NODE_LOCK_BH(nt);	/* NOTE(review): requests are sent while holding the node table lock */
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni->ni_vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;	/* AP vaps only */
+		if (ni->ni_associd == 0)
+			continue;	/* skip unassociated entries */
+		ieee80211_send_rm_req_stastats(ni, flags);
+	}
+	IEEE80211_NODE_UNLOCK_BH(nt);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_stastats_all);
+
+void
+ieee80211_send_rm_req_chan_load(struct ieee80211_node *ni,	/* send an 802.11k Channel Load measurement request */
+				u_int8_t channel,	/* 0 selects the current channel */
+				u_int16_t duration_ms,
+				unsigned long expire,	/* how long the ppq waits for the matching report */
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_CH_LOAD;
+	ctrl.u.chan_load.channel = (channel == 0 ? ni->ni_ic->ic_curchan->ic_ieee : channel);
+	ctrl.u.chan_load.duration_ms = duration_ms;
+
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_chan_load);
+
+void
+ieee80211_send_rm_rep_chan_load(struct ieee80211_node *ni,	/* send an 802.11k Channel Load measurement report */
+		u_int8_t report_mode,	/* 0: success, report body included; non-zero: body omitted */
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		u_int8_t channel_load)
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_CH_LOAD;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;	/* solicited report, not autonomous */
+	if (report_mode == 0) {
+		ctrl.u.chan_load.op_class = op_class;
+		ctrl.u.chan_load.channel = channel;
+		ctrl.u.chan_load.duration_tu = duration_tu;
+		ctrl.u.chan_load.channel_load = channel_load;
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_rep_chan_load);
+
+void
+ieee80211_send_rm_req_noise_his(struct ieee80211_node *ni,	/* send an 802.11k Noise Histogram measurement request */
+				u_int8_t channel,	/* 0 selects the current channel */
+				u_int16_t duration_ms,
+				unsigned long expire,	/* how long the ppq waits for the matching report */
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_NOISE;
+	ctrl.u.noise_his.channel = (channel == 0 ? ni->ni_ic->ic_curchan->ic_ieee : channel);
+	ctrl.u.noise_his.duration_ms = duration_ms;
+
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_noise_his);
+
+void
+ieee80211_send_rm_rep_noise_his(struct ieee80211_node *ni,	/* send an 802.11k Noise Histogram measurement report */
+		u_int8_t report_mode,	/* 0: success, report body included; non-zero: body omitted */
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		u_int8_t antenna_id,
+		u_int8_t anpi,
+		u_int8_t *ipi)	/* optional IPI density table; copied only when non-NULL */
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_NOISE;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;	/* solicited report, not autonomous */
+	if (report_mode == 0) {
+		ctrl.u.noise_his.op_class = op_class;
+		ctrl.u.noise_his.channel = channel;
+		ctrl.u.noise_his.duration_tu = duration_tu;
+		ctrl.u.noise_his.antenna_id = antenna_id;
+		ctrl.u.noise_his.anpi = anpi;
+		if (ipi != NULL)
+			memcpy(ctrl.u.noise_his.ipi, ipi, sizeof(ctrl.u.noise_his.ipi));	/* caller must supply at least sizeof(ctrl.u.noise_his.ipi) bytes */
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_rep_noise_his);
+
+void
+ieee80211_send_rm_req_beacon(struct ieee80211_node *ni,	/* send an 802.11k Beacon measurement request */
+				u_int8_t op_class,
+				u_int8_t channel,
+				u_int16_t duration_ms,
+				u_int8_t mode,	/* beacon measurement mode (passive/active/table) */
+				u_int8_t *bssid,	/* NULL -> wildcard BSSID ff:ff:ff:ff:ff:ff */
+				u_int8_t *ssid,
+				u_int8_t ssid_len,
+				unsigned long expire,	/* how long the ppq waits for the matching report */
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_BEACON;
+	ctrl.u.beacon.op_class = op_class;
+	/* NB: unlike the other request senders, channel 0 is NOT remapped to the current channel here */
+	ctrl.u.beacon.channel = channel;
+	ctrl.u.beacon.duration_ms = duration_ms;
+	ctrl.u.beacon.mode = mode;
+	ctrl.u.beacon.ssid = ssid;
+	ctrl.u.beacon.ssid_len = ssid_len;
+
+	if (bssid != NULL)
+		memcpy(ctrl.u.beacon.bssid, bssid, IEEE80211_ADDR_LEN);
+	else
+		memset(ctrl.u.beacon.bssid, 0xFF, IEEE80211_ADDR_LEN);	/* wildcard: match any BSSID */
+
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_beacon);
+
+void
+ieee80211_send_rm_rep_beacon(struct ieee80211_node *ni,	/* send an 802.11k Beacon measurement report */
+		u_int8_t report_mode,	/* 0: success, report body included; non-zero: body omitted */
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		u_int8_t reported_frame_info,
+		u_int8_t rcpi,
+		u_int8_t rsni,
+		u_int8_t *bssid,	/* NULL leaves the (zeroed) BSSID untouched */
+		u_int8_t antenna_id,
+		u_int8_t *parent_tsf)	/* optional; copied only when non-NULL */
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_BEACON;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;	/* solicited report, not autonomous */
+	if (report_mode == 0) {
+		ctrl.u.beacon.op_class = op_class;
+		ctrl.u.beacon.channel = channel;
+		ctrl.u.beacon.duration_tu = duration_tu;
+		ctrl.u.beacon.reported_frame_info = reported_frame_info;
+		ctrl.u.beacon.rcpi = rcpi;
+		ctrl.u.beacon.rsni = rsni;
+		if (bssid != NULL)
+			memcpy(ctrl.u.beacon.bssid, bssid, IEEE80211_ADDR_LEN);
+		ctrl.u.beacon.antenna_id = antenna_id;
+		if (parent_tsf != NULL)
+			memcpy(ctrl.u.beacon.parent_tsf, parent_tsf, sizeof(ctrl.u.beacon.parent_tsf));	/* caller must supply at least sizeof(ctrl.u.beacon.parent_tsf) bytes */
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_rep_beacon);
+
+void
+ieee80211_send_rm_req_frame(struct ieee80211_node *ni,	/* send an 802.11k Frame measurement request */
+				u_int8_t op_class,
+				u_int8_t channel,	/* 0 selects the current channel */
+				u_int16_t duration_ms,
+				u_int8_t type,	/* frame request type subfield */
+				u_int8_t *mac_address,	/* NULL -> wildcard ff:ff:ff:ff:ff:ff */
+				unsigned long expire,	/* how long the ppq waits for the matching report */
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_FRAME;
+	ctrl.u.frame.op_class = op_class;
+	ctrl.u.frame.channel = (channel == 0 ? ni->ni_ic->ic_curchan->ic_ieee : channel);
+	ctrl.u.frame.duration_ms = duration_ms;
+	ctrl.u.frame.type = type;
+	if (mac_address != NULL)
+		memcpy(ctrl.u.frame.mac_address, mac_address, IEEE80211_ADDR_LEN);
+	else
+		memset(ctrl.u.frame.mac_address, 0xFF, IEEE80211_ADDR_LEN);	/* wildcard: count frames from any transmitter */
+
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_frame);
+
+void
+ieee80211_send_rm_rep_frame(struct ieee80211_node *ni,	/* send an 802.11k Frame measurement report */
+		u_int8_t report_mode,	/* 0: success, report body included; non-zero: body omitted */
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t op_class,
+		u_int8_t channel,
+		u_int16_t duration_tu,
+		void *sub_ele)	/* sub-element list attached to the report body */
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_FRAME;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;	/* solicited report, not autonomous */
+	if (report_mode == 0) {
+		ctrl.u.frame.op_class = op_class;
+		ctrl.u.frame.channel = channel;
+		ctrl.u.frame.duration_tu = duration_tu;
+		ctrl.u.frame.sub_item = sub_ele;
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_rep_frame);
+
+/*
+ * Send an 802.11k "Transmit Stream/Category" measurement request to ni.
+ * The peer STA address is taken from the caller when peer_sta is non-NULL
+ * and not all-zero; otherwise the destination node's own MAC is used.
+ */
+void
+ieee80211_send_rm_req_tran_stream_cat(struct ieee80211_node *ni,
+				u_int16_t duration_ms,
+				u_int8_t *peer_sta,
+				u_int8_t tid,
+				u_int8_t bin0,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+	u_int8_t null_mac[IEEE80211_ADDR_LEN] = {0};
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_CATEGORY;
+	ctrl.u.tran_stream_cat.duration_ms = duration_ms;
+	/*
+	 * Bug fix: the original compared null_mac against the ctrl field
+	 * that was just zeroed by the memset above (always equal), so the
+	 * caller-supplied peer_sta was never copied.  Compare against the
+	 * argument instead.
+	 */
+	if (peer_sta != NULL && memcmp(null_mac, peer_sta, IEEE80211_ADDR_LEN) != 0)
+		memcpy(ctrl.u.tran_stream_cat.peer_sta, peer_sta, IEEE80211_ADDR_LEN);
+	else
+		memcpy(ctrl.u.tran_stream_cat.peer_sta, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	ctrl.u.tran_stream_cat.tid = tid;
+	ctrl.u.tran_stream_cat.bin0 = bin0;
+
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_tran_stream_cat);
+
+/*
+ * Send an 802.11k "Multicast Diagnostics" measurement request to ni.
+ * group_mac == NULL leaves the group address zeroed (the memset is
+ * redundant after the ctrl memset but kept for explicitness).
+ */
+void
+ieee80211_send_rm_req_multicast_diag(struct ieee80211_node *ni,
+				u_int16_t duration_ms,
+				u_int8_t *group_mac,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_MUL_DIAG;
+	ctrl.u.multicast_diag.duration_ms = duration_ms;
+	if (group_mac != NULL)
+		memcpy(ctrl.u.multicast_diag.group_mac, group_mac, IEEE80211_ADDR_LEN);
+	else
+		memset(ctrl.u.multicast_diag.group_mac, 0, IEEE80211_ADDR_LEN);
+
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_req_multicast_diag);
+
+/*
+ * Send an 802.11k "Multicast Diagnostics" measurement report to ni.
+ * Measurement fields are populated only for a successful report
+ * (report_mode == 0); otherwise only the mode bits are sent.
+ */
+void
+ieee80211_send_rm_rep_multicast_diag(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int16_t duration_tu,
+		u_int8_t *group_mac,
+		u_int8_t reason,
+		u_int32_t mul_rec_msdu_cnt,
+		u_int16_t first_seq_num,
+		u_int16_t last_seq_num,
+		u_int16_t mul_rate)
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_RM_MEASTYPE_MUL_DIAG;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;
+	if (report_mode == 0) {
+		ctrl.u.multicast_diag.duration_tu = duration_tu;
+		if (group_mac != NULL)
+			memcpy(ctrl.u.multicast_diag.group_mac, group_mac, IEEE80211_ADDR_LEN);
+		else
+			memset(ctrl.u.multicast_diag.group_mac, 0, IEEE80211_ADDR_LEN);
+		ctrl.u.multicast_diag.reason = reason;
+		ctrl.u.multicast_diag.mul_rec_msdu_cnt = mul_rec_msdu_cnt;
+		ctrl.u.multicast_diag.first_seq_num = first_seq_num;
+		ctrl.u.multicast_diag.last_seq_num = last_seq_num;
+		ctrl.u.multicast_diag.mul_rate = mul_rate;
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_rm_rep_multicast_diag);
+
+/*
+ * Send an 802.11h Spectrum Management "Basic" measurement request to ni.
+ * channel == 0 selects the current channel; tsf_offset == 0 requests an
+ * immediate measurement, otherwise the start TSF is now + offset (ms).
+ * Returns the result of IEEE80211_SEND_MGMT.
+ */
+int32_t ieee80211_send_meas_request_basic(struct ieee80211_node *ni,
+		u_int8_t channel,
+		u_int16_t tsf_offset,
+		u_int16_t duration,
+		unsigned long expire,
+		void *fn_success,
+		void *fn_fail)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	u_int64_t tsf;
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	/* Bug fix: zero the control block so unused union/padding bytes are
+	 * not handed to the frame builder uninitialized (every RM request
+	 * helper in this file does the same). */
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_CCA_MEASTYPE_BASIC;
+	if (channel == 0)
+		ctrl.u.basic.channel = ic->ic_curchan->ic_ieee;
+	else
+		ctrl.u.basic.channel = channel;
+	ic->ic_get_tsf(&tsf);
+	if (tsf_offset == 0)
+		ctrl.u.basic.start_tsf = 0;
+	else
+		ctrl.u.basic.start_tsf = tsf + tsf_offset * 1000;	/* TSF ticks are microseconds */
+	ctrl.u.basic.duration_ms = duration;
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action = IEEE80211_ACTION_S_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_meas_request_basic);
+
+/*
+ * Send an 802.11h Spectrum Management "CCA" measurement request to ni.
+ * Same channel/start-time conventions as ieee80211_send_meas_request_basic.
+ * Returns the result of IEEE80211_SEND_MGMT.
+ */
+int32_t ieee80211_send_meas_request_cca(struct ieee80211_node *ni,
+		u_int8_t channel,
+		u_int16_t tsf_offset,
+		u_int16_t duration,
+		unsigned long expire,
+		void *fn_success,
+		void *fn_fail)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	u_int64_t tsf;
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	/* Bug fix: zero the control block so unused union/padding bytes are
+	 * not handed to the frame builder uninitialized. */
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_CCA_MEASTYPE_CCA;
+	if (channel == 0)
+		ctrl.u.cca.channel = ic->ic_curchan->ic_ieee;
+	else
+		ctrl.u.cca.channel = channel;
+	ic->ic_get_tsf(&tsf);
+	if (tsf_offset == 0)
+		ctrl.u.cca.start_tsf = 0;
+	else
+		ctrl.u.cca.start_tsf = tsf + tsf_offset * 1000;	/* TSF ticks are microseconds */
+	ctrl.u.cca.duration_ms = duration;
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action = IEEE80211_ACTION_S_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_meas_request_cca);
+
+/*
+ * Send an 802.11h Spectrum Management "RPI histogram" measurement request
+ * to ni.  Same channel/start-time conventions as the basic/CCA variants.
+ * Returns the result of IEEE80211_SEND_MGMT.
+ */
+int32_t ieee80211_send_meas_request_rpi(struct ieee80211_node *ni,
+		u_int8_t channel,
+		u_int16_t tsf_offset,
+		u_int16_t duration,
+		unsigned long expire,
+		void *fn_success,
+		void *fn_fail)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	u_int64_t tsf;
+	struct ieee80211_meas_request_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	/* Bug fix: zero the control block so unused union/padding bytes are
+	 * not handed to the frame builder uninitialized. */
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_CCA_MEASTYPE_RPI;
+	if (channel == 0)
+		ctrl.u.rpi.channel = ic->ic_curchan->ic_ieee;
+	else
+		ctrl.u.rpi.channel = channel;
+	ic->ic_get_tsf(&tsf);
+	if (tsf_offset == 0)
+		ctrl.u.rpi.start_tsf = 0;
+	else
+		ctrl.u.rpi.start_tsf = tsf + tsf_offset * 1000;	/* TSF ticks are microseconds */
+	ctrl.u.rpi.duration_ms = duration;
+	ctrl.expire = expire;
+	ctrl.fn_success = (ppq_callback_success)fn_success;
+	ctrl.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action = IEEE80211_ACTION_S_MEASUREMENT_REQUEST;
+	action_data.params = &ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_meas_request_rpi);
+
+/*
+ * Send an 802.11h "Basic" measurement report to ni.  Payload fields are
+ * filled only for a successful report (report_mode == 0).
+ * Returns the result of IEEE80211_SEND_MGMT.
+ */
+int32_t ieee80211_send_meas_report_basic(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t channel,
+		u_int64_t start_tsf,
+		u_int16_t duration,
+		u_int8_t basic_report)
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_CCA_MEASTYPE_BASIC;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;
+	if (report_mode == 0) {
+		ctrl.u.basic.channel = channel;
+		ctrl.u.basic.start_tsf = start_tsf;
+		ctrl.u.basic.duration_tu = duration;
+		ctrl.u.basic.basic_report = basic_report;
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_meas_report_basic);
+
+/*
+ * Send an 802.11h "CCA" measurement report to ni.  Payload fields are
+ * filled only for a successful report (report_mode == 0).
+ * Returns the result of IEEE80211_SEND_MGMT.
+ */
+int32_t ieee80211_send_meas_report_cca(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t channel,
+		u_int64_t start_tsf,
+		u_int16_t duration,
+		u_int8_t cca_report)
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_CCA_MEASTYPE_CCA;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;
+	if (report_mode == 0) {
+		ctrl.u.cca.channel = channel;
+		ctrl.u.cca.start_tsf = start_tsf;
+		ctrl.u.cca.duration_tu = duration;
+		ctrl.u.cca.cca_report = cca_report;
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_meas_report_cca);
+
+/*
+ * Send an 802.11h "RPI histogram" measurement report to ni.  Payload
+ * fields are filled only for a successful report (report_mode == 0);
+ * rpi_report points at the histogram bins to copy.
+ * Returns the result of IEEE80211_SEND_MGMT.
+ */
+int32_t ieee80211_send_meas_report_rpi(struct ieee80211_node *ni,
+		u_int8_t report_mode,
+		u_int8_t token,
+		u_int8_t meas_token,
+		u_int8_t channel,
+		u_int64_t start_tsf,
+		u_int16_t duration,
+		u_int8_t *rpi_report)
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = IEEE80211_CCA_MEASTYPE_RPI;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;
+	if (report_mode == 0) {
+		ctrl.u.rpi.channel = channel;
+		ctrl.u.rpi.start_tsf = start_tsf;
+		ctrl.u.rpi.duration_tu = duration;
+		/* Robustness fix: guard against a NULL histogram pointer as
+		 * the sibling helpers do for their pointer arguments; the
+		 * report bins then stay zeroed from the memset above. */
+		if (rpi_report != NULL)
+			memcpy(ctrl.u.rpi.rpi_report, rpi_report, sizeof(ctrl.u.rpi.rpi_report));
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action = IEEE80211_ACTION_R_MEASUREMENT_REPORT;
+	action_data.params = &ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_meas_report_rpi);
+
+/*
+ * Send an 802.11k Link Measurement request to ni, with queued-frame
+ * expiry and success/failure callbacks.
+ */
+void ieee80211_send_link_measure_request(struct ieee80211_node *ni,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_link_measure_request request;
+	struct ieee80211_action_data action_data;
+
+	memset(&request, 0, sizeof(request));
+	request.ppq.expire = expire;
+	request.ppq.fn_success = (ppq_callback_success)fn_success;
+	request.ppq.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_LINKMEASURE_REQUEST;
+	action_data.params = &request;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_link_measure_request);
+
+/*
+ * Send an 802.11k Neighbor Report request to ni, with queued-frame
+ * expiry and success/failure callbacks.
+ */
+void ieee80211_send_neighbor_report_request(struct ieee80211_node *ni,
+				unsigned long expire,
+				void *fn_success,
+				void *fn_fail)
+{
+	struct ieee80211_neighbor_report_request request;
+	struct ieee80211_action_data action_data;
+
+	memset(&request, 0, sizeof(request));
+	request.ppq.expire = expire;
+	request.ppq.fn_success = (ppq_callback_success)fn_success;
+	request.ppq.fn_fail = (ppq_callback_fail)fn_fail;
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_NEIGHBOR_REQUEST;
+	action_data.params = &request;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+EXPORT_SYMBOL(ieee80211_send_neighbor_report_request);
+
+/*
+ * Send an 802.11k Neighbor Report response to ni.  'table' is an array of
+ * bss_num pointers to neighbor report items, copied by reference into the
+ * response descriptor.
+ * NOTE(review): bss_num is not bounded against the size of
+ * response.neighbor_report_ptr[] here — callers are presumably trusted to
+ * stay within it; confirm against the struct definition.
+ */
+void ieee80211_send_neighbor_report_response(struct ieee80211_node *ni,
+					u_int8_t token,
+					u_int8_t bss_num,
+					void *table)
+{
+	struct ieee80211_neighbor_report_response response;
+	struct ieee80211_action_data action_data;
+	struct ieee80211_neighbor_report_request_item** bss_table;
+	u_int8_t i;
+
+	memset(&response, 0, sizeof(response));
+	response.token = token;
+	response.bss_num = bss_num;
+	bss_table = (struct ieee80211_neighbor_report_request_item**)table;
+	for (i = 0; i < bss_num; i++) {
+		response.neighbor_report_ptr[i] = bss_table[i];
+	}
+
+	action_data.cat = IEEE80211_ACTION_CAT_RM;
+	action_data.action = IEEE80211_ACTION_R_NEIGHBOR_REPORT;
+	action_data.params = &response;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+
+/*
+ * Send a VHT Operating Mode Notification action frame to ni.  The single
+ * operating-mode byte packs the channel width in bits 0-3 (bw) and the
+ * receiver NSS in bits 4-6 (rx_nss << 4).  A node reference is taken for
+ * the mgmt output path.
+ */
+void
+ieee80211_send_vht_opmode_action(struct ieee80211vap *vap,
+					struct ieee80211_node *ni,
+					uint8_t bw, uint8_t rx_nss)
+{
+	struct sk_buff *skb;
+	int frm_len;
+	u_int8_t *frm;
+
+	frm_len = IEEE80211_NCW_ACT_LEN;
+
+	skb = ieee80211_getmgtframe(&frm, frm_len);
+	if (skb == NULL) {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"%s: cannot get buf; size %u", __func__, frm_len);
+		vap->iv_stats.is_tx_nobuf++;
+		return;
+	}
+
+	*frm++ = IEEE80211_ACTION_CAT_VHT;
+	*frm++ = IEEE80211_ACTION_VHT_OPMODE_NOTIFICATION;
+	*frm++ = bw | (rx_nss << 4);
+
+	ieee80211_ref_node(ni);
+	ieee80211_mgmt_output(ni, skb, IEEE80211_FC0_SUBTYPE_ACTION,
+					ni->ni_macaddr);
+}
+
+/* sending Notify Channel Width Action
+ * if the ni is NULL, then it sends it as broadcast
+ * otherwise, unicast it to the targeted node
+ *     width == 0:  use HT20
+ *     width != 0   use any channel width the node support
+ *
+ * Notify Channel Width Action frame format:
+ *     |category: 1byte, value(7): CAT_HT | action: 1byte, value(0): NCW | Channel_width: 1byte, value: 0/HT20, or 1/HT40 |
+ */
+void
+ieee80211_send_notify_chan_width_action(struct ieee80211vap *vap,
+					struct ieee80211_node *ni,
+					u_int32_t width)
+{
+	// struct ieee80211com *ic = vap->iv_ic;
+	struct sk_buff *skb;
+	int frm_len;
+	u_int8_t *frm;
+	/* ni == NULL requests a broadcast via the BSS node */
+	int is_bcst = (ni == NULL) ? 1 : 0;
+
+	frm_len = IEEE80211_NCW_ACT_LEN;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_11N,
+			  "%s: Sending Notify Chan Width %s action to %s\n",
+			  __func__,
+			  (width) ? "HT40/20" : "HT20",
+			  (is_bcst) ? "bcast" : ether_sprintf(ni->ni_macaddr));
+
+	skb = ieee80211_getmgtframe(&frm, frm_len);
+	if (skb == NULL) {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"%s: cannot get buf; size %u", __func__, frm_len);
+		vap->iv_stats.is_tx_nobuf++;
+		return;
+	}
+
+	*frm++ = IEEE80211_ACTION_CAT_HT;            /* Category */
+	*frm++ = IEEE80211_ACTION_HT_TXCHWIDTH;      /* notify channel width action */
+	if (width) {
+		*frm++ = IEEE80211_CWM_WIDTH40;
+	} else {
+		*frm++ = IEEE80211_CWM_WIDTH20;
+	}
+
+	/* Reference the destination node for the mgmt output path: the BSS
+	 * node for broadcast, the target node for unicast. */
+	if (is_bcst) {
+		ieee80211_ref_node(vap->iv_bss);
+		ieee80211_mgmt_output(vap->iv_bss, skb, IEEE80211_FC0_SUBTYPE_ACTION,
+					vap->iv_dev->broadcast);
+	} else {
+		ieee80211_ref_node(ni);
+		ieee80211_mgmt_output(ni, skb, IEEE80211_FC0_SUBTYPE_ACTION,
+					ni->ni_macaddr);
+	}
+}
+EXPORT_SYMBOL(ieee80211_send_notify_chan_width_action);
+
+/* sending 11ac(VHT) Group ID mgmt Action
+ * node group membership and the positions are stored inside the node struct
+ * VHT group ID mgmt action frame format:
+ * |category: 1byte, value(21): CAT_VHT | action: 1byte, value(1): GRP ID MGMT | membership status array: 8 bytes | user position array: 16 bytes|
+ */
+void
+ieee80211_send_vht_grp_id_mgmt_action(struct ieee80211vap *vap,
+				      struct ieee80211_node *ni)
+{
+	struct sk_buff *skb;
+	int frm_len;
+	u_int8_t *frm;
+
+	frm_len = IEEE80211_MU_GRP_ID_ACT_LEN;
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_VHT,
+			  "%s: Sending MU GRP ID action to %s\n",
+			  __func__, ether_sprintf(ni->ni_macaddr));
+
+	skb = ieee80211_getmgtframe(&frm, frm_len);
+	if (skb == NULL) {
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"%s: cannot get buf; size %u", __func__, frm_len);
+		vap->iv_stats.is_tx_nobuf++;
+		return;
+	}
+
+	/* Category VHT */
+	*frm++ = IEEE80211_ACTION_CAT_VHT;
+
+	/* MU GRP ID action */
+	*frm++ = IEEE80211_ACTION_VHT_MU_GRP_ID;
+
+	/* Membership status + user position arrays come straight from the
+	 * node's stored MU group state. */
+	memcpy(frm, &ni->ni_mu_grp, sizeof(ni->ni_mu_grp));
+	frm += sizeof(ni->ni_mu_grp);
+
+	ieee80211_ref_node(ni);
+	ieee80211_mgmt_output(ni, skb, IEEE80211_FC0_SUBTYPE_ACTION,
+			      ni->ni_macaddr);
+
+}
+
+EXPORT_SYMBOL(ieee80211_send_vht_grp_id_mgmt_action);
+
+/*
+ * Send a 20/40 BSS Coexistence public action frame carrying the VAP's
+ * current coexistence byte to the BSS node.  Returns 0 on success or
+ * -EFAULT when there is no associated BSS node.
+ */
+int
+ieee80211_send_20_40_bss_coex(struct ieee80211vap *vap)
+{
+
+	struct ieee80211_action_data action_data;
+	struct ieee80211_node *ni = vap->iv_bss;
+	uint8_t coex = vap->iv_coex;
+
+	if ((!ni) || (!IEEE80211_AID(ni->ni_associd)))
+		return -EFAULT;
+
+	action_data.cat = IEEE80211_ACTION_CAT_PUBLIC;
+	action_data.action = IEEE80211_ACTION_PUB_20_40_COEX;
+	action_data.params = &coex;
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+	return 0;
+}
+
+/*
+ * Send an 802.11w SA Query action frame (request or response, per
+ * 'action') carrying transaction id 'tid'.  Only sent when management
+ * frame protection is negotiated with the peer; the send time is recorded
+ * for SA-query timeout tracking.
+ */
+void ieee80211_send_sa_query (struct ieee80211_node *ni, u_int8_t action,
+					u_int16_t tid)
+{
+	struct ieee80211_action_data action_data;
+
+	if(RSN_IS_MFP(ni->ni_rsn.rsn_caps)) {
+		action_data.cat = IEEE80211_ACTION_CAT_SA_QUERY;
+		action_data.action = action;
+		action_data.params = &tid;
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+		ni->ni_sa_query_timeout = jiffies;
+	}
+}
+
+/*
+ * Timer callback: no BSS Transition Management response arrived in time.
+ * Clears the outstanding request token so a new BTM request may be sent.
+ */
+void ieee80211_btm_resp_timeout(unsigned long arg)
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)arg;
+	IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ACTION, \
+			"BSS Transition management response timeout token(%d)\n", ni->ni_btm_req);
+	ni->ni_btm_req = 0;
+}
+
+/*
+ * Build and send an 802.11v WNM BSS Transition Management Request to ni.
+ * Optional pieces: bss_term_dur (BSS termination duration sub-element),
+ * url (session information URL, max 255 bytes), nei_rep/nei_rep_len
+ * (pre-encoded neighbor report elements).  Arms a response-wait timer
+ * keyed by tx_token; an earlier pending request's timer is cancelled.
+ * Returns 0 on success, -1 when the URL is too long.
+ * NOTE(review): the debug format "valid_int=0x%u" mixes a 0x prefix with
+ * a decimal conversion — cosmetic only, left as-is.
+ */
+static int
+ieee80211_send_wnm_bss_tm_req(struct ieee80211_node *ni,
+			uint8_t mode,
+			uint16_t disassoc_timer,
+			uint8_t valid_int,
+			const uint8_t *bss_term_dur,
+			const char *url,
+			const uint8_t *nei_rep,
+			size_t nei_rep_len,
+			uint8_t tx_token)
+{
+	struct btm_request_params req;
+	struct ieee80211_action_data action_data;
+	int url_len = 0;
+
+	if (url) {
+		/* Session Information URL */
+		url_len = strlen(url);
+		if (url_len > 255) {
+			return -1;
+		}
+	}
+	IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ACTION,
+			"WNM: Send BSS Transition Management Request to %s"
+			" req_mode=0x%x disassoc_timer=%u valid_int=0x%u\n",
+			ether_sprintf(ni->ni_macaddr), mode, disassoc_timer, valid_int);
+
+	memset(&req, 0, sizeof(req));
+
+	req.dialog_token = tx_token;
+	req.request_mode = mode;
+	req.disassoc_timer = disassoc_timer;
+	req.validity_interval = valid_int;
+	req.bss_term_dur = (uint8_t *)bss_term_dur;
+	req.url = (char *)url;
+	req.neigh_reports = (uint8_t *)nei_rep;
+	req.neigh_reports_length = nei_rep_len;
+
+	action_data.cat = IEEE80211_ACTION_CAT_WNM;
+	action_data.action = IEEE80211_WNM_BSS_TRANS_MGMT_REQ;
+	action_data.params = &req;
+
+	/* Replace any outstanding request: stop its timer before re-arming. */
+	if (ni->ni_btm_req != 0) {
+		del_timer(&ni->ni_btm_resp_wait_timer);
+		ni->ni_btm_req = 0;
+	}
+	ni->ni_btm_req = tx_token;
+	ni->ni_btm_resp_wait_timer.function = ieee80211_btm_resp_timeout;
+	ni->ni_btm_resp_wait_timer.data = (unsigned long)ni;
+	/* Wait (disassoc_timer + 1) beacon intervals for the response. */
+	mod_timer(&ni->ni_btm_resp_wait_timer, jiffies + ((disassoc_timer + 1) * ni->ni_intval));
+
+	IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+	return 0;
+}
+
+/*
+ * Send a solicited (STA-initiated query response) WNM BSS Transition
+ * Management Request.  Thin wrapper over ieee80211_send_wnm_bss_tm_req
+ * with no extra validation.
+ */
+int
+ieee80211_send_wnm_bss_tm_solicited_req(struct ieee80211_node *ni,
+					uint8_t mode,
+					uint16_t disassoc_timer,
+					uint8_t valid_int,
+					const uint8_t *bss_term_dur,
+					const char *url,
+					const uint8_t *nei_rep,
+					size_t nei_rep_len,
+					uint8_t tx_token)
+{
+	return ieee80211_send_wnm_bss_tm_req(ni, mode, disassoc_timer, valid_int,
+					bss_term_dur, url, nei_rep, nei_rep_len, tx_token);
+}
+
+/*
+ * Send an unsolicited (AP-initiated) WNM BSS Transition Management
+ * Request.  Validates the node pointer and substitutes the default
+ * validity interval when none was supplied.
+ * Returns the wrapped sender's result, or -1 for a NULL node.
+ */
+int
+ieee80211_send_wnm_bss_tm_unsolicited_req(struct ieee80211_node *ni,
+					uint8_t mode,
+					uint16_t disassoc_timer,
+					uint8_t valid_int,
+					const uint8_t *bss_term_dur,
+					const char *url,
+					const uint8_t *nei_rep,
+					size_t nei_rep_len,
+					uint8_t tx_token)
+{
+	if (!ni)
+		return -1;
+
+	/* Bug fix: the default validity interval must replace valid_int,
+	 * not clobber disassoc_timer as the original code did
+	 * (WNM_BTM_DEFAULT_VAL_INTVAL is the default *validity interval*). */
+	if (valid_int == 0)
+		valid_int = WNM_BTM_DEFAULT_VAL_INTVAL;
+
+	return ieee80211_send_wnm_bss_tm_req(ni, mode, disassoc_timer, valid_int,
+					bss_term_dur, url, nei_rep, nei_rep_len, tx_token);
+}
diff --git a/drivers/qtn/wlan/ieee80211_power.c b/drivers/qtn/wlan/ieee80211_power.c
new file mode 100644
index 0000000..b8bc967
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_power.c
@@ -0,0 +1,361 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_power.c 1721 2006-09-20 08:45:13Z mentor $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 power save support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "qtn/qtn_global.h"
+
+#include "net80211/if_media.h"
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_proto.h"
+
+static void ieee80211_set_tim(struct ieee80211_node *ni, int set);
+
+/* Per-device power-save attach hook; nothing to initialize here. */
+void
+ieee80211_power_attach(struct ieee80211com *ic)
+{
+}
+
+/* Per-device power-save detach hook; nothing to tear down here. */
+void
+ieee80211_power_detach(struct ieee80211com *ic)
+{
+}
+
+/*
+ * Per-VAP power-save attach: install the default TIM-update callback for
+ * modes that buffer frames for sleeping stations (AP and IBSS).
+ */
+void
+ieee80211_power_vattach(struct ieee80211vap *vap)
+{
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+	    vap->iv_opmode == IEEE80211_M_IBSS) {
+		/* NB: driver should override */
+		vap->iv_set_tim = ieee80211_set_tim;
+	}
+}
+
+/*
+ * Late per-VAP attach: allocate the TIM bitmap (one bit per AID) for AP
+ * mode.  On allocation failure the VAP continues with iv_tim_len == 0.
+ */
+void
+ieee80211_power_latevattach(struct ieee80211vap *vap)
+{
+	/*
+	 * Allocate these only if needed.  Beware that we
+	 * know adhoc mode doesn't support ATIM yet...
+	 */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		vap->iv_tim_len = howmany(vap->iv_max_aid,8) * sizeof(u_int8_t);
+		MALLOC(vap->iv_tim_bitmap, u_int8_t *, vap->iv_tim_len,
+			M_DEVBUF, M_NOWAIT | M_ZERO);
+		if (vap->iv_tim_bitmap == NULL) {
+			printf("%s: no memory for TIM bitmap!\n", __func__);
+			/* XXX good enough to keep from crashing? */
+			vap->iv_tim_len = 0;
+		}
+	}
+}
+
+/*
+ * Per-VAP power-save detach: drop the TIM callback and free the TIM
+ * bitmap if one was allocated.
+ */
+void
+ieee80211_power_vdetach(struct ieee80211vap *vap)
+{
+	vap->iv_set_tim = NULL;
+	if (vap->iv_tim_bitmap != NULL) {
+		FREE(vap->iv_tim_bitmap, M_DEVBUF);
+		vap->iv_tim_bitmap = NULL;
+	}
+}
+
+/*
+ * Clear any frames queued on a node's power save queue.
+ * The number of frames that were present is returned.
+ */
+int
+ieee80211_node_saveq_drain(struct ieee80211_node *ni)
+{
+	struct sk_buff *skb;
+	int qlen;
+
+	IEEE80211_NODE_SAVEQ_LOCK(ni);
+	qlen = skb_queue_len(&ni->ni_savedq);
+	while ((skb = __skb_dequeue(&ni->ni_savedq)) != NULL) {
+		/* Each queued skb holds a node reference; release it along
+		 * with the buffer. */
+		ieee80211_free_node(ni);
+		dev_kfree_skb_any(skb);
+	}
+	IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+
+	return qlen;
+}
+
+/*
+ * Age frames on the power save queue. The aging interval is
+ * 4 times the listen interval specified by the station.  This
+ * number is factored into the age calculations when the frame
+ * is placed on the queue.  We store ages as time differences
+ * so we can check and/or adjust only the head of the list.
+ * If a frame's age exceeds the threshold then discard it.
+ * The number of frames discarded is returned so the caller
+ * can check if it needs to adjust the tim.
+ */
+int
+ieee80211_node_saveq_age(struct ieee80211_node *ni)
+{
+	int discard = 0;
+
+	/* XXX racey but good 'nuf? */
+	if (IEEE80211_NODE_SAVEQ_QLEN(ni) != 0) {
+#ifdef IEEE80211_DEBUG
+		struct ieee80211vap *vap = ni->ni_vap;
+#endif
+		struct sk_buff *skb;
+
+		IEEE80211_NODE_SAVEQ_LOCK(ni);
+		/* Ages are stored as deltas from the previous frame, so only
+		 * the head needs checking: discard heads that expire within
+		 * the next aging interval. */
+		while ((skb = skb_peek(&ni->ni_savedq)) != NULL &&
+		     M_AGE_GET(skb) < IEEE80211_INACT_WAIT) {
+			IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+				"discard frame, age %u", M_AGE_GET(skb));
+
+			skb = __skb_dequeue(&ni->ni_savedq);
+			if(skb)
+			  dev_kfree_skb_any(skb);
+			discard++;
+		}
+		/* Surviving head absorbs the elapsed interval so relative
+		 * ages further down the queue stay correct. */
+		if (skb != NULL)
+			M_AGE_SUB(skb, IEEE80211_INACT_WAIT);
+		IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+
+		IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+			"discard %u frames for age", discard);
+		IEEE80211_NODE_STAT_ADD(ni, ps_discard, discard);
+	}
+	return discard;
+}
+
+/*
+ * Indicate whether there are frames queued for a station in power-save mode.
+ */
+/* Default iv_set_tim callback — intentionally a no-op on this platform. */
+static void
+ieee80211_set_tim(struct ieee80211_node *ni, int set)
+{
+/*
+* TIM IE is programmed in QTN FW hence code in this routine is removed
+*/
+}
+
+/*
+ * Save an outbound packet for a node in power-save sleep state.
+ * The new packet is placed on the node's saved queue, and the TIM
+ * is changed, if necessary.
+ */
+void
+ieee80211_pwrsave(struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	unsigned long flags;
+	struct sk_buff *tail;
+	int qlen, age;
+
+	spin_lock_irqsave(&ni->ni_savedq.lock, flags);
+	/* Drop (not queue) when the PS queue is full. */
+	if (skb_queue_len(&ni->ni_savedq) >= IEEE80211_PS_MAX_QUEUE) {
+		IEEE80211_NODE_STAT(ni, psq_drops);
+		spin_unlock_irqrestore(&ni->ni_savedq.lock, flags);
+		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+			"pwr save q overflow, drops %d (size %d)",
+			ni->ni_stats.ns_psq_drops, IEEE80211_PS_MAX_QUEUE);
+#ifdef IEEE80211_DEBUG
+		if (ieee80211_msg_dumppkts(vap))
+			ieee80211_dump_pkt(ni->ni_ic, skb->data, skb->len, -1, -1);
+#endif
+		dev_kfree_skb(skb);
+		return;
+	}
+	/*
+	 * Tag the frame with it's expiry time and insert
+	 * it in the queue.  The aging interval is 4 times
+	 * the listen interval specified by the station.
+	 * Frames that sit around too long are reclaimed
+	 * using this information.
+	 */
+	/* XXX handle overflow? */
+	age = ((ni->ni_intval * ic->ic_lintval) << 2) / 1024; /* TU -> secs */
+	tail = skb_peek_tail(&ni->ni_savedq);
+	if (tail != NULL) {
+		/* Ages are stored relative to the previous queued frame. */
+		age -= M_AGE_GET(tail);
+		__skb_append(tail, skb, &ni->ni_savedq);
+	} else
+		__skb_queue_head(&ni->ni_savedq, skb);
+	M_AGE_SET(skb, age);
+	qlen = skb_queue_len(&ni->ni_savedq);
+	spin_unlock_irqrestore(&ni->ni_savedq.lock, flags);
+
+	IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+		"save frame, %u now queued", qlen);
+
+	/* First frame queued: advertise buffered traffic via the TIM. */
+	if (qlen == 1 && vap->iv_set_tim != NULL)
+		vap->iv_set_tim(ni, 1);
+}
+
+/*
+ * Handle power-save state change in ap/ibss mode.
+ */
+void
+ieee80211_node_pwrsave(struct ieee80211_node *ni, int enable)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP ||
+		vap->iv_opmode == IEEE80211_M_IBSS,
+		("unexpected operating mode %u", vap->iv_opmode));
+
+	if (enable) {
+		/* Count the station only on the off->on transition. */
+		if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) == 0)
+			vap->iv_ps_sta++;
+		ni->ni_flags |= IEEE80211_NODE_PWR_MGT;
+		IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+			"power save mode on, %u sta's in ps mode",
+			vap->iv_ps_sta);
+		return;
+	}
+
+	if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT))
+		vap->iv_ps_sta--;
+	ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
+	IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+		"power save mode off, %u sta's in ps mode", vap->iv_ps_sta);
+	/* XXX if no stations in ps mode, flush mc frames */
+
+	/*
+	 * Flush queued unicast frames.
+	 */
+	if (IEEE80211_NODE_SAVEQ_QLEN(ni) == 0) {
+		if (vap->iv_set_tim != NULL)
+			vap->iv_set_tim(ni, 0);		/* just in case */
+		return;
+	}
+	IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+		"flush ps queue, %u packets queued",
+		IEEE80211_NODE_SAVEQ_QLEN(ni));
+	for (;;) {
+		struct sk_buff *skb;
+		int qlen;
+
+		IEEE80211_NODE_SAVEQ_LOCK(ni);
+		IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
+		IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+		if (skb == NULL)
+			break;
+		/* 
+		 * If this is the last packet, turn off the TIM bit.
+		 *
+		 * Set the M_PWR_SAV bit on skb to allow encap to test for
+		 * adding MORE_DATA bit to wh.
+		 *
+		 * The 802.11 MAC Spec says we should only set MORE_DATA for 
+		 * unicast packets when the STA is in PS mode (7.1.3.1.8);
+		 * which it isn't.
+		 */
+		// M_PWR_SAV_SET(skb);
+
+		ieee80211_parent_queue_xmit(skb);
+	}
+	/* Bug fix: guard the callback as done above — iv_set_tim may be
+	 * NULL (e.g. non-AP/IBSS VAPs or after vdetach). */
+	if (vap->iv_set_tim != NULL)
+		vap->iv_set_tim(ni, 0);
+}
+
+/*
+ * Handle power-save state change in station mode.
+ */
+void
+ieee80211_sta_pwrsave(struct ieee80211vap *vap, int enable)
+{
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct ieee80211com *ic = vap->iv_ic;
+	int qlen;
+
+	if (!ni)
+		return;
+
+	/* No-op if the requested state matches the current PS state. */
+	if (!((enable != 0) ^ ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) != 0)))
+		return;
+
+	IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+		"sta power save mode %s", enable ? "on" : "off");
+	if (!enable) {
+		ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
+		ic->ic_power_save(ni, 0);
+
+		/* Null-data frame tells the AP our PM bit changed. */
+		ieee80211_ref_node(ni);
+		ieee80211_send_nulldata(ni);
+		/*
+		 * Flush any queued frames; we can do this immediately
+		 * because we know they'll be queued behind the null
+		 * data frame we send the ap.
+		 * XXX can we use a data frame to take us out of ps?
+		 */
+		qlen = IEEE80211_NODE_SAVEQ_QLEN(ni);
+		if (qlen != 0) {
+			IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+				"flush ps queue, %u packets queued", qlen);
+			for (;;) {
+				struct sk_buff *skb;
+
+				IEEE80211_NODE_SAVEQ_LOCK(ni);
+				skb = __skb_dequeue(&ni->ni_savedq);
+				IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+				if (skb == NULL)
+					break;
+				ieee80211_parent_queue_xmit(skb);
+			}
+		}
+	} else {
+		ni->ni_flags |= IEEE80211_NODE_PWR_MGT;
+		ieee80211_ref_node(ni);
+		ieee80211_send_nulldata(ni);
+
+		/*
+		* Delay 1 msec to ensure the null data frame has
+		* already been sent out before entering power save.
+		*/
+		mdelay(1);
+		ic->ic_power_save(ni, 1);
+	}
+}
diff --git a/drivers/qtn/wlan/ieee80211_proto.c b/drivers/qtn/wlan/ieee80211_proto.c
new file mode 100644
index 0000000..89feff4
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_proto.c
@@ -0,0 +1,2327 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_proto.c 1849 2006-12-08 17:20:08Z proski $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 protocol support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <asm/board/pm.h>
+#include <qtn/qtn_debug.h>
+#include <qtn/shared_defs.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_dot11_msg.h"
+#include "net80211/ieee80211_tpc.h"
+
+/* XXX tunables */
+#define	AGGRESSIVE_MODE_SWITCH_HYSTERESIS	3	/* pkts / 100ms */
+#define	HIGH_PRI_SWITCH_THRESH			10	/* pkts / 100ms */
+
+#define	IEEE80211_RATE2MBS(r)	(((r) & IEEE80211_RATE_VAL) / 2)
+
+const char *ieee80211_mgt_subtype_name[] = {
+	"assoc_req",	"assoc_resp",	"reassoc_req",	"reassoc_resp",
+	"probe_req",	"probe_resp",	"reserved#6",	"reserved#7",
+	"beacon",	"atim",		"disassoc",	"auth",
+	"deauth",	"action",	"reserved#14",	"reserved#15"
+};
+EXPORT_SYMBOL(ieee80211_mgt_subtype_name);
+const char *ieee80211_ctl_subtype_name[] = {
+	"reserved#0",	"reserved#1",	"reserved#2",	"reserved#3",
+	"reserved#3",	"reserved#5",	"reserved#6",	"reserved#7",
+	"reserved#8",	"reserved#9",	"ps_poll",	"rts",
+	"cts",		"ack",		"cf_end",	"cf_end_ack"
+};
+EXPORT_SYMBOL(ieee80211_ctl_subtype_name);
+const char *ieee80211_state_name[IEEE80211_S_MAX] = {
+	"INIT",		/* IEEE80211_S_INIT */
+	"SCAN",		/* IEEE80211_S_SCAN */
+	"AUTH",		/* IEEE80211_S_AUTH */
+	"ASSOC",	/* IEEE80211_S_ASSOC */
+	"RUN"		/* IEEE80211_S_RUN */
+};
+EXPORT_SYMBOL(ieee80211_state_name);
+const char *ieee80211_wme_acnames[] = {
+	"WME_AC_BE",
+	"WME_AC_BK",
+	"WME_AC_VI",
+	"WME_AC_VO",
+	"WME_UPSD",
+};
+EXPORT_SYMBOL(ieee80211_wme_acnames);
+
+extern u_int16_t ht_rate_table_20MHz_800[];
+extern u_int16_t ht_rate_table_40MHz_800[];
+
+static int ieee80211_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static void ieee80211_tx_timeout(unsigned long);
+static void ieee80211_test_traffic_timeout(unsigned long);
+void ieee80211_auth_setup(void);
+
+void
+ieee80211_proto_attach(struct ieee80211com *ic)
+{
+
+	ic->ic_protmode = IEEE80211_PROT_CTSONLY;
+	ic->ic_flags_ext |= IEEE80211_FEXT_BG_PROTECT;
+	ic->ic_flags_ext |= IEEE80211_FEXT_11N_PROTECT;
+
+	ic->ic_wme.wme_hipri_switch_hysteresis =
+		AGGRESSIVE_MODE_SWITCH_HYSTERESIS;
+
+	/* initialize management frame handlers */
+	ic->ic_recv_mgmt = ieee80211_recv_mgmt;
+	ic->ic_send_mgmt = ieee80211_send_mgmt;
+	/* TKIP MIC failure report from the lower layers */
+	ic->ic_tkip_mic_failure = ieee80211_tkip_mic_failure;
+
+	ieee80211_auth_setup();
+}
+
+void
+ieee80211_proto_detach(struct ieee80211com *ic)
+{
+}
+
+void
+ieee80211_proto_vattach(struct ieee80211vap *vap)
+{
+#ifdef notdef
+	vap->iv_rtsthreshold = IEEE80211_RTS_DEFAULT;
+#else
+	vap->iv_rtsthreshold = IEEE80211_RTS_MAX;
+#endif
+	vap->iv_fragthreshold = 2346;		/* XXX not used yet */
+	vap->iv_fixed_rate = IEEE80211_FIXED_RATE_NONE;
+	init_timer(&vap->iv_mgtsend);
+	init_timer(&vap->iv_xrvapstart);
+	init_timer(&vap->iv_swbmiss);
+	init_timer(&vap->iv_swberp);
+	init_timer(&vap->iv_test_traffic);
+	init_timer(&vap->iv_sta_fast_rejoin);
+	vap->iv_mgtsend.function = ieee80211_tx_timeout;
+	vap->iv_mgtsend.data = (unsigned long) vap;
+	vap->iv_test_traffic.function = ieee80211_test_traffic_timeout;
+	vap->iv_test_traffic.data = (unsigned long) vap;
+	vap->iv_sta_fast_rejoin.function = ieee80211_sta_fast_rejoin;
+	vap->iv_sta_fast_rejoin.data = (unsigned long) vap;
+	ieee80211_ppqueue_init(vap);
+
+	vap->bcast_pps.max_bcast_pps = 0; /* Zero indicates no limit on bcst pps */
+	vap->bcast_pps.rx_bcast_counter = 0;
+	vap->bcast_pps.rx_bcast_pps_start_time = 0;
+	vap->bcast_pps.tx_bcast_counter = 0;
+	vap->bcast_pps.tx_bcast_pps_start_time = 0;
+
+	/* Initialize the timeout functions for non-HT and non-ERP protection */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		/* Initialize the timeout function for non-HT protection */
+		vap->iv_swbmiss.function = ieee80211_swbmiss;
+		vap->iv_swbmiss.data = (unsigned long) vap;
+		vap->iv_swbmiss_period = IEEE80211_TU_TO_JIFFIES(
+						IEEE80211_BINTVAL_DEFAULT * 20);
+
+		/* Initialize the timeout function for non-ERP protection */
+		vap->iv_swberp.function = ieee80211_swberp;
+		vap->iv_swberp.data = (unsigned long) vap;
+		vap->iv_swberp_period = IEEE80211_TU_TO_JIFFIES(
+						IEEE80211_BINTVAL_DEFAULT * 20);
+	}
+
+	/* protocol state change handler */
+	vap->iv_newstate = ieee80211_newstate;
+}
+
+void
+ieee80211_proto_vdetach(struct ieee80211vap *vap)
+{
+	/*
+	 * This should not be needed as we detach when resetting
+	 * the state but be conservative here since the
+	 * authenticator may do things like spawn kernel threads.
+	 */
+	if (vap->iv_auth->ia_detach)
+		vap->iv_auth->ia_detach(vap);
+
+	/*
+	 * Detach any ACL'ator.
+	 */
+	if (vap->iv_acl != NULL)
+		vap->iv_acl->iac_detach(vap);
+
+	IEEE80211_LOCK_IRQ(vap->iv_ic);
+	ieee80211_adjust_wme_by_vappri(vap->iv_ic);
+	IEEE80211_UNLOCK_IRQ(vap->iv_ic);
+}
+
+/*
+ * Simple-minded authenticator module support.
+ */
+
+#define	IEEE80211_AUTH_MAX	(IEEE80211_AUTH_WPA+1)
+/* XXX well-known names */
+static const char *auth_modnames[IEEE80211_AUTH_MAX] = {
+	"wlan_internal",	/* IEEE80211_AUTH_NONE */
+	"wlan_internal",	/* IEEE80211_AUTH_OPEN */
+	"wlan_internal",	/* IEEE80211_AUTH_SHARED */
+	"wlan_xauth",		/* IEEE80211_AUTH_8021X	 */
+	"wlan_internal",	/* IEEE80211_AUTH_AUTO */
+	"wlan_xauth",		/* IEEE80211_AUTH_WPA */
+};
+static const struct ieee80211_authenticator *authenticators[IEEE80211_AUTH_MAX];
+
+static const struct ieee80211_authenticator auth_internal = {
+	.ia_name		= "wlan_internal",
+	.ia_attach		= NULL,
+	.ia_detach		= NULL,
+	.ia_node_join		= NULL,
+	.ia_node_leave		= NULL,
+};
+
+/*
+ * Setup internal authenticators once; they are never unregistered.
+ */
+void
+ieee80211_auth_setup(void)
+{
+	ieee80211_authenticator_register(IEEE80211_AUTH_OPEN, &auth_internal);
+	ieee80211_authenticator_register(IEEE80211_AUTH_SHARED, &auth_internal);
+	ieee80211_authenticator_register(IEEE80211_AUTH_AUTO, &auth_internal);
+}
+
+const struct ieee80211_authenticator *
+ieee80211_authenticator_get(int auth)
+{
+	if (auth >= IEEE80211_AUTH_MAX)
+		return NULL;
+	if (authenticators[auth] == NULL)
+		ieee80211_load_module(auth_modnames[auth]);
+	return authenticators[auth];
+}
+
+void
+ieee80211_authenticator_register(int type,
+	const struct ieee80211_authenticator *auth)
+{
+	if (type >= IEEE80211_AUTH_MAX)
+		return;
+	authenticators[type] = auth;
+}
+EXPORT_SYMBOL(ieee80211_authenticator_register);
+
+void
+ieee80211_authenticator_unregister(int type)
+{
+	if (type >= IEEE80211_AUTH_MAX)
+		return;
+	authenticators[type] = NULL;
+}
+EXPORT_SYMBOL(ieee80211_authenticator_unregister);
+
+/*
+ * Very simple-minded authenticator backend module support.
+ */
+/* XXX just one for now */
+static	const struct ieee80211_authenticator_backend *backend = NULL;
+
+void
+ieee80211_authenticator_backend_register(
+	const struct ieee80211_authenticator_backend *be)
+{
+	printk(KERN_INFO "wlan: %s backend registered\n", be->iab_name);
+	backend = be;
+}
+EXPORT_SYMBOL(ieee80211_authenticator_backend_register);
+
+void
+ieee80211_authenticator_backend_unregister(
+	const struct ieee80211_authenticator_backend * be)
+{
+	if (backend == be)
+		backend = NULL;
+	printk(KERN_INFO "wlan: %s backend unregistered\n",
+		be->iab_name);
+}
+EXPORT_SYMBOL(ieee80211_authenticator_backend_unregister);
+
+const struct ieee80211_authenticator_backend *
+ieee80211_authenticator_backend_get(const char *name)
+{
+	if (backend == NULL)
+		ieee80211_load_module("wlan_radius");
+	return backend && strcmp(backend->iab_name, name) == 0 ? backend : NULL;
+}
+EXPORT_SYMBOL(ieee80211_authenticator_backend_get);
+
+/*
+ * Very simple-minded ACL module support.
+ */
+/* XXX just one for now */
+static const struct ieee80211_aclator *acl = NULL;
+
+void
+ieee80211_aclator_register(const struct ieee80211_aclator *iac)
+{
+	printk(KERN_INFO "wlan: %s acl policy registered\n", iac->iac_name);
+	acl = iac;
+}
+EXPORT_SYMBOL(ieee80211_aclator_register);
+
+void
+ieee80211_aclator_unregister(const struct ieee80211_aclator *iac)
+{
+	if (acl == iac)
+		acl = NULL;
+	printk(KERN_INFO "wlan: %s acl policy unregistered\n", iac->iac_name);
+}
+EXPORT_SYMBOL(ieee80211_aclator_unregister);
+
+const struct ieee80211_aclator *
+ieee80211_aclator_get(const char *name)
+{
+	if (acl == NULL)
+		ieee80211_load_module("wlan_acl");
+	return acl && strcmp(acl->iac_name, name) == 0 ? acl : NULL;
+}
+EXPORT_SYMBOL(ieee80211_aclator_get);
+
+void
+ieee80211_print_essid(const u_int8_t *essid, int len)
+{
+	int i;
+	const u_int8_t *p; 
+
+	if (len > IEEE80211_NWID_LEN)
+		len = IEEE80211_NWID_LEN;
+	/* determine printable or not */
+	for (i = 0, p = essid; i < len; i++, p++) {
+		if (*p < ' ' || *p > 0x7e)
+			break;
+	}
+	if (i == len) {
+		printf("\"");
+		for (i = 0, p = essid; i < len; i++, p++)
+			printf("%c", *p);
+		printf("\"");
+	} else {
+		printf("0x");
+		for (i = 0, p = essid; i < len; i++, p++)
+			printf("%02x", *p);
+	}
+}
+EXPORT_SYMBOL(ieee80211_print_essid);
+
+void
+ieee80211_dump_pkt(struct ieee80211com *ic,
+	const u_int8_t *buf, int len, int rate, int rssi)
+{
+	const struct ieee80211_frame *wh;
+	int i;
+
+	wh = (const struct ieee80211_frame *)buf;
+	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+	case IEEE80211_FC1_DIR_NODS:
+		printf("NODS %s", ether_sprintf(wh->i_addr2));
+		printf("->%s", ether_sprintf(wh->i_addr1));
+		printf("(%s)", ether_sprintf(wh->i_addr3));
+		break;
+	case IEEE80211_FC1_DIR_TODS:
+		printf("TODS %s", ether_sprintf(wh->i_addr2));
+		printf("->%s", ether_sprintf(wh->i_addr3));
+		printf("(%s)", ether_sprintf(wh->i_addr1));
+		break;
+	case IEEE80211_FC1_DIR_FROMDS:
+		printf("FRDS %s", ether_sprintf(wh->i_addr3));
+		printf("->%s", ether_sprintf(wh->i_addr1));
+		printf("(%s)", ether_sprintf(wh->i_addr2));
+		break;
+	case IEEE80211_FC1_DIR_DSTODS:
+		printf("DSDS %s", ether_sprintf((u_int8_t *)&wh[1]));
+		printf("->%s", ether_sprintf(wh->i_addr3));
+		printf("(%s", ether_sprintf(wh->i_addr2));
+		printf("->%s)", ether_sprintf(wh->i_addr1));
+		break;
+	}
+	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
+	case IEEE80211_FC0_TYPE_DATA:
+		printf(" data");
+		break;
+	case IEEE80211_FC0_TYPE_MGT:
+		printf(" %s", ieee80211_mgt_subtype_name[
+			(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK)
+			>> IEEE80211_FC0_SUBTYPE_SHIFT]);
+		break;
+	default:
+		printf(" type#%d", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK);
+		break;
+	}
+	if (IEEE80211_QOS_HAS_SEQ(wh)) {
+		const struct ieee80211_qosframe *qwh = 
+			(const struct ieee80211_qosframe *)buf;
+		printf(" QoS [TID %u%s]", qwh->i_qos[0] & IEEE80211_QOS_TID,
+			qwh->i_qos[0] & IEEE80211_QOS_ACKPOLICY ? " ACM" : "");
+	}
+	if (wh->i_fc[1] & IEEE80211_FC1_PROT) {
+		int off;
+
+		off = ieee80211_anyhdrspace(ic, wh);
+		printf(" WEP [IV %.02x %.02x %.02x",
+			buf[off+0], buf[off+1], buf[off+2]);
+		if (buf[off+IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)
+			printf(" %.02x %.02x %.02x",
+				buf[off+4], buf[off+5], buf[off+6]);
+		printf(" KID %u]", buf[off+IEEE80211_WEP_IVLEN] >> 6);
+	}
+	if (rate >= 0)
+		printf(" %dM", rate / 2);
+	if (rssi >= 0)
+		printf(" +%d", rssi);
+	printf("\n");
+	if (len > 0) {
+		for (i = 0; i < len; i++) {
+			if ((i % 8) == 0)
+				printf(" ");
+			if ((i % 16) == 0)
+				printf("\n");
+			printf("%02x ", buf[i]);
+		}
+		printf("\n\n");
+	}
+}
+EXPORT_SYMBOL(ieee80211_dump_pkt);
+
+int
+ieee80211_fix_ht_rate(struct ieee80211_node *ni, int flags)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	int8_t i = 0, j = 0, k = 0;
+	int val = 0, rridx = 0;
+	u_int8_t rs[IEEE80211_HT_MAXMCS_SET_SUPPORTED] = {0};
+	u_int8_t fixed_rate = 0;
+
+	/* check if intersection is required */
+	if (flags & IEEE80211_F_DOXSECT)
+	{
+		k = 0;
+		ni->ni_htrates.rs_nrates = 0;
+
+		for (i = 0; i < IEEE80211_HT_MAXMCS_BASICSET_SUPPORTED; i++)
+		{
+			rs[i] = ni->ni_htcap.mcsset[i];
+			rs[i] &= ic->ic_htcap.mcsset[i];
+
+			for(j = 0, val = 0x1; j < 8; j++, val = val << 1)
+			{
+				if (rs[i] & val)
+				{
+					ni->ni_htrates.rs_rates[k] = IEEE80211_HT_RATE_TABLE_IDX(i,j);
+					ni->ni_htrates.rs_nrates++;
+					if ((vap->iv_fixed_rate & 0x80)&& (vap->iv_fixed_rate == (ni->ni_htrates.rs_rates[k]|0x80)))
+   	                                     fixed_rate = vap->iv_fixed_rate & 0x7F;
+					k++;
+				}
+				
+				if ((vap->iv_fixed_rate & 0x80)&& (vap->iv_fixed_rate == (ni->ni_htrates.rs_rates[k]|0x80)))
+					fixed_rate = vap->iv_fixed_rate & 0x7F;
+			}
+		}
+
+		/* sort the rates in ascending order */
+		for (i = 0; i < (ni->ni_htrates.rs_nrates - 1); i++)
+		{
+			for (j = i + 1; j < ni->ni_htrates.rs_nrates; j++)
+			{
+				int tempi, tempj;
+				tempi = ni->ni_htrates.rs_rates[i];
+				tempj = ni->ni_htrates.rs_rates[j];
+
+				if(IEEE80211_IS_CHAN_11N(ic->ic_bsschan))
+				{
+					if (ht_rate_table_20MHz_800[tempi] > ht_rate_table_20MHz_800[tempj])
+					{
+						ni->ni_htrates.rs_rates[i] = tempj;
+						ni->ni_htrates.rs_rates[j] = tempi;
+					}
+				}
+				else
+				{
+					if (ht_rate_table_40MHz_800[tempi] > ht_rate_table_40MHz_800[tempj])
+					{
+						ni->ni_htrates.rs_rates[i] = tempj;
+						ni->ni_htrates.rs_rates[j] = tempi;
+					}
+				}
+			}
+		}
+
+		rridx = ni->ni_htrates.rs_rates[ni->ni_htrates.rs_nrates - 1];
+	}
+
+	/* check if basic rate set is required */
+	for (i = 0; i < IEEE80211_HT_MAXMCS_BASICSET_SUPPORTED; i++)
+	{
+		if ((ni->ni_htcap.mcsset[i] & ic->ic_htinfo.basicmcsset[i]) == ic->ic_htinfo.basicmcsset[i])
+		{
+			if (flags & IEEE80211_F_DOBRS)
+				rs[i] &= ic->ic_htinfo.basicmcsset[i];
+		}
+		else
+		{
+			/* basic rate not supported */
+			return 0;
+		}
+	}
+
+	/* keep only those rates that are supported by both STAs */
+	if (flags & IEEE80211_F_DODEL)
+	{
+		for (i = 0; i < IEEE80211_HT_MAXMCS_SET_SUPPORTED; i++)
+		{
+			ni->ni_htcap.mcsset[i] &= ic->ic_htcap.mcsset[i];
+		}
+	}
+
+	if (flags & IEEE80211_F_DOFRATE)
+	{
+		if (fixed_rate != 0)
+			return fixed_rate;
+		else
+			return 0;
+	}
+
+	return rridx;
+}
+
+int
+ieee80211_fix_rate(struct ieee80211_node *ni, int flags)
+{
+#define	RV(v)	((v) & IEEE80211_RATE_VAL)
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = ni->ni_ic;
+	int i, j, ignore, error;
+	int okrate, badrate, fixedrate;
+	struct ieee80211_rateset *srs, *nrs;
+	u_int8_t r;
+
+	error = 0;
+	okrate = badrate = fixedrate = 0;
+	/* Supported rates depend on the mode setting */
+	srs = &ic->ic_sup_rates[ic->ic_curmode];
+	nrs = &ni->ni_rates;
+	fixedrate = IEEE80211_FIXED_RATE_NONE;
+	for (i = 0; i < nrs->rs_nrates; ) {
+		ignore = 0;
+		if (flags & IEEE80211_F_DOSORT) {
+			/*
+			 * Sort rates.
+			 */
+			for (j = i + 1; j < nrs->rs_nrates; j++) {
+				if (RV(nrs->rs_rates[i]) > RV(nrs->rs_rates[j])) {
+					r = nrs->rs_rates[i];
+					nrs->rs_rates[i] = nrs->rs_rates[j];
+					nrs->rs_rates[j] = r;
+				}
+			}
+		}
+		r = nrs->rs_rates[i] & IEEE80211_RATE_VAL;
+		badrate = r;
+		if (flags & IEEE80211_F_DONEGO) {
+			/*
+			 * Check against supported rates.
+			 */
+			for (j = 0; j < srs->rs_nrates; j++) {
+				if (r == RV(srs->rs_rates[j])) {
+					/*
+					 * Overwrite with the supported rate
+					 * value so any basic rate bit is set.
+					 * This ensures that response we send
+					 * to stations have the necessary basic
+					 * rate bit set.
+					 */
+					nrs->rs_rates[i] = srs->rs_rates[j];
+					break;
+				}
+			}
+			if (j == srs->rs_nrates) {
+				/*
+				 * A rate in the node's rate set is not
+				 * supported.  If this is a basic rate and we
+				 * are operating as an AP then this is an error.
+				 * Otherwise we just discard/ignore the rate.
+				 * Note that this is important for 11b stations
+				 * when they want to associate with an 11g AP.
+				 *
+				 * Spec 8.4.2.3 specifies BSS membership selectors
+				 * are carried in the same IE as supported rates
+				 * or extended supported rates.
+				 *
+				 * On receiving BSSMembershipSelectorHTPHY in
+				 * PROBE_REQ or ASSOC_REQ, ignore it instead of
+				 * recognizing it as a basic rate and denying
+				 * connection.
+				 */
+				if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
+				    (nrs->rs_rates[i] & IEEE80211_RATE_BASIC) &&
+				    (RV(nrs->rs_rates[i]) != IEEE80211_BSS_MEMBERSHIP_SELECTOR_HT_PHY))
+					error++;
+				ignore++;
+			}
+		}
+		if (flags & IEEE80211_F_DODEL) {
+			/*
+			 * Delete unacceptable rates.
+			 */
+			if (ignore) {
+				nrs->rs_nrates--;
+				for (j = i; j < nrs->rs_nrates; j++)
+					nrs->rs_rates[j] = nrs->rs_rates[j + 1];
+				nrs->rs_rates[j] = 0;
+				continue;
+			}
+		}
+		if (!ignore)
+			okrate = nrs->rs_rates[i];
+		i++;
+	}
+
+	if (okrate == 0 || error != 0)
+		return badrate | IEEE80211_RATE_BASIC;
+	else
+		return RV(okrate);
+#undef RV
+}
+
+/*
+ * Reset 11g-related state.
+ */
+void
+ieee80211_reset_erp(struct ieee80211com *ic, enum ieee80211_phymode mode)
+{
+#define	IS_11G(m) \
+	((m) == IEEE80211_MODE_11G || (m) == IEEE80211_MODE_TURBO_G)
+
+	ic->ic_flags &= ~IEEE80211_F_USEPROT;
+	/*
+	 * Preserve the long slot and nonerp station count if
+	 * switching between 11g and turboG. Otherwise, inactivity
+	 * will cause the turbo station to disassociate and possibly
+	 * try to leave the network.
+	 * XXX not right if really trying to reset state
+	 */
+	if (IS_11G(mode) ^ IS_11G(ic->ic_curmode)) {
+		ic->ic_nonerpsta = 0;
+		ic->ic_longslotsta = 0;
+	}
+
+	/*
+	 * Short slot time is enabled only when operating in 11g
+	 * and not in an IBSS.  We must also honor whether or not
+	 * the driver is capable of doing it.
+	 */
+	ieee80211_set_shortslottime(ic,
+		IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+		(IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
+		ic->ic_opmode == IEEE80211_M_HOSTAP &&
+		(ic->ic_caps & IEEE80211_C_SHSLOT)));
+	/*
+	 * Set short preamble and ERP barker-preamble flags.
+	 */
+	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+	    (ic->ic_caps & IEEE80211_C_SHPREAMBLE)) {
+		ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+		ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+	} else {
+		ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+		ic->ic_flags |= IEEE80211_F_USEBARKER;
+	}
+#undef IS_11G
+}
+
+/*
+ * Set the short slot time state and notify the driver.
+ */
+void
+ieee80211_set_shortslottime(struct ieee80211com *ic, int onoff)
+{
+	if (onoff)
+		ic->ic_flags |= IEEE80211_F_SHSLOT;
+	else
+		ic->ic_flags &= ~IEEE80211_F_SHSLOT;
+	/* notify driver */
+	if (ic->ic_updateslot != NULL)
+		ic->ic_updateslot(ic);
+}
+
+/*
+ * Check if the specified rate set supports ERP.
+ * 6, 12 and 24 Mbps are the mandatory ERP rates (12, 24, 48 in 500 kb/s units)
+ */
+int
+ieee80211_iserp_rateset(struct ieee80211com *ic, struct ieee80211_rateset *rs)
+{
+	static const int erp_rates[] = { 12, 24, 48 };
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(erp_rates); i++) {
+		for (j = 0; j < rs->rs_nrates; j++) {
+			int r = rs->rs_rates[j] & IEEE80211_RATE_VAL;
+			if (erp_rates[i] == r) {
+				goto next;
+			}
+		}
+		return 0;
+	next:
+		;
+	}
+	return 1;
+}
+
+static const struct ieee80211_rateset basic11g[IEEE80211_MODE_MAX] = {
+    { 0 },			/* IEEE80211_MODE_AUTO */
+    { 3, 3, { 12, 24, 48 } },	/* IEEE80211_MODE_11A */
+    { 2, 2, { 2, 4 } },		/* IEEE80211_MODE_11B */
+    { 4, 4, { 2, 4, 11, 22 } },	/* IEEE80211_MODE_11G (mixed b/g) */
+    { 0, 0 },			/* IEEE80211_MODE_FH */
+    { 3, 3, { 12, 24, 48 } },	/* IEEE80211_MODE_TURBO_A */
+    { 4, 4, { 2, 4, 11, 22 } },	/* IEEE80211_MODE_TURBO_G (mixed b/g) */
+};
+
+/*
+ * Mark the basic rates for the 11g rate table based on the
+ * specified mode.  For 11b compatibility we mark only 11b
+ * rates.  There's also a pseudo 11a-mode used to mark only
+ * the basic OFDM rates; this is used to exclude 11b stations
+ * from an 11g bss.
+ */
+void
+ieee80211_set11gbasicrates(struct ieee80211_rateset *rs, enum ieee80211_phymode mode)
+{
+	int i, j;
+
+	KASSERT(mode < IEEE80211_MODE_MAX, ("invalid mode %u", mode));
+	for (i = 0; i < rs->rs_nrates; i++) {
+		rs->rs_rates[i] &= IEEE80211_RATE_VAL;
+		for (j = 0; j < basic11g[mode].rs_nrates; j++)
+			if (basic11g[mode].rs_rates[j] == rs->rs_rates[i]) {
+				rs->rs_rates[i] |= IEEE80211_RATE_BASIC;
+				break;
+			}
+	}
+}
+
+struct ieee80211vap *
+ieee80211_get_sta_vap(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap = NULL;
+
+	IEEE80211_LOCK_IRQ(ic);
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode == IEEE80211_M_STA)
+			break;
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	return vap;
+}
+EXPORT_SYMBOL(ieee80211_get_sta_vap);
+
+/*
+ * Deduce the 11g setup by examining the rates
+ * that are marked basic.
+ */
+enum ieee80211_phymode
+ieee80211_get11gbasicrates(struct ieee80211_rateset *rs)
+{
+	struct ieee80211_rateset basic;
+	int i;
+
+	memset(&basic, 0, sizeof(basic));
+	for (i = 0; i < rs->rs_nrates; i++)
+		if (rs->rs_rates[i] & IEEE80211_RATE_BASIC)
+			basic.rs_rates[basic.rs_nrates++] =
+				rs->rs_rates[i] & IEEE80211_RATE_VAL;
+	for (i = 0; i < IEEE80211_MODE_MAX; i++)
+		if (memcmp(&basic, &basic11g[i], sizeof(basic)) == 0)
+			return i;
+	return IEEE80211_MODE_AUTO;
+}
+
+struct ieee80211_wme_state *ieee80211_vap_get_wmestate(struct ieee80211vap *vap)
+{
+	if (vap->iv_opmode != IEEE80211_M_STA) {
+		return &vap->iv_wme;
+	} else {
+		return &vap->iv_ic->ic_wme;
+	}
+}
+EXPORT_SYMBOL(ieee80211_vap_get_wmestate);
+
+void ieee80211_vap_sync_chan_wmestate(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211vap *vap1;
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		return;
+
+	/* sync to global */
+	memcpy(&ic->ic_wme.wme_wmeChanParams, &vap->iv_wme.wme_wmeChanParams,
+			sizeof(ic->ic_wme.wme_wmeChanParams));
+	memcpy(&ic->ic_wme.wme_chanParams, &vap->iv_wme.wme_chanParams,
+			sizeof(ic->ic_wme.wme_chanParams));
+
+	/*
+	 * Sync wme chan params across all SSID since we don't support per SSID wme chan params.
+	 * We only support per SSID wme bss params.
+	 */
+	TAILQ_FOREACH(vap1, &ic->ic_vaps, iv_next) {
+		if (vap1 == vap)
+			continue;
+		memcpy(&vap1->iv_wme.wme_wmeChanParams, &vap->iv_wme.wme_wmeChanParams,
+				sizeof(vap1->iv_wme.wme_wmeChanParams));
+		memcpy(&vap1->iv_wme.wme_chanParams, &vap->iv_wme.wme_chanParams,
+				sizeof(vap1->iv_wme.wme_chanParams));
+	}
+}
+EXPORT_SYMBOL(ieee80211_vap_sync_chan_wmestate);
+
+/*
+ * Automatically change the wmm params based on how many SSID priorities are used:
+ *  1. If all SSID has same priority, then apply default wmm params for all SSID.
+ *  2. Otherwise, automatically apply different wmm param set:
+ *     SSID priority    wmm bss params
+ *     3                all same as original AC_VO
+ *     2                all same as original AC_VI
+ *     1                all same as original AC_BE
+ *     0                all same as original AC_BK
+ *  Please note ic lock shall be held when calling this function.
+ */
+void ieee80211_adjust_wme_by_vappri(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+	uint8_t vap_pri;
+	uint8_t use_default = 1;
+	struct ieee80211_wme_state *wme_dft = &ic->ic_wme;
+	struct ieee80211_wme_state *wme;
+	uint8_t ac;
+	static uint8_t vappri_to_ac[QTN_VAP_PRIORITY_NUM] = {WMM_AC_BK, WMM_AC_BE, WMM_AC_VI, WMM_AC_VO};
+	uint8_t mapped_ac;
+
+	vap = TAILQ_FIRST(&ic->ic_vaps);
+	if (unlikely(vap == NULL))
+		return;
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		return;
+
+	if (ic->ic_vap_pri_wme) {
+		vap_pri = vap->iv_pri;
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if (vap->iv_pri != vap_pri)
+				use_default = 0;
+		}
+	}
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		wme = &vap->iv_wme;
+		if (use_default) {
+			/* apply default param set: different for different AC */
+			memcpy(&wme->wme_wmeBssChanParams.cap_wmeParams,
+				&wme_dft->wme_wmeBssChanParams.cap_wmeParams,
+				sizeof(wme->wme_wmeBssChanParams.cap_wmeParams));
+		} else {
+			/* determine wmm params based on vap priority */
+			mapped_ac = vappri_to_ac[vap->iv_pri];
+			for (ac = 0; ac < WMM_AC_NUM; ac++) {
+				memcpy(&wme->wme_wmeBssChanParams.cap_wmeParams[ac],
+					&wme_dft->wme_wmeBssChanParams.cap_wmeParams[mapped_ac],
+					sizeof(wme->wme_wmeBssChanParams.cap_wmeParams[0]));
+			}
+		}
+		wme->wme_wmeBssChanParams.cap_info_count++;
+		ieee80211_wme_updateparams(vap, 0);
+	}
+}
+
+void
+ieee80211_wme_initparams(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	IEEE80211_LOCK(ic);
+	ieee80211_wme_initparams_locked(vap);
+	ieee80211_adjust_wme_by_vappri(ic);
+	IEEE80211_UNLOCK(ic);
+}
+
+void
+ieee80211_wme_initparams_locked(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+	typedef struct phyParamType {
+		u_int8_t aifsn;
+		u_int8_t logcwmin;
+		u_int8_t logcwmax;
+		u_int16_t txopLimit;
+		u_int8_t acm;
+	} paramType;
+	enum ieee80211_phymode mode;
+
+	paramType *pPhyParam, *pBssPhyParam;
+
+	static struct phyParamType phyParamForAC_BE[IEEE80211_MODE_MAX] = {
+	/* IEEE80211_MODE_AUTO  */ { 3, 4,  6,   0, 0 },
+	/* IEEE80211_MODE_11A   */ { 3, 4,  6,   0, 0 },
+	/* IEEE80211_MODE_11B   */ { 3, 5,  7,   0, 0 },
+	/* IEEE80211_MODE_11G   */ { 3, 4,  6,   0, 0 },
+	/* IEEE80211_MODE_FH    */ { 3, 5,  7,   0, 0 },
+	/* IEEE80211_MODE_TURBO */ { 2, 3,  5,   0, 0 },
+	/* IEEE80211_MODE_TURBO */ { 2, 3,  5,   0, 0 }};
+	static struct phyParamType phyParamForAC_BK[IEEE80211_MODE_MAX] = {
+	/* IEEE80211_MODE_AUTO  */ { 7, 4, 10,   0, 0 },
+	/* IEEE80211_MODE_11A   */ { 7, 4, 10,   0, 0 },
+	/* IEEE80211_MODE_11B   */ { 7, 5, 10,   0, 0 },
+	/* IEEE80211_MODE_11G   */ { 7, 4, 10,   0, 0 },
+	/* IEEE80211_MODE_FH    */ { 7, 5, 10,   0, 0 },
+	/* IEEE80211_MODE_TURBO */ { 7, 3, 10,   0, 0 },
+	/* IEEE80211_MODE_TURBO */ { 7, 3, 10,   0, 0 }};
+	static struct phyParamType phyParamForAC_VI[IEEE80211_MODE_MAX] = {
+	/* IEEE80211_MODE_AUTO  */ { 1, 3,  4, 188, 0 },
+	/* IEEE80211_MODE_11A   */ { 1, 3,  4,  94, 0 },
+	/* IEEE80211_MODE_11B   */ { 1, 4,  5, 188, 0 },
+	/* IEEE80211_MODE_11G   */ { 1, 3,  4,  94, 0 },
+	/* IEEE80211_MODE_FH    */ { 1, 4,  5, 188, 0 },
+	/* IEEE80211_MODE_TURBO */ { 1, 2,  3,  94, 0 },
+	/* IEEE80211_MODE_TURBO */ { 1, 2,  3,  94, 0 }};
+	static struct phyParamType phyParamForAC_VO[IEEE80211_MODE_MAX] = {
+	/* IEEE80211_MODE_AUTO  */ { 1, 2,  3, 188, 0 },
+	/* IEEE80211_MODE_11A   */ { 1, 2,  3,  47, 0 },
+	/* IEEE80211_MODE_11B   */ { 1, 3,  4, 102, 0 },
+	/* IEEE80211_MODE_11G   */ { 1, 2,  3,  47, 0 },
+	/* IEEE80211_MODE_FH    */ { 1, 3,  4, 102, 0 },
+	/* IEEE80211_MODE_TURBO */ { 1, 2,  2,  47, 0 },
+	/* IEEE80211_MODE_TURBO */ { 1, 2,  2,  47, 0 }};
+
+	static struct phyParamType bssPhyParamForAC_BE[IEEE80211_MODE_MAX] = {
+	/* IEEE80211_MODE_AUTO  */ { 3, 4, 10,   0, 0 },
+	/* IEEE80211_MODE_11A   */ { 3, 4, 10,   0, 0 },
+	/* IEEE80211_MODE_11B   */ { 3, 5, 10,   0, 0 },
+	/* IEEE80211_MODE_11G   */ { 3, 4, 10,   0, 0 },
+	/* IEEE80211_MODE_FH    */ { 3, 5, 10,   0, 0 },
+	/* IEEE80211_MODE_TURBO */ { 2, 3, 10,   0, 0 },
+	/* IEEE80211_MODE_TURBO */ { 2, 3, 10,   0, 0 }};
+	static struct phyParamType bssPhyParamForAC_VI[IEEE80211_MODE_MAX] = {
+	/* IEEE80211_MODE_AUTO  */ { 2, 3,  4,  94, 0 },
+	/* IEEE80211_MODE_11A   */ { 2, 3,  4,  94, 0 },
+	/* IEEE80211_MODE_11B   */ { 2, 4,  5, 188, 0 },
+	/* IEEE80211_MODE_11G   */ { 2, 3,  4,  94, 0 },
+	/* IEEE80211_MODE_FH    */ { 2, 4,  5, 188, 0 },
+	/* IEEE80211_MODE_TURBO */ { 2, 2,  3,  94, 0 },
+	/* IEEE80211_MODE_TURBO */ { 2, 2,  3,  94, 0 }};
+	static struct phyParamType bssPhyParamForAC_VO[IEEE80211_MODE_MAX] = {
+	/* IEEE80211_MODE_AUTO  */ { 2, 2,  3,  47, 0 },
+	/* IEEE80211_MODE_11A   */ { 2, 2,  3,  47, 0 },
+	/* IEEE80211_MODE_11B   */ { 2, 3,  4, 102, 0 },
+	/* IEEE80211_MODE_11G   */ { 2, 2,  3,  47, 0 },
+	/* IEEE80211_MODE_FH    */ { 2, 3,  4, 102, 0 },
+	/* IEEE80211_MODE_TURBO */ { 1, 2,  2,  47, 0 },
+	/* IEEE80211_MODE_TURBO */ { 1, 2,  2,  47, 0 }};
+
+	int i;
+
+	IEEE80211_LOCK_ASSERT(ic);
+
+	mode = IEEE80211_MODE_AUTO;
+	for (i = 0; i < WME_NUM_AC; i++) {
+		switch (i) {
+		case WME_AC_BK:
+			pPhyParam = &phyParamForAC_BK[mode];
+			pBssPhyParam = &phyParamForAC_BK[mode];
+			break;
+		case WME_AC_VI:
+			pPhyParam = &phyParamForAC_VI[mode];
+			pBssPhyParam = &bssPhyParamForAC_VI[mode];
+			break;
+		case WME_AC_VO:
+			pPhyParam = &phyParamForAC_VO[mode];
+			pBssPhyParam = &bssPhyParamForAC_VO[mode];
+			break;
+		case WME_AC_BE:
+		default:
+			pPhyParam = &phyParamForAC_BE[mode];
+			pBssPhyParam = &bssPhyParamForAC_BE[mode];
+			break;
+		}
+
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+				vap->iv_opmode == IEEE80211_M_WDS) {
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_acm =
+				pPhyParam->acm;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_aifsn =
+				pPhyParam->aifsn;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmin =
+				pPhyParam->logcwmin;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmax =
+				pPhyParam->logcwmax;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_txopLimit =
+				pPhyParam->txopLimit;
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_acm =
+				pBssPhyParam->acm;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_aifsn =
+				pBssPhyParam->aifsn;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmin =
+				pBssPhyParam->logcwmin;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmax =
+				pBssPhyParam->logcwmax;
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_txopLimit =
+				pBssPhyParam->txopLimit;
+		}
+		wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_acm =
+			pBssPhyParam->acm;
+		wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_aifsn =
+			pBssPhyParam->aifsn;
+		wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_logcwmin =
+			pBssPhyParam->logcwmin;
+		wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_logcwmax =
+			pBssPhyParam->logcwmax;
+		wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_txopLimit =
+			pBssPhyParam->txopLimit;
+	}
+
+	for (i = 0; i < WME_NUM_AC; i++) {
+		wme->wme_chanParams.cap_wmeParams[i].wmm_aifsn =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_aifsn;
+		wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmin =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmin;
+		wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmax =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmax;
+		wme->wme_chanParams.cap_wmeParams[i].wmm_txopLimit =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_txopLimit;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_aifsn =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_aifsn;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmin =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_logcwmin;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmax =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_logcwmax;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_txopLimit =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_txopLimit;
+	}
+	/* Set version to 1 so all STAs will pick up the AP parameters */
+	wme->wme_bssChanParams.cap_info_count = 1;
+	wme->wme_wmeBssChanParams.cap_info_count = 1;
+	wme->wme_chanParams.cap_info_count = 1;
+	wme->wme_wmeChanParams.cap_info_count = 1;
+
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		memcpy(&ic->ic_wme, &vap->iv_wme, sizeof(ic->ic_wme));
+}
+
+/*
+ * Update WME parameters for ourself and the BSS.
+ *
+ * Copies the per-AC access parameters from the "master" sets
+ * (wme_wmeChanParams / wme_wmeBssChanParams) into the active sets
+ * (wme_chanParams / wme_bssChanParams) and propagates cap_info_count.
+ * The _locked suffix indicates the caller must already hold the
+ * appropriate ieee80211com lock.
+ */
+void
+ieee80211_wme_updateparams_locked(struct ieee80211vap *vap)
+{
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+	int i;
+
+	/* set up the channel access parameters for the physical device */
+	for (i = 0; i < WME_NUM_AC; i++) {
+		wme->wme_chanParams.cap_wmeParams[i].wmm_acm =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_acm;
+		wme->wme_chanParams.cap_wmeParams[i].wmm_aifsn =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_aifsn;
+		wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmin =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmin;
+		wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmax =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_logcwmax;
+		wme->wme_chanParams.cap_wmeParams[i].wmm_txopLimit =
+			wme->wme_wmeChanParams.cap_wmeParams[i].wmm_txopLimit;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_acm =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_acm;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_aifsn =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_aifsn;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmin =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_logcwmin;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmax =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_logcwmax;
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_txopLimit =
+			wme->wme_wmeBssChanParams.cap_wmeParams[i].wmm_txopLimit;
+	}
+	wme->wme_bssChanParams.cap_info_count = wme->wme_wmeBssChanParams.cap_info_count;
+	wme->wme_chanParams.cap_info_count = wme->wme_wmeChanParams.cap_info_count;
+
+	/* For AP, wrap cap info count - new parameters may be broadcast */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+#define IEEE80211_MAX_CAP_INFO_COUNT 0xF
+		/* wrap at 0xF — presumably the 4-bit WME parameter set count
+		 * field in the beacon IE; confirm against IE encoding */
+		if (wme->wme_chanParams.cap_info_count > IEEE80211_MAX_CAP_INFO_COUNT) {
+			wme->wme_wmeChanParams.cap_info_count = 0;
+			wme->wme_chanParams.cap_info_count = 0;
+		}
+		if (wme->wme_bssChanParams.cap_info_count > IEEE80211_MAX_CAP_INFO_COUNT) {
+			wme->wme_wmeBssChanParams.cap_info_count = 0;
+			wme->wme_bssChanParams.cap_info_count = 0;
+		}
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+		"%s: WME params updated, cap_info 0x%x\n", __func__,
+		vap->iv_opmode == IEEE80211_M_STA ?
+			wme->wme_wmeChanParams.cap_info_count :
+			wme->wme_bssChanParams.cap_info_count);
+}
+
+/*
+ * Locking wrapper around ieee80211_wme_updateparams_locked().
+ * When sync_chan_wme is non-zero, also syncs the per-channel WME state.
+ * If the vap is in RUN state the new parameters are pushed out:
+ * beacon update for AP mode, WMM parameter update for STA mode.
+ */
+void
+ieee80211_wme_updateparams(struct ieee80211vap *vap, int sync_chan_wme)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	IEEE80211_LOCK(ic);
+	ieee80211_wme_updateparams_locked(vap);
+	if (sync_chan_wme)
+		ieee80211_vap_sync_chan_wmestate(vap);
+	IEEE80211_UNLOCK(ic);
+
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+		(vap->iv_state == IEEE80211_S_RUN)) {
+
+		ic->ic_beacon_update(vap);
+	} else if ((vap->iv_opmode == IEEE80211_M_STA) &&
+		(vap->iv_state == IEEE80211_S_RUN)) {
+
+		ic->ic_wmm_params_update(vap);
+	}
+}
+EXPORT_SYMBOL(ieee80211_wme_updateparams);
+
+/*
+ * Update WME parameters with deltas, for ourself and the BSS.
+ *
+ * Applies the compile-time IEEE80211_DYN_WMM_*_DELTA offsets to the
+ * active channel-access parameters, clamping the local (self) values
+ * to per-AC minima and the BSS (advertised) values to per-AC maxima.
+ * Caller must already hold the appropriate ieee80211com lock.
+ */
+void
+ieee80211_wme_updateparams_delta_locked(struct ieee80211vap *vap)
+{
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+	/* per-AC clamp tables: lower bounds for local params, upper bounds
+	 * for BSS params */
+	static const uint8_t local_aifsn_min[WME_NUM_AC] = IEEE80211_DYN_WMM_LOCAL_AIFS_MIN;
+	static const uint8_t local_cwmin_min[WME_NUM_AC] = IEEE80211_DYN_WMM_LOCAL_CWMIN_MIN;
+	static const uint8_t local_cwmax_min[WME_NUM_AC] = IEEE80211_DYN_WMM_LOCAL_CWMAX_MIN;
+	static const uint8_t bss_aifsn_max[WME_NUM_AC] = IEEE80211_DYN_WMM_BSS_AIFS_MAX;
+	static const uint8_t bss_cwmin_max[WME_NUM_AC] = IEEE80211_DYN_WMM_BSS_CWMIN_MAX;
+	static const uint8_t bss_cwmax_max[WME_NUM_AC] = IEEE80211_DYN_WMM_BSS_CWMAX_MAX;
+	int i;
+
+	/* set up the channel access parameters for the physical device */
+	for (i = 0; i < WME_NUM_AC; i++) {
+		wme->wme_chanParams.cap_wmeParams[i].wmm_aifsn =
+			MAX(wme->wme_chanParams.cap_wmeParams[i].wmm_aifsn +
+				IEEE80211_DYN_WMM_LOCAL_AIFS_DELTA,
+				local_aifsn_min[i]);
+		wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmin =
+			MAX(wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmin +
+				IEEE80211_DYN_WMM_LOCAL_CWMIN_DELTA,
+				local_cwmin_min[i]);
+		wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmax =
+			MAX(wme->wme_chanParams.cap_wmeParams[i].wmm_logcwmax +
+				IEEE80211_DYN_WMM_LOCAL_CWMAX_DELTA,
+				local_cwmax_min[i]);
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_aifsn =
+			MIN(wme->wme_bssChanParams.cap_wmeParams[i].wmm_aifsn +
+				IEEE80211_DYN_WMM_BSS_AIFS_DELTA,
+				bss_aifsn_max[i]);
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmin =
+			MIN(wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmin +
+				IEEE80211_DYN_WMM_BSS_CWMIN_DELTA,
+				bss_cwmin_max[i]);
+		wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmax =
+			MIN(wme->wme_bssChanParams.cap_wmeParams[i].wmm_logcwmax +
+				IEEE80211_DYN_WMM_BSS_CWMAX_DELTA,
+				bss_cwmax_max[i]);
+	}
+	wme->wme_bssChanParams.cap_info_count = wme->wme_wmeBssChanParams.cap_info_count;
+	wme->wme_chanParams.cap_info_count = wme->wme_wmeChanParams.cap_info_count;
+
+	/* For AP, wrap cap info count - new parameters may be broadcast */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+#define IEEE80211_MAX_CAP_INFO_COUNT 0xF
+		if (wme->wme_chanParams.cap_info_count > IEEE80211_MAX_CAP_INFO_COUNT) {
+			wme->wme_wmeChanParams.cap_info_count = 0;
+			wme->wme_chanParams.cap_info_count = 0;
+		}
+		if (wme->wme_bssChanParams.cap_info_count > IEEE80211_MAX_CAP_INFO_COUNT) {
+			wme->wme_wmeBssChanParams.cap_info_count = 0;
+			wme->wme_bssChanParams.cap_info_count = 0;
+		}
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+		"%s: WME params updated, cap_info 0x%x\n", __func__,
+		vap->iv_opmode == IEEE80211_M_STA ?
+			wme->wme_wmeChanParams.cap_info_count :
+			wme->wme_bssChanParams.cap_info_count);
+}
+
+/*
+ * Locking wrapper for applying WME deltas.  Bumps the cap_info counts
+ * (so stations notice the change), then either applies the deltas
+ * (apply_delta != 0) or restores the master parameters, and finally
+ * copies the resulting channel params into the global ic state.
+ * Parameters are pushed out as in ieee80211_wme_updateparams().
+ */
+void
+ieee80211_wme_updateparams_delta(struct ieee80211vap *vap, uint8_t apply_delta)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+
+	IEEE80211_LOCK(ic);
+	wme->wme_wmeBssChanParams.cap_info_count++;
+	wme->wme_wmeChanParams.cap_info_count++;
+	if (apply_delta) {
+		ieee80211_wme_updateparams_delta_locked(vap);
+	} else {
+		ieee80211_wme_updateparams_locked(vap);
+	}
+	/* sync to global */
+	memcpy(&ic->ic_wme.wme_chanParams, &vap->iv_wme.wme_chanParams,
+			sizeof(ic->ic_wme.wme_chanParams));
+	IEEE80211_UNLOCK(ic);
+
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+		(vap->iv_state == IEEE80211_S_RUN)) {
+
+		ic->ic_beacon_update(vap);
+	} else if ((vap->iv_opmode == IEEE80211_M_STA) &&
+		(vap->iv_state == IEEE80211_S_RUN)) {
+
+		ic->ic_wmm_params_update(vap);
+	}
+}
+EXPORT_SYMBOL(ieee80211_wme_updateparams_delta);
+
+
+/*
+ * Start a vap.  If this is the first vap running on the
+ * underlying device then we first bring it up.
+ *
+ * Wakes the device from power-save if needed, marks the interface
+ * IFF_RUNNING (calling ic_init on the first transition), and kicks the
+ * 802.11 state machine appropriately for the vap's operating mode.
+ * forcescan forces a STA vap into SCAN even when already in RUN.
+ * Always returns 0.
+ */
+int
+ieee80211_init(struct net_device *dev, int forcescan)
+{
+/* NOTE(review): IS_RUNNING is defined and #undef'd but never used in
+ * this function — candidate for removal */
+#define	IS_RUNNING(_dev) \
+	((_dev->flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	IEEE80211_DPRINTF(vap,
+		IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+		"start running (state=%d)\n", vap->iv_state);
+
+	if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_DUTY) {
+		/* wake up the device once it is brought up */
+		pm_qos_update_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_WLAN, BOARD_PM_LEVEL_NO);
+                ic->ic_pm_reason = IEEE80211_PM_LEVEL_DEVICE_INIT;
+		ieee80211_pm_queue_work(ic);
+	}
+
+	if ((dev->flags & IFF_RUNNING) == 0) {
+		ic->ic_init(ic);
+
+		/*
+		 * Mark us running.  Note that we do this after
+		 * opening the parent device to avoid recursion.
+		 */
+		dev->flags |= IFF_RUNNING;		/* mark us running */
+	}
+
+	/*
+	 * If the parent is up and running, then kick the
+	 * 802.11 state machine as appropriate.
+	 * XXX parent should always be up+running
+	 */
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		/*
+		 * Try to be intelligent about clocking the state
+		 * machine.  If we're currently in RUN state then
+		 * we should be able to apply any new state/parameters
+		 * simply by re-associating.  Otherwise we need to
+		 * re-scan to select an appropriate ap.
+		 */
+		if ((ic->ic_roaming != IEEE80211_ROAMING_MANUAL) && ieee80211_should_scan(vap)) {
+			if (vap->iv_state != IEEE80211_S_RUN || forcescan)
+				ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+			else
+				ieee80211_new_state(vap, IEEE80211_S_ASSOC, 1);
+		}
+	} else {
+		/*
+		 * When the old state is running the vap must
+		 * be brought to init.
+		 */
+		if (vap->iv_state == IEEE80211_S_RUN)
+			ieee80211_new_state(vap, IEEE80211_S_INIT, -1);
+		/*
+		 * For monitor+wds modes there's nothing to do but
+		 * start running.  Otherwise, if this is the first
+		 * vap to be brought up, start a scan which may be
+		 * preempted if the station is locked to a particular
+		 * channel.
+		 */
+		if (vap->iv_opmode == IEEE80211_M_MONITOR ||
+		    vap->iv_opmode == IEEE80211_M_WDS) {
+			ieee80211_new_state(vap, IEEE80211_S_RUN, -1);
+		} else
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	}
+	return 0;
+#undef IS_RUNNING
+}
+
+/*
+ * Netdev open handler: start the vap without forcing a rescan.
+ */
+int
+ieee80211_open(struct net_device *dev)
+{
+	return ieee80211_init(dev, 0);
+}
+EXPORT_SYMBOL(ieee80211_open);
+
+/*
+ * Start all runnable vap's on a device.
+ *
+ * A vap is "runnable" if its interface is administratively up (IFF_UP)
+ * but not yet running (IFF_RUNNING); ieee80211_open() is called for
+ * each such vap.
+ */
+void
+ieee80211_start_running(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+	struct net_device *dev;
+
+	/* XXX locking */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		dev = vap->iv_dev;
+		if ((dev->flags & IFF_UP) && !(dev->flags & IFF_RUNNING))	/* NB: avoid recursion */
+		{
+			ieee80211_open(dev);
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_start_running);
+
+/*
+ * Stop a vap.  We force it down using the state machine
+ * then mark it's device not running.  If this is the last
+ * vap running on the underlying device then we close it
+ * too to ensure it will be properly initialized when the
+ * next vap is brought up.
+ *
+ * Also cancels all per-vap timers (swbmiss, swberp, mgtsend,
+ * test traffic, fast rejoin) and tears down the ppqueue so no
+ * callbacks fire after the interface is marked stopped.
+ * Always returns 0.
+ */
+int
+ieee80211_stop(struct net_device *dev)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	IEEE80211_DPRINTF(vap,
+		IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+		"%s, caller: %p\n", "stop running", __builtin_return_address(0));
+
+	ieee80211_new_state(vap, IEEE80211_S_INIT, -1);
+	if (dev->flags & IFF_RUNNING) {
+		dev->flags &= ~IFF_RUNNING;		/* mark us stopped */
+		/* drop any pending mgmt-frame retry state */
+		vap->iv_mgmt_retry_ni = NULL;
+		vap->iv_mgmt_retry_cnt = 0;
+		del_timer(&vap->iv_swbmiss);
+		del_timer(&vap->iv_swberp);
+		del_timer(&vap->iv_mgtsend);
+		del_timer(&vap->iv_test_traffic);
+		ieee80211_ppqueue_deinit(vap);
+		del_timer(&vap->iv_sta_fast_rejoin);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_stop);
+
+/*
+ * Stop all vap's running on a device.
+ *
+ * Counterpart to ieee80211_start_running(): calls ieee80211_stop()
+ * on every vap whose interface is currently IFF_RUNNING.
+ */
+void
+ieee80211_stop_running(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+	struct net_device *dev;
+
+	/* XXX locking */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		dev = vap->iv_dev;
+		if (dev->flags & IFF_RUNNING)	/* NB: avoid recursion */
+			ieee80211_stop(dev);
+	}
+}
+EXPORT_SYMBOL(ieee80211_stop_running);
+
+
+/*
+ * Handle a beacon-miss indication for every associated STA vap.
+ *
+ * Skipped entirely while a scan is in progress.  For each STA vap in
+ * RUN state: with auto-roaming, try to reassociate; otherwise, once
+ * the node inactivity counter reaches zero, report the disconnect and
+ * drop to SCAN; until then just re-arm the software beacon-miss timer.
+ */
+void
+ieee80211_beacon_miss(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+
+	if (ic->ic_flags & IEEE80211_F_SCAN) {
+		/* XXX check ic_curchan != ic_bsschan? */
+		return;
+	}
+	/* XXX locking */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		IEEE80211_DPRINTF(vap,
+			IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+			"%s\n", "beacon miss");
+
+		/*
+		 * Our handling is only meaningful for stations that are
+		 * associated; any other conditions else will be handled
+		 * through different means (e.g. the tx timeout on mgt frames).
+		 */
+		if (vap->iv_opmode != IEEE80211_M_STA ||
+		    vap->iv_state != IEEE80211_S_RUN)
+			continue;
+		if (ic->ic_roaming == IEEE80211_ROAMING_AUTO) {
+			/*
+			 * Try to reassociate before scanning for a new ap.
+			 */
+			ieee80211_new_state(vap, IEEE80211_S_ASSOC, 1);
+		} else if (vap->iv_bss->ni_inact == 0) {
+			/* Send log message out */
+			ieee80211_dot11_msg_send(vap,
+					(char *)vap->iv_bss->ni_macaddr,
+					d11_m[IEEE80211_DOT11_MSG_AP_DISCONNECTED],
+					d11_c[IEEE80211_DOT11_MSG_REASON_BEACON_LOSS],
+					-1,
+					NULL,
+					NULL,
+					NULL);
+			/*
+			 * Somebody else is controlling state changes (e.g.
+			 * a user-mode app) don't do anything that would
+			 * confuse them; just drop into scan mode so they'll
+			 * notified of the state change and given control.
+			 */
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+		} else {
+			mod_timer(&vap->iv_swbmiss, jiffies + vap->iv_swbmiss_period);
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_beacon_miss);
+
+/*
+ * Software OBSS erp timer
+ * This timeout function is called when the last non-ERP BSS
+ * disappears from the range
+ *
+ * If 11b/g protection is still enabled on an AP vap but no non-ERP
+ * stations remain, clear IEEE80211_F_USEPROT, flag an ERP update,
+ * tell the MuC (firmware) to disable ERP and refresh the beacon.
+ * 'arg' is the vap pointer cast through the timer's unsigned long.
+ */
+void
+ieee80211_swberp(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (IEEE80211_BG_PROTECT_ENABLED(ic) && vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		if (!ic->ic_nonerpsta && (ic->ic_flags & IEEE80211_F_USEPROT)) {
+			ic->ic_flags &= ~IEEE80211_F_USEPROT;
+			ic->ic_flags_ext |= IEEE80211_FEXT_ERPUPDATE;
+
+			/* tell Muc to turn off ERP now */
+			ic->ic_set_11g_erp(vap, 0);
+			ic->ic_beacon_update(vap);
+		}
+	}
+}
+
+/*
+ * Software beacon timer callback. In STA mode this timer is triggered when we
+ * have a series of beacon misses, up to IEEE80211_SWBMISS_WARNINGS times
+ * before finally triggering the beacon missed processing. See
+ * ieee80211_recv_mgmt which updates the timer when beacons are properly
+ * received.
+ *
+ * In HOSTAP mode, this function is used to update the beacons when there are
+ * no non-HT BSSes on the channel.
+ */
+void
+ieee80211_swbmiss(unsigned long arg)
+{
+	/* 'arg' is the vap pointer cast through the timer's unsigned long */
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		if (ic->ic_non_ht_non_member) {
+			ic->ic_non_ht_non_member = 0;
+			vap->iv_ht_flags |= IEEE80211_HTF_HTINFOUPDATE;
+			ic->ic_beacon_update(vap);
+		}
+	} else {
+		if (vap->iv_swbmiss_warnings) {
+			char buf[64];
+
+			/* account the misses represented by one warning interval */
+			ic->ic_iwstats.miss.beacon += (vap->iv_bcn_miss_thr / IEEE80211_SWBMISS_WARNINGS);
+			snprintf(buf, sizeof(buf),
+					"Missed at least %d consecutive beacons",
+					(IEEE80211_SWBMISS_WARNINGS - vap->iv_swbmiss_warnings + 1) *
+					(vap->iv_bcn_miss_thr / IEEE80211_SWBMISS_WARNINGS));
+			ieee80211_dot11_msg_send(vap,
+					(char *)vap->iv_bss->ni_macaddr,
+					buf,
+					d11_c[IEEE80211_DOT11_MSG_REASON_BEACON_LOSS],
+					-1,
+					NULL,
+					NULL,
+					NULL);
+			mod_timer(&vap->iv_swbmiss, jiffies + vap->iv_swbmiss_period);
+
+#if defined(QBMPS_ENABLE)
+			if ((vap->iv_swbmiss_warnings + 2) <= IEEE80211_SWBMISS_WARNINGS) {
+				/* 3 swbmiss warnings received */
+				/* exit power-saving mode to help recover from beacon missing */
+				vap->iv_swbmiss_bmps_warning = 1;
+				if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_DUTY) {
+					/* wake up the device once it is brought up */
+			                ic->ic_pm_reason = IEEE80211_PM_LEVEL_SWBCN_MISS_2;
+					ieee80211_pm_queue_work(ic);
+				}
+			}
+#endif
+			/* if we've not hit the limit yet, do nothing */
+			if (--vap->iv_swbmiss_warnings)
+				return;
+		}
+
+		if (vap->iv_link_loss_enabled)
+			ieee80211_beacon_miss(vap->iv_ic);
+	}
+}
+
+/*
+ * Per-ieee80211vap watchdog timer callback.  This
+ * is used only to timeout the xmit of management frames.
+ *
+ * While retries remain (up to IEEE80211_MAX_MGMT_RETRY) the pending
+ * management frame is re-sent; once exhausted the retry state is
+ * cleared and, unless we are in INIT or already scanning, the vap is
+ * dropped to SCAN with reason IEEE80211_SCAN_FAIL_TIMEOUT.
+ */
+static void
+ieee80211_tx_timeout(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+		"%s: state %s%s\n", __func__,
+		ieee80211_state_name[vap->iv_state],
+		vap->iv_ic->ic_flags & IEEE80211_F_SCAN ? ", scan active" : "");
+
+	if (vap->iv_mgmt_retry_ni && vap->iv_mgmt_retry_cnt++ < IEEE80211_MAX_MGMT_RETRY) {
+		ieee80211_send_mgmt(vap->iv_mgmt_retry_ni, vap->iv_mgmt_retry_type, vap->iv_mgmt_retry_arg);
+	} else {
+		vap->iv_mgmt_retry_ni = NULL;
+		vap->iv_mgmt_retry_cnt = 0;
+
+		if (vap->iv_state != IEEE80211_S_INIT &&
+			(vap->iv_ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+			/*
+			 * NB: it's safe to specify a timeout as the reason here;
+			 *     it'll only be used in the right state.
+			 */
+			ieee80211_new_state(vap, IEEE80211_S_SCAN,
+				IEEE80211_SCAN_FAIL_TIMEOUT);
+		}
+	}
+}
+
+/*
+ * Node-iterator callback: send a (QoS) null data frame to each node
+ * that belongs to this vap and is associated (ni_associd != 0).
+ * QoS-capable nodes get a QoS-null on the BK access category.
+ * 'arg' is the vap being iterated for.
+ */
+static void
+ieee80211_proto_sta_null_pkts(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = arg;
+
+	if (ni->ni_vap == vap && ni->ni_associd != 0) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+			"%s: Sending null pkt to %p<%s>, refcnt %d\n", __func__, ni,
+			ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni));
+
+		/* NOTE(review): node is referenced here; presumably the send
+		 * path consumes the reference — confirm against
+		 * ieee80211_send_(qos)nulldata */
+		ieee80211_ref_node(ni);
+		if (ni->ni_flags & IEEE80211_NODE_QOS) {
+			ieee80211_send_qosnulldata(ni, WME_AC_BK);
+		} else {
+			ieee80211_send_nulldata(ni);
+		}
+	}
+}
+
+/*
+ * Per-ieee80211vap test traffic timer callback.  This
+ * is used only to periodically sending null packets to
+ * all associated STAs.
+ *
+ * Re-arms itself every iv_test_traffic_period jiffies; a period of
+ * zero stops the cycle.
+ */
+static void
+ieee80211_test_traffic_timeout(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+		"%s: test traffic timeout on %s, period %d ms.\n", __func__,
+		vap->iv_dev->name, jiffies_to_msecs(vap->iv_test_traffic_period));
+
+	ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, ieee80211_proto_sta_null_pkts, vap, 1);
+
+	if (vap->iv_test_traffic_period)
+		mod_timer(&vap->iv_test_traffic, jiffies + vap->iv_test_traffic_period);
+}
+
+/*
+ * Node-iterator callback: disassociate every associated node belonging
+ * to the given vap (reason ASSOC_LEAVE) and remove it from the BSS.
+ */
+static void
+sta_disassoc(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = arg;
+
+	if (ni->ni_vap == vap && ni->ni_associd != 0) {
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC,
+			IEEE80211_REASON_ASSOC_LEAVE);
+		ieee80211_node_leave(ni);
+	}
+}
+
+/*
+ * Disassociate a single associated node on this vap with reason
+ * ASSOC_TOOMANY and remove it from the BSS.  No-op if the node is
+ * not associated or belongs to another vap.
+ */
+void
+ieee80211_disconnect_node(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	if (ni->ni_vap == vap && ni->ni_associd != 0) {
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC,
+			IEEE80211_REASON_ASSOC_TOOMANY);
+		ieee80211_node_leave(ni);
+	}
+}
+EXPORT_SYMBOL(ieee80211_disconnect_node);
+
+/*
+ * Node-iterator callback: deauthenticate every node belonging to the
+ * given vap (reason ASSOC_LEAVE).  Unlike sta_disassoc() this does not
+ * require the node to be associated and does not call node_leave.
+ */
+static void
+sta_deauth(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = arg;
+
+	if (ni->ni_vap == vap)
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+			IEEE80211_REASON_ASSOC_LEAVE);
+}
+
+/*
+ * Context: softIRQ (tasklet) and process
+ *
+ * Serialized entry point into the vap state machine: takes the
+ * per-device vaps lock so only one vap transitions at a time, then
+ * dispatches to the vap's iv_newstate handler.  Returns the handler's
+ * result.
+ */
+int
+ieee80211_new_state(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int rc;
+
+	/* grab the lock so that only one vap can go through transition at any time */
+	IEEE80211_VAPS_LOCK_BH(ic);
+	rc = vap->iv_newstate(vap, nstate, arg);
+	IEEE80211_VAPS_UNLOCK_BH(ic);
+	return rc;
+}
+EXPORT_SYMBOL(ieee80211_new_state);
+
+/*
+ * Create and initialize the peer node for a WDS vap.
+ *
+ * No-op when no WDS peer MAC is configured.  Allocates a node for
+ * vap->wds_mac, acquires an AID, registers the WDS address mapping and
+ * then authorizes/configures the node (capabilities and tx power are
+ * inherited from the first vap's BSS node).  Any pre-configured WDS
+ * peer key is installed.  Since WDS peers skip the normal association
+ * procedure, the association counters are bumped here directly.
+ */
+static void
+ieee80211_create_wds_node(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = TAILQ_FIRST(&ic->ic_vaps)->iv_bss;
+	struct ieee80211_node *wds_ni;
+
+	if (IEEE80211_ADDR_NULL(vap->wds_mac))
+		return;
+
+	wds_ni = ieee80211_alloc_node(&ic->ic_sta, vap, vap->wds_mac, "wds peer");
+	if (wds_ni == NULL) {
+		printk(KERN_WARNING "%s: couldn't create WDS node for %s\n",
+				vap->iv_dev->name, ether_sprintf(vap->wds_mac));
+		return;
+	}
+
+	if (ieee80211_aid_acquire(ic, wds_ni)) {
+		ieee80211_free_node(wds_ni);
+		return;
+	}
+
+	if (ieee80211_add_wds_addr(&ic->ic_sta, wds_ni, vap->wds_mac, 1) == 0) {
+		ieee80211_node_authorize(wds_ni);
+		ieee80211_node_set_chan(ic, wds_ni);
+		wds_ni->ni_capinfo = ni->ni_capinfo;
+		wds_ni->ni_txpower = ni->ni_txpower;
+		wds_ni->ni_ath_flags = vap->iv_ath_cap;
+		/* WDS peer is assumed QoS + HT capable but not VHT */
+		wds_ni->ni_flags |= IEEE80211_NODE_QOS;
+		wds_ni->ni_flags |= IEEE80211_NODE_HT;
+		wds_ni->ni_flags &= ~IEEE80211_NODE_VHT;
+		wds_ni->ni_flags |= IEEE80211_NODE_WDS_PEER;
+		wds_ni->ni_vendor = PEER_VENDOR_QTN;
+		wds_ni->ni_node_type = IEEE80211_NODE_TYPE_WDS;
+		wds_ni->ni_start_time_assoc = get_jiffies_64();
+		if (ic->ic_newassoc != NULL)
+			ic->ic_newassoc(wds_ni, 1);
+		if (vap->iv_wds_peer_key.wk_keylen != 0) {
+			memcpy(&wds_ni->ni_ucastkey, &vap->iv_wds_peer_key, sizeof(vap->iv_wds_peer_key));
+			ieee80211_key_update_begin(vap);
+			vap->iv_key_set(vap, &vap->iv_wds_peer_key, vap->wds_mac);
+			ieee80211_key_update_end(vap);
+		}
+		/*
+		 * WDS node is different from other associated nodes,
+		 * no association procedure.
+		 * Update these counters when wds node created.
+		 */
+		IEEE80211_LOCK_IRQ(ic);
+		ieee80211_sta_assocs_inc(vap, __func__);
+		ic->ic_wds_links++;
+		/*
+		 * Don't call ieee80211_pm_queue_work here.
+		 * We will handle PS in WDS inactivity and BA handling
+		 */
+		IEEE80211_UNLOCK_IRQ(ic);
+
+		/* enable peer RTS once enough QTN stations are present */
+		if ((ic->ic_peer_rts_mode == IEEE80211_PEER_RTS_PMP) &&
+			((ic->ic_sta_assoc - ic->ic_nonqtn_sta) >= IEEE80211_MAX_STA_CCA_ENABLED)) {
+
+			ic->ic_peer_rts = 1;
+			ieee80211_beacon_update_all(ic);
+		}
+
+	}
+	/* drop the reference taken by ieee80211_alloc_node */
+	ieee80211_free_node(wds_ni);
+}
+
+/*
+ * Prepare initial-CAC (channel availability check) selection state
+ * based on the configured maximum boot-time CAC duration.
+ *
+ * Duration == 0: no boot CAC budget — if no desired channel is set,
+ * restrict scanning to non-DFS channels; otherwise remember the
+ * desired channel to switch to after the initial scan.
+ * Duration > 0: record the desired channel (if any) to switch to
+ * after the initial CAC completes.
+ */
+static void ieee80211_icac_select(struct ieee80211com *ic, uint32_t *scan_flags)
+{
+	if (ic->ic_get_init_cac_duration(ic) == 0) {
+		if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
+			/* Select only non-DFS channel at the end; when max_boot_cac is zero */
+			*scan_flags |= IEEE80211_SCAN_NO_DFS;
+		} else {
+			ic->ic_des_chan_after_init_scan = ic->ic_des_chan->ic_ieee;
+			ic->ic_des_chan_after_init_cac = 0;
+		}
+	} else if (ic->ic_get_init_cac_duration(ic) > 0) {
+		/* Save ic->ic_des_chan into ic->ic_des_chan_after_init_cac */
+		ic->ic_des_chan_after_init_scan = 0;
+		ic->ic_des_chan_after_init_cac = (ic->ic_des_chan == IEEE80211_CHAN_ANYC) ?
+						0 : ic->ic_des_chan->ic_ieee;
+	}
+}
+
+/*
+ * Common teardown when a STA vap leaves RUN state: decrement the
+ * association counters, detach from the BSS node, free all TDLS
+ * peers and restore the channel bandwidth.
+ */
+static void
+ieee80211_sta_leave_run_state(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	ieee80211_sta_assocs_dec(vap, __func__);
+	ieee80211_nonqtn_sta_leave(vap, ni, __func__);
+
+	ieee80211_sta_leave(ni);
+	ieee80211_tdls_free_all_peers(vap);
+	ieee80211_restore_bw(vap, ic);
+}
+
+static int
+__ieee80211_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	enum ieee80211_state ostate;
+	uint32_t scan_flags = 0;
+
+	ostate = vap->iv_state;
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s\n", __func__,
+		ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
+	vap->iv_state = nstate;			/* state transition */
+
+	/* Legitimate state transition when shutting down; no BSS will be present */
+	if (IEEE80211_S_INIT == nstate && IEEE80211_S_INIT == ostate) {
+		return 0;
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_WDS && ni == NULL) {
+		ni = TAILQ_FIRST(&ic->ic_vaps)->iv_bss;
+	}
+
+	KASSERT(ni, ("no bss node"));
+	ieee80211_ref_node(ni);
+
+	vap->iv_mgmt_retry_ni = NULL;
+	vap->iv_mgmt_retry_cnt = 0;
+	del_timer(&vap->iv_mgtsend);
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+			vap->iv_opmode != IEEE80211_M_WDS &&
+			ostate != IEEE80211_S_SCAN) {
+		ieee80211_cancel_scan(vap);	/* background scan */
+	}
+
+	switch (nstate) {
+	case IEEE80211_S_INIT:
+		switch (ostate) {
+		case IEEE80211_S_INIT:
+			break;
+		case IEEE80211_S_RUN:
+			if (vap->iv_opmode == IEEE80211_M_STA) {
+				printk(KERN_WARNING "%s: disassociated from AP %s\n",
+					vap->iv_dev->name, ether_sprintf(ni->ni_macaddr));
+
+				vap->iv_flags_ext &= ~IEEE80211_FEXT_AP_TDLS_PROHIB;
+				vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_CS_PROHIB;
+				ieee80211_scan_remove(vap);
+				if (IEEE80211_REASON_DISASSOC_BAD_SUPP_CHAN == arg) {
+					IEEE80211_SEND_MGMT(ni,
+						IEEE80211_FC0_SUBTYPE_DISASSOC,
+						arg);
+				} else {
+					IEEE80211_SEND_MGMT(ni,
+						IEEE80211_FC0_SUBTYPE_DISASSOC,
+						IEEE80211_REASON_ASSOC_LEAVE);
+				}
+				/*
+				 * FIXME: This is nasty, but the simplest method to ensure the disassoc is sent.
+				 * FIXME: Revisit this when we have designed a more robust host->MuC synchronisation
+				 * mechanism.
+				 */
+				ieee80211_safe_wait_ms(50, !in_interrupt());
+				ieee80211_sta_leave_run_state(vap, ni);
+			} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+					vap->iv_opmode == IEEE80211_M_WDS) {
+				ieee80211_iterate_nodes(&ic->ic_sta,
+					sta_disassoc, vap, 1);
+			}
+			ieee80211_reset_bss(vap);
+			break;
+		case IEEE80211_S_ASSOC:
+			if (vap->iv_opmode == IEEE80211_M_STA) {
+				IEEE80211_SEND_MGMT(ni,
+					IEEE80211_FC0_SUBTYPE_DEAUTH,
+					IEEE80211_REASON_AUTH_LEAVE);
+			} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				ieee80211_iterate_nodes(&ic->ic_sta,
+					sta_deauth, vap, 1);
+			}
+			ieee80211_reset_bss(vap);
+			break;
+		case IEEE80211_S_SCAN:
+			ieee80211_cancel_scan(vap);
+			ieee80211_reset_bss(vap);
+			break;
+		case IEEE80211_S_AUTH:
+			ieee80211_reset_bss(vap);
+			break;
+		}
+
+		if (vap->iv_auth->ia_detach != NULL)
+			vap->iv_auth->ia_detach(vap);
+		break;
+	case IEEE80211_S_SCAN:
+		switch (ostate) {
+		case IEEE80211_S_INIT:
+		createibss:
+			scan_flags |= IEEE80211_SCAN_FLUSH | IEEE80211_SCAN_ACTIVE |
+				IEEE80211_SCAN_PICK1ST | (IEEE80211_USE_QTN_BGSCAN(vap) ?
+				IEEE80211_SCAN_QTN_BGSCAN : 0);
+
+			if ((vap->iv_opmode == IEEE80211_M_IBSS ||
+					vap->iv_opmode == IEEE80211_M_WDS ||
+					vap->iv_opmode == IEEE80211_M_AHDEMO) &&
+					(ic->ic_des_chan != IEEE80211_CHAN_ANYC)) {
+				ieee80211_create_bss(vap, ic->ic_des_chan);
+				break;
+			} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				if (ic->ic_get_init_cac_duration(ic) < 0) {
+					/* Always do a scan to get a sense of environment after creating BSS */
+					if (ic->ic_des_chan != IEEE80211_CHAN_ANYC)
+						ic->ic_des_chan_after_init_scan = ic->ic_des_chan->ic_ieee;
+				} else {
+					/*
+					 * Always do a scan, when max_boot_cac >= 0;
+					 * Initial CAC is done if max_boot_cac >= <1 CAC period>.
+					 */
+					ieee80211_icac_select(ic, &scan_flags);
+				}
+			}
+
+			if (ieee80211_chan_selection_allowed(ic))
+				ieee80211_start_chanset_selection(vap, scan_flags);
+			else
+				ieee80211_check_scan(vap, scan_flags, IEEE80211_SCAN_FOREVER,
+					vap->iv_des_nssid, vap->iv_des_ssid, NULL);
+			break;
+		case IEEE80211_S_SCAN:
+		case IEEE80211_S_AUTH:
+		case IEEE80211_S_ASSOC:
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				/*
+				 * Scan->Scan:
+				 * This can happen when channel scanning is on going and
+				 * channel is explicitly set by user at this moment.
+				 * If the desired channel is already set, startup immediately;
+				 * If not set, do channel re-scan.
+				 */
+				if ((ostate == IEEE80211_S_SCAN) &&
+						(ic->ic_des_chan != IEEE80211_CHAN_ANYC) &&
+						!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER) &&
+						(!(ic->ic_des_chan->ic_flags & IEEE80211_CHAN_DFS) ||
+						(ic->ic_des_chan->ic_flags & IEEE80211_CHAN_DFS_CAC_DONE))) {
+					ieee80211_create_bss(vap, ic->ic_des_chan);
+				} else {
+					(void) ieee80211_check_scan(vap,
+						IEEE80211_SCAN_FLUSH | IEEE80211_SCAN_PICK1ST,
+						IEEE80211_SCAN_FOREVER,
+						vap->iv_des_nssid, vap->iv_des_ssid,
+						NULL);
+				}
+			} else {
+				/*
+				 * These can happen either because of a timeout
+				 * on an assoc/auth response or because of a
+				 * change in state that requires a reset.  For
+				 * the former we're called with a non-zero arg
+				 * that is the cause for the failure; pass this
+				 * to the scan code so it can update state.
+				 * Otherwise trigger a new scan unless we're in
+				 * manual roaming mode in which case an application
+				 * must issue an explicit scan request.
+				 */
+				if (arg != 0)
+					ieee80211_scan_assoc_fail(ic,
+						ni->ni_macaddr, arg);
+				if (ic->ic_roaming == IEEE80211_ROAMING_AUTO)
+				{
+					(void) ieee80211_check_scan(vap,
+						IEEE80211_SCAN_ACTIVE |
+						IEEE80211_SCAN_PICK1ST |
+						(IEEE80211_USE_QTN_BGSCAN(vap) ? IEEE80211_SCAN_QTN_BGSCAN : 0),
+						IEEE80211_SCAN_FOREVER,
+						vap->iv_des_nssid, vap->iv_des_ssid,
+						NULL);
+				}
+			}
+			break;
+		case IEEE80211_S_RUN:		/* beacon miss */
+			if (vap->iv_opmode == IEEE80211_M_STA) {
+				printk(KERN_WARNING "%s: disassociated from AP %s\n",
+					vap->iv_dev->name, ether_sprintf(ni->ni_macaddr));
+				ieee80211_scan_remove(vap);
+				IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC,
+					IEEE80211_REASON_ASSOC_LEAVE);
+				ieee80211_sta_leave_run_state(vap, ni);
+				vap->iv_flags &= ~IEEE80211_F_SIBSS;	/* XXX */
+				if (ic->ic_roaming == IEEE80211_ROAMING_AUTO)
+					(void) ieee80211_check_scan(vap,
+						IEEE80211_SCAN_ACTIVE |
+						IEEE80211_SCAN_PICK1ST |
+						(IEEE80211_USE_QTN_BGSCAN(vap) ? IEEE80211_SCAN_QTN_BGSCAN : 0),
+						IEEE80211_SCAN_FOREVER,
+						vap->iv_des_nssid,
+						vap->iv_des_ssid,
+						NULL);
+			} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+					vap->iv_opmode == IEEE80211_M_WDS) {
+				/* DFS channel switch by CSA, skip disassociation */
+				if (!(ic->ic_flags & IEEE80211_F_CHANSWITCH)) {
+					ieee80211_iterate_nodes(&ic->ic_sta,
+						sta_disassoc, vap, 1);
+				}
+				goto createibss;
+			}
+			break;
+		}
+		break;
+	case IEEE80211_S_AUTH:
+		/* auth frames are possible between IBSS nodes, see 802.11-1999, chapter 5.7.6 */
+		KASSERT(vap->iv_opmode == IEEE80211_M_STA || vap->iv_opmode == IEEE80211_M_IBSS,
+			("switch to %s state when operating in mode %u",
+			 ieee80211_state_name[nstate], vap->iv_opmode));
+		switch (ostate) {
+		case IEEE80211_S_INIT:
+		case IEEE80211_S_SCAN:
+			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 1);
+			break;
+		case IEEE80211_S_AUTH:
+		case IEEE80211_S_ASSOC:
+			switch (arg) {
+			case IEEE80211_FC0_SUBTYPE_AUTH:
+				/* ??? */
+				IEEE80211_SEND_MGMT(ni,
+					IEEE80211_FC0_SUBTYPE_AUTH, 2);
+				break;
+			case IEEE80211_FC0_SUBTYPE_DEAUTH:
+				IEEE80211_SEND_MGMT(ni,
+					IEEE80211_FC0_SUBTYPE_AUTH, 1);
+				break;
+			}
+			break;
+		case IEEE80211_S_RUN:
+			printk(KERN_WARNING "%s: deauthenticated from AP %s\n",
+				vap->iv_dev->name, ether_sprintf(ni->ni_macaddr));
+			switch (arg) {
+			case IEEE80211_FC0_SUBTYPE_AUTH:
+				IEEE80211_SEND_MGMT(ni,
+					IEEE80211_FC0_SUBTYPE_AUTH, 2);
+				vap->iv_state = ostate;	/* stay RUN */
+				break;
+			case IEEE80211_FC0_SUBTYPE_DEAUTH:
+				ieee80211_sta_leave_run_state(vap, ni);
+				if (ic->ic_roaming == IEEE80211_ROAMING_AUTO) {
+					/* try to reauth */
+					IEEE80211_SEND_MGMT(ni,
+						IEEE80211_FC0_SUBTYPE_AUTH, 1);
+				}
+				break;
+			}
+			break;
+		}
+		break;
+	case IEEE80211_S_ASSOC:
+		KASSERT(vap->iv_opmode == IEEE80211_M_STA,
+			("switch to %s state when operating in mode %u",
+			 ieee80211_state_name[nstate], vap->iv_opmode));
+		switch (ostate) {
+		case IEEE80211_S_INIT:
+		case IEEE80211_S_SCAN:
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY,
+				"%s: invalid transition\n", __func__);
+			break;
+		case IEEE80211_S_AUTH:
+		case IEEE80211_S_ASSOC:
+			IEEE80211_SEND_MGMT(ni,
+				IEEE80211_FC0_SUBTYPE_ASSOC_REQ, 0);
+			break;
+		case IEEE80211_S_RUN:
+			printk(KERN_WARNING "%s: disassociated from AP %s\n",
+				vap->iv_dev->name, ether_sprintf(ni->ni_macaddr));
+			ieee80211_sta_leave_run_state(vap, ni);
+			if (ic->ic_roaming == IEEE80211_ROAMING_AUTO) {
+				/* NB: caller specifies ASSOC/REASSOC by arg */
+				IEEE80211_SEND_MGMT(ni, arg ?
+					IEEE80211_FC0_SUBTYPE_REASSOC_REQ :
+					IEEE80211_FC0_SUBTYPE_ASSOC_REQ, 0);
+			}
+			break;
+		}
+		break;
+	case IEEE80211_S_RUN:
+		if (vap->iv_flags & IEEE80211_F_WPA) {
+			/* XXX validate prerequisites */
+		}
+
+		switch (ostate) {
+		case IEEE80211_S_INIT:
+			if (vap->iv_opmode == IEEE80211_M_MONITOR ||
+			    vap->iv_opmode == IEEE80211_M_WDS ||
+			    vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				/*
+				 * Already have a channel; bypass the
+				 * scan and startup immediately.
+				 */
+				KASSERT(is_ieee80211_chan_valid(ic->ic_des_chan),
+						("Error: create BSS on an "
+						 "invalid desired channel"));
+				ieee80211_create_bss(vap, ic->ic_des_chan);
+
+				/*
+				 * In wds mode allocate and initialize peer node
+				 */
+				if (vap->iv_opmode == IEEE80211_M_WDS) {
+					ieee80211_create_wds_node(vap);
+					if (IEEE80211_COM_WDS_IS_RBS(ic))
+						ieee80211_beacon_update_all(ic);
+				}
+				break;
+			}
+			/* fall thru... */
+		case IEEE80211_S_AUTH:
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY,
+				"%s: invalid transition\n", __func__);
+			break;
+		case IEEE80211_S_RUN:
+			break;
+		case IEEE80211_S_SCAN:		/* adhoc/hostap mode */
+		case IEEE80211_S_ASSOC:		/* infra mode */
+			KASSERT(ni->ni_txrate < ni->ni_rates.rs_nrates,
+				("%s: bogus xmit rate %u setup\n", __func__,
+				ni->ni_txrate));
+#ifdef IEEE80211_DEBUG
+			if (ieee80211_msg_debug(vap)) {
+				ieee80211_note(vap, "%s with %s ssid ",
+					(vap->iv_opmode == IEEE80211_M_STA ?
+					"associated" : "synchronized "),
+					ether_sprintf(ni->ni_bssid));
+				ieee80211_print_essid(ni->ni_essid,
+					ni->ni_esslen);
+				printf(" channel %d start %uMb\n",
+					ieee80211_chan2ieee(ic, ic->ic_curchan),
+					IEEE80211_RATE2MBS(ni->ni_rates.rs_rates[ni->ni_txrate]));
+			}
+#endif
+			if (vap->iv_opmode == IEEE80211_M_STA) {
+				printk(KERN_WARNING "%s: associated with AP %s\n",
+					vap->iv_dev->name, ether_sprintf(ni->ni_macaddr));
+				ieee80211_scan_assoc_success(ic,
+					ni->ni_macaddr);
+				ieee80211_notify_node_join(ni,
+					(arg == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) | \
+					(arg == IEEE80211_FC0_SUBTYPE_REASSOC_RESP));
+				if ((vap->iv_qtn_flags & IEEE80211_QTN_BRIDGEMODE_DISABLED) &&
+						(vap->iv_qtn_ap_cap & IEEE80211_QTN_BRIDGEMODE)) {
+					printk(KERN_WARNING "%s: 4-address mode is supported "
+						"by the associated AP but is disabled\n",
+						vap->iv_dev->name);
+				}
+				if (ic->ic_pwr_adjust_scancnt > 0)
+					ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PWR_ADJUST_AUTO, 1, NULL, 0);
+
+				/* check if need to activate tdls discovery timer */
+				if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0) {
+					if ((vap->tdls_discovery_interval > 0) &&
+						(!timer_pending(&vap->tdls_rate_detect_timer)))
+						ieee80211_tdls_trigger_rate_detection((unsigned long)vap);
+				}
+
+				ieee80211_sta_assocs_inc(vap, __func__);
+				ieee80211_nonqtn_sta_join(vap, ni, __func__);
+
+				if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+					if (ieee80211_is_chan_not_available(ni->ni_chan)) {
+						if (ic->ic_mark_channel_availability_status) {
+							ic->ic_mark_channel_availability_status(ic,
+									ni->ni_chan,
+									IEEE80211_CHANNEL_STATUS_AVAILABLE);
+						}
+					}
+				}
+				SCSDBG(SCSLOG_NOTICE, "send qtn DFS report (DFS %s)\n",
+							ic->ic_flags_ext & IEEE80211_FEXT_MARKDFS ?
+							"Enabled" :
+							"Disabled");
+				ieee80211_send_action_dfs_report(ni);
+			}
+			break;
+		}
+
+		/* WDS/Repeater: Start software beacon timer for STA */
+		if (ostate != IEEE80211_S_RUN &&
+		    (vap->iv_opmode == IEEE80211_M_STA &&
+		     vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS)) {
+
+			if (!vap->iv_bcn_miss_thr)
+				vap->iv_bcn_miss_thr = IEEE80211_NUM_BEACONS_TO_MISS;
+
+			vap->iv_swbmiss.function = ieee80211_swbmiss;
+			vap->iv_swbmiss.data = (unsigned long) vap;
+			vap->iv_swbmiss_warnings = IEEE80211_SWBMISS_WARNINGS;
+			vap->iv_swbmiss_period = IEEE80211_TU_TO_JIFFIES(
+				ni->ni_intval * vap->iv_bcn_miss_thr);
+
+#if defined(QBMPS_ENABLE)
+			vap->iv_swbmiss_bmps_warning = 0;
+#endif
+			if (vap->iv_swbmiss_warnings)
+				vap->iv_swbmiss_period /= (vap->iv_swbmiss_warnings + 1);
+
+			mod_timer(&vap->iv_swbmiss, jiffies + vap->iv_swbmiss_period);
+		}
+
+#if defined(QBMPS_ENABLE)
+		if ((vap->iv_opmode == IEEE80211_M_STA) &&
+		    (ic->ic_flags_qtn & IEEE80211_QTN_BMPS)) {
+	                ic->ic_pm_reason = IEEE80211_PM_LEVEL_NEW_STATE_IEEE80211_S_RUN;
+			ieee80211_pm_queue_work(ic);
+		}
+#endif
+
+		/*
+		 * Start/stop the authenticator when operating as an
+		 * AP.  We delay until here to allow configuration to
+		 * happen out of order.
+		 */
+		/* XXX WDS? */
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP && /* XXX IBSS/AHDEMO */
+		    vap->iv_auth->ia_attach != NULL) {
+			/* XXX check failure */
+			vap->iv_auth->ia_attach(vap);
+		} else if (vap->iv_auth->ia_detach != NULL)
+			vap->iv_auth->ia_detach(vap);
+		/*
+		 * When 802.1x is not in use mark the port authorized
+		 * at this point so traffic can flow.
+		 */
+		if ((ni->ni_authmode != IEEE80211_AUTH_8021X) &&
+			(!(vap->iv_flags & (IEEE80211_F_WPA1 | IEEE80211_F_WPA2)))) {
+			ieee80211_node_authorize(ni);
+		}
+		break;
+	}
+
+	ieee80211_free_node(ni);
+
+	return 0;
+}
+
+/*
+ * State-machine entry point installed as vap->iv_newstate.
+ *
+ * Wraps __ieee80211_newstate() to serialise scanning across all VAPs that
+ * share this ieee80211com: only one VAP may scan at a time, VAPs that want
+ * to scan while another is busy are parked with IEEE80211_FEXT_SCAN_PENDING,
+ * and they are brought to RUN once the active scan finishes.
+ *
+ * @vap:    VAP requesting the transition
+ * @nstate: requested new state
+ * @arg:    state-specific argument passed through to __ieee80211_newstate()
+ *
+ * Always returns 0.
+ */
+static int
+ieee80211_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	enum ieee80211_state ostate;
+	struct ieee80211vap *tmpvap;
+
+	ostate = vap->iv_state;
+
+	/* Pure monitor devices take no part in the state machine. */
+	if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR) {
+		return 0;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+			"%s: %s -> %s\n",
+			__FUNCTION__,
+			ieee80211_state_name[ostate],
+			ieee80211_state_name[nstate]);
+
+	switch (nstate) {
+	case IEEE80211_S_SCAN:
+		if (ostate == IEEE80211_S_INIT) {
+			int nrunning, nscanning;
+
+			/* Count sibling VAPs that are already running or
+			 * scanning; monitor/WDS siblings are ignored. */
+			nrunning = nscanning = 0;
+			TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next) {
+				if (vap != tmpvap) {
+					if (tmpvap->iv_opmode == IEEE80211_M_MONITOR ||
+					    tmpvap->iv_opmode == IEEE80211_M_WDS)
+						/*
+						 * Skip monitor and WDS vaps as their S_RUN
+						 * shouldn't have any influence on modifying
+						 * state transition.
+						 */
+						continue;
+					if (ieee80211_is_repeater(ic) &&
+							tmpvap->iv_opmode == IEEE80211_M_STA &&
+							tmpvap->iv_state != IEEE80211_S_INIT)
+						nrunning++;
+					else if (tmpvap->iv_state == IEEE80211_S_RUN)
+						nrunning++;
+					else if (tmpvap->iv_state == IEEE80211_S_SCAN ||
+					    tmpvap->iv_state == IEEE80211_S_AUTH || /* STA in WDS/Repeater */
+					    tmpvap->iv_state == IEEE80211_S_ASSOC)
+						nscanning++;
+				}
+			}
+
+			KASSERT(!(nscanning && nrunning), ("SCAN and RUN can't happen at the same time\n"));
+
+			if (!nscanning && !nrunning) {
+				/* when no one is running or scanning, start a new scan */
+				__ieee80211_newstate(vap, nstate, arg);
+			} else if (!nscanning && nrunning) {
+				/* when no one is scanning but someone is running, bypass
+				 * scan and go to run state immediately */
+				if (vap->iv_opmode == IEEE80211_M_MONITOR ||
+				    vap->iv_opmode == IEEE80211_M_WDS ||
+				    vap->iv_opmode == IEEE80211_M_HOSTAP) {
+					__ieee80211_newstate(vap, IEEE80211_S_RUN, arg);
+				} else {
+					/* MW: avoid invalid S_INIT -> S_RUN transition */
+					__ieee80211_newstate(vap, nstate, arg);
+				}
+			} else if (nscanning && !nrunning) {
+				/* when someone is scanning and no one is running, set
+				 * the scan pending flag. Don't go through state machine */
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+					"%s: %s -> %s with SCAN_PENDING\n",
+					__func__,
+					ieee80211_state_name[ostate],
+					ieee80211_state_name[nstate]);
+				vap->iv_flags_ext |= IEEE80211_FEXT_SCAN_PENDING;
+			}
+		} else {
+			/* Forced scan from a non-INIT state: push other active
+			 * VAPs to INIT (with SCAN_PENDING set) unless the DFS
+			 * channel-switch special case below applies. */
+			TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next) {
+				if (vap != tmpvap && tmpvap->iv_state != IEEE80211_S_INIT && !(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)) {
+					if ((ic->ic_flags & IEEE80211_F_CHANSWITCH) &&
+							(vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+							(ic->ic_des_chan != IEEE80211_CHAN_ANYC) &&
+							(ostate == IEEE80211_S_RUN) &&
+							(tmpvap->iv_opmode == IEEE80211_M_HOSTAP) &&
+							(tmpvap->iv_state == IEEE80211_S_RUN)) {
+						/*
+						 * DFS channel switch enabled
+						 * vap is AP mode and old state is RUN;
+						 * Desired channel is set;
+						 * tmpvap is AP mode and current state is RUN.
+						 * Do nothing but pending for vap from SCAN to RUN,
+						 * re-enter RUN state and update beacon when pending was cleared.
+						 */
+						;	/* Noting */
+					} else {
+						/*
+						 * The VAP is forced to scan, we need to change all other vap's state
+						 * to INIT and pend for the scan completion.
+						 *
+						 * For WDS, change state to INIT as long as channel will be changed.
+						 */
+						tmpvap->iv_newstate(tmpvap, IEEE80211_S_INIT, 0);
+					}
+					tmpvap->iv_flags_ext |= IEEE80211_FEXT_SCAN_PENDING;
+				}
+			}
+
+			/* start the new scan */
+			__ieee80211_newstate(vap, nstate, arg);
+		}
+		break;
+	case IEEE80211_S_RUN:
+		if ((ostate == IEEE80211_S_SCAN ||		/* AP coming out of scan */
+				vap->iv_opmode == IEEE80211_M_STA) /* STA in WDS/Repeater needs to bring up other VAPs */
+				&& !(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)) {
+			__ieee80211_newstate(vap, nstate, arg);
+
+			/* bring up all other vaps pending on the scan*/
+			TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next) {
+				if (vap != tmpvap
+						&& (tmpvap->iv_flags_ext & IEEE80211_FEXT_SCAN_PENDING)) {
+					tmpvap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_PENDING;
+					tmpvap->iv_newstate(tmpvap, IEEE80211_S_RUN, 0);
+				}
+			}
+		} else {
+			__ieee80211_newstate(vap, nstate, arg);
+		}
+		break;
+	case IEEE80211_S_INIT:
+		/* A VAP going back to INIT no longer waits for a scan. */
+		if (ostate == IEEE80211_S_INIT && vap->iv_flags_ext & IEEE80211_FEXT_SCAN_PENDING)
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_PENDING;
+		/* fall through */
+	default:
+		__ieee80211_newstate(vap, nstate, arg);
+	}
+	return 0;
+}
+
diff --git a/drivers/qtn/wlan/ieee80211_qfdr.c b/drivers/qtn/wlan/ieee80211_qfdr.c
new file mode 100644
index 0000000..84835dd
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_qfdr.c
@@ -0,0 +1,424 @@
+/*-
+ * Copyright (c) 2015 Quantenna Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_qfdr.c 2759 2015-12-20 10:48:20Z Jason $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 sync scan result for Quantenna QFDR.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/in.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <net/sock.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+
+/* How long qfdr_recv() waits for a reply from the peer (jiffies). */
+#define QFDR_REP_TIMEOUT   (2 * HZ)
+/* Upper bound on a request datagram; must match the peer's recv buffer. */
+#define QFDR_REQ_MAX_SIZE  512
+
+/* Module parameters: peer addressing, all read-only after load. */
+static char *local_ip = "0.0.0.0";
+module_param(local_ip, charp, S_IRUGO);
+MODULE_PARM_DESC(local_ip, "qfdr local ip");
+
+static char *remote_ip = "0.0.0.0";
+module_param(remote_ip, charp, S_IRUGO);
+MODULE_PARM_DESC(remote_ip, "qfdr remote ip");
+
+static unsigned short req_port = 0;
+module_param(req_port, ushort, S_IRUGO);
+MODULE_PARM_DESC(req_port, "qfdr port to recv req");
+
+static unsigned short rep_port = 0;
+module_param(rep_port, ushort, S_IRUGO);
+MODULE_PARM_DESC(rep_port, "qfdr port to recv rep");
+
+/* UDP sockets and pre-built peer addresses; created in init_wlan_qfdr(). */
+static struct socket *sock_send;
+static struct socket *sock_recv_req;
+static struct socket *sock_recv_rep;
+static struct sockaddr_in sin_req;
+static struct sockaddr_in sin_rep;
+
+/* Kernel thread servicing inbound requests, and its exit notification. */
+static struct task_struct *thread_recv_req;
+static struct completion comp_recv_req_thread;
+
+/*
+ * Create a UDP socket bound to @addr:@port for receiving qfdr traffic.
+ *
+ * @addr: local IPv4 address in network byte order
+ * @port: local port in host byte order
+ *
+ * Returns the bound socket, or NULL on create/bind failure (already logged).
+ */
+static struct socket * qfdr_create_recv_socket(unsigned int addr, unsigned short port)
+{
+	struct sockaddr_in sin_bind;
+	struct socket *socket = NULL;
+
+	if (sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) {
+		printk(KERN_ERR "%s: Failed to create socket\n", __func__);
+		return NULL;
+	}
+
+	/* Zero the whole sockaddr: sin_zero padding was previously left
+	 * uninitialized stack garbage. */
+	memset(&sin_bind, 0, sizeof(sin_bind));
+	sin_bind.sin_family = AF_INET;
+	sin_bind.sin_addr.s_addr = addr;
+	sin_bind.sin_port = htons(port);
+
+	if (kernel_bind(socket, (struct sockaddr *)&sin_bind, sizeof(sin_bind))) {
+		printk(KERN_ERR "%s: Failed to bind socket to port %pI4:%d\n", __func__, &addr, port);
+		sock_release(socket);
+		return NULL;
+	}
+
+	return socket;
+}
+
+/*
+ * Blocking receive of one datagram from @sock into @buffer.
+ *
+ * Returns the number of bytes received, or a negative errno from
+ * kernel_recvmsg() (for sock_recv_rep this includes a timeout error,
+ * since that socket has sk_rcvtimeo set).
+ */
+static int qfdr_recv(struct socket *sock, char *buffer, size_t buflen)
+{
+	struct msghdr	msg = {NULL};
+	struct kvec	iov;
+	int		len;
+
+	/* adjust the RCVBUF */
+	/* NOTE(review): pokes sk_rcvbuf directly, bypassing the usual
+	 * SO_RCVBUF sysctl clamping — presumably intentional so large
+	 * scan replies fit; confirm this is acceptable. */
+	if (buflen > sock->sk->sk_rcvbuf)
+		sock->sk->sk_rcvbuf = buflen;
+
+	iov.iov_base     = buffer;
+	iov.iov_len      = buflen;
+
+	len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
+
+	return len;
+}
+
+/*
+ * Non-blocking send of one datagram to @dest_addr via @sock.
+ *
+ * Uses MSG_DONTWAIT, so a full send queue results in an immediate
+ * error return rather than blocking the caller.
+ *
+ * Returns bytes sent or a negative errno from kernel_sendmsg().
+ */
+static int qfdr_send(struct socket *sock, char *buffer, size_t buflen, struct sockaddr_in *dest_addr)
+{
+	struct msghdr	msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
+	struct kvec	iov;
+	int		len;
+
+	/* adjust the SNDBUF */
+	/* NOTE(review): like qfdr_recv(), writes sk_sndbuf directly,
+	 * bypassing SO_SNDBUF limits — confirm intentional. */
+	if (buflen > sock->sk->sk_sndbuf)
+		sock->sk->sk_sndbuf = buflen;
+
+	msg.msg_name     = dest_addr;
+	msg.msg_namelen  = sizeof(struct sockaddr_in);
+
+	iov.iov_base     = buffer;
+	iov.iov_len      = buflen;
+
+	len = kernel_sendmsg(sock, &msg, &iov, 1, buflen);
+
+	return len;
+}
+
+/*
+ * Fetch the remote peer's SIOCGIWSCAN results and append them to the
+ * caller's event buffer (req->current_ev is advanced by the reply length).
+ *
+ * Fix: the original returned rep->res AFTER kfree(recvbuf), but rep
+ * points into recvbuf — a use-after-free. The result is now saved first.
+ *
+ * Returns the peer's result code, or 0 on local allocation/recv failure.
+ */
+int qfdr_remote_giwscan(struct iwscanreq *req)
+{
+	struct qfdr_remote_aplist_req request;
+	char *recvbuf;
+	size_t recvbuf_size;
+	int recvlen;
+	struct qfdr_remote_aplist_rep *rep;
+	int res;
+
+	request.type = QFDR_GIWSCAN;
+	memcpy(&request.info, req->info, sizeof(struct iw_request_info));
+	request.extra_len = req->end_buf - req->current_ev;
+	strcpy(request.dev_name, req->vap->iv_dev->name);
+	qfdr_send(sock_send, (char *)&request, sizeof(request), &sin_req);
+
+	/* Reply buffer: header plus at most the space the caller has left. */
+	recvbuf_size = request.extra_len + sizeof(struct qfdr_remote_aplist_rep);
+	recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
+	if (!recvbuf) {
+		printk(KERN_ERR "%s: Failed to alloc recvbuf\n", __func__);
+		return 0;
+	}
+
+	recvlen = qfdr_recv(sock_recv_rep, recvbuf, recvbuf_size);
+	if (recvlen < sizeof(struct qfdr_remote_aplist_rep)) {
+		kfree(recvbuf);
+		if (recvlen < 0)
+			printk(KERN_ERR "%s: Failed to recv rep with errno %d\n", __func__, recvlen);
+		else
+			printk(KERN_ERR "%s: recv invalid rep\n", __func__);
+		return 0;
+	}
+
+	rep = (struct qfdr_remote_aplist_rep *)recvbuf;
+	if (rep->res != ENOMEM && rep->type == QFDR_GIWSCAN) {
+		memcpy(req->current_ev, rep->extra, rep->length);
+		req->current_ev += rep->length;
+	}
+	/* Save the result before freeing: rep aliases recvbuf. */
+	res = rep->res;
+	kfree(recvbuf);
+
+	return res;
+}
+
+/*
+ * Fetch the remote peer's AP scan results and append them to the
+ * iterator's buffer, advancing current_env and accumulating ap_counts.
+ *
+ * Fix: like qfdr_remote_giwscan(), the original dereferenced rep->res
+ * after kfree(recvbuf) (rep points into recvbuf) — use-after-free.
+ *
+ * Returns the peer's result code, or 0 on local allocation/recv failure.
+ */
+int qfdr_remote_ap_scan_results(struct ap_scan_iter *iter)
+{
+	struct qfdr_remote_aplist_req request;
+	char *recvbuf;
+	size_t recvbuf_size;
+	int recvlen;
+	struct qfdr_remote_aplist_rep *rep;
+	int res;
+
+	request.type = QFDR_AP_SCAN_RESULT;
+	request.extra_len = iter->end_buf - iter->current_env;
+	strcpy(request.dev_name, iter->vap->iv_dev->name);
+	qfdr_send(sock_send, (char *)&request, sizeof(request), &sin_req);
+
+	recvbuf_size = request.extra_len + sizeof(struct qfdr_remote_aplist_rep);
+	recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
+	if (!recvbuf) {
+		printk(KERN_ERR "%s: Failed to alloc recvbuf\n", __func__);
+		return 0;
+	}
+
+	recvlen = qfdr_recv(sock_recv_rep, recvbuf, recvbuf_size);
+	if (recvlen < sizeof(struct qfdr_remote_aplist_rep)) {
+		kfree(recvbuf);
+		if (recvlen < 0)
+			printk(KERN_ERR "%s: Failed to recv rep with errno %d\n", __func__, recvlen);
+		else
+			printk(KERN_ERR "%s: recv invalid rep\n", __func__);
+		return 0;
+	}
+
+	rep = (struct qfdr_remote_aplist_rep *)recvbuf;
+	if (rep->res != ENOMEM && rep->type == QFDR_AP_SCAN_RESULT) {
+		memcpy(iter->current_env, rep->extra, rep->length);
+		iter->current_env += rep->length;
+		iter->ap_counts += rep->ap_counts;
+	}
+	/* Save the result before freeing: rep aliases recvbuf. */
+	res = rep->res;
+	kfree(recvbuf);
+
+	return res;
+}
+
+/*
+ * Forward a SIOCSIWSCAN request to the remote peer so it starts the same
+ * scan. With @data the scan parameters are copied into the request
+ * (QFDR_SIWSCAN); with @data == NULL a parameterless QFDR_SIWSCAN_SIMPLE
+ * is sent.
+ *
+ * NOTE(review): data->pointer is copied with memcpy(); this assumes the
+ * caller already brought it into kernel space — confirm against callers.
+ *
+ * Returns 0 on success, -EINVAL when the request would exceed the peer's
+ * receive buffer, -ENOMEM on allocation failure.
+ */
+int qfdr_remote_siwscan(char *dev_name, struct iw_point *data)
+{
+	struct qfdr_remote_scan_req *request;
+	int req_len = sizeof(struct qfdr_remote_scan_req);
+
+	if (data)
+		req_len += data->length;
+	if (req_len > QFDR_REQ_MAX_SIZE) {
+		/* Fixed typo in the original message: "canonly" */
+		printk(KERN_ERR "%s: qfdr peer can only recv %d bytes req\n", __func__, QFDR_REQ_MAX_SIZE);
+		return -EINVAL;
+	}
+	request = kmalloc(req_len, GFP_KERNEL);
+	if (!request) {
+		printk(KERN_ERR "%s: Failed to alloc buf\n", __func__);
+		return -ENOMEM;
+	}
+
+	strcpy(request->dev_name, dev_name);
+	if (data) {
+		request->type = QFDR_SIWSCAN;
+		request->flags = data->flags;
+		request->length = data->length;
+		memcpy(request->pointer, data->pointer, data->length);
+	} else {
+		request->type = QFDR_SIWSCAN_SIMPLE;
+	}
+
+	qfdr_send(sock_send, (char *)request, req_len, &sin_req);
+	kfree(request);
+
+	return 0;
+}
+
+/*
+ * Dispatch one request datagram from the remote peer.
+ *
+ * @recvbuf: raw request; its first int is the request type.
+ * @recvlen: datagram length (currently unused — handlers trust the
+ *           fixed request-struct layouts).
+ *
+ * GIWSCAN / AP_SCAN_RESULT requests are answered over sock_send to the
+ * peer's reply port; SIWSCAN requests trigger a local scan and send no
+ * reply.
+ */
+static void qfdr_process_req(char *recvbuf, int recvlen)
+{
+	int type = *((int *)recvbuf);
+	/* Fallback reply when the handler cannot allocate a real one.
+	 * NOTE(review): the positional initializer assumes .res is the
+	 * first member of struct qfdr_remote_aplist_rep — confirm against
+	 * the struct definition. */
+	struct qfdr_remote_aplist_rep rep_nomem = {ENOMEM, 0, 0, 0};
+	struct qfdr_remote_aplist_rep *rep;
+
+	if (type == QFDR_GIWSCAN) {
+		rep = qfdr_giwscan_for_remote((struct qfdr_remote_aplist_req *)recvbuf);
+		if (rep) {
+			qfdr_send(sock_send, (char *)rep, sizeof(struct qfdr_remote_aplist_rep) + rep->length, &sin_rep);
+			kfree(rep);
+		} else {
+			rep = &rep_nomem;
+			rep->type = QFDR_GIWSCAN;
+			qfdr_send(sock_send, (char *)rep, sizeof(struct qfdr_remote_aplist_rep) + rep->length, &sin_rep);
+		}
+	} else if (type == QFDR_AP_SCAN_RESULT) {
+		rep = qfdr_ap_scan_results_for_remote((struct qfdr_remote_aplist_req *)recvbuf);
+		if (rep) {
+			qfdr_send(sock_send, (char *)rep, sizeof(struct qfdr_remote_aplist_rep) + rep->length, &sin_rep);
+			kfree(rep);
+		} else {
+			rep = &rep_nomem;
+			rep->type = QFDR_AP_SCAN_RESULT;
+			qfdr_send(sock_send, (char *)rep, sizeof(struct qfdr_remote_aplist_rep) + rep->length, &sin_rep);
+		}
+	} else if (type == QFDR_SIWSCAN_SIMPLE || type == QFDR_SIWSCAN) {
+		qfdr_siwscan_for_remote((struct qfdr_remote_scan_req *)recvbuf);
+	}
+}
+
+/*
+ * Kernel thread: loop receiving request datagrams from the peer and
+ * dispatching them via qfdr_process_req(). Exits when exit_wlan_qfdr()
+ * sends SIGTERM, then signals comp_recv_req_thread.
+ *
+ * Fix: the original logged "broken pipe" even when the recv was merely
+ * interrupted by the shutdown SIGTERM; only warn on genuine errors.
+ */
+static int qfdr_recv_req_thread(void *data)
+{
+	char *recvbuf;
+	int recvlen;
+
+	recvbuf = kmalloc(QFDR_REQ_MAX_SIZE, GFP_KERNEL);
+	if (!recvbuf) {
+		printk(KERN_ERR "%s: Failed to alloc recvbuf\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* SIGTERM from exit_wlan_qfdr() is our shutdown notification. */
+	allow_signal(SIGTERM);
+
+	while (!signal_pending(current)) {
+		recvlen = qfdr_recv(sock_recv_req, recvbuf, QFDR_REQ_MAX_SIZE);
+		if (recvlen > 0) {
+			qfdr_process_req(recvbuf, recvlen);
+		} else if (!signal_pending(current)) {
+			/* Real receive failure, not shutdown interruption. */
+			printk(KERN_WARNING "%s: broken pipe on socket\n", __func__);
+		}
+	}
+
+	kfree(recvbuf);
+	complete(&comp_recv_req_thread);
+	thread_recv_req = NULL;
+
+	return 0;
+}
+
+/*
+ * Module glue.
+ */
+MODULE_AUTHOR("Quantenna, Jason.Wang");
+MODULE_DESCRIPTION("802.11 wireless support: Quantenna QFDR sync scan result");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+/*
+ * Module load: validate the ip/port parameters, create the UDP sockets,
+ * start the request-receiver thread and register the scan hooks with
+ * net80211.
+ *
+ * Fixes: init_completion() is now called BEFORE kthread_run() — the
+ * thread calls complete() when it exits, and initializing afterwards
+ * raced with an early thread exit. On kthread_run() failure the ERR_PTR
+ * is no longer left in thread_recv_req.
+ *
+ * Returns 0 on success, -EINVAL on bad parameters, -EAGAIN on any
+ * socket/thread setup failure.
+ */
+static int __init init_wlan_qfdr(void)
+{
+	unsigned int local_addr, remote_addr;
+
+	local_addr = in_aton(local_ip);
+	if (local_addr == INADDR_ANY || local_addr == INADDR_NONE) {
+		printk(KERN_ERR "%s: Invalid local IP %pI4\n", __func__, &local_addr);
+		return -EINVAL;
+	}
+
+	remote_addr = in_aton(remote_ip);
+	if (remote_addr == INADDR_ANY || remote_addr == INADDR_NONE) {
+		printk(KERN_ERR "%s: Invalid remote IP %pI4\n", __func__, &remote_addr);
+		return -EINVAL;
+	}
+
+	if (req_port == 0) {
+		printk(KERN_ERR "%s: Invalid req port %u, must be greater than 0\n", __func__, req_port);
+		return -EINVAL;
+	}
+
+	if (rep_port == 0 || rep_port == req_port) {
+		printk(KERN_ERR "%s: Invalid rep port %u, must be greater than 0 and not same as req port\n", __func__, rep_port);
+		return -EINVAL;
+	}
+
+	if (sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock_send)) {
+		printk(KERN_ERR "%s: Failed to create send socket\n", __func__);
+		goto fail;
+	}
+
+	sock_recv_req = qfdr_create_recv_socket(local_addr, req_port);
+	if (!sock_recv_req) {
+		printk(KERN_ERR "%s: Failed to create req recv socket\n", __func__);
+		goto fail;
+	}
+
+	sock_recv_rep = qfdr_create_recv_socket(local_addr, rep_port);
+	if (!sock_recv_rep) {
+		printk(KERN_ERR "%s: Failed to create rep recv socket\n", __func__);
+		goto fail;
+	}
+
+	/* set the RCVTIMEO so waiting for a peer reply cannot block forever */
+	sock_recv_rep->sk->sk_rcvtimeo = QFDR_REP_TIMEOUT;
+
+	sin_req.sin_family = AF_INET;
+	sin_req.sin_addr.s_addr = remote_addr;
+	sin_req.sin_port = htons(req_port);
+
+	sin_rep.sin_family = AF_INET;
+	sin_rep.sin_addr.s_addr = remote_addr;
+	sin_rep.sin_port = htons(rep_port);
+
+	/* Must be initialized before the thread that will complete() it runs. */
+	init_completion(&comp_recv_req_thread);
+
+	thread_recv_req = kthread_run(qfdr_recv_req_thread, NULL, "qfdr");
+	if (IS_ERR(thread_recv_req)) {
+		printk(KERN_ERR "%s: Failed to start qfdr_recv_req_thread\n", __func__);
+		thread_recv_req = NULL;	/* don't leave an ERR_PTR behind */
+		goto fail;
+	}
+
+	ieee80211_register_qfdr_remote_siwscan_hook(qfdr_remote_siwscan);
+	ieee80211_register_qfdr_remote_giwscan_hook(qfdr_remote_giwscan);
+	ieee80211_register_qfdr_remote_ap_scan_results_hook(qfdr_remote_ap_scan_results);
+
+	printk(KERN_INFO "Load qfdr module successfully, local ip:%pI4, remote ip:%pI4, req_port:%u, rep_port:%u\n", &local_addr, &remote_addr, req_port, rep_port);
+	return 0;
+fail:
+	/* Statics start out NULL, so only sockets actually created are freed. */
+	if (sock_send)
+		sock_release(sock_send);
+	if (sock_recv_req)
+		sock_release(sock_recv_req);
+	if (sock_recv_rep)
+		sock_release(sock_recv_rep);
+
+	return -EAGAIN;
+}
+module_init(init_wlan_qfdr);
+
+/*
+ * Module unload: unhook from net80211, stop the request-receiver thread
+ * and release all sockets.
+ */
+static void __exit exit_wlan_qfdr(void)
+{
+	/* Unregister hooks first so no new requests are forwarded. */
+	ieee80211_register_qfdr_remote_siwscan_hook(NULL);
+	ieee80211_register_qfdr_remote_giwscan_hook(NULL);
+	ieee80211_register_qfdr_remote_ap_scan_results_hook(NULL);
+	/* Wake the thread out of its blocking recv with SIGTERM and wait
+	 * for it to finish before the sockets it uses are released. */
+	if (thread_recv_req) {
+		send_sig(SIGTERM, thread_recv_req, 0);
+		wait_for_completion(&comp_recv_req_thread);
+	}
+
+	if (sock_send)
+		sock_release(sock_send);
+	if (sock_recv_req)
+		sock_release(sock_recv_req);
+	if (sock_recv_rep)
+		sock_release(sock_recv_rep);
+}
+module_exit(exit_wlan_qfdr);
diff --git a/drivers/qtn/wlan/ieee80211_rate.c b/drivers/qtn/wlan/ieee80211_rate.c
new file mode 100644
index 0000000..f35b043
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_rate.c
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2007 Pavel Roskin
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * Atheros module glue for rate control algorithms.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net80211/if_media.h"
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_rate.h"
+
+/*
+ * Kernel module implementing each rate-control algorithm, indexed by
+ * ratectl id.  Designated initializers: any id without an entry here
+ * is a NULL slot.
+ */
+static const char *module_names[] = {
+	[IEEE80211_RATE_AMRR] = "ath_rate_amrr",
+	[IEEE80211_RATE_ONOE] = "ath_rate_onoe",
+	[IEEE80211_RATE_SAMPLE] = "ath_rate_sample"
+};
+
+/*
+ * Table of registered rate controllers.
+ */
+static struct ieee80211_rate_ops ratectls[IEEE80211_RATE_MAX];
+
+/*
+ * Record a rate-control algorithm's ops in the global table, keyed by
+ * its ratectl id.
+ *
+ * Returns 0 on success, -EINVAL when the id is outside
+ * [0, IEEE80211_RATE_MAX).
+ */
+int ieee80211_rate_register(struct ieee80211_rate_ops *ops)
+{
+	const int id = ops->ratectl_id;
+
+	/* Reject out-of-range ids up front. */
+	if (id < 0 || id >= IEEE80211_RATE_MAX)
+		return -EINVAL;
+
+	memcpy(&ratectls[id], ops, sizeof(*ops));
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_rate_register);
+
+/*
+ * Remove a previously registered rate-control algorithm from the table.
+ *
+ * Fix: the original bound check was "id > 0", which made slot 0
+ * (IEEE80211_RATE_AMRR — a valid id accepted by
+ * ieee80211_rate_register()) impossible to unregister. Use ">= 0" to
+ * mirror the register path.
+ */
+void ieee80211_rate_unregister(struct ieee80211_rate_ops *ops)
+{
+	int id = ops->ratectl_id;
+
+	if (id >= 0 && id < IEEE80211_RATE_MAX)
+		memset(&ratectls[id], 0, sizeof(ratectls[0]));
+}
+EXPORT_SYMBOL(ieee80211_rate_unregister);
+
+/*
+ * Locate (loading on demand) the rate-control module "ath_rate_<name>"
+ * and attach it to @sc.
+ *
+ * Fix: the lookup loop ran to IEEE80211_RATE_MAX and called strcmp()
+ * on every entry; module_names[] is sparsely initialized, so a gap in
+ * the enum would yield a NULL entry (strcmp on NULL oopses) and, if
+ * IEEE80211_RATE_MAX exceeds the array length, an out-of-bounds read.
+ * Bound by ARRAY_SIZE() and skip NULL slots instead.
+ *
+ * Returns the controller handle with ->ops filled in, or NULL when the
+ * name is unknown, the module cannot be loaded, or its attach fails.
+ */
+struct ath_ratectrl *ieee80211_rate_attach(struct ath_softc *sc,
+					    const char *name)
+{
+	int id;
+	char buf[64];
+	struct ath_ratectrl *ctl;
+
+	snprintf(buf, sizeof(buf), "ath_rate_%s", name);
+	for (id = 0; id < ARRAY_SIZE(module_names); id++) {
+		if (module_names[id] && strcmp(buf, module_names[id]) == 0)
+			break;
+	}
+
+	if (id >= ARRAY_SIZE(module_names)) {
+		printk(KERN_ERR "Module \"%s\" is not known\n", buf);
+		return NULL;
+	}
+
+	/* Not registered yet: loading the module registers it as a side effect. */
+	if (!ratectls[id].attach)
+		ieee80211_load_module(buf);
+
+	if (!ratectls[id].attach) {
+		printk(KERN_ERR "Error loading module \"%s\"\n", buf);
+		return NULL;
+	}
+
+	ctl = ratectls[id].attach(sc);
+	if (!ctl) {
+		printk(KERN_ERR "Module \"%s\" failed to initialize\n", buf);
+		return NULL;
+	}
+
+	ctl->ops = &ratectls[id];
+	return ctl;
+}
+EXPORT_SYMBOL(ieee80211_rate_attach);
+
+/* Detach a controller obtained from ieee80211_rate_attach(); simply
+ * delegates to the algorithm's detach op. */
+void ieee80211_rate_detach(struct ath_ratectrl *ctl) {
+	ctl->ops->detach(ctl);
+}
+EXPORT_SYMBOL(ieee80211_rate_detach);
diff --git a/drivers/qtn/wlan/ieee80211_scan.c b/drivers/qtn/wlan/ieee80211_scan.c
new file mode 100644
index 0000000..c29ffa99
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_scan.c
@@ -0,0 +1,2254 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_scan.c 1849 2006-12-08 17:20:08Z proski $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 scanning support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <qtn/qtn_debug.h>
+#include <qtn/shared_defs.h>
+#include <qtn/shared_params.h>
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_scan.h"
+
+struct scan_state {
+	struct ieee80211_scan_state base;	/* public state */
+
+	u_int ss_iflags;				/* flags used internally */
+#define	ISCAN_MINDWELL 	0x0001		/* min dwell time reached */
+#define	ISCAN_DISCARD	0x0002		/* discard rx'd frames */
+#define	ISCAN_CANCEL	0x0004		/* cancel current scan */
+#define	ISCAN_START	0x0008		/* 1st time through next_scan */
+	unsigned long ss_chanmindwell;		/* min dwell on curchan */
+	unsigned long ss_scanend;		/* time scan must stop */
+	u_int ss_duration;			/* duration for next scan */
+	struct tasklet_struct ss_pwrsav;	/* sta ps ena tasklet */
+	struct timer_list ss_scan_timer;	/* scan timer */
+	struct timer_list ss_probe_timer;	/* start sending probe requests timer */
+};
+#define	SCAN_PRIVATE(ss)	((struct scan_state *) ss)
+
+/*
+ * Amount of time to go off-channel during a background
+ * scan.  This value should be large enough to catch most
+ * ap's but short enough that we can return on-channel
+ * before our listen interval expires.
+ *
+ * XXX tunable
+ * XXX check against configured listen interval
+ */
+#define	IEEE80211_SCAN_OFFCHANNEL	msecs_to_jiffies(150)
+
+/*
+ * Roaming-related defaults.  RSSI thresholds are as returned by the
+ * driver (dBm).  Transmit rate thresholds are IEEE rate codes (i.e
+ * .5M units).
+ */
+#define	SCAN_VALID_DEFAULT		60	/* scan cache valid age (secs) */
+#define	ROAM_RSSI_11A_DEFAULT		24	/* rssi threshold for 11a bss */
+#define	ROAM_RSSI_11B_DEFAULT		24	/* rssi threshold for 11b bss */
+#define	ROAM_RSSI_11BONLY_DEFAULT	24	/* rssi threshold for 11b-only bss */
+#define	ROAM_RATE_11A_DEFAULT		2*24	/* tx rate threshold for 11a bss */
+#define	ROAM_RATE_11B_DEFAULT		2*9	/* tx rate threshold for 11b bss */
+#define	ROAM_RATE_11BONLY_DEFAULT	2*5	/* tx rate threshold for 11b-only bss */
+
+static u_int32_t txpow_rxgain_count = 0;
+static u_int32_t txpow_rxgain_state = 1;
+
+static void scan_restart_pwrsav(unsigned long);
+static void scan_next(unsigned long);
+static void send_probes(unsigned long);
+static void scan_saveie(u_int8_t **iep, const u_int8_t *ie);
+
+#ifdef QSCS_ENABLED
+int ieee80211_scs_init_ranking_stats(struct ieee80211com *ic)
+{
+	struct ap_state *as;
+	int i;
+
+	MALLOC(as, struct ap_state *, sizeof(struct ap_state),
+		M_SCANCACHE, M_NOWAIT | M_ZERO);
+	if (as == NULL) {
+		printk("Failed to alloc scs ranking stats\n");
+		return -1;
+	}
+
+	if (ic->ic_scan != NULL) {
+		as->as_age = AP_PURGE_SCS;
+		ic->ic_scan->ss_scs_priv = as;
+		spin_lock_init(&as->asl_lock);
+		for (i = 0; i < IEEE80211_CHAN_MAX; i++)
+			TAILQ_INIT(&as->as_scan_list[i].asl_head);
+	} else {
+		FREE(as, M_SCANCACHE);
+		return -1;
+	}
+
+	ieee80211_scs_clean_stats(ic, IEEE80211_SCS_STATE_RESET, 0);
+
+	return 0;
+}
+
+void ieee80211_scs_deinit_ranking_stats(struct ieee80211com *ic)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	struct ap_state *as;
+	struct ap_state *as_bak;
+	const struct ieee80211_scanner *scan;
+
+	as = (struct ap_state *)ss->ss_scs_priv;
+	if (as != NULL) {
+		scan = ieee80211_scanner_get(IEEE80211_M_HOSTAP, 0);
+		if (scan == NULL) {
+			IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+				"%s: no scanner support for AP mode\n", __func__);
+		} else {
+			as_bak = ss->ss_priv;
+			ss->ss_priv = as;
+			scan->scan_detach(ss);
+			ss->ss_priv = as_bak;
+		}
+		FREE(as, M_SCANCACHE);
+	}
+
+	ss->ss_scs_priv = NULL;
+}
+#endif
+
+void
+ieee80211_scan_attach(struct ieee80211com *ic)
+{
+	struct scan_state *ss;
+
+	ic->ic_roaming = IEEE80211_ROAMING_AUTO;
+
+	MALLOC(ss, struct scan_state *, sizeof(struct scan_state),
+		M_80211_SCAN, M_NOWAIT | M_ZERO);
+	if (ss != NULL) {
+		init_timer(&ss->ss_scan_timer);
+		ss->ss_scan_timer.function = scan_next;
+		ss->ss_scan_timer.data = (unsigned long) ss;
+		/* Init the send probe timer for active scans */
+		init_timer(&ss->ss_probe_timer);
+		ss->ss_probe_timer.function = send_probes;
+		ss->ss_probe_timer.data = (unsigned long) ss;
+		tasklet_init(&ss->ss_pwrsav, scan_restart_pwrsav,
+			(unsigned long) ss);
+		ss->base.ss_pick_flags = IEEE80211_PICK_DEFAULT;
+		ss->base.is_scan_valid = 0;
+		ic->ic_scan = &ss->base;
+	} else
+		ic->ic_scan = NULL;
+
+#ifdef QSCS_ENABLED
+	ieee80211_scs_init_ranking_stats(ic);
+#endif
+}
+
+void
+ieee80211_scan_detach(struct ieee80211com *ic)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	if (ss != NULL) {
+#ifdef QSCS_ENABLED
+		ieee80211_scs_deinit_ranking_stats(ic);
+#endif
+		del_timer(&SCAN_PRIVATE(ss)->ss_scan_timer);
+		del_timer(&SCAN_PRIVATE(ss)->ss_probe_timer);
+		tasklet_kill(&SCAN_PRIVATE(ss)->ss_pwrsav);
+		if (ss->ss_ops != NULL) {
+			ss->ss_ops->scan_detach(ss);
+			ss->ss_ops = NULL;
+		}
+		ic->ic_flags &= ~IEEE80211_F_SCAN;
+#ifdef QTN_BG_SCAN
+		ic->ic_flags_qtn &= ~IEEE80211_QTN_BGSCAN;
+#endif /* QTN_BG_SCAN */
+		ic->ic_scan = NULL;
+		FREE(SCAN_PRIVATE(ss), M_80211_SCAN);
+	}
+}
+
+void
+ieee80211_scan_vattach(struct ieee80211vap *vap)
+{
+	vap->iv_bgscanidle = msecs_to_jiffies(IEEE80211_BGSCAN_IDLE_DEFAULT);
+	vap->iv_bgscanintvl = vap->iv_ic->ic_extender_bgscanintvl;
+	vap->iv_scanvalid = SCAN_VALID_DEFAULT * HZ;
+	vap->iv_roam.rssi11a = ROAM_RSSI_11A_DEFAULT;
+	vap->iv_roam.rssi11b = ROAM_RSSI_11B_DEFAULT;
+	vap->iv_roam.rssi11bOnly = ROAM_RSSI_11BONLY_DEFAULT;
+	vap->iv_roam.rate11a = ROAM_RATE_11A_DEFAULT;
+	vap->iv_roam.rate11b = ROAM_RATE_11B_DEFAULT;
+	vap->iv_roam.rate11bOnly = ROAM_RATE_11BONLY_DEFAULT;
+
+	txpow_rxgain_count = 0;
+	txpow_rxgain_state = 1;
+}
+
+void
+ieee80211_scan_vdetach(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	IEEE80211_LOCK_IRQ(ic);
+	if (ss->ss_vap == vap) {
+		if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+			|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+		) {
+			del_timer(&SCAN_PRIVATE(ss)->ss_scan_timer);
+			del_timer(&SCAN_PRIVATE(ss)->ss_probe_timer);
+			ic->ic_flags &= ~IEEE80211_F_SCAN;
+#ifdef QTN_BG_SCAN
+			ic->ic_flags_qtn &= ~IEEE80211_QTN_BGSCAN;
+#endif /* QTN_BG_SCAN */
+		}
+		if (ss->ss_ops != NULL) {
+			ss->ss_ops->scan_detach(ss);
+			ss->ss_ops = NULL;
+		}
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+}
+
+/*
+ * Simple-minded scanner module support.
+ */
+#define	IEEE80211_SCANNER_MAX	(IEEE80211_M_MONITOR+1)
+
+static const char *scan_modnames[IEEE80211_SCANNER_MAX] = {
+	[IEEE80211_M_IBSS]	= "wlan_scan_sta",
+	[IEEE80211_M_STA]	= "wlan_scan_sta",
+	[IEEE80211_M_AHDEMO]	= "wlan_scan_sta",
+	[IEEE80211_M_HOSTAP]	= "wlan_scan_ap",
+};
+static const struct ieee80211_scanner *scanners[IEEE80211_SCANNER_MAX];
+
+const struct ieee80211_scanner *
+ieee80211_scanner_get(enum ieee80211_opmode mode, int tryload)
+{
+	int err;
+	if (mode >= IEEE80211_SCANNER_MAX)
+		return NULL;
+	if (scan_modnames[mode] == NULL)
+		return NULL;
+	if (scanners[mode] == NULL && tryload) {
+		err = ieee80211_load_module(scan_modnames[mode]);
+		if (scanners[mode] == NULL || err)
+			printk(KERN_WARNING "unable to load %s\n", scan_modnames[mode]);
+	}
+	return scanners[mode];
+}
+EXPORT_SYMBOL(ieee80211_scanner_get);
+
+void
+ieee80211_scanner_register(enum ieee80211_opmode mode,
+	const struct ieee80211_scanner *scan)
+{
+	if (mode >= IEEE80211_SCANNER_MAX)
+		return;
+	scanners[mode] = scan;
+}
+EXPORT_SYMBOL(ieee80211_scanner_register);
+
+void
+ieee80211_scanner_unregister(enum ieee80211_opmode mode,
+	const struct ieee80211_scanner *scan)
+{
+	if (mode >= IEEE80211_SCANNER_MAX)
+		return;
+	if (scanners[mode] == scan)
+		scanners[mode] = NULL;
+}
+EXPORT_SYMBOL(ieee80211_scanner_unregister);
+
+void
+ieee80211_scanner_unregister_all(const struct ieee80211_scanner *scan)
+{
+	int m;
+
+	for (m = 0; m < IEEE80211_SCANNER_MAX; m++)
+		if (scanners[m] == scan)
+			scanners[m] = NULL;
+}
+EXPORT_SYMBOL(ieee80211_scanner_unregister_all);
+
+u_int8_t g_channel_fixed = 0;
+static void
+change_channel(struct ieee80211com *ic,
+	struct ieee80211_channel *chan)
+{
+#if 1
+	/* If channel is fixed using iwconfig then don't do anything */
+	if(!g_channel_fixed) 
+	{
+		ic->ic_prevchan = ic->ic_curchan;
+		ic->ic_curchan = chan;
+		//printk("Curr chan : %d\n", ic->ic_curchan->ic_ieee);
+		ic->ic_set_channel(ic);
+	}
+#else
+
+		ic->ic_curchan = chan;
+#endif
+}
+
+static char
+channel_type(const struct ieee80211_channel *c)
+{
+	if (IEEE80211_IS_CHAN_ST(c))
+		return 'S';
+	if (IEEE80211_IS_CHAN_108A(c))
+		return 'T';
+	if (IEEE80211_IS_CHAN_108G(c))
+		return 'G';
+	if (IEEE80211_IS_CHAN_A(c))
+		return 'a';
+	if (IEEE80211_IS_CHAN_ANYG(c))
+		return 'g';
+	if (IEEE80211_IS_CHAN_B(c))
+		return 'b';
+	return 'f';
+}
+
+void
+ieee80211_scan_dump_channels(const struct ieee80211_scan_state *ss)
+{
+	struct ieee80211com *ic = ss->ss_vap->iv_ic;
+	const char *sep;
+	int i;
+
+	sep = "";
+	for (i = ss->ss_next; i < ss->ss_last; i++) {
+		const struct ieee80211_channel *c = ss->ss_chans[i];
+
+		printf("%s%u%c", sep, ieee80211_chan2ieee(ic, c),
+			channel_type(c));
+		sep = ", ";
+	}
+}
+EXPORT_SYMBOL(ieee80211_scan_dump_channels);
+
+/*
+ * Enable station power save mode and start/restart the scanning thread.
+ */
+static void
+scan_restart_pwrsav(unsigned long arg)
+{
+	struct scan_state *ss = (struct scan_state *) arg;
+	struct ieee80211vap *vap = ss->base.ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	int delay;
+
+	ieee80211_sta_pwrsave(vap, 1);
+	/*
+	 * Use an initial 1ms delay to ensure the null
+	 * data frame has a chance to go out.
+	 * XXX 1ms is a lot, better to trigger scan
+	 * on tx complete.
+	 */
+	delay = msecs_to_jiffies(1);
+	if (delay < 1)
+		delay = 1;
+
+	ic->ic_setparam(vap->iv_bss, IEEE80211_PARAM_BEACON_ALLOW,
+			1, NULL, 0);
+	ic->ic_scan_start(ic);			/* notify driver */
+	ss->ss_scanend = jiffies + delay + ss->ss_duration;
+	ss->ss_iflags |= ISCAN_START;
+	mod_timer(&ss->ss_scan_timer, jiffies + delay);
+	/*
+	 * FIXME: Note, we are not delaying probes at the start here so there
+	 * may be issues with probe requests not being on the correct
+	 * channel for the first channel scanned.
+	 */
+}
+
+/*
+ * Start/restart scanning.  If we're operating in station mode
+ * and associated notify the ap we're going into power save mode
+ * and schedule a callback to initiate the work (where there's a
+ * better context for doing the work).  Otherwise, start the scan
+ * directly.
+ */
+static int
+scan_restart(struct scan_state *ss, u_int duration)
+{
+	struct ieee80211vap *vap = ss->base.ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	int defer = 0;
+
+	if (ss->base.ss_next == ss->base.ss_last) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: no channels to scan\n", __func__);
+		return 0;
+	}
+	if (vap->iv_opmode == IEEE80211_M_STA &&
+#ifdef QTN_BG_SCAN
+	    /* qtn bgscan sends pwrsav frame in MuC, or use large NAV */
+	    (ss->base.ss_flags & IEEE80211_SCAN_QTN_BGSCAN) == 0 &&
+#endif /* QTN_BG_SCAN */
+	    vap->iv_state == IEEE80211_S_RUN &&
+	    (ss->base.ss_flags & IEEE80211_SCAN_OPCHAN) == 0) {
+		if ((vap->iv_bss->ni_flags & IEEE80211_NODE_PWR_MGT) == 0) {
+			/*
+			 * Initiate power save before going off-channel.
+			 * Note that we cannot do this directly because
+			 * of locking issues; instead we defer it to a
+			 * tasklet.
+			 */
+			ss->ss_duration = duration;
+			tasklet_schedule(&ss->ss_pwrsav);
+			defer = 1;
+		}
+	}
+
+	if (!defer) {
+		if (vap->iv_opmode == IEEE80211_M_STA &&
+#ifdef QTN_BG_SCAN
+				!(ss->base.ss_flags & IEEE80211_SCAN_QTN_BGSCAN) &&
+#endif
+				vap->iv_state == IEEE80211_S_RUN) {
+			ic->ic_setparam(vap->iv_bss, IEEE80211_PARAM_BEACON_ALLOW,
+				1, NULL, 0);
+		}
+
+		ic->ic_scan_start(ic);		/* notify driver */
+		ss->ss_scanend = jiffies + duration;
+		ss->ss_iflags |= ISCAN_START;
+		mod_timer(&ss->ss_scan_timer, jiffies);
+		/*
+		 * FIXME: Note, we are not delaying probes at the start here so there
+		 * may be issues with probe requests not being on the correct
+		 * channel for the first channel scanned.
+		 */
+	}
+	return 1;
+}
+
+static void
+copy_ssid(struct ieee80211vap *vap, struct ieee80211_scan_state *ss,
+	int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+	if (nssid > IEEE80211_SCAN_MAX_SSID) {
+		/* XXX printf */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: too many ssid %d, ignoring all of them\n",
+			__func__, nssid);
+		return;
+	}
+	memcpy(ss->ss_ssid, ssids, nssid * sizeof(ssids[0]));
+	ss->ss_nssid = nssid;
+}
+
+/*
+ * Start a scan unless one is already going.
+ */
+int
+ieee80211_start_scan(struct ieee80211vap *vap, int flags, u_int duration,
+	u_int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	const struct ieee80211_scanner *scan;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+		if ((ic->ic_bsschan != IEEE80211_CHAN_ANYC) &&
+			IEEE80211_IS_CHAN_CAC_IN_PROGRESS(ic->ic_bsschan)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: Ignored-CAC in progress\n", __func__);
+			return 0;
+		}
+	}
+
+	scan = ieee80211_scanner_get(vap->iv_opmode, 0);
+	if (scan == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: no scanner support for mode %u\n",
+			__func__, vap->iv_opmode);
+		/* XXX stat */
+		return 0;
+	}
+
+	if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: not scanning - monitor mode enabled\n", __func__);
+		return 0;
+	}
+
+	IEEE80211_LOCK_IRQ(ic);
+	if ((ic->ic_flags & IEEE80211_F_SCAN) == 0
+#ifdef QTN_BG_SCAN
+		&& (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0
+#endif /* QTN_BG_SCAN */
+	) {
+		if (flags & IEEE80211_SCAN_BW40)
+			ss->ss_scan_bw = BW_HT40;
+		else if (flags & IEEE80211_SCAN_BW80)
+			ss->ss_scan_bw = BW_HT80;
+		else if (flags & IEEE80211_SCAN_BW160)
+			ss->ss_scan_bw = BW_HT160;
+		else
+			ss->ss_scan_bw = BW_HT20;
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: %s scan, bw %s, duration %lu, desired mode %s, %s%s%s%s%s\n",
+			__func__,
+			flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive",
+			ieee80211_bw2str(ss->ss_scan_bw),
+			duration,
+			ieee80211_phymode_name[ic->ic_des_mode],
+			flags & IEEE80211_SCAN_FLUSH ? "flush" : "append",
+			flags & IEEE80211_SCAN_NOPICK ? ", nopick" : "",
+			flags & IEEE80211_SCAN_PICK1ST ? ", pick1st" : "",
+			flags & IEEE80211_SCAN_ONCE ? ", once" : "",
+			flags & IEEE80211_SCAN_OPCHAN ? ", operating channel only" : "");
+
+		ss->ss_vap = vap;
+		if (ss->ss_ops != scan) {
+			/* switch scanners; detach old, attach new */
+			if (ss->ss_ops != NULL)
+				ss->ss_ops->scan_detach(ss);
+			if (!scan->scan_attach(ss)) {
+				/* XXX attach failure */
+				/* XXX stat+msg */
+				ss->ss_ops = NULL;
+			} else
+				ss->ss_ops = scan;
+		}
+
+		if (ss->ss_ops != NULL) {
+			if ((flags & IEEE80211_SCAN_NOSSID) == 0)
+				copy_ssid(vap, ss, nssid, ssids);
+
+			/* NB: top 4 bits for internal use */
+			ss->ss_flags = flags & 0xfff;
+			if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
+				vap->iv_stats.is_scan_active++;
+			else
+				vap->iv_stats.is_scan_passive++;
+			if (flags & IEEE80211_SCAN_FLUSH)
+				ss->ss_ops->scan_flush(ss);
+
+			/* NB: flush frames rx'd before 1st channel change */
+			SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_DISCARD;
+			ss->ss_ops->scan_start(ss, vap);
+			if (scan_restart(SCAN_PRIVATE(ss), duration)) {
+#ifdef QTN_BG_SCAN
+				if (flags & IEEE80211_SCAN_QTN_BGSCAN)
+					ic->ic_flags_qtn |= IEEE80211_QTN_BGSCAN;
+				else
+#endif /*QTN_BG_SCAN */
+					ic->ic_flags |= IEEE80211_F_SCAN;
+				ieee80211_scan_scs_sample_cancel(vap);
+#if defined(QBMPS_ENABLE)
+				if ((ic->ic_flags_qtn & IEEE80211_QTN_BMPS) &&
+						(vap->iv_opmode == IEEE80211_M_STA)) {
+					/* exit power-saving */
+					ic->ic_pm_reason = IEEE80211_PM_LEVEL_SCAN_START;
+					ieee80211_pm_queue_work(ic);
+				}
+#endif
+			}
+
+#ifdef QTN_BG_SCAN
+			if (ic->ic_qtn_bgscan.debug_flags >= 3) {
+				printk("BG_SCAN: start %s scanning...\n",
+					(ic->ic_flags & IEEE80211_F_SCAN)?"regular":"background");
+			}
+#endif /*QTN_BG_SCAN */
+
+		}
+	} else {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: %s scan already in progress\n", __func__,
+			ss->ss_flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive");
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	/* Don't transmit beacons while scanning */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP
+#ifdef QTN_BG_SCAN
+			&& !(flags & IEEE80211_SCAN_QTN_BGSCAN)
+#endif /*QTN_BG_SCAN */
+	) {
+		ic->ic_beacon_stop(vap);
+	}
+
+	/* NB: racey, does it matter? */
+	return (ic->ic_flags & IEEE80211_F_SCAN);
+}
+EXPORT_SYMBOL(ieee80211_start_scan);
+
+/*
+ * Under repeater mode, when the AP interface is not in RUN state,
+ * hold off scanning procedure on STA interface
+ */
+int ieee80211_should_scan(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211vap *first_ap = NULL;
+	struct ieee80211vap *vap_each;
+	struct ieee80211vap *vap_tmp;
+	int ret = 1;
+
+	if (vap->iv_opmode != IEEE80211_M_STA || !(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER))
+		return 1;
+
+	IEEE80211_VAPS_LOCK_BH(ic);
+
+	TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+		if (vap_each->iv_opmode == IEEE80211_M_HOSTAP) {
+			first_ap = vap_each;
+			break;
+		}
+	}
+	KASSERT((first_ap != NULL), ("Repeater mode must have an AP interface"));
+
+	if (first_ap->iv_state != IEEE80211_S_RUN)
+		/*
+		 * Do not initiate scan for repeater STA if AP interface hasn't
+		 * been properly running yet
+		 */
+		ret = 0;
+
+	IEEE80211_VAPS_UNLOCK_BH(ic);
+
+	return ret;
+}
+
+/*
+ * Check the scan cache for an ap/channel to use; if that
+ * fails then kick off a new scan.
+ */
+int
+ieee80211_check_scan(struct ieee80211vap *vap, int flags, u_int duration,
+	u_int nssid, const struct ieee80211_scan_ssid ssids[],
+	int (*action)(struct ieee80211vap *, const struct ieee80211_scan_entry *))
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+#ifdef SCAN_CACHE_ENABLE
+	int checkscanlist = 0;
+#endif
+
+	/*
+	 * Check if there's a list of scan candidates already.
+	 * XXX want more than the ap we're currently associated with
+	 */
+	IEEE80211_LOCK_IRQ(ic);
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+		"%s: %s scan, duration %lu, desired mode %s, %s%s%s%s\n",
+		__func__,
+		flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive",
+		duration,
+		ieee80211_phymode_name[ic->ic_des_mode],
+		flags & IEEE80211_SCAN_FLUSH ? "flush" : "append",
+		flags & IEEE80211_SCAN_NOPICK ? ", nopick" : "",
+		flags & IEEE80211_SCAN_PICK1ST ? ", pick1st" : "",
+		flags & IEEE80211_SCAN_ONCE ? ", once" : "",
+		flags & IEEE80211_SCAN_USECACHE ? ", usecache" : "");
+
+	if (ss->ss_ops != NULL) {
+		/* XXX verify ss_ops matches vap->iv_opmode */
+		if ((flags & IEEE80211_SCAN_NOSSID) == 0) {
+			/*
+			 * Update the ssid list and mark flags so if
+			 * we call start_scan it doesn't duplicate work.
+			 */
+			copy_ssid(vap, ss, nssid, ssids);
+			flags |= IEEE80211_SCAN_NOSSID;
+		}
+#ifdef SCAN_CACHE_ENABLE
+		if ((ic->ic_flags & IEEE80211_F_SCAN) == 0 &&
+#ifdef QTN_BG_SCAN
+		     (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0 &&
+#endif /* QTN_BG_SCAN */
+		     time_before(jiffies, ic->ic_lastscan + vap->iv_scanvalid)) {
+			/*
+			 * We're not currently scanning and the cache is
+			 * deemed hot enough to consult.  Lock out others
+			 * by marking IEEE80211_F_SCAN while we decide if
+			 * something is already in the scan cache we can
+			 * use.  Also discard any frames that might come
+			 * in while temporarily marked as scanning.
+			 */
+			SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_DISCARD;
+			ic->ic_flags |= IEEE80211_F_SCAN;
+			checkscanlist = 1;
+		}
+#endif
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+#ifdef SCAN_CACHE_ENABLE
+	if (checkscanlist) {
+		/*
+		 * ss must be filled out so scan may be restarted "outside"
+		 * of the current callstack.
+		 */
+		ss->ss_flags = flags;
+		ss->ss_duration = duration;
+		if (ss->ss_ops->scan_end(ss, ss->ss_vap, action, flags & IEEE80211_SCAN_KEEPMODE)) {
+			/* found an ap, just clear the flag */
+			ic->ic_flags &= ~IEEE80211_F_SCAN;
+			return 1;
+		}
+		/* no ap, clear the flag before starting a scan */
+		ic->ic_flags &= ~IEEE80211_F_SCAN;
+	}
+#endif
+	if ((flags & IEEE80211_SCAN_USECACHE) == 0 &&
+			ieee80211_should_scan(vap)) {
+		return ieee80211_start_scan(vap, flags, duration, nssid, ssids);
+	} else {
+		/* If we *must* use the cache and no ap was found, return failure */
+		return 0;
+	}
+}
+
+/*
+ * Restart a previous scan.  If the previous scan completed
+ * then we start again using the existing channel list.
+ */
+int
+ieee80211_bg_scan(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	IEEE80211_LOCK_IRQ(ic);
+	if ((ic->ic_flags & IEEE80211_F_SCAN) == 0
+#ifdef QTN_BG_SCAN
+		&& (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0
+#endif /* QTN_BG_SCAN */
+	) {
+		u_int duration;
+		/*
+		 * Go off-channel for a fixed interval that is large
+		 * enough to catch most ap's but short enough that
+		 * we can return on-channel before our listen interval
+		 * expires.
+		 */
+		duration = IEEE80211_SCAN_OFFCHANNEL;
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: %s scan, jiffies %lu duration %lu\n", __func__,
+			ss->ss_flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive",
+			jiffies, duration);
+
+		if (ss->ss_ops != NULL) {
+			ss->ss_vap = vap;
+			/*
+			 * A background scan does not select a new sta; it
+			 * just refreshes the scan cache.  Also, indicate
+			 * the scan logic should follow the beacon schedule:
+			 * we go off-channel and scan for a while, then
+			 * return to the bss channel to receive a beacon,
+			 * then go off-channel again.  All during this time
+			 * we notify the ap we're in power save mode.  When
+			 * the scan is complete we leave power save mode.
+			 * If any beacon indicates there are frames pending
+			 * for us then we drop out of power save mode
+			 * (and background scan) automatically by way of the
+			 * usual sta power save logic.
+			 */
+			ss->ss_flags |= IEEE80211_SCAN_NOPICK |
+				IEEE80211_SCAN_BGSCAN;
+
+			if (ic->ic_scan_opchan_enable && vap->iv_opmode == IEEE80211_M_STA) {
+				ss->ss_flags |= IEEE80211_SCAN_OPCHAN | IEEE80211_SCAN_ACTIVE;
+				ss->ss_ops->scan_start(ss, vap);
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+					"%s: force a new active bgscan", __func__);
+			}
+
+			/* if previous scan completed, restart */
+			if (ss->ss_next >= ss->ss_last) {
+				ss->ss_next = 0;
+				if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
+					vap->iv_stats.is_scan_active++;
+				else
+					vap->iv_stats.is_scan_passive++;
+				ss->ss_ops->scan_restart(ss, vap);
+			}
+			/* NB: flush frames rx'd before 1st channel change */
+			SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_DISCARD;
+			ss->ss_mindwell = duration;
+			if (scan_restart(SCAN_PRIVATE(ss), duration)) {
+				ic->ic_flags |= IEEE80211_F_SCAN;
+				ic->ic_flags_ext |= IEEE80211_FEXT_BGSCAN;
+			}
+		} else {
+			/* XXX msg+stat */
+		}
+	} else {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: %s scan already in progress\n", __func__,
+			ss->ss_flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive");
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	/* NB: racey, does it matter? */
+	return (ic->ic_flags & IEEE80211_F_SCAN);
+}
+EXPORT_SYMBOL(ieee80211_bg_scan);
+
+static void
+_ieee80211_cancel_scan(struct ieee80211vap *vap, int no_wait)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	IEEE80211_LOCK_IRQ(ic);
+	if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+		|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+	) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: cancel %s scan\n", __func__,
+			ss->ss_flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive");
+
+		/* clear bg scan NOPICK and mark cancel request */
+		ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+		SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_CANCEL;
+		ss->ss_ops->scan_cancel(ss, vap);
+
+		if (no_wait) {
+			/* force it to fire immediately */
+			del_timer(&SCAN_PRIVATE(ss)->ss_scan_timer);
+			(SCAN_PRIVATE(ss)->ss_scan_timer).function((SCAN_PRIVATE(ss)->ss_scan_timer).data);
+		} else {
+			/* force it to fire asap */
+			mod_timer(&SCAN_PRIVATE(ss)->ss_scan_timer, jiffies);
+		}
+
+		/*
+		 * The probe timer is not cleared, so there may be some
+		 * probe requests sent after the scan, but that should not
+		 * cause any issues.
+		 */
+	}
+	IEEE80211_UNLOCK_IRQ(ic);
+}
+
+/*
+ * Cancel any scan currently going on.
+ */
+void
+ieee80211_cancel_scan(struct ieee80211vap *vap)
+{
+	_ieee80211_cancel_scan(vap, 0);
+}
+
+/*
+ * Cancel any scan currently going on immediately
+ */
+void
+ieee80211_cancel_scan_no_wait(struct ieee80211vap *vap)
+{
+	_ieee80211_cancel_scan(vap, 1);
+}
+
+/*
+ * Process a beacon or probe response frame for SCS off channel sampling
+ */
+void ieee80211_add_scs_off_chan(struct ieee80211vap *vap,
+	const struct ieee80211_scanparams *sp,
+	const struct ieee80211_frame *wh,
+	int subtype, int rssi, int rstamp)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	struct ap_state *as;
+	struct ap_state *as_bak;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return;
+
+	as = (struct ap_state *)ss->ss_scs_priv;
+	if (as && ss->ss_ops && ss->ss_ops->scan_add) {
+		as_bak = ss->ss_priv;
+		ss->ss_priv = as;
+		ss->ss_ops->scan_add(ss, sp, wh, subtype, rssi, rstamp);
+		ss->ss_priv = as_bak;
+	}
+}
+
+void
+ieee80211_scan_scs_sample_cancel(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return;
+
+	ic->ic_sample_channel_cancel(vap);
+}
+
+/*
+ * Sample the state of an off-channel for Interference Mitigation
+ */
+void
+ieee80211_scan_scs_sample(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int scanning;
+	int16_t scs_chan = ic->ic_scs.scs_last_smpl_chan;
+	int16_t chan_count = 0;
+	struct ieee80211_channel *chan;
+	const struct ieee80211_scanner *scan;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	int cur_bw;
+	struct ieee80211vap *tmp_vap;
+	int wds_basic_pure = 0;
+
+	IEEE80211_LOCK_IRQ(ic);
+	scanning = ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+		|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+		);
+	IEEE80211_UNLOCK_IRQ(ic);
+	if (scanning) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: not sampling - scan in progress\n", __func__);
+		IEEE80211_SCS_CNT_INC(&ic->ic_scs, IEEE80211_SCS_CNT_IN_SCAN);
+		return;
+	}
+
+	if (!ic->ic_scs.scs_stats_on) {
+		SCSDBG(SCSLOG_INFO, "not sampling - scs stats is disabled\n");
+		return;
+	}
+
+	if (vap->iv_state != IEEE80211_S_RUN) {
+		SCSDBG(SCSLOG_INFO, "not sampling - vap is not in running status\n");
+		return;
+	}
+
+	if (ic->ic_ocac.ocac_running) {
+		SCSDBG(SCSLOG_INFO, "not sampling - Seamless DFS is ongoing\n");
+		return;
+	}
+
+	TAILQ_FOREACH(tmp_vap, &ic->ic_vaps, iv_next) {
+		if (IEEE80211_VAP_WDS_IS_MBS(tmp_vap)) {
+			wds_basic_pure = 0;
+			break;
+		} else if (IEEE80211_VAP_WDS_IS_RBS(tmp_vap)) {
+			SCSDBG(SCSLOG_INFO, "not sampling - RBS mode\n");
+			return;
+		} else if (IEEE80211_VAP_WDS_BASIC(tmp_vap)) {
+			wds_basic_pure = 1;
+		}
+	}
+
+	if (wds_basic_pure) {
+		SCSDBG(SCSLOG_INFO, "not sampling - basic WDS mode\n");
+		return;
+	}
+
+	cur_bw = ieee80211_get_bw(ic);
+
+	scan = ieee80211_scanner_get(IEEE80211_M_HOSTAP, 0);
+	if (scan == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: no scanner support for AP mode\n", __func__);
+		return;
+	}
+
+	ss->ss_vap = vap;
+	if (ss->ss_ops != scan) {
+		if (ss->ss_ops != NULL)
+			ss->ss_ops->scan_detach(ss);
+		if (!scan->scan_attach(ss)) {
+			ss->ss_ops = NULL;
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: scanner attach failed\n", __func__);
+			return;
+		} else {
+			ss->ss_ops = scan;
+		}
+	}
+
+scan_next_chan:
+	chan_count++;
+	scs_chan += IEEE80211_SUBCHANNELS_OF_20MHZ;
+	if (cur_bw >= BW_HT40)
+		scs_chan += IEEE80211_SUBCHANNELS_OF_40MHZ - IEEE80211_SUBCHANNELS_OF_20MHZ;
+	if (cur_bw >= BW_HT80)
+		scs_chan += IEEE80211_SUBCHANNELS_OF_80MHZ - IEEE80211_SUBCHANNELS_OF_40MHZ;
+
+	if (chan_count > ic->ic_nchans) {
+		SCSDBG(SCSLOG_INFO, "no available off channel for sampling\n");
+		return;
+	}
+
+	if (scs_chan >= ic->ic_nchans) {
+		if (cur_bw > BW_HT20)
+			ic->ic_scs.scs_smpl_chan_offset++;
+		if (cur_bw == BW_HT40 && ic->ic_scs.scs_smpl_chan_offset >
+					IEEE80211_SUBCHANNELS_OF_40MHZ - 1)
+			ic->ic_scs.scs_smpl_chan_offset = 0;
+		else if (cur_bw == BW_HT80 && ic->ic_scs.scs_smpl_chan_offset >
+					IEEE80211_SUBCHANNELS_OF_80MHZ - 1)
+			ic->ic_scs.scs_smpl_chan_offset = 0;
+		scs_chan = 0;
+		scs_chan += ic->ic_scs.scs_smpl_chan_offset;
+	}
+
+	chan = &ic->ic_channels[scs_chan];
+
+	if (isclr(ic->ic_chan_active, chan->ic_ieee)) {
+		goto scan_next_chan;
+	}
+
+	/* do not scan current working channel */
+	if (chan->ic_ieee == ic->ic_curchan->ic_ieee) {
+		goto scan_next_chan;
+	}
+
+	if (cur_bw == BW_HT40) {
+		if (!(chan->ic_flags & IEEE80211_CHAN_HT40) ||
+				(chan->ic_ieee == ieee80211_find_sec_chan(ic->ic_curchan))) {
+			goto scan_next_chan;
+		}
+	}
+
+	if (cur_bw >= BW_HT80) {
+		if (!(chan->ic_flags & IEEE80211_CHAN_VHT80) ||
+				(chan->ic_center_f_80MHz == ic->ic_curchan->ic_center_f_80MHz)) {
+			goto scan_next_chan;
+		}
+	}
+
+	SCSDBG(SCSLOG_INFO, "choose sampling channel: %u\n", chan->ic_ieee);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+		"%s: sampling channel %u freq=%u\n", __func__,
+		chan->ic_ieee, chan->ic_freq);
+
+	/* don't move to next until muc finish sampling */
+	ic->ic_scs.scs_des_smpl_chan = scs_chan;
+
+	ic->ic_sample_channel(vap, chan);
+}
+EXPORT_SYMBOL(ieee80211_scan_scs_sample);
+
+
+int
+ap_list_asl_table(struct ieee80211_scan_state *ss)
+{
+	struct ap_state *as = ss->ss_priv;
+	struct ap_scan_entry *se;
+	int i;
+
+	printk(KERN_ERR "CHINUSE_START\n");
+
+	/*
+	 * Report only the first scan entry recorded on each channel; one
+	 * line per occupied channel is enough to show channel usage.  The
+	 * original iterated with TAILQ_FOREACH_SAFE and broke out after
+	 * the first element, which is just TAILQ_FIRST in disguise.
+	 */
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		se = TAILQ_FIRST(&as->as_scan_list[i].asl_head);
+		if (se != NULL) {
+			/* XXX KERN_EMERG looks overly severe for an informational dump */
+			printk(KERN_EMERG "Channel %d : %d Mhz\n",
+			       se->base.se_chan->ic_ieee, se->base.se_chan->ic_freq);
+		}
+	}
+	printk(KERN_ERR "CHINUSE_END\n");
+	return 0;
+}
+
+/*
+ * Compute the minimum and maximum dwell times (in jiffies) for the
+ * current scan type.  Passive and active scans draw from different
+ * fields of the scan state; an OBSS scan instead takes its minimum
+ * dwell from the associated node's OBSS IE.
+ */
+static void
+get_max_min_dwell(struct ieee80211_scan_state *ss, struct ieee80211_node *ni,
+		int is_passive, int is_obss_scan, int *mindwell, int *maxdwell)
+{
+	/* Start from the limits that match the scan type */
+	if (is_passive) {
+		*mindwell = ss->ss_mindwell_passive;
+		*maxdwell = ss->ss_maxdwell_passive;
+	} else {
+		*mindwell = ss->ss_mindwell;
+		*maxdwell = ss->ss_maxdwell;
+	}
+
+	/*
+	 * OBSS scans override the minimum dwell with the value advertised
+	 * in the peer's OBSS IE; stretch maxdwell so it never drops below
+	 * mindwell.
+	 */
+	if (is_obss_scan) {
+		*mindwell = msecs_to_jiffies(is_passive ?
+				ni->ni_obss_ie.obss_passive_dwell :
+				ni->ni_obss_ie.obss_active_dwell);
+		if (*mindwell > *maxdwell)
+			*maxdwell = *mindwell;
+	}
+}
+
+/*
+ * Switch to the next channel marked for scanning.
+ *
+ * Timer callback; arg is the ieee80211_scan_state.  Each invocation
+ * either moves the radio to the next channel in ss_chans[] and re-arms
+ * the dwell timer, or -- when the channel list is exhausted, the scan
+ * was canceled, or the scan window expired -- finishes the scan and
+ * returns to the bss channel.
+ */
+static void
+scan_next(unsigned long arg)
+{
+#define	ISCAN_REP	(ISCAN_MINDWELL | ISCAN_START | ISCAN_DISCARD)
+	struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *) arg;
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct ieee80211_channel *chan;
+	unsigned long maxdwell, scanend;
+	int scanning, scandone;
+
+	/* Make passive channels special */
+	int is_passive;
+	int is_obss = 0;
+	int maxdwell_used;
+	int mindwell_used;
+#ifdef QTN_BG_SCAN
+	int bgscan_dwell = 0;
+#endif /* QTN_BG_SCAN */
+
+	/* OBSS scans only make sense while associated (valid AID) */
+	if (ss->ss_flags & IEEE80211_SCAN_OBSS) {
+		if (ni && IEEE80211_AID(ni->ni_associd))
+			is_obss = 1;
+		else
+			return;
+	}
+
+	/* Bail out if the scan was canceled before this timer fired */
+	IEEE80211_LOCK_IRQ(ic);
+	scanning = ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+			|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+		);
+	IEEE80211_UNLOCK_IRQ(ic);
+	if (!scanning)			/* canceled */
+		return;
+
+/* Re-entry point when the scan-end handler wants another pass */
+again:
+	scandone = (ss->ss_next >= ss->ss_last) ||
+		(SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) != 0;
+	scanend = SCAN_PRIVATE(ss)->ss_scanend;
+
+	if ((vap->iv_opmode == IEEE80211_M_STA) && (ss->ss_next == 0)) {
+		/*
+		 * Periodically scan using low Rx gain and Tx power in case
+		 * association is failing because the AP is too close.
+		 * More suitable power settings will be determined after association.
+		 */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				  "%s Starting a scan (Low power %s, count %d)\n",
+				  __func__, txpow_rxgain_state ? "on" : "off", txpow_rxgain_count);
+
+		if ((ic->ic_pwr_adjust_scancnt > 0) && (ss->is_scan_valid) &&
+#ifdef QTN_BG_SCAN
+		     !(ss->ss_flags & IEEE80211_SCAN_QTN_BGSCAN) &&
+#endif /* QTN_BG_SCAN */
+		    (txpow_rxgain_count) && ((txpow_rxgain_count % ic->ic_pwr_adjust_scancnt) == 0)) {
+			ieee80211_pwr_adjust(vap, txpow_rxgain_state);
+			txpow_rxgain_state = !txpow_rxgain_state;
+		}
+		txpow_rxgain_count++;
+#ifdef QTN_BG_SCAN
+		if ((ss->ss_flags & IEEE80211_SCAN_QTN_BGSCAN) &&
+				(SCAN_PRIVATE(ss)->ss_iflags & ISCAN_START)) {
+			ic->ic_bgscan_start(ic);
+		}
+#endif /* QTN_BG_SCAN */
+	}
+
+	/* Work out the previous channel (to use passive vs. active mindwell) */
+	chan = (ss->ss_next) ? ss->ss_chans[ss->ss_next-1] : ss->ss_chans[0];
+	is_passive = (!(ss->ss_flags & IEEE80211_SCAN_ACTIVE) ||
+				(chan->ic_flags & IEEE80211_CHAN_PASSIVE));
+	get_max_min_dwell(ss, ni, is_passive, is_obss, &mindwell_used, &maxdwell_used);
+
+	if (!scandone &&
+	    (ss->ss_flags & IEEE80211_SCAN_GOTPICK) == 0 &&
+	    ((SCAN_PRIVATE(ss)->ss_iflags & ISCAN_START) ||
+	     time_before(jiffies + mindwell_used, scanend))) {
+
+
+		/* Advance to the next channel in the scan list */
+		chan = ss->ss_chans[ss->ss_next++];
+		is_passive = (!(ss->ss_flags & IEEE80211_SCAN_ACTIVE) ||
+					(chan->ic_flags & IEEE80211_CHAN_PASSIVE));
+		ic->ic_scanchan = chan;
+
+#ifdef QTN_BG_SCAN
+		if (ss->ss_flags & IEEE80211_SCAN_QTN_BGSCAN) {
+			uint32_t scan_mode = ss->ss_pick_flags & IEEE80211_PICK_BG_MODE_MASK;
+
+			if (!is_passive) {
+				scan_mode = IEEE80211_PICK_BG_ACTIVE;
+			}
+
+			/* On the bss channel itself an AP may probe actively;
+			 * a STA must not (see workaround comment below). */
+			if (chan->ic_ieee == ic->ic_bsschan->ic_ieee) {
+				if (vap->iv_opmode != IEEE80211_M_STA) {
+					scan_mode = IEEE80211_PICK_BG_ACTIVE;
+				} else if (scan_mode & IEEE80211_PICK_BG_ACTIVE) {
+					scan_mode = 0;
+				}
+			}
+
+			if (!scan_mode) {
+				/*
+				 * Auto passive mode selection:
+				 * 1) if FAT is larger than the threshold for fast mode
+				 *  which is 60% by default, will pick passive fast mode
+				 * 2) else if FAT is larger than the threshold for normal mode
+				 * which is 30% by default, will pick passive normal mode
+				 * 3) else pick passive slow mode.
+				 */
+				if (ic->ic_scs.scs_cca_idle_smthed >=
+						ic->ic_qtn_bgscan.thrshld_fat_passive_fast) {
+					scan_mode = IEEE80211_PICK_BG_PASSIVE_FAST;
+				} else if (ic->ic_scs.scs_cca_idle_smthed >=
+						ic->ic_qtn_bgscan.thrshld_fat_passive_normal) {
+					scan_mode = IEEE80211_PICK_BG_PASSIVE_NORMAL;
+				} else {
+					scan_mode = IEEE80211_PICK_BG_PASSIVE_SLOW;
+				}
+			}
+
+			/* Dwell durations are per-mode for qtn background scan */
+			if (scan_mode & IEEE80211_PICK_BG_ACTIVE) {
+				maxdwell_used = msecs_to_jiffies(ic->ic_qtn_bgscan.duration_msecs_active);
+			} else if (scan_mode & IEEE80211_PICK_BG_PASSIVE_FAST) {
+				maxdwell_used = msecs_to_jiffies(ic->ic_qtn_bgscan.duration_msecs_passive_fast);
+			} else if (scan_mode & IEEE80211_PICK_BG_PASSIVE_NORMAL) {
+				maxdwell_used = msecs_to_jiffies(ic->ic_qtn_bgscan.duration_msecs_passive_normal);
+			} else {
+				maxdwell_used = msecs_to_jiffies(ic->ic_qtn_bgscan.duration_msecs_passive_slow);
+			}
+			maxdwell = mindwell_used = maxdwell_used;
+
+			/* NOTE(review): active mode picks the *passive* dwell here
+			 * and vice versa -- looks inverted, confirm intent. */
+			if (scan_mode & IEEE80211_PICK_BG_ACTIVE) {
+				bgscan_dwell = ic->ic_qtn_bgscan.dwell_msecs_passive;
+			} else {
+				bgscan_dwell = ic->ic_qtn_bgscan.dwell_msecs_active;
+			}
+
+			/*
+			 * Workaround: in STA mode, don't send probe request frame
+			 * directly because the probe response frame from other AP
+			 * may mess up the txalert timer
+			 */
+			if (chan->ic_ieee == ic->ic_bsschan->ic_ieee &&
+					vap->iv_opmode != IEEE80211_M_STA) {
+				ieee80211_send_probereq(vap->iv_bss,
+						vap->iv_myaddr, vap->iv_dev->broadcast,
+						vap->iv_dev->broadcast,
+						(u_int8_t *)"", 0,
+						vap->iv_opt_ie, vap->iv_opt_ie_len);
+
+			} else {
+				ic->ic_bgscan_channel(vap, chan, scan_mode, bgscan_dwell);
+			}
+		} else {
+#endif /* QTN_BG_SCAN */
+			/* Reset mindwell and maxdwell as the new channel could be passive */
+			get_max_min_dwell(ss, ni, is_passive, is_obss, &mindwell_used, &maxdwell_used);
+
+			/*
+			 * Watch for truncation due to the scan end time.
+			 */
+			if (time_after(jiffies + maxdwell_used, scanend))
+				maxdwell = scanend - jiffies;
+			else
+				maxdwell = maxdwell_used;
+
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: chan %3d%c -> %3d%c [%s, dwell min %lu max %lu]\n",
+				__func__,
+				ieee80211_chan2ieee(ic, ic->ic_curchan),
+					channel_type(ic->ic_curchan),
+				ieee80211_chan2ieee(ic, chan), channel_type(chan),
+				(ss->ss_flags & IEEE80211_SCAN_ACTIVE) &&
+					(chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0 ?
+					"active" : "passive",
+				mindwell_used, maxdwell);
+
+			/*
+			 * Potentially change channel and phy mode.
+			 */
+			/* Channel change done with 20MHz wide channels unless in 40MHz only mode */
+			if (!((ss->ss_flags & IEEE80211_SCAN_BGSCAN) &&
+					chan->ic_ieee == ic->ic_curchan->ic_ieee)) {
+				if (ss->ss_scan_bw == BW_HT20)
+					ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_20;
+				else if ((ss->ss_scan_bw == BW_HT40) || (ic->ic_11n_40_only_mode))
+					ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_40;
+
+				change_channel(ic, chan);
+
+				/* Clear the flag */
+				if (ss->ss_scan_bw == BW_HT20)
+					ic->ic_flags_ext &= ~IEEE80211_FEXT_SCAN_20;
+				else if ((ss->ss_scan_bw == BW_HT40) || (ic->ic_11n_40_only_mode))
+					ic->ic_flags_ext &= ~IEEE80211_FEXT_SCAN_40;
+			}
+#ifdef QTN_BG_SCAN
+		}
+#endif /* QTN_BG_SCAN */
+		/*
+		 * If doing an active scan and the channel is not
+		 * marked passive-only then send a probe request.
+		 * Otherwise just listen for traffic on the channel.
+		 */
+		if ((ss->ss_flags & IEEE80211_SCAN_ACTIVE) &&
+#ifdef QTN_BG_SCAN
+		    /* qtn bgscan sends probe request in MuC */
+		    (ss->ss_flags & IEEE80211_SCAN_QTN_BGSCAN) == 0 &&
+#endif /* QTN_BG_SCAN */
+		    (chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0) {
+			/*
+			 * Delay sending the probe requests so we are on
+			 * the new channel. Current delay is half of maxdwell
+			 * to make sure it is well within the dwell time,
+			 * this can be fine tuned later if necessary.
+			 */
+			mod_timer(&SCAN_PRIVATE(ss)->ss_probe_timer,
+				  jiffies + (maxdwell / 2));
+		}
+		SCAN_PRIVATE(ss)->ss_chanmindwell = jiffies + mindwell_used;
+		mod_timer(&SCAN_PRIVATE(ss)->ss_scan_timer, jiffies + maxdwell);
+		/* clear mindwell lock and initial channel change flush */
+		SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_REP;
+	} else {
+		/* Channel list exhausted, scan window over, or pick made: wind down */
+		ic->ic_scan_end(ic);		/* notify driver */
+		/*
+		 * Record scan complete time.  Note that we also do
+		 * this when canceled so any background scan will
+		 * not be restarted for a while.
+		 */
+		if (scandone)
+			ic->ic_lastscan = jiffies;
+
+		/* return to the bss channel */
+		if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
+#ifdef QTN_BG_SCAN
+			if (ss->ss_flags & IEEE80211_SCAN_QTN_BGSCAN) {
+				struct ieee80211vap *tmp_vap;
+				/*
+				 * Need to update beacon because beacon may have been
+				 * updated when AP is on off channel, and in this case,
+				 * the channel offset in beacon's retry table setting
+				 * may not be correct.
+				 */
+				TAILQ_FOREACH(tmp_vap, &ic->ic_vaps, iv_next) {
+					if (tmp_vap->iv_opmode != IEEE80211_M_HOSTAP)
+						continue;
+					if (tmp_vap->iv_state != IEEE80211_S_RUN)
+						continue;
+					ic->ic_beacon_update(tmp_vap);
+				}
+			}
+			else
+#endif /* QTN_BG_SCAN */
+				change_channel(ic, ic->ic_bsschan);
+		}
+
+		/* clear internal flags and any indication of a pick */
+		SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_REP;
+		ss->ss_flags &= ~IEEE80211_SCAN_GOTPICK;
+
+		if ((SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) == 0) {
+			ieee80211_check_type_of_neighborhood(ic);
+#ifdef QSCS_ENABLED
+			ieee80211_scs_update_ranking_table_by_scan(ic);
+			ieee80211_scs_adjust_cca_threshold(ic);
+#endif
+		}
+
+		/*
+		 * If not canceled and scan completed, do post-processing.
+		 * If the callback function returns 0, then it wants to
+		 * continue/restart scanning.  Unfortunately we needed to
+		 * notify the driver to end the scan above to avoid having
+		 * rx frames alter the scan candidate list.
+		 */
+		if ((SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) == 0 &&
+		    !ss->ss_ops->scan_end(ss, vap, NULL, 0) &&
+		    (ss->ss_flags & IEEE80211_SCAN_ONCE) == 0 &&
+		    time_before(jiffies + mindwell_used, scanend)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: done, restart "
+				"[jiffies %lu, dwell min %lu scanend %lu]\n",
+				__func__,
+				jiffies, mindwell_used, scanend);
+			ss->ss_next = 0;	/* reset to beginning */
+			if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
+				vap->iv_stats.is_scan_active++;
+			else
+				vap->iv_stats.is_scan_passive++;
+
+//			ic->ic_scan_start(ic);	/* notify driver */
+			goto again;
+		} else {
+			/* past here, scandone is ``true'' if not in bg mode */
+			if ((ss->ss_flags & IEEE80211_SCAN_BGSCAN) == 0)
+				scandone = 1;
+
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: %s, "
+				"[jiffies %lu, dwell min %lu scanend %lu]\n",
+				__func__, scandone ? "done" : "stopped",
+				jiffies, mindwell_used, scanend);
+
+			/* don't care about bgscan case */
+			if ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+				|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+				) {
+				/* Wake anyone blocked waiting for scan completion */
+				wake_up_interruptible_all(&ic->ic_scan_comp);
+			}
+			/*
+			 * Clear the SCAN bit first in case frames are
+			 * pending on the station power save queue.  If
+			 * we defer this then the dispatch of the frames
+			 * may generate a request to cancel scanning.
+			 */
+			ic->ic_flags &= ~IEEE80211_F_SCAN;
+#ifdef QTN_BG_SCAN
+			ic->ic_flags_qtn &= ~IEEE80211_QTN_BGSCAN;
+#endif /* QTN_BG_SCAN */
+
+#if defined(QBMPS_ENABLE)
+			if ((ic->ic_flags_qtn & IEEE80211_QTN_BMPS) &&
+			    (vap->iv_opmode == IEEE80211_M_STA)) {
+				/* re-enter power-saving if possible */
+		                ic->ic_pm_reason = IEEE80211_PM_LEVEL_SCAN_STOP;
+				ieee80211_pm_queue_work(ic);
+			}
+#endif
+			/*
+			 * Drop out of power save mode when a scan has
+			 * completed.  If this scan was prematurely terminated
+			 * because it is a background scan then don't notify
+			 * the ap; we'll either return to scanning after we
+			 * receive the beacon frame or we'll drop out of power
+			 * save mode because the beacon indicates we have frames
+			 * waiting for us.
+			 */
+			if (scandone) {
+				ieee80211_sta_pwrsave(vap, 0);
+				if ((vap->iv_state == IEEE80211_S_RUN) &&
+				    (vap->iv_opmode == IEEE80211_M_STA)) {
+					ic->ic_setparam(vap->iv_bss, IEEE80211_PARAM_BEACON_ALLOW,
+							0, NULL, 0);
+				}
+
+				if (ss->ss_next >= ss->ss_last) {
+					ieee80211_notify_scan_done(vap);
+					ic->ic_flags_ext &= ~IEEE80211_FEXT_BGSCAN;
+
+					if (IEEE80211_IS_11NG_40(ic) &&
+							(ic->ic_opmode == IEEE80211_M_STA))
+						ieee80211_send_20_40_bss_coex(vap);
+				}
+			}
+
+			if ((ic->ic_flags_qtn & IEEE80211_QTN_PRINT_CH_INUSE) &&
+			    (ic->ic_opmode == IEEE80211_M_HOSTAP))
+				ap_list_asl_table(ss);
+
+			SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_CANCEL;
+			ss->ss_flags &= ~(IEEE80211_SCAN_ONCE | IEEE80211_SCAN_PICK1ST);
+
+			/* Continue an auto-channel chanset scan if one is in progress */
+			if (!ieee80211_chanset_scan_finished(ic))
+				ieee80211_start_chanset_scan(vap, ic->ic_autochan_scan_flags);
+		}
+	}
+#undef ISCAN_REP
+}
+
+/*
+ * Timer handler: transmit probe requests after a short delay during an
+ * active scan, giving the preceding channel change time to complete.
+ * Sends one broadcast probe request followed by a directed probe
+ * request for each configured SSID.
+ */
+static void
+send_probes(unsigned long arg)
+{
+	struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *) arg;
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct net_device *dev = vap->iv_dev;
+	struct ieee80211_node *bss = vap->iv_bss;
+	int ssid_idx;
+
+	/* XXX remove dependence on vap/vap->iv_bss; move to policy code? */
+	if (bss == NULL)
+		return;
+
+	/* Broadcast probe request first (XXX suppress broadcast probe req?) */
+	ieee80211_send_probereq(bss,
+		vap->iv_myaddr, dev->broadcast,
+		dev->broadcast,
+		(u_int8_t *)"", 0,
+		vap->iv_opt_ie, vap->iv_opt_ie_len);
+
+	/* ...then one directed probe request per configured SSID */
+	for (ssid_idx = 0; ssid_idx < ss->ss_nssid; ssid_idx++)
+		ieee80211_send_probereq(bss,
+			vap->iv_myaddr, dev->broadcast,
+			dev->broadcast,
+			ss->ss_ssid[ssid_idx].ssid,
+			ss->ss_ssid[ssid_idx].len,
+			vap->iv_opt_ie, vap->iv_opt_ie_len);
+}
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Debug helper: print a one/two-line summary of a received probe
+ * response or beacon (sender, subtype, channel, SSID, capabilities).
+ */
+static void
+dump_probe_beacon(u_int8_t subtype,
+	const u_int8_t mac[IEEE80211_ADDR_LEN],
+	const struct ieee80211_scanparams *sp)
+{
+	printf("[%s] %02x ", ether_sprintf(mac), subtype);
+	if (sp == NULL) {
+		printf("\n");
+		return;
+	}
+
+	printf("on chan %u (bss chan %u) ", sp->chan, sp->bchan);
+	ieee80211_print_essid(sp->ssid + 2, sp->ssid[1]);
+	printf("\n");
+
+	printf("[%s] caps 0x%x bintval %u erp 0x%x",
+		ether_sprintf(mac), sp->capinfo, sp->bintval, sp->erp);
+	if (sp->country != NULL) {
+#ifdef __FreeBSD__
+		printf(" country info %*D",
+			sp->country[1], sp->country + 2, " ");
+#else
+		/* Dump the raw country IE payload byte by byte */
+		int i;
+		printf(" country info");
+		for (i = 0; i < sp->country[1]; i++)
+			printf(" %02x", sp->country[i + 2]);
+#endif
+	}
+	printf("\n");
+}
+#endif /* IEEE80211_DEBUG */
+
+/*
+ * Process a beacon or probe response frame.
+ *
+ * Feeds the received frame into the attached scanner's cache via
+ * ss_ops->scan_add().  On a passive DFS channel in STA mode, a received
+ * beacon proves the channel is occupied, so probe requests are sent.
+ * Once the minimum dwell has been met and a new entry was added, the
+ * dwell is cut short by re-arming the scan timer.
+ */
+void
+ieee80211_add_scan(struct ieee80211vap *vap,
+	const struct ieee80211_scanparams *sp,
+	const struct ieee80211_frame *wh,
+	int subtype, int rssi, int rstamp)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	/*
+	 * Frames received during startup are discarded to avoid
+	 * using scan state setup on the initial entry to the timer
+	 * callback.  This can occur because the device may enable
+	 * rx prior to our doing the initial channel change in the
+	 * timer routine (we defer the channel change to the timer
+	 * code to simplify locking on linux).
+	 */
+
+	if (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_DISCARD)
+		return;
+#ifdef IEEE80211_DEBUG
+	if (ieee80211_msg_scan(vap) && (ic->ic_flags & IEEE80211_F_SCAN) && sp)
+		dump_probe_beacon(subtype, wh->i_addr2, sp);
+#endif
+	if ((ic->ic_opmode == IEEE80211_M_STA) &&
+	    (ic->ic_flags & IEEE80211_F_SCAN) &&
+#ifdef QTN_BG_SCAN
+	    /* For qtn bgscan, probereq is sent by MuC */
+	    ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0) &&
+#endif /* QTN_BG_SCAN */
+	    (subtype == IEEE80211_FC0_SUBTYPE_BEACON) &&
+	    (ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS) &&
+	    (ic->ic_curchan->ic_flags & IEEE80211_CHAN_PASSIVE)) {
+		/* Beacon received on a DFS channel, OK to send probe */
+#ifdef IEEE80211_DEBUG
+		if (ieee80211_msg_scan(vap))
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: sending a probe req on DFS channel %3d%c\n",
+				__func__,
+				ieee80211_chan2ieee(ic, ic->ic_curchan),
+				channel_type(ic->ic_curchan));
+#endif
+		/*
+		 * In strict STA-DFS mode also record the frame so the scan
+		 * cache reflects channels that still require CAC.
+		 */
+		if (ic->sta_dfs_info.sta_dfs_strict_mode) {
+			if ((ss->ss_ops != NULL) &&
+					ieee80211_is_chan_cac_required(ic->ic_curchan)) {
+				ss->ss_ops->scan_add(ss, sp, wh, subtype, rssi, rstamp);
+			}
+		}
+		send_probes((unsigned long)ss);
+	} else if (ss->ss_ops != NULL &&
+	    ss->ss_ops->scan_add(ss, sp, wh, subtype, rssi, rstamp)) {
+#ifdef QTN_BG_SCAN
+		/* Verbose bgscan debugging: log each entry that was added */
+		if (ic->ic_qtn_bgscan.debug_flags >= 4) {
+			u_int8_t *mac = (u_int8_t *)wh->i_addr2;
+			u_int8_t ssid[IEEE80211_NWID_LEN+1] = {0};
+			if (sp->ssid[1] && sp->ssid[1] <= IEEE80211_NWID_LEN) {
+				memcpy(ssid, sp->ssid + 2, sp->ssid[1]);
+			}
+			printk("==> Add scan entry -- chan: %u,"
+					" mac: %02x:%02x:%02x:%02x:%02x:%02x,"
+					" ssid: %s <==\n",
+					sp->chan,
+					mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+					(char *)ssid);
+		}
+#endif /*QTN_BG_SCAN */
+
+		/*
+		 * If we've reached the min dwell time terminate
+		 * the timer so we'll switch to the next channel.
+		 */
+		if ((SCAN_PRIVATE(ss)->ss_iflags & ISCAN_MINDWELL) == 0 &&
+#ifdef QTN_BG_SCAN
+		    ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0) &&
+#endif /* QTN_BG_SCAN */
+		    time_after_eq(jiffies, SCAN_PRIVATE(ss)->ss_chanmindwell)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: chan %3d%c min dwell met (%lu > %lu)\n",
+				__func__,
+				ieee80211_chan2ieee(ic, ic->ic_curchan),
+					channel_type(ic->ic_curchan),
+				jiffies, SCAN_PRIVATE(ss)->ss_chanmindwell);
+			/*
+			 * XXX
+			 * We want to just kick the timer and still
+			 * process frames until it fires but linux
+			 * will livelock unless we discard frames.
+			 */
+#if 0
+			SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_MINDWELL;
+#else
+			SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_DISCARD;
+#endif
+			/* NB: trigger at next clock tick */
+			mod_timer(&SCAN_PRIVATE(ss)->ss_scan_timer, jiffies);
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_add_scan);
+
+/*
+ * Remove the scan cache entry for this vap's bss node, if any scanner
+ * module is currently attached.
+ */
+void
+ieee80211_scan_remove(struct ieee80211vap *vap)
+{
+	struct ieee80211_scan_state *ss = vap->iv_ic->ic_scan;
+
+	if (ss->ss_ops == NULL)
+		return;
+
+	ss->ss_ops->scan_remove(ss, vap->iv_bss);
+}
+
+/*
+ * Timeout/age scan cache entries; called from sta timeout
+ * timer (XXX should be self-contained).
+ */
+void
+_ieee80211_scan_timeout(struct ieee80211com *ic)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	/* Delegate aging to the attached scanner, if there is one */
+	if (ss->ss_ops == NULL)
+		return;
+
+	ss->ss_ops->scan_age(ss);
+}
+
+/*
+ * Periodic timer callback: age out stale scan cache entries and
+ * re-arm the expiry timer for the next pass (every
+ * ic_scan_results_check seconds).
+ */
+void ieee80211_scan_timeout(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *) arg;
+
+	if (ic == NULL)
+		return;
+
+	_ieee80211_scan_timeout(ic);
+	/*
+	 * mod_timer() re-arms in one step and, unlike the bare
+	 * expires+add_timer pair, is also safe if the timer is
+	 * somehow still pending.
+	 */
+	mod_timer(&ic->ic_scan_results_expire,
+			jiffies + ic->ic_scan_results_check * HZ);
+}
+
+/*
+ * Mark a scan cache entry after a successful associate.
+ */
+void
+ieee80211_scan_assoc_success(struct ieee80211com *ic, const u_int8_t mac[])
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	if (ss->ss_ops == NULL)
+		return;
+
+	IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN,
+		mac, "%s",  __func__);
+	ss->ss_ops->scan_assoc_success(ss, mac);
+}
+
+/*
+ * Demerit a scan cache entry after failing to associate.
+ */
+void
+ieee80211_scan_assoc_fail(struct ieee80211com *ic,
+	const u_int8_t mac[], int reason)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	if (ss->ss_ops == NULL)
+		return;
+
+	IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN, mac,
+		"%s: reason %u", __func__, reason);
+	ss->ss_ops->scan_assoc_fail(ss, mac, reason);
+}
+
+/*
+ * Iterate over the contents of the scan cache, invoking f(arg, entry)
+ * for each entry.  Returns the scanner's iteration result, or 0 when
+ * no scanner module is attached.
+ */
+int
+ieee80211_scan_iterate(struct ieee80211com *ic,
+	ieee80211_scan_iter_func *f, void *arg)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	/* Re-indented with tabs to match the rest of the file */
+	if (ss->ss_ops == NULL)
+		return 0;
+
+	return ss->ss_ops->scan_iterate(ss, f, arg);
+}
+
+/*
+ * Save a copy of an information element into *iep; a NULL source IE
+ * instead releases any previously saved copy and clears the pointer.
+ */
+static void
+scan_saveie(u_int8_t **iep, const u_int8_t *ie)
+{
+	if (ie != NULL) {
+		ieee80211_saveie(iep, ie);
+		return;
+	}
+
+	if (*iep != NULL)
+		FREE(*iep, M_DEVBUF);
+	*iep = NULL;
+}
+
+/*
+ * Populate a scan cache entry from a parsed beacon/probe response.
+ *
+ * Copies the fixed fields (SSID, rates, bssid, timestamps, capability,
+ * channel, ERP, TIM offset) and saves a private copy of each optional
+ * IE via scan_saveie().
+ */
+void
+ieee80211_add_scan_entry(struct ieee80211_scan_entry *ise,
+			const struct ieee80211_scanparams *sp,
+			const struct ieee80211_frame *wh,
+			int subtype, int rssi, int rstamp)
+{
+	/* Keep the SSID from a probe response, or fill in an empty one;
+	 * hidden-SSID beacons must not clobber a known SSID. */
+	if (sp->ssid[1] != 0 &&
+	    (ISPROBE(subtype) || ise->se_ssid[1] == 0)) {
+		memcpy(ise->se_ssid, sp->ssid, 2 + sp->ssid[1]);
+	}
+	memcpy(ise->se_rates, sp->rates,
+			2 + IEEE80211_SANITISE_RATESIZE(sp->rates[1]));
+	if (sp->xrates != NULL) {
+		memcpy(ise->se_xrates, sp->xrates,
+				2 + IEEE80211_SANITISE_RATESIZE(sp->xrates[1]));
+	} else {
+		ise->se_xrates[1] = 0;
+	}
+	IEEE80211_ADDR_COPY(ise->se_bssid, wh->i_addr3);
+
+	ise->se_rstamp = rstamp;
+	/* NOTE(review): copies sizeof(se_tstamp) bytes from sp->tstamp --
+	 * assumes the source buffer is at least that large; confirm. */
+	memcpy(ise->se_tstamp.data, sp->tstamp, sizeof(ise->se_tstamp));
+	ise->se_intval = sp->bintval;
+	ise->se_capinfo = sp->capinfo;
+	ise->se_chan = sp->rxchan;
+	ise->se_fhdwell = sp->fhdwell;
+	ise->se_fhindex = sp->fhindex;
+	ise->se_erp = sp->erp;
+	ise->se_timoff = sp->timoff;
+	if (sp->tim != NULL) {
+		const struct ieee80211_tim_ie *tim =
+		    (const struct ieee80211_tim_ie *) sp->tim;
+		ise->se_dtimperiod = tim->tim_period;
+	}
+	/* Private copies of the optional IEs (NULL source frees the copy) */
+	scan_saveie(&ise->se_wme_ie, sp->wme);
+	scan_saveie(&ise->se_wpa_ie, sp->wpa);
+	scan_saveie(&ise->se_rsn_ie, sp->rsn);
+	scan_saveie(&ise->se_wsc_ie, sp->wsc);
+	scan_saveie(&ise->se_ath_ie, sp->ath);
+	scan_saveie(&ise->se_qtn_ie, sp->qtn);
+	if (sp->qtn != NULL) {
+		ise->se_qtn_ie_flags = ((struct ieee80211_ie_qtn *)sp->qtn)->qtn_ie_flags;
+		ise->se_is_qtn_dev = 1;
+	} else {
+		ise->se_qtn_ie_flags = 0;
+		ise->se_is_qtn_dev = 0;
+	}
+	scan_saveie(&ise->se_htcap_ie, sp->htcap);
+	scan_saveie(&ise->se_htinfo_ie, sp->htinfo);
+	scan_saveie(&ise->se_vhtcap_ie, sp->vhtcap);
+	scan_saveie(&ise->se_vhtop_ie, sp->vhtop);
+	scan_saveie(&ise->se_pairing_ie, sp->pairing_ie);
+	scan_saveie(&ise->se_bss_load_ie, sp->bssload);
+
+	ise->se_ext_role = sp->extender_role;
+	scan_saveie(&ise->se_ext_bssid_ie, sp->ext_bssid_ie);
+	ise->local_max_txpwr = sp->local_max_txpwr;
+	scan_saveie(&ise->se_md_ie, sp->mdie);
+}
+EXPORT_SYMBOL(ieee80211_add_scan_entry);
+
+/*
+ * Record an OBSS layout flag (primary/secondary usage) for an IEEE
+ * channel number in the AP state's channel layout table.  Channels
+ * unknown to the device are silently ignored.
+ */
+static void
+ieee80211_scan_set_channel_obssflag(struct ieee80211_scan_state *ss, uint8_t ch, int flag)
+{
+	struct ieee80211com *ic = ss->ss_vap->iv_ic;
+	struct ap_state *as = ss->ss_priv;
+
+	if (ieee80211_find_channel_by_ieee(ic, ch) == NULL)
+		return;
+
+	as->as_obss_chanlayout[ch] |= flag;
+}
+
+/*
+ * Record which channels a scanned BSS occupies as primary/secondary.
+ *
+ * Derives the BSS's primary and secondary 20MHz channels from the scan
+ * entry and flags them in the OBSS channel-layout table; for BSSes of
+ * 80MHz or more the neighbouring 40MHz channels are flagged as well.
+ * Always returns 0.
+ */
+int
+ieee80211_scan_check_secondary_channel(struct ieee80211_scan_state *ss,
+			struct ieee80211_scan_entry *ise)
+{
+	int bss_bw = ieee80211_get_max_ap_bw(ise);
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan;
+	uint8_t chan_pri;
+	uint8_t chan_sec;
+
+	/* A 20MHz BSS has no secondary channels to record */
+	if (bss_bw <= BW_HT20)
+		return 0;
+
+	ieee80211_find_ht_pri_sec_chan(vap, ise, &chan_pri, &chan_sec);
+	if (chan_pri == 0)
+		return 0;
+
+	ieee80211_scan_set_channel_obssflag(ss, chan_pri, IEEE80211_OBSS_CHAN_PRI20);
+	ieee80211_scan_set_channel_obssflag(ss, chan_sec, IEEE80211_OBSS_CHAN_SEC20);
+
+	if (bss_bw >= BW_HT80) {
+		/*
+		 * NOTE(review): both 20MHz halves of the primary 40MHz pair
+		 * are tagged PRI40, and both 40MHz neighbours of the primary
+		 * are tagged SEC40 -- confirm this matches the intended
+		 * 80MHz layout (only one neighbour is the true secondary).
+		 */
+		ieee80211_scan_set_channel_obssflag(ss, chan_pri, IEEE80211_OBSS_CHAN_PRI40);
+		ieee80211_scan_set_channel_obssflag(ss, chan_sec, IEEE80211_OBSS_CHAN_PRI40);
+
+		chan = ieee80211_find_channel_by_ieee(ic, chan_pri);
+		if (chan) {
+			chan_sec = ieee80211_find_sec40u_chan(chan);
+			if (chan_sec != 0)
+				ieee80211_scan_set_channel_obssflag(ss, chan_sec, IEEE80211_OBSS_CHAN_SEC40);
+			chan_sec = ieee80211_find_sec40l_chan(chan);
+			if (chan_sec != 0)
+				ieee80211_scan_set_channel_obssflag(ss, chan_sec, IEEE80211_OBSS_CHAN_SEC40);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_scan_check_secondary_channel);
+
+/*
+ * Validate a candidate primary channel and track the busiest one seen.
+ * Returns -1 if the channel cannot serve as a primary channel, 1 if it
+ * is usable and carries more BSSes than the running maximum (which is
+ * then updated), or 0 if usable but not a new maximum.
+ */
+static int
+ieee80211_prichan_check_newchan(struct ieee80211_scan_state *ss,
+			struct ieee80211_channel *chan,
+			int32_t *max_bsscnt)
+{
+	struct ap_state *as = ss->ss_priv;
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t ch;
+	int32_t bss_count;
+
+	if (chan == NULL)
+		return -1;
+
+	ch = ieee80211_chan2ieee(ic, chan);
+	if (!is_channel_valid(ch))
+		return -1;
+
+	/* Skip channels that are primary-inactive or not in the active set */
+	if (isset(ic->ic_chan_pri_inactive, ch) || isclr(ic->ic_chan_active, ch))
+		return -1;
+
+	/* A channel serving as some BSS's secondary must not become primary */
+	if (IEEE80211_IS_OBSS_CHAN_SECONDARY(as->as_obss_chanlayout[ch]))
+		return -1;
+
+	bss_count = (int32_t)as->as_numbeacons[ch];
+	if (bss_count <= *max_bsscnt)
+		return 0;
+
+	*max_bsscnt = bss_count;
+	return 1;
+}
+
+/*
+ * Channel selection methods for a VHT BSS,
+ * as per IEEE Std 802.11ac 10.39.2, IEEE Std 802.11ac 10.5.3.
+ * New BSS's primary channel shall not overlap other BSSs' secondary channels.
+ *
+ * Among the usable 20MHz channels inside the current-bandwidth block
+ * around chan_pri, pick the one carrying the most BSSes (bsscnt is
+ * threaded through the ieee80211_prichan_check_newchan() calls) and
+ * return it, or NULL if no candidate is usable.
+ */
+struct ieee80211_channel *
+ieee80211_scan_switch_pri_chan(struct ieee80211_scan_state *ss,
+			struct ieee80211_channel *chan_pri)
+{
+	struct ieee80211vap *vap = NULL;
+	struct ieee80211com *ic = NULL;
+	int cur_bw = BW_INVALID;
+	struct ieee80211_channel *chan_sec;
+	uint32_t ch_pri = 0;
+	uint32_t ch_sec;
+	int32_t bsscnt = -1;
+
+	if (!chan_pri || !ss || !ss->ss_vap || !ss->ss_vap->iv_ic) {
+		return NULL;
+	}
+
+	vap = ss->ss_vap;
+	ic = vap->iv_ic;
+	cur_bw = ieee80211_get_bw(ic);
+
+	/* The given channel itself is the first candidate */
+	if (cur_bw >= BW_HT20) {
+		ch_pri = chan_pri->ic_ieee;
+		if (ieee80211_prichan_check_newchan(ss, chan_pri, &bsscnt) < 0)
+			ch_pri = 0;
+	}
+
+	if (cur_bw >= BW_HT40) {
+		/* we look up operating class to follow different primary channel layouts, esp. 2.4G */
+		ch_sec = ieee80211_find_sec_chan_by_operating_class(ic,
+					chan_pri->ic_ieee,
+					IEEE80211_OC_BEHAV_CHAN_UPPER);
+		chan_sec = ieee80211_find_channel_by_ieee(ic, ch_sec);
+		if (ieee80211_prichan_check_newchan(ss, chan_sec, &bsscnt) > 0)
+			ch_pri = ch_sec;
+
+		ch_sec = ieee80211_find_sec_chan_by_operating_class(ic,
+					chan_pri->ic_ieee,
+					IEEE80211_OC_BEHAV_CHAN_LOWWER);
+		chan_sec = ieee80211_find_channel_by_ieee(ic, ch_sec);
+		if (ieee80211_prichan_check_newchan(ss, chan_sec, &bsscnt) > 0)
+			ch_pri = ch_sec;
+	}
+
+	/* For 80MHz also consider the 40MHz neighbours of the primary */
+	if (cur_bw >= BW_HT80) {
+		ch_sec = ieee80211_find_sec40u_chan(chan_pri);
+		chan_sec = ieee80211_find_channel_by_ieee(ic, ch_sec);
+		if (ieee80211_prichan_check_newchan(ss, chan_sec, &bsscnt) > 0)
+			ch_pri = ch_sec;
+
+		ch_sec = ieee80211_find_sec40l_chan(chan_pri);
+		chan_sec = ieee80211_find_channel_by_ieee(ic, ch_sec);
+		if (ieee80211_prichan_check_newchan(ss, chan_sec, &bsscnt) > 0)
+			ch_pri = ch_sec;
+	}
+
+	/* ch_pri == 0 (no usable candidate) makes this lookup return NULL */
+	return ieee80211_find_channel_by_ieee(ic, ch_pri);
+}
+EXPORT_SYMBOL(ieee80211_scan_switch_pri_chan);
+
+/*
+ * SCS variant of primary-channel selection: run the generic
+ * ieee80211_scan_switch_pri_chan() against the SCS-private AP state by
+ * temporarily swapping it into ss_priv, restoring the original after.
+ */
+struct ieee80211_channel *
+ieee80211_scs_switch_pri_chan(struct ieee80211_scan_state *ss,
+			struct ieee80211_channel *chan_pri)
+{
+	struct ieee80211_channel *chan;
+	struct ap_state *saved_priv = ss->ss_priv;
+
+	ss->ss_priv = (struct ap_state *)ss->ss_scs_priv;
+	chan = ieee80211_scan_switch_pri_chan(ss, chan_pri);
+	ss->ss_priv = saved_priv;
+
+	return chan;
+}
+EXPORT_SYMBOL(ieee80211_scs_switch_pri_chan);
+
+/*
+ * Check whether a WSC (WPS) IE advertises an active registrar.
+ *
+ * wsc_ie points at the full IE: element id, length byte, then the
+ * payload starting with the 4-byte WSC OUI followed by TLV attributes.
+ * Returns 1 if a Selected Registrar attribute (type 0x1041, length 1)
+ * is present, 0 otherwise (including NULL input).
+ */
+int
+ieee80211_wps_active(uint8_t *wsc_ie)
+{
+#define IEEE80211_WPS_SELECTED_REGISTRAR 0x1041
+	uint16_t type;
+	uint16_t len;
+	uint8_t *pos;
+	uint8_t *end;
+
+	if (!wsc_ie)
+		return 0;
+
+	pos = wsc_ie;
+	/*
+	 * wsc_ie[1] counts only the payload bytes after the 2-byte IE
+	 * header, so the IE ends at wsc_ie + 2 + wsc_ie[1].  The previous
+	 * "wsc_ie + wsc_ie[1]" stopped 2 bytes short and could miss an
+	 * attribute sitting at the very end of the IE.
+	 */
+	end = wsc_ie + 2 + wsc_ie[1];
+
+	pos += (2 + 4);		/* skip IE header and WSC OUI */
+	while (pos < end) {
+		/* Need at least a 2-byte type and a 2-byte length */
+		if (end - pos < 4)
+			break;
+
+		type = be16toh(*(__be16 *)pos);
+		pos += 2;
+		len = be16toh(*(__be16 *)pos);
+		pos += 2;
+
+		/* Truncated attribute: stop parsing */
+		if (len > end - pos)
+			break;
+
+		if ((type == IEEE80211_WPS_SELECTED_REGISTRAR) && (len == 1))
+			return 1;
+
+		pos += len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_wps_active);
+
+/*
+ * Dump the STA-mode scan cache as a human-readable table (bssid, ssid,
+ * channel, capability summary, rssi).  Only emitted when the vap has
+ * the IEEE80211_MSG_SCAN debug flag enabled.
+ */
+void
+ieee80211_dump_scan_res(struct ieee80211_scan_state *ss)
+{
+#define IEEE80211_BSS_CAPA_STR_LEN 30
+	struct ieee80211vap *vap;
+	struct sta_table *st;
+	struct sta_entry *se, *next;
+	struct ieee80211_scan_entry *ise;
+	char ssid[IEEE80211_NWID_LEN + 1];
+	char bss_capa[IEEE80211_BSS_CAPA_STR_LEN];
+	char *pos;
+	char *end;
+	int len;
+
+	if (!ss)
+		return;
+
+	vap = ss->ss_vap;
+	/* ss_priv is the sta_table when the STA scan module is attached */
+	st = ss->ss_priv;
+	if (!ieee80211_msg(vap, IEEE80211_MSG_SCAN))
+		return;
+
+	printk("%-18s  %-33s  %-7s  %-25s  %-5s\n",
+		"BSSID", "SSID", "Channel", "BSS Capabilities", "RSSI");
+
+	TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+		ise = &se->base;
+		memset(ssid, 0, sizeof(ssid));
+		memcpy(ssid, &ise->se_ssid[2], MIN(sizeof(ssid), ise->se_ssid[1]));
+
+		/*
+		 * Build the capability summary.  NOTE(review): snprintf
+		 * return values are not clamped against truncation, but the
+		 * worst-case string ("IBSS|WPA|RSN|WPS_ACTIVE", 23 chars)
+		 * fits within the 30-byte buffer.
+		 */
+		len = 0;
+		pos = bss_capa;
+		end = bss_capa + IEEE80211_BSS_CAPA_STR_LEN;
+		memset(bss_capa, 0, sizeof(bss_capa));
+
+		if (ise->se_capinfo & IEEE80211_CAPINFO_IBSS) {
+			len = snprintf(pos, end - pos, "IBSS");
+			pos += len;
+		} else if (ise->se_capinfo & IEEE80211_CAPINFO_ESS) {
+			len = snprintf(pos, end - pos, "ESS");
+			pos += len;
+		}
+
+		if (ise->se_wpa_ie) {
+			len = snprintf(pos, end - pos, "|WPA");
+			pos += len;
+		}
+		if (ise->se_rsn_ie) {
+			len = snprintf(pos, end - pos, "|RSN");
+			pos += len;
+		}
+
+		if (ieee80211_wps_active(ise->se_wsc_ie))
+		      snprintf(pos, end - pos, "|WPS_ACTIVE");
+		else if (ise->se_wsc_ie)
+		      snprintf(pos, end - pos, "|WPS");
+
+		printk("%-18pM  %-33s  %-7u  %-25s  %-5d\n",
+			ise->se_bssid,
+			ssid,
+			ise->se_chan->ic_ieee,
+			bss_capa,
+			ise->se_rssi);
+	}
+}
+EXPORT_SYMBOL(ieee80211_dump_scan_res);
+
+/*
+ * Flush the contents of the scan cache.
+ */
+void
+ieee80211_scan_flush(struct ieee80211com *ic)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	if (ss->ss_ops == NULL)
+		return;
+
+	IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+		"%s\n",  __func__);
+	ss->ss_ops->scan_flush(ss);
+}
+EXPORT_SYMBOL(ieee80211_scan_flush);
+
+/*
+ * Refresh scan module channel list
+ * In cases where ieee80211_scan_pickchannel is called
+ * without initiating proper scan from ap scan module,
+ * the channel list can be out of sync between QDRV and scan_ap modules
+ */
+void ieee80211_scan_refresh_scan_module_chan_list(struct ieee80211com *ic, struct ieee80211vap *vap)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	IEEE80211_LOCK_ASSERT(ic);
+
+	if (ss == NULL || ss->ss_ops == NULL || ss->ss_vap == NULL) {
+		printk(KERN_WARNING "scan state structure not attached or not initialized\n");
+		return;
+	}
+	if (ss->ss_ops->scan_start == NULL) {
+		/*
+		 * iv_opmode is an enum, so print it with %d; the old %s
+		 * conversion would have dereferenced the value as a
+		 * string pointer.  Message wording also fixed.
+		 */
+		IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+		    "%s: scan module does not support scan start, "
+		    "opmode %d\n", __func__, ss->ss_vap->iv_opmode);
+		return;
+	}
+
+	ss->ss_ops->scan_start(ss, vap);
+}
+EXPORT_SYMBOL(ieee80211_scan_refresh_scan_module_chan_list);
+
+/*
+ * Check the scan cache for an ap/channel to use
+ */
+struct ieee80211_channel *
+ieee80211_scan_pickchannel(struct ieee80211com *ic, int flags)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+
+	IEEE80211_LOCK_ASSERT(ic);
+
+	if (ss == NULL || ss->ss_ops == NULL || ss->ss_vap == NULL) {
+		printk(KERN_WARNING "scan state structure not attached or not initialized\n");
+		return NULL;
+	}
+	if (ss->ss_ops->scan_pickchan == NULL) {
+		/*
+		 * iv_opmode is an enum, so print it with %d; the old %s
+		 * conversion would have dereferenced the value as a
+		 * string pointer.
+		 */
+		IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+		    "%s: scan module does not support picking a channel, "
+		    "opmode %d\n", __func__, ss->ss_vap->iv_opmode);
+		return NULL;
+	}
+
+	return ss->ss_ops->scan_pickchan(ic, ss, flags);
+}
+EXPORT_SYMBOL(ieee80211_scan_pickchannel);
+
+/*
+ * Classify the neighborhood density from the last neighbor count:
+ * unknown (no data), sparse, dense, or very dense, using the
+ * thresholds configured in the ieee80211com.
+ */
+int ieee80211_get_type_of_neighborhood(struct ieee80211com *ic)
+{
+	/* A negative count means no scan data is available yet */
+	if (ic->ic_neighbor_count < 0)
+		return IEEE80211_NEIGHBORHOOD_TYPE_UNKNOWN;
+	if (ic->ic_neighbor_count <= ic->ic_neighbor_cnt_sparse)
+		return IEEE80211_NEIGHBORHOOD_TYPE_SPARSE;
+	if (ic->ic_neighbor_count <= ic->ic_neighbor_cnt_dense)
+		return IEEE80211_NEIGHBORHOOD_TYPE_DENSE;
+	return IEEE80211_NEIGHBORHOOD_TYPE_VERY_DENSE;
+}
+
+/*
+ * Map a neighborhood density type to a display string; anything
+ * unrecognized is reported as "Unknown".
+ */
+char * ieee80211_neighborhood_type2str(int type)
+{
+	switch (type) {
+	case IEEE80211_NEIGHBORHOOD_TYPE_SPARSE:
+		return "Sparse";
+	case IEEE80211_NEIGHBORHOOD_TYPE_DENSE:
+		return "Dense";
+	case IEEE80211_NEIGHBORHOOD_TYPE_VERY_DENSE:
+		return "Very dense";
+	default:
+		return "Unknown";
+	}
+}
+
+/*
+ * Count the neighboring APs recorded in the scan cache and store the
+ * total in ic->ic_neighbor_count.  ss_priv holds scanner-specific
+ * state: an ap_state per-channel list in hostap mode, a sta_table in
+ * station mode.
+ */
+void ieee80211_check_type_of_neighborhood(struct ieee80211com *ic)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	struct ap_scan_entry *apse;
+	struct sta_entry *se;
+	int chan_idx;
+
+	ic->ic_neighbor_count = 0;
+
+	if (ss->ss_vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		struct ap_state *as = ss->ss_priv;
+
+		for (chan_idx = 0; chan_idx < IEEE80211_CHAN_MAX; chan_idx++)
+			TAILQ_FOREACH(apse, &as->as_scan_list[chan_idx].asl_head, ase_list)
+				ic->ic_neighbor_count++;
+	} else if (ss->ss_vap->iv_opmode == IEEE80211_M_STA) {
+		struct sta_table *st = ss->ss_priv;
+
+		TAILQ_FOREACH(se, &st->st_entry, se_list)
+			ic->ic_neighbor_count++;
+	}
+
+	IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+			"%s: found %d neighborhood APs\n", __func__, ic->ic_neighbor_count);
+}
+
diff --git a/drivers/qtn/wlan/ieee80211_scan_ap.c b/drivers/qtn/wlan/ieee80211_scan_ap.c
new file mode 100644
index 0000000..9939a0c
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_scan_ap.c
@@ -0,0 +1,1309 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_scan_ap.c 1721 2006-09-20 08:45:13Z mentor $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 ap scanning support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_mlme_statistics.h"
+
+static int ap_flush(struct ieee80211_scan_state *);
+static void action_tasklet(IEEE80211_TQUEUE_ARG);
+
+static int
+lock_ap_list(struct ap_state *as)
+{
+	/*
+	 * Acquire the AP scan-list lock.  When called from normal process
+	 * context (neither softirq nor irqs-off) bottom halves are also
+	 * disabled; the returned flag tells unlock_ap_list() whether to
+	 * re-enable them.
+	 */
+	int bh_disabled = !in_softirq() && !irqs_disabled();
+
+	/* This lock must never be taken from hard-irq context. */
+	WARN_ON_ONCE(in_irq());
+
+	/*
+	 * NOTE(review): BH is disabled only after the spinlock is taken,
+	 * leaving a window where a softirq on this CPU could contend for
+	 * the already-held lock; spin_lock_bh() would close that window.
+	 * Confirm the list is never locked from softirq context before
+	 * changing the order (unlock_ap_list() mirrors this sequence).
+	 */
+	spin_lock(&as->asl_lock);
+	if (bh_disabled) {
+		local_bh_disable();
+	}
+	return bh_disabled;
+}
+
+static void
+unlock_ap_list(struct ap_state *as, int bh_disabled)
+{
+	/*
+	 * Release the AP scan-list lock; re-enable bottom halves first if
+	 * lock_ap_list() disabled them (mirrors the acquisition order).
+	 */
+	if (bh_disabled) {
+		local_bh_enable();
+	}
+	spin_unlock(&as->asl_lock);
+}
+
+/* Lock the scan state's private AP list; returns the BH-disabled flag. */
+static int
+ap_lock(struct ieee80211_scan_state *ss)
+{
+	return lock_ap_list((struct ap_state *)ss->ss_priv);
+}
+
+/* Release the scan state's private AP list lock taken by ap_lock(). */
+static void ap_unlock(struct ieee80211_scan_state *ss, int bh_disabled)
+{
+	unlock_ap_list((struct ap_state *)ss->ss_priv, bh_disabled);
+}
+
+/*
+ * Release every optional information element attached to a scan entry.
+ * Each IE pointer is freed and cleared so the entry can later be freed
+ * or reused without double-free risk.
+ */
+static void
+cleanup_se(struct ap_scan_entry *se)
+{
+	struct ieee80211_scan_entry *base = &se->base;
+
+	if (base->se_wpa_ie != NULL) {
+		FREE(base->se_wpa_ie, M_DEVBUF);
+		base->se_wpa_ie = NULL;
+	}
+	if (base->se_rsn_ie != NULL) {
+		FREE(base->se_rsn_ie, M_DEVBUF);
+		base->se_rsn_ie = NULL;
+	}
+	if (base->se_wme_ie != NULL) {
+		FREE(base->se_wme_ie, M_DEVBUF);
+		base->se_wme_ie = NULL;
+	}
+	if (base->se_wsc_ie != NULL) {
+		FREE(base->se_wsc_ie, M_DEVBUF);
+		base->se_wsc_ie = NULL;
+	}
+	if (base->se_htcap_ie != NULL) {
+		FREE(base->se_htcap_ie, M_DEVBUF);
+		base->se_htcap_ie = NULL;
+	}
+	if (base->se_htinfo_ie != NULL) {
+		FREE(base->se_htinfo_ie, M_DEVBUF);
+		base->se_htinfo_ie = NULL;
+	}
+	if (base->se_vhtcap_ie != NULL) {
+		FREE(base->se_vhtcap_ie, M_DEVBUF);
+		base->se_vhtcap_ie = NULL;
+	}
+	if (base->se_vhtop_ie != NULL) {
+		FREE(base->se_vhtop_ie, M_DEVBUF);
+		base->se_vhtop_ie = NULL;
+	}
+	if (base->se_ath_ie != NULL) {
+		FREE(base->se_ath_ie, M_DEVBUF);
+		base->se_ath_ie = NULL;
+	}
+	if (base->se_qtn_ie != NULL) {
+		FREE(base->se_qtn_ie, M_DEVBUF);
+		base->se_qtn_ie = NULL;
+	}
+	if (base->se_ext_bssid_ie != NULL) {
+		FREE(base->se_ext_bssid_ie, M_DEVBUF);
+		base->se_ext_bssid_ie = NULL;
+	}
+}
+
+/* Free a scan entry: release its IEs, then the entry itself. */
+static void
+free_se(struct ap_scan_entry *entry)
+{
+	cleanup_se(entry);
+	FREE(entry, M_80211_SCAN);
+}
+
+/*
+ * Free a scan entry, or defer the free when the entry is currently in
+ * use: mark it so reset_se_inuse() releases it later.
+ */
+static void
+free_se_request(struct ap_scan_entry *entry)
+{
+	if (!entry->se_inuse) {
+		free_se(entry);
+		return;
+	}
+	entry->se_request_to_free = 1;
+}
+
+/* Complete a deferred free once the entry is no longer in use. */
+static void
+free_se_process(struct ap_scan_entry *entry)
+{
+	if (entry->se_request_to_free && !entry->se_inuse)
+		free_se(entry);
+}
+
+/* Mark a scan entry as in use so a concurrent flush will not free it. */
+static void
+set_se_inuse(struct ap_scan_entry *entry)
+{
+	entry->se_inuse = 1;
+}
+
+/*
+ * Drop the in-use mark and perform any free that was deferred while
+ * the entry was busy.
+ */
+static void
+reset_se_inuse(struct ap_scan_entry *entry)
+{
+	entry->se_inuse = 0;
+	free_se_process(entry);
+}
+/*
+ * Attach prior to any scanning work.
+ *
+ * Allocates and initializes the per-scan AP state (ss->ss_priv): the
+ * per-channel scan lists, the deferred-action tasklet and the list
+ * lock.  Returns 1 on success, 0 on allocation failure.
+ */
+static int
+ap_attach(struct ieee80211_scan_state *ss)
+{
+	struct ap_state *as;
+	int i;
+
+	_MOD_INC_USE(THIS_MODULE, return 0);
+
+	MALLOC(as, struct ap_state *, sizeof(struct ap_state),
+		M_SCANCACHE, M_NOWAIT | M_ZERO);
+	if (as == NULL) {
+		if (printk_ratelimit())
+			printk("failed to attach before scanning\n");
+		/* Fix: drop the module reference taken above, else it leaks */
+		_MOD_DEC_USE(THIS_MODULE);
+		return 0;
+	}
+	as->as_age = AP_PURGE_SCANS;
+	ss->ss_priv = as;
+	IEEE80211_INIT_TQUEUE(&as->as_actiontq, action_tasklet, ss);
+	spin_lock_init(&as->asl_lock);
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		TAILQ_INIT(&as->as_scan_list[i].asl_head);
+	}
+	return 1;
+}
+
+
+/*
+ * Empty every per-channel scan list, freeing each entry (or scheduling
+ * the free if it is in use) and keeping the entry counter in sync.
+ * Always returns 0.
+ */
+static int
+ap_flush_asl_table(struct ieee80211_scan_state *ss)
+{
+	struct ap_state *as = ss->ss_priv;
+	struct ap_scan_entry *entry, *tmp;
+	int chan;
+
+	for (chan = 0; chan < IEEE80211_CHAN_MAX; chan++) {
+		TAILQ_FOREACH_SAFE(entry, &as->as_scan_list[chan].asl_head, ase_list, tmp) {
+			TAILQ_REMOVE(&as->as_scan_list[chan].asl_head, entry, ase_list);
+			free_se_request(entry);
+			if (as->as_entry_num > 0)
+				as->as_entry_num--;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Cleanup any private state: free all cached scan entries and the
+ * ap_state itself, then drop the module reference taken by ap_attach().
+ * Always returns 1.
+ */
+static int
+ap_detach(struct ieee80211_scan_state *ss)
+{
+	struct ap_state *as = ss->ss_priv;
+
+	if (as) {
+		ap_flush_asl_table(ss);
+		FREE(as, M_SCANCACHE);
+	}
+	_MOD_DEC_USE(THIS_MODULE);
+	return 1;
+}
+
+/*
+ * Flush all per-scan state.
+ * Empties the scan lists under the list lock, then clears the
+ * per-channel statistics so the next scan starts from a clean slate.
+ */
+static int
+ap_flush(struct ieee80211_scan_state *ss)
+{
+	struct ap_state *as = ss->ss_priv;
+	int bh_disabled;
+
+	bh_disabled = lock_ap_list(as);
+	ap_flush_asl_table(ss);
+	unlock_ap_list(as, bh_disabled);
+
+	memset(as->as_maxrssi, 0, sizeof(as->as_maxrssi));
+	memset(as->as_numpkts, 0, sizeof(as->as_numpkts));
+	memset(as->as_aci,     0, sizeof(as->as_aci));
+	/* Fix: was sizeof(as->as_aci) — size the clear by the CCI array itself */
+	memset(as->as_cci,     0, sizeof(as->as_cci));
+	memset(as->as_numbeacons, 0, sizeof(as->as_numbeacons));
+	memset(as->as_chanmetric, 0, sizeof(as->as_chanmetric));
+	memset(as->as_obss_chanlayout, 0, sizeof(as->as_obss_chanlayout));
+	ss->ss_last = 0;		/* ensure no channel will be picked */
+	return 0;
+}
+
+/*
+ * Return 1 if the channel table contains an 11g channel on the given
+ * frequency at any index other than i, else 0.  (The original code
+ * searched i+1..end then 0..i-1; scanning the whole table while
+ * skipping index i yields the same answer.)
+ */
+static int
+find11gchannel(struct ieee80211com *ic, int i, int freq)
+{
+	int j;
+
+	for (j = 0; j < ic->ic_nchans; j++) {
+		const struct ieee80211_channel *c = &ic->ic_channels[j];
+
+		if (j == i)
+			continue;
+		if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Start an ap scan by populating the channel list.
+ *
+ * Fills ss->ss_chans (up to IEEE80211_SCAN_MAX entries) from the
+ * device channel table, filtered by the desired mode, active-channel
+ * map, radar state and the IEEE80211_SCAN_NO_DFS flag, then programs
+ * the dwell times.  Always returns 0.
+ */
+static int
+ap_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *c;
+	int i;
+
+	ss->ss_last = 0;
+
+	/* Auto mode: consider every active channel, with turbo/11b filtering. */
+	if (ic->ic_des_mode == IEEE80211_MODE_AUTO) {
+		for (i = 0; i < ic->ic_nchans; i++) {
+			c = &ic->ic_channels[i];
+			if (c == NULL || isclr(ic->ic_chan_active, c->ic_ieee))
+				continue;
+			if (IEEE80211_IS_CHAN_TURBO(c)) {
+				/* XR is not supported on turbo channels */
+				if (vap->iv_ath_cap & IEEE80211_ATHC_XR)
+					continue;
+				/* dynamic channels are scanned in base mode */
+				if (!IEEE80211_IS_CHAN_ST(c))
+					continue;
+			} else {
+				/*
+				 * Use any 11g channel instead of 11b one.
+				 */
+				if (IEEE80211_IS_CHAN_B(c) &&
+				    find11gchannel(ic, i, c->ic_freq))
+					continue;
+			}
+			if (c->ic_flags & IEEE80211_CHAN_RADAR)
+				continue;
+			if (ss->ss_last >= IEEE80211_SCAN_MAX)
+				break;
+			/* avoid DFS channels if so configured */
+			if ((ss->ss_flags & IEEE80211_SCAN_NO_DFS) && (c->ic_flags & IEEE80211_CHAN_DFS))
+				continue;
+			ss->ss_chans[ss->ss_last++] = c;
+		}
+	} else {
+		/* Fixed mode: only channels matching the desired mode's flags. */
+		u_int modeflags;
+
+		modeflags = ieee80211_get_chanflags(ic->ic_des_mode);
+		/* Turbo-prime capability upgrades the mode flags to 108A/108G. */
+		if (vap->iv_ath_cap & IEEE80211_ATHC_TURBOP && modeflags != IEEE80211_CHAN_ST) {
+			if (ic->ic_des_mode == IEEE80211_MODE_11G)
+				modeflags = IEEE80211_CHAN_108G;
+			else
+				modeflags = IEEE80211_CHAN_108A;
+		}
+		for (i = 0; i < ic->ic_nchans; i++) {
+			c = &ic->ic_channels[i];
+			if (c == NULL || isclr(ic->ic_chan_active, c->ic_ieee))
+				continue;
+			if ((c->ic_flags & modeflags) != modeflags)
+				continue;
+			/* XR is not supported on turbo channels */
+			if (IEEE80211_IS_CHAN_TURBO(c) && vap->iv_ath_cap & IEEE80211_ATHC_XR)
+				continue;
+			if (ss->ss_last >= IEEE80211_SCAN_MAX)
+				break;
+			/*
+			 * do not select static turbo channels if the mode is not
+			 * static turbo .
+			 */
+			if (IEEE80211_IS_CHAN_STURBO(c) && ic->ic_des_mode != IEEE80211_MODE_MAX)
+				continue;
+			/* No dfs interference detected channels */
+			if (c->ic_flags & IEEE80211_CHAN_RADAR)
+				continue;
+			/* avoid DFS channels if so configured */
+			if ((ss->ss_flags & IEEE80211_SCAN_NO_DFS) && (c->ic_flags & IEEE80211_CHAN_DFS))
+				continue;
+			ss->ss_chans[ss->ss_last++] = c;
+		}
+	}
+	ss->ss_next = 0;
+	/* XXX tunables */
+	/* Dwell times come from the ic-level configuration, in milliseconds. */
+	ss->ss_mindwell = msecs_to_jiffies(ic->ic_mindwell_active);
+	ss->ss_maxdwell = msecs_to_jiffies(ic->ic_maxdwell_active);
+	ss->ss_maxdwell_passive = msecs_to_jiffies(ic->ic_maxdwell_passive);
+	ss->ss_mindwell_passive = msecs_to_jiffies(ic->ic_mindwell_passive);
+
+#ifdef IEEE80211_DEBUG
+	if (ieee80211_msg_scan(vap)) {
+		printf("%s: scan set ", vap->iv_dev->name);
+		ieee80211_scan_dump_channels(ss);
+		printf(" dwell min %ld max %ld\n",
+			ss->ss_mindwell, ss->ss_maxdwell);
+	}
+#endif /* IEEE80211_DEBUG */
+
+	return 0;
+}
+
+/*
+ * Restart a bg scan.  The AP scan module keeps no pause/resume state,
+ * so this is a no-op that reports success.
+ */
+static int
+ap_restart(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+	return 0;
+}
+
+/*
+ * Cancel an ongoing scan: stop the deferred action tasklet so it does
+ * not fire after the scan has been torn down.
+ */
+static int
+ap_cancel(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+	IEEE80211_CANCEL_TQUEUE(&((struct ap_state *)ss->ss_priv)->as_actiontq);
+	return 0;
+}
+
+/*
+ * Process a beacon or probe-response frame seen during the scan.
+ * Updates the per-channel RSSI/packet statistics and inserts or
+ * refreshes the (channel, BSSID)-keyed entry in the scan list.
+ * Returns 1 if the frame was recorded, 0 if the table is full or
+ * allocation failed, 1 (ignored) if the channel is invalid.
+ */
+static int
+ap_add(struct ieee80211_scan_state *ss, const struct ieee80211_scanparams *sp,
+	const struct ieee80211_frame *wh, int subtype, int rssi, int rstamp)
+{
+	struct ap_state *as = ss->ss_priv;
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ap_scan_entry *se;
+	struct ieee80211_scan_entry *ise;
+	const u_int8_t *macaddr = wh->i_addr2;	/* transmitter = BSS address key */
+	int bh_disabled;
+	int chan;
+	int found = 0;
+
+	/* Prefer the channel advertised in the frame; fall back to the
+	 * channel the radio is currently tuned to. */
+	if (is_channel_valid(sp->chan)) {
+		chan = sp->chan;
+	} else {
+		chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
+		if (!is_channel_valid(chan))
+			return 1;
+	}
+
+	/* XXX better quantification of channel use? */
+	/* XXX count bss's? */
+	/* Now we Only count beacons from different bss for better quantification of channel use */
+
+	/* Track the strongest beacon RSSI observed per channel. */
+	if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
+		if (rssi > as->as_maxrssi[chan])
+			as->as_maxrssi[chan] = rssi;
+	}
+
+	as->as_numpkts[chan]++;
+
+	bh_disabled = lock_ap_list(as);
+	/* Look for an existing entry for this BSSID on this channel. */
+	TAILQ_FOREACH(se, &as->as_scan_list[chan].asl_head, ase_list) {
+		if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		/* Enforce the configured upper bound on cached entries. */
+		if (as->as_entry_num >= ic->ic_scan_tbl_len_max) {
+			if (printk_ratelimit())
+			      printk("scan found %u scan results but the list is"
+					" restricted to %u entries\n", as->as_entry_num,
+					ic->ic_scan_tbl_len_max);
+			unlock_ap_list(as, bh_disabled);
+			return 0;
+		}
+
+		MALLOC(se, struct ap_scan_entry *, sizeof(*se), M_80211_SCAN, M_NOWAIT | M_ZERO);
+		if (se == NULL) {
+			if (printk_ratelimit())
+				printk("failed to allocate new scan entry\n");
+			unlock_ap_list(as, bh_disabled);
+			return 0;
+		}
+		as->as_entry_num++;
+
+		IEEE80211_ADDR_COPY(se->base.se_macaddr, macaddr);
+		TAILQ_INSERT_TAIL(&as->as_scan_list[chan].asl_head, se, ase_list);
+
+		/* Only the first beacon per BSS bumps the per-channel count. */
+		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
+			as->as_numbeacons[chan]++;
+		}
+	}
+	ise = &se->base;
+
+	ieee80211_add_scan_entry(ise, sp, wh, subtype, rssi, rstamp);
+	ieee80211_scan_check_secondary_channel(ss, ise);
+
+	/* Maintain a low-pass-filtered RSSI average across sightings. */
+	if (se->se_lastupdate == 0) {		/* first sample */
+		se->se_avgrssi = RSSI_IN(rssi);
+	} else {				/* avg with previous samples */
+		RSSI_LPF(se->se_avgrssi, rssi);
+	}
+	ise->se_rssi = RSSI_GET(se->se_avgrssi);
+
+	unlock_ap_list(as, bh_disabled);
+	/*
+	 * NOTE(review): these fields are written after the list lock is
+	 * dropped — presumably safe because a concurrent flush defers the
+	 * free via se_inuse; confirm against the flush path.
+	 */
+	se->se_lastupdate = jiffies;		/* update time */
+	se->se_seen = 1;
+	se->se_notseen = 0;
+
+	return 1;
+}
+
+/* Channel-selection algorithm identifiers, indexing g_dm_factor below. */
+enum chan_sel_algorithm {
+	CHAN_SEL_CLEAREST = 0,		/* Select the clearest channel */
+	CHAN_SEL_DFS_REENTRY = 1,	/* Select the channel based on DFS entry/re-entry requirement */
+	CHAN_SEL_MAX = 2
+};
+
+
+/* Weighting factors combined into the per-channel decision metric. */
+typedef struct
+{
+	int tx_power_factor;		/*Tx power weighting factor*/
+	int aci_factor;			/*ACI weighting factor*/
+	int cci_factor;			/*CCI weighting factor*/
+	int dfs_factor;			/*DFS weighting factor*/
+	int beacon_factor;		/*Beacon number weighting factor */
+} decision_metric_factor;
+
+/*
+ * Weighting factor for TX power is 2, because we have to multiply the CCI factor by 2
+ * to prevent losing precision when deriving the ACI, as the ACI is 1/2 of the CCI on
+ * an adjacent channel.
+ */
+static const decision_metric_factor g_dm_factor[CHAN_SEL_MAX] =
+{
+	{2, -1, -1, 0, -1},	/* CHAN_SEL_CLEAREST */
+	{2, -1, -1, 8, -1}	/* CHAN_SEL_DFS_REENTRY */
+};
+
+#define QTN_CHAN_METRIC_BASE		160	/* to make sure the channel metric not to be negative */
+#define QTN_METRIC_CCI_LIMIT		16
+#define QTN_METRIC_BEACON_LIMIT		4
+#define QTN_AS_CCA_INTF_DIVIDER		(IEEE80211_SCS_CCA_INTF_SCALE / QTN_METRIC_CCI_LIMIT)
+
+/* Some custom knobs for our ap scan alg */
+#define QTN_APSCAN_DFS_BIAS 10  /* Positive bias added for DFS channels */
+#define QTN_APSCAN_METSHIFT 16  /* Precision of the metric. 16.16 format*/
+
+/* Upper bound of the random noise added to per-channel tx power. */
+#define QTN_APSCAN_TXPOWER_RANDOM_LIMIT    4
+
+/* Reasons a channel was skipped by ap_pick_channel(), for debug output. */
+enum ieee802111_scan_skipchan_reason {
+	IEEE80211_SCAN_SKIPCHAN_REASON_INVALID		= 1,
+	IEEE80211_SCAN_SKIPCHAN_REASON_DFS		= 2,
+	IEEE80211_SCAN_SKIPCHAN_REASON_AVAIL		= 3,
+	IEEE80211_SCAN_SKIPCHAN_REASON_NONAVAIL		= 4,
+	IEEE80211_SCAN_SKIPCHAN_REASON_RADAR		= 5,
+	IEEE80211_SCAN_SKIPCHAN_REASON_TURBO		= 6,
+	IEEE80211_SCAN_SKIPCHAN_REASON_PURE20		= 7,
+	IEEE80211_SCAN_SKIPCHAN_REASON_MISMATCH_NOTDFS	= 8,
+	IEEE80211_SCAN_SKIPCHAN_REASON_MISMATCH_DFS	= 9,
+	IEEE80211_SCAN_SKIPCHAN_REASON_MISMATCH_BW	= 10,
+	IEEE80211_SCAN_SKIPCHAN_REASON_METRIC_BETTER	= 11,
+	IEEE80211_SCAN_SKIPCHAN_REASON_PRI_INACTIVE	= 12,
+	IEEE80211_SCAN_SKIPCHAN_REASON_WEATHER_CHAN	= 13,
+	IEEE80211_SCAN_SKIPCHAN_REASON_NON_DFS		= 14,
+};
+
+/*
+ * Emit a per-channel debug line explaining why ap_pick_channel()
+ * skipped (or accepted) the channel.  REASON_INVALID entries are
+ * suppressed since their statistics are not meaningful.
+ */
+static void local_ap_pick_channel_debug(struct ieee80211_scan_state *ss, int chan, int skip_reason)
+{
+	struct ap_state *as = ss->ss_priv;
+
+	if (skip_reason == IEEE80211_SCAN_SKIPCHAN_REASON_INVALID)
+		return;
+
+	/* Metric is printed in 16.16 fixed point: integer part, fraction, raw. */
+	IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+			"ap_pick_channel: channel %3u rssi %2d numbss %2d numpkts %2d metric %3d.%-5d (%8d) reason %2d\n",
+			chan, as->as_maxrssi[chan], as->as_numbeacons[chan], as->as_numpkts[chan],
+			as->as_chanmetric[chan] >> QTN_APSCAN_METSHIFT,
+			as->as_chanmetric[chan] & ((1<<QTN_APSCAN_METSHIFT)-1), as->as_chanmetric[chan],
+			skip_reason);
+}
+
+/*
+ * Compare the decision metrics of two channels.
+ * Returns 1 if chan1's metric is larger, 0 if equal, -1 if smaller.
+ */
+static int local_ap_metric_compare_by_chan(struct ieee80211com *ic, int32_t chan1, int32_t chan2)
+{
+	struct ap_state *as = ic->ic_scan->ss_priv;
+
+	if (as->as_chanmetric[chan1] == as->as_chanmetric[chan2])
+		return 0;
+	return (as->as_chanmetric[chan1] > as->as_chanmetric[chan2]) ? 1 : -1;
+}
+
+/*
+ * Pick a quiet channel to use for ap operation.
+ *
+ * (i) When ap_pick_channel is being called when channel=0, max_boot_cac=0
+ *     ap_pick_channel picks from set of non-DFS channels only
+ *
+ * (ii) When ap_pick_channel is being called when channel=non-DFS, max_boot_cac=0
+ *      DUT becomes operational on channel=non-DFS
+ *
+ * (iii) When ap_pick_channel is being called when channel=DFS, max_boot_cac=0
+ *      DUT performs CAC on channel=DFS, start operation after CAC completes
+ *
+ * (iv) When ap_pick_channel is being called when channel=0, max_boot_cac=140
+ *      DUT performs ICAC
+ *      clears two channels
+ *      Triggers auto-channel and selects a channel from available cleared channel list
+ *
+ * (v) when ap_pick_channel is being called when channel=non-DFS, max_boot_cac=140
+ *     DUT performs ICAC
+ *     clears two channels
+ *     Starts operation on channel=non-DFS
+ *
+ * (vi) When ap_pick_channel is being called when channel=DFS, max_boot_cac=140
+ *      DUT performs CAC on channel=DFS
+ *      DUT clears one more DFS channel
+ *      Starts operation on channel=DFS
+ */
+static struct ieee80211_channel *
+ap_pick_channel(struct ieee80211com *ic, struct ieee80211_scan_state *ss, int flags)
+{
+	struct ap_state *as = ss->ss_priv;
+	decision_metric_factor dm_factor;
+	int i, chan=0, chan2, bestmetricchan = -1, bestchanix = -1;
+	struct ieee80211_channel *bestchan = NULL;
+	struct ieee80211_channel *ic_best_chan = NULL;
+	struct ieee80211_channel *fs1_bestchan = NULL;
+	struct ieee80211_channel *fs1_secbestchan = NULL;
+	struct ieee80211_channel *fs2_bestchan = NULL;
+	struct ieee80211_channel *fs2_secbestchan = NULL;
+	char rndbuf[2];
+	int txpower_random;
+	int cur_bw;
+	int pri_inactive;
+	int skip_reason = 0;
+
+	/* 2.4 GHz has its own channel-set based picker. */
+	if (IS_IEEE80211_24G_BAND(ic)) {
+		bestchan = ieee80211_chanset_pick_channel(ss->ss_vap);
+		goto end;
+	}
+
+	/*
+	 * Convert CCA interference to CCI factor
+	 */
+	for (i = 0; i < ss->ss_last; i++) {
+		chan = ieee80211_chan2ieee(ic, ss->ss_chans[i]);
+		if (!is_channel_valid(chan))
+			continue;
+
+		if (as->as_cca_intf[chan] <= IEEE80211_SCS_CCA_INTF_SCALE) {
+			as->as_cci[chan] = 2 * as->as_cca_intf[chan] / QTN_AS_CCA_INTF_DIVIDER;
+			as->as_cci[chan] = MIN(as->as_cci[chan], QTN_METRIC_CCI_LIMIT);
+		} else {
+			as->as_cci[chan] = 0;
+		}
+
+		/* Reset ACI here */
+		as->as_aci[chan] = 0;
+	}
+
+	/*
+	 * Derive ACI (Adjacent Channel Interference) from CCI.
+	 */
+	for (i = 0; i < ss->ss_last; i++) {
+		chan = ieee80211_chan2ieee(ic, ss->ss_chans[i]);
+		if (!is_channel_valid(chan))
+			continue;
+
+		/* Adjust adjacent channel metrics to bias against close selection */
+		if (i != 0) {
+			chan2 = ieee80211_chan2ieee(ic, ss->ss_chans[i-1]);
+			if (!is_channel_valid(chan2))
+				continue;
+			if (chan2 >= (chan - 4)){
+				as->as_aci[chan2] += (as->as_cci[chan] >> 1);
+			}
+		}
+
+		if (i != ss->ss_last - 1) {
+			chan2 = ieee80211_chan2ieee(ic, ss->ss_chans[i+1]);
+			if (!is_channel_valid(chan2))
+				continue;
+			if (chan2 <= (chan + 4)){
+				as->as_aci[chan2] += (as->as_cci[chan] >> 1);
+			}
+		}
+	}
+
+	/* DFS entry enabled by default */
+	memcpy(&dm_factor, &g_dm_factor[CHAN_SEL_DFS_REENTRY], sizeof(dm_factor));
+	/* User-configured factors override the defaults individually. */
+	if (ic->ic_dm_factor.flags) {
+		if (ic->ic_dm_factor.flags & DM_FLAG_TXPOWER_FACTOR_PRESENT) {
+			dm_factor.tx_power_factor = ic->ic_dm_factor.txpower_factor;
+		}
+		if (ic->ic_dm_factor.flags & DM_FLAG_ACI_FACTOR_PRESENT) {
+			dm_factor.aci_factor = ic->ic_dm_factor.aci_factor;
+		}
+		if (ic->ic_dm_factor.flags & DM_FLAG_CCI_FACTOR_PRESENT) {
+			dm_factor.cci_factor = ic->ic_dm_factor.cci_factor;
+		}
+		if (ic->ic_dm_factor.flags & DM_FLAG_DFS_FACTOR_PRESENT) {
+			dm_factor.dfs_factor = ic->ic_dm_factor.dfs_factor;
+		}
+		if (ic->ic_dm_factor.flags & DM_FLAG_BEACON_FACTOR_PRESENT) {
+			dm_factor.beacon_factor = ic->ic_dm_factor.beacon_factor;
+		}
+	}
+
+	/*
+	 * Compute Channel Metric (Decision Metric) based on Hossein D's formula.
+	 */
+	for (i = 0; i < ss->ss_last; i++) {
+		struct ieee80211_channel *c = ss->ss_chans[i];
+
+		chan = ieee80211_chan2ieee(ic, ss->ss_chans[i]);
+		if (!is_channel_valid(chan))
+			continue;
+
+		/* Add noise to txpower to improve random selection within channels with small txpower difference */
+		get_random_bytes(rndbuf, 1);
+		txpower_random = rndbuf[0] / (0xFF / (QTN_APSCAN_TXPOWER_RANDOM_LIMIT + 1));
+		as->as_chanmetric[chan] = QTN_CHAN_METRIC_BASE
+			+ dm_factor.tx_power_factor * (c->ic_maxpower + txpower_random)
+			+ dm_factor.cci_factor * as->as_cci[chan]
+			+ dm_factor.aci_factor * as->as_aci[chan]
+			+ dm_factor.dfs_factor * ((c->ic_flags & IEEE80211_CHAN_DFS) ? 1 : 0)
+			+ dm_factor.beacon_factor * MIN(as->as_numbeacons[chan], QTN_METRIC_BEACON_LIMIT);
+
+		/* Add a little noise */
+		get_random_bytes(rndbuf, sizeof(rndbuf));
+		as->as_chanmetric[chan] <<= QTN_APSCAN_METSHIFT;
+		as->as_chanmetric[chan] += (rndbuf[0] << 8) | rndbuf[1];
+	}
+
+	cur_bw = ieee80211_get_bw(ic);
+
+	/* NB: use scan list order to preserve channel preference */
+	for (i = 0; i < ss->ss_last; local_ap_pick_channel_debug(ss, chan, skip_reason), i++) {
+		ic_best_chan = ss->ss_chans[i];
+
+		/* Fix: check for a NULL channel before handing it to
+		 * ieee80211_chan2ieee(); the original checked afterwards. */
+		if (ic_best_chan == NULL) {
+			skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_INVALID;
+			continue;
+		}
+
+		chan = ieee80211_chan2ieee(ic, ic_best_chan);
+		if (!is_channel_valid(chan)) {
+			skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_INVALID;
+			continue;
+		}
+
+		/* Don't bypass the check of current channel in ic_check_channel */
+		if (flags == IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY ||
+				flags == IEEE80211_SCAN_PICK_AVAILABLE_ANY_CHANNEL) {
+			ic->ic_chan_is_set = 0;
+		}
+
+		if (!ic->ic_check_channel(ic, ic_best_chan, 0, 0)) {
+			skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_RADAR;
+			continue;
+		}
+
+		if ((flags & IEEE80211_SCAN_NO_DFS)
+				&& (ic_best_chan->ic_flags & IEEE80211_CHAN_DFS)) {
+			skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_DFS;
+			continue;
+		}
+
+		if ((flags == IEEE80211_SCAN_PICK_ANY_DFS) &&
+				(!(ic_best_chan->ic_flags & IEEE80211_CHAN_DFS))) {
+			skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_NON_DFS;
+			continue;
+		}
+
+		/* IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY is set only during ICAC */
+		if (flags == IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY) {
+			if (ic->ic_dfs_chans_available_for_cac(ic, ic_best_chan) == false) {
+				skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_AVAIL;
+				continue;
+			}
+		}
+
+		/* IEEE80211_SCAN_PICK_AVAILABLE_ANY_CHANNEL is set only after ICAC */
+		/* When channel=0, max_cac_boot=140
+		 * perform cac on two of not yet available DFS channels
+		 * Mark the DFS channels as available after CAC completion
+		 * At the end of initial CAC, choose the best available channel
+		 * from initial metric;
+		 */
+		if (flags == IEEE80211_SCAN_PICK_AVAILABLE_ANY_CHANNEL) {
+			if(!ieee80211_is_chan_available(ic_best_chan)) {
+				skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_NONAVAIL;
+				continue;
+			}
+		}
+
+		/*
+		 * If the channel is unoccupied the max rssi
+		 * should be zero; just take it.  Otherwise
+		 * track the channel with the lowest rssi and
+		 * use that when all channels appear occupied.
+		 *
+		 * Check for channel interference, and if found,
+		 * skip the channel.  We assume that all channels
+		 * will be checked so atleast one can be found
+		 * suitable and will change.  IF this changes,
+		 * then we must know when we "have to" change
+		 * channels for radar and move off.
+		 */
+
+		if (flags & IEEE80211_SCAN_KEEPMODE) {
+			if (ic->ic_curchan != NULL) {
+				if ((ic_best_chan->ic_flags & IEEE80211_CHAN_ALLTURBO) !=
+						(ic->ic_curchan->ic_flags & IEEE80211_CHAN_ALLTURBO)) {
+					skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_TURBO;
+					continue;
+				}
+			}
+		}
+
+		if (ic->ic_rf_chipid != CHIPID_DUAL) {
+			/* hzw: temporary disable these checking for RFIC5 */
+			/* FIXME: Temporarily dont select any pure 20 channels */
+			if (!(ic_best_chan->ic_flags & IEEE80211_CHAN_HT40)){
+				skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_PURE20;
+				continue;
+			}
+
+			if (((ss->ss_pick_flags & IEEE80211_PICK_DOMIAN_MASK) == IEEE80211_PICK_DFS) &&
+					!(ic_best_chan->ic_flags & IEEE80211_CHAN_DFS)) {
+				skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_MISMATCH_NOTDFS;
+				continue;
+			} else if (((ss->ss_pick_flags & IEEE80211_PICK_DOMIAN_MASK) == IEEE80211_PICK_NONDFS) &&
+					(ic_best_chan->ic_flags & IEEE80211_CHAN_DFS)) {
+				skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_MISMATCH_DFS;
+				continue;
+			}
+		}
+
+		pri_inactive = isset(ic->ic_chan_pri_inactive, chan) ? 1 : 0;
+		if (cur_bw >= BW_HT40) {
+			if (((cur_bw == BW_HT40) && !(ic_best_chan->ic_flags & IEEE80211_CHAN_HT40)) ||
+					((cur_bw >= BW_HT80) && !(ic_best_chan->ic_flags & IEEE80211_CHAN_VHT80))) {
+				skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_MISMATCH_BW;
+				continue;
+			}
+
+			/* use the worst chanmetric as the metric of this chan set */
+			chan2 = ieee80211_find_sec_chan(ic_best_chan);
+			if (chan2 == 0 || (isclr(ic->ic_chan_pri_inactive, chan2) &&
+					(as->as_chanmetric[chan] > as->as_chanmetric[chan2]))) {
+				skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_METRIC_BETTER;
+				continue;
+			}
+			if (isclr(ic->ic_chan_pri_inactive, chan2)) {
+				pri_inactive = 0;
+			}
+
+			if (cur_bw >= BW_HT80) {
+				chan2 = ieee80211_find_sec40u_chan(ic_best_chan);
+				if (chan2 == 0 || (isclr(ic->ic_chan_pri_inactive, chan2) &&
+						(as->as_chanmetric[chan] > as->as_chanmetric[chan2]))) {
+					skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_METRIC_BETTER;
+					continue;
+				}
+				if (isclr(ic->ic_chan_pri_inactive, chan2)) {
+					pri_inactive = 0;
+				}
+
+				chan2 = ieee80211_find_sec40l_chan(ic_best_chan);
+				if (chan2 == 0 || (isclr(ic->ic_chan_pri_inactive, chan2) &&
+						(as->as_chanmetric[chan] > as->as_chanmetric[chan2]))) {
+					skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_METRIC_BETTER;
+					continue;
+				}
+				if (isclr(ic->ic_chan_pri_inactive, chan2)) {
+					pri_inactive = 0;
+				}
+			}
+		}
+		if (pri_inactive) {
+			/* All the sub channel can't be primary channel */
+			skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_PRI_INACTIVE;
+			continue;
+		}
+
+		if (!ic->ic_weachan_cac_allowed &&
+				ieee80211_is_on_weather_channel(ic, ic_best_chan)) {
+			/*
+			 * Don't pick weather channel in auto channel mode since it need
+			 * too long CAC time, and it also fix the backward compatibility
+			 * issue with the stations which don't support weather channels
+			 */
+			skip_reason = IEEE80211_SCAN_SKIPCHAN_REASON_WEATHER_CHAN;
+			continue;
+		}
+
+		/* Track the surviving channel with the best metric. */
+		if (isclr(ic->ic_chan_pri_inactive, chan) && ((bestmetricchan == -1) ||
+				(as->as_chanmetric[chan] > as->as_chanmetric[bestmetricchan]))) {
+			bestmetricchan = chan;
+			bestchanix = i;
+		}
+
+		ieee80211_update_alternate_channels(ic,
+					ic_best_chan,
+					&fs2_bestchan,
+					&fs2_secbestchan,
+					local_ap_metric_compare_by_chan);
+
+		ic_best_chan = ieee80211_scan_switch_pri_chan(ss, ic_best_chan);
+		if (bestchan == NULL ||
+				(ic_best_chan &&
+				as->as_chanmetric[ic_best_chan->ic_ieee] > as->as_chanmetric[bestchan->ic_ieee])) {
+			bestchan = ic_best_chan;
+		}
+
+		ieee80211_update_alternate_channels(ic,
+					ic_best_chan,
+					&fs1_bestchan,
+					&fs1_secbestchan,
+					local_ap_metric_compare_by_chan);
+
+		skip_reason = 0;
+	}
+
+	/*
+	 * when ap_pick_channel is being called when channel=dfs, max_boot_cac=140
+	 * dut performs cac on channel=dfs
+	 * dut clears one more dfs channel
+	 * starts operation on channel=dfs
+	 */
+
+	/* IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY is set only during ICAC */
+	if (((flags == IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY) &&
+		(ic->ic_des_chan_after_init_cac)) && (!ic->ic_ignore_init_scan_icac)) {
+		ic_best_chan = ieee80211_find_channel_by_ieee(ic, ic->ic_des_chan_after_init_cac);
+		if (ic_best_chan && (ic->ic_dfs_chans_available_for_cac(ic, ic_best_chan))) {
+			IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+					"%s: original ic_des_chan_after_init_cac channel %d\n",
+					__func__, ic_best_chan->ic_ieee);
+
+			if (isset(ic->ic_chan_pri_inactive, ic_best_chan->ic_ieee)) {
+				IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+					"%s: ic_des_chan_after_init_cac channel %d in inactive primary"
+					" channel list, try to switch another sub-channel\n", __func__,
+					ic_best_chan->ic_ieee);
+				ic_best_chan = ieee80211_scan_switch_pri_chan(ss, ic_best_chan);
+				if (ic_best_chan) {
+					bestchan = ic_best_chan;
+					IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+						"%s: new ic_des_chan_after_init_cac channel %d\n",
+						__func__, ic_best_chan->ic_ieee);
+				} else {
+					IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+						"%s: fail to find new ic_des_chan_after_init_cac channel\n",
+						__func__);
+				}
+			} else {
+				bestchan = ic_best_chan;
+			}
+		}
+	}
+
+	/* Fall back to the best-metric index, or index 0 when nothing fit
+	 * and no bss channel is established yet. */
+	if (!bestchan) {
+		if (bestchanix == -1 && ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
+			bestchanix = 0;
+			IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+					"%s: no suitable channel, go to a default one\n", __func__);
+		}
+		if (bestchanix >= 0)
+			bestchan = ss->ss_chans[bestchanix];
+	}
+
+	if (bestchan)
+		IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN, "%s: bestchan %d bestchan rssi %d\n",
+				__func__, bestchan->ic_ieee, as->as_maxrssi[bestchan->ic_ieee]);
+
+	/* Pick the reference channel for the fast-switch alternates. */
+	if (ss->ss_flags & IEEE80211_SCAN_NOPICK)
+		ic_best_chan = ic->ic_bsschan;
+	else if ((ic->ic_des_chan_after_init_scan) && (!ic->ic_ignore_init_scan_icac))
+		ic_best_chan = ieee80211_find_channel_by_ieee(ic, ic->ic_des_chan_after_init_scan);
+	else if ((ic->ic_des_chan_after_init_cac) && (!ic->ic_ignore_init_scan_icac))
+		ic_best_chan = ieee80211_find_channel_by_ieee(ic, ic->ic_des_chan_after_init_cac);
+	else
+		ic_best_chan = bestchan;
+	ieee80211_ap_pick_alternate_channel(ic,
+				ic_best_chan,
+				fs1_bestchan,
+				fs1_secbestchan,
+				fs2_bestchan,
+				fs2_secbestchan);
+
+	IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+		"%s: Fast-switch alternate best channel updated to %d\n",
+		__func__, ic->ic_ieee_best_alt_chan);
+
+end:
+	IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+		"%s: algorithm %s%s, pick in %s%s%s channels\n", __func__,
+		((ss->ss_pick_flags & IEEE80211_PICK_ALGORITHM_MASK) == IEEE80211_PICK_REENTRY) ? "dfs_reentry" : "",
+		((ss->ss_pick_flags & IEEE80211_PICK_ALGORITHM_MASK) == IEEE80211_PICK_CLEAREST) ? "clearest" : "",
+		((ss->ss_pick_flags & IEEE80211_PICK_DOMIAN_MASK) == IEEE80211_PICK_DFS) ? "dfs" : "",
+		((ss->ss_pick_flags & IEEE80211_PICK_DOMIAN_MASK) == IEEE80211_PICK_NONDFS) ? "non_dfs" : "",
+		((ss->ss_pick_flags & IEEE80211_PICK_DOMIAN_MASK) == IEEE80211_PICK_ALL) ? "all" : "");
+	ss->ss_pick_flags = IEEE80211_PICK_DEFAULT;	/* clean the flag */
+
+	return bestchan;
+}
+
+/*
+ * Pick a quiet channel to use for ap operation.
+ */
+static int
+ap_end(struct ieee80211_scan_state *ss, struct ieee80211vap *vap,
+       int (*action)(struct ieee80211vap *, const struct ieee80211_scan_entry *),
+       u_int32_t flags)
+{
+	struct ieee80211_channel * bestchan = NULL;
+	struct ap_state *as = ss->ss_priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_entry se;
+	int ret;
+
+	KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP,
+		("wrong opmode %u", vap->iv_opmode));
+
+	/* scan end, no action and return */
+	if (ss->ss_flags & IEEE80211_SCAN_QTN_SEARCH_MBS)
+		return 1;
+
+	/* scan end, do DFS action and return */
+	if (ss->ss_flags & IEEE80211_SCAN_DFS_ACTION) {
+		ic->ic_dfs_action_scan_done();
+		return 1;
+	}
+
+#ifdef QTN_BG_SCAN
+	if (ss->ss_flags & IEEE80211_SCAN_QTN_BGSCAN) {
+		ss->ss_pick_flags = IEEE80211_PICK_DEFAULT;	/* clean the flag */
+		return 1;
+	}
+#endif
+
+	memset(&se, 0, sizeof(se));
+
+	if (ic->ic_get_init_cac_duration(ic) > 0) {
+		IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+				"%s: pick dfs channels only: for eu ICAC\n", __func__);
+		flags = IEEE80211_SCAN_PICK_NOT_AVAILABLE_DFS_ONLY;
+	}
+
+	bestchan = ap_pick_channel(ic, ss, flags);
+	if (bestchan == NULL) {
+		IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+			"%s: no suitable channel! Go back!\n", vap->iv_dev->name);
+
+		/*
+		 * When max_boot_cac is a very large value, all channels are cleared.
+		 * Return to ICAC completion procedure
+		 */
+		if (ic->ic_get_init_cac_duration(ic) > 0) {
+			return 0;
+		}
+
+		if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
+			se.se_chan = ic->ic_bsschan;
+			ret = 0;
+		} else if (!ieee80211_chanset_scan_finished(ic)) {
+			return 1;
+		} else {
+			return 0;			/* restart scan */
+		}
+	} else {
+		struct ieee80211_channel *c;
+		/* XXX notify all vap's? */
+		/* if this is a dynamic turbo frequency, start with normal mode first */
+
+		c = bestchan;
+		if (IEEE80211_IS_CHAN_TURBO(c) && !IEEE80211_IS_CHAN_STURBO(c)) {
+			if ((c = ieee80211_find_channel(ic, c->ic_freq,
+					c->ic_flags & ~IEEE80211_CHAN_TURBO)) == NULL) {
+				/* should never happen ?? */
+				return 0;
+			}
+		}
+
+		/*
+		 * If bss channel is valid and if the
+		 * scan is to not pick any channel then select the
+		 * bss channel, otherwise choose the best channel.
+		 */
+		if ((ic->ic_bsschan != IEEE80211_CHAN_ANYC) &&
+					(ss->ss_flags & IEEE80211_SCAN_NOPICK)) {
+			se.se_chan = ic->ic_bsschan;
+		} else {
+			se.se_chan = c;
+		}
+
+		ret = 1;
+	}
+
+	/*
+	 * ic->ic_des_chan_after_init_scan is valid only during initial bootup scan
+	 * Any Scan after the initial bootup scan, shall choose the best channel
+	 * referring to as_chanmetric;
+	 */
+	if ((ic->ic_des_chan_after_init_scan) && (!ic->ic_ignore_init_scan_icac)) {
+		struct ieee80211_channel *ch = ieee80211_find_channel_by_ieee(ic, ic->ic_des_chan_after_init_scan);
+
+		ic->ic_chan_is_set = 0;
+		if (ic->ic_check_channel(ic, ch, 0, 1) && isclr(ic->ic_chan_pri_inactive, ch->ic_ieee)) {
+			se.se_chan = ch;
+		}
+		ic->ic_des_chan_after_init_scan = 0;
+	}
+
+
+	ic->ic_des_chan = se.se_chan;
+
+	as->as_action = ss->ss_ops->scan_default;
+	if (action)
+		as->as_action = action;
+	as->as_selbss = se;
+
+	/*
+	 * Must defer action to avoid possible recursive call through 80211
+	 * state machine, which would result in recursive locking.
+	 */
+	IEEE80211_SCHEDULE_TQUEUE(&as->as_actiontq);
+
+	return ret;
+}
+
+static int
+ap_iterate(struct ieee80211_scan_state *ss,
+	ieee80211_scan_iter_func *f, void *arg)
+{
+	struct ap_state *as = ss->ss_priv;
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ap_scan_entry *se;
+	int chan;
+	int res = 0;
+	int i;
+	int bh_disabled;
+
+	bh_disabled = lock_ap_list(as);
+	for (i = 0; i < ss->ss_last; i++) {
+		chan = ieee80211_chan2ieee(ic, ss->ss_chans[i]);
+		if (!is_channel_valid(chan))
+			continue;
+
+		TAILQ_FOREACH(se, &as->as_scan_list[chan].asl_head, ase_list) {
+			set_se_inuse(se);
+			res = (*f)(arg, &se->base);
+			reset_se_inuse(se);
+			if (res) {
+				unlock_ap_list(as, bh_disabled);
+				return res;
+			}
+		}
+	}
+	unlock_ap_list(as, bh_disabled);
+	return res;
+}
+
+static void
+local_ap_age(struct ieee80211_scan_state *ss, struct ap_state *as, int age_out)
+{
+	struct ap_scan_entry *se, *next;
+	int i;
+	int bh_disabled;
+	int freed = 0;
+
+	bh_disabled = lock_ap_list(as);
+	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+		TAILQ_FOREACH_SAFE(se, &as->as_scan_list[i].asl_head, ase_list, next) {
+			if (se->se_notseen > as->as_age) {
+				TAILQ_REMOVE(&as->as_scan_list[i].asl_head, se, ase_list);
+				if (age_out && as->as_numbeacons[se->base.se_chan->ic_ieee])
+					as->as_numbeacons[se->base.se_chan->ic_ieee]--;
+				free_se_request(se);
+				freed = 1;
+				if (as->as_entry_num > 0)
+					as->as_entry_num--;
+			} else {
+				if (se->se_seen) {
+					se->se_seen = 0;
+				} else {
+					se->se_notseen++;
+				}
+			}
+		}
+	}
+
+	if (age_out && freed)
+		memset(as->as_obss_chanlayout, 0, sizeof(as->as_obss_chanlayout));
+	unlock_ap_list(as, bh_disabled);
+
+	if (age_out && freed)
+		ap_iterate(ss, (ieee80211_scan_iter_func *)ieee80211_scan_check_secondary_channel, ss);
+}
+
+static void
+ap_age(struct ieee80211_scan_state *ss)
+{
+	struct ap_state *as;
+	struct ap_state *as_bak;
+
+	as = (struct ap_state *)ss->ss_scs_priv;
+	as_bak = ss->ss_priv;
+	ss->ss_priv = as;
+
+	local_ap_age(ss, ss->ss_scs_priv, 1);
+
+	ss->ss_priv = as_bak;
+
+	local_ap_age(ss, ss->ss_priv, 0);
+}
+
+static void
+ap_assoc_success(struct ieee80211_scan_state *ss,
+	const u_int8_t macaddr[IEEE80211_ADDR_LEN])
+{
+	/* should not be called */
+}
+
+static void
+ap_assoc_fail(struct ieee80211_scan_state *ss,
+	const u_int8_t macaddr[IEEE80211_ADDR_LEN], int reason)
+{
+	/* should not be called */
+}
+
+/*
+ * Default action to execute when a scan entry is found for ap
+ * mode.  Return 1 on success, 0 on failure
+ */
+static int
+ap_default_action(struct ieee80211vap *vap,
+	const struct ieee80211_scan_entry *se)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+			ic->ic_bsschan != se->se_chan &&
+			vap->iv_state == IEEE80211_S_RUN) {
+		ieee80211_enter_csa(ic,
+				se->se_chan,
+				NULL,
+				IEEE80211_CSW_REASON_SCAN,
+				IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT,
+				IEEE80211_CSA_MUST_STOP_TX,
+				IEEE80211_CSA_F_BEACON | IEEE80211_CSA_F_ACTION);
+
+	} else {
+		ieee80211_create_bss(vap, se->se_chan);
+	}
+
+	if (IEEE80211_IS_11NG_40(ic))
+		ieee80211_check_40_bw_allowed(vap);
+
+	return 1;
+}
+
+static void
+action_tasklet(IEEE80211_TQUEUE_ARG data)
+{
+	struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *)data;
+	struct ap_state *as = (struct ap_state *)ss->ss_priv;
+	struct ieee80211vap *vap = ss->ss_vap;
+
+	(*ss->ss_ops->scan_default)(vap, &as->as_selbss);
+}
+
+/*
+ * Module glue.
+ */
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless support: default ap scanner");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+static const struct ieee80211_scanner ap_default = {
+	.scan_name		= "default",
+	.scan_attach		= ap_attach,
+	.scan_detach		= ap_detach,
+	.scan_start		= ap_start,
+	.scan_restart		= ap_restart,
+	.scan_cancel		= ap_cancel,
+	.scan_end		= ap_end,
+	.scan_flush		= ap_flush,
+	.scan_pickchan		= ap_pick_channel,
+	.scan_add		= ap_add,
+	.scan_age		= ap_age,
+	.scan_iterate		= ap_iterate,
+	.scan_assoc_success	= ap_assoc_success,
+	.scan_assoc_fail	= ap_assoc_fail,
+	.scan_lock		= ap_lock,
+	.scan_unlock		= ap_unlock,
+	.scan_default		= ap_default_action,
+};
+
+static int __init
+init_scanner_ap(void)
+{
+	mlme_stats_init();
+	ieee80211_scanner_register(IEEE80211_M_HOSTAP, &ap_default);
+	return 0;
+}
+module_init(init_scanner_ap);
+
+static void __exit
+exit_scanner_ap(void)
+{
+	ieee80211_scanner_unregister_all(&ap_default);
+	mlme_stats_exit();
+}
+module_exit(exit_scanner_ap);
diff --git a/drivers/qtn/wlan/ieee80211_scan_sta.c b/drivers/qtn/wlan/ieee80211_scan_sta.c
new file mode 100644
index 0000000..c7e1054
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_scan_sta.c
@@ -0,0 +1,1868 @@
+//swdepot/dev/ums/soc/main2/drivers/wlan/ieee80211_scan_sta.c#7 - edit change 3043 (text)
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_scan_sta.c 2749 2007-10-16 08:58:14Z kelmo $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * IEEE 802.11 station scanning support.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#define RSNIE_GROUP_CIPHER_OFFSET       0x7
+static void sta_flush_table(struct sta_table *);
+static int match_bss(struct ieee80211vap *, const struct ieee80211_scan_state *,
+	const struct sta_entry *);
+static int match_ssid(const uint8_t *ie, int nssid,
+	const struct ieee80211_scan_ssid ssids[]);
+static void action_tasklet(IEEE80211_TQUEUE_ARG);
+
+static int
+lock_sta_table(struct sta_table *st)
+{
+	// Call can come in SoftIRQ (timer or tasklet) or process context.
+	// For optimization let's disable SoftIRQ only when call comes in process context.
+	int bh_disabled = !in_softirq() && !irqs_disabled();
+
+	// We must be not called within hardware interrupt context.
+	WARN_ON_ONCE(in_irq());
+	// Disable BH *before* taking the lock (spin_lock_bh ordering): a softirq
+	// arriving on this CPU while we hold st_lock would spin on it forever.
+	if(bh_disabled) {
+		local_bh_disable();
+	}
+	spin_lock(&st->st_lock);
+	return bh_disabled;
+}
+
+static void
+unlock_sta_table(struct sta_table *st, int bh_disabled)
+{
+	spin_unlock(&st->st_lock);	/* drop the lock before re-enabling BH */
+	if(bh_disabled) {
+		local_bh_enable();	/* pending softirqs run here and may take st_lock */
+	}
+}
+
+static int
+sta_lock(struct ieee80211_scan_state *ss)
+{
+	struct sta_table *st = ss->ss_priv;
+	return lock_sta_table(st);
+}
+
+static void sta_unlock(struct ieee80211_scan_state *ss, int bh_disabled)
+{
+	struct sta_table *st = ss->ss_priv;
+	unlock_sta_table(st, bh_disabled);
+}
+
+/*
+ * Attach prior to any scanning work.
+ */
+static int
+sta_attach(struct ieee80211_scan_state *ss)
+{
+	struct sta_table *st;
+
+	_MOD_INC_USE(THIS_MODULE, return 0);
+
+	MALLOC(st, struct sta_table *, sizeof(struct sta_table),
+		M_80211_SCAN, M_NOWAIT | M_ZERO);
+	if (st == NULL)
+		return 0;
+	spin_lock_init(&st->st_lock);
+	spin_lock_init(&st->st_scanlock);
+	TAILQ_INIT(&st->st_entry);
+	IEEE80211_INIT_TQUEUE(&st->st_actiontq, action_tasklet, ss);
+	ss->ss_priv = st;
+	return 1;
+}
+
+/**
+ * Clean up the scan entry structure, including freeing dynamic memory that may be used
+ * to contain received IEs.
+ */
+static void
+cleanup_se(struct sta_entry *se)
+{
+	struct ieee80211_scan_entry *ise = &se->base;
+	if (ise->se_wpa_ie)
+	{
+		FREE(ise->se_wpa_ie, M_DEVBUF);
+		ise->se_wpa_ie = NULL;
+	}
+	if (ise->se_rsn_ie)
+	{
+		FREE(ise->se_rsn_ie, M_DEVBUF);
+		ise->se_rsn_ie = NULL;
+	}
+	if (ise->se_wme_ie)
+	{
+		FREE(ise->se_wme_ie, M_DEVBUF);
+		ise->se_wme_ie = NULL;
+	}
+	if (ise->se_wsc_ie)
+	{
+		FREE(ise->se_wsc_ie, M_DEVBUF);
+		ise->se_wsc_ie = NULL;
+	}
+	if (ise->se_htcap_ie)
+	{
+		FREE(ise->se_htcap_ie, M_DEVBUF);
+		ise->se_htcap_ie = NULL;
+	}
+	if (ise->se_htinfo_ie)
+	{
+		FREE(ise->se_htinfo_ie, M_DEVBUF);
+		ise->se_htinfo_ie = NULL;
+	}
+	if (ise->se_vhtcap_ie)
+	{
+		FREE(ise->se_vhtcap_ie, M_DEVBUF);
+		ise->se_vhtcap_ie = NULL;
+	}
+	if (ise->se_vhtop_ie)
+	{
+		FREE(ise->se_vhtop_ie, M_DEVBUF);
+		ise->se_vhtop_ie = NULL;
+	}
+	if (ise->se_ath_ie)
+	{
+		FREE(ise->se_ath_ie, M_DEVBUF);
+		ise->se_ath_ie = NULL;
+	}
+	if (ise->se_qtn_ie)
+	{
+		FREE(ise->se_qtn_ie, M_DEVBUF);
+		ise->se_qtn_ie = NULL;
+	}
+	if (ise->se_ext_bssid_ie)
+	{
+		FREE(ise->se_ext_bssid_ie, M_DEVBUF);
+		ise->se_ext_bssid_ie = NULL;
+	}
+}
+
+/**
+ * Free scan entry structure.
+ */
+static void
+free_se(struct sta_entry *se)
+{
+	cleanup_se(se);
+	FREE(se, M_80211_SCAN);
+}
+
+/**
+ * Free scan entry structure or put request if it is in use.
+ * Function must be called with keeping table locked (lock_sta_table(st)/unlock_sta_table(st)).
+ */
+static void
+free_se_request(struct sta_entry *se)
+{
+	if(se->se_inuse) {
+		se->se_request_to_free = 1;
+	} else {
+		free_se(se);
+	}
+}
+
+/**
+ * Free scan entry function if it is not used and there is request to free it.
+ * Function must be called with keeping table locked (lock_sta_table(st)/unlock_sta_table(st)).
+ */
+static void
+free_se_process(struct sta_entry *se)
+{
+	if(!se->se_inuse && se->se_request_to_free) {
+		free_se(se);
+	}
+}
+/**
+ * Set in-use flag.
+ * Function must be called with keeping table locked (lock_sta_table(st)/unlock_sta_table(st)).
+ */
+static void
+set_se_inuse(struct sta_entry *se)
+{
+	se->se_inuse = 1;
+}
+/**
+ * Reset in-use flag. 'se' entry can be destroyed.
+ * Function must be called with keeping table locked (lock_sta_table(st)/unlock_sta_table(st)).
+ */
+static void
+reset_se_inuse(struct sta_entry *se)
+{
+	se->se_inuse = 0;
+	free_se_process(se);
+}
+
+/*
+ * Cleanup any private state.
+ */
+static int
+sta_detach(struct ieee80211_scan_state *ss)
+{
+	struct sta_table *st = ss->ss_priv;
+
+	if (st != NULL) {
+		IEEE80211_CANCEL_TQUEUE(&st->st_actiontq);
+		sta_flush_table(st);
+		FREE(st, M_80211_SCAN);
+	}
+
+	_MOD_DEC_USE(THIS_MODULE);
+	return 1;
+}
+
+/*
+ * Flush all per-scan state.
+ */
+static int
+sta_flush(struct ieee80211_scan_state *ss)
+{
+	struct sta_table *st = ss->ss_priv;
+	int bh_disabled;
+
+	bh_disabled = lock_sta_table(st);
+	sta_flush_table(st);
+	unlock_sta_table(st, bh_disabled);
+	ss->ss_last = 0;
+	return 0;
+}
+
+/*
+ * Flush all entries in the scan cache.
+ */
+static void
+sta_flush_table(struct sta_table *st)
+{
+	struct sta_entry *se, *next;
+
+	TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+		TAILQ_REMOVE(&st->st_entry, se, se_list);
+		LIST_REMOVE(se, se_hash);
+		free_se_request(se);
+		if (st->st_entry_num > 0)
+			st->st_entry_num--;
+	}
+}
+
+/*
+ * Compare function for sorting scan results.
+ * Return >0 if @b is considered better.
+ */
+static int
+sta_cmp(struct ieee80211_scan_state *ss,
+	struct sta_entry *a, struct sta_entry *b)
+{
+	struct ieee80211_scan_entry *ise_a = &a->base;
+	struct ieee80211_scan_entry *ise_b = &b->base;
+
+	/* SSID matched AP preferred */
+	if (match_ssid(ise_a->se_ssid, ss->ss_nssid, ss->ss_ssid) &&
+			!match_ssid(ise_b->se_ssid, ss->ss_nssid, ss->ss_ssid))
+		return -1;
+	if (!match_ssid(ise_a->se_ssid, ss->ss_nssid, ss->ss_ssid) &&
+			match_ssid(ise_b->se_ssid, ss->ss_nssid, ss->ss_ssid))
+		return 1;
+
+	/* WPS active AP preferred */
+	if (ieee80211_wps_active(ise_a->se_wsc_ie) &&
+			!ieee80211_wps_active(ise_b->se_wsc_ie))
+		return -1;
+	if (!ieee80211_wps_active(ise_a->se_wsc_ie) &&
+			ieee80211_wps_active(ise_b->se_wsc_ie))
+		return 1;
+
+	/* WPA/WPA2 support preferred */
+	if ((ise_a->se_wpa_ie || ise_a->se_rsn_ie) &&
+			((!ise_b->se_wpa_ie && !ise_b->se_rsn_ie)))
+		return -1;
+	if ((!ise_a->se_wpa_ie && !ise_a->se_rsn_ie) &&
+			((ise_b->se_wpa_ie || ise_b->se_rsn_ie)))
+		return 1;
+
+	/* Higher RSSI AP preferred */
+	if (ise_a->se_rssi > ise_b->se_rssi)
+		return -1;
+
+	return 1;
+}
+
+/* Caller must lock the st->st_lock */
+static void
+sta_sort(struct ieee80211_scan_state *ss, struct sta_entry *se)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se_tmp, *next;
+	int found = 0;
+
+	TAILQ_FOREACH_SAFE(se_tmp, &st->st_entry, se_list, next) {
+		if (sta_cmp(ss, se_tmp, se) > 0) {
+			TAILQ_INSERT_BEFORE(se_tmp, se, se_list);
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		TAILQ_INSERT_TAIL(&st->st_entry, se, se_list);
+}
+
+/*
+ * Process a beacon or probe response frame; create an
+ * entry in the scan cache or update any previous entry.
+ */
+static int
+sta_add(struct ieee80211_scan_state *ss, const struct ieee80211_scanparams *sp,
+	const struct ieee80211_frame *wh, int subtype, int rssi, int rstamp)
+{
+#define	PICK1ST(_ss) \
+	((ss->ss_flags & (IEEE80211_SCAN_PICK1ST | IEEE80211_SCAN_GOTPICK)) == \
+	IEEE80211_SCAN_PICK1ST)
+	struct sta_table *st = ss->ss_priv;
+	const u_int8_t *macaddr = wh->i_addr2;
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct sta_entry *se;
+	struct ieee80211_scan_entry *ise;
+	int hash;
+	int found = 0;
+	int bh_disabled;
+
+	if (!sp)
+		return 0;
+
+	hash = STA_HASH(macaddr);
+	bh_disabled = lock_sta_table(st);
+	LIST_FOREACH(se, &st->st_hash[hash], se_hash)
+		if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr) &&
+		    sp->ssid[1] == se->base.se_ssid[1] &&
+		    !memcmp(se->base.se_ssid + 2, sp->ssid + 2, se->base.se_ssid[1])) {
+			TAILQ_REMOVE(&st->st_entry, se, se_list);
+			found = 1;
+			break;
+		}
+
+	if (!found) {
+		if (st->st_entry_num >= ic->ic_scan_tbl_len_max) {
+			if (printk_ratelimit())
+				printk("scan found %u scan results but the list is"
+					" restricted to %u entries\n", st->st_entry_num,
+					ic->ic_scan_tbl_len_max);
+			unlock_sta_table(st, bh_disabled);
+			return 0;
+		}
+
+		MALLOC(se, struct sta_entry *, sizeof(struct sta_entry),
+		       M_80211_SCAN, M_NOWAIT | M_ZERO);
+		if (se == NULL) {
+			if (printk_ratelimit())
+				printk("failed to allocate new scan entry\n");
+			unlock_sta_table(st, bh_disabled);
+			return 0;
+		}
+		st->st_entry_num++;
+		se->se_inuse = 0;
+		se->se_request_to_free = 0;
+		se->se_scangen = st->st_scangen - 1;
+		IEEE80211_ADDR_COPY(se->base.se_macaddr, macaddr);
+		LIST_INSERT_HEAD(&st->st_hash[hash], se, se_hash);
+	}
+	ise = &se->base;
+
+	ieee80211_add_scan_entry(ise, sp, wh, subtype, rssi, rstamp);
+
+	if (se->se_lastupdate == 0) {			/* first sample */
+		se->se_avgrssi = RSSI_IN(rssi);
+	} else {					/* avg with previous samples */
+		RSSI_LPF(se->se_avgrssi, rssi);
+	}
+	ise->se_rssi = RSSI_GET(se->se_avgrssi);
+
+	/* clear failure count after STA_FAIL_AGE passes */
+	if (se->se_fails && (jiffies - se->se_lastfail) > STA_FAILS_AGE*HZ) {
+		se->se_fails = 0;
+		IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_SCAN, macaddr,
+			"%s: fails %u", __func__, se->se_fails);
+	}
+	se->se_lastupdate = jiffies;		/* update time */
+	se->se_seen = 1;
+	se->se_notseen = 0;
+
+	sta_sort(ss, se);
+
+	unlock_sta_table(st, bh_disabled);
+
+	/*
+	 * If looking for a quick choice and nothing's
+	 * been found check here.
+	 */
+
+	if (PICK1ST(ss) && match_bss(vap, ss, se) == 0)
+	{
+		ss->ss_flags |= IEEE80211_SCAN_GOTPICK;
+	}
+
+	return 1;
+#undef PICK1ST
+}
+
+static struct ieee80211_channel *
+find11gchannel(struct ieee80211com *ic, int i, int freq)
+{
+	struct ieee80211_channel *c;
+	int j;
+
+	/*
+	 * The normal ordering in the channel list is b channel
+	 * immediately followed by g so optimize the search for
+	 * this.  We'll still do a full search just in case.
+	 */
+	for (j = i+1; j < ic->ic_nchans; j++) {
+		c = &ic->ic_channels[j];
+		if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
+			return c;
+	}
+	for (j = 0; j < i; j++) {
+		c = &ic->ic_channels[j];
+		if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
+			return c;
+	}
+	return NULL;
+}
+
+static u_int8_t sschans[IEEE80211_CHAN_BYTES];
+static void
+add_channels(struct ieee80211com *ic,
+	struct ieee80211_scan_state *ss,
+	enum ieee80211_phymode mode, const u_int16_t freq[], int nfreq)
+{
+	struct ieee80211_channel *c, *cg;
+	u_int modeflags;
+	int i;
+	struct ieee80211vap *vap = ss->ss_vap;
+
+	modeflags = ieee80211_get_chanflags(mode);
+	for (i = 0; i < nfreq; i++) {
+		c = ieee80211_find_channel(ic, freq[i], modeflags);
+		if (c == NULL || isclr(ic->ic_chan_active_20, c->ic_ieee))
+			continue;
+		/* Channel already selected */
+		if (isset(sschans, c->ic_ieee))
+			continue;
+		setbit(sschans, c->ic_ieee);
+		if (mode == IEEE80211_MODE_AUTO) {
+			/*
+			 * XXX special-case 11b/g channels so we select
+			 *     the g channel if both are present.
+			 */
+			if (IEEE80211_IS_CHAN_B(c) &&
+			    (cg = find11gchannel(ic, i, c->ic_freq)) != NULL)
+				c = cg;
+		}
+		if (ss->ss_last >= IEEE80211_SCAN_MAX)
+			break;
+
+		if (ic->ic_flags_ext & IEEE80211_FEXT_SCAN_FAST_REASS &&
+			ic->ic_fast_reass_chan && (ic->ic_fast_reass_chan != IEEE80211_CHAN_ANYC) &&
+			(ic->ic_fast_reass_chan->ic_ieee != c->ic_ieee)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+					"Skipping channel %u (fast reassoc channel %u)\n",
+					c->ic_ieee, ic->ic_fast_reass_chan->ic_ieee);
+			continue;
+		} else if (ic->ic_flags_ext & IEEE80211_FEXT_SCAN_FAST_REASS &&
+			ic->ic_fast_reass_chan && (ic->ic_fast_reass_chan != IEEE80211_CHAN_ANYC) &&
+			(ic->ic_fast_reass_chan->ic_ieee == c->ic_ieee)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"Adding channel %u (fast reassoc channel %u)\n",
+				c->ic_ieee, ic->ic_fast_reass_chan->ic_ieee);
+		}
+
+		ss->ss_chans[ss->ss_last++] = c;
+	}
+#undef N
+}
+
+static const u_int16_t rcl1[] =		/* 8 FCC channel: 36, 40, 44, 48, 52, 56, 60, 64 */
+{ 5180, 5200, 5220, 5240, 5260, 5280, 5300, 5320};
+static const u_int16_t rcl2[] =		/* 4 MKK channels: 34, 38, 42, 46 */
+{ 5170, 5190, 5210, 5230 };
+static const u_int16_t rcl3[] =		/* 2.4Ghz ch: 1,2,3,4,5,6,7,8,9,10,11,12,13 */
+	{ 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472};
+static const u_int16_t rcl4[] =		/* 6 FCC channel: 149, 153, 157, 161, 165, 169 */
+{ 5745, 5765, 5785, 5805, 5825, 5845 };
+static const u_int16_t rcl7[] =		/* 12 FCC channel: 100,104,108,112,116,120,124,128,132,136,140,144 */
+{ 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700, 5720 };
+static const u_int16_t rcl8[] =		/* 2.4Ghz ch: 2,3,4,5,8,9,10,12 */
+{ 2417, 2422, 2427, 2432, 2447, 2452, 2457, 2467 };
+static const u_int16_t rcl9[] =		/* 2.4Ghz ch: 14 */
+{ 2484 };
+static const u_int16_t rcl10[] =	/* Added Korean channels 2312-2372 */
+{ 2312, 2317, 2322, 2327, 2332, 2337, 2342, 2347, 2352, 2357, 2362, 2367, 2372 };
+static const u_int16_t rcl11[] =	/* Added Japan channels in 4.9/5.0 spectrum */
+{ 5040, 5060, 5080, 4920, 4940, 4960, 4980 };
+
+/* Other 5GHz channels : 35, 37, 39, 41, 43, 45, 47,
+   49, 50, 51, 53, 54, 55, 57, 58, 59, 61, 62, 63, 65, 66 */
+static const u_int16_t rcl14[] =
+{ 5175, 5185, 5195, 5205, 5215, 5225, 5235,
+  5245, 5250, 5255, 5265, 5270, 5275, 5285, 5290, 5295, 5305, 5310, 5315, 5325, 5330 };
+
+/* Other 5GHz channels : 98, 99, 101, 102, 103, 105, 106,
+   107, 109, 110, 111, 113, 114, 115, 117, 118, 119, 121,
+   122, 123, 125, 126, 127, 129, 130, 131, 133, 134, 135,
+   137, 138, 139, 141, 142 */
+static const u_int16_t rcl15[] =
+{ 5490, 5495, 5505, 5510, 5515, 5525, 5530,
+  5535, 5545, 5550, 5555, 5565, 5570, 5575, 5585, 5590, 5595, 5605,
+  5610, 5615, 5625, 5630, 5635, 5645, 5650, 5655, 5665, 5670, 5675,
+  5685, 5690, 5695, 5705, 5710 };
+
+/* Other 5GHz channels : 147, 148, 150, 151, 152, 154, 155,
+   156, 158, 159, 160, 162, 163, 164, 166, 167 */
+static const u_int16_t rcl16[] =
+{ 5735, 5740, 5750, 5755, 5760, 5770, 5775,
+  5780, 5790, 5795, 5800, 5810, 5815, 5820, 5830, 5835 };
+
+/* Other 5GHz channels : 182, 183, 184, 185, 186, 187, 188,
+   189, 190, 191, 192, 193, 194, 195, 196, 197, 198 */
+static const u_int16_t rcl17[] =
+{ 4910, 4915, 4920, 4925, 4930, 4935, 4940,
+  4945, 4950, 4955, 4960, 4965, 4970, 4975, 4980, 4985, 4990 };
+
+/* 5GHz channels  - 40 MHz mode:
+ * 34, 38, 42, 46, 50, 54, 58, 62, 66, 102, 106, 110, 114, 118,
+ * 122, 126, 130, 134, 138, 142, 151, 155, 159, 163, 167
+ */
+static const u_int16_t rcl18[] =
+{ 5170, 5190, 5210, 5230, 5250, 5270, 5290, 5310, 5330, 5510, 5530, 5550, 5570, 5590,
+  5610, 5630, 5650, 5670, 5690, 5710, 5755, 5775, 5795, 5815, 5835 };
+
+struct scanlist {
+	u_int16_t	mode;
+	u_int16_t	count;
+	const u_int16_t	*list;
+};
+
+#define	IEEE80211_MODE_TURBO_STATIC_A	IEEE80211_MODE_MAX
+#define	X(a)	.count = sizeof(a)/sizeof(a[0]), .list = a
+
+static const struct scanlist staScanTable[] = {
+	{ IEEE80211_MODE_11B,		X(rcl3) },
+	{ IEEE80211_MODE_11A,		X(rcl1) },
+	{ IEEE80211_MODE_11A,		X(rcl2) },
+	{ IEEE80211_MODE_11B,		X(rcl8) },
+	{ IEEE80211_MODE_11B,		X(rcl9) },
+	{ IEEE80211_MODE_11A,		X(rcl4) },
+	{ IEEE80211_MODE_11A,		X(rcl7) },
+	{ IEEE80211_MODE_11B,		X(rcl10) },
+	{ IEEE80211_MODE_11A,		X(rcl11) },
+	{ IEEE80211_MODE_11NG,		X(rcl3) },
+	{ IEEE80211_MODE_11NG_HT40PM,	X(rcl3) },
+	{ IEEE80211_MODE_11NA,		X(rcl1) },
+	{ IEEE80211_MODE_11NA,		X(rcl7) },
+	{ IEEE80211_MODE_11NA,		X(rcl4) },
+	{ IEEE80211_MODE_11NA,		X(rcl11) },
+	{ IEEE80211_MODE_11NA,		X(rcl2) },
+#ifdef QTN_SUPP_ALL_CHAN
+	{ IEEE80211_MODE_11NA,		X(rcl14) },
+	{ IEEE80211_MODE_11NA,		X(rcl15) },
+	{ IEEE80211_MODE_11NA,		X(rcl16) },
+	{ IEEE80211_MODE_11NA,		X(rcl17) },
+#endif /* QTN_SUPP_ALL_CHAN */
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl1) },
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl4) },
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl7) },
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl11) },
+/*	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl2) }, */
+#ifdef QTN_SUPP_ALL_CHAN
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl14) },
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl15) },
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl16) },
+	{ IEEE80211_MODE_11NA_HT40PM,	X(rcl17) },
+#endif /* QTN_SUPP_ALL_CHAN */
+	{ IEEE80211_MODE_11AC_VHT20PM,	X(rcl1) },
+	{ IEEE80211_MODE_11AC_VHT20PM,	X(rcl4) },
+	{ IEEE80211_MODE_11AC_VHT20PM,	X(rcl7) },
+	{ IEEE80211_MODE_11AC_VHT20PM,	X(rcl11) },
+
+	{ IEEE80211_MODE_11AC_VHT40PM,	X(rcl1) },
+	{ IEEE80211_MODE_11AC_VHT40PM,	X(rcl4) },
+	{ IEEE80211_MODE_11AC_VHT40PM,	X(rcl7) },
+	{ IEEE80211_MODE_11AC_VHT40PM,	X(rcl11) },
+
+	{ IEEE80211_MODE_11AC_VHT80PM,	X(rcl1) },
+	{ IEEE80211_MODE_11AC_VHT80PM,	X(rcl4) },
+	{ IEEE80211_MODE_11AC_VHT80PM,	X(rcl7) },
+	{ IEEE80211_MODE_11AC_VHT80PM,	X(rcl11) },
+	{ .list = NULL }
+};
+
+#undef X
+
+/*
+ * Start a station-mode scan by populating the channel list.
+ */
+static int
+sta_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+#define	N(a)	(sizeof(a)/sizeof(a[0]))
+	struct ieee80211com *ic = vap->iv_ic;
+	struct sta_table *st = ss->ss_priv;
+	const struct scanlist *scan;
+	enum ieee80211_phymode mode;
+	struct ieee80211_channel *c;
+	int i;
+
+	ss->ss_last = 0;
+	/* Selected scan channel list */
+	memset(sschans, 0, sizeof(sschans));
+
+	if ((ss->ss_flags & IEEE80211_SCAN_OPCHAN) && vap->iv_state == IEEE80211_S_RUN) {
+		ss->ss_chans[ss->ss_last++] = ic->ic_curchan;
+		goto scan_channel_list_ready;
+	}
+
+#if defined (PLATFORM_QFDR)
+	/* One of the use case QFDR requires fast scan procedure,
+	 * when channel is known */
+	if (vap->iv_scan_only_freq && vap->iv_scan_only_cnt) {
+		c = ieee80211_find_channel(ic, vap->iv_scan_only_freq, 0);
+
+		/* check active channels list */
+		if (c && isset(ic->ic_chan_active, c->ic_ieee)) {
+			ss->ss_chans[ss->ss_last++] = c;
+			goto scan_channel_list_ready;
+		}
+	}
+#endif
+
+	/*
+	 * Use the table of ordered channels to construct the list
+	 * of channels for scanning.  Any channels in the ordered
+	 * list not in the master list will be discarded.
+	 */
+	for (scan = staScanTable; scan->list != NULL; scan++) {
+		mode = scan->mode;
+		if ((ic->ic_des_mode != IEEE80211_MODE_AUTO) && (ic->ic_rf_chipid != CHIPID_DUAL)) {
+			/*
+			 * If a desired mode was specified, scan only
+			 * channels that satisfy that constraint.
+			 */
+			if (ic->ic_des_mode != mode) {
+				/*
+				 * The scan table marks 2.4Ghz channels as b
+				 * so if the desired mode is 11g, then use
+				 * the 11b channel list but upgrade the mode.
+				 */
+				if (ic->ic_des_mode != IEEE80211_MODE_11G ||
+				    mode != IEEE80211_MODE_11B)
+					continue;
+				mode = IEEE80211_MODE_11G;	/* upgrade */
+			}
+		} else if ((ss->ss_flags & IEEE80211_SCAN_OBSS) &&
+				!IS_IEEE80211_MODE_24G_BAND(mode)) {
+			continue;
+		} else {
+			/*
+			 * This lets ieee80211_scan_add_channels
+			 * upgrade an 11b channel to 11g if available.
+			 */
+			if (mode == IEEE80211_MODE_11B)
+				mode = IEEE80211_MODE_AUTO;
+		}
+		/*
+		 * Add the list of the channels; any that are not
+		 * in the master channel list will be discarded.
+		 */
+		add_channels(ic, ss, mode, scan->list, scan->count);
+	}
+
+	/*
+	 * Add the channels from the ic (from HAL) that are not present
+	 * in the staScanTable.
+	 */
+	for (i = 0; i < ic->ic_nchans; i++) {
+		c = &ic->ic_channels[i];
+		if (isclr(ic->ic_chan_active_20, c->ic_ieee))
+			continue;
+		/* No dfs interference detected channels */
+		if (c->ic_flags & IEEE80211_CHAN_RADAR)
+			continue;
+		/*
+		 * scan dynamic turbo channels in normal mode.
+		 */
+		if (IEEE80211_IS_CHAN_DTURBO(c))
+			continue;
+		mode = ieee80211_chan2mode(c);
+		if (ic->ic_des_mode != IEEE80211_MODE_AUTO) {
+			/*
+			 * If a desired mode was specified, scan only 
+			 * channels that satisfy that constraint.
+			 */
+			if (ic->ic_des_mode != mode)
+				continue;
+		}
+		/* Is channel already selected? */
+		if (isset(sschans, c->ic_ieee))
+			continue;
+		setbit(sschans, c->ic_ieee);
+		ss->ss_chans[ss->ss_last++] = c;
+	}
+
+scan_channel_list_ready:
+	ss->ss_next = 0;
+
+	ss->ss_mindwell = msecs_to_jiffies(ic->ic_mindwell_active);
+	ss->ss_mindwell_passive = msecs_to_jiffies(ic->ic_mindwell_passive);
+	ss->ss_maxdwell = msecs_to_jiffies(ic->ic_maxdwell_active);
+	ss->ss_maxdwell_passive = msecs_to_jiffies(ic->ic_maxdwell_passive);
+
+#if defined (PLATFORM_QFDR)
+	if (vap->iv_scan_only_freq && vap->iv_scan_only_cnt) {
+		/* case of QFDR fast scan on specific channel,
+		 * increase DWELL times */
+#define QFDR_DWELL_EMPIRICAL_FACTOR 2
+
+		ss->ss_mindwell <<= QFDR_DWELL_EMPIRICAL_FACTOR;
+		ss->ss_mindwell_passive <<= QFDR_DWELL_EMPIRICAL_FACTOR;
+		ss->ss_maxdwell <<= QFDR_DWELL_EMPIRICAL_FACTOR;
+		ss->ss_maxdwell_passive <<= QFDR_DWELL_EMPIRICAL_FACTOR;
+
+		vap->iv_scan_only_cnt--;
+		if (vap->iv_scan_only_cnt == 0)
+			vap->iv_scan_only_freq = 0;
+	}
+#endif
+
+#ifdef IEEE80211_DEBUG
+	if (ieee80211_msg_scan(vap)) {
+		printk("%s: scan set ", vap->iv_dev->name);
+		ieee80211_scan_dump_channels(ss);
+		printk(" dwell min %ld max %ld\n",
+			ss->ss_mindwell, ss->ss_maxdwell);
+	}
+#endif /* IEEE80211_DEBUG */
+
+	st->st_newscan = 1;
+
+	if (ic->ic_flags_ext & IEEE80211_FEXT_SCAN_FAST_REASS &&
+		ic->ic_fast_reass_chan && (ic->ic_fast_reass_chan != IEEE80211_CHAN_ANYC)) {
+
+		if (ieee80211_msg(vap, IEEE80211_MSG_SCAN)) {
+			int i = 0;
+			printk("%p Fast reassoc scan (%u)\n",
+					ic, ic->ic_fast_reass_chan->ic_ieee);
+			printk("Channel list for scan (should be 1 entry first %u last %u):\n", ss->ss_next, ss->ss_last);
+			/* NULL terminate the list to print it out. */
+			ss->ss_chans[ss->ss_last] = NULL;
+			c = ss->ss_chans[i];
+			while(c != IEEE80211_CHAN_ANYC && c != NULL) {
+				printk("channel %u ->", c->ic_ieee);
+				c = ss->ss_chans[++i];
+			}
+			printk("<end>\n");
+		}
+
+		ic->ic_fast_reass_scan_cnt++;
+		if (ic->ic_fast_reass_scan_cnt > IEEE80211_FAST_REASS_SCAN_MAX) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"Clearing fast scan channel - tried %u times\n", --ic->ic_fast_reass_scan_cnt);
+			ic->ic_fast_reass_chan = IEEE80211_CHAN_ANYC;
+			ic->ic_fast_reass_scan_cnt = 0;
+		}
+	}
+
+	return 0;
+#undef N
+}
+
+/*
+ * Restart a bg scan.
+ *
+ * Only flags the scan state as "new" so the next pick/age pass will
+ * refresh the not-seen accounting; the channel list built by the start
+ * method is reused as-is.  Always returns 0.
+ */
+static int
+sta_restart(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+	struct sta_table *st = ss->ss_priv;
+
+	st->st_newscan = 1;
+	return 0;
+}
+
+/*
+ * Cancel an ongoing scan.
+ *
+ * Cancels the deferred action task queued by the pick routines so it
+ * cannot run after the scan has been cancelled.  Always returns 0.
+ */
+static int
+sta_cancel(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+	struct sta_table *st = ss->ss_priv;
+
+	IEEE80211_CANCEL_TQUEUE(&st->st_actiontq);
+	return 0;
+}
+
+/*
+ * Return the highest rate value advertised by a scan entry, taking
+ * both the supported-rates and extended-rates elements into account.
+ * The basic-rate bit is masked off (IEEE80211_RATE_VAL) before
+ * comparison.  NB: se_rates[1]/se_xrates[1] are the IE length octets.
+ */
+static u_int8_t
+maxrate(const struct ieee80211_scan_entry *se)
+{
+	u_int8_t max, r;
+	int i;
+
+	max = 0;
+	for (i = 0; i < se->se_rates[1]; i++) {
+		r = se->se_rates[2+i] & IEEE80211_RATE_VAL;
+		if (r > max)
+			max = r;
+	}
+	for (i = 0; i < se->se_xrates[1]; i++) {
+		r = se->se_xrates[2+i] & IEEE80211_RATE_VAL;
+		if (r > max)
+			max = r;
+	}
+	return max;
+}
+
+/*
+ * Compare the capabilities of two entries and decide which is
+ * more desirable (return >0 if a is considered better).  Note
+ * that we assume compatibility/usability has already been checked
+ * so we don't need to (e.g. validate whether privacy is supported).
+ * Used to select the best scan candidate for association in a BSS.
+ *
+ * Tie-break order: privacy support, association-failure history,
+ * max rate / band (only when RSSI is within 5 dB), then raw RSSI.
+ */
+static int
+sta_compare(const struct sta_entry *a, const struct sta_entry *b)
+{
+	u_int8_t maxa, maxb;
+	int weight;
+
+	/* privacy support preferred */
+	if ((a->base.se_capinfo & IEEE80211_CAPINFO_PRIVACY) &&
+	    (b->base.se_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
+		return 1;
+	if ((a->base.se_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0 &&
+	    (b->base.se_capinfo & IEEE80211_CAPINFO_PRIVACY))
+		return -1;
+
+	/* compare count of previous failures */
+	weight = b->se_fails - a->se_fails;
+	/* NB: a difference of exactly one failure is ignored */
+	if (abs(weight) > 1)
+		return weight;
+
+	if (abs(b->base.se_rssi - a->base.se_rssi) < 5) {
+		/* best/max rate preferred if signal level close enough XXX */
+		maxa = maxrate(&a->base);
+		maxb = maxrate(&b->base);
+		if (maxa != maxb)
+			return maxa - maxb;
+		/* XXX use freq for channel preference */
+		/* for now just prefer 5Ghz band to all other bands */
+		if (IEEE80211_IS_CHAN_5GHZ(a->base.se_chan) &&
+		   !IEEE80211_IS_CHAN_5GHZ(b->base.se_chan))
+			return 1;
+		if (!IEEE80211_IS_CHAN_5GHZ(a->base.se_chan) &&
+		    IEEE80211_IS_CHAN_5GHZ(b->base.se_chan))
+			return -1;
+	}
+	/* all things being equal, use signal level */
+	return a->base.se_rssi - b->base.se_rssi;
+}
+
+
+/*
+ * Check MCS suitability and return the best supported rate.
+ *
+ * Compiled out (always returns 0) unless RATE_SUPP_ENABLE is defined.
+ * When enabled: intersect the AP's basic MCS sets with our own MCS
+ * capability mask; if no basic MCS is supported, or some basic set is
+ * entirely unsupported, return a value with IEEE80211_HT_BASIC_RATE
+ * set so match_bss() can reject the BSS.  Otherwise return the rate
+ * table index of the best common MCS.
+ */
+static int
+check_basic_mcs(struct ieee80211vap *vap, const struct ieee80211_scan_entry *se)
+{
+#ifdef RATE_SUPP_ENABLE
+	struct ieee80211com *ic = vap->iv_ic;
+	int i, okset = 0, okidx = 0, okridx, val = 0;
+	u_int8_t mcs = 0;
+	struct ieee80211_ie_htinfo *htinfo = (struct ieee80211_ie_htinfo *)se->htinfo;
+
+	/*
+	 * first check for the sets that we support
+	 */
+	for (i = 0; i < IEEE80211_HT_MAXMCS_BASICSET_SUPPORTED; i++)
+	{
+		mcs = IEEE80211_HTINFO_BASIC_MCS_VALUE(htinfo, i);
+		mcs = mcs & (ic->ic_htcap.mcsset[i]);
+		IEEE80211_HT_MCS_IDX(mcs, val);
+		if (val != 0xFF)
+		{
+			okidx = val;
+			okset = i;
+		}
+	}
+
+	/* NB: was "if !(okidx)" with missing ';' below -- did not compile */
+	if (!okidx)
+		return IEEE80211_HT_BASIC_RATE;
+	else
+		okridx = IEEE80211_HT_RATE_TABLE_IDX(okset, okidx);
+
+	/*
+	 * now check for the sets that we do not support
+	 */
+	for (i = 0; i < IEEE80211_HT_MAXMCS_BASICSET_SUPPORTED; i++)
+	{
+		mcs = IEEE80211_HTINFO_BASIC_MCS_VALUE(htinfo, i);
+		IEEE80211_HT_MCS_IDX(mcs, val);
+		if (val != 0xFF)
+			return (IEEE80211_HT_BASIC_RATE | IEEE80211_HT_RATE_TABLE_IDX(i, val - 1));
+	}
+
+	return okridx;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Check rate set suitability and return the best supported rate.
+ *
+ * Walks the entry's supported-rates and then extended-rates elements,
+ * tracking the highest rate also present in our own rate set for the
+ * entry's channel/mode.  If no rate overlaps, returns the last rate
+ * seen with IEEE80211_RATE_BASIC set so the caller can flag a mismatch.
+ */
+static int
+check_rate(struct ieee80211vap *vap, const struct ieee80211_scan_entry *se)
+{
+#define	RV(v)	((v) & IEEE80211_RATE_VAL)
+	struct ieee80211com *ic = vap->iv_ic;
+	const struct ieee80211_rateset *srs;
+	int i, j, nrs, r, okrate, badrate, fixedrate;
+	const u_int8_t *rs;
+
+	okrate = badrate = fixedrate = 0;
+	
+	/* pick our rate set matching the entry's channel width/mode */
+	if (IEEE80211_IS_CHAN_HALF(se->se_chan))
+		srs = &ic->ic_sup_half_rates;
+	else if (IEEE80211_IS_CHAN_QUARTER(se->se_chan))
+		srs = &ic->ic_sup_quarter_rates;
+	else
+		srs = &ic->ic_sup_rates[ieee80211_chan2mode(se->se_chan)];
+	nrs = se->se_rates[1];
+	rs = se->se_rates + 2;
+	fixedrate = IEEE80211_FIXED_RATE_NONE;
+again:
+	for (i = 0; i < nrs; i++) {
+		r = RV(rs[i]);
+		badrate = r;
+		/*
+		 * Check any fixed rate is included. 
+		 */
+#if 0 /* Not required */
+		if (r == vap->iv_fixed_rate)
+			fixedrate = r;
+#endif
+		/*
+		 * Check against our supported rates.
+		 */
+		for (j = 0; j < srs->rs_nrates; j++)
+			if (r == RV(srs->rs_rates[j])) {
+				if (r > okrate)		/* NB: track max */
+					okrate = r;
+				break;
+			}
+	}
+	if (rs == se->se_rates+2) {
+		/* scan xrates too; sort of an algol68-style for loop */
+		nrs = se->se_xrates[1];
+		rs = se->se_xrates + 2;
+		goto again;
+	}
+	//if (okrate == 0 || vap->iv_fixed_rate != fixedrate)
+	if (okrate == 0)
+		return badrate | IEEE80211_RATE_BASIC;
+	else
+		return RV(okrate);
+#undef RV
+}
+
+/*
+ * Return 1 if the SSID element pointed to by 'ie' (ie[1] = length,
+ * ie+2 = SSID octets) matches any of the nssid desired SSIDs, else 0.
+ */
+static int
+match_ssid(const u_int8_t *ie,
+	int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+	int i;
+
+	for (i = 0; i < nssid; i++) {
+		if (ie[1] == ssids[i].len &&
+		     memcmp(ie + 2, ssids[i].ssid, ie[1]) == 0)
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Test a scan candidate for suitability/compatibility.
+ *
+ * Returns 0 if the entry is usable, otherwise a bitmask of failure
+ * reasons (0x01 channel inactive, 0x02 bss type, 0x04 privacy/TKIP,
+ * 0x08 rate set, 0x10 ssid, 0x20 bssid, 0x40 too many assoc failures,
+ * 0x80 stale entry, 0x100 basic MCS, 0x200 bridge mode).
+ */
+static int
+match_bss(struct ieee80211vap *vap,
+	const struct ieee80211_scan_state *ss, const struct sta_entry *se0)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	const struct ieee80211_scan_entry *se = &se0->base;
+	u_int8_t rate;
+	int fail;
+	u_int8_t ridx;
+	uint32_t channel;
+
+	fail = 0;
+	channel = ieee80211_chan2ieee(ic, se->se_chan);
+	channel = (channel > IEEE80211_CHAN_MAX) ? 0 : channel;
+	if (isclr(ic->ic_chan_active_20, channel))
+		fail |= 0x01;
+	if (vap->iv_opmode == IEEE80211_M_IBSS) {
+		if ((se->se_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
+			fail |= 0x02;
+	} else {
+		if ((se->se_capinfo & IEEE80211_CAPINFO_ESS) == 0)
+			fail |= 0x02;
+	}
+	if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+		if ((se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
+			fail |= 0x04;
+	} else {
+		/* XXX does this mean privacy is supported or required? */
+		if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY)
+			fail |= 0x04;
+	}
+	rate = check_rate(vap, se);
+	if (rate & IEEE80211_RATE_BASIC)
+		fail |= 0x08;
+	if ((ss->ss_nssid != 0) &&
+	    !match_ssid(se->se_ssid, ss->ss_nssid, ss->ss_ssid))
+		fail |= 0x10;
+	if ((vap->iv_flags & IEEE80211_F_DESBSSID) &&
+	    !IEEE80211_ADDR_EQ(vap->iv_des_bssid, se->se_bssid))
+		fail |= 0x20;
+
+	if (se0->se_fails >= STA_FAILS_MAX)
+		fail |= 0x40;
+	if (se0->se_notseen >= STA_PURGE_SCANS)
+		fail |= 0x80;
+
+	ridx = check_basic_mcs(vap, se);
+/* NB: fully parenthesized -- unparenthesized 'var&0x80' is unsafe in expressions */
+#define IEEE80211_HT_IS_BASIC_MCS(var) (((var) & 0x80) != 0)
+	if (IEEE80211_HT_IS_BASIC_MCS(ridx))
+		fail |= 0x100;
+	/*
+	 * Ignore APs that do not support Bridge Mode if Bridge Mode has not been
+	 * disabled.
+	 * But: dual band RFIC can be used as regular STA connect to 3rd party AP
+	 * so disable the bridge mode checking for RFIC5
+	 */
+
+	if (ic->ic_rf_chipid != CHIPID_DUAL) {
+		/*
+		 * NB: was '!se->se_qtn_ie_flags & IEEE80211_QTN_BRIDGEMODE';
+		 * unary '!' binds before '&' so the flag test was wrong
+		 * whenever other bits of se_qtn_ie_flags were set.
+		 */
+		if (!(vap->iv_qtn_flags & IEEE80211_QTN_BRIDGEMODE_DISABLED) &&
+		    !(se->se_qtn_ie_flags & IEEE80211_QTN_BRIDGEMODE)) {
+			fail |= 0x200;
+		}
+	}
+	/* allow tkip for non US/FCC regions */
+	if (!IEEE80211_IS_TKIP_ALLOWED(ic)) {
+		if ((se->se_rsn_ie != NULL) && ((se->se_rsn_ie)[RSNIE_GROUP_CIPHER_OFFSET] == RSN_CSE_TKIP))
+			fail |= 0x04;
+	}
+#ifdef IEEE80211_DEBUG
+	if (ieee80211_msg(vap, IEEE80211_MSG_SCAN | IEEE80211_MSG_ROAM)) {
+		printf(" %03x", fail);
+		printf(" %c %s",
+			fail & 0x40 ? '=' : fail & 0x80 ? '^' : fail ? '-' : '+',
+			ether_sprintf(se->se_macaddr));
+		printf(" %s%c", ether_sprintf(se->se_bssid),
+			fail & 0x20 ? '!' : ' ');
+		printf(" %3d%c", ieee80211_chan2ieee(ic, se->se_chan),
+			fail & 0x01 ? '!' : ' ');
+		printf(" %+4d", se->se_rssi);
+		printf(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2,
+			fail & 0x08 ? '!' : ' ');
+		printf(" %4s%c",
+			(se->se_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" :
+			(se->se_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" : "????",
+			fail & 0x02 ? '!' : ' ');
+		printf(" %3s%c ",
+			(se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) ? "wep" : "no",
+			fail & 0x04 ? '!' : ' ');
+		printf(" bm=%u/0x%02x%c ",
+			(vap->iv_qtn_flags & IEEE80211_QTN_BRIDGEMODE_DISABLED), se->se_qtn_ie_flags,
+			fail & 0x200 ? '!' : ' ');
+		printk(" %6s%d","htridx",(ridx & IEEE80211_HT_RATE_TABLE_IDX_MASK));
+		ieee80211_print_essid(se->se_ssid + 2, se->se_ssid[1]);
+		printf("%s\n", fail & 0x10 ? "!" : "");
+	}
+#endif
+	return fail;
+}
+
+/*
+ * Bump the ``not seen'' count of every cached entry that was not
+ * observed during the last scan, and clear the seen flag of those
+ * that were.  Called at the start of a new scan pass.
+ */
+static void
+sta_update_notseen(struct sta_table *st)
+{
+	struct sta_entry *se;
+	int bh_disabled;
+
+	bh_disabled = lock_sta_table(st);
+	TAILQ_FOREACH(se, &st->st_entry, se_list) {
+		/*
+		 * If seen then reset and don't bump the count;
+		 * otherwise bump the ``not seen'' count.  Note
+		 * that this ensures that stations for which we
+		 * see frames while not scanning but not during
+		 * this scan will not be penalized.
+		 */
+		if (se->se_seen)
+			se->se_seen = 0;
+		else
+			se->se_notseen++;
+	}
+	unlock_sta_table(st, bh_disabled);
+}
+
+/*
+ * Decrement the association-failure count of every cached entry so
+ * previously rejected candidates become eligible again over time.
+ */
+static void
+sta_dec_fails(struct sta_table *st)
+{
+	struct sta_entry *se;
+	int bh_disabled;
+
+	bh_disabled = lock_sta_table(st);
+	TAILQ_FOREACH(se, &st->st_entry, se_list)
+		if (se->se_fails)
+			se->se_fails--;
+	unlock_sta_table(st, bh_disabled);
+}
+
+/*
+ * Scan the cache for the best usable candidate: entries must pass
+ * match_bss() and are ranked against each other with sta_compare().
+ * Returns NULL if no entry is suitable.
+ */
+static struct sta_entry *
+select_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se, *selbs = NULL;
+	int bh_disabled;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN | IEEE80211_MSG_ROAM, " %s\n",
+		"macaddr          bssid         chan  rssi  rate flag  wep  essid");
+	bh_disabled = lock_sta_table(st);
+	TAILQ_FOREACH(se, &st->st_entry, se_list) {
+		if (match_bss(vap, ss, se) == 0) {
+			if (selbs == NULL)
+				selbs = se;
+			else if (sta_compare(se, selbs) > 0)
+				selbs = se;
+		}
+	}
+	unlock_sta_table(st, bh_disabled);
+
+	return selbs;
+}
+
+/*
+ * Pick an ap or ibss network to join or find a channel
+ * to use to start an ibss network.
+ *
+ * Returns 0 to ask the framework to restart the scan, 1 to terminate
+ * it.  On success the selected entry is copied to st_selbss and the
+ * join action is deferred to the action task queue.
+ */
+static int
+sta_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap,
+	int (*action)(struct ieee80211vap *, const struct ieee80211_scan_entry *),
+	u_int32_t flags)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *selbss;
+
+	KASSERT(vap->iv_opmode == IEEE80211_M_STA,
+		("wrong mode %u", vap->iv_opmode));
+
+	if (st->st_newscan) {
+		sta_update_notseen(st);
+		st->st_newscan = 0;
+	}
+	if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
+		/*
+		 * Manual/background scan, don't select+join the
+		 * bss, just return.  The scanning framework will
+		 * handle notification that this has completed.
+		 */
+		ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+		return 1;
+	}
+	/*
+	 * Automatic sequencing; look for a candidate and
+	 * if found join the network.
+	 */
+	/* NB: unlocked read should be ok */
+	if (TAILQ_FIRST(&st->st_entry) == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: no scan candidate\n", __func__);
+notfound:
+		/*
+		 * If nothing suitable was found decrement
+		 * the failure counts so entries will be
+		 * reconsidered the next time around.  We
+		 * really want to do this only for sta's
+		 * where we've previously had some success.
+		 */
+		sta_dec_fails(st);
+		st->st_newscan = 1;
+		return 0;			/* restart scan */
+	}
+	st->st_action = ss->ss_ops->scan_default;
+	if (action)
+		st->st_action = action;
+	if ((selbss = select_bss(ss, vap)) == NULL ) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: select_bss failed\n", __func__);
+		goto notfound;
+	}
+	st->st_selbss = selbss->base;
+
+	/*
+	 * Must defer action to avoid possible recursive call through 80211
+	 * state machine, which would result in recursive locking.
+	 */
+	if (!(ss->ss_flags & IEEE80211_SCAN_ONCE)) {
+		IEEE80211_SCHEDULE_TQUEUE(&st->st_actiontq);
+	}
+
+	return 1;				/* terminate scan */
+}
+
+
+/*
+ * Lookup an entry in the scan cache.  We assume we're
+ * called from the bottom half or such that we don't need
+ * to block the bottom half so that it's safe to return
+ * a reference to an entry w/o holding the lock on the table.
+ *
+ * Returns the matching entry or NULL; lookup is by MAC address via
+ * the STA_HASH bucket chains.
+ */
+static struct sta_entry *
+sta_lookup(struct sta_table *st, const u_int8_t macaddr[IEEE80211_ADDR_LEN])
+{
+	struct sta_entry *se;
+	int hash = STA_HASH(macaddr);
+	int bh_disabled;
+
+	bh_disabled = lock_sta_table(st);
+	LIST_FOREACH(se, &st->st_hash[hash], se_hash)
+		if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr))
+			break;
+	unlock_sta_table(st, bh_disabled);
+
+	return se;		/* NB: unlocked */
+}
+
+/*
+ * Decide whether to roam away from the current AP.  Compares the
+ * current tx rate and rssi against the per-band roaming thresholds;
+ * if the cache is stale a bg scan is kicked off, otherwise a better
+ * candidate (if any) is joined directly.
+ */
+static void
+sta_roam_check(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se, *selbs;
+	u_int8_t roamRate, curRate;
+	int8_t roamRssi, curRssi;
+
+	se = sta_lookup(st, ni->ni_macaddr);
+	if (se == NULL) {
+		/* XXX something is wrong */
+		return;
+	}
+
+	/* select thresholds for the band we are operating in */
+	/* XXX do we need 11g too? */
+	if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan)) {
+		roamRate = vap->iv_roam.rate11b;
+		roamRssi = vap->iv_roam.rssi11b;
+	} else if (IEEE80211_IS_CHAN_B(ic->ic_bsschan)) {
+		roamRate = vap->iv_roam.rate11bOnly;
+		roamRssi = vap->iv_roam.rssi11bOnly;
+	} else {
+		roamRate = vap->iv_roam.rate11a;
+		roamRssi = vap->iv_roam.rssi11a;
+	}
+	/* NB: the most up to date rssi is in the node, not the scan cache */
+	curRssi = ic->ic_node_getrssi(ni);
+	if (vap->iv_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
+		curRate = ni->ni_rates.rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ROAM,
+			"%s: currssi %d currate %u roamrssi %d roamrate %u\n",
+			__func__, curRssi, curRate, roamRssi, roamRate);
+	} else {
+		curRate = roamRate;		/* NB: ensure compare below fails */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ROAM,
+			"%s: currssi %d roamrssi %d\n",
+			__func__, curRssi, roamRssi);
+	}
+	if (((vap->iv_flags & IEEE80211_F_BGSCAN) || ic->ic_scan_opchan_enable) &&
+	    time_after(jiffies, ic->ic_lastscan + vap->iv_scanvalid)) {
+		/*
+		 * Scan cache contents is too old; check about updating it.
+		 */
+		if (curRate < roamRate || curRssi < roamRssi) {
+			/*
+			 * Thresholds exceeded, force a scan now so we
+			 * have current state to make a decision with.
+			 */
+			ieee80211_bg_scan(vap);
+		} else if (time_after(jiffies,
+			ic->ic_lastdata + vap->iv_bgscanidle)) {
+			/*
+			 * We're not in need of a new ap, but idle;
+			 * kick off a bg scan to replenish the cache.
+			 */
+			ieee80211_bg_scan(vap);
+		}
+	} else {
+		/*
+		 * Scan cache contents are warm enough to use;
+		 * check if a new ap should be used and switch.
+		 * XXX deauth current ap
+		 */
+		if (curRate < roamRate || curRssi < roamRssi) {
+			se->base.se_rssi = curRssi;
+			selbs = select_bss(ss, vap);
+			if (selbs != NULL && selbs != se)
+				ieee80211_sta_join(vap, &selbs->base);
+		}
+	}
+}
+
+/*
+ * Age entries in the scan cache.
+ * XXX also do roaming since it's convenient
+ *
+ * Entries not seen in STA_PURGE_SCANS consecutive scans are unlinked
+ * from both the list and hash chain and released.
+ */
+static void
+sta_age(struct ieee80211_scan_state *ss)
+{
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se, *next;
+	int bh_disabled;
+
+	bh_disabled = lock_sta_table(st);
+	TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+		if (se->se_notseen >= STA_PURGE_SCANS) {
+			TAILQ_REMOVE(&st->st_entry, se, se_list);
+			LIST_REMOVE(se, se_hash);
+			free_se_request(se);
+			if (st->st_entry_num > 0)
+				st->st_entry_num--;
+		}
+	}
+	unlock_sta_table(st, bh_disabled);
+	/*
+	 * If rate control is enabled check periodically to see if
+	 * we should roam from our current connection to one that
+	 * might be better.  This only applies when we're operating
+	 * in sta mode and automatic roaming is set.
+	 * XXX defer if busy
+	 * XXX repeater station
+	 */
+	KASSERT(vap->iv_opmode == IEEE80211_M_STA,
+		("wrong mode %u", vap->iv_opmode));
+	/* XXX turn this off until the ap release is cut */
+	/* NB: the "0 &&" deliberately disables roam checking for now */
+	if (0 && vap->iv_ic->ic_roaming == IEEE80211_ROAMING_AUTO &&
+	    vap->iv_state >= IEEE80211_S_RUN)
+		/* XXX vap is implicit */
+		sta_roam_check(ss, vap);
+}
+
+/*
+ * Remove particular entry from the scan cache,
+ * if the sta state changes from RUN to any other state
+ *
+ * Matches on both MAC address and ESSID so only the node's own cache
+ * entry is dropped.
+ */
+static void
+sta_remove(struct ieee80211_scan_state *ss, struct ieee80211_node *ni)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se, *next;
+	int bh_disabled;
+
+	bh_disabled = lock_sta_table(st);
+	TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+		if (IEEE80211_ADDR_EQ(se->base.se_macaddr, ni->ni_macaddr) &&
+			!memcmp(se->base.se_ssid + 2, ni->ni_essid, se->base.se_ssid[1])) {
+			TAILQ_REMOVE(&st->st_entry, se, se_list);
+			LIST_REMOVE(se, se_hash);
+			free_se_request(se);
+			if (st->st_entry_num > 0)
+				st->st_entry_num--;
+		}
+	}
+
+	unlock_sta_table(st, bh_disabled);
+}
+
+/*
+ * Iterate over the entries in the scan cache, invoking
+ * the callback function on each one.
+ *
+ * st_scanlock serializes whole iterations; se_scangen marks entries
+ * already visited so the table lock can be dropped around the callback
+ * and the walk restarted safely afterwards.  Entries are pinned with
+ * set_se_inuse() while unlocked.  Returns the first non-zero callback
+ * result (iteration stops there), else 0.
+ */
+static int
+sta_iterate(struct ieee80211_scan_state *ss, 
+	ieee80211_scan_iter_func *f, void *arg)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se;
+	u_int gen;
+	int res = 0;
+	int bh_disabled;
+
+	spin_lock(&st->st_scanlock);
+	gen = st->st_scangen++;
+restart:
+	bh_disabled = lock_sta_table(st);
+	TAILQ_FOREACH(se, &st->st_entry, se_list) {
+		if (se->se_scangen != gen) {
+			se->se_scangen = gen;
+			/* update public state */
+			se->base.se_age = jiffies - se->se_lastupdate;
+			/* we are going to use entry after unlocking */
+			set_se_inuse(se);
+			unlock_sta_table(st, bh_disabled);
+
+			res = (*f)(arg, &se->base);
+
+			bh_disabled = lock_sta_table(st);
+			reset_se_inuse(se);
+			unlock_sta_table(st, bh_disabled);
+
+			if(res != 0) {
+			  /* We probably ran out of buffer space. */
+			  goto done;
+			}
+			goto restart;
+		}
+	}
+
+	unlock_sta_table(st, bh_disabled);
+
+ done:
+	spin_unlock(&st->st_scanlock);
+
+	return res;
+}
+
+/*
+ * Record an association failure against the cache entry for macaddr,
+ * bumping its failure count and timestamping the event.  Unknown
+ * addresses are silently ignored.
+ */
+static void
+sta_assoc_fail(struct ieee80211_scan_state *ss,
+	const u_int8_t macaddr[IEEE80211_ADDR_LEN], int reason)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se;
+
+	se = sta_lookup(st, macaddr);
+	if (se != NULL) {
+		se->se_fails++;
+		se->se_lastfail = jiffies;
+		IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN,
+			macaddr, "%s: reason %u fails %u",
+			__func__, reason, se->se_fails);
+	}
+}
+
+/*
+ * Record a successful association: clear the entry's failure count
+ * and timestamp the association.  Unknown addresses are ignored.
+ */
+static void
+sta_assoc_success(struct ieee80211_scan_state *ss,
+	const u_int8_t macaddr[IEEE80211_ADDR_LEN])
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se;
+
+	se = sta_lookup(st, macaddr);
+	if (se != NULL) {
+		se->se_fails = 0;
+		IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN,
+			macaddr, "%s: fails %u", __func__, se->se_fails);
+		se->se_lastassoc = jiffies;
+	}
+}
+
+/* Scanner method table registered for STA mode. */
+static const struct ieee80211_scanner sta_default = {
+	.scan_name		= "default",
+	.scan_attach		= sta_attach,
+	.scan_detach		= sta_detach,
+	.scan_start		= sta_start,
+	.scan_restart		= sta_restart,
+	.scan_cancel		= sta_cancel,
+	.scan_end		= sta_pick_bss,
+	.scan_flush		= sta_flush,
+	.scan_add		= sta_add,
+	.scan_age		= sta_age,
+	.scan_iterate		= sta_iterate,
+	.scan_assoc_fail	= sta_assoc_fail,
+	.scan_assoc_success	= sta_assoc_success,
+	.scan_lock		= sta_lock,
+	.scan_unlock		= sta_unlock,
+	.scan_default		= ieee80211_sta_join,
+	.scan_remove            = sta_remove,
+};
+
+/*
+ * Start an adhoc-mode scan by populating the channel list.
+ *
+ * Builds ss_chans from the ordered staScanTable (filtered by the
+ * desired phy mode) and sets fixed 200ms min/max dwell times.
+ * Always returns 0.
+ */
+static int
+adhoc_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+#define	N(a)	(sizeof(a)/sizeof(a[0]))
+	struct ieee80211com *ic = vap->iv_ic;
+	struct sta_table *st = ss->ss_priv;
+	const struct scanlist *scan;
+	enum ieee80211_phymode mode;
+	
+	ss->ss_last = 0;
+	/*
+	 * Use the table of ordered channels to construct the list
+	 * of channels for scanning.  Any channels in the ordered
+	 * list not in the master list will be discarded.
+	 */
+	for (scan = staScanTable; scan->list != NULL; scan++) {
+		mode = scan->mode;
+		if (ic->ic_des_mode != IEEE80211_MODE_AUTO) {
+			/*
+			 * If a desired mode was specified, scan only 
+			 * channels that satisfy that constraint.
+			 */
+			if (ic->ic_des_mode != mode) {
+				/*
+				 * The scan table marks 2.4Ghz channels as b
+				 * so if the desired mode is 11g, then use
+				 * the 11b channel list but upgrade the mode.
+				 */
+				if (ic->ic_des_mode != IEEE80211_MODE_11G ||
+				    mode != IEEE80211_MODE_11B)
+					continue;
+				mode = IEEE80211_MODE_11G;	/* upgrade */
+			}
+		} else {
+			/*
+			 * This lets ieee80211_scan_add_channels
+			 * upgrade an 11b channel to 11g if available.
+			 */
+			if (mode == IEEE80211_MODE_11B)
+				mode = IEEE80211_MODE_AUTO;
+		}
+		/*
+		 * Add the list of the channels; any that are not
+		 * in the master channel list will be discarded.
+		 */
+		add_channels(ic, ss, mode, scan->list, scan->count);
+	}
+	ss->ss_next = 0;
+	/* XXX tunables */
+	ss->ss_mindwell = msecs_to_jiffies(200);	/* 200ms */
+	ss->ss_maxdwell = msecs_to_jiffies(200);	/* 200ms */
+
+#ifdef IEEE80211_DEBUG
+	if (ieee80211_msg_scan(vap)) {
+		printf("%s: scan set ", vap->iv_dev->name);
+		ieee80211_scan_dump_channels(ss);
+		printf(" dwell min %ld max %ld\n",
+			ss->ss_mindwell, ss->ss_maxdwell);
+	}
+#endif /* IEEE80211_DEBUG */
+
+	st->st_newscan = 1;
+
+	return 0;
+#undef N
+}
+
+/*
+ * Select a channel to start an adhoc network on.
+ * The channel list was populated with appropriate
+ * channels so select one that looks least occupied.
+ * XXX need regulatory domain constraints
+ *
+ * "Least occupied" = channel whose strongest cached entry has the
+ * lowest rssi.  Returns NULL only if the channel list is empty.
+ */
+static struct ieee80211_channel *
+adhoc_pick_channel(struct ieee80211_scan_state *ss)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se;
+	struct ieee80211_channel *c, *bestchan;
+	int i, bestrssi, maxrssi;
+	int bh_disabled;
+
+	bestchan = NULL;
+	bestrssi = -1;
+
+	bh_disabled = lock_sta_table(st);
+	for (i = 0; i < ss->ss_last; i++) {
+		c = ss->ss_chans[i];
+		maxrssi = 0;
+		TAILQ_FOREACH(se, &st->st_entry, se_list) {
+			if (se->base.se_chan != c)
+				continue;
+			if (se->base.se_rssi > maxrssi)
+				maxrssi = se->base.se_rssi;
+		}
+		if (bestchan == NULL || maxrssi < bestrssi) {
+			bestchan = c;
+			/*
+			 * NB: bestrssi was never updated before, so the
+			 * comparison always failed and the first channel
+			 * won regardless of occupancy.
+			 */
+			bestrssi = maxrssi;
+		}
+	}
+	unlock_sta_table(st, bh_disabled);
+
+	return bestchan;
+}
+
+/*
+ * Pick an ibss network to join or find a channel
+ * to use to start an ibss network.
+ *
+ * Returns 0 to restart the scan, 1 to terminate it.  When no suitable
+ * network exists but a desired ssid is configured, a new ibss is
+ * started on the desired (or least-occupied) channel via the deferred
+ * action task.
+ */
+static int
+adhoc_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap,
+	int (*action)(struct ieee80211vap *, const struct ieee80211_scan_entry *),
+	u_int32_t flags)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *selbs;
+	struct ieee80211_channel *chan;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	KASSERT(vap->iv_opmode == IEEE80211_M_IBSS ||
+		vap->iv_opmode == IEEE80211_M_AHDEMO,
+		("wrong opmode %u", vap->iv_opmode));
+
+	if (st->st_newscan) {
+		sta_update_notseen(st);
+		st->st_newscan = 0;
+	}
+	if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
+		/*
+		 * Manual/background scan, don't select+join the
+		 * bss, just return.  The scanning framework will
+		 * handle notification that this has completed.
+		 */
+		ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+		return 1;
+	}
+
+	st->st_action = ss->ss_ops->scan_default;
+	if (action)
+		st->st_action = action;
+
+	/*
+	 * Automatic sequencing; look for a candidate and
+	 * if found join the network.
+	 */
+	/* NB: unlocked read should be ok */
+	if (TAILQ_FIRST(&st->st_entry) == NULL ||
+		(selbs = select_bss(ss, vap)) == NULL ) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: no scan candidate\n", __func__);
+		if (vap->iv_des_nssid) {
+			/*
+			 * No existing adhoc network to join and we have
+			 * an ssid; start one up.  If no channel was
+			 * specified, try to select a channel.
+			 */
+			if (ic->ic_des_chan == IEEE80211_CHAN_ANYC)
+				chan = adhoc_pick_channel(ss);
+			else
+				chan = ic->ic_des_chan;
+			if (chan != NULL) {
+				struct ieee80211_scan_entry se;
+
+				/* fabricate a minimal entry carrying only the channel */
+				memset(&se, 0, sizeof(se));
+				se.se_chan = chan;
+				st->st_selbss = se;
+				/* defer action */
+				IEEE80211_SCHEDULE_TQUEUE(&st->st_actiontq);
+				return 1;
+			}
+		}
+		/*
+		 * If nothing suitable was found decrement
+		 * the failure counts so entries will be
+		 * reconsidered the next time around.  We
+		 * really want to do this only for sta's
+		 * where we've previously had some success.
+		 */
+		sta_dec_fails(st);
+		st->st_newscan = 1;
+		return 0;			/* restart scan */
+	}
+
+	/* 
+	 * Must defer action to avoid possible recursive call through 80211
+	 * state machine, which would result in recursive locking.
+	 */
+	st->st_selbss = selbs->base;
+	IEEE80211_SCHEDULE_TQUEUE(&st->st_actiontq);
+
+	return 1;				/* terminate scan */
+}
+
+/*
+ * Age entries in the scan cache.
+ *
+ * Like sta_age() but without the roam check, and using a strict '>'
+ * against STA_PURGE_SCANS rather than '>='.
+ */
+static void
+adhoc_age(struct ieee80211_scan_state *ss)
+{
+	struct sta_table *st = ss->ss_priv;
+	struct sta_entry *se, *next;
+	int bh_disabled;
+
+	bh_disabled = lock_sta_table(st);
+	TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+		if (se->se_notseen > STA_PURGE_SCANS) {
+			TAILQ_REMOVE(&st->st_entry, se, se_list);
+			LIST_REMOVE(se, se_hash);
+			free_se_request(se);
+			if (st->st_entry_num > 0)
+				st->st_entry_num--;
+		}
+	}
+	unlock_sta_table(st, bh_disabled);
+}
+
+/*
+ * Default action to execute when a scan entry is found for adhoc
+ * mode.  Return 1 on success, 0 on failure
+ *
+ * An all-zero bssid marks a fabricated entry from adhoc_pick_bss():
+ * create a new bss on the entry's channel instead of joining.
+ */
+static int
+adhoc_default_action(struct ieee80211vap *vap,
+	const struct ieee80211_scan_entry *se)
+{
+	u_int8_t zeroMacAddr[IEEE80211_ADDR_LEN];
+
+	memset(&zeroMacAddr, 0, IEEE80211_ADDR_LEN);
+	if (IEEE80211_ADDR_EQ(se->se_bssid, &zeroMacAddr[0])) {
+		ieee80211_create_bss(vap, se->se_chan);
+		return 1;
+	} else 
+		return ieee80211_sta_join(vap,se);
+}
+
+/* Scanner method table registered for IBSS and adhoc-demo modes. */
+static const struct ieee80211_scanner adhoc_default = {
+	.scan_name		= "default",
+	.scan_attach		= sta_attach,
+	.scan_detach		= sta_detach,
+	.scan_start		= adhoc_start,
+	.scan_restart		= sta_restart,
+	.scan_cancel		= sta_cancel,
+	.scan_end		= adhoc_pick_bss,
+	.scan_flush		= sta_flush,
+	.scan_add		= sta_add,
+	.scan_age		= adhoc_age,
+	.scan_iterate		= sta_iterate,
+	.scan_assoc_fail	= sta_assoc_fail,
+	.scan_assoc_success	= sta_assoc_success,
+	.scan_lock		= sta_lock,
+	.scan_unlock		= sta_unlock,
+	.scan_default		= adhoc_default_action,
+};
+
+/*
+ * Deferred scan-failure handler (runs from the action task queue).
+ * Resets failure counts, in adhoc mode optionally creates a new bss
+ * on the desired channel, and otherwise clears the scanning flag and
+ * restarts the scan with the previous parameters.
+ */
+static void
+action_tasklet(IEEE80211_TQUEUE_ARG data)
+{
+	struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *)data;
+	struct sta_table *st = (struct sta_table *)ss->ss_priv;
+	struct ieee80211vap *vap = ss->ss_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan;
+
+	switch (vap->iv_opmode) {
+	case IEEE80211_M_STA:
+		sta_dec_fails(st);
+		st->st_newscan = 1;
+		break;
+	default:
+		/* ADHOC */
+        	if (vap->iv_des_nssid) {
+			/*
+			 * No existing adhoc network to join and we have
+			 * an ssid; start one up.  If no channel was
+			 * specified, try to select a channel.
+			 */
+			if (ic->ic_des_chan == IEEE80211_CHAN_ANYC)
+				chan = adhoc_pick_channel(ss);
+			else
+				chan = ic->ic_des_chan;
+			if (chan != NULL) {
+				struct ieee80211_scan_entry se;
+				
+				memset(&se, 0, sizeof(se));
+				se.se_chan = chan;
+				/* NB: on success the bss is up; nothing left to do */
+				if ((*ss->ss_ops->scan_default)(vap, &se))
+					return;
+			}
+		}
+		/*
+		 * If nothing suitable was found decrement
+	         * the failure counts so entries will be
+		 * reconsidered the next time around.  We
+		 * really want to do this only for sta's
+		 * where we've previously had some success.
+		 */
+		sta_dec_fails(st);
+		st->st_newscan = 1;
+			break;
+	}
+
+	/*
+	 * restart scan
+	 */
+
+	/* no ap, clear the flag for a new scan */
+	vap->iv_ic->ic_flags &= ~IEEE80211_F_SCAN;
+	if ((ss->ss_flags & IEEE80211_SCAN_USECACHE) == 0)
+		(void) ieee80211_start_scan(vap, ss->ss_flags, ss->ss_duration, ss->ss_nssid, ss->ss_ssid);
+}
+
+/*
+ * Module glue.
+ */
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless support: default station scanner");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+/* Register the default scanners for STA, IBSS and adhoc-demo modes. */
+static int __init
+init_scanner_sta(void)
+{
+	ieee80211_scanner_register(IEEE80211_M_STA, &sta_default);
+	ieee80211_scanner_register(IEEE80211_M_IBSS, &adhoc_default);
+	ieee80211_scanner_register(IEEE80211_M_AHDEMO, &adhoc_default);
+	return 0;
+}
+module_init(init_scanner_sta);
+
+/* Unregister both scanner tables on module unload. */
+static void __exit
+exit_scanner_sta(void)
+{
+	ieee80211_scanner_unregister_all(&sta_default);
+	ieee80211_scanner_unregister_all(&adhoc_default);
+}
+module_exit(exit_scanner_sta);
diff --git a/drivers/qtn/wlan/ieee80211_tdls.c b/drivers/qtn/wlan/ieee80211_tdls.c
new file mode 100755
index 0000000..1544d66
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_tdls.c
@@ -0,0 +1,4411 @@
+ /*SH0
+*******************************************************************************
+**                                                                           **
+**         Copyright (c) 2010-2013 Quantenna Communications, Inc.            **
+**                            All Rights Reserved                            **
+**                                                                           **
+**  File        : ieee80211_tdls.c                                           **
+**  Description : Tunnelled Direct-Link Setup                                **
+**                                                                           **
+**  This module implements the IEEE Std 802.11z specification as well as a   **
+**  proprietary discovery mechanism.                                         **
+**                                                                           **
+*******************************************************************************
+**                                                                           **
+**  Redistribution and use in source and binary forms, with or without       **
+**  modification, are permitted provided that the following conditions       **
+**  are met:                                                                 **
+**  1. Redistributions of source code must retain the above copyright        **
+**     notice, this list of conditions and the following disclaimer.         **
+**  2. Redistributions in binary form must reproduce the above copyright     **
+**     notice, this list of conditions and the following disclaimer in the   **
+**     documentation and/or other materials provided with the distribution.  **
+**  3. The name of the author may not be used to endorse or promote products **
+**     derived from this software without specific prior written permission. **
+**                                                                           **
+**  Alternatively, this software may be distributed under the terms of the   **
+**  GNU General Public License ("GPL") version 2, or (at your option) any    **
+**  later version as published by the Free Software Foundation.              **
+**                                                                           **
+**  In the case this software is distributed under the GPL license,          **
+**  you should have received a copy of the GNU General Public License        **
+**  along with this software; if not, write to the Free Software             **
+**  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA  **
+**                                                                           **
+**  THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR       **
+**  IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
+**  OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  **
+**  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,         **
+**  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
+**  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
+**  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY    **
+**  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      **
+**  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
+**  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.        **
+**                                                                           **
+*******************************************************************************
+
+EH0*/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/iw_handler.h>
+
+#include "net80211/if_llc.h"
+#include "net80211/if_ethersubr.h"
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211.h"
+#include "net80211/ieee80211_tdls.h"
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_dot11_msg.h"
+#include "net80211/ieee80211_linux.h"
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#include <linux/if_bridge.h>
+#include "linux/net/bridge/br_private.h"
+#endif
+
+#define IEEE80211_TDLS_FRAME_MAX 512
+#define IEEE80211_TDLS_MAX_BRIDGE_CLIENTS 256
+#define IEEE80211_TDLS_BR_SUB_PORT_SHIFT 4
+
+/*
+ * FIXME: Make bridge table extraction more efficient.  Possible improvements:
+ * - Support a filter on the fillbuf call so that only the entries we want are
+ * returned.  Or perhaps a new, specialised bridge function.
+ * - Build an ordered linked list of pointers to bridge entries instead of
+ * sorting after the fact. This would be faster and save stack space (only 4
+ * bytes per bridge entry), but we'd have to lock out bridge updates while
+ * building the IE from the pointers.
+ * - Maintain the linked list in the bridge instead of creating it during each
+ * call.
+ *
+ * FIXME: This bridge_entries pointer doesn't belong here, fix with above.
+ */
+static struct __fdb_entry *bridge_entries;
+
+/*
+ * Printable names for TDLS action frames, indexed by the action code
+ * (see ieee80211_tdls_action_name_get()).  Order must match the
+ * IEEE80211_ACTION_TDLS_* numbering.
+ */
+static const char *ieee80211_tdls_action_name[] = {
+	"setup request",
+	"setup response",
+	"setup confirm",
+	"teardown",
+	"peer traffic indication",
+	"channel switch request",
+	"channel switch response",
+	"peer PSM request",
+	"peer PSM response",
+	"peer traffic response",
+	"discovery request"
+};
+
+/*
+ * Printable names for TDLS link states, indexed by the node's
+ * tdls_status value (see ieee80211_tdls_status_string_get()).
+ */
+static const char *ieee80211_tdls_stats_string[] = {
+	"none",
+	"inactive",
+	"starting",
+	"active",
+	"idle"
+};
+
+/*
+ * Return the number of significant bits in val: how many right shifts
+ * are needed before the value reaches zero.  Yields 0 for val == 0 and
+ * 32 for any value with the sign bit set (arithmetic shift never clears
+ * the top bit).
+ */
+static __inline int
+bit_num(int32_t val)
+{
+	int shifts = 0;
+
+	while (shifts < 32) {
+		if (val == 0)
+			break;
+		val >>= 1;
+		shifts++;
+	}
+
+	return shifts;
+}
+
+/*
+ * Map a TDLS action code to a printable name.
+ * Codes outside the known table map to "unknown".
+ */
+const char *
+ieee80211_tdls_action_name_get(uint8_t action)
+{
+	if (action < ARRAY_SIZE(ieee80211_tdls_action_name))
+		return ieee80211_tdls_action_name[action];
+
+	return "unknown";
+}
+
+/*
+ * Map a TDLS link status value to a printable name.
+ * Values outside the known table map to "unknown".
+ */
+const char *
+ieee80211_tdls_status_string_get(uint8_t stats)
+{
+	if (stats < ARRAY_SIZE(ieee80211_tdls_stats_string))
+		return ieee80211_tdls_stats_string[stats];
+
+	return "unknown";
+}
+
+/*
+ * TDLS is not allowed when TKIP is the pairwise cipher.
+ * Returns 1 if the security configuration permits TDLS, or 0 if it
+ * does not (privacy enabled with a TKIP unicast key).
+ * NOTE: the original comment had the return values inverted; the code
+ * below is the authoritative behaviour.
+ */
+static int
+ieee80211_tdls_sec_mode_valid(struct ieee80211vap *vap)
+{
+	if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+		if (vap->iv_bss->ni_ucastkey.wk_ciphertype == IEEE80211_CIPHER_TKIP)
+			return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Report whether the BSS unicast key uses a real cipher, i.e. whether
+ * the link is running with privacy.  Returns 1 when a cipher is
+ * configured, 0 when the cipher type is IEEE80211_CIPHER_NONE.
+ */
+static int
+ieee80211_tdls_get_privacy(struct ieee80211vap *vap)
+{
+	if (vap->iv_bss->ni_ucastkey.wk_ciphertype == IEEE80211_CIPHER_NONE)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Return a smoothed RSSI reading for the peer node.
+ * The accepted window is -1200..-1; values appear to be tenths of a
+ * dBm (-120.0 .. -0.1 dBm) -- NOTE(review): unit assumed from the
+ * (ni_rssi * 10) - 900 conversion below, confirm against ic_rssi().
+ * When the smoothed value is out of range, fall back to the
+ * instantaneous reading, or to a value derived from ni_rssi.
+ */
+int
+ieee80211_tdls_get_smoothed_rssi(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int32_t rssi = ic->ic_rssi(ni);
+	int32_t cur_rssi = 0;
+	int32_t cur_smthd_rssi = 0;
+
+	/* Accept the driver reading only inside the plausible window */
+	if (rssi < -1 && rssi > -1200)
+		cur_rssi = rssi;
+	else if (ni->ni_rssi > 0)
+		/* Correct pseudo RSSIs that apparently still get into the node table */
+		cur_rssi = (ni->ni_rssi * 10) - 900;
+
+	cur_smthd_rssi = ic->ic_smoothed_rssi(ni);
+	/* Out-of-range smoothed value: substitute the instantaneous one */
+	if ((cur_smthd_rssi > -1) || (cur_smthd_rssi < -1200))
+		cur_smthd_rssi = cur_rssi;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Peer: %pM, rssi=%d\n", __func__,
+			ni->ni_macaddr, cur_smthd_rssi);
+
+	return cur_smthd_rssi;
+}
+
+/*
+ * Clamp the peer's advertised association bandwidth to what the BSS
+ * actually runs at.  HT 40 MHz capability bits are stripped when the
+ * BSS is narrower than HT40; the VHT operating width is reduced to the
+ * minimum of the peer's capability, the BSS width, and our own
+ * operating width.
+ */
+static void
+ieee80211_tdls_update_peer_assoc_bw(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211com *ic = peer_ni->ni_vap->iv_ic;
+	struct ieee80211_ie_htcap *htcap = (struct ieee80211_ie_htcap *)tdls->htcap;
+	struct ieee80211_ie_vhtcap *vhtcap = (struct ieee80211_ie_vhtcap *)tdls->vhtcap;
+	enum ieee80211_vhtop_chanwidth assoc_vhtop_bw;
+	enum ieee80211_vhtop_chanwidth bss_bw;
+
+	/* BSS narrower than 40 MHz: drop the peer's HT40 capabilities */
+	if (htcap && (ic->ic_bss_bw < BW_HT40))
+		peer_ni->ni_htcap.cap &= ~(IEEE80211_HTCAP_C_CHWIDTH40 |
+				IEEE80211_HTCAP_C_SHORTGI40);
+
+	if (vhtcap) {
+		/* Widest width the peer's VHT capability IE claims */
+		switch (IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap)) {
+		case IEEE80211_VHTCAP_CW_160M:
+			assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_160MHZ;
+			break;
+
+		case IEEE80211_VHTCAP_CW_160_AND_80P80M:
+			assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_80PLUS80MHZ;
+			break;
+
+		case IEEE80211_VHTCAP_CW_80M_ONLY:
+		default:
+			assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+			break;
+		}
+
+		/* Width the BSS is actually operating at */
+		switch (ic->ic_bss_bw) {
+		case BW_HT160:
+			bss_bw = IEEE80211_VHTOP_CHAN_WIDTH_160MHZ;
+			break;
+		case BW_HT80:
+			bss_bw = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+			break;
+		case BW_HT40:
+		case BW_HT20:
+		default:
+			bss_bw = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+			break;
+		}
+
+		/* Final width = min(peer capability, BSS width, our own width) */
+		assoc_vhtop_bw = min(bss_bw, assoc_vhtop_bw);
+		peer_ni->ni_vhtop.chanwidth = min(ic->ic_vhtop.chanwidth, assoc_vhtop_bw);
+
+		/* 2.4 GHz VHT: cap at 20/40 and clamp against the 2.4 GHz config */
+		if (IS_IEEE80211_11NG_VHT_ENABLED(ic)) {
+			assoc_vhtop_bw = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+			assoc_vhtop_bw = min(bss_bw, assoc_vhtop_bw);
+			peer_ni->ni_vhtop.chanwidth =
+					min(ic->ic_vhtop_24g.chanwidth, assoc_vhtop_bw);
+		}
+	}
+}
+
+/*
+ * Refresh the peer node's rate sets and HT/VHT state from the IEs
+ * carried in a TDLS frame, then renegotiate the usable rates.
+ * Clears all HT/VHT state when the peer sent no HT capability IE, and
+ * all VHT state when VHT is absent or disabled locally.
+ */
+static void
+ieee80211_tdls_update_rates(struct ieee80211_node *peer_ni,
+	struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211com *ic;
+
+	if (!peer_ni || !tdls)
+		return;
+
+	ic = peer_ni->ni_vap->iv_ic;
+
+	if (tdls->rates)
+		ieee80211_parse_rates(peer_ni, tdls->rates, tdls->xrates);
+	if (tdls->htcap) {
+		peer_ni->ni_flags |= IEEE80211_NODE_HT;
+		ieee80211_parse_htcap(peer_ni, tdls->htcap);
+		if (tdls->htinfo)
+		      ieee80211_parse_htinfo(peer_ni, tdls->htinfo);
+
+		if (IS_IEEE80211_VHT_ENABLED(ic) && tdls->vhtcap) {
+			peer_ni->ni_flags |= IEEE80211_NODE_VHT;
+			ieee80211_parse_vhtcap(peer_ni, tdls->vhtcap);
+			if (tdls->vhtop)
+				ieee80211_parse_vhtop(peer_ni, tdls->vhtop);
+			else
+				/* No VHT operation IE: derive width from capabilities */
+				ieee80211_tdls_update_peer_assoc_bw(peer_ni, tdls);
+		} else {
+			/* Peer or local config is not VHT: wipe any stale VHT state */
+			peer_ni->ni_flags &= ~IEEE80211_NODE_VHT;
+			memset(&peer_ni->ni_vhtcap, 0, sizeof(peer_ni->ni_vhtcap));
+			memset(&peer_ni->ni_vhtop, 0, sizeof(peer_ni->ni_vhtop));
+		}
+	} else {
+		/* No HT capability IE: the peer is a legacy device */
+		memset(&peer_ni->ni_htcap, 0, sizeof(peer_ni->ni_htcap));
+		memset(&peer_ni->ni_htinfo, 0, sizeof(peer_ni->ni_htinfo));
+		memset(&peer_ni->ni_vhtcap, 0, sizeof(peer_ni->ni_vhtcap));
+		memset(&peer_ni->ni_vhtop, 0, sizeof(peer_ni->ni_vhtop));
+
+		peer_ni->ni_flags &= ~IEEE80211_NODE_HT;
+		peer_ni->ni_flags &= ~IEEE80211_NODE_VHT;
+	}
+
+	/* Re-derive the negotiated legacy and HT rate sets */
+	ieee80211_fix_rate(peer_ni, IEEE80211_F_DONEGO |
+		IEEE80211_F_DOXSECT | IEEE80211_F_DODEL);
+	ieee80211_fix_ht_rate(peer_ni, IEEE80211_F_DONEGO |
+		IEEE80211_F_DOXSECT | IEEE80211_F_DODEL);
+}
+
+/*
+ * Install the peer's unicast key in the driver, if one is present.
+ * Returns the driver's result, 0 when no key is configured, or -1
+ * when ni is NULL.
+ */
+int
+ieee80211_tdls_set_key(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	struct ieee80211_key *key;
+	const uint8_t *addr;
+	int ret = 0;
+
+	if (ni == NULL)
+		return -1;
+
+	addr = ni->ni_macaddr;
+	key = &ni->ni_ucastkey;
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"[%s] Setting key bcast=%u\n", ether_sprintf(addr),
+		IEEE80211_IS_MULTICAST(addr));
+
+	if (key->wk_keylen == 0)
+		return ret;
+
+	ieee80211_key_update_begin(vap);
+	ret = vap->iv_key_set(vap, key, addr);
+	ieee80211_key_update_end(vap);
+
+	return ret;
+}
+
+/*
+ * Remove the peer's unicast key from the driver and scrub the local
+ * key state.  Returns the driver's delete result, or -1 when ni is
+ * NULL.  The key structure is cleared *before* the driver call because
+ * software crypto reads ni_ucastkey during deletion.
+ */
+int
+ieee80211_tdls_del_key(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+	struct ieee80211_key *wk;
+	const uint8_t *mac;
+	int error = 0;
+
+	if (ni == NULL)
+		return -1;
+
+	mac = ni->ni_macaddr;
+	wk = &ni->ni_ucastkey;
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"[%s] deleting key bcast=%u\n", ether_sprintf(mac),
+		IEEE80211_IS_MULTICAST(mac));
+
+	/* wk must be set to ni->ni_ucastkey for sw crypto */
+	wk->wk_ciphertype = 0;
+	wk->wk_keytsc = 0;
+	/* keylen set to the full buffer so the whole key area is wiped */
+	wk->wk_keylen = sizeof(wk->wk_key);
+	memset(wk->wk_key, 0, sizeof(wk->wk_key));
+
+	ieee80211_key_update_begin(vap);
+	error = vap->iv_key_delete(vap, wk, mac);
+	ieee80211_key_update_end(vap);
+
+	return error;
+}
+
+/*
+ * Finish initialising a newly allocated TDLS peer node: copy basic
+ * association parameters from the first vap's BSS node, mark the node
+ * authenticated/QoS-capable, absorb the peer's IEs, and notify the
+ * driver of the new association.
+ */
+static void
+ieee80211_create_tdls_peer(struct ieee80211vap *vap, struct ieee80211_node *peer_ni,
+	struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	/* Template node: BSS of the first vap on this device */
+	struct ieee80211_node *ni = TAILQ_FIRST(&ic->ic_vaps)->iv_bss;
+
+	ieee80211_node_set_chan(ic, peer_ni);
+
+	peer_ni->ni_capinfo = ni->ni_capinfo;
+	peer_ni->ni_txpower = ni->ni_txpower;
+	peer_ni->ni_ath_flags = vap->iv_ath_cap;
+	peer_ni->ni_flags |= (IEEE80211_NODE_AUTH | IEEE80211_NODE_QOS);
+	peer_ni->ni_node_type = IEEE80211_NODE_TYPE_TDLS;
+
+	if (tdls && tdls->supp_chan)
+		ieee80211_parse_supp_chan(peer_ni, tdls->supp_chan);
+
+	ieee80211_tdls_update_rates(peer_ni, tdls);
+	ieee80211_update_current_mode(peer_ni);
+	peer_ni->ni_start_time_assoc = get_jiffies_64();
+	/* Let the driver create its per-association state (isnew = 1) */
+	if (ic->ic_newassoc != NULL)
+		ic->ic_newassoc(peer_ni, 1);
+}
+
+/*
+ * Push the configured TDLS U-APSD indication window down to the
+ * driver.  Only meaningful on a STA vap, and only when the driver
+ * provides the TDLS parameter hook.
+ */
+void
+ieee80211_tdls_update_uapsd_indicication_windows(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return;
+	if (ic->ic_set_tdls_param == NULL)
+		return;
+
+	ic->ic_set_tdls_param(vap->iv_bss, IOCTL_TDLS_UAPSD_IND_WND,
+				(int)vap->tdls_uapsd_indicat_wnd);
+}
+
+/*
+ * Refresh an existing peer node from the IEs of a TDLS setup
+ * request/response.  Other action types are ignored.  The expensive
+ * rate/mode renegotiation is only performed when the Quantenna, HT or
+ * VHT IEs actually differ from the cached copies.
+ */
+static void
+ieee80211_tdls_update_peer(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = peer_ni->ni_vap;
+	int ni_update_required = 0;
+
+	if (tdls == NULL)
+		return;
+
+	/* Only setup request/response frames carry authoritative IEs */
+	if ((tdls->act != IEEE80211_ACTION_TDLS_SETUP_REQ) &&
+		(tdls->act != IEEE80211_ACTION_TDLS_SETUP_RESP))
+		return;
+
+	if (tdls->supp_chan)
+		ieee80211_parse_supp_chan(peer_ni, tdls->supp_chan);
+
+	/* Quantenna IE appeared or changed? */
+	if (tdls->qtn_info) {
+		if (peer_ni->ni_qtn_assoc_ie == NULL)
+			ni_update_required = 1;
+		else if (memcmp(tdls->qtn_info, peer_ni->ni_qtn_assoc_ie,
+				sizeof(struct ieee80211_ie_qtn)))
+			ni_update_required = 1;
+	}
+
+	/* HT or VHT capability IEs changed? */
+	if (tdls->htcap && memcmp(tdls->htcap, &peer_ni->ni_ie_htcap,
+				sizeof(peer_ni->ni_ie_htcap)))
+		ni_update_required = 1;
+	if (tdls->vhtcap && memcmp(tdls->vhtcap, &peer_ni->ni_ie_vhtcap,
+				sizeof(peer_ni->ni_ie_vhtcap)))
+		ni_update_required = 1;
+
+	if (ni_update_required) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: update peer %pM, tdls_status: %d, ref_cnt = %d\n", __func__,
+			peer_ni->ni_macaddr, peer_ni->tdls_status, ieee80211_node_refcnt(peer_ni));
+
+		ieee80211_tdls_update_rates(peer_ni, tdls);
+		ieee80211_input_tdls_qtnie(peer_ni, vap, (struct ieee80211_ie_qtn *)tdls->qtn_info);
+		ieee80211_update_current_mode(peer_ni);
+	}
+}
+
+
+/*
+ * Find or create a TDLS peer node.
+ * The returned node holds a reference and must be released after use.
+ * Returns a pointer to the node if successful, else NULL.
+ */
+static struct ieee80211_node *
+ieee80211_tdls_find_or_create_peer(struct ieee80211_node *ni, uint8_t *peer_mac,
+		struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *peer_ni = NULL;
+
+	peer_ni = ieee80211_find_node(&ic->ic_sta, peer_mac);
+	if (peer_ni == NULL) {
+		peer_ni = ieee80211_alloc_node(&ic->ic_sta, vap, peer_mac, "TDLS peer");
+		if (peer_ni == NULL) {
+			/*
+			 * Bug fix: peer_ni is NULL here, so log the requested
+			 * address instead of dereferencing the NULL node.
+			 */
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: could not create peer node %pM\n",
+				__func__, peer_mac);
+			return NULL;
+		}
+
+		peer_ni->tdls_status = IEEE80211_TDLS_NODE_STATUS_INACTIVE;
+		/* Allocate an AID; fails when the node table is full */
+		if (ieee80211_aid_acquire(ic, peer_ni)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: could not create peer node %pM"
+				" - too many nodes\n",
+				__func__, peer_ni->ni_macaddr);
+			ieee80211_node_decref(peer_ni);
+			ieee80211_free_node(peer_ni);
+			return NULL;
+		}
+
+		IEEE80211_ADDR_COPY(peer_ni->ni_bssid, ni->ni_bssid);
+
+		if (tdls && tdls->qtn_info != NULL)
+			ieee80211_input_tdls_qtnie(peer_ni, vap,
+						(struct ieee80211_ie_qtn *)tdls->qtn_info);
+
+		/* Add node to macfw iv_aid_ni table */
+		ieee80211_create_tdls_peer(vap, peer_ni, tdls);
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: created peer node %pM aid = %d, ref_cnt = %d\n", __func__,
+			peer_ni->ni_macaddr, IEEE80211_NODE_AID(peer_ni), ieee80211_node_refcnt(peer_ni));
+	} else {
+		/* Known peer: just refresh its state from the received IEs */
+		ieee80211_tdls_update_peer(peer_ni, tdls);
+	}
+
+	return peer_ni;
+}
+
+/*
+ * Deliver a TDLS event to userspace as an IWEVCUSTOM wireless event.
+ * @event: event class (IEEE80211_EVENT_TDLS or STATION_LOW_ACK).
+ * @data:  must point to an enum ieee80211_tdls_operation; it is copied
+ *         into the event's sub_index.
+ * Always returns 0.
+ */
+int
+ieee80211_tdls_send_event(struct ieee80211_node *peer_ni,
+		enum ieee80211_tdls_event event, void *data)
+{
+	struct ieee80211vap *vap = peer_ni->ni_vap;
+	struct ieee80211_tdls_event_data event_data;
+	union iwreq_data wreq;
+	char *event_name = NULL;
+
+	switch (event) {
+	case IEEE80211_EVENT_TDLS:
+		event_name = "EVENT_TDLS";
+		break;
+	case IEEE80211_EVENT_STATION_LOW_ACK:
+		event_name = "EVENT_STA_LOW_ACK";
+		break;
+	default:
+		/*
+		 * Bug fix: an unhandled event left event_name NULL, which was
+		 * then passed to printf("%s") and strncpy() below.
+		 */
+		event_name = "EVENT_UNKNOWN";
+		break;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS:%s Send event %s, run operation %d\n", __func__,
+		event_name, *((enum ieee80211_tdls_operation *)data));
+
+	memset(&event_data, 0, sizeof(event_data));
+	/* event_data is zeroed, so copying at most size-1 keeps NUL termination */
+	strncpy(event_data.name, event_name, sizeof(event_data.name) - 1);
+	event_data.index = event;
+	event_data.sub_index = *((enum ieee80211_tdls_operation *)data);
+	memcpy(event_data.peer_mac, peer_ni->ni_macaddr, IEEE80211_ADDR_LEN);
+
+	memset(&wreq, 0, sizeof(wreq));
+	wreq.data.length = sizeof(event_data);
+	wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, (char *)&event_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_tdls_send_event);
+
+/*
+ * Look up the per-peer TDLS power-save tracking entry for peer_ni,
+ * creating (and inserting) a zeroed one if none exists.  Returns NULL
+ * only when allocation fails.
+ * NOTE(review): the lock is dropped between the lookup and the insert,
+ * so two concurrent callers could insert duplicates — confirm callers
+ * are serialised.
+ */
+static struct tdls_peer_ps_info *
+ieee80211_tdls_find_or_create_peer_ps_info(struct ieee80211vap *vap,
+	struct ieee80211_node *peer_ni)
+{
+	int found = 0;
+	unsigned long flags;
+	struct tdls_peer_ps_info *peer_ps_info = NULL;
+	int hash = IEEE80211_NODE_HASH(peer_ni->ni_macaddr);
+
+	spin_lock_irqsave(&vap->tdls_ps_lock, flags);
+	LIST_FOREACH(peer_ps_info, &vap->tdls_ps_hash[hash], peer_hash) {
+		if (IEEE80211_ADDR_EQ(peer_ps_info->peer_addr, peer_ni->ni_macaddr)) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&vap->tdls_ps_lock, flags);
+
+	if (found == 1)
+		return peer_ps_info;
+
+	/* Not found: allocate a fresh entry and link it into the hash bucket */
+	MALLOC(peer_ps_info, struct tdls_peer_ps_info *,
+				sizeof(*peer_ps_info), M_DEVBUF, M_WAITOK);
+	if (peer_ps_info != NULL) {
+		memcpy(peer_ps_info->peer_addr, peer_ni->ni_macaddr, IEEE80211_ADDR_LEN);
+		peer_ps_info->tdls_path_down_cnt = 0;
+		peer_ps_info->tdls_link_disabled_ints = 0;
+		spin_lock_irqsave(&vap->tdls_ps_lock, flags);
+		LIST_INSERT_HEAD(&vap->tdls_ps_hash[hash], peer_ps_info, peer_hash);
+		spin_unlock_irqrestore(&vap->tdls_ps_lock, flags);
+	}
+
+	return peer_ps_info;
+}
+
+/*
+ * Age the per-peer TDLS power-save state: decrement every peer's
+ * link-disabled interval counter, never going below zero.
+ */
+static void
+ieee80211_tdls_peer_ps_info_decre(struct ieee80211vap *vap)
+{
+	struct tdls_peer_ps_info *entry = NULL;
+	unsigned long flags;
+	int bucket;
+
+	spin_lock_irqsave(&vap->tdls_ps_lock, flags);
+	for (bucket = 0; bucket < IEEE80211_NODE_HASHSIZE; bucket++) {
+		LIST_FOREACH(entry, &vap->tdls_ps_hash[bucket], peer_hash) {
+			if ((entry != NULL) && (entry->tdls_link_disabled_ints > 0))
+				entry->tdls_link_disabled_ints--;
+		}
+	}
+	spin_unlock_irqrestore(&vap->tdls_ps_lock, flags);
+}
+
+/*
+ * Free every per-peer TDLS power-save tracking entry.
+ *
+ * Bug fix: the previous code called LIST_REMOVE()/FREE() on the current
+ * element from inside LIST_FOREACH(), so the iterator then read the
+ * next pointer out of freed memory.  Pop entries from the bucket head
+ * instead, which is safe under deletion.
+ */
+void
+ieee80211_tdls_free_peer_ps_info(struct ieee80211vap *vap)
+{
+	int i;
+	unsigned long flags;
+	struct tdls_peer_ps_info *peer_ps_info = NULL;
+
+	spin_lock_irqsave(&vap->tdls_ps_lock, flags);
+	for (i = 0; i < IEEE80211_NODE_HASHSIZE; i++) {
+		while ((peer_ps_info = LIST_FIRST(&vap->tdls_ps_hash[i])) != NULL) {
+			LIST_REMOVE(peer_ps_info, peer_hash);
+			FREE(peer_ps_info, M_DEVBUF);
+		}
+	}
+	spin_unlock_irqrestore(&vap->tdls_ps_lock, flags);
+}
+
+/*
+ * Decide whether the TDLS state machine may advance for this peer.
+ * If the link was recently disabled (tdls_link_disabled_ints > 0),
+ * only allow movement when the AP-path PHY rate has changed by more
+ * than QTN_TDLS_RATE_CHANGE_THRSH since the last sample; a big change
+ * also resets the disabled/down counters.  Returns 1 to move, 0 to
+ * hold the current state.
+ */
+static int
+ieee80211_tdls_state_should_move(struct ieee80211vap *vap,
+	struct ieee80211_node *peer_ni)
+{
+	int mu = STATS_SU;
+#define QTN_TDLS_RATE_CHANGE_THRSH	100
+	struct tdls_peer_ps_info *peer_ps_info = NULL;
+	int32_t cur_ap_tx_rate = vap->iv_bss->ni_shared_stats->tx[mu].avg_tx_phy_rate;
+	int32_t last_ap_tx_rate = vap->iv_bss->last_tx_phy_rate;
+	int32_t ap_rate_diff;
+	int should_move = 1;
+
+	peer_ps_info = ieee80211_tdls_find_or_create_peer_ps_info(vap, peer_ni);
+	if ((peer_ps_info) && (peer_ps_info->tdls_link_disabled_ints > 0)) {
+		/* Absolute difference between current and last AP tx rate */
+		if (cur_ap_tx_rate > last_ap_tx_rate)
+			ap_rate_diff = cur_ap_tx_rate - last_ap_tx_rate;
+		else
+			ap_rate_diff = last_ap_tx_rate - cur_ap_tx_rate;
+
+		if (ap_rate_diff > QTN_TDLS_RATE_CHANGE_THRSH) {
+			/* Conditions changed materially: forget the old failures */
+			peer_ps_info->tdls_path_down_cnt = 0;
+			peer_ps_info->tdls_link_disabled_ints = 0;
+		} else {
+			should_move = 0;
+		}
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: Peer: %pM link_disabled_ints = %d, "
+		"should %s move to next state\n", __func__, peer_ni->ni_macaddr,
+		(peer_ps_info == NULL) ? 0 : peer_ps_info->tdls_link_disabled_ints,
+		should_move ? "" : "not");
+
+	return should_move;
+}
+
+/*
+ * Decide whether to attempt TDLS link setup with this peer.
+ * Return 0: should not set up.
+ * Return 1: attempt TDLS link setup.
+ */
+int
+ieee80211_tdls_link_should_setup(struct ieee80211vap *vap,
+	struct ieee80211_node *peer_ni)
+{
+	int should_setup = 1;
+	int32_t smthd_rssi = ieee80211_tdls_get_smoothed_rssi(vap, peer_ni);
+
+	/* Link already established: nothing to set up */
+	if (IEEE80211_NODE_IS_TDLS_ACTIVE(peer_ni)) {
+		should_setup = 0;
+		goto OUT;
+	}
+
+	/* Path selection disabled: always allow setup, skip all filtering */
+	if (vap->tdls_path_sel_prohibited == 1)
+		goto OUT;
+	/*
+	 * If the peer is a third-party STA, establish the TDLS link first
+	 * and only then send training packets; otherwise the training
+	 * packets would be treated as attack traffic by the third-party
+	 * STA, which would deauthenticate the QTN STA -- not what we
+	 * expect.
+	 * NOTE(review): the guard below actually skips setup when the peer
+	 * *does* carry a QTN IE while periodic discovery is enabled --
+	 * confirm this matches the stated intent.
+	 */
+	if ((vap->tdls_discovery_interval > 0) && peer_ni->ni_qtn_assoc_ie) {
+		should_setup = 0;
+		goto OUT;
+	}
+
+	/* Too weak a signal for a useful direct link */
+	if (smthd_rssi < vap->tdls_min_valid_rssi) {
+		should_setup = 0;
+		goto OUT;
+	}
+
+	/* Rate-detection window active: defer to the state-machine gate */
+	if (timer_pending(&vap->tdls_rate_detect_timer))
+		should_setup = ieee80211_tdls_state_should_move(vap, peer_ni);
+
+OUT:
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: %s peer: %pM RSSI = %d, should %s setup TDLS link\n",
+		__func__, peer_ni->ni_qtn_assoc_ie ? "qtn" : "non-qtn",
+		peer_ni->ni_macaddr, smthd_rssi, should_setup ? "" : "not");
+
+	return should_setup;
+}
+
+/*
+ * Decide whether to answer a TDLS setup attempt from this peer.
+ * Returns 1 to send a response frame, 0 to stay silent.  With path
+ * selection prohibited the answer is always yes; otherwise the peer
+ * must be above the minimum RSSI, and while the rate-detection timer
+ * runs the state-machine gate has the final word.
+ */
+int
+ieee80211_tdls_link_should_response(struct ieee80211vap *vap,
+	struct ieee80211_node *peer_ni)
+{
+	int32_t rssi = ieee80211_tdls_get_smoothed_rssi(vap, peer_ni);
+	int should_resp = 1;
+
+	if (vap->tdls_path_sel_prohibited != 1) {
+		if (rssi < vap->tdls_min_valid_rssi)
+			should_resp = 0;
+		else if (timer_pending(&vap->tdls_rate_detect_timer))
+			should_resp = ieee80211_tdls_state_should_move(vap, peer_ni);
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: Peer: %pM RSSI = %d, should %s send response frame\n",
+		__func__, peer_ni->ni_macaddr, rssi, should_resp ? "" : "not");
+
+	return should_resp;
+}
+
+/*
+ * Decide if the TDLS link should be torn down or established.
+ * Return 0: tear down the TDLS link.
+ * Return 1: establish the TDLS link.
+ * Return -1: no decision yet (hysteresis not satisfied).
+ * Compares exponentially weighted AP-path vs direct-path PHY rates and
+ * requires tdls_switch_ints consecutive agreeing samples before
+ * switching either way.
+ */
+static int
+ieee80211_tdls_data_path_selection(struct ieee80211vap *vap,
+	struct ieee80211_node *peer_ni)
+{
+	int mu = STATS_SU;
+	int32_t last_ap_tx_rate = vap->iv_bss->last_tx_phy_rate;
+	int32_t last_peer_tx_rate = peer_ni->last_tx_phy_rate;
+	int32_t avg_ap_tx_rate = 0;
+	int32_t avg_peer_tx_rate = 0;
+	int32_t smthd_rssi = 0;
+	int is_tdls_path = -1;
+	int use_tdls_path = 0;
+	int32_t cur_ap_tx_rate = vap->iv_bss->ni_shared_stats->tx[mu].avg_tx_phy_rate;
+	int32_t cur_peer_tx_rate = peer_ni->ni_shared_stats->tx[mu].avg_tx_phy_rate;
+
+	/* Path selection disabled: unconditionally prefer the direct path */
+	if (vap->tdls_path_sel_prohibited == 1) {
+		is_tdls_path = 1;
+		goto out;
+	}
+
+	/* Weighted average of last and current rates (weight in tenths) */
+	if (last_ap_tx_rate <= 0)
+		last_ap_tx_rate = cur_ap_tx_rate;
+	avg_ap_tx_rate = last_ap_tx_rate * vap->tdls_phy_rate_wgt / 10 +
+				cur_ap_tx_rate * (10 - vap->tdls_phy_rate_wgt) / 10;
+	if (last_peer_tx_rate <= 0)
+		last_peer_tx_rate = cur_peer_tx_rate;
+	avg_peer_tx_rate = last_peer_tx_rate * vap->tdls_phy_rate_wgt / 10 +
+				cur_peer_tx_rate * (10 - vap->tdls_phy_rate_wgt) / 10;
+
+	smthd_rssi = ieee80211_tdls_get_smoothed_rssi(vap, peer_ni);
+	if (smthd_rssi >= vap->tdls_min_valid_rssi) {
+		if ((vap->iv_bss->ni_training_flag == NI_TRAINING_END) &&
+				(peer_ni->ni_training_flag == NI_TRAINING_END)) {
+			/*
+			 * Use the TDLS path if the Tx rate is better than a predefined proportion
+			 * of the Tx rate via the AP.  E.g. if the weighting is 8, then the direct
+			 * rate must be at least 80% of the rate via the AP.
+			 */
+			if ((avg_peer_tx_rate > vap->tdls_path_sel_rate_thrshld) &&
+					(avg_peer_tx_rate >= avg_ap_tx_rate * vap->tdls_path_sel_weight / 10))
+				use_tdls_path = 1;
+			else
+				use_tdls_path = 0;
+		} else {
+			/* Rate training not finished: keep the direct path for now */
+			use_tdls_path = 1;
+		}
+	} else {
+		use_tdls_path = 0;
+	}
+
+	/* Hysteresis: restart the streak whenever the preference flips */
+	if (use_tdls_path != peer_ni->tdls_last_path_sel)
+		peer_ni->tdls_path_sel_num = 0;
+
+	if (use_tdls_path == 0)
+		peer_ni->tdls_path_sel_num--;
+	else
+		peer_ni->tdls_path_sel_num++;
+
+	/* Commit only after tdls_switch_ints consecutive agreeing samples */
+	if (peer_ni->tdls_path_sel_num >= vap->tdls_switch_ints)
+		is_tdls_path = 1;
+	else if (peer_ni->tdls_path_sel_num <= (0 - vap->tdls_switch_ints))
+		is_tdls_path = 0;
+
+	vap->iv_bss->last_tx_phy_rate = avg_ap_tx_rate;
+	peer_ni->last_tx_phy_rate = avg_peer_tx_rate;
+	peer_ni->tdls_last_path_sel =  use_tdls_path;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: peer %pM rssi=%d, path_sel_num=%d, avg_ap_rate=%d, avg_peer_rate=%d\n",
+		__func__, peer_ni->ni_macaddr, smthd_rssi, peer_ni->tdls_path_sel_num,
+		avg_ap_tx_rate, avg_peer_tx_rate);
+
+out:
+	if (is_tdls_path == 1)
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Creating TDLS data link with %pM\n",
+			__func__, peer_ni->ni_macaddr);
+	else if (is_tdls_path == 0)
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Tearing down TDLS data link with %pM\n",
+			__func__, peer_ni->ni_macaddr);
+
+	return is_tdls_path;
+}
+
+/*
+ * Temporarily move the radio to @chan for a TDLS channel-switch or
+ * off-channel operation.  Refuses while another channel switch is in
+ * progress.  On success records @ni as the node that owns the switch.
+ * Returns 0 on success, -1 on failure (or the driver's error code).
+ */
+int ieee80211_tdls_remain_on_channel(struct ieee80211vap *vap,
+		struct ieee80211_node *ni, uint8_t chan, uint8_t bandwidth,
+		uint64_t start_tsf, uint32_t timeout, uint32_t duration)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *newchan = NULL;
+	int ret = 0;
+
+	if (ic->ic_flags & IEEE80211_F_CHANSWITCH) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Channel switch already in progress, owner=%d\n",
+			__func__, ic->ic_csa_reason);
+		return -1;
+	}
+
+	/* Prefer the configured PHY mode, fall back to any mode */
+	newchan = ic->ic_findchannel(ic, chan, ic->ic_des_mode);
+	if (newchan == NULL) {
+		newchan = ic->ic_findchannel(ic, chan, IEEE80211_MODE_AUTO);
+		if (newchan == NULL) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS,
+				IEEE80211_TDLS_MSG_DBG, "TDLS %s: Fail to find target channel\n", __func__);
+			return -1;
+		}
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Start to switch channel %d (bw:%d), start_tsf: %llu, duration: %u\n",
+			__func__, chan, bandwidth, start_tsf, duration);
+
+	ret = ic->ic_remain_on_channel(ic, ni, newchan, bandwidth, start_tsf, timeout, duration, 0);
+	if (!ret)
+		vap->tdls_cs_node = ni;
+
+	return ret;
+}
+
+/*
+ * Allocate and initialise a TDLS action frame skb.
+ *
+ * @frm_p:  in/out write pointer; on return it points just past the
+ *          Ethernet header, payload type, category and action octets.
+ * @direct: nonzero to address the frame to the peer directly, zero to
+ *          broadcast it.
+ * Returns the skb, or NULL when no buffer is available.
+ * NOTE(review): the skb is left at the full IEEE80211_TDLS_FRAME_MAX
+ * length here; presumably the caller trims it after appending IEs --
+ * confirm at the call sites.
+ */
+static struct sk_buff *
+ieee80211_tdls_init_frame(struct ieee80211_node *ni, uint8_t **frm_p,
+		uint8_t action, uint8_t direct)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ether_header *eh;
+
+	struct sk_buff *skb;
+	uint8_t *frm = *frm_p;
+	uint8_t payload_type = IEEE80211_SNAP_TYPE_TDLS;
+	uint16_t frm_len = IEEE80211_TDLS_FRAME_MAX;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_NODE, IEEE80211_TDLS_MSG_DBG,
+		"%s: Sending %s\n", __func__, ieee80211_tdls_action_name_get(action));
+
+	skb = dev_alloc_skb(frm_len);
+	if (skb == NULL) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_NODE, IEEE80211_TDLS_MSG_WARN,
+			"%s: cannot get buf; size %u", __func__, frm_len);
+		vap->iv_stats.is_tx_nobuf++;
+		return NULL;
+	}
+	frm = skb_put(skb, frm_len);
+
+	skb->priority = WME_AC_VI;	/* "unless specified otherwise" 11.21.2 */
+	M_FLAG_SET(skb, M_CLASSIFY);
+
+	/* Ethernet header: peer-addressed or broadcast */
+	eh = (struct ether_header *)frm;
+	if (direct) {
+		IEEE80211_ADDR_COPY(eh->ether_dhost, ni->ni_macaddr);
+		IEEE80211_ADDR_COPY(eh->ether_shost, vap->iv_myaddr);
+	} else {
+		IEEE80211_ADDR_COPY(eh->ether_dhost, vap->iv_dev->broadcast);
+		IEEE80211_ADDR_COPY(eh->ether_shost, vap->iv_myaddr);
+	}
+	eh->ether_type = htons(ETHERTYPE_80211MGT);
+	frm += ETHER_HDR_LEN;
+
+	/* TDLS payload preamble: payload type, category, action */
+	*frm++ = payload_type;
+	*frm++ = IEEE80211_ACTION_CAT_TDLS;
+	*frm++ = action;
+
+	*frm_p = frm;
+
+	return skb;
+}
+
+/*
+ * True when TDLS-over-QHop is enabled on this vap and the associated
+ * BSS advertises a QHop extender role.
+ */
+static int
+ieee80211_tdls_over_qhop_enabled(struct ieee80211vap *vap)
+{
+	if (!vap->tdls_over_qhop_en)
+		return 0;
+
+	return vap->iv_bss->ni_ext_role != IEEE80211_EXTENDER_ROLE_NONE;
+}
+
+/*
+ * Check whether bssid matches the MBS BSSID or any RBS BSSID listed in
+ * the BSS node's extender-BSSID IE.  Returns 1 when allowed, else 0
+ * (including when no extender-BSSID IE is present).
+ */
+static int
+ieee80211_tdls_ext_bssid_allowed(struct ieee80211vap *vap, u8 *bssid)
+{
+	struct ieee80211_node *bss = vap->iv_bss;
+	struct ieee80211_qtn_ext_bssid *ext;
+	int idx;
+
+	if (bss == NULL || bss->ni_ext_bssid_ie == NULL)
+		return 0;
+
+	ext = (struct ieee80211_qtn_ext_bssid *)bss->ni_ext_bssid_ie;
+
+	if (!is_zero_ether_addr(ext->mbs_bssid) &&
+			IEEE80211_ADDR_EQ(ext->mbs_bssid, bssid))
+		return 1;
+
+	for (idx = 0; idx < QTN_MAX_RBS_NUM; idx++) {
+		if (!is_zero_ether_addr(ext->rbs_bssid[idx]) &&
+				IEEE80211_ADDR_EQ(ext->rbs_bssid[idx], bssid))
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Copy an existing TDLS Link Identifier element from the caller's IE
+ * buffer into the frame under construction.
+ *
+ * @frm: in/out frame write pointer, advanced past the copied element.
+ * Returns 1 if a Link ID element was found and copied, else 0.
+ *
+ * Robustness fix: the scan is bounded so that a malformed element
+ * length can no longer drive the cursor or the memcpy() source past
+ * the end of data->ie_buf.
+ */
+static int
+ieee80211_tdls_copy_link_id(struct ieee80211_node *ni, uint8_t **frm,
+			struct ieee80211_tdls_action_data *data)
+{
+	uint8_t *ie;
+	uint8_t *ie_end;
+
+	if (!ni || !frm || !data)
+		return 0;
+
+	ie = data->ie_buf;
+	ie_end = ie + data->ie_buflen;
+	/* Stop if an element header or its body would overrun the buffer */
+	while ((ie + 2 <= ie_end) && (ie + 2 + ie[1] <= ie_end)) {
+		if (*ie == IEEE80211_ELEMID_TDLS_LINK_ID) {
+			memcpy(*frm, ie, ie[1] + 2);
+			*frm += ie[1] + 2;
+			return 1;
+		}
+		ie += ie[1] + 2;
+	}
+
+	return 0;
+}
+
+/*
+ * Append the TDLS Link Identifier element to the frame, either by
+ * copying one supplied by userspace in @data or by building it from
+ * the BSSID and initiator/responder addresses.  Returns 0 on success,
+ * 1 when the element would not fit in the frame buffer.
+ */
+static int
+ieee80211_tdls_add_tlv_link_id(struct ieee80211_node *ni,
+	struct sk_buff *skb, uint8_t action, uint8_t **frm, uint8_t *da,
+	struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_tdls_link_id *link_id;
+
+	/* Bounds check against the end of the skb data area */
+	if ((*frm + sizeof(*link_id)) > (skb->data + skb->len)) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ELEMID,
+			"[%s] TDLS %s frame is too big\n", vap->iv_dev->name,
+			ieee80211_tdls_action_name_get(action));
+		vap->iv_stats.is_rx_elem_toosmall++;
+		return 1;
+	}
+
+	/* Build the element only if userspace did not already supply one */
+	if (!ieee80211_tdls_copy_link_id(ni, frm, data)) {
+		link_id = (struct ieee80211_tdls_link_id *)*frm;
+		link_id->id = IEEE80211_ELEMID_TDLS_LINK_ID;
+		link_id->len = sizeof(*link_id) - 2;
+		IEEE80211_ADDR_COPY(link_id->bssid, ni->ni_bssid);
+		switch (action) {
+		case IEEE80211_ACTION_TDLS_DISC_REQ:
+			/* We initiate discovery: we are the initiator */
+			IEEE80211_ADDR_COPY(link_id->init_sa, vap->iv_myaddr);
+			IEEE80211_ADDR_COPY(link_id->resp_sa, da);
+			break;
+		case IEEE80211_ACTION_PUB_TDLS_DISC_RESP:
+			/* Responding to the peer's discovery: peer is initiator */
+			IEEE80211_ADDR_COPY(link_id->init_sa, da);
+			IEEE80211_ADDR_COPY(link_id->resp_sa, vap->iv_myaddr);
+			break;
+		case IEEE80211_ACTION_TDLS_SETUP_REQ:
+		case IEEE80211_ACTION_TDLS_SETUP_RESP:
+		case IEEE80211_ACTION_TDLS_SETUP_CONFIRM:
+		case IEEE80211_ACTION_TDLS_TEARDOWN:
+		case IEEE80211_ACTION_TDLS_PTI:
+		case IEEE80211_ACTION_TDLS_PEER_TRAF_RESP:
+		case IEEE80211_ACTION_TDLS_CS_REQ:
+		case IEEE80211_ACTION_TDLS_CS_RESP:
+			/*
+			 * tdls_initiator means who starts to setup TDLS link firstly.
+			 * 1 indicates our own peer setups TDLS link.
+			 * 0 indicates the other peer setups TDLS link.
+			 */
+			if (ni->tdls_initiator) {
+				IEEE80211_ADDR_COPY(link_id->init_sa, vap->iv_myaddr);
+				IEEE80211_ADDR_COPY(link_id->resp_sa, da);
+			} else {
+				IEEE80211_ADDR_COPY(link_id->init_sa, da);
+				IEEE80211_ADDR_COPY(link_id->resp_sa, vap->iv_myaddr);
+			}
+			break;
+		}
+
+		*frm += sizeof(*link_id);
+	}
+
+	return 0;
+}
+
+/*
+ * Append the 16-bit capability information field (little-endian) to
+ * the frame and advance the write pointer by two octets.  Always
+ * returns 0.
+ */
+static int
+ieee80211_tdls_add_cap(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint16_t caps;
+
+	/* Base capability: IBSS in ad hoc mode, ESS otherwise */
+	caps = (vap->iv_opmode == IEEE80211_M_IBSS) ?
+			IEEE80211_CAPINFO_IBSS : IEEE80211_CAPINFO_ESS;
+
+	if (vap->iv_flags & IEEE80211_F_PRIVACY)
+		caps |= IEEE80211_CAPINFO_PRIVACY;
+	/* Short preamble is only advertised on 2.4 GHz channels */
+	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+			IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
+		caps |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+	if (ic->ic_flags & IEEE80211_F_SHSLOT)
+		caps |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+	if (ic->ic_flags & IEEE80211_F_DOTH)
+		caps |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+
+	*(__le16 *)*frm = htole16(caps);
+	*frm += 2;
+
+	return 0;
+}
+
+/*
+ * Append the Supported Rates element for the current PHY mode.
+ * Only legacy (non-HT) rates are advertised, clamped to the
+ * IEEE80211_RATE_SIZE entries that fit in this element; any overflow
+ * is carried by the Extended Supported Rates element instead.
+ */
+static int
+ieee80211_tdls_add_tlv_rates(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t mode = ic->ic_curmode;
+	struct ieee80211_rateset *rs = &ic->ic_sup_rates[mode];
+	uint8_t *ie = *frm;
+	int nrates;
+
+	*ie++ = IEEE80211_ELEMID_RATES;
+	nrates = rs->rs_legacy_nrates;
+	/* Clamp: this element carries at most IEEE80211_RATE_SIZE rates */
+	if (nrates > IEEE80211_RATE_SIZE)
+		nrates = IEEE80211_RATE_SIZE;
+	*ie++ = nrates;
+	memcpy(ie, rs->rs_rates, nrates);
+	*frm += nrates + 2;
+
+	return 0;
+}
+
+/*
+ * Append the cached Country element (IE header + body) to the frame
+ * when one has been recorded on the ic; otherwise emit nothing.
+ */
+static int
+ieee80211_tdls_add_tlv_country(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	int body_len = ic->ic_country_ie.country_len;
+
+	if (body_len > 0) {
+		/* country_len excludes the 2-byte id/len header */
+		memcpy(*frm, (uint8_t *)&ic->ic_country_ie, body_len + 2);
+		*frm += body_len + 2;
+	}
+
+	return 0;
+}
+
+/*
+ * Append the Extended Supported Rates element carrying the legacy
+ * rates that did not fit in the Supported Rates element.
+ *
+ * Fix: the guard previously tested rs_nrates while the count was
+ * derived from rs_legacy_nrates (the counter the Supported Rates TLV
+ * uses).  If rs_nrates > IEEE80211_RATE_SIZE but rs_legacy_nrates is
+ * not, `nrates` went negative, passed the `if (nrates)` test, and the
+ * negative length wrapped to a huge size_t in memcpy().  Both the
+ * guard and the count now use rs_legacy_nrates.
+ */
+static int
+ieee80211_tdls_add_tlv_xrates(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t mode = ic->ic_curmode;
+	struct ieee80211_rateset *rs = &ic->ic_sup_rates[mode];
+	uint8_t *ie = *frm;
+	int nrates = 0;
+
+	if (rs->rs_legacy_nrates > IEEE80211_RATE_SIZE) {
+		nrates = rs->rs_legacy_nrates - IEEE80211_RATE_SIZE;
+		if (nrates > 0) {
+			*ie++ = IEEE80211_ELEMID_XRATES;
+			*ie++ = nrates;
+			memcpy(ie, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
+			*frm += nrates + 2;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Copy every RSN element found in the caller-supplied IE buffer into
+ * the frame verbatim.  Returns 1 on a NULL argument, 0 otherwise.
+ */
+static int
+ieee80211_tdls_add_tlv_rsn(struct ieee80211_node *ni, uint8_t **frm,
+			struct ieee80211_tdls_action_data *data)
+{
+	uint8_t *cur;
+	uint8_t *end;
+
+	if (!ni || !frm || !data)
+		return 1;
+
+	/* Walk the IE list; each element occupies id + len + body bytes */
+	for (cur = data->ie_buf, end = cur + data->ie_buflen;
+			cur < end; cur += cur[1] + 2) {
+		if (*cur == IEEE80211_ELEMID_RSN) {
+			memcpy(*frm, cur, cur[1] + 2);
+			*frm += cur[1] + 2;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Append the Extended Capabilities element.  excapinfo[0] is emitted
+ * first and excapinfo[1] second, as two little-endian 32-bit words;
+ * TDLS support/prohibition and channel-switch bits follow the VAP's
+ * feature flags, and the TDLS U-APSD bit is always set.
+ */
+static int
+ieee80211_tdls_add_tlv_ext_cap(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	uint8_t *excap_p = *frm;
+	uint32_t excapinfo[2] = {0, 0};
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_PROHIB)
+		excapinfo[1] |= IEEE80211_EXTCAP2_TDLS_PROHIB;
+	else
+		excapinfo[1] |= IEEE80211_EXTCAP2_TDLS;
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB)
+		excapinfo[1] |= IEEE80211_EXTCAP2_TDLS_CS_PROHIB;
+	else
+		excapinfo[0] |= IEEE80211_EXTCAP1_TDLS_CS;
+
+	excapinfo[0] |= IEEE80211_EXTCAP1_TDLS_UAPSD;
+	*excap_p++ = IEEE80211_ELEMID_EXTCAP;
+	*excap_p++ = IEEE8211_EXTCAP_LENGTH;
+	/* Two consecutive little-endian 32-bit stores fill the body */
+	*(__le32 *)excap_p = htole32(excapinfo[0]);
+	excap_p += 4;
+	*(__le32 *)excap_p = htole32(excapinfo[1]);
+
+	*frm += IEEE8211_EXTCAP_LENGTH + 2;
+
+	return 0;
+}
+
+/*
+ * Append the vendor-specific WMM Information element (Microsoft OUI
+ * 00:50:f2) with the U-APSD flag mask set in the info octet.
+ */
+static int
+ieee80211_tdls_add_tlv_qos_cap(struct ieee80211_node *ni, uint8_t **frm)
+{
+	static const u_int8_t oui[3] = {0x00, 0x50, 0xf2};
+	struct ieee80211_ie_wme *ie = (struct ieee80211_ie_wme *) *frm;
+
+	ie->wme_id = IEEE80211_ELEMID_VENDOR;
+	/* IE length excludes the 2-byte id/len header */
+	ie->wme_len = sizeof(*ie) - 2;
+	memcpy(ie->wme_oui,oui,sizeof(oui));
+	ie->wme_type = WME_OUI_TYPE;
+	ie->wme_subtype = WME_INFO_OUI_SUBTYPE;
+	ie->wme_version = WME_VERSION;
+	ie->wme_info = 0;
+	ie->wme_info |= WME_UAPSD_MASK;
+
+	*frm += sizeof(*ie);
+
+	return 0;
+}
+
+/*
+ * Append the vendor-specific WME/WMM Parameter element populated
+ * from the current BSS-channel per-AC EDCA parameters.
+ */
+static int
+ieee80211_tdls_add_tlv_edca_param(struct ieee80211_node *ni, uint8_t **frm)
+{
+#define	ADDSHORT(frm, v) do {			\
+	frm[0] = (v) & 0xff;			\
+	frm[1] = (v) >> 8;			\
+	frm += 2;				\
+	} while (0)
+	static const u_int8_t oui[3] = {0x00, 0x50, 0xf2};
+	struct ieee80211_wme_param *ie = (struct ieee80211_wme_param *) *frm;
+	struct ieee80211_wme_state *wme = &ni->ni_ic->ic_wme;
+	struct ieee80211vap *vap = ni->ni_vap;
+	u_int8_t *frame_p = NULL;
+	int i;
+
+	ie->param_id = IEEE80211_ELEMID_VENDOR;
+	ie->param_len = sizeof(*ie) - 2;
+	memcpy(ie->param_oui,oui,sizeof(oui));
+	ie->param_oui_type = WME_OUI_TYPE;
+	ie->param_oui_sybtype = WME_PARAM_OUI_SUBTYPE;
+	ie->param_version = WME_VERSION;
+	ie->param_qosInfo = 0;
+	ie->param_qosInfo |= WME_UAPSD_MASK;
+	ie->param_reserved = 0;
+
+	/*
+	 * Skip the 10 fixed octets written above (id, len, 3-byte OUI,
+	 * type, subtype, version, QoS info, reserved) to reach the
+	 * per-AC parameter records.
+	 */
+	frame_p = *frm + 10;
+	for(i = 0; i < WME_NUM_AC; i++) {
+		const struct wmm_params *ac;
+
+		ac = &wme->wme_bssChanParams.cap_wmeParams[i];
+
+		/* ACI/ACM/AIFSN octet, ECWmin/ECWmax octet, then 16-bit TXOP */
+		*frame_p++ = SM(i, WME_PARAM_ACI) |
+			SM(ac->wmm_acm, WME_PARAM_ACM) |
+			SM(ac->wmm_aifsn, WME_PARAM_AIFSN);
+		*frame_p++ = SM(ac->wmm_logcwmax, WME_PARAM_LOGCWMAX) |
+			SM(ac->wmm_logcwmin, WME_PARAM_LOGCWMIN);
+		ADDSHORT(frame_p, ac->wmm_txopLimit);
+	}
+
+	*frm += sizeof(*ie);
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"ieee80211_tdls_add_tlv_edca_param: add edca parameter qos info"
+			" is [%02x] reserve is [%02x]\n", ie->param_qosInfo, ie->param_reserved);
+	return 0;
+#undef ADDSHORT
+}
+
+/*
+ * Copy every Fast Transition element (FTIE) from the caller-supplied
+ * IE buffer into the frame verbatim.  Returns 1 on a NULL argument,
+ * 0 otherwise.
+ */
+static int
+ieee80211_tdls_add_tlv_ftie(struct ieee80211_node *ni, uint8_t **frm,
+			struct ieee80211_tdls_action_data *data)
+{
+	uint8_t *cur;
+	uint8_t *end;
+
+	if (!ni || !frm || !data)
+		return 1;
+
+	/* Walk the IE list; each element occupies id + len + body bytes */
+	for (cur = data->ie_buf, end = cur + data->ie_buflen;
+			cur < end; cur += cur[1] + 2) {
+		if (*cur == IEEE80211_ELEMID_FTIE) {
+			memcpy(*frm, cur, cur[1] + 2);
+			*frm += cur[1] + 2;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Copy every Timeout Interval element (TPK key lifetime) from the
+ * caller-supplied IE buffer into the frame verbatim.  Returns 1 on a
+ * NULL argument, 0 otherwise.
+ */
+static int
+ieee80211_tdls_add_tlv_tpk_timeout(struct ieee80211_node *ni, uint8_t **frm,
+			struct ieee80211_tdls_action_data *data)
+{
+	uint8_t *cur;
+	uint8_t *end;
+
+	if (!ni || !frm || !data)
+		return 1;
+
+	/* Walk the IE list; each element occupies id + len + body bytes */
+	for (cur = data->ie_buf, end = cur + data->ie_buflen;
+			cur < end; cur += cur[1] + 2) {
+		if (*cur == IEEE80211_ELEMID_TIMEOUT_INT) {
+			memcpy(*frm, cur, cur[1] + 2);
+			*frm += cur[1] + 2;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Append the Supported Regulatory Classes element: the current
+ * operating class first, then every class whose bit is set in
+ * ic_oper_class.
+ * NOTE(review): the current class can appear twice when its bit is
+ * also set in ic_oper_class — confirm whether peers tolerate that.
+ */
+static int
+ieee80211_tdls_add_tlv_sup_reg_class(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t *ie = *frm;
+	uint8_t *ie_len = *frm + 1;	/* points at the IE length octet */
+	uint8_t cur_reg_class;
+	int bandwidth;
+	int i;
+
+	bandwidth = ieee80211_get_bw(ic);
+	cur_reg_class = ieee80211_get_current_operating_class(ic->ic_country_code,
+			ic->ic_bsschan->ic_ieee, bandwidth);
+
+	*ie++ = IEEE80211_ELEMID_REG_CLASSES;
+	*ie++ = 1;	/* length so far: just the current class */
+	*ie++ = cur_reg_class;
+
+	for (i = 0; i < IEEE80211_OPER_CLASS_MAX; i++) {
+		if (isset(ic->ic_oper_class, i)) {
+			*ie++ = i;
+			(*ie_len)++;	/* grow the length octet in place */
+		}
+	}
+
+	*frm += *ie_len + 2;
+
+	return 0;
+}
+
+/*
+ * Append the HT Capabilities element when operating in an 11na/11ng
+ * or VHT mode.  A local copy of ic_htcap is used so the 40 MHz width
+ * and short-GI-40 bits can be cleared when the BSS bandwidth is
+ * below 40 MHz without touching the shared state.
+ */
+static int
+ieee80211_tdls_add_tlv_ht_cap(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_htcap htcap;
+
+	if (!IS_IEEE80211_11NA(ic) && !IS_IEEE80211_11NG(ic) &&
+			!IS_IEEE80211_VHT_ENABLED(ic))
+		return 0;
+
+	memcpy(&htcap, &ic->ic_htcap, sizeof(htcap));
+	if (ic->ic_bss_bw < BW_HT40)
+		htcap.cap &= ~(IEEE80211_HTCAP_C_CHWIDTH40 |
+					IEEE80211_HTCAP_C_SHORTGI40);
+
+	*frm = ieee80211_add_htcap(ni, *frm, &htcap,
+				IEEE80211_FC0_SUBTYPE_ACTION);
+
+	return 0;
+}
+
+/*
+ * Append the HT Operation element when operating in an 11na/11ng or
+ * VHT mode.  A local copy of ic_htinfo is patched with the current
+ * BSS primary channel and secondary-channel offset before emission.
+ */
+static int
+ieee80211_tdls_add_tlv_ht_oper(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	struct ieee80211_htinfo htinfo;
+	int16_t htinfo_channel_width = 0;
+	int16_t htinfo_2nd_channel_offset = 0;
+
+	if (!IS_IEEE80211_11NA(ic) && !IS_IEEE80211_11NG(ic) &&
+			!IS_IEEE80211_VHT_ENABLED(ic))
+		return 0;
+
+	memcpy(&htinfo, &ic->ic_htinfo, sizeof(htinfo));
+	ieee80211_get_channel_bw_offset(ic, &htinfo_channel_width,
+			&htinfo_2nd_channel_offset);
+
+	/* Only patch the copy when the BSS channel really is an HT channel */
+	if (IEEE80211_IS_CHAN_ANYN(ic->ic_bsschan) &&
+			(ic->ic_curmode >= IEEE80211_MODE_11NA)) {
+		htinfo.ctrlchannel = ieee80211_chan2ieee(ic, ic->ic_bsschan);
+		if (ic->ic_bss_bw >= BW_HT40) {
+			htinfo.byte1 |= (htinfo_channel_width ?
+				IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40 : 0x0);
+			htinfo.choffset = htinfo_2nd_channel_offset;
+		} else {
+			/* 20 MHz BSS: clear 40 MHz hints, no secondary channel */
+			htinfo.byte1 &= ~IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40;
+			htinfo.choffset = IEEE80211_HTINFO_CHOFF_SCN;
+		}
+	}
+
+	*frm = ieee80211_add_htinfo(ni, *frm, &htinfo);
+
+	return 0;
+}
+
+/*
+ * Append a 20/40 BSS Coexistence element with the Information
+ * Request bit set.  Always returns 0.
+ */
+static int
+ieee80211_tdls_add_tlv_bss_2040_coex(struct ieee80211_node *ni, u_int8_t **frm)
+{
+	/* Test case 5.9 requires information request bit must be set */
+	uint8_t coex_flags = WLAN_20_40_BSS_COEX_INFO_REQ;
+
+	*frm = ieee80211_add_20_40_bss_coex_ie(*frm, coex_flags);
+
+	return 0;
+}
+
+/*
+ * Append a Secondary Channel Offset element for the channel-switch
+ * target channel `csa_chan`.  The offset (above/below/none) is taken
+ * from the target channel's HT40U/HT40D flags.  Emits nothing (and
+ * leaves *frm untouched) when the channel cannot be resolved.
+ */
+static void
+ieee80211_tdls_add_sec_chan_off(uint8_t **frm,
+		struct ieee80211vap *vap, uint8_t csa_chan)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	uint8_t sec_position = IEEE80211_HTINFO_EXTOFFSET_NA;
+	struct ieee80211_ie_sec_chan_off *sco = (struct ieee80211_ie_sec_chan_off *)(*frm);
+	chan = ieee80211_find_channel_by_ieee(ic, csa_chan);
+
+	if (!chan) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: failed to find the target channel %u\n", __func__, csa_chan);
+		return;
+	}
+
+	if (chan->ic_flags & IEEE80211_CHAN_HT40D)
+		sec_position = IEEE80211_HTINFO_EXTOFFSET_BELOW;
+	else if (chan->ic_flags & IEEE80211_CHAN_HT40U)
+		sec_position = IEEE80211_HTINFO_EXTOFFSET_ABOVE;
+
+	sco->sco_id = IEEE80211_ELEMID_SEC_CHAN_OFF;
+	sco->sco_len = 1;
+	sco->sco_off = sec_position;
+
+	*frm += sizeof(struct ieee80211_ie_sec_chan_off);
+
+	return;
+}
+
+/*
+ * Append a Secondary Channel Offset element for the configured TDLS
+ * off-channel target when the negotiated off-channel bandwidth is at
+ * least 40 MHz.  Returns 0 on success (or when not needed), -1 when
+ * the target channel cannot be resolved.
+ */
+static int
+ieee80211_tdls_add_tlv_2nd_chan_off(struct ieee80211_node *ni,
+                u_int8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *newchan = NULL;
+
+	/* A 20 MHz off-channel link needs no secondary channel offset */
+	if (vap->tdls_off_chan_bw < BW_HT40)
+		return 0;
+
+	newchan = ic->ic_findchannel(ic, vap->tdls_target_chan, IEEE80211_MODE_AUTO);
+	if (newchan == NULL) {
+	    IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+	            "TDLS %s: failed to find the target channel %u\n",
+	            __func__, vap->tdls_target_chan);
+	    return -1;
+	}
+
+	ieee80211_tdls_add_sec_chan_off(frm, vap, newchan->ic_ieee);
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+	                "%s: Add second channel offset IE\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Append the VHT Capabilities element; a no-op on non-VHT devices.
+ * Always returns 0.
+ */
+static int
+ieee80211_tdls_add_tlv_vhtcap(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+
+	if (IS_IEEE80211_VHT_ENABLED(ic))
+		*frm = ieee80211_add_vhtcap(ni, *frm, &ic->ic_vhtcap,
+					IEEE80211_FC0_SUBTYPE_ACTION);
+
+	return 0;
+}
+
+/*
+ * Append the VHT Operation element with the channel width clamped to
+ * the lesser of the configured VHT width and the BSS bandwidth.  The
+ * resulting vhtop is also cached in ni->ni_vhtop before emission.
+ */
+static int
+ieee80211_tdls_add_tlv_vhtop(struct ieee80211_node *ni, uint8_t **frm)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	struct ieee80211_vhtop vhtop;
+	uint8_t bw = BW_HT20;
+
+	if (!IS_IEEE80211_VHT_ENABLED(ic))
+		return 0;
+
+	memcpy(&vhtop, &ic->ic_vhtop, sizeof(vhtop));
+
+	/* Effective width = min(configured VHT width, BSS bandwidth) */
+	if (IEEE80211_IS_VHT_20(ic))
+		bw = MIN(ic->ic_bss_bw, BW_HT20);
+	else if (IEEE80211_IS_VHT_40(ic))
+		bw = MIN(ic->ic_bss_bw, BW_HT40);
+	else if (IEEE80211_IS_VHT_80(ic))
+		bw = MIN(ic->ic_bss_bw, BW_HT80);
+	else if (IEEE80211_IS_VHT_160(ic))
+		bw = MIN(ic->ic_bss_bw, BW_HT160);
+
+	if ((bw == BW_HT20) || (bw == BW_HT40)) {
+		vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+	} else if (bw == BW_HT80) {
+		vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+		vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_80MHz;
+	} else if (bw == BW_HT160) {
+		vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_160MHZ;
+		vhtop.centerfreq0 = ic->ic_bsschan->ic_center_f_160MHz;
+	}
+	/* Cache what we advertise so later logic sees the same vhtop */
+	memcpy(&ni->ni_vhtop, &vhtop, sizeof(ni->ni_vhtop));
+
+	*frm = ieee80211_add_vhtop(ni, *frm, &vhtop);
+
+	return 0;
+}
+
+/*
+ * Append the AID element carrying our association ID (little-endian)
+ * on VHT-enabled devices; a no-op otherwise.
+ * NOTE(review): the debug print shows the le16-converted value, so it
+ * only reads as the AID on little-endian hosts.
+ */
+static int
+ieee80211_tdls_add_tlv_aid(struct ieee80211_node *ni, u_int8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_ie_aid *aid = (struct ieee80211_ie_aid *)(*frm);
+
+	if (!IS_IEEE80211_VHT_ENABLED(ic))
+		return 0;
+
+	aid->aid_id= IEEE80211_ELEMID_AID;
+	aid->aid_len= 2;
+	aid->aid = htole16(vap->iv_bss->ni_associd);
+
+	*frm += sizeof(struct ieee80211_ie_aid);
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"%s: Add AID IE, AID = %d\n", __func__, aid->aid);
+
+	return 0;
+}
+
+/*
+ * Append a Wide Bandwidth Channel Switch element for the TDLS
+ * off-channel target when the negotiated bandwidth exceeds 40 MHz.
+ * Returns 0 on success (or when not needed), -1 when the target
+ * channel cannot be resolved.
+ */
+static int
+ieee80211_tdls_add_tlv_wide_bw_cs(struct ieee80211_node *ni,
+		u_int8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_ie_wbchansw *ie = (struct ieee80211_ie_wbchansw *)(*frm);
+	struct ieee80211_channel *des_chan = NULL;
+	u_int32_t chwidth = 0;
+
+	/* Element only applies to >40 MHz off-channel operation */
+	if (vap->tdls_off_chan_bw <= BW_HT40)
+		return 0;
+
+	des_chan = ic->ic_findchannel(ic, vap->tdls_target_chan, IEEE80211_MODE_AUTO);
+	if (des_chan == NULL) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Fail to find the target channel\n", __func__);
+		return -1;
+	}
+
+	ie->wbcs_id = IEEE80211_ELEMID_WBWCHANSWITCH;
+	ie->wbcs_len = sizeof(struct ieee80211_ie_wbchansw) - 2;
+	/* Map the Quantenna BW_HT* value to the VHT channel-width code */
+	switch (vap->tdls_off_chan_bw) {
+		case BW_HT20:
+		case BW_HT40:
+			chwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+			break;
+		case BW_HT80:
+			chwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+			break;
+		default:
+			chwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+	}
+
+	ie->wbcs_newchanw = chwidth;
+	/* Centre frequency segment 0 depends on the chosen width */
+	if (vap->tdls_off_chan_bw == BW_HT40) {
+		ie->wbcs_newchancf0 = des_chan->ic_center_f_40MHz;
+		ie->wbcs_newchancf1 = 0;
+	} else if (vap->tdls_off_chan_bw == BW_HT80) {
+		ie->wbcs_newchancf0 = des_chan->ic_center_f_80MHz;
+		ie->wbcs_newchancf1 = 0;
+	} else {
+		ie->wbcs_newchancf0 = 0;
+		ie->wbcs_newchancf1 = 0;
+	}
+
+	*frm += sizeof(struct ieee80211_ie_wbchansw);
+
+	return 0;
+}
+
+/*
+ * Append a VHT Transmit Power Envelope element for the TDLS target
+ * channel (VHT + 802.11h spectrum management only).  Per-width power
+ * is the channel's max regulatory power minus the local constraint.
+ * Returns 0 on success or when not applicable, -1 when the target
+ * channel cannot be resolved.
+ * (Function name keeps the historical "evlope" spelling for ABI.)
+ */
+static int
+ieee80211_tdls_add_tlv_vht_tx_power_evlope(struct ieee80211_node *ni,
+		u_int8_t **frm)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_ie_vtxpwren *ie = (struct ieee80211_ie_vtxpwren *)(*frm);
+	u_int8_t local_max_tx_pwrcnt = 0;
+	struct ieee80211_channel *des_chan = NULL;
+
+	if (!IS_IEEE80211_VHT_ENABLED(ic) || !(ic->ic_flags & IEEE80211_F_DOTH))
+		return 0;
+
+	des_chan = ic->ic_findchannel(ic, vap->tdls_target_chan, IEEE80211_MODE_AUTO);
+	if (des_chan == NULL) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Fail to find the target channel\n", __func__);
+		return -1;
+	}
+
+	/* Power-count field encodes how many per-width entries follow */
+	switch (vap->tdls_off_chan_bw) {
+		case BW_HT20:
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_20MHZ;
+			break;
+		case BW_HT40:
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_40MHZ;
+			break;
+		case BW_HT80:
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_80MHZ;
+			break;
+		default:
+			local_max_tx_pwrcnt = IEEE80211_TX_POW_FOR_80MHZ;
+	}
+
+	ie->vtxpwren_id = IEEE80211_ELEMID_VHTXMTPWRENVLP;
+	ie->vtxpwren_len = sizeof(struct ieee80211_ie_vtxpwren) - 2;
+
+	ie->vtxpwren_txpwr_info = local_max_tx_pwrcnt;
+	ie->vtxpwren_tp20 = des_chan->ic_maxregpower - ic->ic_pwr_constraint;
+	ie->vtxpwren_tp40 = des_chan->ic_maxregpower - ic->ic_pwr_constraint;
+	ie->vtxpwren_tp80 = des_chan->ic_maxregpower - ic->ic_pwr_constraint;
+	ie->vtxpwren_tp160 = 0;
+
+	*frm += sizeof(struct ieee80211_ie_vtxpwren);
+
+	return 0;
+}
+
+/*
+ * Locate the first TDLS Channel Switch Timing element inside an IE
+ * buffer.  Returns a pointer to the element, or NULL when absent.
+ */
+static uint8_t *
+ieee80211_tdls_find_cs_timing(uint8_t *buf, uint32_t buf_len)
+{
+	uint8_t *cur = buf;
+	uint8_t *end = buf + buf_len;
+
+	while (cur < end) {
+		if (*cur == IEEE80211_ELEMID_TDLS_CS_TIMING)
+			return cur;
+		/* Advance past this element: id + len + body */
+		cur += cur[1] + 2;
+	}
+
+	return NULL;
+}
+
+/*
+ * Copy the first TDLS Channel Switch Timing element from the
+ * caller-supplied IE buffer into the frame.  Returns 0 on success,
+ * 1 on NULL arguments or when no such element exists.
+ */
+static int
+ieee80211_tdls_copy_cs_timing(struct ieee80211_node *ni, uint8_t **frm,
+			struct ieee80211_tdls_action_data *data)
+{
+	uint8_t *cur;
+	uint8_t *end;
+
+	if (!ni || !frm || !data)
+		return 1;
+
+	for (cur = data->ie_buf, end = cur + data->ie_buflen;
+			cur < end; cur += cur[1] + 2) {
+		if (*cur == IEEE80211_ELEMID_TDLS_CS_TIMING) {
+			memcpy(*frm, cur, cur[1] + 2);
+			*frm += cur[1] + 2;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * Append a TDLS Channel Switch Timing element using the default
+ * negotiated switch time/timeout (microseconds).  Always returns 0.
+ * NOTE(review): the `data` parameter is currently unused; the name
+ * keeps its historical "timimg" spelling for ABI.
+ */
+static int
+ieee80211_tdls_add_tlv_cs_timimg(struct ieee80211_node *ni, u_int8_t **frm,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_tdls_cs_timing *cs_timing = (struct ieee80211_tdls_cs_timing *)(*frm);
+	uint16_t sw_time;
+	uint16_t sw_timeout;
+
+	sw_time = DEFAULT_TDLS_CH_SW_NEGO_TIME;
+	sw_timeout = DEFAULT_TDLS_CH_SW_NEGO_TIME + DEFAULT_TDLS_CH_SW_TIMEOUT;
+
+	cs_timing->id = IEEE80211_ELEMID_TDLS_CS_TIMING;
+	cs_timing->len = 4;	/* two 16-bit fields follow the header */
+	cs_timing->switch_time = cpu_to_le16(sw_time);
+	cs_timing->switch_timeout = cpu_to_le16(sw_timeout);
+
+	*frm += sizeof(struct ieee80211_tdls_cs_timing);
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"%s: Add cs timing IE, cs_time = %d us, cs_timeout = %d us \n",
+			__func__, sw_time, sw_timeout);
+
+	return 0;
+}
+
+
+/*
+ * qsort()-style comparator: orders bridge FDB entries by ascending
+ * ageing_timer_value.
+ */
+static int
+ieee80211_tdls_compare_fdb_entry(const void *a, const void *b)
+{
+	const struct __fdb_entry *lhs = a;
+	const struct __fdb_entry *rhs = b;
+
+	/* Branchless three-way compare: -1, 0 or 1 */
+	return (lhs->ageing_timer_value > rhs->ageing_timer_value) -
+			(lhs->ageing_timer_value < rhs->ageing_timer_value);
+}
+
+/*
+ * Append the Quantenna vendor TLV listing downstream (bridged)
+ * client MACs learned behind this device: pull the bridge FDB via
+ * the registered hook, sort by age, and copy as many non-local
+ * wlan-side entries as fit in `buf_size`.  Returns 0 on success,
+ * 1 on any failure (no room, no bridge/hook, allocation failure).
+ * NOTE(review): `bridge_entries` is a file-scope buffer allocated on
+ * first use and apparently never freed; confirm it cannot be used
+ * from two contexts concurrently.
+ */
+static int
+ieee80211_tdls_add_tlv_downstream_clients(struct ieee80211_node *ni, uint8_t **frm, size_t buf_size)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	int bridge_entry_cnt = 0;
+	int max_clients = 0;
+	int client_list_cnt = 0;
+	struct ieee80211_ie_qtn_tdls_clients *clients;
+	int i;
+	struct net_bridge_port *br_port = get_br_port(vap->iv_dev);
+
+	if (buf_size < sizeof(*clients)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: Not enough space for TDLS downstream "
+			"client TLV, increase TDLS frame size\n", __func__);
+		return 1;
+	}
+
+	/* First, extract all bridge entries */
+	if (!br_fdb_fillbuf_hook || !br_port) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: Not a bridge port or bridge (%p) or "
+			"callback func was not initialized (%p)",
+			__func__, br_port, br_fdb_fillbuf_hook);
+		return 1;
+	}
+
+	/* Lazily allocate the shared snapshot buffer on first use */
+	if (!bridge_entries) {
+		bridge_entries = (struct __fdb_entry *) kmalloc(
+				sizeof(struct __fdb_entry) *
+				IEEE80211_TDLS_MAX_BRIDGE_CLIENTS, GFP_KERNEL);
+		if (bridge_entries == NULL) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"%s: can't alloc space for "
+				"downstream bridge entries", __func__);
+			return 1;
+		}
+	}
+
+	bridge_entry_cnt = br_fdb_fillbuf_hook(br_port->br,
+			bridge_entries, IEEE80211_TDLS_MAX_BRIDGE_CLIENTS, 0);
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS: bridge_entry_cnt=%d\n", bridge_entry_cnt);
+
+	if (bridge_entry_cnt >= IEEE80211_TDLS_MAX_BRIDGE_CLIENTS)
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: at maximum # of TDLS bridge entries "
+			"(%d)\n", __func__, bridge_entry_cnt);
+
+	/* Sort the bridge entries by age */
+	sort(bridge_entries, bridge_entry_cnt, sizeof(struct __fdb_entry),
+			ieee80211_tdls_compare_fdb_entry, NULL);
+
+	/* Calculate the space we have for downstream entries in the frame */
+	max_clients = (buf_size - sizeof(*clients)) / IEEE80211_ADDR_LEN;
+	if (max_clients > IEEE80211_QTN_IE_DOWNSTREAM_MAC_MAX)
+		max_clients = IEEE80211_QTN_IE_DOWNSTREAM_MAC_MAX;
+
+	/* Fill out the frame */
+	clients = (struct ieee80211_ie_qtn_tdls_clients *)*frm;
+	clients->qtn_ie_id = IEEE80211_ELEMID_VENDOR;
+	clients->qtn_ie_oui[0] = QTN_OUI & 0xff;
+	clients->qtn_ie_oui[1] = (QTN_OUI >> 8) & 0xff;
+	clients->qtn_ie_oui[2] = (QTN_OUI >> 16) & 0xff;
+	clients->qtn_ie_type = QTN_OUI_TDLS_BRMACS;
+
+	for (i = 0; i < bridge_entry_cnt; i++) {
+		if (bridge_entries[i].is_local || !bridge_entries[i].is_wlan) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS: Bridge table macs %pM"
+				" %s a local address and %s a wlan device\n",
+				bridge_entries[i].mac_addr,
+				bridge_entries[i].is_local ? "is" : "is not",
+				bridge_entries[i].is_wlan ? "is" : "is not");
+
+			IEEE80211_ADDR_COPY(clients->qtn_ie_mac +
+					client_list_cnt * IEEE80211_ADDR_LEN,
+					bridge_entries[i].mac_addr);
+
+			client_list_cnt++;
+
+			if (client_list_cnt >= max_clients)
+				break;
+		}
+
+	}
+
+	/* Finalize the variable-length TLV and advance the frame cursor */
+	clients->qtn_ie_mac_cnt = client_list_cnt;
+	clients->qtn_ie_len = sizeof(*clients) - 2 +
+			(client_list_cnt * IEEE80211_ADDR_LEN);
+	*frm += sizeof(*clients) + (client_list_cnt * IEEE80211_ADDR_LEN);
+
+	return 0;
+}
+
+/*
+ * Insert/update a bridge forwarding entry mapping `addr` to the TDLS
+ * sub-port, via the bridge FDB update hook when it is registered and
+ * the device is a bridge port.  Runs under rcu_read_lock.
+ */
+static void
+ieee80211_tdls_add_bridge_entry(struct ieee80211_node *ni, uint8_t *addr,
+		__u16 sub_port)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct net_bridge_port *br_port = get_br_port(vap->iv_dev);
+
+	rcu_read_lock();
+	if (br_fdb_update_const_hook && br_port) {
+		br_fdb_update_const_hook(br_port->br, br_port, addr, 0, 0,
+						IEEE80211_NODE_IDX_MAP(sub_port));
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * Convenience wrapper: add a bridge FDB entry for a TDLS peer using
+ * its own MAC address and node index as the sub-port.  Returns 0.
+ */
+int
+ieee80211_tdls_add_bridge_entry_for_peer(struct ieee80211_node *peer_ni)
+{
+	IEEE80211_TDLS_DPRINTF(peer_ni->ni_vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: for mac %pM ncidx = 0x%x\n", __func__,
+		peer_ni->ni_macaddr, peer_ni->ni_node_idx);
+
+	ieee80211_tdls_add_bridge_entry(peer_ni, peer_ni->ni_macaddr,
+			peer_ni->ni_node_idx);
+
+	return 0;
+}
+
+/*
+ * Tear down an active or starting TDLS link to `ni`: purge the
+ * bridge entries for its sub-port, drop association accounting if it
+ * was authenticated, restore the node's BSSID to the local AP, abort
+ * any in-progress channel switch with this peer, and mark the node
+ * idle.  Returns 0 (also when the link was not active/starting).
+ */
+int
+ieee80211_tdls_disable_peer_link(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+	struct net_bridge_port *br_port = get_br_port(vap->iv_dev);
+#endif
+
+	if (!IEEE80211_NODE_IS_TDLS_ACTIVE(ni) &&
+			!IEEE80211_NODE_IS_TDLS_STARTING(ni))
+		return 0;
+
+	if (ni->ni_node_idx) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"Purge subport [0x%x] since link disabled\n", ni->ni_node_idx);
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+		/*
+		 * Delete all bridge table entries for the AID.  They would eventually
+		 * age out, but in the mean time data will be directed to the wrong
+		 * sub_port (AID) until the bridge entries get updated by upstream
+		 * traffic from the endpoint.
+		 * Multicast port entries for the AID (sub_port) are not aged and would
+		 * hang around for ever, so they are also deleted
+		 */
+		if (br_fdb_delete_by_sub_port_hook &&
+				br_port) {
+			br_fdb_delete_by_sub_port_hook(br_port->br, br_port,
+					ni->ni_node_idx);
+		}
+#endif
+	}
+
+	ni->tdls_initiator = 0;
+
+	/* Only authenticated peers were counted as associated stations */
+	if (ni->ni_ext_flags & IEEE80211_NODE_TDLS_AUTH) {
+		ieee80211_sta_assocs_dec(vap, __func__);
+		ieee80211_nonqtn_sta_leave(vap, ni, __func__);
+	}
+
+	/* Restore ni_bssid to the local AP BSSID */
+	if (!IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid))
+		IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_bss->ni_bssid);
+
+	/* If a channel switch with this peer is pending, go back to base */
+	if (vap->tdls_cs_node && (vap->tdls_cs_node == ni))
+		ieee80211_tdls_return_to_base_channel(vap, 0);
+	ieee80211_tdls_update_node_status(ni, IEEE80211_TDLS_NODE_STATUS_IDLE);
+
+	ni->ni_ext_flags &= ~IEEE80211_NODE_TDLS_AUTH;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS peer %pM teared down\n", ni->ni_macaddr);
+
+	return 0;
+}
+
+/*
+ * Validate a proposed TDLS channel-switch target channel.  Returns 0
+ * when usable; 1 when the channel is unknown, not in the active
+ * channel list, or a DFS channel.
+ */
+static int
+ieee80211_tdls_check_target_chan(struct ieee80211_node *ni,
+			struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211com *ic = ni->ni_vap->iv_ic;
+	int chan_num = tdls->target_chan;
+	struct ieee80211_channel *chan;
+
+	chan = ic->ic_findchannel(ic, chan_num, IEEE80211_MODE_AUTO);
+	if (!chan)
+		return 1;
+	if (isclr(ic->ic_chan_active, chan_num))
+		return 1;
+	if (chan->ic_flags & IEEE80211_CHAN_DFS)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Validate the peer's Secondary Channel Offset element against the
+ * target channel's HT40U/HT40D/HT20 flags and record the resulting
+ * off-channel bandwidth (BW_HT20 or BW_HT40) in tdls_off_chan_bw.
+ * Returns 0 when acceptable, 1 when the target channel is unknown.
+ * NOTE(review): when the offset/flag combinations all miss, the
+ * function still returns 0 without touching tdls_off_chan_bw —
+ * confirm that fall-through is intentional.
+ */
+static int
+ieee80211_tdls_check_2nd_chan_off(struct ieee80211_node *ni,
+			struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	struct ieee80211_ie_sec_chan_off *sco =
+		(struct ieee80211_ie_sec_chan_off *)tdls->sec_chan_off;
+	int sec_chan;
+	int tar_chan;
+
+	/* No element at all means the peer only supports 20 MHz */
+	if (!sco) {
+		vap->tdls_off_chan_bw = BW_HT20;
+		return 0;
+	}
+
+	sec_chan = sco->sco_off;
+	tar_chan = tdls->target_chan;
+	chan = ic->ic_findchannel(ic, tar_chan, IEEE80211_MODE_AUTO);
+	if (!chan)
+		return 1;
+
+	if ((sec_chan == IEEE80211_HTINFO_EXTOFFSET_BELOW) &&
+			(chan->ic_flags & IEEE80211_CHAN_HT40D)) {
+		vap->tdls_off_chan_bw = BW_HT40;
+		return 0;
+	}
+
+	if ((sec_chan == IEEE80211_HTINFO_EXTOFFSET_ABOVE) &&
+			(chan->ic_flags & IEEE80211_CHAN_HT40U)) {
+		vap->tdls_off_chan_bw = BW_HT40;
+		return 0;
+	}
+
+	if ((sec_chan == IEEE80211_HTINFO_EXTOFFSET_NA) &&
+			(chan->ic_flags & IEEE80211_CHAN_HT20)) {
+		vap->tdls_off_chan_bw = BW_HT20;
+		return 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the peer's Wide Bandwidth Channel Switch element against
+ * the target channel and record the negotiated off-channel bandwidth
+ * (BW_HT80/BW_HT160) in tdls_off_chan_bw.  Returns 0 when the
+ * element is acceptable, 1 otherwise.
+ */
+static int
+ieee80211_tdls_check_wide_bw_cs(struct ieee80211_node *ni,
+			struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	struct ieee80211_ie_wbchansw *bw_cs =
+		(struct ieee80211_ie_wbchansw *)tdls->wide_bw_cs;
+
+	if (!bw_cs)
+		return 1;
+
+	/* 20/40 MHz (width code 0) is not a wide-bandwidth switch */
+	if (bw_cs->wbcs_newchanw == 0)
+		return 1;
+
+	chan = ic->ic_findchannel(ic, tdls->target_chan, IEEE80211_MODE_AUTO);
+	if (!chan)
+		return 1;
+
+	/*
+	 * Compare against the same VHT channel-width constants the
+	 * element builder (ieee80211_tdls_add_tlv_wide_bw_cs) uses,
+	 * instead of magic 1/2, and require the advertised centre
+	 * frequency segment to match the target channel.
+	 */
+	if ((bw_cs->wbcs_newchanw == IEEE80211_VHTOP_CHAN_WIDTH_80MHZ) &&
+			(chan->ic_center_f_80MHz == bw_cs->wbcs_newchancf0)) {
+		vap->tdls_off_chan_bw = BW_HT80;
+		return 0;
+	}
+
+	if ((bw_cs->wbcs_newchanw == IEEE80211_VHTOP_CHAN_WIDTH_160MHZ) &&
+			(chan->ic_center_f_160MHz == bw_cs->wbcs_newchancf0)) {
+		vap->tdls_off_chan_bw = BW_HT160;
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Placeholder: regulatory-class validation is not implemented, so
+ * every peer's supported regulatory class set is accepted (returns 0).
+ */
+static int
+ieee80211_tdls_check_reg_class(struct ieee80211_node *ni,
+			struct ieee80211_tdls_params *tdls)
+{
+	return 0;
+}
+
+/*
+ * Validate the Link Identifier element of a received TDLS frame.
+ * With TDLS-over-QHop the BSSID may belong to an allowed external
+ * BSS; otherwise it must match this node's BSSID.  Returns 0 when
+ * acceptable, 1 when missing or mismatched.
+ */
+static int
+ieee80211_tdls_check_link_id(struct ieee80211_node *ni,
+			struct ieee80211_tdls_params *tdls, uint8_t action)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (!tdls->link_id)
+		return 1;
+
+	if (ieee80211_tdls_over_qhop_enabled(vap))
+		return !ieee80211_tdls_ext_bssid_allowed(vap, tdls->link_id->bssid);
+
+	return !IEEE80211_ADDR_EQ(ni->ni_bssid, tdls->link_id->bssid);
+}
+
+/*
+ * Validate the peer's TDLS Channel Switch Timing element.  Returns 0
+ * when the timing is usable; 1 when the element is missing, the
+ * switch timeout is below the minimum, or the timeout is not
+ * strictly greater than the switch time.
+ */
+static int
+ieee80211_tdls_check_chan_switch_timing(struct ieee80211_node *ni,
+			struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211_tdls_cs_timing *cs_timing =
+			(struct ieee80211_tdls_cs_timing *)(tdls->cs_timing);
+	uint32_t sw_time;
+	uint32_t sw_timeout;
+
+	if (!cs_timing)
+		return 1;
+
+	/*
+	 * The element carries little-endian 16-bit fields (the builder
+	 * writes them with cpu_to_le16()); convert before comparing.
+	 * The previous code read them raw — only correct on LE hosts —
+	 * and also computed an unused DTIM-TBTT "duration" (tsf read,
+	 * tbtt, duration locals); that dead code has been removed.
+	 */
+	sw_time = le16_to_cpu(cs_timing->switch_time);
+	sw_timeout = le16_to_cpu(cs_timing->switch_timeout);
+
+	if ((sw_timeout < DEFAULT_TDLS_CH_SW_TIMEOUT) ||
+			(sw_timeout <= sw_time))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Choose the off-channel for TDLS channel switching.  The policy is
+ * currently fixed: always the default 5 GHz channel.
+ */
+static uint8_t
+ieee80211_tdls_select_target_channel(struct ieee80211vap *vap)
+{
+	uint8_t chosen = IEEE80211_DEFAULT_5_GHZ_CHANNEL;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: Target channel %d\n", __func__, chosen);
+
+	return chosen;
+}
+
+/*
+ * Decide whether a TDLS channel switch may start now.  Returns 0
+ * while any station in the node table is still having power-save
+ * traffic delivered, 1 otherwise.
+ */
+int
+ieee80211_tdls_channel_switch_allowed(struct ieee80211vap *vap)
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *cur;
+	struct ieee80211_node *next;
+
+	TAILQ_FOREACH_SAFE(cur, &nt->nt_node, ni_list, next) {
+		if (cur->ni_flags & IEEE80211_NODE_PS_DELIVERING)
+			return 0;
+	}
+
+	return 1;
+}
+extern int dev_queue_xmit(struct sk_buff *skb);
+
+/*
+ * Transmit a TDLS action frame through the AP path: the skb is
+ * attributed to the BSS node and queued on the VAP's net device.
+ * Takes ownership of `skb` (dev_queue_xmit consumes it).
+ */
+static void
+ieee80211_tdls_send_frame(struct ieee80211_node *ni, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_node *bss;
+	int err;
+
+	if ((ni->ni_vap)->iv_debug & IEEE80211_MSG_OUTPUT) {
+		int i;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"Xmit:dev[%s]len[%d]bcast[%d]pri[%d]\n",
+			ni->ni_vap->iv_dev->name, skb->len,
+			ni->ni_stats.ns_tx_bcast, skb->priority);
+		/*
+		 * Dump up to the first 64 bytes, 16 per row.  The loop
+		 * index already advances by 16 per row, so it is used
+		 * directly as the byte offset; the old code additionally
+		 * shifted it by 4, reading offsets up to 783 — past the
+		 * end of short frames.  Rows beyond skb->len are skipped.
+		 */
+		for (i = 0; (i < 64) && ((i + 16) <= skb->len); i += 16) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"Data[%p]: %02x %02x %02x %02x", &skb->data[i],
+				skb->data[i], skb->data[i + 1],
+				skb->data[i + 2], skb->data[i + 3]);
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"  %02x %02x %02x %02x\t",
+				skb->data[i + 4], skb->data[i + 5],
+				skb->data[i + 6], skb->data[i + 7]);
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"%02x %02x %02x %02x",
+				skb->data[i + 8], skb->data[i + 9],
+				skb->data[i + 10], skb->data[i + 11]);
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"  %02x %02x %02x %02x\n",
+				skb->data[i + 12], skb->data[i + 13],
+				skb->data[i + 14], skb->data[i + 15]);
+		}
+	}
+
+	bss = vap->iv_bss;
+	ieee80211_ref_node(bss);
+
+	skb->dev = bss->ni_vap->iv_dev;
+	QTN_SKB_CB_NI(skb) = bss;
+	M_FLAG_SET(skb, M_NO_AMSDU);
+
+	vap->iv_stats.is_tx_tdls++;
+	IEEE80211_NODE_STAT(ni, tx_tdls_action);
+
+	err = dev_queue_xmit(skb);
+	if (err < 0) {
+		/*
+		 * NOTE(review): dev_queue_xmit() reports most failures as
+		 * positive NET_XMIT_* codes, which are not caught here —
+		 * confirm the node reference is released elsewhere in
+		 * that case.
+		 */
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Sending failed\n", __func__);
+		ieee80211_free_node(bss);
+	}
+}
+
+/*
+ * Transmit a frame directly over an established TDLS link: the skb
+ * is attributed to the peer node and queued on the VAP's net device.
+ * Takes ownership of `skb` in all paths (fix: the early return for
+ * an inactive link previously leaked the skb).
+ */
+static void
+ieee80211_tdls_send_frame_over_tdls(struct ieee80211_node *peer_ni, struct sk_buff *skb)
+{
+	struct ieee80211vap *vap = peer_ni->ni_vap;
+	int err;
+
+	if (!IEEE80211_NODE_IS_TDLS_ACTIVE(peer_ni)) {
+		/* We own the skb; free it instead of leaking it */
+		kfree_skb(skb);
+		return;
+	}
+
+	if (vap->iv_debug & IEEE80211_MSG_OUTPUT) {
+		int i;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"Xmit:dev[%s]len[%d]bcast[%d]pri[%d]\n",
+			peer_ni->ni_vap->iv_dev->name, skb->len,
+			peer_ni->ni_stats.ns_tx_bcast, skb->priority);
+		/*
+		 * Dump up to the first 64 bytes, 16 per row.  The loop
+		 * index already advances by 16 per row, so it is used
+		 * directly as the byte offset; the old code additionally
+		 * shifted it by 4, reading offsets up to 783 — past the
+		 * end of short frames.  Rows beyond skb->len are skipped.
+		 */
+		for (i = 0; (i < 64) && ((i + 16) <= skb->len); i += 16) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"Data[%p]: %02x %02x %02x %02x", &skb->data[i],
+				skb->data[i], skb->data[i + 1],
+				skb->data[i + 2], skb->data[i + 3]);
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"  %02x %02x %02x %02x\t",
+				skb->data[i + 4], skb->data[i + 5],
+				skb->data[i + 6], skb->data[i + 7]);
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"%02x %02x %02x %02x",
+				skb->data[i + 8], skb->data[i + 9],
+				skb->data[i + 10], skb->data[i + 11]);
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"  %02x %02x %02x %02x\n",
+				skb->data[i + 12], skb->data[i + 13],
+				skb->data[i + 14], skb->data[i + 15]);
+		}
+	}
+
+	ieee80211_ref_node(peer_ni);
+
+	skb->dev = peer_ni->ni_vap->iv_dev;
+	QTN_SKB_CB_NI(skb) = peer_ni;
+	M_FLAG_SET(skb, M_NO_AMSDU);
+
+	vap->iv_stats.is_tx_tdls++;
+	IEEE80211_NODE_STAT(peer_ni, tx_tdls_action);
+
+	err = dev_queue_xmit(skb);
+	if (err < 0) {
+		/*
+		 * NOTE(review): dev_queue_xmit() reports most failures as
+		 * positive NET_XMIT_* codes, which are not caught here —
+		 * confirm the node reference is released elsewhere in
+		 * that case.
+		 */
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Sending failed\n", __func__);
+		ieee80211_free_node(peer_ni);
+	}
+}
+
+/*
+ * Send a Setup Confirm (third frame of the TDLS setup handshake).
+ * Returns 0 if the frame was built and queued, else 1.
+ * The confirm carries the LE status code and dialog token; on success
+ * status it also carries security, EDCA and operation IEs.
+ */
+static int
+ieee80211_tdls_send_setup_confirm(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	uint8_t action = IEEE80211_ACTION_TDLS_SETUP_CONFIRM;
+	struct sk_buff *skb;
+	uint8_t *frm = NULL;
+	uint16_t frm_len = IEEE80211_TDLS_FRAME_MAX;
+
+	if (!peer_ni || !data)
+		return 1;
+
+	vap = peer_ni->ni_vap;
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: tdls function prohibited, don't send setup confirm\n", __func__);
+		return 1;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: send setup confirm frame to %pM\n", __func__, peer_ni->ni_macaddr);
+
+	/* Only the setup initiator sends the confirm frame */
+	peer_ni->tdls_initiator = 1;
+
+	skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb)
+		goto error;
+
+	/* Fixed fields: status code (as supplied, little-endian) + dialog token */
+	memcpy(frm, &data->status, sizeof(data->status));
+	frm += sizeof(data->status);
+	*frm = data->dtoken;
+	frm += sizeof(data->dtoken);
+
+	/* Optional IEs are included only when the status code reports success */
+	if (le16toh(data->status) == IEEE80211_STATUS_SUCCESS) {
+		if (ieee80211_tdls_get_privacy(vap)) {
+			if (ieee80211_tdls_add_tlv_rsn(peer_ni, &frm, data))
+				goto error;
+		}
+		if (ieee80211_tdls_add_tlv_edca_param(peer_ni, &frm))
+			goto error;
+		if (ieee80211_tdls_get_privacy(vap)) {
+			if (ieee80211_tdls_add_tlv_ftie(peer_ni, &frm, data))
+				goto error;
+			if (ieee80211_tdls_add_tlv_tpk_timeout(peer_ni, &frm, data))
+				goto error;
+		}
+		/* Add HT Operation only if the peer is HT-capable but the BSS is not */
+		if (!IEEE80211_NODE_IS_HT(vap->iv_bss) &&
+				IEEE80211_NODE_IS_HT(peer_ni))
+			ieee80211_tdls_add_tlv_ht_oper(peer_ni, &frm);
+
+		if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action, &frm,
+				peer_ni->ni_macaddr, data))
+			goto error;
+
+		/* Likewise VHT Operation only if the peer is VHT but the BSS is not */
+		if (!IEEE80211_NODE_IS_VHT(vap->iv_bss) &&
+				IEEE80211_NODE_IS_VHT(peer_ni))
+			ieee80211_tdls_add_tlv_vhtop(peer_ni, &frm);
+	}
+
+	/* Quantenna vendor IE + bridge client list are appended regardless of status */
+	frm = ieee80211_add_qtn_ie(frm, peer_ni->ni_ic,
+			IEEE80211_QTN_BRIDGEMODE, IEEE80211_QTN_BRIDGEMODE,
+			vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H, 0);
+
+	if (ieee80211_tdls_add_tlv_downstream_clients(peer_ni, &frm,
+			(frm_len - (frm - skb->data))))
+		goto error;
+
+	skb_trim(skb, frm - skb->data);
+	ieee80211_tdls_send_frame(peer_ni, skb);
+
+	return 0;
+
+error:
+	/* Only the local skb is released here; peer_ni references are untouched */
+	if (skb)
+		dev_kfree_skb(skb);
+
+	return 1;
+}
+
+/*
+ * Send a Setup Response
+ * A reference to the peer_ni structure must be held before calling this function, and
+ * must be freed on return if not sent.
+ * Returns 0 if successful, else 1.
+ * Note: Link ID is always present, not just if status code is 0 (correction to 7.4.11.2).
+ */
+static int
+ieee80211_tdls_send_setup_resp(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	uint8_t action = IEEE80211_ACTION_TDLS_SETUP_RESP;
+	struct sk_buff *skb;
+	uint8_t *frm = NULL;
+	uint16_t frm_len = IEEE80211_TDLS_FRAME_MAX;
+	struct ieee80211com *ic;
+	if (!peer_ni || !data)
+		return 1;
+
+	vap = peer_ni->ni_vap;
+	ic = vap->iv_ic;
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: tdls function prohibited, don't send setup response\n", __func__);
+		return 1;
+	}
+
+	/* Suppress the response entirely if the direct link quality is too poor */
+	if (!ieee80211_tdls_link_should_response(vap, peer_ni)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS %s: Don't send setup response to Peer %pM due to bad link\n",
+				__func__, peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: send setup response frame to %pM\n", __func__, peer_ni->ni_macaddr);
+
+	/* Responding side is, by definition, not the initiator */
+	peer_ni->tdls_initiator = 0;
+
+	skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb)
+		goto error;
+
+	/* Fixed fields: status code (as supplied, little-endian) + dialog token */
+	memcpy(frm, &data->status, sizeof(data->status));
+	frm += sizeof(data->status);
+	*frm = data->dtoken;
+	frm += sizeof(data->dtoken);
+	/* Capability/rate/security IEs only accompany a success status */
+	if (le16toh(data->status) == IEEE80211_STATUS_SUCCESS) {
+		if (ieee80211_tdls_add_cap(peer_ni, &frm))
+			goto error;
+		if (ieee80211_tdls_add_tlv_rates(peer_ni, &frm))
+			goto error;
+
+		/* Country IE only when 802.11d is on, or 802.11h/TPC requires it */
+		if ((ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE)
+				|| ((ic->ic_flags & IEEE80211_F_DOTH) &&
+					(ic->ic_flags_ext & IEEE80211_FEXT_TPC))) {
+			if (ieee80211_tdls_add_tlv_country(peer_ni, &frm))
+				goto error;
+		}
+
+		if (ieee80211_tdls_add_tlv_xrates(peer_ni, &frm))
+			goto error;
+
+		/* Channel switch support IEs only if TDLS channel switching is allowed */
+		if (!(vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB))
+			frm = ieee80211_add_supported_chans(frm, ic);
+
+		if (ieee80211_tdls_get_privacy(vap)) {
+			if (ieee80211_tdls_add_tlv_rsn(peer_ni, &frm, data))
+				goto error;
+		}
+		if (ieee80211_tdls_add_tlv_ext_cap(peer_ni, &frm))
+			goto error;
+		if (ieee80211_tdls_add_tlv_qos_cap(peer_ni, &frm))
+			goto error;
+		if (ieee80211_tdls_get_privacy(vap)) {
+			if (ieee80211_tdls_add_tlv_ftie(peer_ni, &frm, data))
+				goto error;
+			if (ieee80211_tdls_add_tlv_tpk_timeout(peer_ni, &frm, data))
+				goto error;
+		}
+		if (!(vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB)) {
+			if (ieee80211_tdls_add_tlv_sup_reg_class(peer_ni, &frm))
+				goto error;
+		}
+		if (ieee80211_tdls_add_tlv_ht_cap(peer_ni, &frm))
+			goto error;
+		if (ieee80211_tdls_add_tlv_bss_2040_coex(peer_ni, &frm))
+			goto error;
+		if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action, &frm,
+						   peer_ni->ni_macaddr, data))
+			goto error;
+		if (ieee80211_tdls_add_tlv_aid(peer_ni, &frm))
+			goto error;
+		if (ieee80211_tdls_add_tlv_vhtcap(peer_ni, &frm))
+			goto error;
+	}
+
+	/* Quantenna vendor IE + bridge client list are appended regardless of status */
+	frm = ieee80211_add_qtn_ie(frm, peer_ni->ni_ic,
+			IEEE80211_QTN_BRIDGEMODE, IEEE80211_QTN_BRIDGEMODE,
+			vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H, 0);
+
+	if (ieee80211_tdls_add_tlv_downstream_clients(peer_ni, &frm,
+			(frm_len - (frm - skb->data))))
+		goto error;
+
+	skb_trim(skb, frm - skb->data);
+	ieee80211_tdls_send_frame(peer_ni, skb);
+
+	return 0;
+
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+
+	return 1;
+}
+
+/*
+ * Send a Setup Request
+ * A reference to the peer_ni structure must be held before calling this function, and
+ * must be freed on return if not sent.
+ * Returns 0 if sent successfully, else 1.
+ * On success the peer's TDLS state is moved to STARTING and the setup
+ * timestamp is recorded for timeout tracking.
+ */
+static int
+ieee80211_tdls_send_setup_req(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	uint8_t action = IEEE80211_ACTION_TDLS_SETUP_REQ;
+	struct sk_buff *skb;
+	uint8_t *frm = NULL;
+	uint16_t frm_len = IEEE80211_TDLS_FRAME_MAX;
+
+	if (!peer_ni || !data)
+		return 1;
+
+	vap = peer_ni->ni_vap;
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: tdls function prohibited, don't send setup request\n", __func__);
+		return 1;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: send SETUP REQUEST frame to %pM\n", __func__, peer_ni->ni_macaddr);
+
+	/* Sending the request makes the local side the link initiator */
+	peer_ni->tdls_initiator = 1;
+
+	skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb)
+		goto error;
+
+	/* Fixed field: dialog token (no status code in a setup request) */
+	*frm = data->dtoken;
+	frm += sizeof(data->dtoken);
+
+	if (ieee80211_tdls_add_cap(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_rates(peer_ni, &frm))
+		goto error;
+	/* NOTE(review): Country IE is added unconditionally here, while
+	 * the setup-response path gates it on COUNTRYIE/DOTH+TPC flags —
+	 * confirm whether the asymmetry is intentional. */
+	if (ieee80211_tdls_add_tlv_country(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_xrates(peer_ni, &frm))
+		goto error;
+
+	/* Channel switch support IEs only if TDLS channel switching is allowed */
+	if (!(vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB))
+		frm = ieee80211_add_supported_chans(frm, vap->iv_ic);
+
+	if (ieee80211_tdls_get_privacy(vap)) {
+		if (ieee80211_tdls_add_tlv_rsn(peer_ni, &frm, data))
+			goto error;
+	}
+	if (ieee80211_tdls_add_tlv_ext_cap(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_qos_cap(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_get_privacy(vap)) {
+		if (ieee80211_tdls_add_tlv_ftie(peer_ni, &frm, data))
+			goto error;
+		if (ieee80211_tdls_add_tlv_tpk_timeout(peer_ni, &frm, data))
+			goto error;
+	}
+	if (!(vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB)) {
+		if (ieee80211_tdls_add_tlv_sup_reg_class(peer_ni, &frm))
+			goto error;
+	}
+	if (ieee80211_tdls_add_tlv_ht_cap(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_bss_2040_coex(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action, &frm,
+					   peer_ni->ni_macaddr, data))
+		goto error;
+	if (ieee80211_tdls_add_tlv_aid(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_vhtcap(peer_ni, &frm))
+		goto error;
+
+	frm = ieee80211_add_qtn_ie(frm, peer_ni->ni_ic,
+			IEEE80211_QTN_BRIDGEMODE, IEEE80211_QTN_BRIDGEMODE,
+			vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H, 0);
+
+	if (ieee80211_tdls_add_tlv_downstream_clients(peer_ni, &frm,
+			(frm_len - (frm - skb->data))))
+		goto error;
+
+	skb_trim(skb, frm - skb->data);
+	ieee80211_tdls_send_frame(peer_ni, skb);
+
+	/* Record start time and move the peer into the STARTING state */
+	peer_ni->tdls_setup_start = jiffies;
+	ieee80211_tdls_update_node_status(peer_ni,
+		IEEE80211_TDLS_NODE_STATUS_STARTING);
+
+	return 0;
+
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+
+	return 1;
+}
+
+/*
+ * Send a Discovery Response
+ * Returns 0 if sent successfully, else 1.
+ * Note: Discovery response is a public action frame.  All other TDLS frames are
+ * management over data.  It is therefore built with ieee80211_getmgtframe()
+ * and sent via ieee80211_tdls_mgmt_output() instead of the data path.
+ */
+static int
+ieee80211_tdls_send_disc_resp(struct ieee80211_node *peer_ni,
+	struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	struct sk_buff *skb = NULL;
+	uint16_t frm_len = IEEE80211_TDLS_FRAME_MAX;
+	uint8_t *frm;
+	uint8_t action = IEEE80211_ACTION_PUB_TDLS_DISC_RESP;
+	struct ieee80211_action *ia;
+	struct ieee80211_tdls_link_id *link_id = NULL;
+	uint8_t *bssid;
+
+	if (!peer_ni) {
+		printk(KERN_WARNING "%s: Invalid peer node\n", __func__);
+		return 1;
+	}
+
+	vap = peer_ni->ni_vap;
+
+	if (!data) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Invalid argument data\n", __func__);
+		goto error;
+	}
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: tdls function prohibited, don't send discover response\n", __func__);
+		goto error;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: send DISC RESPONSE frame to %pM\n", __func__, peer_ni->ni_macaddr);
+
+
+	skb = ieee80211_getmgtframe(&frm, frm_len);
+	if (skb == NULL) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_NODE, IEEE80211_TDLS_MSG_WARN,
+			"%s: cannot get buf; size %u", __func__, frm_len);
+		vap->iv_stats.is_tx_nobuf++;
+		goto error;
+	}
+
+	/* Public action frame header: category + action code */
+	ia = (struct ieee80211_action *)frm;
+	ia->ia_category = IEEE80211_ACTION_CAT_PUBLIC;
+	ia->ia_action = IEEE80211_ACTION_PUB_TDLS_DISC_RESP;
+	frm += sizeof(*ia);
+
+	/* Fixed Length Fields */
+	*frm = data->dtoken;
+	frm += sizeof(data->dtoken);
+	if (ieee80211_tdls_add_cap(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_rates(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_xrates(peer_ni, &frm))
+		goto error;
+
+	frm = ieee80211_add_supported_chans(frm, vap->iv_ic);
+
+	if (ieee80211_tdls_get_privacy(vap)) {
+		if (ieee80211_tdls_add_tlv_rsn(peer_ni, &frm, data))
+			goto error;
+	}
+	if (ieee80211_tdls_add_tlv_ext_cap(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_get_privacy(vap)) {
+		if (ieee80211_tdls_add_tlv_ftie(peer_ni, &frm, data))
+			goto error;
+		if (ieee80211_tdls_add_tlv_tpk_timeout(peer_ni, &frm, data))
+			goto error;
+	}
+	if (!(vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB)) {
+		if (ieee80211_tdls_add_tlv_sup_reg_class(peer_ni, &frm))
+			goto error;
+	}
+	if (ieee80211_tdls_add_tlv_ht_cap(peer_ni, &frm))
+		goto error;
+	if (ieee80211_tdls_add_tlv_bss_2040_coex(peer_ni, &frm))
+		goto error;
+
+	/* Remember where the Link ID lands so its BSSID can be reused below */
+	link_id = (struct ieee80211_tdls_link_id *)frm;
+	if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action, &frm,
+					   peer_ni->ni_macaddr, data))
+		goto error;
+	if (ieee80211_tdls_add_tlv_vhtcap(peer_ni, &frm))
+		goto error;
+
+	frm = ieee80211_add_qtn_ie(frm, peer_ni->ni_ic,
+			IEEE80211_QTN_BRIDGEMODE, IEEE80211_QTN_BRIDGEMODE,
+			vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H, 0);
+
+	if (ieee80211_tdls_add_tlv_downstream_clients(peer_ni, &frm, frm_len -
+			(frm - skb->data))) {
+		goto error;
+	}
+
+	skb_trim(skb, frm - skb->data);
+
+	vap->iv_stats.is_tx_tdls++;
+	IEEE80211_NODE_STAT(peer_ni, tx_tdls_action);
+
+	/* Under TDLS-over-QHop, address the frame with the Link ID's BSSID */
+	bssid = peer_ni->ni_bssid;
+	if (ieee80211_tdls_over_qhop_enabled(vap) && link_id)
+		bssid = link_id->bssid;
+
+	ieee80211_tdls_mgmt_output(peer_ni, skb,
+		IEEE80211_FC0_TYPE_MGT, IEEE80211_FC0_SUBTYPE_ACTION, peer_ni->ni_macaddr, bssid);
+
+	return 0;
+
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+	/* NOTE(review): unlike the other senders, the error path drops the
+	 * caller's peer_ni reference — callers must not free it again. */
+	ieee80211_free_node(peer_ni);
+
+	return 1;
+}
+
+/*
+ * Send a Discovery Request
+ * Returns 0 if sent successfully, else 1.
+ * With data == NULL this is a driver-initiated (typically broadcast)
+ * discovery using a locally generated dialog token; otherwise the
+ * caller-supplied token and extra IEs are used.
+ */
+static int
+ieee80211_tdls_send_disc_req(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	/* NOTE(review): function-static token is shared across all VAPs and
+	 * not serialized — confirm callers hold a common lock. */
+	static uint8_t dtoken = 0;
+	uint8_t action = IEEE80211_ACTION_TDLS_DISC_REQ;
+	struct sk_buff *skb;
+	uint8_t *frm = NULL;
+	uint16_t frm_len = IEEE80211_TDLS_FRAME_MAX;
+
+	if (!peer_ni) {
+		printk(KERN_WARNING "%s: Invalid peer node\n", __func__);
+		return 1;
+	}
+
+	vap = peer_ni->ni_vap;
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: tdls function prohibited, don't send discover request\n", __func__);
+		return 1;
+	}
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: send DISC REQUEST frame to %pM\n", __func__, peer_ni->ni_macaddr);
+
+	/* A request aimed at the BSS node is a broadcast probe: no peer encap */
+	if (peer_ni == vap->iv_bss)
+		skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 0);
+	else
+		skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: can't alloc space for disc request frame", __func__);
+		goto error;
+	}
+
+	if (!data) {
+		/* Driver-initiated: use the local rolling dialog token */
+		*frm = dtoken++;
+		frm += sizeof(dtoken);
+
+		/* Link ID carries the broadcast address as the peer */
+		if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action,
+				&frm, vap->iv_dev->broadcast, data)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"%s: Failed to add link id for bcast", __func__);
+			goto error;
+		}
+	} else {
+		/* Caller-initiated: use the supplied token and pass-through IEs */
+		*frm = data->dtoken;
+		frm += sizeof(data->dtoken);
+
+		memcpy(frm, data->ie_buf, le32toh(data->ie_buflen));
+		frm += le32toh(data->ie_buflen);
+
+		if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action,
+				&frm, peer_ni->ni_macaddr, data)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"%s: Failed to add link id", __func__);
+			goto error;
+		}
+	}
+
+	if (ieee80211_tdls_add_tlv_rates(peer_ni, &frm))
+		goto error;
+
+	if (ieee80211_tdls_add_tlv_xrates(peer_ni, &frm))
+		goto error;
+
+	if (ieee80211_tdls_add_tlv_ht_cap(peer_ni, &frm))
+		goto error;
+
+	/* Quantenna vendor IE + bridge client list */
+	frm = ieee80211_add_qtn_ie(frm, peer_ni->ni_ic,
+			IEEE80211_QTN_BRIDGEMODE, IEEE80211_QTN_BRIDGEMODE,
+			vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H, 0);
+
+	if (ieee80211_tdls_add_tlv_downstream_clients(peer_ni, &frm, frm_len -
+			(frm - skb->data))) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: Failed to add DS clients", __func__);
+		goto error;
+	}
+
+	skb_trim(skb, frm - skb->data);
+	ieee80211_tdls_send_frame(peer_ni, skb);
+
+	return 0;
+
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+	return 1;
+}
+
+/*
+ * Send a TDLS Teardown frame carrying the LE status (reason) code.
+ * Returns 0 if queued, else 1.
+ * The frame is duplicated and sent on both the direct TDLS link and
+ * via the AP so the peer is torn down even if one path is dead.
+ */
+static int
+ieee80211_tdls_send_teardown(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	uint8_t action = IEEE80211_ACTION_TDLS_TEARDOWN;
+	struct sk_buff *skb = NULL;
+	struct sk_buff *skb2 = NULL;
+	uint8_t *frm = NULL;
+	uint16_t frm_len = IEEE80211_TDLS_FRAME_MAX;
+
+	if (!peer_ni || !data)
+		return 1;
+
+	vap = peer_ni->ni_vap;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: send teardown frame to %pM\n", __func__, peer_ni->ni_macaddr);
+
+	skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb)
+		goto error;
+
+	/* Fixed field: reason/status code as supplied (little-endian) */
+	memcpy(frm, &data->status, sizeof(data->status));
+	frm += sizeof(data->status);
+
+	if (ieee80211_tdls_get_privacy(vap)) {
+		if (ieee80211_tdls_add_tlv_ftie(peer_ni, &frm, data))
+			goto error;
+	}
+
+	if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action, &frm,
+					   peer_ni->ni_macaddr, data))
+		goto error;
+
+	frm = ieee80211_add_qtn_ie(frm, peer_ni->ni_ic,
+			IEEE80211_QTN_BRIDGEMODE, IEEE80211_QTN_BRIDGEMODE,
+			vap->iv_implicit_ba, IEEE80211_DEFAULT_BA_WINSIZE_H, 0);
+
+	if (ieee80211_tdls_add_tlv_downstream_clients(peer_ni, &frm, frm_len -
+			(frm - skb->data))) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: Failed to add DS clients", __func__);
+		goto error;
+	}
+
+	/*
+	 * Send teardown frame via AP and TDLS link simultaneously to
+	 * avoid peer fails to receive it.
+	 */
+	skb_trim(skb, frm - skb->data);
+	/* If the atomic copy fails, only the direct-link transmission happens */
+	skb2 = skb_copy(skb, GFP_ATOMIC);
+
+	ieee80211_tdls_send_frame_over_tdls(peer_ni, skb);
+	if (skb2)
+		ieee80211_tdls_send_frame(peer_ni, skb2);
+
+	return 0;
+
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+
+	return 1;
+}
+
+/*
+ * Send a Peer Traffic Indication Request
+ * Returns 0 if sent successfully, else 1.
+ * The buffered-traffic indicator (PTI) and PTI control value are read
+ * from the lower layer via ic_get_tdls_param(); nothing is sent when
+ * no traffic is buffered (pti == 0).
+ */
+static int
+ieee80211_tdls_send_pti_req(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap= peer_ni->ni_vap;
+	/* NOTE(review): function-static token shared across VAPs, unserialized */
+	static uint8_t dtoken = 0;
+	uint8_t action = IEEE80211_ACTION_TDLS_PTI;
+	struct sk_buff *skb = NULL;
+	uint8_t *frm = NULL;
+	uint32_t pti = 0;
+	uint32_t pti_ctrl = 0;
+
+	/* Although it's unlikely, pti maybe 0 */
+	pti = vap->iv_ic->ic_get_tdls_param(peer_ni, IOCTL_TDLS_PTI);
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: ni=%p ni_ref=%d vap=%p iv_myaddr=%pM\n ni_bssid=%pM"
+		" traffic indicator 0x%x\n", __func__, peer_ni,
+		ieee80211_node_refcnt(peer_ni), vap,
+		vap->iv_myaddr, peer_ni->ni_bssid, pti);
+
+	if (pti == 0)
+		goto error;
+
+	skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: can't alloc space for pti request frame", __func__);
+		goto error;
+	}
+
+	/* Dialog token: caller-supplied if available, else local rolling token */
+	if (!data) {
+		*frm = dtoken++;
+		frm += sizeof(dtoken);
+	} else {
+		*frm = data->dtoken;
+		frm += sizeof(data->dtoken);
+	}
+
+	if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action,
+			&frm, peer_ni->ni_macaddr, data)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"%s: Failed to add link id", __func__);
+		goto error;
+	}
+
+	pti_ctrl = vap->iv_ic->ic_get_tdls_param(peer_ni, IOCTL_TDLS_PTI_CTRL);
+
+	/* PTI Control IE: 3-byte value, top byte then low 16 bits little-endian.
+	 * NOTE(review): htole16() applied to a 32-bit value then masked —
+	 * confirm the intended byte order on big-endian builds. */
+	*frm++ = IEEE80211_ELEMID_TDLS_PTI_CTRL;
+	*frm++ = 3;
+	*frm++ = (pti_ctrl >> 16) & 0xFF;
+	*((uint16_t*)frm) = htole16(pti_ctrl) & 0xFFFF;
+	frm += 2;
+
+	/* PU Buffer Status IE: low 4 bits are the per-AC buffered indicator */
+	*frm++ = IEEE80211_ELEMID_TDLS_PU_BUF_STAT;
+	*frm++ = 1;
+	*frm++ = (pti & 0xF);
+
+	skb_trim(skb, frm - skb->data);
+	ieee80211_tdls_send_frame(peer_ni, skb);
+	/* Tell the lower layer a PTI exchange is now outstanding */
+	vap->iv_ic->ic_set_tdls_param(peer_ni, IOCTL_TDLS_PTI_PENDING, 1);
+
+	return 0;
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+	return 1;
+}
+
+/*
+ * Determine the off-channel and bandwidth to use for a TDLS channel
+ * switch.  Uses the operator-fixed off-channel if configured, otherwise
+ * auto-selects one; bandwidth is capped by any fixed off-channel BW.
+ * Results are returned through *tar_chan / *tar_bw.
+ * Returns 0 on success, 1 if the candidate channel does not exist.
+ */
+static int
+ieee80211_tdls_get_off_chan_and_bw(struct ieee80211vap *vap,
+		struct ieee80211_node *ni, uint8_t *tar_chan, uint8_t *tar_bw)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	int bw_tmp = BW_INVALID;
+	int chan_tmp = 0;
+
+	/* Fixed channel takes priority over automatic selection */
+	if (vap->tdls_fixed_off_chan == TDLS_INVALID_CHANNEL_NUM)
+		chan_tmp = ieee80211_tdls_select_target_channel(vap);
+	else
+		chan_tmp = vap->tdls_fixed_off_chan;
+
+	chan = ic->ic_findchannel(ic, chan_tmp, IEEE80211_MODE_AUTO);
+	if (!chan)
+		return 1;
+
+	/* Start from the widest BW both sides support on that channel ... */
+	bw_tmp = ieee80211_get_max_bw(vap, ni, chan_tmp);
+	/* ... and narrow it to any operator-fixed off-channel bandwidth */
+	if (vap->tdls_fixed_off_chan_bw != BW_INVALID)
+		bw_tmp = MIN(vap->tdls_fixed_off_chan_bw, bw_tmp);
+
+	*tar_chan = chan_tmp;
+	*tar_bw = bw_tmp;
+
+	return 0;
+}
+
+/*
+ * Send a TDLS Channel Switch Request to peer_ni.
+ * Picks the target off-channel/bandwidth, stores them in the VAP and
+ * builds the request (target channel, regulatory class, secondary
+ * channel offset, Link ID, CS timing, wide-BW CS and VHT TX power IEs).
+ * Sent directly over the TDLS link.  Returns 0 if queued, else 1.
+ */
+int
+ieee80211_tdls_send_chan_switch_req(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+	uint8_t action = IEEE80211_ACTION_TDLS_CS_REQ;
+	struct sk_buff *skb;
+	uint8_t *frm = NULL;
+	uint8_t reg_class = 0;
+
+	if (!peer_ni)
+		return 1;
+
+	vap = peer_ni->ni_vap;
+	ic = vap->iv_ic;
+
+	/* Guard checks: TDLS enabled, CS allowed, not passive, no CS already running */
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS: Don't send channel switch req to peer %pM since tdls prohibited\n",
+			peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS: Don't send channel switch req to peer %pM since channel switch"
+			" prohibited\n", peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PASSIVE) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS: Don't send channel switch req to peer %pM since passive mode\n",
+			peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	if (vap->tdls_chan_switching == 1) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS: Don't send channel switch req to peer %pM since channel switch"
+			" in progress\n", peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	/* Peer's own CS request is still being processed — don't collide */
+	if (peer_ni->tdls_no_send_cs_resp == 1) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS: Don't send channel switch req to peer %pM since channel switch"
+			" request processing\n", peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to alloc skb\n", __func__);
+		goto error;
+	}
+
+	/* Resolve the target off-channel + BW into the VAP's CS state */
+	if (ieee80211_tdls_get_off_chan_and_bw(vap, peer_ni,
+				&vap->tdls_target_chan, &vap->tdls_off_chan_bw)) {
+		if (vap->tdls_target_chan == 0)
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: Skip channel switch since current channel is best\n", __func__);
+		else
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: Fail to get target channel and bandwidth\n", __func__);
+		goto error;
+	}
+
+	if (vap->tdls_target_chan == ic->ic_curchan->ic_ieee) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Skip channel switch since off channel is equal with"
+			" current channel\n", __func__);
+		goto error;
+	}
+
+	reg_class = ieee80211_get_current_operating_class(ic->ic_country_code,
+				vap->tdls_target_chan, vap->tdls_off_chan_bw);
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS: send channel switch frame to %pM, tar_chan: %d, BW: %d, reg_class:%d\n",
+		peer_ni->ni_macaddr, vap->tdls_target_chan, vap->tdls_off_chan_bw, reg_class);
+
+	/* Fixed fields: target channel number then regulatory class */
+	*frm = vap->tdls_target_chan;
+	frm += sizeof(vap->tdls_target_chan);
+	*frm = reg_class;
+	frm += sizeof(reg_class);
+	if (ieee80211_tdls_add_tlv_2nd_chan_off(peer_ni, &frm)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to add second channel offset IE for CS request\n", __func__);
+		goto error;
+	}
+	if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action, &frm,
+					   peer_ni->ni_macaddr, data)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to add link ID IE for CS request\n", __func__);
+		goto error;
+	}
+	if (ieee80211_tdls_add_tlv_cs_timimg(peer_ni, &frm, data)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to add cs_timing IE for CS request\n", __func__);
+		goto error;
+	}
+	if (ieee80211_tdls_add_tlv_wide_bw_cs(peer_ni, &frm)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to add wide_bw_cs IE for CS request\n", __func__);
+		goto error;
+	}
+	if (ieee80211_tdls_add_tlv_vht_tx_power_evlope(peer_ni, &frm)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to add vht_tx_power_envlope IE for CS request\n", __func__);
+		goto error;
+	}
+
+	/* CS frames go only over the direct link, never via the AP */
+	skb_trim(skb, frm - skb->data);
+	ieee80211_tdls_send_frame_over_tdls(peer_ni, skb);
+
+	/* Mark an outstanding CS request toward this peer */
+	peer_ni->tdls_send_cs_req = 1;
+
+	return 0;
+
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+
+	return 1;
+}
+
+/*
+ * Send a TDLS Channel Switch Response.
+ * data->dtoken carries the target channel and data->status the result;
+ * the peer's CS Timing IE is located in data->ie_buf.  On an accepting
+ * response (status 0, no switch already running) the station enters
+ * powersave toward the AP and schedules the off-channel period.
+ * Returns 0 if queued, else 1.
+ */
+int
+ieee80211_tdls_send_chan_switch_resp(struct ieee80211_node *peer_ni,
+		struct ieee80211_tdls_action_data *data)
+{
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+	uint8_t action = IEEE80211_ACTION_TDLS_CS_RESP;
+	struct ieee80211_tdls_cs_timing *cs_timing = NULL;
+	struct sk_buff *skb;
+	uint8_t *frm = NULL;
+	uint16_t status;
+	uint8_t tar_chan;
+	uint64_t cur_tsf;
+	uint64_t start_tsf;
+	uint64_t tbtt;
+	uint32_t duration = 0;
+	uint32_t timeout = 0;
+	int chan_switch;
+
+	if ((!peer_ni) || (!data))
+		return 1;
+
+	vap = peer_ni->ni_vap;
+	ic = vap->iv_ic;
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS: Don't send channel switch resp to peer %pM since tdls prohibited\n",
+			peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS: Don't send channel switch resp to peer %pM since channel switch"
+			" prohibited\n", peer_ni->ni_macaddr);
+		return 1;
+	}
+
+	/* dtoken is repurposed here to carry the requested target channel */
+	tar_chan = data->dtoken;
+	status = data->status;
+	/* The request's CS Timing IE must be present in the pass-through IEs */
+	cs_timing = (struct ieee80211_tdls_cs_timing *)
+			ieee80211_tdls_find_cs_timing(data->ie_buf, data->ie_buflen);
+	if (!cs_timing)
+		return 1;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS: send channel switch response frame to %pM\n", peer_ni->ni_macaddr);
+
+	skb = ieee80211_tdls_init_frame(peer_ni, &frm, action, 1);
+	if (!skb) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to alloc skb\n", __func__);
+		goto error;
+	}
+
+	/* NOTE(review): other paths treat data->status as already little-endian
+	 * (see le16toh() in the setup senders); cpu_to_le16() here would
+	 * double-convert on big-endian — confirm intended byte order. */
+	*((uint16_t *)frm) = cpu_to_le16(status);
+	frm += sizeof(status);
+	if (ieee80211_tdls_add_tlv_link_id(peer_ni, skb, action, &frm,
+						   peer_ni->ni_macaddr, data)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to add link ID IE for channel switch resp\n", __func__);
+			goto error;
+	}
+	if (ieee80211_tdls_copy_cs_timing(peer_ni, &frm, data)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Fail to add cs timing IE for channel switch resp\n", __func__);
+			goto error;
+	}
+
+	/* Accepting response and no switch currently in progress */
+	chan_switch = (!status && !vap->tdls_chan_switching);
+
+	/* Tell the AP we are asleep before leaving the base channel */
+	if (chan_switch)
+		ieee80211_sta_pwrsave(vap, 1);
+
+	skb_trim(skb, frm - skb->data);
+	ieee80211_tdls_send_frame_over_tdls(peer_ni, skb);
+
+	/* The peer's request has now been answered */
+	peer_ni->tdls_no_send_cs_resp = 0;
+
+	if (chan_switch) {
+		ic->ic_get_tsf(&cur_tsf);
+		vap->tdls_target_chan = tar_chan;
+		vap->tdls_cs_time = cs_timing->switch_time;
+		vap->tdls_cs_timeout = cs_timing->switch_timeout;
+
+		/* Schedule the off-channel window from the peer's CS timing,
+		 * trimmed so we are back before the next DTIM TBTT */
+		tbtt = vap->iv_bss->ni_shared_stats->dtim_tbtt;
+		start_tsf = cur_tsf + cs_timing->switch_time
+					- DEFAULT_TDLS_CH_SW_PROC_TIME;
+		timeout = cs_timing->switch_timeout - cs_timing->switch_time
+					+ DEFAULT_TDLS_CH_SW_PROC_TIME;
+		if (tbtt > (start_tsf + DEFAULT_TDLS_CH_SW_OC_MARGIN))
+			duration = (uint32_t)(tbtt - start_tsf - DEFAULT_TDLS_CH_SW_OC_MARGIN);
+		duration = MAX(duration, cs_timing->switch_timeout - cs_timing->switch_time);
+		vap->tdls_cs_duration = duration;
+
+		/* If we cannot go off-channel, undo the powersave indication */
+		if (ieee80211_tdls_remain_on_channel(vap, peer_ni, tar_chan, vap->tdls_off_chan_bw,
+				start_tsf, timeout, vap->tdls_cs_duration) != 0) {
+			ieee80211_sta_pwrsave(vap, 0);
+		}
+	}
+
+	return 0;
+
+error:
+	if (skb)
+		dev_kfree_skb(skb);
+
+	return 1;
+}
+
+/*
+ * Check whether this VAP may take part in TDLS right now.
+ * Returns 0 if TDLS frames may be processed/sent, 1 otherwise
+ * (VAP not in RUN state, or the security mode disallows TDLS).
+ */
+int
+ieee80211_tdls_validate_vap_state(struct ieee80211vap *vap)
+{
+	if (vap->iv_state != IEEE80211_S_RUN) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: not in run state\n", __func__);
+		return 1;
+	}
+
+	/* TDLS is not allowed when disabled by the AP */
+	if (!ieee80211_tdls_sec_mode_valid(vap)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: ignore TDLS because of TKIP\n", __func__);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate an inbound TDLS frame's context.
+ * Currently only checks the VAP state; the tdls parameter is unused
+ * here (presumably reserved for per-frame validation — TODO confirm).
+ * Returns 0 if acceptable, 1 otherwise.
+ */
+int
+ieee80211_tdls_validate_params(struct ieee80211_node *ni, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (ieee80211_tdls_validate_vap_state(vap) != 0)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Parse a peer's Quantenna TDLS-clients vendor IE (ie_buf, may be NULL)
+ * and add each unicast downstream client MAC to the bridge table,
+ * associating it with the peer's node index.
+ */
+static void
+ieee80211_tdls_add_clients_to_bridge(struct ieee80211_node *ni,
+					struct ieee80211_node *peer_ni, uint8_t *ie_buf)
+{
+	struct ieee80211_ie_qtn_tdls_clients *clients =
+			(struct ieee80211_ie_qtn_tdls_clients *)ie_buf;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (clients != NULL) {
+		uint8_t i;
+
+		/* Validate qtn_ie_mac_cnt */
+		/* The MAC list must account for exactly qtn_ie_len minus the
+		 * 5 fixed header bytes; reject inconsistent IEs from the peer */
+		if ((clients->qtn_ie_mac_cnt * IEEE80211_ADDR_LEN) !=
+				(clients->qtn_ie_len - 5)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_ELEMID,
+					"[%s] Bad qtn_ie_mac_cnt in TDLS client list\n",
+					vap->iv_dev->name);
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS error %s: Received TDLS clients "
+					"with bad qtn_ie_mac_cnt\n", __func__);
+			return;
+		}
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS number of clients = %d\n", clients->qtn_ie_mac_cnt);
+		for (i = 0; i < clients->qtn_ie_mac_cnt; i++) {
+			uint8_t *m = &clients->qtn_ie_mac[i * IEEE80211_ADDR_LEN];
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS client: %pM ncidx 0x%x\n", m, peer_ni->ni_node_idx);
+
+			/* Multicast/broadcast entries never go in the bridge table */
+			if (!is_multicast_ether_addr(m)) {
+				ieee80211_tdls_add_bridge_entry(peer_ni, m,
+							peer_ni->ni_node_idx);
+			}
+		}
+	}
+}
+
+/*
+ * Process a Discovery Response (Public Action frame)
+ * Updates the peer's RSSI/last-seen state, caches its bridge-client IE,
+ * and either refreshes the bridge table (link already active) or asks
+ * userspace to start setup if the link quality justifies it.
+ */
+void
+ieee80211_tdls_recv_disc_resp(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_node *peer_ni;
+	enum ieee80211_tdls_operation operation;
+
+	if (!tdls || (ieee80211_tdls_validate_params(ni, tdls) != 0))
+		return;
+
+	tdls->act = IEEE80211_ACTION_PUB_TDLS_DISC_RESP;
+
+	/*
+	 * A discovery response may be unsolicited.  Find or create the peer node.
+	 */
+	peer_ni = ieee80211_tdls_find_or_create_peer(ni, tdls->sa, tdls);
+	if (peer_ni == NULL)
+		return;
+
+	peer_ni->ni_rssi = rssi;
+	peer_ni->tdls_last_seen = jiffies;
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS: received DISC RESP from peer %pM at %u, rssi=%d\n",
+		peer_ni->ni_macaddr, peer_ni->tdls_last_seen, rssi);
+
+	/* Cache the peer's bridge-MAC vendor IE (length byte + 2 header bytes) */
+	if (tdls->qtn_brmacs != NULL) {
+		if (peer_ni->ni_qtn_brmacs == NULL)
+			MALLOC(peer_ni->ni_qtn_brmacs, uint8_t *,
+				IEEE80211_MAX_IE_LEN, M_DEVBUF, M_WAITOK);
+		if (peer_ni->ni_qtn_brmacs != NULL)
+			memcpy(peer_ni->ni_qtn_brmacs, tdls->qtn_brmacs,
+				tdls->qtn_brmacs[1] + 2);
+	}
+
+	/* FIXME What if this node thinks we are active but the other node doesn't think so? */
+	if (IEEE80211_NODE_IS_TDLS_ACTIVE(peer_ni)) {
+		/* Extract downstream mac addresses */
+		ieee80211_tdls_add_clients_to_bridge(ni, peer_ni, tdls->qtn_brmacs);
+	} else if (ieee80211_tdls_link_should_setup(vap, peer_ni)) {
+		/* Hand off to userspace (wpa_supplicant) to start TDLS setup */
+		operation = IEEE80211_TDLS_SETUP;
+		if (ieee80211_tdls_send_event(peer_ni, IEEE80211_EVENT_TDLS, &operation))
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: Send event %d failed\n", __func__, operation);
+	}
+
+	/* Release the reference taken by find_or_create_peer */
+	ieee80211_free_node(peer_ni);
+}
+
+/*
+ * Process a Setup Request
+ * Creates/refreshes the peer node, marks the local side as responder,
+ * moves the peer into the STARTING state and records peer capabilities
+ * (vendor, AID, bridge clients, effective BSSID for TDLS-over-QHop).
+ */
+void
+ieee80211_tdls_recv_setup_req(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_node *peer_ni;
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,"%s",
+			"TDLS: received SETUP REQUEST, but tdls function prohibited, drop it\n");
+		return;
+	}
+
+	if (!tdls || (ieee80211_tdls_validate_params(ni, tdls) != 0))
+		return;
+
+	/* We need to do this in the absence of explicit discovery message. */
+	peer_ni = ieee80211_tdls_find_or_create_peer(ni, tdls->sa, tdls);
+	if (peer_ni) {
+		/* Receiving a request makes the local side the responder */
+		peer_ni->tdls_initiator = 0;
+		peer_ni->tdls_last_seen = jiffies;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS: received SETUP REQUEST from peer %pM at %u, rssi=%d\n",
+			peer_ni->ni_macaddr, peer_ni->tdls_last_seen, rssi);
+
+		peer_ni->tdls_setup_start = jiffies;
+		ieee80211_tdls_update_node_status(peer_ni,
+				IEEE80211_TDLS_NODE_STATUS_STARTING);
+
+		/* A Quantenna info IE marks the peer as a QTN device */
+		peer_ni->ni_vendor = PEER_VENDOR_NONE;
+		if (tdls->qtn_info != NULL)
+			peer_ni->ni_vendor = PEER_VENDOR_QTN;
+
+		if (tdls->aid != NULL)
+			peer_ni->tdls_peer_associd = le16toh(tdls->aid->aid);
+
+		/* Cache the peer's bridge-MAC vendor IE (length byte + 2 header bytes) */
+		if (tdls->qtn_brmacs != NULL) {
+			if (peer_ni->ni_qtn_brmacs == NULL)
+				MALLOC(peer_ni->ni_qtn_brmacs, uint8_t *,
+					IEEE80211_MAX_IE_LEN, M_DEVBUF, M_WAITOK);
+			if (peer_ni->ni_qtn_brmacs != NULL)
+				memcpy(peer_ni->ni_qtn_brmacs, tdls->qtn_brmacs,
+					tdls->qtn_brmacs[1] + 2);
+		}
+
+		/* Over QHop, adopt the Link ID's BSSID if it is allowed;
+		 * otherwise pin the peer to our own BSS's BSSID */
+		if (ieee80211_tdls_over_qhop_enabled(vap)) {
+			if (ieee80211_tdls_ext_bssid_allowed(vap, tdls->link_id->bssid) &&
+					!IEEE80211_ADDR_EQ(peer_ni->ni_bssid, tdls->link_id->bssid))
+				IEEE80211_ADDR_COPY(peer_ni->ni_bssid, tdls->link_id->bssid);
+		} else {
+			if (!IEEE80211_ADDR_EQ(peer_ni->ni_bssid, vap->iv_bss->ni_bssid))
+				IEEE80211_ADDR_COPY(peer_ni->ni_bssid, vap->iv_bss->ni_bssid);
+		}
+
+		/* Release the reference taken by find_or_create_peer */
+		ieee80211_free_node(peer_ni);
+	}
+}
+
+/*
+ * Process a Setup Response
+ */
+void
+ieee80211_tdls_recv_setup_resp(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211_node *peer_ni;
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (!tdls || (ieee80211_tdls_validate_params(ni, tdls) != 0))
+		return;
+
+	/* A setup response may be unsolicited.  Find or create the peer node. */
+	peer_ni = ieee80211_tdls_find_or_create_peer(ni, tdls->sa, tdls);
+	if (peer_ni != NULL) {
+		peer_ni->ni_rssi = rssi;
+		peer_ni->tdls_last_seen = jiffies;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS: received SETUP RESP from peer %pM at %u, rssi=%d\n",
+			peer_ni->ni_macaddr, peer_ni->tdls_last_seen, rssi);
+
+		peer_ni->ni_vendor = PEER_VENDOR_NONE;
+		if (tdls->qtn_info != NULL)
+			peer_ni->ni_vendor = PEER_VENDOR_QTN;
+
+		peer_ni->tdls_initiator = 1;
+
+		if (tdls->aid != NULL)
+			peer_ni->tdls_peer_associd = le16toh(tdls->aid->aid);
+
+		if (tdls->qtn_brmacs != NULL) {
+			if (peer_ni->ni_qtn_brmacs == NULL)
+				MALLOC(peer_ni->ni_qtn_brmacs, uint8_t *,
+					IEEE80211_MAX_IE_LEN, M_DEVBUF, M_WAITOK);
+			if (peer_ni->ni_qtn_brmacs != NULL)
+				memcpy(peer_ni->ni_qtn_brmacs, tdls->qtn_brmacs,
+					tdls->qtn_brmacs[1] + 2);
+		}
+
+		ieee80211_free_node(peer_ni);
+	}
+}
+
+/*
+ * Process a Discovery Request
+ */
+void
+ieee80211_tdls_recv_disc_req(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_node *peer_ni;
+
+	if (tdls == NULL) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Required TLV is missing\n", __func__);
+		return;
+	}
+
+	if (ieee80211_tdls_validate_params(ni, tdls) != 0)
+		return;
+
+	if (memcmp(ni->ni_vap->iv_myaddr, tdls->sa, IEEE80211_ADDR_LEN) == 0)
+		return;
+
+	/* when tdls function is prohibited, ignore discovery request */
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: receive discover request, but tdls function prohibited, drop it\n", __func__);
+		return;
+	}
+
+	/* Find or create the peer node */
+	peer_ni = ieee80211_tdls_find_or_create_peer(ni, tdls->sa, tdls);
+	if (peer_ni) {
+		peer_ni->tdls_last_seen = jiffies;
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS: received DISC REQUEST from peer %pM at %u, rssi=%d\n",
+			peer_ni->ni_macaddr, peer_ni->tdls_last_seen, rssi);
+
+		if (IEEE80211_NODE_IS_TDLS_ACTIVE(peer_ni)) {
+			ieee80211_tdls_add_clients_to_bridge(ni, peer_ni, tdls->qtn_brmacs);
+		} else if (tdls->qtn_brmacs != NULL) {
+			if (peer_ni->ni_qtn_brmacs == NULL)
+				MALLOC(peer_ni->ni_qtn_brmacs, uint8_t *,
+					IEEE80211_MAX_IE_LEN, M_DEVBUF, M_WAITOK);
+			if (peer_ni->ni_qtn_brmacs != NULL)
+				memcpy(peer_ni->ni_qtn_brmacs, tdls->qtn_brmacs,
+					tdls->qtn_brmacs[1] + 2);
+		}
+
+		ieee80211_free_node(peer_ni);
+	}
+}
+
+/*
+ * Process a channel switch Request
+ */
+void
+ieee80211_tdls_recv_chan_switch_req(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_node *peer_ni;
+	struct ieee80211_tdls_action_data *data;
+	uint16_t status = 0;
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: drop channel switch req since tdls prohibited\n", __func__);
+		return;
+	}
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: drop channel switch req since tdls chan switch prohibited\n", __func__);
+		return;
+	}
+
+	if (ieee80211_tdls_validate_params(ni, tdls) != 0)
+		return;
+
+	peer_ni = ieee80211_tdls_find_or_create_peer(ni, tdls->sa, tdls);
+	if (peer_ni) {
+		peer_ni->ni_rssi = rssi;
+		peer_ni->tdls_last_seen = jiffies;
+		peer_ni->tdls_no_send_cs_resp = 1;
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS: received CHAN SWITCH REQUEST from peer %pM at %u, rssi=%d\n",
+			peer_ni->ni_macaddr, peer_ni->tdls_last_seen, rssi);
+
+		MALLOC(data, struct ieee80211_tdls_action_data *,
+					sizeof(struct ieee80211_tdls_action_data) +
+					sizeof(struct ieee80211_tdls_cs_timing),
+					M_DEVBUF, M_WAITOK);
+
+		if (ieee80211_tdls_check_target_chan(peer_ni, tdls)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS: Reject channel switch req from peer: %pM due to invalid"
+					" target channel: %u\n", peer_ni->ni_macaddr, tdls->target_chan);
+			status = IEEE80211_STATUS_PEER_MECHANISM_REJECT;
+		}
+
+		if (ieee80211_tdls_check_reg_class(peer_ni, tdls)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS: Reject channel switch reqt from peer: %pM due to invalid"
+					" reglatory class: %d \n", peer_ni->ni_macaddr, tdls->reg_class);
+			status = IEEE80211_STATUS_PEER_MECHANISM_REJECT;
+		}
+
+		if (ieee80211_tdls_check_wide_bw_cs(peer_ni, tdls)) {
+			if (ieee80211_tdls_check_2nd_chan_off(peer_ni, tdls)) {
+				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS: Reject channel switch requset from peer: %pM due to invalid"
+					" second channel offset: %d \n", peer_ni->ni_macaddr, tdls->sec_chan_off[2]);
+				status = IEEE80211_STATUS_PEER_MECHANISM_REJECT;
+			}
+		}
+
+		if (ieee80211_tdls_check_link_id(peer_ni, tdls, IEEE80211_ACTION_TDLS_CS_REQ)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS: Reject channel switch requset from peer: %pM due to invalid"
+					" link id\n", peer_ni->ni_macaddr);
+			status = IEEE80211_STATUS_PEER_MECHANISM_REJECT;
+		}
+
+		if (ieee80211_tdls_check_chan_switch_timing(peer_ni, tdls)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS: Reject channel switch requset from peer: %pM due to invalid"
+					" switch timing parameter\n", peer_ni->ni_macaddr);
+			status = IEEE80211_STATUS_PEER_MECHANISM_REJECT;
+		}
+
+		memcpy(data->dest_mac, peer_ni->ni_macaddr, sizeof(data->dest_mac));
+		data->status = cpu_to_le16(status);
+		data->dtoken = tdls->target_chan;
+		data->ie_buflen = sizeof(struct ieee80211_tdls_cs_timing);
+		memcpy(data->ie_buf, tdls->cs_timing, data->ie_buflen);
+		ieee80211_tdls_send_chan_switch_resp(peer_ni, data);
+
+		FREE(data, M_DEVBUF);
+
+		ieee80211_free_node(peer_ni);
+	}
+}
+
+/*
+ * Process a channel switch Response
+ */
+void
+ieee80211_tdls_recv_chan_switch_resp(struct ieee80211_node *ni,
+	struct sk_buff *skb, int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_tdls_cs_timing *cs_timing =
+		(struct ieee80211_tdls_cs_timing *)tdls->cs_timing;
+	struct ieee80211_node *peer_ni;
+	int tar_chan;
+	int chan_bw;
+	uint64_t cur_tsf;
+	uint64_t start_tsf;
+	uint64_t tbtt;
+	uint32_t duration = 0;
+	uint32_t timeout = 0;
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: drop channel switch resp since tdls prohibited\n", __func__);
+		return;
+	}
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: drop channel switch resp since tdls chan switch"
+			" prohibited\n", __func__);
+		return;
+	}
+
+	if (ieee80211_tdls_validate_params(ni, tdls) != 0) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS %s: TDLS parameters verification fails\n", __func__);
+		return;
+	}
+
+	peer_ni = ieee80211_tdls_find_or_create_peer(ni, tdls->sa, tdls);
+	if (peer_ni != NULL) {
+		peer_ni->ni_rssi = rssi;
+		peer_ni->tdls_last_seen = jiffies;
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS: received CHAN SWITCH RESP from peer %pM at %u, rssi=%d\n",
+			peer_ni->ni_macaddr, peer_ni->tdls_last_seen, rssi);
+
+		if (tdls->status == 0) {
+			ic->ic_get_tsf(&cur_tsf);
+			/*
+			 * TDLS link should be returned back base channel
+			 * on reception unsolicited TDLS channel switch response
+			 */
+			if ((ni->tdls_send_cs_req == 0) &&
+				(vap->tdls_chan_switching == 1) &&
+				(vap->tdls_cs_time == cs_timing->switch_time) &&
+				(vap->tdls_cs_timeout == cs_timing->switch_timeout)) {
+				tar_chan = ic->ic_bsschan->ic_ieee;
+				chan_bw = ieee80211_get_bw(ic);
+			} else {
+				tar_chan = vap->tdls_target_chan;
+				chan_bw = vap->tdls_off_chan_bw;
+			}
+
+			vap->tdls_cs_time = cs_timing->switch_time;
+			vap->tdls_cs_timeout = cs_timing->switch_timeout;
+
+			tbtt = vap->iv_bss->ni_shared_stats->dtim_tbtt;
+			start_tsf = cur_tsf + cs_timing->switch_time
+						- DEFAULT_TDLS_CH_SW_PROC_TIME;
+			timeout = cs_timing->switch_timeout - cs_timing->switch_time
+						+ DEFAULT_TDLS_CH_SW_PROC_TIME;
+			if (tbtt > (start_tsf + DEFAULT_TDLS_CH_SW_OC_MARGIN))
+				duration = (uint32_t)(tbtt - start_tsf - DEFAULT_TDLS_CH_SW_OC_MARGIN);
+			duration = MAX(duration, cs_timing->switch_timeout - cs_timing->switch_time);
+			vap->tdls_cs_duration = duration;
+
+			ieee80211_sta_pwrsave(vap, 1);
+			if (ieee80211_tdls_remain_on_channel(vap, peer_ni, tar_chan,
+					chan_bw, start_tsf, timeout, vap->tdls_cs_duration) != 0) {
+				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS %s: Peer %pM channel switch fails\n", __func__, peer_ni->ni_macaddr);
+				ieee80211_sta_pwrsave(vap, 0);
+			}
+		} else {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS %s: Peer %pM rejects channel switch requset\n",
+					__func__, peer_ni->ni_macaddr);
+		}
+
+		peer_ni->tdls_send_cs_req = 0;
+		ieee80211_free_node(peer_ni);
+	}
+}
+
/*
 * Make TDLS link switch to base channel
 *
 * If a TDLS channel switch is in progress, schedule a remain-on-channel
 * back on the BSS channel and (in process context only) poll briefly for
 * completion.
 *
 * Returns:
 *   1 - returned to base channel
 *   0 - failed to return to base channel
 */
int
ieee80211_tdls_return_to_base_channel(struct ieee80211vap *vap, int ap_disassoc)
{
/* Per-poll sleep (ms for msleep) and maximum number of polls below. */
#define	IEEE80211_TDLS_RET_BASE_CHAN_WAIT_TIME	5
#define	IEEE80211_TDLS_RET_BASE_CHAN_WAIT_CYCL	10
	struct ieee80211com *ic = vap->iv_ic;
	int tar_chan = ic->ic_bsschan->ic_ieee;	/* base (BSS) channel number */
	int chan_bw = 0;
	uint8_t count = 0;
	uint64_t cur_tsf;
	uint64_t start_tsf;

	/* Already on the base channel (no switch in progress). */
	if ((vap->tdls_chan_switching == 0) ||
			(vap->tdls_cs_node == NULL))
		return 1;

	/* Remember that an AP disassociation is waiting on this return. */
	if (ap_disassoc)
		vap->tdls_cs_disassoc_pending = 1;

	IEEE80211_TDLS_DPRINTF(vap,
		IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
		"TDLS %s: TDLS link with peer %pM needs to return base channel,"
		" disassoc_peeding = %d\n", __func__,
		vap->tdls_cs_node->ni_macaddr, vap->tdls_cs_disassoc_pending);

	/* Schedule the switch-back shortly after the current TSF. */
	ic->ic_get_tsf(&cur_tsf);
	start_tsf = cur_tsf + DEFAULT_TDLS_CH_SW_PROC_TIME;
	ieee80211_tdls_remain_on_channel(vap, vap->tdls_cs_node, tar_chan, chan_bw,
			start_tsf, DEFAULT_TDLS_CH_SW_TIMEOUT, vap->tdls_cs_duration);

	/* msleep() may only be used in process context; in interrupt context
	 * we fall through and report the still-switching state as-is. */
	if (!in_interrupt()) {
		while (vap->tdls_chan_switching == 1) {
			msleep(IEEE80211_TDLS_RET_BASE_CHAN_WAIT_TIME);
			if (count++ > IEEE80211_TDLS_RET_BASE_CHAN_WAIT_CYCL) {
				IEEE80211_TDLS_DPRINTF(vap,
					IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
					"TDLS %s: TDLS link with peer %pM failed to return"
					" to base channel\n", __func__,
					vap->tdls_cs_node->ni_macaddr);
				break;
			}
		}
	}

	/* Success: clear any pending-disassoc marker set above. */
	if (vap->tdls_chan_switching == 0) {
		vap->tdls_cs_disassoc_pending = 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ieee80211_tdls_return_to_base_channel);
+
+/*
+ * Process a Setup Confirm
+ */
+void
+ieee80211_tdls_recv_setup_confirm(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211_node *peer_ni;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (!tdls || (ieee80211_tdls_validate_params(ni, tdls) != 0)) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS %s: TDLS parameters verification fails\n", __func__);
+		return;
+	}
+
+	peer_ni = ieee80211_tdls_find_or_create_peer(ni, tdls->sa, tdls);
+	if (peer_ni == NULL)
+		return;
+
+	peer_ni->tdls_initiator = 0;
+	peer_ni->tdls_last_seen = jiffies;
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS: received SETUP CONFIRM from peer %pM at %u, rssi=%d\n",
+		peer_ni->ni_macaddr, peer_ni->tdls_last_seen, rssi);
+
+	if (tdls->htinfo)
+		ieee80211_parse_htinfo(peer_ni, tdls->htinfo);
+
+	if (IS_IEEE80211_VHT_ENABLED(ic) && tdls->vhtop)
+		ieee80211_parse_vhtop(peer_ni, tdls->vhtop);
+
+	if (tdls->qtn_brmacs != NULL) {
+		if (peer_ni->ni_qtn_brmacs == NULL)
+			MALLOC(peer_ni->ni_qtn_brmacs, uint8_t *,
+				IEEE80211_MAX_IE_LEN, M_DEVBUF, M_WAITOK);
+		if (peer_ni->ni_qtn_brmacs != NULL)
+			memcpy(peer_ni->ni_qtn_brmacs, tdls->qtn_brmacs,
+			tdls->qtn_brmacs[1] + 2);
+	}
+
+	ieee80211_free_node(peer_ni);
+}
+
+void
+ieee80211_tdls_recv_teardown(struct ieee80211_node *ni, struct sk_buff *skb,
+	int rssi, struct ieee80211_tdls_params *tdls)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (ieee80211_tdls_validate_params(ni, tdls) != 0) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS %s: TDLS parameters verification fails\n", __func__);
+		return;
+	}
+
+	ni->tdls_last_seen = jiffies;
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS: received TEARDOWN from peer %pM at %u, rssi=%d\n", tdls->sa,
+		ni->tdls_last_seen, rssi);
+}
+
/*
 * Dispatch a TDLS action frame requested from above (e.g. via ioctl).
 * Resolves the destination to a node reference, then hands off to the
 * per-action send helper.  Returns the helper's result, or -1 on any
 * validation failure.
 */
int
ieee80211_tdls_send_action_frame(struct net_device *ndev,
		struct ieee80211_tdls_action_data *data)
{
	struct ieee80211vap *vap = netdev_priv(ndev);
	struct ieee80211_node *ni = vap->iv_bss;
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *peer_ni;
	int ret = 0;

	if (data == NULL) {
		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
			"TDLS %s: Action data is NULL\n", __func__);
		return -1;
	}

	/* TDLS is only meaningful on a station VAP. */
	if (vap->iv_opmode != IEEE80211_M_STA) {
		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
			"TDLS %s: vap is not in STA mode\n", __func__);
		return -1;
	}

	if (ieee80211_tdls_validate_vap_state(vap) != 0) {
		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
			"TDLS %s: vap is not in correct state\n", __func__);
		return -1;
	}

	/* Never address a TDLS action frame to our own AP. */
	if (IEEE80211_ADDR_EQ(vap->iv_bss->ni_macaddr, data->dest_mac)) {
		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
			"TDLS %s: Should not send to BSS\n", __func__);
		return -1;
	}

	if (is_multicast_ether_addr(data->dest_mac)) {
		/* Only discovery requests may be broadcast; they are sent
		 * through the BSS node (extra reference taken here). */
		if (data->action == IEEE80211_ACTION_TDLS_DISC_REQ) {
			peer_ni = vap->iv_bss;
			ieee80211_ref_node(peer_ni);
		} else {
			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
				"TDLS %s: Dest address of %s action must be unicast\n",
				__func__, ieee80211_tdls_action_name_get(data->action));
			return -1;
		}
	} else {
		/* Teardown/PTI require an existing node; other actions may
		 * create the peer entry on demand. */
		if ((data->action == IEEE80211_ACTION_TDLS_TEARDOWN) ||
				(data->action == IEEE80211_ACTION_TDLS_PTI))
			peer_ni = ieee80211_find_node(&ic->ic_sta, data->dest_mac);
		else
			peer_ni = ieee80211_tdls_find_or_create_peer(ni, data->dest_mac, NULL);
		if (peer_ni == NULL) {
			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
				"TDLS %s: Peer node is not found\n", __func__);
			return -1;
		}
	}

	/* Each case drops the node reference taken above. */
	switch (data->action) {
	case IEEE80211_ACTION_TDLS_SETUP_REQ:
		ret = ieee80211_tdls_send_setup_req(peer_ni, data);
		ieee80211_free_node(peer_ni);
		break;
	case IEEE80211_ACTION_TDLS_SETUP_RESP:
		ret = ieee80211_tdls_send_setup_resp(peer_ni, data);
		ieee80211_free_node(peer_ni);
		break;
	case IEEE80211_ACTION_TDLS_SETUP_CONFIRM:
		ret = ieee80211_tdls_send_setup_confirm(peer_ni, data);
		ieee80211_free_node(peer_ni);
		break;
	case IEEE80211_ACTION_TDLS_TEARDOWN:
		ret = ieee80211_tdls_send_teardown(peer_ni, data);
		ieee80211_free_node(peer_ni);
		break;
	case IEEE80211_ACTION_TDLS_DISC_REQ:
		ret = ieee80211_tdls_send_disc_req(peer_ni, data);
		ieee80211_free_node(peer_ni);
		break;
	case IEEE80211_ACTION_PUB_TDLS_DISC_RESP:
		/* NOTE(review): unlike every other case, peer_ni is not freed
		 * here — presumably ieee80211_tdls_send_disc_resp() consumes
		 * the reference; confirm, otherwise this leaks a node ref. */
		ret = ieee80211_tdls_send_disc_resp(peer_ni, data);
		break;
	case IEEE80211_ACTION_TDLS_PTI:
		ret = ieee80211_tdls_send_pti_req(peer_ni, data);
		ieee80211_free_node(peer_ni);
		break;
	default:
		ieee80211_free_node(peer_ni);
		break;
	}

	return ret;
}
+
+/*
+ * Periodically send TDLS discovery requests
+ */
+void
+ieee80211_tdls_trigger_rate_detection(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: discovery timeout\n", __func__);
+
+	if (ieee80211_tdls_validate_vap_state(vap) != 0) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: not sending disc req while not associated\n", __func__);
+	} else {
+		ieee80211_tdls_send_disc_req(vap->iv_bss, NULL);
+		schedule_delayed_work(&vap->tdls_rate_detect_work,
+				DEFAULT_TDLS_RATE_DETECTION_WAITING_T * HZ);
+		mod_timer(&vap->tdls_rate_detect_timer,
+				jiffies + vap->tdls_discovery_interval * HZ);
+	}
+}
+
/*
 * Delayed-work bottom half of the discovery timer: start rate detection
 * (training) towards the AP and candidate TDLS peers, then schedule the
 * data-link-switch pass with a small random jitter.
 */
static void
ieee80211_tdls_bottom_half_rate_detetion(struct work_struct *work)
{
	struct delayed_work *dwork = (struct delayed_work *)work;
	struct ieee80211vap *vap =
			container_of(dwork, struct ieee80211vap, tdls_rate_detect_work);
	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
	struct ieee80211_node *ni;
	struct ieee80211_node *next;
	uint8_t random;	/* jitter source for the link-switch delay */
	int mu = STATS_SU;

	get_random_bytes(&random, sizeof(random));

	if (vap->tdls_path_sel_prohibited == 0) {
		/* Train towards the AP only when its traffic is light. */
		if (vap->iv_bss->ni_shared_stats->tx[mu].pkts_per_sec <
					vap->tdls_path_sel_pps_thrshld)
			ieee80211_tdls_add_rate_detection(vap->iv_bss);

		/* NOTE(review): unlike the other walkers in this file, this
		 * loop iterates ic_sta without IEEE80211_NODE_LOCK — confirm
		 * whether that is safe here. */
		TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) {
			/*
			 * Don't send training packets to 3rd part TDLS peer before
			 * TDLS peer is established since it could cause 3rd part TDLS
			 * peer send deauth frame.
			 */
			if (!ni->ni_qtn_assoc_ie &&
					!IEEE80211_NODE_IS_TDLS_ACTIVE(ni))
				continue;

			if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni)) {
				/* Active link we initiated: retrain only when idle. */
				if ((ni->tdls_initiator == 1) &&
					(ni->ni_shared_stats->tx[mu].pkts_per_sec <
						vap->tdls_path_sel_pps_thrshld))
					ieee80211_tdls_add_rate_detection(ni);
			} else if (!IEEE80211_NODE_IS_NONE_TDLS(ni)) {
				ieee80211_tdls_add_rate_detection(ni);
			}
		}

		/* Age the per-peer path-selection hold-off counters. */
		ieee80211_tdls_peer_ps_info_decre(vap);
	}

	/* Random 0-9 s jitter avoids synchronized switching across STAs. */
	schedule_delayed_work(&vap->tdls_link_switch_work,
			(DEFAULT_TDLS_RATE_DETECTION_WAITING_T + (random % 10)) * HZ);
}
+
/*
 * Delayed-work pass that decides, per peer, whether traffic should flow
 * over a direct TDLS link or via the AP, and emits SETUP/TEARDOWN events
 * accordingly.  Also expires setups stuck in the STARTING state.
 */
static void
ieee80211_tdls_data_link_switch(struct work_struct *work)
{
	struct delayed_work *dwork = (struct delayed_work *)work;
	struct ieee80211vap *vap =
			container_of(dwork, struct ieee80211vap, tdls_link_switch_work);
	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
	struct ieee80211_node *ni;
	enum ieee80211_tdls_operation operation;
	struct tdls_peer_ps_info *peer_ps_info = NULL;
	/* -1: no decision, 1: switch to TDLS, 0: switch back via the AP */
	int link_switch = -1;

	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
			"TDLS %s: Check if need to switch data link\n", __func__);

	IEEE80211_NODE_LOCK(nt);
	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
		/* Abort setups that have been pending longer than allowed. */
		if (IEEE80211_NODE_IS_TDLS_STARTING(ni) &&
				(time_after(jiffies, ni->tdls_setup_start +
					DEFAULT_TDLS_SETUP_EXPIRE_DURATION * HZ)))
			ieee80211_tdls_disable_peer_link(ni);

		if (IEEE80211_NODE_IS_NONE_TDLS(ni) ||
				IEEE80211_NODE_IS_TDLS_STARTING(ni))
			continue;

		if (vap->tdls_path_sel_prohibited == 0) {
			/* Honor the per-peer hold-off after a recent teardown. */
			peer_ps_info = ieee80211_tdls_find_or_create_peer_ps_info(vap, ni);
			if ((peer_ps_info) && (peer_ps_info->tdls_link_disabled_ints > 0)) {
				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
					"TDLS %s: Peer %pM status: %d, disabled_ints: %d\n",
					__func__, ni->ni_macaddr, ni->tdls_status,
					peer_ps_info->tdls_link_disabled_ints);
				continue;
			}

			/* Only the initiator side drives path selection. */
			if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni) &&
					(ni->tdls_initiator == 0)) {
				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
					"TDLS %s: Peer %pM status: %d, initiator: %d\n", __func__,
					ni->ni_macaddr, ni->tdls_status, ni->tdls_initiator);
				continue;
			}
		}

		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
			"TDLS %s: Peer %pM status: %d, rssi: %d, disabled_ints: %d\n", __func__,
			ni->ni_macaddr, ni->tdls_status, ieee80211_tdls_get_smoothed_rssi(vap, ni),
			(peer_ps_info == NULL) ? 0 : peer_ps_info->tdls_link_disabled_ints);

		link_switch = ieee80211_tdls_data_path_selection(vap, ni);
		if (link_switch == 1) {
			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
				"TDLS %s: Setting up TDLS link with peer %pM\n",
				__func__, ni->ni_macaddr);

			/* Fresh start: reset the teardown/hold-off bookkeeping. */
			if (peer_ps_info != NULL) {
				peer_ps_info->tdls_path_down_cnt = 0;
				peer_ps_info->tdls_link_disabled_ints = 0;

				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
					"TDLS %s: Clear path selection info, path_down_cnt = %d,"
					" link_disabled_ints = %d\n", __func__,
					peer_ps_info->tdls_path_down_cnt,
					peer_ps_info->tdls_link_disabled_ints);
			}

			if (!IEEE80211_NODE_IS_TDLS_ACTIVE(ni)) {
				operation = IEEE80211_TDLS_SETUP;
				ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation);
			}
		} else if (link_switch == 0) {
			if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni)) {
				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
					"TDLS %s: tearing down TDLS link with peer %pM\n",
					__func__, ni->ni_macaddr);

				/* Back off progressively longer after each teardown. */
				if (peer_ps_info != NULL) {
					peer_ps_info->tdls_path_down_cnt++;
					peer_ps_info->tdls_link_disabled_ints = DEFAULT_TDLS_LINK_DISABLE_SCALE *
									peer_ps_info->tdls_path_down_cnt;

					IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
							"TDLS %s: Set path selection info, path_down_cnt = %d,"
							" link_disabled_ints = %d\n", __func__,
							peer_ps_info->tdls_path_down_cnt,
							peer_ps_info->tdls_link_disabled_ints);
				}

				operation = IEEE80211_TDLS_TEARDOWN;
				ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation);
			}
		}
	}
	IEEE80211_NODE_UNLOCK(nt);
}
+
+/*
+ * Start or stop periodic TDLS discovery
+ *   Start broadcasting TDLS discovery frames every <value> seconds.
+ *   A value of 0 stops TDLS discovery.
+ *   Returns 0 if config applied, else 1.
+ */
+int
+ieee80211_tdls_cfg_disc_int(struct ieee80211vap *vap, int value)
+{
+	struct net_device *dev = vap->iv_dev;
+	unsigned int pre_disc_interval = 0;
+
+	if (vap->iv_opmode != IEEE80211_M_STA) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"%s: TDLS: discovery is only supported on STA nodes\n",
+					dev->name);
+			return -1;
+	}
+
+	/* TDLS recheck after assoc in case security mode changes */
+	if (!ieee80211_tdls_sec_mode_valid(vap)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"%s: TDLS: not allowed when using TKIP\n",
+					dev->name);
+			return -1;
+	}
+
+	if (value <= 0)
+		value = 0;
+
+	pre_disc_interval = vap->tdls_discovery_interval;
+	vap->tdls_discovery_interval = value;
+
+	if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0) {
+		if ((pre_disc_interval == 0) && (vap->tdls_discovery_interval > 0))
+			mod_timer(&vap->tdls_rate_detect_timer, jiffies + HZ);
+
+		if ((vap->tdls_discovery_interval == 0) && timer_pending(&vap->tdls_rate_detect_timer)) {
+			del_timer(&vap->tdls_rate_detect_timer);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+			cancel_delayed_work_sync(&vap->tdls_rate_detect_work);
+			cancel_delayed_work_sync(&vap->tdls_link_switch_work);
+#else
+			cancel_rearming_delayed_work(&vap->tdls_rate_detect_work);
+			cancel_rearming_delayed_work(&vap->tdls_link_switch_work);
+#endif
+			ieee80211_tdls_free_peer_ps_info(vap);
+		}
+	}
+
+	return 0;
+}
+
/*
 * Bring an established TDLS peer link fully up: install keys, authorize
 * the node, arm the inactivity timeout and plumb the peer's bridge
 * clients.  Always returns 0.
 */
int
ieee80211_tdls_enable_peer_link(struct ieee80211vap *vap, struct ieee80211_node *ni)
{
	struct ieee80211com *ic = vap->iv_ic;

	/* Notify the driver of the new association, when it has a hook. */
	if (ic->ic_newassoc != NULL)
		ic->ic_newassoc(ni, 0);

	/* Count the peer once only (flag set further below). */
	if (!(ni->ni_ext_flags & IEEE80211_NODE_TDLS_AUTH)) {
		ieee80211_sta_assocs_inc(vap, __func__);
		ieee80211_nonqtn_sta_join(vap, ni, __func__);
	}

	/* Quantenna peers with implicit block-ack get it set up here. */
	if (ni->ni_qtn_assoc_ie && ni->ni_implicit_ba_valid)
		ieee80211_node_implicit_ba_setup(ni);

	ieee80211_tdls_set_key(vap, ni);

	ieee80211_tdls_update_node_status(ni, IEEE80211_TDLS_NODE_STATUS_ACTIVE);
	_ieee80211_node_authorize(ni);
	ni->ni_ext_flags |= IEEE80211_NODE_TDLS_AUTH;
	ieee80211_tdls_set_link_timeout(vap, ni);

	/* Make the peer and its downstream clients reachable via the bridge. */
	ieee80211_tdls_add_bridge_entry_for_peer(ni);
	ieee80211_tdls_add_clients_to_bridge(vap->iv_bss, ni, ni->ni_qtn_brmacs);

	printk(KERN_INFO "%s: TDLS peer %s associated, tot=%u/%u\n",
		vap->iv_dev->name, ether_sprintf(ni->ni_macaddr),
		ic->ic_sta_assoc, ic->ic_nonqtn_sta);

	return 0;
}
+
/*
 * Tear down our side of a TDLS peer: clear the cached supported-channel
 * set, remove the pairwise key, mark the node inactive and hand it back
 * to the generic node-leave path.  Always returns 0.
 */
int
ieee80211_tdls_node_leave(struct ieee80211vap *vap, struct ieee80211_node *ni)
{
	ni->ni_chan_num = 0;
	memset(ni->ni_supp_chans, 0, sizeof(ni->ni_supp_chans));
	ieee80211_tdls_del_key(vap, ni);
	ieee80211_tdls_update_node_status(ni, IEEE80211_TDLS_NODE_STATUS_INACTIVE);

	/* Generic 802.11 node teardown (may unlink/free the node). */
	ieee80211_node_leave(ni);

	return 0;
}
+
+int
+ieee80211_tdls_teardown_all_link(struct ieee80211vap *vap)
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni;
+	enum ieee80211_tdls_operation operation;
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -1;
+
+	IEEE80211_NODE_LOCK(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (!IEEE80211_NODE_IS_NONE_TDLS(ni)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: tearing down TDLS link with peer %pM\n",__func__, ni->ni_macaddr);
+			operation = IEEE80211_TDLS_TEARDOWN;
+			ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation);
+		}
+	}
+	IEEE80211_NODE_UNLOCK(nt);
+
+	return 0;
+}
+
+int
+ieee80211_tdls_free_all_inactive_peers(struct ieee80211vap *vap)
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni, *next;
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -1;
+
+	IEEE80211_NODE_LOCK(nt);
+	TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) {
+		if (!ieee80211_node_is_running(ni)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+					"TDLS %s: free TDLS peer %pM\n", __func__, ni->ni_macaddr);
+			ieee80211_tdls_node_leave(vap, ni);
+		}
+	}
+	IEEE80211_NODE_UNLOCK(nt);
+
+	return 0;
+}
+
+int
+ieee80211_tdls_free_all_peers(struct ieee80211vap *vap)
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni, *next;
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -1;
+
+	IEEE80211_NODE_LOCK(nt);
+	TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) {
+		if (!IEEE80211_NODE_IS_NONE_TDLS(ni)) {
+			ieee80211_tdls_disable_peer_link(ni);
+
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+					"TDLS %s: free TDLS peer %pM\n", __func__, ni->ni_macaddr);
+			ieee80211_tdls_node_leave(vap, ni);
+		}
+	}
+	IEEE80211_NODE_UNLOCK(nt);
+
+	return 0;
+}
+
/*
 * One-time initialization of the periodic-discovery machinery: the
 * trigger timer plus the rate-detection and link-switch delayed work.
 * The timer itself is armed elsewhere.  Always returns 0.
 */
int
ieee80211_tdls_init_disc_timer(struct ieee80211vap *vap)
{
	init_timer(&vap->tdls_rate_detect_timer);
	vap->tdls_rate_detect_timer.function = ieee80211_tdls_trigger_rate_detection;
	vap->tdls_rate_detect_timer.data = (unsigned long) vap;
	INIT_DELAYED_WORK(&vap->tdls_rate_detect_work, ieee80211_tdls_bottom_half_rate_detetion);
	INIT_DELAYED_WORK(&vap->tdls_link_switch_work, ieee80211_tdls_data_link_switch);

	return 0;
}
+
/*
 * Stop periodic discovery: delete the trigger timer, flush both delayed
 * work items and drop the per-peer path-selection state.  Only acts when
 * discovery was configured.  Returns 0, or -1 on NULL vap.
 */
int
ieee80211_tdls_clear_disc_timer(struct ieee80211vap *vap)
{
	if (vap == NULL)
		return -1;

	if (vap->tdls_discovery_interval > 0) {
		del_timer(&vap->tdls_rate_detect_timer);
/* cancel_rearming_delayed_work() was removed in newer kernels. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
		cancel_delayed_work_sync(&vap->tdls_rate_detect_work);
		cancel_delayed_work_sync(&vap->tdls_link_switch_work);
#else
		cancel_rearming_delayed_work(&vap->tdls_rate_detect_work);
		cancel_rearming_delayed_work(&vap->tdls_link_switch_work);
#endif

		ieee80211_tdls_free_peer_ps_info(vap);
	}

	return 0;
}
+
+int
+ieee80211_tdls_start_disc_timer(struct ieee80211vap *vap)
+{
+	if (vap == NULL)
+		return -1;
+
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -1;
+
+	/* TDLS recheck after assoc in case security mode changes */
+	if (!ieee80211_tdls_sec_mode_valid(vap))
+		return -1;
+
+	if (vap->tdls_discovery_interval > 0)
+		mod_timer(&vap->tdls_rate_detect_timer, jiffies + HZ);
+
+	return 0;
+}
+
+void
+ieee80211_tdls_node_expire(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni;
+	int ni_expired;
+
+	IEEE80211_NODE_LOCK(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (IEEE80211_NODE_IS_NONE_TDLS(ni))
+		      continue;
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			 "TDLS: peer %pM, last_seen: %u, Now: %u\n",
+			 ni->ni_macaddr, ni->tdls_last_seen, jiffies);
+
+		if (IEEE80211_NODE_IS_TDLS_STARTING(ni) &&
+				(time_after(jiffies, ni->tdls_setup_start +
+					DEFAULT_TDLS_SETUP_EXPIRE_DURATION * HZ)))
+			ieee80211_tdls_disable_peer_link(ni);
+
+		if (IEEE80211_NODE_IS_TDLS_INACTIVE(ni) ||
+				IEEE80211_NODE_IS_TDLS_IDLE(ni)) {
+			ni_expired = time_after(jiffies,
+					ni->tdls_last_seen + vap->tdls_node_life_cycle * HZ);
+			if (ni_expired)
+				ieee80211_tdls_node_leave(vap, ni);
+		}
+	}
+	IEEE80211_NODE_UNLOCK(nt);
+
+	mod_timer(&vap->tdls_node_expire_timer,
+			jiffies + vap->tdls_node_life_cycle * HZ);
+}
+
+int
+ieee80211_tdls_start_node_expire_timer(struct ieee80211vap *vap)	/* (Re)start the peer-expiry timer; no-op when life cycle is 0. */
+{
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -1;
+
+	if (vap->tdls_node_life_cycle > 0) {
+		if (timer_pending(&vap->tdls_node_expire_timer))	/* NOTE(review): mod_timer() below already handles a pending timer; this del_timer() looks redundant */
+			del_timer(&vap->tdls_node_expire_timer);
+
+		mod_timer(&vap->tdls_node_expire_timer,
+			jiffies + vap->tdls_node_life_cycle * HZ);
+	}
+
+	return 0;
+}
+
+int
+ieee80211_tdls_init_node_expire_timer(struct ieee80211vap *vap)	/* One-time init of the expiry timer; starts it unless TDLS is disabled. */
+{
+	if (vap == NULL)
+		return -1;
+
+	init_timer(&vap->tdls_node_expire_timer);
+	vap->tdls_node_expire_timer.function = ieee80211_tdls_node_expire;
+	vap->tdls_node_expire_timer.data = (unsigned long) vap;	/* callback receives the vap */
+
+	if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0)
+		ieee80211_tdls_start_node_expire_timer(vap);
+
+	return 0;
+}
+
+int
+ieee80211_tdls_clear_node_expire_timer(struct ieee80211vap *vap)	/* Stop the peer-expiry timer; safe on a NULL vap. Always returns 0. */
+{
+	if (vap)
+		del_timer(&vap->tdls_node_expire_timer);
+
+	return 0;
+}
+
+void
+ieee80211_tdls_all_peer_disabled(unsigned long arg)	/* Timer callback: poll until no TDLS peer is ACTIVE, then run the deferred state change. */
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni;
+	int all_disabled = 1;
+
+	IEEE80211_NODE_LOCK(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni)) {	/* any still-active link keeps us waiting */
+			all_disabled = 0;
+			break;
+		}
+	}
+	IEEE80211_NODE_UNLOCK(nt);
+
+	if (all_disabled) {
+		del_timer(&vap->tdls_disassoc_timer);
+		ieee80211_new_state(vap, vap->tdls_pending_state,	/* perform the state change queued by pend_disassociation() */
+					vap->tdls_pending_arg);
+	} else {
+		mod_timer(&vap->tdls_disassoc_timer, jiffies + HZ / 2);	/* re-poll in 500 ms */
+	}
+}
+
+int
+ieee80211_tdls_init_disassoc_pending_timer(struct ieee80211vap *vap)	/* Init (not start) the deferred-disassociation poll timer. */
+{
+	if (vap == NULL)
+		return -1;
+
+	init_timer(&vap->tdls_disassoc_timer);
+	vap->tdls_disassoc_timer.function = ieee80211_tdls_all_peer_disabled;
+	vap->tdls_disassoc_timer.data = (unsigned long)vap;
+
+	return 0;
+}
+
+int
+ieee80211_tdls_pend_disassociation(struct ieee80211vap *vap,	/* Defer leaving RUN state while TDLS links are torn down; returns 1 if the transition was deferred. */
+	enum ieee80211_state nstate, int arg)
+{
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni;
+	int need_pending = 0;
+
+	if ((vap->iv_opmode == IEEE80211_M_STA) && (vap->iv_state == IEEE80211_S_RUN) &&	/* only when a running STA drops toward INIT/AUTH/ASSOC */
+			((nstate == IEEE80211_S_INIT) || (nstate == IEEE80211_S_AUTH) ||
+				(nstate == IEEE80211_S_ASSOC))) {
+		IEEE80211_NODE_LOCK(nt);
+		TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+			if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni)) {	/* at least one live TDLS link: must tear down first */
+				need_pending = 1;
+				break;
+			}
+		}
+		IEEE80211_NODE_UNLOCK(nt);
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: pend disassoication with AP, nstate: %d\n", __func__, nstate);
+
+		if (need_pending) {
+			ieee80211_tdls_teardown_all_link(vap);
+
+			vap->tdls_pending_state = nstate;	/* saved for ieee80211_tdls_all_peer_disabled() to apply */
+			vap->tdls_pending_arg = arg;
+			mod_timer(&vap->tdls_disassoc_timer, jiffies + HZ / 2);
+		}
+	}
+
+	return need_pending;
+}
+EXPORT_SYMBOL(ieee80211_tdls_pend_disassociation);
+
+int
+ieee80211_tdls_set_link_timeout(struct ieee80211vap *vap, struct ieee80211_node *ni)	/* Re-derive a peer's inactivity reload from tdls_timeout_time, preserving elapsed inactivity. */
+{
+	uint16_t elapsed_count = 0;
+
+	if ((vap == NULL) || (ni == NULL))
+		return -1;
+
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -1;
+
+	elapsed_count = ni->ni_inact_reload - ni->ni_inact;	/* inactivity ticks already consumed */
+	if ((vap->tdls_timeout_time % IEEE80211_INACT_WAIT) != 0)	/* round up to whole inactivity periods */
+		ni->ni_inact_reload = vap->tdls_timeout_time / IEEE80211_INACT_WAIT + 1;
+	else
+		ni->ni_inact_reload = vap->tdls_timeout_time / IEEE80211_INACT_WAIT;
+
+	if (ni->ni_inact_reload > elapsed_count)
+		ni->ni_inact = ni->ni_inact_reload - elapsed_count;
+	else
+		ni->ni_inact = IEEE80211_INACT_SEND_PKT_THRSH;	/* already past new budget: leave a minimal grace value */
+
+	return 0;
+}
+
+void
+ieee80211_tdls_update_node_status(struct ieee80211_node *ni, enum ni_tdls_status stats)	/* Set the node's TDLS status and push the change to the lower layer only when it differs. */
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (ni->tdls_status != stats) {
+		ni->tdls_status = stats;
+		vap->iv_ic->ic_set_tdls_param(ni, IOCTL_TDLS_STATUS, (int)ni->tdls_status);	/* notify driver/MuC of the new status */
+	}
+}
+
+int
+ieee80211_tdls_start_channel_switch(struct ieee80211vap *vap,	/* Initiate a TDLS channel switch toward peer_ni; returns 0 on request sent, non-zero on refusal/failure. */
+		struct ieee80211_node *peer_ni)
+{
+	int ret = 0;
+
+	if ((!vap) || (vap->iv_opmode != IEEE80211_M_STA))
+		return 1;
+
+	if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB) {	/* administratively prohibited */
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: Channel switch function has been prohibited\n", __func__);
+		return 1;
+	}
+
+	if (!ieee80211_tdls_channel_switch_allowed(vap)) {	/* requires being able to enter power save toward the AP */
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: Don't start channel switch due to "
+			"fail to enter power save state\n",__func__);
+		return 1;
+	}
+
+	ret = ieee80211_tdls_send_chan_switch_req(peer_ni, NULL);
+
+	return ret;
+}
+
+void
+ieee80211_tdls_vattach(struct ieee80211vap *vap)	/* Per-vap TDLS bring-up: timers and U-APSD indication window. */
+{
+	ieee80211_tdls_init_disc_timer(vap);
+	ieee80211_tdls_init_node_expire_timer(vap);
+	ieee80211_tdls_init_disassoc_pending_timer(vap);
+	ieee80211_tdls_update_uapsd_indicication_windows(vap);
+}
+
+void
+ieee80211_tdls_vdetach(struct ieee80211vap *vap)	/* Per-vap TDLS teardown: stop timers/work, free per-peer state. */
+{
+	del_timer(&vap->tdls_rate_detect_timer);
+	del_timer(&vap->tdls_node_expire_timer);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	cancel_delayed_work_sync(&vap->tdls_rate_detect_work);	/* wait for in-flight work to finish before freeing */
+	cancel_delayed_work_sync(&vap->tdls_link_switch_work);
+#else
+	cancel_rearming_delayed_work(&vap->tdls_rate_detect_work);	/* pre-4.7 equivalent for self-rearming work */
+	cancel_rearming_delayed_work(&vap->tdls_link_switch_work);
+#endif
+	del_timer(&vap->tdls_disassoc_timer);
+
+	ieee80211_tdls_free_peer_ps_info(vap);
+	ieee80211_tdls_free_all_peers(vap);
+}
+
+/* Update the TDLS link (inactivity) timeout for peers that have established a TDLS link with this station. */
+int ieee80211_tdls_update_link_timeout(struct ieee80211vap *vap)
+{
+	struct ieee80211_node_table *nt = NULL;
+	struct ieee80211_node *ni = NULL;
+	uint16_t elapsed_count = 0;
+
+	if ((NULL == vap) || (vap->iv_opmode != IEEE80211_M_STA))
+		return -1;
+
+	nt = &vap->iv_ic->ic_sta;
+
+	IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+		"TDLS %s: update link timeout time [%u]\n", __func__, vap->tdls_timeout_time);
+
+	IEEE80211_NODE_LOCK(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (IEEE80211_NODE_IS_TDLS_ACTIVE(ni) ||	/* only peers with an established or idle TDLS link */
+				IEEE80211_NODE_IS_TDLS_IDLE(ni)) {
+			elapsed_count = ni->ni_inact_reload - ni->ni_inact;	/* same recompute logic as ieee80211_tdls_set_link_timeout() */
+			if ((vap->tdls_timeout_time % IEEE80211_INACT_WAIT) != 0)
+				ni->ni_inact_reload = vap->tdls_timeout_time / IEEE80211_INACT_WAIT + 1;
+			else
+				ni->ni_inact_reload = vap->tdls_timeout_time / IEEE80211_INACT_WAIT;
+
+			if (ni->ni_inact_reload > elapsed_count)
+				ni->ni_inact = ni->ni_inact_reload - elapsed_count;
+			else
+				ni->ni_inact = IEEE80211_INACT_SEND_PKT_THRSH;
+		}
+	}
+	IEEE80211_NODE_UNLOCK(nt);
+
+	return 0;
+}
+
diff --git a/drivers/qtn/wlan/ieee80211_tpc.c b/drivers/qtn/wlan/ieee80211_tpc.c
new file mode 100644
index 0000000..cbe3174
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_tpc.c
@@ -0,0 +1,358 @@
+/*-
+ * Copyright (c) 2013 Quantenna
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieeee80211_tpc.c 5000 2013-01-25 10:22:59Z casper $
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "net80211/if_media.h"
+#include "net80211/ieee80211_var.h"
+#include "net80211/_ieee80211.h"
+#include "net80211/ieee80211_tpc.h"
+#include "net80211/ieee80211_linux.h"
+#include "net80211/ieee80211_proto.h"	/* IEEE80211_SEND_MGMT */
+
+void tpc_report_callback_success(void *ctx)	/* TPC request completion: log the peer's reported tx power and link margin. ctx is the requesting node. */
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	printk("TPC:tx power = %d, link margin = %d from node (%s)\n",
+			ni->ni_tpc_info.tpc_report.node_txpow,
+			ni->ni_tpc_info.tpc_report.node_link_margin,
+			ether_sprintf(ni->ni_macaddr));
+}
+
+void tpc_report_callback_fail(void *ctx, int32_t reason)	/* TPC request failure: log only; 'reason' is currently unused. */
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	printk("TPC:fail to get tpc report from node (%s)\n",
+			ether_sprintf(ni->ni_macaddr));
+}
+
+/* TPC_REQ_PERIOD: per-node iterator callback that sends a TPC Request action frame to eligible peers. */
+static void node_send_tpc_request(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211_action_data action_data;
+	struct ieee80211_action_tpc_request request;
+
+	request.expire = HZ / 10;	/* report callback expires after 100 ms */
+	request.fn_success = tpc_report_callback_success;
+	request.fn_fail = tpc_report_callback_fail;
+	action_data.cat	= IEEE80211_ACTION_CAT_SPEC_MGMT;
+	action_data.action = IEEE80211_ACTION_S_TPC_REQUEST;
+	action_data.params = &request;
+
+	switch (ni->ni_vap->iv_opmode) {
+	case IEEE80211_M_HOSTAP:
+		if ((ni->ni_vap->iv_state == IEEE80211_S_RUN) &&	/* AP: only associated, spectrum-mgmt + TPC capable stations */
+				(ni->ni_associd != 0) &&
+				(ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) &&
+				(ni->ni_flags & IEEE80211_NODE_TPC)) {
+			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+		}
+		break;
+	case IEEE80211_M_STA:
+		if ((ni->ni_vap->iv_state == IEEE80211_S_RUN) &&	/* STA: only toward its own BSS node */
+				(ni == ni->ni_vap->iv_bss) &&
+				(ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) &&
+				(ni->ni_flags & IEEE80211_NODE_TPC)) {
+			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static void tpc_query_timer(unsigned long data)	/* Periodic timer: query all nodes for TPC reports, then re-arm for the next interval. */
+{
+	struct ieee80211_tpc_query_info *info = (struct ieee80211_tpc_query_info *)data;
+	struct ieee80211com *ic = (struct ieee80211com *)info->target;
+
+	ieee80211_iterate_nodes(&ic->ic_sta, node_send_tpc_request, NULL, 1);
+	mod_timer(&info->query_timer, jiffies + info->query_interval * HZ);	/* self-rearming; interval in seconds */
+}
+
+int ieee80211_tpc_query_init(struct ieee80211_tpc_query_info *info, struct ieee80211com *ic, int query_interval)	/* Prepare (not start) periodic TPC query state; timer is armed by ieee80211_tpc_query_start(). */
+{
+	if ((info == NULL) || (ic == NULL) || (query_interval < TPC_INTERVAL_MIN))	/* reject missing state/device or a too-small interval */
+		return -EINVAL;
+
+	memset(info, 0, sizeof(*info));
+	info->target = (void *)ic;
+	info->query_interval = query_interval;	/* seconds between query rounds */
+	/* setup_timer() fully initializes the timer; the previous extra init_timer() call was redundant. */
+	setup_timer(&info->query_timer, tpc_query_timer, (unsigned long)info);
+
+	return 0;
+}
+
+void ieee80211_tpc_query_deinit(struct ieee80211_tpc_query_info *info)	/* Stop the query timer if running and wipe the state; NULL-safe. */
+{
+	if (NULL != info) {
+		if (info->is_run) {
+			del_timer(&info->query_timer);
+			info->is_run = 0;
+		}
+		memset(info, 0, sizeof(*info));
+	}
+}
+
+int ieee80211_tpc_query_start(struct ieee80211_tpc_query_info *info)	/* Arm the periodic query timer if not already running. 0 on success, -1 if NULL or already running. */
+{
+	int ret = -1;
+	if (NULL != info) {
+		if (0 == info->is_run) {
+			mod_timer(&info->query_timer, jiffies + info->query_interval * HZ);
+			info->is_run = 1;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+int ieee80211_tpc_query_stop(struct ieee80211_tpc_query_info *info)	/* Cancel the periodic query timer. 0 on success, -1 if NULL or not running. */
+{
+	int ret = -1;
+	if (NULL != info) {
+		if (info->is_run) {
+			del_timer(&info->query_timer);
+			info->is_run = 0;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+int ieee80211_tpc_query_config_interval(struct ieee80211_tpc_query_info *info, int interval)	/* Change the query interval (seconds, >= TPC_INTERVAL_MIN); reschedules if running. */
+{
+	int ret = -1;
+
+	if ((NULL != info) && (interval >= TPC_INTERVAL_MIN)) {
+		info->query_interval = interval;
+		if (info->is_run)	/* apply immediately by pushing the next expiry out */
+			mod_timer(&info->query_timer, jiffies + info->query_interval * HZ);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int ieee80211_tpc_query_get_interval(struct ieee80211_tpc_query_info *info)	/* Return the configured interval in seconds, or -1 for NULL info. */
+{
+	int interval = -1;
+
+	if (NULL != info)
+		interval = info->query_interval;
+
+	return interval;
+}
+
+int ieee80211_tpc_query_state(struct ieee80211_tpc_query_info *info)	/* Return 1 if the periodic query is running, 0 if stopped, -1 for NULL info. */
+{
+	int state = -1;
+
+	if (NULL != info)
+		state = info->is_run;
+
+	return state;
+}
+
+/* TBD: placeholder — always returns 0; tx power adjustment is not implemented yet. */
+int8_t ieee80211_update_tx_power(struct ieee80211com *ic, int8_t txpwr)
+{
+	return 0;
+}
+
+void get_max_in_minpwr(void *arg, struct ieee80211_node *ni)	/* Node iterator: track the largest per-station minimum tx power on this vap. arg is a pwr_info_per_vap accumulator. */
+{
+	struct pwr_info_per_vap *p = (struct pwr_info_per_vap *)arg;
+
+	if ((ni->ni_vap == p->vap) && (ni->ni_associd != 0) && (ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT)) {	/* only associated, spectrum-mgmt capable stations */
+		if (ni->ni_tpc_info.tpc_sta_cap.min_txpow > p->max_in_minpwr)
+			p->max_in_minpwr = ni->ni_tpc_info.tpc_sta_cap.min_txpow;
+	}
+}
+
+int ieee80211_parse_local_max_txpwr(struct ieee80211vap *vap, struct ieee80211_scanparams *scan)	/* Derive local max tx power (regulatory max - power constraint) for the current channel from Country + Power Constraint IEs. 0 on success, -1 otherwise. */
+{
+	u_int8_t	*country = scan->country;	/* raw Country IE (id, len, country string, then channel triplets) */
+	u_int8_t	*pwr_constraint = scan->pwr_constraint;	/* raw Power Constraint IE (id, len=1, constraint dB) */
+	u_int8_t	start_chan;
+	u_int8_t	chan_number;
+	int8_t		chan_reg_max_txpwr;
+	int8_t		constraint;
+	u_int8_t	*ie;
+	u_int8_t	*ie_end;
+	u_int8_t	channel = vap->iv_ic->ic_curchan->ic_ieee;
+
+	scan->local_max_txpwr = -1;	/* -1 = not found */
+	if (pwr_constraint[1] != 1) {	/* Power Constraint IE body must be exactly one byte */
+		IEEE80211_DPRINTF(vap,
+				IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+				"invalid pwr_constraint ie (len=%d)\n",
+				pwr_constraint[1]);
+		return -1;
+	}
+
+	constraint = pwr_constraint[2];
+
+	ie	= country + 2 + 3;	/* skip IE header (2) and 3-char country string; assumes len >= 3 — NOTE(review): no explicit length validation here */
+	ie_end	= country + country[1] + 2;
+
+	while((ie_end - ie) >= 3) {	/* walk (first chan, num chans, max power) triplets */
+		start_chan		= ie[0];
+		chan_number		= ie[1];
+		chan_reg_max_txpwr	= ie[2];
+		if ((channel >= start_chan) && (channel < (start_chan + chan_number))) {
+			scan->local_max_txpwr = chan_reg_max_txpwr - constraint;
+			IEEE80211_DPRINTF(vap,
+					IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+					"chan=%d regulatory powr=%d constraint=%d local max power=%d\n",
+					channel,
+					chan_reg_max_txpwr,
+					constraint,
+					scan->local_max_txpwr);
+			return 0;
+		}
+		ie += 3;
+	}
+	IEEE80211_DPRINTF(vap,
+			IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+			"No power constraint in scan channel %d,but beacon is there, something might be wrong\n",
+			channel);
+	return -1;
+}
+
+void ieee80211_doth_measurement_init(struct ieee80211com *ic)	/* Zero the global 802.11h measurement bookkeeping for this device. */
+{
+	memset(&ic->ic_measure_info, 0, sizeof(ic->ic_measure_info));
+}
+
+void ieee80211_doth_measurement_deinit(struct ieee80211com *ic)	/* Mark any in-flight measurement as discarded (MEAS_STATUS_DISCRAD — sic, constant defined elsewhere). */
+{
+	ic->ic_measure_info.status = MEAS_STATUS_DISCRAD;
+}
+
+void ieee80211_action_finish_measurement(struct ieee80211com *ic, u_int8_t result)	/* Dispatch the completed measurement's report frame according to its type. */
+{
+	struct ieee80211_global_measure_info *ic_meas_info = &ic->ic_measure_info;
+
+	switch (ic_meas_info->type) {
+	case IEEE80211_CCA_MEASTYPE_BASIC:
+		ieee80211_send_meas_report_basic(ic_meas_info->ni,
+				result,
+				ic_meas_info->frame_token,
+				1,
+				ic_meas_info->param.basic.channel,
+				ic_meas_info->param.basic.tsf,
+				ic_meas_info->param.basic.duration_tu,
+				ic_meas_info->results.basic);
+		break;
+	case IEEE80211_CCA_MEASTYPE_CCA:
+		ieee80211_send_meas_report_cca(ic_meas_info->ni,
+				result,
+				ic_meas_info->frame_token,
+				1,
+				ic_meas_info->param.cca.channel,
+				ic_meas_info->param.cca.tsf,
+				ic_meas_info->param.cca.duration_tu,
+				ic_meas_info->results.cca);
+		break;
+	case IEEE80211_CCA_MEASTYPE_RPI:
+		ieee80211_send_meas_report_rpi(ic_meas_info->ni,
+				result,	ic_meas_info->frame_token,
+				1, ic_meas_info->param.rpi.channel,
+				ic_meas_info->param.rpi.tsf,
+				ic_meas_info->param.rpi.duration_tu,
+				ic_meas_info->results.rpi);
+		break;
+	case IEEE80211_RM_MEASTYPE_CH_LOAD:
+		ieee80211_send_rm_rep_chan_load(ic_meas_info->ni,
+				result, ic_meas_info->frame_token, 1,
+				ic_meas_info->param.chan_load.op_class,
+				ic_meas_info->param.chan_load.channel,
+				ic_meas_info->param.chan_load.duration_tu,
+				ic_meas_info->results.chan_load);
+		break;
+	case IEEE80211_RM_MEASTYPE_NOISE:
+		ieee80211_send_rm_rep_noise_his(ic_meas_info->ni, result, ic_meas_info->frame_token,
+				1, ic_meas_info->param.noise_his.op_class,
+				ic_meas_info->param.noise_his.channel,
+				ic_meas_info->param.noise_his.duration_tu,	/* FIX: was param.chan_load.duration_tu — copy/paste from CH_LOAD case; confirm field name in union */
+				255, ic_meas_info->results.noise_his.anpi,
+				ic_meas_info->results.noise_his.ipi);
+
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(ieee80211_action_finish_measurement);
+
+int ieee80211_action_trigger_measurement(struct ieee80211com *ic)	/* Thin wrapper: kick the device-specific measurement callback. */
+{
+	return ic->ic_do_measurement(ic);
+}
+
+int ieee80211_action_measurement_report_fail(struct ieee80211_node *ni,	/* Send a failure measurement report (spectrum-mgmt or radio-measurement category, chosen by type). */
+					u_int8_t type,
+					u_int8_t report_mode,	/* must be non-zero: encodes the refusal/incapable bits */
+					u_int8_t token,
+					u_int8_t meas_token)
+{
+	struct ieee80211_meas_report_ctrl ctrl;
+	struct ieee80211_action_data action_data;
+
+	if (report_mode == 0)	/* mode 0 would be a success report — not valid here */
+		return -1;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.meas_type = type;
+	ctrl.report_mode = report_mode;
+	ctrl.token = token;
+	ctrl.meas_token = meas_token;
+	ctrl.autonomous = 0;
+
+	action_data.cat = (type <= IEEE80211_CCA_MEASTYPE_RPI ? IEEE80211_ACTION_CAT_SPEC_MGMT : IEEE80211_ACTION_CAT_RM);	/* basic/CCA/RPI are 11h; the rest are 11k radio measurements */
+	action_data.action = (type <= IEEE80211_CCA_MEASTYPE_RPI ? IEEE80211_ACTION_S_MEASUREMENT_REPORT : IEEE80211_ACTION_R_MEASUREMENT_REPORT);
+	action_data.params = &ctrl;
+
+	return IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+}
+
diff --git a/drivers/qtn/wlan/ieee80211_wireless.c b/drivers/qtn/wlan/ieee80211_wireless.c
new file mode 100644
index 0000000..2f971ca
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_wireless.c
@@ -0,0 +1,24250 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: ieee80211_wireless.c 2614 2007-07-26 12:58:47Z mrenzmann $
+ */
+
+/*
+ * Wireless extensions support for 802.11 common code.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/utsname.h>
+#include <linux/if_arp.h>		/* XXX for ARPHRD_ETHER */
+#include <linux/delay.h>
+#include <linux/random.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
+#include <linux/watch64.h>
+#endif
+
+#include <qtn/muc_phy_stats.h>
+#include <qtn/shared_defs.h>
+#include <qtn/skb_recycle.h>
+#include <qtn/lhost_muc_comm.h>
+#include <qtn/qtn_global.h>
+
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/seq_file.h>
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+#include <linux/pm_qos.h>
+#else
+#include <linux/pm_qos_params.h>
+#include <asm/uaccess.h>
+#endif
+#include "net80211/if_media.h"
+
+#include "net80211/ieee80211_var.h"
+#include "net80211/ieee80211_linux.h"
+#include "net80211/_ieee80211.h"
+#include "qdrv_sch_const.h"
+#include "net80211/ieee80211_tpc.h"
+#include "net80211/ieee80211_tdls.h"
+#include "net80211/ieee80211_mlme_statistics.h"
+#include "qtn_logging.h"
+
+#include <qdrv/qdrv_vap.h>
+#include <qtn/qtn_debug.h>
+#include <qtn/shared_params.h>
+#include <qtn/qtn_bb_mutex.h>
+#include <qtn/qtn_vlan.h>
+#include <linux/net/bridge/br_public.h>
+
+#include "qtn/wlan_ioctl.h"
+
+#include "soc.h"
+
+#include <qdrv/qdrv_mac.h>
+#include <qtn/txbf_mbox.h>
+
+#include <qtn/topaz_tqe_cpuif.h>
+
+#include <qtn/hardware_revision.h>
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+#include "net80211/ieee80211_bsa.h"
+#endif
+#include <asm/board/pm.h>
+
+#define	IS_UP(_dev) \
+	(((_dev)->flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
+#define	IS_UP_AUTO(_vap) \
+	(IS_UP((_vap)->iv_dev) && \
+	 (_vap)->iv_ic->ic_roaming == IEEE80211_ROAMING_AUTO)
+#define	RESCAN	1
+
+#define DBGMAC "%02X:%02X:%02X:%02X:%02X:%02X"
+#define ETHERFMT(a) \
+	        (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+
+#define STRUCT_MEMBER_SIZEOF(stype, member)		sizeof(((stype *)0)->member)
+#define RSSI_SWING_RANGE 30
+#define RSSI_MAX_SMTHING_FCTR 100
+#define RSSI_HIGH_SMTHING_FCTR 99
+#define RSSI_MED_SMTHING_FCTR 95
+#define QTN_AMPDU_DETECT_PERIOD 1
+#define QTN_RSSI_SAMPLE_TH	4
+
+#define SCS_CHAN_POWER_DIFF_SAFE		2
+#define SCS_CHAN_POWER_DIFF_MAX			16
+#define SCS_MAX_RAW_CHAN_METRIC			0x7FFFFFFF
+#define SCS_MAX_RATE_RATIO_CAP			0x7FFFFFFF
+#define SCS_PICK_CHAN_MIN_SCALED_TRAFFIC	100 /* ms */
+#define MIN_CAC_PERIOD				70 /* seconds */
+#define OCAC_MAX_SUPPORTED_VAPS			2
+
+#define DFS_S_DBG_QEVT(qevtdev, ...)	do {\
+						printk(__VA_ARGS__);\
+						ieee80211_eventf(qevtdev, __VA_ARGS__);\
+					} while (0)
+
+#define	IEEE80211_OBSS_AP_SCAN_INT 25
+int wlan_11ac_20M_mcs_nss_tbl[] = {	/* 11ac 20 MHz lookup: 10 entries per spatial-stream row, value = (NSS index << 4) | MCS; -1 marks an invalid combination */
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, -1,	/* 1SS: MCS 0-8, MCS9 invalid */
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, -1,	/* 2SS: MCS 0-8, MCS9 invalid */
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,	/* 3SS: MCS 0-9 all valid */
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, -1	/* 4SS: MCS 0-8, MCS9 invalid */
+};
+
+#define DM_DEFAULT_TX_POWER_FACTOR	2
+#define DM_DEFAULT_DFS_FACTOR		8
+
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+const uint8_t ieee80211_meas_sta_qtn_report_subtype_len[RM_QTN_CTRL_END + 1] = {	/* payload length (bytes) of each Quantenna vendor RM report subtype */
+	[RM_QTN_TX_STATS] = sizeof(struct ieee80211_ie_qtn_rm_txstats),
+	[RM_QTN_RX_STATS] = sizeof(struct ieee80211_ie_qtn_rm_rxstats),
+	[RM_QTN_MAX_QUEUED] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, max_queued),
+	[RM_QTN_LINK_QUALITY] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, link_quality),
+	[RM_QTN_RSSI_DBM] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, rssi_dbm),
+	[RM_QTN_BANDWIDTH] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, bandwidth),
+	[RM_QTN_SNR] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, snr),
+	[RM_QTN_TX_PHY_RATE] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, tx_phy_rate),
+	[RM_QTN_RX_PHY_RATE] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, rx_phy_rate),
+	[RM_QTN_CCA] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, cca),
+	[RM_QTN_BR_IP] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, br_ip),
+	[RM_QTN_RSSI] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, rssi),
+	[RM_QTN_HW_NOISE] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, hw_noise),
+	[RM_QTN_SOC_MACADDR] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, soc_macaddr),
+	[RM_QTN_SOC_IPADDR] = STRUCT_MEMBER_SIZEOF(struct ieee80211_ie_qtn_rm_sta_all, soc_ipaddr),
+	[RM_QTN_UNKNOWN] = sizeof(u_int32_t),	/* control subtypes carry a plain 32-bit value */
+	[RM_QTN_RESET_CNTS] = sizeof(u_int32_t),
+	[RM_QTN_RESET_QUEUED] = sizeof(u_int32_t),
+};
+#endif
+
+struct assoc_info_report {	/* Per-station association statistics snapshot returned to userspace ioctls. */
+	uint64_t	ai_rx_bytes;
+	uint64_t	ai_tx_bytes;
+	uint32_t	ai_rx_packets;
+	uint32_t	ai_tx_packets;
+	uint32_t	ai_rx_errors;
+	uint32_t	ai_tx_errors;
+	uint32_t	ai_rx_dropped;
+	uint32_t	ai_tx_dropped;
+	uint32_t	ai_tx_wifi_sent[WMM_AC_NUM];	/* NOTE(review): WMM_AC_NUM here vs WME_AC_NUM below — confirm both expand to the same count */
+	uint32_t	ai_tx_wifi_drop[WME_AC_NUM];	/* per-access-category drops */
+	uint32_t	ai_tx_ucast;
+	uint32_t	ai_rx_ucast;
+	uint32_t	ai_tx_mcast;
+	uint32_t	ai_rx_mcast;
+	uint32_t	ai_tx_bcast;
+	uint32_t	ai_rx_bcast;
+	uint32_t	ai_tx_failed;
+	uint32_t	ai_time_associated;	/*Unit: seconds*/
+	uint16_t	ai_assoc_id;
+	uint16_t	ai_link_quality;
+	uint16_t	ai_tx_phy_rate;
+	uint16_t	ai_rx_phy_rate;
+	uint32_t	ai_achievable_tx_phy_rate;
+	uint32_t	ai_achievable_rx_phy_rate;
+	u_int32_t	ai_rx_fragment_pkts;
+	u_int32_t	ai_rx_vlan_pkts;
+	uint8_t		ai_mac_addr[IEEE80211_ADDR_LEN];
+	int32_t		ai_rssi;
+	int32_t		ai_smthd_rssi;	/* smoothed RSSI */
+	int32_t		ai_snr;
+	int32_t		ai_max_queued;
+	uint8_t		ai_bw;	/* bandwidth */
+	uint8_t		ai_tx_mcs;
+	uint8_t		ai_rx_mcs;
+	uint8_t		ai_auth;
+	char		ai_ifname[IFNAMSIZ];
+	uint32_t	ai_ip_addr;
+	int32_t		ai_hw_noise;
+	uint32_t	ai_is_qtn_node;	/* non-zero when the peer is a Quantenna device */
+};
+
+struct assoc_info_table {	/* Fixed-capacity table of assoc_info_report entries handed to userspace. */
+	uint16_t	unit_size;	/* Size of structure assoc_info_table */
+	uint16_t	cnt;		/* Record the number of valid entries */
+	struct assoc_info_report array[QTN_ASSOC_LIMIT];
+};
+
+struct sample_assoc_data {	/* Packed wire-format per-station sample record (layout is ABI — do not reorder). */
+	uint8_t mac_addr[IEEE80211_ADDR_LEN];
+	uint8_t assoc_id;
+	uint8_t bw;	/* bandwidth */
+	uint8_t tx_stream;
+	uint8_t rx_stream;
+	uint32_t time_associated;	/*Unit: seconds*/
+	uint32_t achievable_tx_phy_rate;
+	uint32_t achievable_rx_phy_rate;
+	uint32_t rx_packets;
+	uint32_t tx_packets;
+	uint32_t rx_errors;
+	uint32_t tx_errors;
+	uint32_t rx_dropped;
+	uint32_t tx_dropped;
+	uint32_t tx_wifi_drop[WME_AC_NUM];	/* per-access-category drops */
+	uint32_t rx_ucast;
+	uint32_t tx_ucast;
+	uint32_t rx_mcast;
+	uint32_t tx_mcast;
+	uint32_t rx_bcast;
+	uint32_t tx_bcast;
+	uint16_t link_quality;
+	uint32_t ip_addr;
+	uint64_t rx_bytes;
+	uint64_t tx_bytes;
+	uint32_t last_rssi_dbm[NUM_ANT + 1];	/* per-antenna plus combined entry */
+	uint32_t last_rcpi_dbm[NUM_ANT + 1];
+	uint32_t last_evm_dbm[NUM_ANT + 1];
+	uint32_t last_hw_noise[NUM_ANT + 1];
+	uint8_t protocol;
+	uint8_t vendor;
+}__packed;
+
+struct sample_assoc_user_data {	/* Userspace request descriptor: how many entries to copy, starting where, into which buffer. */
+	int num_entry;
+	int offset;
+	struct sample_assoc_data *data;	/* destination array supplied by the caller */
+};
+
+struct node_client_data {	/* List node wrapping one sample_assoc_data record. */
+	struct list_head node_list;
+	struct sample_assoc_data data;
+};
+
+struct scs_chan_intf_params {	/* Per-channel interference measurements used by SCS channel selection. */
+	struct ieee80211_channel *chan;
+	uint32_t chan_bw;	/* channel bandwidth */
+	uint32_t cca_intf;	/* CCA interference */
+	uint32_t pmbl_err;	/* preamble errors */
+	uint32_t cca_dur;
+	uint32_t cca_pri;	/* primary-channel CCA */
+	uint32_t cca_sec;	/* secondary-20 CCA */
+	uint32_t cca_sec40;	/* secondary-40 CCA */
+};
+
+
+#ifdef WLAN_MALLOC_FREE_TOT_DEBUG
+int g_wlan_tot_alloc = 0;
+int g_wlan_tot_alloc_cnt = 0;
+int g_wlan_tot_free = 0;
+int g_wlan_tot_free_cnt = 0;
+int g_wlan_balance = 0;
+int g_wlan_tot_node_alloc = 0;
+int g_wlan_tot_node_alloc_tmp = 0;
+int g_wlan_tot_node_free = 0;
+int g_wlan_tot_node_free_tmp = 0;
+
+EXPORT_SYMBOL(g_wlan_tot_alloc);
+EXPORT_SYMBOL(g_wlan_tot_alloc_cnt);
+EXPORT_SYMBOL(g_wlan_tot_free);
+EXPORT_SYMBOL(g_wlan_tot_free_cnt);
+EXPORT_SYMBOL(g_wlan_balance);
+EXPORT_SYMBOL(g_wlan_tot_node_alloc);
+EXPORT_SYMBOL(g_wlan_tot_node_alloc_tmp);
+EXPORT_SYMBOL(g_wlan_tot_node_free);
+EXPORT_SYMBOL(g_wlan_tot_node_free_tmp);
+#endif
+
+extern uint16_t g_wowlan_host_state;
+extern uint16_t g_wowlan_match_type;
+extern uint16_t g_wowlan_l2_ether_type;
+extern uint16_t g_wowlan_l3_udp_port;
+
+extern int fwt_db_get_macs_behind_node(const uint8_t index, uint32_t *num_entries, uint32_t max_req,
+					uint32_t *flags, uint8_t *buf);
+
+int ieee80211_send_tuning_data(struct ieee80211_node *);
+void topaz_congest_set_unicast_queue_count(uint32_t qnum);
+static void get_node_max_rssi (void *arg, struct ieee80211_node *ni);
+static void ieee80211_pco_timer_func ( unsigned long arg );
+static int ieee80211_ba_setup_detect_set(struct ieee80211vap *vap, int enable);
+static int ieee80211_wds_vap_exists(struct ieee80211com *ic);
+static struct ieee80211_node *ieee80211_get_vap_node(struct ieee80211vap *vap);
+int ieee80211_should_disable_scs(struct ieee80211com *ic);
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+static int ieee80211_bsa_macfilter_add(struct ieee80211vap *vap, uint8_t *mac);
+static int ieee80211_bsa_macfilter_remove(struct ieee80211vap *vap, uint8_t *mac);
+#endif
+extern u_int8_t g_channel_fixed;
+/*
+ * The RSSI values reported in the TX/RX descriptors in the driver are the SNR
+ * expressed in dBm. Thus 'rssi' is signal level above the noise floor in dBm.
+ *
+ * Noise is measured in dBm and is negative unless there is an unimaginable
+ * level of RF noise.
+ *
+ * The signal level is noise + rssi.
+ *
+ * Note that the iw_quality values are 1 byte, and can be signed, unsigned or
+ * negative depending on context.
+ *
+ */
+void
+set_quality(struct iw_quality *iq, u_int rssi, int noise)	/* Fill a wireless-extensions quality struct from rssi (dB above noise) and noise floor (dBm); see block comment above. */
+{
+	iq->qual = rssi;
+	iq->noise = noise;
+	iq->level = ((((int)rssi + noise) <= 0) ? ((int)rssi + noise) : 0);	/* signal = noise + rssi, clamped to non-positive dBm */
+	iq->updated = IW_QUAL_ALL_UPDATED;
+	iq->updated |= IW_QUAL_DBM;	/* values are in dBm, not relative units */
+}
+static void
+pre_announced_chanswitch(struct net_device *dev, u_int32_t channel, u_int32_t tbtt);
+
+static void
+preempt_scan(struct net_device *dev, int max_grace, int max_wait)	/* Busy-wait (ms granularity) for an in-progress scan to end; after max_grace ms, cancel it and wait up to max_wait ms more. */
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	int total_delay = 0;
+	int canceled = 0, ready = 0;
+	while (!ready && total_delay < max_grace + max_wait) {
+	  if ((ic->ic_flags & IEEE80211_F_SCAN) == 0
+#ifdef QTN_BG_SCAN
+		&& (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0	/* background scan counts as busy too */
+#endif /* QTN_BG_SCAN */
+	  ) {
+	    ready = 1;
+	  } else {
+	    if (!canceled && total_delay > max_grace) {
+	      /*
+		 Cancel any existing active scan, so that any new parameters
+		 in this scan ioctl (or the defaults) can be honored, then
+		 wait around a while to see if the scan cancels properly.
+	      */
+	      IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: cancel pending scan request\n", __func__);
+	      (void) ieee80211_cancel_scan(vap);
+	      canceled = 1;
+	    }
+	    mdelay (1);	/* busy-wait in 1 ms steps (not sleeping) */
+	    total_delay += 1;
+	  }
+	}
+	if (!ready) {
+	  IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			    "%s: Timeout canceling current scan.\n",
+			    __func__);
+	}
+}
+
+static struct iw_statistics *
+ieee80211_iw_getstats(struct net_device *dev)	/* Wireless-extensions get_wireless_stats handler; two implementations selected at build time. */
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+#ifdef USE_LINUX_FRAMEWORK
+	struct iw_statistics *is = &vap->iv_iwstats;	/* per-vap stats assembled from net80211 counters */
+	set_quality(&is->qual, ieee80211_getrssi(vap->iv_ic),
+			ic->ic_channoise);
+	is->status = vap->iv_state;
+	is->discard.nwid = vap->iv_stats.is_rx_wrongbss +
+		vap->iv_stats.is_rx_ssidmismatch;
+	is->discard.code = vap->iv_stats.is_rx_wepfail +
+		vap->iv_stats.is_rx_decryptcrc;
+	is->discard.fragment = 0;
+	is->discard.retries = 0;
+	is->discard.misc = 0;
+
+	is->miss.beacon = 0;
+	return is;
+#else
+
+	struct iw_statistics *is = &ic->ic_iwstats;	/* device-wide stats filled by the driver callback */
+	ic->ic_get_wlanstats(ic, is);
+
+	return is;
+#endif
+
+}
+
+static int
+ieee80211_ioctl_giwname(struct net_device *dev, struct iw_request_info *info,	/* SIOCGIWNAME: report the 802.11 mode string for the current channel/phymode. */
+	char *name, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211_channel *c = vap->iv_ic->ic_curchan;
+
+	if (vap->iv_ic->ic_des_mode == IEEE80211_MODE_AUTO &&
+		vap->iv_ic->ic_rf_chipid == CHIPID_DUAL)
+		/* Display all the supported modes for RFIC5 */
+		strncpy(name, "IEEE 802.11gnac", IFNAMSIZ);
+	else if ((IEEE80211_IS_CHAN_11AC(c) ) &&	/* checks ordered most- to least-capable mode */
+		(vap->iv_ic->ic_phymode >= IEEE80211_MODE_11AC_VHT20PM))
+                strncpy(name, "IEEE 802.11ac", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_108G(c))
+		strncpy(name, "IEEE 802.11Tg", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_108A(c))
+		strncpy(name, "IEEE 802.11Ta", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_TURBO(c))
+		strncpy(name, "IEEE 802.11T", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_11NG(c) &&
+		(vap->iv_ic->ic_phymode == IEEE80211_MODE_11NG_HT40PM))
+		strncpy(name, "IEEE 802.11ng40", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_11NG(c) &&
+		(vap->iv_ic->ic_phymode == IEEE80211_MODE_11NG))
+		strncpy(name, "IEEE 802.11ng", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_11NA(c) &&
+		(vap->iv_ic->ic_phymode == IEEE80211_MODE_11NA_HT40PM))
+		strncpy(name, "IEEE 802.11na40", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_11NA(c) &&
+		(vap->iv_ic->ic_phymode == IEEE80211_MODE_11NA))
+		strncpy(name, "IEEE 802.11na", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_ANYG(c) &&
+		(vap->iv_ic->ic_phymode == IEEE80211_MODE_11G))
+		strncpy(name, "IEEE 802.11g", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_A(c))
+		strncpy(name, "IEEE 802.11a", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_B(c))
+		strncpy(name, "IEEE 802.11b", IFNAMSIZ);
+	else
+		strncpy(name, "IEEE 802.11", IFNAMSIZ);
+	/* XXX FHSS */
+	return 0;
+}
+
+/*
+ * Get a key index from a request.  If nothing is
+ * specified in the request we use the current xmit
+ * key index.  Otherwise we just convert the index
+ * to be base zero.
+ */
+static int
+getiwkeyix(struct ieee80211vap *vap, const struct iw_point* erq, int *kix)
+{
+	int idx = erq->flags & IW_ENCODE_INDEX;
+
+	if (idx >= 1 && idx <= IEEE80211_WEP_NKID) {
+		/* Request carries an explicit 1-based index; make it base 0. */
+		idx--;
+	} else {
+		/* Nothing usable in the request: fall back to the xmit key. */
+		idx = vap->iv_def_txkey;
+		if (idx == IEEE80211_KEYIX_NONE)
+			idx = 0;
+	}
+
+	if (idx < 0 || idx >= IEEE80211_WEP_NKID)
+		return -EINVAL;
+
+	*kix = idx;
+	return 0;
+}
+
+/*
+ * SIOCSIWENCODE: configure WEP.  Compiled out (returns -EOPNOTSUPP)
+ * unless IEEE80211_UNUSED_CRYPTO_COMMANDS is defined.
+ *
+ * Sets key material and/or the default transmit key, toggles the
+ * privacy flag, and applies the open/restricted policy for
+ * unencrypted frames.  If privacy was toggled on a running interface
+ * the state machine is reset via a SCAN transition.
+ */
+static int
+ieee80211_ioctl_siwencode(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *keybuf)
+{
+#ifndef IEEE80211_UNUSED_CRYPTO_COMMANDS
+	return -EOPNOTSUPP;
+#else
+	struct ieee80211vap *vap = netdev_priv(dev);
+	int kid = 0;
+	int error = -EINVAL;
+	int wepchange = 0;
+
+	if ((erq->flags & IW_ENCODE_DISABLED) == 0) {
+		/*
+		 * Enable crypto, set key contents, and
+		 * set the default transmit key.
+		 */
+		error = getiwkeyix(vap, erq, &kid);
+		if (error < 0)
+			return error;
+		if (erq->length > IEEE80211_KEYBUF_SIZE)
+			return -EINVAL;
+		/* XXX no way to install 0-length key */
+		if (erq->length > 0) {
+			struct ieee80211_key *k = &vap->iv_nw_keys[kid];
+
+			/*
+			 * Set key contents.  This interface only supports WEP.
+			 * Indicate intended key index.
+			 */
+			k->wk_keyix = kid;
+			k->wk_keylen = erq->length;
+			k->wk_ciphertype = IEEE80211_CIPHER_WEP;
+			memcpy(k->wk_key, keybuf, erq->length);
+			/* Zero the tail so stale key bytes never linger. */
+			memset(k->wk_key + erq->length, 0,
+					IEEE80211_KEYBUF_SIZE - erq->length);
+			error = vap->iv_key_set(vap, k, vap->iv_myaddr);
+
+		} else
+			error = -EINVAL;
+	} else {
+		/*
+		 * When the length is zero the request only changes
+		 * the default transmit key.  Verify the new key has
+		 * a non-zero length.
+		 */
+		if (vap->iv_nw_keys[kid].wk_keylen == 0)
+			error = -EINVAL;
+		/*
+		 * NB: error stays -EINVAL here either way, so a DISABLED
+		 * request always falls into the error!=0 branch below,
+		 * which clears the privacy flag and reports success.
+		 */
+	}
+	if (error == 0) {
+		/*
+		 * The default transmit key is only changed when:
+		 * 1. Privacy is enabled and no key matter is
+		 *    specified.
+		 * 2. Privacy is currently disabled.
+		 * This is deduced from the iwconfig man page.
+		 */
+		if (erq->length == 0 ||
+				(vap->iv_flags & IEEE80211_F_PRIVACY) == 0)
+			vap->iv_def_txkey = kid;
+		/* Remember whether privacy flipped off->on. */
+		wepchange = (vap->iv_flags & IEEE80211_F_PRIVACY) == 0;
+		vap->iv_flags |= IEEE80211_F_PRIVACY;
+	} else {
+		/* Disable path: nothing to do if privacy is already off. */
+		if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0)
+			return 0;
+		vap->iv_flags &= ~IEEE80211_F_PRIVACY;
+		wepchange = 1;
+		error = 0;
+	}
+	if (error == 0) {
+		/* Set policy for unencrypted frames */
+		if ((erq->flags & IW_ENCODE_OPEN) &&
+				(!(erq->flags & IW_ENCODE_RESTRICTED))) {
+			vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+		} else if (!(erq->flags & IW_ENCODE_OPEN) &&
+				(erq->flags & IW_ENCODE_RESTRICTED)) {
+			vap->iv_flags |= IEEE80211_F_DROPUNENC;
+		} else {
+			/* Default policy */
+			if (vap->iv_flags & IEEE80211_F_PRIVACY)
+				vap->iv_flags |= IEEE80211_F_DROPUNENC;
+			else
+				vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+		}
+	}
+	if (error == 0 && IS_UP(vap->iv_dev)) {
+		/*
+		 * Device is up and running; we must kick it to
+		 * effect the change.  If we're enabling/disabling
+		 * crypto use then we must re-initialize the device
+		 * so the 802.11 state machine is reset.  Otherwise
+		 * the key state should have been updated above.
+		 */
+		if (wepchange && IS_UP_AUTO(vap))
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	}
+	return error;
+#endif /* IEEE80211_UNUSED_CRYPTO_COMMANDS */
+}
+
+/*
+ * SIOCGIWENCODE: report the current WEP configuration — the key bytes
+ * for the selected index (clamped to the stored key length) plus the
+ * enabled/disabled and open/restricted policy flags.
+ */
+static int
+ieee80211_ioctl_giwencode(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *erq, char *key)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+		erq->length = 0;
+		erq->flags = IW_ENCODE_DISABLED;
+	} else {
+		struct ieee80211_key *k;
+		int kid;
+		int err;
+
+		err = getiwkeyix(vap, erq, &kid);
+		if (err < 0)
+			return err;
+		k = &vap->iv_nw_keys[kid];
+		/* XXX no way to return cipher/key type */
+		erq->flags = kid + 1;			/* NB: base 1 */
+		if (erq->length > k->wk_keylen)
+			erq->length = k->wk_keylen;
+		memcpy(key, k->wk_key, erq->length);
+		erq->flags |= IW_ENCODE_ENABLED;
+	}
+
+	/* Report the policy applied to unencrypted frames. */
+	erq->flags |= (vap->iv_flags & IEEE80211_F_DROPUNENC) ?
+		IW_ENCODE_RESTRICTED : IW_ENCODE_OPEN;
+
+	return 0;
+}
+
+#ifndef ifr_media
+#define	ifr_media	ifr_ifru.ifru_ivalue
+#endif
+
+/*
+ * SIOCSIWRATE: fix the transmit rate (rrq->fixed) or restore automatic
+ * rate selection.  The requested bit/s value is translated to a media
+ * word — a legacy rate for pre-11n modes, otherwise an MCS-based media
+ * word — and applied through the ifmedia machinery.  -ENETRESET from
+ * ifmedia_ioctl is swallowed: no restart is needed after a rate change.
+ */
+static int
+ieee80211_ioctl_siwrate(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	/*
+	 * NOTE(review): mopts is indexed directly by ic_des_mode; this
+	 * assumes the IEEE80211_MODE_* enum order matches the table —
+	 * confirm when modes are added or reordered.
+	 */
+	static const u_int mopts[] = {
+		IFM_AUTO,
+		IFM_IEEE80211_11A,
+		IFM_IEEE80211_11B,
+		IFM_IEEE80211_11G,
+		IFM_IEEE80211_FH,
+		IFM_IEEE80211_11A | IFM_IEEE80211_TURBO,
+		IFM_IEEE80211_11G | IFM_IEEE80211_TURBO,
+		IFM_IEEE80211_11NA,
+		IFM_IEEE80211_11NG,
+		IFM_IEEE80211_11NG_HT40PM,
+		IFM_IEEE80211_11NA_HT40PM,
+		IFM_IEEE80211_11AC_VHT20PM,
+		IFM_IEEE80211_11AC_VHT40PM,
+		IFM_IEEE80211_11AC_VHT80PM,
+		IFM_IEEE80211_11AC_VHT160PM,
+	};
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifreq ifr;
+	int rate, retv;
+	u_int16_t mode = ic->ic_des_mode;
+	u_int16_t chan_mode = 0;
+	uint8_t sgi = 0;
+
+	if (mode == IEEE80211_MODE_AUTO)
+		return -EINVAL;
+
+	if (vap->iv_media.ifm_cur == NULL)
+		return -EINVAL;
+
+	/* Start from the current media word, replacing mode/type bits. */
+	memset(&ifr, 0, sizeof(ifr));
+	ifr.ifr_media = vap->iv_media.ifm_cur->ifm_media &~ (IFM_MMASK|IFM_TMASK);
+	ifr.ifr_media |= mopts[ic->ic_des_mode];
+
+
+	if (rrq->fixed) {
+		/* XXX fudge checking rates */
+		if (mode < IEEE80211_MODE_11NA) {
+			/* Legacy modes: bit/s -> 500kb/s units for lookup. */
+			rate = ieee80211_rate2media(ic, 2 * rrq->value / 1000000,
+				ic->ic_des_mode);
+		} else {
+			/* 11n+: width and guard interval affect the mapping. */
+			if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) {
+				chan_mode = 1;
+				sgi = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+			} else {
+				sgi = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+			}
+
+			rate = ieee80211_rate2mcs(2 * rrq->value / 1000000, chan_mode, sgi);
+			/* No mcs match found. It can be a legacy rate */
+			if (rate < 0) {
+				rate = ieee80211_mcs2media(ic,
+					(2 * rrq->value / 1000000),
+					ic->ic_des_mode);
+			} else {
+				rate = ieee80211_mcs2media(ic, rate, ic->ic_des_mode);
+			}
+		}
+		if (rate == IFM_AUTO) {		/* NB: unknown rate */
+			return -EINVAL;
+		}
+	} else {
+		rate = IFM_AUTO;
+		vap->iv_mcs_config = IEEE80211_MCS_AUTO_RATE_ENABLE;
+	}
+	ifr.ifr_media |= IFM_SUBTYPE(rate);
+
+	/* refresh media capabilities based on channel */
+	ifmedia_removeall(&vap->iv_media);
+	(void) ieee80211_media_setup(ic, &vap->iv_media,
+		vap->iv_caps, vap->iv_media.ifm_change, vap->iv_media.ifm_status);
+
+	retv = ifmedia_ioctl(vap->iv_dev, &ifr, &vap->iv_media, SIOCSIFMEDIA);
+	if (retv == -ENETRESET)
+	{
+#if 0 //No need to restart network after rate change
+		retv = IS_UP_AUTO(vap) ? ieee80211_open(vap->iv_dev) : 0;
+#endif
+		return 0;
+	}
+	return retv;
+}
+
+/*
+ * SIOCGIWRATE: report the current transmit rate in bit/s.  Media words
+ * carry rates in 500kb/s units, hence the 1000000 * (rate / 2)
+ * conversion; 0 is reported for automatic rate selection.
+ */
+static int
+ieee80211_ioctl_giwrate(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifmediareq imr;
+	int rate, mcs;
+	u_int16_t mode, chan_mode = 0;
+	uint8_t sgi = 0;
+	mode = ic->ic_des_mode;
+
+	memset(&imr, 0, sizeof(imr));
+	vap->iv_media.ifm_status((void *) vap, &imr);
+
+	rrq->fixed = IFM_SUBTYPE(vap->iv_media.ifm_media) != IFM_AUTO;
+	/* media status will have the current xmit rate if available */
+
+	if(mode < IEEE80211_MODE_11NA)
+	{
+		/* Legacy modes: media word maps directly to a rate. */
+		rate = ieee80211_media2rate(imr.ifm_active);
+
+		if (rate == -1)		/* IFM_AUTO */
+			rate = 0;
+		rrq->value = 1000000 * (rate / 2);
+	}
+	else
+	{
+		/* 11n+: width/guard interval affect the MCS->rate mapping. */
+		if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) {
+			chan_mode = 1;
+			sgi = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+		} else {
+			sgi = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+		}
+		rate = ieee80211_media2mcs(imr.ifm_active);
+		if(rate > 0) //Fixed rate
+		{
+			/*
+			 * NOTE(review): bit 0x80 appears to flag "value is an
+			 * MCS index" with the index in the low nibble —
+			 * confirm against ieee80211_media2mcs.
+			 */
+			if(rate & 0x80 ) /* if 11n rate is an mcs index */
+			{
+				mcs = rate & 0xf;
+				rate = ieee80211_mcs2rate(mcs, chan_mode, sgi, 0);
+			}
+		}
+		else		/* IFM_AUTO */
+		{
+			rate = 0;
+		}
+		rrq->value = 1000000 * (rate / 2);
+	}
+	return 0;
+}
+
+/* SIOCSIWSENS: changing the sensitivity threshold is not supported. */
+static int
+ieee80211_ioctl_siwsens(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *sens, char *extra)
+{
+	return -EOPNOTSUPP;
+}
+
+/* SIOCGIWSENS: report a fixed, nominal sensitivity value of 1. */
+static int
+ieee80211_ioctl_giwsens(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *sens, char *extra)
+{
+	sens->value = 1;
+	sens->fixed = 1;
+
+	return 0;
+}
+
+/*
+ * SIOCSIWRTS: set the RTS threshold.  "disabled" maps to
+ * IEEE80211_RTS_THRESH_OFF; otherwise the value must lie within
+ * [IEEE80211_RTS_MIN, IEEE80211_RTS_MAX].
+ */
+static int
+ieee80211_ioctl_siwrts(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	u32 thresh;
+
+	if (rts->disabled) {
+		thresh = IEEE80211_RTS_THRESH_OFF;
+	} else {
+		if (rts->value < IEEE80211_RTS_MIN ||
+		    rts->value > IEEE80211_RTS_MAX)
+			return -EINVAL;
+		thresh = rts->value;
+	}
+
+	/* Push the new threshold down only when it actually changes. */
+	if (thresh != vap->iv_rtsthreshold) {
+		vap->iv_rtsthreshold = thresh;
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_RTSTHRESHOLD, thresh, NULL, 0);
+	}
+
+	return 0;
+}
+
+/* SIOCGIWRTS: report the RTS threshold and whether RTS is disabled. */
+static int
+ieee80211_ioctl_giwrts(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	u32 thresh = vap->iv_rtsthreshold;
+
+	rts->fixed = 1;
+	rts->value = thresh;
+	rts->disabled = (thresh == IEEE80211_RTS_THRESH_OFF);
+
+	return 0;
+}
+
+/*
+ * SIOCSIWFRAG: set the fragmentation threshold.  "disabled" maps to
+ * 2346 (the maximum); otherwise the value must be in [256, 2346] and
+ * is forced even.  A real change triggers a device reset.
+ */
+static int
+ieee80211_ioctl_siwfrag(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	u16 thresh;
+
+	if (rts->disabled) {
+		thresh = 2346;
+	} else if (rts->value >= 256 && rts->value <= 2346) {
+		thresh = rts->value & ~0x1;	/* force an even value */
+	} else {
+		return -EINVAL;
+	}
+
+	if (thresh == vap->iv_fragthreshold)
+		return 0;
+
+	vap->iv_fragthreshold = thresh;
+	return ic->ic_reset(ic);
+}
+
+/* SIOCGIWFRAG: report the fragmentation threshold (2346 == disabled). */
+static int
+ieee80211_ioctl_giwfrag(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	u16 thresh = vap->iv_fragthreshold;
+
+	rts->fixed = 1;
+	rts->value = thresh;
+	rts->disabled = (thresh == 2346);
+
+	return 0;
+}
+
+/*
+ * SIOCSIWAP: set the desired BSSID (or, in WDS mode, the peer MAC).
+ * Not valid in AP mode.  See the inline comment below for the
+ * zero/broadcast/specific address semantics.
+ */
+static int
+ieee80211_ioctl_siwap(struct net_device *dev, struct iw_request_info *info,
+	struct sockaddr *ap_addr, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	/* NB: should not be set when in AP mode */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		return -EINVAL;
+
+	if (vap->iv_opmode == IEEE80211_M_WDS)
+		IEEE80211_ADDR_COPY(vap->wds_mac, &ap_addr->sa_data);
+
+	/*
+	 * zero address corresponds to 'iwconfig ath0 ap off', which means
+	 * enable automatic choice of AP without actually forcing a
+	 * reassociation.
+	 *
+	 * broadcast address corresponds to 'iwconfig ath0 ap any', which
+	 * means scan for the current best AP.
+	 *
+	 * anything else specifies a particular AP.
+	 */
+	vap->iv_flags &= ~IEEE80211_F_DESBSSID;
+	if (!IEEE80211_ADDR_NULL(&ap_addr->sa_data)) {
+		/*
+		 * NOTE(review): this compares the PREVIOUS iv_des_bssid
+		 * against the broadcast address rather than the address
+		 * being set; for the "ap any" case described above the
+		 * new address in ap_addr->sa_data looks like the intended
+		 * operand — confirm against the madwifi original.
+		 */
+		if (!IEEE80211_ADDR_EQ(vap->iv_des_bssid, (u_int8_t*) "\xff\xff\xff\xff\xff\xff"))
+			vap->iv_flags |= IEEE80211_F_DESBSSID;
+
+		IEEE80211_ADDR_COPY(vap->iv_des_bssid, &ap_addr->sa_data);
+		/* Kick a rescan so the new (des)BSSID takes effect. */
+		if (IS_UP_AUTO(vap))
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	}
+	return 0;
+}
+
+/*
+ * SIOCGIWAP: report the current/desired BSSID.
+ *
+ * Priority: an explicitly configured desired BSSID, then the WDS peer,
+ * then the associated/operating BSS, else the null address.
+ *
+ * Fix: the original dereferenced vap->iv_bss->ni_bssid for an AP in
+ * SCAN state without checking iv_bss for NULL (the RUN disjunct had
+ * the check, the HOSTAP/SCAN disjunct did not).  The NULL guard now
+ * covers both, so a missing bss node yields the null address instead
+ * of a NULL-pointer dereference.
+ */
+static int
+ieee80211_ioctl_giwap(struct net_device *dev, struct iw_request_info *info,
+	struct sockaddr *ap_addr, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	if (vap->iv_flags & IEEE80211_F_DESBSSID) {
+		IEEE80211_ADDR_COPY(&ap_addr->sa_data, vap->iv_des_bssid);
+	} else if (vap->iv_opmode == IEEE80211_M_WDS) {
+		IEEE80211_ADDR_COPY(&ap_addr->sa_data, vap->wds_mac);
+	} else if (vap->iv_bss &&
+			((vap->iv_opmode == IEEE80211_M_HOSTAP &&
+				vap->iv_state == IEEE80211_S_SCAN) ||
+			(vap->iv_state == IEEE80211_S_RUN))) {
+		IEEE80211_ADDR_COPY(&ap_addr->sa_data, vap->iv_bss->ni_bssid);
+	} else {
+		IEEE80211_ADDR_SET_NULL(&ap_addr->sa_data);
+	}
+
+	ap_addr->sa_family = ARPHRD_ETHER;
+	return 0;
+}
+
+/*
+ * SIOCSIWNICKN: store the station nickname (at most IEEE80211_NWID_LEN
+ * bytes).  The buffer is zeroed first so a shorter name leaves no
+ * stale bytes behind.
+ */
+static int
+ieee80211_ioctl_siwnickn(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *nickname)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	if (data->length > IEEE80211_NWID_LEN)
+		return -EINVAL;
+
+	memset(vap->iv_nickname, 0, IEEE80211_NWID_LEN);
+	memcpy(vap->iv_nickname, nickname, data->length);
+	vap->iv_nicknamelen = data->length;
+
+	return 0;
+}
+
+/*
+ * SIOCGIWNICKN: copy the stored nickname out, NUL-terminated, clamped
+ * to the caller's buffer (data->length, which includes the NUL).
+ */
+static int
+ieee80211_ioctl_giwnickn(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *nickname)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	unsigned int room = vap->iv_nicknamelen + 1;	/* name + NUL */
+
+	if (data->length > room)
+		data->length = room;
+	if (data->length == 0)
+		return 0;
+
+	memcpy(nickname, vap->iv_nickname, data->length - 1); /* XXX: strcpy? */
+	nickname[data->length - 1] = '\0';
+	return 0;
+}
+
+/*
+ * Return 1 if an 802.11g channel with the given frequency appears in
+ * the channel list at or after index i, else 0.
+ */
+static int
+find11gchannel(struct ieee80211com *ic, int i, int freq)
+{
+	int j;
+
+	for (j = i; j < ic->ic_nchans; j++) {
+		const struct ieee80211_channel *chan = &ic->ic_channels[j];
+
+		if (chan->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(chan))
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Look up channel number `ieee` in the channel table for the given phy
+ * mode.  In AUTO mode, when a number is shared by an 11b and an 11g
+ * entry, the 11g entry is preferred.  Returns NULL if not found.
+ */
+struct ieee80211_channel *
+findchannel(struct ieee80211com *ic, int ieee, int mode)
+{
+	u_int modeflags = ieee80211_get_chanflags(mode);
+	int i;
+
+	for (i = 0; i < ic->ic_nchans; i++) {
+		struct ieee80211_channel *chan = &ic->ic_channels[i];
+
+		if (chan->ic_ieee != ieee)
+			continue;
+
+		if (mode != IEEE80211_MODE_AUTO) {
+			if ((chan->ic_flags & modeflags) == modeflags)
+				return chan;
+			continue;
+		}
+
+		/*
+		 * Auto mode: skip a b channel if a g channel with the
+		 * same frequency occurs later in the table.
+		 */
+		if (!IEEE80211_IS_CHAN_B(chan) ||
+		    !find11gchannel(ic, i + 1, chan->ic_freq))
+			return chan;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(findchannel);
+
+/*
+ * Look up a channel by number, preferring `prefer_mode` but falling
+ * back to any mode.  Returns IEEE80211_CHAN_ANYC (and logs) when the
+ * channel does not exist at all.
+ */
+struct ieee80211_channel *
+findchannel_any(struct ieee80211com *ic, int ieee, int prefer_mode)
+{
+	struct ieee80211_channel *chan;
+
+	chan = findchannel(ic, ieee, prefer_mode);
+	if (chan != NULL)
+		return chan;
+
+	/* Preferred mode missed; retry without a mode constraint. */
+	chan = findchannel(ic, ieee, IEEE80211_MODE_AUTO);
+	if (chan != NULL)
+		return chan;
+
+	printk("Channel %d does not exist\n", ieee);
+	return IEEE80211_CHAN_ANYC;
+}
+
+/*
+ * Resolve an IEEE channel number to a channel structure, but only if
+ * the number is in range and the channel is currently active.  Tries
+ * the desired mode first, then any mode.  Returns NULL on failure.
+ */
+struct ieee80211_channel *ieee80211_find_channel_by_ieee(struct ieee80211com *ic, int chan_ieee)
+{
+	struct ieee80211_channel *chan;
+
+	/* Range check must come first: isclr() indexes a bitmap. */
+	if (chan_ieee > IEEE80211_CHAN_MAX)
+		return NULL;
+	if (isclr(ic->ic_chan_active, chan_ieee))
+		return NULL;
+
+	chan = findchannel(ic, chan_ieee, ic->ic_des_mode);
+	if (chan == NULL)
+		chan = findchannel(ic, chan_ieee, IEEE80211_MODE_AUTO);
+
+	return chan;
+}
+EXPORT_SYMBOL(ieee80211_find_channel_by_ieee);
+
+/* Map a software-feature id to its human-readable description. */
+static char *
+ieee80211_wireless_swfeat_desc(const enum swfeat feat)
+{
+	switch (feat) {
+	case SWFEAT_ID_MODE_AP:
+		return "Access Point";
+	case SWFEAT_ID_MODE_STA:
+		return "Non-AP station";
+	case SWFEAT_ID_MODE_REPEATER:
+		return "Repeater";
+	case SWFEAT_ID_PCIE_RC:
+		return "PCIe RC mode";
+	case SWFEAT_ID_VHT:
+		return "VHT (802.11ac)";
+	case SWFEAT_ID_2X2:
+		return "802.11ac 2x2";
+	case SWFEAT_ID_2X4:
+		return "802.11ac 2x4";
+	case SWFEAT_ID_3X3:
+		return "802.11ac 3x3";
+	case SWFEAT_ID_4X4:
+		return "802.11ac 4x4";
+	case SWFEAT_ID_HS20:
+		return "Hotspot 2.0 (802.11u)";
+	case SWFEAT_ID_WPA2_ENT:
+		return "WPA2 Enterprise";
+	case SWFEAT_ID_MESH:
+		return "Mesh (802.11s)";
+	case SWFEAT_ID_TDLS:
+		return "TDLS (802.11z)";
+	case SWFEAT_ID_OCAC:
+		return "Zero-Second DFS (OCAC)";
+	case SWFEAT_ID_QHOP:
+		return "QHOP (WDS Extender)";
+	case SWFEAT_ID_QSV:
+		return "Spectrum View (QSV)";
+	case SWFEAT_ID_QSV_NEIGH:
+		return "Neighbour Report";
+	case SWFEAT_ID_MU_MIMO:
+		return "MU-MIMO";
+	case SWFEAT_ID_DUAL_CHAN_VIRT:
+		return "Dual Channel Virtual Concurrent";
+	case SWFEAT_ID_DUAL_CHAN:
+		return "Dual Channel Dual Concurrent";
+	case SWFEAT_ID_DUAL_BAND_VIRT:
+		return "Dual Band Virtual Concurrent";
+	case SWFEAT_ID_DUAL_BAND:
+		return "Dual Band Dual Concurrent";
+	case SWFEAT_ID_QTM_PRIO:
+		return "QTM - Per SSID Prioritisation ";
+	case SWFEAT_ID_QTM:
+		return "QTM - Network Aware";
+	case SWFEAT_ID_SPEC_ANALYZER:
+		return "Spectrum Analyzer";
+	case SWFEAT_ID_MAX:
+		break;
+	}
+
+	return "Invalid feature";
+}
+
+/*
+ * Print the enabled software features, one description per line, into
+ * a user-supplied buffer of @len bytes (truncated if it fills up).
+ *
+ * Returns 0 on success, -EFAULT for a NULL user pointer, -ENOMEM when
+ * the kernel bounce buffer cannot be allocated, or -EIO when the
+ * final copy_to_user() fails.
+ *
+ * Fix: allocation failure previously returned -EFAULT (bad address),
+ * misreporting the condition; the conventional errno is -ENOMEM.
+ */
+static int
+ieee80211_subioctl_print_swfeat_map(struct net_device *dev,
+				void __user *outbuf, int len)
+{
+	char *buf;
+	char *bufp;
+	int i;
+	int j;
+	int rem = len;
+	int rc = 0;
+
+	if (!outbuf) {
+		printk("%s: NULL pointer for user request\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (buf == NULL) {
+		printk("%s: buffer alloc failed\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	bufp = buf;
+	for (i = 0; i < SWFEAT_ID_MAX; i++) {
+		if (isset(soc_shared_params->swfeat_map, i)) {
+			/*
+			 * snprintf returns the would-be length; if it meets
+			 * or exceeds rem the buffer is full (rem goes <= 0
+			 * below and we stop).
+			 */
+			j = snprintf(bufp, rem, "%s\n", ieee80211_wireless_swfeat_desc(i));
+			if (j <= 0)
+				break;
+			bufp += j;
+			rem -= j;
+			if (rem <= 0)
+				break;
+		}
+	}
+
+	if (copy_to_user(outbuf, buf, len) != 0) {
+		printk("%s: copy_to_user failed\n", __FUNCTION__);
+		rc = -EIO;
+	}
+
+	kfree(buf);
+
+	return rc;
+}
+
+/*
+ * Copy the raw software-feature bitmap to user space.  The caller's
+ * buffer must be exactly the size of the bitmap.
+ */
+static int
+ieee80211_subioctl_get_swfeat_map(struct net_device *dev,
+				void __user *swfeat_map, int len)
+{
+	if (swfeat_map == NULL) {
+		printk("%s: NULL pointer for user request\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (len != sizeof(soc_shared_params->swfeat_map)) {
+		printk("%s: invalid size\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	if (copy_to_user(swfeat_map, &soc_shared_params->swfeat_map, len)) {
+		printk("%s: copy_to_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Feature restrictions are enforced in the MuC firmware. Bypassing this check will
+ * cause the system to continually reboot.
+ */
+int ieee80211_swfeat_is_supported(uint16_t feat, uint8_t print_msg)
+{
+	int supported = (feat < SWFEAT_ID_MAX) &&
+			isset(soc_shared_params->swfeat_map, feat);
+
+	if (!supported && print_msg)
+		printk("%s is not supported on this device\n",
+			ieee80211_wireless_swfeat_desc(feat));
+
+	return supported;
+}
+EXPORT_SYMBOL(ieee80211_swfeat_is_supported);
+
+/*
+ * Validate a VHT (11ac) MCS value / NSS pair against the absolute
+ * limits and the stream count of this hardware variant.
+ */
+static inline int
+ieee80211_vht_tx_mcs_is_valid(uint32_t mcs_val, uint32_t mcs_nss)
+{
+	if (mcs_val >= IEEE80211_AC_MCS_MAX)
+		return 0;
+	if (mcs_nss >= IEEE80211_AC_MCS_NSS_MAX)
+		return 0;
+
+	/* Cap the stream count by what the chip variant supports. */
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X2, 0) ||
+			ieee80211_swfeat_is_supported(SWFEAT_ID_2X4, 0))
+		return mcs_nss < IEEE80211_VHT_NSS2;
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0))
+		return mcs_nss < IEEE80211_VHT_NSS3;
+
+	return 1;
+}
+
+/*
+ * Validate an HT (11n) MCS index against the absolute range and the
+ * equal/unequal-modulation limits of this hardware variant.
+ */
+static inline int
+ieee80211_ht_tx_mcs_is_valid(uint32_t mcs)
+{
+	uint32_t eq_max;
+	uint32_t uneq_max;
+
+	if (mcs < IEEE80211_HT_EQUAL_MCS_START ||
+			mcs > IEEE80211_UNEQUAL_MCS_MAX ||
+			mcs == IEEE80211_EQUAL_MCS_32)
+		return 0;
+
+	/* Pick the per-variant caps; unknown variants accept anything. */
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X2, 0) ||
+			ieee80211_swfeat_is_supported(SWFEAT_ID_2X4, 0)) {
+		eq_max = IEEE80211_HT_EQUAL_MCS_2SS_MAX;
+		uneq_max = IEEE80211_HT_UNEQUAL_MCS_2SS_MAX;
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0)) {
+		eq_max = IEEE80211_HT_EQUAL_MCS_3SS_MAX;
+		uneq_max = IEEE80211_HT_UNEQUAL_MCS_3SS_MAX;
+	} else {
+		return 1;
+	}
+
+	/* Reject the gap between the equal-MCS cap and the unequal set. */
+	if (mcs > eq_max && mcs < IEEE80211_UNEQUAL_MCS_START)
+		return 0;
+	if (mcs > uneq_max)
+		return 0;
+
+	return 1;
+}
+
+#define	IEEE80211_MODE_TURBO_STATIC_A	IEEE80211_MODE_MAX
+/*
+ * Check whether channel c is usable under phy mode `mode`.
+ * Returns 0 when consistent, 1 when the channel does not match the
+ * mode, and -1 for an unknown/unsupported mode.
+ */
+static int
+ieee80211_check_mode_consistency(struct ieee80211com *ic, int mode,
+	struct ieee80211_channel *c)
+{
+	/* "Any channel" is trivially consistent with every mode. */
+	if (c == IEEE80211_CHAN_ANYC)
+		return 0;
+
+	switch (mode) {
+	case IEEE80211_MODE_AUTO:
+		return 0;
+	case IEEE80211_MODE_11B:
+		return IEEE80211_IS_CHAN_B(c) ? 0 : 1;
+	case IEEE80211_MODE_11G:
+		return IEEE80211_IS_CHAN_ANYG(c) ? 0 : 1;
+	case IEEE80211_MODE_11NG:
+		return IEEE80211_IS_CHAN_11NG(c) ? 0 : 1;
+	case IEEE80211_MODE_11NG_HT40PM:
+		return (IEEE80211_IS_CHAN_11NG_HT40PLUS(c) ||
+			IEEE80211_IS_CHAN_11NG_HT40MINUS(c)) ? 0 : 1;
+	case IEEE80211_MODE_11NA:
+		return IEEE80211_IS_CHAN_11NA(c) ? 0 : 1;
+	case IEEE80211_MODE_11A:
+		return IEEE80211_IS_CHAN_A(c) ? 0 : 1;
+	case IEEE80211_MODE_11NA_HT40PM:
+		return (IEEE80211_IS_CHAN_11NA_HT40PLUS(c) ||
+			IEEE80211_IS_CHAN_11NA_HT40MINUS(c)) ? 0 : 1;
+	case IEEE80211_MODE_TURBO_STATIC_A:
+		/* Static turbo A is never supported on this hardware. */
+		return (IEEE80211_IS_CHAN_A(c) &&
+			IEEE80211_IS_CHAN_STURBO(c)) ? -1 : 1;
+	case IEEE80211_MODE_11AC_VHT20PM:
+		return IEEE80211_IS_CHAN_11AC(c) ? 0 : 1;
+	case IEEE80211_MODE_11AC_VHT40PM:
+		return (IEEE80211_IS_CHAN_11AC_VHT40PLUS(c) ||
+			IEEE80211_IS_CHAN_11AC_VHT40MINUS(c)) ? 0 : 1;
+	case IEEE80211_MODE_11AC_VHT80PM:
+		return (IEEE80211_IS_CHAN_11AC_VHT80_EDGEPLUS(c) ||
+			IEEE80211_IS_CHAN_11AC_VHT80_CNTRPLUS(c) ||
+			IEEE80211_IS_CHAN_11AC_VHT80_CNTRMINUS(c) ||
+			IEEE80211_IS_CHAN_11AC_VHT80_EDGEMINUS(c)) ? 0 : 1;
+	}
+	return -1;
+}
+#undef	IEEE80211_MODE_TURBO_STATIC_A
+
+/*
+ * Return 1 when channel c may be used in the current band: STAs simply
+ * follow their AP, everyone else must be consistent with the desired
+ * phy mode.
+ */
+static inline int ieee80211_chan_allowed_in_band(struct ieee80211com *ic,
+				struct ieee80211_channel *c, enum ieee80211_opmode opmode)
+{
+	if (opmode == IEEE80211_M_STA)
+		return 1;
+
+	return ieee80211_check_mode_consistency(ic, ic->ic_des_mode, c) == 0;
+}
+
+/*
+ * Force a fresh channel scan: clear any fixed desired channel, and if
+ * an AP VAP is up, pre-announce the channel change to stations and
+ * drop the state machine into SCAN.
+ */
+void
+ieee80211_initiate_scan(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	ic->ic_des_chan = IEEE80211_CHAN_ANYC;
+
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP || !IS_UP(vap->iv_dev))
+		return;
+
+	pre_announced_chanswitch(vap->iv_dev,
+				 ieee80211_chan2ieee(ic, ic->ic_des_chan),
+				 IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT);
+	ic->ic_curchan = ic->ic_des_chan;
+	ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+}
+EXPORT_SYMBOL(ieee80211_initiate_scan);
+
+/*
+ * SIOCSIWFREQ: set the operating channel.
+ *
+ * The request may carry a channel number (e == 0) or a frequency
+ * (e == 1, units of 100 kHz).  Channel 0 means "no desired channel"
+ * (re-enable automatic selection).  For an AP the change is applied
+ * via a CSA announcement when running, otherwise through a SCAN state
+ * transition; monitor/WDS VAPs switch the radio directly.  Any scan
+ * in progress is cancelled first so the new channel takes effect.
+ */
+static int
+ieee80211_ioctl_siwfreq(struct net_device *dev, struct iw_request_info *info,
+	struct iw_freq *freq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211vap *canceled_scan_vap = NULL;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *c, *c2;
+	int i;
+
+	if (freq->e > 1) {
+		return -EINVAL;
+	}
+	/* e == 1: value is a frequency in 100 kHz; convert to a channel. */
+	if (freq->e == 1) {
+		i = (ic->ic_mhz2ieee)(ic, freq->m / 100000, 0);
+	} else {
+		i = freq->m;
+	}
+
+	if (i != 0) {
+		if (i > IEEE80211_CHAN_MAX) {
+			printk("Channel %d is invalid\n", i);
+			return -EINVAL;
+		}
+
+		c = findchannel(ic, i, ic->ic_des_mode);
+		if (c == NULL) {
+			printk("Channel %d does not exist\n", i);
+			return -EINVAL;
+		}
+
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			/* Re-selecting the current channel just aborts ICAC. */
+			if ((c->ic_freq == ic->ic_curchan->ic_freq) && ic->ic_chan_is_set) {
+				if (ic->ic_get_init_cac_duration(ic) > 0) {
+					ic->ic_stop_icac_procedure(ic);
+					printk(KERN_DEBUG "ICAC: Aborted ICAC due to set channel request\n");
+				}
+				return 0;
+			}
+
+			if (!ic->ic_check_channel(ic, c, 0, 1)) {
+				printk("Channel %d (%d MHz) cannot be selected\n", i, c->ic_freq);
+				return -EINVAL;
+			}
+		}
+
+		/* Weather-radar channels need CAC clearance first. */
+		if (!ic->ic_weachan_cac_allowed &&
+				(!ieee80211_is_chan_available(c)) &&
+				ieee80211_is_on_weather_channel(ic, c)) {
+			printk("Weather channel %d (%d MHz) cannot be selected\n", i, c->ic_freq);
+			return -EINVAL;
+		}
+
+		c = ieee80211_chk_update_pri_chan(ic, c, 0, "iwconfig", 1);
+		/* Reject channels marked inactive for manual selection. */
+		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
+				isset(ic->ic_chan_pri_inactive, c->ic_ieee) &&
+				isclr(ic->ic_is_inactive_autochan_only, c->ic_ieee)) {
+			return -EINVAL;
+		}
+
+		i = c->ic_ieee;
+
+		/*
+		 * Fine tune channel selection based on desired mode:
+		 *   if 11b is requested, find the 11b version of any
+		 *      11g channel returned,
+		 *   if static turbo, find the turbo version of any
+		 *	11a channel return,
+		 *   otherwise we should be ok with what we've got.
+		 */
+		switch (ic->ic_des_mode) {
+		case IEEE80211_MODE_11B:
+			if (IEEE80211_IS_CHAN_ANYG(c)) {
+				c2 = findchannel(ic, i, IEEE80211_MODE_11B);
+				/* NB: should not happen, =>'s 11g w/o 11b */
+				if (c2 != NULL)
+					c = c2;
+			}
+			break;
+		case IEEE80211_MODE_TURBO_A:
+			if (IEEE80211_IS_CHAN_A(c)) {
+				c2 = findchannel(ic, i, IEEE80211_MODE_TURBO_A);
+				if (c2 != NULL)
+					c = c2;
+			}
+			break;
+		default:		/* NB: no static turboG */
+			break;
+		}
+		if (ieee80211_check_mode_consistency(ic, ic->ic_des_mode, c)) {
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+				return -EINVAL;
+		}
+
+		/*
+		 * Cancel scan before setting desired channel or before return when the channel
+		 * is same as bss channel
+		 */
+		if ((vap->iv_opmode == IEEE80211_M_HOSTAP) && ((ic->ic_flags & IEEE80211_F_SCAN)
+#ifdef QTN_BG_SCAN
+				|| (ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
+#endif /* QTN_BG_SCAN */
+				)) {
+			/*
+			 * Find which vap is in SCAN state(Only one can be in SCAN state at the same time, other
+			 * is pending for scan done on this vap)
+			 * For MBSS, it may be the primary vap, or the last vap whose mode is IEEE80211_M_HOSTAP
+			 */
+			TAILQ_FOREACH(canceled_scan_vap, &ic->ic_vaps, iv_next) {
+				if (canceled_scan_vap->iv_state == IEEE80211_S_SCAN) {
+					break;
+				}
+			}
+			/*
+			 * TAILQ_FOREACH leaves the cursor non-NULL at the list
+			 * end; normalise to NULL when no VAP was in SCAN state.
+			 */
+			if (canceled_scan_vap != NULL) {
+				if (canceled_scan_vap->iv_state != IEEE80211_S_SCAN) {
+					canceled_scan_vap = NULL;
+				}
+			}
+
+			if (canceled_scan_vap) {
+				/*
+				 * Cancel channel scan on vap which is SCAN state
+				 * For example: scan triggered by freq ioctl or channel auto when boot up
+				 */
+				ieee80211_cancel_scan_no_wait(canceled_scan_vap);
+			} else {
+				/*
+				 * Cancel channel scan(vap is not in SCAN state)
+				 * For example: scan triggered by scan ioctl
+				 */
+				ieee80211_cancel_scan_no_wait(vap);
+			}
+		}
+
+		if (vap->iv_state == IEEE80211_S_RUN && c == ic->ic_bsschan)
+			return 0;	/* no change, return */
+
+		ic->ic_des_chan = c;
+	} else {
+		/*
+		 * Intepret channel 0 to mean "no desired channel";
+		 * otherwise there's no way to undo fixing the desired
+		 * channel.
+		 */
+		if (ic->ic_des_chan == IEEE80211_CHAN_ANYC)
+			return 0;
+		ic->ic_des_chan = IEEE80211_CHAN_ANYC;
+	}
+
+	/* Go out of idle state and delay idle state check.*/
+	if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_IDLE) {
+		pm_qos_update_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_WLAN, BOARD_PM_LEVEL_NO);
+		ic->ic_pm_reason = IEEE80211_PM_LEVEL_SIWFREQ;
+		ieee80211_pm_queue_work(ic);
+	}
+
+	if (ic->ic_des_chan == IEEE80211_CHAN_ANYC) {
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_SCAN;
+	} else {
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_MANUAL;
+	}
+
+	if ((vap->iv_opmode == IEEE80211_M_MONITOR ||
+	    vap->iv_opmode == IEEE80211_M_WDS) &&
+	    ic->ic_des_chan != IEEE80211_CHAN_ANYC) {
+		/* Monitor and wds modes can switch directly. */
+		ic->ic_curchan = ic->ic_des_chan;
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			ic->ic_set_channel(ic);
+		}
+	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		/*
+		 * Use channel switch announcement on beacon if possible.
+		 * Otherwise, ic_des_chan will take  effect when we are transitioned
+		 * to RUN state later.
+		 * We use ic_set_channel directly if we are "running" but not "up".
+		 */
+
+		if (IS_UP(vap->iv_dev)) {
+			if ((ic->ic_des_chan != IEEE80211_CHAN_ANYC) &&
+				(vap->iv_state == IEEE80211_S_RUN)) {
+				ieee80211_enter_csa(ic, ic->ic_des_chan, NULL,
+					IEEE80211_CSW_REASON_MANUAL,
+					IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT,
+					IEEE80211_CSA_MUST_STOP_TX,
+					IEEE80211_CSA_F_BEACON | IEEE80211_CSA_F_ACTION);
+			} else {
+				if (canceled_scan_vap) {
+					/*
+					 * Scan is canceled on vap which is in SCAN state,
+					 * do SCAN -> SCAN on vap of scan canceled
+					 */
+					ieee80211_new_state(canceled_scan_vap, IEEE80211_S_SCAN, 0);
+				} else {
+					ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+				}
+			}
+		} else if (ic->ic_des_chan != IEEE80211_CHAN_ANYC) {
+			ic->ic_curchan = ic->ic_des_chan;
+			ic->ic_set_channel(ic);
+		}
+	} else {
+		/* Need to go through the state machine in case we need
+		 * to reassociate or the like.  The state machine will
+		 * pickup the desired channel and avoid scanning. */
+		if (IS_UP_AUTO(vap)) {
+			ic->ic_curchan = ic->ic_des_chan;
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+			/* In case of no channel change, Don't Scan. Only VCO cal is required */
+			ic->ic_set_channel(ic);
+		} else {
+			/* STA doesn't support auto channel */
+			if ((vap->iv_opmode == IEEE80211_M_STA) && (ic->ic_des_chan == IEEE80211_CHAN_ANYC)) {
+				ic->ic_des_chan = ic->ic_curchan;
+				return -EINVAL;
+			} else {
+				ic->ic_curchan = ic->ic_des_chan;
+				ic->ic_set_channel(ic);
+			}
+		}
+	}
+
+	/* A manual channel change always aborts a pending initial CAC. */
+	if (ic->ic_get_init_cac_duration(ic) > 0) {
+		ic->ic_stop_icac_procedure(ic);
+		printk(KERN_DEBUG "ICAC: Aborted ICAC due to set channel request\n");
+	}
+
+	ic->ic_chan_switch_reason_record(ic, IEEE80211_CSW_REASON_MANUAL);
+	return 0;
+}
+
+/*
+ * SIOCGIWFREQ: report the operating frequency.  Reported in units of
+ * 10 Hz (m = MHz * 100000, e = 1); 0 when no channel is determined.
+ */
+static int
+ieee80211_ioctl_giwfreq(struct net_device *dev, struct iw_request_info *info,
+	struct iw_freq *freq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	int mhz = 0;
+
+	if (vap->iv_state == IEEE80211_S_RUN &&
+	    vap->iv_opmode != IEEE80211_M_MONITOR) {
+		/*
+		 * NB: use curchan for monitor mode so you can see
+		 *     manual scanning by apps like kismet.
+		 */
+		KASSERT(ic->ic_bsschan != IEEE80211_CHAN_ANYC,
+			("bss channel not set"));
+		mhz = ic->ic_curchan->ic_freq;
+	} else if (vap->iv_state != IEEE80211_S_INIT) {
+		/* e.g. when scanning */
+		if (ic->ic_curchan != IEEE80211_CHAN_ANYC)
+			mhz = ic->ic_curchan->ic_freq;
+	} else if (ic->ic_des_chan != IEEE80211_CHAN_ANYC) {
+		mhz = ic->ic_des_chan->ic_freq;
+	}
+
+	freq->m = mhz * 100000;
+	freq->e = 1;
+
+	return 0;
+}
+
+/*
+ * SIOCSIWESSID: set the desired SSID.  flags == 0 selects "any" SSID;
+ * otherwise the name is stored in desired-SSID slot 0.  A live
+ * interface is reinitialised with a rescan so the change takes effect.
+ */
+static int
+ieee80211_ioctl_siwessid(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *ssid)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	if (vap->iv_opmode == IEEE80211_M_WDS)
+		return -EOPNOTSUPP;
+
+	if (data->flags == 0) {
+		/* ANY: clear the desired-SSID list. */
+		vap->iv_des_nssid = 0;
+	} else {
+		if (data->length > IEEE80211_NWID_LEN)
+			data->length = IEEE80211_NWID_LEN;
+		/* NB: always use entry 0 */
+		memcpy(vap->iv_des_ssid[0].ssid, ssid, data->length);
+		vap->iv_des_ssid[0].len = data->length;
+		vap->iv_des_nssid = 1;
+		/*
+		 * iwconfig passes a length that includes a trailing '\0';
+		 * drop it so only meaningful bytes are kept.  A name that
+		 * genuinely ends in '\0' is mishandled, but the botched
+		 * wireless-extensions API gives no way to distinguish.
+		 */
+		if (data->length > 0 &&
+		    vap->iv_des_ssid[0].ssid[data->length - 1] == '\0')
+			vap->iv_des_ssid[0].len--;
+	}
+
+	/* Restart a live interface so the new SSID is used. */
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		return IS_UP_AUTO(vap) ? ieee80211_init(vap->iv_dev, RESCAN) : 0;
+
+	return IS_UP(vap->iv_dev) ? ieee80211_init(vap->iv_dev, RESCAN) : 0;
+}
+
+static int
+ieee80211_ioctl_giwessid(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *essid)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	const u_int8_t *src;
+	unsigned int len;
+
+	/* WDS interfaces have no SSID of their own */
+	if (vap->iv_opmode == IEEE80211_M_WDS)
+		return -EOPNOTSUPP;
+
+	data->flags = 1;		/* active */
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP && vap->iv_des_nssid == 0) {
+		/* AP with no configured SSID: nothing to report */
+		data->length = 0;
+		return 0;
+	}
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+	    vap->iv_des_nssid == 0 && vap->iv_bss) {
+		/* station with no desired SSID: report the joined BSS */
+		src = vap->iv_bss->ni_essid;
+		len = vap->iv_bss->ni_esslen;
+	} else {
+		/* NB: always entry 0 of the desired-SSID list */
+		src = vap->iv_des_ssid[0].ssid;
+		len = vap->iv_des_ssid[0].len;
+	}
+	if (data->length > len)
+		data->length = len;
+	memcpy(essid, src, data->length);
+	return 0;
+}
+
+/*
+ * SIOCGIWRANGE: fill in an iw_range with the device capabilities
+ * (tx power levels, channels/frequencies, bitrates, encoding sizes
+ * and supported wireless-extensions events).
+ */
+static int
+ieee80211_ioctl_giwrange(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	//struct ieee80211_node *ni = vap->iv_bss;
+	struct iw_range *range = (struct iw_range *) extra;
+	struct ieee80211_rateset *rs;
+	uint8_t reported[IEEE80211_CHAN_BYTES];	/* XXX stack usage? */
+	uint8_t *chan_active;
+	int i, r, chan_mode = 0;
+	int step = 0;
+	uint8_t sgi = 0;
+
+	data->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(struct iw_range));
+
+	/* txpower (128 values, but will print out only IW_MAX_TXPOWER) */
+	range->num_txpower = (ic->ic_txpowlimit >= 8) ? IW_MAX_TXPOWER : ic->ic_txpowlimit;
+	step = ic->ic_txpowlimit / (2 * (IW_MAX_TXPOWER - 1));
+
+	/* evenly spaced dBm values up to the limit; ic_txpowlimit is in
+	 * 0.5 dBm units, hence the /2 */
+	range->txpower[0] = 0;
+	for (i = 1; i < IW_MAX_TXPOWER; i++)
+		range->txpower[i] = (ic->ic_txpowlimit/2)
+			- (IW_MAX_TXPOWER - i - 1) * step;
+
+	range->txpower_capa = IW_TXPOW_DBM;
+
+	/* power-management parameter ranges apply to STA/IBSS only */
+	if (vap->iv_opmode == IEEE80211_M_STA ||
+	    vap->iv_opmode == IEEE80211_M_IBSS) {
+		range->min_pmp = 1 * 1024;
+		range->max_pmp = 65535 * 1024;
+		range->min_pmt = 1 * 1024;
+		range->max_pmt = 1000 * 1024;
+		range->pmp_flags = IW_POWER_PERIOD;
+		range->pmt_flags = IW_POWER_TIMEOUT;
+		range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT |
+			IW_POWER_UNICAST_R | IW_POWER_ALL_R;
+	}
+
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 13;
+
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->min_retry = 0;
+	range->max_retry = 255;
+
+	/* STA mode advertises the 20 MHz channel set only;
+	 * NOTE(review): rationale not visible here -- confirm */
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		chan_active = ic->ic_chan_active_20;
+	else
+		chan_active = ic->ic_chan_active;
+
+	range->num_frequency = 0;
+	memset(reported, 0, sizeof(reported));
+	for (i = 0; i < ic->ic_nchans; i++) {
+		struct ieee80211_channel *c = &ic->ic_channels[i];
+
+		/* discard if previously reported (e.g. b/g) */
+		if (isclr(reported, c->ic_ieee) &&
+				isset(chan_active, c->ic_ieee) &&
+				(ieee80211_chan_allowed_in_band(ic, c, vap->iv_opmode))) {
+			setbit(reported, c->ic_ieee);
+			range->freq[range->num_frequency].i = c->ic_ieee;
+			range->freq[range->num_frequency].m =
+				ic->ic_channels[i].ic_freq * 100000;
+			range->freq[range->num_frequency].e = 1;
+			if (++range->num_frequency == IW_MAX_FREQUENCIES)
+				break;
+		}
+	}
+
+	/* Supported channels count */
+	range->num_channels = range->num_frequency;
+
+	/* Atheros' RSSI value is SNR: 0 -> 60 for old chipsets. Range
+	 * for newer chipsets is unknown. This value is arbitrarily chosen
+	 * to give an indication that full rate will be available and to be
+	 * a practicable maximum. */
+	range->max_qual.qual  = 70;
+
+	/* XXX: This should be updated to use the current noise floor. */
+	/* These are negative full bytes.
+	 * Min. quality is noise + 1 */
+#define QNT_DEFAULT_NOISE 0
+	range->max_qual.updated |= IW_QUAL_DBM;
+	range->max_qual.level = QNT_DEFAULT_NOISE + 1;
+	range->max_qual.noise = QNT_DEFAULT_NOISE;
+
+	range->sensitivity = 1;
+
+	range->max_encoding_tokens = IEEE80211_WEP_NKID;
+	/* XXX query driver to find out supported key sizes */
+	range->num_encoding_sizes = 3;
+	range->encoding_size[0] = 5;		/* 40-bit */
+	range->encoding_size[1] = 13;		/* 104-bit */
+	range->encoding_size[2] = 16;		/* 128-bit */
+
+	/* choose the width/SGI settings used for MCS-to-rate conversion */
+	if (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) {
+		chan_mode = 1;
+		sgi = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+	} else {
+		sgi = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+	}
+	rs = &ic->ic_sup_rates[ic->ic_des_mode];
+	range->num_bitrates = rs->rs_nrates;
+	if (range->num_bitrates > MIN(IEEE80211_RATE_MAXSIZE, IW_MAX_BITRATES))
+		range->num_bitrates = MIN(IEEE80211_RATE_MAXSIZE, IW_MAX_BITRATES);
+	for (i = 0; i < range->num_bitrates; i++) {
+			r = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+
+			/* Skip legacy rates: entries past rs_legacy_nrates
+			 * are MCS indices and must be converted to rates */
+			if(i >= (rs->rs_legacy_nrates))
+			{
+				r = ieee80211_mcs2rate(r, chan_mode, sgi, 0);
+			}
+			/* rates are stored in 0.5 Mbit/s units */
+			range->bitrate[i] = (r * 1000000) / 2;
+	}
+
+	/* estimated maximum TCP throughput values (bps) */
+	range->throughput = 5500000;
+
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	/* Event capability (kernel) */
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+
+	/* Event capability (driver) */
+	if (vap->iv_opmode == IEEE80211_M_STA ||
+		 vap->iv_opmode == IEEE80211_M_IBSS ||
+		 vap->iv_opmode == IEEE80211_M_AHDEMO) {
+		/* for now, only ibss, ahdemo, sta has this cap */
+		IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		/* for sta only */
+		IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+		IW_EVENT_CAPA_SET(range->event_capa, IWEVREGISTERED);
+		IW_EVENT_CAPA_SET(range->event_capa, IWEVEXPIRED);
+	}
+
+	/* this is used for reporting replay failure, which is used by the different encoding schemes */
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVCUSTOM);
+
+	/* report supported WPA/WPA2 capabilities to userspace */
+	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+			       IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_setspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	/* record the list of MAC addresses whose RSSI should be tracked */
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct sockaddr addrs[IW_MAX_SPY];
+	unsigned int cnt = data->length;
+	unsigned int k;
+
+	if (cnt > IW_MAX_SPY)
+		return -E2BIG;
+
+	/* pull the address array in from userspace */
+	if (!data->pointer)
+		return -EFAULT;
+	if (copy_from_user(addrs, data->pointer,
+	    sizeof(struct sockaddr) * cnt))
+		return -EFAULT;
+
+	if (cnt > 0) {
+		/* flatten the MAC addresses into the spy table */
+		for (k = 0; k < cnt; k++)
+			memcpy(&vap->iv_spy.mac[k * IEEE80211_ADDR_LEN],
+				addrs[k].sa_data, IEEE80211_ADDR_LEN);
+		/* reset the per-entry RSSI timestamps for the new list */
+		memset(vap->iv_spy.ts_rssi, 0, IW_MAX_SPY * sizeof(u_int32_t));
+	}
+	vap->iv_spy.num = cnt;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_getspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	/*
+	 * locate nodes by mac (ieee80211_find_node()),
+	 * copy out rssi, set updated flag appropriately
+	 */
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct sockaddr *address;
+	struct iw_quality *spy_stat;
+	unsigned int number = vap->iv_spy.num;
+	int i;
+
+	/* WE layout: 'number' sockaddrs followed by 'number' iw_quality */
+	address = (struct sockaddr *) extra;
+	spy_stat = (struct iw_quality *) (extra + number * sizeof(struct sockaddr));
+
+	/* echo back the configured MAC list */
+	for (i = 0; i < number; i++) {
+		memcpy(address[i].sa_data, &vap->iv_spy.mac[i * IEEE80211_ADDR_LEN],
+			IEEE80211_ADDR_LEN);
+		address[i].sa_family = AF_PACKET;
+	}
+
+	/* locate a node, read its rssi, check if updated, convert to dBm */
+	for (i = 0; i < number; i++) {
+		ni = ieee80211_find_node(nt, &vap->iv_spy.mac[i * IEEE80211_ADDR_LEN]);
+		/* check we are associated w/ this vap */
+		if (ni) {
+			if (ni->ni_vap == vap) {
+				set_quality(&spy_stat[i], ni->ni_rssi, ic->ic_channoise);
+				/* report the sample as fresh only if the rx
+				 * timestamp moved since the last query */
+				if (ni->ni_rstamp != vap->iv_spy.ts_rssi[i]) {
+					vap->iv_spy.ts_rssi[i] = ni->ni_rstamp;
+				} else {
+					spy_stat[i].updated = 0;
+				}
+			}
+			/* drop the reference taken by ieee80211_find_node() */
+			ieee80211_free_node(ni);
+		} else {
+			spy_stat[i].updated = IW_QUAL_ALL_INVALID;
+		}
+	}
+
+	/* copy results to userspace */
+	data->length = number;
+	return 0;
+}
+
+/* Enhanced iwspy support */
+static int
+ieee80211_ioctl_setthrspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct iw_thrspy thr;
+
+	/* exactly one iw_thrspy record is expected */
+	if (data->length != 1)
+		return -EINVAL;
+	if (!data->pointer)
+		return -EINVAL;
+	if (copy_from_user(&thr, data->pointer, sizeof(struct iw_thrspy)))
+		return -EFAULT;
+
+	if (thr.low.level == 0) {
+		/* a zero low level turns threshold events off */
+		vap->iv_spy.thr_low = 0;
+		vap->iv_spy.thr_high = 0;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+			"%s: disabled iw_spy threshold\n", __func__);
+	} else {
+		/* Convert the supplied signal level/strength into RSSI
+		 * thresholds.  XXX: should use the current noise value. */
+		vap->iv_spy.thr_low = thr.low.level + QNT_DEFAULT_NOISE;
+		vap->iv_spy.thr_high = thr.high.level + QNT_DEFAULT_NOISE;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+			"%s: enabled iw_spy threshold\n", __func__);
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_getthrspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iw_thrspy *thr = (struct iw_thrspy *) extra;
+
+	/* report the currently configured low/high RSSI thresholds */
+	set_quality(&thr->low, vap->iv_spy.thr_low, ic->ic_channoise);
+	set_quality(&thr->high, vap->iv_spy.thr_high, ic->ic_channoise);
+
+	/* one iw_thrspy record goes back to userspace */
+	data->length = 1;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwmode(struct net_device *dev, struct iw_request_info *info,
+	__u32 *mode, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ifmediareq imr;
+	__u32 cur;
+
+	/* The operating mode is fixed when the vap is created; the only
+	 * accepted request is one that matches the current mode. */
+	memset(&imr, 0, sizeof(imr));
+	vap->iv_media.ifm_status((void *) vap, &imr);
+
+	if (imr.ifm_active & IFM_IEEE80211_HOSTAP)
+		cur = IW_MODE_MASTER;
+	else if (imr.ifm_active & IFM_IEEE80211_MONITOR)
+		cur = IW_MODE_MONITOR;
+	else if (imr.ifm_active & IFM_IEEE80211_ADHOC)
+		cur = IW_MODE_ADHOC;
+	else if (imr.ifm_active & IFM_IEEE80211_WDS)
+		cur = IW_MODE_REPEAT;
+	else
+		cur = IW_MODE_INFRA;
+
+	return (*mode == cur) ? 0 : -EINVAL;
+}
+
+static int
+ieee80211_ioctl_giwmode(struct net_device *dev,	struct iw_request_info *info,
+	__u32 *mode, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ifmediareq imr;
+
+	/* translate the active media word into a WE operating mode */
+	memset(&imr, 0, sizeof(imr));
+	vap->iv_media.ifm_status((void *) vap, &imr);
+
+	*mode = IW_MODE_INFRA;		/* default: station */
+	if (imr.ifm_active & IFM_IEEE80211_HOSTAP)
+		*mode = IW_MODE_MASTER;
+	else if (imr.ifm_active & IFM_IEEE80211_MONITOR)
+		*mode = IW_MODE_MONITOR;
+	else if (imr.ifm_active & IFM_IEEE80211_ADHOC)
+		*mode = IW_MODE_ADHOC;
+	else if (imr.ifm_active & IFM_IEEE80211_WDS)
+		*mode = IW_MODE_REPEAT;
+	return 0;
+}
+
+/*
+ * SIOCSIWPOWER: configure 802.11 power management.  Requires the
+ * device to advertise IEEE80211_C_PMGT.  Period/timeout values are
+ * supplied in ms and stored in TU.
+ */
+static int
+ieee80211_ioctl_siwpower(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *wrq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* XXX: These values, flags, and caps do not seem to be used elsewhere
+	 * at all? */
+
+	if ((ic->ic_caps & IEEE80211_C_PMGT) == 0)
+		return -EOPNOTSUPP;
+
+	if (wrq->disabled) {
+		/* switch power management off if it was on */
+		if (ic->ic_flags & IEEE80211_F_PMGTON)
+			ic->ic_flags &= ~IEEE80211_F_PMGTON;
+	} else {
+		switch (wrq->flags & IW_POWER_MODE) {
+		case IW_POWER_UNICAST_R:
+		case IW_POWER_ALL_R:
+		case IW_POWER_ON:
+			if (wrq->flags & IW_POWER_PERIOD) {
+				/* listen interval must be a valid beacon interval */
+				if (IEEE80211_BINTVAL_VALID(wrq->value))
+					ic->ic_lintval = IEEE80211_MS_TO_TU(wrq->value);
+				else
+					return -EINVAL;
+			}
+			if (wrq->flags & IW_POWER_TIMEOUT)
+				ic->ic_holdover = IEEE80211_MS_TO_TU(wrq->value);
+
+			/* NOTE(review): despite its indentation, the next line
+			 * is NOT part of the IW_POWER_TIMEOUT conditional above;
+			 * power management is enabled for every accepted mode. */
+				ic->ic_flags |= IEEE80211_F_PMGTON;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* apply the new settings by resetting the device */
+	return ic->ic_reset(ic);
+}
+
+static int
+ieee80211_ioctl_giwpower(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* report whether power management is on, plus the parameter
+	 * (period or timeout) the caller asked about, in ms */
+	rrq->disabled = ((ic->ic_flags & IEEE80211_F_PMGTON) == 0);
+	if (rrq->disabled)
+		return 0;
+
+	switch (rrq->flags & IW_POWER_TYPE) {
+	case IW_POWER_TIMEOUT:
+		rrq->flags = IW_POWER_TIMEOUT;
+		rrq->value = IEEE80211_TU_TO_MS(ic->ic_holdover);
+		break;
+	case IW_POWER_PERIOD:
+		rrq->flags = IW_POWER_PERIOD;
+		rrq->value = IEEE80211_TU_TO_MS(ic->ic_lintval);
+		break;
+	}
+	rrq->flags |= IW_POWER_ALL_R;
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwretry(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (rrq->disabled) {
+		/* nothing to do if software retry is already off */
+		if ((vap->iv_flags & IEEE80211_F_SWRETRY) == 0)
+			return 0;
+		vap->iv_flags &= ~IEEE80211_F_SWRETRY;
+		return IS_UP(vap->iv_dev) ? ic->ic_reset(ic) : 0;
+	}
+
+	if ((vap->iv_caps & IEEE80211_C_SWRETRY) == 0)
+		return -EOPNOTSUPP;
+
+	if (rrq->flags == IW_RETRY_LIMIT) {
+		if (rrq->value < 0) {
+			/* negative limit disables software retry */
+			vap->iv_flags &= ~IEEE80211_F_SWRETRY;
+		} else {
+			vap->iv_txmin = rrq->value;
+			vap->iv_txmax = rrq->value;	/* XXX */
+			vap->iv_txlifetime = 0;		/* XXX */
+			vap->iv_flags |= IEEE80211_F_SWRETRY;
+		}
+		return 0;
+	}
+
+	/* any other flag combination: just reset if the device is up */
+	return IS_UP(vap->iv_dev) ? ic->ic_reset(ic) : 0;
+}
+
+/*
+ * SIOCGIWRETRY: report the software retry configuration.
+ */
+static int
+ieee80211_ioctl_giwretry(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	/* Bug fix: the original tested (rrq->flags & IW_RETRY_MODIFIER)
+	 * *after* overwriting rrq->flags with IW_RETRY_LIMIT, so the
+	 * modifier was always 0, no case matched and rrq->value was
+	 * never filled in.  Save the caller's flags before reporting. */
+	__u16 req_flags = rrq->flags;
+
+	rrq->disabled = (vap->iv_flags & IEEE80211_F_SWRETRY) == 0;
+	if (!rrq->disabled) {
+		switch (req_flags & IW_RETRY_TYPE) {
+		case IW_RETRY_LIFETIME:
+			rrq->flags = IW_RETRY_LIFETIME;
+			rrq->value = IEEE80211_TU_TO_MS(vap->iv_txlifetime);
+			break;
+		case IW_RETRY_LIMIT:
+			rrq->flags = IW_RETRY_LIMIT;
+			switch (req_flags & IW_RETRY_MODIFIER) {
+			case IW_RETRY_MIN:
+				/* was a copy-paste bug: set IW_RETRY_MAX */
+				rrq->flags |= IW_RETRY_MIN;
+				rrq->value = vap->iv_txmin;
+				break;
+			case IW_RETRY_MAX:
+			default:
+				/* no modifier: report the max limit */
+				rrq->flags |= IW_RETRY_MAX;
+				rrq->value = vap->iv_txmax;
+				break;
+			}
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * SIOCSIWTXPOW: set or fix the transmit power.  rrq->value is in dBm;
+ * ni_txpower and ic_newtxpowlimit are kept in 0.5 dBm units, hence
+ * the factor of 2.
+ */
+static int
+ieee80211_ioctl_siwtxpow(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	int fixed, disabled;
+
+	fixed = (ic->ic_flags & IEEE80211_F_TXPOW_FIXED);
+	/* no bss node yet -- nothing to apply the power setting to */
+	if (!vap->iv_bss) {
+		return 0;
+	}
+
+	/* "disabled" means tx power fixed at 0 */
+	disabled = (fixed && vap->iv_bss->ni_txpower == 0);
+	if (rrq->disabled) {
+		if (!disabled) {
+			if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
+				return -EOPNOTSUPP;
+			ic->ic_flags |= IEEE80211_F_TXPOW_FIXED;
+			vap->iv_bss->ni_txpower = 0;
+			goto done;
+		}
+		return 0;
+	}
+
+	if (rrq->fixed) {
+		/* fix the power at the requested value (dBm only) */
+		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
+			return -EOPNOTSUPP;
+		if (rrq->flags != IW_TXPOW_DBM)
+			return -EOPNOTSUPP;
+		if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
+			/* honor the channel regulatory limit and the
+			 * device limit */
+			if (ic->ic_bsschan->ic_maxregpower >= rrq->value &&
+			    ic->ic_txpowlimit/2 >= rrq->value) {
+ 			        vap->iv_bss->ni_txpower = 2 * rrq->value;
+				ic->ic_newtxpowlimit = 2 * rrq->value;
+ 				ic->ic_flags |= IEEE80211_F_TXPOW_FIXED;
+ 			} else
+				return -EINVAL;
+		} else {
+			/*
+			 * No channel set yet
+			 */
+			if (ic->ic_txpowlimit/2 >= rrq->value) {
+				vap->iv_bss->ni_txpower = 2 * rrq->value;
+				ic->ic_newtxpowlimit = 2 * rrq->value;
+				ic->ic_flags |= IEEE80211_F_TXPOW_FIXED;
+			}
+			else
+				return -EINVAL;
+		}
+	} else {
+		if (!fixed)		/* no change */
+			return 0;
+		/* back to automatic power control */
+		ic->ic_flags &= ~IEEE80211_F_TXPOW_FIXED;
+	}
+done:
+	/* apply the new setting by resetting the device */
+	return ic->ic_reset(ic);
+}
+
+/*
+ * SIOCGIWTXPOW: report the current transmit power in dBm.
+ */
+static int
+ieee80211_ioctl_giwtxpow(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (vap->iv_bss) {
+		/* ni_txpower is stored in 0.5dBm units */
+		rrq->value = vap->iv_bss->ni_txpower >> 1;
+	} else {
+		rrq->value = 0;
+	}
+
+	/* Bug fix: 'disabled' used to be derived before rrq->value was
+	 * filled in, i.e. from the stale caller-supplied value; compute
+	 * it from the power we actually report. */
+	rrq->fixed = (ic->ic_flags & IEEE80211_F_TXPOW_FIXED) != 0;
+	rrq->disabled = (rrq->fixed && rrq->value == 0);
+	rrq->flags = IW_TXPOW_DBM;
+
+	return 0;
+}
+
+/* accumulator passed to waplist_cb while iterating the scan cache */
+struct waplistreq {	/* XXX: not the right place for declaration? */
+	struct ieee80211vap *vap;		/* vap the query runs on */
+	struct sockaddr addr[IW_MAX_AP];	/* collected AP/BSS addresses */
+	struct iw_quality qual[IW_MAX_AP];	/* matching signal quality */
+	int i;					/* entries filled so far */
+};
+
+static int
+waplist_cb(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct waplistreq *req = arg;
+	int slot = req->i;
+
+	/* stop collecting once the WE ap-list array is full */
+	if (slot >= IW_MAX_AP)
+		return 0;
+
+	req->addr[slot].sa_family = ARPHRD_ETHER;
+	/* in AP mode report the transmitter, otherwise the BSSID */
+	if (req->vap->iv_opmode == IEEE80211_M_HOSTAP)
+		IEEE80211_ADDR_COPY(req->addr[slot].sa_data, se->se_macaddr);
+	else
+		IEEE80211_ADDR_COPY(req->addr[slot].sa_data, se->se_bssid);
+	set_quality(&req->qual[slot], se->se_rssi, QNT_DEFAULT_NOISE);
+	req->i = slot + 1;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_iwaplist(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct waplistreq req;		/* XXX off stack */
+	size_t addr_bytes;
+
+	/* walk the scan cache collecting up to IW_MAX_AP entries */
+	req.vap = vap;
+	req.i = 0;
+	ieee80211_scan_iterate(ic, waplist_cb, &req);
+
+	/* WE layout: addresses first, then the matching quality records */
+	addr_bytes = req.i * sizeof(req.addr[0]);
+	memcpy(extra, &req.addr, addr_bytes);
+	memcpy(extra + addr_bytes, &req.qual, req.i * sizeof(req.qual[0]));
+	data->length = req.i;
+	data->flags = 1;		/* signal quality present (sort of) */
+
+	return 0;
+}
+
+#ifdef SIOCGIWSCAN
+/* Optional QFDR hook used to mirror local scan requests to a remote
+ * peer; installed by an external module via the exported registration
+ * function below.  NULL when no peer is attached. */
+static qfdr_remote_siwscan_hook_t qfdr_remote_siwscan_hook = NULL;
+/* Install (or clear, with NULL) the remote-siwscan forwarding hook. */
+void ieee80211_register_qfdr_remote_siwscan_hook(qfdr_remote_siwscan_hook_t hook)
+{
+	qfdr_remote_siwscan_hook = hook;
+}
+EXPORT_SYMBOL(ieee80211_register_qfdr_remote_siwscan_hook);
+
+/*
+ * SIOCSIWSCAN: start a scan.  Handles both local requests and
+ * requests relayed from a remote peer (marked with QFDR_REMOTE_CMD).
+ */
+static int
+ieee80211_ioctl_siwscan(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	uint32_t scan_flags = 0;
+	uint16_t pick_flags = 0;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	int dfs_channel_available = 0;
+	int is_remote_req = 0;
+
+	/* strip the relay marker so the request is processed normally
+	 * and is not forwarded again below */
+	if (info->cmd & QFDR_REMOTE_CMD) {
+		is_remote_req = 1;
+		info->cmd &= ~QFDR_REMOTE_CMD;
+	}
+
+	/* mirror locally originated requests to the remote peer */
+	if (is_remote_req == 0 && qfdr_remote_siwscan_hook != NULL)
+		qfdr_remote_siwscan_hook(vap->iv_dev->name, data);
+
+	/*
+	 * XXX don't permit a scan to be started unless we
+	 * know the device is ready.  For the moment this means
+	 * the device is marked up as this is the required to
+	 * initialize the hardware.  It would be better to permit
+	 * scanning prior to being up but that'll require some
+	 * changes to the infrastructure.
+	 */
+	if (!IS_UP(vap->iv_dev))
+		return -ENETDOWN;	/* XXX */
+
+	/* refuse to scan while CAC or off-channel CAC is running */
+	if ((ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+			IEEE80211_IS_CHAN_CAC_IN_PROGRESS(ic->ic_bsschan)) ||
+			ic->ic_ocac.ocac_running)
+		return -EBUSY;
+
+	if (!ieee80211_should_scan(vap))
+		return -EAGAIN;
+
+
+	/* initial CAC still counting down: try again later */
+	if (ic->ic_get_init_cac_duration(ic) > 0) {
+		return -EAGAIN;
+	}
+
+	/* XXX always manual... */
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+		"%s: active scan request\n", __func__);
+	preempt_scan(dev, 100, 100);
+	ss->is_scan_valid = 1;
+	ic->ic_csw_reason = IEEE80211_CSW_REASON_SCAN;
+
+	/* directed scan for a single ESSID supplied in an iw_scan_req */
+	if (data && (data->flags & IW_SCAN_THIS_ESSID)) {
+		struct iw_scan_req req;
+		struct ieee80211_scan_ssid ssid;
+		int copyLength;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: SCAN_THIS_ESSID requested\n", __func__);
+		if (data->length > sizeof req) {
+			copyLength = sizeof req;
+		} else {
+			copyLength = data->length;
+		}
+		memset(&req, 0, sizeof req);
+		/* for relayed requests data->pointer is a kernel buffer,
+		 * hence memcpy instead of copy_from_user */
+		if (is_remote_req)
+			memcpy(&req, data->pointer, copyLength);
+		else {
+			if (copy_from_user(&req, data->pointer, copyLength))
+				return -EFAULT;
+		}
+		memcpy(&ssid.ssid, req.essid, sizeof ssid.ssid);
+		ssid.len = req.essid_len;
+		/* NOTE(review): ssid.ssid may not be NUL-terminated for a
+		 * max-length ESSID; the %s below relies on the memset above */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				  "%s: requesting scan of essid '%s'\n", __func__, ssid.ssid);
+		(void) ieee80211_start_scan(vap,
+			IEEE80211_SCAN_ACTIVE |
+			IEEE80211_SCAN_NOPICK |
+			IEEE80211_SCAN_ONCE |
+			(IEEE80211_USE_QTN_BGSCAN(vap) ? IEEE80211_SCAN_QTN_BGSCAN: 0) |
+			((vap->iv_opmode == IEEE80211_M_HOSTAP) ? IEEE80211_SCAN_FLUSH : 0),
+			IEEE80211_SCAN_FOREVER,
+			1, &ssid);
+		return 0;
+	}
+
+	/* otherwise the payload (if any) is a 16-bit pick-flags word */
+	if (data && data->pointer) {
+		u_int16_t flags_tmp;
+		u_int16_t flags_bg_scan_mode = 0;
+
+
+		if (is_remote_req)
+			memcpy(&pick_flags, data->pointer, sizeof(pick_flags));
+		else {
+			if (copy_from_user(&pick_flags, data->pointer, sizeof(pick_flags)))
+				return -EFAULT;
+		}
+
+		flags_tmp = pick_flags & IEEE80211_PICK_ALGORITHM_MASK;
+		flags_bg_scan_mode = 0;
+
+		/*
+		 * For DFS reentry, check if any DFS channel is available.
+		 * If not, skip channel scan and return directly.
+		 */
+		if (pick_flags & (IEEE80211_PICK_REENTRY | IEEE80211_PICK_DFS)) {
+			if (ic->ic_is_dfs_chans_available_for_dfs_reentry) {
+				 if ((dfs_channel_available = ic->ic_is_dfs_chans_available_for_dfs_reentry(ic, vap)) <= 0) {
+					return dfs_channel_available;
+				}
+			} else {
+				return -EOPNOTSUPP;
+			}
+		}
+
+		if (flags_tmp == IEEE80211_PICK_REENTRY || flags_tmp == IEEE80211_PICK_CLEAREST) {
+			scan_flags = IEEE80211_SCAN_FLUSH;
+			/* Go out of idle state and delay idle state check.*/
+			if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_IDLE) {
+				pm_qos_update_requirement(PM_QOS_POWER_SAVE, BOARD_PM_GOVERNOR_WLAN, BOARD_PM_LEVEL_NO);
+				ic->ic_pm_reason = IEEE80211_PM_LEVEL_SIWSCAN;
+				ieee80211_pm_queue_work(ic);
+			}
+		}
+		else if (flags_tmp == IEEE80211_PICK_NOPICK) {
+			scan_flags = IEEE80211_SCAN_NOPICK;
+		}
+#ifdef QTN_BG_SCAN
+		else if (flags_tmp == IEEE80211_PICK_NOPICK_BG) {
+			/* background scan: only while the vap is serving
+			 * stations (AP) or associated (STA) */
+			scan_flags = IEEE80211_SCAN_NOPICK;
+			if ((vap->iv_opmode == IEEE80211_M_HOSTAP && ic->ic_sta_assoc > 0) ||
+					(vap->iv_opmode == IEEE80211_M_STA && vap->iv_state == IEEE80211_S_RUN)) {
+				scan_flags |= IEEE80211_SCAN_QTN_BGSCAN;
+				flags_bg_scan_mode = pick_flags & IEEE80211_PICK_BG_MODE_MASK;
+				if (IS_MULTIPLE_BITS_SET(flags_bg_scan_mode)) {
+					/* use auto mode if multiple modes are set */
+					flags_bg_scan_mode = 0;
+				}
+			}
+		}
+
+#endif /* QTN_BG_SCAN */
+		if (pick_flags & IEEE80211_PICK_SCAN_FLUSH) {
+			scan_flags |= IEEE80211_SCAN_FLUSH;
+		}
+
+		/*
+		 * set pick flags before start scanning, and remember to clean it when selection channel done
+		 * only for AP mode
+		 */
+		ss->ss_pick_flags = (pick_flags & (~IEEE80211_PICK_CONTROL_MASK)) | flags_bg_scan_mode;
+	}
+
+	if (IEEE80211_USE_QTN_BGSCAN(vap))
+		scan_flags |= IEEE80211_SCAN_QTN_BGSCAN;
+
+	(void) ieee80211_start_scan(vap, IEEE80211_SCAN_ACTIVE |
+			scan_flags | IEEE80211_SCAN_ONCE,
+		IEEE80211_SCAN_FOREVER,
+		/* XXX use ioctl params */
+		vap->iv_des_nssid, vap->iv_des_ssid);
+	return 0;
+}
+
+/*
+ * Entry point for scan requests relayed from a remote QFDR peer.
+ * Looks up the target device by name, rebuilds the iw_point and
+ * invokes the siwscan handler with the QFDR_REMOTE_CMD marker set
+ * so the request is not forwarded back again.
+ */
+int qfdr_siwscan_for_remote(struct qfdr_remote_scan_req *remote_req)
+{
+	struct net_device *dev;
+	struct iw_point *data;
+	struct iw_point data_remote;
+	struct iw_request_info info;
+	int ret;
+
+	dev = dev_get_by_name(&init_net, remote_req->dev_name);
+	if (!dev)
+		return -EINVAL;
+
+	memset(&info, 0, sizeof(info));
+	info.cmd = SIOCSIWSCAN | QFDR_REMOTE_CMD;
+
+	if (remote_req->type == QFDR_SIWSCAN_SIMPLE) {
+		data = NULL;
+	} else {
+		data_remote.length = remote_req->length;
+		data_remote.flags = remote_req->flags;
+		data_remote.pointer = remote_req->pointer;
+		data = &data_remote;
+	}
+
+	/* Bug fix: the handler's status (-ENETDOWN, -EBUSY, -EAGAIN, ...)
+	 * was previously discarded and the caller always saw success;
+	 * propagate it after releasing the device reference. */
+	ret = ieee80211_ioctl_siwscan(dev, &info, data, NULL);
+
+	dev_put(dev);
+	return ret;
+}
+EXPORT_SYMBOL(qfdr_siwscan_for_remote);
+
+/*
+ * Encode a WPA or RSN information element as a custom
+ * element using the hostap format.
+ */
+static u_int
+encode_ie(void *buf, size_t bufsize, const u_int8_t *ie, size_t ielen,
+	const char *leader, size_t leader_len)
+{
+	char *out = buf;
+	size_t n;
+
+	/* the leader string must fit or nothing is emitted */
+	if (bufsize < leader_len)
+		return 0;
+	memcpy(out, leader, leader_len);
+	out += leader_len;
+	bufsize -= leader_len;
+
+	/* append two lowercase hex digits per IE octet while room remains
+	 * (sprintf needs 2 digits + NUL, hence the > 2 check) */
+	for (n = 0; n < ielen; n++) {
+		if (bufsize <= 2)
+			break;
+		out += sprintf(out, "%02x", ie[n]);
+		bufsize -= 2;
+	}
+
+	/* report the encoded length only if the whole IE fitted */
+	return (n == ielen) ? (u_int)(out - (char *)buf) : 0;
+}
+
+/*
+ * Recalculate the RSSI of MBS/RBS
+ * Make sure the RSSI has the following priority.
+ *	MBS in best rate range has the highest level RSSI.
+ *	RBS in best rate range has the second high level RSSI.
+ *	MBS not in best rate range has the third high level RSSI.
+ *	RBS not in best rate range has the fourth high level RSSI.
+ */
+static int8_t
+ieee80211_calcu_extwds_node_rssi(struct ieee80211com *ic,
+	const struct ieee80211_scan_entry *se)
+{
+	int8_t rssi = se->se_rssi;
+	/* non-extender nodes keep their raw RSSI */
+	if (se->se_ext_role == IEEE80211_EXTENDER_ROLE_NONE)
+		return rssi;
+
+	if (se->se_ext_role == IEEE80211_EXTENDER_ROLE_MBS) {
+		/* at/above the MBS best-rate threshold: pin to the maximum */
+		if (rssi >= ic->ic_extender_mbs_best_rssi) {
+			rssi = IEEE80211_EXTWDS_MBS_BEST_RATE_RSSI;
+		} else {
+			/* scale linearly toward the best-rate boundary,
+			 * weighted by ic_extender_mbs_wgt (in tenths) */
+			rssi = (rssi - IEEE80211_EXTWDS_MIN_PSEUDO_RSSI) *
+				IEEE80211_EXTWDS_BEST_RATE_BDRY_RSSI /
+				(ic->ic_extender_mbs_best_rssi -
+					IEEE80211_EXTWDS_MIN_PSEUDO_RSSI) *
+				ic->ic_extender_mbs_wgt / 10;
+		}
+	} else if (se->se_ext_role == IEEE80211_EXTENDER_ROLE_RBS) {
+		/* RBS at/above its threshold maps into
+		 * [BEST_RATE_BDRY, MAX_PSEUDO) -- below an MBS at best rate */
+		if (rssi >= ic->ic_extender_rbs_best_rssi) {
+			rssi = (rssi - ic->ic_extender_rbs_best_rssi) *
+				(IEEE80211_EXTWDS_MAX_PSEUDO_RSSI -
+					IEEE80211_EXTWDS_BEST_RATE_BDRY_RSSI) /
+				(IEEE80211_EXTWDS_MAX_PSEUDO_RSSI -
+					ic->ic_extender_rbs_best_rssi) +
+				IEEE80211_EXTWDS_BEST_RATE_BDRY_RSSI;
+		} else {
+			/* below threshold: same scaling as MBS, but with the
+			 * RBS threshold and weight */
+			rssi = (rssi - IEEE80211_EXTWDS_MIN_PSEUDO_RSSI) *
+				IEEE80211_EXTWDS_BEST_RATE_BDRY_RSSI /
+				(ic->ic_extender_rbs_best_rssi -
+					IEEE80211_EXTWDS_MIN_PSEUDO_RSSI) *
+				ic->ic_extender_rbs_wgt / 10;
+		}
+	}
+
+	return rssi;
+}
+
+static int
+giwscan_cb(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct iwscanreq *req = arg;
+	struct ieee80211vap *vap = req->vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iw_request_info *info = req->info;
+	char *current_ev = req->current_ev;
+	char *end_buf = req->end_buf;
+	char *last_ev;
+#define MAX_IE_LENGTH 257
+	char buf[MAX_IE_LENGTH];
+#ifndef IWEVGENIE
+	static const char rsn_leader[] = IEEE80211_IE_LEADER_STR_RSN;
+	static const char wpa_leader[] = IEEE80211_IE_LEADER_STR_WPA;
+#endif
+	struct iw_event iwe;
+	char *current_val;
+	int j;
+	u_int8_t chan_mode = 0;
+	uint8_t sgi = 0;
+	u_int8_t k, r;
+	u_int16_t mask;
+	struct ieee80211_ie_htcap *htcap;
+	struct ieee80211_ie_vhtcap *vhtcap;
+	int rate_ie_exist = 0;
+
+	if (current_ev >= end_buf)
+		return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		IEEE80211_ADDR_COPY(iwe.u.ap_addr.sa_data, se->se_macaddr);
+	else
+		IEEE80211_ADDR_COPY(iwe.u.ap_addr.sa_data, se->se_bssid);
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWESSID;
+	iwe.u.data.flags = 1;
+	iwe.u.data.length = se->se_ssid[1];
+	current_ev = iwe_stream_add_point(info, current_ev,
+					  end_buf, &iwe, (char *)se->se_ssid + 2);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	if (se->se_capinfo & (IEEE80211_CAPINFO_ESS|IEEE80211_CAPINFO_IBSS)) {
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = SIOCGIWMODE;
+		iwe.u.mode = se->se_capinfo & IEEE80211_CAPINFO_ESS ?
+			IW_MODE_MASTER : IW_MODE_ADHOC;
+		current_ev = iwe_stream_add_event(info, current_ev,
+			end_buf, &iwe, IW_EV_UINT_LEN);
+
+		/* We ran out of space in the buffer. */
+		if (last_ev == current_ev)
+		  return E2BIG;
+	}
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWFREQ;
+	iwe.u.freq.m = se->se_chan->ic_freq * 100000;
+	iwe.u.freq.e = 1;
+	current_ev = iwe_stream_add_event(info, current_ev,
+		end_buf, &iwe, IW_EV_FREQ_LEN);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = IWEVQUAL;
+	set_quality(&iwe.u.qual, se->se_rssi, QNT_DEFAULT_NOISE);
+	/*
+	 * Assign the real RSSI to 'level' for MBS/RBS, so wpa_supplicant
+	 * can run roaming between MBS and RBS base on the 'level' value.
+	 */
+	if (se->se_ext_role != IEEE80211_EXTENDER_ROLE_NONE) {
+		iwe.u.qual.qual = ieee80211_calcu_extwds_node_rssi(ic, se);
+		iwe.u.qual.level = iwe.u.qual.qual - IEEE80211_PSEUDO_RSSI_TRANSITON_FACTOR;
+	}
+	current_ev = iwe_stream_add_event(info, current_ev,
+		end_buf, &iwe, IW_EV_QUAL_LEN);
+
+	/* We ran out of space in the buffer */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWENCODE;
+	if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, "");
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWRATE;
+	current_val = current_ev + IW_EV_LCP_LEN;
+	/* NB: not sorted, does it matter? */
+	for (j = 0; j < se->se_rates[1]; j++) {
+		int r = se->se_rates[2 + j] & IEEE80211_RATE_VAL;
+		if (r != 0) {
+			iwe.u.bitrate.value = r * (1000000 / 2);
+			current_val = iwe_stream_add_value(info, current_ev,
+				current_val, end_buf, &iwe,
+				IW_EV_PARAM_LEN);
+			rate_ie_exist++;
+		}
+	}
+	for (j = 0; j < se->se_xrates[1]; j++) {
+		int r = se->se_xrates[2+j] & IEEE80211_RATE_VAL;
+		if (r != 0) {
+			iwe.u.bitrate.value = r * (1000000 / 2);
+			current_val = iwe_stream_add_value(info, current_ev,
+				current_val, end_buf, &iwe,
+				IW_EV_PARAM_LEN);
+			rate_ie_exist++;
+		}
+	}
+
+	htcap = (struct ieee80211_ie_htcap *)se->se_htcap_ie;
+	if (htcap) {
+		r = 0;
+		if (htcap->hc_cap[0] & IEEE80211_HTCAP_C_CHWIDTH40) {
+			chan_mode = 1;
+			sgi = htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+		} else {
+			chan_mode = 0;
+			sgi = htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+		}
+		for (j = IEEE80211_HT_MCSSET_20_40_NSS1; j <= IEEE80211_HT_MCSSET_20_40_NSS4; j++) {
+			mask = 1;
+			for (k = 0; k < 8; k++, r++) {
+				if (htcap->hc_mcsset[j] & mask) {
+					/* Copy HT rates */
+					iwe.u.bitrate.value = ieee80211_mcs2rate(r, chan_mode, sgi, 0) * (1000000 / 2);
+					current_val = iwe_stream_add_value(info,
+							current_ev,
+							current_val,
+							end_buf,
+							&iwe,
+							IW_EV_PARAM_LEN);
+					rate_ie_exist++;
+				}
+				mask = mask << 1;
+			}
+		}
+	}
+
+	vhtcap = (struct ieee80211_ie_vhtcap *)se->se_vhtcap_ie;
+	if (vhtcap) {
+		u_int16_t mcsmap = 0;
+		r = 0;
+		/* 80+80 or 160 Mhz */
+		if (IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap)) {
+			chan_mode = 1;
+			sgi = IEEE80211_VHTCAP_GET_SGI_160MHZ(vhtcap);
+		} else {
+			chan_mode = 0;
+			sgi = IEEE80211_VHTCAP_GET_SGI_80MHZ(vhtcap);
+		}
+		mask = 0x3;
+		mcsmap = (u_int16_t)IEEE80211_VHTCAP_GET_TX_MCS_NSS(vhtcap);
+		for (k = 0; k < 8; k++) {
+			if ((mcsmap & mask) != mask) {
+				int m;
+				int val = (mcsmap & mask)>>(k * 2);
+				r = (val == 2) ? 9: (val == 1) ? 8 : 7;
+				/* Copy HT rates */
+				for (m = 0; m <= r; m++) {
+					iwe.u.bitrate.value =
+						(ieee80211_mcs2rate(m, chan_mode, sgi, 1)
+						* (1000000 / 2)) * (k+1);
+					current_val = iwe_stream_add_value(info,
+						current_ev,
+						current_val,
+						end_buf,
+						&iwe,
+						IW_EV_PARAM_LEN);
+					rate_ie_exist++;
+				}
+				mask = mask << 2;
+			} else {
+				break;
+			}
+		}
+	}
+
+	/* remove fixed header if no rates were added */
+	if ((current_val - current_ev) > IW_EV_LCP_LEN) {
+		current_ev = current_val;
+	} else {
+	  /* We ran out of space in the buffer. */
+	  if (last_ev == current_ev && rate_ie_exist)
+	    return E2BIG;
+	}
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = IWEVCUSTOM;
+	snprintf(buf, sizeof(buf), "bcn_int=%d", se->se_intval);
+	iwe.u.data.length = strlen(buf);
+	current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	memset(buf, 0, sizeof(buf));
+	last_ev = current_ev;
+	iwe.cmd = IWEVCUSTOM;
+	snprintf(buf, sizeof(buf) - 1, IEEE80211_IE_LEADER_STR_EXT_ROLE"%d", se->se_ext_role);
+	iwe.u.data.length = strlen(buf) + 1;
+	current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+		return E2BIG;
+
+	if (se->se_rsn_ie != NULL) {
+	  last_ev = current_ev;
+#ifdef IWEVGENIE
+		memset(&iwe, 0, sizeof(iwe));
+		if ((se->se_rsn_ie[1] + 2) > MAX_IE_LENGTH)
+			return E2BIG;
+		memcpy(buf, se->se_rsn_ie, se->se_rsn_ie[1] + 2);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = se->se_rsn_ie[1] + 2;
+#else
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		if (se->se_rsn_ie[0] == IEEE80211_ELEMID_RSN)
+			iwe.u.data.length = encode_ie(buf, sizeof(buf),
+				se->se_rsn_ie, se->se_rsn_ie[1] + 2,
+				rsn_leader, sizeof(rsn_leader) - 1);
+#endif
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(info,
+					current_ev, end_buf, &iwe, buf);
+
+			/* We ran out of space in the buffer */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+
+	if (se->se_wpa_ie != NULL) {
+	  last_ev = current_ev;
+#ifdef IWEVGENIE
+		memset(&iwe, 0, sizeof(iwe));
+		if ((se->se_wpa_ie[1] + 2) > MAX_IE_LENGTH)
+			return E2BIG;
+		memcpy(buf, se->se_wpa_ie, se->se_wpa_ie[1] + 2);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = se->se_wpa_ie[1] + 2;
+#else
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_wpa_ie, se->se_wpa_ie[1] + 2,
+			wpa_leader, sizeof(wpa_leader) - 1);
+#endif
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(info, current_ev,
+					end_buf, &iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+
+	if (se->se_wme_ie != NULL) {
+		static const char wme_leader[] = IEEE80211_IE_LEADER_STR_WME;
+
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_wme_ie, se->se_wme_ie[1] + 2,
+			wme_leader, sizeof(wme_leader) - 1);
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(info, current_ev,
+					end_buf, &iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+	if (se->se_wsc_ie != NULL) {
+		last_ev = current_ev;
+#ifdef IWEVGENIE
+		memset(&iwe, 0, sizeof(iwe));
+		if ((se->se_wsc_ie[1] + 2) > sizeof(buf))
+			return E2BIG;
+		memcpy(buf, se->se_wsc_ie, se->se_wsc_ie[1] + 2);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = se->se_wsc_ie[1] + 2;
+#endif
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(info, current_ev,
+					end_buf, &iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+	if (se->se_ath_ie != NULL) {
+		static const char ath_leader[] = IEEE80211_IE_LEADER_STR_ATH;
+
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_ath_ie, se->se_ath_ie[1] + 2,
+			ath_leader, sizeof(ath_leader) - 1);
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(info, current_ev,
+					end_buf, &iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+
+	if (se->se_htcap_ie != NULL) {
+		static const char htcap_leader[] = IEEE80211_IE_LEADER_STR_HTCAP;
+
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_htcap_ie, se->se_htcap_ie[1] + 2,
+			htcap_leader, sizeof(htcap_leader) - 1);
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(info, current_ev,
+					end_buf, &iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+
+	if (se->se_vhtcap_ie != NULL) {
+		static const char vhtcap_leader[] = IEEE80211_IE_LEADER_STR_VHTCAP;
+
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_vhtcap_ie, se->se_vhtcap_ie[1] + 2,
+			vhtcap_leader, sizeof(vhtcap_leader) - 1);
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(info, current_ev,
+					end_buf, &iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+
+	req->current_ev = current_ev;
+
+	return 0;
+}
+
+/*
+ * Optional QFDR hook: when registered, ieee80211_ioctl_giwscan() calls it
+ * after the local scan iteration to append remote scan results.
+ */
+static qfdr_remote_giwscan_hook_t qfdr_remote_giwscan_hook = NULL;
+/* Register (or clear with NULL) the remote giwscan hook. No locking here;
+ * assumes registration happens before concurrent SIOCGIWSCAN use. */
+void ieee80211_register_qfdr_remote_giwscan_hook(qfdr_remote_giwscan_hook_t hook)
+{
+	qfdr_remote_giwscan_hook = hook;
+}
+EXPORT_SYMBOL(ieee80211_register_qfdr_remote_giwscan_hook);
+
+/*
+ * SIOCGIWSCAN handler: serialize the scan-result list into the wireless-
+ * extensions event stream in 'extra' via giwscan_cb.
+ *
+ * Remote (QFDR) requests are marked with QFDR_REMOTE_CMD in info->cmd; for
+ * those, 'extra' begins with a u32 buffer length followed by the payload
+ * area, and the length word is updated with the bytes actually written.
+ */
+static int
+ieee80211_ioctl_giwscan(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iwscanreq req;
+	int res = 0;
+	int is_remote_req = 0;
+	uint32_t *plen = NULL;
+	uint32_t buflen = data->length;
+
+	if (info->cmd & QFDR_REMOTE_CMD) {
+		is_remote_req = 1;
+		info->cmd &= ~QFDR_REMOTE_CMD;
+		/* Remote layout: leading u32 holds the usable payload size. */
+		plen = (uint32_t *)extra;
+		extra += sizeof(uint32_t);
+		buflen = *plen;
+	}
+
+	ieee80211_dump_scan_res(ic->ic_scan);
+
+	req.vap = vap;
+	req.current_ev = extra;
+	/* buflen == 0 means the caller gave no size; fall back to the WE max. */
+	if (buflen == 0) {
+		req.end_buf = extra + IW_SCAN_MAX_DATA;
+	} else {
+		req.end_buf = extra + buflen;
+	}
+
+	/*
+	 * NB: This is no longer needed, as long as the caller supports
+	 * large scan results.
+	 *
+	 * Don't need do WPA/RSN sort any more since the original scan list
+	 * has been sorted.
+	 */
+	req.info = info;
+	res = ieee80211_scan_iterate(ic, giwscan_cb, &req);
+
+	if (is_remote_req) {
+		/* Report bytes written back through the leading length word. */
+		*plen = req.current_ev - extra;
+		return res;
+	}
+
+	/* Local request: optionally append results from the remote device. */
+	if (res == 0 && qfdr_remote_giwscan_hook != NULL) {
+		res = qfdr_remote_giwscan_hook(&req);
+	}
+
+	data->length = req.current_ev - extra;
+
+	/* giwscan_cb returns positive errno (e.g. E2BIG); negate for ioctl. */
+	if (res != 0) {
+	  return -res;
+	}
+
+	return res;
+}
+
+/*
+ * Serve a remote QFDR scan-list request: run the giwscan handler on the
+ * named local device and return the results in a freshly allocated reply.
+ * Returns NULL if the device does not exist or allocation fails; the
+ * caller owns (and must free) the returned reply.
+ */
+struct qfdr_remote_aplist_rep *qfdr_giwscan_for_remote(struct qfdr_remote_aplist_req *remote_req)
+{
+	struct net_device *dev;
+	struct qfdr_remote_aplist_rep *rep;
+	char *extra;
+	struct iw_request_info *info;
+	struct iw_point data;
+
+	dev = dev_get_by_name(&init_net, remote_req->dev_name);
+	if (!dev)
+		return NULL;
+
+	rep = kmalloc(remote_req->extra_len + sizeof(struct qfdr_remote_aplist_rep), GFP_KERNEL);
+	if (!rep) {
+		printk(KERN_ERR "%s: Failed to alloc buf.\n", __func__);
+		dev_put(dev);
+		return NULL;
+	}
+
+	rep->length = remote_req->extra_len;
+	/* NOTE(review): 'extra' deliberately points at rep->length so that the
+	 * giwscan handler's QFDR path reads the length word and writes the
+	 * payload right after it — assumes 'length' is immediately followed by
+	 * the payload area in struct qfdr_remote_aplist_rep; confirm layout. */
+	extra = (char *)&rep->length;
+
+	info = &remote_req->info;
+	/* Mark the command as remote so giwscan uses the length-prefixed path. */
+	info->cmd = SIOCGIWSCAN | QFDR_REMOTE_CMD;
+
+	memset(&data, 0, sizeof(data));
+
+	rep->res = ieee80211_ioctl_giwscan(dev, info, &data, extra);
+	rep->type = QFDR_GIWSCAN;
+
+	dev_put(dev);
+	return rep;
+}
+EXPORT_SYMBOL(qfdr_giwscan_for_remote);
+#endif /* SIOCGIWSCAN */
+
+/*
+ * Map an IEEE80211_CIPHER_* identifier to the corresponding device
+ * capability flag (IEEE80211_C_*). Returns 0 for unknown ciphers.
+ */
+static int
+cipher2cap(int cipher)
+{
+	switch (cipher) {
+	case IEEE80211_CIPHER_WEP:	return IEEE80211_C_WEP;
+	case IEEE80211_CIPHER_AES_OCB:	return IEEE80211_C_AES;
+	case IEEE80211_CIPHER_AES_CCM:	return IEEE80211_C_AES_CCM;
+	case IEEE80211_CIPHER_CKIP:	return IEEE80211_C_CKIP;
+	case IEEE80211_CIPHER_TKIP:	return IEEE80211_C_TKIP;
+	}
+	return 0;
+}
+
+/* Pseudo phy-mode for static turbo-A; mapped back to 11A before ifmedia. */
+#define	IEEE80211_MODE_TURBO_STATIC_A	IEEE80211_MODE_MAX
+
+/*
+ * Convert a user-supplied phy-mode string (case-insensitive) to an
+ * IEEE80211_MODE_* value. Returns -1 if the string matches no entry.
+ */
+static int
+ieee80211_convert_mode(const char *mode)
+{
+/* ASCII-only lower->upper conversion; avoids ctype locale dependencies. */
+#define TOUPPER(c) ((((c) > 0x60) && ((c) < 0x7b)) ? ((c) - 0x20) : (c))
+	static const struct {
+		char *name;
+		int mode;
+	} mappings[] = {
+		/* NB: the matcher below requires a full-string match (it only
+		 * returns when both strings end together), so table order does
+		 * not affect which entry wins. */
+		{ "11AST" , IEEE80211_MODE_TURBO_STATIC_A },
+		{ "AUTO"  , IEEE80211_MODE_AUTO },
+		{ "11A"   , IEEE80211_MODE_11A },
+		{ "11B"   , IEEE80211_MODE_11B },
+		{ "11G"   , IEEE80211_MODE_11G },
+		{ "11NG"   , IEEE80211_MODE_11NG },
+		{ "11NG40"   , IEEE80211_MODE_11NG_HT40PM},
+		{ "11NA"   , IEEE80211_MODE_11NA },
+		{ "11NA40"   , IEEE80211_MODE_11NA_HT40PM},
+		{ "11AC"   , IEEE80211_MODE_11AC_VHT20PM},
+		{ "11AC40"   , IEEE80211_MODE_11AC_VHT40PM},
+		{ "11AC80"   , IEEE80211_MODE_11AC_VHT80PM},
+		{ "11AC160"   , IEEE80211_MODE_11AC_VHT160PM},
+		{ "FH"    , IEEE80211_MODE_FH },
+		{ "0"     , IEEE80211_MODE_AUTO },
+		{ "1"     , IEEE80211_MODE_11A },
+		{ "2"     , IEEE80211_MODE_11B },
+		{ "3"     , IEEE80211_MODE_11G },
+		{ "4"     , IEEE80211_MODE_FH },
+		{ "5"     , IEEE80211_MODE_TURBO_STATIC_A },
+		{ "11AC80EDGE+", IEEE80211_MODE_11AC_VHT80PM},
+		{ "11AC80CNTR+", IEEE80211_MODE_11AC_VHT80PM},
+		{ "11AC80CNTR-", IEEE80211_MODE_11AC_VHT80PM},
+		{ "11AC80EDGE-", IEEE80211_MODE_11AC_VHT80PM},
+		{ "11ACONLY",	IEEE80211_MODE_11AC_VHT20PM},
+		{ "11ACONLY40",	IEEE80211_MODE_11AC_VHT40PM},
+		{ "11ACONLY80",	IEEE80211_MODE_11AC_VHT80PM},
+		{ "11NONLY",	IEEE80211_MODE_11NA},
+		{ "11NONLY40",	IEEE80211_MODE_11NA_HT40PM},
+		{ NULL }
+	};
+	int i, j;
+	const char *cp;
+
+	for (i = 0; mappings[i].name != NULL; i++) {
+		cp = mappings[i].name;
+		/* Walk including the terminating NUL so only exact-length
+		 * matches reach the return below. */
+		for (j = 0; j < strlen(mode) + 1; j++) {
+			/* convert user-specified string to upper case */
+			if (TOUPPER(mode[j]) != cp[j])
+				break;
+			if (cp[j] == '\0')
+				return mappings[i].mode;
+		}
+	}
+	return -1;
+#undef TOUPPER
+}
+
+/*
+ * Handle a userspace-posted event string. "WPA-PORT-ENABLE" authorizes the
+ * BSS node locally; anything else is re-emitted as a wireless event with
+ * the common QEVT prefix. Input is truncated to 255 bytes plus NUL.
+ */
+static int
+ieee80211_ioctl_postevent(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	char s[256];
+	static const char *tag = QEVT_COMMON_PREFIX;
+	memset(s, 0, sizeof(s));
+	if (wri->length > sizeof(s))		/* silently truncate */
+		wri->length = sizeof(s);
+	if (copy_from_user(s, wri->pointer, wri->length))
+		return -EINVAL;
+	s[sizeof(s)-1] = '\0';			/* ensure null termination */
+
+	/* We demux - one message is "WPA-PORT-ENABLE", the rest should be pushed
+	 * via wireless_send_event.
+	 */
+	if (!strncmp(s, "WPA-PORT-ENABLE", sizeof("WPA-PORT-ENABLE")-1)) {
+		struct ieee80211vap *vap = netdev_priv(dev);
+		struct ieee80211_node *ni = vap->iv_bss;
+		if (ni) {
+			_ieee80211_node_authorize(ni);
+		}
+	} else {
+		/* In this case, we assume the message from userspace has the correct format */
+		ieee80211_eventf(dev, "%s%s", tag, s);
+	}
+	return 0;
+}
+
+/*
+ * Copy an EAPOL frame from userspace and transmit it via
+ * ieee80211_eap_output(). Returns -EINVAL on allocation or copy failure.
+ *
+ * NOTE(review): wri->length is user-controlled and is not checked for zero
+ * or an upper bound before allocation — confirm callers/upper layers
+ * validate it.
+ */
+static int
+ieee80211_ioctl_txeapol(struct net_device *dev, struct iw_request_info *info,
+        struct iw_point *wri, char *extra)
+{
+	char *buf;
+
+	buf = ieee80211_malloc(wri->length, M_NOWAIT);
+	if (buf == NULL)
+		return -EINVAL;
+
+	if (copy_from_user(buf, wri->pointer, wri->length)) {
+		ieee80211_free(buf);
+		return -EINVAL;
+	}
+
+	ieee80211_eap_output(dev, buf, wri->length);
+
+	ieee80211_free(buf);
+
+	return 0;
+}
+
+/*
+ * Blacklist a station that is in the hostapd MAC filtering list.
+ * Sets ni_blacklist_timeout to the jiffies deadline; a value of 0 means
+ * "not blacklisted", so a deadline that wraps to 0 is bumped to 1.
+ * No-op unless the vap has a positive iv_blacklist_timeout configured.
+ */
+static void
+ieee80211_blacklist_add(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (vap->iv_blacklist_timeout > 0) {
+		ni->ni_blacklist_timeout = jiffies + vap->iv_blacklist_timeout;
+		/* Corner case - can't use zero! */
+		if (ni->ni_blacklist_timeout == 0) {
+		    ni->ni_blacklist_timeout = 1;
+		}
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"[%s] blacklisted\n",
+			ether_sprintf(ni->ni_macaddr));
+	}
+}
+
+/*
+ * Send a WNM BSS Transition Management request announcing BSS termination
+ * to 'ni'. 'list' (of 'size' bytes) is an optional preferred-candidate
+ * list; when present, the candidate-list-included bit is set in 'mode'.
+ * The termination-duration subelement is stamped with the current TSF.
+ */
+static void
+ieee80211_wnm_btm_send_bss_termination(struct ieee80211_node *ni,
+				uint8_t mode,int size, uint8_t *list)
+{
+	struct ieee80211_ie_btm_bss_termdur term;
+	struct ieee80211com *ic = ni->ni_ic;
+	term.subelem_id = WNM_NEIGHBOR_BTM_TERMINATION_DURATION;
+	term.length = sizeof(term) - 2;
+	term.duration = WNM_BTM_BSS_TERMINATION_DURATION;
+	ic->ic_get_tsf(&term.bss_term_tsf);
+
+	if (size > 0)
+		mode |= BTM_REQ_PREF_CAND_LIST_INCLUDED;
+
+	/* Non-zero return means the request could not be queued/sent. */
+	if (ieee80211_send_wnm_bss_tm_solicited_req(ni, mode, 0,
+						WNM_BTM_DEFAULT_VAL_INTVAL,
+						(const uint8_t *)&term, NULL, list,
+						size,
+						0))
+			IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ACTION,
+					"WNM: Failed to send BSS termination BTM request %pM\n",
+					ni->ni_macaddr);
+
+}
+
+/*
+ * Per-node MLME iterator callback (arg is a struct ieee80211req_mlme).
+ * For associated nodes it sends a deauth or disassoc frame (blacklisting
+ * first when the internal DENIED status is used), then — for non-self
+ * nodes — optionally sends a BTM BSS-termination request and removes the
+ * node from the table.
+ */
+static void
+ieee80211_domlme(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211req_mlme *mlme = arg;
+
+	if ((mlme->im_op != IEEE80211_MLME_DEBUG_CLEAR) &&
+		(ni->ni_associd != 0)) {
+		/* This status is only used internally, for blacklisting */
+		if (mlme->im_reason == IEEE80211_STATUS_DENIED) {
+			ieee80211_blacklist_add(ni);
+			mlme->im_reason = IEEE80211_REASON_UNSPECIFIED;
+		}
+
+		IEEE80211_SEND_MGMT(ni,
+			mlme->im_op == IEEE80211_MLME_DEAUTH ?
+				IEEE80211_FC0_SUBTYPE_DEAUTH :
+				IEEE80211_FC0_SUBTYPE_DISASSOC,
+			mlme->im_reason);
+
+		/*
+		 * Ensure that the deauth/disassoc frame is sent
+		 * before the node is deleted.
+		 */
+		if (mlme->im_reason == IEEE80211_REASON_MIC_FAILURE)
+			ieee80211_safe_wait_ms(150, !in_interrupt());
+	}
+	/* Skip the vap's own (BSS) node: its macaddr equals its bssid. */
+	if (!(IEEE80211_ADDR_EQ(ni->ni_macaddr, ni->ni_bssid))) {
+		if (ni->ni_wnm_capability & IEEE80211_NODE_WNM_BTM_CAPABLE) {
+			uint8_t mode = BTM_REQ_ABRIDGED | BTM_REQ_BSS_TERMINATION_INCLUDED;
+			int nsize = 0;
+			uint8_t *neigh_repos = NULL;
+
+			nsize = ieee80211_wnm_btm_create_pref_candidate_list(ni, &neigh_repos);
+			if (nsize == 0)
+				mode &= ~BTM_REQ_PREF_CAND_LIST_INCLUDED;
+			ieee80211_wnm_btm_send_bss_termination(ni, mode, nsize, neigh_repos);
+			/* Give the BTM frame time to go out before teardown. */
+			ieee80211_safe_wait_ms(150, !in_interrupt());
+			kfree(neigh_repos);
+		}
+		ieee80211_node_leave(ni);
+	}
+}
+
+/**
+ * Common routine to force reassociation due to fundamental changes in config.
+ *
+ * STA mode: restart via SCAN (if 'rescan') or go straight to ASSOC.
+ * AP mode (RUN state only): refresh the beacon and disassociate all
+ * stations ('debug' selects DEBUG_CLEAR, which skips frame transmission
+ * in ieee80211_domlme). No-op while the vap is in INIT state.
+ */
+void
+ieee80211_wireless_reassoc(struct ieee80211vap *vap, int debug, int rescan)
+{
+	struct net_device *dev = vap->iv_dev;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_mlme mlme;
+
+	if (vap->iv_state == IEEE80211_S_INIT)
+	{
+		return;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+		"%s Forcing reassociation\n", __func__);
+	switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			if (rescan)
+			{
+				ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+			}
+			else
+			{
+				ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+			}
+			break;
+		case IEEE80211_M_HOSTAP:
+			if (vap->iv_state == IEEE80211_S_RUN) {
+				ic->ic_beacon_update(vap);
+				if (debug)
+				{
+					mlme.im_op = IEEE80211_MLME_DEBUG_CLEAR;
+					mlme.im_reason = IEEE80211_REASON_UNSPECIFIED;
+				}
+				else
+				{
+					mlme.im_op = IEEE80211_MLME_DISASSOC;
+					mlme.im_reason = IEEE80211_REASON_UNSPECIFIED;
+				}
+				ieee80211_iterate_dev_nodes(dev, &ic->ic_sta, ieee80211_domlme, &mlme, 1);
+			}
+			break;
+		default:
+			break;
+	}
+}
+
+/* Force reassociation (no debug, no rescan) on every AP-mode vap of 'ic'. */
+static void ieee80211_wireless_reassoc_all_vaps(struct ieee80211com *ic)
+{
+	struct ieee80211vap *tmp_vap;
+
+	TAILQ_FOREACH(tmp_vap, &ic->ic_vaps, iv_next) {
+		if (tmp_vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			ieee80211_wireless_reassoc(tmp_vap, 0, 0);
+		}
+	}
+}
+
+/*
+ * For the 11ac80 "Edge+/Edge-/Cntr+/Cntr-" mode variants, pick an
+ * alternate primary 20 MHz channel inside the current VHT80 block.
+ * The offset (in 4-channel steps) depends on which quarter of the block
+ * the current primary occupies (LL/LU/UL/UU ext flags). Returns NULL if
+ * the computed channel is below the default 5 GHz channel or not found.
+ */
+static struct ieee80211_channel *
+find_alt_primary_chan_11ac80(struct ieee80211com *ic, char *mode)
+{
+	struct ieee80211_channel *c = NULL;
+	int chan_offset = 0;
+	int chan;
+
+	if (!strncasecmp(mode ,"11ac80Edge+", strlen(mode))) {
+		/* Edge+: move the primary to the lowest 20 MHz sub-channel. */
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL)
+			chan_offset = 0;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU)
+			chan_offset = -1;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL)
+			chan_offset = -2;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU)
+			chan_offset = -3;
+	} else if (!strncasecmp(mode ,"11ac80Cntr+", strlen(mode))) {
+		/* Cntr+: second-lowest sub-channel. */
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL)
+			chan_offset = 1;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU)
+			chan_offset = 0;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL)
+			chan_offset = -1;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU)
+			chan_offset = -2;
+	} else if (!strncasecmp(mode ,"11ac80Cntr-", strlen(mode))) {
+		/* Cntr-: second-highest sub-channel. */
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL)
+			chan_offset = 2;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU)
+			chan_offset = 1;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL)
+			chan_offset = 0;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU)
+			chan_offset = -1;
+	} else if (!strncasecmp(mode ,"11ac80Edge-", strlen(mode))) {
+		/* Edge-: highest sub-channel. */
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL)
+			chan_offset = 3;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU)
+			chan_offset = 2;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL)
+			chan_offset = 1;
+		if (ic->ic_curchan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU)
+			chan_offset = 0;
+	}
+
+	/* 5 GHz VHT80 sub-channels are spaced 4 channel numbers apart. */
+	chan = ic->ic_curchan->ic_ieee + (4 * chan_offset);
+	if (chan >= IEEE80211_DEFAULT_5_GHZ_CHANNEL) {
+		c = findchannel_any(ic, chan, ic->ic_curmode);
+	}
+	return c;
+}
+
+/*
+ * Record the requested phy mode in the vap's per-band (2.4/5 GHz) station
+ * profile for dual-band (DBS) STA operation. Unrecognized strings are
+ * logged and ignored.
+ */
+static void
+update_sta_profile(char *s, struct ieee80211vap *vap)
+{
+	if (strcasecmp(s, "11b") == 0) {
+		vap->iv_2_4ghz_prof.phy_mode = IEEE80211_MODE_11B;
+	} else if (strcasecmp(s, "11a") == 0) {
+		vap->iv_5ghz_prof.phy_mode = IEEE80211_MODE_11A;
+	} else if (strcasecmp(s, "11g") == 0) {
+		vap->iv_2_4ghz_prof.phy_mode = IEEE80211_MODE_11G;
+	} else if (strcasecmp(s, "11ng") == 0) {
+		vap->iv_2_4ghz_prof.phy_mode = IEEE80211_MODE_11NG;
+	} else if (strcasecmp(s, "11ng40") == 0) {
+		vap->iv_2_4ghz_prof.phy_mode = IEEE80211_MODE_11NG_HT40PM;
+	} else if (strcasecmp(s, "11na") == 0) {
+		vap->iv_5ghz_prof.phy_mode = IEEE80211_MODE_11NA;
+	} else if (strcasecmp(s, "11na40") == 0) {
+		vap->iv_5ghz_prof.phy_mode = IEEE80211_MODE_11NA_HT40PM;
+	} else if ((strcasecmp(s, "11ac") == 0 ) || (strcasecmp(s, "11acOnly") == 0)) {
+		vap->iv_5ghz_prof.phy_mode = IEEE80211_MODE_11AC_VHT20PM;
+	} else if ((strcasecmp(s, "11ac40") == 0) || (strcasecmp(s, "11acOnly40") == 0)) {
+		vap->iv_5ghz_prof.phy_mode = IEEE80211_MODE_11AC_VHT40PM;
+	} else if ((strcasecmp(s, "11ac80") == 0) || (strcasecmp(s, "11acOnly80") == 0) ||
+		(strcasecmp(s, "11ac80Edge+") == 0) || (strcasecmp(s, "11ac80Edge-") == 0) ||
+		(strcasecmp(s, "11ac80Cntr+") == 0) || (strcasecmp(s, "11ac80Cntr-") == 0)) {
+		vap->iv_5ghz_prof.phy_mode = IEEE80211_MODE_11AC_VHT80PM;
+	} else {
+		printk(KERN_INFO "In DBS mode - %s is not correct\n", s);
+	}
+}
+
+/*
+ * Derive the channel bandwidth (BW_HT*) implied by a phy mode.
+ * Anything that is not an explicit 160/80/40 MHz mode maps to 20 MHz.
+ */
+static int
+ieee80211_get_bw_from_phymode(int mode)
+{
+	int bw;
+
+	if (mode == IEEE80211_MODE_11AC_VHT160PM)
+		bw = BW_HT160;
+	else if (mode == IEEE80211_MODE_11AC_VHT80PM)
+		bw = BW_HT80;
+	else if ((mode == IEEE80211_MODE_11AC_VHT40PM) ||
+			(mode == IEEE80211_MODE_11NG_HT40PM) ||
+			(mode == IEEE80211_MODE_11NA_HT40PM))
+		bw = BW_HT40;
+	else
+		bw = BW_HT20;
+
+	return bw;
+}
+
+/*
+ * Rebuild the active channel list for the new bandwidth and, unless the
+ * region is unset (CTRY_DEFAULT), refresh each active channel's max TX
+ * power from its power table for that bandwidth (BF off, 1SS column).
+ * BW_HT160 has no power-table index here and is rejected with a log.
+ */
+static void
+ieee80211_update_chanlist_from_bw(struct ieee80211com *ic, int bw)
+{
+	struct ieee80211_channel *chan;
+	int idx_bw;
+	int i;
+
+	/* Reset the active channel list as per the bw selected */
+	ieee80211_update_active_chanlist(ic, bw);
+
+	/* If region = none do not reconfigure the tx_power for the bw set */
+	if (ic->ic_country_code == CTRY_DEFAULT)
+		return;
+
+	/* Update ic_maxpower since bw is changed */
+	switch(bw) {
+	case BW_HT80:
+		idx_bw = PWR_IDX_80M;
+		break;
+	case BW_HT40:
+		idx_bw = PWR_IDX_40M;
+		break;
+	case BW_HT20:
+		idx_bw = PWR_IDX_20M;
+		break;
+	default:
+		printk("unsupported bw: %u\n", bw);
+		return;
+	}
+
+	for (i = 0; i < ic->ic_nchans; i++) {
+		chan = &ic->ic_channels[i];
+		/* Only channels currently marked active get new power values. */
+		if (!isset(ic->ic_chan_active, chan->ic_ieee))
+			continue;
+
+		chan->ic_maxpower = chan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][idx_bw];
+		chan->ic_maxpower_normal = chan->ic_maxpower;
+	}
+}
+
+/*
+ * Apply the bandwidth implied by 'mode': set the system max bandwidth,
+ * update the vap's capability IEs and rebuild the channel list/powers.
+ */
+static void
+ieee80211_update_bw_from_phymode(struct ieee80211vap *vap, int mode)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int bw = ieee80211_get_bw_from_phymode(mode);
+
+	ic->ic_max_system_bw = bw;
+	ieee80211_update_bw_capa(vap, bw);
+	ieee80211_update_chanlist_from_bw(ic, bw);
+}
+
+
+/*
+ * iwpriv "mode" handler: parse a phy-mode string from userspace, validate
+ * it against chip features and the current channel, then push the new
+ * mode through ifmedia and — on -ENETRESET — reapply bandwidth, channel,
+ * aggregation and scan state, finally forcing reassociation.
+ */
+static int
+ieee80211_ioctl_setmode(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifreq ifr;
+	char s[12];		/* big enough for "11ac80Edge+" plus NUL */
+	int retv;
+	int mode;
+	int ifr_mode;
+	int aggr = 1;
+
+	if (ic->ic_media.ifm_cur == NULL)
+		return -EINVAL;
+	if (wri->length > sizeof(s))		/* silently truncate */
+		wri->length = sizeof(s);
+	if (copy_from_user(s, wri->pointer, wri->length))
+		return -EINVAL;
+
+	/* ensure null termination */
+	s[sizeof(s)-1] = '\0';
+	mode = ieee80211_convert_mode(s);
+	if (mode < 0)
+		return -EINVAL;
+
+	/* Mode changes are not allowed while a fixed legacy rate is forced. */
+	if (ic->fixed_legacy_rate_mode)
+		return -EINVAL;
+
+	/* update station profile */
+	if ((ic->ic_rf_chipid == CHIPID_DUAL) && (ic->ic_opmode == IEEE80211_M_STA))
+		update_sta_profile(s, vap);
+
+	/* 11ac variants need the VHT software feature to be licensed. */
+	if (((strcasecmp(s, "11ac") == 0) || (strcasecmp(s, "11acOnly") == 0) ||
+		(strcasecmp(s, "11acOnly40") == 0) ||(strcasecmp(s, "11acOnly80") == 0))
+		&& !ieee80211_swfeat_is_supported(SWFEAT_ID_VHT, 1))
+		return -EOPNOTSUPP;
+
+	if ((strcasecmp(s, "11acOnly") == 0) || (strcasecmp(s, "11acOnly40") == 0) ||
+		(strcasecmp(s, "11acOnly80") == 0)) {
+		vap->iv_11ac_and_11n_flag = IEEE80211_11AC_ONLY;
+	} else if ((strcasecmp(s, "11nOnly") == 0) || (strcasecmp(s, "11nOnly40") == 0)) {
+		vap->iv_11ac_and_11n_flag = IEEE80211_11N_ONLY;
+	} else {
+		vap->iv_11ac_and_11n_flag = 0;
+	}
+
+	/* In AP mode, redefining AUTO mode */
+	if (mode == IEEE80211_MODE_AUTO && ic->ic_opmode == IEEE80211_M_HOSTAP) {
+		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+			mode = IEEE80211_MODE_11NG;
+		else
+			mode = IEEE80211_MODE_11AC_VHT80PM;
+	}
+
+	/* -1: hard mismatch; 1: mismatch recoverable by switching band/chan. */
+	retv = ieee80211_check_mode_consistency(ic, mode, ic->ic_curchan);
+	if (retv == -1) {
+		return -EINVAL;
+	} else if (retv == 1) {
+		/*
+		* Reset current channel with default channel when phy mode
+		* is not consistent with current channel for dual bands chip
+		*/
+		if (ic->ic_rf_chipid != CHIPID_DUAL) {
+			printk(KERN_INFO "mode - %s is not consistent with channel %d\n",
+						s, ic->ic_curchan->ic_ieee);
+			return -EOPNOTSUPP;
+		}
+
+		/* Send deauth frame before switching channel */
+		ieee80211_wireless_reassoc(vap, 0, 1);
+
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			/* Jump to the default channel of the other band. */
+			if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
+				ic->ic_des_chan = findchannel_any(ic,
+					IEEE80211_DEFAULT_2_4_GHZ_CHANNEL, mode);
+			else
+				ic->ic_des_chan = findchannel_any(ic,
+					IEEE80211_DEFAULT_5_GHZ_CHANNEL, mode);
+
+			ic->ic_curchan = ic->ic_des_chan;
+			ic->ic_csw_reason = IEEE80211_CSW_REASON_CONFIG;
+			ic->ic_set_channel(ic);
+		} else {
+			ic->ic_des_chan = IEEE80211_CHAN_ANYC;
+		}
+	}
+
+	ifr_mode = mode;
+	memset(&ifr, 0, sizeof(ifr));
+
+	if(vap->iv_media.ifm_cur == NULL)
+		return -EINVAL;
+
+	ifr.ifr_media = vap->iv_media.ifm_cur->ifm_media &~ IFM_MMASK;
+	/* The pseudo turbo-static-A mode is presented to ifmedia as 11A. */
+	if (mode == IEEE80211_MODE_TURBO_STATIC_A)
+		ifr_mode = IEEE80211_MODE_11A;
+	ifr.ifr_media |= IFM_MAKEMODE(ifr_mode);
+	/* We cannot call with the parent device, needs to be the VAP device */
+	retv = ifmedia_ioctl(vap->iv_dev, &ifr, &vap->iv_media, SIOCSIFMEDIA);
+	if (retv == -ENETRESET) {
+		/* Updating bandwidth base on mode */
+		ieee80211_update_bw_from_phymode(vap, mode);
+
+		/* Updating all mode related flags */
+		ic->ic_des_mode = ic->ic_phymode = mode;
+		ieee80211_setmode(ic, ic->ic_des_mode);
+
+		/* Switch channel according to Edge+/- Cntr +/- */
+		if (ic->ic_phymode == IEEE80211_MODE_11AC_VHT80PM) {
+			ic->ic_des_chan = find_alt_primary_chan_11ac80(ic, s);
+			if (!is_ieee80211_chan_valid(ic->ic_des_chan)) {
+				ic->ic_des_chan = ic->ic_curchan;
+				return -EINVAL;
+			}
+
+			ic->ic_curchan = ic->ic_des_chan;
+			ic->ic_csw_reason = IEEE80211_CSW_REASON_CONFIG;
+			ic->ic_set_channel(ic);
+		}
+
+		if ((vap->iv_opmode == IEEE80211_M_HOSTAP) && IS_UP_AUTO(vap))
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+
+		/* Legacy (pre-11n) modes get AMSDU/aggregation disabled. */
+		if (ic->ic_curmode < IEEE80211_MODE_11NA)
+			aggr = 0;
+
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PHY_MODE, ic->ic_phymode, NULL, 0);
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_TX_AMSDU, aggr, NULL, 0);
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_AGGREGATION, aggr, NULL, 0);
+
+		ieee80211_init_chanset_ranking_params(ic);
+		ieee80211_start_obss_scan_timer(vap);
+
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		retv = 0;
+	}
+	return -retv;
+}
+
+/*
+ * Push a parameter down to the qdrv layer via the ic_setparam callback,
+ * using the vap's BSS node as the handle. Silently logs and returns when
+ * no BSS node exists. Drops the node reference taken by the lookup.
+ */
+void ieee80211_param_to_qdrv(struct ieee80211vap *vap,
+	int param, int value, unsigned char *data, int len)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = ieee80211_get_vap_node(vap);
+
+	if (ni == NULL) {
+		printk("no bss node: param %d\n", param);
+		return;
+	}
+
+	KASSERT(ni, ("no bss node"));
+
+	if (ic->ic_setparam != NULL) {
+		(*ic->ic_setparam)(ni, param, value, data, len);
+	}
+
+	ieee80211_free_node(ni);
+
+	return;
+}
+EXPORT_SYMBOL(ieee80211_param_to_qdrv);
+
+/*
+ * Read a parameter back from the qdrv layer via the ic_getparam callback.
+ *
+ * NOTE(review): unlike ieee80211_param_to_qdrv(), there is no NULL check
+ * on 'ni' here — only a KASSERT — so ieee80211_free_node() may be reached
+ * with a NULL node if the vap has no BSS node; confirm this cannot happen
+ * on the call paths used.
+ */
+void ieee80211_param_from_qdrv(struct ieee80211vap *vap,
+	int param, int *value, unsigned char *data, int *len)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = ieee80211_get_vap_node(vap);
+
+	KASSERT(ni, ("no bss node"));
+
+	if (ic->ic_getparam != NULL) {
+		(*ic->ic_getparam)(ni, param, value, data, len);
+	}
+
+	ieee80211_free_node(ni);
+
+	return;
+}
+
+/* Function attached to the iwpriv wifi0 forcesmps call.
+ *
+ * Force (or, with value == -1, clear) the SM power-save mode. In STA mode
+ * a valid mode is advertised to the AP with an HT SMPS action frame;
+ * vap->iv_smps_force records the forced mode with bit 0x8000 as the
+ * "forced" flag. Returns 1 on handled input, 0 only when clearing and no
+ * own-node entry is found.
+ */
+static int
+ieee80211_forcesmps(struct ieee80211vap *vap, int value)
+{
+	/* Ensure we only apply valid values to the local vap state */
+	short smps_mode = (short)value;
+	if (value == -1)
+	{
+		struct ieee80211_node *ni;
+		ni = ieee80211_find_node(&vap->iv_ic->ic_sta, vap->iv_myaddr);
+		if (ni == NULL) {
+			return 0;
+		}
+		printk("Clearing force SMPS mode in driver\n");
+		/* Revert to the mode advertised in our own HT capabilities. */
+		smps_mode = ni->ni_htcap.pwrsave;
+		ieee80211_free_node(ni);
+	}
+	if (smps_mode != IEEE80211_HTCAP_C_MIMOPWRSAVE_NA)
+	{
+		/* If we're a STA, send out an ACTION frame to change our SMPS mode */
+		if (vap->iv_opmode == IEEE80211_M_STA)
+		{
+			struct ieee80211_action_data act;
+			int action_byte = -1;
+			memset(&act, 0, sizeof(act));
+			act.cat = IEEE80211_ACTION_CAT_HT;
+			act.action = IEEE80211_ACTION_HT_MIMOPWRSAVE;
+			switch (smps_mode)
+			{
+				/* See 802.11n d11.0 section 7.3.1.22 for the formatting of the HT ACTION SMPS byte. */
+				case IEEE80211_HTCAP_C_MIMOPWRSAVE_STATIC:
+					action_byte = 0x1;
+					break;
+				case IEEE80211_HTCAP_C_MIMOPWRSAVE_DYNAMIC:
+					action_byte = 0x3;
+					break;
+				case IEEE80211_HTCAP_C_MIMOPWRSAVE_NONE:
+					action_byte = 0x0;
+					break;
+				default:
+					/* NOTE(review): printf here, printk elsewhere
+					 * in this file — likely unintentional. */
+					printf("Not sending ACTION frame\n");
+					break;
+			}
+			if (action_byte >= 0)
+			{
+				act.params = (int *)action_byte; //Contain the single byte for the SMPS action frame in the param field
+				printk("STA Sending HT Action frame to change PS mode (%02X->%02X)\n", vap->iv_smps_force & 0xFF, smps_mode);
+				if (value == -1)
+				{
+					vap->iv_smps_force &= ~0x8000;
+				}
+				else
+				{
+					vap->iv_smps_force = 0x8000 | smps_mode;
+				}
+				IEEE80211_SEND_MGMT(vap->iv_bss, IEEE80211_FC0_SUBTYPE_ACTION, (int)&act);
+			}
+		}
+	}
+	else
+	{
+		printk("Ignoring invalid SMPS mode (%04X) at WLAN driver\n", smps_mode);
+	}
+	return 1;
+}
+
+/*
+ * Check if a node is blacklisted.
+ * Returns 1 if true, else 0.
+ * A zero ni_blacklist_timeout means "not blacklisted"; an expired
+ * deadline clears the blacklist state as a side effect.
+ */
+int
+ieee80211_blacklist_check(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	if (ni->ni_blacklist_timeout == 0) {
+		return 0;
+	}
+
+	/* time_after() handles jiffies wraparound correctly. */
+	if (time_after(jiffies, ni->ni_blacklist_timeout)) {
+		ni->ni_blacklist_timeout = 0;
+		/* Remove blacklist entry from node table */
+		ieee80211_remove_node_blacklist_timeout(ni);
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+			"[%s] removed from blacklist\n",
+			ether_sprintf(ni->ni_macaddr));
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(ieee80211_blacklist_check);
+
+/* Routine to clear existing BA agreements if necessary - used when the global BA control
+ * changes. Currently implemented by forcing a full reassociation; the
+ * selective per-TID DELBA path below is disabled pending a DELBA fix.
+ */
+static void
+ieee80211_wireless_ba_change(struct ieee80211vap *vap)
+{
+	ieee80211_wireless_reassoc(vap, 0, 1);
+	/* FIXME: re-enable this code once DELBA is working properly. */
+#if 0
+	int i;
+	for (i = 0; i < 8; i++)
+	{
+		if ((vap->iv_ba_old_control & (1 << i)) && (!(vap->iv_ba_control & (1 << i))))
+		{
+			printk("Deleting block acks on TID %d\n", i);
+			switch (vap->iv_opmode)
+			{
+				case IEEE80211_M_HOSTAP:
+					struct ieee80211com *ic = vap->iv_ic;
+					struct net_device *dev = vap->iv_dev;
+					int tid_del = i;
+					/* Iterate through all STAs - if BA is established, delete it. */
+					ieee80211_iterate_dev_nodes(dev, &ic->ic_sta,
+						ieee80211_wireless_ba_del, &tid_del, 1);
+					break;
+
+				case IEEE80211_M_STA:
+					ieee80211_wireless_ba_del((void *)&i, vap->iv_bss);
+					break;
+
+				default:
+					break;
+			}
+		}
+	}
+#endif
+}
+
+/*
+ * OBSS (overlapping BSS) scan timer callback, armed while operating in
+ * 2.4 GHz 40 MHz. AP: kick a passive-pick active scan at a fixed interval
+ * (only while no scan is pending, per ic_obss_scan_count). STA: follow
+ * the trigger interval from the AP's OBSS Scan Parameters IE. Each path
+ * re-arms the timer before starting the scan.
+ */
+void ieee80211_obss_scan_timer(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *)arg;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	int scanflags;
+
+	if (!IEEE80211_IS_11NG_40(ic) || !ic->ic_obss_scan_enable)
+		return;
+
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+		if (!ic->ic_obss_scan_count) {
+			scanflags = IEEE80211_SCAN_NOPICK |
+				IEEE80211_SCAN_ONCE | IEEE80211_SCAN_ACTIVE;
+			mod_timer(&ic->ic_obss_timer,
+				jiffies + IEEE80211_OBSS_AP_SCAN_INT * HZ);
+			(void) ieee80211_start_scan(vap, scanflags,
+				IEEE80211_SCAN_FOREVER, 0, NULL);
+		}
+	} else if (ni && IEEE80211_AID(ni->ni_associd) &&
+			(ni->ni_obss_ie.param_id == IEEE80211_ELEMID_OBSS_SCAN)) {
+		scanflags = IEEE80211_SCAN_NOPICK | IEEE80211_SCAN_ACTIVE
+			| IEEE80211_SCAN_ONCE | IEEE80211_SCAN_OBSS;
+		mod_timer(&ic->ic_obss_timer,
+			jiffies + ni->ni_obss_ie.obss_trigger_interval * HZ);
+		(void) ieee80211_start_scan(vap, scanflags,
+			IEEE80211_SCAN_FOREVER, 0, NULL);
+	}
+}
+
+/*
+ * Arm the OBSS scan timer for 'vap'. Only applies when operating 11ng40
+ * with both OBSS scanning and 20/40 coexistence enabled.
+ */
+void ieee80211_start_obss_scan_timer(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (!IEEE80211_IS_11NG_40(ic) ||
+			!ic->ic_obss_scan_enable ||
+			!ic->ic_20_40_coex_enable)
+		return;
+
+	ic->ic_obss_scan_count = 0;
+	ic->ic_obss_timer.function = ieee80211_obss_scan_timer;
+	ic->ic_obss_timer.data = (unsigned long)vap;
+	mod_timer(&ic->ic_obss_timer, jiffies + IEEE80211_OBSS_AP_SCAN_INT * HZ);
+}
+
+/*
+ * Clamp the channel-width field of a VHT operating-mode notification value
+ * to the widest width @ni supports on the current channel; the other opmode
+ * bits are passed through unchanged.
+ *
+ * NOTE(review): get_max_supported_chwidth() can return -1 (current channel
+ * not in the node's supported set); assigning that to uint8_t max_bw yields
+ * 255, which disables the clamp entirely -- confirm this is intended.
+ */
+uint8_t recalc_opmode(struct ieee80211_node *ni, uint8_t opmode)
+{
+	uint8_t max_bw = get_max_supported_chwidth(ni);
+	uint8_t bw = MS(opmode, IEEE80211_VHT_OPMODE_CHWIDTH);
+
+	if (bw > max_bw) {
+		opmode &= ~IEEE80211_VHT_OPMODE_CHWIDTH;
+		opmode |= SM(max_bw, IEEE80211_VHT_OPMODE_CHWIDTH);
+	}
+
+	return opmode;
+}
+
+/*
+ * Re-evaluate one node's VHT operating mode against the current channel
+ * and push the recalculated value down to the driver if it changed.
+ */
+void update_node_opmode(struct ieee80211com *ic, struct ieee80211_node *ni)
+{
+	int cur_opmode;
+	uint8_t opmode;
+
+	/* fetch the opmode currently programmed in the driver for this node */
+	ieee80211_param_from_qdrv(ni->ni_vap, IEEE80211_PARAM_NODE_OPMODE, &cur_opmode, NULL, NULL);
+	/* node channel follows the (possibly just switched) current channel */
+	ni->ni_chan = ni->ni_ic->ic_curchan;
+	opmode = recalc_opmode(ni, ni->ni_vhtop_notif_mode);
+	if (cur_opmode != opmode) {
+		ieee80211_param_to_qdrv(ni->ni_vap, IEEE80211_PARAM_NODE_OPMODE,
+				opmode, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	}
+}
+
+/*
+ * Walk the station table under the node-table lock and refresh the VHT
+ * operating mode of every STA-type node (see update_node_opmode()).
+ */
+static void update_node_opmodes(struct ieee80211com *ic)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+
+	IEEE80211_NODE_LOCK(nt);
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if(ni->ni_node_type == IEEE80211_NODE_TYPE_STA)
+			update_node_opmode(ic, ni);
+	}
+
+	IEEE80211_NODE_UNLOCK(nt);
+}
+
+/*
+ * Determine the widest channel width @ni can use on the current channel,
+ * based on the node's supported-channels bitmap (ni_supp_chans).
+ *
+ * Returns IEEE80211_CWM_WIDTH80/40/20, or -1 if the node does not support
+ * the current primary channel at all.  For 80 MHz the three sibling 20 MHz
+ * channels of the 80 MHz block are checked, picked by the primary's
+ * position (LL/LU/UL/UU) within the block; for 40 MHz only the secondary
+ * 20 MHz channel (4 channel numbers away) is checked.
+ */
+int get_max_supported_chwidth(struct ieee80211_node *ni)
+{
+	struct ieee80211_channel *cur_chan = ni->ni_ic->ic_curchan;
+	u_int8_t ch = cur_chan->ic_ieee;
+
+	if (isclr(ni->ni_supp_chans, ch)) {
+		return -1;
+	}
+	/* Check for 80 MHz */
+	if (cur_chan->ic_flags & IEEE80211_CHAN_VHT80) {
+		if (cur_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) {
+			if (	   isset(ni->ni_supp_chans, ch + 4)
+				&& isset(ni->ni_supp_chans, ch + 8)
+				&& isset(ni->ni_supp_chans, ch + 12)
+				) {
+				return IEEE80211_CWM_WIDTH80;
+			}
+		}
+		else if (cur_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) {
+			if (	    isset(ni->ni_supp_chans, ch - 4)
+				&&  isset(ni->ni_supp_chans, ch + 4)
+				&&  isset(ni->ni_supp_chans, ch + 8)
+				) {
+				return IEEE80211_CWM_WIDTH80;
+			}
+		}
+		else if (cur_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) {
+			if (	   isset(ni->ni_supp_chans, ch - 8)
+				&& isset(ni->ni_supp_chans, ch - 4)
+				&& isset(ni->ni_supp_chans, ch + 4)
+				) {
+				return IEEE80211_CWM_WIDTH80;
+			}
+		}
+		else if (cur_chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) {
+			if (	   isset(ni->ni_supp_chans, ch - 12)
+				&& isset(ni->ni_supp_chans, ch - 8)
+				&& isset(ni->ni_supp_chans, ch - 4)
+				) {
+				return IEEE80211_CWM_WIDTH80;
+			}
+		}
+	}
+
+	/* Check for 40 MHz */
+	if (cur_chan->ic_flags & IEEE80211_CHAN_VHT40) {
+		if (cur_chan->ic_flags & IEEE80211_CHAN_VHT40U) {
+			if (isset(ni->ni_supp_chans, ch + 4)) {
+				return IEEE80211_CWM_WIDTH40;
+			}
+		}
+		else if (cur_chan->ic_flags & IEEE80211_CHAN_VHT40D) {
+			if (isset(ni->ni_supp_chans, ch - 4)) {
+				return IEEE80211_CWM_WIDTH40;
+			}
+		}
+	}
+	return IEEE80211_CWM_WIDTH20;
+}
+
+/*
+ * Default CSA completion handler: actually move to the announced channel.
+ * @arg: the ieee80211com, cast to unsigned long.
+ *
+ * Clears stale DFS CAC state on the channel being left (non-EU regulatory
+ * only), commits ic_csa_chan as the current/BSS/desired channel, refreshes
+ * per-node opmodes, drops the CSA IE from beacons, retunes the radio and
+ * records the switch.
+ */
+void ieee80211_finish_csa(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	struct ieee80211vap *vap;
+
+	/* clear DFS CAC state on previous channel */
+	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+		ic->ic_bsschan->ic_freq != ic->ic_csa_chan->ic_freq &&
+		IEEE80211_IS_CHAN_CACDONE(ic->ic_bsschan)) {
+		/*
+		 * IEEE80211_CHAN_DFS_CAC_DONE indicates whether or not to do CAC afresh.
+		 * US   : IEEE80211_CHAN_DFS_CAC_DONE shall be cleared whenver we move to
+		 *        a different channel
+		 * ETSI : IEEE80211_CHAN_DFS_CAC_DONE shall be retained; Only event which
+		 *        would mark the channel as unusable is the radar indication
+		 */
+		if ((ic->ic_dfs_is_eu_region() == false) &&
+		   (ic->ic_chan_compare_equality(ic, ic->ic_bsschan, ic->ic_csa_chan) == false)) {
+			ic->ic_bsschan->ic_flags &= ~IEEE80211_CHAN_DFS_CAC_DONE;
+			if (ic->ic_mark_channel_dfs_cac_status) {
+				ic->ic_mark_channel_dfs_cac_status(ic, ic->ic_bsschan, IEEE80211_CHAN_DFS_CAC_DONE, false);
+				ic->ic_mark_channel_dfs_cac_status(ic, ic->ic_bsschan, IEEE80211_CHAN_DFS_CAC_IN_PROGRESS, false);
+			}
+			/* Mark the channel as not_available and ready for cac */
+			if (ic->ic_mark_channel_availability_status) {
+				ic->ic_mark_channel_availability_status(ic, ic->ic_bsschan,
+						IEEE80211_CHANNEL_STATUS_NOT_AVAILABLE_CAC_REQUIRED);
+			}
+			printk(KERN_DEBUG"ieee80211_finish_csa:"
+					"Clearing CAC_DONE Status for chan %d\n",
+					ic->ic_bsschan->ic_ieee);
+		}
+	}
+
+	/* commit the announced channel everywhere */
+	ic->ic_prevchan = ic->ic_curchan;
+	ic->ic_curchan = ic->ic_csa_chan;
+	ic->ic_bsschan = ic->ic_csa_chan;
+	ic->ic_des_chan = ic->ic_csa_chan;
+	ic->ic_csa_count = 0;
+	update_node_opmodes(ic);
+
+	/* Remove the CSA IE from beacons and cause other field in beacon updated */
+	ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
+	ic->ic_set_channel(ic);
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+
+		if ((vap->iv_state != IEEE80211_S_RUN) && (vap->iv_state != IEEE80211_S_SCAN))
+			continue;
+		ic->ic_beacon_update(vap);
+	}
+
+	/* record channel change event */
+	ic->ic_chan_switch_record(ic, ic->ic_csa_chan, ic->ic_csa_reason);
+	ic->ic_chan_switch_reason_record(ic, ic->ic_csa_reason);
+	return;
+}
+
+/*
+ * Workqueue handler that completes a channel switch.  Drains any stale
+ * completion, then waits (bounded by csa_count beacon intervals) for the
+ * MuC to acknowledge CSA on each running AP VAP before invoking the
+ * registered finish_csa callback.
+ */
+void ieee80211_csa_finish(struct work_struct *work)
+{
+	struct ieee80211com *ic = container_of(work, struct ieee80211com, csa_work);
+	struct ieee80211vap *vap;
+	unsigned long ret = 0;
+
+	/* consume any completion left over from a previous CSA round */
+	while (completion_done(&ic->csa_completion)) {
+		try_wait_for_completion(&ic->csa_completion);
+		DBGPRINTF_W("Warning: CSA completion was done\n");
+	}
+
+	TAILQ_FOREACH (vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+
+		if (vap->iv_state != IEEE80211_S_RUN)
+			continue;
+
+		/* Timeout indicates MuC has not responsed CSA request */
+		ret = wait_for_completion_interruptible_timeout(&ic->csa_completion,
+					msecs_to_jiffies((ic->ic_csa_count + 1) * ic->ic_lintval));
+		if (ret == 0) {
+			DBGPRINTF_W("Warning: timeout occurs when LHOST waits for MuC CSA completion!\n");
+			break;
+		}
+	}
+
+	if (ic->finish_csa == NULL) {
+		DBGPRINTF_E("Error: finish_csa callback is not setup\n");
+		return;
+	}
+
+	IEEE80211_DPRINTF(TAILQ_FIRST(&ic->ic_vaps), IEEE80211_MSG_DOTH,
+				"%s: channel switch finished, owner=%#x\n", __func__, ic->ic_csa_reason);
+
+	ic->finish_csa((unsigned long)ic);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+/*
+ * Start a CSA (channel switch announcement) process: CSA beacons and/or
+ * action frames are sent to notify stations of the upcoming channel switch.
+ * @finish_csa: called at channel-switch time to perform the actual switch;
+ * if NULL, ieee80211_finish_csa() is used as the default action.
+ * @flag: specifies whether to use CSA beacons, CSA action frames, or both.
+ */
+int ieee80211_enter_csa(struct ieee80211com *ic, struct ieee80211_channel *chan,
+		void (*finish_csa)(unsigned long arg), uint32_t reason,
+		uint8_t csa_count, uint8_t csa_mode, uint32_t flag)
+{
+	struct ieee80211vap *vap;
+	uint32_t csa_flag;
+
+	/* refuse to start a second CSA while one is pending */
+	if (ic->ic_flags & IEEE80211_F_CHANSWITCH) {
+		IEEE80211_DPRINTF(TAILQ_FIRST(&ic->ic_vaps), IEEE80211_MSG_DOTH,
+				"%s: CSA already in progress, owner=%d, pre-"
+				"owner=%d\n", __func__, reason, ic->ic_csa_reason);
+		return -1;
+	}
+
+	ic->ic_csa_chan = chan;
+	/* a device-level csa_flag override takes precedence over the caller's */
+	csa_flag = ic->ic_csa_flag ? ic->ic_csa_flag : flag;
+
+	/* now flag the beacon update to include the channel switch IE */
+	ic->ic_flags |= IEEE80211_F_CHANSWITCH;
+	ic->ic_csa_count = csa_count;
+	ic->ic_csa_mode = csa_mode;
+	ic->ic_csa_reason = reason;
+	ic->ic_csw_reason = reason;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+
+		if (vap->iv_state != IEEE80211_S_RUN)
+			continue;
+
+		/* send broadcast csa action */
+		if (csa_flag & IEEE80211_CSA_F_ACTION)
+			ic->ic_send_csa_frame(vap, ic->ic_csa_mode,
+				ic->ic_csa_chan->ic_ieee, ic->ic_csa_count, 0);
+		/* Update beacon to include CSA IE */
+		if (csa_flag & IEEE80211_CSA_F_BEACON)
+			ic->ic_beacon_update(vap);
+	}
+	ic->finish_csa = finish_csa ? finish_csa : ieee80211_finish_csa;
+	queue_work(ic->csa_work_queue, &ic->csa_work);
+	/*
+	 * Store original attenuation to handle the following case:
+	 * We switched to low power channel when attenuation is small. But then in low
+	 * power channel attenuation increases but interference is low. We need to detect
+	 * such rate ratio drop to trigger channel ranking.
+	 */
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
+			ic->ic_curchan != IEEE80211_CHAN_ANYC &&
+			ic->ic_scan &&
+			ic->ic_scan->ss_scs_priv) {
+		struct ap_state *as = ic->ic_scan->ss_scs_priv;
+
+		if (chan->ic_maxpower < (ic->ic_curchan->ic_maxpower - SCS_CHAN_POWER_DIFF_SAFE)) {
+			as->as_sta_atten_expect = as->as_sta_atten_max;
+		} else if (chan->ic_maxpower > (ic->ic_curchan->ic_maxpower + SCS_CHAN_POWER_DIFF_SAFE)) {
+			as->as_sta_atten_expect = SCS_ATTEN_UNINITED;
+		}
+		SCSDBG(SCSLOG_NOTICE, "atten expect set to %d\n", as->as_sta_atten_expect);
+	}
+
+	/* for dfs reentry demon */
+	vap = TAILQ_FIRST(&ic->ic_vaps);
+	/* NOTE(review): if the repeater case yields TAILQ_NEXT == NULL, the
+	 * dereference below would oops -- confirm a second VAP always exists
+	 * in repeater mode. */
+	if (ieee80211_is_repeater(ic) && !ieee80211_is_repeater_associated(ic))
+		vap = TAILQ_NEXT(vap, iv_next);
+
+	ic->ic_dfs_chan_switch_notify(vap->iv_dev, ic->ic_csa_chan);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_enter_csa);
+
+/*
+ * Query the MuC for the device's capability bandwidth (BW_* value).
+ * Returns BW_INVALID if the query does not update the local variable.
+ */
+int ieee80211_get_cap_bw(struct ieee80211com *ic)
+{
+	int bw = BW_INVALID;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	ieee80211_param_from_qdrv(vap, IEEE80211_PARAM_BW_SEL_MUC, &bw, NULL, 0);
+
+	return bw;
+}
+EXPORT_SYMBOL(ieee80211_get_cap_bw);
+
+/*
+ * Return the current operational bandwidth.  Starts from the capability
+ * bandwidth reported by the MuC; in STA mode it is additionally capped by
+ * the bandwidth of the BSS we are associated to (when known).
+ *
+ * Fix: this function was indented with spaces, inconsistent with the
+ * tab indentation used throughout the file; converted to tabs.
+ */
+int ieee80211_get_bw(struct ieee80211com *ic)
+{
+	int bw = ieee80211_get_cap_bw(ic);
+
+	/* STA mode: never report more than the BSS supports */
+	if ((ic->ic_opmode == IEEE80211_M_STA) && (bw != BW_INVALID) && ic->ic_bss_bw) {
+		bw = MIN(bw, ic->ic_bss_bw);
+	}
+	return bw;
+}
+EXPORT_SYMBOL(ieee80211_get_bw);
+
+/*
+ * Update HT/VHT capability and operation IEs to match the new bandwidth
+ * (BW_HT20/BW_HT40/BW_HT80), honouring the VAP's short-GI flags, and mirror
+ * the result into the BSS node in AP mode so beacons/probe responses agree.
+ *
+ * Fix: the error printk used %u for the signed 'int bw'; changed to %d so
+ * a negative bandwidth value is printed correctly.
+ */
+void ieee80211_update_bw_capa(struct ieee80211vap *vap, int bw)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *bss = vap->iv_bss;
+
+	if (bw == BW_HT20) {
+		ic->ic_htcap.cap &= ~(IEEE80211_HTCAP_C_CHWIDTH40 |
+					IEEE80211_HTCAP_C_SHORTGI40);
+		if (vap->iv_ht_flags & IEEE80211_HTF_SHORTGI_ENABLED)
+		      ic->ic_htcap.cap |= IEEE80211_HTCAP_C_SHORTGI20;
+		ic->ic_htinfo.byte1 &= ~IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40;
+		ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+	} else if (bw == BW_HT40) {
+		ic->ic_htcap.cap |= IEEE80211_HTCAP_C_CHWIDTH40;
+		if (vap->iv_ht_flags & IEEE80211_HTF_SHORTGI_ENABLED)
+			ic->ic_htcap.cap |= (IEEE80211_HTCAP_C_SHORTGI40 |
+						IEEE80211_HTCAP_C_SHORTGI20);
+		ic->ic_htinfo.byte1 |= IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40;
+		ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ;
+	} else if (bw == BW_HT80) {
+		ic->ic_htcap.cap |= IEEE80211_HTCAP_C_CHWIDTH40;
+		if (vap->iv_ht_flags & IEEE80211_HTF_SHORTGI_ENABLED)
+			ic->ic_htcap.cap |= (IEEE80211_HTCAP_C_SHORTGI40 |
+						IEEE80211_HTCAP_C_SHORTGI20);
+		ic->ic_htinfo.byte1 |= IEEE80211_HTINFO_B1_REC_TXCHWIDTH_40;
+		if (vap->iv_vht_flags & IEEE80211_VHTCAP_C_SHORT_GI_80)
+			ic->ic_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_SHORT_GI_80;
+		ic->ic_vhtop.chanwidth = IEEE80211_VHTOP_CHAN_WIDTH_80MHZ;
+	} else {
+		printk(KERN_INFO "%s: error - invalid bw %d\n", __func__, bw);
+		return;
+	}
+
+	/* AP mode: keep the BSS node's copies in sync with the device IEs */
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP) && bss) {
+		memcpy(&bss->ni_htcap, &ic->ic_htcap, sizeof(bss->ni_htcap));
+		memcpy(&bss->ni_htinfo, &ic->ic_htinfo, sizeof(bss->ni_htinfo));
+		memcpy(&bss->ni_vhtcap, &ic->ic_vhtcap, sizeof(bss->ni_vhtcap));
+		memcpy(&bss->ni_vhtop, &ic->ic_vhtop, sizeof(bss->ni_vhtop));
+	}
+}
+EXPORT_SYMBOL(ieee80211_update_bw_capa);
+
+/*
+ * Push a new bandwidth selection to the MuC, refresh the channel list for
+ * that bandwidth and (unless @delay_chan_switch) retune immediately.
+ */
+void ieee80211_change_bw(struct ieee80211vap *vap, int bw, int delay_chan_switch)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_BW_SEL_MUC, bw, NULL, 0);
+
+	ieee80211_update_chanlist_from_bw(ic, bw);
+
+	/* change channel to apply the new bandwidth and power configuration */
+	if (!delay_chan_switch)
+		ic->ic_set_channel(ic);
+}
+
+/*
+ * Fetch the MU-MIMO group table from the driver into @mu_grp_tbl (caller
+ * provides room for IEEE80211_MU_GRP_NUM_MAX entries).  Returns the length
+ * value as updated by the query.
+ */
+int ieee80211_get_mu_grp(struct ieee80211com *ic,
+	struct qtn_mu_grp_args *mu_grp_tbl)
+{
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	int len = sizeof(*mu_grp_tbl)*IEEE80211_MU_GRP_NUM_MAX;
+
+	ieee80211_param_from_qdrv(vap, IEEE80211_PARAM_GET_MU_GRP, NULL, (void*)mu_grp_tbl, &len);
+
+	return len;
+}
+EXPORT_SYMBOL(ieee80211_get_mu_grp);
+
+/*
+ * Return the secondary 20 MHz channel number of a 40 MHz pair: four channel
+ * numbers below the primary for HT40- (D), four above for HT40+ (U), or 0
+ * when @chan is not a 40 MHz channel.
+ */
+int ieee80211_find_sec_chan(struct ieee80211_channel *chan)
+{
+	if (chan->ic_flags & IEEE80211_CHAN_HT40D)
+		return chan->ic_ieee - IEEE80211_CHAN_SEC_SHIFT;
+
+	if (chan->ic_flags & IEEE80211_CHAN_HT40U)
+		return chan->ic_ieee + IEEE80211_CHAN_SEC_SHIFT;
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_find_sec_chan);
+
+/*
+ * Return the upper 20 MHz channel of the *other* 40 MHz half of an 80 MHz
+ * block, relative to the primary's position (LL/LU/UL/UU) within the block;
+ * 0 if @chan carries no VHT80 position flag.
+ */
+int ieee80211_find_sec40u_chan(struct ieee80211_channel *chan)
+{
+	int chan_sec40u = 0;
+
+	if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) {
+		chan_sec40u = chan->ic_ieee + 3 * IEEE80211_CHAN_SEC_SHIFT;
+	} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) {
+		chan_sec40u = chan->ic_ieee + 2 * IEEE80211_CHAN_SEC_SHIFT;
+	} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) {
+		chan_sec40u = chan->ic_ieee - IEEE80211_CHAN_SEC_SHIFT;
+	} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) {
+		chan_sec40u = chan->ic_ieee - 2 * IEEE80211_CHAN_SEC_SHIFT;
+	}
+
+	return chan_sec40u;
+}
+EXPORT_SYMBOL(ieee80211_find_sec40u_chan);
+
+/*
+ * Return the lower 20 MHz channel of the *other* 40 MHz half of an 80 MHz
+ * block, relative to the primary's position (LL/LU/UL/UU) within the block;
+ * 0 if @chan carries no VHT80 position flag.  Companion of
+ * ieee80211_find_sec40u_chan().
+ */
+int ieee80211_find_sec40l_chan(struct ieee80211_channel *chan)
+{
+	int chan_sec40l = 0;
+
+	if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LL) {
+		chan_sec40l = chan->ic_ieee + 2 * IEEE80211_CHAN_SEC_SHIFT;
+	} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_LU) {
+		chan_sec40l = chan->ic_ieee + IEEE80211_CHAN_SEC_SHIFT;
+	} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UL) {
+		chan_sec40l = chan->ic_ieee - 2 * IEEE80211_CHAN_SEC_SHIFT;
+	} else if (chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU) {
+		chan_sec40l = chan->ic_ieee - 3 * IEEE80211_CHAN_SEC_SHIFT;
+	}
+
+	return chan_sec40l;
+}
+EXPORT_SYMBOL(ieee80211_find_sec40l_chan);
+
+/*
+ * Find the secondary 20 MHz channel for @chan according to the operating
+ * class primary-channel tables.
+ * @preference: IEEE80211_OC_BEHAV_CHAN_UPPER / IEEE80211_OC_BEHAV_CHAN_LOWWER,
+ * i.e. whether the primary sits above or below its secondary.
+ * Returns the secondary channel number, or 0 when @chan is not a valid
+ * 40 MHz primary for the requested behaviour (or on allocation failure).
+ *
+ * Fix: removed the redundant 'if (chan_list)' guard before kfree() -- the
+ * allocation failure path already returned above, so the pointer is always
+ * non-NULL here (and kfree(NULL) is a no-op regardless).
+ */
+int ieee80211_find_sec_chan_by_operating_class(struct ieee80211com *ic, int chan, uint32_t preference)
+{
+	uint8_t *chan_list;
+	int chan_sec = 0;
+
+	chan_list = kzalloc(howmany(IEEE80211_CHAN_MAX, NBBY), GFP_ATOMIC);
+	if (chan_list == NULL) {
+		printk(KERN_ERR "%s: buffer alloc failed\n", __FUNCTION__);
+		return 0;
+	}
+
+	ieee80211_get_prichan_list_by_operating_class(ic,
+				BW_HT40,
+				(uint8_t *)chan_list,
+				preference);
+	if (isset(chan_list, chan)) {
+		if (IEEE80211_OC_BEHAV_CHAN_UPPER == preference)
+			chan_sec = chan - IEEE80211_CHAN_SEC_SHIFT;
+		else if (IEEE80211_OC_BEHAV_CHAN_LOWWER == preference)
+			chan_sec = chan + IEEE80211_CHAN_SEC_SHIFT;
+	}
+
+	kfree(chan_list);
+
+	return chan_sec;
+}
+EXPORT_SYMBOL(ieee80211_find_sec_chan_by_operating_class);
+
+/*
+ * Generate an interference mitigation event
+ */
+#ifdef QSCS_ENABLED
+/*
+ * RSSI / rx-glitch threshold step tables, one row per power range.
+ * NOTE(review): entry semantics (ordered RSSI steps terminated by
+ * BRCM_RSSI_MIN or a zero pair) are inferred from the values -- confirm
+ * against the consumer of this table.
+ */
+struct brcm_rxglitch_thrshld_pair brcm_rxglitch_thrshlds[BRCM_RXGLITH_THRSHLD_PWR_NUM][BRCM_RXGLITH_THRSHLD_STEP] = {
+	{
+		{-49, BRCM_RXGLITCH_TOP},
+		{-58, 32000},
+		{-65, 20000},
+		{-73, 12000},
+		{BRCM_RSSI_MIN, 10000},
+	},
+	{
+		{-59, BRCM_RXGLITCH_TOP},
+		{-68, 40000},
+		{-74, 20000},
+		{BRCM_RSSI_MIN, 10000},
+		{0, 0},
+	},
+};
+
+static struct qtn_scs_vsp_node_stats *ieee80211_scs_find_node_stats(struct ieee80211com *ic, struct qtn_scs_info *scs_info_read, uint16_t aid);
+int ieee80211_scs_clean_stats(struct ieee80211com *ic, uint32_t level, int clear_dfs_reentry);
+static uint32_t ieee80211_scs_fix_cca_intf(struct ieee80211com *ic, struct ieee80211_node *ni, uint32_t cca_intf, uint32_t sp_fail, uint32_t lp_fail);
+
+/*
+ * Decide whether @newchan_ieee would make a better primary channel than
+ * @oldchan_ieee.  A channel that is inactive, primary-inactive or unknown
+ * loses immediately.  Otherwise the channel with the higher max power at
+ * the widest bandwidth in use wins; on a complete tie, @random_select
+ * resolves it with a coin flip.  Returns 1 if the new channel is better.
+ */
+static int ieee80211_prichan_is_newchan_better(struct ieee80211com *ic,
+		int newchan_ieee, int oldchan_ieee, int random_select)
+{
+	int cur_bw;
+	struct ieee80211_channel *newchan;
+	struct ieee80211_channel *oldchan;
+
+	/* new channel unusable -> keep old */
+	if (!newchan_ieee || isclr(ic->ic_chan_active, newchan_ieee) ||
+			isset(ic->ic_chan_pri_inactive, newchan_ieee)) {
+		return 0;
+	}
+	newchan = findchannel_any(ic, newchan_ieee, ic->ic_des_mode);
+	if (!is_ieee80211_chan_valid(newchan)) {
+		return 0;
+	}
+
+	/* old channel unusable -> take new */
+	if (!oldchan_ieee || isclr(ic->ic_chan_active, oldchan_ieee) ||
+			isset(ic->ic_chan_pri_inactive, oldchan_ieee)) {
+		return 1;
+	}
+	oldchan = findchannel_any(ic, oldchan_ieee, ic->ic_des_mode);
+	if (!is_ieee80211_chan_valid(oldchan)) {
+		return 1;
+	}
+
+	/* Choose the channel with maximal power setting */
+	cur_bw = ieee80211_get_bw(ic);
+	if (cur_bw >= BW_HT80) {
+		if (newchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_80M] >
+				oldchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_80M]) {
+			return 1;
+		} else if (newchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_80M] <
+				oldchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_80M]) {
+			return 0;
+		}
+	}
+
+	if (cur_bw >= BW_HT40) {
+		if (newchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_40M] >
+				oldchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_40M]) {
+			return 1;
+		} else if (newchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_40M] <
+				oldchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_40M]) {
+			return 0;
+		}
+	}
+
+	if (newchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_20M] >
+			oldchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_20M]) {
+		return 1;
+	} else if (newchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_20M] <
+			oldchan->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][PWR_IDX_20M]) {
+		return 0;
+	}
+
+	/* All powers are same, run random selection per request */
+	if (random_select) {
+		uint8_t rndbuf;
+		get_random_bytes(&rndbuf, 1);
+		return (rndbuf > 127);
+	}
+
+	return 0;
+}
+
+/*
+ * AP mode: validate @chan as a primary channel and, when it is disallowed
+ * as primary (or @rank_by_pwr asks for power ranking), try to substitute a
+ * better primary among the other 20 MHz channels of the same 40/80 MHz
+ * block.  Returns the (possibly substituted) channel; in non-AP modes or
+ * at 20 MHz bandwidth @chan is returned unchanged.
+ * @caller: tag for log messages; "iwconfig" marks manual configuration,
+ * which may override auto-channel-only restrictions.
+ */
+struct ieee80211_channel* ieee80211_chk_update_pri_chan(struct ieee80211com *ic,
+		struct ieee80211_channel *chan, uint32_t rank_by_pwr, const char* caller, int print_warning)
+{
+	struct ieee80211_channel *prichan;
+	int newchan_ieee;
+	int prichan_ieee = 0;
+	int cur_bw = ieee80211_get_bw(ic);
+	int is_manual_cfg;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		return chan;
+	}
+
+	is_manual_cfg = !strcmp(caller, "iwconfig");
+
+	if (cur_bw <= BW_HT20) {
+		goto done;
+	}
+
+	/* @chan itself is an acceptable primary: keep it unless ranking */
+	if (isclr(ic->ic_chan_pri_inactive, chan->ic_ieee) ||
+			(is_manual_cfg && isset(ic->ic_is_inactive_autochan_only, chan->ic_ieee))) {
+		if (!rank_by_pwr) {
+			return chan;
+		} else {
+			prichan_ieee = chan->ic_ieee;
+		}
+	}
+
+	newchan_ieee = ieee80211_find_sec_chan(chan);
+	if (ieee80211_prichan_is_newchan_better(ic, newchan_ieee, prichan_ieee, 0)) {
+		prichan_ieee = newchan_ieee;
+	}
+
+	if (cur_bw > BW_HT40) {
+		newchan_ieee = ieee80211_find_sec40u_chan(chan);
+		if (ieee80211_prichan_is_newchan_better(ic, newchan_ieee, prichan_ieee,
+				prichan_ieee && (prichan_ieee != chan->ic_ieee))) {
+			prichan_ieee = newchan_ieee;
+		}
+
+		newchan_ieee = ieee80211_find_sec40l_chan(chan);
+		if (ieee80211_prichan_is_newchan_better(ic, newchan_ieee, prichan_ieee,
+				prichan_ieee && (prichan_ieee != chan->ic_ieee))) {
+			prichan_ieee = newchan_ieee;
+		}
+	}
+
+	if (prichan_ieee && prichan_ieee != chan->ic_ieee) {
+		prichan = findchannel_any(ic, prichan_ieee, ic->ic_des_mode);
+		if (is_ieee80211_chan_valid(prichan)) {
+			if (isset(ic->ic_chan_pri_inactive, chan->ic_ieee)) {
+				if (print_warning) {
+					printk("%s: channel %d can't be used as primary channel,"
+							" use %d instead within current bandwidth\n",
+							caller, chan->ic_ieee, prichan_ieee);
+				}
+			}
+			return prichan;
+		}
+	}
+
+done:
+	/* no usable substitute; warn if the configured primary is disallowed */
+	if (isset(ic->ic_chan_pri_inactive, chan->ic_ieee) &&
+			(!is_manual_cfg || isclr(ic->ic_is_inactive_autochan_only, chan->ic_ieee))) {
+		if (print_warning) {
+			printk("%s: channel %d can't be used as primary channel,"
+					" and no alternative channel within current bandwidth\n",
+					caller, chan->ic_ieee);
+		}
+	}
+
+	return chan;
+}
+EXPORT_SYMBOL(ieee80211_chk_update_pri_chan);
+
+/*
+ * Deliver an SCS message to userspace as a wireless event on the VAP's
+ * net_device.
+ *
+ * Fix: removed the stray ';' after the function's closing brace -- an
+ * empty file-scope declaration is not valid C and draws compiler warnings.
+ */
+static void
+ieee80211_wireless_scs_msg_send(struct ieee80211vap *vap, char *msg_buf)
+{
+	ieee80211_eventf(vap->iv_dev, "%s",  msg_buf);
+}
+
+/*
+ * Nonzero while a DFS Channel Availability Check is running on the current
+ * channel.  Note: returns the raw masked flag bit, not a normalized 0/1;
+ * callers must treat it as a boolean.
+ */
+static __inline int
+ieee80211_is_cac_in_progress(struct ieee80211com *ic)
+{
+	return ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS_CAC_IN_PROGRESS;
+}
+
+/*
+ * Safety-net timer for off-channel suspension: when it fires, force-clear
+ * the suspend counter and tell the driver to resume off-channel activity,
+ * even if suspend/resume calls were unbalanced.  If the VAP or BSS is not
+ * ready yet, re-arm the timer and try again later.
+ * @arg: the ieee80211com, cast to unsigned long.
+ */
+void ieee80211_off_channel_timeout(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+	struct offchan_protect *offchan_prt = &ic->ic_offchan_protect;
+
+	if (vap == NULL || vap->iv_bss == NULL) {
+		if (vap) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+					"%s: BSS not ready, delay the timer\n",
+					__func__);
+		} else {
+			DBGPRINTF(DBG_LL_ERROR, QDRV_LF_VAP,
+					"%s: VAP not ready, delay the timer\n",
+					__func__);
+		}
+
+		/* not ready: push the expiry out and retry */
+		offchan_prt->offchan_stop_expire.data = (unsigned long)ic;
+		offchan_prt->offchan_timeout = jiffies + IEEE80211_OFFCHAN_TIMEOUT_DEFAULT * HZ;
+		mod_timer(&offchan_prt->offchan_stop_expire, offchan_prt->offchan_timeout);
+		return;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s: suspending counter %u\n",
+				__func__, offchan_prt->offchan_suspend_cnt);
+
+	offchan_prt->offchan_suspend_cnt = 0;
+	offchan_prt->offchan_timeout = 0;
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_OFF_CHAN_SUSPEND, 0, NULL, 0);
+}
+
+/*
+ * Suspend off-channel activity (sampling/scanning) for at least @timeout
+ * seconds.  Reference-counted via offchan_suspend_cnt; the watchdog timer
+ * (ieee80211_off_channel_timeout) guarantees eventual resume.  The timer
+ * expiry only ever grows -- a shorter new request never truncates a longer
+ * outstanding suspension.
+ */
+void ieee80211_off_channel_suspend(struct ieee80211vap *vap, uint32_t timeout)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct offchan_protect *offchan_prt = &ic->ic_offchan_protect;
+
+	if (vap->iv_bss == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s: BSS not ready\n",
+				__func__);
+		return;
+	}
+
+	/* first suspension: arm the watchdog with the default timeout */
+	if (offchan_prt->offchan_timeout == 0) {
+		offchan_prt->offchan_stop_expire.data = (unsigned long)ic;
+		offchan_prt->offchan_stop_expire.expires = jiffies + IEEE80211_OFFCHAN_TIMEOUT_DEFAULT * HZ;
+		offchan_prt->offchan_timeout = jiffies + IEEE80211_OFFCHAN_TIMEOUT_DEFAULT * HZ;
+		add_timer(&offchan_prt->offchan_stop_expire);
+	}
+
+	/* extend (never shorten) the watchdog for the new request */
+	if (time_after(jiffies + timeout * HZ, offchan_prt->offchan_timeout)) {
+		offchan_prt->offchan_stop_expire.data = (unsigned long)ic;
+		offchan_prt->offchan_timeout = jiffies + timeout * HZ;
+		mod_timer(&offchan_prt->offchan_stop_expire, offchan_prt->offchan_timeout);
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s: suspending counter %u, "
+				"timeout %lus later\n",
+				__func__, offchan_prt->offchan_suspend_cnt,
+				(offchan_prt->offchan_timeout - jiffies) / HZ);
+
+	offchan_prt->offchan_suspend_cnt++;
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_OFF_CHAN_SUSPEND, 1, NULL, 0);
+	ieee80211_scan_scs_sample_cancel(vap);
+}
+
+/*
+ * Drop one reference on the off-channel suspension; when the count reaches
+ * zero, cancel the watchdog timer and tell the driver to resume off-channel
+ * activity.  Extra resumes (count already zero) are silently ignored.
+ */
+void ieee80211_off_channel_resume(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct offchan_protect *offchan_prt = &ic->ic_offchan_protect;
+
+	if (vap->iv_bss == NULL) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s: BSS not ready\n",
+				__func__);
+		return;
+	}
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s: suspending counter %u, "
+				"timeout %lus later\n",
+				__func__, offchan_prt->offchan_suspend_cnt,
+				offchan_prt->offchan_timeout ?
+					(offchan_prt->offchan_timeout - jiffies) / HZ :
+					0);
+
+	/* There is a potential race condition here, but the timer will kick in and recover it.
+	 * Currently we don't plan to protect against it */
+	if (offchan_prt->offchan_suspend_cnt)
+		offchan_prt->offchan_suspend_cnt--;
+	else
+		return;
+
+	if (offchan_prt->offchan_suspend_cnt == 0) {
+		offchan_prt->offchan_timeout = 0;
+		del_timer(&offchan_prt->offchan_stop_expire);
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_OFF_CHAN_SUSPEND, 0, NULL, 0);
+	}
+}
+
+/*
+ * Interference mitigation sampling task
+ * Periodically go off-channel to sample the quality of another channel.
+ */
+static void
+ieee80211_wireless_scs_sampling_task(struct work_struct *work)
+{
+	struct delayed_work *dwork = (struct delayed_work *)work;
+	struct ieee80211com *ic =
+		container_of(dwork, struct ieee80211com, ic_scs_sample_work);
+	struct ieee80211vap *vap = NULL;
+	struct ieee80211vap *vap_first = TAILQ_FIRST(&ic->ic_vaps);
+	struct ieee80211vap *vap_next;
+
+	/* never go off-channel while a DFS CAC is running */
+	if (ieee80211_is_cac_in_progress(ic)) {
+		SCSDBG(SCSLOG_NOTICE, "%s: not sampling - CAC in progress\n", __func__);
+		goto next_work;
+	}
+
+	/* FIXME: check threshold */
+
+	/* Only sample if at least one VAP is in run state and none are scanning */
+	vap_next = vap_first;
+	while ((vap_next != NULL) &&
+	       (vap_next->iv_state != IEEE80211_S_SCAN)) {
+
+		if ((vap == NULL) && (vap_next->iv_opmode == IEEE80211_M_HOSTAP) &&
+				(vap_next->iv_state == IEEE80211_S_RUN)) {
+			vap = vap_next;
+		}
+		vap_next = TAILQ_NEXT(vap_next, iv_next);
+	}
+
+	/* vap_next != NULL here means the walk stopped on a scanning VAP */
+	if (vap) {
+		if (vap_next == NULL) {
+			IEEE80211_SCS_CNT_INC(&ic->ic_scs, IEEE80211_SCS_CNT_TRIGGER);
+			ieee80211_scan_scs_sample(vap);
+		} else {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				"%s: not sampling - scan in progress\n", __func__);
+		}
+	} else {
+		SCSDBG(SCSLOG_NOTICE, "%s: not sampling - no VAPs in RUN state\n", __func__);
+	}
+
+next_work:
+	/* re-arm ourselves at the configured sampling interval */
+	schedule_delayed_work(&ic->ic_scs_sample_work, ic->ic_scs.scs_sample_intv * HZ);
+}
+
+/*
+ * SCS-initiated CSA completion: perform the actual channel change, then
+ * reset SCS statistics for the new channel.
+ * @arg: the ieee80211com, cast to unsigned long.
+ */
+static void ieee80211_scs_trigger_channel_switch(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+
+	ieee80211_finish_csa(arg);
+	ieee80211_scs_clean_stats(ic, IEEE80211_SCS_STATE_CHANNEL_SWITCHING, 0);
+}
+
+/*
+ * Return the CCA interference threshold (high or low variant per @is_high)
+ * converted from configured percent to the internal SCS scale.  DFS
+ * channels get an extra configurable margin; the percentage is capped at
+ * 100 before scaling.
+ */
+static __inline int
+ieee80211_scs_get_cca_intf_thrshld(struct ieee80211com *ic, uint8_t is_high)
+{
+	uint32_t thrshld = is_high ? ic->ic_scs.scs_cca_intf_hi_thrshld :
+			ic->ic_scs.scs_cca_intf_lo_thrshld;
+
+	if (ic->ic_curchan && (ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS)) {
+		thrshld += ic->ic_scs.scs_cca_intf_dfs_margin;
+	}
+
+	thrshld = MIN(thrshld, 100);
+	thrshld = thrshld * IEEE80211_SCS_CCA_INTF_SCALE / 100;
+
+	return thrshld;
+}
+
+/*
+ * Convert the configured CCA idle threshold from percent to the internal
+ * SCS interference scale.
+ */
+static __inline int
+ieee80211_scs_get_cca_idle_thrshld(struct ieee80211com *ic)
+{
+	return ic->ic_scs.scs_cca_idle_thrshld * IEEE80211_SCS_CCA_INTF_SCALE / 100;
+}
+
+/*
+ * Return 1 when the measured interference justifies a channel change.
+ * QHop (WDS) deployments use only the low CCA-interference threshold;
+ * otherwise a change needs low idle time plus either CCA interference
+ * above the high threshold, or above the low threshold combined with an
+ * elevated preamble error count.
+ */
+static int
+ieee80211_scs_is_interference_over_thresholds(struct ieee80211com *ic,
+		uint32_t cca_intf, uint32_t cca_idle, uint32_t pmbl_err)
+{
+	uint32_t cca_intf_high_thrshld = ieee80211_scs_get_cca_intf_thrshld(ic, 1);
+	uint32_t cca_intf_low_thrshld = ieee80211_scs_get_cca_intf_thrshld(ic, 0);
+	uint32_t cca_idle_thrshld = ieee80211_scs_get_cca_idle_thrshld(ic);
+	uint32_t pmbl_err_thrshld = ic->ic_scs.scs_pmbl_err_thrshld;
+
+	/* Currently we don't apply the thresholds of FAT and preamble error to QHop case */
+	if (ieee80211_wds_vap_exists(ic)) {
+		if (cca_intf >= cca_intf_low_thrshld) {
+			SCSDBG(SCSLOG_VERBOSE, "%s: [QHOP case:cca_intf > low thrshld] Trigger channel change\n", __func__);
+			return 1;
+		}
+	} else if (cca_idle < cca_idle_thrshld) {
+		if ((cca_intf > cca_intf_high_thrshld)
+			|| ((cca_intf > cca_intf_low_thrshld)
+			&& (pmbl_err > pmbl_err_thrshld))) {
+			SCSDBG(SCSLOG_VERBOSE, "%s: [cca_idle < thrshld, %s] - Trigger channel change\n", __func__,
+				((cca_intf > cca_intf_high_thrshld) ? "cca_intf > high thrshld" :
+				"cca_intf > low thrshld, pmbl_err > thrshld"));
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Aggregate all SCS channel-change triggers into a reason bitmask
+ * (IEEE80211_SCS_*_CC).  Triggers: STA-reported measurements, Broadcom STA
+ * heuristics, own CCA/preamble thresholds (AP), unconditional report in
+ * STA mode, and attenuation increase.  The result is then vetoed entirely
+ * (returns 0) by radar test mode, basic WDS links, CoC idle power state,
+ * or SCS being disabled.
+ */
+static int
+ieee80211_is_cc_required(struct ieee80211com *ic, uint32_t compound_cca_intf,
+	uint32_t cca_idle_smthed, uint32_t pmbl_err)
+{
+	int res = 0;
+	struct ap_state *as;
+
+	if (ic->ic_sta_cc) {
+		SCSDBG(SCSLOG_NOTICE, "STA reported SCS measurements\n");
+		res |= IEEE80211_SCS_STA_CCA_REQ_CC;
+	}
+
+	if (ic->ic_sta_cc_brcm) {
+		SCSDBG(SCSLOG_NOTICE, "brcm STA info need channel change\n");
+		res |= IEEE80211_SCS_BRCM_STA_TRIGGER_CC;
+	}
+
+	if (ic->ic_opmode == IEEE80211_M_STA) {
+		/* For STA mode, always send cca report to AP */
+		res |= IEEE80211_SCS_SELF_CCA_CC;
+	} else if (ieee80211_scs_is_interference_over_thresholds(ic,
+			compound_cca_intf, cca_idle_smthed, pmbl_err)) {
+		SCSDBG(SCSLOG_NOTICE, "Self CCA requested channel change,"
+				" compound_cca_intf=%u, cca_idle_smth=%u, pmbl_err=%u\n",
+				compound_cca_intf, cca_idle_smthed, pmbl_err);
+		res |= IEEE80211_SCS_SELF_CCA_CC;
+	}
+
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ic->ic_scs.scs_atten_sw_enable) {
+		as = ic->ic_scan->ss_scs_priv;
+		if (SCS_ATTEN_VALID(as->as_sta_atten_expect) &&
+			SCS_ATTEN_VALID(as->as_sta_atten_max) &&
+			(as->as_sta_atten_max >= (as->as_sta_atten_expect + ic->ic_scs.scs_thrshld_atten_inc))) {
+			SCSDBG(SCSLOG_NOTICE, "raw attenuation increased, need channel change, curr=%d, expect=%d\n",
+					      as->as_sta_atten_max, as->as_sta_atten_expect);
+			res |= IEEE80211_SCS_ATTEN_INC_CC;
+		}
+	}
+
+	if ((res) && (ic->ic_radar_test_mode_enabled != NULL) && ic->ic_radar_test_mode_enabled()) {
+		SCSDBG(SCSLOG_NOTICE, "channel change is disabled under radar test mode\n");
+		res = 0;
+	}
+
+	/* Don't switch channel under basic WDS mode */
+	/* But channel switch is now possible on the WDS link if it is an MBS */
+	if (res) {
+		struct ieee80211vap *vap;
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			if (IEEE80211_VAP_WDS_BASIC(vap)) {
+				SCSDBG(SCSLOG_NOTICE, "channel change is disabled under basic WDS mode\n");
+				res = 0;
+				break;
+			}
+		}
+	}
+
+	if (res && (ic->ic_opmode == IEEE80211_M_HOSTAP) &&
+			(ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] >= BOARD_PM_LEVEL_DUTY)) {
+		SCSDBG(SCSLOG_NOTICE, "channel change is disabled in CoC idle state\n");
+		res = 0;
+	}
+
+	if (res && !ic->ic_scs.scs_enable) {
+		SCSDBG(SCSLOG_NOTICE, "channel change is disabled since SCS is disabled\n");
+		res = 0;
+	}
+
+	return res;
+}
+
+/*
+ * Dump SCS channel ranking state to the kernel log (AP mode only).
+ * @show_input: print the raw ranking inputs (per-channel interference,
+ * preamble errors, attenuation, tx/rx airtime).
+ * @show_result: print the resulting per-channel ranking table.
+ */
+void ieee80211_scs_show_ranking_stats(struct ieee80211com *ic, int show_input, int show_result)
+{
+	struct ap_state *as;
+	int i;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		printk("SCS ranking state is only available in AP mode\n");
+		return;
+	}
+
+	as = ic->ic_scan->ss_scs_priv;
+
+	if (show_input) {
+		printk("SCS: ranking parameters\n");
+		for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+			if (isclr(ic->ic_chan_active, i) ||
+					!findchannel(ic, i, ic->ic_des_mode)) {
+				continue;
+			}
+			if (as->as_cca_intf[i] != SCS_CCA_INTF_INVALID) {
+				printk("chan %d: cca_intf=%u, pmbl=%u %u\n",
+					i, as->as_cca_intf[i], as->as_pmbl_err_ap[i],
+					as->as_pmbl_err_sta[i]);
+			}
+		}
+
+		printk("SCS: atten info: num=%d, sum=%d, min=%d, max=%d, avg=%d, expect=%d\n",
+			as->as_sta_atten_num,
+			as->as_sta_atten_sum,
+			as->as_sta_atten_min,
+			as->as_sta_atten_max,
+			as->as_sta_atten_num ? (as->as_sta_atten_sum / as->as_sta_atten_num) : 0,
+			as->as_sta_atten_expect);
+
+		printk("SCS: tx_ms=%u, rx_ms=%u\n", as->as_tx_ms, as->as_rx_ms);
+	}
+
+	if (show_result) {
+		int isdfs;
+		int txpower;
+		struct ieee80211_channel *chan;
+
+		printk("SCS: ranking table, ranking_cnt=%u\n", as->as_scs_ranking_cnt);
+		printk("chan dfs xped txpower cca_intf     metric    pmbl_ap   pmbl_sta\n");
+		for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
+			chan = ieee80211_find_channel_by_ieee(ic, i);
+			if (chan == NULL) {
+				continue;
+			}
+
+			isdfs = !!(chan->ic_flags & IEEE80211_CHAN_DFS);
+			txpower = chan->ic_maxpower;
+
+			printk("%4d %3d %4d %7d %8u %10d %10d %10d\n",
+					i,
+					isdfs,
+					!!isset(as->as_chan_xped, i),
+					txpower,
+					((as->as_cca_intf[i] == SCS_CCA_INTF_INVALID) ? 0 : as->as_cca_intf[i]),
+					as->as_chanmetric[i],
+					as->as_pmbl_err_ap[i], as->as_pmbl_err_sta[i]);
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_scs_show_ranking_stats);
+
+/*
+ * Dump the initial (auto-channel) scan ranking table to the kernel log.
+ * AP mode only; requires auto channel selection to have populated ss_priv.
+ */
+void ieee80211_show_initial_ranking_stats(struct ieee80211com *ic)
+{
+	struct ap_state *as;
+	int i;
+	int isdfs;
+	int txpower;
+	struct ieee80211_channel *chan;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		printk("Initial scan ranking state is only available in AP mode\n");
+		return;
+	}
+
+	as = ic->ic_scan->ss_priv;
+	if (as == NULL) {
+		printk("Initial scan ranking state is not available because auto channel is disabled\n");
+		return;
+	}
+
+	printk("AP: initial ranking table\n");
+	printk("chan dfs txpower  numbeacon        cci        aci   cca_intf   pmbl_err     metric\n");
+	for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
+		chan = ieee80211_find_channel_by_ieee(ic, i);
+		if (chan == NULL) {
+			continue;
+		}
+
+		isdfs = !!(chan->ic_flags & IEEE80211_CHAN_DFS);
+		txpower = chan->ic_maxpower;
+
+		printk("%4d %3d %7d %10u %10d %10d %10d %10d %10d\n",
+				i,
+				isdfs,
+				txpower,
+				as->as_numbeacons[i],
+				as->as_cci[i],
+				as->as_aci[i],
+				as->as_cca_intf[i],
+				as->as_pmbl_err_ap[i],
+				as->as_chanmetric[i]);
+	}
+}
+EXPORT_SYMBOL(ieee80211_show_initial_ranking_stats);
+
+/*
+ * A node contributes to SCS statistics only when it is a fully
+ * associated, authorized station that is not blacklisted.
+ */
+static __inline int
+ieee80211_scs_node_is_valid(struct ieee80211_node *ni)
+{
+	if (!ni->ni_associd)
+		return 0;
+	if (!ieee80211_node_is_authorized(ni))
+		return 0;
+	return !ieee80211_blacklist_check(ni);
+}
+
+/*
+ * Per-node SCS statistics cleanup callback, passed to ic_iterate_nodes().
+ * @s carries the clean level (an IEEE80211_SCS_STATE_* value) through the
+ * void-pointer cookie; each "level <=" branch below adds a deeper layer
+ * of state to wipe.
+ */
+void ieee80211_scs_node_clean_stats(void *s, struct ieee80211_node *ni)
+{
+	/* NOTE(review): pointer truncated to 32 bits -- fine on a 32-bit
+	 * target, not portable to 64-bit; confirm platform assumptions */
+	int level = (uint32_t)s;
+	struct ieee80211com *ic = ni->ni_ic;
+
+	/* invalid nodes are only cleaned on an INIT-level wipe */
+	if (!ieee80211_scs_node_is_valid(ni) && (level != IEEE80211_SCS_STATE_INIT)) {
+		return;
+	}
+
+	SCSDBG(SCSLOG_VERBOSE, "node 0x%x state clean with level %d\n", ni->ni_associd, level);
+
+	/* most recent per-interval samples */
+	if (level <= IEEE80211_SCS_STATE_PERIOD_CLEAN) {
+		ni->ni_recent_cca_intf = SCS_CCA_INTF_INVALID;
+		ni->ni_recent_sp_fail = 0;
+		ni->ni_recent_lp_fail = 0;
+		ni->ni_recent_tdls_tx_time = 0;
+		ni->ni_recent_tdls_rx_time = 0;
+		ni->ni_recent_others_time = 0;
+	}
+
+	/* smoothed values that become meaningless after a measurement change */
+	if (level <= IEEE80211_SCS_STATE_MEASUREMENT_CHANGE_CLEAN) {
+		/* set to -1 helps to discard first report after assoc or channel switch */
+		ni->ni_recent_rxglitch_trig_consecut = -1;
+		ni->ni_recent_rxglitch = 0;
+		ni->ni_recent_cca_intf_smthed = 0;
+		ni->ni_others_rx_time_smthed = 0;
+		ni->ni_others_tx_time_smthed = 0;
+		ni->ni_recent_others_time_smth = 0;
+		ni->ni_tdls_tx_time_smthed = 0;
+		ni->ni_tdls_rx_time_smthed = 0;
+	}
+
+	/* full reset: forget the learned attenuation as well */
+	if (level <= IEEE80211_SCS_STATE_RESET) {
+		ni->ni_atten_smoothed = SCS_ATTEN_UNINITED;
+	}
+}
+
+/*
+ * Invalidate every cached TDLS traffic statistic so stale link data
+ * cannot influence the next SCS evaluation round.  The entries stay
+ * allocated; only their payload is cleared.
+ */
+void ieee80211_scs_clean_tdls_stats_list(struct ieee80211com *ic)
+{
+	struct ieee80211_tdls_scs_entry *se;
+	unsigned long flags;
+	int hash;
+
+	SCSDBG(SCSLOG_NOTICE, "SCS: clean all of tdls stats\n");
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	for (hash = 0; hash < IEEE80211_NODE_HASHSIZE; hash++) {
+		/* LIST_FOREACH never yields a NULL element */
+		LIST_FOREACH(se, &ic->ic_scs.scs_tdls_list[hash], entry) {
+			se->stats.is_latest = 0;
+			se->stats.tx_time = 0;
+		}
+	}
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+}
+
+/*
+ * Stamp every per-channel metric with the current jiffies so age-based
+ * consumers treat the whole table as freshly updated.
+ */
+void
+ieee80211_scs_metric_update_timestamps(struct ap_state *as)
+{
+	int idx = 0;
+
+	while (idx < IEEE80211_CHAN_MAX) {
+		as->as_chanmetric_timestamp[idx] = jiffies;
+		idx++;
+	}
+}
+EXPORT_SYMBOL(ieee80211_scs_metric_update_timestamps);
+
+/*
+ * Clean SCS state with different clean levels.
+ * Valid levels are IEEE80211_SCS_STATE_XXXX; each "level <=" branch below
+ * adds another layer of state to wipe, so numerically lower levels clean
+ * progressively more.
+ * @clear_dfs_reentry: only effective at level IEEE80211_SCS_STATE_PERIOD_CLEAN.
+ * Always returns 0.
+ */
+int ieee80211_scs_clean_stats(struct ieee80211com *ic, uint32_t level, int clear_dfs_reentry)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	int i;
+
+	SCSDBG(SCSLOG_INFO, "clean stats with level %u\n", level);
+
+	/* per-interval counters: channel-change requests and TDLS samples */
+	if (level <= IEEE80211_SCS_STATE_PERIOD_CLEAN) {
+		ic->ic_sta_cc = 0;
+		ic->ic_sta_cc_brcm = 0;
+		if (clear_dfs_reentry) {
+			as->as_dfs_reentry_cnt = 0;
+			as->as_dfs_reentry_level = 0;
+			SCSDBG(SCSLOG_INFO, "dfs reentry state cleared\n");
+		}
+		ieee80211_scs_clean_tdls_stats_list(ic);
+	}
+
+	/* smoothed tx/rx/cca values invalidated by a measurement change */
+	if (level <= IEEE80211_SCS_STATE_MEASUREMENT_CHANGE_CLEAN) {
+		as->as_tx_ms_smth = 0;
+		as->as_rx_ms_smth = 0;
+		as->as_cca_intf_smth = 0;
+	}
+
+	/* smoothed co-channel samples tied to the (old) operating channel */
+	if (level <= IEEE80211_SCS_STATE_CHANNEL_SWITCHING) {
+		ic->ic_scs.scs_cca_intf_smthed = 0;
+		ic->ic_scs.scs_sp_err_smthed = 0;
+		ic->ic_scs.scs_lp_err_smthed = 0;
+		ic->ic_scs.scs_cca_idle_smthed = 0;
+	}
+
+	/* full reset: wipe the whole ranking database */
+	if (level <= IEEE80211_SCS_STATE_RESET) {
+		SCSDBG(SCSLOG_NOTICE, "reset ranking stats\n");
+		ic->ic_scs.scs_brcm_rxglitch_thrshlds = (struct brcm_rxglitch_thrshld_pair*)brcm_rxglitch_thrshlds;
+		as->as_scs_ranking_cnt = 0;
+
+		for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
+			as->as_cca_intf[i] = SCS_CCA_INTF_INVALID;
+			as->as_cca_intf_jiffies[i] = 0;
+			as->as_pmbl_err_ap[i] = 0;
+			as->as_pmbl_err_sta[i] = 0;
+		}
+
+		as->as_sta_atten_num = 0;
+		as->as_sta_atten_sum = 0;
+		as->as_sta_atten_min = SCS_ATTEN_UNINITED;
+		as->as_sta_atten_max = SCS_ATTEN_UNINITED;
+		as->as_sta_atten_expect = SCS_ATTEN_UNINITED;
+
+		as->as_dfs_reentry_cnt = 0;
+		as->as_dfs_reentry_level = 0;
+
+		as->as_tx_ms = 0;
+		as->as_rx_ms = 0;
+
+		memset(as->as_chan_xped, 0, sizeof(as->as_chan_xped));
+
+		memset(as->as_chanmetric, 0, sizeof(as->as_chanmetric));
+		ieee80211_scs_metric_update_timestamps(as);
+		memset(as->as_chanmetric_pref, 0, sizeof(as->as_chanmetric_pref));
+
+		ic->ic_scs.scs_burst_is_paused = 0;
+		memset(ic->ic_scs.scs_burst_queue, 0 , sizeof(ic->ic_scs.scs_burst_queue));
+	}
+
+	/*
+	 * No need to do clean node in level IEEE80211_SCS_STATE_PERIOD_CLEAN
+	 * because it is associated with jiffies. So that we don't need to iterate
+	 * all node every scs interval.
+	 */
+	if ((level <= IEEE80211_SCS_STATE_MEASUREMENT_CHANGE_CLEAN) &&
+		(ic->ic_opmode == IEEE80211_M_HOSTAP)) {
+			ic->ic_iterate_nodes(&ic->ic_sta, ieee80211_scs_node_clean_stats,
+				(void *)level, 1);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_scs_clean_stats);
+
+/*
+ * Build an skb from a user-space buffer and inject it into the TX path
+ * of the given VAP as a background-AC L2 frame.
+ *
+ * @pkt is a user-space pointer; the payload is silently truncated to
+ * the RX buffer size, and the frame is silently dropped on allocation
+ * or copy failure.
+ */
+static void ieee80211_send_usr_l2_pkt(struct ieee80211vap *vap, uint8_t *pkt, uint32_t pkt_len)
+{
+	struct sk_buff *skb = dev_alloc_skb(qtn_rx_buf_size());
+
+	if (!skb)
+		return;
+
+	/* clamp to the allocated buffer so the copy cannot overrun it */
+	if (pkt_len > qtn_rx_buf_size())
+		pkt_len = qtn_rx_buf_size();
+
+	if (copy_from_user(skb->data, pkt, pkt_len)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	/* bug fix: use skb_put() instead of writing skb->len directly --
+	 * the direct assignment left skb->tail stale */
+	skb_put(skb, pkt_len);
+	skb->dest_port = 0;
+	skb->dev = vap->iv_dev;
+	skb->priority = QDRV_SCH_MODULE_ID | QDRV_BAND_AC_BK;
+	dev_queue_xmit(skb);
+}
+
+/*
+ * Process an interference report (RSSI plus rx-glitch count) from an
+ * associated Broadcom STA and, when the glitch rate is persistently
+ * above an RSSI/power-dependent threshold, convert it into a CCA
+ * interference estimate stored on the node.
+ *
+ * A single high sample is not trusted: the trigger must either repeat
+ * in consecutive reports, or be followed by a report still above a
+ * fraction (BRCM_RXGLITCH_NEXT_TRIG_THRSHLD %) of the threshold.
+ */
+void ieee80211_scs_brcm_info_report(struct ieee80211com *ic, struct ieee80211_node *ni, int32_t rssi, uint32_t rxglitch)
+{
+	int i;
+	uint32_t glitch_thrshld = 0;
+	int ratio;
+	int pwr;
+	uint32_t cca_intf;
+	uint32_t trig_rxglitch;
+
+	/* Currently we don't support BRCM 11AC STA's CC request*/
+	if (IEEE80211_NODE_IS_VHT(ni)) {
+		SCSDBG(SCSLOG_NOTICE, "Ignore BRCM 11AC STA's report\n");
+		return;
+	}
+
+	if (!ic->ic_curchan)
+		return;
+
+	if (!ic->ic_scs.scs_stats_on)
+		return;
+
+	/* discard implausibly large glitch counts */
+	if (rxglitch >= BRCM_RXGLITCH_MAX_PER_INTVL) {
+		SCSDBG(SCSLOG_NOTICE, "brcm node 0x%x "MACSTR" rssi=%d, rxglitch=%u(discard)\n",
+			       ni->ni_associd, MAC2STR(ni->ni_macaddr), rssi, rxglitch);
+		return;
+	} else {
+		SCSDBG(SCSLOG_INFO, "brcm node 0x%x "MACSTR" rssi=%d, rxglitch=%u\n",
+			       ni->ni_associd, MAC2STR(ni->ni_macaddr), rssi, rxglitch);
+	}
+
+	/* pick the threshold row for the channel's power class */
+	if (ic->ic_curchan->ic_maxpower >= IEEE80211_SCS_CHAN_POWER_CUTPOINT) {
+		pwr = BRCM_RXGLITH_THRSHLD_HIPWR;
+	} else {
+		pwr = BRCM_RXGLITH_THRSHLD_LOWPWR;
+	}
+
+	/* first RSSI band the report exceeds determines the glitch
+	 * threshold; it is then scaled by the configured percentage */
+	for (i = 0; i < BRCM_RXGLITH_THRSHLD_STEP; i++) {
+		if (rssi > brcm_rxglitch_thrshlds[pwr][i].rssi) {
+			glitch_thrshld = brcm_rxglitch_thrshlds[pwr][i].rxglitch;
+			glitch_thrshld = glitch_thrshld * ic->ic_scs.scs_brcm_rxglitch_thrshlds_scale / 100;
+			break;
+		}
+	}
+	if (!glitch_thrshld)
+		return;
+
+	/* require two qualifying reports in a row before trusting the sample */
+	trig_rxglitch = 0;
+	if (rxglitch >= glitch_thrshld){
+		if (ni->ni_recent_rxglitch_trig_consecut > 0) {
+			trig_rxglitch = rxglitch;
+			SCSDBG(SCSLOG_NOTICE, "brcm node 0x%x is triggered consecutively\n", ni->ni_associd);
+		} else {
+			SCSDBG(SCSLOG_NOTICE, "brcm node 0x%x is not triggered in last report, wait for next\n", ni->ni_associd);
+		}
+		ni->ni_recent_rxglitch_trig_consecut++;
+	} else if ((ni->ni_recent_rxglitch_trig_consecut > 0) &&
+		(rxglitch >= (BRCM_RXGLITCH_NEXT_TRIG_THRSHLD * glitch_thrshld / 100))) {
+		/* previous report triggered and this one is still elevated:
+		 * validate using the previous sample */
+		SCSDBG(SCSLOG_NOTICE, "brcm node 0x%x is triggered in last report, and validated\n",
+				ni->ni_associd);
+		trig_rxglitch = ni->ni_recent_rxglitch;
+		ni->ni_recent_rxglitch_trig_consecut = 0;
+	} else {
+		ni->ni_recent_rxglitch_trig_consecut = 0;
+	}
+	ni->ni_recent_rxglitch = rxglitch;
+
+	/* map the ratio over threshold onto the CCA interference scale,
+	 * capped at 100% of the scale */
+	if (trig_rxglitch) {
+		ratio = trig_rxglitch * 100 / glitch_thrshld;
+		cca_intf = ratio * ic->ic_scs.scs_cca_intf_lo_thrshld * IEEE80211_SCS_CCA_INTF_SCALE / 10000;
+		cca_intf = MIN(cca_intf, IEEE80211_SCS_CCA_INTF_SCALE);
+		SCSDBG(SCSLOG_NOTICE, "brcm node 0x%x report high rxglitch %u > %u, "
+					"with consecutive count %u, mapped to cca_intf %u\n",
+					ni->ni_associd, trig_rxglitch, glitch_thrshld,
+					ni->ni_recent_rxglitch_trig_consecut,
+					cca_intf);
+		ni->ni_recent_cca_intf = cca_intf;
+		ni->ni_recent_cca_intf_jiffies = jiffies;
+		ic->ic_sta_cc_brcm = 1;
+	}
+}
+
+/*
+ * Fold one CCA interference measurement into the SCS ranking database
+ * for the channel in @params.  AP mode only.
+ *
+ * @iscochan: measurement was taken on the operating channel; the table
+ *            then keeps the maximum over AP and STA reports.  Off-channel
+ *            samples are instead merged by exponential smoothing.
+ * @ni:       reporting station, or NULL when the sample is the AP's own.
+ * A cca_intf of SCS_CCA_INTF_INVALID resets the channel's entry.
+ */
+void ieee80211_scs_update_cca_intf(struct ieee80211com *ic,
+				struct scs_chan_intf_params *params,
+				uint8_t iscochan,
+				struct ieee80211_node *ni)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	uint16_t cca_intf_scaled;
+	uint16_t old_cca_intf = 0;
+	uint8_t smth_fctr = 0;
+	uint32_t old_pmbl_ap = 0;
+	uint32_t old_pmbl_sta = 0;
+	uint32_t old_cca_pri = 0;
+	uint32_t old_cca_sec = 0;
+	uint32_t old_cca_sec40 = 0;
+	uint32_t chan = params->chan->ic_ieee;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		return;
+	}
+
+	if (!is_channel_valid(chan)) {
+		return;
+	}
+
+	/* reset entry */
+	if (params->cca_intf == SCS_CCA_INTF_INVALID) {
+		SCSDBG(SCSLOG_INFO, "clean chan %u cca_intf in ranking table\n", chan);
+		as->as_cca_intf[chan] = SCS_CCA_INTF_INVALID;
+		as->as_cca_intf_jiffies[chan] = 0;
+		as->as_pmbl_err_ap[chan] = 0;
+		as->as_pmbl_err_sta[chan] = 0;
+		as->as_cca_intf_pri[chan] = 0;
+		as->as_cca_intf_sec[chan] = 0;
+		as->as_cca_intf_sec40[chan] = 0;
+		return;
+	}
+
+	/* always scale cca_intf so that it is adaptive to cca duration change and off-chan sample */
+	cca_intf_scaled = params->cca_intf * IEEE80211_SCS_CCA_INTF_SCALE / params->cca_dur;
+	if (params->cca_dur != IEEE80211_SCS_CCA_INTF_SCALE)
+		SCSDBG(SCSLOG_NOTICE, "scale cca_intf from %u to %u\n", params->cca_intf, cca_intf_scaled);
+
+	if (ni) {
+		SCSDBG(SCSLOG_NOTICE, "%s - sta %x add cca intf %u to chan %u, sta max pmbl=%u\n",
+				iscochan ? "cochan" : "offchan",
+				ni->ni_associd, cca_intf_scaled, chan, params->pmbl_err);
+	} else {
+		SCSDBG(SCSLOG_NOTICE, "%s - self add cca intf %u to chan %u, pmbl=%u\n",
+				iscochan ? "cochan" : "offchan",
+				cca_intf_scaled, chan, params->pmbl_err);
+	}
+
+	if (iscochan) {
+		/* current channel's cca_intf use maximum of AP and STAs */
+		if ((cca_intf_scaled > as->as_cca_intf[chan]) ||
+				(as->as_cca_intf[chan] == SCS_CCA_INTF_INVALID)) {
+			as->as_cca_intf[chan] = cca_intf_scaled;
+			as->as_cca_intf_jiffies[chan] = jiffies;
+		}
+		if (ni != NULL) {
+			/* STA report: keep the worst preamble-error count seen */
+			as->as_pmbl_err_sta[chan] = MAX(as->as_pmbl_err_sta[chan], params->pmbl_err);
+		} else {
+			/* AP's own sample also carries the pri/sec/sec40 breakdown */
+			as->as_pmbl_err_ap[chan] = params->pmbl_err;
+			as->as_cca_intf_pri[chan] = params->cca_pri;
+			as->as_cca_intf_sec[chan] = params->cca_sec;
+			as->as_cca_intf_sec40[chan] = params->cca_sec40;
+		}
+	} else {
+		/* update off-channel sampling stats with exponential smoothing */
+		if (as->as_cca_intf[chan] == SCS_CCA_INTF_INVALID) {
+			/* first sample for this channel: take it as-is */
+			as->as_cca_intf[chan] = cca_intf_scaled;
+			/* only have AP side off-channel sampling now */
+			as->as_pmbl_err_ap[chan] = params->pmbl_err;
+			as->as_cca_intf_pri[chan] = params->cca_pri;
+			as->as_cca_intf_sec[chan] = params->cca_sec;
+			as->as_cca_intf_sec40[chan] = params->cca_sec40;
+		} else {
+			/* smoothing factor differs for channels already
+			 * experienced (xped) vs. never used */
+			smth_fctr = (isclr(as->as_chan_xped, chan)) ?
+				ic->ic_scs.scs_cca_intf_smth_fctr[SCS_CCA_INTF_SMTH_FCTR_NOXP] :
+				ic->ic_scs.scs_cca_intf_smth_fctr[SCS_CCA_INTF_SMTH_FCTR_XPED];
+			old_cca_intf = as->as_cca_intf[chan];
+			as->as_cca_intf[chan] =  IEEE80211_SCS_SMOOTH(old_cca_intf, cca_intf_scaled,
+					smth_fctr);
+			/* only have AP side off-channel sampling now */
+			old_pmbl_ap = as->as_pmbl_err_ap[chan];
+			as->as_pmbl_err_ap[chan] = IEEE80211_SCS_SMOOTH(old_pmbl_ap, params->pmbl_err,
+					smth_fctr);
+			/* let sta side pmbl smoothing out */
+			old_pmbl_sta = as->as_pmbl_err_sta[chan];
+			as->as_pmbl_err_sta[chan] = IEEE80211_SCS_SMOOTH(old_pmbl_sta, 0,
+					smth_fctr);
+
+			old_cca_pri = as->as_cca_intf_pri[chan];
+			as->as_cca_intf_pri[chan] = IEEE80211_SCS_SMOOTH(old_cca_pri, params->cca_pri,
+					smth_fctr);
+
+			old_cca_sec = as->as_cca_intf_sec[chan];
+			as->as_cca_intf_sec[chan] = IEEE80211_SCS_SMOOTH(old_cca_sec, params->cca_sec,
+					smth_fctr);
+
+			old_cca_sec40 = as->as_cca_intf_sec40[chan];
+			as->as_cca_intf_sec40[chan] = IEEE80211_SCS_SMOOTH(old_cca_sec40, params->cca_sec40,
+					smth_fctr);
+		}
+		/* mark channel entry updated so it won't be aged out */
+		as->as_cca_intf_jiffies[chan] = jiffies;
+		SCSDBG(SCSLOG_INFO, "OC: chan=%u, smth_fctr=%u, cca_intf: prev=%u, "
+				"curr=%u, smthed=%u; pmbl: prev=%u,%u, curr=%u, smthed=%u,%u; "
+				"cca_pri: prev=%u, curr=%u, smthed=%u; "
+				"cca_sec: prev=%u, curr=%u, smthed=%u; "
+				"cca_sec40: prev=%u, curr=%u, smthed=%u\n",
+				chan, smth_fctr,
+				old_cca_intf, cca_intf_scaled, as->as_cca_intf[chan],
+				old_pmbl_ap, old_pmbl_sta, params->pmbl_err,
+				as->as_pmbl_err_ap[chan], as->as_pmbl_err_sta[chan],
+				old_cca_pri, params->cca_pri, as->as_cca_intf_pri[chan],
+				old_cca_sec, params->cca_sec, as->as_cca_intf_sec[chan],
+				old_cca_sec40, params->cca_sec40, as->as_cca_intf_sec40[chan]);
+	}
+}
+
+/*
+ * Apply one interference measurement to the measured channel and, for
+ * HT40/HT80 widths, to its secondary / secondary-40 channels as well.
+ *
+ * Off-channel samples that overlap the current operating channel (on
+ * the primary or any secondary) are dropped entirely, since the AP's
+ * own traffic would pollute the measurement.
+ * NOTE: @params->chan is overwritten while walking the secondaries.
+ */
+static void ieee80211_scs_update_chans_cca_intf(struct ieee80211com *ic,
+			struct scs_chan_intf_params *params,
+			uint32_t update_mode,
+			struct ieee80211_node *ni)
+{
+	uint32_t chan_bw = params->chan_bw;
+	uint32_t chan_sec = 0;
+	uint32_t chan_sec40u = 0;
+	uint32_t chan_sec40l = 0;
+	uint8_t isoffchan = (update_mode == IEEE80211_SCS_OFFCHAN);
+	uint8_t iscochan = (update_mode == IEEE80211_SCS_COCHAN);
+	struct ieee80211_channel *chan;
+
+	SCSDBG(SCSLOG_INFO, "Update chans cca intf -- chan:%u bw:%u intf:%u dur:%u pmbl:%u "
+				"cca_pri:%u cca_sec:%u cca_sec40:%u\n",
+				params->chan->ic_ieee,
+				params->chan_bw,
+				params->cca_intf,
+				params->cca_dur,
+				params->pmbl_err,
+				params->cca_pri,
+				params->cca_sec,
+				params->cca_sec40);
+
+	/* an "off-channel" sample of the current channel is contradictory */
+	if (isoffchan && params->chan->ic_ieee == ic->ic_curchan->ic_ieee)
+		return;
+
+	/* resolve the secondaries and reject overlap with the current channel */
+	if (chan_bw >= BW_HT40) {
+		chan_sec = ieee80211_find_sec_chan(params->chan);
+		if (isoffchan && chan_sec == ic->ic_curchan->ic_ieee)
+			return;
+		if (chan_bw >= BW_HT80) {
+			chan_sec40u = ieee80211_find_sec40u_chan(params->chan);
+			chan_sec40l = ieee80211_find_sec40l_chan(params->chan);
+			if (isoffchan && (chan_sec40u == ic->ic_curchan->ic_ieee ||
+					chan_sec40l == ic->ic_curchan->ic_ieee))
+				return;
+		}
+	}
+
+	/* primary first, then each existing secondary of the wide channel */
+	ieee80211_scs_update_cca_intf(ic, params, iscochan, ni);
+	if (chan_bw >= BW_HT40) {
+		if (chan_sec) {
+			chan = ieee80211_find_channel_by_ieee(ic, chan_sec);
+			if (is_ieee80211_chan_valid(chan)) {
+				params->chan = chan;
+				ieee80211_scs_update_cca_intf(ic, params, iscochan, ni);
+			}
+		}
+		if (chan_bw >= BW_HT80) {
+			if (chan_sec40u) {
+				chan = ieee80211_find_channel_by_ieee(ic, chan_sec40u);
+				if (is_ieee80211_chan_valid(chan)) {
+					params->chan = chan;
+					ieee80211_scs_update_cca_intf(ic, params, iscochan, ni);
+				}
+			}
+			if (chan_sec40l) {
+				chan = ieee80211_find_channel_by_ieee(ic, chan_sec40l);
+				if (is_ieee80211_chan_valid(chan)) {
+					params->chan = chan;
+					ieee80211_scs_update_cca_intf(ic, params, iscochan, ni);
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Per-node iterator callback: derive the node's path attenuation
+ * (tx power minus reported RSSI), smooth it exponentially, and fold
+ * the smoothed value into the ranking database's min/max/sum summary.
+ *
+ * This function should NOT be called right after the txpower change because the rssi may come
+ * from the original txpower. Make sure txpower and rssi is match.
+ */
+void ieee80211_scs_node_update_rssi(void *s, struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = s;
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	int32_t rssi = SCS_RSSI_UNINITED;
+	int32_t prev_atten;
+	uint8_t smth_fctr;
+	int32_t txpower = ic->ic_curchan->ic_maxpower;
+	int32_t atten = SCS_ATTEN_UNINITED;
+
+	if (!ieee80211_scs_node_is_valid(ni)) {
+		return;
+	}
+
+	/* get recent and smooth */
+	rssi = ic->ic_rssi(ni);
+	if (SCS_RSSI_VALID(rssi)) {
+		/* Quantenna peers report their own tx power; prefer it */
+		if (ieee80211_node_is_qtn(ni) && ni->ni_txpower) {
+			txpower = ni->ni_txpower;
+			SCSDBG(SCSLOG_VERBOSE, "qtn node %#x, using TX power %d\n",
+					ni->ni_associd, txpower);
+		}
+
+		atten = txpower - rssi / SCS_RSSI_PRECISION_RECIP;
+		if (ni->ni_atten_smoothed == SCS_ATTEN_UNINITED) {
+			/* first valid sample seeds the smoothed value */
+			ni->ni_atten_smoothed = atten;
+			SCSDBG(SCSLOG_INFO, "node 0x%x init rssi=%d, atten=%d\n", ni->ni_associd,
+				       rssi, ni->ni_atten_smoothed);
+		} else {
+			/* exponential smooth */
+			prev_atten = ni->ni_atten_smoothed;
+			smth_fctr = (atten <= prev_atten) ? ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_UP]
+							: ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_DOWN];
+			ni->ni_atten_smoothed = IEEE80211_SCS_SMOOTH(prev_atten, atten, smth_fctr);
+			SCSDBG(SCSLOG_VERBOSE, "node 0x%x prev_atten=%d, curr_rssi=%d, curr_atten=%d, "
+					"smooth_factor=%u, smoothed atten=%d\n",
+					ni->ni_associd, prev_atten, rssi, atten,
+					smth_fctr, ni->ni_atten_smoothed);
+		}
+	}
+
+	/* report smoothed one to ranking database */
+	atten = ni->ni_atten_smoothed;
+	if (SCS_ATTEN_VALID(atten)) {
+		as->as_sta_atten_num++;
+		as->as_sta_atten_sum += atten;
+		if ((as->as_sta_atten_min == SCS_ATTEN_UNINITED) || (atten < as->as_sta_atten_min)) {
+			as->as_sta_atten_min = atten;
+		}
+		if ((as->as_sta_atten_max == SCS_ATTEN_UNINITED) || (atten > as->as_sta_atten_max)) {
+			as->as_sta_atten_max = atten;
+		}
+	}
+}
+
+/*
+ * Rebuild the station-attenuation summary (count/sum/min/max) in the
+ * SCS ranking database by letting every node report itself through
+ * ieee80211_scs_node_update_rssi().
+ */
+void ieee80211_scs_collect_node_atten(struct ieee80211com *ic)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+
+	/* start from a clean summary; the iterator repopulates it */
+	as->as_sta_atten_min = SCS_ATTEN_UNINITED;
+	as->as_sta_atten_max = SCS_ATTEN_UNINITED;
+	as->as_sta_atten_num = 0;
+	as->as_sta_atten_sum = 0;
+
+	ic->ic_iterate_nodes(&ic->ic_sta, ieee80211_scs_node_update_rssi, (void *)ic, 1);
+
+	SCSDBG(SCSLOG_INFO, "atten info: num=%d, sum=%d, min=%d, max=%d, avg=%d\n",
+			as->as_sta_atten_num, as->as_sta_atten_sum,
+			as->as_sta_atten_min, as->as_sta_atten_max,
+			as->as_sta_atten_num ? (as->as_sta_atten_sum / as->as_sta_atten_num) : 0);
+}
+
+/* Thresholds (ms) below which no compensation is applied, and the step
+ * size (ms) used to index the compensation tables below. */
+#define SCS_MIN_TX_TIME_FOR_COMP	10 /* ms */
+#define SCS_MIN_RX_TIME_FOR_COMP	10 /* ms */
+#define SCS_MIN_TDLS_TIME_FOR_COMP	10 /* ms */
+#define SCS_TX_TIME_COMP_STEP		50 /* ms */
+#define SCS_RX_TIME_COMP_STEP		50 /* ms */
+#define SCS_TDLS_TIME_COMP_STEP		50 /* ms */
+
+/* table selector for ieee80211_scs_set_time_compensation() */
+#define SCS_RX_COMPENSTATION		0
+#define SCS_TX_COMPENSTATION		1
+#define SCS_TDLS_COMPENSTATION		2
+
+/* Empirical airtime compensation (ms) indexed by time/step.  The
+ * "compenstation" misspelling is kept: the identifiers are referenced
+ * throughout this file. */
+static uint32_t tx_time_compenstation[SCS_MAX_TXTIME_COMP_INDEX] = {30, 35, 40, 45, 50, 50, 50, 50};
+static uint32_t rx_time_compenstation[SCS_MAX_RXTIME_COMP_INDEX] = {30, 50, 70, 90, 100, 110, 120, 130};
+/* bug fix: this table was dimensioned with SCS_MAX_RXTIME_COMP_INDEX
+ * although every reader indexes it up to SCS_MAX_TDLSTIME_COMP_INDEX - 1,
+ * which could read/write out of bounds if the two macros differ */
+static uint32_t tdls_time_compenstation[SCS_MAX_TDLSTIME_COMP_INDEX] = {40, 70, 70, 80, 80, 90, 90, 90};
+
+/*
+ * Debug/tuning hook: overwrite one entry of the rx/tx/tdls airtime
+ * compensation tables and dump the resulting table to the console.
+ * An out-of-range @index leaves the table untouched (a warning is
+ * printed but the dump still runs).  Unknown @type is silently ignored.
+ */
+static void ieee80211_scs_set_time_compensation(uint32_t type, uint32_t index, uint32_t comp)
+{
+	int i;
+
+	if (type == SCS_RX_COMPENSTATION) {
+		if (index >= SCS_MAX_RXTIME_COMP_INDEX) {
+			printk("SCS: The index(%u) for rxtime compensation is not correct!\n", index);
+		} else {
+			rx_time_compenstation[index] = comp;
+		}
+
+		printk("Current rx time compensation:\n");
+		for (i = 0; i < SCS_MAX_RXTIME_COMP_INDEX; i++) {
+			printk("  %u", rx_time_compenstation[i]);
+		}
+		printk("\n");
+
+	} else if (type == SCS_TX_COMPENSTATION) {
+		if (index >= SCS_MAX_TXTIME_COMP_INDEX) {
+			printk("SCS: The index(%u) for txtime compensation is not correct!\n", index);
+		} else {
+			tx_time_compenstation[index] = comp;
+		}
+
+		printk("Current tx time compensation:\n");
+		for (i = 0; i < SCS_MAX_TXTIME_COMP_INDEX; i++) {
+			printk("  %u", tx_time_compenstation[i]);
+		}
+		printk("\n");
+	} else if (type == SCS_TDLS_COMPENSTATION) {
+		/* NOTE(review): tdls_time_compenstation[] is declared with
+		 * SCS_MAX_RXTIME_COMP_INDEX entries but indexed here up to
+		 * SCS_MAX_TDLSTIME_COMP_INDEX - 1 -- confirm the two macros
+		 * are equal or fix the declaration */
+		if (index >= SCS_MAX_TDLSTIME_COMP_INDEX) {
+			printk("SCS: The index(%u) for tdlstime compensation is not correct!\n", index);
+		} else {
+			tdls_time_compenstation[index] = comp;
+		}
+
+		printk("Current tdls time compensation:\n");
+		for (i = 0; i < SCS_MAX_TDLSTIME_COMP_INDEX; i++) {
+			printk("  %u", tdls_time_compenstation[i]);
+		}
+		printk("\n");
+	}
+}
+
+/*
+ * Estimate the extra CCA interference caused by local airtime.  The
+ * measured (tx_time + rx_time) always under-reports the interference
+ * it actually causes, so empirical compensation values are looked up
+ * from the tables above.  Returns 0 when there is no meaningful
+ * upstream (rx) traffic -- downstream-only or idle links get nothing.
+ */
+static uint32_t ieee80211_scs_get_time_compensation(uint32_t tx_time, uint32_t rx_time)
+{
+	uint32_t comp;
+	uint32_t idx;
+
+	/* only downstream traffic or no traffic: no compensation */
+	if (rx_time <= SCS_MIN_RX_TIME_FOR_COMP)
+		return 0;
+
+	idx = rx_time / SCS_RX_TIME_COMP_STEP;
+	if (idx >= SCS_MAX_RXTIME_COMP_INDEX)
+		idx = SCS_MAX_RXTIME_COMP_INDEX - 1;
+	comp = rx_time_compenstation[idx];
+
+	if (tx_time > SCS_MIN_TX_TIME_FOR_COMP) {
+		idx = tx_time / SCS_TX_TIME_COMP_STEP;
+		if (idx >= SCS_MAX_TXTIME_COMP_INDEX)
+			idx = SCS_MAX_TXTIME_COMP_INDEX - 1;
+		comp += tx_time_compenstation[idx];
+	}
+
+	return comp;
+}
+
+#define SCS_MIN_STATS_FOR_STABLE_CHECK		30
+/*
+ * Decide whether two successive samples of a statistic differ enough
+ * to call it unstable.  Samples that are both below the minimum are
+ * always treated as stable; otherwise "unstable" means the absolute
+ * difference exceeds the configured range, or the relative difference
+ * exceeds the configured percentage.  Returns 1 (unstable) or 0.
+ */
+static uint32_t ieee80211_scs_is_stats_unstable(struct ieee80211com *ic,
+			uint32_t last_stats, uint32_t new_stats)
+{
+	uint32_t delta;
+
+	/* both samples too small to judge -- call it stable */
+	if ((last_stats < SCS_MIN_STATS_FOR_STABLE_CHECK) &&
+			(new_stats < SCS_MIN_STATS_FOR_STABLE_CHECK))
+		return 0;
+
+	delta = (last_stats > new_stats) ? (last_stats - new_stats)
+					 : (new_stats - last_stats);
+
+	/* absolute swing check */
+	if (delta > ic->ic_scs.scs_pmp_stats_stable_range)
+		return 1;
+
+	/* relative swing check; the sum cannot be zero here because at
+	 * least one sample reached the minimum threshold above */
+	return ((delta * 100 / (last_stats + new_stats)) >
+			ic->ic_scs.scs_pmp_stats_stable_percent) ? 1 : 0;
+}
+
+/*
+ * Zero the smoothed per-node statistics (CCA interference, other-node
+ * airtime, TDLS airtime) on every valid station.  AP mode only; the
+ * node table lock is held across the walk.
+ */
+static void ieee80211_scs_clear_node_smooth_data(struct ieee80211com *ic)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *node;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return;
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	TAILQ_FOREACH(node, &nt->nt_node, ni_list) {
+		if (ieee80211_scs_node_is_valid(node)) {
+			node->ni_recent_cca_intf_smthed = 0;
+			node->ni_others_rx_time_smthed = 0;
+			node->ni_others_tx_time_smthed = 0;
+			node->ni_tdls_tx_time_smthed = 0;
+			node->ni_tdls_rx_time_smthed = 0;
+		}
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+}
+
+/*
+ * Check whether any latest-reported TDLS link exists.  When @node is
+ * non-NULL the check is restricted to links where that node is either
+ * the sender or the receiver.  Returns 1 when a match is found, else 0.
+ *
+ * Bug fix: the original returned from inside the loop while still
+ * holding scs_tdls_lock, permanently leaking the spinlock (and the
+ * saved IRQ state) on the success path.
+ */
+static uint32_t ieee80211_scs_tdls_link_is_existing(struct ieee80211com *ic,
+			struct ieee80211_node *node)
+{
+	int i;
+	unsigned long flags;
+	uint32_t found = 0;
+	struct ieee80211_tdls_scs_entry *scs_entry = NULL;
+
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	for (i = 0; (i < IEEE80211_NODE_HASHSIZE) && !found; i++) {
+		LIST_FOREACH(scs_entry, &ic->ic_scs.scs_tdls_list[i], entry) {
+			if (scs_entry && scs_entry->stats.is_latest) {
+				if (!node || IEEE80211_ADDR_EQ(scs_entry->stats.s_addr,
+								node->ni_macaddr) ||
+						IEEE80211_ADDR_EQ(scs_entry->stats.r_addr,
+								node->ni_macaddr)) {
+					found = 1;
+					break;
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+
+	return found;
+}
+
+/*
+ * Sum the smoothed TDLS tx airtime over all nodes and add the
+ * empirical compensation looked up from tdls_time_compenstation[].
+ * NOTE(review): walks ic->ic_sta without IEEE80211_NODE_LOCK_IRQ,
+ * unlike the other traversals in this file -- verify callers hold the
+ * lock or that this is intentionally lock-free.
+ */
+static uint16_t ieee80211_scs_ap_get_tdls_link_time(struct ieee80211com *ic)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+	struct ieee80211_node *ni_tmp;
+	uint16_t ap_tdls_time = 0;
+	uint16_t tdls_comp;
+	uint16_t index;
+
+	TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, ni_tmp)
+		ap_tdls_time += ni->ni_tdls_tx_time_smthed;
+
+	/*  calculate compensation */
+	if (ap_tdls_time > SCS_MIN_TDLS_TIME_FOR_COMP) {
+		index = ap_tdls_time / SCS_TDLS_TIME_COMP_STEP;
+		index = (index >= SCS_MAX_TDLSTIME_COMP_INDEX) ?
+					(SCS_MAX_TDLSTIME_COMP_INDEX - 1) : index;
+		tdls_comp = tdls_time_compenstation[index];
+	} else {
+		tdls_comp = 0;
+	}
+
+	return (ap_tdls_time + tdls_comp);
+}
+
+/*
+ * Smooth the AP's raw CCA interference sample and correct it for the
+ * AP's own TDLS airtime, returning the compound interference value
+ * from ieee80211_scs_fix_cca_intf().  @stats_unstable is set when the
+ * sample swings too much against the smoothed history or the history
+ * is outdated.
+ */
+static uint32_t ieee80211_scs_smooth_ap_cca_intf_time(struct ieee80211com *ic,
+		uint32_t raw_cca_intf, uint32_t *stats_unstable)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+	uint32_t ap_tdls_intf;
+	uint32_t corrective_cca_intf;
+	uint32_t compound_cca_intf;
+
+	/* smoothed history older than the clear interval is discarded */
+	if ((jiffies - ic->ic_scs.scs_cca_intf_smthed_jiffies) >
+			(ic->ic_scs.scs_pmp_stats_clear_interval * HZ)) {
+		as->as_tx_ms_smth = 0;
+		as->as_rx_ms_smth = 0;
+		ic->ic_scs.scs_cca_intf_smthed = 0;
+		ieee80211_scs_clear_node_smooth_data(ic);
+	}
+
+	/* unstable when the new sample deviates from the smoothed value,
+	 * or the smoothed value has not been refreshed for 5 sample periods */
+	*stats_unstable = ieee80211_scs_is_stats_unstable(ic,
+			ic->ic_scs.scs_cca_intf_smthed, raw_cca_intf);
+	*stats_unstable |= ((jiffies - ic->ic_scs.scs_cca_intf_smthed_jiffies) >
+			(5 * ic->ic_scs.scs_cca_sample_dur * HZ));
+
+	/* refresh each node's smoothed TDLS tx/rx airtime */
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		ni->ni_tdls_rx_time_smthed = IEEE80211_SCS_SMOOTH(ni->ni_tdls_rx_time_smthed,
+				ni->ni_recent_tdls_rx_time, ic->ic_scs.scs_pmp_rx_time_smth_fctr);
+		ni->ni_tdls_tx_time_smthed = IEEE80211_SCS_SMOOTH(ni->ni_tdls_tx_time_smthed,
+				ni->ni_recent_tdls_tx_time, ic->ic_scs.scs_pmp_tx_time_smth_fctr);
+		ni->ni_tdls_time_smth_jiffies = jiffies;
+		SCSDBG(SCSLOG_INFO, "STA 0x%x - tdls_tx_time_smth: %u, tdls_rx_time_smth=%u\n",
+				ni->ni_associd, ni->ni_tdls_tx_time_smthed, ni->ni_tdls_rx_time_smthed);
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	/* fold the raw sample into the smoothed AP-side interference */
+	ic->ic_scs.scs_cca_intf_smthed = IEEE80211_SCS_SMOOTH(ic->ic_scs.scs_cca_intf_smthed,
+			raw_cca_intf, ic->ic_scs.scs_pmp_rpt_cca_smth_fctr);
+	ic->ic_scs.scs_cca_intf_smthed_jiffies = jiffies;
+
+	/* TDLS traffic between own clients is not real interference --
+	 * subtract it (clamping at zero) */
+	ap_tdls_intf = ieee80211_scs_ap_get_tdls_link_time(ic);
+	if (ic->ic_scs.scs_cca_intf_smthed > ap_tdls_intf)
+		corrective_cca_intf = ic->ic_scs.scs_cca_intf_smthed - ap_tdls_intf;
+	else
+		corrective_cca_intf = 0;
+
+	compound_cca_intf = ieee80211_scs_fix_cca_intf(ic, NULL, corrective_cca_intf,
+				ic->ic_scs.scs_sp_err_smthed, ic->ic_scs.scs_lp_err_smthed);
+
+	SCSDBG(SCSLOG_INFO, "raw_cca_int %u, cca_int_smthed %u, ap_tdls_intf %u,"
+			" corrective_cca_intf %u, compound_cca_intf %u, stats_unstable %u\n",
+			raw_cca_intf, ic->ic_scs.scs_cca_intf_smthed, ap_tdls_intf,
+			corrective_cca_intf, compound_cca_intf, *stats_unstable);
+
+	return compound_cca_intf;
+}
+
+/*
+ * Smooth one station's view of the channel: other nodes' airtime,
+ * TDLS airtime, and the station-reported CCA interference/idle time.
+ * Returns a bitmask of IEEE80211_SCS_UNSTABLE_* flags describing which
+ * statistics are currently unreliable (too much swing, or outdated).
+ */
+static uint32_t ieee80211_scs_smooth_sta_cca_intf_time(struct ieee80211com *ic,
+		struct ieee80211_node *ni, uint32_t total_tx_time, uint32_t total_rx_time,
+		uint32_t node_tx_time, uint32_t node_rx_time)
+{
+	uint32_t others_tx_time, others_rx_time;
+	uint32_t others_time, others_time_smth;
+	uint32_t is_stats_unstable = 0;
+	int32_t node_cca_intf;
+	uint32_t node_cca_idle;
+
+	/* get other nodes' tx/rx time and smooth them */
+	others_tx_time = (total_tx_time > node_tx_time) ? (total_tx_time - node_tx_time) : 0;
+	others_rx_time = (total_rx_time > node_rx_time) ? (total_rx_time - node_rx_time) : 0;
+	others_time = others_rx_time + others_tx_time;
+	others_time_smth = ni->ni_others_rx_time_smthed + ni->ni_others_tx_time_smthed; /* old smooth value */
+	if (ieee80211_scs_is_stats_unstable(ic, others_time_smth, others_time))
+		is_stats_unstable |= IEEE80211_SCS_UNSTABLE_OTHERSTIME;
+	if ((jiffies - ni->ni_others_time_smth_jiffies) > (5 * ic->ic_scs.scs_cca_sample_dur * HZ))
+		is_stats_unstable |= IEEE80211_SCS_UNSTABLE_OTHERSTIME_OUTDATED;
+
+	SCSDBG(SCSLOG_NOTICE, "node 0x%x time before smth -- self:(tx:%u, rx:%u), others:%u(tx:%u, rx:%u),"
+			"others_smth:%u(tx:%u, rx:%u), tdls:(tx:%u, rx:%u), tdls_smth:(tx:%u, rx:%u)\n",
+			ni->ni_associd, node_tx_time, node_rx_time, others_time, others_tx_time, others_rx_time,
+			others_time_smth, ni->ni_others_tx_time_smthed, ni->ni_others_rx_time_smthed,
+			ni->ni_recent_tdls_tx_time, ni->ni_recent_tdls_rx_time,
+			ni->ni_tdls_tx_time_smthed, ni->ni_tdls_rx_time_smthed);
+
+	ni->ni_others_rx_time_smthed = IEEE80211_SCS_SMOOTH(ni->ni_others_rx_time_smthed, others_rx_time,
+			ic->ic_scs.scs_pmp_rx_time_smth_fctr);
+	ni->ni_others_tx_time_smthed = IEEE80211_SCS_SMOOTH(ni->ni_others_tx_time_smthed, others_tx_time,
+			ic->ic_scs.scs_pmp_tx_time_smth_fctr);
+	ni->ni_others_time_smth_jiffies = jiffies;
+
+	/* TDLS stability only matters when this node is on a live TDLS link */
+	if ((ic->ic_opmode == IEEE80211_M_HOSTAP) && ieee80211_scs_tdls_link_is_existing(ic, ni)) {
+		if (ieee80211_scs_is_stats_unstable(ic,
+					ni->ni_tdls_rx_time_smthed,
+					ni->ni_recent_tdls_rx_time))
+			is_stats_unstable |= IEEE80211_SCS_UNSTABLE_TDLS_RX;
+		if (ieee80211_scs_is_stats_unstable(ic,
+					ni->ni_tdls_tx_time_smthed,
+					ni->ni_recent_tdls_tx_time))
+			is_stats_unstable |= IEEE80211_SCS_UNSTABLE_TDLS_TX;
+		if ((jiffies - ni->ni_tdls_time_smth_jiffies) >
+					(5 * ic->ic_scs.scs_cca_sample_dur * HZ))
+			is_stats_unstable |= IEEE80211_SCS_UNSTABLE_TDLS_OUTDATED;
+	}
+
+	/* smooth cca interference */
+	node_cca_intf = ni->ni_recent_cca_intf;
+	/* fall back to the AP-side smoothed idle time when the node did not report one */
+	node_cca_idle = SCS_CCA_IDLE_INVALID != ni->ni_recent_cca_idle ? ni->ni_recent_cca_idle_smthed : ic->ic_scs.scs_cca_idle_smthed;
+	if ((jiffies - ni->ni_recent_cca_intf_jiffies) < (ic->ic_scs.scs_cca_sample_dur * HZ) &&
+			node_cca_intf != SCS_CCA_INTF_INVALID) {
+		SCSDBG(SCSLOG_NOTICE, "node 0x%x cca intf before smth -- smthed:%u, recent_cca_intf:%u\n",
+				ni->ni_associd, ni->ni_recent_cca_intf_smthed, node_cca_intf);
+
+		if (ieee80211_scs_is_stats_unstable(ic, ni->ni_recent_cca_intf_smthed, node_cca_intf))
+			is_stats_unstable |= IEEE80211_SCS_UNSTABLE_INTF;
+		if ((jiffies - ni->ni_cca_intf_smth_jiffies) > (5 * ic->ic_scs.scs_cca_sample_dur * HZ))
+			is_stats_unstable |= IEEE80211_SCS_UNSTABLE_INTF_OUTDATED;
+
+		ni->ni_recent_cca_intf_smthed = IEEE80211_SCS_SMOOTH(ni->ni_recent_cca_intf_smthed, node_cca_intf,
+				ic->ic_scs.scs_pmp_rpt_cca_smth_fctr);
+		ni->ni_cca_intf_smth_jiffies = jiffies;
+
+		/* without a node-reported idle time the idle smoothing is skipped */
+		if (SCS_CCA_IDLE_INVALID == ni->ni_recent_cca_idle)
+			return is_stats_unstable;
+
+		if (ieee80211_scs_is_stats_unstable(ic, ni->ni_recent_cca_idle_smthed, node_cca_idle))
+			is_stats_unstable |= IEEE80211_SCS_UNSTABLE_IDLE;
+		if ((jiffies - ni->ni_recent_cca_idle_smth_jiffies) > (5 * ic->ic_scs.scs_cca_sample_dur * HZ))
+			is_stats_unstable |= IEEE80211_SCS_UNSTABLE_IDLE_OUTDATED;
+
+		ni->ni_recent_cca_idle_smthed = IEEE80211_SCS_SMOOTH(ni->ni_recent_cca_idle_smthed, node_cca_idle,
+				ic->ic_scs.scs_pmp_rpt_cca_smth_fctr);
+		ni->ni_recent_cca_idle_smth_jiffies = jiffies;
+	} else {
+		is_stats_unstable |= IEEE80211_SCS_UNSTABLE_INTF_INVALID;
+	}
+
+	return is_stats_unstable;
+}
+
+/*
+ * Append one TDLS stats record to a frame buffer: sender MAC, receiver
+ * MAC, then tx_time and is_latest as 16-bit little-endian values.
+ * Advances @frm past the record as a side effect.
+ */
+#define ADD_SCS_TDLS_STATS(frm, s_addr, r_addr, tx_time, is_latest)	\
+do {	\
+	IEEE80211_ADDR_COPY(frm, s_addr);	\
+	frm += IEEE80211_ADDR_LEN;	\
+	IEEE80211_ADDR_COPY(frm, r_addr);	\
+	frm += IEEE80211_ADDR_LEN;	\
+	ADDINT16LE(frm, tx_time);	\
+	ADDINT16LE(frm, is_latest);	\
+} while(0)
+
+/*
+ * Append one TDLS stats record per active TDLS peer into @frm (at most
+ * @frm_len bytes) for a STA-mode VAP.  Returns the number of bytes
+ * written; 0 when not a STA, TDLS is disabled, or nothing fits.
+ */
+static int ieee80211_scs_add_tdls_stats_ie(struct ieee80211vap *vap,
+	struct qtn_scs_info *scs_info_read, uint8_t *frm, uint16_t frm_len)
+{
+#define	IEEE80211_SCS_TDLS_TRAINING_DUATION		4
+#define IEEE80211_SCS_TDLS_TRAINING_COMPANSATION	20
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+	struct qtn_scs_vsp_node_stats *stats;
+	uint8_t *end = frm + frm_len;
+	int tdls_tx_time = 0;
+	int ie_len = 0;
+
+	if (vap->iv_opmode != IEEE80211_M_STA || (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED))
+		return ie_len;
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (!ieee80211_scs_node_is_valid(ni))
+			continue;
+
+		/* only peers with an active TDLS relationship are reported */
+		if (IEEE80211_NODE_IS_NONE_TDLS(ni) || IEEE80211_NODE_IS_TDLS_INACTIVE(ni))
+			continue;
+
+		stats = ieee80211_scs_find_node_stats(ic, scs_info_read, ni->ni_associd);
+		if (stats)
+			tdls_tx_time = stats->tx_usecs / 1000;
+		else
+			tdls_tx_time = 0;
+
+		/*
+		 * Add some compansation since training packets could
+		 * cause high interference in bad environment
+		 */
+		if (time_after(jiffies, ni->ni_training_start) &&
+				time_before(jiffies, ni->ni_training_start +
+					IEEE80211_SCS_TDLS_TRAINING_DUATION * HZ))
+			tdls_tx_time += IEEE80211_SCS_TDLS_TRAINING_COMPANSATION;
+
+		/* bug fix: require room for a whole record; the previous
+		 * "frm < end" check let the record overrun the buffer tail */
+		if (frm + sizeof(struct ieee80211_tdls_scs_stats) <= end) {
+			ADD_SCS_TDLS_STATS(frm, vap->iv_myaddr, ni->ni_macaddr, tdls_tx_time, 1);
+			ie_len += sizeof(struct ieee80211_tdls_scs_stats);
+			SCSDBG(SCSLOG_NOTICE, "Add SCS TDLS status: sender_mac %pM "
+					"receiver_mac %pM tx_time %u\n",vap->iv_myaddr,
+					ni->ni_macaddr, tdls_tx_time);
+		} else {
+			SCSDBG(SCSLOG_NOTICE, "Failed to add tdls stats IE\n");
+		}
+	}
+
+	return ie_len;
+}
+
+/*
+ * Record (or refresh) the reported TDLS tx-time for a sender/receiver
+ * pair.  Entries are kept in a hash table keyed by sender MAC.  An
+ * existing entry is updated in place; otherwise a new one is allocated
+ * (lock dropped, since M_WAITOK may sleep) and inserted.  The bucket is
+ * re-checked after the allocation so a concurrent report for the same
+ * pair cannot create a duplicate entry.
+ */
+void ieee80211_scs_update_tdls_stats(struct ieee80211com *ic,
+		struct ieee80211_tdls_scs_stats *scs_stats)
+{
+	int found = 0;
+	unsigned long flags;
+	struct ieee80211_tdls_scs_entry *scs_entry = NULL;
+	struct ieee80211_tdls_scs_entry *new_entry = NULL;
+	int hash = IEEE80211_NODE_HASH(scs_stats->s_addr);
+	uint16_t tx_time = le16toh(scs_stats->tx_time);
+
+	SCSDBG(SCSLOG_INFO, "Update SCS TDLS status: sender_mac %pM "
+					"receiver_mac %pM tx_time %u\n", scs_stats->s_addr,
+					scs_stats->r_addr, le16toh(scs_stats->tx_time));
+
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	LIST_FOREACH(scs_entry, &ic->ic_scs.scs_tdls_list[hash], entry) {
+		if (IEEE80211_ADDR_EQ(scs_entry->stats.s_addr, scs_stats->s_addr) &&
+				IEEE80211_ADDR_EQ(scs_entry->stats.r_addr, scs_stats->r_addr)) {
+			scs_entry->stats.is_latest = 1;
+			scs_entry->stats.tx_time = tx_time;
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+
+	if (found)
+		return;
+
+	MALLOC(new_entry, struct ieee80211_tdls_scs_entry *,
+				sizeof(*new_entry), M_DEVBUF, M_WAITOK);
+	if (new_entry == NULL) {
+		SCSDBG(SCSLOG_NOTICE, "SCS TDLS entry allocation failed\n");
+		return;
+	}
+	IEEE80211_ADDR_COPY(new_entry->stats.s_addr, scs_stats->s_addr);
+	IEEE80211_ADDR_COPY(new_entry->stats.r_addr, scs_stats->r_addr);
+	new_entry->stats.is_latest = 1;
+	new_entry->stats.tx_time = tx_time;
+
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	/*
+	 * Bug fix: the lock was dropped across the (possibly sleeping)
+	 * allocation, so another context may have inserted the same pair
+	 * meanwhile.  Update that entry instead of inserting a duplicate.
+	 */
+	LIST_FOREACH(scs_entry, &ic->ic_scs.scs_tdls_list[hash], entry) {
+		if (IEEE80211_ADDR_EQ(scs_entry->stats.s_addr, scs_stats->s_addr) &&
+				IEEE80211_ADDR_EQ(scs_entry->stats.r_addr, scs_stats->r_addr)) {
+			scs_entry->stats.is_latest = 1;
+			scs_entry->stats.tx_time = tx_time;
+			found = 1;
+			break;
+		}
+	}
+	if (!found)
+		LIST_INSERT_HEAD(&ic->ic_scs.scs_tdls_list[hash], new_entry, entry);
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+
+	if (found)
+		FREE(new_entry, M_DEVBUF);
+}
+
+/*
+ * Drop every cached TDLS stats entry that mentions the given node,
+ * whether as the sender or the receiver of the link.
+ */
+void ieee80211_scs_free_node_tdls_stats(struct ieee80211com *ic,
+		struct ieee80211_node *ni)
+{
+	struct ieee80211_tdls_scs_entry *cur;
+	struct ieee80211_tdls_scs_entry *next;
+	unsigned long flags;
+	int removed = 0;
+	int bucket;
+
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	for (bucket = 0; bucket < IEEE80211_NODE_HASHSIZE; bucket++) {
+		LIST_FOREACH_SAFE(cur, &ic->ic_scs.scs_tdls_list[bucket], entry, next) {
+			if (!IEEE80211_ADDR_EQ(cur->stats.s_addr, ni->ni_macaddr) &&
+					!IEEE80211_ADDR_EQ(cur->stats.r_addr, ni->ni_macaddr))
+				continue;
+			LIST_REMOVE(cur, entry);
+			FREE(cur, M_DEVBUF);
+			removed = 1;
+		}
+	}
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+
+	if (removed)
+		SCSDBG(SCSLOG_NOTICE, "free node %pM tdls stats\n", ni->ni_macaddr);
+}
+
+/*
+ * Flush the entire TDLS stats hash table, releasing every entry.
+ */
+void ieee80211_scs_free_tdls_stats_list(struct ieee80211com *ic)
+{
+	struct ieee80211_tdls_scs_entry *cur;
+	struct ieee80211_tdls_scs_entry *next;
+	unsigned long flags;
+	int bucket;
+
+	SCSDBG(SCSLOG_NOTICE, "SCS: free all of tdls stats\n");
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	for (bucket = 0; bucket < IEEE80211_NODE_HASHSIZE; bucket++) {
+		LIST_FOREACH_SAFE(cur, &ic->ic_scs.scs_tdls_list[bucket], entry, next) {
+			LIST_REMOVE(cur, entry);
+			FREE(cur, M_DEVBUF);
+		}
+	}
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+}
+
+/*
+ * Debug aid: log every cached TDLS stats entry at VERBOSE level.
+ */
+static void ieee80211_scs_dump_tdls_stats(struct ieee80211com *ic)
+{
+	struct ieee80211_tdls_scs_entry *cur;
+	unsigned long flags;
+	int bucket;
+
+	SCSDBG(SCSLOG_INFO, "Dump SCS tdls stats:\n");
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	for (bucket = 0; bucket < IEEE80211_NODE_HASHSIZE; bucket++) {
+		/* LIST_FOREACH never yields NULL, so no extra check is needed */
+		LIST_FOREACH(cur, &ic->ic_scs.scs_tdls_list[bucket], entry) {
+			SCSDBG(SCSLOG_VERBOSE, "sender_mac %pM, receiver_mac %pM,"
+				" tx_time: %u\n", cur->stats.s_addr,
+				cur->stats.r_addr, cur->stats.tx_time);
+		}
+	}
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+}
+
+/*
+ * Recompute ni_recent_tdls_tx_time / ni_recent_tdls_rx_time for a node
+ * by summing the cached TDLS stats entries that name it as sender
+ * (tx) or receiver (rx).
+ */
+static void ieee80211_scs_update_current_tdls_time(struct ieee80211com *ic,
+			struct ieee80211_node *ni)
+{
+	struct ieee80211_tdls_scs_entry *scs_entry = NULL;
+	int hash = IEEE80211_NODE_HASH(ni->ni_macaddr);
+	unsigned long flags;
+	int i;
+
+	ni->ni_recent_tdls_tx_time = 0;
+	ni->ni_recent_tdls_rx_time = 0;
+
+	spin_lock_irqsave(&ic->ic_scs.scs_tdls_lock, flags);
+	/* tx: the table is hashed by sender MAC, so only this bucket can match */
+	LIST_FOREACH(scs_entry, &ic->ic_scs.scs_tdls_list[hash], entry) {
+		if (IEEE80211_ADDR_EQ(scs_entry->stats.s_addr, ni->ni_macaddr)) {
+			ni->ni_recent_tdls_tx_time += scs_entry->stats.tx_time;
+			SCSDBG(SCSLOG_VERBOSE, "Node %pM tdls_tx_time: sender_mac %pM,"
+				" receiver_mac %pM, tx_time: %u\n", ni->ni_macaddr,
+				scs_entry->stats.s_addr, scs_entry->stats.r_addr,
+				scs_entry->stats.tx_time);
+		}
+	}
+
+	/* rx: receiver MAC is not the hash key, so every bucket must be scanned */
+	for (i = 0; i < IEEE80211_NODE_HASHSIZE; i++) {
+		LIST_FOREACH(scs_entry, &ic->ic_scs.scs_tdls_list[i], entry) {
+			if (IEEE80211_ADDR_EQ(scs_entry->stats.r_addr, ni->ni_macaddr)) {
+				ni->ni_recent_tdls_rx_time += scs_entry->stats.tx_time;
+				SCSDBG(SCSLOG_VERBOSE, "Node %pM tdls_rx_time: sender_mac %pM,"
+					" receiver_mac %pM, rx_time: %u\n", ni->ni_macaddr,
+					scs_entry->stats.s_addr, scs_entry->stats.r_addr,
+					scs_entry->stats.tx_time);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ic->ic_scs.scs_tdls_lock, flags);
+
+	SCSDBG(SCSLOG_INFO, "Node %pM, recent_tdls_tx_time %u, recent_tdls_rx_time %u\n",
+			ni->ni_macaddr, ni->ni_recent_tdls_tx_time, ni->ni_recent_tdls_rx_time);
+}
+
+/*
+ * Refresh the cached TDLS tx/rx time of every valid non-WDS node
+ * (AP mode only).  Returns 1 when any node's CCA report is invalid —
+ * in that case the caller must not trust the derived interference
+ * numbers — and 0 on success.
+ */
+static int ieee80211_scs_update_tdls_link_time(struct ieee80211com *ic)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	int failed = 0;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return failed;
+
+	ieee80211_scs_dump_tdls_stats(ic);
+
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni->ni_vap->iv_opmode == IEEE80211_M_WDS)
+			continue;
+
+		if (!ieee80211_scs_node_is_valid(ni))
+			continue;
+
+		/*
+		 * We must confirm all of CCA actions are received and all of CCA info are correct,
+		 * or we will get wrong total TDLS link time and calculate wrong CCA interference
+		 */
+		if (ni->ni_recent_cca_intf == SCS_CCA_INTF_INVALID) {
+			failed = 1;
+			SCSDBG(SCSLOG_NOTICE, "Get wrong CCA info from STA 0x%x\n", ni->ni_associd);
+			break;
+		}
+
+		ieee80211_scs_update_current_tdls_time(ic, ni);
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return failed;
+}
+
+/*
+ * Estimate the TDLS airtime that interferes with a given station, as
+ * seen by the AP: the sum of all OTHER stations' smoothed TDLS tx time
+ * minus what this station itself receives over TDLS, plus a table-based
+ * compensation.  Returns 0 when not operating as an AP.
+ */
+static uint16_t ieee80211_scs_sta_get_tdls_link_time(struct ieee80211com *ic,
+		struct ieee80211_node *cur_ni)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;	/* fixed stray ";;" */
+	struct ieee80211_node *ni;
+	uint16_t sta_tdls_time = 0;
+	uint16_t tdls_comp;
+	uint16_t index;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP)
+		return 0;
+
+	/* total TDLS tx time of every station except the one being scored */
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni != cur_ni)
+			sta_tdls_time += ni->ni_tdls_tx_time_smthed;
+	}
+
+	/* traffic cur_ni receives over TDLS is not interference to it; clamp at 0 */
+	sta_tdls_time = (sta_tdls_time > cur_ni->ni_tdls_rx_time_smthed) ?
+			(sta_tdls_time - cur_ni->ni_tdls_rx_time_smthed) : 0;
+
+	/* Add some compensation */
+	if (sta_tdls_time > SCS_MIN_TDLS_TIME_FOR_COMP) {
+		index = sta_tdls_time / SCS_TDLS_TIME_COMP_STEP;
+		index = (index >= SCS_MAX_TDLSTIME_COMP_INDEX) ?
+					(SCS_MAX_TDLSTIME_COMP_INDEX - 1) : index;
+		tdls_comp = tdls_time_compenstation[index];
+	} else {
+		tdls_comp = 0;
+	}
+
+	return (sta_tdls_time + tdls_comp);
+}
+
+/*
+ * Return 1 if any VAP operates as an RBS-mode WDS node (which pins the
+ * device to its current channel), 0 otherwise.
+ */
+int ieee80211_scs_is_wds_rbs_node(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (!IEEE80211_VAP_WDS_IS_RBS(vap))
+			continue;
+		SCSDBG(SCSLOG_INFO, "channel change is disabled under RBS WDS mode\n");
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_scs_is_wds_rbs_node);
+
+/*
+ * Return 1 if any VAP operates as an MBS-mode WDS node, 0 otherwise.
+ */
+static int
+ieee80211_scs_is_wds_mbs_node(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (IEEE80211_VAP_WDS_IS_MBS(vap))
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Return 1 if at least one WDS VAP is currently in the RUN state.
+ */
+static int ieee80211_wds_vap_exists(struct ieee80211com *ic)
+{
+	struct ieee80211vap *vap;
+	int exists = 0;
+
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if ((vap->iv_opmode == IEEE80211_M_WDS) && (vap->iv_state == IEEE80211_S_RUN)) {
+			exists = 1;
+			break;
+		}
+	}
+
+	return exists;
+}
+
+/*
+ * On an MBS WDS node, correct the AP's compound CCA interference by
+ * smoothing it and then subtracting each valid node's "others" airtime
+ * (traffic from stations of other BSSes that is not true interference).
+ * Returns the adjusted value.
+ */
+static uint32_t
+ieee80211_adjust_others_time_on_mbs(struct ieee80211com *ic, uint32_t compound_cca_intf)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+
+	SCSDBG(SCSLOG_INFO, "compound_cca_intf before %u\n", compound_cca_intf);
+	as->as_cca_intf_smth = IEEE80211_SCS_SMOOTH(as->as_cca_intf_smth, compound_cca_intf, IEEE80211_SCS_SMTH_RBS_TIME);
+	if (as->as_cca_intf_smth > compound_cca_intf) {
+		/* Smooth rising up; but sharp drop */
+		as->as_cca_intf_smth = compound_cca_intf;
+	}
+	SCSDBG(SCSLOG_INFO, "compound_cca_intf smoothed %u\n", as->as_cca_intf_smth);
+	compound_cca_intf = as->as_cca_intf_smth;
+
+	/* subtract each node's smoothed others-time, clamping at 0 */
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (!ieee80211_scs_node_is_valid(ni)) {
+			continue;
+		}
+		compound_cca_intf = (compound_cca_intf > ni->ni_recent_others_time_smth) ?
+					(compound_cca_intf - ni->ni_recent_others_time_smth) : 0;
+
+		SCSDBG(SCSLOG_INFO, "compound_cca_intf %u others_time %u\n", compound_cca_intf, ni->ni_recent_others_time_smth);
+	}
+	SCSDBG(SCSLOG_INFO, "compound_cca_intf after %u\n", compound_cca_intf);
+	return compound_cca_intf;
+}
+
+/*
+ * Subtract each valid node's smoothed "others" airtime from a per-node
+ * CCA interference figure, clamping at zero.  Used to discount traffic
+ * of the RBS side that a node hears but that is not real interference.
+ */
+static uint32_t
+ieee80211_adjust_node_rbs_others_time(struct ieee80211com *ic, uint32_t node_cca_intf)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+
+	SCSDBG(SCSLOG_INFO, "node_cca_intf before %u\n", node_cca_intf);
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (!ieee80211_scs_node_is_valid(ni))
+			continue;
+		if (node_cca_intf > ni->ni_recent_others_time_smth)
+			node_cca_intf -= ni->ni_recent_others_time_smth;
+		else
+			node_cca_intf = 0;
+	}
+	SCSDBG(SCSLOG_INFO, "node_cca_intf after %u\n", node_cca_intf);
+	return node_cca_intf;
+}
+
+/*
+ * Collect interference statistics for channel ranking on the current
+ * channel: derive the AP's own CCA interference, gather each station's
+ * reported CCA/idle/preamble-error numbers, classify stations into a
+ * traffic (idle/loaded) x interference (intfed/notintfed) matrix, and
+ * record the worst-case figures against the current channel.  Returns
+ * cc_flag with the STA/self channel-change request bits updated.
+ */
+uint32_t ieee80211_scs_collect_ranking_stats(struct ieee80211com *ic, struct qtn_scs_info *scs_info_read, uint32_t cc_flag, uint32_t compound_cca_intf)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt;
+	int cca_intf_is_recent = 0;
+	uint32_t node_num[SCS_NODE_TRAFFIC_TYPE_NUM][SCS_NODE_INTF_TYPE_NUM];
+	uint32_t traffic_idx, intf_idx;
+	uint32_t intf_max;
+	struct ieee80211_node *intf_max_ni;
+	struct qtn_scs_vsp_node_stats *stats;
+	uint32_t node_time, node_tx_time, node_rx_time;
+	uint32_t others_time_smth, others_time_comp;
+	uint32_t total_tx_time, total_rx_time;
+	uint32_t is_stats_unstable;
+	uint32_t new_cc_flag = cc_flag;
+	int32_t node_cca_intf, ap_cca_intf, node_compound_cca_intf;
+	uint32_t pmbl_max, node_pmbl, ap_pmbl;
+	int cur_bw = ieee80211_get_bw(ic);
+	int is_wds_mbs = ieee80211_scs_is_wds_mbs_node(ic);
+	uint16_t ap_tdls_intf;
+	uint16_t sta_tdls_intf;
+	uint32_t node_cca_idle;
+	struct scs_chan_intf_params intf_params= {0};
+
+	/* AP side: discount TDLS airtime from the smoothed CCA interference */
+	ap_tdls_intf = ieee80211_scs_ap_get_tdls_link_time(ic);
+	ap_cca_intf = (ic->ic_scs.scs_cca_intf_smthed > ap_tdls_intf) ?
+			(ic->ic_scs.scs_cca_intf_smthed - ap_tdls_intf) : 0;
+	/* weighted combination of short- and long-preamble error rates */
+	ap_pmbl = (ic->ic_scs.scs_sp_err_smthed * ic->ic_scs.scs_sp_wf +
+				ic->ic_scs.scs_lp_err_smthed * ic->ic_scs.scs_lp_wf) / 100;
+
+	/*
+	 * Currently only reset current channel's cca_intf, because other channel's cca_intf
+	 * are updated more slowly.
+	 */
+	intf_params.chan = ic->ic_curchan;
+	intf_params.chan_bw = cur_bw;
+	intf_params.cca_intf = SCS_CCA_INTF_INVALID;
+	intf_params.pmbl_err = 0;
+	intf_params.cca_dur = IEEE80211_SCS_CCA_INTF_SCALE;
+	ieee80211_scs_update_chans_cca_intf(ic, &intf_params, IEEE80211_SCS_COCHAN, NULL);
+
+	/* collect interference info from all stas */
+	intf_max = SCS_CCA_INTF_INVALID;
+	pmbl_max = 0;
+	intf_max_ni = NULL;
+	memset(node_num, 0x0, sizeof(node_num));
+	nt = &ic->ic_sta;
+	IEEE80211_SCAN_LOCK_IRQ(nt);
+	IEEE80211_NODE_LOCK_IRQ(nt);
+
+	total_tx_time = scs_info_read->tx_usecs / 1000;
+	total_rx_time = scs_info_read->rx_usecs / 1000;
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (!ieee80211_scs_node_is_valid(ni)) {
+			continue;
+		}
+
+		/* traffic condition */
+		stats = ieee80211_scs_find_node_stats(ic, scs_info_read, ni->ni_associd);
+		if (stats) {
+			node_tx_time = stats->tx_usecs / 1000;
+			node_rx_time = stats->rx_usecs / 1000;
+			node_time = node_tx_time + node_rx_time;
+		} else {
+			SCSDBG(SCSLOG_NOTICE, "no stats available for node 0x%x\n", ni->ni_associd);
+			node_tx_time = 0;
+			node_rx_time = 0;
+			node_time = 0;
+		}
+		node_time += ni->ni_recent_tdls_tx_time;
+
+		is_stats_unstable = ieee80211_scs_smooth_sta_cca_intf_time(ic, ni, total_tx_time, total_rx_time,
+				node_tx_time, node_rx_time);
+		others_time_smth = ni->ni_others_rx_time_smthed + ni->ni_others_tx_time_smthed; /* new smooth value */
+		others_time_comp = ieee80211_scs_get_time_compensation(ni->ni_others_tx_time_smthed,
+				ni->ni_others_rx_time_smthed);
+		others_time_smth = others_time_smth + others_time_comp;
+		ni->ni_others_time = others_time_smth;	/* This is used to compensate for RBS */
+		sta_tdls_intf = ieee80211_scs_sta_get_tdls_link_time(ic, ni);
+
+		SCSDBG(SCSLOG_NOTICE, "node 0x%x time after smth and compensation -- others_smth:%u "
+				"(tx:%u, rx:%u, comp:%u), sta_tdls_intf: %u\n", ni->ni_associd,	others_time_smth,
+				ni->ni_others_tx_time_smthed, ni->ni_others_rx_time_smthed, others_time_comp, sta_tdls_intf);
+
+		traffic_idx = (node_time <= (ic->ic_scs.scs_thrshld_loaded * scs_info_read->cca_try / 1000)) ?
+				SCS_NODE_TRAFFIC_IDLE : SCS_NODE_TRAFFIC_LOADED;
+		/* interference condition */
+		node_cca_intf = (int32_t)ni->ni_recent_cca_intf;
+		node_cca_idle = SCS_CCA_IDLE_INVALID != ni->ni_recent_cca_idle ? ni->ni_recent_cca_idle_smthed : ic->ic_scs.scs_cca_idle_smthed;
+		node_pmbl = 0;
+		/* a report older than one sampling period is treated as stale */
+		cca_intf_is_recent = (jiffies - ni->ni_recent_cca_intf_jiffies) < (ic->ic_scs.scs_cca_sample_dur * HZ);
+		intf_idx = ((node_cca_intf != SCS_CCA_INTF_INVALID) && (cca_intf_is_recent)) ?
+				SCS_NODE_INTFED : SCS_NODE_NOTINTFED;
+
+		if (intf_idx == SCS_NODE_INTFED) {
+			/* vendor specific handle */
+			if ((ni->ni_vendor == PEER_VENDOR_QTN)
+					|| IEEE80211_VAP_WDS_IS_MBS(ni->ni_vap)) {
+				if (ic->ic_sta_assoc >= IEEE80211_MAX_STA_CCA_ENABLED) {
+					/* PMP case */
+					if (is_stats_unstable) {
+						/* stats is unstable, ignore it */
+						node_cca_intf = 0;
+						node_cca_idle = IEEE80211_SCS_CCA_INTF_SCALE;
+					} else {
+						/* remove the interference that come from other associated station */
+						node_cca_intf = (ni->ni_recent_cca_intf_smthed > others_time_smth) ?
+								(ni->ni_recent_cca_intf_smthed - others_time_smth) : 0;
+						node_cca_intf = (node_cca_intf > sta_tdls_intf) ? (node_cca_intf - sta_tdls_intf) : 0;
+						node_cca_intf = MIN(node_cca_intf, IEEE80211_SCS_CCA_INTF_SCALE);
+					}
+					SCSDBG(SCSLOG_NOTICE, "node 0x%x -- cca_smth: %u, others_time_smth: %u,"
+							"sta_tdls_inf: %u, diff: %d, node_cca_intf: %u, is_unstable: %#x\n",
+							ni->ni_associd,	ni->ni_recent_cca_intf_smthed, others_time_smth, sta_tdls_intf,
+							(int)(ni->ni_recent_cca_intf_smthed - others_time_smth - sta_tdls_intf),
+							node_cca_intf, is_stats_unstable);
+				}
+				if (is_wds_mbs) {
+					/* Is this node a non-WDS node ? */
+					if (!(IEEE80211_VAP_WDS_ANY(ni->ni_vap))) {
+						node_cca_intf = ieee80211_adjust_node_rbs_others_time(ic, node_cca_intf);
+					} else {
+						SCSDBG(SCSLOG_NOTICE, "WDS Node; No adjustment needed\n");
+					}
+				}
+				node_compound_cca_intf = ieee80211_scs_fix_cca_intf(ic, ni, node_cca_intf,
+						ni->ni_recent_sp_fail, ni->ni_recent_lp_fail);
+				node_pmbl = (ni->ni_recent_sp_fail * ic->ic_scs.scs_sp_wf +
+						ni->ni_recent_lp_fail * ic->ic_scs.scs_lp_wf) / 100;
+
+				if (ieee80211_scs_is_interference_over_thresholds(ic,
+						node_compound_cca_intf, node_cca_idle, node_pmbl)) {
+					/* STA-side ACI: amplify the figure when the AP itself sees less interference */
+					if (!(cc_flag & IEEE80211_SCS_SELF_CCA_CC) && (node_cca_intf > ap_cca_intf)) {
+						SCSDBG(SCSLOG_NOTICE, "increase cca_intf for sta side ACI, sta cca_intf=%u,"
+								" self cca_intf=%u\n", node_cca_intf, ap_cca_intf);
+						node_cca_intf = node_cca_intf * 2 - ap_cca_intf;
+						node_cca_intf = MIN(node_cca_intf, IEEE80211_SCS_CCA_INTF_SCALE);
+						node_compound_cca_intf = ieee80211_scs_fix_cca_intf(ic, ni, node_cca_intf,
+								ni->ni_recent_sp_fail, ni->ni_recent_lp_fail);
+					}
+					node_cca_intf = node_compound_cca_intf;
+				} else {
+					intf_idx = SCS_NODE_NOTINTFED;
+					node_pmbl = 0; /* don't record it */
+				}
+			} else if (!(cc_flag & IEEE80211_SCS_SELF_CCA_CC) && (node_cca_intf > ap_cca_intf)) {
+				SCSDBG(SCSLOG_NOTICE, "increase cca_intf for sta side ACI, sta cca_intf=%u,"
+						" self cca_intf=%u\n", node_cca_intf, ap_cca_intf);
+				node_cca_intf = node_cca_intf * 2 - ap_cca_intf;
+				node_cca_intf = MIN(node_cca_intf, IEEE80211_SCS_CCA_INTF_SCALE);
+			}
+		}
+		SCSDBG(SCSLOG_NOTICE, "node 0x%x: vendor=%d, traffic=%d(time=%u), intfed=%d, cca_intf=%d cca_idle=%d\n",
+					ni->ni_associd, ni->ni_vendor, traffic_idx, node_time, intf_idx,
+					(intf_idx == SCS_NODE_INTFED) ? node_cca_intf : -1,
+					(intf_idx == SCS_NODE_INTFED) ? node_cca_idle : -1);
+		node_num[traffic_idx][intf_idx]++;
+		/* track the worst interfered node for later channel bookkeeping */
+		if (intf_idx == SCS_NODE_INTFED) {
+			if ((node_cca_intf > intf_max)
+				|| (intf_max == SCS_CCA_INTF_INVALID)) {
+				intf_max = node_cca_intf;
+				intf_max_ni = ni;
+			}
+		}
+		pmbl_max = MAX(node_pmbl, pmbl_max);
+
+		/* mark this report consumed so a stale value is never reused */
+		ni->ni_recent_cca_intf = SCS_CCA_INTF_INVALID;
+	}
+
+	/* Handle loaded and idle cases */
+	SCSDBG(SCSLOG_NOTICE, "traffic-intf matrix: loaded_intfed=%d, loaded_notintfed=%d, "
+				"idle_intfed=%d, idle_notintfed=%d\n",
+				node_num[SCS_NODE_TRAFFIC_LOADED][SCS_NODE_INTFED],
+				node_num[SCS_NODE_TRAFFIC_LOADED][SCS_NODE_NOTINTFED],
+				node_num[SCS_NODE_TRAFFIC_IDLE][SCS_NODE_INTFED],
+				node_num[SCS_NODE_TRAFFIC_IDLE][SCS_NODE_NOTINTFED]);
+	if (node_num[SCS_NODE_TRAFFIC_IDLE][SCS_NODE_INTFED] &&
+		!node_num[SCS_NODE_TRAFFIC_LOADED][SCS_NODE_INTFED] &&
+		node_num[SCS_NODE_TRAFFIC_LOADED][SCS_NODE_NOTINTFED]) {
+		SCSDBG(SCSLOG_NOTICE, "discard intfed idle sta report because loaded sta is not intfed\n");
+
+		new_cc_flag &= ~(IEEE80211_SCS_STA_CCA_REQ_CC | IEEE80211_SCS_BRCM_STA_TRIGGER_CC);
+	} else if (node_num[SCS_NODE_TRAFFIC_IDLE][SCS_NODE_INTFED] +
+			node_num[SCS_NODE_TRAFFIC_LOADED][SCS_NODE_INTFED]) {
+		SCSDBG(SCSLOG_NOTICE, "stas are under interference\n");
+		if (intf_max_ni)
+			memcpy(ic->ic_csw_mac, intf_max_ni->ni_macaddr, IEEE80211_ADDR_LEN);
+		memset(&intf_params, 0, sizeof(intf_params));
+		intf_params.chan = ic->ic_curchan;
+		intf_params.chan_bw = cur_bw;
+		intf_params.cca_intf = intf_max;
+		intf_params.pmbl_err = pmbl_max;
+		intf_params.cca_dur = IEEE80211_SCS_CCA_INTF_SCALE;
+		ieee80211_scs_update_chans_cca_intf(ic, &intf_params, IEEE80211_SCS_COCHAN, intf_max_ni);
+		if (!(new_cc_flag & (IEEE80211_SCS_STA_CCA_REQ_CC | IEEE80211_SCS_BRCM_STA_TRIGGER_CC))) {
+			new_cc_flag |= IEEE80211_SCS_STA_CCA_REQ_CC;
+		}
+	} else {
+		new_cc_flag &= ~(IEEE80211_SCS_STA_CCA_REQ_CC | IEEE80211_SCS_BRCM_STA_TRIGGER_CC);
+
+		SCSDBG(SCSLOG_NOTICE, "sta are free of interference\n");
+	}
+
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+	IEEE80211_SCAN_UNLOCK_IRQ(nt);
+
+	/* Adjust self intf on mbs-wds */
+	if (is_wds_mbs) {
+		compound_cca_intf = ieee80211_adjust_others_time_on_mbs(ic, compound_cca_intf);
+	}
+
+	/* update self's */
+	memset(&intf_params, 0, sizeof(intf_params));
+	intf_params.chan = ic->ic_curchan;
+	intf_params.chan_bw = cur_bw;
+	intf_params.cca_intf = compound_cca_intf;
+	intf_params.pmbl_err = ap_pmbl;
+	intf_params.cca_dur = IEEE80211_SCS_CCA_INTF_SCALE;
+	intf_params.cca_pri = scs_info_read->cca_pri;
+	intf_params.cca_sec = scs_info_read->cca_sec20;
+	intf_params.cca_sec40 = scs_info_read->cca_sec40;
+	ieee80211_scs_update_chans_cca_intf(ic, &intf_params, IEEE80211_SCS_COCHAN, NULL);
+
+	/* Recheck the interference detected by AP self */
+	if (ieee80211_scs_is_interference_over_thresholds(ic,
+			compound_cca_intf, ic->ic_scs.scs_cca_idle_smthed, ap_pmbl)) {
+		new_cc_flag |= IEEE80211_SCS_SELF_CCA_CC;
+		SCSDBG(SCSLOG_NOTICE, "Recheck Self CCA intf, need to change channel"
+				" compound_cca_intf=%u, cca_idle_smth=%u, pmbl_err=%u\n",
+				compound_cca_intf, ic->ic_scs.scs_cca_idle_smthed, ap_pmbl);
+	} else {
+		new_cc_flag &= ~IEEE80211_SCS_SELF_CCA_CC;
+		SCSDBG(SCSLOG_NOTICE, "Recheck Self CCA intf, needn't to change channel"
+				" compound_cca_intf=%u, cca_idle_smth=%u, pmbl_err=%u\n",
+				compound_cca_intf, ic->ic_scs.scs_cca_idle_smthed, ap_pmbl);
+	}
+
+	return new_cc_flag;
+}
+
+/*
+ * Calculate the rate ratio based on attenuation
+ * return unit: percent
+ * If no attenuation available, return 100
+ * For domains other than us, return 100
+ * Otherwise, calculate rate ratio according to attenuation.
+ */
+#define SCS_RATE_RATIO_QTN		0
+#define SCS_RATE_RATIO_NONQTN		1
+#define SCS_RATE_RATIO_NUM		2
+#define SCS_RATE_RATIO_ENTRY_MAX	9
+#define SCS_RATE_RATIO_DEFAULT		100
+#define SCS_RATE_RATIO_MIN		0
+/*
+ * Attenuation-to-rate-ratio lookup: attenuations at or below atten_min
+ * map to rate_ratios[0], above atten_max to the last used entry, and
+ * values in between are bucketed in steps of atten_intvl (rounded up).
+ * One table per peer class (QTN vs non-QTN stations).
+ */
+struct scs_rate_ratio_table {
+	int atten_min;		/* lowest attenuation covered by the table */
+	int atten_max;		/* highest attenuation covered by the table */
+	int atten_intvl;	/* bucket width between consecutive entries */
+	int entry_num;		/* number of valid entries in rate_ratios[] */
+	uint8_t rate_ratios[SCS_RATE_RATIO_ENTRY_MAX];	// percent
+} scs_rate_ratio_set[SCS_RATE_RATIO_NUM] = {
+	/* qtn */
+	{
+	76,
+	104,
+	4,
+	9,
+	{95, 90, 80, 70, 60, 45, 30, 20, 0},
+	},
+	/* non-qtn, based on test of brcm sta */
+	{
+	74,
+	98,
+	6,
+	6,
+	{95, 90, 80, 50, 30, 0},
+	}
+};
+/*
+ * Derive the expected rate ratio (percent) from the worst-case station
+ * attenuation using the per-vendor lookup table.  Returns
+ * SCS_RATE_RATIO_MIN when no valid attenuation is available.
+ */
+uint8_t ieee80211_scs_calc_rate_ratio(struct ieee80211com *ic)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	int32_t txpower = ic->ic_curchan->ic_maxpower;
+	int32_t attenuation;
+	int32_t idx = 0;
+	uint8_t rate_ratio;
+	int type = ic->ic_nonqtn_sta ? SCS_RATE_RATIO_NONQTN : SCS_RATE_RATIO_QTN;
+
+	/* currently only use worst case */
+	if (!SCS_ATTEN_VALID(as->as_sta_atten_max)) {
+		return SCS_RATE_RATIO_MIN;
+	}
+	attenuation = as->as_sta_atten_max;
+	SCSDBG(SCSLOG_NOTICE, "txpower=%d, raw attenuation=%d, attenuation adjust=%d, non_qtn_sta=%d\n",
+			txpower, attenuation, ic->ic_scs.scs_atten_adjust, ic->ic_nonqtn_sta);
+	attenuation += ic->ic_scs.scs_atten_adjust;
+
+	if (attenuation <= scs_rate_ratio_set[type].atten_min) {
+		idx = 0;
+	} else if (attenuation > scs_rate_ratio_set[type].atten_max) {
+		idx = scs_rate_ratio_set[type].entry_num - 1;
+	} else {
+		/* ceiling division selects the bucket above atten_min */
+		idx = (attenuation - scs_rate_ratio_set[type].atten_min +
+			scs_rate_ratio_set[type].atten_intvl - 1) / scs_rate_ratio_set[type].atten_intvl;
+	}
+	rate_ratio = scs_rate_ratio_set[type].rate_ratios[idx];
+
+	SCSDBG(SCSLOG_INFO, "txpower=%d, attenuation=%d, rate ratio = %d\n",
+			txpower, attenuation, rate_ratio);
+
+	return rate_ratio;
+}
+
+/*
+ * Invalidate per-channel CCA interference records older than "thrshld"
+ * minutes.  Returns the number of channels that were aged out; a
+ * threshold of 0 disables aging entirely.
+ */
+int ieee80211_scs_aging(struct ieee80211com *ic, uint32_t thrshld)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	uint32_t max_age = thrshld * 60 * HZ;	/* minutes -> jiffies */
+	int aged = 0;
+	int chan;
+
+	if (thrshld == 0)
+		return 0;
+
+	for (chan = 1; chan < IEEE80211_CHAN_MAX; chan++) {
+		if (as->as_cca_intf[chan] == SCS_CCA_INTF_INVALID)
+			continue;
+		if ((jiffies - as->as_cca_intf_jiffies[chan]) <= max_age)
+			continue;
+
+		SCSDBG(SCSLOG_NOTICE, "chan %d cca intf aged out(%u - %u > %u)\n", chan,
+				(unsigned int)jiffies, as->as_cca_intf_jiffies[chan],
+				max_age);
+		as->as_cca_intf[chan] = SCS_CCA_INTF_INVALID;
+		as->as_cca_intf_jiffies[chan] = 0;
+		as->as_pmbl_err_ap[chan] = 0;
+		as->as_pmbl_err_sta[chan] = 0;
+		aged++;
+	}
+
+	return aged;
+}
+
+/*
+ * Three-way comparison of two channel metrics: the lower metric wins
+ * (returns 1 if the first is better, -1 if worse); on equal metrics the
+ * higher preference wins, and 0 means a complete tie.
+ */
+static int
+ieee80211_scs_metric_compare(int32_t metric1, uint32_t metric_pref1, int32_t metric2, uint32_t metric_pref2)
+{
+	if (metric1 != metric2)
+		return (metric1 < metric2) ? 1 : -1;
+
+	if (metric_pref1 == metric_pref2)
+		return 0;
+
+	return (metric_pref1 > metric_pref2) ? 1 : -1;
+}
+
+/*
+ * Compare two channels using their cached metric/preference values.
+ */
+static int
+ieee80211_scs_metric_compare_by_chan(struct ieee80211com *ic, int32_t chan1, int32_t chan2)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	int32_t m1 = as->as_chanmetric[chan1];
+	uint32_t p1 = as->as_chanmetric_pref[chan1];
+	int32_t m2 = as->as_chanmetric[chan2];
+	uint32_t p2 = as->as_chanmetric_pref[chan2];
+
+	return ieee80211_scs_metric_compare(m1, p1, m2, p2);
+}
+
+/*
+ * Compute the ranking metric (lower is better) and the tie-breaking
+ * preference for a candidate channel.  The metric scales the current
+ * traffic by the expected rate ratio at the candidate's tx power,
+ * adds its recorded CCA interference, and corrects for the DFS/non-DFS
+ * channel-switch margin difference.  The preference encodes tx power
+ * and DFS weighting in the high 16 bits plus random noise in the low
+ * 16 bits so equally-good channels are chosen evenly.
+ */
+static void
+ieee80211_scs_get_chan_metric(struct ieee80211com *ic, struct ieee80211_channel *chan,
+		uint8_t rate_ratio, int32_t *metric, uint32_t *metric_pref, uint32_t cc_flag)
+{
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	int32_t chan_metric;
+	int32_t chan_metric_pref = 0;
+	int32_t chan_rate_ratio;
+	int32_t chan_rate_ratio_cap;
+	int32_t txpower;
+	int32_t cur_txpower = 0;
+	uint32_t traffic_ms;
+	uint16_t cca_intf;
+	/*
+	 * Bug fix: was "char rndbuf[2]".  With a signed char,
+	 * (rndbuf[0] << 8) | rndbuf[1] sign-extends negative bytes and
+	 * can set all high bits of the preference, corrupting the
+	 * power-dominant upper 16 bits.  uint8_t keeps the noise in the
+	 * low 16 bits as intended.
+	 */
+	uint8_t rndbuf[2];
+	uint8_t chan_ieee = chan->ic_ieee;
+	int isdfs = !!(chan->ic_flags & IEEE80211_CHAN_DFS);
+
+	traffic_ms = MAX((as->as_tx_ms + as->as_rx_ms), SCS_PICK_CHAN_MIN_SCALED_TRAFFIC);
+	if (ic->ic_curchan != IEEE80211_CHAN_ANYC) {
+		cur_txpower = ic->ic_curchan->ic_maxpower;
+	}
+	txpower = chan->ic_maxpower;
+	cca_intf = (as->as_cca_intf[chan_ieee] == SCS_CCA_INTF_INVALID) ? 0 : as->as_cca_intf[chan_ieee];
+
+	/* scale the rate ratio by the power delta relative to the current channel */
+	chan_rate_ratio = 100 - ((100 - rate_ratio) * ABS(txpower - cur_txpower)) / SCS_CHAN_POWER_DIFF_MAX;
+	chan_rate_ratio = MAX(chan_rate_ratio, 0);
+	if ((IEEE80211_SCS_ATTEN_INC_CC & cc_flag) && txpower < cur_txpower) {
+		/* attenuation is rising: never move to a lower-power channel */
+		chan_metric = SCS_MAX_RAW_CHAN_METRIC;
+		chan_rate_ratio_cap = 0;
+	} else if ((txpower - cur_txpower) < -SCS_CHAN_POWER_DIFF_SAFE) {
+		/* clearly lower power: inflate traffic cost by the rate loss */
+		if (chan_rate_ratio > 0) {
+			chan_metric = traffic_ms * 100 / chan_rate_ratio + cca_intf;
+			chan_rate_ratio_cap = chan_rate_ratio;
+		} else {
+			chan_metric = SCS_MAX_RAW_CHAN_METRIC;
+			chan_rate_ratio_cap = 0;
+		}
+	} else if ((txpower - cur_txpower) > SCS_CHAN_POWER_DIFF_SAFE) {
+		/* clearly higher power: discount traffic cost by the rate gain */
+		chan_metric = traffic_ms * chan_rate_ratio / 100 + cca_intf;
+		if (chan_rate_ratio > 0) {
+			chan_rate_ratio_cap = 100 * 100 / chan_rate_ratio;
+		} else {
+			chan_rate_ratio_cap = SCS_MAX_RATE_RATIO_CAP;
+		}
+	} else {
+		/* comparable power: traffic cost unchanged */
+		chan_metric = traffic_ms + cca_intf;
+		chan_rate_ratio_cap = 100;
+	}
+
+	/* Correct channel metric to account for different channel switch margins */
+	chan_metric = MAX(0, (chan_metric -
+			(isdfs * (ic->ic_scs.scs_leavedfs_chan_mtrc_mrgn - ic->ic_scs.scs_chan_mtrc_mrgn))));
+
+	if (metric) {
+		*metric = chan_metric;
+	}
+
+	if (metric_pref) {
+		int tx_power_factor = DM_DEFAULT_TX_POWER_FACTOR;
+		int dfs_factor = DM_DEFAULT_DFS_FACTOR;
+
+		if (ic->ic_dm_factor.flags) {
+			if (ic->ic_dm_factor.flags & DM_FLAG_TXPOWER_FACTOR_PRESENT) {
+				tx_power_factor = ic->ic_dm_factor.txpower_factor;
+			}
+			if (ic->ic_dm_factor.flags & DM_FLAG_DFS_FACTOR_PRESENT) {
+				dfs_factor = ic->ic_dm_factor.dfs_factor;
+			}
+		}
+
+		chan_metric_pref = (tx_power_factor * txpower) + (isdfs * dfs_factor);
+		/* metric preference: power.random */
+		chan_metric_pref = (chan_metric_pref << 16);
+		/* Add a little noise to equally choose best ones */
+		get_random_bytes(rndbuf, sizeof(rndbuf));
+		chan_metric_pref += (rndbuf[0] << 8) | rndbuf[1];
+
+		*metric_pref = chan_metric_pref;
+	}
+
+	SCSDBG(SCSLOG_NOTICE, "chan %d: txpower=%d, dfs=%d, radar=%d, rate_ratio_cap=%d, "
+				"margin_correction=-%d, metric=%d, pref=0x%x\n",
+				chan_ieee, txpower,
+				!!(chan->ic_flags & IEEE80211_CHAN_DFS),
+				!!(chan->ic_flags & IEEE80211_CHAN_RADAR),
+				chan_rate_ratio_cap,
+				(isdfs * (ic->ic_scs.scs_leavedfs_chan_mtrc_mrgn - ic->ic_scs.scs_chan_mtrc_mrgn)),
+				chan_metric, chan_metric_pref);
+}
+
+/*
+ * Choose the best alternate channel (ic_ieee_best_alt_chan) given the
+ * newly selected best channel and up to four fallback candidates.  An
+ * alternate must differ from the best channel, and — when leaving a DFS
+ * channel in a non-EU region, where the old channel's availability will
+ * be cleared — must also differ from the current BSS channel.
+ */
+void ieee80211_ap_pick_alternate_channel(struct ieee80211com *ic,
+			struct ieee80211_channel *bestchan,
+			struct ieee80211_channel *fs1_bestchan,
+			struct ieee80211_channel *fs1_secbestchan,
+			struct ieee80211_channel *fs2_bestchan,
+			struct ieee80211_channel *fs2_secbestchan)
+{
+	int to_clear_availability;
+	struct ieee80211_channel *chan;
+
+	if (!is_ieee80211_chan_valid(bestchan))
+		return;
+
+	/* leaving a DFS channel outside the EU clears its availability */
+	to_clear_availability = (!ic->ic_dfs_is_eu_region() &&
+				is_ieee80211_chan_valid(ic->ic_bsschan) &&
+				(ic->ic_bsschan->ic_flags & IEEE80211_CHAN_DFS) &&
+				!ic->ic_chan_compare_equality(ic, ic->ic_bsschan, bestchan));
+#define ALTERCHAN_IS_ACCEPTABLE(deschan) \
+	(!ic->ic_chan_compare_equality(ic, deschan, bestchan) && \
+	(!to_clear_availability || \
+		!ic->ic_chan_compare_equality(ic, deschan, ic->ic_bsschan)))
+
+	/* candidates in order of preference */
+	if (fs1_bestchan && (ALTERCHAN_IS_ACCEPTABLE(fs1_bestchan)))
+		ic->ic_ieee_best_alt_chan = fs1_bestchan->ic_ieee;
+	else if (fs1_secbestchan && (ALTERCHAN_IS_ACCEPTABLE(fs1_secbestchan)))
+		ic->ic_ieee_best_alt_chan = fs1_secbestchan->ic_ieee;
+	else if (fs2_bestchan && ALTERCHAN_IS_ACCEPTABLE(fs2_bestchan))
+		ic->ic_ieee_best_alt_chan = fs2_bestchan->ic_ieee;
+	else if (fs2_secbestchan && ALTERCHAN_IS_ACCEPTABLE(fs2_secbestchan))
+		ic->ic_ieee_best_alt_chan = fs2_secbestchan->ic_ieee;
+	/* else we keep ic->ic_ieee_best_alt_chan as-is since no better choice */
+
+	/* a stale alternate equal to the best channel is meaningless; clear it */
+	chan = ieee80211_find_channel_by_ieee(ic, ic->ic_ieee_best_alt_chan);
+	if (ic->ic_chan_compare_equality(ic, chan, bestchan)) {
+		ic->ic_ieee_best_alt_chan = 0;
+	}
+}
+EXPORT_SYMBOL(ieee80211_ap_pick_alternate_channel);
+
+/*
+ * Maintain a best / second-best pair of fallback channels.  A candidate
+ * comparing better than or equal to the current best demotes it to
+ * second best; otherwise it may replace the second best.  Unavailable
+ * channels are ignored.
+ */
+void ieee80211_update_alternate_channels(struct ieee80211com *ic,
+			struct ieee80211_channel *bestchan,
+			struct ieee80211_channel **fs_bestchan,
+			struct ieee80211_channel **fs_secbestchan,
+			int (*compare_fn)(struct ieee80211com *, int, int))
+{
+	if (!ieee80211_is_chan_available(bestchan))
+		return;
+
+	if (*fs_bestchan == NULL ||
+			compare_fn(ic, bestchan->ic_ieee, (*fs_bestchan)->ic_ieee) >= 0) {
+		*fs_secbestchan = *fs_bestchan;
+		*fs_bestchan = bestchan;
+		return;
+	}
+
+	if (*fs_secbestchan == NULL ||
+			compare_fn(ic, bestchan->ic_ieee, (*fs_secbestchan)->ic_ieee) >= 0)
+		*fs_secbestchan = bestchan;
+}
+EXPORT_SYMBOL(ieee80211_update_alternate_channels);
+
/*
 * Channel ranking and selection of SCS.
 * Called when SCS decided that channel change is required.
 * Return the selected channel number. 0 means no valid better channel.
 *
 * pick_flags restricts the candidate set (DFS-only, non-DFS-only,
 * available-only, not-yet-available DFS for OCAC, or "pick anyway" which
 * excludes the current channel); cc_flag is forwarded to the per-channel
 * metric computation.
 */
int
ieee80211_scs_pick_channel(struct ieee80211com *ic, int pick_flags, uint32_t cc_flag)
{
	struct ap_state *as = ic->ic_scan->ss_scs_priv;
	uint8_t rate_ratio;
	struct ieee80211_channel *chan;
	struct ieee80211_channel *chan2;
	struct ieee80211_channel *selected_chan = NULL;
	struct ieee80211_channel *best_chan = NULL;
	struct ieee80211_channel *fs1_bestchan = NULL;
	struct ieee80211_channel *fs1_secbestchan = NULL;
	struct ieee80211_channel *fs2_bestchan = NULL;
	struct ieee80211_channel *fs2_secbestchan = NULL;
	int i;
	int chan_sec_ieee;
	int chan_sec40u_ieee;
	int chan_sec40l_ieee;
	int selected_subchan;
	int best_chan_ieee;
	int chan_metric;
	int best_metric;
	int curr_metric = SCS_MAX_RAW_CHAN_METRIC;
	uint32_t chan_metric_pref;
	uint32_t best_metric_pref;
	int isdfs;
	int is_match_dfs_pickflags;
	int pick_anyway = (pick_flags & IEEE80211_SCS_PICK_ANYWAY);
	int curchan;
	int cur_bw;
	int bestchan_is_curchan = 0;
	int is_curchan;
	int is_weather_chan;
	int bestchan_is_weather_chan = 0;

	/* Prepare the information we need to do the channel ranking */
	curchan = ic->ic_curchan->ic_ieee;
	cur_bw = ieee80211_get_bw(ic);
	rate_ratio = ieee80211_scs_calc_rate_ratio(ic);
	SCSDBG(SCSLOG_NOTICE, "curchan=%d cur_bw=%d cur_txpower=%d,"
			" rate_ratio=%d, tx_ms=%d, rx_ms=%d\n",
			curchan, cur_bw, ic->ic_curchan->ic_maxpower,
			rate_ratio, as->as_tx_ms, as->as_rx_ms);

	/* Age out stale interference entries before ranking */
	ieee80211_scs_aging(ic, ic->ic_scs.scs_thrshld_aging_nor);

	/* ranking */
	memset(as->as_chanmetric, 0, sizeof(as->as_chanmetric));
	ieee80211_scs_metric_update_timestamps(as);

	best_chan_ieee = SCS_BEST_CHAN_INVALID;
	best_metric = 0;
	best_metric_pref = 0;

	/* Walk every known channel and rank the candidates */
	for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
		chan = ieee80211_find_channel_by_ieee(ic, i);
		if (chan == NULL) {
			continue;
		}

		/* Does this channel's DFS class match what the caller asked for? */
		isdfs = (chan->ic_flags & IEEE80211_CHAN_DFS);
		is_match_dfs_pickflags = (isdfs && (pick_flags & IEEE80211_SCS_PICK_DFS_ONLY))
			|| (isdfs && (pick_flags & IEEE80211_SCS_PICK_AVAILABLE_DFS_ONLY))
			|| (!isdfs && (pick_flags & IEEE80211_SCS_PICK_NON_DFS_ONLY))
			|| !(pick_flags & (IEEE80211_SCS_PICK_DFS_ONLY | IEEE80211_SCS_PICK_NON_DFS_ONLY |
							IEEE80211_SCS_PICK_AVAILABLE_DFS_ONLY));

		ieee80211_scs_get_chan_metric(ic, chan, rate_ratio,
				&as->as_chanmetric[i], &as->as_chanmetric_pref[i], cc_flag);
		/*
		 * (i) When scs_pick_channel is being called from scs comparison task, scs manual,
		 * we should choose from channels which are available
		 * (ii) When scs_pick_channel is being called from OCAC task
		 * we should choose from DFS channels which are not available, cac not done, and radar not detected.
		 */

		/* This is called only from SCS Manual and SCS comparison task context */
		if ((pick_flags & IEEE80211_SCS_PICK_AVAILABLE_ANY_CHANNEL)
			&& (!ieee80211_is_chan_available(chan))) {
			SCSDBG(SCSLOG_INFO, "chan %d skipped as not available\n", chan->ic_ieee);
			continue;
		}

		/* OCAC needs a DFS channel for which CAC is not already done */
		if (pick_flags & IEEE80211_SCS_PICK_NOT_AVAILABLE_DFS_ONLY) {
			if ((chan) && (ic->ic_dfs_chans_available_for_cac(ic, chan) == false)) {
				SCSDBG(SCSLOG_INFO, "chan %d skipped (flag: %d)\n", chan->ic_ieee,
						ic->ic_chan_availability_status[chan->ic_ieee]);
				continue;
			}
		}

		/*
		 * Derive the metric for the full-bandwidth channel set and pick
		 * which 20MHz sub-channel would become primary.  For 40/80MHz the
		 * loop only evaluates the set once, when it reaches the channel
		 * carrying the HT40D/VHT80_UU flag (all members already ranked).
		 */
		if (cur_bw == BW_HT20) {
			chan_sec_ieee = -1;
			chan_sec40u_ieee = -1;
			chan_sec40l_ieee = -1;

			chan_metric = as->as_chanmetric[i];
			chan_metric_pref = as->as_chanmetric_pref[i];
			selected_subchan = i;
		} else if (cur_bw == BW_HT40) {
			/* only calculate channel pair when low number 20M channel is already calculated */
			if (!(chan->ic_flags & IEEE80211_CHAN_HT40D)) {
				continue;
			}
			chan_sec_ieee = i - IEEE80211_CHAN_SEC_SHIFT;
			chan_sec40u_ieee = -1;
			chan_sec40l_ieee = -1;

			/* select worse channel as primary channel within the chan pair */
			if (ieee80211_scs_metric_compare_by_chan(ic, i, chan_sec_ieee) > 0) {
				selected_subchan = chan_sec_ieee;
			} else {
				selected_subchan = i;
			}

			chan_metric = as->as_chanmetric[selected_subchan];
			chan_metric_pref = as->as_chanmetric_pref[selected_subchan];
		} else if (cur_bw >= BW_HT80) {
			/* only calculate channel set when all 20M channels are already calculated */
			if (!(chan->ic_ext_flags & IEEE80211_CHAN_VHT80_UU)) {
				continue;
			}
			chan_sec_ieee = i - IEEE80211_CHAN_SEC_SHIFT;
			chan_sec40u_ieee = i - 2 * IEEE80211_CHAN_SEC_SHIFT;
			chan_sec40l_ieee = i - 3 * IEEE80211_CHAN_SEC_SHIFT;

			/* select worst channel as primary channel within the best chan set */
			if (ieee80211_scs_metric_compare_by_chan(ic, i, chan_sec_ieee) > 0)
				selected_subchan = chan_sec_ieee;
			else
				selected_subchan = i;
			if (ieee80211_scs_metric_compare_by_chan(ic, selected_subchan, chan_sec40u_ieee) > 0)
				selected_subchan = chan_sec40u_ieee;
			if (ieee80211_scs_metric_compare_by_chan(ic, selected_subchan, chan_sec40l_ieee) > 0)
				selected_subchan = chan_sec40l_ieee;

			chan_metric = as->as_chanmetric[selected_subchan];
			chan_metric_pref = as->as_chanmetric_pref[selected_subchan];
		} else {
			printk("SCS: unknown bandwidth: %u\n", cur_bw);
			continue;
		}

		/* Need to calculate DFS channel metric in case current channel is DFS channel */
		if ((curchan == i) ||
				(curchan == chan_sec_ieee) ||
				(curchan == chan_sec40u_ieee) ||
				(curchan == chan_sec40l_ieee)) {
			is_curchan = 1;
			curr_metric = chan_metric;
			/* PICK_ANYWAY means the current channel must not win */
			if (pick_anyway) {
				continue;
			}
		} else {
			is_curchan = 0;
		}

		/* DFS-class and availability filtering for the normal (non-OCAC) paths */
		if ((!(pick_flags & IEEE80211_SCS_PICK_AVAILABLE_ANY_CHANNEL)) &&
				(!(pick_flags & IEEE80211_SCS_PICK_NOT_AVAILABLE_DFS_ONLY))) {
			if (!is_match_dfs_pickflags) {
				SCSDBG(SCSLOG_INFO, "chan %d skipped as pick flags mismatch\n", chan->ic_ieee);
				continue;
			} else if (pick_flags & IEEE80211_SCS_PICK_AVAILABLE_DFS_ONLY) {
				if (!ieee80211_is_chan_available(chan)) {
					SCSDBG(SCSLOG_INFO, "chan %d skipped as not available\n", chan->ic_ieee);
					continue;
				}
			}
		}

		if (isset(ic->ic_chan_pri_inactive, i) &&
				((chan_sec_ieee == -1) || isset(ic->ic_chan_pri_inactive, chan_sec_ieee)) &&
				((chan_sec40u_ieee == -1) || isset(ic->ic_chan_pri_inactive, chan_sec40u_ieee)) &&
				((chan_sec40l_ieee == -1) || isset(ic->ic_chan_pri_inactive, chan_sec40l_ieee))) {
			/* All the sub-channel can't be primary channel */
			SCSDBG(SCSLOG_INFO, "chan %d skipped as all inactive\n", chan->ic_ieee);
			continue;
		}

		is_weather_chan = ieee80211_is_on_weather_channel(ic, chan);

		/* radar detected on this channel, secondary channel or secondary 40MHz channels*/
		if (chan->ic_flags & IEEE80211_CHAN_DFS) {
			if (chan->ic_flags & IEEE80211_CHAN_RADAR)
				continue;

			if (cur_bw >= BW_HT40) {
				chan2 = ieee80211_find_channel_by_ieee(ic, chan_sec_ieee);
				if (chan2 && (chan2->ic_flags & IEEE80211_CHAN_RADAR))
					continue;

				if (cur_bw >= BW_HT80) {
					chan2 = ieee80211_find_channel_by_ieee(ic, chan_sec40u_ieee);
					if (chan2 && (chan2->ic_flags & IEEE80211_CHAN_RADAR))
						continue;

					chan2 = ieee80211_find_channel_by_ieee(ic, chan_sec40l_ieee);
					if (chan2 && (chan2->ic_flags & IEEE80211_CHAN_RADAR))
						continue;
				}
			}
		}

		/*
		 * For OCAC: once a non-weather candidate exists, skip weather
		 * channels when their CAC duration is longer than the normal one.
		 */
		if (best_chan_ieee != SCS_BEST_CHAN_INVALID &&
				!bestchan_is_weather_chan &&
				is_weather_chan &&
				!is_curchan &&
				(pick_flags & IEEE80211_SCS_PICK_NOT_AVAILABLE_DFS_ONLY) &&
				(ic->ic_ocac.ocac_cfg.ocac_params.wea_duration_secs >
					ic->ic_ocac.ocac_cfg.ocac_params.duration_secs)) {
			/* Weather channel has low priority since it need too long CAC time */
			SCSDBG(SCSLOG_INFO, "weather chan %d skipped as low priority\n", chan->ic_ieee);
			continue;
		}

		/* Select best channel */
		if (best_chan_ieee == SCS_BEST_CHAN_INVALID ||
				ieee80211_scs_metric_compare_by_chan(ic,
					selected_subchan,
					best_chan_ieee) > 0) {
			best_chan_ieee = selected_subchan;
			best_metric = chan_metric;
			best_metric_pref = chan_metric_pref;
			bestchan_is_curchan = is_curchan;
			bestchan_is_weather_chan = is_weather_chan;
		}

		/* Track fast-switch alternates before the OBSS primary-channel check */
		selected_chan = ieee80211_find_channel_by_ieee(ic, selected_subchan);
		ieee80211_update_alternate_channels(ic,
					selected_chan,
					&fs2_bestchan,
					&fs2_secbestchan,
					ieee80211_scs_metric_compare_by_chan);

		/* May move the primary to the other sub-channel (OBSS); NULL on failure */
		selected_chan = ieee80211_scs_switch_pri_chan(ic->ic_scan, selected_chan);
		if (best_chan == NULL || (selected_chan &&
				ieee80211_scs_metric_compare_by_chan(ic,
					selected_chan->ic_ieee,
					best_chan->ic_ieee) > 0)) {
			best_chan = selected_chan;
		} else {
			SCSDBG(SCSLOG_INFO, "chan %d skipped as OBSS check failed\n", selected_subchan);
		}

		ieee80211_update_alternate_channels(ic,
					selected_chan,
					&fs1_bestchan,
					&fs1_secbestchan,
					ieee80211_scs_metric_compare_by_chan);
	}

	/* Prefer the OBSS-checked winner when one survived; else fall back */
	if (best_chan) {
		best_chan_ieee = best_chan->ic_ieee;
		best_metric = as->as_chanmetric[best_chan_ieee];
		best_metric_pref = as->as_chanmetric_pref[best_chan_ieee];
		bestchan_is_curchan = ic->ic_chan_compare_equality(ic, ic->ic_curchan, best_chan);
	} else {
		best_chan = ieee80211_find_channel_by_ieee(ic, best_chan_ieee);
	}

	/* Unless forced, only switch when the gain clears the configured margin */
	if (!pick_anyway && best_chan_ieee != SCS_BEST_CHAN_INVALID) {
		if (bestchan_is_curchan) {
			best_chan_ieee = SCS_BEST_CHAN_INVALID;
			SCSDBG(SCSLOG_NOTICE, "current chan is best channel\n");
		} else {
			bool dfs_to_non_dfs = false;

			if (curr_metric == SCS_MAX_RAW_CHAN_METRIC) {
				/* If the IEEE80211_CHAN_HT40D channel in 40MHz or IEEE80211_CHAN_VHT80_UU
				 * channel in 80MHz is disabled, curr_metric may be SCS_MAX_RAW_CHAN_METRIC
				 * per above metric calculation, so need to read it from as_chanmetric table.
				 */
				curr_metric = as->as_chanmetric[curchan];
			}

			if (best_chan) {
				/* Leaving a DFS channel for non-DFS uses its own (larger) margin */
				dfs_to_non_dfs = (ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS) &&
						 (!(best_chan->ic_flags & IEEE80211_CHAN_DFS));

				if (best_metric == SCS_MAX_RAW_CHAN_METRIC ||
						((curr_metric - best_metric) * 100) < ((dfs_to_non_dfs ?
								(ic->ic_scs.scs_leavedfs_chan_mtrc_mrgn) :
								(ic->ic_scs.scs_chan_mtrc_mrgn))  * IEEE80211_SCS_CCA_INTF_SCALE)) {
					/* Avoid unnecessary channel change */
					SCSDBG(SCSLOG_NOTICE, "best chan %u is not better enough (%u - %u) < %u%%\n",
							best_chan_ieee, curr_metric, best_metric,
							dfs_to_non_dfs ? ic->ic_scs.scs_leavedfs_chan_mtrc_mrgn : ic->ic_scs.scs_chan_mtrc_mrgn);

					best_chan_ieee = SCS_BEST_CHAN_INVALID;
				}
			} else {
				best_chan_ieee = SCS_BEST_CHAN_INVALID;
			}
		}
	}
	as->as_scs_ranking_cnt++;

	if (best_chan_ieee != SCS_BEST_CHAN_INVALID) {
		SCSDBG(SCSLOG_NOTICE, "chan %d selected as best chan\n", best_chan_ieee);

		/* Publish the fast-switch alternates alongside the winner */
		ieee80211_ap_pick_alternate_channel(ic,
				best_chan,
				fs1_bestchan,
				fs1_secbestchan,
				fs2_bestchan,
				fs2_secbestchan);
		SCSDBG(SCSLOG_NOTICE, "%s: Fast-switch best alt channel updated to %d\n",
					__func__, ic->ic_ieee_best_alt_chan);
	}

	return best_chan_ieee;
}
+
+void ieee80211_scs_contribute_randomness(uint32_t cca_intf, uint32_t lpre_err, uint32_t spre_err)
+{
+	unsigned random_buf[] = {cca_intf, lpre_err, spre_err};
+
+	add_qtn_randomness(random_buf, ARRAY_SIZE(random_buf));
+}
+
+int ieee80211_scs_get_scaled_scan_info(struct ieee80211com *ic, int chan_ieee,
+		struct qtn_scs_scan_info *p_scan_info)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scs_info_set *scs_info_lh = sp->scs_info_lhost;
+	int ret = -1;
+
+	p_scan_info->cca_try = 0;
+	if (chan_ieee < IEEE80211_CHAN_MAX) {
+		memcpy(p_scan_info, &scs_info_lh->scan_info[chan_ieee], sizeof(struct qtn_scs_scan_info));
+	}
+
+	if (p_scan_info->cca_try) {
+		ieee80211_scs_contribute_randomness(p_scan_info->cca_intf, p_scan_info->lpre_err,
+				p_scan_info->spre_err);
+		p_scan_info->cca_intf = IEEE80211_SCS_NORMALIZE(p_scan_info->cca_intf, p_scan_info->cca_try);
+		p_scan_info->cca_busy = IEEE80211_SCS_NORMALIZE(p_scan_info->cca_busy, p_scan_info->cca_try);
+		p_scan_info->cca_idle = IEEE80211_SCS_NORMALIZE(p_scan_info->cca_idle, p_scan_info->cca_try);
+		p_scan_info->cca_tx = IEEE80211_SCS_NORMALIZE(p_scan_info->cca_tx, p_scan_info->cca_try);
+		p_scan_info->cca_pri = IEEE80211_SCS_NORMALIZE(p_scan_info->cca_pri, p_scan_info->cca_try);
+		p_scan_info->cca_sec20 = IEEE80211_SCS_NORMALIZE(p_scan_info->cca_sec20, p_scan_info->cca_try);
+		p_scan_info->cca_sec40 = IEEE80211_SCS_NORMALIZE(p_scan_info->cca_sec40, p_scan_info->cca_try);
+		p_scan_info->bcn_rcvd = IEEE80211_SCS_NORMALIZE(p_scan_info->bcn_rcvd, p_scan_info->cca_try);
+		p_scan_info->crc_err = IEEE80211_SCS_NORMALIZE(p_scan_info->crc_err, p_scan_info->cca_try);
+		p_scan_info->spre_err = IEEE80211_SCS_NORMALIZE(p_scan_info->spre_err, p_scan_info->cca_try);
+		p_scan_info->lpre_err = IEEE80211_SCS_NORMALIZE(p_scan_info->lpre_err, p_scan_info->cca_try);
+		p_scan_info->cca_try = IEEE80211_SCS_CCA_INTF_SCALE;
+		ret = 0;
+	}
+
+	return ret;
+}
+
/*
 * Seed both channel-ranking tables (the initial/AP table and the SCS table)
 * from the statistics gathered during a scan.  AP mode only.  For channels
 * with no scan info of their own, the secondary channel's info is used as a
 * fallback; channels with no info at all get their interference entry
 * invalidated.
 */
void ieee80211_scs_update_ranking_table_by_scan(struct ieee80211com *ic)
{
	struct qtn_scs_scan_info scan_info;
	struct ap_state *as, *scs_as;
	struct ieee80211_channel *chan;
	int chansec_ieee;
	uint32_t pmbl_err;
	int i, ret;
	uint32_t update_mode = IEEE80211_SCS_OFFCHAN;
	struct scs_chan_intf_params intf_params= {0};

	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
		return;
	}

	/* if we didn't return to bss channel then stats for the last channel were not updated */
	if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
		ic->ic_scs_update_scan_stats(ic);
		update_mode = IEEE80211_SCS_INIT_SCAN;
	}

	as = ic->ic_scan->ss_priv;
	scs_as = ic->ic_scan->ss_scs_priv;
	for (i = 0; i < IEEE80211_CHAN_MAX; i++) {
		chan = ieee80211_find_channel_by_ieee(ic, i);
		if (chan == NULL) {
			continue;
		}
		ret = ieee80211_scs_get_scaled_scan_info(ic, i, &scan_info);
		if (ret != 0) {
			/* use secondary channel's scan info */
			chansec_ieee = ieee80211_find_sec_chan(chan);
			if (chansec_ieee) {
				ret = ieee80211_scs_get_scaled_scan_info(ic, chansec_ieee, &scan_info);
			}
			SCSDBG(SCSLOG_INFO, "Didn't find scan_info of channel %u, use the scan_info of channel %u\n",
					i, chansec_ieee);
		}
		if (ret == 0) {
			/* Weighted preamble error: short/long failures mixed per config */
			pmbl_err = (scan_info.spre_err * ic->ic_scs.scs_sp_wf +
					scan_info.lpre_err * ic->ic_scs.scs_lp_wf) / 100;
			/* update initial channel ranking table */
			if (as) {
				as->as_pmbl_err_ap[i] = pmbl_err;
				as->as_cca_intf[i] = scan_info.cca_intf;
				as->as_cca_intf_jiffies[i] = jiffies;
			}

			/* update SCS channel ranking table */
			if (scs_as) {
				uint32_t compound_cca_intf = scan_info.cca_intf;

				/*
				 * don't add preamble failure counts to compound_cca_intf because
				 * the preamble failure counts got by scanning are not reliable
				 */
				/*
				compound_cca_intf = ieee80211_scs_fix_cca_intf(ic, NULL, scan_info.cca_intf,
						scan_info.spre_err, scan_info.lpre_err);
				*/
				intf_params.chan = chan;
				intf_params.chan_bw = scan_info.bw_sel;
				intf_params.cca_intf = compound_cca_intf;
				intf_params.pmbl_err = pmbl_err;
				intf_params.cca_dur = IEEE80211_SCS_CCA_INTF_SCALE;
				intf_params.cca_pri = scan_info.cca_pri;
				intf_params.cca_sec = scan_info.cca_sec20;
				intf_params.cca_sec40 = scan_info.cca_sec40;
				ieee80211_scs_update_chans_cca_intf(ic, &intf_params, update_mode, NULL);
			}
		} else {
			SCSDBG(SCSLOG_INFO, "No available scan_info for channel %u\n", i);
			/* clear initial channel ranking table */
			if (as) {
				as->as_pmbl_err_ap[i] = 0;
				as->as_cca_intf[i] = SCS_CCA_INTF_INVALID;
				as->as_cca_intf_jiffies[i] = jiffies;
			}
		}
	}
}
EXPORT_SYMBOL(ieee80211_scs_update_ranking_table_by_scan);
+
+void ieee80211_scs_scale_cochan_data(struct ieee80211com *ic, struct qtn_scs_info *scs_info_read)
+{
+	int i;
+	struct qtn_scs_vsp_node_stats *stats;
+
+	if (scs_info_read->cca_try == 0)
+		return;
+
+	scs_info_read->cca_idle = IEEE80211_SCS_NORMALIZE(scs_info_read->cca_idle, scs_info_read->cca_try);
+	scs_info_read->cca_busy = IEEE80211_SCS_NORMALIZE(scs_info_read->cca_busy, scs_info_read->cca_try);
+	scs_info_read->cca_interference = IEEE80211_SCS_NORMALIZE(scs_info_read->cca_interference, scs_info_read->cca_try);
+	scs_info_read->cca_tx = IEEE80211_SCS_NORMALIZE(scs_info_read->cca_tx, scs_info_read->cca_try);
+	scs_info_read->tx_usecs = IEEE80211_SCS_NORMALIZE(scs_info_read->tx_usecs, scs_info_read->cca_try);
+	scs_info_read->rx_usecs = IEEE80211_SCS_NORMALIZE(scs_info_read->rx_usecs, scs_info_read->cca_try);
+	scs_info_read->beacon_recvd = IEEE80211_SCS_NORMALIZE(scs_info_read->beacon_recvd, scs_info_read->cca_try);
+
+	for (i = 0; i < scs_info_read->scs_vsp_info.num_of_assoc; i++) {
+		stats = &scs_info_read->scs_vsp_info.scs_vsp_node_stats[i];
+		stats->tx_usecs = IEEE80211_SCS_NORMALIZE(stats->tx_usecs, scs_info_read->cca_try);
+		stats->rx_usecs = IEEE80211_SCS_NORMALIZE(stats->rx_usecs, scs_info_read->cca_try);
+	}
+
+	scs_info_read->cca_try = IEEE80211_SCS_CCA_INTF_SCALE;
+}
+
+void ieee80211_scs_scale_offchan_data(struct ieee80211com *ic, struct qtn_scs_oc_info *scs_oc_info)
+{
+	if (scs_oc_info->off_chan_cca_try_cnt == 0)
+		return;
+
+	scs_oc_info->off_chan_cca_busy = IEEE80211_SCS_NORMALIZE(scs_oc_info->off_chan_cca_busy,
+			scs_oc_info->off_chan_cca_try_cnt);
+	scs_oc_info->off_chan_cca_sample_cnt = IEEE80211_SCS_NORMALIZE(scs_oc_info->off_chan_cca_sample_cnt,
+			scs_oc_info->off_chan_cca_try_cnt);
+	scs_oc_info->off_chan_beacon_recvd = IEEE80211_SCS_NORMALIZE(scs_oc_info->off_chan_beacon_recvd,
+			scs_oc_info->off_chan_cca_try_cnt);
+	scs_oc_info->off_chan_crc_errs = IEEE80211_SCS_NORMALIZE(scs_oc_info->off_chan_crc_errs,
+			scs_oc_info->off_chan_cca_try_cnt);
+	scs_oc_info->off_chan_sp_errs = IEEE80211_SCS_NORMALIZE(scs_oc_info->off_chan_sp_errs,
+			scs_oc_info->off_chan_cca_try_cnt);
+	scs_oc_info->off_chan_lp_errs = IEEE80211_SCS_NORMALIZE(scs_oc_info->off_chan_lp_errs,
+			scs_oc_info->off_chan_cca_try_cnt);
+
+	scs_oc_info->off_chan_cca_try_cnt = IEEE80211_SCS_CCA_INTF_SCALE;
+}
+
+static struct qtn_scs_vsp_node_stats *ieee80211_scs_find_node_stats(struct ieee80211com *ic, struct qtn_scs_info *scs_info_read, uint16_t aid)
+{
+	int i;
+	struct qtn_scs_vsp_node_stats *stats;
+
+	for (i = 0; i < scs_info_read->scs_vsp_info.num_of_assoc; i++) {
+		stats = &scs_info_read->scs_vsp_info.scs_vsp_node_stats[i];
+		if (stats->ni_associd == IEEE80211_AID(aid))
+			return stats;
+	}
+
+	return NULL;
+}
+
/*
 * Lomuto-style partition step for quickselect: partition buf[l..r] around
 * the pivot buf[r], so that all elements smaller than the pivot end up to
 * its left.  Returns the pivot's final index.  (Note: function name keeps
 * the historical "parition" spelling used by callers.)
 */
uint32_t local_scs_median_parition(uint32_t *buf, uint32_t l, uint32_t r)
{
	uint32_t pivot;
	uint32_t store;
	uint32_t scan;
	uint32_t tmp;

	/* Single-element range partitions to itself */
	if (l == r)
		return l;

	/* buf[r] is never moved inside the loop, so caching it is safe */
	pivot = buf[r];
	store = l;
	for (scan = l; scan < r; scan++) {
		if (buf[scan] < pivot) {
			tmp = buf[scan];
			buf[scan] = buf[store];
			buf[store] = tmp;
			store++;
		}
	}

	/* Move the pivot into its final sorted position */
	tmp = buf[r];
	buf[r] = buf[store];
	buf[store] = tmp;

	return store;
}
+
+static uint32_t local_scs_get_median(uint32_t *buf, uint32_t l, uint32_t r)
+{
+	uint32_t idx;
+
+	idx = local_scs_median_parition(buf, l, r);
+
+	if (idx == QTN_SCS_FILTER_MEDIAN_IDX)
+		return buf[idx];
+
+	if (idx > QTN_SCS_FILTER_MEDIAN_IDX)
+		return local_scs_get_median(buf, l, idx - 1);
+	else
+		return local_scs_get_median(buf, idx + 1, r);
+}
+
/*
 * Return the median of the QTN_SCS_FILTER_WINDOW_SZ samples stored in
 * @history, via quickselect on a scratch copy (the copy is needed because
 * partitioning reorders elements in place).
 * NOTE(review): the scratch buffer is static, so this is not reentrant;
 * presumably it only runs from a single timer/task context -- confirm.
 */
static uint32_t ieee80211_scs_get_median(struct ieee80211com *ic,
			struct qtn_scs_data_history *history)
{
	static uint32_t buf[QTN_SCS_FILTER_WINDOW_SZ];

	memcpy(buf, history->buffer, sizeof(history->buffer));

	return local_scs_get_median(buf, 0, QTN_SCS_FILTER_WINDOW_SZ - 1);
}
+
+static int32_t ieee80211_scs_get_channel_index(struct ieee80211com *ic, struct ieee80211_channel *chan)
+{
+	uint32_t i;
+
+	for (i = 0; i < ic->ic_nchans; i++) {
+		if (ic->ic_channels[i].ic_ieee == chan->ic_ieee)
+			break;
+	}
+
+	if (i == ic->ic_nchans)
+		return -1;
+
+	return i;
+}
+
+static void ieee80211_scs_update_cochan_filter_history(struct ieee80211com *ic,
+			struct ieee80211_phy_stats  *p_phy_stats,
+			struct qtn_scs_stats_history *history,
+			int32_t idx)
+{
+	history->sp_errs[idx].buffer[history->sp_errs[idx].idx++] = p_phy_stats->cnt_sp_fail;
+	if (history->sp_errs[idx].idx == QTN_SCS_FILTER_WINDOW_SZ)
+		history->sp_errs[idx].idx = 0;
+
+	history->lp_errs[idx].buffer[history->lp_errs[idx].idx++] = p_phy_stats->cnt_lp_fail;
+	if (history->lp_errs[idx].idx == QTN_SCS_FILTER_WINDOW_SZ)
+		history->lp_errs[idx].idx = 0;
+}
+
+static uint32_t ieee80211_scs_fix_cca_intf(struct ieee80211com *ic, struct ieee80211_node *ni, uint32_t cca_intf, uint32_t sp_fail, uint32_t lp_fail)
+{
+	uint32_t pmbl_fail;
+	int pmbl_level;
+	int mapped_intf_max;
+	uint32_t compound_cca_intf;
+
+	pmbl_fail = (sp_fail * ic->ic_scs.scs_sp_wf + lp_fail * ic->ic_scs.scs_lp_wf) / 100;
+	pmbl_level = pmbl_fail * 100 / ic->ic_scs.scs_pmbl_err_range;
+	pmbl_level = MIN(pmbl_level, 100);
+	mapped_intf_max = ic->ic_scs.scs_pmbl_err_mapped_intf_range * IEEE80211_SCS_CCA_INTF_SCALE / 100;
+	compound_cca_intf = cca_intf + pmbl_level * mapped_intf_max / 100;
+	compound_cca_intf = MIN(compound_cca_intf, IEEE80211_SCS_CCA_INTF_SCALE);
+	SCSDBG(SCSLOG_INFO, "node 0x%x: node_cca_intf=%u, pmbl_smthed_weighted=%u, compound_cca_intf=%u\n",
+			(ni ? ni->ni_associd : 0),
+			cca_intf, pmbl_fail, compound_cca_intf);
+
+	return compound_cca_intf;
+}
+
/*
 * Initiate a CSA-based switch to channel @newchan_ieee.  On success,
 * record the previous/current channel pair and mark the channel(s) we are
 * leaving (primary plus all sub-channels for 40/80MHz) as "experienced"
 * in the as_chan_xped bitmap.  Returns the ieee80211_enter_csa() result,
 * or -1 when the channel number is unknown.
 */
static int
ieee80211_scs_change_channel(struct ieee80211com *ic, int newchan_ieee)
{
	struct ieee80211_channel *newchan;
	struct ieee80211_channel *curchan;
	int chan2_ieee;
	struct ap_state *as = ic->ic_scan->ss_scs_priv;
	int ret = -1;
	int cur_bw;

	newchan = ieee80211_find_channel_by_ieee(ic, newchan_ieee);
	if (newchan == NULL) {
		return ret;
	}

	/* Capture the channel we are leaving before CSA may change it */
	curchan = ic->ic_curchan;
	ret = ieee80211_enter_csa(ic, newchan, ieee80211_scs_trigger_channel_switch,
			ic->ic_csw_reason,
			IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT,
			IEEE80211_CSA_MUST_STOP_TX,
			IEEE80211_CSA_F_BEACON | IEEE80211_CSA_F_ACTION);
	if (ret == 0) {
		ic->ic_aci_cci_cce.cce_previous = curchan->ic_ieee;
		ic->ic_aci_cci_cce.cce_current = newchan->ic_ieee;

		/* Mark the departed primary and, for wide BW, its sub-channels */
		setbit(as->as_chan_xped, curchan->ic_ieee);
		cur_bw = ieee80211_get_bw(ic);
		if (cur_bw >= BW_HT40) {
			chan2_ieee = ieee80211_find_sec_chan(curchan);
			if (chan2_ieee)
				setbit(as->as_chan_xped, chan2_ieee);
			if (cur_bw >= BW_HT80) {
				chan2_ieee = ieee80211_find_sec40u_chan(curchan);
				if (chan2_ieee)
					setbit(as->as_chan_xped, chan2_ieee);
				chan2_ieee = ieee80211_find_sec40l_chan(curchan);
				if (chan2_ieee)
					setbit(as->as_chan_xped, chan2_ieee);
			}
		}
	}

	return ret;
}
+
/*
 * Track whether persistent interference on the current channel should
 * trigger DFS re-entry.  Each interference-driven call above the
 * configured threshold bumps a counter; once the accumulated time
 * (counter * sample duration) reaches scs_thrshld_dfs_reentry, either age
 * out stale channel entries and retry, or raise as_dfs_reentry_level.
 * *dfs_reentry_clear is zeroed while the counter is still accumulating so
 * the caller does not reset it.
 */
static void
ieee80211_scs_update_dfs_reentry(struct ieee80211com *ic, uint32_t cc_flag, uint32_t *dfs_reentry_clear)
{
	int curr_chan_cca_intf;
	int aged_num;
	int curchan = ic->ic_curchan->ic_ieee;
	struct ap_state *as = ic->ic_scan->ss_scs_priv;

	/* Feature disabled when the threshold is 0 */
	if (!ic->ic_scs.scs_thrshld_dfs_reentry)
		return;

	/* Treat "no measurement" as zero interference */
	curr_chan_cca_intf = (as->as_cca_intf[curchan] == SCS_CCA_INTF_INVALID) ? 0 : as->as_cca_intf[curchan];

	/* When request is because of interference */
	if ((cc_flag & IEEE80211_SCS_INTF_CC) &&
		((curr_chan_cca_intf * 100) >
		 (ic->ic_scs.scs_thrshld_dfs_reentry_intf * IEEE80211_SCS_CCA_INTF_SCALE))) {
		as->as_dfs_reentry_cnt++;
		SCSDBG(SCSLOG_NOTICE, "channel picking discard counter %d\n",
					as->as_dfs_reentry_cnt);
		*dfs_reentry_clear = 0;
		if ((as->as_dfs_reentry_cnt * ic->ic_scs.scs_cca_sample_dur) >=
			ic->ic_scs.scs_thrshld_dfs_reentry) {
			aged_num = ieee80211_scs_aging(ic, ic->ic_scs.scs_thrshld_aging_dfsreent);
			if (aged_num > 0) {
				/* don't clear dfs reentry and wait for next interval result */
				SCSDBG(SCSLOG_NOTICE, "%u channel entry aged out, "
					"postpone DFS re-entry and re-try other channel\n",
					aged_num);
			} else {
				as->as_dfs_reentry_level = 1;
				SCSDBG(SCSLOG_NOTICE, "immediately DFS re-entry triggered with "
						"channel picking counter %d\n",
						as->as_dfs_reentry_cnt);
			}
		}
	}
}
+
/*
 * Periodic maintenance of the burst-switch rate limiter.  While paused:
 * once the pause interval elapses, clear the pause and, when
 * scs_burst_force_switch is set, immediately pick and switch to a new
 * channel.  While not paused: decay each armed queue slot by one sample
 * duration so old burst events expire.
 */
static void ieee80211_scs_update_scs_burst_queue(struct ieee80211com *ic)
{
	int i = 0;
	int new_chan;

	if (unlikely(ic->ic_scs.scs_burst_is_paused)) {
		if (time_after(jiffies, ic->ic_scs.scs_burst_pause_jiffies)) {
			ic->ic_scs.scs_burst_is_paused = 0;
			if (!ic->ic_scs.scs_burst_force_switch)
				return;

			/* Forced switch after the pause: pick any available channel */
			new_chan = ieee80211_scs_pick_channel(ic,
					IEEE80211_SCS_PICK_AVAILABLE_ANY_CHANNEL,
					IEEE80211_SCS_NA_CC);
			if (new_chan == SCS_BEST_CHAN_INVALID) {
				SCSDBG(SCSLOG_CRIT, "Switching to channel failed\n");
				return;
			}

			ic->ic_csw_reason = IEEE80211_CSW_REASON_SCS;
			/* ieee80211_scs_change_channel() returns 0 on success */
			if (!ieee80211_scs_change_channel(ic, new_chan))
				SCSDBG(SCSLOG_CRIT, "Switching to channel %d after pausing the burst channel switching\n", new_chan);
		}

		return;
	}

	/* Decay armed slots; a slot reaching 0 becomes free again */
	for (i = 0; i < ic->ic_scs.scs_burst_thresh; i++) {
		if (!ic->ic_scs.scs_burst_queue[i])
			continue;
		if (ic->ic_scs.scs_burst_queue[i] > ic->ic_scs.scs_cca_sample_dur)
			ic->ic_scs.scs_burst_queue[i] -= ic->ic_scs.scs_cca_sample_dur;
		else
			ic->ic_scs.scs_burst_queue[i] = 0;
	}

}
+
+static int ieee80211_scs_add_event_scs_burst_queue(struct ieee80211com *ic)
+{
+	int i = 0;
+
+	if (unlikely(ic->ic_scs.scs_burst_is_paused))
+		return 1;
+
+	for (i = 0; i < ic->ic_scs.scs_burst_thresh; i++) {
+		if (!ic->ic_scs.scs_burst_queue[i]) {
+			ic->ic_scs.scs_burst_queue[i] = ic->ic_scs.scs_burst_window;
+			return 0;
+		}
+	}
+
+	ic->ic_scs.scs_burst_is_paused = 1;
+	ic->ic_scs.scs_burst_pause_jiffies = jiffies + ic->ic_scs.scs_burst_pause_time * HZ;
+	memset(ic->ic_scs.scs_burst_queue, 0 , sizeof(ic->ic_scs.scs_burst_queue));
+	return 1;
+}
+
+static void
+ieee80211_scs_start_compare(unsigned long arg)
+{
+#define OPMODE_PEER_BW_SW_CCA40_TH1 30
+#define OPMODE_PEER_BW_SW_CCA40_TH2 10
+
+	struct ieee80211com *ic = (struct ieee80211com *) arg;
+	struct ieee80211vap *vap;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	int new_chan;
+	struct qtn_scs_info_set *scs_info_lh = sp->scs_info_lhost;
+	struct qtn_scs_info *scs_info_read = NULL;
+	uint32_t cc_flag;
+	uint32_t pmbl_err;
+	uint32_t dfs_reentry_clear = 1;
+	uint32_t compound_cca_intf, raw_cca_intf;
+	uint32_t clean_level = IEEE80211_SCS_STATE_PERIOD_CLEAN;
+	struct ieee80211_phy_stats phy_stats;
+	struct ap_state *as = ic->ic_scan->ss_scs_priv;
+	uint8_t *tdls_stats_buf = NULL;
+	uint16_t tdls_stats_buf_len = IEEE80211_MAX_TDLS_NODES *
+			sizeof(struct ieee80211_tdls_scs_stats);
+	uint16_t extra_ie_len = 0;
+	int tdls_update_failed = 0;
+	uint32_t stats_unstable = 0;
+	int32_t cca_sec40 = 0;
+	static uint8_t opmode_bw = QTN_BW_80M;
+	static uint8_t debounce = 0;
+	struct scs_chan_intf_params intf_params= {0};
+
+	if (ic->ic_get_phy_stats
+			&& !ic->ic_get_phy_stats(ic->ic_dev, ic, &phy_stats, 0)) {
+		uint32_t sp_errs;
+		uint32_t lp_errs;
+		int32_t idx = ieee80211_scs_get_channel_index(ic, ic->ic_curchan);
+		if (idx < 0 || idx >= ARRAY_SIZE(scs_info_lh->stats_history.sp_errs))
+			goto compare_end;
+		ieee80211_scs_update_cochan_filter_history(ic, &phy_stats,
+					&scs_info_lh->stats_history, idx);
+		sp_errs = ieee80211_scs_get_median(ic, &scs_info_lh->stats_history.sp_errs[idx]);
+		lp_errs = ieee80211_scs_get_median(ic, &scs_info_lh->stats_history.lp_errs[idx]);
+		ic->ic_scs.scs_sp_err_smthed = IEEE80211_SCS_SMOOTH(ic->ic_scs.scs_sp_err_smthed,
+				sp_errs, ic->ic_scs.scs_pmbl_err_smth_fctr);
+		ic->ic_scs.scs_lp_err_smthed = IEEE80211_SCS_SMOOTH(ic->ic_scs.scs_lp_err_smthed,
+				lp_errs, ic->ic_scs.scs_pmbl_err_smth_fctr);
+		ieee80211_scs_contribute_randomness(phy_stats.cca_int, phy_stats.cnt_lp_fail,
+			phy_stats.cnt_sp_fail);
+	}
+
+	/* Copy scs info into a local structure so MuC can continue fill it */
+	scs_info_read = kmalloc(sizeof(*scs_info_read), GFP_ATOMIC);
+	if (!scs_info_read) {
+		SCSDBG(SCSLOG_NOTICE, "SCS info allocation failed\n");
+		goto compare_end;
+	}
+	memcpy((void *)scs_info_read, (void *)&scs_info_lh->scs_info[scs_info_lh->valid_index],
+			sizeof(*scs_info_read));
+	if (scs_info_read->cca_try == 0) {
+		/* make sure cross channel switching stats are cleared */
+		clean_level = IEEE80211_SCS_STATE_MEASUREMENT_CHANGE_CLEAN;
+		goto compare_end_free_local_info;
+	}
+
+	/* Read and process off channel stats */
+	if (scs_info_read->oc_info_count) {
+		int i;
+		uint32_t pmbl;
+		struct qtn_scs_oc_info * p_oc_info;
+		struct ieee80211_channel *off_chan;
+
+		for (i = 0; i < scs_info_read->oc_info_count; i++) {
+			p_oc_info = &scs_info_read->oc_info[i];
+			if (p_oc_info->off_chan_cca_try_cnt) {
+				SCSDBG(SCSLOG_INFO, "OC: Channel=%d, bw=%d cca_busy=%d, "
+						"cca_smpl=%d, cca_try=%d, bcn_rcvd=%d, "
+						"crc_err=%d, sp_err=%d, lp_err=%d, "
+						"cca_pri=%d, cca_sec=%d, cca_sec40=%d\n",
+						p_oc_info->off_channel,
+						p_oc_info->off_chan_bw_sel,
+						p_oc_info->off_chan_cca_busy,
+						p_oc_info->off_chan_cca_sample_cnt,
+						p_oc_info->off_chan_cca_try_cnt,
+						p_oc_info->off_chan_beacon_recvd,
+						p_oc_info->off_chan_crc_errs,
+						p_oc_info->off_chan_sp_errs,
+						p_oc_info->off_chan_lp_errs,
+						p_oc_info->off_chan_cca_pri,
+						p_oc_info->off_chan_cca_sec,
+						p_oc_info->off_chan_cca_sec40);
+				off_chan = ieee80211_find_channel_by_ieee(ic, p_oc_info->off_channel);
+				if (off_chan == NULL) {
+					continue;
+				}
+				ieee80211_scs_scale_offchan_data(ic, p_oc_info);
+				raw_cca_intf = p_oc_info->off_chan_cca_busy;
+				compound_cca_intf = ieee80211_scs_fix_cca_intf(ic, NULL, raw_cca_intf,
+						p_oc_info->off_chan_sp_errs, p_oc_info->off_chan_lp_errs);
+				pmbl = (p_oc_info->off_chan_sp_errs * ic->ic_scs.scs_sp_wf +
+						p_oc_info->off_chan_lp_errs * ic->ic_scs.scs_lp_wf) / 100;
+				memset(&intf_params, 0, sizeof(intf_params));
+				intf_params.chan = off_chan;
+				intf_params.chan_bw = p_oc_info->off_chan_bw_sel;
+				intf_params.cca_intf = compound_cca_intf;
+				intf_params.pmbl_err = pmbl;
+				intf_params.cca_dur = IEEE80211_SCS_CCA_INTF_SCALE;
+				intf_params.cca_pri = p_oc_info->off_chan_cca_pri;
+				intf_params.cca_sec = p_oc_info->off_chan_cca_sec;
+				intf_params.cca_sec40 = p_oc_info->off_chan_cca_sec40;
+				ieee80211_scs_update_chans_cca_intf(ic, &intf_params, IEEE80211_SCS_OFFCHAN, NULL);
+			}
+		}
+	}
+
+	/* Scale co-channel data */
+	ieee80211_scs_scale_cochan_data(ic, scs_info_read);
+
+	SCSDBG(SCSLOG_INFO, "cca_try=%u, cca_idle=%u cca_busy=%u cca_intf=%u cca_tx=%u tx_ms=%u rx_ms=%u\n",
+			scs_info_read->cca_try,
+			scs_info_read->cca_idle,
+			scs_info_read->cca_busy,
+			scs_info_read->cca_interference,
+			scs_info_read->cca_tx,
+			scs_info_read->tx_usecs / 1000,
+			scs_info_read->rx_usecs / 1000);
+
+	/* Dynamic peer BW scheme based on interference on secondary 40 MHz */
+	if ((ieee80211_get_bw(ic) == BW_HT80) && ic->ic_opmode_bw_switch_en) {
+		vap = TAILQ_FIRST(&ic->ic_vaps);
+		ieee80211_param_from_qdrv(vap, IEEE80211_PARAM_GET_CCA_STATS, &cca_sec40, NULL, 0);
+
+		if ((opmode_bw != QTN_BW_40M) && (cca_sec40 > OPMODE_PEER_BW_SW_CCA40_TH1)) {
+			//Switch to 40 MHz
+			ieee80211_send_vht_opmode_to_all(ic, QTN_BW_40M);
+			opmode_bw = QTN_BW_40M;
+			SCSDBG(SCSLOG_INFO,"CCA_SEC40 = %d: request peer to switch to 40 MHz\n", cca_sec40);
+			debounce = 0;
+		} else if ((opmode_bw != QTN_BW_80M) && (cca_sec40 < OPMODE_PEER_BW_SW_CCA40_TH2)) {
+			if (debounce == 1) {
+				//Switch to 80 MHz
+				ieee80211_send_vht_opmode_to_all(ic, QTN_BW_80M);
+				opmode_bw = QTN_BW_80M;
+				SCSDBG(SCSLOG_INFO,"CCA_SEC40 = %d: request peer to switch to 80 MHz\n", cca_sec40);
+				debounce = 0;
+			} else {
+				debounce = 1;
+			}
+		} else {
+			debounce = 0;
+		}
+	}
+
+	if (ic->ic_scs.scs_debug_enable >= SCSLOG_VERBOSE) {
+		int node_index;
+
+		for (node_index = 0; node_index < scs_info_read->scs_vsp_info.num_of_assoc; node_index++) {
+			printk("SCS: AssocID = 0x%04X, tx_time = %u, rx_time =%u\n",
+				scs_info_read->scs_vsp_info.scs_vsp_node_stats[node_index].ni_associd,
+				scs_info_read->scs_vsp_info.scs_vsp_node_stats[node_index].tx_usecs / 1000,
+				scs_info_read->scs_vsp_info.scs_vsp_node_stats[node_index].rx_usecs / 1000);
+		}
+	}
+
+	raw_cca_intf = scs_info_read->cca_interference;
+	if ((ic->ic_opmode == IEEE80211_M_HOSTAP) && ieee80211_scs_tdls_link_is_existing(ic, NULL)) {
+		tdls_update_failed = ieee80211_scs_update_tdls_link_time(ic);
+		compound_cca_intf = ieee80211_scs_smooth_ap_cca_intf_time(ic, raw_cca_intf,
+					&stats_unstable);
+	} else {
+		compound_cca_intf = ieee80211_scs_fix_cca_intf(ic, NULL, raw_cca_intf,
+				ic->ic_scs.scs_sp_err_smthed, ic->ic_scs.scs_lp_err_smthed);
+	}
+	pmbl_err = (ic->ic_scs.scs_sp_err_smthed * ic->ic_scs.scs_sp_wf +
+			ic->ic_scs.scs_lp_err_smthed * ic->ic_scs.scs_lp_wf) / 100;
+
+	SCSDBG(SCSLOG_INFO, "current pmbl error = %u %u, smoothed = %u %u,"
+			" raw_cca_intf = %u, comp_cca_intf = %u\n",
+			phy_stats.cnt_sp_fail, phy_stats.cnt_lp_fail,
+			ic->ic_scs.scs_sp_err_smthed, ic->ic_scs.scs_lp_err_smthed,
+			raw_cca_intf, compound_cca_intf);
+
+	/* update smoothed free airtime */
+	if (ic->ic_scs.scs_cca_idle_smthed) {
+		ic->ic_scs.scs_cca_idle_smthed = IEEE80211_SCS_SMOOTH(ic->ic_scs.scs_cca_idle_smthed,
+				scs_info_read->cca_idle, ic->ic_scs.scs_cca_idle_smth_fctr);
+	} else {
+		ic->ic_scs.scs_cca_idle_smthed = scs_info_read->cca_idle;
+	}
+	SCSDBG(SCSLOG_INFO, "cca_idle_smthed %u\n", ic->ic_scs.scs_cca_idle_smthed);
+
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+		int cur_bw;
+
+		ieee80211_scs_collect_node_atten(ic);
+		cur_bw = ieee80211_get_bw(ic);
+
+		/* clear current cca intf at first */
+		memset(&intf_params, 0, sizeof(intf_params));
+		intf_params.chan = ic->ic_curchan;
+		intf_params.chan_bw = cur_bw;
+		intf_params.cca_intf = SCS_CCA_INTF_INVALID;
+		intf_params.pmbl_err = 0;
+		intf_params.cca_dur = IEEE80211_SCS_CCA_INTF_SCALE;
+		ieee80211_scs_update_chans_cca_intf(ic, &intf_params, IEEE80211_SCS_COCHAN, NULL);
+		/* update cca intf stats */
+		intf_params.cca_intf = compound_cca_intf;
+		intf_params.pmbl_err = pmbl_err;
+		intf_params.cca_pri = scs_info_read->cca_pri;
+		intf_params.cca_sec = scs_info_read->cca_sec20;
+		intf_params.cca_sec40 = scs_info_read->cca_sec40;
+		ieee80211_scs_update_chans_cca_intf(ic, &intf_params, IEEE80211_SCS_COCHAN, NULL);
+
+		/* update tx/rx time stats */
+		as->as_tx_ms = scs_info_read->tx_usecs / 1000;
+		as->as_rx_ms = scs_info_read->rx_usecs / 1000;
+		as->as_tx_ms_smth = IEEE80211_SCS_SMOOTH(as->as_tx_ms_smth, as->as_tx_ms,
+				ic->ic_scs.scs_as_tx_time_smth_fctr);
+		as->as_rx_ms_smth = IEEE80211_SCS_SMOOTH(as->as_rx_ms_smth, as->as_rx_ms,
+				ic->ic_scs.scs_as_rx_time_smth_fctr);
+		SCSDBG(SCSLOG_INFO, "AS tx rx = %u %u, smoothed = %u %u\n",
+				as->as_tx_ms, as->as_rx_ms, as->as_tx_ms_smth, as->as_rx_ms_smth);
+	}
+
+	cc_flag = ieee80211_is_cc_required(ic, compound_cca_intf,
+			ic->ic_scs.scs_cca_idle_smthed, pmbl_err);
+	if (cc_flag) {
+		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+			int is_wds_rbs;
+			cc_flag = ieee80211_scs_collect_ranking_stats(ic, scs_info_read, cc_flag,
+								compound_cca_intf);
+			if (ic->ic_scs.scs_debug_enable)
+				ieee80211_scs_show_ranking_stats(ic, 1, 0);
+
+			is_wds_rbs = ieee80211_scs_is_wds_rbs_node(ic);
+
+			/*
+			 * Pick a channel from DFS and Non-DFS set of channels;
+			 * Consider channel margins as well while we pick the channel
+			 */
+			new_chan = ieee80211_scs_pick_channel(ic, IEEE80211_SCS_PICK_AVAILABLE_ANY_CHANNEL, cc_flag);
+
+			if (!is_wds_rbs) {
+				if (!cc_flag) {
+					SCSDBG(SCSLOG_NOTICE, "all channel change request conditions are cleared\n");
+					goto compare_end_free_local_info;
+				}
+
+				if (new_chan == SCS_BEST_CHAN_INVALID) {
+					ieee80211_scs_update_dfs_reentry(ic, cc_flag, &dfs_reentry_clear);
+					goto compare_end_free_local_info;
+				}
+
+				if (ic->ic_scs.scs_report_only) {
+					SCSDBG(SCSLOG_NOTICE, "channel change is disabled under report only mode\n");
+					goto compare_end_free_local_info;
+				}
+
+				if (ieee80211_is_cac_in_progress(ic)) {
+					SCSDBG(SCSLOG_NOTICE, "Channel change is disabled during CAC\n");
+					goto compare_end_free_local_info;
+				}
+
+				if ((cc_flag == IEEE80211_SCS_SELF_CCA_CC) &&
+						ic->ic_get_cca_adjusting_status()) {
+					SCSDBG(SCSLOG_NOTICE, "Self channel change is paused while adjusting cca threshold\n");
+					goto compare_end_free_local_info;
+				}
+
+				if (tdls_update_failed || stats_unstable)
+					goto compare_end_free_local_info;
+
+				if (ic->ic_scs.scs_burst_enable && ieee80211_scs_add_event_scs_burst_queue(ic)) {
+					SCSDBG(SCSLOG_NOTICE, "Channel change is paused beacause of bursting\n");
+
+					goto compare_end_free_local_info;
+				}
+				ic->ic_csw_reason = CSW_REASON_SET_SCS_FLAG(cc_flag, IEEE80211_CSW_REASON_SCS);
+				if (!ieee80211_scs_change_channel(ic, new_chan)) {
+					int curchan_ieee = ic->ic_curchan->ic_ieee;
+
+					printk("SCS: Switching to chan %d, reason %x,"
+							" cca_intf %u %u, pmbl_ap %u %u, pmbl_sta %u %u, cca_idle %u\n",
+							new_chan, cc_flag, as->as_cca_intf[curchan_ieee], as->as_cca_intf[new_chan],
+							as->as_pmbl_err_ap[curchan_ieee], as->as_pmbl_err_ap[new_chan],
+							as->as_pmbl_err_sta[curchan_ieee], as->as_pmbl_err_sta[new_chan],
+							ic->ic_scs.scs_cca_idle_smthed);
+				}
+			} else {
+				/* In RBS mode - inform the MBS AP to change the channel */
+				/* WDS Link to MBS? */
+				if (new_chan == SCS_BEST_CHAN_INVALID) {
+					SCSDBG(SCSLOG_NOTICE, "new channel recommendation %d\n", new_chan);
+				}
+
+				SCSDBG(SCSLOG_NOTICE, "SCS: send busy_fraction %u with cca_intf %u to AP, pmbl_error=%u %u\n",
+					(raw_cca_intf * IEEE80211_11K_CCA_INTF_SCALE
+					/ IEEE80211_SCS_CCA_INTF_SCALE),
+					raw_cca_intf, ic->ic_scs.scs_sp_err_smthed, ic->ic_scs.scs_lp_err_smthed);
+
+				vap = TAILQ_FIRST(&ic->ic_vaps);
+				TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+					if (IEEE80211_VAP_WDS_IS_RBS(vap)) {
+						struct ieee80211_node *ni = NULL;
+						uint64_t tsf = 0;
+						uint16_t others_time;
+
+						ni = ieee80211_get_wds_peer_node_ref(vap);
+						if (ni) {
+							others_time = ni->ni_others_time;
+							SCSDBG(SCSLOG_NOTICE,
+								"RBS: allnodes(tx+rx)- wds %u\n", others_time);
+							/* here replace the old csa mgmt packet sending*/
+							ic->ic_get_tsf(&tsf);
+							ieee80211_send_action_cca_report(ni, 0,
+								(uint16_t)raw_cca_intf,
+								tsf, (uint16_t)scs_info_read->cca_try,
+								ic->ic_scs.scs_sp_err_smthed,
+								ic->ic_scs.scs_lp_err_smthed,
+								others_time, NULL, 0);
+							ieee80211_send_action_fat_report(ni, 0,
+								(uint16_t)raw_cca_intf,
+								tsf, (uint16_t)scs_info_read->cca_try,
+								(uint16_t)scs_info_read->cca_idle);
+							ieee80211_free_node(ni);
+						} else {
+							printk("%s: RBS: WDS Peer Node is NULL\n",__func__);
+						}
+					}
+				}
+			}
+		} else {
+			/* In STA mode - inform AP to change the channel */
+			SCSDBG(SCSLOG_NOTICE, "STA: report SCS measurements to AP\n");
+			if (!ic->ic_nonqtn_sta) {
+				tdls_stats_buf = kmalloc(tdls_stats_buf_len, GFP_ATOMIC);
+				if (!tdls_stats_buf) {
+					SCSDBG(SCSLOG_NOTICE, "TDLS stats buffer allocation failed\n");
+					goto compare_end_free_local_info;
+				}
+
+				TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+					memset(tdls_stats_buf, 0, tdls_stats_buf_len);
+					extra_ie_len = ieee80211_scs_add_tdls_stats_ie(vap, scs_info_read,
+							tdls_stats_buf, tdls_stats_buf_len);
+					/* STA should be associated */
+					if ((vap->iv_state == IEEE80211_S_RUN) && vap->iv_bss) {
+						uint64_t tsf = 0;
+						struct ieee80211_node *ni = vap->iv_bss;
+
+						ic->ic_get_tsf(&tsf);
+						SCSDBG(SCSLOG_NOTICE, "send busy_fraction %u with cca_intf %u to AP, pmbl_error=%u %u\n",
+							(raw_cca_intf * IEEE80211_11K_CCA_INTF_SCALE / IEEE80211_SCS_CCA_INTF_SCALE),
+							raw_cca_intf, ic->ic_scs.scs_sp_err_smthed, ic->ic_scs.scs_lp_err_smthed);
+						/* here replace the old csa mgmt packet sending*/
+						ieee80211_send_action_cca_report(ni, 0, (uint16_t)raw_cca_intf, tsf,
+								(uint16_t)scs_info_read->cca_try, ic->ic_scs.scs_sp_err_smthed,
+								ic->ic_scs.scs_lp_err_smthed, 0, tdls_stats_buf, extra_ie_len);
+						ieee80211_send_action_fat_report(ni, 0, (uint16_t)raw_cca_intf,
+									tsf, (uint16_t)scs_info_read->cca_try,
+									(uint16_t)scs_info_read->cca_idle);
+					}
+				}
+
+				if(tdls_stats_buf)
+					kfree(tdls_stats_buf);
+			}
+		}
+	}
+
+compare_end_free_local_info:
+	kfree(scs_info_read);
+compare_end:
+	if (ic->ic_scs.scs_burst_enable)
+		ieee80211_scs_update_scs_burst_queue(ic);
+
+	ieee80211_scs_clean_stats(ic, clean_level, dfs_reentry_clear);
+
+	mod_timer(&ic->ic_scs.scs_compare_timer,
+		  jiffies + (ic->ic_scs.scs_cca_sample_dur * HZ));
+}
+
+/*
+ * Force an SCS channel switch outside the normal sampling/comparison cycle.
+ * AP mode only; a no-op (with a log) when SCS is disabled or in STA mode.
+ * pick_flags is forwarded to ieee80211_scs_pick_channel() to select the
+ * candidate channel set (DFS / non-DFS).
+ */
+static void
+ieee80211_scs_switch_channel_manually(struct ieee80211com *ic, int pick_flags)
+{
+	int	new_chan;
+
+	if (!ic->ic_scs.scs_enable) {
+		SCSDBG(SCSLOG_CRIT, "Stop switching channel because SCS is disabled!\n");
+		return;
+	}
+
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+		/* refresh per-node attenuation data before ranking channels */
+		ieee80211_scs_collect_node_atten(ic);
+
+		if (ic->ic_scs.scs_debug_enable) {
+			ieee80211_scs_show_ranking_stats(ic, 1, 0);
+		}
+
+		/*
+		 * Pick a channel from DFS and Non-DFS sets;
+		 * If the picked channel is non-DFS, OCAC performs off-channel CAC for DFS channel;
+		 * If the picked channel is DFS, OCAC will not kickin
+		 */
+		new_chan = ieee80211_scs_pick_channel(ic,
+				pick_flags,
+				IEEE80211_SCS_NA_CC);
+		if (new_chan == SCS_BEST_CHAN_INVALID) {
+			goto sc_err;
+		}
+
+		/* record why the switch happened, then attempt it (0 == success) */
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_SCS;
+		if (!ieee80211_scs_change_channel(ic, new_chan)) {
+			SCSDBG(SCSLOG_CRIT, "Switching to channel %d manually\n", new_chan);
+		}
+	} else {
+		SCSDBG(SCSLOG_CRIT, "Support switch channel manually on AP side only now!\n");
+	}
+
+	return;
+
+sc_err:
+	SCSDBG(SCSLOG_CRIT, "Switch channel manually error!\n");
+}
+
+/*
+ * Arm the periodic SCS comparison timer.  The callback
+ * (ieee80211_scs_start_compare) first fires IEEE80211_SCS_COMPARE_INIT_TIMER
+ * seconds from now and receives the ieee80211com pointer as its data arg.
+ * Uses the legacy init_timer()/add_timer() kernel timer API.
+ */
+void ieee80211_scs_start_comparing_timer(struct ieee80211com *ic)
+{
+	init_timer(&ic->ic_scs.scs_compare_timer);
+	ic->ic_scs.scs_compare_timer.function = ieee80211_scs_start_compare;
+	ic->ic_scs.scs_compare_timer.data = (unsigned long) ic;
+	ic->ic_scs.scs_compare_timer.expires = jiffies + IEEE80211_SCS_COMPARE_INIT_TIMER * HZ;
+	add_timer(&ic->ic_scs.scs_compare_timer);
+}
+
+/*
+ * Start or stop the SCS statistics task.
+ * start != 0: reset stats and arm the comparing timer (only if not already on).
+ * start == 0: cancel the comparing timer (only if currently on).
+ * Idempotent: a redundant start/stop request is silently ignored.
+ * Always returns 0.
+ */
+static int
+ieee80211_wireless_scs_stats_task_start(struct ieee80211vap *vap, uint8_t start)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (start && !ic->ic_scs.scs_stats_on) {
+		ieee80211_scs_clean_stats(ic, IEEE80211_SCS_STATE_RESET, 0);
+		ieee80211_scs_start_comparing_timer(ic);
+		ic->ic_scs.scs_stats_on = 1;
+		ieee80211_wireless_scs_msg_send(vap, "SCS: stats task is started");
+	} else if (!start && ic->ic_scs.scs_stats_on) {
+		del_timer(&ic->ic_scs.scs_compare_timer);
+		ic->ic_scs.scs_stats_on = 0;
+		ieee80211_wireless_scs_msg_send(vap, "SCS: stats task is stopped");
+	}
+
+	return 0;
+}
+
+/*
+ * Start or stop the SCS off-channel sampling work item.
+ * Returns -1 when starting with a sampling interval outside
+ * [IEEE80211_SCS_SMPL_INTV_MIN, IEEE80211_SCS_SMPL_INTV_MAX]; 0 otherwise.
+ * On start, any pending work is cancelled before re-initialising and
+ * (AP mode only) scheduling the delayed work half a second out.
+ */
+static int
+ieee80211_wireless_scs_smpl_task_start(struct ieee80211vap *vap, uint8_t start)
+{
+#define IEEE80211_SCS_BUF_LEN	256
+	char msg_buf[IEEE80211_SCS_BUF_LEN];
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (start && ((ic->ic_scs.scs_sample_intv < IEEE80211_SCS_SMPL_INTV_MIN) ||
+		(ic->ic_scs.scs_sample_intv > IEEE80211_SCS_SMPL_INTV_MAX))) {
+		return -1;
+	}
+
+	if (start) {
+		/* drain any previously queued run before rescheduling */
+		cancel_delayed_work_sync(&ic->ic_scs_sample_work);
+		snprintf(msg_buf, sizeof(msg_buf),
+			"SCS: channel sampling started - interval is %u seconds",
+			ic->ic_scs.scs_sample_intv);
+		ieee80211_wireless_scs_msg_send(vap, msg_buf);
+		INIT_DELAYED_WORK(&ic->ic_scs_sample_work,
+				  ieee80211_wireless_scs_sampling_task);
+		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+			schedule_delayed_work(&ic->ic_scs_sample_work, (HZ / 2));
+		}
+	} else {
+		ieee80211_wireless_scs_msg_send(vap, "SCS: channel sampling disabled");
+		cancel_delayed_work_sync(&ic->ic_scs_sample_work);
+	}
+
+	return 0;
+}
+
+/*
+ * Print an SCS report to the kernel log.
+ * param == IEEE80211_SCS_CHAN_ALL: dump initial and current ranking tables.
+ * param == IEEE80211_SCS_CHAN_CURRENT: snapshot the MuC-shared SCS info for
+ * the current channel and print its CCA/tx/rx/preamble figures (requires
+ * scs_stats_on).  Requires at least one VAP in RUN state, since CCA data
+ * cannot be tied to a channel while scanning.
+ */
+static void
+ieee80211_wireless_scs_report_show(struct ieee80211com *ic, uint16_t param)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scs_info *scs_info_read = NULL;
+	struct ieee80211vap *vap;
+	uint32_t pmbl_fail;
+
+	/* CCA information cannot match a certain channel under scan state */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			break;
+		}
+	}
+	/* TAILQ_FOREACH leaves vap == NULL when no VAP was in RUN state */
+	if (vap == NULL) {
+		printk("SCS: No VAP in running state, no report available\n");
+		return;
+	}
+
+	if (param == IEEE80211_SCS_CHAN_ALL) {
+		ieee80211_show_initial_ranking_stats(ic);
+		ieee80211_scs_show_ranking_stats(ic, 0, 1);
+	} else if (param == IEEE80211_SCS_CHAN_CURRENT) {
+		if (ic->ic_scs.scs_stats_on) {
+			/* Copy scs info into a local structure so MuC can continue to fill it in */
+			scs_info_read = kmalloc(sizeof(*scs_info_read), GFP_KERNEL);
+			if (!scs_info_read) {
+				printk("SCS: info allocation failed\n");
+				return;
+			}
+			memcpy((void *)scs_info_read, &sp->scs_info_lhost->scs_info[sp->scs_info_lhost->valid_index],
+					sizeof(*scs_info_read));
+			/* cca_try == 0 means the MuC has not produced a sample yet */
+			if (scs_info_read->cca_try) {
+				ieee80211_scs_scale_cochan_data(ic, scs_info_read);
+				/* weighted short/long-preamble error count (percent weights) */
+				pmbl_fail = (ic->ic_scs.scs_sp_err_smthed * ic->ic_scs.scs_sp_wf +
+						ic->ic_scs.scs_lp_err_smthed * ic->ic_scs.scs_lp_wf) / 100;
+				printk("SCS: current channel %d, cca_try=%u, cca_idle=%u cca_busy=%u cca_intf=%u cca_tx=%u tx_ms=%u rx_ms=%u"
+					" pmbl_cnt=%u\n",
+					ic->ic_curchan->ic_ieee,
+					scs_info_read->cca_try,
+					scs_info_read->cca_idle,
+					scs_info_read->cca_busy,
+					scs_info_read->cca_interference,
+					scs_info_read->cca_tx,
+					scs_info_read->tx_usecs / 1000,
+					scs_info_read->rx_usecs / 1000,
+					pmbl_fail);
+			} else {
+				printk("Current channel report is temporarily not available, please try later\n");
+			}
+			kfree(scs_info_read);
+		} else {
+			printk("SCS is disabled, no report available for current channel\n");
+		}
+	}
+}
+
+static void
+ieee80211_wireless_scs_get_internal_stats(struct ieee80211com *ic, uint16_t param)
+{
+	int i;
+
+	if (!ic->ic_scs.scs_stats_on) {
+		printk("SCS stats is off, no stats available\n");
+		return;
+	}
+
+	printk("SCS lhost stats: off-channel sample counter:\n");
+	for (i = 0; i < IEEE80211_SCS_CNT_MAX; i++) {
+		printk("NO.%d=%u\n", i, ic->ic_scs.scs_cnt[i]);
+	}
+}
+
+/*
+ * Set (or, for a few GET_* sub-commands, read out) an SCS configuration
+ * parameter.
+ *
+ * 'value' packs a command in the high bits (>> IEEE80211_SCS_COMMAND_S) and
+ * a 16-bit argument in the low bits (IEEE80211_SCS_VALUE_M); some commands
+ * further split the argument into two bytes (u8_arg0 = high, u8_arg1 = low).
+ * Returns 0 on success, -1 on an unknown command or out-of-range argument.
+ * Unrecognized-but-in-range commands fall through the default case and
+ * return 0.
+ */
+int
+ieee80211_param_scs_set(struct net_device *dev, struct ieee80211vap *vap, u_int32_t value)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint16_t cmd =	value >> IEEE80211_SCS_COMMAND_S;
+	uint16_t arg = value & IEEE80211_SCS_VALUE_M;
+	uint8_t u8_arg0 = arg >> 8;
+	uint8_t u8_arg1 = arg & 0xFF;
+#if TOPAZ_FPGA_PLATFORM
+	/* NOTE(review): when TOPAZ_FPGA_PLATFORM is set this returns
+	 * unconditionally, making the rest of the function dead code on
+	 * that platform. */
+	printk("SCS is not supported yet on Topaz\n");
+	return -1;
+#endif
+
+	if (cmd >= IEEE80211_SCS_SET_MAX) {
+		printk(KERN_WARNING "%s: SCS: invalid config cmd %u, arg=%u\n",
+				dev->name, cmd, arg);
+		return -1;
+	}
+
+	SCSDBG(SCSLOG_INFO, "set param %u to value 0x%x\n", cmd, arg);
+
+	switch (cmd) {
+	case IEEE80211_SCS_SET_ENABLE:
+		if (arg > 1) {
+			return -1;
+		}
+
+		if (ic->ic_scs.scs_enable != arg) {
+			printk("%sabling SCS\n", arg ? "En" : "Dis");
+			ic->ic_scs.scs_enable = arg;
+			if (ic->ic_scs.scs_enable) {
+				if (ieee80211_wireless_scs_stats_task_start(vap, 1) < 0) {
+					return -1;
+				}
+			}
+			/* SCS off channel sampling follows SCS */
+			/* double negation compares the two as booleans: toggle
+			 * sampling only when its on/off state differs from arg */
+			if (!ic->ic_scs.scs_smpl_enable != !arg) {
+				if (ieee80211_wireless_scs_smpl_task_start(vap, arg) < 0) {
+					return -1;
+				} else {
+					ic->ic_scs.scs_smpl_enable = arg;
+				}
+			}
+		} else {
+			return 0;
+		}
+		break;
+	case IEEE80211_SCS_SET_DEBUG_ENABLE:
+		if (arg > 3) {
+			return -1;
+		}
+		if( ic->ic_scs.scs_debug_enable != arg) {
+			ic->ic_scs.scs_debug_enable = arg;
+		}
+		break;
+	case IEEE80211_SCS_SET_SAMPLE_ENABLE:
+		if (arg > 1) {
+			return -1;
+		}
+
+		/* only update the flag if the task start/stop succeeded */
+		if (ic->ic_scs.scs_smpl_enable != arg) {
+			if (ieee80211_wireless_scs_smpl_task_start(vap, arg) < 0) {
+				return -1;
+			} else {
+				ic->ic_scs.scs_smpl_enable = arg;
+			}
+		}
+		break;
+	case IEEE80211_SCS_SET_SAMPLE_DWELL_TIME:
+		if (arg < IEEE80211_SCS_SMPL_DWELL_TIME_MIN ||
+			arg > IEEE80211_SCS_SMPL_DWELL_TIME_MAX) {
+			return -1;
+		}
+		if (ic->ic_scs.scs_smpl_dwell_time != arg) {
+			ic->ic_scs.scs_smpl_dwell_time = arg;
+		}
+		break;
+	case IEEE80211_SCS_SET_SAMPLE_INTERVAL:
+		if (arg < IEEE80211_SCS_SMPL_INTV_MIN ||
+			arg > IEEE80211_SCS_SMPL_INTV_MAX) {
+			return -1;
+		}
+		ic->ic_scs.scs_sample_intv = arg;
+		break;
+	case IEEE80211_SCS_SET_SAMPLE_TYPE:
+		if (arg & (~QTN_OFF_CHAN_FLAG_MASK))
+			return -1;
+		/* (arg & -arg) == arg iff arg is 0 or has exactly one bit set */
+		if ((arg & -arg) != arg)
+			return -1;
+		ic->ic_scs.scs_sample_type = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_SMPL_PKTNUM:
+		ic->ic_scs.scs_thrshld_smpl_pktnum = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_SMPL_AIRTIME:
+		ic->ic_scs.scs_thrshld_smpl_airtime = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_ATTEN_INC:
+		if (arg > IEEE80211_SCS_THRSHLD_ATTEN_INC_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "attenuation increase threshold change from %u to %u\n",
+				ic->ic_scs.scs_thrshld_atten_inc, arg);
+		ic->ic_scs.scs_thrshld_atten_inc = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_DFS_REENTRY:
+		SCSDBG(SCSLOG_NOTICE, "DFS reentry threshold change from %u to %u\n",
+				ic->ic_scs.scs_thrshld_dfs_reentry, arg);
+		ic->ic_scs.scs_thrshld_dfs_reentry = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_DFS_REENTRY_INTF:
+		printk("DFS reentry cca intf threshold change from %u to %u\n",
+				ic->ic_scs.scs_thrshld_dfs_reentry_intf, arg);
+		ic->ic_scs.scs_thrshld_dfs_reentry_intf = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_DFS_REENTRY_MINRATE:
+		SCSDBG(SCSLOG_NOTICE, "DFS reentry minrate threshold change from %u to %u, unit 100kbps\n",
+				ic->ic_scs.scs_thrshld_dfs_reentry_minrate, arg);
+		ic->ic_scs.scs_thrshld_dfs_reentry_minrate = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_LOAD:
+		if (arg > IEEE80211_SCS_THRSHLD_LOADED_MAX)
+			return -1;
+		SCSDBG(SCSLOG_NOTICE, "traffic load threshold change from %u to %u\n",
+					ic->ic_scs.scs_thrshld_loaded, arg);
+		ic->ic_scs.scs_thrshld_loaded = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_AGING_NOR:
+		SCSDBG(SCSLOG_NOTICE, "normal aging threshold change from %u to %u minutes\n",
+					ic->ic_scs.scs_thrshld_aging_nor, arg);
+		ic->ic_scs.scs_thrshld_aging_nor = arg;
+		break;
+	case IEEE80211_SCS_SET_THRSHLD_AGING_DFSREENT:
+		SCSDBG(SCSLOG_NOTICE, "dfs re-entry aging threshold change from %u to %u minutes\n",
+					ic->ic_scs.scs_thrshld_aging_dfsreent, arg);
+		ic->ic_scs.scs_thrshld_aging_dfsreent = arg;
+		break;
+	case IEEE80211_SCS_SET_CCA_IDLE_THRSHLD:
+		SCSDBG(SCSLOG_NOTICE, "cca idle threshold change from %u to %u\n",
+				ic->ic_scs.scs_cca_idle_thrshld, arg);
+		ic->ic_scs.scs_cca_idle_thrshld = arg;
+		break;
+	case IEEE80211_SCS_SET_PMBL_ERR_THRSHLD:
+		SCSDBG(SCSLOG_NOTICE, "pmbl error threshold change from %u to %u\n",
+				ic->ic_scs.scs_pmbl_err_thrshld, arg);
+		ic->ic_scs.scs_pmbl_err_thrshld = arg;
+		break;
+	case IEEE80211_SCS_SET_CCA_INTF_LO_THR:
+		SCSDBG(SCSLOG_NOTICE, "cca intf low threshold change from %u to %u\n",
+				ic->ic_scs.scs_cca_intf_lo_thrshld, arg);
+		ic->ic_scs.scs_cca_intf_lo_thrshld = arg;
+		break;
+	case IEEE80211_SCS_SET_CCA_INTF_HI_THR:
+		SCSDBG(SCSLOG_NOTICE, "cca intf high threshold change from %u to %u\n",
+				ic->ic_scs.scs_cca_intf_hi_thrshld, arg);
+		ic->ic_scs.scs_cca_intf_hi_thrshld = arg;
+		break;
+	case IEEE80211_SCS_SET_CCA_INTF_RATIO:
+		SCSDBG(SCSLOG_NOTICE, "cca intf ratio threshold change from %u to %u\n",
+				ic->ic_scs.scs_cca_intf_ratio, arg);
+		ic->ic_scs.scs_cca_intf_ratio = arg;
+		break;
+	case IEEE80211_SCS_SET_CCA_INTF_DFS_MARGIN:
+		SCSDBG(SCSLOG_NOTICE, "cca intf dfs margin change from %u to %u\n",
+				ic->ic_scs.scs_cca_intf_dfs_margin, arg);
+		ic->ic_scs.scs_cca_intf_dfs_margin = arg;
+		break;
+	case IEEE80211_SCS_SET_CCA_SMPL_DUR:
+		if (arg < IEEE80211_SCS_CCA_DUR_MIN ||
+			arg > IEEE80211_SCS_CCA_DUR_MAX) {
+			return -1;
+		}
+		ic->ic_scs.scs_cca_sample_dur = arg;
+		/* measurement window changed: discard stats gathered under the old one */
+		ieee80211_scs_clean_stats(ic, IEEE80211_SCS_STATE_MEASUREMENT_CHANGE_CLEAN, 0);
+		break;
+	case IEEE80211_SCS_SET_REPORT_ONLY:
+		ic->ic_scs.scs_report_only = arg;
+		break;
+	case IEEE80211_SCS_GET_REPORT:
+		ieee80211_wireless_scs_report_show(ic, arg);
+		break;
+	case IEEE80211_SCS_GET_INTERNAL_STATS:
+		ieee80211_wireless_scs_get_internal_stats(ic, arg);
+		break;
+	case IEEE80211_SCS_SET_CCA_INTF_SMTH_FCTR:
+		if ((u8_arg0 > IEEE80211_CCA_INTF_SMTH_FCTR_MAX) ||
+			(u8_arg1 > IEEE80211_CCA_INTF_SMTH_FCTR_MAX)) {
+			return -1;
+		}
+		/* high byte = never-expired factor, low byte = expired factor */
+		ic->ic_scs.scs_cca_intf_smth_fctr[SCS_CCA_INTF_SMTH_FCTR_NOXP] = u8_arg0;
+		ic->ic_scs.scs_cca_intf_smth_fctr[SCS_CCA_INTF_SMTH_FCTR_XPED] = u8_arg1;
+		break;
+	case IEEE80211_SCS_RESET_RANKING_TABLE:
+		ieee80211_scs_clean_stats(ic, IEEE80211_SCS_STATE_RESET, 0);
+		break;
+	case IEEE80211_SCS_SET_CHAN_MTRC_MRGN:
+		if (arg > IEEE80211_SCS_CHAN_MTRC_MRGN_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "chan metric margin change from %u to %u\n",
+				ic->ic_scs.scs_chan_mtrc_mrgn, arg);
+		ic->ic_scs.scs_chan_mtrc_mrgn = arg;
+		break;
+	case IEEE80211_SCS_SET_LEAVE_DFS_CHAN_MTRC_MRGN:
+		if (arg > IEEE80211_SCS_CHAN_MTRC_MRGN_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "Leave DFS chan metric margin change from %u to %u\n",
+				ic->ic_scs.scs_leavedfs_chan_mtrc_mrgn, arg);
+		ic->ic_scs.scs_leavedfs_chan_mtrc_mrgn = arg;
+		break;
+	case IEEE80211_SCS_SET_RSSI_SMTH_FCTR:
+		if ((u8_arg0 > IEEE80211_SCS_RSSI_SMTH_FCTR_MAX) ||
+			(u8_arg1 > IEEE80211_SCS_RSSI_SMTH_FCTR_MAX))	{
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "rssi smoothing factor(up/down) change from %u/%u to %u/%u\n",
+				ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_UP],
+				ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_DOWN],
+				u8_arg0, u8_arg1);
+		ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_UP] = u8_arg0;
+		ic->ic_scs.scs_rssi_smth_fctr[SCS_RSSI_SMTH_FCTR_DOWN] = u8_arg1;
+		break;
+	case IEEE80211_SCS_SET_ATTEN_ADJUST:
+		/* arg is interpreted as a signed 8-bit value */
+		if (((int8_t)arg < IEEE80211_SCS_ATTEN_ADJUST_MIN) ||
+			((int8_t)arg > IEEE80211_SCS_ATTEN_ADJUST_MAX)) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "attenuation adjust change from %d to %d\n",
+				ic->ic_scs.scs_atten_adjust, (int8_t)arg);
+		ic->ic_scs.scs_atten_adjust = (int8_t)arg;
+		break;
+	case IEEE80211_SCS_SET_ATTEN_SWITCH_ENABLE:
+		if (arg > 1) {
+			return -1;
+		}
+
+		if (ic->ic_scs.scs_atten_sw_enable != arg) {
+			SCSDBG(SCSLOG_NOTICE, "attenuation channel change logic is %s\n",
+						arg ? "enabled" : "disabled");
+			ic->ic_scs.scs_atten_sw_enable = (uint16_t)arg;
+		}
+		break;
+	case IEEE80211_SCS_SET_PMBL_ERR_SMTH_FCTR:
+		if (arg > IEEE80211_SCS_PMBL_ERR_SMTH_FCTR_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "preamble error smoothing factor change from %u to %u\n",
+				ic->ic_scs.scs_pmbl_err_smth_fctr, arg);
+		ic->ic_scs.scs_pmbl_err_smth_fctr = arg;
+		break;
+	case IEEE80211_SCS_SET_PMP_RPT_CCA_SMTH_FCTR:
+		if (arg > IEEE80211_SCS_PMP_RPT_CCA_SMTH_FCTR_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_pmp_rpt_cca_smth_fctr change from %u to %u\n",
+				ic->ic_scs.scs_pmp_rpt_cca_smth_fctr, arg);
+		ic->ic_scs.scs_pmp_rpt_cca_smth_fctr = arg;
+		break;
+	case IEEE80211_SCS_SET_PMP_RX_TIME_SMTH_FCTR:
+		if (arg > IEEE80211_SCS_PMP_RX_TIME_SMTH_FCTR_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_pmp_rx_time_smth_fctr change from %u to %u\n",
+				ic->ic_scs.scs_pmp_rx_time_smth_fctr, arg);
+		ic->ic_scs.scs_pmp_rx_time_smth_fctr = arg;
+		break;
+	case IEEE80211_SCS_SET_PMP_TX_TIME_SMTH_FCTR:
+		if (arg > IEEE80211_SCS_PMP_TX_TIME_SMTH_FCTR_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_pmp_tx_time_smth_fctr change from %u to %u\n",
+				ic->ic_scs.scs_pmp_tx_time_smth_fctr, arg);
+		ic->ic_scs.scs_pmp_tx_time_smth_fctr = arg;
+		break;
+	case IEEE80211_SCS_SET_PMP_STATS_STABLE_PERCENT:
+		if (arg > IEEE80211_SCS_PMP_STATS_STABLE_PERCENT_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_pmp_stats_stable_percent change from %u to %u\n",
+				ic->ic_scs.scs_pmp_stats_stable_percent, arg);
+		ic->ic_scs.scs_pmp_stats_stable_percent = arg;
+		break;
+	case IEEE80211_SCS_SET_PMP_STATS_STABLE_RANGE:
+		if (arg > IEEE80211_SCS_PMP_STATS_STABLE_RANGE_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_pmp_stats_stable_range change from %u to %u\n",
+				ic->ic_scs.scs_pmp_stats_stable_range, arg);
+		ic->ic_scs.scs_pmp_stats_stable_range = arg;
+		break;
+	case IEEE80211_SCS_SET_PMP_STATS_CLEAR_INTERVAL:
+		if (arg > IEEE80211_SCS_PMP_STATS_CLEAR_INTERVAL_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_pmp_stats_clear_interval change from %u to %u\n",
+				ic->ic_scs.scs_pmp_stats_clear_interval, arg);
+		ic->ic_scs.scs_pmp_stats_clear_interval = arg;
+		break;
+	case IEEE80211_SCS_SET_PMP_TXTIME_COMPENSATION:
+		ieee80211_scs_set_time_compensation(SCS_TX_COMPENSTATION, u8_arg0, u8_arg1);
+		break;
+	case IEEE80211_SCS_SET_PMP_RXTIME_COMPENSATION:
+		ieee80211_scs_set_time_compensation(SCS_RX_COMPENSTATION, u8_arg0, u8_arg1);
+		break;
+	case IEEE80211_SCS_SET_PMP_TDLSTIME_COMPENSATION:
+		ieee80211_scs_set_time_compensation(SCS_TDLS_COMPENSTATION, u8_arg0, u8_arg1);
+		break;
+	case IEEE80211_SCS_SET_SWITCH_CHANNEL_MANUALLY:
+		ieee80211_scs_switch_channel_manually(ic, arg);
+		break;
+	case IEEE80211_SCS_SET_AS_RX_TIME_SMTH_FCTR:
+		if (arg > IEEE80211_SCS_AS_RX_TIME_SMTH_FCTR_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_as_rx_time_smth_fctr change from %u to %u\n",
+				ic->ic_scs.scs_as_rx_time_smth_fctr, arg);
+		ic->ic_scs.scs_as_rx_time_smth_fctr = arg;
+		break;
+	case IEEE80211_SCS_SET_AS_TX_TIME_SMTH_FCTR:
+		if (arg > IEEE80211_SCS_AS_TX_TIME_SMTH_FCTR_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "scs_as_tx_time_smth_fctr change from %u to %u\n",
+				ic->ic_scs.scs_as_tx_time_smth_fctr, arg);
+		ic->ic_scs.scs_as_tx_time_smth_fctr = arg;
+		break;
+	case IEEE80211_SCS_SET_PMBL_ERR_RANGE:
+		if (arg < IEEE80211_SCS_PMBL_ERR_RANGE_MIN) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "preamble error range change from %u to %u\n",
+				ic->ic_scs.scs_pmbl_err_range, arg);
+		ic->ic_scs.scs_pmbl_err_range = arg;
+		break;
+	case IEEE80211_SCS_SET_PMBL_ERR_MAPPED_INTF_RANGE:
+		if (arg > IEEE80211_SCS_PMBL_ERR_MAPPED_INTF_RANGE_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "preamble error mapped cca_intf range change from %u to %u\n",
+				ic->ic_scs.scs_pmbl_err_mapped_intf_range, arg);
+		ic->ic_scs.scs_pmbl_err_mapped_intf_range = arg;
+		break;
+	case IEEE80211_SCS_SET_PMBL_ERR_WF:
+		if ((u8_arg0 > IEEE80211_SCS_PMBL_ERR_WF_MAX) ||
+			(u8_arg1 > IEEE80211_SCS_PMBL_ERR_WF_MAX))	{
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "preamble error weighting factor(sp/lp) change from %u/%u to %u/%u\n",
+					ic->ic_scs.scs_sp_wf,
+					ic->ic_scs.scs_lp_wf,
+					u8_arg0, u8_arg1);
+		ic->ic_scs.scs_sp_wf = u8_arg0;
+		ic->ic_scs.scs_lp_wf = u8_arg1;
+		break;
+	case IEEE80211_SCS_SET_STATS_START:
+		SCSDBG(SCSLOG_NOTICE, "%sing scs stats\n", arg ? "start" : "stopp");
+		if (ieee80211_wireless_scs_stats_task_start(vap, !!arg) < 0) {
+			return -1;
+		}
+		break;
+	case IEEE80211_SCS_SET_CCA_IDLE_SMTH_FCTR:
+		if (arg > IEEE80211_SCS_CCA_IDLE_SMTH_FCTR_MAX) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "cca idle smoothing factor change from %u to %u\n",
+				ic->ic_scs.scs_cca_idle_smth_fctr, arg);
+		ic->ic_scs.scs_cca_idle_smth_fctr = arg;
+		break;
+	case IEEE80211_SCS_SET_CCA_THRESHOLD_TYPE:
+		if (arg > 1) {
+			return -1;
+		}
+		SCSDBG(SCSLOG_NOTICE, "cca thresholds switched to %s sensitive ones\n",
+				arg ? "more" : "less");
+		/* stored value is 1-based: 0/1 input maps to type 1/2 */
+		ic->ic_scs.scs_cca_threshold_type = arg + 1;
+		break;
+	case IEEE80211_SCS_SET_BURST_ENABLE:
+		SCSDBG(SCSLOG_NOTICE, "burst channel switching enable: %u\n", arg);
+		if (ic->ic_scs.scs_burst_enable != !!arg) {
+			ic->ic_scs.scs_burst_enable = !!arg;
+			memset(ic->ic_scs.scs_burst_queue, 0, sizeof(ic->ic_scs.scs_burst_queue));
+		}
+		break;
+	case IEEE80211_SCS_SET_BURST_WINDOW:
+		if ((arg < IEEE80211_SCS_BURST_WINDOW_MIN) ||
+			(arg > IEEE80211_SCS_BURST_WINDOW_MAX))
+			return -1;
+		SCSDBG(SCSLOG_NOTICE, "burst sliding window of time: %u\n", arg);
+		/* arg is in minutes; stored as seconds */
+		if (ic->ic_scs.scs_burst_window != (arg * 60)) {
+			ic->ic_scs.scs_burst_window = arg * 60;
+			memset(ic->ic_scs.scs_burst_queue, 0, sizeof(ic->ic_scs.scs_burst_queue));
+		}
+		break;
+	case IEEE80211_SCS_SET_BURST_THRESH:
+		if ((arg < IEEE80211_SCS_BURST_THRESH_MIN) ||
+			(arg > IEEE80211_SCS_BURST_THRESH_MAX))
+			return -1;
+		SCSDBG(SCSLOG_NOTICE, "burst channel switching threshold: %u\n", arg);
+		if (ic->ic_scs.scs_burst_thresh != arg) {
+			ic->ic_scs.scs_burst_thresh = arg;
+			memset(ic->ic_scs.scs_burst_queue, 0, sizeof(ic->ic_scs.scs_burst_queue));
+		}
+		break;
+	case IEEE80211_SCS_SET_BURST_PAUSE:
+		if ((arg < IEEE80211_SCS_BURST_PAUSE_MIN) ||
+			(arg > IEEE80211_SCS_BURST_PAUSE_MAX))
+			return -1;
+		SCSDBG(SCSLOG_NOTICE, "burst channel switching pause time: %u\n", arg);
+		/* arg is in minutes; stored as seconds */
+		ic->ic_scs.scs_burst_pause_time = arg * 60;
+		break;
+	case IEEE80211_SCS_SET_BURST_SWITCH:
+		SCSDBG(SCSLOG_NOTICE, "burst channel switching flag: %u\n", arg);
+		ic->ic_scs.scs_burst_force_switch = !!arg;
+		break;
+	default:
+		break;
+	}
+
+	SCSDBG(SCSLOG_INFO, "set param %u to value 0x%x completed successfully\n",
+			cmd, arg);
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_param_scs_set);
+
+/*
+ * Read back an SCS configuration parameter by command id.
+ * Command ids share the IEEE80211_SCS_SET_* namespace (hence the bound
+ * check against IEEE80211_SCS_SET_MAX); cmd 0 is accepted as an alias for
+ * SET_ENABLE for compatibility with "iwpriv ifname scs_get".
+ * On success writes the value into *value and returns 0; returns -1 for
+ * out-of-range or unsupported commands.
+ */
+static int
+ieee80211_param_scs_get(struct net_device *dev, uint16_t cmd, uint32_t *value)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (cmd >= IEEE80211_SCS_SET_MAX) {
+		printk(KERN_WARNING "%s: SCS: invalid config cmd %u\n",
+				dev->name, cmd);
+		return -1;
+	}
+
+	SCSDBG(SCSLOG_INFO, "get param %u\n", cmd);
+
+	switch (cmd) {
+	case 0: /* compatible with iwpriv ifname scs_get */
+	case IEEE80211_SCS_SET_ENABLE:
+		*value = ic->ic_scs.scs_enable;
+		break;
+	case IEEE80211_SCS_SET_SAMPLE_DWELL_TIME:
+		*value = ic->ic_scs.scs_smpl_dwell_time;
+		break;
+	case IEEE80211_SCS_SET_SAMPLE_INTERVAL:
+		*value = ic->ic_scs.scs_sample_intv;
+		break;
+	default:
+		SCSDBG(SCSLOG_INFO, "get param %u not supported\n", cmd);
+		return -1;
+	}
+
+	SCSDBG(SCSLOG_INFO, "get param %u: value 0x%x completed successfully\n",
+			cmd, *value);
+
+	return 0;
+}
+
+/*
+ * Fill a userspace ieee80211req_scs_currchan_rpt for the current channel.
+ * Snapshots the MuC-shared SCS info, scales the co-channel data, and copies
+ * the report to req->is_data (req->is_data_len bytes).
+ * Returns 0 on success; on failure returns a negative errno and, where
+ * applicable, sets *reason to an IEEE80211REQ_SCS_RESULT_* code
+ * (SCS disabled, no running VAP, or sample not yet available).
+ */
+static int
+ieee80211_scs_get_currchan_rpt(struct ieee80211com *ic, struct ieee80211req_scs *req, uint32_t *reason)
+{
+	struct ieee80211req_scs_currchan_rpt rpt;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct qtn_scs_info *scs_info_read = NULL;
+	struct ieee80211vap *vap;
+
+	if (!ic->ic_scs.scs_stats_on) {
+		*reason = IEEE80211REQ_SCS_RESULT_SCS_DISABLED;
+		return -EINVAL;
+	}
+
+	/* CCA information cannot match a certain channel under scan state */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			break;
+		}
+	}
+	/* vap is NULL here iff no VAP was in RUN state */
+	if (vap == NULL) {
+		*reason = IEEE80211REQ_SCS_RESULT_NO_VAP_RUNNING;
+		return -EINVAL;
+	}
+
+	/* Copy scs info into a local structure so MuC can continue to fill it in */
+	scs_info_read = kmalloc(sizeof(*scs_info_read), GFP_KERNEL);
+	if (!scs_info_read) {
+		SCSDBG(SCSLOG_NOTICE, "SCS info allocation failed\n");
+		return -ENOMEM;
+	}
+	memcpy((void *)scs_info_read, &sp->scs_info_lhost->scs_info[sp->scs_info_lhost->valid_index],
+			sizeof(*scs_info_read));
+	/* cca_try == 0 means no measurement has completed yet */
+	if (!scs_info_read->cca_try) {
+		*reason = IEEE80211REQ_SCS_RESULT_TMP_UNAVAILABLE;
+		kfree(scs_info_read);
+		return -EAGAIN;
+	}
+
+	ieee80211_scs_scale_cochan_data(ic, scs_info_read);
+
+	memset(&rpt, 0x0, sizeof(struct ieee80211req_scs_currchan_rpt));
+
+	rpt.iscr_curchan = ic->ic_curchan->ic_ieee;
+	rpt.iscr_cca_try = scs_info_read->cca_try;
+	rpt.iscr_cca_idle = scs_info_read->cca_idle;
+	rpt.iscr_cca_busy = scs_info_read->cca_busy;
+	rpt.iscr_cca_intf = scs_info_read->cca_interference;
+	rpt.iscr_cca_tx = scs_info_read->cca_tx;
+	rpt.iscr_tx_ms = scs_info_read->tx_usecs / 1000;
+	rpt.iscr_rx_ms = scs_info_read->rx_usecs / 1000;
+	/* weighted short/long-preamble error count (percent weights) */
+	rpt.iscr_pmbl = (ic->ic_scs.scs_sp_err_smthed * ic->ic_scs.scs_sp_wf +
+			ic->ic_scs.scs_lp_err_smthed * ic->ic_scs.scs_lp_wf) / 100;
+
+	if (copy_to_user(req->is_data, &rpt, req->is_data_len)) {
+		SCSDBG(SCSLOG_NOTICE, "copy_to_user data failed with op=0x%x\n", req->is_op);
+		kfree(scs_info_read);
+		return -EIO;
+	}
+
+	kfree(scs_info_read);
+	return 0;
+}
+
+/*
+ * Fill a userspace ieee80211req_scs_ranking_rpt with per-channel ranking
+ * data (DFS flag, tx power, beacon count, cca interference, metric and age,
+ * preamble errors, occupancy record) for every valid channel, capped at
+ * IEEE80211REQ_SCS_REPORT_CHAN_NUM entries.
+ * AP mode only and requires scs_stats_on; on failure sets *reason and
+ * returns a negative errno, else copies the report to req->is_data.
+ * NOTE(review): 'rpt' is static — presumably to keep the large struct off
+ * the kernel stack — so this function is not reentrant.
+ */
+static int
+ieee80211_scs_get_ranking_rpt(struct ieee80211com *ic, struct ieee80211req_scs *req, uint32_t *reason)
+{
+	static struct ieee80211req_scs_ranking_rpt rpt;
+	struct ieee80211req_scs_ranking_rpt_chan *chan_rpt;
+	int i;
+	int num = 0;
+	struct ieee80211_channel *chan;
+	struct ap_state *as;
+
+	if (!ic->ic_scs.scs_stats_on) {
+		*reason = IEEE80211REQ_SCS_RESULT_SCS_DISABLED;
+		return -EINVAL;
+	}
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		*reason = IEEE80211REQ_SCS_RESULT_APMODE_ONLY;
+		return -EINVAL;
+	}
+
+	as = ic->ic_scan->ss_scs_priv;
+
+	memset(&rpt, 0x0, sizeof(struct ieee80211req_scs_ranking_rpt));
+
+	/* the ranking table */
+	for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
+		chan = ieee80211_find_channel_by_ieee(ic, i);
+		if (!is_ieee80211_chan_valid(chan)) {
+			continue;
+		}
+
+		chan_rpt = &rpt.isr_chans[num];
+		chan_rpt->isrc_chan = i;
+		chan_rpt->isrc_dfs = !!(chan->ic_flags & IEEE80211_CHAN_DFS);
+		chan_rpt->isrc_txpwr = chan->ic_maxpower;
+		chan_rpt->isrc_numbeacons = as->as_numbeacons[i];
+		/* report 0 instead of the invalid-marker sentinel */
+		chan_rpt->isrc_cca_intf = (as->as_cca_intf[i] == SCS_CCA_INTF_INVALID) ? 0 : as->as_cca_intf[i];
+		chan_rpt->isrc_metric = as->as_chanmetric[i];
+		chan_rpt->isrc_metric_age = (jiffies - as->as_chanmetric_timestamp[i]) / HZ;
+		chan_rpt->isrc_pmbl_ap = as->as_pmbl_err_ap[i];
+		chan_rpt->isrc_pmbl_sta = as->as_pmbl_err_sta[i];
+		chan_rpt->isrc_times = ic->ic_chan_occupy_record.times[i];
+		chan_rpt->isrc_duration = ic->ic_chan_occupy_record.duration[i];
+		chan_rpt->isrc_chan_avail_status = ic->ic_chan_availability_status[chan->ic_ieee];
+		/* current channel: add the still-open occupancy interval */
+		if (i == ic->ic_chan_occupy_record.cur_chan) {
+			chan_rpt->isrc_duration += (jiffies - INITIAL_JIFFIES) / HZ -
+					ic->ic_chan_occupy_record.occupy_start;
+		}
+
+		num++;
+		if (num >= IEEE80211REQ_SCS_REPORT_CHAN_NUM)
+			break;
+	}
+	rpt.isr_num = num;
+
+	if (copy_to_user(req->is_data, &rpt, req->is_data_len)) {
+		SCSDBG(SCSLOG_NOTICE, "copy_to_user data failed with op=0x%x\n", req->is_op);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Build the SCS per-channel CCA interference report at 20/40/80 MHz
+ * granularity and copy it to the user buffer in req->is_data.
+ * Bandwidths wider than the current operating bandwidth are reported
+ * as SCS_CCA_INTF_INVALID.  AP mode only; requires SCS stats.
+ * Returns 0, or -EINVAL (with *reason set) / -EIO on failure.
+ * NOTE(review): rpt is static -- assumes serialized ioctl path.
+ */
+static int
+ieee80211_scs_get_interference_rpt(struct ieee80211com *ic, struct ieee80211req_scs *req, uint32_t *reason)
+{
+	static struct ieee80211req_scs_interference_rpt rpt;
+	struct ieee80211req_scs_interference_rpt_chan *chan_rpt;
+	struct ieee80211_channel *chan;
+	struct ap_state *as;
+	int i;
+	int num = 0;
+	int cur_bw = ieee80211_get_bw(ic);
+
+	if (!ic->ic_scs.scs_stats_on) {
+		*reason = IEEE80211REQ_SCS_RESULT_SCS_DISABLED;
+		return -EINVAL;
+	}
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		*reason = IEEE80211REQ_SCS_RESULT_APMODE_ONLY;
+		return -EINVAL;
+	}
+
+	as = ic->ic_scan->ss_scs_priv;
+
+	memset(&rpt, 0x0, sizeof(rpt));
+
+	/* the ranking table */
+	for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
+		chan = ieee80211_find_channel_by_ieee(ic, i);
+		if (!is_ieee80211_chan_valid(chan))
+			continue;
+
+		chan_rpt = &rpt.isr_chans[num];
+		chan_rpt->isrc_chan = i;
+
+		/* 20 MHz interference is the primary-channel measurement;
+		 * wider figures accumulate the secondary segments */
+		chan_rpt->isrc_cca_intf_20 = as->as_cca_intf_pri[i];
+		if (cur_bw < BW_HT40) {
+			chan_rpt->isrc_cca_intf_40 = SCS_CCA_INTF_INVALID;
+		} else {
+			chan_rpt->isrc_cca_intf_40 = as->as_cca_intf_pri[i] +
+						as->as_cca_intf_sec[i];
+		}
+		if (cur_bw < BW_HT80) {
+			chan_rpt->isrc_cca_intf_80 = SCS_CCA_INTF_INVALID;
+		} else {
+			chan_rpt->isrc_cca_intf_80 = as->as_cca_intf_pri[i] +
+						as->as_cca_intf_sec[i] +
+						as->as_cca_intf_sec40[i];
+		}
+
+		num++;
+		if (num >= IEEE80211REQ_SCS_REPORT_CHAN_NUM)
+			break;
+	}
+	rpt.isr_num = num;
+
+	if (copy_to_user(req->is_data, &rpt, req->is_data_len) != 0) {
+		SCSDBG(SCSLOG_NOTICE, "copy_to_user data failed with op=0x%x\n", req->is_op);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Build a 0..100 per-channel score report from the SCS channel metrics
+ * and copy it to the user buffer in req->is_data.  Scores are a linear
+ * rescaling of each channel's metric between the min and max valid
+ * metrics observed in this pass (see the algorithm comment below).
+ * Returns 0, or -EINVAL (with *reason set) / -EIO on failure.
+ * NOTE(review): rpt is static -- assumes serialized ioctl path.
+ */
+static int
+ieee80211_scs_get_score_rpt(struct ieee80211com *ic, struct ieee80211req_scs *req, uint32_t *reason)
+{
+	static struct ieee80211req_scs_score_rpt rpt;
+	struct ieee80211req_scs_score_rpt_chan *chan_rpt;
+	struct ieee80211_channel *chan;
+	int32_t chan_metric[IEEE80211REQ_SCS_REPORT_CHAN_NUM];
+	/* -1 == "no valid metric seen yet" sentinel */
+	int32_t max_metric = -1;
+	int32_t min_metric = -1;
+	int32_t max_diff;
+	int iter;
+	int num = 0;
+	uint8_t rate_ratio;
+
+	if (!ic->ic_scs.scs_stats_on) {
+		*reason = IEEE80211REQ_SCS_RESULT_SCS_DISABLED;
+		return -EINVAL;
+	}
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		*reason = IEEE80211REQ_SCS_RESULT_APMODE_ONLY;
+		return -EINVAL;
+	}
+
+	if (ic->ic_scan == NULL ||
+			ic->ic_scan->ss_scs_priv == NULL ||
+			ic->ic_curchan == IEEE80211_CHAN_ANYC) {
+		*reason = IEEE80211REQ_SCS_RESULT_TMP_UNAVAILABLE;
+		return -EINVAL;
+	}
+
+	memset(&rpt, 0x0, sizeof(struct ieee80211req_scs_score_rpt));
+
+	/* refresh stats before computing metrics */
+	ieee80211_scs_aging(ic, ic->ic_scs.scs_thrshld_aging_nor);
+	rate_ratio = ieee80211_scs_calc_rate_ratio(ic);
+
+	/* first pass: collect metrics and track the valid min/max */
+	for (iter = 0; iter < ic->ic_nchans; iter++) {
+		chan = &ic->ic_channels[iter];
+		if (!isset(ic->ic_chan_active, chan->ic_ieee) ||
+				!ieee80211_chan_allowed_in_band(ic, chan,
+								ic->ic_opmode)) {
+			continue;
+		}
+		chan_rpt = &rpt.isr_chans[num];
+		chan_rpt->isrc_chan = chan->ic_ieee;
+		chan_rpt->isrc_score = 0;
+
+		ieee80211_scs_get_chan_metric(ic, chan, rate_ratio,
+				&chan_metric[num], NULL, IEEE80211_SCS_NA_CC);
+		/* only metrics in [0, SCS_MAX_RAW_CHAN_METRIC) count as valid */
+		if (chan_metric[num] >= 0 &&
+				chan_metric[num] < SCS_MAX_RAW_CHAN_METRIC) {
+			if (max_metric == -1 ||
+					chan_metric[num] > max_metric) {
+				max_metric = chan_metric[num];
+			}
+			if (min_metric == -1 ||
+					chan_metric[num] < min_metric) {
+				min_metric = chan_metric[num];
+			}
+		}
+
+		num++;
+		if (num >= IEEE80211REQ_SCS_REPORT_CHAN_NUM) {
+			break;
+		}
+	}
+	if (num == 0) {
+		*reason = IEEE80211REQ_SCS_RESULT_TMP_UNAVAILABLE;
+		return -EINVAL;
+	}
+
+	rpt.isr_num = num;
+
+	/* For the scoring, the algorithm as below:
+	 *   1. For the channel which has the minimum valid metric, the score is 100
+	 *   2. For the channel which has the maximum valid metric, the score is 0
+	 *   3. For other channels that have valid metrics, the score is
+	 *         100 * (max_valid_metric - metric) / (max_valid_metric - min_valid_metric)
+	 *   4. For invalid metric, if it is larger than maximum valid metric, the score is 0;
+	 *      and if it is less than minimum valid metric - it may result from too large
+	 *      power difference from the current channel, the score is 100.
+	 */
+	if (max_metric != -1) {
+		max_diff = max_metric - min_metric;
+		for (iter = 0; iter < num; iter++) {
+			chan_rpt = &rpt.isr_chans[iter];
+			if (chan_metric[iter] > max_metric) {
+				chan_rpt->isrc_score = 0;
+			} else if (chan_metric[iter] < min_metric){
+				chan_rpt->isrc_score = 100;
+			} else {
+				/* max_diff == 0: all valid metrics equal */
+				if (max_diff == 0) {
+					chan_rpt->isrc_score = 100;
+				} else {
+					chan_rpt->isrc_score =
+							100 * (max_metric - chan_metric[iter]) / max_diff;
+				}
+			}
+		}
+	}
+
+
+	if (copy_to_user(req->is_data, &rpt, req->is_data_len)) {
+		SCSDBG(SCSLOG_NOTICE, "copy_to_user data failed with op=0x%x\n", req->is_op);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Build the initial (auto-channel-selection) ranking report from the
+ * scan-time ap_state (ss_priv) and copy it to req->is_data.  Unlike the
+ * runtime ranking report this reads ss_priv (initial ACS data) rather
+ * than ss_scs_priv, and requires at least one VAP in the RUN state.
+ * Returns 0, -EINVAL / -EAGAIN with *reason set, or -EIO on copy failure.
+ * NOTE(review): rpt is static -- assumes serialized ioctl path.
+ */
+static int
+ieee80211_scs_get_init_ranking_rpt(struct ieee80211com *ic, struct ieee80211req_scs *req, uint32_t *reason)
+{
+	static struct ieee80211req_scs_ranking_rpt rpt;
+	struct ieee80211req_scs_ranking_rpt_chan *chan_rpt;
+	int i;
+	int num = 0;
+	struct ieee80211_channel *chan;
+	struct ap_state *as;
+	struct ieee80211vap *vap;
+
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
+		*reason = IEEE80211REQ_SCS_RESULT_APMODE_ONLY;
+		return -EINVAL;
+	}
+
+	/* NOTE(review): ic->ic_scan itself is not NULL-checked here, unlike
+	 * ieee80211_scs_get_score_rpt() -- confirm it is always valid. */
+	as = ic->ic_scan->ss_priv;
+	if (as == NULL) {
+		*reason = IEEE80211REQ_SCS_RESULT_AUTOCHAN_DISABLED;
+		return -EINVAL;
+	}
+
+	/* when in auto channel scanning */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			break;
+		}
+	}
+	/* vap is NULL when the loop finished without finding a running VAP */
+	if (vap == NULL) {
+		*reason = IEEE80211REQ_SCS_RESULT_TMP_UNAVAILABLE;
+		return -EAGAIN;
+	}
+
+	memset(&rpt, 0x0, sizeof(struct ieee80211req_scs_ranking_rpt));
+
+	/* the ranking table */
+	for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
+		chan = ieee80211_find_channel_by_ieee(ic, i);
+		if (chan == NULL) {
+			continue;
+		}
+
+		chan_rpt = &rpt.isr_chans[num];
+		chan_rpt->isrc_chan = i;
+		chan_rpt->isrc_dfs = !!(chan->ic_flags & IEEE80211_CHAN_DFS);
+		chan_rpt->isrc_txpwr = chan->ic_maxpower;
+		chan_rpt->isrc_numbeacons = as->as_numbeacons[i];
+		chan_rpt->isrc_metric = as->as_chanmetric[i];
+		chan_rpt->isrc_cci = as->as_cci[i];
+		chan_rpt->isrc_aci = as->as_aci[i];
+
+		num++;
+		if (num >= IEEE80211REQ_SCS_REPORT_CHAN_NUM)
+			break;
+	}
+	rpt.isr_num = num;
+
+	if (copy_to_user(req->is_data, &rpt, req->is_data_len)) {
+		SCSDBG(SCSLOG_NOTICE, "copy_to_user data failed with op=0x%x\n", req->is_op);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Snapshot every SCS configuration parameter into a kmalloc'd array of
+ * ieee80211req_scs_param_rpt (indexed by the SCS_* parameter enum) and
+ * copy min(req->is_data_len, array size) bytes to userspace.
+ * Returns 0 on success or -EIO on allocation / copy failure.
+ * The "reason" out-parameter is accepted for signature parity with the
+ * other report getters but is never written here.
+ */
+static int
+ieee80211_scs_get_param_rpt(struct ieee80211com *ic, struct ieee80211req_scs *req, uint32_t *reason)
+{
+	int retval = 0;
+	uint32_t i, len;
+	struct ieee80211req_scs_param_rpt *rpt;
+	struct ieee80211_scs *scs = &(ic->ic_scs);
+
+	len = sizeof(*rpt)*SCS_PARAM_MAX;
+	rpt = (struct ieee80211req_scs_param_rpt *)kmalloc(len, GFP_KERNEL);
+	if (rpt == NULL) {
+		retval = -EIO;
+		goto ready_to_return;
+	}
+
+	memset(rpt, 0, len);
+
+	/* sampling configuration */
+	rpt[SCS_SMPL_DWELL_TIME].cfg_param = scs->scs_smpl_dwell_time;
+	rpt[SCS_SAMPLE_INTV].cfg_param = scs->scs_sample_intv;
+	rpt[SCS_SAMPLE_TYPE].cfg_param = scs->scs_sample_type;
+	rpt[SCS_THRSHLD_SMPL_PKTNUM].cfg_param = scs->scs_thrshld_smpl_pktnum;
+	rpt[SCS_THRSHLD_SMPL_AIRTIME].cfg_param = scs->scs_thrshld_smpl_airtime;
+
+	/* decision thresholds */
+	rpt[SCS_THRSHLD_ATTEN_INC].cfg_param = scs->scs_thrshld_atten_inc;
+	rpt[SCS_THRSHLD_DFS_REENTRY].cfg_param = scs->scs_thrshld_dfs_reentry;
+	rpt[SCS_THRSHLD_DFS_REENTRY_MINRATE].cfg_param = scs->scs_thrshld_dfs_reentry_minrate;
+	rpt[SCS_THRSHLD_DFS_REENTRY_INTF].cfg_param = scs->scs_thrshld_dfs_reentry_intf;
+	rpt[SCS_THRSHLD_LOADED].cfg_param = scs->scs_thrshld_loaded;
+	rpt[SCS_THRSHLD_AGING_NOR].cfg_param = scs->scs_thrshld_aging_nor;
+	rpt[SCS_THRSHLD_AGING_DFSREENT].cfg_param = scs->scs_thrshld_aging_dfsreent;
+
+	/* on/off switches */
+	rpt[SCS_ENABLE].cfg_param = (uint32_t)(scs->scs_enable);
+	rpt[SCS_DEBUG_ENABLE].cfg_param = (uint32_t)(scs->scs_debug_enable);
+	rpt[SCS_SMPL_ENABLE].cfg_param = (uint32_t)(scs->scs_smpl_enable);
+	rpt[SCS_REPORT_ONLY].cfg_param = (uint32_t)(scs->scs_report_only);
+
+	/* CCA / preamble-error tuning */
+	rpt[SCS_CCA_IDLE_THRSHLD].cfg_param = scs->scs_cca_idle_thrshld;
+	rpt[SCS_CCA_INTF_HI_THRSHLD].cfg_param = scs->scs_cca_intf_hi_thrshld;
+	rpt[SCS_CCA_INTF_LO_THRSHLD].cfg_param = scs->scs_cca_intf_lo_thrshld;
+	rpt[SCS_CCA_INTF_RATIO].cfg_param = scs->scs_cca_intf_ratio;
+	rpt[SCS_CCA_INTF_DFS_MARGIN].cfg_param = scs->scs_cca_intf_dfs_margin;
+	rpt[SCS_PMBL_ERR_THRSHLD].cfg_param = scs->scs_pmbl_err_thrshld;
+	rpt[SCS_CCA_SAMPLE_DUR].cfg_param = scs->scs_cca_sample_dur;
+	rpt[SCS_CCA_INTF_SMTH_NOXP].cfg_param = (uint32_t)(scs->scs_cca_intf_smth_fctr[0]);
+	rpt[SCS_CCA_INTF_SMTH_XPED].cfg_param = (uint32_t)(scs->scs_cca_intf_smth_fctr[1]);
+	rpt[SCS_RSSI_SMTH_UP].cfg_param = (uint32_t)(scs->scs_rssi_smth_fctr[0]);
+	rpt[SCS_RSSI_SMTH_DOWN].cfg_param = (uint32_t)(scs->scs_rssi_smth_fctr[1]);
+
+	rpt[SCS_CHAN_MTRC_MRGN].cfg_param = (uint32_t)(scs->scs_chan_mtrc_mrgn);
+	rpt[SCS_LEAVE_DFS_CHAN_MTRC_MRGN].cfg_param = (uint32_t)(scs->scs_leavedfs_chan_mtrc_mrgn);
+
+	/* the attenuation adjustment is the only signed parameter */
+	rpt[SCS_ATTEN_ADJUST].signed_param_flag  = 1;
+	rpt[SCS_ATTEN_ADJUST].cfg_param = (uint32_t)(scs->scs_atten_adjust);
+	rpt[SCS_ATTEN_SW_ENABLE].cfg_param = (uint32_t)(scs->scs_atten_sw_enable);
+
+	/* smoothing factors and stability windows */
+	rpt[SCS_PMBL_ERR_SMTH_FCTR].cfg_param = scs->scs_pmbl_err_smth_fctr;
+	rpt[SCS_PMBL_ERR_RANGE].cfg_param = scs->scs_pmbl_err_range;
+	rpt[SCS_PMBL_ERR_MAPPED_INTF_RANGE].cfg_param = scs->scs_pmbl_err_mapped_intf_range;
+	rpt[SCS_SP_WF].cfg_param = scs->scs_sp_wf;
+	rpt[SCS_LP_WF].cfg_param = scs->scs_lp_wf;
+	rpt[SCS_PMP_RPT_CCA_SMTH_FCTR].cfg_param = (uint32_t)(scs->scs_pmp_rpt_cca_smth_fctr);
+	rpt[SCS_PMP_RX_TIME_SMTH_FCTR].cfg_param = (uint32_t)(scs->scs_pmp_rx_time_smth_fctr);
+	rpt[SCS_PMP_TX_TIME_SMTH_FCTR].cfg_param = (uint32_t)(scs->scs_pmp_tx_time_smth_fctr);
+	rpt[SCS_PMP_STATS_STABLE_PERCENT].cfg_param = (uint32_t)(scs->scs_pmp_stats_stable_percent);
+	rpt[SCS_PMP_STATS_STABLE_RANGE].cfg_param = (uint32_t)(scs->scs_pmp_stats_stable_range);
+	rpt[SCS_PMP_STATS_CLEAR_INTERVAL].cfg_param = (uint32_t)(scs->scs_pmp_stats_clear_interval);
+	rpt[SCS_AS_RX_TIME_SMTH_FCTR].cfg_param = (uint32_t)(scs->scs_as_rx_time_smth_fctr);
+	rpt[SCS_AS_TX_TIME_SMTH_FCTR].cfg_param = (uint32_t)(scs->scs_as_tx_time_smth_fctr);
+	rpt[SCS_CCA_IDLE_SMTH_FCTR].cfg_param = (uint32_t)(scs->scs_cca_idle_smth_fctr);
+	rpt[SCS_CCA_THRESHOD_TYPE].cfg_param = (uint32_t)(scs->scs_cca_threshold_type);
+
+	/* per-index tx/rx/tdls airtime compensation tables */
+	for (i = 0; i < SCS_MAX_TXTIME_COMP_INDEX; i++) {
+		rpt[SCS_TX_TIME_COMPENSTATION_START+i].cfg_param = tx_time_compenstation[i];
+	}
+	for (i = 0; i < SCS_MAX_RXTIME_COMP_INDEX; i++) {
+		rpt[SCS_RX_TIME_COMPENSTATION_START+i].cfg_param = rx_time_compenstation[i];
+	}
+	for (i = 0; i < SCS_MAX_TDLSTIME_COMP_INDEX; i++) {
+		rpt[SCS_TDLS_TIME_COMPENSTATION_START+i].cfg_param = tdls_time_compenstation[i];
+	}
+
+	/* burst-switch settings; windows are stored in seconds, shown in minutes */
+	rpt[SCS_BURST_ENABLE].cfg_param = scs->scs_burst_enable;
+	rpt[SCS_BURST_WINDOW].cfg_param = scs->scs_burst_window / 60;
+	rpt[SCS_BURST_THRESH].cfg_param = scs->scs_burst_thresh;
+	rpt[SCS_BURST_PAUSE_TIME].cfg_param = scs->scs_burst_pause_time / 60;
+	rpt[SCS_BURST_FORCE_SWITCH].cfg_param = scs->scs_burst_force_switch;
+
+	if (copy_to_user(req->is_data, rpt, MIN(req->is_data_len, len))) {
+		SCSDBG(SCSLOG_NOTICE, "copy_to_user data failed with op=0x%x\n", req->is_op);
+		retval = -EIO;
+		goto ready_to_return;
+	}
+
+ready_to_return:
+	if (rpt != NULL) {
+		kfree(rpt);
+		rpt = NULL;
+	}
+	return retval;
+}
+
+/* WAR for bug16636, supposed to be removed after cca threshold re-tuned */
+/*
+ * On Topaz A2 hardware in a very dense neighborhood, switch the CCA
+ * thresholds to a less sensitive set: unfix the thresholds, push the
+ * SET_CCA_THRESHOLD_TYPE SCS command to qdrv, and re-fix them only if
+ * the set succeeded.
+ */
+void ieee80211_scs_adjust_cca_threshold(struct ieee80211com *ic)
+{
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	uint32_t value = IEEE80211_SCS_SET_CCA_THRESHOLD_TYPE << IEEE80211_SCS_COMMAND_S;
+
+	if (ieee80211_get_type_of_neighborhood(ic) == IEEE80211_NEIGHBORHOOD_TYPE_VERY_DENSE &&
+				ic->ic_ver_hw == HARDWARE_REVISION_TOPAZ_A2) {
+		IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN, "%s: neighborhood is very dense, "
+					"switch cca thresholds to less sensitive ones\n", __func__);
+		ieee80211_param_to_qdrv(ss->ss_vap, IEEE80211_PARAM_CCA_FIXED, 0, NULL, 0);
+		if (ieee80211_param_scs_set(ss->ss_vap->iv_dev, ss->ss_vap, value) == 0) {
+			ieee80211_param_to_qdrv(ss->ss_vap, IEEE80211_PARAM_SCS, value, NULL, 0);
+			ieee80211_param_to_qdrv(ss->ss_vap, IEEE80211_PARAM_CCA_FIXED, 1, NULL, 0);
+		}
+	}
+}
+
+/*
+ * SCS sub-ioctl dispatcher: copy the ieee80211req_scs request from
+ * userspace, validate its pointers, route to the matching report getter
+ * by req.is_op, then write the result reason code back to req.is_status.
+ * Returns the getter's return value, or -EFAULT/-EIO/-EINVAL for bad
+ * pointers, copy failures, or an unknown opcode.
+ */
+static int
+ieee80211_subioctl_scs(struct net_device *dev, struct ieee80211req_scs __user* ps)
+{
+	int retval = 0;
+	struct ieee80211vap	*vap = netdev_priv(dev);
+	struct ieee80211com	*ic  = vap->iv_ic;
+	struct ieee80211req_scs req;
+	uint32_t reason = IEEE80211REQ_SCS_RESULT_OK;
+
+	if (!ps) {
+		SCSDBG(SCSLOG_NOTICE, "%s: NULL pointer for user request\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (copy_from_user(&req, ps, sizeof(struct ieee80211req_scs))) {
+		SCSDBG(SCSLOG_NOTICE, "%s: copy_from_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+
+	/* every GET opcode needs a data buffer to fill */
+	if ((req.is_op & IEEE80211REQ_SCS_FLAG_GET) && (!req.is_data)) {
+		SCSDBG(SCSLOG_NOTICE, "%s: NULL pointer for GET operation\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	/* is_status is mandatory: the reason code is always reported back */
+	if (!req.is_status) {
+		SCSDBG(SCSLOG_NOTICE, "%s: NULL pointer for reason field\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	SCSDBG(SCSLOG_INFO, "%s: op=0x%x\n", __FUNCTION__, req.is_op);
+	switch (req.is_op) {
+	case IEEE80211REQ_SCS_GET_CURRCHAN_RPT:
+		retval = ieee80211_scs_get_currchan_rpt(ic, &req, &reason);
+		break;
+	case IEEE80211REQ_SCS_GET_RANKING_RPT:
+		retval = ieee80211_scs_get_ranking_rpt(ic, &req, &reason);
+		break;
+	case IEEE80211REQ_SCS_GET_INTERFERENCE_RPT:
+		retval = ieee80211_scs_get_interference_rpt(ic, &req, &reason);
+		break;
+	case IEEE80211REQ_SCS_GET_INIT_RANKING_RPT:
+		retval = ieee80211_scs_get_init_ranking_rpt(ic, &req, &reason);
+		break;
+	case IEEE80211REQ_SCS_GET_PARAM_RPT:
+		retval = ieee80211_scs_get_param_rpt(ic, &req, &reason);
+		break;
+	case IEEE80211REQ_SCS_GET_SCORE_RPT:
+		retval = ieee80211_scs_get_score_rpt(ic, &req, &reason);
+		break;
+	default:
+		SCSDBG(SCSLOG_NOTICE, "unknown ioctl op=0x%x\n", req.is_op);
+		return -EINVAL;
+	}
+
+	/* report the detailed reason even when the getter failed */
+	if (copy_to_user(req.is_status, &reason, sizeof(*req.is_status))) {
+		SCSDBG(SCSLOG_NOTICE, "copy_to_user reason failed with op=0x%x\n", req.is_op);
+		return -EIO;
+	}
+
+	return retval;
+}
+
+#endif /* QSCS_ENABLED */
+
+/*
+ * Block the caller until any in-progress scan (foreground, and background
+ * when QTN_BG_SCAN is built in) completes, or the user-supplied timeout
+ * (seconds) expires.  Returns -EIO on copy failure, -1 if no scan was in
+ * progress, otherwise the wait_event_interruptible_timeout() result
+ * (0 = timed out, >0 = completed, -ERESTARTSYS = interrupted).
+ */
+static int
+ieee80211_subioctl_wait_scan_complete(struct net_device *dev, char __user* p_timeout)
+{
+	int retval = 0;
+	struct ieee80211vap	*vap = netdev_priv(dev);
+	struct ieee80211com	*ic  = vap->iv_ic;
+	uint32_t timeout;
+
+	if (copy_from_user(&timeout, p_timeout, sizeof(timeout))) {
+		return -EIO;
+	};
+
+	/* nothing to wait for if no scan of either kind is running */
+	if (((ic->ic_flags & IEEE80211_F_SCAN) == 0)
+#ifdef QTN_BG_SCAN
+			&& ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0)
+#endif /* QTN_BG_SCAN */
+			) {
+		return -1;
+	}
+
+	retval = wait_event_interruptible_timeout(ic->ic_scan_comp,
+			(((ic->ic_flags & IEEE80211_F_SCAN) == 0)
+#ifdef QTN_BG_SCAN
+					&& ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) == 0)
+#endif /* QTN_BG_SCAN */
+					), timeout * HZ);
+
+	return retval;
+}
+
+/*
+ * Return the highest advertised PHY rate (bits/sec) for a scan entry.
+ * Preference order: VHT cap IE (highest NSS/MCS in the TX MCS map),
+ * else HT cap IE (highest set MCS across NSS1..NSS4), else the best
+ * legacy rate from the (extended) supported-rates IEs.
+ */
+static uint32_t
+maxrate(const struct ieee80211_scan_entry *se)
+{
+	int j, chan_mode = 0;
+	uint32_t max = 0;
+	uint8_t sgi = 0;
+	int k, r;
+	u_int16_t mask;
+	struct ieee80211_ie_htcap *htcap;
+	struct ieee80211_ie_vhtcap *vhtcap;
+
+	htcap = (struct ieee80211_ie_htcap *)se->se_htcap_ie;
+	vhtcap = (struct ieee80211_ie_vhtcap *)se->se_vhtcap_ie;
+	if (vhtcap) {
+		u_int16_t mcsmap = 0;
+		r = 0;
+		/* 80+80 or 160 Mhz */
+		if (IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap)) {
+			chan_mode = 1;
+			sgi = IEEE80211_VHTCAP_GET_SGI_160MHZ(vhtcap);
+		} else {
+			chan_mode = 0;
+			sgi = IEEE80211_VHTCAP_GET_SGI_80MHZ(vhtcap);
+		}
+		/* scan the 2-bit-per-NSS TX MCS map from NSS8 down to NSS1;
+		 * the first supported entry is the best spatial-stream count */
+		mask = 0xc000;
+		mcsmap = (u_int16_t)IEEE80211_VHTCAP_GET_TX_MCS_NSS(vhtcap);
+		for (k = 8; k > 0; k--) {
+			if ((mcsmap & mask) != mask) {
+				uint32_t rate = 0;
+				/* 2-bit field: 2 -> MCS0-9, 1 -> MCS0-8, 0 -> MCS0-7 */
+				int val = ((mcsmap & mask)>>((k-1) * 2));
+				r = (val == 2) ? 9: (val == 1) ? 8 : 7;
+				/* rate table is in 500 kbps units; scale by NSS count k */
+				rate = ((uint32_t)ieee80211_mcs2rate(r, chan_mode, sgi, 1) * (1000000 / 2)) * k;
+				if (max < rate)
+					max = rate;
+				break;
+			}
+			mask = mask >> 2;
+		}
+		return max;
+	} else if (htcap) {
+		r = 0;
+		if (htcap->hc_cap[0] & IEEE80211_HTCAP_C_CHWIDTH40) {
+			chan_mode = 1;
+			sgi = htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+		} else {
+			sgi = htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+		}
+		/* walk the HT MCS bitmap bytes for NSS1..NSS4 (8 MCS each) */
+		for (j = IEEE80211_HT_MCSSET_20_40_NSS1; j <= IEEE80211_HT_MCSSET_20_40_NSS4; j++) {
+			mask = 1;
+			for (k = 0; k < 8; k++, r++) {
+				if (htcap->hc_mcsset[j] & mask) {
+					/* Copy HT rates */
+					int rate = ieee80211_mcs2rate(r, chan_mode, sgi, 0) * (1000000 / 2);
+					if (max < rate) max = rate;
+				}
+				mask = mask << 1;
+			}
+		}
+		return max;
+	}
+
+	/* legacy: rates IEs store values in 500 kbps units at offset 2 */
+	for (j = 0; j < se->se_rates[1]; j++) {
+		int r = se->se_rates[2 + j] & IEEE80211_RATE_VAL;
+		if (r != 0) {
+			r = r * (1000000 / 2);
+			if (max < r) max = r;
+		}
+	}
+	for (j = 0; j < se->se_xrates[1]; j++) {
+		int r = se->se_xrates[2+j] & IEEE80211_RATE_VAL;
+		if (r != 0) {
+			r = r * (1000000 / 2);
+			if (max < r) max = r;
+		}
+	}
+	return max;
+}
+
+/*
+ * Collect legacy bitrates (500 kbps units) from the scan entry's
+ * supported-rates and extended-rates IEs into rates[], up to max_rates.
+ * When basic != 0, only rates flagged IEEE80211_RATE_BASIC are included.
+ * Returns the number of rates written.
+ */
+static int
+ieee80211_get_scan_entry_bitrates_legacy(const struct ieee80211_scan_entry *se,
+	uint32_t *rates, uint32_t max_rates, int basic)
+{
+	int nrates = 0;
+	int i;
+
+	/* rate */
+	for (i = 0; (i < se->se_rates[1]) && (nrates < max_rates); i++) {
+		if ((!basic) || (se->se_rates[2 + i] & IEEE80211_RATE_BASIC)) {
+			rates[nrates++] = se->se_rates[2 + i] & IEEE80211_RATE_VAL;
+		}
+	}
+
+	/* extended rates */
+	for (i = 0; (i < se->se_xrates[1]) && (nrates < max_rates); i++) {
+		if ((!basic) || (se->se_xrates[2 + i] & IEEE80211_RATE_BASIC)) {
+			rates[nrates++] = se->se_xrates[2 + i] & IEEE80211_RATE_VAL;
+		}
+	}
+
+	return nrates;
+}
+
+/*
+ * Collect HT rates from the scan entry into rates[], up to max_rates.
+ * When basic != 0 the HT-info basic MCS set is used (requires the htinfo
+ * IE), otherwise the full htcap MCS set.  Channel width and SGI are taken
+ * from the htcap IE.  Returns the number of rates written; 0 when the
+ * needed IEs are absent.
+ */
+static int
+ieee80211_get_scan_entry_bitrates_ht(const struct ieee80211_scan_entry *se,
+	uint32_t *rates, uint32_t max_rates, int basic)
+{
+	int nrates = 0;
+	int chan_mode;
+	uint8_t sgi = 0;
+	uint8_t *ht_mcsset = NULL;
+	int max_mcs;
+	int mcs;
+	struct ieee80211_ie_htcap *htcap;
+	struct ieee80211_ie_htinfo *htinfo;
+
+	htcap = (struct ieee80211_ie_htcap *)se->se_htcap_ie;
+	htinfo = (struct ieee80211_ie_htinfo *)se->se_htinfo_ie;
+
+	/* HT rates */
+	if (htcap && (htinfo || (!basic))) {
+		if (htcap->hc_cap[0] & IEEE80211_HTCAP_C_CHWIDTH40) {
+			chan_mode = 1;
+			sgi = htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+		} else {
+			chan_mode = 0;
+			sgi = htcap->hc_cap[0] & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+		}
+
+		/* basic set lives in htinfo; full set in htcap */
+		ht_mcsset = basic ? htinfo->hi_basicmcsset : htcap->hc_mcsset;
+		/* NSS1..NSS4, 8 MCS values each */
+		max_mcs = (IEEE80211_HT_MCSSET_20_40_NSS4 + 1) * 8 - 1;
+		for (mcs = 0; mcs <= max_mcs; mcs++) {
+			if (nrates >= max_rates) {
+				break;
+			}
+			if (isset(ht_mcsset, mcs)) {
+				rates[nrates++] = ieee80211_mcs2rate(mcs, chan_mode, sgi, 0);
+			}
+		}
+	}
+
+	return nrates;
+}
+
+/*
+ * Collect VHT rates from the scan entry into rates[], up to max_rates.
+ * Only the highest supported spatial-stream count is reported: the MCS
+ * map (basic map from vhtop when basic != 0, else the htcap TX map) is
+ * scanned from NSS8 down for the first non-NA entry, and MCS 0..max for
+ * that NSS are emitted (each scaled by the NSS count).
+ * Returns the number of rates written; 0 when the needed IEs are absent.
+ */
+static int
+ieee80211_get_scan_entry_bitrates_vht(const struct ieee80211_scan_entry *se,
+	uint32_t *rates, uint32_t max_rates, int basic)
+{
+	int nrates = 0;
+	int chan_mode;
+	uint8_t sgi = 0;
+	uint16_t vht_mcsmap;
+	int max_mcs;
+	int mcs, nss;
+	struct ieee80211_ie_vhtcap *vhtcap;
+	struct ieee80211_ie_vhtop *vhtop;
+
+	vhtcap = (struct ieee80211_ie_vhtcap *)se->se_vhtcap_ie;
+	vhtop = (struct ieee80211_ie_vhtop *)se->se_vhtop_ie;
+
+	if (vhtcap && (vhtop || (!basic))) {
+		/* 80+80 or 160 Mhz */
+		if (IEEE80211_VHTCAP_GET_CHANWIDTH(vhtcap)) {
+			chan_mode = 1;
+			sgi = IEEE80211_VHTCAP_GET_SGI_160MHZ(vhtcap);
+		} else {
+			chan_mode = 0;
+			sgi = IEEE80211_VHTCAP_GET_SGI_80MHZ(vhtcap);
+		}
+
+		if (basic) {
+			vht_mcsmap = (uint16_t)IEEE80211_VHTOP_GET_BASIC_MCS_NSS(vhtop);
+		}
+		else {
+			vht_mcsmap = (uint16_t)IEEE80211_VHTCAP_GET_TX_MCS_NSS(vhtcap);
+		}
+
+		/* get max nss supported, only output rates for highest spatial stream */
+		for (nss = IEEE80211_VHT_NSS8; nss >= IEEE80211_VHT_NSS1; nss--) {
+			max_mcs = IEEE80211_VHTCAP_GET_MCS_MAP_ENTRY(vht_mcsmap, (nss - 1));
+			if (max_mcs != IEEE80211_VHT_MCS_NA) {
+				break;
+			}
+		}
+
+		if (nss) {
+			/* translate the 2-bit map entry into the top MCS index */
+			switch (max_mcs) {
+				case IEEE80211_VHT_MCS_0_7:
+					max_mcs = 7;
+					break;
+				case IEEE80211_VHT_MCS_0_8:
+					max_mcs = 8;
+					break;
+				case IEEE80211_VHT_MCS_0_9:
+					max_mcs = 9;
+					break;
+				default:
+					goto out;
+					break;
+			}
+			for (mcs = 0; mcs <= max_mcs; mcs++) {
+				if (nrates >= max_rates) {
+					goto out;
+				}
+				rates[nrates++] = ieee80211_mcs2rate(mcs, chan_mode, sgi, 1) * nss;
+			}
+		}
+	}
+
+out:
+	return nrates;
+}
+
+/*
+ * Gather all rates for a scan entry -- legacy, then HT, then VHT --
+ * into rates[], never exceeding max_rates in total.  When basic != 0
+ * only basic/mandatory rates are collected.  Returns the count written;
+ * 0 if se or rates is NULL.
+ */
+static int
+ieee80211_get_scan_entry_bitrates_all(const struct ieee80211_scan_entry *se,
+	uint32_t *rates, uint32_t max_rates, int basic)
+{
+	int nrates = 0;
+
+	if ((se == NULL) || (rates == NULL)) {
+		return 0;
+	}
+
+	/* each helper clamps itself to the remaining capacity */
+	nrates = ieee80211_get_scan_entry_bitrates_legacy(se, rates, max_rates, basic);
+	nrates += ieee80211_get_scan_entry_bitrates_ht(se, rates + nrates, max_rates - nrates, basic);
+	nrates += ieee80211_get_scan_entry_bitrates_vht(se, rates + nrates, max_rates - nrates, basic);
+
+	return nrates;
+}
+
+/*
+ * Scan-iterator callback: serialize one scan entry into the caller's
+ * flat buffer as an ieee80211_per_ap_scan_result followed by its
+ * RSN/WPA/WSC IEs, then advance and 4-byte-align the cursor.
+ * arg is an ap_scan_iter tracking the cursor, buffer end, vap and count.
+ * Returns 0 on success, or positive E2BIG when the buffer is full
+ * (callers negate positive returns -- see ieee80211_subioctl_ap_scan_results).
+ */
+static int
+push_scan_results(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct ap_scan_iter			*piter = (struct ap_scan_iter*)arg;
+	struct ieee80211_per_ap_scan_result	*pap;
+	struct ieee80211_ie_htinfo *htinfo =
+			(struct ieee80211_ie_htinfo *)se->se_htinfo_ie;
+	struct ieee80211_ie_vhtop *vhtop =
+			(struct ieee80211_ie_vhtop *)se->se_vhtop_ie;
+	struct ap_scan_entry *apse;
+	struct sta_entry *stase;
+
+	pap = (struct ieee80211_per_ap_scan_result *)piter->current_env;
+
+	if (piter->current_env >= piter->end_buf)
+		return E2BIG;
+
+	/* check length, set macaddr, ssid, channel, rssi, flags,htcap*/
+	if (piter->current_env + sizeof(*pap) >= piter->end_buf)
+		return E2BIG;
+
+	/* ap mac addr */
+	if (piter->vap->iv_opmode == IEEE80211_M_HOSTAP)
+		IEEE80211_ADDR_COPY(pap->ap_addr_mac, se->se_macaddr);
+	else
+		IEEE80211_ADDR_COPY(pap->ap_addr_mac, se->se_bssid);
+
+	/* ssid (IE format: [id][len][data]) */
+	memset(pap->ap_name_ssid, 0, sizeof(pap->ap_name_ssid));
+	memcpy(pap->ap_name_ssid, se->se_ssid + 2, se->se_ssid[1]);
+
+	/* channel ieee */
+	pap->ap_channel_ieee = se->se_chan->ic_ieee;
+
+	/* max bandwidth */
+	pap->ap_max_bw = ieee80211_get_max_ap_bw(se);
+
+	/* rssi */
+	pap->ap_rssi = se->se_rssi;
+
+	/* flags:privacy */
+	pap->ap_flags = !!(se->se_capinfo & IEEE80211_CAPINFO_PRIVACY);
+
+	/* htcap available */
+	pap->ap_htcap = (se->se_htcap_ie != NULL);
+
+	/* vhtcap available */
+	pap->ap_vhtcap = (se->se_vhtcap_ie != NULL);
+
+	pap->ap_basicrates_num = ieee80211_get_scan_entry_bitrates_all(se, pap->ap_basicrates, AP_SCAN_MAX_NUM_RATES, 1);
+	pap->ap_suprates_num = ieee80211_get_scan_entry_bitrates_all(se, pap->ap_suprates, AP_SCAN_MAX_NUM_RATES, 0);
+
+	pap->ap_qhop_role = se->se_ext_role;
+
+	piter->current_env += sizeof(*pap);
+	pap->ap_bestrate = maxrate(se);
+
+	/* check length, copy wpa_ie, wsc_ie and rsn_ie to buffer if exist */
+	pap->ap_num_genies = 0;
+	if (se->se_rsn_ie != NULL) {
+		if (piter->current_env + se->se_rsn_ie[1] + 2 >= piter->end_buf)
+			return E2BIG;
+		memcpy(piter->current_env, se->se_rsn_ie, se->se_rsn_ie[1] + 2);
+		piter->current_env += se->se_rsn_ie[1] + 2;
+		pap->ap_num_genies++;
+	}
+
+	if (se->se_wpa_ie != NULL) {
+		if (piter->current_env + se->se_wpa_ie[1] + 2 >= piter->end_buf)
+			return E2BIG;
+		memcpy(piter->current_env, se->se_wpa_ie, se->se_wpa_ie[1] + 2);
+		piter->current_env += se->se_wpa_ie[1] + 2;
+		pap->ap_num_genies++;
+	}
+
+	if (se->se_wsc_ie != NULL) {
+		if (piter->current_env + se->se_wsc_ie[1] + 2 >= piter->end_buf)
+			return E2BIG;
+		memcpy(piter->current_env, se->se_wsc_ie, se->se_wsc_ie[1] + 2);
+		piter->current_env += se->se_wsc_ie[1] + 2;
+		pap->ap_num_genies++;
+	}
+
+	/* FIXME: use default noise for now */
+	pap->ap_noise = QNT_DEFAULT_NOISE;
+
+	pap->ap_beacon_intval = se->se_intval;
+	pap->ap_dtim_intval = se->se_dtimperiod;
+	pap->ap_nonerp_present = se->se_erp & IEEE80211_ERP_NON_ERP_PRESENT;
+	pap->ap_is_ess = se->se_capinfo & IEEE80211_CAPINFO_ESS;
+
+	if (htinfo)
+		pap->ap_ht_secoffset = IEEE80211_HTINFO_B1_EXT_CHOFFSET(htinfo);
+	else
+		pap->ap_ht_secoffset = IEEE80211_HTINFO_EXTOFFSET_NA;
+
+	if (vhtop && pap->ap_max_bw >= BW_HT80) {
+		pap->ap_chan_center1 = IEEE80211_VHTOP_GET_CENTERFREQ0(vhtop);
+		pap->ap_chan_center2 = IEEE80211_VHTOP_GET_CENTERFREQ1(vhtop);
+	} else {
+		pap->ap_chan_center1 = ieee80211_find_ht_center_chan(piter->vap, se);
+		pap->ap_chan_center2 = 0;
+	}
+
+	/* scan entries embed their base differently per opmode */
+	if (piter->vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		apse = container_of(se, struct ap_scan_entry, base);
+		pap->ap_last_beacon = (apse->se_lastupdate - INITIAL_JIFFIES) / HZ;
+	} else {
+		stase = container_of(se, struct sta_entry, base);
+		pap->ap_last_beacon = (stase->se_lastupdate - INITIAL_JIFFIES) / HZ;
+	}
+
+	/* keep address 4-byte aligned*/
+	piter->current_env = (char *)(((int)piter->current_env + 3) & (~3));
+
+	piter->ap_counts++;
+	return 0;
+}
+
+/*
+ * Set the AP-count threshold that defines a "sparse" or "dense"
+ * neighborhood.  Returns 0 on success, 1 for any other type.
+ */
+static inline int
+ieee80211_set_threshold_of_neighborhood_type(struct ieee80211com *ic, uint32_t type, uint32_t value)
+{
+	if (IEEE80211_NEIGHBORHOOD_TYPE_SPARSE == type)
+		ic->ic_neighbor_cnt_sparse = value;
+	else if (IEEE80211_NEIGHBORHOOD_TYPE_DENSE == type)
+		ic->ic_neighbor_cnt_dense = value;
+	else
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Read back the sparse/dense neighborhood AP-count threshold.
+ * Returns 0 for any unrecognized type.
+ */
+static inline uint32_t
+ieee80211_get_threshold_of_neighborhood_type(struct ieee80211com *ic, uint32_t type)
+{
+	if (IEEE80211_NEIGHBORHOOD_TYPE_SPARSE == type)
+		return ic->ic_neighbor_cnt_sparse;
+	else if (IEEE80211_NEIGHBORHOOD_TYPE_DENSE == type)
+		return ic->ic_neighbor_cnt_dense;
+
+	return 0;
+}
+
+/* Optional QFDR hook invoked after local scan-result iteration; NULL when
+ * no remote module has registered. */
+static qfdr_remote_ap_scan_results_hook_t qfdr_remote_ap_scan_results_hook = NULL;
+/* Register (or clear, with NULL) the QFDR remote scan-results hook. */
+void ieee80211_register_qfdr_remote_ap_scan_results_hook(qfdr_remote_ap_scan_results_hook_t hook)
+{
+	qfdr_remote_ap_scan_results_hook = hook;
+}
+EXPORT_SYMBOL(ieee80211_register_qfdr_remote_ap_scan_results_hook);
+
+/*
+ * Serve a remote QFDR peer's AP-list request: look up the named device,
+ * allocate a reply of header + extra_len bytes, and fill its extra area
+ * by iterating local scan results with push_scan_results().
+ * Returns a kmalloc'd reply (caller must free) or NULL on lookup or
+ * allocation failure.
+ * NOTE(review): vap (netdev_priv(dev)) is used after dev_put(dev) --
+ * assumes the device cannot disappear during this call; confirm the
+ * caller holds a sufficient reference.
+ */
+struct qfdr_remote_aplist_rep *qfdr_ap_scan_results_for_remote(struct qfdr_remote_aplist_req *remote_req)
+{
+	struct net_device *dev;
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+	struct qfdr_remote_aplist_rep *rep;
+	char *extra;
+	struct ap_scan_iter iter;
+
+	dev = dev_get_by_name(&init_net, remote_req->dev_name);
+	if (!dev)
+		return NULL;
+	vap = netdev_priv(dev);
+	dev_put(dev);
+	ic = vap->iv_ic;
+
+	rep = kmalloc(remote_req->extra_len + sizeof(struct qfdr_remote_aplist_rep), GFP_KERNEL);
+	if (!rep) {
+		printk(KERN_ERR "%s: Failed to alloc buf.\n", __func__);
+		return NULL;
+	}
+	extra = rep->extra;
+
+	/* cursor/bounds for push_scan_results() */
+	iter.ap_counts = 0;
+	iter.current_env = extra;
+	iter.end_buf = extra + remote_req->extra_len;
+	iter.vap = vap;
+
+	rep->res = ieee80211_scan_iterate(ic, push_scan_results, &iter);
+	rep->type = QFDR_AP_SCAN_RESULT;
+
+	rep->ap_counts = iter.ap_counts;
+	rep->length = iter.current_env - extra;
+	return rep;
+}
+EXPORT_SYMBOL(qfdr_ap_scan_results_for_remote);
+
+/*
+ * AP-scan-results sub-ioctl: read the offchan flag from the user buffer,
+ * build a general header (supported bitrates in bits/sec) plus per-AP
+ * entries via push_scan_results() into a kernel buffer of the
+ * user-supplied size, and copy the filled portion back to userspace.
+ * When offchan is set, the off-channel (SCS) scan list is temporarily
+ * swapped into ss_priv for the iteration and restored afterwards.
+ * Returns 0 on success or a negative errno (positive iterator results
+ * such as E2BIG are negated before returning).
+ * NOTE(review): len is user-controlled and passed to kmalloc unchecked;
+ * also assumed >= sizeof(*ge_ap_scan_result) -- confirm the caller
+ * validates it.
+ */
+static int
+ieee80211_subioctl_ap_scan_results(struct net_device *dev, char __user* data, int32_t len)
+{
+	int retval;
+	int i, r, chan_mode = 0;
+	uint8_t sgi = 0;
+	char *kdata;
+	struct ieee80211vap			*vap = netdev_priv(dev);
+	struct ieee80211com			*ic  = vap->iv_ic;
+	struct ieee80211_rateset		*rs;
+	struct ap_scan_iter			iter;
+	struct ieee80211_general_ap_scan_result *ge_ap_scan_result;
+	uint32_t offchan;
+	struct ieee80211_scan_state *ss = ic->ic_scan;
+	struct ap_state *as;
+	struct ap_state *as_bak = NULL;
+
+	kdata = kmalloc(len, GFP_KERNEL);
+	if (NULL == kdata)
+		return -ENOMEM;
+
+	/* Bug fix: the original returned -EIO here directly, leaking kdata;
+	 * route through the common exit path instead. */
+	if (copy_from_user(&offchan, data, sizeof(offchan)) != 0) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	if (offchan > 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* get bit rates from ic->ic_sup_rates[ic->ic_des_mode] */
+	ge_ap_scan_result = (struct ieee80211_general_ap_scan_result *)kdata;
+	ge_ap_scan_result->num_ap_results = 0;
+	rs = &ic->ic_sup_rates[ic->ic_des_mode];
+	ge_ap_scan_result->num_bitrates = rs->rs_nrates;
+
+	/* clamp to what both the rateset and the report array can hold */
+	if (ge_ap_scan_result->num_bitrates > MIN(IEEE80211_RATE_MAXSIZE, AP_SCAN_MAX_NUM_RATES)) {
+		ge_ap_scan_result->num_bitrates = MIN(IEEE80211_RATE_MAXSIZE, AP_SCAN_MAX_NUM_RATES);
+	}
+
+	if (vap->iv_ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) {
+		chan_mode = 1;
+		sgi = vap->iv_ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+	} else {
+		sgi = vap->iv_ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+	}
+	for (i = 0; i < ge_ap_scan_result->num_bitrates; i++) {
+		r = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+
+		/* Skip legacy rates */
+		if(i >= (rs->rs_legacy_nrates))
+		{
+			r = ieee80211_mcs2rate(r, chan_mode, sgi, 0);
+		}
+		/* rateset values are in 500 kbps units; report bits/sec */
+		ge_ap_scan_result->bitrates[i] = (r * 1000000) / 2;
+	}
+
+	/* initialize ap_scan_iter */
+	iter.ap_counts = 0;
+	iter.current_env = kdata + sizeof(*ge_ap_scan_result);
+	iter.end_buf = kdata + len;
+	iter.vap = vap;
+
+	/*
+	 * iterate scan results to push per-ap data into buffer
+	 *
+	 * Don't need do WPA/RSN sort any more since the original scan list
+	 * has been sorted.
+	 */
+	as = (struct ap_state *)ss->ss_scs_priv;
+	if (offchan) {
+		as_bak = ss->ss_priv;
+		ss->ss_priv = as;
+	}
+	retval = ieee80211_scan_iterate(ic, push_scan_results, &iter);
+	if (offchan)
+		ss->ss_priv = as_bak;
+
+	if (retval == 0 && qfdr_remote_ap_scan_results_hook != NULL) {
+		retval = qfdr_remote_ap_scan_results_hook(&iter);
+	}
+
+	ge_ap_scan_result->num_ap_results = iter.ap_counts;
+	if (copy_to_user(data, kdata, iter.current_env - kdata))
+		retval = -EIO;
+
+	/* push_scan_results() reports overflow as positive E2BIG */
+	if (retval > 0)
+		retval = -retval;
+
+exit:
+	kfree(kdata);
+	return retval;
+}
+
+/*
+ * Bump the OCAC clean-stats counter matching clean_level (STOP, START
+ * or RESET) and, for levels up to RESET, zero the accumulated CAC
+ * duration/time counters.  Always returns 0.
+ */
+static int
+ieee80211_ocac_clean_stats(struct ieee80211com *ic, int clean_level)
+{
+	switch (clean_level) {
+	case IEEE80211_OCAC_CLEAN_STATS_STOP:
+		ic->ic_ocac.ocac_counts.clean_stats_stop++;
+		break;
+	case IEEE80211_OCAC_CLEAN_STATS_START:
+		ic->ic_ocac.ocac_counts.clean_stats_start++;
+		break;
+	case IEEE80211_OCAC_CLEAN_STATS_RESET:
+		ic->ic_ocac.ocac_counts.clean_stats_reset++;
+		break;
+	}
+
+	if (clean_level <= IEEE80211_OCAC_CLEAN_STATS_RESET) {
+		ic->ic_ocac.ocac_accum_duration_secs = 0;
+		ic->ic_ocac.ocac_accum_cac_time_ms = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Choose the DFS channel for off-channel CAC.  Priority: the caller's
+ * explicit chan_dfs, then the one-shot auto_first_dfs_channel config
+ * (consumed here), then the SCS channel picker restricted to
+ * not-yet-available DFS channels.  Returns NULL if the candidate is
+ * primary-inactive or its CAC is already complete.
+ */
+static struct ieee80211_channel *
+ieee80211_ocac_pick_dfs_channel(struct ieee80211com *ic, int chan_dfs)
+{
+	int chan_ieee;
+	struct ieee80211_channel *chan = NULL;
+
+	ic->ic_ocac.ocac_counts.pick_offchan++;
+	if (chan_dfs) {
+		chan_ieee = chan_dfs;
+	} else if (ic->ic_ocac.ocac_cfg.ocac_params.auto_first_dfs_channel) {
+		/* one-shot: cleared after first use */
+		chan_ieee = ic->ic_ocac.ocac_cfg.ocac_params.auto_first_dfs_channel;
+		ic->ic_ocac.ocac_cfg.ocac_params.auto_first_dfs_channel = 0;
+	} else {
+		/* Pick a DFS channel on which OCAC will be performed */
+		chan_ieee = ieee80211_scs_pick_channel(ic,
+				(IEEE80211_SCS_PICK_NOT_AVAILABLE_DFS_ONLY | IEEE80211_SCS_PICK_ANYWAY),
+				IEEE80211_SCS_NA_CC);
+	}
+	chan = ieee80211_find_channel_by_ieee(ic, chan_ieee);
+
+	if (chan && (isset(ic->ic_chan_pri_inactive, chan->ic_ieee))) {
+                ic->ic_ocac.ocac_counts.invalid_offchan++;
+		return NULL;
+	}
+	/*
+	 * Select a DFS channel for OCAC, only if CAC is not already done;
+	 * Initial CAC might have cleared the channel already;
+	 */
+	if ((chan && (ic->ic_dfs_chans_available_for_cac(ic, chan) == false))) {
+		ic->ic_ocac.ocac_counts.invalid_offchan++;
+		return NULL;
+	}
+
+	return chan;
+}
+
+/*
+ * Apply the OCAC-specific beacon interval.
+ *
+ * Only acts when the current interval differs from the OCAC one AND the
+ * current interval still equals the backup (i.e. it has not already been
+ * changed by this path or elsewhere).  Records that OCAC changed the
+ * interval so ieee80211_ocac_restore_beacon_interval() can undo it.
+ */
+static void ieee80211_ocac_set_beacon_interval(struct ieee80211com *ic)
+{
+	if (ic->ic_lintval != ic->ic_ocac.ocac_cfg.ocac_params.beacon_interval &&
+			ic->ic_lintval == ic->ic_lintval_backup) {
+		ieee80211_beacon_interval_set(ic,
+				ic->ic_ocac.ocac_cfg.ocac_params.beacon_interval);
+		ic->ic_ocac.ocac_bcn_intval_set = 1;
+		ic->ic_ocac.ocac_counts.set_bcn_intval++;
+		OCACDBG(OCACLOG_NOTICE, "set beacon interval to %u\n",
+				ic->ic_ocac.ocac_cfg.ocac_params.beacon_interval);
+	}
+}
+
+/*
+ * Undo ieee80211_ocac_set_beacon_interval().
+ *
+ * Restores the backed-up beacon interval, but only when the current
+ * interval is still the OCAC value, it differs from the backup, and the
+ * ocac_bcn_intval_set flag confirms OCAC was the one that changed it --
+ * this avoids clobbering an interval changed by another path.
+ */
+static void ieee80211_ocac_restore_beacon_interval(struct ieee80211com *ic)
+{
+	if (ic->ic_lintval == ic->ic_ocac.ocac_cfg.ocac_params.beacon_interval &&
+			ic->ic_lintval != ic->ic_lintval_backup &&
+			ic->ic_ocac.ocac_bcn_intval_set) {
+		ieee80211_beacon_interval_set(ic, ic->ic_lintval_backup);
+		ic->ic_ocac.ocac_bcn_intval_set = 0;
+		ic->ic_ocac.ocac_counts.restore_bcn_intval++;
+		OCACDBG(OCACLOG_NOTICE, "restore beacon interval to %u\n",
+				ic->ic_lintval_backup);
+	}
+}
+
+/*
+ * CSA completion callback (timer-callback signature; arg is the ic).
+ *
+ * Completes the channel switch to ic_csa_chan.  The OCAC_DONE flag is set
+ * only around the ieee80211_finish_csa() call so the switch does not start
+ * a fresh CAC on the new channel, and is cleared again immediately after.
+ * Finally restores the pre-OCAC beacon interval.
+ */
+static void ieee80211_ocac_trigger_channel_switch(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *)arg;
+	struct ieee80211_channel *chan = ic->ic_csa_chan;
+
+	/* don't run CAC on new channel */
+	chan->ic_flags |= IEEE80211_CHAN_DFS_OCAC_DONE;
+	ieee80211_finish_csa((unsigned long)ic);
+	chan->ic_flags &= ~IEEE80211_CHAN_DFS_OCAC_DONE;
+
+	ieee80211_ocac_restore_beacon_interval(ic);
+
+	return;
+}
+
+/*
+ * Check whether radar has been detected on the given IEEE channel number.
+ *
+ * A chan_ieee of 0 (or an unknown channel) is treated as radar-free.
+ * Returns -1 and bumps the csw_fail_radar counter when radar was seen,
+ * 0 otherwise.
+ */
+static int
+ieee80211_ocac_check_radar_by_chan_ieee(struct ieee80211com *ic, int chan_ieee)
+{
+	struct ieee80211_channel *chan;
+
+	if (!chan_ieee)
+		return 0;
+
+	chan = ieee80211_find_channel_by_ieee(ic, chan_ieee);
+	if (chan == NULL || !(chan->ic_flags & IEEE80211_CHAN_RADAR))
+		return 0;
+
+	ic->ic_ocac.ocac_counts.csw_fail_radar++;
+	OCACDBG(OCACLOG_NOTICE, "switch channel failed "
+			"because radar detected on channel: %u\n",
+			chan_ieee);
+
+	return -1;
+}
+
+/*
+ * Change the operating channel to the DFS channel after off-channel CAC
+ * has completed.
+ *
+ * The switch is refused (returns -1) when: report-only mode is set; the
+ * regulatory region is EU (the channel is only marked AVAILABLE instead);
+ * the target channel has more CCA interference than both the configured
+ * threshold and the current channel; or radar was detected on any of the
+ * secondary channels needed for the configured bandwidth.
+ *
+ * return:
+ * -1: channel switch failed
+ * 0 : channel switch succeeded.
+ */
+static int
+ieee80211_ocac_change_channel(struct ieee80211com *ic, struct ieee80211_channel *newchan)
+{
+	int ret;
+	uint32_t cur_cca_intf = 0;
+	uint32_t new_cca_intf = 0;
+	int chan2_ieee = 0;
+
+	struct ap_state *as;
+
+	if (ic->ic_ocac.ocac_cfg.ocac_report_only) {
+		ic->ic_ocac.ocac_counts.csw_rpt_only++;
+		OCACDBG(OCACLOG_NOTICE, "didn't switch channel for report only\n");
+		return -1;
+	}
+
+	/* EU: no automatic switch; just mark the channel usable */
+	if (ic->ic_dfs_is_eu_region() == true) {
+		ic->ic_ocac.ocac_counts.no_channel_change_eu++;
+		OCACDBG(OCACLOG_NOTICE, "Setting state of dfs channel as AVAILABLE\n");
+		if (ic->ic_mark_channel_availability_status) {
+			ic->ic_mark_channel_availability_status(ic, ic->ic_ocac.ocac_chan,
+					IEEE80211_CHANNEL_STATUS_AVAILABLE);
+		}
+		return -1;
+	}
+
+	/* Convert SCS CCA interference samples to percentages (0..100) */
+	as = ic->ic_scan->ss_scs_priv;
+	if (as->as_cca_intf[ic->ic_curchan->ic_ieee] != SCS_CCA_INTF_INVALID) {
+		cur_cca_intf = 100 * as->as_cca_intf[ic->ic_curchan->ic_ieee]
+						     / IEEE80211_SCS_CCA_INTF_SCALE;
+	}
+	if (as->as_cca_intf[newchan->ic_ieee] != SCS_CCA_INTF_INVALID) {
+		new_cca_intf = 100 * as->as_cca_intf[newchan->ic_ieee]
+						     / IEEE80211_SCS_CCA_INTF_SCALE;
+	}
+
+	/* Don't move to a channel that is both above threshold and worse */
+	if ((new_cca_intf > ic->ic_ocac.ocac_cfg.ocac_params.thresh_cca_intf)
+			&& (new_cca_intf > cur_cca_intf)) {
+		ic->ic_ocac.ocac_counts.csw_fail_intf++;
+		OCACDBG(OCACLOG_NOTICE, "can't switch to channel %u, "
+				"cur_intf: %u, new_intf: %u\n",
+				newchan->ic_ieee, cur_cca_intf, new_cca_intf);
+		return -1;
+	}
+
+	/* 40MHz: the secondary 20MHz channel must be radar-free */
+	if (ieee80211_get_bw(ic) >= BW_HT40) {
+		chan2_ieee = ieee80211_find_sec_chan(newchan);
+		if (ieee80211_ocac_check_radar_by_chan_ieee(ic, chan2_ieee)) {
+			return -1;
+		}
+	}
+
+	/* 80MHz: both halves of the secondary 40MHz must be radar-free */
+	if (ieee80211_get_bw(ic) >= BW_HT80) {
+		chan2_ieee = ieee80211_find_sec40u_chan(newchan);
+		if (ieee80211_ocac_check_radar_by_chan_ieee(ic, chan2_ieee)) {
+			return -1;
+		}
+		chan2_ieee = ieee80211_find_sec40l_chan(newchan);
+		if (ieee80211_ocac_check_radar_by_chan_ieee(ic, chan2_ieee)) {
+			return -1;
+		}
+	}
+
+	/* Announce the switch via CSA in beacons and action frames */
+	ret = ieee80211_enter_csa(ic, newchan, ieee80211_ocac_trigger_channel_switch,
+			IEEE80211_CSW_REASON_OCAC,
+			IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT,
+			IEEE80211_CSA_MUST_STOP_TX,
+			IEEE80211_CSA_F_BEACON | IEEE80211_CSA_F_ACTION);
+	if (ret == 0) {
+		ic->ic_ocac.ocac_counts.csw_success++;
+		DFS_S_DBG_QEVT(ic2dev(ic), "DFS_s_radio: CAC completed and start working on "
+			       "channel %u\n", newchan->ic_ieee);
+	} else {
+		ic->ic_ocac.ocac_counts.csw_fail_csa++;
+		OCACDBG(OCACLOG_NOTICE, "switch to channel %u failed\n", newchan->ic_ieee);
+	}
+
+	return ret;
+}
+
+/*
+ * Stop off-channel CAC (DFS seamless radio).
+ *
+ * Cancels the OCAC timer, tells the lower layer to stop off-channel
+ * operation, drops the power-management vote taken while OCAC was running,
+ * restores the original beacon interval, accounts a STOP stats-clean and
+ * releases any held frames.
+ *
+ * Returns 0 on success, -1 when not in AP mode or when the lower-layer
+ * ic_set_ocac(vap, NULL) call fails.
+ */
+static int
+ieee80211_wireless_stop_ocac(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
+		printk("DFS seamless radio is only supported on APs\n");
+		return -1;
+	}
+
+	del_timer(&ic->ic_ocac.ocac_timer);
+
+	/* NULL channel disables off-channel CAC in the lower layer */
+	if (ic->ic_set_ocac(vap, NULL)) {
+		return -1;
+	}
+
+	if (ic->ic_ocac.ocac_running) {
+		ic->ic_ocac.ocac_running = 0;
+		ic->ic_pm_reason = IEEE80211_PM_LEVEL_STOP_OCAC_SDFS;
+		ieee80211_pm_queue_work(ic);
+		ic->ic_ocac.ocac_counts.pm_update++;
+	}
+
+	ieee80211_ocac_restore_beacon_interval(ic);
+	ieee80211_ocac_clean_stats(ic, IEEE80211_OCAC_CLEAN_STATS_STOP);
+	ic->ic_ocac_release_frame(ic, 1);
+
+	printk("DFS seamless radio is stopped\n");
+
+	return 0;
+}
+
+/*
+ * CAC dwell-duration budget (seconds) for the given DFS channel; weather
+ * radar channels use their dedicated configured value.
+ */
+static __inline__ uint32_t
+ieee80211_ocac_get_param_duration(struct ieee80211com *ic, struct ieee80211_channel *dfs_chan)
+{
+	if (ieee80211_is_on_weather_channel(ic, dfs_chan))
+		return ic->ic_ocac.ocac_cfg.ocac_params.wea_duration_secs;
+
+	return ic->ic_ocac.ocac_cfg.ocac_params.duration_secs;
+}
+
+/*
+ * Minimum accumulated CAC time (seconds) required to declare CAC complete
+ * for the given DFS channel; weather radar channels use their dedicated
+ * configured value.
+ */
+static __inline__ uint32_t
+ieee80211_ocac_get_param_cac_time(struct ieee80211com *ic, struct ieee80211_channel *dfs_chan)
+{
+	if (ieee80211_is_on_weather_channel(ic, dfs_chan))
+		return ic->ic_ocac.ocac_cfg.ocac_params.wea_cac_time_secs;
+
+	return ic->ic_ocac.ocac_cfg.ocac_params.cac_time_secs;
+}
+
+/*
+ * Pick a random backoff count used to stagger OCAC between neighbouring
+ * APs.  (count % 56) yields 0..55, so the result is in [8, 63].
+ */
+static __inline uint8_t
+ieee80211_ocac_get_backoff_count(void)
+{
+	uint8_t count;
+
+	/* Random number in [8, 63] */
+	get_random_bytes(&count, sizeof(count));
+	count = (count % 56) + 8;
+
+	return count;
+}
+
+/*
+ * Coordinate OCAC with neighbouring APs so that only one performs
+ * off-channel CAC at a time (only when traffic_ctrl is enabled; otherwise
+ * OCAC is unconditionally marked available).
+ *
+ * State machine:
+ *  - AVAILABLE -> UNAVAILABLE when a neighbour is observed ONGOING.
+ *  - UNAVAILABLE: start a random backoff (via the MuC state IE) when the
+ *    neighbourhood is idle or our random count wins against a neighbour's
+ *    backoff; when the backoff completion fires and the neighbourhood is
+ *    in state NONE, become AVAILABLE again.
+ *
+ * ocac_rx_state is shared with the receive path, hence the ocac_lock
+ * around every read of it.
+ */
+static void
+ieee80211_ocac_check_simul_cac(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* Prevent multiple AP's from performing simultaneous S-DFS based on traffic_ctrl */
+	if (!ic->ic_ocac.ocac_cfg.ocac_params.traffic_ctrl) {
+		ic->ic_ocac.ocac_available = OCAC_AVAILABLE;
+		return;
+	}
+
+	if (ic->ic_ocac.ocac_available == OCAC_AVAILABLE) {
+		spin_lock(&ic->ic_ocac.ocac_lock);
+
+		if (ic->ic_ocac.ocac_rx_state.state == OCAC_STATE_ONGOING) {
+			OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending because another AP is doing "
+				"OCAC at this moment\n");
+			ic->ic_ocac.ocac_counts.cac_in_neighbourhood++;
+
+			ic->ic_ocac.ocac_available = OCAC_UNAVAILABLE;
+		}
+
+		spin_unlock(&ic->ic_ocac.ocac_lock);
+	} else {
+
+		if (!ic->ic_ocac.ocac_backoff_in_progress) {
+			ic->ic_ocac.ocac_backoff_count = ieee80211_ocac_get_backoff_count();
+
+			spin_lock(&ic->ic_ocac.ocac_lock);
+
+			switch (ic->ic_ocac.ocac_rx_state.state) {
+			case OCAC_STATE_NONE:
+				ic->ic_ocac.ocac_backoff_in_progress = 1;
+				break;
+			case OCAC_STATE_BACKOFF:
+				/* Contend only when our count beats the neighbour's */
+				if (ic->ic_ocac.ocac_backoff_count < ic->ic_ocac.ocac_rx_state.param)
+					ic->ic_ocac.ocac_backoff_in_progress = 1;
+				break;
+			case OCAC_STATE_ONGOING:
+				break;
+			}
+
+			spin_unlock(&ic->ic_ocac.ocac_lock);
+
+			if (ic->ic_ocac.ocac_backoff_in_progress) {
+				init_completion(&ic->ic_ocac.ocac_backoff_completion);
+
+				/* MuC will trigger an event and move to state NONE when param reaches 0 */
+				ic->ic_update_ocac_state_ie(ic, OCAC_STATE_BACKOFF,
+					ic->ic_ocac.ocac_backoff_count);
+			}
+
+		} else {
+			/* Non-blocking poll: timer context must not sleep */
+			if (try_wait_for_completion(&ic->ic_ocac.ocac_backoff_completion)) {
+				spin_lock(&ic->ic_ocac.ocac_lock);
+
+				switch (ic->ic_ocac.ocac_rx_state.state) {
+				case OCAC_STATE_NONE:
+					ic->ic_ocac.ocac_available = OCAC_AVAILABLE;
+					break;
+				case OCAC_STATE_BACKOFF:
+				case OCAC_STATE_ONGOING:
+					break;
+				}
+
+				spin_unlock(&ic->ic_ocac.ocac_lock);
+
+				ic->ic_ocac.ocac_backoff_in_progress = 0;
+			}
+		}
+	}
+}
+
+/*
+ * Ensure the beaconing scheme is compatible with OCAC when two VAPs exist.
+ *
+ * With OCAC_MAX_SUPPORTED_VAPS VAPs and scheme 0, tries to move to
+ * scheme 1: forces all stations to reassociate (so they resync to the new
+ * beacon layout) and applies the scheme via ic_set_beaconing_scheme.
+ * Sets *reset_duration so the caller restarts CAC accounting.
+ *
+ * Returns 0 when OCAC may proceed, -1 when it must stay pending (scheme
+ * change failed or the power-management level forbids changing it now).
+ */
+static inline int ieee80211_ocac_check_bcn_scheme(struct ieee80211vap *vap, uint32_t num_vap,
+						uint32_t *reset_duration)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	/* If two VAPs are configured, the beaconing scheme should be scheme 1 */
+	if (num_vap == OCAC_MAX_SUPPORTED_VAPS && ic->ic_beaconing_scheme == QTN_BEACONING_SCHEME_0) {
+		OCACDBG(OCACLOG_NOTICE, "DFS seamless radio with two MBSSIDs is pending because"
+				" of beaconing scheme initiated at boot-up. Updating scheme...\n");
+		ic->ic_ocac.ocac_counts.beacon_scheme0++;
+		*reset_duration = 1;
+
+		if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] < BOARD_PM_LEVEL_DUTY) {
+			/* Force disassoc of all devices to allow resynchronisation to the beacons */
+			ieee80211_wireless_reassoc_all_vaps(ic);
+			ic->ic_pm_reason = IEEE80211_PM_LEVEL_BCN_SCHEME_CHANGED_FOR_2VAPS;
+			ieee80211_pm_queue_work_custom(ic, BOARD_PM_WLAN_AP_IDLE_AFTER_BEACON_SCHEME);
+
+			if (ic->ic_set_beaconing_scheme(vap, IEEE80211_PARAM_BEACONING_SCHEME,
+						QTN_BEACONING_SCHEME_1) < 0) {
+				OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending due to beaconing "
+						"scheme update failure\n");
+				OCACDBG(OCACLOG_NOTICE, "To enable DFS seamless radio with two MBSSIDs,"
+						"please save DFS seamless radio configuration and reboot the board"
+						"or restart DFS seamless radio\n");
+				return -1;
+			}
+		} else {
+			OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending due to wrong "
+					"beaconing scheme\n");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Periodic OCAC driver (timer callback; arg is the controlling AP vap).
+ *
+ * On every tick, decides whether off-channel CAC may run and on which DFS
+ * channel, then pushes the decision to the lower layer via ic_set_ocac().
+ * dfs_chan stays NULL (OCAC paused this tick) unless all preconditions
+ * pass: AP running, no scan in progress, current channel non-DFS, no
+ * plain WDS peer, at most two supported VAPs (wifi0/wifi1), compatible
+ * beaconing scheme, a usable DFS channel picked, and no neighbouring AP
+ * doing CAC.  Accumulates dwell time across ticks and, when the CAC
+ * budget is met, either switches to the DFS channel (CAC success) or
+ * restarts.  Always re-arms itself; the only exits that don't are via
+ * stop_ocac, which tears the timer down.
+ */
+static void
+ieee80211_ocac_timer_func(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *)arg;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	struct ieee80211_channel *dfs_chan = NULL;
+	struct ieee80211vap *tmp_vap;
+	uint32_t num_vap = 0;
+	uint32_t vap_idx;
+	uint32_t radar_detected;
+	uint32_t reset_duration = 0;
+	uint8_t prev_ocac_running;
+
+	if (vap->iv_state != IEEE80211_S_RUN) {
+		OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending"
+				" because the AP is not in running state\n");
+		ic->ic_ocac.ocac_counts.ap_not_running++;
+		goto set_ocac;
+	}
+
+	if (ic->ic_flags & IEEE80211_F_SCAN) {
+		OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending"
+				" because channel scanning is in progress\n");
+		ic->ic_ocac.ocac_counts.chan_scanning++;
+		goto set_ocac;
+	}
+
+	/* Already operating on a DFS channel: nothing for OCAC to do */
+	if ((ic->ic_curchan != IEEE80211_CHAN_ANYC) &&
+			(ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS)) {
+		OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending"
+				" because the current channel is a DFS channel\n");
+		ic->ic_ocac.ocac_counts.curchan_dfs++;
+		ieee80211_ocac_restore_beacon_interval(ic);
+		reset_duration = 1;
+		goto set_ocac;
+	}
+
+	TAILQ_FOREACH(tmp_vap, &ic->ic_vaps, iv_next) {
+		if (tmp_vap->iv_opmode == IEEE80211_M_WDS &&
+				!IEEE80211_VAP_WDS_IS_MBS(tmp_vap)) {
+			OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending"
+					" because a WDS interface exists\n");
+			ic->ic_ocac.ocac_counts.wds_exist++;
+			ieee80211_ocac_restore_beacon_interval(ic);
+			reset_duration = 1;
+			goto set_ocac;
+		}
+		if (tmp_vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+		if ((tmp_vap->iv_state != IEEE80211_S_RUN)
+				&& (tmp_vap->iv_state != IEEE80211_S_SCAN))
+			continue;
+		num_vap++;
+		vap_idx = ic->ic_get_vap_idx(tmp_vap);
+		if (vap_idx > 1) {
+			OCACDBG(OCACLOG_NOTICE, "DFS seamless radio is pending because"
+					" unsupported MBSSID(wifi%d) is configured. Supported "
+					"MBSSIDs are wifi0, wifi1\n", vap_idx);
+			/* Support OCAC with two VAPs only - wifi0 and wifi1 */
+			ic->ic_ocac.ocac_counts.unsupported_mbssid++;
+			ieee80211_ocac_restore_beacon_interval(ic);
+			reset_duration = 1;
+			goto set_ocac;
+		}
+	}
+
+	if (ieee80211_ocac_check_bcn_scheme(vap, num_vap, &reset_duration) < 0) {
+		ieee80211_ocac_restore_beacon_interval(ic);
+		goto set_ocac;
+	}
+
+	/* The picked channel became available in the meantime: pick anew */
+	if (is_ieee80211_chan_valid(ic->ic_ocac.ocac_chan)
+		&& ieee80211_is_chan_available(ic->ic_ocac.ocac_chan)) {
+			ic->ic_ocac.ocac_repick_dfs_chan = 1;
+	}
+
+	if (ic->ic_ocac.ocac_chan == NULL ||
+			ic->ic_ocac.ocac_repick_dfs_chan) {
+		/* initial off channel selection */
+		ic->ic_ocac.ocac_counts.init_offchan++;
+		ic->ic_ocac.ocac_chan = ieee80211_ocac_pick_dfs_channel(ic,
+				ic->ic_ocac.ocac_cfg.ocac_chan_ieee);
+		if (ic->ic_ocac.ocac_chan == NULL) {
+			OCACDBG(OCACLOG_NOTICE, "Init DFS channel selection (%d) error\n",
+						ic->ic_ocac.ocac_cfg.ocac_chan_ieee);
+			ic->ic_ocac.ocac_counts.no_offchan++;
+			reset_duration = 1;
+			goto set_ocac;
+		}
+		ic->ic_ocac.ocac_repick_dfs_chan = 0;
+		ieee80211_ocac_clean_stats(ic, IEEE80211_OCAC_CLEAN_STATS_RESET);
+		DFS_S_DBG_QEVT(ic2dev(ic), "DFS_s_radio: CAC started for channel %u\n",
+				ic->ic_ocac.ocac_chan->ic_ieee);
+		OCACDBG(OCACLOG_NOTICE, "CAC duration: %u secs, minimal valid CAC time: %u secs\n",
+				ieee80211_ocac_get_param_duration(ic, ic->ic_ocac.ocac_chan),
+				ieee80211_ocac_get_param_cac_time(ic, ic->ic_ocac.ocac_chan));
+	}
+
+	/* Defer to a neighbouring AP that is already doing CAC */
+	ieee80211_ocac_check_simul_cac(vap);
+	if (ic->ic_ocac.ocac_available != OCAC_AVAILABLE) {
+		ieee80211_ocac_restore_beacon_interval(ic);
+		reset_duration = 1;
+		goto set_ocac;
+	}
+
+	ieee80211_ocac_set_beacon_interval(ic);
+
+	radar_detected = ic->ic_ocac.ocac_chan->ic_flags & IEEE80211_CHAN_RADAR;
+	if (radar_detected) {
+		ic->ic_ocac.ocac_counts.radar_detected++;
+		OCACDBG(OCACLOG_NOTICE, "Radar was detected on channel %u\n",
+				ic->ic_ocac.ocac_chan->ic_ieee);
+		/* Non-zero ocac_chan_ieee means a fixed test-mode channel */
+		if (ic->ic_ocac.ocac_cfg.ocac_chan_ieee) {
+			OCACDBG(OCACLOG_NOTICE, "CAC stops when radar detected for test mode\n");
+			goto stop_ocac;
+		}
+		chan = ieee80211_ocac_pick_dfs_channel(ic,
+				ic->ic_ocac.ocac_cfg.ocac_chan_ieee);
+		if (chan && ic->ic_ocac.ocac_chan != chan) {
+			ic->ic_ocac.ocac_chan = chan;
+			DFS_S_DBG_QEVT(ic2dev(ic), "DFS_s_radio: CAC restarted for channel %u\n",
+					     chan->ic_ieee);
+			OCACDBG(OCACLOG_NOTICE, "CAC duration: %u secs, minimal valid CAC time: %u secs\n",
+					ieee80211_ocac_get_param_duration(ic, ic->ic_ocac.ocac_chan),
+					ieee80211_ocac_get_param_cac_time(ic, ic->ic_ocac.ocac_chan));
+		}
+		ieee80211_ocac_clean_stats(ic, IEEE80211_OCAC_CLEAN_STATS_RESET);
+		if (chan == NULL) {
+			reset_duration = 1;
+			goto set_ocac;
+		}
+	} else {
+#define CAC_STATUS_COMPLETE	0x1
+#define CAC_STATUS_SUCCESS	0x2
+		uint8_t cac_status = 0;
+		int retval;
+
+		/* Enough radar-free CAC time accumulated: success */
+		if (ic->ic_ocac.ocac_accum_cac_time_ms >=
+			ieee80211_ocac_get_param_cac_time(ic, ic->ic_ocac.ocac_chan) * 1000) {
+
+			cac_status = CAC_STATUS_SUCCESS | CAC_STATUS_COMPLETE;
+
+		} else {
+			/* Budget spent without enough CAC time: complete but failed */
+			if (ic->ic_ocac.ocac_accum_duration_secs >=
+				ieee80211_ocac_get_param_duration(ic, ic->ic_ocac.ocac_chan)) {
+
+				cac_status = CAC_STATUS_COMPLETE;
+			}
+		}
+
+		if (cac_status) {
+			ic->ic_ocac.ocac_available = OCAC_UNAVAILABLE;
+
+			reset_duration = 1;
+
+			if (cac_status & CAC_STATUS_SUCCESS) {
+				ic->ic_ocac.ocac_counts.cac_success++;
+				OCACDBG(OCACLOG_NOTICE, "CAC succeed and no radar\n");
+
+				retval = ieee80211_ocac_change_channel(ic, ic->ic_ocac.ocac_chan);
+				if (ic->ic_ocac.ocac_cfg.ocac_chan_ieee) {
+					if (retval == 0) {
+						OCACDBG(OCACLOG_NOTICE, "CAC stops after switching"
+							" to dfs channel for test mode\n");
+						goto stop_ocac;
+					} else {
+						goto set_ocac;
+					}
+				} else {
+					if (!ic->ic_ocac.ocac_cfg.ocac_report_only) {
+						ic->ic_ocac.ocac_repick_dfs_chan = 1;
+					}
+					goto set_ocac;
+				}
+			} else {
+				ic->ic_ocac.ocac_counts.cac_failed++;
+				OCACDBG(OCACLOG_NOTICE, "CAC failed and restarted, CAC accumulated %u, CAC desired %u\n",
+						ic->ic_ocac.ocac_accum_cac_time_ms / 1000,
+						ieee80211_ocac_get_param_cac_time(ic, ic->ic_ocac.ocac_chan));
+
+				goto set_ocac;
+			}
+		}
+	}
+
+	/* All checks passed: keep/continue OCAC on this channel this tick */
+	dfs_chan = ic->ic_ocac.ocac_chan;
+
+set_ocac:
+	ic->ic_set_ocac(vap, dfs_chan);
+
+	prev_ocac_running = ic->ic_ocac.ocac_running;
+	ic->ic_ocac.ocac_running = dfs_chan ? 1 : 0;
+
+	/* Re-vote power management only on run-state transitions */
+	if (prev_ocac_running != ic->ic_ocac.ocac_running) {
+		ic->ic_pm_reason = IEEE80211_PM_LEVEL_OCAC_SDFS_TIMER;
+		ieee80211_pm_queue_work(ic);
+		ic->ic_ocac.ocac_counts.pm_update++;
+	}
+
+	if (reset_duration) {
+		ieee80211_ocac_clean_stats(ic, IEEE80211_OCAC_CLEAN_STATS_RESET);
+	} else {
+		ic->ic_ocac.ocac_accum_duration_secs +=
+				ic->ic_ocac.ocac_cfg.ocac_params.timer_interval;
+	}
+
+	mod_timer(&ic->ic_ocac.ocac_timer,
+			jiffies + (ic->ic_ocac.ocac_cfg.ocac_params.timer_interval * HZ));
+
+	return;
+
+stop_ocac:
+	ieee80211_wireless_stop_ocac(vap);
+	ic->ic_ocac.ocac_cfg.ocac_enable = 0;
+	ic->ic_beacon_update(vap);
+}
+
+/*
+ * Per-region default OCAC parameter sets, applied by
+ * ieee80211_ocac_update_params() when the region changes.  The
+ * initializers are positional; keep each value's trailing comment in sync
+ * with the field order of struct ieee80211_ocac_params (declared
+ * elsewhere).  Regions are matched case-insensitively.
+ */
+struct ieee80211_ocac_params_dflt {
+	char			region[4];
+	struct ieee80211_ocac_params	dflt_params;
+} ocac_params_dflt[] = {
+	{
+		"EU",
+		{
+			1,	/* traffic control */
+			23,	/* secure dwell */
+			40,	/* dwell time */
+			720,	/* duration */
+			240,	/* cac time */
+			46,	/* dwell time for weather channel */
+			11520,	/* duration for weather channel */
+			4329,	/* cac time for weather channel */
+			90,	/* thresh fat */
+			30,	/* thresh traffic */
+			10,	/* thresh fat dec */
+			20,	/* thresh cca intf */
+			10,	/* offset txhalt */
+			7,	/* offset offchan */
+			2,	/* timer interval */
+			100	/* beacon interval */
+		}
+	},
+	{
+		"US",
+		{
+			0,	/* traffic control */
+			23,	/* secure dwell */
+			80,	/* dwell time */
+			70,	/* duration */
+			50,	/* cac time */
+			80,	/* dwell time for weather channel*/
+			70,	/* duration for weather channel */
+			50,	/* cac time for weather channel */
+			75,	/* thresh fat */
+			3,	/* thresh traffic */
+			10,	/* thresh fat dec */
+			20,	/* thresh cca intf */
+			5,	/* offset txhalt */
+			5,	/* offset offchan */
+			2,	/* timer interval */
+			100	/* beacon interval */
+		}
+	}
+};
+
+/*
+ * Return 1 when the region string matches an entry in the per-region
+ * default-parameter table (case-insensitive), 0 otherwise.
+ */
+static int
+ieee80211_ocac_is_region_supported(const char *region)
+{
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(ocac_params_dflt); idx++) {
+		if (strcasecmp(region, ocac_params_dflt[idx].region) == 0)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Switch the OCAC parameter set to the defaults for a new regulatory
+ * region.
+ *
+ * No-op while OCAC is enabled (a running session keeps its parameters) or
+ * when the region is unchanged (case-insensitive compare).  Otherwise the
+ * region name is recorded and, if it is in ocac_params_dflt, the whole
+ * parameter block is replaced by that region's defaults.
+ */
+void
+ieee80211_ocac_update_params(struct ieee80211com *ic, const char *region)
+{
+	int i;
+
+	if (ic->ic_ocac.ocac_cfg.ocac_enable) {
+		return;
+	}
+
+	if (!strcasecmp(ic->ic_ocac.ocac_cfg.ocac_region, region)) {
+		return;
+	}
+
+	/*
+	 * strncpy() does not NUL-terminate when the source fills the buffer;
+	 * copy at most size-1 bytes and terminate explicitly.
+	 */
+	strncpy(ic->ic_ocac.ocac_cfg.ocac_region, region,
+			sizeof(ic->ic_ocac.ocac_cfg.ocac_region) - 1);
+	ic->ic_ocac.ocac_cfg.ocac_region[sizeof(ic->ic_ocac.ocac_cfg.ocac_region) - 1] = '\0';
+
+	for (i = 0; i < ARRAY_SIZE(ocac_params_dflt); i++) {
+		if (!strcasecmp(ic->ic_ocac.ocac_cfg.ocac_region, ocac_params_dflt[i].region)) {
+			memcpy(&ic->ic_ocac.ocac_cfg.ocac_params, &ocac_params_dflt[i].dflt_params,
+					sizeof(ic->ic_ocac.ocac_cfg.ocac_params));
+			printk("DFS_s_radio: parameters updated, region: %s\n",
+					ic->ic_ocac.ocac_cfg.ocac_region);
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_ocac_update_params);
+
+/*
+ * Reasons why OCAC (DFS seamless radio) cannot run, as returned by
+ * ieee80211_wireless_is_ocac_unsupported().  0 means "supported"; values
+ * start at 1 and index qtn_ocac_reason_str as (reason - 1).
+ */
+enum qtn_ocac_unsupported_reason {
+	QTN_OCAC_REASON_AP_MODE = 1,
+	QTN_OCAC_REASON_REGION,
+	QTN_OCAC_REASON_WDS,
+	QTN_OCAC_REASON_MBSSID,
+	QTN_OCAC_REASON_NO_DFS_CHAN,
+	QTN_OCAC_REASON_NO_NON_DFS_CHAN,
+	QTN_OCAC_REASON_MAX = QTN_OCAC_REASON_NO_NON_DFS_CHAN
+};
+
+/*
+ * Human-readable text for enum qtn_ocac_unsupported_reason, indexed by
+ * (reason - 1).  Declared const: these point at string literals and are
+ * only ever printed.
+ */
+static const char *qtn_ocac_reason_str[QTN_OCAC_REASON_MAX] = {
+	"supported in AP mode only",
+	"not supported for current region",
+	"not supported in the case of WDS interface exist",
+	"not supported in the case of unsupported MBSSID(except wifi0 and wifi1) is configured",
+	"not supported because no DFS channel",
+	"not supported because no non-DFS channel"
+};
+
+/*
+ * Determine whether OCAC can be enabled on this VAP.
+ *
+ * Returns 0 when supported, otherwise the first matching
+ * qtn_ocac_unsupported_reason: not an AP, unsupported region, a plain
+ * (non-MBS) WDS peer exists, an MBSSID beyond wifi0/wifi1 is active, or
+ * the active channel list lacks either a DFS or a non-DFS channel (OCAC
+ * needs a non-DFS home channel and a DFS channel to monitor).
+ */
+static unsigned int
+ieee80211_wireless_is_ocac_unsupported(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211vap *tmp_vap;
+	struct ieee80211_channel *c;
+	uint32_t num_dfs_chan = 0;
+	uint32_t num_non_dfs_chan = 0;
+	unsigned int ret = 0;
+	int i;
+
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
+		ret = QTN_OCAC_REASON_AP_MODE;
+		goto done;
+	}
+
+	if (!ieee80211_ocac_is_region_supported(ic->ic_ocac.ocac_cfg.ocac_region)) {
+		ret = QTN_OCAC_REASON_REGION;
+		goto done;
+	}
+
+	TAILQ_FOREACH(tmp_vap, &ic->ic_vaps, iv_next) {
+		if (tmp_vap->iv_opmode == IEEE80211_M_WDS &&
+				!IEEE80211_VAP_WDS_IS_MBS(tmp_vap)) {
+			ic->ic_ocac.ocac_counts.wds_exist++;
+			ret = QTN_OCAC_REASON_WDS;
+			goto done;
+		}
+		if (tmp_vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+		if ((tmp_vap->iv_state != IEEE80211_S_RUN)
+				&& (tmp_vap->iv_state != IEEE80211_S_SCAN))
+			continue;
+		if (ic->ic_get_vap_idx(tmp_vap) > 1) {
+			/* Only support OCAC with two VAPs - wifi0 and wifi1 */
+			ic->ic_ocac.ocac_counts.unsupported_mbssid++;
+			ret = QTN_OCAC_REASON_MBSSID;
+			goto done;
+		}
+	}
+
+	/* Count usable DFS/non-DFS channels among the active set */
+	for (i = 0; i < ic->ic_nchans; i++) {
+		c = &ic->ic_channels[i];
+		if (c == NULL || isclr(ic->ic_chan_active, c->ic_ieee) ||
+				!ieee80211_chan_allowed_in_band(ic, c, vap->iv_opmode)) {
+			continue;
+		}
+		if (c->ic_flags & IEEE80211_CHAN_DFS) {
+			num_dfs_chan++;
+		} else {
+			num_non_dfs_chan++;
+		}
+	}
+	if (num_dfs_chan == 0) {
+		ret = QTN_OCAC_REASON_NO_DFS_CHAN;
+		goto done;
+	}
+	if (num_non_dfs_chan == 0) {
+		ret = QTN_OCAC_REASON_NO_NON_DFS_CHAN;
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
+/*
+ * Validate that OCAC may be started on this VAP, optionally for an
+ * explicit DFS channel (chan_ieee != 0).  Logs the failure reason and
+ * returns -1 when unsupported, 0 when OCAC may proceed.
+ */
+static int
+ieee80211_wireless_check_if_ocac_supported(struct ieee80211vap *vap, int chan_ieee)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan;
+	unsigned int reason = ieee80211_wireless_is_ocac_unsupported(vap);
+
+	if (reason != 0) {
+		if (reason <= QTN_OCAC_REASON_MAX)
+			printk("DFS seamless radio is %s\n", qtn_ocac_reason_str[reason - 1]);
+		return -1;
+	}
+
+	if (!chan_ieee)
+		return 0;
+
+	chan = ieee80211_find_channel_by_ieee(ic, chan_ieee);
+	if (chan == NULL || !(chan->ic_flags & IEEE80211_CHAN_DFS)) {
+		DFS_S_DBG_QEVT(ic2dev(ic), "DFS_s_radio: channel %u is not a valid DFS channel\n",
+				chan_ieee);
+		return -1;
+	}
+	if (chan->ic_flags & IEEE80211_CHAN_RADAR) {
+		DFS_S_DBG_QEVT(ic2dev(ic), "DFS_s_radio: radar detected on channel %u\n",
+				     chan_ieee);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Start off-channel CAC
+ *
+ * Records the (optional, 0 = auto) target DFS channel, clears any stale
+ * channel pick, accounts a START stats-clean and arms the OCAC timer to
+ * fire after ocac_timer_expire_init seconds; all further work happens in
+ * ieee80211_ocac_timer_func().  OCAC starts out UNAVAILABLE until the
+ * neighbourhood check allows it.
+ */
+static void
+ieee80211_wireless_start_ocac(struct ieee80211vap *vap, int chan_ieee)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	ic->ic_ocac.ocac_available = OCAC_UNAVAILABLE;
+
+	ic->ic_ocac.ocac_cfg.ocac_chan_ieee = chan_ieee;
+	ic->ic_ocac.ocac_chan = NULL;	/* reset the dfs channel */
+
+	ieee80211_ocac_clean_stats(ic, IEEE80211_OCAC_CLEAN_STATS_START);
+
+	init_timer(&ic->ic_ocac.ocac_timer);
+	ic->ic_ocac.ocac_timer.function = ieee80211_ocac_timer_func;
+	ic->ic_ocac.ocac_timer.data = (unsigned long) vap;
+	ic->ic_ocac.ocac_timer.expires = jiffies +
+			ic->ic_ocac.ocac_cfg.ocac_timer_expire_init * HZ;
+	add_timer(&ic->ic_ocac.ocac_timer);
+
+	printk("Starting DFS seamless radio...\n");
+}
+
+/*
+ * Dump (value != 0) or clear (value == 0) the OCAC diagnostic counters
+ * to the kernel log.
+ */
+static void
+ieee80211_ocac_set_dump_counts(struct ieee80211com *ic, int value)
+{
+	struct ieee80211_ocac_counts *ocac_counts = &ic->ic_ocac.ocac_counts;
+
+	if (value) {
+		printk("DFS_s_radio counts:\n");
+		printk("  ap_not_running:       %u\n", ocac_counts->ap_not_running);
+		printk("  chan_scanning:        %u\n", ocac_counts->chan_scanning);
+		printk("  curchan_dfs:          %u\n", ocac_counts->curchan_dfs);
+		printk("  init_offchan:         %u\n", ocac_counts->init_offchan);
+		printk("  no_offchan:           %u\n", ocac_counts->no_offchan);
+		printk("  pick_offchan:         %u\n", ocac_counts->pick_offchan);
+		printk("  invalid_offchan:      %u\n", ocac_counts->invalid_offchan);
+		printk("  set_bcn_intval:       %u\n", ocac_counts->set_bcn_intval);
+		printk("  restore_bcn_intval:   %u\n", ocac_counts->restore_bcn_intval);
+		printk("  pm_update:            %u\n", ocac_counts->pm_update);
+		printk("  unsupported_mbssid:   %u\n", ocac_counts->unsupported_mbssid);
+		printk("  beacon_scheme0:       %u\n", ocac_counts->beacon_scheme0);
+		printk("  wds_exist:            %u\n", ocac_counts->wds_exist);
+		printk("  set_run:              %u\n", ocac_counts->set_run);
+		printk("  set_pend:             %u\n", ocac_counts->set_pend);
+		printk("  skip_set_run:         %u\n", ocac_counts->skip_set_run);
+		printk("  skip_set_pend:        %u\n", ocac_counts->skip_set_pend);
+		printk("  alloc_skb_error:      %u\n", ocac_counts->alloc_skb_error);
+		printk("  set_frame_error:      %u\n", ocac_counts->set_frame_error);
+		printk("  hostlink_err:         %u\n", ocac_counts->hostlink_err);
+		printk("  hostlink_ok:          %u\n", ocac_counts->hostlink_ok);
+		printk("  radar_detected:       %u\n", ocac_counts->radar_detected);
+		printk("  cac_failed:           %u\n", ocac_counts->cac_failed);
+		printk("  cac_success:          %u\n", ocac_counts->cac_success);
+		printk("  csw_rpt_only:         %u\n", ocac_counts->csw_rpt_only);
+		printk("  csw_fail_intf:        %u\n", ocac_counts->csw_fail_intf);
+		printk("  csw_fail_radar:       %u\n", ocac_counts->csw_fail_radar);
+		printk("  csw_fail_csa:         %u\n", ocac_counts->csw_fail_csa);
+		printk("  csw_success:          %u\n", ocac_counts->csw_success);
+		printk("  clean_stats_reset:    %u\n", ocac_counts->clean_stats_reset);
+		printk("  clean_stats_start:    %u\n", ocac_counts->clean_stats_start);
+		printk("  clean_stats_stop:     %u\n", ocac_counts->clean_stats_stop);
+		printk("  tasklet_off_chan:     %u\n", ocac_counts->tasklet_off_chan);
+		printk("  tasklet_data_chan:    %u\n", ocac_counts->tasklet_data_chan);
+		printk("  intr_off_chan:        %u\n", ocac_counts->intr_off_chan);
+		printk("  intr_data_chan:       %u\n", ocac_counts->intr_data_chan);
+		printk("  no_channel_change_eu: %u\n", ocac_counts->no_channel_change_eu);
+	} else {
+		/* clear ocac counts */
+		memset(ocac_counts, 0, sizeof(ic->ic_ocac.ocac_counts));
+	}
+}
+
+/*
+ * Print the circular TSF log of off-channel/data-channel transitions.
+ *
+ * Snapshots the log under the BH lock, then walks it from the current
+ * write index.  Each column is a delta between two TSF timestamps of one
+ * dwell cycle; sw_datachan spans into the NEXT entry because the
+ * return-to-data-channel completion is recorded there.
+ */
+static void
+ieee80211_ocac_dump_tsflog(struct ieee80211com *ic)
+{
+	int i;
+	int cur_index;
+	int next_index;
+	uint32_t time_sw_offchan;
+	uint32_t time_sw_datachan;
+	uint32_t time_on_offchan;
+	uint32_t time_on_datachan;
+	struct ieee80211_ocac_tsflog tsflog;
+
+	IEEE80211_LOCK_BH(ic);
+	memcpy(&tsflog, &ic->ic_ocac.ocac_tsflog, sizeof(struct ieee80211_ocac_tsflog));
+	IEEE80211_UNLOCK_BH(ic);
+
+	printk("  sw_offchan  on_offchan  sw_datachan  on_datachan\n");
+	cur_index = tsflog.log_index;
+	for (i = 0; i < QTN_OCAC_TSF_LOG_DEPTH; i++)
+	{
+		next_index = (cur_index + 1) % QTN_OCAC_TSF_LOG_DEPTH;
+
+		time_sw_offchan = (uint32_t)(tsflog.tsf_log[cur_index][OCAC_TSF_LOG_GOTO_OFF_CHAN_DONE] -
+				tsflog.tsf_log[cur_index][OCAC_TSF_LOG_GOTO_OFF_CHAN]);
+		time_on_offchan = (uint32_t)(tsflog.tsf_log[cur_index][OCAC_TSF_LOG_GOTO_DATA_CHAN] -
+				tsflog.tsf_log[cur_index][OCAC_TSF_LOG_GOTO_OFF_CHAN_DONE]);
+		time_on_datachan = (uint32_t)(tsflog.tsf_log[cur_index][OCAC_TSF_LOG_GOTO_OFF_CHAN] -
+				tsflog.tsf_log[cur_index][OCAC_TSF_LOG_GOTO_DATA_CHAN_DONE]);
+		time_sw_datachan = (uint32_t)(tsflog.tsf_log[next_index][OCAC_TSF_LOG_GOTO_DATA_CHAN_DONE] -
+				tsflog.tsf_log[cur_index][OCAC_TSF_LOG_GOTO_DATA_CHAN]);
+		cur_index = next_index;
+		printk("  %10u  %10u  %10u  %10u\n", time_sw_offchan, time_on_offchan,
+				time_sw_datachan, time_on_datachan);
+	}
+}
+
+/*
+ * Print the current OCAC configuration (region, flags and all tunable
+ * parameters) to the kernel log.
+ */
+static void
+ieee80211_ocac_dump_cfg(struct ieee80211com *ic)
+{
+	printk("DFS_s_radio cfg:\n");
+	printk("  region:            %s\n", ic->ic_ocac.ocac_cfg.ocac_region);
+	printk("  started:           %u\n", ic->ic_ocac.ocac_cfg.ocac_enable);
+	printk("  debug_level:       %u\n", ic->ic_ocac.ocac_cfg.ocac_debug_level);
+	printk("  report_only:       %u\n", ic->ic_ocac.ocac_cfg.ocac_report_only);
+	printk("  off_channel:       %u\n", ic->ic_ocac.ocac_cfg.ocac_chan_ieee);
+	printk("  timer_expire_init: %u\n", ic->ic_ocac.ocac_cfg.ocac_timer_expire_init);
+	printk("  secure_dwell_ms:   %u\n", ic->ic_ocac.ocac_cfg.ocac_params.secure_dwell_ms);
+	printk("  dwell_time_ms:     %u\n", ic->ic_ocac.ocac_cfg.ocac_params.dwell_time_ms);
+	printk("  duration_secs:     %u\n", ic->ic_ocac.ocac_cfg.ocac_params.duration_secs);
+	printk("  cac_time_secs:     %u\n", ic->ic_ocac.ocac_cfg.ocac_params.cac_time_secs);
+	printk("  wea_dwell_time_ms: %u\n", ic->ic_ocac.ocac_cfg.ocac_params.wea_dwell_time_ms);
+	printk("  wea_duration_secs: %u\n", ic->ic_ocac.ocac_cfg.ocac_params.wea_duration_secs);
+	printk("  wea_cac_time_secs: %u\n", ic->ic_ocac.ocac_cfg.ocac_params.wea_cac_time_secs);
+	printk("  thrshld_fat:       %u\n", ic->ic_ocac.ocac_cfg.ocac_params.thresh_fat);
+	printk("  thrshld_traffic:   %u\n", ic->ic_ocac.ocac_cfg.ocac_params.thresh_traffic);
+	printk("  thrshld_cca_intf:  %u\n", ic->ic_ocac.ocac_cfg.ocac_params.thresh_cca_intf);
+	printk("  thrshld_fat_dec:   %u\n", ic->ic_ocac.ocac_cfg.ocac_params.thresh_fat_dec);
+	printk("  timer_interval:    %u\n", ic->ic_ocac.ocac_cfg.ocac_params.timer_interval);
+	printk("  traffic_ctrl:      %u\n", ic->ic_ocac.ocac_cfg.ocac_params.traffic_ctrl);
+	printk("  offset_txhalt:     %u\n", ic->ic_ocac.ocac_cfg.ocac_params.offset_txhalt);
+	printk("  offset_offchan:    %u\n", ic->ic_ocac.ocac_cfg.ocac_params.offset_offchan);
+	printk("  beacon_interval:   %u\n", ic->ic_ocac.ocac_cfg.ocac_params.beacon_interval);
+}
+
+int
+ieee80211_param_ocac_set(struct net_device *dev, struct ieee80211vap *vap, u_int32_t value)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t cmd =	value >> IEEE80211_OCAC_COMMAND_S;
+	uint32_t arg = value & IEEE80211_OCAC_VALUE_M;
+
+	if (!ieee80211_swfeat_is_supported(SWFEAT_ID_OCAC, 1))
+		return -1;
+
+	if (cmd >= IEEE80211_OCAC_SET_MAX) {
+		printk("%s: invalid DFS_s_radio setparam cmd %u, arg=%u\n",
+				dev->name, cmd, arg);
+		return -1;
+	}
+
+	OCACDBG(OCACLOG_NOTICE, "setparam command: %u, value: 0x%x\n", cmd, arg);
+
+	switch (cmd) {
+	case IEEE80211_OCAC_SET_ENABLE_AUTO_DFS:
+		if (ieee80211_wireless_check_if_ocac_supported(vap, arg) < 0)
+			return -1;
+
+		ic->ic_ocac.ocac_cfg.ocac_params.auto_first_dfs_channel = arg & 0xFF;
+		arg = 0;
+		/* Fall through */
+	case IEEE80211_OCAC_SET_ENABLE:
+		if (ic->ic_ocac.ocac_cfg.ocac_enable) {
+			printk("DFS seamless radio is already running\n");
+			return -1;
+		}
+		if (ieee80211_wireless_check_if_ocac_supported(vap, arg) < 0)
+			return -1;
+
+		ieee80211_wireless_start_ocac(vap, arg);
+		ic->ic_ocac.ocac_cfg.ocac_enable = 1;
+		ic->ic_beacon_update(vap);
+		break;
+	case IEEE80211_OCAC_SET_DISABLE:
+		if (ic->ic_ocac.ocac_cfg.ocac_enable) {
+			ieee80211_wireless_stop_ocac(vap);
+			ic->ic_ocac.ocac_cfg.ocac_enable = 0;
+			ic->ic_beacon_update(vap);
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.auto_first_dfs_channel = 0;
+		break;
+	case IEEE80211_OCAC_SET_DEBUG_LEVEL:
+		ic->ic_ocac.ocac_cfg.ocac_debug_level = arg;
+		break;
+	case IEEE80211_OCAC_SET_DWELL_TIME:
+		if (arg < IEEE80211_OCAC_DWELL_TIME_MIN ||
+				arg > IEEE80211_OCAC_DWELL_TIME_MAX) {
+			printk("Invalid DFS_s_radio dwell time: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.dwell_time_ms = arg;
+		break;
+	case IEEE80211_OCAC_SET_SECURE_DWELL_TIME:
+		if (arg < IEEE80211_OCAC_SECURE_DWELL_TIME_MIN ||
+				arg > IEEE80211_OCAC_SECURE_DWELL_TIME_MAX) {
+			printk("Invalid DFS_s_radio secure dwell time: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.secure_dwell_ms = arg;
+		break;
+	case IEEE80211_OCAC_SET_DURATION:
+		if (arg < IEEE80211_OCAC_DURATION_MIN ||
+				arg > IEEE80211_OCAC_DURATION_MAX) {
+			printk("Invalid DFS_s_radio duration: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.duration_secs = arg;
+		break;
+	case IEEE80211_OCAC_SET_CAC_TIME:
+		if (arg < IEEE80211_OCAC_CAC_TIME_MIN ||
+				arg > IEEE80211_OCAC_CAC_TIME_MAX) {
+			printk("Invalid DFS_s_radio cac time: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.cac_time_secs = arg;
+		break;
+	case IEEE80211_OCAC_SET_WEATHER_DWELL_TIME:
+		if (arg < IEEE80211_OCAC_DWELL_TIME_MIN ||
+				arg > IEEE80211_OCAC_DWELL_TIME_MAX) {
+			printk("Invalid DFS_s_radio dwell time: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.wea_dwell_time_ms = arg;
+		break;
+	case IEEE80211_OCAC_SET_WEATHER_DURATION:
+		if (arg & IEEE80211_OCAC_COMPRESS_VALUE_F) {
+			arg = (arg & IEEE80211_OCAC_COMPRESS_VALUE_M) << 2;
+		}
+		if (arg < IEEE80211_OCAC_WEA_DURATION_MIN ||
+				arg > IEEE80211_OCAC_WEA_DURATION_MAX) {
+			printk("Invalid DFS_s_radio duration for weather channel: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.wea_duration_secs = arg;
+		break;
+	case IEEE80211_OCAC_SET_WEATHER_CAC_TIME:
+		if (arg & IEEE80211_OCAC_COMPRESS_VALUE_F) {
+			arg = (arg & IEEE80211_OCAC_COMPRESS_VALUE_M) << 2;
+		}
+		if (arg < IEEE80211_OCAC_WEA_CAC_TIME_MIN ||
+				arg > IEEE80211_OCAC_WEA_CAC_TIME_MAX) {
+			printk("Invalid DFS_s_radio cac time for weather channel: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.wea_cac_time_secs = arg;
+		break;
+	case IEEE80211_OCAC_SET_THRESHOLD_FAT:
+		if (arg < IEEE80211_OCAC_THRESHOLD_FAT_MIN ||
+				arg > IEEE80211_OCAC_THRESHOLD_FAT_MAX) {
+			printk("Invalid DFS_s_radio fat threshold: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.thresh_fat = arg;
+		break;
+	case IEEE80211_OCAC_SET_THRESHOLD_TRAFFIC:
+		if (arg < IEEE80211_OCAC_THRESHOLD_TRAFFIC_MIN ||
+				arg > IEEE80211_OCAC_THRESHOLD_TRAFFIC_MAX) {
+			printk("Invalid DFS_s_radio traffic threshold: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.thresh_traffic = arg;
+		break;
+	case IEEE80211_OCAC_SET_THRESHOLD_CCA_INTF:
+		if (arg < IEEE80211_OCAC_THRESHOLD_CCA_INTF_MIN ||
+				arg > IEEE80211_OCAC_THRESHOLD_CCA_INTF_MAX) {
+			printk("Invalid DFS_s_radio cca_intf threshold: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.thresh_cca_intf = arg;
+		break;
+	case IEEE80211_OCAC_SET_THRESHOLD_FAT_DEC:
+		if (arg < IEEE80211_OCAC_THRESHOLD_FAT_DEC_MIN ||
+				arg > IEEE80211_OCAC_THRESHOLD_FAT_DEC_MAX) {
+			printk("Invalid DFS_s_radio fat_dec threshold: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.thresh_fat_dec = arg;
+		break;
+	case IEEE80211_OCAC_SET_TIMER_INTERVAL:
+		if (arg < IEEE80211_OCAC_TIMER_INTERVAL_MIN ||
+				arg > IEEE80211_OCAC_TIMER_INTERVAL_MAX) {
+			printk("Invalid DFS_s_radio timer interval: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.timer_interval = arg;
+		break;
+	case IEEE80211_OCAC_SET_TIMER_EXPIRE_INIT:
+		if (arg < IEEE80211_OCAC_TIMER_EXPIRE_INIT_MIN ||
+				arg > IEEE80211_OCAC_TIMER_EXPIRE_INIT_MAX) {
+			printk("Invalid DFS_s_radio timer expire init: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_timer_expire_init = arg;
+		break;
+	case IEEE80211_OCAC_SET_OFFSET_TXHALT:
+		if (arg < IEEE80211_OCAC_OFFSET_TXHALT_MIN ||
+				arg > IEEE80211_OCAC_OFFSET_TXHALT_MAX) {
+			printk("Invalid DFS_s_radio offset for txhalt: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.offset_txhalt = arg;
+		break;
+	case IEEE80211_OCAC_SET_OFFSET_OFFCHAN:
+		if (arg < IEEE80211_OCAC_OFFSET_OFFCHAN_MIN ||
+				arg > IEEE80211_OCAC_OFFSET_OFFCHAN_MAX) {
+			printk("Invalid DFS_s_radio offset for switch off channel: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.offset_offchan = arg;
+		break;
+	case IEEE80211_OCAC_SET_BEACON_INTERVAL:
+		if (arg < IEEE80211_OCAC_BEACON_INTERVAL_MIN ||
+				arg > IEEE80211_OCAC_BEACON_INTERVAL_MAX) {
+			printk("Invalid DFS_s_radio beacon interval: %u\n", arg);
+			return -1;
+		}
+		ic->ic_ocac.ocac_cfg.ocac_params.beacon_interval = arg;
+		break;
+	case IEEE80211_OCAC_SET_DUMP_COUNTS:
+		ieee80211_ocac_set_dump_counts(ic, arg);
+		break;
+	case IEEE80211_OCAC_SET_DUMP_TSFLOG:
+		ieee80211_ocac_dump_tsflog(ic);
+		break;
+	case IEEE80211_OCAC_SET_DUMP_CFG:
+		ieee80211_ocac_dump_cfg(ic);
+		break;
+	case IEEE80211_OCAC_SET_TRAFFIC_CONTROL:
+		ic->ic_ocac.ocac_cfg.ocac_params.traffic_ctrl = arg ? 1 : 0;
+		break;
+	case IEEE80211_OCAC_SET_REPORT_ONLY:
+		ic->ic_ocac.ocac_cfg.ocac_report_only = arg ? 1 : 0;
+		break;
+	case IEEE80211_OCAC_SET_DUMP_CCA_COUNTS:
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_set_dfs_fast_switch(struct ieee80211com *ic)
+{
+	if (ic->ic_ieee_alt_chan != 0) {
+		struct ieee80211_channel *chan = findchannel(ic, ic->ic_ieee_alt_chan, ic->ic_des_mode);
+
+		if ((chan != NULL) && !(ieee80211_is_chan_available(chan))) {
+			return EINVAL;
+		}
+	}
+
+	ic->ic_flags_ext |= IEEE80211_FEXT_DFS_FAST_SWITCH;
+	return 0;
+}
+
+static int
+ieee80211_ioctl_set_alt_chan(struct ieee80211com *ic, uint8_t ieee_alt_chan)
+{
+	struct ieee80211_channel *chan = NULL;
+
+	if (ieee_alt_chan == 0) {
+		ic->ic_ieee_alt_chan = ieee_alt_chan;
+		return 0;
+	}
+
+	if (ic->ic_curchan->ic_ieee == ieee_alt_chan) {
+		return EINVAL;
+	}
+
+	chan = findchannel(ic, ieee_alt_chan, ic->ic_des_mode);
+
+	if (chan == NULL) {
+		return EINVAL;
+	}
+
+	if ((ic->ic_flags_ext & IEEE80211_FEXT_DFS_FAST_SWITCH)
+		&& !(ieee80211_is_chan_available(chan))) {
+		return EINVAL;
+	}
+
+	ic->ic_ieee_alt_chan = ieee_alt_chan;
+
+	return 0;
+}
+
+
+static int
+apply_tx_power(struct ieee80211vap *vap, int enc_val, int flag)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *c;
+	int8_t *bw_powers;
+	uint8_t start_chan = (enc_val & 0xFF000000) >> 24;
+	uint8_t stop_chan = (enc_val & 0x00FF0000) >> 16;
+	uint8_t max_power = (enc_val & 0x0000FF00) >> 8;
+	uint8_t min_power = (enc_val & 0x000000FF);
+	int iter;
+	int cur_bw;
+	int idx_bf;
+	int idx_ss;
+	int idx_bw;
+
+	if (start_chan > stop_chan)
+		return -EINVAL;
+
+	for (iter = 0; iter < ic->ic_nchans; iter++) {
+		c = ic->ic_channels + iter;
+		if (start_chan <= c->ic_ieee && c->ic_ieee <= stop_chan) {
+			switch (flag) {
+			case IEEE80211_BKUP_TXPOWER_NORMAL:
+				c->ic_maxpower = max_power;
+				c->ic_minpower = min_power;
+				c->ic_maxpower_normal = max_power;
+				c->ic_minpower_normal = min_power;
+
+				cur_bw = ieee80211_get_bw(ic);
+				switch (cur_bw) {
+				case BW_HT20:
+					idx_bw = PWR_IDX_20M;
+					break;
+				case BW_HT40:
+					idx_bw = PWR_IDX_40M;
+					break;
+				case BW_HT80:
+					idx_bw = PWR_IDX_80M;
+					break;
+				default:
+					idx_bw = PWR_IDX_BW_MAX; /* Invalid case */
+					break;
+				}
+				if (idx_bw < PWR_IDX_BW_MAX) {
+					c->ic_maxpower_table[PWR_IDX_BF_OFF][PWR_IDX_1SS][idx_bw] = max_power;
+					if (ic->ic_power_table_update) {
+						ic->ic_power_table_update(vap, c);
+					}
+				}
+				break;
+			case IEEE80211_APPLY_LOWGAIN_TXPOWER:
+				if (c->ic_maxpower_normal) {
+					c->ic_maxpower = IEEE80211_LOWGAIN_TXPOW_MAX;
+					c->ic_minpower = IEEE80211_LOWGAIN_TXPOW_MIN;
+				}
+				break;
+			case IEEE80211_APPLY_TXPOWER_NORMAL:
+				if (c->ic_maxpower_normal) {
+					c->ic_maxpower = c->ic_maxpower_normal;
+					c->ic_minpower = c->ic_minpower_normal;
+				}
+				break;
+			case IEEE80211_INIT_TXPOWER_TABLE:
+				c->ic_maxpower = max_power;
+				c->ic_minpower = min_power;
+				c->ic_maxpower_normal = max_power;
+				c->ic_minpower_normal = min_power;
+				for (idx_bf = PWR_IDX_BF_OFF; idx_bf < PWR_IDX_BF_MAX; idx_bf++) {
+					for (idx_ss = PWR_IDX_1SS; idx_ss < PWR_IDX_SS_MAX; idx_ss++) {
+						bw_powers = c->ic_maxpower_table[idx_bf][idx_ss];
+						bw_powers[PWR_IDX_20M] = max_power;
+						bw_powers[PWR_IDX_40M] = max_power;
+						bw_powers[PWR_IDX_80M] = max_power;
+					}
+				}
+				if (ic->ic_power_table_update) {
+					ic->ic_power_table_update(vap, c);
+				}
+				break;
+			default:
+				printk("%s: Invalid flag", __func__);
+			}
+		}
+	}
+	return 0;
+}
+
+static int ieee80211_set_bw_txpower(struct ieee80211vap *vap, unsigned int enc_val)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t channel = (enc_val >> 24) & 0xFF;
+	uint8_t bf_on = (enc_val >> 20) & 0xF;
+	uint8_t num_ss = (enc_val >> 16) & 0xF;
+	uint8_t bandwidth = (enc_val >> 8) & 0xF;
+	uint8_t power = enc_val & 0xFF;
+	uint8_t idx_bf = PWR_IDX_BF_OFF + bf_on;
+	uint8_t idx_ss = PWR_IDX_1SS + num_ss - 1;
+	uint8_t idx_bw = PWR_IDX_20M + bandwidth - QTN_BW_20M;
+	int iter;
+	int retval = -EINVAL;
+
+	if (idx_bf >= PWR_IDX_BF_MAX ||
+			idx_ss >= PWR_IDX_SS_MAX ||
+			idx_bw >= PWR_IDX_BW_MAX) {
+		return retval;
+	}
+
+	for (iter = 0; iter < ic->ic_nchans; iter++) {
+		if (ic->ic_channels[iter].ic_ieee == channel) {
+			ic->ic_channels[iter].ic_maxpower_table[idx_bf][idx_ss][idx_bw] = power;
+			/*
+			 * Update the maxpower of current bandwidth if it is for bfoff and 1ss
+			 */
+			if (idx_bf == PWR_IDX_BF_OFF &&	idx_ss == PWR_IDX_1SS) {
+				int cur_bw = ieee80211_get_bw(ic);
+
+				if ((cur_bw == BW_HT20 && bandwidth == QTN_BW_20M) ||
+						(cur_bw == BW_HT40 && bandwidth == QTN_BW_40M) ||
+						(cur_bw == BW_HT80 && bandwidth == QTN_BW_80M)) {
+					ic->ic_channels[iter].ic_maxpower = power;
+					ic->ic_channels[iter].ic_maxpower_normal = power;
+				}
+			}
+			retval = 0;
+
+			break;
+		}
+	}
+
+	return retval;
+}
+
+static int ieee80211_dump_tx_power(struct ieee80211com *ic)
+{
+	struct ieee80211_channel *chan;
+	int iter;
+	int idx_bf;
+	int idx_ss;
+
+	printk("channel   max_pwr   min_pwr   pwr_80M   pwr_40M   pwr_20M\n");
+	for (iter = 0; iter < ic->ic_nchans; iter++) {
+		chan = &ic->ic_channels[iter];
+		if (!isset(ic->ic_chan_active, chan->ic_ieee)) {
+			continue;
+		}
+		for (idx_bf = PWR_IDX_BF_OFF; idx_bf < PWR_IDX_BF_MAX; idx_bf++) {
+			for (idx_ss = PWR_IDX_1SS; idx_ss < PWR_IDX_SS_MAX; idx_ss++) {
+				printk("%7d   %7d   %7d   %7d   %7d   %7d\n",
+					chan->ic_ieee,
+					chan->ic_maxpower,
+					chan->ic_minpower,
+					chan->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_80M],
+					chan->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_40M],
+					chan->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_20M]);
+			}
+		}
+	}
+	return 0;
+}
+
+static int set_regulatory_tx_power(struct ieee80211com *ic, int enc_val)
+{
+	u_int8_t start_chan = (enc_val & 0x00FF0000) >> 16;
+	u_int8_t stop_chan = (enc_val & 0x0000FF00) >> 8;
+	u_int8_t reg_power = (enc_val & 0x000000FF);
+	int iter;
+
+	if (start_chan > stop_chan)
+		return -EINVAL;
+
+	for (iter = 0; iter < ic->ic_nchans; iter++) {
+		if (start_chan <= ic->ic_channels[iter].ic_ieee &&
+		    ic->ic_channels[iter].ic_ieee <= stop_chan) {
+			ic->ic_channels[iter].ic_maxregpower = reg_power;
+		}
+	}
+
+	return 0;
+}
+
+void ieee80211_doth_meas_callback_success(void *ctx)
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	if (ni->ni_meas_info.pending) {
+		ni->ni_meas_info.pending = 0;
+		ni->ni_meas_info.reason = 0;
+		wake_up_interruptible(&ni->ni_meas_info.meas_waitq);
+	}
+}
+
+void ieee80211_doth_meas_callback_fail(void *ctx, int32_t reason)
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	if (ni->ni_meas_info.pending) {
+		ni->ni_meas_info.pending = 0;
+		ni->ni_meas_info.reason = reason;
+		wake_up_interruptible(&ni->ni_meas_info.meas_waitq);
+	}
+}
+
+void ioctl_tpc_report_callback_success(void *ctx)
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	if (ni->ni_tpc_info.tpc_wait_info.tpc_pending) {
+		ni->ni_tpc_info.tpc_wait_info.tpc_pending = 0;
+		ni->ni_tpc_info.tpc_wait_info.reason = 0;
+		wake_up_interruptible(&ni->ni_tpc_info.tpc_wait_info.tpc_waitq);
+	}
+}
+
+void ioctl_tpc_report_callback_fail(void *ctx, int32_t reason)
+{
+	struct ieee80211_node *ni = (struct ieee80211_node *)ctx;
+
+	if (ni->ni_tpc_info.tpc_wait_info.tpc_pending) {
+		ni->ni_tpc_info.tpc_wait_info.tpc_pending = 0;
+		ni->ni_tpc_info.tpc_wait_info.reason = reason;
+		wake_up_interruptible(&ni->ni_tpc_info.tpc_wait_info.tpc_waitq);
+	}
+}
+
+static int
+ieee80211_subioctl_get_doth_dotk_report(struct net_device *dev, char __user *user_pointer)
+{
+	int ret = 0;
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_node_info	req_info;
+	union ieee80211rep_node_info	rep_info;
+	union ieee80211rep_node_info	*p_rep_info;
+
+	if (copy_from_user(&req_info, user_pointer, sizeof(struct ieee80211req_node_info)))
+		return -EFAULT;
+
+	p_rep_info = &rep_info;
+	switch (req_info.req_type) {
+	case IOCTL_REQ_MEASUREMENT:
+	{
+		struct ieee80211_node *ni;
+
+		ni = ieee80211_find_node(&ic->ic_sta, req_info.u_req_info.req_node_meas.mac_addr);
+		if (NULL == ni)
+			return -EINVAL;
+
+		switch (req_info.u_req_info.req_node_meas.type) {
+		case IOCTL_MEAS_TYPE_BASIC:
+			ieee80211_send_meas_request_basic(ni,
+					req_info.u_req_info.req_node_meas.ioctl_basic.channel,
+					req_info.u_req_info.req_node_meas.ioctl_basic.start_offset_ms,
+					req_info.u_req_info.req_node_meas.ioctl_basic.duration_ms,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(req_info.u_req_info.req_node_meas.ioctl_basic.start_offset_ms,
+						req_info.u_req_info.req_node_meas.ioctl_basic.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_CCA:
+			ieee80211_send_meas_request_cca(ni,
+					req_info.u_req_info.req_node_meas.ioctl_cca.channel,
+					req_info.u_req_info.req_node_meas.ioctl_cca.start_offset_ms,
+					req_info.u_req_info.req_node_meas.ioctl_cca.duration_ms,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(req_info.u_req_info.req_node_meas.ioctl_cca.start_offset_ms,
+						req_info.u_req_info.req_node_meas.ioctl_cca.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_RPI:
+			ieee80211_send_meas_request_rpi(ni,
+					req_info.u_req_info.req_node_meas.ioctl_rpi.channel,
+					req_info.u_req_info.req_node_meas.ioctl_rpi.start_offset_ms,
+					req_info.u_req_info.req_node_meas.ioctl_rpi.duration_ms,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(req_info.u_req_info.req_node_meas.ioctl_rpi.start_offset_ms,
+						req_info.u_req_info.req_node_meas.ioctl_rpi.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_CHAN_LOAD:
+			ieee80211_send_rm_req_chan_load(ni,
+					req_info.u_req_info.req_node_meas.ioctl_chan_load.channel,
+					req_info.u_req_info.req_node_meas.ioctl_chan_load.duration_ms,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(0,
+						req_info.u_req_info.req_node_meas.ioctl_chan_load.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_NOISE_HIS:
+			ieee80211_send_rm_req_noise_his(ni,
+					req_info.u_req_info.req_node_meas.ioctl_noise_his.channel,
+					req_info.u_req_info.req_node_meas.ioctl_noise_his.duration_ms,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(0,
+						req_info.u_req_info.req_node_meas.ioctl_noise_his.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_BEACON:
+			ieee80211_send_rm_req_beacon(ni,
+					req_info.u_req_info.req_node_meas.ioctl_beacon.op_class,
+					req_info.u_req_info.req_node_meas.ioctl_beacon.channel,
+					req_info.u_req_info.req_node_meas.ioctl_beacon.duration_ms,
+					req_info.u_req_info.req_node_meas.ioctl_beacon.mode,
+					req_info.u_req_info.req_node_meas.ioctl_beacon.bssid,
+					NULL,
+					0,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(0,
+						req_info.u_req_info.req_node_meas.ioctl_beacon.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_FRAME:
+			ieee80211_send_rm_req_frame(ni,
+					req_info.u_req_info.req_node_meas.ioctl_frame.op_class,
+					req_info.u_req_info.req_node_meas.ioctl_frame.channel,
+					req_info.u_req_info.req_node_meas.ioctl_frame.duration_ms,
+					req_info.u_req_info.req_node_meas.ioctl_frame.type,
+					req_info.u_req_info.req_node_meas.ioctl_frame.mac_address,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(0,
+						req_info.u_req_info.req_node_meas.ioctl_frame.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case  IOCTL_MEAS_TYPE_CAT:
+			ieee80211_send_rm_req_tran_stream_cat(ni,
+					req_info.u_req_info.req_node_meas.ioctl_tran_stream_cat.duration_ms,
+					req_info.u_req_info.req_node_meas.ioctl_tran_stream_cat.peer_sta,
+					req_info.u_req_info.req_node_meas.ioctl_tran_stream_cat.tid,
+					req_info.u_req_info.req_node_meas.ioctl_tran_stream_cat.bin0,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(0,
+						req_info.u_req_info.req_node_meas.ioctl_tran_stream_cat.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_MUL_DIAG:
+			ieee80211_send_rm_req_multicast_diag(ni,
+					req_info.u_req_info.req_node_meas.ioctl_multicast_diag.duration_ms,
+					req_info.u_req_info.req_node_meas.ioctl_multicast_diag.group_mac,
+					IEEE80211_MEASUREMENT_REQ_TIMEOUT(0,
+						req_info.u_req_info.req_node_meas.ioctl_multicast_diag.duration_ms),
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_LINK:
+			ieee80211_send_link_measure_request(ni,
+					HZ / 10,
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		case IOCTL_MEAS_TYPE_NEIGHBOR:
+			ieee80211_send_neighbor_report_request(ni,
+					HZ / 10,
+					(void *)ieee80211_doth_meas_callback_success,
+					(void *)ieee80211_doth_meas_callback_fail);
+			break;
+		default:
+			ieee80211_free_node(ni);
+			return -EOPNOTSUPP;
+		}
+
+		ni->ni_meas_info.pending = 1;
+		ret = wait_event_interruptible(ni->ni_meas_info.meas_waitq,
+				ni->ni_meas_info.pending == 0);
+
+		if (ret ==  0) {
+			if (ni->ni_meas_info.reason != 0) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+					"[%s]Measurement Request Fail:timeout waiting for response\n",
+					__func__);
+				switch (ni->ni_meas_info.reason) {
+				case PPQ_FAIL_TIMEOUT:
+					p_rep_info->meas_result.status = IOCTL_MEAS_STATUS_TIMEOUT;
+					break;
+				case PPQ_FAIL_NODELEAVE:
+					p_rep_info->meas_result.status = IOCTL_MEAS_STATUS_NODELEAVE;
+					break;
+				case PPQ_FAIL_STOP:
+					p_rep_info->meas_result.status = IOCTL_MEAS_STATUS_STOP;
+				default:
+					break;
+				}
+			} else {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+					"[%s]Measurement Request SUCC ret = %d\n",
+					__func__, ret);
+
+				if (req_info.u_req_info.req_node_meas.type == IOCTL_MEAS_TYPE_LINK) {
+					p_rep_info->meas_result.status = IOCTL_MEAS_STATUS_SUCC;
+					p_rep_info->meas_result.report_mode = 0;
+					p_rep_info->meas_result.u_data.link_measure.tpc_report.link_margin = ni->ni_lm.tpc_report.link_margin;
+					p_rep_info->meas_result.u_data.link_measure.tpc_report.tx_power = ni->ni_lm.tpc_report.tx_power;
+					p_rep_info->meas_result.u_data.link_measure.recv_antenna_id = ni->ni_lm.recv_antenna_id;
+					p_rep_info->meas_result.u_data.link_measure.tran_antenna_id = ni->ni_lm.tran_antenna_id;
+					p_rep_info->meas_result.u_data.link_measure.rcpi = ni->ni_lm.rcpi;
+					p_rep_info->meas_result.u_data.link_measure.rsni = ni->ni_lm.rsni;
+
+					break;
+				}
+
+				if (req_info.u_req_info.req_node_meas.type == IOCTL_MEAS_TYPE_NEIGHBOR) {
+					uint8_t i;
+
+					p_rep_info->meas_result.status = IOCTL_MEAS_STATUS_SUCC;
+					p_rep_info->meas_result.report_mode = 0;
+					p_rep_info->meas_result.u_data.neighbor_report.item_num = 0;
+					for (i = 0; i < ni->ni_neighbor.report_count && i < IEEE80211_MAX_NEIGHBOR_REPORT_ITEM; i++) {
+						memcpy(&p_rep_info->meas_result.u_data.neighbor_report.item[i],
+								ni->ni_neighbor.item_table[i],
+								sizeof(p_rep_info->meas_result.u_data.neighbor_report.item[i]));
+						kfree(ni->ni_neighbor.item_table[i]);
+						ni->ni_neighbor.item_table[i] = NULL;
+					}
+					p_rep_info->meas_result.u_data.neighbor_report.item_num = i;
+					ni->ni_neighbor.report_count = 0;
+					break;
+				}
+
+				p_rep_info->meas_result.status = IOCTL_MEAS_STATUS_SUCC;
+				p_rep_info->meas_result.report_mode = ni->ni_meas_info.ni_meas_rep_mode;
+				if (ni->ni_meas_info.ni_meas_rep_mode == 0) {
+					switch (req_info.u_req_info.req_node_meas.type) {
+					case IOCTL_MEAS_TYPE_BASIC:
+						p_rep_info->meas_result.u_data.basic = ni->ni_meas_info.rep.basic;
+						break;
+					case IOCTL_MEAS_TYPE_CCA:
+						p_rep_info->meas_result.u_data.cca = ni->ni_meas_info.rep.cca;
+						break;
+					case IOCTL_MEAS_TYPE_RPI:
+						memcpy(p_rep_info->meas_result.u_data.rpi,
+								ni->ni_meas_info.rep.rpi,
+								sizeof(p_rep_info->meas_result.u_data.rpi));
+						break;
+					case IOCTL_MEAS_TYPE_CHAN_LOAD:
+						p_rep_info->meas_result.u_data.chan_load = ni->ni_meas_info.rep.chan_load;
+						break;
+					case IOCTL_MEAS_TYPE_NOISE_HIS:
+						p_rep_info->meas_result.u_data.noise_his.antenna_id = ni->ni_meas_info.rep.noise_his.antenna_id;
+						p_rep_info->meas_result.u_data.noise_his.anpi = ni->ni_meas_info.rep.noise_his.anpi;
+						memcpy(p_rep_info->meas_result.u_data.noise_his.ipi,
+								ni->ni_meas_info.rep.noise_his.ipi,
+								sizeof(p_rep_info->meas_result.u_data.noise_his.ipi));
+						break;
+					case IOCTL_MEAS_TYPE_BEACON:
+						p_rep_info->meas_result.u_data.beacon.reported_frame_info = ni->ni_meas_info.rep.beacon.reported_frame_info;
+						p_rep_info->meas_result.u_data.beacon.rcpi = ni->ni_meas_info.rep.beacon.rcpi;
+						p_rep_info->meas_result.u_data.beacon.rsni = ni->ni_meas_info.rep.beacon.rsni;
+						memcpy(p_rep_info->meas_result.u_data.beacon.bssid,
+								ni->ni_meas_info.rep.beacon.bssid,
+								sizeof(p_rep_info->meas_result.u_data.beacon.bssid));
+						p_rep_info->meas_result.u_data.beacon.antenna_id = ni->ni_meas_info.rep.beacon.antenna_id;
+						p_rep_info->meas_result.u_data.beacon.parent_tsf = ni->ni_meas_info.rep.beacon.parent_tsf;
+						break;
+					case IOCTL_MEAS_TYPE_FRAME:
+						p_rep_info->meas_result.u_data.frame.sub_ele_report = ni->ni_meas_info.rep.frame_count.sub_ele_flag;
+						memcpy(p_rep_info->meas_result.u_data.frame.ta, ni->ni_meas_info.rep.frame_count.ta, IEEE80211_ADDR_LEN);
+						memcpy(p_rep_info->meas_result.u_data.frame.bssid, ni->ni_meas_info.rep.frame_count.bssid, IEEE80211_ADDR_LEN);
+						p_rep_info->meas_result.u_data.frame.phy_type = ni->ni_meas_info.rep.frame_count.phy_type;
+						p_rep_info->meas_result.u_data.frame.avg_rcpi = ni->ni_meas_info.rep.frame_count.avg_rcpi;
+						p_rep_info->meas_result.u_data.frame.last_rsni = ni->ni_meas_info.rep.frame_count.last_rsni;
+						p_rep_info->meas_result.u_data.frame.last_rcpi = ni->ni_meas_info.rep.frame_count.last_rcpi;
+						p_rep_info->meas_result.u_data.frame.antenna_id = ni->ni_meas_info.rep.frame_count.antenna_id;
+						p_rep_info->meas_result.u_data.frame.frame_count = ni->ni_meas_info.rep.frame_count.frame_count;
+						break;
+					case IOCTL_MEAS_TYPE_CAT:
+						memcpy(&p_rep_info->meas_result.u_data.tran_stream_cat,
+								&ni->ni_meas_info.rep.tran_stream_cat,
+								sizeof(p_rep_info->meas_result.u_data.tran_stream_cat));
+						break;
+					case IOCTL_MEAS_TYPE_MUL_DIAG:
+						p_rep_info->meas_result.u_data.multicast_diag.reason = ni->ni_meas_info.rep.multicast_diag.reason;
+						p_rep_info->meas_result.u_data.multicast_diag.mul_rec_msdu_cnt = ni->ni_meas_info.rep.multicast_diag.mul_rec_msdu_cnt;
+						p_rep_info->meas_result.u_data.multicast_diag.first_seq_num = ni->ni_meas_info.rep.multicast_diag.first_seq_num;
+						p_rep_info->meas_result.u_data.multicast_diag.last_seq_num = ni->ni_meas_info.rep.multicast_diag.last_seq_num;
+						p_rep_info->meas_result.u_data.multicast_diag.mul_rate = ni->ni_meas_info.rep.multicast_diag.mul_rate;
+
+						break;
+					default:
+						break;
+					}
+				}
+			}
+		} else {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+				"[%s]Measurement Request Fail:waiting for response cancelled\n",
+				__func__);
+			ni->ni_meas_info.pending = 0;
+			ret = -ECANCELED;
+		}
+		ieee80211_free_node(ni);
+		break;
+	}
+	case IOCTL_REQ_TPC:
+	{
+		struct ieee80211_action_tpc_request request;
+		struct ieee80211_action_data	action_data;
+		struct ieee80211_node		*ni;
+
+		if (((ic->ic_flags & IEEE80211_F_DOTH) == 0) ||
+				((ic->ic_flags_ext & IEEE80211_FEXT_TPC) == 0)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+				"[%s]TPC Request fail:802.11 disabled\n",
+				__func__);
+			return -EOPNOTSUPP;
+		}
+
+		ni = ieee80211_find_node(&ic->ic_sta, req_info.u_req_info.req_node_tpc.mac_addr);
+		if (NULL == ni) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+				"[%s]TPC Request Fail:no such node %s\n",
+				__func__,
+				ether_sprintf(req_info.u_req_info.req_node_tpc.mac_addr));
+			return -EINVAL;
+		}
+		if (((IEEE80211_CAPINFO_SPECTRUM_MGMT & ni->ni_capinfo) == 0) ||
+				((ni->ni_flags & IEEE80211_NODE_TPC) == 0)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+				"[%s]TPC Request Fail:node don't support 802.11h\n",
+				__func__);
+			ieee80211_free_node(ni);
+			return -EOPNOTSUPP;
+		}
+
+		request.expire = HZ / 10;
+		request.fn_success = ioctl_tpc_report_callback_success;
+		request.fn_fail = ioctl_tpc_report_callback_fail;
+		action_data.cat = IEEE80211_ACTION_CAT_SPEC_MGMT;
+		action_data.action = IEEE80211_ACTION_S_TPC_REQUEST;
+		action_data.params = &request;
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+
+		ni->ni_tpc_info.tpc_wait_info.tpc_pending = 1;
+		ni->ni_tpc_info.tpc_wait_info.reason = 0;
+		ret = wait_event_interruptible(ni->ni_tpc_info.tpc_wait_info.tpc_waitq,
+				ni->ni_tpc_info.tpc_wait_info.tpc_pending == 0);
+
+		if (ret == 0) {
+			if (ni->ni_tpc_info.tpc_wait_info.reason != 0) {
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+					"[%s]TPC Request Fail:timeout waiting for response\n",
+					__func__);
+				p_rep_info->tpc_result.status = 1;
+			} else {
+				p_rep_info->tpc_result.status = 0;
+				p_rep_info->tpc_result.link_margin = ni->ni_tpc_info.tpc_report.node_link_margin;
+				p_rep_info->tpc_result.tx_power	= ni->ni_tpc_info.tpc_report.node_txpow;
+			}
+		} else {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH | IEEE80211_MSG_DEBUG,
+				"[%s]TPC Request Fail:waiting for response cancelled\n",
+				__func__);
+			ni->ni_tpc_info.tpc_wait_info.tpc_pending = 0;
+			ret = -ECANCELED;
+		}
+		ieee80211_free_node(ni);
+		break;
+	}
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+	if (ret == 0)
+		ret = copy_to_user(user_pointer, p_rep_info, sizeof(union ieee80211rep_node_info));
+
+	return ret;
+}
+
+void ieee80211_beacon_interval_set(struct ieee80211com *ic, int value)
+{
+	struct ieee80211vap *vap_each;
+
+	ic->ic_lintval = value;
+
+	TAILQ_FOREACH(vap_each, &ic->ic_vaps, iv_next) {
+		if ((vap_each->iv_opmode == IEEE80211_M_HOSTAP) ||
+				(vap_each->iv_opmode == IEEE80211_M_IBSS) ){
+			vap_each->iv_bss->ni_intval = value;
+			ic->ic_beacon_update(vap_each);
+		}
+	}
+}
+EXPORT_SYMBOL(ieee80211_beacon_interval_set);
+
+static int ieee80211_11ac_mcs_format(int mcs, int bw)
+{
+	int retval = IEEE80211_11AC_MCS_VAL_ERR;
+	int mcs_val, mcs_nss;
+
+	/* Check for unequal MCS */
+	if (mcs & 0x100) {
+		mcs &=0xFF;
+		if ((mcs >= IEEE80211_UNEQUAL_MCS_START) && (mcs <= IEEE80211_UNEQUAL_MCS_MAX)) {
+			if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X2, 0) ||
+					ieee80211_swfeat_is_supported(SWFEAT_ID_2X4, 0)) {
+				if (mcs > IEEE80211_HT_UNEQUAL_MCS_2SS_MAX) {
+					return retval;
+				}
+			} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0)) {
+				if (mcs > IEEE80211_HT_UNEQUAL_MCS_3SS_MAX) {
+					return retval;
+				}
+			}
+			retval = (mcs - IEEE80211_UNEQUAL_MCS_START) | IEEE80211_UNEQUAL_MCS_BIT;
+		}
+	} else {
+		mcs_val = mcs & IEEE80211_AC_MCS_VAL_MASK;
+		mcs_nss = (mcs & IEEE80211_AC_MCS_NSS_MASK) >> IEEE80211_11AC_MCS_NSS_SHIFT;
+
+		if (!ieee80211_vht_tx_mcs_is_valid(mcs_val, mcs_nss)) {
+			return retval;
+		}
+
+		retval = (bw == 20) ? wlan_11ac_20M_mcs_nss_tbl[(mcs_nss * IEEE80211_AC_MCS_MAX) + mcs_val] : mcs;
+	}
+	return retval;
+}
+
/*
 * Add or remove a channel from the "inactive as primary" list (AP mode only).
 *
 * @value packs: bits 15-0 = IEEE channel number, bits 23-16 = active flag
 * (non-zero removes the channel from the inactive list, zero adds it),
 * bits 31-24 = CHAN_PRI_INACTIVE_CFG_* flags.
 *
 * Three bitmaps interact here:
 *   - ic_chan_pri_inactive: channels that may not be used as primary;
 *   - ic_is_inactive_usercfg: channels the user explicitly configured,
 *     which override regulatory-db driven changes;
 *   - ic_is_inactive_autochan_only: channels inactive only for automatic
 *     channel selection.
 *
 * After changing the list, warns if no usable sub-channel remains for the
 * current bandwidth, and triggers a rescan if the current/desired channel
 * became unusable as primary.
 *
 * Returns 0 on success or no-op, -EINVAL for an invalid channel.
 */
static int
ieee80211_ioctl_setchan_inactive_pri(struct ieee80211com *ic, struct ieee80211vap *vap, uint32_t value)
{
	uint16_t chan = value & 0xFFFF;
	uint8_t active = (value >> 16) & 0xFF;
	uint8_t flags = (value >> 24) & 0xFF;

	int reselect = 0;
	struct ieee80211_channel *c;
	int cur_bw;
	int set_inactive = 0;

	/* Primary-inactive bookkeeping only applies in AP mode */
	if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
		return 0;
	}

	c = findchannel_any(ic, chan, ic->ic_des_mode);
	if (!is_ieee80211_chan_valid(c)) {
		return -EINVAL;
	}

	if (active) {
		/* Request: make the channel usable as primary again */
		if (isset(ic->ic_chan_pri_inactive, chan)) {
			/*
			 * clrbit if flag indicates user configuration.
			 * And save the bit to is_inactive_usercfg to honor user configuration
			 * so as to override regulatory db.
			 *
			 * Otherwise clrbit only if user has not cfg-ed.
			 */
			if (flags & CHAN_PRI_INACTIVE_CFG_USER_OVERRIDE) {
				clrbit(ic->ic_chan_pri_inactive, chan);
				printk("channel %u is removed from non-primary channel list\n", chan);
				setbit(ic->ic_is_inactive_usercfg, chan);
			} else if (!isset(ic->ic_is_inactive_usercfg, chan)) {
				clrbit(ic->ic_chan_pri_inactive, chan);
				printk("channel %u is removed from non-primary channel list\n", chan);
			}
		}
		/* Reactivating never requires channel reselection */
		return 0;
	} else {
		/*
		 * Request: mark the channel non-primary.  Proceed when the bit is
		 * not yet set, or when the autochan-only qualifier differs from
		 * the stored state (so the qualifier can be updated).
		 */
		if (isclr(ic->ic_chan_pri_inactive, chan) ||
				!!(flags & CHAN_PRI_INACTIVE_CFG_AUTOCHAN_ONLY) !=
						!!isset(ic->ic_is_inactive_autochan_only, chan)) {
			/*
			 * setbit if flag indicates user configuration.
			 * And save the bit to is_inactive_usercfg to honor user configuration
			 * so as to override regulatory db.
			 *
			 * Otherwise setbit only if user has not cfg-ed.
			 */
			if (flags & CHAN_PRI_INACTIVE_CFG_USER_OVERRIDE) {
				setbit(ic->ic_chan_pri_inactive, chan);
				printk("channel %u is added into non-primary channel list\n", chan);
				setbit(ic->ic_is_inactive_usercfg, chan);
				set_inactive = 1;
			} else if (!isset(ic->ic_is_inactive_usercfg, chan)) {
				setbit(ic->ic_chan_pri_inactive, chan);
				printk("channel %u is added into non-primary channel list\n", chan);
				set_inactive = 1;
			}
			/* Record whether the restriction applies to auto-selection only */
			if (set_inactive) {
				if (flags & CHAN_PRI_INACTIVE_CFG_AUTOCHAN_ONLY) {
					setbit(ic->ic_is_inactive_autochan_only, chan);
					printk("channel %u is non-primary for auto channel selection only\n", chan);
				} else {
					clrbit(ic->ic_is_inactive_autochan_only, chan);
				}
			}
		} else {
			/* No state change requested; nothing further to check */
			return 0;
		}
	}

	/*
	 * For 40/80 MHz operation, warn when none of the sibling sub-channels
	 * of this channel can serve as primary any more.
	 */
	cur_bw = ieee80211_get_bw(ic);
	if (cur_bw >= BW_HT40) {
		int no_pri_chan = 1;
		int c_ieee = ieee80211_find_sec_chan(c);

		if (c_ieee && isclr(ic->ic_chan_pri_inactive, c_ieee)) {
			no_pri_chan = 0;
		}
		if (cur_bw >= BW_HT80) {
			c_ieee = ieee80211_find_sec40u_chan(c);
			if (c_ieee && isclr(ic->ic_chan_pri_inactive, c_ieee)) {
				no_pri_chan = 0;
			}
			c_ieee = ieee80211_find_sec40l_chan(c);
			if (c_ieee && isclr(ic->ic_chan_pri_inactive, c_ieee)) {
				no_pri_chan = 0;
			}
		}
		if (no_pri_chan) {
			printk("Warning: all the sub channels are not in primary channel list!\n");
		}
	}

	/* If the BSS or desired channel just became non-primary, rescan */
	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
			isset(ic->ic_chan_pri_inactive, ic->ic_bsschan->ic_ieee)) {
		printk("current channel is %d, cannot be used as primary channel\n", ic->ic_bsschan->ic_ieee);
		reselect = 1;
	}
	if(ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
			isset(ic->ic_chan_pri_inactive, ic->ic_des_chan->ic_ieee)) {
		printk("Channel %d cannot be used as desired channel\n", ic->ic_des_chan->ic_ieee);
		ic->ic_des_chan = IEEE80211_CHAN_ANYC;
		reselect = 1;
	}

	if (reselect && IS_UP_AUTO(vap)) {
		ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
	}

	return 0;
}
+
+static int
+local_get_inactive_primary_chan_num(struct ieee80211com *ic, struct ieee80211_inactive_chanlist *chanlist)
+{
+	int i;
+	int num = 0;
+
+	if (chanlist) {
+		memset(chanlist, 0, sizeof(struct ieee80211_inactive_chanlist));
+	}
+
+	for (i = 1; i < IEEE80211_CHAN_MAX; i++) {
+		if (isset(ic->ic_chan_pri_inactive, i)) {
+			if (chanlist)
+				chanlist->channels[i] = CHAN_PRI_INACTIVE_CFG_USER_OVERRIDE;
+			if (chanlist && isset(ic->ic_is_inactive_autochan_only, i))
+				chanlist->channels[i] |= CHAN_PRI_INACTIVE_CFG_AUTOCHAN_ONLY;
+			num++;
+		}
+	}
+
+	return num;
+}
+
/* Count inactive-primary channels without retrieving the channel list. */
static int
ieee80211_get_inactive_primary_chan_num(struct ieee80211com *ic)
{
	return local_get_inactive_primary_chan_num(ic, NULL);
}
+
+static void
+ieee80211_training_restart_by_node_idx(struct ieee80211vap *vap, uint16_t node_idx)
+{
+	struct ieee80211_node *ni;
+
+	ni = ieee80211_find_node_by_node_idx(vap, node_idx);
+	if (ni) {
+		ieee80211_node_training_start(ni, 1);
+		ieee80211_free_node(ni);
+	}
+}
+
+static int
+ieee80211_subioctl_set_chan_dfs_required(struct net_device *dev, void __user *pointer, int cnt)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	int channels[IEEE80211_CHAN_MAX];
+	int i;
+
+	if (cnt > ARRAY_SIZE(channels)) {
+		printk("%s: max number of supported channels is %d\n", __func__,
+			ARRAY_SIZE(channels));
+		cnt = ARRAY_SIZE(channels);
+	}
+
+	if (copy_from_user(channels, pointer, cnt*sizeof(int)))
+		return -EFAULT;
+
+	for (i = 0; i < cnt; i++) {
+		if (channels[i] < IEEE80211_CHAN_MAX && channels[i] > 0) {
+			setbit(ic->ic_chan_dfs_required, channels[i]);
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+				"Mark DFS channel %d\n", channels[i]);
+		}
+	}
+
+	ic->ic_mark_dfs_channels(ic, ic->ic_nchans, ic->ic_channels);
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_set_chan_weather_radar(struct net_device *dev, void __user *pointer, int cnt)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	int channels[IEEE80211_CHAN_MAX];
+	int i;
+
+	if (cnt > ARRAY_SIZE(channels)) {
+		printk("%s: max number of supported channels is %d\n", __func__,
+			ARRAY_SIZE(channels));
+		cnt = ARRAY_SIZE(channels);
+	}
+
+	if (copy_from_user(channels, pointer, cnt*sizeof(int)))
+		return -EFAULT;
+
+	for (i = 0; i < cnt; i++) {
+		if (channels[i] < IEEE80211_CHAN_MAX && channels[i] > 0) {
+			setbit(ic->ic_chan_weather_radar, channels[i]);
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+				"Mark weather channel %d\n", channels[i]);
+		}
+	}
+
+	ic->ic_mark_weather_radar_chans(ic, ic->ic_nchans, ic->ic_channels);
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_setget_chan_disabled(struct net_device *dev, void __user *pointer, int len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieeee80211_disabled_chanlist channels;
+	int i, j = 0, cnt;
+
+	if (copy_from_user(&channels, pointer, len)) {
+		printk("%s: copy_from_user failed\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (channels.dir == SET_CHAN_DISABLED) {
+		if ((vap->iv_opmode == IEEE80211_M_STA && vap->iv_state == IEEE80211_S_RUN) ||
+				(vap->iv_opmode == IEEE80211_M_HOSTAP && ic->ic_sta_assoc != 0)) {
+			printk("%s: channel disable settings is not allowed during associated\n", __FUNCTION__);
+			return -EPERM;
+		}
+
+		cnt = channels.list_len;
+		if (cnt > ARRAY_SIZE(channels.chan)) {
+			printk("%s: max number of supported channels is %d\n", __func__,
+				ARRAY_SIZE(channels.chan));
+			return -EFAULT;
+		}
+
+		for (i = 0; i < cnt; i++) {
+			if (channels.chan[i] >= IEEE80211_CHAN_MAX)
+				return -EFAULT;
+			if (!isset(ic->ic_chan_avail, channels.chan[i]))
+				return -EFAULT;
+		}
+
+		for (i = 0; i < cnt; i++) {
+			if (!channels.flag) {
+				clrbit(ic->ic_chan_disabled, channels.chan[i]);
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+					"Mark channel %d enabled\n", channels.chan[i]);
+			} else {
+				setbit(ic->ic_chan_disabled, channels.chan[i]);
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
+					"Mark channel %d disabled\n", channels.chan[i]);
+			}
+		}
+	} else if (channels.dir == GET_CHAN_DISABLED) {
+		for (i = 1; i <= IEEE80211_CHAN_MAX; i++) {
+			if (isset(ic->ic_chan_disabled, i)) {
+				channels.chan[j++] = i;
+			}
+		}
+		channels.list_len = j;
+
+		if (copy_to_user(pointer, &channels, sizeof(channels))) {
+			printk("%s: copy_to_user failed\n", __FUNCTION__);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_wowlan_setget(struct net_device *dev, struct ieee80211req_wowlan __user* ps, int len)
+{
+	struct ieee80211vap	*vap = netdev_priv(dev);
+	struct ieee80211com	*ic  = vap->iv_ic;
+	struct ieee80211req_wowlan req;
+	int ret = 0;
+
+	if (!ps) {
+		printk("%s: NULL pointer for user request\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if ((sizeof(req) > len)) {
+		printk("%s: low memory for request's result\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (copy_from_user(&req, ps, sizeof(struct ieee80211req_wowlan))) {
+		printk("%s: copy_from_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+
+	if (!req.is_data) {
+		printk("%s: user space buffer invalid\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	switch (req.is_op) {
+	case IEEE80211_WOWLAN_MAGIC_PATTERN:
+		if (req.is_data) {
+			uint32_t len = MIN(MAX_USER_DEFINED_MAGIC_LEN, req.is_data_len);
+			memset(ic->ic_wowlan.pattern.magic_pattern, 0, MAX_USER_DEFINED_MAGIC_LEN);
+			if (copy_from_user(ic->ic_wowlan.pattern.magic_pattern, req.is_data, len)) {
+				printk("%s: copy_from_user pattern copy failed\n", __FUNCTION__);
+				return -EIO;
+			}
+			ic->ic_wowlan.pattern.len = len;
+		}
+		return 0;
+	case IEEE80211_WOWLAN_HOST_POWER_SAVE:
+		ret = copy_to_user(req.is_data, &(ic->ic_wowlan.host_state),
+				MIN(req.is_data_len, sizeof(ic->ic_wowlan.host_state)));
+		break;
+	case IEEE80211_WOWLAN_MATCH_TYPE:
+		ret = copy_to_user(req.is_data, &(ic->ic_wowlan.wowlan_match),
+				MIN(req.is_data_len, sizeof(ic->ic_wowlan.wowlan_match)));
+		break;
+	case IEEE80211_WOWLAN_L2_ETHER_TYPE:
+		ret = copy_to_user(req.is_data, &(ic->ic_wowlan.L2_ether_type),
+				MIN(req.is_data_len, sizeof(ic->ic_wowlan.L2_ether_type)));
+		break;
+	case IEEE80211_WOWLAN_L3_UDP_PORT:
+		ret = copy_to_user(req.is_data, &(ic->ic_wowlan.L3_udp_port),
+				MIN(req.is_data_len, sizeof(ic->ic_wowlan.L3_udp_port)));
+		break;
+	case IEEE80211_WOWLAN_MAGIC_PATTERN_GET:
+		req.is_data_len = MIN(req.is_data_len, ic->ic_wowlan.pattern.len);
+		ret = copy_to_user(req.is_data, &(ic->ic_wowlan.pattern.magic_pattern), req.is_data_len);
+		break;
+	default:
+		break;
+	}
+
+	if (ret) {
+		printk("%s: buffer content: copy_to_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+
+	if (copy_to_user(ps, &req, sizeof(req))) {
+		printk("%s: copy_to_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_sta_auth(struct net_device *dev, struct ieee80211req_auth_description __user* ps, int cnt)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211req_auth_description auth_description;
+	uint8_t *casted_ptr = (uint8_t*)&auth_description.description;
+
+	if (!ps) {
+		printk("%s: NULL pointer for user request\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (sizeof(auth_description) > cnt) {
+		printk("%s: low memory for request's result\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (copy_from_user(&auth_description, ps, sizeof(auth_description))) {
+		printk("%s: copy_from_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+
+	ni = ieee80211_find_node(&ic->ic_sta, auth_description.macaddr);
+	if (NULL == ni) {
+		printk("%s: client not found\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	casted_ptr[IEEE80211_AUTHDESCR_ALGO_POS] = ni->ni_used_auth_algo;
+
+	if (ni->ni_rsn_ie != NULL || ni->ni_wpa_ie != NULL) {
+		casted_ptr[IEEE80211_AUTHDESCR_KEYMGMT_POS] = ni->ni_rsn.rsn_keymgmt;
+		casted_ptr[IEEE80211_AUTHDESCR_KEYPROTO_POS] = ni->ni_rsn_ie != NULL ?
+								(uint8_t)IEEE80211_AUTHDESCR_KEYPROTO_RSN :
+								(uint8_t)IEEE80211_AUTHDESCR_KEYPROTO_WPA;
+		casted_ptr[IEEE80211_AUTHDESCR_CIPHER_POS] = (uint8_t)ni->ni_rsn.rsn_ucastcipher;
+	} else {
+		if (vap->iv_flags & IEEE80211_F_PRIVACY)
+			casted_ptr[IEEE80211_AUTHDESCR_KEYMGMT_POS] = (uint8_t)IEEE80211_AUTHDESCR_KEYMGMT_WEP;
+	}
+
+	ieee80211_free_node(ni);
+
+	if (copy_to_user(ps, &auth_description, sizeof(auth_description))) {
+		printk("%s: copy_to_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_sta_vendor(struct net_device *dev, uint8_t __user* ps)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	uint8_t macaddr[IEEE80211_ADDR_LEN];
+
+	if (!ps) {
+		printk("%s: NULL pointer for user request\n", __FUNCTION__);
+		return -EFAULT;
+	}
+	if (copy_from_user(macaddr, ps, sizeof(macaddr))) {
+		printk("%s: copy_from_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+
+	ni = ieee80211_find_node(&ic->ic_sta, macaddr);
+	if (NULL == ni) {
+		printk("%s: client not found\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	if (copy_to_user(ps, &ni->ni_vendor, sizeof(ni->ni_vendor))) {
+		printk("%s: copy_to_user failed\n", __FUNCTION__);
+		ieee80211_free_node(ni);
+		return -EIO;
+	}
+
+	ieee80211_free_node(ni);
+	return 0;
+}
+
+int
+ieee80211_is_channel_disabled(struct ieee80211com *ic, int channel, int bw)
+{
+	if (isset(ic->ic_chan_disabled, channel)) {
+		return 1;
+	}
+
+	/* based on BW, need to check if more channels need to be disabled*/
+	if (bw >= BW_HT40) {
+		uint32_t chan_sec = 0;
+		uint32_t chan_sec40u = 0;
+		uint32_t chan_sec40l = 0;
+		struct ieee80211_channel *chan;
+		chan = findchannel(ic, channel, ic->ic_des_mode);
+		if (chan == NULL) {
+			chan = findchannel(ic, channel, IEEE80211_MODE_AUTO);
+		}
+
+		if (chan) {
+			chan_sec = ieee80211_find_sec_chan(chan);
+			if (unlikely(chan_sec && isset(ic->ic_chan_disabled, chan_sec))) {
+				return 1;
+			}
+			if (bw >= BW_HT80) {
+				chan_sec40u = ieee80211_find_sec40u_chan(chan);
+				chan_sec40l = ieee80211_find_sec40l_chan(chan);
+				if (unlikely(chan_sec40u && isset(ic->ic_chan_disabled, chan_sec40u)) ||
+						(chan_sec40l && isset(ic->ic_chan_disabled, chan_sec40l))) {
+					return 1;
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+static int
+ieee80211_subioctl_set_active_chanlist_by_bw(struct net_device *dev, void __user *pointer)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_active_chanlist list;
+	uint8_t chanlist[IEEE80211_CHAN_BYTES];
+	int i;
+	int j;
+	int nchan = 0;
+
+	memset(chanlist, 0, sizeof(chanlist));
+
+	if (copy_from_user(&list, pointer, sizeof(list)))
+		return -EFAULT;
+
+	if ((ic->ic_phytype == IEEE80211_T_DS) || (ic->ic_phytype == IEEE80211_T_OFDM))
+		i = 1;
+	else
+		i = 0;
+
+	for (j = 0; i <= IEEE80211_CHAN_MAX; i++, j++) {
+		if (isset(list.channels, j) && isset(ic->ic_chan_avail, i) &&
+				!ieee80211_is_channel_disabled(ic, i, list.bw)) {
+			setbit(chanlist, i);
+			nchan++;
+		}
+	}
+
+	if (nchan == 0)
+		return -EINVAL;
+
+	switch (list.bw) {
+	case BW_HT80:
+		memcpy(ic->ic_chan_active_80, chanlist, sizeof(ic->ic_chan_active_80));
+		break;
+	case BW_HT40:
+		memcpy(ic->ic_chan_active_40, chanlist, sizeof(ic->ic_chan_active_40));
+		break;
+	case BW_HT20:
+		memcpy(ic->ic_chan_active_20, chanlist, sizeof(ic->ic_chan_active_20));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_sta_tput_caps(struct net_device *dev,
+				struct ieee8011req_sta_tput_caps __user* ps, int cnt)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = NULL;
+	struct ieee8011req_sta_tput_caps tput_caps;
+
+	if (!ps) {
+		printk("%s: NULL pointer for user request\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (sizeof(tput_caps) > cnt) {
+		printk("%s: return buffer is too small\n", __FUNCTION__);
+		return -EFAULT;
+	}
+
+	if (copy_from_user(&tput_caps, ps, sizeof(tput_caps))) {
+		printk("%s: copy_from_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+
+	ni = ieee80211_find_node(&ic->ic_sta, tput_caps.macaddr);
+	if (!ni) {
+		tput_caps.mode = IEEE80211_WIFI_MODE_NONE;
+		printk("%s: station %pM not found\n", __FUNCTION__, tput_caps.macaddr);
+		goto copy_and_exit;
+	}
+
+	COMPILE_TIME_ASSERT(sizeof(ni->ni_ie_htcap) == sizeof(tput_caps.htcap_ie) &&
+			sizeof(ni->ni_ie_vhtcap) == sizeof(tput_caps.vhtcap_ie));
+
+	tput_caps.mode = ni->ni_wifi_mode;
+	if (IEEE80211_NODE_IS_VHT(ni)) {
+		memcpy(tput_caps.htcap_ie, &ni->ni_ie_htcap, sizeof(tput_caps.htcap_ie));
+		memcpy(tput_caps.vhtcap_ie, &ni->ni_ie_vhtcap, sizeof(tput_caps.vhtcap_ie));
+	} else if (IEEE80211_NODE_IS_HT(ni)) {
+		memcpy(tput_caps.htcap_ie, &ni->ni_ie_htcap, sizeof(tput_caps.htcap_ie));
+	}
+	ieee80211_free_node(ni);
+copy_and_exit:
+	if (copy_to_user(ps, &tput_caps, sizeof(tput_caps))) {
+		printk("%s: copy_to_user failed\n", __FUNCTION__);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_chan_power_table(struct net_device *dev, void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *c = NULL;
+	struct ieee80211_chan_power_table power_table;
+	int iter;
+	int idx_bf;
+	int idx_ss;
+
+	if (copy_from_user(&power_table, pointer, sizeof(power_table)))
+		return -EFAULT;
+
+	for (iter = 0; iter < ic->ic_nchans; iter++) {
+		c = ic->ic_channels + iter;
+		if (c->ic_ieee == power_table.chan_ieee) {
+			break;
+		}
+	}
+
+	if (iter >= ic->ic_nchans || c == NULL) {
+		/*
+		 * Didn't find this channel, set the channel as
+		 * 0 to indicate there is no such channel
+		 */
+		power_table.chan_ieee = 0;
+	} else {
+		for (idx_bf = PWR_IDX_BF_OFF; idx_bf < PWR_IDX_BF_MAX; idx_bf++) {
+			for (idx_ss = PWR_IDX_1SS; idx_ss < PWR_IDX_SS_MAX; idx_ss++) {
+				power_table.maxpower_table[idx_bf][idx_ss][PWR_IDX_20M] =
+						c->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_20M];
+				power_table.maxpower_table[idx_bf][idx_ss][PWR_IDX_40M] =
+						c->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_40M];
+				power_table.maxpower_table[idx_bf][idx_ss][PWR_IDX_80M] =
+						c->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_80M];
+			}
+		}
+	}
+
+	if (copy_to_user(pointer, &power_table, len))
+		return -EINVAL;
+
+	return 0;
+}
+
/*
 * Install a user-supplied max-power table for one channel. Every
 * (beamforming, spatial-stream, bandwidth) cell of the matching channel's
 * ic_maxpower_table is overwritten; the channel's scalar ic_maxpower is
 * refreshed from the BF-off/1SS row for the currently configured bandwidth.
 * A request naming an unknown channel is silently a no-op (returns 0).
 */
static int
ieee80211_subioctl_set_chan_power_table(struct net_device *dev, void __user *pointer)
{
	struct ieee80211vap *vap = netdev_priv(dev);
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *c;
	struct ieee80211_chan_power_table power_table;
	int8_t *s_pwrs;		/* source row in the user-supplied table */
	int8_t *d_pwrs;		/* destination row in the channel entry */
	int iter;
	int cur_bw;
	int idx_bf;
	int idx_ss;

	if (copy_from_user(&power_table, pointer, sizeof(power_table)))
		return -EFAULT;

	/* find the channel entry matching the request, then copy row by row */
	for (iter = 0; iter < ic->ic_nchans; iter++) {
		c = ic->ic_channels + iter;
		if (c->ic_ieee == power_table.chan_ieee) {
			for (idx_bf = PWR_IDX_BF_OFF; idx_bf < PWR_IDX_BF_MAX; idx_bf++) {
				for (idx_ss = PWR_IDX_1SS; idx_ss < PWR_IDX_SS_MAX; idx_ss++) {
					s_pwrs = power_table.maxpower_table[idx_bf][idx_ss];
					d_pwrs = c->ic_maxpower_table[idx_bf][idx_ss];
					d_pwrs[PWR_IDX_20M] = s_pwrs[PWR_IDX_20M];
					d_pwrs[PWR_IDX_40M] = s_pwrs[PWR_IDX_40M];
					d_pwrs[PWR_IDX_80M] = s_pwrs[PWR_IDX_80M];
					/*
					 * Update the maxpower of current bandwidth
					 * (only from the BF-off / 1-stream row).
					 */
					if (idx_bf == PWR_IDX_BF_OFF && idx_ss == PWR_IDX_1SS) {
						cur_bw = ieee80211_get_bw(ic);
						switch (cur_bw) {
						case BW_HT20:
							c->ic_maxpower = s_pwrs[PWR_IDX_20M];
							break;
						case BW_HT40:
							c->ic_maxpower = s_pwrs[PWR_IDX_40M];
							break;
						case BW_HT80:
							c->ic_maxpower = s_pwrs[PWR_IDX_80M];
							break;
						}
					}
				}
			}
			/* let the lower layer pick up the new table, if hooked */
			if (ic->ic_power_table_update) {
				ic->ic_power_table_update(vap, c);
			}
			break;
		}
	}

	return 0;
}
+
+static void
+get_txrx_airtime_ioctl(void *s, struct ieee80211_node *ni)
+{
+	struct node_txrx_airtime __user *u_airtime;
+	struct iwreq *iwr = (struct iwreq *)s;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct node_txrx_airtime airtime;
+	uint16_t __user *u_nr_nodes;
+	struct txrx_airtime *txrxat;
+	uint16_t nr_nodes;
+
+	/* Ignore node that has associd = 0 */
+	if (ni->ni_associd == 0)
+		return;
+
+	memcpy(airtime.macaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	airtime.tx_airtime = ic->ic_tx_airtime(ni);
+	airtime.tx_airtime_accum = ic->ic_tx_accum_airtime(ni);
+	airtime.rx_airtime = ic->ic_rx_airtime(ni);
+	airtime.rx_airtime_accum = ic->ic_rx_accum_airtime(ni);
+
+	txrxat = (struct txrx_airtime *)iwr->u.data.pointer;
+
+	txrxat->total_cli_tx_airtime += airtime.tx_airtime_accum;
+	txrxat->total_cli_rx_airtime += airtime.rx_airtime_accum;
+
+        txrxat = (struct txrx_airtime *)iwr->u.data.pointer;
+        u_nr_nodes = &txrxat->nr_nodes;
+
+	if (copy_from_user(&nr_nodes, u_nr_nodes, sizeof(nr_nodes)))
+		return;
+
+	u_airtime = txrxat->nodes + nr_nodes;
+
+	if (nr_nodes++ > QTN_ASSOC_LIMIT - 1)
+		return;
+
+	if (copy_to_user(u_airtime, &airtime, sizeof(airtime)))
+		return;
+
+	if (copy_to_user(u_nr_nodes, &nr_nodes, sizeof(nr_nodes)))
+		return;
+}
+
+static int
+ieee80211_subioctl_set_sec_chan(struct net_device *dev,
+		void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	int chan_off_pair[2];
+	int chan_ieee;
+	int offset;
+
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(chan_off_pair, pointer, sizeof(chan_off_pair)))
+		return -EFAULT;
+
+	chan_ieee = chan_off_pair[0];
+	offset = chan_off_pair[1];
+	if (!ieee80211_dual_sec_chan_supported(ic, chan_ieee))
+		return -EINVAL;
+
+	chan = findchannel(ic, chan_ieee, ic->ic_des_mode);
+	if (!chan)
+		return -EINVAL;
+
+	offset = (offset == 0) ? IEEE80211_HTINFO_CHOFF_SCA : IEEE80211_HTINFO_CHOFF_SCB;
+	ieee80211_update_sec_chan_offset(chan, offset);
+
+	if (chan_ieee == ic->ic_curchan->ic_ieee) {
+		if (vap->iv_state == IEEE80211_S_RUN)
+			ic->ic_set_channel(ic);
+		ieee80211_wireless_reassoc(vap, 0, 1);
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_sec_chan(struct net_device *dev,
+		void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	int cur_bw = ieee80211_get_bw(ic);
+	int chan_off_pair[2];
+	int chan_ieee;
+	int offset;
+
+	if (copy_from_user(chan_off_pair, pointer, sizeof(chan_off_pair)))
+		return -EFAULT;
+
+	chan_ieee = chan_off_pair[0];
+	offset = chan_ieee;
+	chan = findchannel(ic, chan_ieee, ic->ic_des_mode);
+	if (!chan)
+		return -EINVAL;
+
+	if (cur_bw >= BW_HT40) {
+		if (chan->ic_flags & IEEE80211_CHAN_HT40U)
+			offset = chan_ieee + IEEE80211_SEC_CHAN_OFFSET;
+		else if (chan->ic_flags & IEEE80211_CHAN_HT40D)
+			offset = chan_ieee - IEEE80211_SEC_CHAN_OFFSET;
+	}
+	chan_off_pair[1] = offset;
+
+	if (copy_to_user(pointer, chan_off_pair, sizeof(chan_off_pair)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_txrx_airtime(struct net_device *dev, struct iwreq *iwr)
+{
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	struct txrx_airtime *txrxat;
+	uint16_t __user *u_fat;
+	uint16_t fat;
+
+	txrxat = (struct txrx_airtime *)iwr->u.data.pointer;
+	u_fat = &txrxat->free_airtime;
+
+	if (copy_from_user(&fat, u_fat, sizeof(fat)))
+		return -EFAULT;
+	fat = sp->free_airtime;
+	copy_to_user(u_fat, &fat, sizeof(fat));
+
+	ic->ic_iterate_nodes(&ic->ic_sta, get_txrx_airtime_ioctl, iwr, 1);
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_chan_pri_inact(struct net_device *dev,
+		void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_inactive_chanlist chanlist;
+
+	if (sizeof(chanlist) > len)
+		return -ENOMEM;
+
+	memset(&chanlist, 0, sizeof(chanlist));
+
+	local_get_inactive_primary_chan_num(ic, &chanlist);
+
+	if (copy_to_user(pointer, &chanlist, sizeof(chanlist)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_di_dfs_channels(struct net_device *dev, void __user *pointer, int cnt)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	int flag_deactive;
+
+	if (copy_from_user(&flag_deactive, pointer, sizeof(int)))
+		return -EFAULT;
+
+	ic->ic_dfs_channels_deactive = !!flag_deactive;
+
+	return 0;
+}
+
+#define MAX_CLIENT_LIST 200
+static int
+ieee80211_subioctl_get_client_mac_list(struct net_device *dev,
+		void __user *pointer, uint32_t len)
+{
+	int rval = 0;
+	uint32_t num_of_entries = 0;
+	struct ieee80211_mac_list mlist;
+	uint32_t flags = 0;
+
+	if (copy_from_user(&mlist, pointer, sizeof(mlist)) != 0)
+		return -EFAULT;
+
+	rval = fwt_db_get_macs_behind_node(mlist.num_entries, &num_of_entries, MAX_CLIENT_LIST,
+					&flags,	(uint8_t *)&mlist.macaddr[0]);
+	mlist.num_entries = num_of_entries;
+	mlist.flags = flags;
+
+	if (copy_to_user(pointer, &mlist, sizeof(mlist)) != 0)
+		return -EFAULT;
+
+	return rval;
+}
+
+static int
+ieee80211_set_nss_cap(struct ieee80211vap *vap, const int param, const int nss)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (nss < 1 || nss > QTN_GLOBAL_RATE_NSS_MAX)
+		return EINVAL;
+
+	if (ieee80211_swfeat_is_supported(SWFEAT_ID_2X2, 0) ||
+			ieee80211_swfeat_is_supported(SWFEAT_ID_2X4, 0)) {
+		if (nss > QTN_2X2_GLOBAL_RATE_NSS_MAX) {
+			return EINVAL;
+		}
+	} else if (ieee80211_swfeat_is_supported(SWFEAT_ID_3X3, 0)) {
+		if (nss > QTN_3X3_GLOBAL_RATE_NSS_MAX) {
+			return EINVAL;
+		}
+	}
+
+	if (param == IEEE80211_PARAM_HT_NSS_CAP)
+		ic->ic_ht_nss_cap = nss;
+	else if (param == IEEE80211_PARAM_VHT_NSS_CAP) {
+		ic->ic_vht_nss_cap = nss;
+		ic->ic_vhtcap.numsounding = nss -1;
+		ic->ic_vhtcap_24g.numsounding = nss -1;
+	} else
+		return EINVAL;
+
+	ieee80211_param_to_qdrv(vap, param, nss, NULL, 0);
+	ieee80211_wireless_reassoc(vap, 0, 1);
+
+	return 0;
+}
+
+#if defined(QBMPS_ENABLE)
/*
 * Switch the STA beacon-mode power-save (BMPS) mode: 0=off, 1=manual,
 * 2=auto. Only valid in STA opmode. Updates the QTN BMPS flag, the pm_qos
 * power-save requirement, the stored WoWLAN host state, and (in RUN state)
 * the null-frame setup; auto mode arms a throughput-measurement timer.
 * Returns 0 on success or positive EINVAL (local convention) on bad input.
 */
int ieee80211_wireless_set_sta_bmps(struct ieee80211vap *vap, struct ieee80211com *ic,
						int value)
{
	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);

	/* valid setting is: 0 - disable */
	/*                   1 - manual mode */
	/*                   2 - auto mode */
	if ((unsigned)value > BMPS_MODE_AUTO)
		return EINVAL;

	/* BMPS power-saving only works in STA mode */
	if (vap->iv_opmode != IEEE80211_M_STA)
		return EINVAL;

	/* no-op when the requested mode is already active */
	if (qv->qv_bmps_mode == value)
		return 0;

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER,
				"%s: qv bmps mode=%d, wowlan status %d, arg %d\n", __func__,
				qv->qv_bmps_mode, ic->ic_wowlan.host_state, value);

	if (value == 0) {
		/* disable power-saving */
		ic->ic_flags_qtn &= ~IEEE80211_QTN_BMPS;
	} else {
		/* enable power-saving */
		ic->ic_flags_qtn |= IEEE80211_QTN_BMPS;
	}

	if (qv->qv_bmps_mode == BMPS_MODE_AUTO) {
		/* stop tput measurement if previously in auto mode */
		del_timer(&ic->ic_bmps_tput_check.tput_timer);
	}

	/* exit power-saving first if previously power-saving is enabled */
	if (qv->qv_bmps_mode != BMPS_MODE_OFF)
		pm_qos_update_requirement(PM_QOS_POWER_SAVE,
						BOARD_PM_GOVERNOR_WLAN,
						BOARD_PM_LEVEL_NO);

	qv->qv_bmps_mode = value;
	/* Update station WoWLAN status here; otherwise state would go out of sync if
	 * ps state is changed via set_bmps
	 */
	ic->ic_wowlan.host_state = value;
	g_wowlan_host_state = value;

	if (vap->iv_state == IEEE80211_S_RUN) {

		/* update null frame */
		ieee80211_sta_bmps_update(vap);

		if (value == BMPS_MODE_MANUAL) {
			/* manual mode, start power-saving immediately */
			/* only when there is only one STA VAP and no SWBMISS */
			if (ieee80211_is_idle_state(ic))
				pm_qos_update_requirement(PM_QOS_POWER_SAVE,
							BOARD_PM_GOVERNOR_WLAN,
							BOARD_PM_LEVEL_IDLE);
		} else if (value == BMPS_MODE_AUTO){
			/* auto mode, start tput measurement */
			vap->iv_bmps_tput_high = -1;
			ic->ic_bmps_tput_check.tput_timer.expires = jiffies +
						(BMPS_TPUT_MEASURE_PERIOD_MS / 1000) * HZ;
			add_timer(&ic->ic_bmps_tput_check.tput_timer);
		}
	}

	return 0;

}
EXPORT_SYMBOL(ieee80211_wireless_set_sta_bmps);
+#endif
+
+static int
+ieee80211_subioctl_set_dscp2tid_map(struct net_device *dev, void __user *pointer, uint16_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	uint8_t dscp2tid[IP_DSCP_NUM];
+
+	if (len != IP_DSCP_NUM)
+		return -EINVAL;
+
+	if (copy_from_user(dscp2tid, pointer, sizeof(dscp2tid)))
+		return -EFAULT;
+
+	if (!ic->ic_set_dscp2tid_map)
+		return -EINVAL;
+
+	ic->ic_set_dscp2tid_map(qv->qv_vap_idx, dscp2tid);
+
+	return 0;
+}
+
+static int
+ieee80211_set_vht_mcs_cap(struct ieee80211vap *vap, const int mcs)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	switch(mcs) {
+	case IEEE80211_VHT_MCS_0_7:
+	case IEEE80211_VHT_MCS_0_8:
+	case IEEE80211_VHT_MCS_0_9:
+		ic->ic_vht_mcs_cap = mcs;
+		break;
+	default:
+		return EINVAL;
+	}
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_VHT_MCS_CAP, mcs, NULL, 0);
+	ieee80211_wireless_reassoc(vap, 0, 1);
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_dscp2tid_map(struct net_device *dev, void __user *pointer, uint16_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+	uint8_t dscp2tid[IP_DSCP_NUM];
+
+	if (len != IP_DSCP_NUM)
+		return -EINVAL;
+
+	if (!ic->ic_get_dscp2tid_map)
+		return -EINVAL;
+
+	ic->ic_get_dscp2tid_map(qv->qv_vap_idx, dscp2tid);
+
+	if(copy_to_user(pointer, dscp2tid, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int
+ieee80211_param_wowlan_set(struct net_device *dev, struct ieee80211vap *vap, u_int32_t value)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint16_t cmd =	value >> 16;
+	uint16_t arg = value & 0xffff;
+
+	if (cmd >= IEEE80211_WOWLAN_SET_MAX) {
+		printk(KERN_WARNING "%s: WOWLAN set: invalid config cmd %u, arg=%u\n",
+				dev->name, cmd, arg);
+		return -1;
+	}
+
+	switch (cmd) {
+	case IEEE80211_WOWLAN_HOST_POWER_SAVE:
+		if (arg > 1)
+			return -1;
+
+		if (ic->ic_wowlan.host_state != arg) {
+			/* trigger WLAN manual mode STA power-saving */
+#if defined(QBMPS_ENABLE)
+			ieee80211_wireless_set_sta_bmps(vap, ic, arg);
+#endif
+		}
+		break;
+	case IEEE80211_WOWLAN_MATCH_TYPE:
+		if (arg > 2)
+			return -1;
+		if (ic->ic_wowlan.wowlan_match != arg) {
+			ic->ic_wowlan.wowlan_match = arg;
+			g_wowlan_match_type = arg;
+		}
+		break;
+	case IEEE80211_WOWLAN_L2_ETHER_TYPE:
+		if (arg < 0x600)
+			return -1;
+		if (ic->ic_wowlan.L2_ether_type != arg) {
+			ic->ic_wowlan.L2_ether_type = arg;
+			g_wowlan_l2_ether_type = arg;
+		}
+		break;
+	case IEEE80211_WOWLAN_L3_UDP_PORT:
+		if (ic->ic_wowlan.L3_udp_port != arg) {
+			ic->ic_wowlan.L3_udp_port = arg;
+			g_wowlan_l3_udp_port = arg;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void
+ieee80211_ioctl_swfeat_disable(struct ieee80211vap *vap, const int param, int feat)
+{
+	if (feat >= SWFEAT_ID_MAX)
+		printk("%s: feature id %d is invalid\n", __func__, feat);
+	else
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_SWFEAT_DISABLE, feat, NULL, 0);
+}
+
+void ieee80211_send_vht_opmode_to_all(struct ieee80211com *ic, uint8_t bw)
+{
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni && IEEE80211_NODE_IS_VHT(ni)) {
+			ieee80211_send_vht_opmode_action(ni->ni_vap, ni, bw, 3);
+		}
+	}
+}
+
+static int
+ieee80211_ioctl_set_assoc_limit(struct ieee80211com *ic,
+				struct ieee80211vap *vap, int value)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	int i;
+
+	if ((vap->iv_opmode != IEEE80211_M_HOSTAP) &&
+			!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)) {
+		return EINVAL;
+	}
+
+	if (value < 0  || value > QTN_ASSOC_LIMIT) {
+		printk("%s: Max configurable limit is %d\n",
+					__func__, QTN_ASSOC_LIMIT);
+		return EINVAL;
+	}
+
+	ic->ic_sta_assoc_limit = value;
+
+	for (i = 0; i < IEEE80211_MAX_BSS_GROUP; i++) {
+		ic->ic_ssid_grp[i].limit = ic->ic_sta_assoc_limit;
+		ic->ic_ssid_grp[i].reserve = 0;
+	}
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		ieee80211_wireless_reassoc(ni->ni_vap, 0, 0);
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_set_bss_grp_assoc_limit(struct ieee80211com *ic,
+					struct ieee80211vap *vap, int value)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	int limit, grp;
+
+	if ((vap->iv_opmode != IEEE80211_M_HOSTAP) &&
+			!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)) {
+		return EINVAL;
+	}
+
+	limit = (value & 0xffff);
+	grp = ((value >> 16) & 0xffff);
+
+	if (grp < IEEE80211_MIN_BSS_GROUP || grp >= IEEE80211_MAX_BSS_GROUP) {
+		return EINVAL;
+	}
+
+	if (limit < 0 || limit > ic->ic_sta_assoc_limit
+			|| limit < ic->ic_ssid_grp[grp].reserve) {
+		return EINVAL;
+	}
+
+	ic->ic_ssid_grp[grp].limit = limit;
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni->ni_vap->iv_ssid_group == grp) {
+			ieee80211_wireless_reassoc(ni->ni_vap, 0, 0);
+		}
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_set_bss_grpid(struct ieee80211com *ic,
+				struct ieee80211vap *vap, int value)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
+		return EINVAL;
+	}
+
+	if (value < IEEE80211_MIN_BSS_GROUP || value >= IEEE80211_MAX_BSS_GROUP) {
+		return EINVAL;
+	}
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		ieee80211_wireless_reassoc(ni->ni_vap, 0, 0);
+	}
+
+	vap->iv_ssid_group = value;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_set_bss_grp_assoc_reserve(struct ieee80211com *ic,
+					struct ieee80211vap *vap, int value)
+{
+	struct ieee80211_node *ni;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	int grp;
+	int reserve;
+	int tot_reserve = 0;
+	int i;
+
+	if ((vap->iv_opmode != IEEE80211_M_HOSTAP) &&
+			!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER)) {
+		return EINVAL;
+	}
+
+	reserve = (value & 0xffff);
+	grp = ((value >> 16) & 0xffff);
+
+	if (grp < IEEE80211_MIN_BSS_GROUP || grp >= IEEE80211_MAX_BSS_GROUP) {
+		return EINVAL;
+	}
+
+	tot_reserve = reserve;
+	for (i = IEEE80211_MIN_BSS_GROUP; i < IEEE80211_MAX_BSS_GROUP; i++) {
+		if (i == grp)
+			continue;
+		tot_reserve += ic->ic_ssid_grp[i].reserve;
+	}
+
+	if (reserve > ic->ic_ssid_grp[grp].limit
+			|| tot_reserve > ic->ic_sta_assoc_limit) {
+		return EINVAL;
+	}
+
+	ic->ic_ssid_grp[grp].reserve = reserve;
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		ieee80211_wireless_reassoc(ni->ni_vap, 0, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * Change the QHOP extender role of the device (MBS / RBS / none).
+ *
+ * Fix: the original stored the new role into ic->ic_extender_role BEFORE
+ * validating it, so an invalid value returned EINVAL but left the bogus
+ * role behind — and a subsequent call with that same invalid value was
+ * then short-circuited by the "no change" test.  Validate first, commit
+ * after.  Also drops an unreachable `break` after `return EINVAL`.
+ *
+ * Returns 0 on success, EOPNOTSUPP when QHOP is not supported,
+ * EINVAL for an unknown role value.
+ */
+static int
+ieee80211_ioctl_extender_role(struct ieee80211com *ic, struct ieee80211vap *vap, int value)
+{
+	struct ieee80211vap *vap_tmp;
+
+	if (!ieee80211_swfeat_is_supported(SWFEAT_ID_QHOP, 1))
+		return EOPNOTSUPP;
+
+	if (ic->ic_extender_role == value)
+		return 0;
+
+	/* Validate before touching any state */
+	switch (value) {
+	case IEEE80211_EXTENDER_ROLE_RBS:
+	case IEEE80211_EXTENDER_ROLE_MBS:
+	case IEEE80211_EXTENDER_ROLE_NONE:
+		break;
+	default:
+		return EINVAL;
+	}
+
+	ic->ic_extender_role = value;
+
+	/* An MBS advertises its own address; otherwise clear the MBS BSSID */
+	if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS)
+		IEEE80211_ADDR_COPY(ic->ic_extender_mbs_bssid, ic->ic_myaddr);
+	else
+		IEEE80211_ADDR_SET_NULL(ic->ic_extender_mbs_bssid);
+
+	/* Forget all known RBS peers on any role change */
+	ic->ic_extender_rbs_num = 0;
+	memset(ic->ic_extender_rbs_bssid[0], 0, sizeof(ic->ic_extender_rbs_bssid));
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		ic->ic_beacon_update(vap);
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_EXTENDER_ROLE, value, NULL, 0);
+
+	/* change mode of existing WDS links */
+	TAILQ_FOREACH(vap_tmp, &ic->ic_vaps, iv_next) {
+		if (!IEEE80211_VAP_WDS_ANY(vap_tmp))
+			continue;
+		ieee80211_vap_wds_mode_change(vap_tmp);
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		ieee80211_extender_cleanup_wds_link(vap);
+		/* An RBS must immediately start scanning for its MBS */
+		if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS)
+			mod_timer(&ic->ic_extender_scan_timer, jiffies);
+	}
+
+	return 0;
+}
+
+/*
+ * Set the QTM priority of this VAP and rebalance WME parameters.
+ *
+ * Fix: the original only checked the upper bound, so a negative
+ * priority passed validation and was stored into iv_pri.  Reject
+ * values outside [0, QTN_VAP_PRIORITY_NUM).
+ *
+ * Returns 0 on success, EOPNOTSUPP when the QTM-priority feature is
+ * absent, EINVAL for an out-of-range priority.
+ */
+static int ieee80211_ioctl_set_vap_pri(struct ieee80211com *ic, struct ieee80211vap *vap, int value)
+{
+	if (!ieee80211_swfeat_is_supported(SWFEAT_ID_QTM_PRIO, 1))
+		return EOPNOTSUPP;
+
+	if (value < 0 || value >= QTN_VAP_PRIORITY_NUM)
+		return EINVAL;
+
+	vap->iv_pri = value;
+
+	/* WME tuning reads the priorities of all VAPs — serialize it */
+	IEEE80211_LOCK_IRQ(ic);
+
+	ieee80211_adjust_wme_by_vappri(ic);
+
+	IEEE80211_UNLOCK_IRQ(ic);
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_VAP_PRI, value, NULL, 0);
+
+	return 0;
+}
+
+/*
+ * Fix the MU-MIMO transmit rate.  'value' carries two MCS fields
+ * (IEEE80211_AC_MCS_MASK wide, the second shifted by
+ * IEEE80211_AC_MCS_SHIFT); each is converted to the MuC 11ac format
+ * assuming 80 MHz and the pair is repacked and pushed to qdrv.
+ * Returns 0 on success, EOPNOTSUPP without MU-MIMO support, EINVAL
+ * when VHT is disabled or either MCS fails conversion.
+ */
+static int ieee80211_wireless_set_mu_tx_rate(struct ieee80211vap *vap, struct ieee80211com *ic,
+						const int value)
+{
+	const int lo_mcs = value & IEEE80211_AC_MCS_MASK;
+	const int hi_mcs = (value >> IEEE80211_AC_MCS_SHIFT) & IEEE80211_AC_MCS_MASK;
+	const int lo_fmt = ieee80211_11ac_mcs_format(lo_mcs, 80);
+	const int hi_fmt = ieee80211_11ac_mcs_format(hi_mcs, 80);
+	int packed;
+
+	if (!ieee80211_swfeat_is_supported(SWFEAT_ID_MU_MIMO, 1))
+		return EOPNOTSUPP;
+
+	if (!IS_IEEE80211_VHT_ENABLED(ic))
+		return EINVAL;
+
+	/* A negative result marks an MCS the MuC format cannot express */
+	if (lo_fmt < 0 || hi_fmt < 0)
+		return EINVAL;
+
+	packed = (hi_fmt << IEEE80211_AC_MCS_SHIFT) | lo_fmt;
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_FIXED_11AC_MU_TX_RATE, packed, NULL, 0);
+
+	return 0;
+}
+
+/*
+ * Drive the MAC-address ACL of a VAP.  'value' is one of the
+ * IEEE80211_MACCMD_* commands: a POLICY_* command lazily attaches the
+ * "mac" aclator module (skipped entirely for POLICY_OPEN when nothing
+ * is attached) and applies the policy; FLUSH and DETACH operate on an
+ * attached module and are silent no-ops otherwise.
+ * Returns 0 on success, -EINVAL on attach failure or unknown command.
+ */
+int ieee80211_mac_acl(struct ieee80211vap *vap, int value)
+{
+	const struct ieee80211_aclator *policy = vap->iv_acl;
+
+	switch (value) {
+	case IEEE80211_MACCMD_POLICY_OPEN:
+	case IEEE80211_MACCMD_POLICY_ALLOW:
+	case IEEE80211_MACCMD_POLICY_DENY:
+		if (policy == NULL) {
+			/* Module loads on first real use; an open policy
+			 * with nothing attached needs no module at all */
+			if (value == IEEE80211_MACCMD_POLICY_OPEN)
+				return 0;
+
+			policy = ieee80211_aclator_get("mac");
+			if (policy == NULL || !policy->iac_attach(vap))
+				return -EINVAL;
+			vap->iv_acl = policy;
+		}
+		policy->iac_setpolicy(vap, value);
+		return 0;
+	case IEEE80211_MACCMD_FLUSH:
+		/* Silently ignored when no aclator is in use */
+		if (policy != NULL)
+			policy->iac_flush(vap);
+		return 0;
+	case IEEE80211_MACCMD_DETACH:
+		if (policy != NULL) {
+			vap->iv_acl = NULL;
+			policy->iac_detach(vap);
+		}
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Set or clear the 40 MHz-intolerant indication.  Applies only on a
+ * 2.4 GHz channel (802.11 requires 5 GHz STAs to report 0).  When the
+ * flag actually changes, both the VAP coexistence field and the HT
+ * capability bit are updated together, bandwidth is dropped to HT20,
+ * and a STA additionally notifies its AP with a 20/40 coexistence
+ * frame.
+ */
+static void ieee80211_wireless_set_40mhz_intolerant(struct ieee80211vap *vap,
+		                                    int intol)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int want;
+	int coex_set;
+	int cap_set;
+
+	/*
+	 * 80211 specifies 5G STA shall set 40MHz intolerant to 0
+	 */
+	if (!(ic->ic_curchan->ic_flags & IEEE80211_CHAN_2GHZ))
+		return;
+
+	/* Nothing to do when both flags already match the request */
+	want = !!intol;
+	coex_set = !!(vap->iv_coex & WLAN_20_40_BSS_COEX_40MHZ_INTOL);
+	cap_set = !!(ic->ic_htcap.cap & IEEE80211_HTCAP_C_40_INTOLERANT);
+	if (want == coex_set && want == cap_set)
+		return;
+
+	if (want) {
+		vap->iv_coex |= WLAN_20_40_BSS_COEX_40MHZ_INTOL;
+		ic->ic_htcap.cap |= IEEE80211_HTCAP_C_40_INTOLERANT;
+	} else {
+		vap->iv_coex &= ~WLAN_20_40_BSS_COEX_40MHZ_INTOL;
+		ic->ic_htcap.cap &= ~IEEE80211_HTCAP_C_40_INTOLERANT;
+	}
+
+	ieee80211_change_bw(vap, BW_HT20, 0);
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		ieee80211_send_20_40_bss_coex(vap);
+}
+
+/*
+ * Fix the transmit rate to a legacy (11a/11g) rate, or restore normal
+ * operation.  A rate_index >= IEEE80211_AG_START_RATE_INDEX selects a
+ * fixed legacy rate: the current phy mode is saved and, when running in
+ * an 11n-or-later mode, downgraded to plain 11a/11g.  A smaller index
+ * exits fixed-legacy mode (only valid if it was entered earlier),
+ * restoring the saved phy mode and disabling the fixed rate in qdrv.
+ * Returns 0 on success, EINVAL on a bad index or when asked to exit a
+ * mode that was never entered.
+ *
+ * NOTE(review): 'param' is accepted but never read here — presumably
+ * kept for signature parity with other handlers; confirm with callers.
+ */
+static int
+ieee80211_fix_legacy_rate(struct ieee80211vap *vap, int param, int rate_index)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int new_phy_mode = -1;	/* -1 means "leave the phy mode alone" */
+	int value;
+	int vht_flag = 0;
+
+	if (rate_index >= IEEE80211_AG_START_RATE_INDEX) {
+		/* Entering fixed-legacy-rate mode */
+		if (rate_index > IEEE80211_AG_RATE_MAXSIZE)
+			return EINVAL;
+
+		/* Pack the same index into the per-field slots of the
+		 * qdrv fixed-rate word, tagged with the 11n prefix */
+		value = IEEE80211_N_RATE_PREFIX | rate_index << 8 | rate_index << 16 | rate_index;
+
+		/* Legacy rates require a legacy phy mode; pick the band-
+		 * appropriate one when currently 11n/11ac (or auto) */
+		if (ic->ic_curmode >= IEEE80211_MODE_11NA || ic->ic_curmode == IEEE80211_MODE_AUTO) {
+			if (IS_IEEE80211_5G_BAND(ic)) {
+				new_phy_mode = IEEE80211_MODE_11A;
+			} else {
+				new_phy_mode = IEEE80211_MODE_11G;
+			}
+		}
+
+		/* Save the pre-override mode only on first entry, so
+		 * repeated calls don't clobber the saved value */
+		if (!ic->fixed_legacy_rate_mode) {
+			ic->ic_phymode_save = ic->ic_curmode;
+			ic->fixed_legacy_rate_mode = 1;
+		}
+	} else {
+		/* Leaving fixed-legacy-rate mode */
+		if (!ic->fixed_legacy_rate_mode)
+			return EINVAL;
+
+		/* Restore the saved mode only if it was 11n-or-later
+		 * (or auto) — i.e. only if entry actually downgraded it */
+		if (ic->ic_phymode_save >= IEEE80211_MODE_11NA || ic->ic_phymode_save == IEEE80211_MODE_AUTO)
+			new_phy_mode = ic->ic_phymode_save;
+
+		ic->ic_phymode_save = 0;
+		ic->fixed_legacy_rate_mode = 0;
+		value = 0xFF; /* Disable fixed rate */
+	}
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_FIXED_TX_RATE, value, NULL, 0);
+
+	if (new_phy_mode != -1) {
+		if (new_phy_mode >= IEEE80211_MODE_11AC_VHT20PM)
+			vht_flag = 1;
+
+		/* On a dual-band chip in STA mode, record the VHT choice
+		 * in the per-band profile for the active band */
+		if ((ic->ic_rf_chipid == CHIPID_DUAL) &&
+				(ic->ic_opmode == IEEE80211_M_STA)) {
+			if (IS_IEEE80211_5G_BAND(ic))
+				vap->iv_5ghz_prof.vht = vht_flag;
+			else
+				vap->iv_2_4ghz_prof.vht = vht_flag;
+		}
+		ic->ic_des_mode = ic->ic_phymode = new_phy_mode;
+		ieee80211_setmode(ic, new_phy_mode);
+
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_CONFIG;
+		ieee80211_wireless_reassoc(vap, 0, 1);
+
+		/* On AP, change channel to take new bw into effect by BB */
+		/* On STA, reassociation would result into re-scannning so not required */
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			ic->ic_set_channel(ic);
+		}
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_setparam(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_rsnparms *rsn = NULL;
+	int *i = (int *) extra;
+	int param = i[0];		/* parameter id is 1st */
+	int value = i[1];		/* NB: most values are TYPE_INT */
+	int temp_value;
+	int retv = 0;
+	int j, caps;
+	const struct ieee80211_authenticator *auth;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct ieee80211vap *vap_each;
+	struct ieee80211vap *vap_tmp;
+	struct ieee80211vap *first_vap;
+
+	if (vap->iv_bss)
+		rsn = &vap->iv_bss->ni_rsn;
+
+	switch (param) {
+	case IEEE80211_PARAM_AP_ISOLATE:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+			br_set_ap_isolate(value);
+		else
+			return -EINVAL;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_AUTHMODE:
+		if (!vap->iv_bss)
+			return -EFAULT;
+
+		switch (value) {
+		case IEEE80211_AUTH_WPA:	/* WPA */
+		case IEEE80211_AUTH_8021X:	/* 802.1x */
+		case IEEE80211_AUTH_OPEN:	/* open */
+		case IEEE80211_AUTH_SHARED:	/* shared-key */
+		case IEEE80211_AUTH_AUTO:	/* auto */
+			auth = ieee80211_authenticator_get(value);
+			if (auth == NULL)
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (value) {
+		case IEEE80211_AUTH_WPA:	/* WPA w/ 802.1x */
+			value = IEEE80211_AUTH_8021X;
+			break;
+		case IEEE80211_AUTH_OPEN:	/* open */
+			vap->iv_flags &= ~(IEEE80211_F_WPA);
+			break;
+		case IEEE80211_AUTH_SHARED:	/* shared-key */
+		case IEEE80211_AUTH_AUTO:	/* auto */
+		case IEEE80211_AUTH_8021X:	/* 802.1x */
+			vap->iv_flags &= ~IEEE80211_F_WPA;
+			break;
+		}
+		/* NB: authenticator attach/detach happens on state change */
+		vap->iv_bss->ni_authmode = value;
+		/* XXX mixed/mode/usage? */
+		vap->iv_auth = auth;
+		vap->iv_osen = 0;
+		retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_PROTMODE:
+		if (value > IEEE80211_PROT_RTSCTS)
+			return -EINVAL;
+		ic->ic_protmode = value;
+		/* NB: if not operating in 11g this can wait */
+		if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+		    IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan))
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_MCASTCIPHER:
+		if ((vap->iv_caps & cipher2cap(value)) == 0 &&
+		    !ieee80211_crypto_available(value))
+			return -EINVAL;
+		if (!rsn)
+			return -EFAULT;
+		if (!IEEE80211_IS_TKIP_ALLOWED(ic)) {
+			if (value == IEEE80211_CIPHER_AES_CCM)
+				rsn->rsn_mcastcipher = value;
+			else
+				printk("%s: invalid cipher %d ignored\n", __FUNCTION__, value);
+		} else {
+			rsn->rsn_mcastcipher = value;
+		}
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_MCASTKEYLEN:
+		if (!(0 < value && value <= IEEE80211_KEYBUF_SIZE))
+			return -EINVAL;
+		if (!rsn)
+			return -EFAULT;
+		/* XXX no way to verify driver capability */
+		rsn->rsn_mcastkeylen = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHERS:
+		if (!rsn)
+			return -EFAULT;
+		/*
+		 * NB: this logic intentionally ignores unknown and
+		 * unsupported ciphers so folks can specify 0xff or
+		 * similar and get all available ciphers.
+		 */
+		/* caps are really ciphers */
+		caps = 0;
+		for (j = 1; j < 32; j++)	/* NB: skip WEP */
+			if ((value & (1 << j)) &&
+			    ((vap->iv_caps & cipher2cap(j)) ||
+			     ieee80211_crypto_available(j)))
+				caps |= 1 << j;
+		if (caps == 0)			/* nothing available */
+			return -EINVAL;
+		/* XXX verify ciphers ok for unicast use? */
+		/* XXX disallow if running as it'll have no effect */
+		rsn->rsn_ucastcipherset = caps;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHER:
+		if ((vap->iv_caps & cipher2cap(value)) == 0 &&
+		    !ieee80211_crypto_available(value))
+			return -EINVAL;
+		if (!rsn)
+			return -EFAULT;
+
+		rsn->rsn_ucastcipher = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_UCASTKEYLEN:
+		if (!(0 < value && value <= IEEE80211_KEYBUF_SIZE))
+			return -EINVAL;
+		if (!rsn)
+			return -EFAULT;
+		/* XXX no way to verify driver capability */
+		rsn->rsn_ucastkeylen = value;
+		break;
+	case IEEE80211_PARAM_KEYMGTALGS:
+		if (!rsn)
+			return -EFAULT;
+		/*
+		 * Map supplcant values to RSN values. Included only the currently
+		 * used mappings. But, need to increases the cases as we support more.
+		 * */
+		switch (value) {
+			case WPA_KEY_MGMT_PSK:
+				if (vap->iv_pmf == IEEE80211_MFP_PROTECT_REQUIRE)
+					rsn->rsn_keymgmtset = RSN_ASE_8021X_PSK_SHA256;
+				else
+					rsn->rsn_keymgmtset = RSN_ASE_8021X_PSK;
+				break;
+			case WPA_KEY_MGMT_IEEE8021X:
+				rsn->rsn_keymgmtset = RSN_ASE_8021X_UNSPEC;
+				break;
+			case WPA_KEY_MGMT_NONE:
+				rsn->rsn_keymgmtset = RSN_ASE_NONE;
+				break;
+			case WPA_KEY_MGMT_IEEE8021X_SHA256:
+				rsn->rsn_keymgmtset = RSN_ASE_8021X_SHA256;
+				break;
+			case WPA_KEY_MGMT_PSK_SHA256:
+				rsn->rsn_keymgmtset = RSN_ASE_8021X_PSK_SHA256;
+				break;
+			case (WPA_KEY_MGMT_FT_IEEE8021X | WPA_KEY_MGMT_IEEE8021X):
+				rsn->rsn_keymgmtset =
+					(RSN_ASE_8021X_UNSPEC | WPA_KEY_MGMT_FT_IEEE8021X);
+				break;
+			case (WPA_KEY_MGMT_FT_PSK | WPA_KEY_MGMT_PSK):
+				rsn->rsn_keymgmtset = (RSN_ASE_8021X_PSK | WPA_KEY_MGMT_FT_PSK);
+				break;
+			default:
+				rsn->rsn_keymgmtset = value;
+				break;
+		}
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_RSNCAPS:
+		if (!rsn)
+			return -EFAULT;
+		/* XXX check */
+		rsn->rsn_caps = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_WPA:
+		if (value > 3)
+			return -EINVAL;
+		/* XXX verify ciphers available */
+		vap->iv_flags &= ~IEEE80211_F_WPA;
+		switch (value) {
+		case 1:
+			vap->iv_flags |= IEEE80211_F_WPA1;
+			break;
+		case 2:
+			vap->iv_flags |= IEEE80211_F_WPA2;
+			break;
+		case 3:
+			vap->iv_flags |= IEEE80211_F_WPA1 | IEEE80211_F_WPA2;
+			break;
+		}
+		retv = ENETRESET;		/* XXX? */
+		break;
+	case IEEE80211_PARAM_ROAMING:
+		if (!(IEEE80211_ROAMING_DEVICE <= value &&
+		    value <= IEEE80211_ROAMING_MANUAL))
+			return -EINVAL;
+		ic->ic_roaming = value;
+		break;
+	case IEEE80211_PARAM_PRIVACY:
+		if (value) {
+			/* XXX check for key state? */
+			vap->iv_flags |= IEEE80211_F_PRIVACY;
+		} else
+			vap->iv_flags &= ~IEEE80211_F_PRIVACY;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_DROPUNENCRYPTED:
+		if (value)
+			vap->iv_flags |= IEEE80211_F_DROPUNENC;
+		else
+			vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_DROPUNENC_EAPOL:
+		if (value)
+			IEEE80211_VAP_DROPUNENC_EAPOL_ENABLE(vap);
+		else
+			IEEE80211_VAP_DROPUNENC_EAPOL_DISABLE(vap);
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_COUNTERMEASURES:
+		{
+			int invoked = 0;
+			int cleared = 0;
+			static const char *tag = QEVT_COMMON_PREFIX;
+
+			if (value) {
+				if ((vap->iv_flags & IEEE80211_F_WPA) == 0)
+					return -EINVAL;
+				vap->iv_flags |= IEEE80211_F_COUNTERM;
+				invoked = 1;
+			} else {
+				if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+					cleared = 1;
+				}
+				vap->iv_flags &= ~IEEE80211_F_COUNTERM;
+			}
+			if (invoked || cleared)
+			{
+				ieee80211_eventf(dev, "%sTKIP countermeasures %s", tag,
+						 invoked ? "invoked" : "cleared");
+			}
+		}
+		break;
+	case IEEE80211_PARAM_DRIVER_CAPS:
+		vap->iv_caps = value;		/* NB: for testing */
+		break;
+	case IEEE80211_PARAM_WMM:
+		if (ic->ic_caps & IEEE80211_C_WME){
+			if (value) {
+				vap->iv_flags |= IEEE80211_F_WME;
+				vap->iv_ic->ic_flags |= IEEE80211_F_WME; /* XXX needed by ic_reset */
+			} else {
+				vap->iv_flags &= ~IEEE80211_F_WME;
+				vap->iv_ic->ic_flags &= ~IEEE80211_F_WME; /* XXX needed by ic_reset */
+			}
+			retv = ENETRESET;	/* Renegotiate for capabilities */
+		}
+		break;
+	case IEEE80211_PARAM_HIDESSID:
+	{
+		int	beacon_update_required= 0;
+
+		if ((!!(vap->iv_flags & IEEE80211_F_HIDESSID)) ^ (!!value))
+			beacon_update_required = 1;
+
+		if (value)
+			vap->iv_flags |= IEEE80211_F_HIDESSID;
+		else
+			vap->iv_flags &= ~IEEE80211_F_HIDESSID;
+
+		if (beacon_update_required && (vap->iv_state & IEEE80211_S_RUN))
+			ic->ic_beacon_update(vap);
+
+	}
+		break;
+	case IEEE80211_PARAM_APBRIDGE:
+		if (value == 0)
+			vap->iv_flags |= IEEE80211_F_NOBRIDGE;
+		else
+			vap->iv_flags &= ~IEEE80211_F_NOBRIDGE;
+		break;
+	case IEEE80211_PARAM_INACT:
+		vap->iv_inact_run = value / IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_AUTH:
+		vap->iv_inact_auth = value / IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_INIT:
+		vap->iv_inact_init = value / IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_DTIM_PERIOD:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+		    vap->iv_opmode != IEEE80211_M_IBSS)
+			return -EINVAL;
+		if (IEEE80211_DTIM_MIN <= value &&
+		    value <= IEEE80211_DTIM_MAX) {
+			vap->iv_dtim_period = value;
+			ic->ic_beacon_update(vap);
+			retv = -EINVAL;		/* requires restart */
+		} else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_BEACON_INTERVAL:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+		    vap->iv_opmode != IEEE80211_M_IBSS)
+			return -EINVAL;
+
+		if (IEEE80211_BINTVAL_VALID(value)) {
+			vap->iv_ic->ic_lintval_backup = value;
+			ieee80211_beacon_interval_set(vap->iv_ic, value);
+			retv = -EINVAL;
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_DOTH:
+		if (value)
+			ic->ic_flags |= IEEE80211_F_DOTH;
+		else
+			ic->ic_flags &= ~IEEE80211_F_DOTH;
+		break;
+	case IEEE80211_PARAM_SHPREAMBLE:
+		if (value) {
+			ic->ic_caps |= IEEE80211_C_SHPREAMBLE;
+			ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+			ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+		} else {
+			ic->ic_caps &= ~IEEE80211_C_SHPREAMBLE;
+			ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+			ic->ic_flags |= IEEE80211_F_USEBARKER;
+		}
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_PWRCONSTRAINT:
+		{
+			uint16_t pwr_constraint = (value & 0xffff);
+			struct pwr_info_per_vap pwr;
+			pwr.vap = vap;
+			pwr.max_in_minpwr = -100; /* a value that never be real */
+
+			if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+				if (vap->iv_opmode == IEEE80211_M_HOSTAP && ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
+					ieee80211_iterate_nodes(&ic->ic_sta, get_max_in_minpwr, &pwr, 1);
+					if (pwr_constraint >= ic->ic_bsschan->ic_maxregpower) {
+						printk("power constraint(%d) >= current channel max regulatory power(%d)\n", pwr_constraint, ic->ic_bsschan->ic_maxregpower);
+						retv = EINVAL;
+					}
+					else if ((pwr.max_in_minpwr != -100) &&
+							((ic->ic_bsschan->ic_maxregpower - pwr_constraint) < pwr.max_in_minpwr)) {
+						printk("power constraint(%d) make local max transmit power(%d) less than the max value(%d) of min power in associated STAs\n",
+								pwr_constraint,
+								(ic->ic_bsschan->ic_maxregpower - pwr_constraint),
+								pwr.max_in_minpwr);
+						retv = EINVAL;
+					}
+					else {
+						ic->ic_pwr_constraint = pwr_constraint;
+						if (vap->iv_state == IEEE80211_S_RUN)
+							ic->ic_beacon_update(vap);
+					}
+				} else {
+					retv = EOPNOTSUPP;
+				}
+			} else {
+					retv = EOPNOTSUPP;
+			}
+		}
+		break;
+	case IEEE80211_PARAM_GENREASSOC:
+		if (!vap->iv_bss)
+			return -EFAULT;
+
+		IEEE80211_SEND_MGMT(vap->iv_bss, IEEE80211_FC0_SUBTYPE_REASSOC_REQ, 0);
+		break;
+	case IEEE80211_PARAM_REPEATER:
+		if (IEEE80211_VAP_WDS_IS_RBS(vap) || IEEE80211_VAP_WDS_IS_MBS(vap)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+					"%s can't config repeater since "
+					"it's a extender\n", vap->iv_dev->name);
+			return -EINVAL;
+		}
+
+		first_vap = TAILQ_FIRST(&ic->ic_vaps);
+		if (first_vap->iv_opmode != IEEE80211_M_STA)
+			return -EINVAL;
+
+		if (value) {
+			ic->ic_flags_ext |= IEEE80211_FEXT_REPEATER;
+			ieee80211_new_state(first_vap, IEEE80211_S_INIT, 0);
+		} else {
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_REPEATER;
+		}
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_REPEATER, value, NULL, 0);
+
+		break;
+	case IEEE80211_PARAM_WDS:
+		if (value) {
+			vap->iv_qtn_flags &= ~IEEE80211_QTN_BRIDGEMODE_DISABLED;
+		} else {
+			vap->iv_qtn_flags |= IEEE80211_QTN_BRIDGEMODE_DISABLED;
+		}
+		ieee80211_bridgemode_set(vap, 1);
+		break;
+	case IEEE80211_PARAM_BGSCAN:
+		if (value) {
+			if ((vap->iv_caps & IEEE80211_C_BGSCAN) == 0)
+				return -EINVAL;
+			vap->iv_flags |= IEEE80211_F_BGSCAN;
+		} else {
+			/* XXX racey? */
+			vap->iv_flags &= ~IEEE80211_F_BGSCAN;
+			ieee80211_cancel_scan(vap);	/* anything current */
+		}
+		break;
+	case IEEE80211_PARAM_BGSCAN_IDLE:
+		if (value >= IEEE80211_BGSCAN_IDLE_MIN)
+			vap->iv_bgscanidle = msecs_to_jiffies(value);
+		else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_BGSCAN_INTERVAL:
+		if (value >= IEEE80211_BGSCAN_INTVAL_MIN) {
+			vap->iv_bgscanintvl = value * HZ;
+			ic->ic_extender_bgscanintvl = vap->iv_bgscanintvl;
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_SCAN_OPCHAN:
+		ic->ic_scan_opchan_enable = !!(value);
+		break;
+	case IEEE80211_PARAM_EXTENDER_MBS_RSSI_MARGIN:
+		if (ieee80211_swfeat_is_supported(SWFEAT_ID_QHOP, 1))
+			ic->ic_extender_mbs_rssi_margin = value;
+		break;
+	case IEEE80211_PARAM_MCAST_RATE:
+		/* units are in KILObits per second */
+		if (value >= 256 && value <= 54000)
+			vap->iv_mcast_rate = value;
+		else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_COVERAGE_CLASS:
+		if (value >= 0 && value <= IEEE80211_COVERAGE_CLASS_MAX) {
+			ic->ic_coverageclass = value;
+			if (IS_UP_AUTO(vap))
+				ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+			retv = 0;
+		} else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_COUNTRY_IE:
+		if (value)
+			ic->ic_flags_ext |= IEEE80211_FEXT_COUNTRYIE;
+		else
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_COUNTRYIE;
+		retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_REGCLASS:
+		if (value)
+			ic->ic_flags_ext |= IEEE80211_FEXT_REGCLASS;
+		else
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_REGCLASS;
+		retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_SCANVALID:
+		vap->iv_scanvalid = value * HZ;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11A:
+		vap->iv_roam.rssi11a = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11B:
+		vap->iv_roam.rssi11bOnly = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11G:
+		vap->iv_roam.rssi11b = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11A:
+		vap->iv_roam.rate11a = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11B:
+		vap->iv_roam.rate11bOnly = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11G:
+		vap->iv_roam.rate11b = value;
+		break;
+	case IEEE80211_PARAM_UAPSDINFO:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (ic->ic_caps & IEEE80211_C_UAPSD) {
+				if (value)
+					IEEE80211_VAP_UAPSD_ENABLE(vap);
+				else
+					IEEE80211_VAP_UAPSD_DISABLE(vap);
+				retv = ENETRESET;
+			}
+		} else if (vap->iv_opmode == IEEE80211_M_STA) {
+			vap->iv_uapsdinfo = value;
+			IEEE80211_VAP_UAPSD_ENABLE(vap);
+			retv = ENETRESET;
+		}
+		break;
+	case IEEE80211_PARAM_SLEEP:
+		if (!vap->iv_bss)
+			return -EFAULT;
+
+		/* XXX: Forced sleep for testing. Does not actually place the
+		 *      HW in sleep mode yet. this only makes sense for STAs.
+		 */
+		if (value) {
+			/* goto sleep */
+			IEEE80211_VAP_GOTOSLEEP(vap);
+		} else {
+			/* wakeup */
+			IEEE80211_VAP_WAKEUP(vap);
+		}
+		ieee80211_ref_node(vap->iv_bss);
+		ieee80211_send_nulldata(vap->iv_bss);
+		break;
+	case IEEE80211_PARAM_TUNEPD:
+		if (!vap->iv_bss)
+			return -EFAULT;
+
+		/* Send specified number of frames */
+		for (j = 0; j < value; j++)
+			ieee80211_send_tuning_data(vap->iv_bss);
+		break;
+	case IEEE80211_PARAM_QOSNULL:
+		{
+			struct ieee80211_node *ni_sta;
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				/* Tx a QoS null frame to an STA by passing node_index */
+				ni_sta = ieee80211_find_node_by_idx(ic, NULL, value);
+				if (ni_sta) {
+					ieee80211_send_qosnulldata(ni_sta, WMM_AC_BK);
+				}
+			} else {
+				if (!vap->iv_bss)
+					return -EFAULT;
+
+				/* Force a QoS Null for testing. */
+				ieee80211_ref_node(vap->iv_bss);
+				ieee80211_send_qosnulldata(vap->iv_bss, value);
+			}
+		}
+		break;
+	case IEEE80211_PARAM_PSPOLL:
+		if (!vap->iv_bss)
+			return -EFAULT;
+
+		/* Force a PS-POLL for testing. */
+		ieee80211_send_pspoll(vap->iv_bss);
+		break;
+	case IEEE80211_PARAM_EOSPDROP:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (value)
+				IEEE80211_VAP_EOSPDROP_ENABLE(vap);
+			else
+				IEEE80211_VAP_EOSPDROP_DISABLE(vap);
+		}
+		break;
+	case IEEE80211_PARAM_MARKDFS:
+		if (value)
+			ic->ic_flags_ext |= IEEE80211_FEXT_MARKDFS;
+		else
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_MARKDFS;
+
+		/* set radar mode for qdrv */
+		ic->ic_set_radar(value);
+		break;
+	case IEEE80211_PARAM_RADAR_BW:
+		ic->ic_radar_bw = value;
+		break;
+	case IEEE80211_PARAM_STA_DFS:
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			struct ieee80211_node *ni = vap->iv_bss;
+			if (value)
+				ic->ic_flags_ext |= IEEE80211_FEXT_MARKDFS;
+			else
+				ic->ic_flags_ext &= ~IEEE80211_FEXT_MARKDFS;
+
+			if (vap->iv_state == IEEE80211_S_RUN) {
+				SCSDBG(SCSLOG_NOTICE, "send qtn DFS report (DFS %s)\n", value ?
+							"Enabled" : "Disabled");
+				ieee80211_send_action_dfs_report(ni);
+			}
+
+			ic->ic_enable_sta_dfs(value);
+		}
+		break;
+	case IEEE80211_PARAM_DYNAMIC_AC:
+		if (ic->ic_rf_chipid == CHIPID_DUAL)
+			return -EOPNOTSUPP;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_AMPDU_DENSITY:
+		if (value < IEEE80211_AMPDU_MIN_DENSITY ||
+		    value > IEEE80211_AMPDU_MAX_DENSITY) {
+			return -EINVAL;
+		} else {
+			if (ic->ic_htcap.mpduspacing != value) {
+				ic->ic_htcap.mpduspacing = value;
+				ieee80211_wireless_reassoc(vap, 0, 1);
+			}
+		}
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_BA_SETUP_ENABLE:
+		ieee80211_ba_setup_detect_set(vap, value);
+		break;
+	case IEEE80211_PARAM_AGGREGATION:
+		if (value == 0 && IS_IEEE80211_VHT_ENABLED(ic))
+			return -EOPNOTSUPP;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_MCS_CAP:
+		/*
+		 * Updating iv_mcs_config to have sync with other MCS
+		 * commands, call_qcsapi and iwpriv
+		 */
+		if (!ieee80211_ht_tx_mcs_is_valid(value)) {
+			printk("Invalid MCS in 11n mode\n");
+			return -EINVAL;
+		}
+
+		vap->iv_mcs_config = IEEE80211_N_RATE_PREFIX | ((value << 16) & 0xff0000) |
+				((value << 8) & 0xff00) | (value & 0xff);
+
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_MU_ENABLE:
+		if (!ieee80211_swfeat_is_supported(SWFEAT_ID_MU_MIMO, 1) ||
+			(vap->iv_opmode == IEEE80211_M_STA &&
+			get_hardware_revision() < HARDWARE_REVISION_TOPAZ_A2)) {
+			return -EOPNOTSUPP;
+		} else {
+			uint32_t mu_bf_cap_flag = ic->ic_vhtcap.cap_flags &
+						IEEE80211_VHTCAP_C_MU_BEAM_FORMXX_CAP_MASK;
+
+			ic->ic_vhtcap.cap_flags &= ~IEEE80211_VHTCAP_C_MU_BEAM_FORMXX_CAP_MASK;
+			ic->ic_mu_enable = value;
+
+			/* when MU is enabled, make sure txBF STS cap is advertising 4SS */
+			if (ic->ic_mu_enable) {
+				if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+					mu_bf_cap_flag = IEEE80211_VHTCAP_C_MU_BEAM_FORMER_CAP;
+				} else if (vap->iv_opmode == IEEE80211_M_STA) {
+					mu_bf_cap_flag = IEEE80211_VHTCAP_C_MU_BEAM_FORMEE_CAP;
+				}
+
+				ic->ic_vhtcap.bfstscap_save = ic->ic_vhtcap.bfstscap;
+				ic->ic_vhtcap.bfstscap = IEEE80211_VHTCAP_RX_STS_4;
+				ic->ic_vhtcap.cap_flags |= mu_bf_cap_flag;
+
+				ic->ic_vhtcap_24g.bfstscap_save = ic->ic_vhtcap_24g.bfstscap;
+				ic->ic_vhtcap_24g.bfstscap = IEEE80211_VHTCAP_RX_STS_4;
+			} else {
+				if (ic->ic_vhtcap.bfstscap_save != IEEE80211_VHTCAP_RX_STS_INVALID) {
+					ic->ic_vhtcap.bfstscap = ic->ic_vhtcap.bfstscap_save;
+				}
+
+				if (ic->ic_vhtcap_24g.bfstscap_save != IEEE80211_VHTCAP_RX_STS_INVALID) {
+					ic->ic_vhtcap_24g.bfstscap = ic->ic_vhtcap_24g.bfstscap_save;
+				}
+			}
+
+			ieee80211_param_to_qdrv(vap, param, ic->ic_mu_enable, NULL, 0);
+
+			/* Force a reassociation to use the new BEAM FORMEE/ER settings */
+			TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+				ieee80211_wireless_reassoc(vap_each, 0, 1);
+			}
+		}
+		break;
+	case IEEE80211_PARAM_TXBF_PERIOD:
+		ic->ic_vopt.bf = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_PS_CMD:
+		ic->ic_vopt.bbf = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_MIMOMODE:
+	case IEEE80211_PARAM_SHORT_RETRY_LIMIT:
+	case IEEE80211_PARAM_LONG_RETRY_LIMIT:
+	case IEEE80211_PARAM_RETRY_COUNT:
+	case IEEE80211_PARAM_RG:
+	case IEEE80211_PARAM_ACK_POLICY:
+	case IEEE80211_PARAM_EXP_MAT_SEL:
+	case IEEE80211_PARAM_LEGACY_MODE:
+	case IEEE80211_PARAM_MAX_AGG_SUBFRM:
+	case IEEE80211_PARAM_MAX_AGG_SIZE:
+	case IEEE80211_PARAM_TXBF_CTRL:
+	case IEEE80211_PARAM_HTBA_SEQ_CTRL:
+	case IEEE80211_PARAM_HTBA_SIZE_CTRL:
+	case IEEE80211_PARAM_HTBA_TIME_CTRL:
+	case IEEE80211_PARAM_HT_ADDBA:
+	case IEEE80211_PARAM_HT_DELBA:
+	case IEEE80211_PARAM_MUC_PROFILE:
+	case IEEE80211_PARAM_MUC_PHY_STATS:
+	case IEEE80211_PARAM_MUC_SET_PARTNUM:
+	case IEEE80211_PARAM_ENABLE_GAIN_ADAPT:
+	case IEEE80211_PARAM_FORCEMICERROR:
+	case IEEE80211_PARAM_ENABLECOUNTERMEASURES:
+	case IEEE80211_PARAM_RATE_CTRL_FLAGS:
+	case IEEE80211_PARAM_CONFIG_BB_INTR_DO_SRESET:
+	case IEEE80211_PARAM_CONFIG_MAC_INTR_DO_SRESET:
+	case IEEE80211_PARAM_CONFIG_WDG_DO_SRESET:
+	case IEEE80211_PARAM_TRIGGER_RESET:
+	case IEEE80211_PARAM_INJECT_INVALID_FCS:
+	case IEEE80211_PARAM_CONFIG_WDG_SENSITIVITY:
+	case IEEE80211_PARAM_MAX_MGMT_FRAMES:
+	case IEEE80211_PARAM_MCS_ODD_EVEN:
+	case IEEE80211_PARAM_RESTRICTED_MODE:
+	case IEEE80211_PARAM_RESTRICT_RTS:
+	case IEEE80211_PARAM_RESTRICT_LIMIT:
+	case IEEE80211_PARAM_RESTRICT_RATE:
+	case IEEE80211_PARAM_SWRETRY_AGG_MAX:
+	case IEEE80211_PARAM_SWRETRY_NOAGG_MAX:
+	case IEEE80211_PARAM_SWRETRY_SUSPEND_XMIT:
+	case IEEE80211_PARAM_BB_MAC_RESET_MSGS:
+	case IEEE80211_PARAM_BB_MAC_RESET_DONE_WAIT:
+	case IEEE80211_PARAM_TX_AGG_TIMEOUT:
+	case IEEE80211_PARAM_LEGACY_RETRY_LIMIT:
+	case IEEE80211_PARAM_RX_CTRL_FILTER:
+	case IEEE80211_PARAM_DUMP_TCM_FD:
+	case IEEE80211_PARAM_RXCSR_ERR_ALLOW:
+	case IEEE80211_PARAM_DUMP_TRIGGER:
+	case IEEE80211_PARAM_STOP_FLAGS:
+	case IEEE80211_PARAM_CHECK_FLAGS:
+	case IEEE80211_PARAM_PWR_ADJUST:
+	case IEEE80211_PARAM_PWR_ADJUST_AUTO:
+	case IEEE80211_PARAM_RTS_CTS:
+	case IEEE80211_PARAM_TX_QOS_SCHED:
+	case IEEE80211_PARAM_PEER_RTS_MODE:
+	case IEEE80211_PARAM_DYN_WMM:
+	case IEEE80211_PARAM_GET_CH_INUSE:
+	case IEEE80211_PARAM_RX_AGG_TIMEOUT:
+	case IEEE80211_PARAM_FORCE_MUC_HALT:
+	case IEEE80211_PARAM_FORCE_ENABLE_TRIGGERS:
+	case IEEE80211_PARAM_FORCE_MUC_TRACE:
+	case IEEE80211_PARAM_BK_BITMAP_MODE:
+	case IEEE80211_PARAM_TEST_LNCB:
+	case IEEE80211_PARAM_UNKNOWN_DEST_ARP:
+	case IEEE80211_PARAM_UNKNOWN_DEST_FWD:
+	case IEEE80211_PARAM_DBG_MODE_FLAGS:
+	case IEEE80211_PARAM_PWR_SAVE:
+	case IEEE80211_PARAM_DBG_FD:
+	case IEEE80211_PARAM_CCA_PRI:
+	case IEEE80211_PARAM_CCA_SEC:
+	case IEEE80211_PARAM_CCA_SEC40:
+	case IEEE80211_PARAM_CCA_FIXED:
+	case IEEE80211_PARAM_DYN_AGG_TIMEOUT:
+	case IEEE80211_PARAM_SIFS_TIMING:
+	case IEEE80211_PARAM_TEST_TRAFFIC:
+	case IEEE80211_PARAM_TX_AMSDU:
+	case IEEE80211_PARAM_QCAT_STATE:
+	case IEEE80211_PARAM_RALG_DBG:
+	case IEEE80211_PARAM_SINGLE_AGG_QUEUING:
+	case IEEE80211_PARAM_BA_THROT:
+	case IEEE80211_PARAM_TX_QUEUING_ALG:
+	case IEEE80211_PARAM_DAC_DBG:
+	case IEEE80211_PARAM_CARRIER_ID:
+	case IEEE80211_PARAM_WME_THROT:
+	case IEEE80211_PARAM_GENPCAP:
+	case IEEE80211_PARAM_CCA_DEBUG:
+	case IEEE80211_PARAM_CCA_STATS_PERIOD:
+	case IEEE80211_PARAM_TUNEPD_DONE:
+	case IEEE80211_PARAM_AUC_RX_DBG:
+	case IEEE80211_PARAM_AUC_TX_DBG:
+	case IEEE80211_PARAM_RX_ACCELERATE:
+	case IEEE80211_PARAM_RX_ACCEL_LOOKUP_SA:
+	case IEEE80211_PARAM_BR_IP_ADDR:
+	case IEEE80211_PARAM_AC_INHERITANCE:
+	case IEEE80211_PARAM_AC_Q2Q_INHERITANCE:
+	case IEEE80211_PARAM_1SS_AMSDU_SUPPORT:
+	case IEEE80211_PARAM_TACMAP:
+	case IEEE80211_PARAM_AUC_QOS_SCH:
+	case IEEE80211_PARAM_TXBF_IOT:
+	case IEEE80211_PARAM_AGGRESSIVE_AGG:
+	case IEEE80211_PARAM_MU_DEBUG_FLAG:
+	case IEEE80211_PARAM_INST_MU_GRP_QMAT:
+	case IEEE80211_PARAM_DELE_MU_GRP_QMAT:
+	case IEEE80211_PARAM_EN_MU_GRP_QMAT:
+	case IEEE80211_PARAM_DIS_MU_GRP_QMAT:
+	case IEEE80211_PARAM_SET_CRC_ERR:
+	case IEEE80211_PARAM_MU_SWITCH_USR_POS:
+	case IEEE80211_PARAM_SET_GRP_SND_PERIOD:
+	case IEEE80211_PARAM_SET_PREC_SND_PERIOD:
+	case IEEE80211_PARAM_SET_MU_RANK_TOLERANCE:
+	case IEEE80211_PARAM_AUTO_CCA_ENABLE:
+	case IEEE80211_PARAM_AUTO_CCA_PARAMS:
+	case IEEE80211_PARAM_AUTO_CCA_DEBUG:
+	case IEEE80211_PARAM_AUTO_CS_ENABLE:
+	case IEEE80211_PARAM_AUTO_CS_PARAMS:
+	case IEEE80211_PARAM_CS_THRESHOLD:
+	case IEEE80211_PARAM_CS_THRESHOLD_DBM:
+	case IEEE80211_PARAM_DUMP_PPPC_TX_SCALE_BASES:
+	case IEEE80211_PARAM_GLOBAL_FIXED_TX_SCALE_INDEX:
+	case IEEE80211_PARAM_NDPA_LEGACY_FORMAT:
+	case IEEE80211_PARAM_SFS:
+	case IEEE80211_PARAM_INST_1SS_DEF_MAT_ENABLE:
+	case IEEE80211_PARAM_INST_1SS_DEF_MAT_THRESHOLD:
+	case IEEE80211_PARAM_RATE_TRAIN_DBG:
+	case IEEE80211_PARAM_MU_USE_EQ:
+	case IEEE80211_PARAM_MU_AIRTIME_PADDING:
+	case IEEE80211_PARAM_MU_AMSDU_SIZE:
+	case IEEE80211_PARAM_RESTRICT_WLAN_IP:
+	case IEEE80211_PARAM_MUC_SYS_DEBUG:
+	case IEEE80211_PARAM_AUC_TX_AGG_DURATION:
+	case IEEE80211_PARAM_1BIT_PKT_DETECT:
+	case IEEE80211_PARAM_ANTENNA_USAGE:
+	case IEEE80211_PARAM_VSP_NOD_DEBUG:
+	case IEEE80211_PARAM_DYNAMIC_SIFS_TIMING:
+	case IEEE80211_PARAM_BEACON_HANG_TIMEOUT:
+	case IEEE80211_PARAM_BB_DEAFNESS_WAR_EN:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_OFF_CHAN_SUSPEND:
+		if (value & IEEE80211_OFFCHAN_SUSPEND_MASK) {
+			value = value & IEEE80211_OFFCHAN_TIMEOUT_MASK;
+			if (value > IEEE80211_OFFCHAN_TIMEOUT_MAX)
+				value = IEEE80211_OFFCHAN_TIMEOUT_MAX;
+			else if (value < IEEE80211_OFFCHAN_TIMEOUT_MIN)
+				value = IEEE80211_OFFCHAN_TIMEOUT_MIN;
+			ieee80211_off_channel_suspend(vap, value);
+		} else {
+			ieee80211_off_channel_resume(vap);
+		}
+		break;
+	case IEEE80211_PARAM_QTN_OPTI_MODE:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_SET_RTS_BW_DYN:
+		ic->ic_rts_bw_dyn = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_SET_DUP_RTS:
+		ic->ic_dup_rts = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_SET_CTS_BW:
+		ic->ic_cts_bw = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_NDPA_DUR:
+		ic->ic_ndpa_dur = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_SU_TXBF_PKT_CNT:
+		ic->ic_su_txbf_pkt_cnt = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_MU_TXBF_PKT_CNT:
+		ic->ic_mu_txbf_pkt_cnt = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_MU_DEBUG_LEVEL:
+		ic->ic_mu_debug_level = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_FIXED_SGI:
+		ic->ic_gi_fixed = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_FIXED_BW:
+		if (value & QTN_BW_FIXED_EN) {
+			int bw = ieee80211_get_bw(ic);
+			int qtn_bw = MS(value, QTN_BW_FIXED_BW);
+			if (bw == BW_INVALID) {
+				printk("current bw is invalid!\n");
+				break;
+			}
+			if ((bw == BW_HT20 && qtn_bw > QTN_BW_20M) ||
+					(bw == BW_HT40 && qtn_bw > QTN_BW_40M) ||
+					(bw == BW_HT80 && qtn_bw > QTN_BW_80M)) {
+				printk("Can't set fixed qtn_bw %u because current bw is %u\n",
+						qtn_bw, bw);
+				break;
+			}
+		}
+		ic->ic_bw_fixed = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_RTSTHRESHOLD:
+		if (value == vap->iv_rtsthreshold) // Nothing to do
+			break;
+
+		if (IEEE80211_RTS_MIN <= value && value <= IEEE80211_RTS_MAX) {
+			vap->iv_rtsthreshold = value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_PWR_ADJUST_SCANCNT:
+		ic->ic_pwr_adjust_scancnt = value;
+		break;
+	case IEEE80211_PARAM_MUC_FLAGS:
+		if (value & QTN_FLAG_MCS_UEQM_DISABLE) {
+			ic->ic_caps &= ~IEEE80211_C_UEQM;
+		} else {
+			ic->ic_caps |= IEEE80211_C_UEQM;
+		}
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_HT_NSS_CAP:
+	case IEEE80211_PARAM_VHT_NSS_CAP:
+		retv = ieee80211_set_nss_cap(vap, param, value);
+		break;
+	case IEEE80211_PARAM_VHT_MCS_CAP:
+		retv = ieee80211_set_vht_mcs_cap(vap, value);
+		break;
+	case IEEE80211_PARAM_LDPC:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+
+		/* Update the HT capabilities */
+		TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+			if (value) {
+				vap_each->iv_ht_flags |= IEEE80211_HTF_LDPC_ENABLED;
+				vap_each->iv_vht_flags |= IEEE80211_VHTCAP_C_RX_LDPC;
+				ic->ic_vhtcap.cap_flags |= IEEE80211_VHTCAP_C_RX_LDPC;
+				ic->ic_vhtcap_24g.cap_flags |= IEEE80211_VHTCAP_C_RX_LDPC;
+			} else {
+				vap_each->iv_ht_flags &= ~IEEE80211_HTF_LDPC_ENABLED;
+				vap_each->iv_vht_flags &= ~IEEE80211_VHTCAP_C_RX_LDPC;
+				ic->ic_vhtcap.cap_flags &= ~IEEE80211_VHTCAP_C_RX_LDPC;
+				ic->ic_vhtcap_24g.cap_flags &= ~IEEE80211_VHTCAP_C_RX_LDPC;
+			}
+			if ((vap_each->iv_opmode == IEEE80211_M_HOSTAP) &&
+				(vap_each->iv_state == IEEE80211_S_RUN)) {
+				ic->ic_beacon_update(vap_each);
+			}
+			/* Force a reassociation to use the new LDPC setting */
+			ieee80211_wireless_reassoc(vap_each, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_STBC:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+
+		/* Update the HT capabilities */
+		TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+			if (value) {
+				vap_each->iv_ht_flags |= IEEE80211_HTF_STBC_ENABLED;
+				vap_each->iv_vht_flags |= IEEE80211_VHTCAP_C_TX_STBC;
+			} else {
+				vap_each->iv_ht_flags &= ~IEEE80211_HTF_STBC_ENABLED;
+				vap_each->iv_vht_flags &= ~IEEE80211_VHTCAP_C_TX_STBC;
+			}
+			if ((vap_each->iv_opmode == IEEE80211_M_HOSTAP) &&
+				(vap_each->iv_state == IEEE80211_S_RUN)) {
+				ic->ic_beacon_update(vap_each);
+			}
+			/* Force a reassociation to use the new STBC setting */
+			ieee80211_wireless_reassoc(vap_each, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_LDPC_ALLOW_NON_QTN:
+		if (value) {
+			vap->iv_ht_flags |= IEEE80211_HTF_LDPC_ALLOW_NON_QTN;
+		} else {
+			vap->iv_ht_flags &= ~IEEE80211_HTF_LDPC_ALLOW_NON_QTN;
+		}
+		/* Force a reassociation to use the new LDPC setting */
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_TRAINING_COUNT:
+		vap->iv_rate_training_count = value;
+		break;
+	case IEEE80211_PARAM_FIXED_TX_RATE:
+		{
+			int mcs_val, mcs_nss;
+
+			if ((value & IEEE80211_RATE_PREFIX_MASK) == IEEE80211_AC_RATE_PREFIX) {
+				if (!IS_IEEE80211_VHT_ENABLED(ic))
+					return -EINVAL;
+
+				mcs_val = value & IEEE80211_AC_MCS_VAL_MASK;
+				mcs_nss = (value & IEEE80211_AC_MCS_NSS_MASK) >> IEEE80211_11AC_MCS_NSS_SHIFT;
+
+				if (!ieee80211_vht_tx_mcs_is_valid(mcs_val, mcs_nss)) {
+					return -EINVAL;
+				}
+			} else if ((value & IEEE80211_RATE_PREFIX_MASK) == IEEE80211_N_RATE_PREFIX) {
+				mcs_val = (value & 0xFF) & 0x7f;
+				if (!ieee80211_ht_tx_mcs_is_valid(mcs_val)) {
+					return -EINVAL;
+				}
+			}
+			printk("Warning: %s MCS rate is fixed at 0x%08x\n", __func__, value);
+			/* Forward fixed MCS rate configuration to the driver and MuC */
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			/* Remember current configuration in the VAP */
+			vap->iv_mcs_config = value;
+		}
+		break;
+	case IEEE80211_PARAM_SHORT_GI:
+		/* Forward short GI configuration to the driver and MuC */
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+
+		TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+			if (value) {
+				vap_each->iv_ht_flags |= IEEE80211_HTF_SHORTGI_ENABLED;
+				vap_each->iv_vht_flags |= IEEE80211_VHTCAP_C_SHORT_GI_80;
+			} else {
+				vap_each->iv_ht_flags &= ~IEEE80211_HTF_SHORTGI_ENABLED;
+				vap_each->iv_vht_flags &= ~IEEE80211_VHTCAP_C_SHORT_GI_80;
+			}
+
+			if ((vap_each->iv_opmode == IEEE80211_M_HOSTAP) &&
+				(vap_each->iv_state == IEEE80211_S_RUN)) {
+				ic->ic_beacon_update(vap_each);
+			}
+			/* Force a reassociation to use the new SGI setting */
+			ieee80211_wireless_reassoc(vap_each, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_BW_SEL_MUC:
+	case IEEE80211_PARAM_BW_SEL:
+		if ((value != BW_HT160) && (value != BW_HT80) &&
+				(value != BW_HT40) && (value != BW_HT20))
+			return -EINVAL;
+
+		/* update station profile */
+		if ((ic->ic_rf_chipid == CHIPID_DUAL) &&
+				(ic->ic_opmode == IEEE80211_M_STA)) {
+			if (IS_IEEE80211_5G_BAND(ic))
+				vap->iv_5ghz_prof.bw = value;
+			else
+				vap->iv_2_4ghz_prof.bw = value;
+		}
+
+		if ((value > BW_HT40) &&
+				!ieee80211_swfeat_is_supported(SWFEAT_ID_VHT, 1)) {
+			retv = EOPNOTSUPP;
+			break;
+		}
+		/*
+		 * Blocking 40MHZ and 80MHZ in 2.4 band and legacy 11A
+		 * only mode
+		 */
+		if ((value >= BW_HT40) && ((ic->ic_curmode == IEEE80211_MODE_11A) ||
+				(!IS_IEEE80211_11NG(ic) && IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))))
+			return -EOPNOTSUPP;
+
+		ic->ic_max_system_bw = value;
+
+		/* Forward bandwidth configuration to the driver and MuC */
+		ieee80211_change_bw(vap, value, 1);
+
+		ieee80211_start_obss_scan_timer(vap);
+		ic->ic_csw_reason = IEEE80211_CSW_REASON_CONFIG;
+		if (IS_UP_AUTO(vap)) {
+			ieee80211_wireless_reassoc(vap, 0, 1);
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+		} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			ic->ic_set_channel(ic);
+		}
+		break;
+	case IEEE80211_PARAM_PHY_STATS_MODE:
+		if (value != MUC_PHY_STATS_ALTERNATE &&
+		    value != MUC_PHY_STATS_RSSI_RCPI_ONLY &&
+		    value != MUC_PHY_STATS_ERROR_SUM_ONLY) {
+			retv = EINVAL;
+		} else {
+			/* Forward Phy Stats configuration to the driver and MuC */
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			ic->ic_mode_get_phy_stats = value;
+		}
+		break;
+	case IEEE80211_PARAM_FORCE_SMPS:
+		/* Reflect the forced value in the local structures */
+		ieee80211_forcesmps(vap, value);
+		break;
+	case IEEE80211_PARAM_CHANNEL_NOSCAN:
+		g_channel_fixed = value;
+		break;
+	case IEEE80211_PARAM_LINK_LOSS:
+		vap->iv_link_loss_enabled = value;
+		break;
+	case IEEE80211_PARAM_BCN_MISS_THR:
+		if (!vap->iv_bss)
+			return -EFAULT;
+
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			if (value < 1) {
+				printk(KERN_ERR "%s: bad value %d, "
+						"beacon miss threshold must be >= 1\n",
+						dev->name, value);
+				break;
+			}
+			printk(KERN_INFO "%s: set beacon miss threshold to %d\n",
+					dev->name, value);
+			vap->iv_bcn_miss_thr = value;
+
+			/* Recalculate swbmiss period with new value */
+			vap->iv_swbmiss_period =
+					IEEE80211_TU_TO_JIFFIES(vap->iv_bss->ni_intval *
+					vap->iv_bcn_miss_thr);
+
+			if (vap->iv_swbmiss_warnings)
+				vap->iv_swbmiss_period /= (vap->iv_swbmiss_warnings + 1);
+		} else {
+			printk(KERN_ERR "%s: can't set beacon miss threshold for non STA\n",
+					dev->name);
+		}
+		break;
+	case IEEE80211_PARAM_IMPLICITBA:
+		if (vap->iv_implicit_ba != (value & 0xFFFF))
+		{
+			printk("New implicit BA value (%04X) - remove all associations\n", (value & 0xFFFF));
+			vap->iv_implicit_ba = value & 0xFFFF;
+			ieee80211_wireless_reassoc(vap, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_SHOWMEM:
+		if (value == 0x1) {
+#ifdef WLAN_MALLOC_FREE_TOT_DEBUG
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+		uint32_t refcnt = atomic_read(&vap->iv_dev->refcnt);
+#else
+		uint32_t refcnt = netdev_refcnt_read(vap->iv_dev);
+#endif
+
+			printk("WLAN bytes: allocated=%d freed=%d balance=%d\n"
+				"     times: allocated=%d freed=%d\n"
+				"     nodes: allocated=%d freed=%d current=%d refs=%u\n"
+				"     tmp:   allocated=%d freed=%d\n",
+				g_wlan_tot_alloc, g_wlan_tot_free, g_wlan_balance,
+				g_wlan_tot_alloc_cnt, g_wlan_tot_free_cnt,
+				g_wlan_tot_node_alloc, g_wlan_tot_node_free, ic->ic_node_count,
+				refcnt,
+				g_wlan_tot_node_alloc_tmp, g_wlan_tot_node_free_tmp);
+#else
+			printk("Memory debug statistics disabled\n");
+#endif
+		}
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_ENABLE:
+		vap->iv_rx_amsdu_enable = value;
+		ieee80211_wireless_reassoc(vap, 1, 0);
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_THRESHOLD_CCA:
+		vap->iv_rx_amsdu_threshold_cca = value;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_THRESHOLD_PMBL:
+		vap->iv_rx_amsdu_threshold_pmbl = value;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_PMBL_WF_SP:
+		vap->iv_rx_amsdu_pmbl_wf_sp = value;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_PMBL_WF_LP:
+		vap->iv_rx_amsdu_pmbl_wf_lp = value;
+		break;
+	case IEEE80211_PARAM_CLIENT_REMOVE:
+		printk("Removing clients, not forcing deauth\n");
+		ieee80211_wireless_reassoc(vap, 1, 0);
+		break;
+	case IEEE80211_PARAM_VAP_DBG:
+		vap->iv_debug = value;
+		break;
+	case IEEE80211_PARAM_NODEREF_DBG:
+		ieee80211_node_dbgref_history_dump();
+		break;
+	case IEEE80211_PARAM_GLOBAL_BA_CONTROL:
+		if (vap->iv_ba_control != (value & 0xFFFF))
+		{
+			vap->iv_ba_old_control = vap->iv_ba_control;
+			vap->iv_ba_control = value & 0xFFFF;
+			ieee80211_wireless_ba_change(vap);
+		}
+		break;
+	case IEEE80211_PARAM_NO_SSID_ASSOC:
+		if (value) {
+			vap->iv_qtn_options &= ~IEEE80211_QTN_NO_SSID_ASSOC_DISABLED;
+			printk("Enabling associations with no SSID\n");
+		} else {
+			vap->iv_qtn_options |= IEEE80211_QTN_NO_SSID_ASSOC_DISABLED;
+			printk("Disabling associations with no SSID\n");
+		}
+		break;
+	case IEEE80211_PARAM_CONFIG_TXPOWER:
+		retv = apply_tx_power(vap, value, IEEE80211_BKUP_TXPOWER_NORMAL);
+		if (retv) {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_INITIATE_TXPOWER_TABLE:
+		retv = apply_tx_power(vap, value, IEEE80211_INIT_TXPOWER_TABLE);
+		if (retv) {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_CONFIG_BW_TXPOWER:
+		retv = ieee80211_set_bw_txpower(vap, value);
+		if (!retv) {
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_DUMP_CONFIG_TXPOWER:
+		if (value == 1) {
+			ieee80211_dump_tx_power(ic);
+		} else {
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		}
+		break;
+	case IEEE80211_PARAM_TPC:
+		if (ic->ic_flags & IEEE80211_F_DOTH) {
+			int			last_tpc_state;
+			struct ieee80211vap	*each_vap;
+
+			value = !!value;
+			last_tpc_state = !!(ic->ic_flags_ext & IEEE80211_FEXT_TPC);
+			if (value != last_tpc_state) {
+				if (value) {
+					printk("Enable tpc feature\n");
+					ic->ic_flags_ext |= IEEE80211_FEXT_TPC;
+					ic->ic_pppc_select_enable_backup = ic->ic_pppc_select_enable;
+					if (ic->ic_pppc_select_enable) {
+						ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PPPC_SELECT, 0, NULL, 0);
+						ic->ic_pppc_select_enable = 0;
+					}
+				} else {
+					printk("Disable tpc feature\n");
+					ic->ic_flags_ext &= ~IEEE80211_FEXT_TPC;
+					ieee80211_tpc_query_stop(&ic->ic_tpc_query_info);
+					ic->ic_pppc_select_enable = ic->ic_pppc_select_enable_backup;
+					if (ic->ic_pppc_select_enable)
+						ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_PPPC_SELECT, 1, NULL, 0);
+				}
+				TAILQ_FOREACH(each_vap, &ic->ic_vaps, iv_next) {
+					if ((each_vap->iv_opmode == IEEE80211_M_HOSTAP) && (each_vap->iv_state == IEEE80211_S_RUN)) {
+						if (!value) {
+							ieee80211_ppqueue_remove_with_cat_action(&each_vap->iv_ppqueue,
+									IEEE80211_ACTION_CAT_SPEC_MGMT,
+									IEEE80211_ACTION_S_TPC_REPORT);
+						}
+						ic->ic_beacon_update(vap);
+					}
+				}
+			}
+		} else {
+			retv = EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_CONFIG_TPC_INTERVAL:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC))
+			retv = ieee80211_tpc_query_config_interval(&ic->ic_tpc_query_info, value);
+		else
+			retv = EOPNOTSUPP;
+		break;
+	case IEEE80211_PARAM_TPC_QUERY:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			if (value == 0) {
+				ieee80211_tpc_query_stop(&ic->ic_tpc_query_info);
+			} else {
+				ieee80211_tpc_query_start(&ic->ic_tpc_query_info);
+			}
+		}
+		else
+			retv = EOPNOTSUPP;
+		break;
+	case IEEE80211_PARAM_CONFIG_REGULATORY_TXPOWER:
+		retv = set_regulatory_tx_power(ic, value);
+		break;
+	case IEEE80211_PARAM_SKB_LIST_MAX:
+		{
+			struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
+			recycle_list->max = (int)value;
+		}
+		break;
+	case IEEE80211_PARAM_DFS_FAST_SWITCH:
+		if (value) {
+			retv = ieee80211_ioctl_set_dfs_fast_switch(ic);
+		} else {
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_DFS_FAST_SWITCH;
+		}
+		break;
+	case IEEE80211_PARAM_SCAN_NO_DFS:
+		if (value) {
+			ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_NO_DFS;
+		} else {
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_SCAN_NO_DFS;
+		}
+		break;
+	case IEEE80211_PARAM_11N_40_ONLY_MODE:
+		if (value && !(ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40)) {
+			retv = EINVAL;
+			break;
+		}
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		if ((vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+			(vap->iv_state == IEEE80211_S_RUN)) {
+			ic->ic_beacon_update(vap);
+		}
+		break;
+	case IEEE80211_PARAM_REGULATORY_REGION:
+		{
+			u_int16_t iso_code = CTRY_DEFAULT;
+			union {
+				char		as_chars[ 4 ];
+				u_int32_t	as_u32;
+			} region;
+
+			region.as_u32 = (u_int32_t) value;
+			region.as_chars[ 3 ] = '\0';
+
+			retv = ieee80211_country_string_to_countryid( region.as_chars, &iso_code );
+			if (retv == 0) {
+				ic->ic_country_code = iso_code;
+				ic->ic_mark_dfs_channels(ic, ic->ic_nchans, ic->ic_channels);
+				ic->ic_mark_weather_radar_chans(ic, ic->ic_nchans, ic->ic_channels);
+				retv = ieee80211_region_to_operating_class(ic, region.as_chars);
+				if (retv < 0)
+					vap->iv_flags_ext |= IEEE80211_FEXT_TDLS_CS_PROHIB;
+			}
+		}
+		break;
+	case IEEE80211_PARAM_SAMPLE_RATE:
+		ic->ic_sample_rate = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_BA_MAX_WIN_SIZE:
+		if (value < IEEE80211_MAX_BA_WINSIZE) {
+			vap->iv_max_ba_win_size = value;
+		} else {
+			vap->iv_max_ba_win_size = IEEE80211_MAX_BA_WINSIZE;
+		}
+		ieee80211_wireless_reassoc(vap, 0, 0);
+		break;
+#ifdef QSCS_ENABLED
+	case IEEE80211_PARAM_SCS:
+		if (!ieee80211_should_disable_scs(ic))
+			return -EOPNOTSUPP;
+
+		if (ieee80211_param_scs_set(dev, vap, value) == 0) {
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			ic->ic_vopt.scs = value & IEEE80211_SCS_VALUE_M;
+		} else {
+			retv = EINVAL;
+		}
+		break;
+#endif /* QSCS_ENABLED */
+	case IEEE80211_PARAM_MIN_DWELL_TIME_ACTIVE:
+		ic->ic_mindwell_active = (u_int16_t) value;
+		break;
+	case IEEE80211_PARAM_MIN_DWELL_TIME_PASSIVE:
+		ic->ic_mindwell_passive = (u_int16_t) value;
+		break;
+	case IEEE80211_PARAM_MAX_DWELL_TIME_ACTIVE:
+		ic->ic_maxdwell_active = (u_int16_t) value;
+		break;
+	case IEEE80211_PARAM_MAX_DWELL_TIME_PASSIVE:
+		ic->ic_maxdwell_passive = (u_int16_t) value;
+		break;
+#ifdef QTN_BG_SCAN
+	case IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_ACTIVE:
+		if (value) {
+			ic->ic_qtn_bgscan.dwell_msecs_active = (u_int16_t) value;
+		}
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_PASSIVE:
+		if (value) {
+			ic->ic_qtn_bgscan.dwell_msecs_passive = (u_int16_t) value;
+		}
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_ACTIVE:
+		if (value) {
+			ic->ic_qtn_bgscan.duration_msecs_active = (u_int16_t) value;
+		}
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_FAST:
+		if (value) {
+			ic->ic_qtn_bgscan.duration_msecs_passive_fast = (u_int16_t) value;
+		}
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_NORMAL:
+		if (value) {
+			ic->ic_qtn_bgscan.duration_msecs_passive_normal = (u_int16_t) value;
+		}
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_SLOW:
+		if (value) {
+			ic->ic_qtn_bgscan.duration_msecs_passive_slow = (u_int16_t) value;
+		}
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_FAST:
+		ic->ic_qtn_bgscan.thrshld_fat_passive_fast = (u_int16_t) value;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_NORMAL:
+		ic->ic_qtn_bgscan.thrshld_fat_passive_normal = (u_int16_t) value;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DEBUG:
+		ic->ic_qtn_bgscan.debug_flags = (u_int16_t) value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+#endif /*QTN_BG_SCAN */
+	case IEEE80211_PARAM_QTN_BCM_WAR:
+		if (value)
+			ic->ic_flags_qtn |= IEEE80211_QTN_BCM_WAR;
+		else
+			ic->ic_flags_qtn &= ~IEEE80211_QTN_BCM_WAR;
+		break;
+	case IEEE80211_PARAM_ALT_CHAN:
+		retv = ieee80211_ioctl_set_alt_chan(ic, (uint8_t) value);
+		break;
+	case IEEE80211_PARAM_GI_SELECT:
+		ic->ic_gi_select_enable = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_RADAR_NONOCCUPY_PERIOD:
+		if ((value >= IEEE80211_MIN_NON_OCCUPANCY_PERIOD) &&
+		    (value <= IEEE80211_MAX_NON_OCCUPANCY_PERIOD)) {
+			ic->ic_non_occupancy_period = value * HZ;
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_MC_LEGACY_RATE:
+		{
+			u_int8_t mc_rate = 0;
+			static const char *idx2lr[] = {"6", "9", "12", "18", "24", "36", "48", "54", "1", "2", "5.5", "11"};
+			int i;
+			for (i = 0; i < 4; i++) {
+				u_int8_t rate = (value >> i*8) & 0xFF;
+				if (rate > 11) {
+					retv = EINVAL;
+					break;
+				}
+			}
+			if (retv != EINVAL) {
+				mc_rate = (value >> 24) & 0xFF;
+				printk("Forcing multicast rate to %sMbps\n", idx2lr[mc_rate]);
+				vap->iv_mc_legacy_rate = value;
+				ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+				if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+					ic->ic_beacon_update(vap);
+				}
+			}
+		}
+		break;
+	case IEEE80211_PARAM_RADAR_NONOCCUPY_ACT_SCAN:
+		if (value)
+			ic->ic_flags_qtn |= IEEE80211_QTN_RADAR_SCAN_START;
+		else
+			ic->ic_flags_qtn &= ~IEEE80211_QTN_RADAR_SCAN_START;
+		break;
+	case IEEE80211_PARAM_FWD_UNKNOWN_MC:
+		vap->iv_forward_unknown_mc = !!value;
+		break;
+	case IEEE80211_PARAM_MC_TO_UC:
+		if (value < IEEE80211_QTN_MC_TO_UC_LEGACY || value > IEEE80211_QTN_MC_TO_UC_ALWAYS)
+			return -EINVAL;
+		vap->iv_mc_to_uc = value;
+		break;
+	case IEEE80211_PARAM_ENABLE_BC_IOT_WAR:
+		if (value < 0)
+			return -EINVAL;
+		vap->enable_iot_sts_war = value;
+		break;
+	case IEEE80211_PARAM_BCST_4:
+		vap->iv_reliable_bcst = !!value;
+		break;
+	case IEEE80211_PARAM_AP_FWD_LNCB:
+		vap->iv_ap_fwd_lncb = !!value;
+		break;
+	case IEEE80211_PARAM_PPPC_SELECT:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			retv = EOPNOTSUPP;
+		} else {
+			ic->ic_pppc_select_enable = value;
+			ic->ic_vopt.pppc = value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		}
+		break;
+	case IEEE80211_PARAM_PPPC_STEP:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			retv = EOPNOTSUPP;
+		} else {
+			if ((value >= QTN_SEL_PPPC_MAX_STEPS) || (value < 0)) {
+				return -EINVAL;
+			}
+			ic->ic_pppc_step_db = value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			ieee80211_wireless_reassoc(vap, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_EMI_POWER_SWITCHING:
+		ic->ic_emi_power_switch_enable = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_ASSOC_LIMIT:
+		retv = ieee80211_ioctl_set_assoc_limit(ic, vap, value);
+		break;
+	case IEEE80211_PARAM_BSS_ASSOC_LIMIT:
+		retv = ieee80211_ioctl_set_bss_grp_assoc_limit(ic, vap, value);
+		break;
+	case IEEE80211_PARAM_ASSOC_HISTORY:
+		{
+			int i;
+
+			memset(&ic->ic_assoc_history.ah_macaddr_table[0][0],
+				0, sizeof(ic->ic_assoc_history.ah_macaddr_table));
+			for(i = 0; i < IEEE80211_MAX_ASSOC_HISTORY; i++) {
+				ic->ic_assoc_history.ah_timestamp[i] = 0;
+			}
+		}
+		break;
+	case IEEE80211_PARAM_CSW_RECORD:
+		memset(&ic->ic_csw_record, 0, sizeof(ic->ic_csw_record));
+		break;
+	case IEEE80211_PARAM_IOT_TWEAKS:
+		qtn_mproc_sync_shared_params_get()->iot_tweaks = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_FAST_REASSOC:
+		if (value) {
+			ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_FAST_REASS;
+		} else {
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_SCAN_FAST_REASS;
+		}
+		break;
+	case IEEE80211_PARAM_CSA_FLAG:
+		ic->ic_csa_flag = value;
+		break;
+	case IEEE80211_PARAM_DEF_MATRIX:
+		ic->ic_def_matrix = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_MODE:
+		if (ic->fixed_legacy_rate_mode)
+			return -EOPNOTSUPP;
+
+		/* update 5ghz station profile */
+		if ((ic->ic_rf_chipid == CHIPID_DUAL) &&
+				(ic->ic_opmode == IEEE80211_M_STA))
+			vap->iv_5ghz_prof.vht = value;
+
+		if (chip_id() >= QTN_BBIC_11AC) {
+			ic->ic_csw_reason = IEEE80211_CSW_REASON_CONFIG;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			ieee80211_wireless_reassoc(vap, 0, 1);
+
+			/* On AP, change channel to take new bw into effect by BB */
+			/* On STA, reassociation would result into re-scannning so not required */
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+				ic->ic_set_channel(ic);
+			}
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_ENABLE_11AC:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		vap->iv_11ac_enabled = value;
+		break;
+	case IEEE80211_PARAM_FIXED_11AC_MU_TX_RATE:
+		retv = ieee80211_wireless_set_mu_tx_rate(vap, ic, value);
+		break;
+	case IEEE80211_PARAM_FIXED_11AC_TX_RATE:
+		if (IS_IEEE80211_DUALBAND_VHT_ENABLED(ic)) {
+			int mcs_to_muc = ieee80211_11ac_mcs_format(value, 40);
+			if (mcs_to_muc < 0) {
+				retv = EINVAL;
+				break;
+			}
+			/* Forward fixed MCS rate configuration to the driver and MuC */
+			ieee80211_param_to_qdrv(vap, param, mcs_to_muc, NULL, 0);
+			/* Remember current configuration in the VAP */
+			vap->iv_mcs_config = (value & 0x0FFFFFF) | IEEE80211_AC_RATE_PREFIX;
+		} else {
+			retv = EINVAL;
+		}
+
+		break;
+	case IEEE80211_PARAM_VAP_PRI:
+		retv = ieee80211_ioctl_set_vap_pri(ic, vap, value);
+		break;
+	case IEEE80211_PARAM_AIRFAIR:
+		if (value <= 1) {
+			ic->ic_airfair = value;
+			ic->ic_vopt.airfair = value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_VAP_PRI_WME:
+		printk("Change auto WME param based on VAP priority from %u to %u\n",
+				ic->ic_vap_pri_wme, value);
+		ic->ic_vap_pri_wme = value;
+		IEEE80211_LOCK_IRQ(ic);
+		ieee80211_adjust_wme_by_vappri(ic);
+		IEEE80211_UNLOCK_IRQ(ic);
+		break;
+	case IEEE80211_PARAM_TDLS_DISC_INT:
+		if (ieee80211_tdls_cfg_disc_int(vap, value) != 0) {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_TDLS_PATH_SEL_WEIGHT:
+		vap->tdls_path_sel_weight = value;
+		break;
+	case IEEE80211_PARAM_TDLS_MODE:
+		vap->tdls_path_sel_prohibited = !!value;
+		if (vap->tdls_path_sel_prohibited == 1)
+			ieee80211_tdls_free_peer_ps_info(vap);
+		break;
+	case IEEE80211_PARAM_TDLS_TIMEOUT_TIME:
+		vap->tdls_timeout_time = value;
+		if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0)
+			ieee80211_tdls_update_link_timeout(vap);
+		break;
+	case IEEE80211_PARAM_TDLS_TRAINING_PKT_CNT:
+		vap->tdls_training_pkt_cnt = value;
+		break;
+	case IEEE80211_PARAM_TDLS_PATH_SEL_PPS_THRSHLD:
+		vap->tdls_path_sel_pps_thrshld = value;
+		break;
+	case IEEE80211_PARAM_TDLS_PATH_SEL_RATE_THRSHLD:
+		vap->tdls_path_sel_rate_thrshld = value;
+		break;
+	case IEEE80211_PARAM_TDLS_VERBOSE:
+		if (!vap->iv_bss)
+			return -EINVAL;
+		vap->tdls_verbose = value;
+		ic->ic_set_tdls_param(vap->iv_bss, IOCTL_TDLS_DBG_LEVEL, value);
+		break;
+	case IEEE80211_PARAM_TDLS_MIN_RSSI:
+		vap->tdls_min_valid_rssi = value;
+		break;
+	case IEEE80211_PARAM_TDLS_UAPSD_INDICAT_WND:
+		if (vap->tdls_uapsd_indicat_wnd != value) {
+			vap->tdls_uapsd_indicat_wnd = value;
+			ieee80211_tdls_update_uapsd_indicication_windows(vap);
+		}
+		break;
+	case IEEE80211_PARAM_TDLS_SWITCH_INTS:
+		vap->tdls_switch_ints = value;
+		break;
+	case IEEE80211_PARAM_TDLS_RATE_WEIGHT:
+		vap->tdls_phy_rate_wgt = value;
+		break;
+	case IEEE80211_PARAM_TDLS_CS_MODE:
+		if (value == 2) {
+			vap->iv_flags_ext |= IEEE80211_FEXT_TDLS_CS_PASSIVE;
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_CS_PROHIB;
+		} else if (value == 1){
+			vap->iv_flags_ext |= IEEE80211_FEXT_TDLS_CS_PROHIB;
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_CS_PASSIVE;
+		} else {
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_CS_PROHIB;
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_CS_PASSIVE;
+		}
+		break;
+	case IEEE80211_PARAM_TDLS_OFF_CHAN:
+		vap->tdls_fixed_off_chan = value;
+		break;
+	case IEEE80211_PARAM_TDLS_OFF_CHAN_BW:
+		if ((value == BW_INVALID) || (value == BW_HT20) ||
+				(value == BW_HT40) || (value == BW_HT80) ||
+					(value == BW_HT160))
+			vap->tdls_fixed_off_chan_bw = value;
+		else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_TDLS_NODE_LIFE_CYCLE:
+		vap->tdls_node_life_cycle = value;
+		if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0)
+			ieee80211_tdls_start_node_expire_timer(vap);
+		break;
+	case IEEE80211_PARAM_TDLS_OVER_QHOP_ENABLE:
+		vap->tdls_over_qhop_en = value;
+		break;
+	case IEEE80211_PARAM_OCAC:
+	case IEEE80211_PARAM_SDFS:
+		if (ieee80211_param_ocac_set(dev, vap, value) == 0) {
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			ic->ic_vopt.ocac = value;
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_DEACTIVE_CHAN_PRI:
+		retv = ieee80211_ioctl_setchan_inactive_pri(ic, vap, value);
+		break;
+	case IEEE80211_PARAM_SPECIFIC_SCAN:
+		if (!!value)
+			vap->iv_flags_ext |= IEEE80211_FEXT_SPECIFIC_SCAN;
+		else
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_SPECIFIC_SCAN;
+		break;
+	case IEEE80211_PARAM_SCAN_TBL_LEN_MAX:
+		if (value != ic->ic_scan_tbl_len_max) {
+			ic->ic_scan_tbl_len_max = value;
+			ieee80211_scan_flush(ic);
+		}
+		break;
+	case IEEE80211_PARAM_TRAINING_START:
+		ieee80211_training_restart_by_node_idx(vap, value);
+		break;
+	case IEEE80211_PARAM_SPEC_COUNTRY_CODE:
+		{
+			uint16_t iso_code = CTRY_DEFAULT;
+			union {
+				char as_chars[4];
+				uint32_t as_u32;
+			} region;
+
+			region.as_u32 = (uint32_t)value;
+			region.as_chars[3] = '\0';
+
+			retv = ieee80211_country_string_to_countryid(region.as_chars, &iso_code);
+			if (retv == 0) {
+				ic->ic_spec_country_code = iso_code;
+				ieee80211_build_countryie(ic);
+			} else {
+				retv = -retv;
+			}
+		}
+		break;
+        case IEEE80211_PARAM_VCO_LOCK_DETECT_MODE:
+		sp->vco_lock_detect_mode = value;
+		break;
+	case IEEE80211_PARAM_CONFIG_PMF:
+		/* enable/disable PMF per VAP */
+		if((value == IEEE80211_MFP_NO_PROTECT) ||
+				(value == IEEE80211_MFP_PROTECT_CAPABLE) ||
+				(value == IEEE80211_MFP_PROTECT_REQUIRE)) {
+			vap->iv_pmf = value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_SCAN_CANCEL:
+		if (value) {
+			/* Force canceling immediately */
+			ieee80211_cancel_scan_no_wait(vap);
+		} else {
+			ieee80211_cancel_scan(vap);
+		}
+		break;
+
+	case IEEE80211_PARAM_DSP_DEBUG_LEVEL:
+		DSP_PARAM_SET(debug_level, value);
+		break;
+
+	case IEEE80211_PARAM_DSP_DEBUG_FLAG:
+		DSP_PARAM_SET(debug_flag, value);
+		break;
+
+	case IEEE80211_PARAM_DSP_MU_RANK_CRITERIA:
+		DSP_PARAM_SET(rank_criteria_to_use, value);
+		break;
+
+	case IEEE80211_PARAM_DSP_PRECODING_ALGORITHM:
+		if ( MU_ALLOWED_ALG(value) ) {
+			DSP_PARAM_SET(precoding_algorithm_to_use, value);
+		} else {
+			return -EINVAL;
+		}
+		break;
+
+	case IEEE80211_PARAM_DSP_RANKING_ALGORITHM:
+		if ( MU_ALLOWED_ALG(value) ) {
+			DSP_PARAM_SET(ranking_algorithm_to_use, value);
+		} else {
+			return -EINVAL;
+		}
+		break;
+
+	case IEEE80211_PARAM_INTRA_BSS_ISOLATE:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (value)
+				dev->qtn_flags |= QTN_FLAG_INTRA_BSS_ISOLATE;
+			else
+				dev->qtn_flags &= ~QTN_FLAG_INTRA_BSS_ISOLATE;
+		} else {
+			return -EINVAL;
+		}
+
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_BSS_ISOLATE:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (value)
+				dev->qtn_flags |= QTN_FLAG_BSS_ISOLATE;
+			else
+				dev->qtn_flags &= ~QTN_FLAG_BSS_ISOLATE;
+		} else {
+			return -EINVAL;
+		}
+
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_BF_RX_STS:
+		if ((value > 0) && (value <= (IEEE80211_VHTCAP_RX_STS_4 + 1))) {
+			ic->ic_vhtcap.bfstscap = value - 1;
+			ic->ic_vhtcap_24g.bfstscap = value - 1;
+			TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+				if ((vap->iv_opmode != IEEE80211_M_HOSTAP) || (vap->iv_state != IEEE80211_S_RUN))
+					continue;
+				ic->ic_beacon_update(vap);
+			}
+			retv = 0;
+		} else {
+			retv = -EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_PC_OVERRIDE:
+	{
+		uint16_t pwr_constraint = (value & 0xffff);
+		uint8_t rssi_threshold = ((value >> 16) & 0xff);
+		uint8_t sec_offset = ((value >> 24) & 0xff);
+
+		KASSERT(ic->ic_bsschan != IEEE80211_CHAN_ANYC, ("bss channel not set"));
+
+		/*
+		 * Default mode when value = 1
+		 **/
+		if (pwr_constraint && !rssi_threshold && !sec_offset) {
+			pwr_constraint = PWR_CONSTRAINT_PC_DEF;
+			rssi_threshold = PWR_CONSTRAINT_RSSI_DEF;
+			sec_offset = PWR_CONSTRAINT_OFFSET;
+		}
+		/*
+		 * Hack for ASUS/Broadcomm 3ss client to turn down power
+		 * Made sure this does not affect any other code like tpc/pppc
+		 * by placing code here.
+		 */
+		if (((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE)) &&
+			!((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC))) {
+				if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+					if(value && ic->ic_pco.pco_pwr_constraint_save == PWR_CONSTRAINT_SAVE_INIT) {
+						printk("pwr-cons=%d rssi-thr=%d sec-off=%d\n", pwr_constraint, -rssi_threshold, sec_offset);
+						if(pwr_constraint < ic->ic_bsschan->ic_maxregpower) {
+							ic->ic_pco.pco_pwr_constraint_save = ic->ic_pwr_constraint;
+							ic->ic_pco.pco_rssi_threshold = rssi_threshold;
+							ic->ic_pco.pco_pwr_constraint = pwr_constraint;
+							ic->ic_pco.pco_sec_offset = sec_offset;
+							init_timer(&ic->ic_pco.pco_timer);
+							ic->ic_pco.pco_timer.function = ieee80211_pco_timer_func;
+							ic->ic_pco.pco_timer.data = (unsigned long) vap;
+							ic->ic_pco.pco_timer.expires = jiffies + (5 * HZ);
+							add_timer(&ic->ic_pco.pco_timer);
+						} else {
+							printk("power constraint(%d) >= current channel max regulatory power(%d)\n", pwr_constraint, ic->ic_bsschan->ic_maxregpower);
+							retv = EINVAL;
+						}
+					} else {
+						if (!value && ic->ic_pco.pco_pwr_constraint_save != PWR_CONSTRAINT_SAVE_INIT) {
+							printk("PCO Disabled\n");
+							ic->ic_pco.pco_pwr_constraint = 0;
+							ic->ic_pco.pco_rssi_threshold = 0;
+							ic->ic_pco.pco_sec_offset = 0;
+							ieee80211_pco_timer_func((unsigned long)vap);
+							del_timer(&ic->ic_pco.pco_timer);
+							ic->ic_pwr_constraint = ic->ic_pco.pco_pwr_constraint_save;
+							ic->ic_pco.pco_pwr_constraint_save = PWR_CONSTRAINT_SAVE_INIT;
+						} else {
+							retv = EINVAL;
+						}
+					}
+				} else {
+					retv = EOPNOTSUPP;
+				}
+		} else {
+				printk("power constraint override needs to have TPC disabled\n");
+				retv = EOPNOTSUPP;
+		}
+		break;
+	}
+	case IEEE80211_PARAM_WOWLAN:
+		if (ieee80211_param_wowlan_set(dev, vap, value) == 0) {
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_WDS_MODE:
+		if (ieee80211_is_repeater(ic)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+					"%s can't config WDS mode since it's a"
+					" repeater\n", vap->iv_dev->name);
+			retv = EPERM;
+			break;
+		}
+#ifdef CONFIG_QVSP
+		temp_value = !!IEEE80211_VAP_WDS_IS_RBS(vap);
+
+		ieee80211_vap_set_extdr_flags(vap, value);
+
+		value = !!IEEE80211_VAP_WDS_IS_RBS(vap);
+		if (vap->iv_opmode == IEEE80211_M_WDS && temp_value ^ value)
+			ic->ic_vsp_change_stamode(ic, value);
+#else
+		ieee80211_vap_set_extdr_flags(vap, value);
+
+#endif
+		break;
+	case IEEE80211_PARAM_EXTENDER_ROLE:
+		if (ieee80211_is_repeater(ic)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+					"%s can't config extender role since "
+					"it's a repeater\n", vap->iv_dev->name);
+			retv = EPERM;
+			break;
+		}
+		retv = ieee80211_ioctl_extender_role(ic, vap, value);
+		break;
+	case IEEE80211_PARAM_EXTENDER_MBS_BEST_RSSI:
+		if (ieee80211_swfeat_is_supported(SWFEAT_ID_QHOP, 1))
+			ic->ic_extender_mbs_best_rssi = value;
+		break;
+	case IEEE80211_PARAM_EXTENDER_RBS_BEST_RSSI:
+		if (ieee80211_swfeat_is_supported(SWFEAT_ID_QHOP, 1))
+			ic->ic_extender_rbs_best_rssi = value;
+		break;
+	case IEEE80211_PARAM_EXTENDER_MBS_WGT:
+		if (ieee80211_swfeat_is_supported(SWFEAT_ID_QHOP, 1))
+			ic->ic_extender_mbs_wgt = value;
+		break;
+	case IEEE80211_PARAM_VAP_TX_AMSDU:
+		if (vap->iv_tx_amsdu != value) {
+			vap->iv_tx_amsdu = value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			ieee80211_wireless_reassoc(vap, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_TX_MAXMPDU:
+		if ((value < IEEE80211_VHTCAP_MAX_MPDU_3895) ||
+			(value > IEEE80211_VHTCAP_MAX_MPDU_11454)) {
+			retv = EINVAL;
+		} else if (vap->iv_tx_max_amsdu != value) {
+			vap->iv_tx_max_amsdu = value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		}
+		break;
+	case IEEE80211_PARAM_EXTENDER_RBS_WGT:
+		if (ieee80211_swfeat_is_supported(SWFEAT_ID_QHOP, 1))
+			ic->ic_extender_rbs_wgt = value;
+		break;
+	case IEEE80211_PARAM_EXTENDER_VERBOSE:
+		if (ieee80211_swfeat_is_supported(SWFEAT_ID_QHOP, 1))
+			ic->ic_extender_verbose = value;
+		break;
+        case IEEE80211_PARAM_BB_PARAM:
+                {
+			/* Channel will check bb_param and change default max gain for high band channel */
+                        sp->bb_param = value;
+                }
+                break;
+	 case IEEE80211_PARAM_TQEW_DESCR_LIMIT:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if ((value > 0) && (value <= 100)) {
+				ic->ic_tqew_descr_limit = value;
+				ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			} else {
+				retv = EINVAL;
+			}
+		} else {
+			retv = EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_SWFEAT_DISABLE:
+		ieee80211_ioctl_swfeat_disable(vap, param, value);
+		break;
+	case IEEE80211_PARAM_HS2:
+		if (!ieee80211_swfeat_is_supported(SWFEAT_ID_HS20, 1)) {
+			retv = EOPNOTSUPP;
+			break;
+		}
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+			vap->hs20_enable = value;
+		break;
+	case IEEE80211_PARAM_DGAF_CONTROL:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			vap->disable_dgaf = !!value;
+		} else {
+			return -EINVAL;
+		}
+		break;
+        case IEEE80211_PARAM_11N_AMSDU_CTRL:
+		if (value)
+			ic->ic_flags_qtn |= QTN_NODE_11N_TXAMSDU_OFF;
+		else
+			ic->ic_flags_qtn &= ~QTN_NODE_11N_TXAMSDU_OFF;
+                break;
+	case IEEE80211_PARAM_SCAN_RESULTS_CHECK_INV :
+		if (value > 0 && value != ic->ic_scan_results_check) {
+			ic->ic_scan_results_check = value;
+			mod_timer(&ic->ic_scan_results_expire,
+					jiffies + ic->ic_scan_results_check * HZ);
+		}
+		break;
+	case IEEE80211_PARAM_FLUSH_SCAN_ENTRY:
+		if (value)
+			ieee80211_scan_flush(ic);
+		break;
+	case IEEE80211_PARAM_VHT_OPMODE_BW:
+		ieee80211_send_vht_opmode_to_all(ic, (uint8_t)value);
+		break;
+	case IEEE80211_PARAM_PROXY_ARP:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			vap->proxy_arp = !!value;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_QTN_HAL_PM_CORRUPT_DEBUG:
+#ifdef QTN_HAL_PM_CORRUPT_DEBUG
+		if (value)
+			sp->qtn_hal_pm_corrupt_debug = 1;
+		else
+			sp->qtn_hal_pm_corrupt_debug = 0;
+#else
+		sp->qtn_hal_pm_corrupt_debug = 0;
+		printk("pm_corrupt_debug can't be activated when QTN_HAL_PM_CORRUPT_DEBUG "
+				"is undefined\n");
+#endif
+		break;
+	case IEEE80211_PARAM_L2_EXT_FILTER:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			return ic->ic_set_l2_ext_filter(vap, value);
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_L2_EXT_FILTER_PORT:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+			return ic->ic_set_l2_ext_filter_port(vap, value);
+		else
+			return -EINVAL;
+		break;
+	case IEEE80211_PARAM_ENABLE_RX_OPTIM_STATS:
+		if (value >= 0) {
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		} else {
+			retv = EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_SET_UNICAST_QUEUE_NUM:
+		topaz_congest_set_unicast_queue_count(value);
+                break;
+	case IEEE80211_PARAM_MRC_ENABLE:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_OBSS_EXEMPT_REQ:
+		if (ic->ic_opmode == IEEE80211_M_STA) {
+			if (value) {
+				vap->iv_coex |= WLAN_20_40_BSS_COEX_OBSS_EXEMPT_REQ;
+				ieee80211_send_20_40_bss_coex(vap);
+			} else {
+				vap->iv_coex &= ~WLAN_20_40_BSS_COEX_OBSS_EXEMPT_REQ;
+			}
+		} else
+			retv = EOPNOTSUPP;
+		break;
+	case IEEE80211_PARAM_OBSS_TRIGG_SCAN_INT:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			ic->ic_obss_ie.obss_trigger_interval = value;
+			ic->ic_beacon_update(vap);
+		} else
+			retv = EOPNOTSUPP;
+		break;
+	case IEEE80211_PARAM_PREF_BAND:
+		vap->iv_pref_band = value;
+		break;
+	case IEEE80211_PARAM_BW_2_4GHZ:
+		if ((value != BW_HT160) && (value != BW_HT80) &&
+				(value != BW_HT40) && (value != BW_HT20))
+			return -EINVAL;
+
+		/* update 2.4ghz station profile */
+		if ((ic->ic_rf_chipid == CHIPID_DUAL) &&
+				(ic->ic_opmode == IEEE80211_M_STA))
+			vap->iv_2_4ghz_prof.bw = value;
+
+		/* check if phymode is also in 2.4ghz mode then set bw */
+		if (IS_IEEE80211_24G_BAND(ic)) {
+			/* Blocking 40MHZ in 2.4 11G only mode */
+			if ((value >= BW_HT40) && IEEE80211_IS_11G(ic))
+				return -EOPNOTSUPP;
+
+			ic->ic_max_system_bw = value;
+
+			/* Forward bandwidth configuration to the driver and MuC */
+			ieee80211_change_bw(vap, value, 1);
+
+			ieee80211_start_obss_scan_timer(vap);
+			ic->ic_csw_reason = IEEE80211_CSW_REASON_CONFIG;
+			if (IS_UP_AUTO(vap)) {
+				ieee80211_wireless_reassoc(vap, 0, 1);
+				ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+			}
+		}
+		break;
+	case IEEE80211_PARAM_ALLOW_VHT_TKIP:
+		if (!IEEE80211_IS_TKIP_ALLOWED(ic))
+			return -EOPNOTSUPP;
+
+		TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+			vap_each->allow_tkip_for_vht = value;
+			if ((vap_each->iv_opmode == IEEE80211_M_HOSTAP) &&
+				(vap_each->iv_state == IEEE80211_S_RUN)) {
+				ic->ic_beacon_update(vap_each);
+			}
+
+			ieee80211_wireless_reassoc(vap_each, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_VHT_OPMODE_NOTIF:
+		ic->ic_vht_opmode_notif = (uint16_t)value;
+		TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+			if ((vap_each->iv_opmode == IEEE80211_M_HOSTAP) &&
+				(vap_each->iv_state == IEEE80211_S_RUN)) {
+				ic->ic_beacon_update(vap_each);
+			}
+		}
+		break;
+	case IEEE80211_PARAM_USE_NON_HT_DUPLICATE_MU:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		if (ic->use_non_ht_duplicate_for_mu != value) {
+			ieee80211_wireless_reassoc(vap, 0, 1);
+		}
+
+		ic->use_non_ht_duplicate_for_mu = value;
+		break;
+	case IEEE80211_PARAM_QTN_BLOCK_BSS:
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s block state is from %d to %d\n",
+			vap->iv_dev->name, vap->is_block_all_assoc, value);
+
+		vap->is_block_all_assoc = !!value;
+		break;
+	case IEEE80211_PARAM_VHT_2_4GHZ:
+		if (value)
+			ic->ic_flags_ext |= IEEE80211_FEXT_24GVHT;
+		else
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_24GVHT;
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_VHT_2_4GHZ, value, NULL, 0);
+
+		TAILQ_FOREACH(vap_each, &ic->ic_vaps, iv_next)
+			ieee80211_wireless_reassoc(vap_each, 0, 0);
+		break;
+	case IEEE80211_PARAM_BEACONING_SCHEME:
+		value = value ? QTN_BEACONING_SCHEME_1 : QTN_BEACONING_SCHEME_0;
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP && ic->ic_beaconing_scheme != value) {
+			if (ic->ic_pm_state[QTN_PM_CURRENT_LEVEL] < BOARD_PM_LEVEL_DUTY) {
+				ieee80211_wireless_reassoc_all_vaps(ic);
+				ic->ic_pm_reason = IEEE80211_PM_LEVEL_BCN_SCHEME_CHANGED;
+				ieee80211_pm_queue_work_custom(ic, BOARD_PM_WLAN_AP_IDLE_AFTER_BEACON_SCHEME);
+			}
+
+			if (ic->ic_set_beaconing_scheme(vap, param, value) < 0) {
+				retv = -EINVAL;
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG, "%s: Beaconing scheme "
+						"update to value [%d] failed\n", __func__, value);
+			}
+		}
+		break;
+#if defined(QBMPS_ENABLE)
+	case IEEE80211_PARAM_STA_BMPS:
+		retv = ieee80211_wireless_set_sta_bmps(vap, ic, value);
+		break;
+#endif
+	case IEEE80211_PARAM_40MHZ_INTOLERANT:
+		ieee80211_wireless_set_40mhz_intolerant(vap, value);
+		break;
+	case IEEE80211_PARAM_DISABLE_TX_BA:
+		vap->tx_ba_disable = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		printk("%s: TX Block Ack establishment disable configured to %u\n",dev->name, value);
+		ieee80211_wireless_reassoc(vap, 0, 0);
+		break;
+	case IEEE80211_PARAM_DECLINE_RX_BA:
+		vap->rx_ba_decline = value;
+		printk("%s: RX Block Ack decline configured to %u\n",dev->name, value);
+		ieee80211_wireless_reassoc(vap, 0, 0);
+		break;
+	case IEEE80211_PARAM_VAP_STATE:
+		vap->iv_vap_state = !!value;
+		break;
+	case IEEE80211_PARAM_TX_AIRTIME_CONTROL:
+		ic->ic_tx_airtime_control(vap, value);
+		break;
+	case IEEE80211_PARAM_OSEN:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			vap->iv_osen = value;
+			if (vap->iv_flags & IEEE80211_F_WPA)
+				retv = ENETRESET;
+		} else {
+			retv = EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_OBSS_SCAN:
+		if (ic->ic_obss_scan_enable != !!value) {
+			ic->ic_obss_scan_enable = !!value;
+			if (ic->ic_obss_scan_enable)
+				ieee80211_start_obss_scan_timer(vap);
+			else
+				del_timer_sync(&ic->ic_obss_timer);
+		}
+		break;
+	case IEEE80211_PARAM_SHORT_SLOT:
+		if (value) {
+			ic->ic_caps |= IEEE80211_C_SHSLOT;
+			ic->ic_flags |= IEEE80211_F_SHSLOT;
+		} else {
+			ic->ic_caps &= ~IEEE80211_C_SHSLOT;
+			ic->ic_flags &= ~IEEE80211_F_SHSLOT;
+		}
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_BG_PROTECT:
+		if (ic->ic_rf_chipid == CHIPID_DUAL
+				&& IS_IEEE80211_24G_BAND(ic)) {
+			if (value) {
+				ic->ic_flags_ext |= IEEE80211_FEXT_BG_PROTECT;
+			} else {
+				ic->ic_flags_ext &= ~IEEE80211_FEXT_BG_PROTECT;
+				ic->ic_set_11g_erp(vap, 0);
+			}
+			ieee80211_wireless_reassoc(vap, 0, 1);
+		} else {
+			retv = EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_11N_PROTECT:
+		if (value) {
+			ic->ic_flags_ext |= IEEE80211_FEXT_11N_PROTECT;
+		} else {
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_11N_PROTECT;
+			if (ic->ic_local_rts &&
+				(vap->iv_opmode == IEEE80211_M_STA ||
+				vap->iv_opmode == IEEE80211_M_WDS)) {
+				ic->ic_local_rts = 0;
+				ic->ic_use_rtscts(ic);
+			}
+		}
+		ieee80211_wireless_reassoc(vap, 0, 1);
+		break;
+	case IEEE80211_PARAM_MU_NDPA_BW_SIGNALING_SUPPORT:
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		ic->rx_bws_support_for_mu_ndpa = value;
+		break;
+	case IEEE80211_PARAM_WPA_STARTED:
+	case IEEE80211_PARAM_HOSTAP_STARTED:
+		ic->hostap_wpa_state = value;
+		break;
+	case IEEE80211_PARAM_BSS_GROUP_ID:
+		retv = ieee80211_ioctl_set_bss_grpid(ic, vap, value);
+		break;
+	case IEEE80211_PARAM_BSS_ASSOC_RESERVE:
+		retv = ieee80211_ioctl_set_bss_grp_assoc_reserve(ic, vap, value);
+		break;
+	case IEEE80211_PARAM_MAX_BCAST_PPS:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
+			return -EINVAL;
+		}
+		vap->bcast_pps.max_bcast_pps = value;
+		if (vap->bcast_pps.max_bcast_pps) {
+			vap->bcast_pps.rx_bcast_counter = 0;
+			vap->bcast_pps.rx_bcast_pps_start_time = jiffies + HZ;
+			vap->bcast_pps.tx_bcast_counter = 0;
+			vap->bcast_pps.tx_bcast_pps_start_time = jiffies + HZ;
+		}
+		break;
+	case IEEE80211_PARAM_MAX_BOOT_CAC_DURATION:
+		if ((vap->iv_opmode != IEEE80211_M_HOSTAP)
+			|| ((value > 0) && (value < MIN_CAC_PERIOD))
+			|| ((value > 0) && (!(ic->ic_dfs_is_eu_region())))) {
+			return -EINVAL;
+		}
+		if (ic->ic_set_init_cac_duration) {
+			ic->ic_set_init_cac_duration(ic, value);
+		}
+		/* Start ICAC procedures */
+		if (ic->ic_start_icac_procedure) {
+			ic->ic_start_icac_procedure(ic);
+		}
+
+		break;
+	case IEEE80211_PARAM_RX_BAR_SYNC:
+		ic->ic_rx_bar_sync = value;
+		ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		break;
+	case IEEE80211_PARAM_STOP_ICAC:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (ic->ic_get_init_cac_duration(ic) > 0) {
+				ic->ic_stop_icac_procedure(ic);
+				printk(KERN_DEBUG "ICAC: Aborted ICAC due to set channel request\n");
+			}
+		}
+		break;
+	case IEEE80211_PARAM_STA_DFS_STRICT_MODE:
+		if ((vap->iv_opmode != IEEE80211_M_STA)	|| (!(ic->ic_dfs_is_eu_region()))) {
+			return -EOPNOTSUPP;
+		}
+		if (value) {
+			/* sta_dfs must be enabled to support sta_dfs_strict mode */
+			ic->ic_flags_ext |= IEEE80211_FEXT_MARKDFS;
+		} else {
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_MARKDFS;
+		}
+
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			SCSDBG(SCSLOG_NOTICE, "send qtn DFS report (DFS %s)\n", value ?
+						"Enabled" : "Disabled");
+			ieee80211_send_action_dfs_report(vap->iv_bss);
+		}
+
+		sp->csa_lhost->sta_dfs_strict_mode = !!value;
+		ic->ic_enable_sta_dfs(!!value);
+		ic->ic_set_radar(!!value);
+		ic->sta_dfs_info.sta_dfs_strict_mode = !!value;
+		break;
+	case IEEE80211_PARAM_STA_DFS_STRICT_MEASUREMENT_IN_CAC:
+		if ((vap->iv_opmode != IEEE80211_M_STA) || (!(ic->ic_dfs_is_eu_region()))) {
+			return -EINVAL;
+		}
+		ic->sta_dfs_info.sta_dfs_strict_msr_cac = !!value;
+		break;
+	case IEEE80211_PARAM_STA_DFS_STRICT_TX_CHAN_CLOSE_TIME:
+		if ((value >= STA_DFS_STRICT_TX_CHAN_CLOSE_TIME_MIN)
+			&& (value <= STA_DFS_STRICT_TX_CHAN_CLOSE_TIME_MAX)) {
+			ic->sta_dfs_info.sta_dfs_tx_chan_close_time = value;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_NEIGHBORHOOD_THRSHD:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+
+		ieee80211_set_threshold_of_neighborhood_type(ic, value >> 16, value & 0xFFFF);
+		break;
+	case IEEE80211_PARAM_DFS_CSA_CNT:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+
+		if (value <= 0)
+			return -EINVAL;
+
+		/* Should not longer than Channel Closing Transmission Time (1s) */
+		TAILQ_FOREACH_SAFE(vap_each, &ic->ic_vaps, iv_next, vap_tmp) {
+			if (NULL == vap_each->iv_bss)
+				continue;
+			temp_value = IEEE80211_TU_TO_JIFFIES(vap_each->iv_bss->ni_intval);
+			temp_value *= value;
+			if (temp_value >= HZ)
+				return -EINVAL;
+		}
+
+		ic->ic_dfs_csa_cnt = value;
+		break;
+	case IEEE80211_PARAM_COEX_20_40_SUPPORT:
+		ic->ic_20_40_coex_enable = !!value;
+		break;
+	case IEEE80211_PARAM_SYNC_CONFIG:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EOPNOTSUPP;
+
+		if (value != 0)
+			vap->iv_flags_ext2 |= IEEE80211_FEXT_SYNC_CONFIG;
+		else
+			vap->iv_flags_ext2 &= ~IEEE80211_FEXT_SYNC_CONFIG;
+
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_DBG_LEVEL:
+		ic->ic_autochan_dbg_level = value;
+		break;
+#ifdef CONFIG_NAC_MONITOR
+	case IEEE80211_PARAM_NAC_MONITOR_MODE:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+		}
+		break;
+#endif
+	case IEEE80211_PARAM_OPMODE_BW_SW_EN:
+		ic->ic_opmode_bw_switch_en = value;
+		break;
+	case IEEE80211_PARAM_MAX_DEVICE_BW:
+		ic->ic_max_system_bw = value;
+		break;
+	case IEEE80211_PARAM_VOPT:
+		if ((ic->ic_opmode == IEEE80211_M_HOSTAP) &&
+				(value <= IEEE80211_VOPT_AUTO) &&
+				(value >= IEEE80211_VOPT_DISABLED))
+			ic->ic_vopt.state = value;
+		else
+			return -EOPNOTSUPP;
+		break;
+	case IEEE80211_PARAM_BW_AUTO_SELECT:
+		ic->ic_bw_auto_select = value;
+		break;
+	case IEEE80211_PARAM_CUR_CHAN_CHECK_REQUIRED:
+		ic->ic_chan_is_set = value;
+		break;
+#ifdef CONFIG_QHOP
+	case IEEE80211_PARAM_RBS_MBS_ALLOW_TX_FRMS_IN_CAC:
+		ic->rbs_mbs_dfs_info.rbs_mbs_allow_tx_frms_in_cac= !!value;
+		break;
+	case IEEE80211_PARAM_RBS_DFS_TX_CHAN_CLOSE_TIME:
+		if ((value >= RBS_DFS_TX_CHAN_CLOSE_TIME_MIN)
+			&& (value <= RBS_DFS_TX_CHAN_CLOSE_TIME_MAX)) {
+			ic->rbs_mbs_dfs_info.rbs_dfs_tx_chan_close_time = value;
+		} else {
+			return -EINVAL;
+		}
+		break;
+#endif
+	case IEEE80211_PARAM_AUTOCHAN_CCI_INSTNT:
+		ic->ic_autochan_ranking_params.cci_instnt_factor = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_ACI_INSTNT:
+		ic->ic_autochan_ranking_params.aci_instnt_factor = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_CCI_LONGTERM:
+		ic->ic_autochan_ranking_params.cci_longterm_factor = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_ACI_LONGTERM:
+		ic->ic_autochan_ranking_params.aci_longterm_factor = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_RANGE_COST:
+		ic->ic_autochan_ranking_params.range_factor = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_DFS_COST:
+		ic->ic_autochan_ranking_params.dfs_factor = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_MIN_CCI_RSSI:
+		ic->ic_autochan_ranking_params.min_cochan_rssi = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_MAXBW_MINBENEFIT:
+		ic->ic_autochan_ranking_params.maxbw_minbenefit = value;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_DENSE_CCI_SPAN:
+		ic->ic_autochan_ranking_params.dense_cci_span = value;
+		break;
+	case IEEE80211_PARAM_WEATHERCHAN_CAC_ALLOWED:
+		ic->ic_weachan_cac_allowed = !!value;
+		break;
+	case IEEE80211_PARAM_VAP_TX_AMSDU_11N:
+		if (vap->iv_tx_amsdu_11n != !!value) {
+			vap->iv_tx_amsdu_11n = !!value;
+			ieee80211_param_to_qdrv(vap, param, value, NULL, 0);
+			ieee80211_wireless_reassoc(vap, 0, 1);
+		}
+		break;
+	case IEEE80211_PARAM_COC_MOVE_TO_NONDFS_CHANNEL:
+		ic->ic_coc_move_to_ndfs = value;
+		break;
+	case IEEE80211_PARAM_80211K_NEIGH_REPORT:
+		if (value == 1)
+			IEEE80211_COM_NEIGHREPORT_ENABLE(ic);
+		else
+			IEEE80211_COM_NEIGHREPORT_DISABLE(ic);
+		ieee80211_beacon_update_all(ic);
+		break;
+	case IEEE80211_PARAM_80211V_BTM:
+		if (value == 1)
+			IEEE80211_COM_BTM_ENABLE(ic);
+		else
+			IEEE80211_COM_BTM_DISABLE(ic);
+		ieee80211_beacon_update_all(ic);
+		break;
+	case IEEE80211_PARAM_MOBILITY_DOMAIN:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+		vap->iv_mdid = value;
+		ic->ic_beacon_update(vap);
+		break;
+	case IEEE80211_PARAM_FT_OVER_DS:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+		vap->iv_ft_over_ds = value;
+		break;
+#if defined(PLATFORM_QFDR)
+	case IEEE80211_PARAM_REJECT_AUTH:
+		ic->ic_reject_auth = (uint8_t)value;
+		break;
+	case IEEE80211_PARAM_SCAN_ONLY_FREQ:
+		vap->iv_scan_only_freq = (uint16_t)value;
+		if (vap->iv_scan_only_freq)
+			vap->iv_scan_only_cnt = QFDR_SCAN_ONLY_FREQ_ATTEMPTS;
+		break;
+#endif
+	case IEEE80211_PARAM_FIX_LEGACY_RATE:
+		retv = ieee80211_fix_legacy_rate(vap, param, value);
+		break;
+	default:
+		retv = EOPNOTSUPP;
+		break;
+	}
+
+	if (retv == ENETRESET)
+		retv = IS_UP_AUTO(vap) ? ieee80211_open(vap->iv_dev) : 0;
+
+	return -retv;
+}
+
+/*
+ * ieee80211_pwr_adjust() - work around the short-range association issue by
+ * issuing two commands:
+ * 1) Change the transmit power level (low-gain vs. normal power table)
+ * 2) Change the rx gain and agc (via the IEEE80211_PARAM_PWR_ADJUST ioctl)
+ *
+ * @vap: virtual interface to adjust
+ * @rxgain_state: non-zero selects the low-gain settings, zero restores the
+ *                normal (high-gain) settings
+ *
+ * Returns the apply_tx_power() result on failure (< 0), otherwise the
+ * result of the forwarded setparam ioctl.
+ */
+int ieee80211_pwr_adjust(struct ieee80211vap *vap,  int rxgain_state)
+{
+	int args[2];
+	int retval = 0;
+	/* anychan == 0: encode "any/all channels" into the txpow word */
+	int anychan = 0;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: Enabling %s gain Settings\n",
+			  __func__, rxgain_state ? "Low" : "High");
+
+	if (rxgain_state)
+		retval = apply_tx_power(vap, IEEE80211_TXPOW_ENCODE(anychan),
+					IEEE80211_APPLY_LOWGAIN_TXPOWER);
+	else
+		retval = apply_tx_power(vap, IEEE80211_TXPOW_ENCODE(anychan),
+					IEEE80211_APPLY_TXPOWER_NORMAL);
+
+	if (retval >= 0) {
+		/* Forward the new rx gain state to the driver.  NOTE(review):
+		 * info/wri are passed as NULL - assumes the setparam path does
+		 * not dereference them for this parameter; confirm. */
+		args[0] = IEEE80211_PARAM_PWR_ADJUST;
+		args[1] = rxgain_state;
+		retval = ieee80211_ioctl_setparam(vap->iv_dev, NULL, NULL, (char*)args);
+	}
+
+	return retval;
+}
+EXPORT_SYMBOL(ieee80211_pwr_adjust);
+
+/*
+ * Report the effective max tx power for a channel, taking the low-gain
+ * ("pwr adjust") state into account.
+ *
+ * On entry param[0] packs the request: bits 16+ carry the channel number,
+ * bits 0-15 the parameter id forwarded to qdrv.  On success param[0] is
+ * overwritten with the power value.
+ *
+ * Returns 0 on success, -EINVAL if the channel is out of range, not in the
+ * active channel set, or unknown to findchannel().
+ */
+static int
+ieee80211_ioctl_getparam_txpower(struct ieee80211vap *vap, int *param)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int chan = (param[0] >> 16);
+	int retval = -EINVAL;
+
+	/* Retrieve and see if we are in low gain state and return power accordingly */
+	ieee80211_param_from_qdrv(vap, param[0] & 0xffff, &param[0], NULL, 0);
+
+	if (chan <= IEEE80211_CHAN_MAX && chan > 0 && isset(ic->ic_chan_active, chan)) {
+		const struct ieee80211_channel *c = findchannel(ic, chan, IEEE80211_MODE_AUTO);
+
+		if (c != NULL) {
+			/* qdrv reported 1 => low-gain mode active: return the
+			 * capped low-gain maximum instead of the channel max */
+			if (param[0] == 1)
+				param[0] = IEEE80211_LOWGAIN_TXPOW_MAX;
+			else
+				param[0] = c->ic_maxpower_normal;
+			retval = 0;
+		}
+	}
+
+	return retval;
+}
+/*
+ * Report the per-bandwidth max tx power for a given channel, beamforming
+ * state and spatial-stream count.
+ *
+ * On entry param[0] packs the request:
+ *   bits 24-31: channel number
+ *   bits 20-23: beamforming on/off
+ *   bits 16-19: number of spatial streams (1-based)
+ * On success param[0] is overwritten with three packed 8-bit power values:
+ * 80 MHz in bits 16-23, 40 MHz in bits 8-15, 20 MHz in bits 0-7.
+ *
+ * Returns 0 on success, -EINVAL for an invalid channel / stream / index
+ * combination.
+ */
+static int
+ieee80211_ioctl_getparam_bw_txpower(struct ieee80211vap *vap, int *param)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t chan = (((uint32_t)param[0]) >> 24) & 0xff;
+	uint32_t bf_on = (param[0] >> 20) & 0xf;
+	uint32_t num_ss = (param[0] >> 16) & 0xf;
+	int retval = -EINVAL;
+
+	/* NOTE(review): unlike ieee80211_ioctl_getparam_txpower() there is no
+	 * chan > 0 rejection here; isset() on bit 0 decides - confirm intended. */
+	if (chan <= IEEE80211_CHAN_MAX &&
+			isset(ic->ic_chan_active, chan) &&
+			num_ss <= IEEE80211_QTN_NUM_RF_STREAMS) {
+		const struct ieee80211_channel *c = findchannel(ic, chan, IEEE80211_MODE_AUTO);
+		uint32_t idx_bf = PWR_IDX_BF_OFF + bf_on;
+		uint32_t idx_ss = PWR_IDX_1SS + num_ss - 1;
+
+		if (c != NULL && idx_bf < PWR_IDX_BF_MAX && idx_ss < PWR_IDX_SS_MAX) {
+			param[0] = ((c->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_80M] & 0xff) << 16) |
+					((c->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_40M] & 0xff) << 8) |
+					(c->ic_maxpower_table[idx_bf][idx_ss][PWR_IDX_20M] & 0xff);
+			retval = 0;
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Handle the OCAC (off-channel CAC) "get" sub-commands.
+ *
+ * On entry param[0] carries the sub-command id in bits 16-31; on success
+ * param[0] is overwritten with the requested value.
+ *
+ * Returns 0 on success, -EINVAL for an unknown sub-command.
+ */
+static int
+ieee80211_param_ocac_get(struct ieee80211vap *vap, int *param)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t param_id = (((uint32_t)param[0]) >> 16) & 0xffff;
+	int retval = 0;
+
+	switch(param_id) {
+	case IEEE80211_OCAC_GET_STATUS:
+		/* whether OCAC is currently enabled in the configuration */
+		param[0] = ic->ic_ocac.ocac_cfg.ocac_enable;
+		break;
+	case IEEE80211_OCAC_GET_AVAILABILITY:
+		/* whether OCAC is supported at all on this vap */
+		param[0] = !ieee80211_wireless_is_ocac_unsupported(vap);
+		break;
+	default:
+		retval = -EINVAL;
+		break;
+	}
+
+	return retval;
+}
+
+/*
+ * ioctl handler: report the current PHY mode as a human-readable string
+ * (e.g. "11ng40", "11acOnly80", "11ac80Edge+").
+ *
+ * The string is composed in 'extra' and copied to the user buffer at
+ * wri->pointer; wri->length is set to strlen() of the result.
+ *
+ * Returns 0 on success, -EINVAL for an unrecognized PHY mode.
+ */
+static int
+ieee80211_ioctl_getmode(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	switch (vap->iv_ic->ic_phymode ) {
+		case IEEE80211_MODE_11A:
+			strcpy(extra, "11a");
+			break;
+		case IEEE80211_MODE_11B:
+			strcpy(extra, "11b");
+			break;
+		case IEEE80211_MODE_11NG_HT40PM:
+			strcpy(extra, "11ng40");
+			break;
+		case IEEE80211_MODE_11NA_HT40PM:
+			/* "Only" variants advertise an N-only (no 11ac) BSS */
+			if (vap->iv_11ac_and_11n_flag & IEEE80211_11N_ONLY) {
+				strcpy(extra, "11nOnly40");
+			} else {
+				strcpy(extra, "11na40");
+			}
+			break;
+		case IEEE80211_MODE_11NG:
+			strcpy(extra, "11ng");
+			break;
+		case IEEE80211_MODE_11G:
+			strcpy(extra, "11g");
+			break;
+		case IEEE80211_MODE_11NA:
+			if (vap->iv_11ac_and_11n_flag & IEEE80211_11N_ONLY) {
+				strcpy(extra, "11nOnly20");
+			} else {
+				strcpy(extra, "11na20");
+			}
+			break;
+		case IEEE80211_MODE_FH:
+			strcpy(extra, "FH");
+			break;
+		case IEEE80211_MODE_11AC_VHT20PM:
+			if (vap->iv_11ac_and_11n_flag & IEEE80211_11AC_ONLY) {
+				strcpy(extra, "11acOnly20");
+			} else {
+				strcpy(extra, "11ac20");
+			}
+			break;
+		case IEEE80211_MODE_11AC_VHT40PM:
+			if (vap->iv_11ac_and_11n_flag & IEEE80211_11AC_ONLY) {
+				strcpy(extra, "11acOnly40");
+			} else {
+				strcpy(extra, "11ac40");
+			}
+			break;
+		case IEEE80211_MODE_11AC_VHT80PM:
+			if (vap->iv_11ac_and_11n_flag & IEEE80211_11AC_ONLY) {
+				strcpy(extra, "11acOnly80");
+			} else {
+				/* distinguish the four 80 MHz centre/edge placements */
+				if (IEEE80211_IS_CHAN_VHT80_EDGEPLUS(vap->iv_ic->ic_curchan))
+					strcpy(extra, "11ac80Edge+");
+				else if (IEEE80211_IS_CHAN_VHT80_CNTRPLUS(vap->iv_ic->ic_curchan))
+					strcpy(extra, "11ac80Cntr+");
+				else if (IEEE80211_IS_CHAN_VHT80_CNTRMINUS(vap->iv_ic->ic_curchan))
+					strcpy(extra, "11ac80Cntr-");
+				else if (IEEE80211_IS_CHAN_VHT80_EDGEMINUS(vap->iv_ic->ic_curchan))
+					strcpy(extra, "11ac80Edge-");
+			}
+			break;
+		case IFM_AUTO:
+			strcpy(extra, "auto");
+			break;
+		default:
+			return -EINVAL;
+	}
+	/* NOTE(review): length excludes the NUL and strncpy() with that length
+	 * will not append one - assumes the caller treats wri->pointer as a
+	 * counted (not NUL-terminated) string; confirm. */
+	wri->length = strlen(extra);
+	strncpy(wri->pointer, extra, wri->length);
+	return 0;
+}
+
+/*
+ * Node-table iterator callback: print the MAC address of every node whose
+ * blacklist timeout is still running (i.e. currently blacklisted).
+ * The iterator argument 'arg' is unused.
+ */
+static void
+ieee80211_blacklist_node_print(void *arg, struct ieee80211_node *ni)
+{
+	if (ni->ni_blacklist_timeout > 0) {
+		printf("%s\n", ether_sprintf(ni->ni_macaddr));
+	}
+}
+
+/*
+ * Pack the current WoWLAN configuration into a single 32-bit word:
+ *   bit  31   : host state
+ *   bits 29-30: wakeup match mode
+ *   bits 16-28: L3 UDP port (NOTE(review): masked to 13 bits, so port
+ *               values above 0x1fff are truncated - confirm intended)
+ *   bits 0-15 : L2 ether type
+ */
+static uint32_t
+ieee80211_param_wowlan_get(struct ieee80211com *ic)
+{
+	return ((ic->ic_wowlan.host_state << 31) |
+			(ic->ic_wowlan.wowlan_match << 29) |
+			((ic->ic_wowlan.L3_udp_port&0x1fff) << 16) |
+			ic->ic_wowlan.L2_ether_type);
+}
+
+/*
+ * Log (at DEBUG|EXTDR level) the QHOP role of a legacy WDS VAP:
+ * MBS (master base station), RBS (remote base station), or plain WDS.
+ */
+static void
+ieee80211_extdr_dump_flags(struct ieee80211vap *vap)
+{
+	const char *wds_mode;
+
+	if (IEEE80211_VAP_WDS_IS_MBS(vap))
+		wds_mode = "MBS";
+	else if (IEEE80211_VAP_WDS_IS_RBS(vap))
+		wds_mode = "RBS";
+	else
+		wds_mode = "WDS";
+
+	/* fixed typo in the debug message: "Legay" -> "Legacy" */
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_EXTDR,
+			"Legacy VAP QHOP mode: %s\n", wds_mode);
+}
+
+static int
+ieee80211_ioctl_getparam(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_rsnparms *rsn = NULL;
+	int *param = (int *) extra;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+#if defined(QBMPS_ENABLE)
+	struct qdrv_vap *qv = container_of(vap, struct qdrv_vap, iv);
+#endif
+
+	if (vap->iv_bss) {
+		rsn = &vap->iv_bss->ni_rsn;
+	}
+	switch (param[0] & 0xffff) {
+	case IEEE80211_PARAM_AP_ISOLATE:
+		param[0] = br_get_ap_isolate();
+		break;
+	case IEEE80211_PARAM_AUTHMODE:
+		if (vap->iv_flags & IEEE80211_F_WPA) {
+			param[0] = IEEE80211_AUTH_WPA;
+		} else {
+			if (NULL == vap->iv_bss)
+				return -EINVAL;
+			param[0] = vap->iv_bss->ni_authmode;
+		}
+		break;
+	case IEEE80211_PARAM_PROTMODE:
+		param[0] = ic->ic_protmode;
+		break;
+	case IEEE80211_PARAM_MCASTCIPHER:
+		if (!rsn)
+			return -EINVAL;
+		param[0] = rsn->rsn_mcastcipher;
+		break;
+	case IEEE80211_PARAM_MCASTKEYLEN:
+		if (!rsn)
+			return -EINVAL;
+		param[0] = rsn->rsn_mcastkeylen;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHERS:
+		if (!rsn)
+			return -EINVAL;
+		param[0] = rsn->rsn_ucastcipherset;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHER:
+		if (!rsn)
+			return -EINVAL;
+		param[0] = rsn->rsn_ucastcipher;
+		break;
+	case IEEE80211_PARAM_UCASTKEYLEN:
+		if (!rsn)
+			return -EINVAL;
+		param[0] = rsn->rsn_ucastkeylen;
+		break;
+	case IEEE80211_PARAM_KEYMGTALGS:
+		if (!rsn)
+			return -EINVAL;
+		param[0] = rsn->rsn_keymgmtset;
+		break;
+	case IEEE80211_PARAM_RSNCAPS:
+		if (!rsn)
+			return -EINVAL;
+		param[0] = rsn->rsn_caps;
+		break;
+	case IEEE80211_PARAM_WPA:
+		switch (vap->iv_flags & IEEE80211_F_WPA) {
+		case IEEE80211_F_WPA1:
+			param[0] = 1;
+			break;
+		case IEEE80211_F_WPA2:
+			param[0] = 2;
+			break;
+		case IEEE80211_F_WPA1 | IEEE80211_F_WPA2:
+			param[0] = 3;
+			break;
+		default:
+			param[0] = 0;
+			break;
+		}
+		break;
+	case IEEE80211_PARAM_ROAMING:
+		param[0] = ic->ic_roaming;
+		break;
+	case IEEE80211_PARAM_PRIVACY:
+		param[0] = (vap->iv_flags & IEEE80211_F_PRIVACY) != 0;
+		break;
+	case IEEE80211_PARAM_DROPUNENCRYPTED:
+		param[0] = (vap->iv_flags & IEEE80211_F_DROPUNENC) != 0;
+		break;
+	case IEEE80211_PARAM_DROPUNENC_EAPOL:
+		param[0] = IEEE80211_VAP_DROPUNENC_EAPOL(vap);
+		break;
+	case IEEE80211_PARAM_COUNTERMEASURES:
+		param[0] = (vap->iv_flags & IEEE80211_F_COUNTERM) != 0;
+		break;
+	case IEEE80211_PARAM_DRIVER_CAPS:
+		param[0] = vap->iv_caps;
+		break;
+	case IEEE80211_PARAM_WMM:
+		param[0] = (vap->iv_flags & IEEE80211_F_WME) != 0;
+		break;
+	case IEEE80211_PARAM_HIDESSID:
+		param[0] = (vap->iv_flags & IEEE80211_F_HIDESSID) != 0;
+		break;
+	case IEEE80211_PARAM_APBRIDGE:
+		param[0] = (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0;
+		break;
+	case IEEE80211_PARAM_INACT:
+		param[0] = vap->iv_inact_run * IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_AUTH:
+		param[0] = vap->iv_inact_auth * IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_INIT:
+		param[0] = vap->iv_inact_init * IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_DTIM_PERIOD:
+		param[0] = vap->iv_dtim_period;
+		break;
+	case IEEE80211_PARAM_BEACON_INTERVAL:
+		/* NB: get from ic_bss for station mode */
+		param[0] = ic->ic_lintval_backup;
+		break;
+	case IEEE80211_PARAM_DOTH:
+		param[0] = (ic->ic_flags & IEEE80211_F_DOTH) != 0;
+		break;
+	case IEEE80211_PARAM_SHPREAMBLE:
+		param[0] = (ic->ic_caps & IEEE80211_C_SHPREAMBLE) != 0;
+		break;
+	case IEEE80211_PARAM_PWRCONSTRAINT:
+		if (ic->ic_flags & IEEE80211_F_DOTH && ic->ic_flags_ext & IEEE80211_FEXT_TPC)
+			param[0] = ic->ic_pwr_constraint;
+		else
+			return -EOPNOTSUPP;
+		break;
+	case IEEE80211_PARAM_PUREG:
+		param[0] = (vap->iv_flags & IEEE80211_F_PUREG) != 0;
+		break;
+	case IEEE80211_PARAM_WDS:
+		param[0] = ((vap->iv_flags_ext & IEEE80211_FEXT_WDS) == IEEE80211_FEXT_WDS);
+		break;
+	case IEEE80211_PARAM_REPEATER:
+		param[0] = !!(ic->ic_flags_ext & IEEE80211_FEXT_REPEATER);
+		break;
+	case IEEE80211_PARAM_BGSCAN:
+		param[0] = (vap->iv_flags & IEEE80211_F_BGSCAN) != 0;
+		break;
+	case IEEE80211_PARAM_BGSCAN_IDLE:
+		param[0] = jiffies_to_msecs(vap->iv_bgscanidle); /* ms */
+		break;
+	case IEEE80211_PARAM_BGSCAN_INTERVAL:
+		param[0] = vap->iv_bgscanintvl / HZ;	/* seconds */
+		break;
+	case IEEE80211_PARAM_SCAN_OPCHAN:
+		param[0] = ic->ic_scan_opchan_enable;
+		break;
+	case IEEE80211_PARAM_EXTENDER_MBS_RSSI_MARGIN:
+		param[0] = ic->ic_extender_mbs_rssi_margin;
+		break;
+	case IEEE80211_PARAM_MCAST_RATE:
+		param[0] = vap->iv_mcast_rate;	/* seconds */
+		break;
+	case IEEE80211_PARAM_COVERAGE_CLASS:
+		param[0] = ic->ic_coverageclass;
+		break;
+	case IEEE80211_PARAM_COUNTRY_IE:
+		param[0] = (ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE) != 0;
+		break;
+	case IEEE80211_PARAM_REGCLASS:
+		param[0] = (ic->ic_flags_ext & IEEE80211_FEXT_REGCLASS) != 0;
+		break;
+	case IEEE80211_PARAM_SCANVALID:
+		param[0] = vap->iv_scanvalid / HZ;	/* seconds */
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11A:
+		param[0] = vap->iv_roam.rssi11a;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11B:
+		param[0] = vap->iv_roam.rssi11bOnly;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11G:
+		param[0] = vap->iv_roam.rssi11b;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11A:
+		param[0] = vap->iv_roam.rate11a;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11B:
+		param[0] = vap->iv_roam.rate11bOnly;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11G:
+		param[0] = vap->iv_roam.rate11b;
+		break;
+	case IEEE80211_PARAM_UAPSDINFO:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (IEEE80211_VAP_UAPSD_ENABLED(vap))
+				param[0] = 1;
+			else
+				param[0] = 0;
+		} else if (vap->iv_opmode == IEEE80211_M_STA)
+			param[0] = vap->iv_uapsdinfo;
+		break;
+	case IEEE80211_PARAM_SLEEP:
+		if (NULL == vap->iv_bss)
+			return -EINVAL;
+		param[0] = vap->iv_bss->ni_flags & IEEE80211_NODE_PWR_MGT;
+		break;
+	case IEEE80211_PARAM_EOSPDROP:
+		param[0] = IEEE80211_VAP_EOSPDROP_ENABLED(vap);
+		break;
+	case IEEE80211_PARAM_STA_DFS:
+	case IEEE80211_PARAM_MARKDFS:
+		if (ic->ic_flags_ext & IEEE80211_FEXT_MARKDFS)
+			param[0] = 1;
+		else
+			param[0] = 0;
+		break;
+	case IEEE80211_PARAM_SHORT_GI:
+		/* Checking both SGI flags, if SGI is enabled/disabled */
+		if (IS_IEEE80211_VHT_ENABLED(ic)) {
+			param[0] = (ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_SHORT_GI_80) ? 1 : 0;
+		} else {
+			param[0] = (vap->iv_ht_flags & IEEE80211_HTF_SHORTGI_ENABLED &&
+				    (ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20 ||
+				     ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40)) ? 1 : 0;
+		}
+		break;
+	case IEEE80211_PARAM_MCS_CAP:
+		if ((vap->iv_mcs_config & IEEE80211_RATE_PREFIX_MASK) == IEEE80211_N_RATE_PREFIX) {
+			param[0] = (vap->iv_mcs_config & 0xFF);
+		} else {
+			printk("11N MCS is not set\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_LEGACY_RETRY_LIMIT:
+	case IEEE80211_PARAM_MIMOMODE:
+	case IEEE80211_PARAM_SHORT_RETRY_LIMIT:
+	case IEEE80211_PARAM_LONG_RETRY_LIMIT:
+	case IEEE80211_PARAM_RETRY_COUNT:
+	case IEEE80211_PARAM_TXBF_PERIOD:
+	case IEEE80211_PARAM_TXBF_CTRL:
+	case IEEE80211_PARAM_GET_RFCHIP_ID:
+	case IEEE80211_PARAM_GET_RFCHIP_VERID:
+	case IEEE80211_PARAM_LDPC:
+	case IEEE80211_PARAM_STBC:
+	case IEEE80211_PARAM_RTS_CTS:
+	case IEEE80211_PARAM_TX_QOS_SCHED:
+	case IEEE80211_PARAM_PEER_RTS_MODE:
+	case IEEE80211_PARAM_DYN_WMM:
+	case IEEE80211_PARAM_11N_40_ONLY_MODE:
+	case IEEE80211_PARAM_MAX_MGMT_FRAMES:
+	case IEEE80211_PARAM_MCS_ODD_EVEN:
+	case IEEE80211_PARAM_RESTRICTED_MODE:
+	case IEEE80211_PARAM_RESTRICT_RTS:
+	case IEEE80211_PARAM_RESTRICT_LIMIT:
+	case IEEE80211_PARAM_RESTRICT_RATE:
+	case IEEE80211_PARAM_SWRETRY_AGG_MAX:
+	case IEEE80211_PARAM_SWRETRY_NOAGG_MAX:
+	case IEEE80211_PARAM_SWRETRY_SUSPEND_XMIT:
+	case IEEE80211_PARAM_RX_AGG_TIMEOUT:
+	case IEEE80211_PARAM_CARRIER_ID:
+	case IEEE80211_PARAM_TX_QUEUING_ALG:
+	case IEEE80211_PARAM_CONGEST_IDX:
+	case IEEE80211_PARAM_MICHAEL_ERR_CNT:
+	case IEEE80211_PARAM_MAX_AGG_SIZE:
+	case IEEE80211_PARAM_MU_ENABLE:
+	case IEEE80211_PARAM_MU_USE_EQ:
+	case IEEE80211_PARAM_RESTRICT_WLAN_IP:
+	case IEEE80211_PARAM_CCA_FIXED:
+	case IEEE80211_PARAM_AUTO_CCA_ENABLE:
+	case IEEE80211_PARAM_GET_CCA_STATS:
+	case IEEE80211_PARAM_GET_MU_GRP_QMAT:
+	case IEEE80211_PARAM_CACSTATUS:
+	case IEEE80211_PARAM_EP_STATUS:
+	case IEEE80211_PARAM_BEACON_HANG_TIMEOUT:
+	case IEEE80211_PARAM_BB_DEAFNESS_WAR_EN:
+		ieee80211_param_from_qdrv(vap, param[0] & 0xffff, &param[0], NULL, 0);
+		break;
+	case IEEE80211_PARAM_BW_SEL_MUC:
+		param[0] = ieee80211_get_bw(ic);
+		break;
+	case IEEE80211_PARAM_MODE:
+		if (IS_IEEE80211_5G_BAND(ic))
+			ieee80211_param_from_qdrv(vap, param[0] & 0xffff, &param[0], NULL, 0);
+		else
+			param[0] = !!(ic->ic_flags_ext & IEEE80211_FEXT_24GVHT);
+		break;
+	case IEEE80211_PARAM_HT_NSS_CAP:
+		param[0] = ic->ic_ht_nss_cap;
+		break;
+	case IEEE80211_PARAM_VHT_NSS_CAP:
+		param[0] = ic->ic_vht_nss_cap;
+		break;
+	case IEEE80211_PARAM_VHT_MCS_CAP:
+		param[0] = ic->ic_vht_mcs_cap;
+		break;
+	case IEEE80211_PARAM_RTSTHRESHOLD:
+		param[0] = vap->iv_rtsthreshold;
+		break;
+	case IEEE80211_PARAM_AMPDU_DENSITY:
+		param[0] = ic->ic_htcap.mpduspacing;
+		break;
+	case IEEE80211_PARAM_SCANSTATUS:
+		if (ic->ic_flags & IEEE80211_F_SCAN) {
+			param[0] = 1;
+		} else {
+			param[0] = 0;
+		}
+		break;
+	case IEEE80211_PARAM_IMPLICITBA:
+		param[0] = vap->iv_implicit_ba;
+		break;
+	case IEEE80211_PARAM_GLOBAL_BA_CONTROL:
+		param[0] = vap->iv_ba_control;
+		break;
+	case IEEE80211_PARAM_VAP_STATS:
+		{
+			param[0] = 0; // no meaning
+
+			printk("RX stats (delta)\n");
+			printk("  dup:\t%u\n", vap->iv_stats.is_rx_dup);
+			printk("  beacon:\t%u\n", vap->iv_stats.is_rx_beacon);
+			printk("  elem_missing:\t%u\n", vap->iv_stats.is_rx_elem_missing);
+			printk("  badchan:\t%u\n", vap->iv_stats.is_rx_badchan);
+			printk("  chanmismatch:\t%u\n", vap->iv_stats.is_rx_chanmismatch);
+
+			// clear
+			memset(&vap->iv_stats, 0, sizeof(vap->iv_stats));
+		}
+		break;
+	case IEEE80211_PARAM_DFS_FAST_SWITCH:
+		param[0] = ((ic->ic_flags_ext & IEEE80211_FEXT_DFS_FAST_SWITCH) != 0);
+		break;
+	case IEEE80211_PARAM_SCAN_NO_DFS:
+		param[0] = ((ic->ic_flags_ext & IEEE80211_FEXT_SCAN_NO_DFS) != 0);
+		break;
+	case IEEE80211_PARAM_BLACKLIST_GET:
+		ieee80211_iterate_dev_nodes(dev,
+			&ic->ic_sta, ieee80211_blacklist_node_print, NULL, 0);
+		param[0] = 0;
+		break;
+	case IEEE80211_PARAM_FIXED_TX_RATE:
+		param[0] = vap->iv_mcs_config;
+		break;
+	case IEEE80211_PARAM_REGULATORY_REGION:
+		{
+			union {
+				char		as_chars[ 4 ];
+				u_int32_t	as_u32;
+			} region;
+
+			if (ieee80211_countryid_to_country_string( ic->ic_country_code, region.as_chars ) != 0) {
+				/*
+				 * If we can't get a country string from the current code,
+				 * return the NULL string as the region.
+				 */
+				region.as_u32 = 0;
+			}
+
+			param[0] = (int) region.as_u32;
+		}
+		break;
+	case IEEE80211_PARAM_SAMPLE_RATE:
+		param[0] = ic->ic_sample_rate;
+		break;
+	case IEEE80211_PARAM_CONFIG_TXPOWER:
+		{
+			int retval = ieee80211_ioctl_getparam_txpower(vap, param);
+			if (retval != 0) {
+				return retval;
+			}
+		}
+		break;
+	case IEEE80211_PARAM_CONFIG_BW_TXPOWER:
+		{
+			int retval = ieee80211_ioctl_getparam_bw_txpower(vap, param);
+			if (retval != 0) {
+				return retval;
+			}
+		}
+		break;
+	case IEEE80211_PARAM_TPC:
+		if (ic->ic_flags & IEEE80211_F_DOTH) {
+			param[0] = !!(ic->ic_flags_ext & IEEE80211_FEXT_TPC);
+		} else {
+			return -EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_CONFIG_TPC_INTERVAL:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			param[0] = ieee80211_tpc_query_get_interval(&ic->ic_tpc_query_info);
+		} else {
+			return -EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_TPC_QUERY:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			param[0] = ieee80211_tpc_query_state(&ic->ic_tpc_query_info);
+		} else {
+			return -EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_CONFIG_REGULATORY_TXPOWER:
+		{
+			int chan = (param[0] >> 16);
+
+			if (chan <= IEEE80211_CHAN_MAX && chan > 0 && isset(ic->ic_chan_active, chan)) {
+				const struct ieee80211_channel *c = findchannel(ic, chan, IEEE80211_MODE_AUTO);
+
+				if (c != NULL) {
+					param[0] = c->ic_maxregpower;
+				} else {
+					return -EINVAL;
+				}
+			}
+		}
+		break;
+	case IEEE80211_PARAM_BA_MAX_WIN_SIZE:
+		param[0] = vap->iv_max_ba_win_size;
+		break;
+	case IEEE80211_PARAM_MIN_DWELL_TIME_ACTIVE:
+		param[0] = ic->ic_mindwell_active;
+		break;
+	case IEEE80211_PARAM_MIN_DWELL_TIME_PASSIVE:
+		param[0] = ic->ic_mindwell_passive;
+		break;
+	case IEEE80211_PARAM_MAX_DWELL_TIME_ACTIVE:
+		param[0] = ic->ic_maxdwell_active;
+		break;
+#ifdef QTN_BG_SCAN
+	case IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_ACTIVE:
+		param[0] = ic->ic_qtn_bgscan.dwell_msecs_active;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_PASSIVE:
+		param[0] = ic->ic_qtn_bgscan.dwell_msecs_passive;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_ACTIVE:
+		param[0] = ic->ic_qtn_bgscan.duration_msecs_active;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_FAST:
+		param[0] = ic->ic_qtn_bgscan.duration_msecs_passive_fast;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_NORMAL:
+		param[0] = ic->ic_qtn_bgscan.duration_msecs_passive_normal;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_SLOW:
+		param[0] = ic->ic_qtn_bgscan.duration_msecs_passive_slow;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_FAST:
+		param[0] = ic->ic_qtn_bgscan.thrshld_fat_passive_fast;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_NORMAL:
+		param[0] = ic->ic_qtn_bgscan.thrshld_fat_passive_normal;
+		break;
+	case IEEE80211_PARAM_QTN_BGSCAN_DEBUG:
+		param[0] = ic->ic_qtn_bgscan.debug_flags;
+		break;
+#endif /*QTN_BG_SCAN */
+	case IEEE80211_PARAM_MAX_DWELL_TIME_PASSIVE:
+		param[0] = ic->ic_maxdwell_passive;
+		break;
+#ifdef QSCS_ENABLED
+	case IEEE80211_PARAM_SCS:
+		if (ieee80211_param_scs_get(dev, param[0] >> IEEE80211_SCS_COMMAND_S,
+						(uint32_t *)&param[0]) < 0) {
+			return -EINVAL;
+		}
+		break;
+	case IEEE80211_PARAM_SCS_DFS_REENTRY_REQUEST:
+		param[0] = ((struct ap_state *)(ic->ic_scan->ss_scs_priv))->as_dfs_reentry_level;
+		break;
+	case IEEE80211_PARAM_SCS_CCA_INTF:
+		{
+			int chan = (param[0] >> 16);
+			struct ap_state *as = ic->ic_scan->ss_scs_priv;
+
+			if (as && chan < IEEE80211_CHAN_MAX && chan > 0 && isset(ic->ic_chan_active, chan)) {
+				if (as->as_cca_intf[chan] == SCS_CCA_INTF_INVALID)
+					param[0] = -1;
+				else
+					param[0] = as->as_cca_intf[chan];
+			} else {
+				return -EINVAL;
+			}
+		}
+		break;
+#endif /* QSCS_ENABLED */
+	case IEEE80211_PARAM_ALT_CHAN:
+		param[0] = ic->ic_ieee_alt_chan;
+		break;
+	case IEEE80211_PARAM_LDPC_ALLOW_NON_QTN:
+		param[0] = (vap->iv_ht_flags & IEEE80211_HTF_LDPC_ALLOW_NON_QTN) ? 1 : 0;
+		break;
+	case IEEE80211_PARAM_FWD_UNKNOWN_MC:
+		param[0] = vap->iv_forward_unknown_mc;
+		break;
+	case IEEE80211_PARAM_MC_TO_UC:
+		param[0] = vap->iv_mc_to_uc;
+		break;
+	case IEEE80211_PARAM_BCST_4:
+		param[0] = vap->iv_reliable_bcst;
+		break;
+	case IEEE80211_PARAM_AP_FWD_LNCB:
+		param[0] = vap->iv_ap_fwd_lncb;
+		break;
+	case IEEE80211_PARAM_GI_SELECT:
+		param[0] = ic->ic_gi_select_enable;
+		break;
+	case IEEE80211_PARAM_PPPC_SELECT:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			return -EOPNOTSUPP;
+		} else {
+			param[0] = ic->ic_pppc_select_enable;
+		}
+		break;
+	case IEEE80211_PARAM_PPPC_STEP:
+		if ((ic->ic_flags & IEEE80211_F_DOTH) && (ic->ic_flags_ext & IEEE80211_FEXT_TPC)) {
+			return -EOPNOTSUPP;
+		} else {
+			param[0] = ic->ic_pppc_step_db;
+		}
+		break;
+	case IEEE80211_PARAM_EMI_POWER_SWITCHING:
+		param[0] = ic->ic_emi_power_switch_enable;
+		break;
+	case IEEE80211_PARAM_GET_DFS_CCE:
+		param[0] = (ic->ic_dfs_cce.cce_previous << IEEE80211_CCE_PREV_CHAN_SHIFT) |
+			    ic->ic_dfs_cce.cce_current;
+		break;
+	case IEEE80211_PARAM_GET_SCS_CCE:
+		param[0] = (ic->ic_aci_cci_cce.cce_previous << IEEE80211_CCE_PREV_CHAN_SHIFT) |
+			    ic->ic_aci_cci_cce.cce_current;
+		break;
+	case IEEE80211_PARAM_ASSOC_LIMIT:
+		param[0] = ic->ic_sta_assoc_limit;
+		break;
+	case IEEE80211_PARAM_HW_BONDING:
+		param[0] = soc_shared_params->hardware_options;
+		break;
+	case IEEE80211_PARAM_BSS_ASSOC_LIMIT:
+		{
+			uint32_t group = (((uint32_t)param[0]) >> 16) & 0xffff;
+
+			if (group < IEEE80211_MIN_BSS_GROUP
+					|| group >= IEEE80211_MAX_BSS_GROUP) {
+				return -EINVAL;
+			}
+
+			param[0] = ic->ic_ssid_grp[group].limit;
+		}
+		break;
+	case IEEE80211_PARAM_BSS_GROUP_ID:
+		param[0] = vap->iv_ssid_group;
+		break;
+	case IEEE80211_PARAM_BSS_ASSOC_RESERVE:
+		{
+			uint32_t group = (((uint32_t)param[0]) >> 16) & 0xffff;
+
+			if (group < IEEE80211_MIN_BSS_GROUP
+					|| group >= IEEE80211_MAX_BSS_GROUP) {
+				return -EINVAL;
+			}
+
+			param[0] = ic->ic_ssid_grp[group].reserve;
+		}
+		break;
+	case IEEE80211_PARAM_IOT_TWEAKS:
+		param[0] = qtn_mproc_sync_shared_params_get()->iot_tweaks;
+		break;
+        case IEEE80211_PARAM_FAST_REASSOC:
+                param[0] = !!(ic->ic_flags_ext & IEEE80211_FEXT_SCAN_FAST_REASS);
+                break;
+	case IEEE80211_PARAM_CSA_FLAG:
+		param[0] = ic->ic_csa_flag;
+		break;
+	case IEEE80211_PARAM_DEF_MATRIX:
+		param[0] = ic->ic_def_matrix;
+		break;
+	case IEEE80211_PARAM_ENABLE_11AC:
+		param[0] = vap->iv_11ac_enabled;
+		break;
+	case IEEE80211_PARAM_FIXED_11AC_TX_RATE:
+		if ((vap->iv_mcs_config & IEEE80211_RATE_PREFIX_MASK) ==
+				IEEE80211_AC_RATE_PREFIX) {
+			param[0] = vap->iv_mcs_config & 0x0F;
+		} else {
+			printk("VHT rate is not set\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	case IEEE80211_PARAM_VAP_PRI:
+		param[0] = vap->iv_pri;
+		break;
+	case IEEE80211_PARAM_AIRFAIR:
+		param[0] = ic->ic_airfair;
+		break;
+	case IEEE80211_PARAM_TDLS_STATUS:
+		if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_PROHIB) == IEEE80211_FEXT_TDLS_PROHIB)
+			param[0] = 0;
+		else
+			param[0] = 1;
+		break;
+	case IEEE80211_PARAM_TDLS_MODE:
+		param[0] = vap->tdls_path_sel_prohibited;
+		break;
+	case IEEE80211_PARAM_TDLS_TIMEOUT_TIME:
+		param[0] = vap->tdls_timeout_time;
+		break;
+	case IEEE80211_PARAM_TDLS_PATH_SEL_WEIGHT:
+		param[0] = vap->tdls_path_sel_weight;
+		break;
+	case IEEE80211_PARAM_TDLS_TRAINING_PKT_CNT:
+		param[0] = vap->tdls_training_pkt_cnt;
+		break;
+	case IEEE80211_PARAM_TDLS_DISC_INT:
+		param[0] = vap->tdls_discovery_interval;
+		break;
+	case IEEE80211_PARAM_TDLS_PATH_SEL_PPS_THRSHLD:
+		param[0] = vap->tdls_path_sel_pps_thrshld;
+		break;
+	case IEEE80211_PARAM_TDLS_PATH_SEL_RATE_THRSHLD:
+		param[0] = vap->tdls_path_sel_rate_thrshld;
+		break;
+	case IEEE80211_PARAM_TDLS_VERBOSE:
+		param[0] = vap->tdls_verbose;
+		break;
+	case IEEE80211_PARAM_TDLS_MIN_RSSI:
+		param[0] = vap->tdls_min_valid_rssi;
+		break;
+	case IEEE80211_PARAM_TDLS_SWITCH_INTS:
+		param[0] = vap->tdls_switch_ints;
+		break;
+	case IEEE80211_PARAM_TDLS_RATE_WEIGHT:
+		param[0] = vap->tdls_phy_rate_wgt;
+		break;
+	case IEEE80211_PARAM_TDLS_UAPSD_INDICAT_WND:
+		param[0] = vap->tdls_uapsd_indicat_wnd;
+		break;
+	case IEEE80211_PARAM_TDLS_CS_MODE:
+		if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PASSIVE)
+			param[0] = 2;
+		else if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_CS_PROHIB)
+			param[0] = 1;
+		else
+			param[0] = 0;
+		break;
+	case IEEE80211_PARAM_TDLS_OFF_CHAN:
+		param[0] = vap->tdls_fixed_off_chan;
+		break;
+	case IEEE80211_PARAM_TDLS_OFF_CHAN_BW:
+		param[0] = vap->tdls_fixed_off_chan_bw;
+		break;
+	case IEEE80211_PARAM_TDLS_NODE_LIFE_CYCLE:
+		param[0] = vap->tdls_node_life_cycle;
+		break;
+	case IEEE80211_PARAM_TDLS_OVER_QHOP_ENABLE:
+		param[0] = vap->tdls_over_qhop_en;
+		break;
+	case IEEE80211_PARAM_OCAC:
+	case IEEE80211_PARAM_SDFS:
+		{
+			int retval = ieee80211_param_ocac_get(vap, param);
+			if (retval != 0) {
+				return retval;
+			}
+		}
+		break;
+	case IEEE80211_PARAM_DEACTIVE_CHAN_PRI:
+		param[0] = ieee80211_get_inactive_primary_chan_num(ic);
+		break;
+	case IEEE80211_PARAM_SPECIFIC_SCAN:
+		param[0] = (vap->iv_flags_ext & IEEE80211_FEXT_SPECIFIC_SCAN) ? 1 : 0;
+		break;
+	case IEEE80211_PARAM_FIXED_SGI:
+		param[0] = ic->ic_gi_fixed;
+		break;
+	case IEEE80211_PARAM_FIXED_BW:
+		param[0] = ic->ic_bw_fixed;
+		break;
+	case IEEE80211_PARAM_SPEC_COUNTRY_CODE:
+		{
+			union {
+				char as_chars[4];
+				uint32_t as_u32;
+			} region;
+
+			if (ieee80211_countryid_to_country_string(ic->ic_spec_country_code,
+					region.as_chars) != 0) {
+				/*
+				 * If we can't get a country string from the current code,
+				 * return the NULL string as the region.
+				 */
+				region.as_u32 = 0;
+			}
+
+			param[0] = (int)region.as_u32;
+		}
+		break;
+        case IEEE80211_PARAM_VCO_LOCK_DETECT_MODE:
+        {
+            param[0] = sp->vco_lock_detect_mode;
+        }
+        break;
+	case IEEE80211_PARAM_CONFIG_PMF:
+		param[0] = vap->iv_pmf;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_ENABLE:
+		param[0] = vap->iv_rx_amsdu_enable;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_THRESHOLD_CCA:
+		param[0] = vap->iv_rx_amsdu_threshold_cca;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_THRESHOLD_PMBL:
+		param[0] = vap->iv_rx_amsdu_threshold_pmbl;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_PMBL_WF_SP:
+		param[0] = vap->iv_rx_amsdu_pmbl_wf_sp;
+		break;
+	case IEEE80211_PARAM_RX_AMSDU_PMBL_WF_LP:
+		param[0] = vap->iv_rx_amsdu_pmbl_wf_lp;
+		break;
+	case IEEE80211_PARAM_INTRA_BSS_ISOLATE:
+		param[0] = !!(dev->qtn_flags & QTN_FLAG_INTRA_BSS_ISOLATE);
+		break;
+	case IEEE80211_PARAM_BSS_ISOLATE:
+		param[0] = !!(dev->qtn_flags & QTN_FLAG_BSS_ISOLATE);
+		break;
+	case IEEE80211_PARAM_BF_RX_STS:
+		param[0] = ic->ic_vhtcap.bfstscap + 1;
+		break;
+	case IEEE80211_PARAM_PC_OVERRIDE:
+		param[0] = ((ic->ic_pwr_constraint)|(ic->ic_pco.pco_set<<8));
+		break;
+	case IEEE80211_PARAM_WOWLAN:
+		param[0] = ieee80211_param_wowlan_get(ic);
+		break;
+	case IEEE80211_PARAM_SCAN_TBL_LEN_MAX:
+		param[0] = ic->ic_scan_tbl_len_max;
+		break;
+	case IEEE80211_PARAM_WDS_MODE:
+		param[0] = IEEE80211_VAP_WDS_IS_RBS(vap) ? 1 : IEEE80211_VAP_WDS_IS_MBS(vap) ? 0 : 2;
+		ieee80211_extdr_dump_flags(vap);
+		break;
+	case IEEE80211_PARAM_EXTENDER_ROLE:
+		param[0] = ic->ic_extender_role;
+		break;
+	case IEEE80211_PARAM_EXTENDER_MBS_BEST_RSSI:
+		param[0] = ic->ic_extender_mbs_best_rssi;
+		break;
+	case IEEE80211_PARAM_EXTENDER_RBS_BEST_RSSI:
+		param[0] = ic->ic_extender_rbs_best_rssi;
+		break;
+	case IEEE80211_PARAM_EXTENDER_MBS_WGT:
+		param[0] = ic->ic_extender_mbs_wgt;
+		break;
+	case IEEE80211_PARAM_EXTENDER_RBS_WGT:
+		param[0] = ic->ic_extender_rbs_wgt;
+		break;
+	case IEEE80211_PARAM_EXTENDER_VERBOSE:
+		param[0] = ic->ic_extender_verbose;
+		break;
+	case IEEE80211_PARAM_VAP_TX_AMSDU:
+		param[0] = vap->iv_tx_amsdu;
+		break;
+	case IEEE80211_PARAM_TX_MAXMPDU:
+		param[0] = vap->iv_tx_max_amsdu;
+		break;
+	case IEEE80211_PARAM_DISASSOC_REASON:
+		param[0] = vap->iv_disassoc_reason;
+		break;
+        case IEEE80211_PARAM_BB_PARAM:
+                {
+                        param[0] = sp->bb_param;
+                }
+                break;
+	case IEEE80211_PARAM_NDPA_DUR:
+		param[0] = ic->ic_ndpa_dur;
+		break;
+	case IEEE80211_PARAM_SU_TXBF_PKT_CNT:
+		param[0] = ic->ic_su_txbf_pkt_cnt;
+		break;
+	case IEEE80211_PARAM_MU_TXBF_PKT_CNT:
+		param[0] = ic->ic_mu_txbf_pkt_cnt;
+		break;
+	case IEEE80211_PARAM_SCAN_RESULTS_CHECK_INV:
+		param[0] = ic->ic_scan_results_check;
+		break;
+	case IEEE80211_PARAM_TQEW_DESCR_LIMIT:
+		param[0] = ic->ic_tqew_descr_limit;
+		break;
+	case IEEE80211_PARAM_CS_THRESHOLD:
+		param[0] = sp->cs_thresh_base_val;
+		break;
+	case IEEE80211_PARAM_L2_EXT_FILTER:
+		param[0] = g_l2_ext_filter;
+		break;
+	case IEEE80211_PARAM_L2_EXT_FILTER_PORT:
+		param[0] = ic->ic_get_l2_ext_filter_port();
+		break;
+	case IEEE80211_PARAM_OBSS_TRIGG_SCAN_INT:
+		if ((ic->ic_opmode == IEEE80211_M_STA) &&
+				(vap->iv_bss) &&
+				(IEEE80211_AID(vap->iv_bss->ni_associd)))
+			param[0] = vap->iv_bss->ni_obss_ie.obss_trigger_interval;
+		else
+			param[0] = ic->ic_obss_ie.obss_trigger_interval;
+		break;
+	case IEEE80211_PARAM_PREF_BAND:
+		param[0] = vap->iv_pref_band;
+		break;
+	case IEEE80211_PARAM_BW_2_4GHZ:
+		if (IS_IEEE80211_24G_BAND(ic))
+			param[0] = ieee80211_get_bw(ic);
+		else
+			return -EINVAL;
+		break;
+	case IEEE80211_PARAM_ALLOW_VHT_TKIP:
+		param[0] = vap->allow_tkip_for_vht;
+		break;
+	case IEEE80211_PARAM_VHT_OPMODE_NOTIF:
+		param[0] = ic->ic_vht_opmode_notif;
+		break;
+	case IEEE80211_PARAM_QTN_BLOCK_BSS:
+		param[0] = !!(vap->is_block_all_assoc);
+		break;
+	case IEEE80211_PARAM_VHT_2_4GHZ:
+		param[0] = !!(ic->ic_flags_ext & IEEE80211_FEXT_24GVHT);
+		break;
+	case IEEE80211_PARAM_BEACONING_SCHEME:
+		param[0] = ic->ic_beaconing_scheme;
+		break;
+#if defined(QBMPS_ENABLE)
+	case IEEE80211_PARAM_STA_BMPS:
+		param[0] = qv->qv_bmps_mode;
+		break;
+#endif
+	case IEEE80211_PARAM_40MHZ_INTOLERANT:
+		if (IEEE80211_IS_11B(ic) || IEEE80211_IS_11G(ic))
+			param[0] = !!(vap->iv_coex & WLAN_20_40_BSS_COEX_40MHZ_INTOL);
+		else if (IS_IEEE80211_11NG(ic))
+			param[0] = !!(ic->ic_htcap.cap & IEEE80211_HTCAP_C_40_INTOLERANT);
+		else
+			param[0] = 0;
+		break;
+	case IEEE80211_PARAM_SET_RTS_BW_DYN:
+		param[0] = ic->ic_rts_bw_dyn;
+		break;
+	case IEEE80211_PARAM_SET_DUP_RTS:
+		param[0] = ic->ic_dup_rts;
+		break;
+	case IEEE80211_PARAM_SET_CTS_BW:
+		param[0] = ic->ic_cts_bw;
+		break;
+	case IEEE80211_PARAM_USE_NON_HT_DUPLICATE_MU:
+		param[0] = ic->use_non_ht_duplicate_for_mu;
+		break;
+	case IEEE80211_PARAM_DISABLE_TX_BA:
+		param[0] = vap->tx_ba_disable;
+		break;
+	case IEEE80211_PARAM_DECLINE_RX_BA:
+		param[0] = vap->rx_ba_decline;
+		break;
+	case IEEE80211_PARAM_VAP_STATE:
+		param[0] = vap->iv_vap_state;
+		break;
+	case IEEE80211_PARAM_OBSS_SCAN:
+		param[0] = ic->ic_obss_scan_enable;
+		break;
+	case IEEE80211_PARAM_SHORT_SLOT:
+		param[0] = !!(ic->ic_caps & IEEE80211_C_SHSLOT);
+		break;
+	case IEEE80211_PARAM_BG_PROTECT:
+		param[0] = !!(ic->ic_flags_ext & IEEE80211_FEXT_BG_PROTECT);
+		break;
+	case IEEE80211_PARAM_11N_PROTECT:
+		param[0] = !!(ic->ic_flags_ext & IEEE80211_FEXT_11N_PROTECT);
+		break;
+	case IEEE80211_PARAM_MU_NDPA_BW_SIGNALING_SUPPORT:
+		param[0] = ic->rx_bws_support_for_mu_ndpa;
+		break;
+	case IEEE80211_PARAM_WPA_STARTED:
+	case IEEE80211_PARAM_HOSTAP_STARTED:
+		param[0] = ic->hostap_wpa_state;
+		break;
+	case IEEE80211_PARAM_RX_BAR_SYNC:
+		param[0] = ic->ic_rx_bar_sync;
+		break;
+	case IEEE80211_PARAM_GET_REG_DOMAIN_IS_EU:
+		param[0] = ic->ic_dfs_is_eu_region();
+		break;
+	case IEEE80211_PARAM_GET_CHAN_AVAILABILITY_STATUS:
+		param[0] = 0;
+		ic->ic_dump_chan_availability_status(ic);
+		break;
+	case IEEE80211_PARAM_NEIGHBORHOOD_THRSHD:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+
+		param[0] = ieee80211_get_threshold_of_neighborhood_type(ic, (((uint32_t)param[0]) >> 16) & 0xFFFF);
+		break;
+	case IEEE80211_PARAM_NEIGHBORHOOD_TYPE:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_STA)
+			return -EINVAL;
+
+		param[0] = ieee80211_get_type_of_neighborhood(ic);
+		break;
+	case IEEE80211_PARAM_NEIGHBORHOOD_COUNT:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP && vap->iv_opmode != IEEE80211_M_STA)
+			return -EINVAL;
+
+		param[0] = ic->ic_neighbor_count;
+		break;
+	case IEEE80211_PARAM_STA_DFS_STRICT_MODE:
+		if ((vap->iv_opmode != IEEE80211_M_STA)	|| (!(ic->ic_dfs_is_eu_region()))) {
+			return -EINVAL;
+		}
+		param[0] = ic->sta_dfs_info.sta_dfs_strict_mode;
+		break;
+	case IEEE80211_PARAM_STA_DFS_STRICT_MEASUREMENT_IN_CAC:
+		if ((vap->iv_opmode != IEEE80211_M_STA)	|| (!(ic->ic_dfs_is_eu_region()))) {
+			return -EINVAL;
+		}
+		param[0] = ic->sta_dfs_info.sta_dfs_strict_msr_cac;
+		break;
+	case IEEE80211_PARAM_STA_DFS_STRICT_TX_CHAN_CLOSE_TIME:
+		if ((vap->iv_opmode != IEEE80211_M_STA)	|| (!(ic->ic_dfs_is_eu_region()))) {
+			return -EINVAL;
+		}
+		param[0] = ic->sta_dfs_info.sta_dfs_tx_chan_close_time;
+		break;
+	case IEEE80211_PARAM_RADAR_NONOCCUPY_PERIOD:
+		param[0] = (ic->ic_non_occupancy_period / HZ);
+		break;
+	case IEEE80211_PARAM_DFS_CSA_CNT:
+		param[0] = ic->ic_dfs_csa_cnt;
+		break;
+	case IEEE80211_PARAM_IS_WEATHER_CHANNEL:
+		{
+			struct ieee80211_channel *chan = ieee80211_find_channel_by_ieee(ic,
+						(((uint32_t)param[0]) >> 16) & 0xFFFF);
+			if (chan)
+				param[0] = ieee80211_is_on_weather_channel(ic, chan);
+			else
+				return -EINVAL;
+			break;
+		}
+	case IEEE80211_PARAM_VAP_DBG:
+		param[0] = vap->iv_debug;
+		break;
+	case IEEE80211_PARAM_MIN_CAC_PERIOD:
+		param[0] = MIN_CAC_PERIOD;
+		break;
+	case IEEE80211_PARAM_DEVICE_MODE:
+		if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS)
+			param[0] = IEEE80211_DEV_MODE_MBS;
+		else if (ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_RBS)
+			param[0] = IEEE80211_DEV_MODE_RBS;
+		else if (ieee80211_is_repeater(ic))
+			param[0] = IEEE80211_DEV_MODE_REPEATER;
+		else
+			param[0] = IEEE80211_DEV_MODE_UNKNOWN;
+		break;
+	case IEEE80211_PARAM_SYNC_CONFIG:
+		param[0] = (vap->iv_flags_ext2 & IEEE80211_FEXT_SYNC_CONFIG) != 0;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_DBG_LEVEL:
+		param[0] = ic->ic_autochan_dbg_level;
+		break;
+#ifdef CONFIG_NAC_MONITOR
+	case IEEE80211_PARAM_NAC_MONITOR_MODE:
+		{
+			struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+			struct nac_mon_info *info = sp->nac_mon_info;
+			param[0] = (info->nac_monitor_on & 0xff) |
+				(((((int)info->nac_on_time * 100) / (int)info->nac_cycle_time) & 0xff) << 8) |
+				(((int)info->nac_cycle_time & 0xffff) << 16);
+
+		}
+		break;
+#endif
+	case IEEE80211_PARAM_MAX_DEVICE_BW:
+		param[0] = ic->ic_max_system_bw;
+		break;
+	case IEEE80211_PARAM_VOPT:
+		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
+			param[0] = (ic->ic_vopt.state & 0x0f) << 4 | ic->ic_vopt.cur_state;
+		else
+			param[0] = 0;
+		break;
+	case IEEE80211_PARAM_BW_AUTO_SELECT:
+		param[0] = ic->ic_bw_auto_select;
+		break;
+        case IEEE80211_PARAM_DFS_CHANS_AVAILABLE_FOR_DFS_REENTRY:
+		param[0] = ic->ic_is_dfs_chans_available_for_dfs_reentry(ic, vap);
+		break;
+#ifdef CONFIG_QHOP
+	case IEEE80211_PARAM_RBS_MBS_ALLOW_TX_FRMS_IN_CAC:
+		param[0] = ic->rbs_mbs_dfs_info.rbs_mbs_allow_tx_frms_in_cac;
+		break;
+	case IEEE80211_PARAM_RBS_DFS_TX_CHAN_CLOSE_TIME:
+		param[0] = ic->rbs_mbs_dfs_info.rbs_dfs_tx_chan_close_time;
+		break;
+#endif
+	case IEEE80211_PARAM_AUTOCHAN_CCI_INSTNT:
+		param[0] = ic->ic_autochan_ranking_params.cci_instnt_factor;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_ACI_INSTNT:
+		param[0] = ic->ic_autochan_ranking_params.aci_instnt_factor;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_CCI_LONGTERM:
+		param[0] = ic->ic_autochan_ranking_params.cci_longterm_factor;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_ACI_LONGTERM:
+		param[0] = ic->ic_autochan_ranking_params.aci_longterm_factor;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_RANGE_COST:
+		param[0] = ic->ic_autochan_ranking_params.range_factor;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_DFS_COST:
+		param[0] = ic->ic_autochan_ranking_params.dfs_factor;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_MIN_CCI_RSSI:
+		param[0] = ic->ic_autochan_ranking_params.min_cochan_rssi;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_MAXBW_MINBENEFIT:
+		param[0] = ic->ic_autochan_ranking_params.maxbw_minbenefit;
+		break;
+	case IEEE80211_PARAM_AUTOCHAN_DENSE_CCI_SPAN:
+		param[0] = ic->ic_autochan_ranking_params.dense_cci_span;
+		break;
+	case IEEE80211_PARAM_WEATHERCHAN_CAC_ALLOWED:
+		param[0] = ic->ic_weachan_cac_allowed;
+		break;
+	case IEEE80211_PARAM_VAP_TX_AMSDU_11N:
+		param[0] = vap->iv_tx_amsdu_11n;
+		break;
+	case IEEE80211_PARAM_COC_MOVE_TO_NONDFS_CHANNEL:
+		param[0] = ic->ic_coc_move_to_ndfs;
+		break;
+	case IEEE80211_PARAM_80211K_NEIGH_REPORT:
+		param[0] = IEEE80211_COM_NEIGHREPORT_ENABLED(ic)? 1 : 0;
+		break;
+	case IEEE80211_PARAM_80211V_BTM:
+		param[0] = IEEE80211_COM_BTM_ENABLED(ic)? 1 : 0 ;
+		break;
+	case IEEE80211_PARAM_MOBILITY_DOMAIN:
+		param[0] = vap->iv_mdid;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * ieee80211_ioctl_getblockdata - private ioctl returning a block of binary data.
+ *
+ * The sub-command is carried in iwp->flags; the selected structure is copied
+ * wholesale into the user buffer 'extra' and iwp->length is set to the number
+ * of bytes written.
+ *
+ * Returns 0 on success, -EOPNOTSUPP for an unrecognized sub-command.
+ */
+static int
+ieee80211_ioctl_getblockdata(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iw_point *iwp = (struct iw_point *)w;
+	int subcmd = iwp->flags;	/* sub-command id travels in iw_point.flags */
+
+	(void) ic;	/* defensive no-op; ic is referenced by every case below */
+
+	switch (subcmd) {
+	case IEEE80211_PARAM_ASSOC_HISTORY:
+		{
+			/* copy out the device-wide association history record */
+			struct ieee80211_assoc_history	*ah = &ic->ic_assoc_history;
+
+			iwp->length = sizeof(*ah);
+			memcpy(extra, ah, iwp->length);
+		}
+		break;
+	case IEEE80211_PARAM_CSW_RECORD:
+		{
+			/* copy out the channel-switch event record */
+			struct ieee80211req_csw_record * record = &ic->ic_csw_record;
+
+			iwp->length = sizeof(struct ieee80211req_csw_record);
+			memcpy(extra, record, iwp->length);
+		}
+		break;
+	case IEEE80211_PARAM_PWR_SAVE:
+		{
+			/* NOTE(review): ic_pm_state is passed without '&', so it is
+			 * presumably an array member -- confirm against the struct
+			 * ieee80211com definition. */
+			iwp->length = sizeof(ic->ic_pm_state);
+			memcpy(extra, ic->ic_pm_state, sizeof(ic->ic_pm_state));
+		}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/*
+ * returns non-zero if ID is for a system IE (not for app use).
+ *
+ * "System" information elements are the ones generated and managed by the
+ * 802.11 stack itself (SSID, rates, DFS/TPC, RSN, ...); application-supplied
+ * IE buffers are not allowed to contain them (see is_valid_ie_list()).
+ */
+static int
+is_sys_ie(u_int8_t ie_id)
+{
+	/* XXX review this list */
+	switch (ie_id) {
+	case IEEE80211_ELEMID_SSID:
+	case IEEE80211_ELEMID_RATES:
+	case IEEE80211_ELEMID_FHPARMS:
+	case IEEE80211_ELEMID_DSPARMS:
+	case IEEE80211_ELEMID_CFPARMS:
+	case IEEE80211_ELEMID_TIM:
+	case IEEE80211_ELEMID_IBSSPARMS:
+	case IEEE80211_ELEMID_COUNTRY:
+	case IEEE80211_ELEMID_REQINFO:
+	case IEEE80211_ELEMID_CHALLENGE:
+	case IEEE80211_ELEMID_PWRCNSTR:
+	case IEEE80211_ELEMID_PWRCAP:
+	case IEEE80211_ELEMID_TPCREQ:
+	case IEEE80211_ELEMID_TPCREP:
+	case IEEE80211_ELEMID_SUPPCHAN:
+	case IEEE80211_ELEMID_CHANSWITCHANN:
+	case IEEE80211_ELEMID_MEASREQ:
+	case IEEE80211_ELEMID_MEASREP:
+	case IEEE80211_ELEMID_QUIET:
+	case IEEE80211_ELEMID_IBSSDFS:
+	case IEEE80211_ELEMID_ERP:
+	case IEEE80211_ELEMID_RSN:
+	case IEEE80211_ELEMID_XRATES:
+	case IEEE80211_ELEMID_TPC:
+	case IEEE80211_ELEMID_CCKM:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * returns non-zero if the buffer appears to contain a valid IE list.
+ *
+ * An IE list is a sequence of TLV elements: a fixed header (id, len)
+ * followed by 'len' payload bytes.  The list is considered valid only if
+ * the elements consume the buffer exactly (buf_len reaches 0).  When
+ * exclude_sys_ies is set, any stack-managed IE (see is_sys_ie()) also
+ * makes the list invalid.
+ */
+static int
+is_valid_ie_list(u_int32_t buf_len, void *buf, int exclude_sys_ies)
+{
+	struct ieee80211_ie *ie = (struct ieee80211_ie *)buf;
+
+	while (buf_len >= sizeof(*ie)) {
+		int ie_elem_len = sizeof(*ie) + ie->len;	/* header + payload */
+		if (buf_len < ie_elem_len)	/* element would overrun the buffer */
+			break;
+		if (exclude_sys_ies && is_sys_ie(ie->id))
+			break;
+		buf_len -= ie_elem_len;
+		ie = (struct ieee80211_ie *)(ie->info + ie->len);	/* step past payload */
+	}
+
+	return (buf_len == 0) ? 1 : 0;
+}
+
+/*
+ * ieee80211_ioctl_setoptie - install an application-supplied optional IE
+ * buffer on a STA-mode VAP (typically the WPA/RSN IE from a supplicant).
+ *
+ * The buffer replaces any previously installed one.  Returns 0 on success,
+ * -EINVAL if the VAP is not in STA mode or the buffer is not a well-formed
+ * IE list, -ENOMEM on allocation failure.
+ */
+static int
+ieee80211_ioctl_setoptie(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	void *ie;
+
+	/*
+	 * NB: Doing this for ap operation could be useful (e.g. for
+	 *     WPA and/or WME) except that it typically is worthless
+	 *     without being able to intervene when processing
+	 *     association response frames--so disallow it for now.
+	 */
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -EINVAL;
+	if (! is_valid_ie_list(wri->length, extra, 0))
+		return -EINVAL;
+	/* NB: wri->length is validated by the wireless extensions code */
+	MALLOC(ie, void *, wri->length, M_DEVBUF, M_WAITOK);
+	if (ie == NULL)
+		return -ENOMEM;
+	memcpy(ie, extra, wri->length);
+	/* swap in the new buffer, releasing the old one if present */
+	if (vap->iv_opt_ie != NULL)
+		FREE(vap->iv_opt_ie, M_DEVBUF);
+	vap->iv_opt_ie = ie;
+	vap->iv_opt_ie_len = wri->length;
+
+	/* let the stack pick up any cipher/key information carried in the IEs */
+	ieee80211_parse_cipher_key(vap, vap->iv_opt_ie, vap->iv_opt_ie_len);
+
+	return 0;
+}
+
+/*
+ * ieee80211_ioctl_getoptie - return the currently installed optional IE
+ * buffer (see ieee80211_ioctl_setoptie).  Sets wri->length to 0 when no
+ * buffer is installed.  Always returns 0.
+ */
+static int
+ieee80211_ioctl_getoptie(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	if (vap->iv_opt_ie == NULL) {
+		wri->length = 0;
+		return 0;
+	}
+	wri->length = vap->iv_opt_ie_len;
+	memcpy(extra, vap->iv_opt_ie, vap->iv_opt_ie_len);
+	return 0;
+}
+
+/* the following functions are used by the set/get appiebuf functions */
+
+/*
+ * add_app_ie - install an application IE buffer for one management frame
+ * type (indexed by frame_type_index), replacing any existing buffer.
+ *
+ * Returns 0 on success, -EINVAL if the buffer is not a valid IE list or
+ * contains system IEs, -ENOMEM on allocation failure.
+ */
+static int
+add_app_ie(unsigned int frame_type_index, struct ieee80211vap *vap,
+	struct ieee80211req_getset_appiebuf *iebuf)
+{
+	struct ieee80211_ie *ie;
+
+	/* system IEs are stack-managed and may not be injected by applications */
+	if (! is_valid_ie_list(iebuf->app_buflen, iebuf->app_buf, 1))
+		return -EINVAL;
+	/* NB: data.length is validated by the wireless extensions code */
+	MALLOC(ie, struct ieee80211_ie *, iebuf->app_buflen, M_DEVBUF, M_WAITOK);
+	if (ie == NULL)
+		return -ENOMEM;
+
+	memcpy(ie, iebuf->app_buf, iebuf->app_buflen);
+	/* swap in the new buffer, releasing the old one if present */
+	if (vap->app_ie[frame_type_index].ie != NULL)
+		FREE(vap->app_ie[frame_type_index].ie, M_DEVBUF);
+	vap->app_ie[frame_type_index].ie = ie;
+	vap->app_ie[frame_type_index].length = iebuf->app_buflen;
+
+	return 0;
+}
+
+/*
+ * Free and clear the application IE attached to the given frame type
+ * slot, if present.  Always returns 0 (idempotent).
+ */
+static int
+remove_app_ie(unsigned int frame_type_index, struct ieee80211vap *vap)
+{
+	struct ieee80211_app_ie_t *app_ie = &vap->app_ie[frame_type_index];
+	if (app_ie->ie != NULL) {
+		FREE(app_ie->ie, M_DEVBUF);
+		app_ie->ie = NULL;
+		app_ie->length = 0;
+	}
+	return 0;
+}
+
+/*
+ * Copy the application IE for the given frame type into the caller's
+ * iebuf.  Fails with -EINVAL if the caller's buffer (app_buflen) is
+ * smaller than the stored IE; on success app_buflen is updated to the
+ * actual stored length.
+ */
+static int
+get_app_ie(unsigned int frame_type_index, struct ieee80211vap *vap,
+	struct ieee80211req_getset_appiebuf *iebuf)
+{
+	struct ieee80211_app_ie_t *app_ie = &vap->app_ie[frame_type_index];
+	if (iebuf->app_buflen < app_ie->length)
+		return -EINVAL;
+
+	iebuf->app_buflen = app_ie->length;
+	memcpy(iebuf->app_buf, app_ie->ie, app_ie->length);
+	return 0;
+}
+
+/*
+ * Set (or clear, when app_buflen == 0) an application IE for a given
+ * management frame type.  Frame types are only accepted in the matching
+ * operating mode (beacon/probe-resp/assoc-resp on AP; probe-req/
+ * assoc-req/TDLS on STA).  Two special cases bypass the generic
+ * app_ie[] table: TDLS action frames are sent immediately, and the
+ * Quantenna pairing IE is stored in vap->qtn_pairing_ie.
+ * Returns 0 or a negative errno.
+ */
+static int
+ieee80211_ioctl_setappiebuf(struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211req_getset_appiebuf *iebuf =
+		(struct ieee80211req_getset_appiebuf *)extra;
+	struct ieee80211_ie *ie;
+	enum ieee80211_opmode chk_opmode;
+	int iebuf_len;
+	int rc = 0;
+
+	/* payload length must exactly match the declared app_buflen */
+	iebuf_len = data->length - sizeof(struct ieee80211req_getset_appiebuf);
+	if ( iebuf_len < 0 || iebuf_len != iebuf->app_buflen ||
+		 iebuf->app_buflen > IEEE80211_APPIE_MAX )
+		return -EINVAL;
+
+	/* map the frame type to the operating mode allowed to set it */
+	switch (iebuf->app_frmtype) {
+	case IEEE80211_APPIE_FRAME_BEACON:
+	case IEEE80211_APPIE_FRAME_PROBE_RESP:
+	case IEEE80211_APPIE_FRAME_ASSOC_RESP:
+		chk_opmode = IEEE80211_M_HOSTAP;
+		break;
+	case IEEE80211_APPIE_FRAME_PROBE_REQ:
+	case IEEE80211_APPIE_FRAME_ASSOC_REQ:
+	case IEEE80211_APPIE_FRAME_TDLS_ACT:
+		chk_opmode = IEEE80211_M_STA;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (vap->iv_opmode != chk_opmode)
+		return -EINVAL;
+
+	/* TDLS action frames are transmitted immediately, nothing stored */
+	if (iebuf->app_frmtype == IEEE80211_APPIE_FRAME_TDLS_ACT) {
+		rc = ieee80211_tdls_send_action_frame(dev,
+			(struct ieee80211_tdls_action_data *)iebuf->app_buf);
+		return rc;
+	}
+
+	if (iebuf->app_buflen) {
+
+		/* Quantenna pairing IE lives in its own slot, not app_ie[] */
+		if ((iebuf->app_frmtype == IEEE80211_APPIE_FRAME_ASSOC_REQ ||
+				iebuf->app_frmtype == IEEE80211_APPIE_FRAME_ASSOC_RESP) &&
+				iebuf->flags == F_QTN_IEEE80211_PAIRING_IE) {
+
+			MALLOC(ie, struct ieee80211_ie *, iebuf->app_buflen, M_DEVBUF, M_WAITOK);
+
+			if (ie == NULL)
+				return -ENOMEM;
+
+			memcpy(ie, iebuf->app_buf, iebuf->app_buflen);
+
+			if (vap->qtn_pairing_ie.ie != NULL)
+				FREE(vap->qtn_pairing_ie.ie, M_DEVBUF);
+			vap->qtn_pairing_ie.ie = ie;
+			vap->qtn_pairing_ie.length = iebuf->app_buflen;
+
+			return 0;
+
+		}
+
+		rc = add_app_ie(iebuf->app_frmtype, vap, iebuf);
+	} else {
+		/* zero length clears: pairing IE slot or generic slot */
+		if ((iebuf->app_frmtype == IEEE80211_APPIE_FRAME_ASSOC_REQ ||
+				iebuf->app_frmtype == IEEE80211_APPIE_FRAME_ASSOC_RESP) &&
+				iebuf->flags == F_QTN_IEEE80211_PAIRING_IE) {
+			if (vap->qtn_pairing_ie.ie != NULL) {
+				FREE(vap->qtn_pairing_ie.ie, M_DEVBUF);
+				vap->qtn_pairing_ie.ie = NULL;
+				vap->qtn_pairing_ie.length = 0;
+			}
+
+			return 0;
+		}
+
+		rc = remove_app_ie(iebuf->app_frmtype, vap);
+	}
+	/* a changed beacon IE must be pushed into the live beacon */
+	if ((iebuf->app_frmtype == IEEE80211_APPIE_FRAME_BEACON) && (rc == 0)) {
+		struct ieee80211com *ic = vap->iv_ic;
+
+		vap->iv_flags_ext |= IEEE80211_FEXT_APPIE_UPDATE;
+		if ((vap->iv_opmode == IEEE80211_M_HOSTAP) &&
+			(vap->iv_state == IEEE80211_S_RUN)) {
+
+			ic->ic_beacon_update(vap);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Schedule a CCA (clear channel assessment) measurement on the given
+ * channel, one second from the current TSF.  All running hostap vaps
+ * get a beacon update so clients learn of the scheduled measurement,
+ * then the hardware measurement is armed.
+ * NOTE(review): copy_from_user failure returns -EINVAL rather than the
+ * conventional -EFAULT.
+ */
+static int
+ieee80211_ioctl_startcca(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+
+{
+	struct qtn_cca_args *ccaval = (struct qtn_cca_args *)extra;
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *chan = NULL;
+	uint64_t tsf = 0;
+
+	if (copy_from_user(ccaval, wri->pointer, sizeof(struct qtn_cca_args))) {
+		return -EINVAL;
+	}
+
+	chan = findchannel(ic, ccaval->cca_channel, ic->ic_des_mode);
+	if (chan == NULL) {
+		printk(KERN_ERR "Invalid channel %d ? \n", ccaval->cca_channel);
+		return -EINVAL;
+	}
+
+	ic->ic_get_tsf(&tsf);
+	tsf = tsf + IEEE80211_SEC_TO_USEC(1); /* after 1 second */
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ROAM,
+		"CCA channel change scheduled at tsf %016llX \n", tsf);
+
+	ic->ic_cca_start_tsf = tsf;
+	ic->ic_cca_duration_tu = IEEE80211_MS_TO_TU(ccaval->duration);
+	ic->ic_cca_chan = chan->ic_ieee;
+
+	ic->ic_flags |= IEEE80211_F_CCA;
+	ic->ic_cca_token++;
+
+	/* NOTE(review): vap is reused here as the list iterator, clobbering
+	 * the vap derived from dev above; nothing after the loop uses it. */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			continue;
+
+		if ((vap->iv_state != IEEE80211_S_RUN) && (vap->iv_state != IEEE80211_S_SCAN))
+			continue;
+		ic->ic_beacon_update(vap);
+	}
+
+	ic->ic_set_start_cca_measurement(ic, chan, tsf, ccaval->duration);
+	wri->length = sizeof(struct qtn_cca_args);
+
+	return 0;
+}
+
+/*
+ * Fetch the application IE previously set for a frame type.  The request
+ * header is copied from user space; app_buflen is clamped to the space
+ * the caller actually provided, then get_app_ie() fills the buffer.
+ * On return data->length reflects header + IE payload.
+ */
+static int
+ieee80211_ioctl_getappiebuf(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211req_getset_appiebuf *iebuf =
+		(struct ieee80211req_getset_appiebuf *)extra;
+	int max_iebuf_len;
+	int rc = 0;
+
+	max_iebuf_len = data->length - sizeof(struct ieee80211req_getset_appiebuf);
+	if (max_iebuf_len < 0)
+		return -EINVAL;
+	if (copy_from_user(iebuf, data->pointer, sizeof(struct ieee80211req_getset_appiebuf)))
+		return -EFAULT;
+	/* never read past what the caller's buffer can hold */
+	if (iebuf->app_buflen > max_iebuf_len)
+		iebuf->app_buflen = max_iebuf_len;
+
+	/* AP-only frame types on AP vaps, STA-only on STA vaps */
+	switch (iebuf->app_frmtype) {
+	case IEEE80211_APPIE_FRAME_BEACON:
+	case IEEE80211_APPIE_FRAME_PROBE_RESP:
+	case IEEE80211_APPIE_FRAME_ASSOC_RESP:
+		if (vap->iv_opmode == IEEE80211_M_STA)
+			return -EINVAL;
+		break;
+	case IEEE80211_APPIE_FRAME_PROBE_REQ:
+	case IEEE80211_APPIE_FRAME_ASSOC_REQ:
+		if (vap->iv_opmode != IEEE80211_M_STA)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = get_app_ie(iebuf->app_frmtype, vap, iebuf);
+
+	data->length = sizeof(struct ieee80211req_getset_appiebuf) + iebuf->app_buflen;
+
+	return rc;
+}
+
+
+/*
+ * Debug helper: hex-dump key material to the kernel log, four bytes per
+ * line.  Compiled to a no-op body unless IEEE80211_DEBUG is defined and
+ * the vap has debug messaging enabled.  'level' non-zero shows the raw
+ * bytes; zero prints "[REMOVED]" to avoid leaking key data.
+ */
+static void
+wpa_hexdump_key(struct ieee80211vap *vap, int level,
+	const char *title, const u8 *buf, size_t len)
+{
+#ifdef IEEE80211_DEBUG
+	int show = level;
+	size_t i;
+
+	if (!ieee80211_msg_debug(vap)) {
+		return;
+	}
+
+	printk("\n%s - hexdump(len=%lu):", title, (unsigned long) len);
+	if (buf == NULL) {
+		printk(" [NULL]");
+	} else if (show) {
+        	for (i = 0; i < len; i++)
+			printk("%s%02x", i%4==0? "\n":" ", buf[i]);
+	} else {
+        	printk(" [REMOVED]");
+	}
+	printk("\n");
+#endif
+}
+
+/*
+ * Set the management-frame filter mask for this vap.  Rejects masks with
+ * bits outside IEEE80211_FILTER_TYPE_ALL.  (The NULL check on extra is
+ * evaluated before app_filter is dereferenced, so the cast is safe.)
+ */
+static int
+ieee80211_ioctl_setfilter(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211req_set_filter *app_filter = (struct ieee80211req_set_filter *)extra;
+
+	if ((extra == NULL) || (app_filter->app_filterype & ~IEEE80211_FILTER_TYPE_ALL))
+		return -EINVAL;
+
+	vap->app_filter = app_filter->app_filterype;
+
+	return 0;
+}
+
+/*
+ * Install a crypto key (hostapd/wpa_supplicant SETKEY path).  Key index
+ * IEEE80211_KEYIX_NONE means a pairwise (unicast) key and maps to slot 0;
+ * otherwise the index must be a valid WEP-style group key slot.  Group
+ * keys get a destination address appropriate to the opmode.  The key is
+ * mirrored into the node's ni_ucastkey (and WDS peer key) for software
+ * crypto before being pushed to the driver via iv_key_set.
+ * Returns the driver's result or a negative errno.
+ */
+static int
+ieee80211_ioctl_setkey(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211req_key *ik = (struct ieee80211req_key *)extra;
+	struct ieee80211_node *ni;
+	struct ieee80211_key *wk;
+	u_int8_t kid = ik->ik_keyix;
+	int i;
+	int error;
+
+	if ((ik->ik_keylen > sizeof(ik->ik_keydata)) ||
+			(ik->ik_keylen > sizeof(wk->wk_key))) {
+		return -E2BIG;
+	}
+
+	if(ik->ik_type == IEEE80211_CIPHER_AES_CMAC) {
+		// 802.11w CMAC / IGTK ignore for now.
+		return 0;
+	}
+
+	if (kid == IEEE80211_KEYIX_NONE) {
+		/* Unicast key */
+		kid = 0;
+	} else if (kid >= IEEE80211_WEP_NKID) {
+		return -EINVAL;
+	}
+
+	/* Group keys */
+	if (((ik->ik_flags & IEEE80211_KEY_XMIT) == 0) ||
+			(ik->ik_flags & IEEE80211_KEY_GROUP) ||
+			IEEE80211_IS_MULTICAST(ik->ik_macaddr)) {
+
+		ik->ik_flags |= IEEE80211_KEY_GROUP;
+		/* choose the group key's address by role: broadcast on STA,
+		 * VLAN-derived for VLAN group keys, else the BSS address */
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			memset(ik->ik_macaddr, 0xff, IEEE80211_ADDR_LEN);
+		} else if(ik->ik_flags & IEEE80211_KEY_VLANGROUP) {
+			qtn_vlan_gen_group_addr(ik->ik_macaddr, ik->ik_vlan, vap->iv_dev->dev_id);
+		} else {
+			if (NULL == vap->iv_bss)
+				return -EINVAL;
+			IEEE80211_ADDR_COPY(ik->ik_macaddr, vap->iv_bss->ni_macaddr);
+		}
+	}
+
+	/* wk must be set to ni->ni_ucastkey for sw crypto */
+	wk = &vap->iv_nw_keys[kid];
+	wk->wk_ciphertype = ik->ik_type;
+	wk->wk_keylen = ik->ik_keylen;
+	wk->wk_flags = ik->ik_flags;
+	wk->wk_keyix = kid;
+	/* seed every TID's replay counter with the supplied RSC */
+	for (i = 0; i < WME_NUM_TID; i++) {
+		wk->wk_keyrsc[i] = ik->ik_keyrsc;
+	}
+	wk->wk_keytsc = 0;
+	memset(wk->wk_key, 0, sizeof(wk->wk_key));
+	memcpy(wk->wk_key, ik->ik_keydata, ik->ik_keylen);
+
+	if (!(ik->ik_flags & IEEE80211_KEY_GROUP)) {
+		/* pairwise: mirror into the peer node's unicast key */
+		ni = ieee80211_find_node(&vap->iv_ic->ic_sta, ik->ik_macaddr);
+		if (ni) {
+			memcpy(&ni->ni_ucastkey, wk, sizeof(ni->ni_ucastkey));
+			ieee80211_free_node(ni);
+		}
+		if (vap->iv_opmode == IEEE80211_M_WDS)
+			memcpy(&vap->iv_wds_peer_key, wk, sizeof(vap->iv_wds_peer_key));
+	}
+
+	/* level 0: key bytes are redacted in the debug dump */
+	wpa_hexdump_key(vap, 0,
+			(ik->ik_flags & IEEE80211_KEY_GROUP) ? "GTK" : "PTK",
+			ik->ik_keydata, ik->ik_keylen);
+
+	ieee80211_key_update_begin(vap);
+	error = vap->iv_key_set(vap, wk, ik->ik_macaddr);
+	ieee80211_key_update_end(vap);
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+	/* band-steering: report connect completion once the PTK is in place */
+	if ((vap->bsa_status == BSA_STATUS_ACTIVE) && !(ik->ik_flags & IEEE80211_KEY_GROUP)) {
+		ni = ieee80211_find_node(&vap->iv_ic->ic_sta, ik->ik_macaddr);
+		if (ni && (ni->ni_associd != 0)) {
+			ieee80211_bsa_connect_complete_event_send(vap, ni);
+			ieee80211_free_node(ni);
+		}
+	}
+#endif
+
+	return error;
+}
+
+/*
+ * Return key information to user space.  Two implementations are kept:
+ * the default build (#ifndef IEEE80211_UNUSED_CRYPTO_COMMANDS, i.e. the
+ * FIRST branch) queries the replay counter from qdrv and only fills key
+ * bytes for WDS vaps; the legacy #else branch is the classic net80211
+ * lookup by node/slot with CAP_NET_ADMIN gating on key material.
+ * NOTE(review): the first branch returns the raw copy_to_user() result
+ * (bytes-not-copied, positive) on fault, while the second maps it to
+ * -EFAULT — callers see inconsistent error values between builds.
+ */
+static int
+ieee80211_ioctl_getkey(struct net_device *dev, struct iwreq *iwr)
+{
+#ifndef IEEE80211_UNUSED_CRYPTO_COMMANDS
+	/*
+	 * This code is put under conditional section as this
+	 * may introduce issues with IOT devices and need
+	 * more testing with IOT devices.
+	 */
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211req_key ik;
+	int keyrsc;
+	struct ieee80211_key *wk;
+
+	if (iwr->u.data.length != sizeof(ik))
+		return -EINVAL;
+
+	if (copy_from_user(&ik, iwr->u.data.pointer, sizeof(ik)) != 0)
+		return -EFAULT;
+
+	/* fetch the current replay counter from the driver core */
+	ieee80211_param_from_qdrv(vap, IEEE80211_IOCTL_GETKEY, &keyrsc, NULL, 0);
+
+	ik.ik_keyrsc = (uint64_t)keyrsc;
+	ik.ik_keytsc = (uint64_t)keyrsc;
+
+	/* only WDS vaps report actual key bytes in this build */
+	if (vap->iv_opmode == IEEE80211_M_WDS) {
+		if (ik.ik_keyix >= IEEE80211_WEP_NKID)
+			return -EINVAL;
+
+		wk = &vap->iv_nw_keys[ik.ik_keyix];
+		ik.ik_keylen = wk->wk_keylen;
+
+		memcpy(ik.ik_keydata, wk->wk_key, wk->wk_keylen);
+	}
+
+	return (copy_to_user(iwr->u.data.pointer, &ik, sizeof(ik)));
+#else
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211req_key ik;
+	struct ieee80211_key *wk;
+	const struct ieee80211_cipher *cip;
+	u_int8_t kid;
+
+	if (iwr->u.data.length != sizeof(ik))
+		return -EINVAL;
+	if (copy_from_user(&ik, iwr->u.data.pointer, sizeof(ik)))
+		return -EFAULT;
+	kid = ik.ik_keyix;
+	if (kid == IEEE80211_KEYIX_NONE) {
+		/* pairwise key: look up by station address */
+		ni = ieee80211_find_node(&ic->ic_sta, ik.ik_macaddr);
+		if (ni == NULL)
+			return -EINVAL;
+		wk = &ni->ni_ucastkey;
+	} else {
+		if (kid >= IEEE80211_WEP_NKID)
+			return -EINVAL;
+		wk = &vap->iv_nw_keys[kid];
+		if (NULL == vap->iv_bss)
+			return -EINVAL;
+		IEEE80211_ADDR_COPY(&ik.ik_macaddr, vap->iv_bss->ni_macaddr);
+		ni = NULL;
+	}
+	cip = wk->wk_cipher;
+	ik.ik_type = cip->ic_cipher;
+	ik.ik_keylen = wk->wk_keylen;
+	ik.ik_flags = wk->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV);
+	if (wk->wk_keyix == vap->iv_def_txkey)
+		ik.ik_flags |= IEEE80211_KEY_DEFAULT;
+	if (capable(CAP_NET_ADMIN)) {
+		/* NB: only root can read key data */
+		ik.ik_keyrsc = wk->wk_keyrsc[0];
+		ik.ik_keytsc = wk->wk_keytsc;
+		memcpy(ik.ik_keydata, wk->wk_key, wk->wk_keylen);
+		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP) {
+			/* TKIP appends the MIC keys after the cipher key */
+			memcpy(ik.ik_keydata+wk->wk_keylen,
+				wk->wk_key + IEEE80211_KEYBUF_SIZE,
+				IEEE80211_MICBUF_SIZE);
+			ik.ik_keylen += IEEE80211_MICBUF_SIZE;
+		}
+	} else {
+		ik.ik_keyrsc = 0;
+		ik.ik_keytsc = 0;
+		memset(ik.ik_keydata, 0, sizeof(ik.ik_keydata));
+	}
+	if (ni != NULL)
+		ieee80211_free_node(ni);
+	return (copy_to_user(iwr->u.data.pointer, &ik, sizeof(ik)) ? -EFAULT : 0);
+#endif /* IEEE80211_UNUSED_CRYPTO_COMMANDS */
+}
+
+/*
+ * Delete a crypto key.  IEEE80211_KEYIX_NONE selects the unicast key
+ * (slot 0).  A multicast destination marks a group-key delete and the
+ * address is rewritten to the BSS address before being handed to the
+ * driver.  The in-core key slot is wiped, mirrored node/WDS copies are
+ * cleared for pairwise keys, then iv_key_delete notifies the driver.
+ */
+static int
+ieee80211_ioctl_delkey(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211req_del_key *dk = (struct ieee80211req_del_key *)extra;
+	struct ieee80211_node *ni;
+	struct ieee80211_key *wk;
+	uint8_t kid = dk->idk_keyix;
+	uint32_t is_group_key = 0;
+	int error;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_WPA | IEEE80211_MSG_DEBUG,
+		"[%s] deleting key wepkid=%d bcast=%u\n",
+		ether_sprintf(dk->idk_macaddr),
+		dk->idk_keyix,
+		IEEE80211_IS_MULTICAST(dk->idk_macaddr));
+
+	if (kid == IEEE80211_KEYIX_NONE) {
+		/* Unicast key */
+		kid = 0;
+	} else if (kid >= IEEE80211_WEP_NKID) {
+		return -EINVAL;
+	}
+
+	/*
+	 * hostapd sends a delete request for each of the four WEP global keys
+	 * during initialisation.  WEP is not supported and each vap node entry has its
+	 * own global key, so the same key will be deleted four times.
+	 */
+	if (IEEE80211_IS_MULTICAST(dk->idk_macaddr)) {
+		is_group_key = 1;
+		if (NULL == vap->iv_bss)
+			return -EINVAL;
+		IEEE80211_ADDR_COPY(dk->idk_macaddr, vap->iv_bss->ni_macaddr);
+	}
+
+	/* wk must be set to ni->ni_ucastkey for sw crypto */
+	wk = &vap->iv_nw_keys[kid];
+	wk->wk_ciphertype = 0;
+	wk->wk_keytsc = 0;
+	wk->wk_keylen = sizeof(wk->wk_key);
+	memset(wk->wk_key, 0, sizeof(wk->wk_key));
+
+	if (!is_group_key) {
+		/* pairwise delete: clear the mirrored node copy too */
+		ni = ieee80211_find_node(&vap->iv_ic->ic_sta, dk->idk_macaddr);
+		if (ni) {
+			memcpy(&ni->ni_ucastkey, wk, sizeof(ni->ni_ucastkey));
+			ieee80211_free_node(ni);
+		}
+		if (vap->iv_opmode == IEEE80211_M_WDS) {
+			vap->iv_wds_peer_key.wk_ciphertype = 0;
+			vap->iv_wds_peer_key.wk_keytsc = 0;
+			vap->iv_wds_peer_key.wk_keylen = 0;
+			ieee80211_crypto_resetkey(vap, &vap->iv_wds_peer_key,
+				IEEE80211_KEYIX_NONE);
+		}
+	}
+
+	ieee80211_key_update_begin(vap);
+	error = vap->iv_key_delete(vap, wk, dk->idk_macaddr);
+	ieee80211_key_update_end(vap);
+
+	return error;
+}
+
+/* Search context for scan-cache lookups by MAC (and optionally SSID). */
+struct scanlookup {		/* XXX: right place for declaration? */
+	const u_int8_t *mac;	/* MAC address to match */
+	int esslen;		/* SSID length; 0 = match any SSID */
+	const char *essid;	/* SSID bytes to match when esslen != 0 */
+	const struct ieee80211_scan_entry *se;	/* out: matching entry, or NULL */
+};
+
+/*
+ * Match mac address and any ssid.
+ * Scan-cache iterator callback: records a scan entry whose MAC (and,
+ * when esslen != 0, SSID) matches the lookup context.  Always returns
+ * 0, so iteration continues and the LAST matching entry wins.
+ */
+static int
+mlmelookup(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct scanlookup *look = arg;
+
+	if (!IEEE80211_ADDR_EQ(look->mac, se->se_macaddr))
+		return 0;
+	if (look->esslen != 0) {
+		/* se_ssid[1] is the IE length byte; bytes start at offset 2 */
+		if (se->se_ssid[1] != look->esslen)
+			return 0;
+		if (memcmp(look->essid, se->se_ssid + 2, look->esslen))
+			return 0;
+	}
+	look->se = se;
+
+	return 0;
+}
+
+/*
+ * Set operational Bridge Mode for:
+ * - an AP when the config is changed
+ * - a station when the config is changed or when associating with a new AP
+ * In Bridge Mode, eligible frames (non-1X, unicast data frames) are
+ * transmitted using the 4-address header format.
+ *
+ * Returns 1 if the operational mode actually changed, 0 otherwise.
+ * On a config-driven change a reassociation is forced so both ends agree.
+ */
+u_int8_t ieee80211_bridgemode_set(struct ieee80211vap *vap, u_int8_t config_change)
+{
+	u_int8_t op_bridgemode;
+
+	/* AP: bridge unless explicitly disabled; STA: additionally requires
+	 * the associated AP to advertise bridge-mode capability */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		op_bridgemode = !(vap->iv_qtn_flags & IEEE80211_QTN_BRIDGEMODE_DISABLED);
+	} else {
+		op_bridgemode = !(vap->iv_qtn_flags & IEEE80211_QTN_BRIDGEMODE_DISABLED) &&
+						(vap->iv_qtn_ap_cap & IEEE80211_QTN_BRIDGEMODE);
+	}
+
+	/* Has bridge mode changed? */
+	if ((op_bridgemode && !(vap->iv_flags_ext & IEEE80211_FEXT_WDS)) ||
+		(!op_bridgemode && (vap->iv_flags_ext & IEEE80211_FEXT_WDS))) {
+
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+			"%s: %s Bridge Mode\n", __func__, op_bridgemode ? "Enabling" : "Disabling");
+
+		if (op_bridgemode) {
+			vap->iv_flags_ext |= IEEE80211_FEXT_WDS;
+		} else {
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_WDS;
+		}
+
+		/* Notify the MuC */
+		ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_WDS, op_bridgemode, NULL, 0);
+
+		/*
+		 * If the change was caused by a configuration change, force
+		 * reassociation to ensure that everyone is in sync.
+		 */
+		if (config_change) {
+			ieee80211_wireless_reassoc(vap, 0, 0);
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Timer callback: attempt to quickly rejoin the BSS recorded in
+ * iv_sta_fast_rejoin_bssid after a disconnect.  Looks the BSSID up in
+ * the scan cache (matching the desired SSID when one is configured)
+ * and calls ieee80211_sta_join on a hit.  Bails out if the vap has
+ * already progressed to AUTH or beyond.
+ */
+void ieee80211_sta_fast_rejoin(unsigned long arg)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct scanlookup lookup;
+
+	if (vap->iv_state >= IEEE80211_S_AUTH) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH, "state(%d) not expected\n",
+				vap->iv_state);
+		return;
+	}
+
+	lookup.se = NULL;
+	lookup.mac = vap->iv_sta_fast_rejoin_bssid;
+	if (vap->iv_des_nssid != 0) {
+		lookup.esslen = vap->iv_des_ssid[0].len;
+		lookup.essid = (char *)vap->iv_des_ssid[0].ssid;
+	} else {
+		/* no desired SSID configured: match on BSSID alone */
+		lookup.esslen = 0;
+		lookup.essid = (char *)"";
+	}
+	ieee80211_scan_iterate(ic, mlmelookup, &lookup);
+	if (lookup.se != NULL) {
+		vap->iv_nsdone = 0;
+		vap->iv_nsparams.result = 0;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH, "fast rejoin bssid "MACSTR"\n",
+				MAC2STR(vap->iv_sta_fast_rejoin_bssid));
+		if (!ieee80211_sta_join(vap, lookup.se)) {
+			IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH, "fast rejoin bssid "MACSTR" failed\n",
+					MAC2STR(vap->iv_sta_fast_rejoin_bssid));
+		}
+	} else {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH, "fast rejoin bssid "MACSTR" not found\n",
+				MAC2STR(vap->iv_sta_fast_rejoin_bssid));
+	}
+}
+
+/*
+ * Periodic timer callback: maintain a moving-average RSSI (EWMA over
+ * QTN_RSSI_SAMPLE_TH samples) for non-VHT Intel client nodes, used by
+ * the block-ack setup detection logic.  Re-arms itself every
+ * QTN_AMPDU_DETECT_PERIOD seconds.
+ * NOTE(review): walks nt_node without taking the node-table lock —
+ * presumably safe in this driver's timer context; confirm.
+ */
+void ieee80211_ba_setup_detect_rssi(unsigned long arg)
+{
+	struct ieee80211com *ic = (struct ieee80211com *) arg;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni;
+	int32_t rssi;
+
+	TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		if (ni && (ni->ni_qtn_flags & QTN_IS_INTEL_NODE)
+				&& !IEEE80211_NODE_IS_VHT(ni)) {
+			rssi = ni->ni_shared_stats->rx[STATS_SU].last_rssi_dbm[NUM_ANT];
+			if (ni->rssi_avg_dbm) {
+				/* exponential moving average over the sample window */
+				ni->rssi_avg_dbm = (ni->rssi_avg_dbm *
+							(QTN_RSSI_SAMPLE_TH - 1) + rssi)
+									/ QTN_RSSI_SAMPLE_TH;
+			} else {
+				/* first sample seeds the average */
+				ni->rssi_avg_dbm = rssi;
+			}
+		}
+	}
+
+	mod_timer(&ic->ic_ba_setup_detect, jiffies + HZ * QTN_AMPDU_DETECT_PERIOD);
+}
+
+/*
+ * Enable or disable the BA-setup RSSI detection timer.  Enabling
+ * (re)initializes and starts the timer firing immediately; disabling
+ * deletes it.  Always returns 0.
+ * NOTE(review): disable calls del_timer on a timer that may never have
+ * been init_timer'd, and repeated enables re-init a live timer — assumed
+ * callers enforce ordering; verify.
+ */
+static int ieee80211_ba_setup_detect_set(struct ieee80211vap *vap, int enable)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (enable) {
+		init_timer(&ic->ic_ba_setup_detect);
+		ic->ic_ba_setup_detect.function = ieee80211_ba_setup_detect_rssi;
+		ic->ic_ba_setup_detect.data = (unsigned long) ic;
+		ic->ic_ba_setup_detect.expires = jiffies;
+		add_timer(&ic->ic_ba_setup_detect);
+	} else {
+		del_timer(&ic->ic_ba_setup_detect);
+	}
+
+	return 0;
+}
+
+/*
+ * MLME operation ioctl: associate (STA), disassoc/deauth (STA or AP,
+ * broadcast address = all stations on this vap), authorize/unauthorize
+ * a station, or clear a station's statistics.  No-op (returns 0) in
+ * Quantenna monitor mode.  Returns 0 or a negative errno.
+ */
+static int
+ieee80211_ioctl_setmlme(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_mlme *mlme = (struct ieee80211req_mlme *)extra;
+	struct ieee80211_node *ni;
+
+	if (!IS_UP(dev))
+		return -EINVAL;
+
+	if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR)
+		return 0;
+
+	switch (mlme->im_op) {
+	case IEEE80211_MLME_ASSOC:
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			struct scanlookup lookup;
+
+			lookup.se = NULL;
+			lookup.mac = mlme->im_macaddr;
+			/* XXX use revised api w/ explicit ssid */
+			if (vap->iv_des_nssid != 0) {
+				lookup.esslen = vap->iv_des_ssid[0].len;
+				lookup.essid = (char *)vap->iv_des_ssid[0].ssid;
+			} else {
+				lookup.esslen = 0;
+				lookup.essid = (char *)"";
+			}
+			ieee80211_scan_iterate(ic, mlmelookup, &lookup);
+			if (lookup.se != NULL) {
+				vap->iv_nsdone = 0;
+				vap->iv_nsparams.result = 0;
+				/* busy-wait (with reschedule) until the join
+				 * state machine reports completion */
+				if (ieee80211_sta_join(vap, lookup.se))
+					while (!vap->iv_nsdone)
+						IEEE80211_RESCHEDULE();
+				if (vap->iv_nsparams.result == 0)
+					return 0;
+			}
+		}
+		return -EINVAL;
+	case IEEE80211_MLME_DEBUG_CLEAR:
+	case IEEE80211_MLME_DISASSOC:
+	case IEEE80211_MLME_DEAUTH:
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			/* XXX not quite right */
+			ieee80211_new_state(vap, IEEE80211_S_INIT,
+				mlme->im_reason);
+			break;
+		case IEEE80211_M_HOSTAP:
+			/* NB: the broadcast address means do 'em all */
+			IEEE80211_NODE_LOCK_BH(&ic->ic_sta);
+			if (!IEEE80211_ADDR_EQ(mlme->im_macaddr, vap->iv_dev->broadcast)) {
+				ni = ieee80211_find_node(&ic->ic_sta,
+					mlme->im_macaddr);
+				if (ni == NULL) {
+					IEEE80211_NODE_UNLOCK_BH(&ic->ic_sta);
+					return -EINVAL;
+				}
+				/* only act on stations attached to THIS vap */
+				if (dev == ni->ni_vap->iv_dev) {
+					ieee80211_domlme(mlme, ni);
+				}
+				ieee80211_free_node(ni);
+			} else {
+				ieee80211_iterate_dev_nodes(dev, &ic->ic_sta, ieee80211_domlme, mlme, 0);
+			}
+			IEEE80211_NODE_UNLOCK_BH(&ic->ic_sta);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case IEEE80211_MLME_AUTHORIZE:
+	case IEEE80211_MLME_UNAUTHORIZE:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+		ni = ieee80211_find_node(&ic->ic_sta, mlme->im_macaddr);
+		if (ni == NULL)
+			return -ENOENT;
+		if (mlme->im_op == IEEE80211_MLME_AUTHORIZE)
+			ieee80211_node_authorize(ni);
+		else
+			ieee80211_node_unauthorize(ni);
+		ieee80211_free_node(ni);
+		break;
+	case IEEE80211_MLME_CLEAR_STATS:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+		ni = ieee80211_find_node(&ic->ic_sta, mlme->im_macaddr);
+		if (ni == NULL)
+			return -ENOENT;
+
+		/* clear statistics */
+		memset(&ni->ni_stats, 0, sizeof(struct ieee80211_nodestats));
+		ieee80211_free_node(ni);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* 'iwpriv wifi0 doth_radar X' simulates a radar detection on current channel
+ * triggers a channel switch to a random if X is 0 or to the IEEE channel X.
+ * Only permitted while the current channel is a DFS channel; otherwise
+ * returns -EINVAL. */
+static int
+ieee80211_ioctl_radar(struct net_device *dev, struct iw_request_info *info,
+        void *w, char *extra)
+{
+	int *params = (int *) extra;
+	struct ieee80211vap *vap = netdev_priv(dev);
+        struct ieee80211com *ic = vap->iv_ic;
+	u_int8_t new_ieee = params[0];
+
+	if (ic->ic_curchan->ic_flags & IEEE80211_CHAN_DFS) {
+		/* hand the simulated detection to the radar state machine */
+		ic->ic_radar_detected(ic, new_ieee);
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+/* 'iwpriv wifi0 dfsactscan 1' will have STA do active scan on DFS channels,
+ * and 'iwpriv wifi0 dfsactscan 0' revert it to the default
+ * behavior (passive scan on DFS channels).  Toggles the PASSIVE flag on
+ * every channel in the DFS frequency range; always returns 0. */
+static int
+ieee80211_ioctl_dfsactscan(struct net_device *dev, struct iw_request_info *info,
+        void *w, char *extra)
+{
+        int* params = (int *) extra;
+        struct ieee80211vap *vap = netdev_priv(dev);
+        struct ieee80211com *ic = vap->iv_ic;
+        u_int8_t dfsactscan = params[0];
+	int i;
+
+	/* warn-only: the flag flip below is applied regardless of opmode */
+	if (ic->ic_opmode != IEEE80211_M_STA)
+		printk("%s: this command can be used only for STA\n", __FUNCTION__);
+
+/* Note: this logic should be same with qdrv_radar_is_dfs_required(), but wlan-to-qdrv
+ * dependency is considered not desirable, so the logic is duplicated here
+ */
+#define IS_DFS_CHAN(chan) ((5250 <= (chan)->ic_freq) && ((chan)->ic_freq <= 5725))
+
+	for (i = 0; i < ic->ic_nchans; i++) {
+		if (IS_DFS_CHAN(&ic->ic_channels[i])) {
+			if (dfsactscan) {
+				ic->ic_channels[i].ic_flags &= ~IEEE80211_CHAN_PASSIVE;
+			} else {
+				ic->ic_channels[i].ic_flags |= IEEE80211_CHAN_PASSIVE;
+			}
+		}
+	}
+
+	if (dfsactscan)
+		printk("STA is now configured to do an active scan on DFS channels\n");
+	else
+		printk("STA is now configured to do a passive scan on DFS channels (default)\n");
+
+        return 0;
+
+#undef IS_DFS_CHAN
+}
+
+/*
+ * Attach a WDS peer MAC address to a WDS vap.  Fails if the vap already
+ * has a peer address, or if the address is already in use — either as a
+ * local vap address or as another WDS vap's peer.
+ * Returns 0, -EOPNOTSUPP for non-WDS vaps, -EINVAL, or -1.
+ */
+static int
+ieee80211_ioctl_wdsmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211vap *vap_tmp;
+	struct ieee80211vap *vap_found = NULL;
+
+	if (vap->iv_opmode != IEEE80211_M_WDS)
+		return -EOPNOTSUPP;
+
+	if (!IEEE80211_ADDR_NULL(vap->wds_mac)) {
+		printk("%s: Failed to add WDS MAC: %s\n", dev->name,
+			ether_sprintf((u_int8_t *)sa->sa_data));
+		printk("%s: Device already has WDS mac address attached,"
+			" remove first\n", dev->name);
+		return -1;
+	}
+
+	/* reject addresses already used by any vap (own or WDS peer) */
+	TAILQ_FOREACH(vap_tmp, &ic->ic_vaps, iv_next) {
+		if (IEEE80211_ADDR_EQ(vap_tmp->iv_myaddr, sa->sa_data)) {
+			vap_found = vap_tmp;
+			break;
+		}
+
+		if ((vap_tmp->iv_opmode == IEEE80211_M_WDS) &&
+				(IEEE80211_ADDR_EQ(vap_tmp->wds_mac, sa->sa_data)) ) {
+			vap_found = vap_tmp;
+			break;
+		}
+	}
+
+	if (vap_found) {
+		printk("%s: The mac address(%s) has been used by device(%s)\n",
+				dev->name,
+				ether_sprintf((u_int8_t *)sa->sa_data),
+				vap_found->iv_dev->name);
+		return -EINVAL;
+	}
+
+	memcpy(vap->wds_mac, sa->sa_data, IEEE80211_ADDR_LEN);
+
+	printk("%s: Added WDS MAC: %s\n", dev->name,
+		ether_sprintf(vap->wds_mac));
+
+	return 0;
+}
+
+/*
+ * Detach the WDS peer MAC from a WDS vap.  Succeeds trivially when no
+ * peer is set.  The supplied address must match the attached one; on
+ * match the extender peer info is also purged.  Returns 0, -EOPNOTSUPP,
+ * or -1 on address mismatch.
+ */
+static int
+ieee80211_ioctl_wdsdelmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* nothing attached: treat as success (checked before opmode) */
+	if (IEEE80211_ADDR_NULL(vap->wds_mac))
+		return 0;
+
+	if (vap->iv_opmode != IEEE80211_M_WDS)
+		return -EOPNOTSUPP;
+
+	/*
+	 * Compare supplied MAC address with WDS MAC of this interface
+	 * remove when mac address is known
+	 */
+	if (IEEE80211_ADDR_EQ(vap->wds_mac, sa->sa_data)) {
+		ieee80211_extender_remove_peer_wds_info(ic, vap->wds_mac);
+
+		IEEE80211_ADDR_SET_NULL(vap->wds_mac);
+		return 0;
+	}
+
+	printk("%s: WDS MAC address %s is not known by this interface\n",
+	       dev->name, ether_sprintf((u_int8_t *)sa->sa_data));
+
+	return -1;
+}
+
+/*
+ * kick associated station with the given MAC address.
+ * Thin wrapper: builds an MLME disassociation request (reason
+ * UNSPECIFIED) and forwards it to ieee80211_ioctl_setmlme().
+ */
+static int
+ieee80211_ioctl_kickmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	struct ieee80211req_mlme mlme;
+
+	if (sa->sa_family != ARPHRD_ETHER)
+		return -EINVAL;
+
+	/* Setup a MLME request for disassociation of the given MAC */
+	mlme.im_op = IEEE80211_MLME_DISASSOC;
+	mlme.im_reason = IEEE80211_REASON_UNSPECIFIED;
+	IEEE80211_ADDR_COPY(&(mlme.im_macaddr), sa->sa_data);
+
+	/* Send the MLME request and return the result. */
+	return ieee80211_ioctl_setmlme(dev, info, w, (char *)&mlme);
+}
+
+/* Currently this function is used to associate with an AP with given bssid.
+ * Build variants: with DEMO_CONTROL the address becomes the desired BSSID
+ * (pushed to qdrv, with a device reset if up); otherwise it is added to
+ * the MAC ACL, lazily attaching the "mac" aclator on first use. */
+static int
+ieee80211_ioctl_addmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)extra;
+
+#ifdef DEMO_CONTROL
+	struct ieee80211com *ic = vap->iv_ic;
+	memcpy(vap->iv_des_bssid, sa->sa_data, IEEE80211_ADDR_LEN);
+
+
+	ieee80211_param_to_qdrv(vap, IEEE80211_PARAM_BSSID,
+		0, vap->iv_des_bssid, IEEE80211_ADDR_LEN);
+
+	if (IS_UP(vap->iv_dev))
+		return ic->ic_reset(ic);
+#else
+	const struct ieee80211_aclator *acl = vap->iv_acl;
+	/* lazily attach the MAC-based aclator on first use */
+	if (acl == NULL) {
+		acl = ieee80211_aclator_get("mac");
+		if (acl == NULL || !acl->iac_attach(vap))
+			return -EINVAL;
+		vap->iv_acl = acl;
+	}
+	acl->iac_add(vap, sa->sa_data);
+#endif
+	return 0;
+}
+
+/*
+ * Remove a MAC address from the vap's ACL, lazily attaching the "mac"
+ * aclator if none is attached yet (mirrors ieee80211_ioctl_addmac).
+ */
+static int
+ieee80211_ioctl_delmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	const struct ieee80211_aclator *acl = vap->iv_acl;
+
+	if (acl == NULL) {
+		acl = ieee80211_aclator_get("mac");
+		if (acl == NULL || !acl->iac_attach(vap))
+			return -EINVAL;
+		vap->iv_acl = acl;
+	}
+	acl->iac_remove(vap, (u_int8_t *)sa->sa_data);
+	return 0;
+}
+
+/*
+ * Set the active channel list.  The user bitmap is intersected with the
+ * channels actually available (dropping DFS channels when deactivated,
+ * and per-bandwidth disabled channels), then the per-bandwidth active
+ * sets (20/40/80) are rebuilt.  If the current BSS or desired channel
+ * falls outside the new list it is invalidated; a running vap is kicked
+ * back to SCAN (or INIT when its BSS channel became invalid).
+ */
+static int
+ieee80211_ioctl_setchanlist(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_chanlist *list =
+		(struct ieee80211req_chanlist *)extra;
+	u_char chanlist[IEEE80211_CHAN_BYTES];
+	int i, j, k = 0, nchan;	/* k: set when the BSS channel was invalidated */
+	struct ieee80211_channel *ch;
+	int bw = ieee80211_get_bw(ic);
+
+	memset(chanlist, 0, sizeof(chanlist));
+	/*
+	 * Since channel 0 is not available for DS, channel 1
+	 * is assigned to LSB on WaveLAN.
+	 */
+	if ((ic->ic_phytype == IEEE80211_T_DS) || (ic->ic_phytype == IEEE80211_T_OFDM))
+		i = 1;
+	else
+		i = 0;
+	nchan = 0;
+	for (j = 0; i <= IEEE80211_CHAN_MAX; i++, j++) {
+		/*
+		 * NB: silently discard unavailable channels so users
+		 *     can specify 1-255 to get all available channels.
+		 */
+		if (isset(list->ic_channels, j) && isset(ic->ic_chan_avail, i)) {
+			if (ic->ic_dfs_channels_deactive && isset(ic->ic_chan_dfs_required, i)) {
+				continue;
+			}
+			if (ieee80211_is_channel_disabled(ic, i, bw)) {
+				continue;
+			}
+			setbit(chanlist, i);
+			nchan++;
+		}
+	}
+
+	if (nchan == 0)			/* no valid channels, disallow */
+		return -EINVAL;
+	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&	/* XXX */
+	    isclr(chanlist, ic->ic_bsschan->ic_ieee)) {
+		ic->ic_bsschan = IEEE80211_CHAN_ANYC;	/* invalidate */
+		k = 1;
+	}
+
+	/* rebuild the per-bandwidth active channel bitmaps */
+	memset(ic->ic_chan_active_80, 0, sizeof(ic->ic_chan_active_80));
+	memset(ic->ic_chan_active_40, 0, sizeof(ic->ic_chan_active_40));
+	memset(ic->ic_chan_active_20, 0, sizeof(ic->ic_chan_active_20));
+
+	for (i = 0; i < ic->ic_nchans; i++) {
+		ch = &ic->ic_channels[i];
+		if (isset(chanlist, ch->ic_ieee)) {
+			if (IEEE80211_IS_CHAN_HT40(ch))
+				setbit(ic->ic_chan_active_40, ch->ic_ieee);
+
+			if (IEEE80211_IS_CHAN_VHT80(ch))
+				setbit(ic->ic_chan_active_80, ch->ic_ieee);
+
+			setbit(ic->ic_chan_active_20, ch->ic_ieee);
+		}
+	}
+
+	ieee80211_update_active_chanlist(ic, bw);
+	if (IS_UP_AUTO(vap)) {
+		/* invalidate the desired channel if the new list (or current
+		 * bandwidth) no longer supports it, then rescan */
+		if (ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
+				isclr(ic->ic_chan_active, ic->ic_des_chan->ic_ieee)) {
+			ic->ic_des_chan = IEEE80211_CHAN_ANYC;
+		}
+		if (ic->ic_des_chan != IEEE80211_CHAN_ANYC) {
+			if (((bw >= BW_HT80) && !(ic->ic_des_chan->ic_flags & IEEE80211_CHAN_VHT80)) ||
+					((bw >= BW_HT40) && !(ic->ic_des_chan->ic_flags & IEEE80211_CHAN_HT40))) {
+				ic->ic_des_chan = IEEE80211_CHAN_ANYC;
+			}
+		}
+		ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	/* send disassoc to ap when BSS channel is invalid. */
+	} else if (k && vap->iv_state == IEEE80211_S_RUN &&
+			vap->iv_opmode != IEEE80211_M_MONITOR) {
+		ieee80211_new_state(vap, IEEE80211_S_INIT, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * Return the active channel bitmap, with channels that are inconsistent
+ * with the desired phy mode cleared for non-STA vaps.
+ * Always returns 0; iwr->data.length is set to the bitmap size.
+ */
+static int
+ieee80211_ioctl_getchanlist(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	union iwreq_data *iwr = (union iwreq_data *)w;
+	u_int8_t chanlist[IEEE80211_CHAN_BYTES];
+	int i;
+
+	memcpy(chanlist, ic->ic_chan_active, sizeof(ic->ic_chan_active));
+
+	for (i = 0; i < ic->ic_nchans; i++) {
+		struct ieee80211_channel *c = &ic->ic_channels[i];
+
+		/* drop channels the desired mode cannot use (AP/WDS only) */
+		if (isset(chanlist, c->ic_ieee) &&
+			(vap->iv_opmode != IEEE80211_M_STA) &&
+			(ieee80211_check_mode_consistency(ic, ic->ic_des_mode, c))) {
+			clrbit(chanlist, c->ic_ieee);
+		}
+	}
+
+	memcpy(extra, chanlist, sizeof(chanlist));
+	iwr->data.length = sizeof(chanlist);
+
+	return 0;
+}
+
+/*
+ * Report one entry (ieee number, frequency, flags) per distinct IEEE
+ * channel number, deduplicated via the 'reported' bitmap.  For a given
+ * number, a turbo variant is preferred over non-turbo and 11g over 11b.
+ * NOTE(review): the CHAN_A lookup is an independent 'if' while ANYG/B
+ * form an if/else-if chain, so a channel matching both A and the chain
+ * has c1 overwritten — presumably A and G/B are mutually exclusive here;
+ * confirm against the channel flag definitions.
+ */
+static int
+ieee80211_ioctl_getchaninfo(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	union iwreq_data *iwr = (union iwreq_data *)w;
+	struct ieee80211req_chaninfo *chans =
+		(struct ieee80211req_chaninfo *) extra;
+	u_int8_t reported[IEEE80211_CHAN_BYTES];	/* XXX stack usage? */
+	int i;
+
+	memset(chans, 0, sizeof(*chans));
+	memset(reported, 0, sizeof(reported));
+	for (i = 0; i < ic->ic_nchans; i++) {
+		const struct ieee80211_channel *c = &ic->ic_channels[i];
+		const struct ieee80211_channel *c1 = c;
+
+		if (isclr(reported, c->ic_ieee)) {
+			setbit(reported, c->ic_ieee);
+
+			/* pick turbo channel over non-turbo channel, and
+			 * 11g channel over 11b channel */
+			if (IEEE80211_IS_CHAN_A(c))
+				c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_TURBO_A);
+			if (IEEE80211_IS_CHAN_ANYG(c))
+				c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_TURBO_G);
+			else if (IEEE80211_IS_CHAN_B(c)) {
+				c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_TURBO_G);
+				if (!c1)
+					c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_11G);
+			}
+
+			/* fall back to the original when no variant exists */
+			if (c1)
+				c = c1;
+			chans->ic_chans[chans->ic_nchans].ic_ieee = c->ic_ieee;
+			chans->ic_chans[chans->ic_nchans].ic_freq = c->ic_freq;
+			chans->ic_chans[chans->ic_nchans].ic_flags = c->ic_flags;
+			if (++chans->ic_nchans >= IEEE80211_CHAN_MAX)
+				break;
+		}
+	}
+
+	iwr->data.length = chans->ic_nchans * sizeof(struct ieee80211_chan);
+	return 0;
+}
+
+/*
+ * "Set WMM parameters" ioctl handler.
+ *
+ * param[0] selects which parameter to change, param[1] the access class
+ * (out-of-range values fall back to WME_AC_BE), param[2] selects the BSS
+ * (advertised) parameter set vs. the local channel set, param[3] is the
+ * new value.
+ *
+ * Each parameter exists in two copies: the wme_wme*ChanParams "master"
+ * values (always updated, with cap_info_count bumped so the change is
+ * re-advertised) and the wme_*ChanParams working values.  For the BSS
+ * set the working copy is only updated when WME_F_AGGRMODE is off,
+ * since aggressive mode overrides the advertised values.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range value or an
+ * inapplicable bss flag (ACM is BSS-only, no-ack is non-BSS-only).
+ */
+static int
+ieee80211_ioctl_setwmmparams(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	int *param = (int *) extra;
+	int ac = (param[1] >= 0 && param[1] < WME_NUM_AC) ?
+		param[1] : WME_AC_BE;
+	int bss = param[2];
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+#ifdef CONFIG_QVSP
+	struct ieee80211com *ic = vap->iv_ic;
+#endif
+
+	switch (param[0]) {
+	case IEEE80211_WMMPARAMS_CWMIN:
+		/* log2 of the contention window, 0..15 */
+		if (param[3] < 0 || param[3] > 15)
+			return -EINVAL;
+		if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmm_logcwmin = param[3];
+			wme->wme_wmeBssChanParams.cap_info_count++;
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0) {
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmm_logcwmin = param[3];
+			}
+
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmm_logcwmin = param[3];
+			wme->wme_wmeChanParams.cap_info_count++;
+			wme->wme_chanParams.cap_wmeParams[ac].wmm_logcwmin = param[3];
+		}
+		ieee80211_wme_updateparams(vap, !bss);
+		break;
+	case IEEE80211_WMMPARAMS_CWMAX:
+		/* log2 of the contention window, 0..15 */
+		if (param[3] < 0 || param[3] > 15)
+			return -EINVAL;
+		if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmm_logcwmax = param[3];
+			wme->wme_wmeBssChanParams.cap_info_count++;
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0) {
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmm_logcwmax = param[3];
+			}
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmm_logcwmax = param[3];
+			wme->wme_wmeChanParams.cap_info_count++;
+			wme->wme_chanParams.cap_wmeParams[ac].wmm_logcwmax = param[3];
+		}
+		ieee80211_wme_updateparams(vap, !bss);
+		break;
+	case IEEE80211_WMMPARAMS_AIFS:
+		/* arbitration inter-frame space number, 0..15 */
+		if (param[3] < 0 || param[3] > 15)
+			return -EINVAL;
+		if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmm_aifsn = param[3];
+			wme->wme_wmeBssChanParams.cap_info_count++;
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmm_aifsn = param[3];
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmm_aifsn = param[3];
+			wme->wme_wmeChanParams.cap_info_count++;
+			wme->wme_chanParams.cap_wmeParams[ac].wmm_aifsn = param[3];
+		}
+		ieee80211_wme_updateparams(vap, !bss);
+		break;
+	case IEEE80211_WMMPARAMS_TXOPLIMIT:
+		/* value is in microseconds; stored in TXOP units */
+		if (param[3] < 0 || param[3] > 8192)
+			return -EINVAL;
+		if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmm_txopLimit
+				= IEEE80211_US_TO_TXOP(param[3]);
+			wme->wme_wmeBssChanParams.cap_info_count++;
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmm_txopLimit =
+					IEEE80211_US_TO_TXOP(param[3]);
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmm_txopLimit
+				= IEEE80211_US_TO_TXOP(param[3]);
+			wme->wme_wmeChanParams.cap_info_count++;
+			wme->wme_chanParams.cap_wmeParams[ac].wmm_txopLimit
+				= IEEE80211_US_TO_TXOP(param[3]);
+		}
+		ieee80211_wme_updateparams(vap, !bss);
+		break;
+	case IEEE80211_WMMPARAMS_ACM:
+		if (!bss || param[3] < 0 || param[3] > 1)
+			return -EINVAL;
+		/* ACM bit applies to BSS case only */
+		wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmm_acm = param[3];
+		wme->wme_wmeBssChanParams.cap_info_count++;
+		if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+			wme->wme_bssChanParams.cap_wmeParams[ac].wmm_acm = param[3];
+		ieee80211_wme_updateparams(vap, 0);
+		break;
+	case IEEE80211_WMMPARAMS_NOACKPOLICY:
+		if (bss || param[3] < 0 || param[3] > 1)
+			return -EINVAL;
+		/* ack policy applies to non-BSS case only */
+		wme->wme_wmeChanParams.cap_wmeParams[ac].wmm_noackPolicy = param[3];
+		wme->wme_wmeChanParams.cap_info_count++;
+		wme->wme_chanParams.cap_wmeParams[ac].wmm_noackPolicy = param[3];
+		ieee80211_vap_sync_chan_wmestate(vap);
+		break;
+	default:
+		/* unknown selector: silently ignored (returns success) */
+		break;
+	}
+
+#ifdef CONFIG_QVSP
+	/* WMM changes invalidate any QVSP stream state */
+	if (ic->ic_vsp_reset) {
+		ic->ic_vsp_reset(ic);
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * "Get WMM parameters" ioctl handler.
+ *
+ * param[0] is overloaded: on entry it selects the parameter, on return
+ * it carries the value.  param[1] is the access class (out-of-range
+ * falls back to WME_AC_BE), param[2] selects the BSS parameter set vs.
+ * the local channel set.  ACM and no-ack policy ignore the bss flag and
+ * always read their fixed (BSS resp. non-BSS) master copies.
+ * Always returns 0; an unknown selector leaves param[0] unchanged.
+ */
+static int
+ieee80211_ioctl_getwmmparams(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	int *param = (int *) extra;
+	int ac = (param[1] >= 0 && param[1] < WME_NUM_AC) ?
+		param[1] : WME_AC_BE;
+	int bss = param[2];
+	struct ieee80211_wme_state *wme = ieee80211_vap_get_wmestate(vap);
+	struct chanAccParams *chanParams = (!bss) ?
+		&(wme->wme_chanParams) : &(wme->wme_bssChanParams);
+
+	switch (param[0]) {
+        case IEEE80211_WMMPARAMS_CWMIN:
+		param[0] = chanParams->cap_wmeParams[ac].wmm_logcwmin;
+		break;
+        case IEEE80211_WMMPARAMS_CWMAX:
+		param[0] = chanParams->cap_wmeParams[ac].wmm_logcwmax;
+		break;
+        case IEEE80211_WMMPARAMS_AIFS:
+		param[0] = chanParams->cap_wmeParams[ac].wmm_aifsn;
+		break;
+        case IEEE80211_WMMPARAMS_TXOPLIMIT:
+		/* stored in TXOP units; reported in microseconds */
+		param[0] = IEEE80211_TXOP_TO_US(chanParams->cap_wmeParams[ac].wmm_txopLimit);
+		break;
+        case IEEE80211_WMMPARAMS_ACM:
+		param[0] = wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmm_acm;
+		break;
+        case IEEE80211_WMMPARAMS_NOACKPOLICY:
+		param[0] = wme->wme_wmeChanParams.cap_wmeParams[ac].wmm_noackPolicy;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * "Get WPA IE" ioctl handler: return the stored security-related IEs
+ * (WPA, RSN, OSEN, WPS, Quantenna pairing hash, MDIE/FTIE) for the
+ * station whose MAC address is passed in the request buffer.
+ *
+ * For STA interfaces where the peer is not in the node table, the scan
+ * cache is consulted instead and only the pairing-IE information is
+ * returned.  Returns -EINVAL for a bad length or unknown MAC, -EFAULT
+ * on a user-copy failure, 0 on success.
+ */
+static int
+ieee80211_ioctl_getwpaie(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211req_wpaie wpaie = {{0}};
+	struct scanlookup lookup;
+	struct ieee80211_ie_qtn_pairing *hash_ie;
+
+	if (iwr->u.data.length != sizeof(wpaie))
+		return -EINVAL;
+	/* only the MAC address prefix of the request is read */
+	if (copy_from_user(&wpaie, iwr->u.data.pointer, IEEE80211_ADDR_LEN))
+		return -EFAULT;
+	ni = ieee80211_find_node(&ic->ic_sta, wpaie.wpa_macaddr);
+	if ((ni == NULL) && (vap->iv_opmode == IEEE80211_M_STA)) {
+		/* STA fallback: look the peer up in the scan cache */
+		lookup.mac = wpaie.wpa_macaddr;
+		lookup.esslen = 0;
+		lookup.se = NULL;
+
+		ieee80211_scan_iterate(ic, mlmelookup, &lookup);
+
+		if (lookup.se != NULL) {
+			hash_ie = (struct ieee80211_ie_qtn_pairing *)
+						lookup.se->se_pairing_ie;
+			if (hash_ie) {
+				memcpy(wpaie.qtn_pairing_ie,
+						hash_ie->qtn_pairing_tlv.qtn_pairing_tlv_hash,
+						QTN_PAIRING_TLV_HASH_LEN);
+				wpaie.has_pairing_ie = QTN_PAIRING_IE_EXIST;
+			} else {
+				wpaie.has_pairing_ie = QTN_PAIRING_IE_ABSENT;
+			}
+
+			return (copy_to_user(iwr->u.data.pointer, &wpaie,
+						sizeof(wpaie)) ? -EFAULT : 0);
+		} else {
+			return -EINVAL;		/* XXX */
+		}
+	}
+	if (ni == NULL)
+		return -EINVAL;
+	/* each IE is copied with its 2-byte header, clamped to the
+	 * destination field size */
+	if (ni->ni_wpa_ie != NULL) {
+		int ielen = ni->ni_wpa_ie[1] + 2;
+		if (ielen > sizeof(wpaie.wpa_ie))
+			ielen = sizeof(wpaie.wpa_ie);
+		memcpy(wpaie.wpa_ie, ni->ni_wpa_ie, ielen);
+	}
+	if (ni->ni_rsn_ie != NULL) {
+		int ielen = ni->ni_rsn_ie[1] + 2;
+		if (ielen > sizeof(wpaie.rsn_ie))
+			ielen = sizeof(wpaie.rsn_ie);
+		memcpy(wpaie.rsn_ie, ni->ni_rsn_ie, ielen);
+	}
+	if (ni->ni_osen_ie != NULL) {
+		int ielen = ni->ni_osen_ie[1] + 2;
+		if (ielen > sizeof(wpaie.osen_ie))
+			ielen = sizeof(wpaie.osen_ie);
+		memcpy(wpaie.osen_ie, ni->ni_osen_ie, ielen);
+	}
+	if (ni->ni_wsc_ie != NULL) {
+		int ielen = ni->ni_wsc_ie[1] + 2;
+
+		if (ielen > sizeof(wpaie.wps_ie)) {
+			ielen = sizeof(wpaie.wps_ie);
+		}
+		memcpy(wpaie.wps_ie, ni->ni_wsc_ie, ielen);
+	}
+	if (ni->ni_qtn_pairing_ie != NULL) {
+		hash_ie = (struct ieee80211_ie_qtn_pairing *)ni->ni_qtn_pairing_ie;
+
+		memcpy(wpaie.qtn_pairing_ie, hash_ie->qtn_pairing_tlv.qtn_pairing_tlv_hash, QTN_PAIRING_TLV_HASH_LEN);
+		wpaie.has_pairing_ie = QTN_PAIRING_IE_EXIST;
+	} else {
+		wpaie.has_pairing_ie = QTN_PAIRING_IE_ABSENT;
+	}
+	/* NOTE(review): unlike the IEs above, the MDIE/FTIE copies are not
+	 * clamped to the destination size; a malformed stored IE length
+	 * could overrun wpaie.mdie/wpaie.ftie -- confirm the sources are
+	 * validated at capture time. */
+	if (ni->ni_rx_md_ie != NULL) {
+		int ielen = ni->ni_rx_md_ie[1] + 2;
+		memcpy(wpaie.mdie, ni->ni_rx_md_ie, ielen);
+	} else {
+		memset(wpaie.mdie, 0, 5);
+	}
+	if (ni->ni_rx_ft_ie != NULL) {
+		int ielen = ni->ni_rx_ft_ie[1] + 2;
+		memcpy(wpaie.ftie, ni->ni_rx_ft_ie, ielen);
+	}
+
+
+	ieee80211_free_node(ni);
+	return (copy_to_user(iwr->u.data.pointer, &wpaie, sizeof(wpaie)) ?
+		-EFAULT : 0);
+}
+
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+/*
+ * 802.11k remote-station statistics ioctl handler.
+ *
+ * Reads an ieee80211req_qtn_rmt_sta_stats_setpara (target MAC +
+ * measurement flags) from user space, sends the corresponding radio
+ * measurement request to the peer, then sleeps interruptibly until the
+ * response handler clears the pending flag (or a signal arrives).
+ * For non-Quantenna peers (no ni_qtn_assoc_ie) the QTN-specific flags
+ * are translated to their vendor group-221 equivalents.
+ * Results are written into 'extra'; sta_statistic->status is 0 on
+ * success, a response status on timeout/unsupported, or -ECANCELED
+ * when interrupted.  Returns -EFAULT on a bad user pointer, -EINVAL
+ * for an unknown MAC, else 0.
+ */
+static int
+ieee80211_ioctl_getstastatistic(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+	struct ieee80211_node *ni = NULL;
+	struct ieee80211req_qtn_rmt_sta_stats *sta_statistic = NULL;
+	struct ieee80211req_qtn_rmt_sta_stats_setpara setpara;
+	int ret = 0;
+
+	if (copy_from_user(&setpara, data->pointer, sizeof(struct ieee80211req_qtn_rmt_sta_stats_setpara))) {
+		return -EFAULT;
+	}
+
+	ni = ieee80211_find_node(nt, setpara.macaddr);
+
+	if (ni) {
+		if (setpara.flags == RM_STANDARD_CCA) {
+			/* standard CCA request: fire and forget, no wait */
+			ieee80211_send_rm_req_cca(ni);
+		} else {
+			/* non-QTN peer: map QTN flags to group-221 flags */
+			if (ni->ni_qtn_assoc_ie == NULL) {
+				if (setpara.flags == BIT(RM_QTN_RSSI_DBM)) {
+					setpara.flags = BIT(RM_GRP221_RSSI);
+				} else if (setpara.flags == BIT(RM_QTN_HW_NOISE)) {
+					setpara.flags = BIT(RM_GRP221_PHY_NOISE);
+				} else if (setpara.flags == BIT(RM_QTN_SOC_MACADDR)) {
+					setpara.flags = BIT(RM_GRP221_SOC_MAC);
+				}
+			}
+
+			sta_statistic = (struct ieee80211req_qtn_rmt_sta_stats *)extra;
+
+			/* Set pending flag and clear status */
+			ni->ni_dotk_meas_state.meas_state_sta.pending = 1;
+			ni->ni_dotk_meas_state.meas_state_sta.status = 0;
+
+			ieee80211_send_rm_req_stastats(ni, setpara.flags);
+
+			/*
+			 * ret = 0, thread is waked up
+			 * ret < 0, interrupted by SIGNAL
+			 * */
+			ret = wait_event_interruptible(ni->ni_dotk_waitq,
+					ni->ni_dotk_meas_state.meas_state_sta.pending == 0);
+
+			data->length = sizeof(struct ieee80211req_qtn_rmt_sta_stats);
+			if (ret == 0) {
+				if (ni->ni_dotk_meas_state.meas_state_sta.status == 0) {
+					memset(sta_statistic, 0, sizeof(struct ieee80211req_qtn_rmt_sta_stats));
+
+					if (setpara.flags & RM_QTN_MEASURE_MASK) {
+						/* QTN peer: full measurement block */
+						memcpy(&(sta_statistic->rmt_sta_stats),
+								&(ni->ni_qtn_rm_sta_all),
+								sizeof(struct ieee80211_ie_qtn_rm_sta_all));
+					} else {
+						/* group-221 results are reported in
+						 * tenths of a dB; RSSI is rounded to
+						 * the nearest half unit */
+						if (setpara.flags & BIT(RM_GRP221_RSSI)) {
+							int rm_rssi = *(int8_t *)&ni->ni_rm_sta_grp221.rssi;
+							if (rm_rssi < 0) {
+								sta_statistic->rmt_sta_stats.rssi_dbm = rm_rssi * 10 + 5;
+							} else {
+								sta_statistic->rmt_sta_stats.rssi_dbm = rm_rssi * 10 - 5;
+							}
+						}
+
+						if (setpara.flags & BIT(RM_GRP221_PHY_NOISE)) {
+							int rm_noise = *(int8_t *)&ni->ni_rm_sta_grp221.phy_noise;
+							sta_statistic->rmt_sta_stats.hw_noise = rm_noise * 10;
+						}
+
+						if (setpara.flags & BIT(RM_GRP221_SOC_MAC)) {
+							memcpy(sta_statistic->rmt_sta_stats.soc_macaddr,
+									ni->ni_rm_sta_grp221.soc_macaddr,
+									IEEE80211_ADDR_LEN);
+						}
+					}
+
+					/* Success */
+					sta_statistic->status = 0;
+				} else {
+					/* Timer expired / peer don't support */
+					sta_statistic->status = ni->ni_dotk_meas_state.meas_state_sta.status;
+				}
+			} else {
+				/* Canceled by signal */
+				sta_statistic->status = -ECANCELED;
+			}
+			ret = 0;
+		}
+		ieee80211_free_node(ni);
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+#endif
+
+/*
+ * "Get station statistics" ioctl handler: copy ni_stats for the station
+ * whose MAC address leads the user buffer back into that buffer at the
+ * is_stats offset.  The copy length is the caller-supplied buffer size
+ * clamped to sizeof(struct ieee80211req_sta_stats).  Returns -EINVAL
+ * for a short buffer or unknown MAC, -EFAULT on copy failure, else 0.
+ */
+static int
+ieee80211_ioctl_getstastats(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	u_int8_t macaddr[IEEE80211_ADDR_LEN];
+	const int off = __offsetof(struct ieee80211req_sta_stats, is_stats);
+	int error;
+
+	if (iwr->u.data.length < off)
+		return -EINVAL;
+	if (copy_from_user(macaddr, iwr->u.data.pointer, IEEE80211_ADDR_LEN))
+		return -EFAULT;
+	ni = ieee80211_find_node(&ic->ic_sta, macaddr);
+	if (ni == NULL)
+		return -EINVAL;		/* XXX */
+	/* refresh MuC/AuC-shared counters before reporting, if supported */
+	if (ic->ic_get_shared_node_stats)
+		ic->ic_get_shared_node_stats(ni);
+	if (iwr->u.data.length > sizeof(struct ieee80211req_sta_stats))
+		iwr->u.data.length = sizeof(struct ieee80211req_sta_stats);
+	/* NB: copy out only the statistics */
+	error = copy_to_user(iwr->u.data.pointer + off, &ni->ni_stats,
+		iwr->u.data.length - off);
+	ieee80211_free_node(ni);
+	return (error ? -EFAULT : 0);
+}
+
+/* Iterator state for scan-result marshalling: 'sr' points at the next
+ * output record slot, 'space' is the number of bytes still available. */
+struct scanreq {			/* XXX: right place for declaration? */
+	struct ieee80211req_scan_result *sr;
+	size_t space;
+};
+
+/*
+ * Compute the marshalled size of one scan entry: the fixed record plus
+ * the SSID and any optional IEs (each with its 2-byte header), rounded
+ * up to 32-bit alignment.  The combined IE length is also returned
+ * through *ielen.
+ */
+static size_t
+scan_space(const struct ieee80211_scan_entry *se, int *ielen)
+{
+	int ies = 0;
+
+	if (se->se_rsn_ie != NULL)
+		ies += 2 + se->se_rsn_ie[1];
+	if (se->se_wpa_ie != NULL)
+		ies += 2 + se->se_wpa_ie[1];
+	if (se->se_wme_ie != NULL)
+		ies += 2 + se->se_wme_ie[1];
+	if (se->se_ath_ie != NULL)
+		ies += 2 + se->se_ath_ie[1];
+	if (se->se_htcap_ie != NULL)
+		ies += 2 + se->se_htcap_ie[1];
+	*ielen = ies;
+
+	return roundup(sizeof(struct ieee80211req_scan_result) +
+		se->se_ssid[1] + ies, sizeof(u_int32_t));
+}
+
+/* Scan-iterate callback: accumulate the output space required for every
+ * cached scan entry.  Always returns 0 so iteration continues. */
+static int
+get_scan_space(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct scanreq *sreq = arg;
+	int ie_bytes;
+
+	sreq->space += scan_space(se, &ie_bytes);
+	return 0;
+}
+
+/*
+ * Scan-iterate callback: marshal one scan entry into the output buffer.
+ *
+ * The fixed ieee80211req_scan_result record is followed by the SSID
+ * bytes and the raw IEs in a fixed order (RSN, WPA, WME, ATH, HTCAP),
+ * then req->sr/req->space are advanced by the aligned record size.
+ * Entries that no longer fit are skipped (the buffer was sized by an
+ * earlier get_scan_space() pass, so this should only trigger if the
+ * scan cache changed in between).
+ */
+static int
+get_scan_result(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct scanreq *req = arg;
+	struct ieee80211req_scan_result *sr;
+	int ielen, len, nr, nxr;
+	u_int8_t *cp;
+
+	len = scan_space(se, &ielen);
+	if (len > req->space) {
+	  printk("[madwifi] %s() : Not enough space.\n", __FUNCTION__);
+		return 0;
+	}
+
+	sr = req->sr;
+	memset(sr, 0, sizeof(*sr));
+	sr->isr_ssid_len = se->se_ssid[1];
+	/* XXX watch for overflow */
+	sr->isr_ie_len = ielen;
+	sr->isr_len = len;
+	sr->isr_freq = se->se_chan->ic_freq;
+	sr->isr_flags = se->se_chan->ic_flags;
+	sr->isr_rssi = se->se_rssi;
+	sr->isr_intval = se->se_intval;
+	sr->isr_capinfo = se->se_capinfo;
+	sr->isr_erp = se->se_erp;
+	IEEE80211_ADDR_COPY(sr->isr_bssid, se->se_bssid);
+	/* XXX bounds check */
+	/* NOTE(review): nr+nxr is not clamped to sizeof(isr_rates);
+	 * confirm the scan parser limits the stored rate-set lengths. */
+	nr = se->se_rates[1];
+	memcpy(sr->isr_rates, se->se_rates + 2, nr);
+	nxr = se->se_xrates[1];
+	memcpy(sr->isr_rates+nr, se->se_xrates + 2, nxr);
+	sr->isr_nrates = nr + nxr;
+
+	/* variable-length payload starts right after the fixed record */
+	cp = (u_int8_t *)(sr + 1);
+	memcpy(cp, se->se_ssid + 2, sr->isr_ssid_len);
+	cp += sr->isr_ssid_len;
+	if (se->se_rsn_ie != NULL) {
+		memcpy(cp, se->se_rsn_ie, 2 + se->se_rsn_ie[1]);
+		cp += 2 + se->se_rsn_ie[1];
+	}
+	if (se->se_wpa_ie != NULL) {
+		memcpy(cp, se->se_wpa_ie, 2 + se->se_wpa_ie[1]);
+		cp += 2 + se->se_wpa_ie[1];
+	}
+	if (se->se_wme_ie != NULL) {
+		memcpy(cp, se->se_wme_ie, 2 + se->se_wme_ie[1]);
+		cp += 2 + se->se_wme_ie[1];
+	}
+	if (se->se_ath_ie != NULL) {
+		memcpy(cp, se->se_ath_ie, 2 + se->se_ath_ie[1]);
+		cp += 2 + se->se_ath_ie[1];
+	}
+	if (se->se_htcap_ie != NULL) {
+		memcpy(cp, se->se_htcap_ie, 2 + se->se_htcap_ie[1]);
+		cp += 2 + se->se_htcap_ie[1];
+	}
+
+	req->space -= len;
+	req->sr = (struct ieee80211req_scan_result *)(((u_int8_t *)sr) + len);
+
+	return 0;
+}
+
+/*
+ * "Get scan results" ioctl handler.
+ *
+ * Two passes over the scan cache: get_scan_space() sizes the output
+ * (clamped to the user buffer), then the records are marshalled into a
+ * kernel bounce buffer via get_scan_result() and copied to user space.
+ * iwr->u.data.length is updated to the bytes actually produced.
+ * Returns -EFAULT for a too-short request or copy failure, -ENOMEM on
+ * allocation failure, else 0.
+ */
+static int
+ieee80211_ioctl_getscanresults(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct scanreq req;
+	int error;
+
+	if (iwr->u.data.length < sizeof(struct scanreq))
+		return -EFAULT;
+
+	error = 0;
+	req.space = 0;
+	ieee80211_scan_iterate(ic, get_scan_space, &req);
+	if (req.space > iwr->u.data.length)
+		req.space = iwr->u.data.length;
+	if (req.space > 0) {
+		size_t space;
+		void *p;
+
+		space = req.space;
+		MALLOC(p, void *, space, M_TEMP, M_WAITOK);
+		if (p == NULL)
+			return -ENOMEM;
+		req.sr = p;
+		ieee80211_scan_iterate(ic, get_scan_result, &req);
+		/* req.space now holds the unused remainder */
+		iwr->u.data.length = space - req.space;
+		error = copy_to_user(iwr->u.data.pointer, p, iwr->u.data.length);
+		FREE(p, M_TEMP);
+	} else
+		iwr->u.data.length = 0;
+
+	return (error ? -EFAULT : 0);
+}
+
+/* Iterator state for station-info marshalling: 'vap' filters nodes,
+ * 'si' points at the next output record, 'space' is bytes remaining. */
+struct stainforeq {		/* XXX: right place for declaration? */
+	struct ieee80211vap *vap;
+	struct ieee80211req_sta_info *si;
+	size_t	space;
+};
+
+/*
+ * Compute the marshalled size of one station record: the fixed struct
+ * plus any optional IEs (each with its 2-byte header), rounded up to
+ * 32-bit alignment.  The combined IE length is returned via *ielen.
+ */
+static size_t
+sta_space(const struct ieee80211_node *ni, size_t *ielen)
+{
+	size_t ies = 0;
+
+	if (ni->ni_rsn_ie != NULL)
+		ies += 2 + ni->ni_rsn_ie[1];
+	if (ni->ni_wpa_ie != NULL)
+		ies += 2 + ni->ni_wpa_ie[1];
+	if (ni->ni_wme_ie != NULL)
+		ies += 2 + ni->ni_wme_ie[1];
+	if (ni->ni_ath_ie != NULL)
+		ies += 2 + ni->ni_ath_ie[1];
+	*ielen = ies;
+	return roundup(sizeof(struct ieee80211req_sta_info) + ies,
+		      sizeof(u_int32_t));
+}
+
+/* Node-iterate callback: total up the output space needed for every
+ * node belonging to this vap (or its XR companion vap).  For AP and
+ * WDS interfaces only associated stations / WDS peers are counted. */
+static void
+get_sta_space(void *arg, struct ieee80211_node *ni)
+{
+	struct stainforeq *sreq = arg;
+	struct ieee80211vap *node_vap = ni->ni_vap;
+	size_t ie_bytes;
+
+	if (node_vap != sreq->vap && node_vap != sreq->vap->iv_xrvap)
+		return;
+	if ((node_vap->iv_opmode == IEEE80211_M_HOSTAP ||
+	     node_vap->iv_opmode == IEEE80211_M_WDS) &&
+	    ni->ni_associd == 0)
+		return;
+	sreq->space += sta_space(ni, &ie_bytes);
+}
+
+/*
+ * Node-iterate callback: marshal one station record into the output
+ * buffer and advance req->si/req->space.  Filtering mirrors
+ * get_sta_space(): only nodes of this vap (or its XR vap), and for
+ * AP/WDS interfaces only associated stations.  The fixed record is
+ * followed by the raw IEs in RSN, WPA, WME, ATH order.
+ */
+static void
+get_sta_info(void *arg, struct ieee80211_node *ni)
+{
+	struct stainforeq *req = arg;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_sta_info *si;
+	size_t ielen, len;
+	u_int8_t *cp;
+
+	if (vap != req->vap && vap != req->vap->iv_xrvap)	/* only entries for this vap (or) xrvap */
+		return;
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+	     vap->iv_opmode == IEEE80211_M_WDS) &&
+	    ni->ni_associd == 0)				/* only associated stations or a WDS peer */
+		return;
+	if (ni->ni_chan == IEEE80211_CHAN_ANYC)			/* XXX bogus entry */
+		return;
+	len = sta_space(ni, &ielen);
+	if (len > req->space)
+		return;
+	si = req->si;
+	si->isi_len = len;
+	si->isi_ie_len = ielen;
+	si->isi_freq = ni->ni_chan->ic_freq;
+	si->isi_flags = ni->ni_chan->ic_flags;
+	si->isi_state = ni->ni_flags;
+	si->isi_authmode = ni->ni_authmode;
+	si->isi_rssi = ic->ic_node_getrssi(ni);
+	si->isi_capinfo = ni->ni_capinfo;
+	si->isi_athflags = ni->ni_ath_flags;
+	si->isi_erp = ni->ni_erp;
+	IEEE80211_ADDR_COPY(si->isi_macaddr, ni->ni_macaddr);
+	/* the isi_rates field holds at most 15 entries */
+	si->isi_nrates = ni->ni_rates.rs_nrates;
+	if (si->isi_nrates > 15)
+		si->isi_nrates = 15;
+	memcpy(si->isi_rates, ni->ni_rates.rs_rates, si->isi_nrates);
+	si->isi_txrate = ni->ni_txrate;
+	si->isi_ie_len = ielen;
+	si->isi_associd = ni->ni_associd;
+	si->isi_txpower = ni->ni_txpower;
+	si->isi_vlan = ni->ni_vlan;
+	/* QoS nodes report per-TID sequence numbers, others only TID 0 */
+	if (ni->ni_flags & IEEE80211_NODE_QOS) {
+		memcpy(si->isi_txseqs, ni->ni_txseqs, sizeof(ni->ni_txseqs));
+		memcpy(si->isi_rxseqs, ni->ni_rxseqs, sizeof(ni->ni_rxseqs));
+	} else {
+		si->isi_txseqs[0] = ni->ni_txseqs[0];
+		si->isi_rxseqs[0] = ni->ni_rxseqs[0];
+	}
+	si->isi_uapsd = ni->ni_uapsd;
+	if ( vap == req->vap->iv_xrvap)
+		si->isi_opmode = IEEE80211_STA_OPMODE_XR;
+	else
+		si->isi_opmode = IEEE80211_STA_OPMODE_NORMAL;
+	/* NB: leave all cases in case we relax ni_associd == 0 check */
+	if (ieee80211_node_is_authorized(ni))
+		si->isi_inact = vap->iv_inact_run;
+	else if (ni->ni_associd != 0)
+		si->isi_inact = vap->iv_inact_auth;
+	else
+		si->isi_inact = vap->iv_inact_init;
+	si->isi_inact = (si->isi_inact - ni->ni_inact) * IEEE80211_INACT_WAIT;
+
+	/* variable-length IE payload follows the fixed record */
+	cp = (u_int8_t *)(si+1);
+	if (ni->ni_rsn_ie != NULL) {
+		memcpy(cp, ni->ni_rsn_ie, 2 + ni->ni_rsn_ie[1]);
+		cp += 2 + ni->ni_rsn_ie[1];
+        }
+	if (ni->ni_wpa_ie != NULL) {
+		memcpy(cp, ni->ni_wpa_ie, 2 + ni->ni_wpa_ie[1]);
+		cp += 2 + ni->ni_wpa_ie[1];
+	}
+	if (ni->ni_wme_ie != NULL) {
+		memcpy(cp, ni->ni_wme_ie, 2 + ni->ni_wme_ie[1]);
+		cp += 2 + ni->ni_wme_ie[1];
+	}
+	if (ni->ni_ath_ie != NULL) {
+		memcpy(cp, ni->ni_ath_ie, 2 + ni->ni_ath_ie[1]);
+		cp += 2 + ni->ni_ath_ie[1];
+	}
+
+	req->si = (struct ieee80211req_sta_info *)(((u_int8_t *)si) + len);
+	req->space -= len;
+}
+
+/*
+ * "Get station info" ioctl handler.
+ *
+ * Two passes over the node table: get_sta_space() sizes the output
+ * (clamped to the user buffer), then the records are marshalled into a
+ * kernel bounce buffer via get_sta_info() and copied to user space.
+ * iwr->u.data.length is updated to the bytes actually produced.
+ * Returns -EFAULT for a too-short request or copy failure, -ENOMEM on
+ * allocation failure, else 0.
+ */
+static int
+ieee80211_ioctl_getstainfo(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct stainforeq req;
+	int error;
+
+	if (iwr->u.data.length < sizeof(struct stainforeq))
+		return -EFAULT;
+
+	/* estimate space required for station info */
+	error = 0;
+	req.space = sizeof(struct stainforeq);
+	req.vap = vap;
+	ieee80211_iterate_nodes(&ic->ic_sta, get_sta_space, &req, 1);
+	if (req.space > iwr->u.data.length)
+		req.space = iwr->u.data.length;
+	if (req.space > 0) {
+		size_t space;
+		void *p;
+
+		space = req.space;
+		MALLOC(p, void *, space, M_TEMP, M_WAITOK);
+		/* guard against allocation failure, as getscanresults does */
+		if (p == NULL)
+			return -ENOMEM;
+		req.si = (struct ieee80211req_sta_info *)p;
+		ieee80211_iterate_nodes(&ic->ic_sta, get_sta_info, &req, 1);
+		/* req.space now holds the unused remainder */
+		iwr->u.data.length = space - req.space;
+		error = copy_to_user(iwr->u.data.pointer, p, iwr->u.data.length);
+		FREE(p, M_TEMP);
+	} else
+		iwr->u.data.length = 0;
+
+	return (error ? -EFAULT : 0);
+}
+
+/*
+ * Map a PEER_VENDOR_xxx identifier to its short display string.
+ * Unrecognized identifiers map to "unknown".
+ */
+static __inline const char *
+ieee80211_get_vendor_str(uint8_t vendor)
+{
+	if (vendor == PEER_VENDOR_QTN)
+		return "qtn";
+	if (vendor == PEER_VENDOR_BRCM)
+		return "brcm";
+	if (vendor == PEER_VENDOR_ATH)
+		return "ath";
+	if (vendor == PEER_VENDOR_RLNK)
+		return "rlnk";
+	if (vendor == PEER_VENDOR_RTK)
+		return "rtk";
+	if (vendor == PEER_VENDOR_INTEL)
+		return "intel";
+
+	return "unknown";
+}
+
+/* This array must be kept in sync with the IEEE80211_NODE_TYPE_xxx values */
+/* Indexed by node type; consumed by ieee80211_get_node_type_str(). */
+static const char *
+ieee80211_node_type_str[] = {
+	"none",
+	"vap",
+	"sta",
+	"wds",
+	"tdls"
+};
+
+/*
+ * Map an IEEE80211_NODE_TYPE_xxx value to its display string.
+ * Out-of-range values are reported as the "none" type.
+ */
+static __inline const char *
+ieee80211_get_node_type_str(uint8_t type)
+{
+	return ieee80211_node_type_str[
+		(type < ARRAY_SIZE(ieee80211_node_type_str)) ?
+			type : IEEE80211_NODE_TYPE_NONE];
+}
+
+/*
+ * Determine the HT (11n) association bandwidth and short-GI support
+ * for a node.  40MHz is used only when both the local device and the
+ * peer advertise IEEE80211_HTCAP_C_CHWIDTH40; likewise SGI requires
+ * both ends to support short GI at the chosen width.  Outputs: *bw is
+ * the IEEE80211_CWM_WIDTHxx code, *assoc_bw the width in MHz, *sgi a
+ * 0/1 flag.
+ */
+static void
+get_node_ht_bw_and_sgi(struct ieee80211com *ic, struct ieee80211_node *ni,
+		uint8_t *bw, uint8_t *assoc_bw, uint8_t *sgi)
+{
+	int both_40 = (ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40) &&
+			(ni->ni_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40);
+
+	if (both_40) {
+		*assoc_bw = 40;
+		*bw = IEEE80211_CWM_WIDTH40;
+		*sgi = ((ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40) &&
+			(ni->ni_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40)) ? 1 : 0;
+	} else {
+		*assoc_bw = 20;
+		*bw = IEEE80211_CWM_WIDTH20;
+		*sgi = ((ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20) &&
+			(ni->ni_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20)) ? 1 : 0;
+	}
+}
+
+/*
+ * Determine the VHT (11ac) association bandwidth and short-GI support
+ * for a node from its VHT operation element.  160/80+80 and 80 MHz are
+ * decided purely by the peer's SGI capability bits; 20/40 MHz defers to
+ * the HT logic.  NOTE(review): for an unrecognized chanwidth the
+ * outputs are left untouched -- callers must pre-initialize them.
+ */
+static void
+get_node_vht_bw_and_sgi(struct ieee80211_node *ni, uint8_t *bw,
+		uint8_t *assoc_bw, uint8_t *sgi)
+{
+	switch (ni->ni_vhtop.chanwidth) {
+	case IEEE80211_VHTOP_CHAN_WIDTH_160MHZ:
+	case IEEE80211_VHTOP_CHAN_WIDTH_80PLUS80MHZ:
+		*bw = IEEE80211_CWM_WIDTH160;
+		*assoc_bw = 160;
+		*sgi = (ni->ni_vhtcap.cap_flags &
+				IEEE80211_VHTCAP_C_SHORT_GI_160) ? 1 : 0;
+		break;
+	case IEEE80211_VHTOP_CHAN_WIDTH_80MHZ:
+		*bw = IEEE80211_CWM_WIDTH80;
+		*assoc_bw = 80;
+		*sgi = (ni->ni_vhtcap.cap_flags &
+				IEEE80211_VHTCAP_C_SHORT_GI_80) ? 1 : 0;
+		break;
+	case IEEE80211_VHTOP_CHAN_WIDTH_20_40MHZ:
+		get_node_ht_bw_and_sgi(ni->ni_ic, ni, bw, assoc_bw, sgi);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Highest mutually-supported HT MCS index for a node.  Each mcsset
+ * byte covers one spatial stream; a stream counts only when both sides
+ * support all eight of its rates (mask == 0xff), and the last such
+ * stream yields *max_mcs = 7, 15, 23 or 31.  NOTE(review): there is no
+ * break on a gap, so a later fully-supported stream still wins --
+ * confirm that is intended.  *max_mcs is untouched when no stream
+ * qualifies.
+ */
+static void
+get_node_ht_max_mcs(struct ieee80211com *ic, struct ieee80211_node *ni,
+		uint8_t *max_mcs)
+{
+	int nss_idx;
+
+	for (nss_idx = IEEE80211_HT_MCSSET_20_40_NSS1;
+			nss_idx <= IEEE80211_HT_MCSSET_20_40_NSS4; nss_idx++) {
+		if ((ic->ic_htcap.mcsset[nss_idx] & ni->ni_htcap.mcsset[nss_idx]) == 0xff)
+			*max_mcs = 8 * (nss_idx - IEEE80211_HT_MCSSET_20_40_NSS1 + 1) - 1;
+	}
+}
+
+/*
+ * Highest VHT MCS and NSS advertised in the peer's TX MCS/NSS map.
+ * Each NSS occupies two bits of the map (MCS 0-7, 0-8, 0-9 or N/A).
+ * Outputs are written only when at least one NSS is enabled; *max_nss
+ * is a zero-based stream index.  NOTE(review): 'enabled' counts enabled
+ * entries rather than tracking the highest enabled index -- correct
+ * only if maps are contiguous from NSS1; confirm.
+ */
+static void
+get_node_vht_max_nss_mcs(struct ieee80211_node *ni, uint8_t *max_nss, uint8_t *max_mcs)
+{
+	uint8_t best_mcs = 0;
+	uint8_t enabled = 0;
+	uint16_t map = ni->ni_vhtcap.txmcsmap;
+	int i;
+
+	for (i = IEEE80211_VHT_NSS1; i <= IEEE80211_VHT_NSS8; i++) {
+		switch (map & 0x03) {
+		case IEEE80211_VHT_MCS_0_7:
+			best_mcs = 7;
+			enabled++;
+			break;
+		case IEEE80211_VHT_MCS_0_8:
+			best_mcs = 8;
+			enabled++;
+			break;
+		case IEEE80211_VHT_MCS_0_9:
+			best_mcs = 9;
+			enabled++;
+			break;
+		case IEEE80211_VHT_MCS_NA:
+		default:
+			/* stream disabled: contributes nothing */
+			break;
+		}
+		map >>= 2;
+	}
+
+	if (best_mcs != 0) {
+		*max_mcs = best_mcs;
+		*max_nss = enabled - 1; /* Nss index starts from 0 */
+	}
+}
+
+/*
+ * Report the peer's maximum TX and RX spatial-stream counts, derived
+ * from its stored HT/VHT capability IEs according to ni_wifi_mode.
+ * VHT: count MCS-map entries until the first "disabled" slot.
+ * HT: RX streams from the IEEE80211_HT_IS_xSS_NODE() tests on the MCS
+ * set; TX streams from the MCS TX parameters (equal to RX unless the
+ * TX-set-defined / TX!=RX bits say otherwise).  Non-HT modes report
+ * 1x1.  NOTE(review): in the HT path a node matching none of the
+ * 2/3/4-SS tests leaves rx_max at 0 -- confirm 1SS nodes are handled
+ * upstream.
+ */
+static void get_node_max_mimo(struct ieee80211_node *ni, uint8_t *tx, uint8_t *rx)
+{
+	uint8_t tx_max = 0;
+	uint8_t rx_max = 0;
+	uint16_t vht_mcsmap = 0;
+        struct ieee80211_ie_htcap *htcap = (struct ieee80211_ie_htcap *)&ni->ni_ie_htcap;
+        struct ieee80211_ie_vhtcap *vhtcap =
+				(struct ieee80211_ie_vhtcap *)&ni->ni_ie_vhtcap;
+
+	switch (ni->ni_wifi_mode) {
+	case IEEE80211_WIFI_MODE_AC:
+		vht_mcsmap = IEEE80211_VHTCAP_GET_RX_MCS_NSS(vhtcap);
+		for (rx_max = 0; rx_max < IEEE80211_VHTCAP_MCS_MAX; ++rx_max) {
+			if (IEEE80211_VHTCAP_GET_MCS_MAP_ENTRY(vht_mcsmap,
+					rx_max) == IEEE80211_VHTCAP_MCS_DISABLED)
+				break;
+		}
+		vht_mcsmap = IEEE80211_VHTCAP_GET_TX_MCS_NSS(vhtcap);
+		for (tx_max = 0; tx_max < IEEE80211_VHTCAP_MCS_MAX; ++tx_max) {
+			if (IEEE80211_VHTCAP_GET_MCS_MAP_ENTRY(vht_mcsmap,
+					tx_max) == IEEE80211_VHTCAP_MCS_DISABLED)
+				break;
+		}
+		break;
+	case IEEE80211_WIFI_MODE_NA:
+	case IEEE80211_WIFI_MODE_NG:
+		if (IEEE80211_HT_IS_4SS_NODE(htcap->hc_mcsset)) {
+			rx_max = 4;
+		} else if (IEEE80211_HT_IS_3SS_NODE(htcap->hc_mcsset)) {
+			rx_max = 3;
+		} else if (IEEE80211_HT_IS_2SS_NODE(htcap->hc_mcsset)) {
+			rx_max = 2;
+		}
+		if ((IEEE80211_HTCAP_MCS_PARAMS(htcap) &
+				IEEE80211_HTCAP_MCS_TX_SET_DEFINED) &&
+				(IEEE80211_HTCAP_MCS_PARAMS(htcap) &
+				IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ)) {
+			tx_max = IEEE80211_HTCAP_MCS_STREAMS(htcap) + 1;
+		} else if (IEEE80211_HTCAP_MCS_PARAMS(htcap) &
+				IEEE80211_HTCAP_MCS_TX_RX_SET_NEQ) {
+			tx_max = 0;
+		} else {
+			tx_max = rx_max;
+		}
+		break;
+	default:
+		/* Non ht mode */
+		tx_max = 1;
+		rx_max = 1;
+		break;
+	}
+
+	*tx = tx_max;
+	*rx = rx_max;
+}
+
+/*
+ * Report the best achievable PHY rate (Mbps, same value for TX and RX)
+ * and the association bandwidth (MHz) for a node.
+ *
+ * Legacy (non-HT/VHT) nodes: the last legacy rate in ni_rates is taken
+ * as the highest (rate units are 500kbps, hence /2) at 20MHz.
+ * HT/VHT nodes: bandwidth, SGI and max MCS/NSS are derived from the
+ * capability helpers above and converted via ic->ic_mcs_to_phyrate().
+ */
+static void get_node_achievable_phyrate_and_bw(struct ieee80211_node *ni,
+						uint32_t *tx_rate,
+						uint32_t *rx_rate,
+						uint8_t *node_bw)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t bw = 0;
+	uint8_t assoc_bw = 0;
+	uint8_t sgi = 0;
+	uint8_t max_mcs = 0;
+	uint8_t max_nss = 0;
+	uint8_t vht = 0;
+
+	if (ni->ni_wifi_mode != IEEE80211_WIFI_MODE_NG &&
+		ni->ni_wifi_mode != IEEE80211_WIFI_MODE_NA &&
+		ni->ni_wifi_mode != IEEE80211_WIFI_MODE_AC) {
+			/* legacy: highest basic rate, 500kbps units -> Mbps */
+			*tx_rate =
+				(ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates - 1] &
+					IEEE80211_RATE_VAL) / 2;
+			*rx_rate = *tx_rate;
+			*node_bw = 20;
+			return;
+	}
+
+	if (IS_IEEE80211_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+		vht = 1;
+		get_node_vht_bw_and_sgi(ni, &bw, &assoc_bw, &sgi);
+		get_node_vht_max_nss_mcs(ni, &max_nss, &max_mcs);
+	} else {
+		get_node_ht_bw_and_sgi(ic, ni, &bw, &assoc_bw, &sgi);
+		get_node_ht_max_mcs(ic, ni, &max_mcs);
+	}
+
+	*tx_rate = ic->ic_mcs_to_phyrate(bw, sgi, max_mcs, max_nss, vht);
+	*rx_rate = *tx_rate;
+	*node_bw = assoc_bw;
+}
+
+/*
+ * Release every sampled-client record on the vap's sample list and
+ * reset the sample count.  The list lock is held across the purge.
+ */
+void sample_rel_client_data(struct ieee80211vap *vap)
+{
+	struct node_client_data *entry, *next;
+
+	spin_lock(&vap->sample_sta_lock);
+	list_for_each_entry_safe(entry, next, &vap->sample_sta_list, node_list) {
+		list_del(&entry->node_list);
+		kfree(entry);
+	}
+	vap->sample_sta_count = 0;
+	spin_unlock(&vap->sample_sta_lock);
+}
+
+/*
+ * Node-iterate callback: snapshot one associated client's identity,
+ * capability summary, traffic counters and per-antenna RF readings
+ * into a freshly allocated node_client_data record appended to the
+ * vap's sample list.  Nodes of other vaps and the vap's own node are
+ * skipped.
+ *
+ * NOTE(review): ni->ni_shared_stats is dereferenced before the skip
+ * checks -- assumes it is always non-NULL for every node; confirm.
+ */
+static void sample_iterate_client_data(void *s, struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = (struct ieee80211vap *)s;
+	struct node_client_data *clt = NULL;
+	int i;
+	struct qtn_node_shared_stats_rx *rx = &ni->ni_shared_stats->rx[STATS_SU];
+
+	/* Skipping other interface node and self vap node */
+	if ((ni->ni_vap != vap) ||
+		(memcmp(ni->ni_macaddr, vap->iv_myaddr, IEEE80211_ADDR_LEN) == 0)) {
+		return;
+	}
+
+	MALLOC(clt, struct node_client_data *, sizeof(struct node_client_data),
+							M_TEMP, M_WAITOK | M_ZERO);
+
+	if (clt == NULL) {
+		printk("Failed to alloc client data\n");
+		return;
+	}
+
+	IEEE80211_ADDR_COPY(clt->data.mac_addr, ni->ni_macaddr);
+
+	clt->data.assoc_id = IEEE80211_AID(ni->ni_associd);
+	clt->data.protocol = ni->ni_wifi_mode;
+	/* association age in seconds */
+	clt->data.time_associated = (u_int32_t)div_u64(get_jiffies_64() -
+							ni->ni_start_time_assoc, HZ);
+
+	get_node_max_mimo(ni, &clt->data.tx_stream, &clt->data.rx_stream);
+	get_node_achievable_phyrate_and_bw(ni, &clt->data.achievable_tx_phy_rate,
+						&clt->data.achievable_rx_phy_rate,
+						&clt->data.bw);
+
+	clt->data.rx_bytes = ni->ni_stats.ns_rx_bytes;
+	clt->data.tx_bytes = ni->ni_stats.ns_tx_bytes;
+	clt->data.rx_packets = ni->ni_stats.ns_rx_data;
+	clt->data.tx_packets = ni->ni_stats.ns_tx_data;
+	clt->data.rx_errors = ni->ni_stats.ns_rx_errors;
+	clt->data.tx_errors = ni->ni_stats.ns_tx_errors;
+	clt->data.rx_dropped = ni->ni_stats.ns_rx_dropped;
+	clt->data.tx_dropped = ni->ni_stats.ns_tx_dropped;
+	clt->data.rx_ucast = ni->ni_stats.ns_rx_ucast;
+	clt->data.tx_ucast = ni->ni_stats.ns_tx_ucast;
+	clt->data.rx_mcast = ni->ni_stats.ns_rx_mcast;
+	clt->data.tx_mcast = ni->ni_stats.ns_tx_mcast;
+	clt->data.rx_bcast = ni->ni_stats.ns_rx_bcast;
+	clt->data.tx_bcast = ni->ni_stats.ns_tx_bcast;
+	clt->data.link_quality = (uint16_t) ni->ni_linkqual;
+	clt->data.ip_addr = ni->ni_ip_addr;
+	clt->data.vendor = ni->ni_vendor;
+
+	for (i = 0; i < WME_AC_NUM; i++)
+		clt->data.tx_wifi_drop[i] = ni->ni_stats.ns_tx_wifi_drop[i];
+
+	/* NOTE(review): inclusive bound copies NUM_ANT+1 entries --
+	 * presumably NUM_ANT per-chain slots plus a combined slot;
+	 * confirm the array sizes. */
+	for (i = 0; i <= NUM_ANT; i++) {
+		clt->data.last_rssi_dbm[i] = rx->last_rssi_dbm[i];
+		clt->data.last_rcpi_dbm[i] = rx->last_rcpi_dbm[i];
+		clt->data.last_evm_dbm[i] = rx->last_evm_dbm[i];
+		clt->data.last_hw_noise[i] = rx->last_hw_noise[i];
+	}
+
+	spin_lock(&vap->sample_sta_lock);
+	list_add(&clt->node_list, &vap->sample_sta_list);
+	vap->sample_sta_count++;
+	spin_unlock(&vap->sample_sta_lock);
+}
+
+/*
+ * Sub-ioctl: rebuild the vap's sampled-client list (discarding any
+ * previous snapshot) and return the resulting station count to the
+ * caller.  Returns -EFAULT on a user-copy failure, else 0.
+ *
+ * NOTE(review): only sizeof(uint8_t) is copied from
+ * &vap->sample_sta_count; if that field is wider than one byte this
+ * returns just its first byte (endian-dependent) -- confirm the
+ * field's type.
+ */
+static int
+ieee80211_subioctl_sample_all_clients(struct ieee80211vap *vap, struct iwreq *iwr)
+{
+	struct ieee80211com *ic  = vap->iv_ic;
+	uint8_t __user *sta_count = iwr->u.data.pointer;
+
+	sample_rel_client_data(vap);
+	ic->ic_iterate_nodes(&ic->ic_sta, sample_iterate_client_data, vap, 1);
+
+	if (copy_to_user(sta_count, &vap->sample_sta_count, sizeof(*sta_count)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Sub-ioctl: copy a window of the sampled-client snapshot to user
+ * space.  The user supplies num_entry (records wanted) and offset
+ * (records to skip) inside a sample_assoc_user_data header; records
+ * [offset, offset+num_entry) from the vap's sample list are written to
+ * u_pointer->data.  Returns -EFAULT on user-copy failure, -EINVAL on
+ * a bad window or undersized buffer, else 0.
+ */
+static int
+ieee80211_subioctl_get_assoc_data(struct ieee80211vap *vap, void __user *pointer, uint32_t len)
+{
+	struct node_client_data *ncd;
+	int count = 0;
+	int u_count = 0;
+	int offset;
+	int num_entry;
+	struct sample_assoc_user_data __user *u_pointer =
+					(struct sample_assoc_user_data *)pointer;
+
+	if (copy_from_user(&num_entry, &u_pointer->num_entry, sizeof(num_entry)) != 0)
+		return -EFAULT;
+
+	if (copy_from_user(&offset, &u_pointer->offset, sizeof(offset)) != 0)
+		return -EFAULT;
+
+	/* both values are user-controlled signed ints: reject negatives
+	 * explicitly instead of relying on the signed comparisons and
+	 * size_t conversions below to catch them */
+	if (num_entry < 0 || offset < 0)
+		return -EINVAL;
+
+	if (vap->sample_sta_count < (num_entry + offset))
+		return -EINVAL;
+
+	if (len < (num_entry * sizeof(struct sample_assoc_data)
+				+ sizeof(u_pointer->num_entry)
+				+ sizeof(u_pointer->offset)))
+		return -EINVAL;
+
+	spin_lock(&vap->sample_sta_lock);
+	list_for_each_entry(ncd, &vap->sample_sta_list, node_list) {
+		/* copy only records inside the requested window */
+		if ((count >= offset) && (count < (num_entry + offset))) {
+			if (copy_to_user(((u_pointer->data) + (u_count)),
+				&(ncd->data), sizeof(struct sample_assoc_data)) != 0) {
+				spin_unlock(&vap->sample_sta_lock);
+				return -EFAULT;
+			}
+			u_count++;
+		}
+		count++;
+	}
+
+	spin_unlock(&vap->sample_sta_lock);
+
+	return 0;
+}
+
+/*
+ * Node-iterator callback: accumulate this node's per-AC WMM TX counters
+ * into the user-supplied ieee80211req_interface_wmmac_stats structure.
+ *
+ * 's' is the struct iwreq passed through ic_iterate_dev_nodes(); the user
+ * buffer is read, updated and written back once per node visited.
+ */
+static void
+get_interface_wmmac_stats(void *s, struct ieee80211_node *ni)
+{
+	struct ieee80211req_interface_wmmac_stats wmmac_stats;
+	struct iwreq *iwr = (struct iwreq *)s;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	void __user *pointer;
+	int i;
+
+	pointer	= iwr->u.data.pointer;
+	if (copy_from_user(&wmmac_stats, pointer, sizeof(wmmac_stats)))
+		return;
+
+	/* Refresh ni_shared_stats from the MAC before reading tx counters */
+	if (ic->ic_get_shared_node_stats)
+		ic->ic_get_shared_node_stats(ni);
+
+	for (i = 0; i < WMM_AC_NUM; i++) {
+		wmmac_stats.tx_wifi_drop[i] += ni->ni_stats.ns_tx_wifi_drop[i];
+		wmmac_stats.tx_wifi_sent[i] += ni->ni_shared_stats->tx[STATS_SU].tx_sent_data_msdu[i];
+	}
+
+	/* NOTE(review): the copy_to_user() result is discarded; this callback
+	 * returns void, so a fault here is silently ignored. */
+	copy_to_user(pointer, &wmmac_stats, sizeof(wmmac_stats));
+}
+
+/*
+ * Walk every node on this interface and fold its WMM AC counters into the
+ * user-supplied stats structure (see get_interface_wmmac_stats()).
+ * Always returns 0.
+ */
+static int
+ieee80211_subioctl_get_interface_wmmac_stats(struct net_device *dev,
+					     struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+
+	ic->ic_iterate_dev_nodes(vap->iv_dev, &ic->ic_sta, get_interface_wmmac_stats, iwr, 1);
+
+	return 0;
+}
+
+/*
+ * Report the set of active channels to user space as an
+ * ieee80211_freq_range table (channel number plus center frequency).
+ *
+ * STA mode reports the 20 MHz active-channel map; other modes report the
+ * full active map.  At most QTN_FREQ_RANGE_MAX_NUM entries are returned.
+ *
+ * Returns 0 on success, -ENOMEM if the user buffer is too small, -EFAULT
+ * on copy failure.  (Also removes a stray whitespace-only line.)
+ */
+static int
+ieee80211_subioctl_get_freq_range(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_freq_range range;
+	uint8_t reported[IEEE80211_CHAN_BYTES];
+	uint8_t *chan_active;
+	int i;
+
+	if (iwr->u.data.length < sizeof(range))
+		return -ENOMEM;
+
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		chan_active = ic->ic_chan_active_20;
+	else
+		chan_active = ic->ic_chan_active;
+
+	memset(reported, 0, sizeof(reported));
+	memset(&range, 0, sizeof(range));
+	for (i = 0; i < ic->ic_nchans; i++) {
+		const struct ieee80211_channel *c = &ic->ic_channels[i];
+
+		/* discard if previously reported (e.g. b/g) */
+		if (isclr(reported, c->ic_ieee) &&
+				isset(chan_active, c->ic_ieee)) {
+			setbit(reported, c->ic_ieee);
+			range.freq[range.num_freq].i = c->ic_ieee;
+			/* ic_freq is MHz; iw_freq encodes m * 10^e Hz, so
+			 * m = MHz * 100000 with e = 1 gives Hz */
+			range.freq[range.num_freq].m =
+				ic->ic_channels[i].ic_freq * 100000;
+			range.freq[range.num_freq].e = 1;
+			if (++range.num_freq == QTN_FREQ_RANGE_MAX_NUM)
+				break;
+		}
+	}
+
+	iwr->u.data.length = sizeof(range);
+	if (copy_to_user(iwr->u.data.pointer, &range, sizeof(range)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+#ifdef CONFIG_NAC_MONITOR
+#define NAC_MAX_STATIONS 128
+/*
+ * Report non-associated-client (NAC) monitor statistics to user space:
+ * compact the valid entries of the shared monitor table into the
+ * caller-supplied ieee80211_nac_stats_report.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EFAULT on copy
+ * failures.
+ *
+ * BUG FIX: the MAC address was copied to nac_stats[i] (raw table index)
+ * while every other field used the compacted index j, so as soon as one
+ * invalid entry was skipped the reported MACs no longer matched their
+ * statistics.  All fields now use j.
+ */
+static int
+ieee80211_subioctl_get_nac_stats(struct net_device *dev, struct iwreq *iwr)
+{
+	void __user *pointer;
+	struct ieee80211_nac_stats_report *report = NULL;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct nac_stats_entry *entry = &sp->nac_mon_info->nac_stats[0];
+	int i, j;
+	int retval = 0;
+
+	report = kmalloc(sizeof(*report), GFP_KERNEL);
+	if (!report)
+		return -ENOMEM;
+
+	pointer	= iwr->u.data.pointer;
+	if (copy_from_user(report, pointer, sizeof(*report))) {
+		retval = -EFAULT;
+		goto END;
+	}
+
+	/* NOTE(review): assumes report->nac_stats[] holds at least
+	 * NAC_MAX_STATIONS entries — confirm against the report struct. */
+	for (i = 0, j = 0; i < NAC_MAX_STATIONS; i++, entry++) {
+		if (!entry->nac_valid)
+			continue;
+		memcpy(&report->nac_stats[j].nac_txmac[0],
+				&entry->nac_txmac[0],
+				IEEE80211_ADDR_LEN);
+		report->nac_stats[j].nac_avg_rssi = entry->nac_avg_rssi;
+		report->nac_stats[j].nac_timestamp = entry->nac_timestamp;
+		report->nac_stats[j].nac_channel = entry->nac_channel;
+		report->nac_stats[j].nac_packet_type = entry->nac_packet_type;
+		j++;
+	}
+	report->nac_entries = j;
+
+	if (copy_to_user(pointer, report, sizeof(*report)) != 0)
+		retval = -EFAULT;
+END:
+	kfree(report);
+	return retval;
+}
+#endif
+
+/*
+ * Look up — or create — the node for a Fast Transition (802.11r) peer
+ * whose MAC address is supplied by the user-space authenticator, mark it
+ * FT-authenticated and update the MLME statistics.
+ *
+ * Returns 0 on success, -EFAULT on copy failure, -1 on short input or
+ * node allocation failure (preserved legacy error value).
+ *
+ * BUG FIX: the old code copied the caller-supplied 'len' bytes into the
+ * six-byte addr[] buffer (kernel stack overflow for len > 6), and on a
+ * copy_from_user() fault it only recorded -EFAULT and kept going with an
+ * uninitialized address.  Copy exactly IEEE80211_ADDR_LEN bytes and bail
+ * out on fault.
+ */
+static int
+ieee80211_subioctl_ft_add_node(struct net_device *dev,
+					void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = NULL;
+	uint8_t addr[IEEE80211_ADDR_LEN];
+
+	if (len < IEEE80211_ADDR_LEN)
+		return -1;
+
+	if (copy_from_user(&addr[0], pointer, IEEE80211_ADDR_LEN) != 0)
+		return -EFAULT;
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_AUTH, "[%s] FT add node req\n", ether_sprintf(&addr[0]));
+	ni = ieee80211_find_node(&ic->ic_sta, &addr[0]);
+	if (!ni) {
+		ni = ieee80211_dup_bss(vap, &addr[0]);
+		if (ni == NULL) {
+			mlme_stats_delayed_update(&addr[0], MLME_STAT_AUTH_FAILS, 1);
+			return -1;
+		}
+		ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
+		ni->ni_used_auth_algo = IEEE80211_AUTH_ALG_FT;
+	}
+	IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
+			ni, "station authenticated (%s)", "FT");
+	mlme_stats_delayed_update(ni->ni_macaddr, MLME_STAT_AUTH, 1);
+	ieee80211_free_node(ni);
+	return 0;
+}
+
+/*
+ * Transmit an FT (802.11r) authentication response prepared by the
+ * user-space authenticator.  The buffer holds an 802.11 header, the fixed
+ * auth fields, then optional IEs (RSN / Mobility Domain / FTIE) that are
+ * cached on the node for inclusion in the outgoing frame.
+ *
+ * Returns 0 on success, 1 if the peer node is unknown, or a negative
+ * errno on argument/copy/allocation failure.
+ *
+ * BUG FIX: 'ielen' was a uint8_t computed as data[1] + 2, which wraps to
+ * 0 or 1 for IE lengths 254/255 — data then stopped advancing and the
+ * loop could spin forever in the kernel.  data[1] was also read before
+ * checking that the two-byte IE header was inside the buffer.  The loop
+ * below bounds-checks the header first and uses a 32-bit length.
+ * (Also removes a stray ';;'.)
+ */
+static int
+ieee80211_subioctl_send_ft_auth_response(struct net_device *dev,
+					void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_frame *frm = NULL;
+	struct ieee80211_node *ni = NULL;
+	int subtype = 0;
+	struct ieee80211_auth *auth = NULL;
+	uint8_t *buf = NULL;
+	int retval = 0;
+	uint8_t *data = NULL;
+	uint8_t *efrm = NULL;
+
+	if (len < (sizeof(*frm) + sizeof(*auth)))
+		return -EINVAL;
+
+	MALLOC(buf, u_int8_t *, len, M_DEVBUF, M_WAITOK);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, pointer, len) != 0) {
+		retval = -EFAULT;
+		goto END;
+	}
+
+	data = buf;
+	frm = (struct ieee80211_frame *)data;
+	subtype = frm->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+	data += sizeof(*frm);
+	auth = (struct ieee80211_auth *)data;
+	ni = ieee80211_find_node(&ic->ic_sta, frm->i_addr1);
+	if (!ni) {
+		retval = 1;
+		goto END;
+	}
+	data += sizeof(*auth);
+
+	if (auth->status_code == 0 && (auth->auth_alg == IEEE80211_AUTH_ALG_FT)) {
+		efrm = buf + len;
+		/* Walk the IE list; each IE is id(1) + len(1) + data[2] bytes */
+		while (data + 2 <= efrm) {
+			uint32_t ielen = (uint32_t)data[1] + 2;
+
+			if (data + ielen > efrm)
+				break;
+			switch (*data) {
+			case IEEE80211_ELEMID_RSN:
+				if (ni->ni_tx_rsn_ie)
+					FREE(ni->ni_tx_rsn_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_rsn_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_rsn_ie)
+					memcpy(ni->ni_tx_rsn_ie, data, ielen);
+				break;
+			case IEEE80211_ELEMID_MOBILITY_DOMAIN:
+				if (ni->ni_tx_md_ie)
+					FREE(ni->ni_tx_md_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_md_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_md_ie)
+					memcpy(ni->ni_tx_md_ie, data, ielen);
+				break;
+			case IEEE80211_ELEMID_FTIE:
+				if (ni->ni_tx_ft_ie)
+					FREE(ni->ni_tx_ft_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_ft_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_ft_ie)
+					memcpy(ni->ni_tx_ft_ie, data, ielen);
+				break;
+			default:
+				break;
+			}
+			data += ielen;
+		}
+	}
+	if ((subtype == IEEE80211_FC0_SUBTYPE_AUTH)  && (auth->auth_alg == IEEE80211_AUTH_ALG_FT)) {
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH,
+			(auth->status_code << 16) | IEEE80211_AUTH_FT);
+		mlme_stats_delayed_update(ni->ni_macaddr, MLME_STAT_AUTH, 1);
+	}
+	ieee80211_free_node(ni);
+
+END:
+	FREE(buf, M_DEVBUF);
+	return retval;
+}
+
+/*
+ * Complete an FT (802.11r) association on behalf of the user-space
+ * authenticator.  On success the MD/FT IEs from the supplied frame are
+ * cached on the node and the node joins; on failure a DEAUTH is sent.
+ *
+ * Returns 0 on success, 1 if the peer node is unknown, or a negative
+ * errno on argument/copy/allocation failure.
+ *
+ * BUG FIX: 'ielen' was a uint8_t computed as data[1] + 2, which wraps to
+ * 0 or 1 for IE lengths 254/255 — data then stopped advancing and the
+ * loop could spin forever; data[1] was also read before checking the
+ * two-byte IE header was in bounds.  Fixed as below.
+ */
+static int
+ieee80211_subioctl_send_ft_assoc_response(struct net_device *dev,
+					void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_frame *frm = NULL;
+	struct ieee80211_node *ni = NULL;
+	uint8_t *buf = NULL;
+	struct ieee80211_assoc *assoc = NULL;
+	int retval = 0;
+	uint8_t *data = NULL;
+	uint8_t *efrm = NULL;
+
+	if (len < (sizeof(*frm) + sizeof(*assoc)))
+		return -EINVAL;
+
+	MALLOC(buf, u_int8_t *, len, M_DEVBUF, M_WAITOK);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, pointer, len) != 0) {
+		retval = -EFAULT;
+		goto END;
+	}
+
+	data = buf;
+	frm = (struct ieee80211_frame *)buf;
+	data += sizeof(*frm);
+	assoc = (struct ieee80211_assoc *)data;
+
+	ni = ieee80211_find_node(&ic->ic_sta, frm->i_addr1);
+	if (!ni) {
+		retval = 1;
+		goto END;
+	}
+	data += sizeof(*assoc);
+
+	if (assoc->assoc_status_code == IEEE80211_STATUS_SUCCESS) {
+		efrm = buf + len;
+		/* Walk the IE list; each IE is id(1) + len(1) + data[2] bytes */
+		while (data + 2 <= efrm) {
+			uint32_t ielen = (uint32_t)data[1] + 2;
+
+			if (data + ielen > efrm)
+				break;
+			switch (*data) {
+			case IEEE80211_ELEMID_MOBILITY_DOMAIN:
+				if (ni->ni_tx_md_ie)
+					FREE(ni->ni_tx_md_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_md_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_md_ie)
+					memcpy(ni->ni_tx_md_ie, data, ielen);
+				break;
+			case IEEE80211_ELEMID_FTIE:
+				if (ni->ni_tx_ft_ie)
+					FREE(ni->ni_tx_ft_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_ft_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_ft_ie)
+					memcpy(ni->ni_tx_ft_ie, data, ielen);
+				break;
+			default:
+				break;
+			}
+			data += ielen;
+		}
+		ieee80211_node_join(ni, IEEE80211_FC0_SUBTYPE_ASSOC_RESP);
+	} else {
+		IEEE80211_SEND_MGMT(ni,	IEEE80211_FC0_SUBTYPE_DEAUTH, assoc->assoc_status_code);
+	}
+	ieee80211_free_node(ni);
+END:
+	FREE(buf, M_DEVBUF);
+	return retval;
+}
+
+/*
+ * Complete an FT (802.11r) reassociation on behalf of the user-space
+ * authenticator.  On success the RSN/MD/FT IEs from the supplied frame
+ * are cached on the node and the node joins; on failure a DEAUTH is sent.
+ *
+ * Returns 0 on success, 1 if the peer node is unknown, or a negative
+ * errno on argument/copy/allocation failure.
+ *
+ * BUG FIX: 'ielen' was a uint8_t computed as data[1] + 2, which wraps to
+ * 0 or 1 for IE lengths 254/255 — data then stopped advancing and the
+ * loop could spin forever; data[1] was also read before checking the
+ * two-byte IE header was in bounds.  Fixed as below.
+ */
+static int
+ieee80211_subioctl_send_ft_reassoc_response(struct net_device *dev,
+					void __user *pointer, uint32_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_frame *frm = NULL;
+	struct ieee80211_node *ni = NULL;
+	struct ieee80211_assoc *assoc = NULL;
+	int retval = 0;
+	uint8_t *buf = NULL;
+	uint8_t *data = NULL;
+	uint8_t *efrm = NULL;
+
+	if (len < (sizeof(*frm) + sizeof(*assoc)))
+		return -EINVAL;
+
+	MALLOC(buf, u_int8_t *, len, M_DEVBUF, M_WAITOK);
+	if (!buf)
+		return -ENOMEM;
+	if (copy_from_user(buf, pointer, len) != 0) {
+		retval = -EFAULT;
+		goto END;
+	}
+
+	data = buf;
+	frm = (struct ieee80211_frame *)buf;
+	data += sizeof(*frm);
+	assoc = (struct ieee80211_assoc *)data;
+
+	ni = ieee80211_find_node(&ic->ic_sta, frm->i_addr1);
+	if (!ni) {
+		retval = 1;
+		goto END;
+	}
+	data += sizeof(*assoc);
+
+	if (assoc->assoc_status_code == IEEE80211_STATUS_SUCCESS) {
+		efrm = buf + len;
+		/* Walk the IE list; each IE is id(1) + len(1) + data[2] bytes */
+		while (data + 2 <= efrm) {
+			uint32_t ielen = (uint32_t)data[1] + 2;
+
+			if (data + ielen > efrm)
+				break;
+			switch (*data) {
+			case IEEE80211_ELEMID_RSN:
+				if (ni->ni_tx_rsn_ie)
+					FREE(ni->ni_tx_rsn_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_rsn_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_rsn_ie)
+					memcpy(ni->ni_tx_rsn_ie, data, ielen);
+				break;
+			case IEEE80211_ELEMID_MOBILITY_DOMAIN:
+				if (ni->ni_tx_md_ie)
+					FREE(ni->ni_tx_md_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_md_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_md_ie)
+					memcpy(ni->ni_tx_md_ie, data, ielen);
+				break;
+			case IEEE80211_ELEMID_FTIE:
+				if (ni->ni_tx_ft_ie)
+					FREE(ni->ni_tx_ft_ie, M_DEVBUF);
+				MALLOC(ni->ni_tx_ft_ie, void*, ielen, M_DEVBUF, M_ZERO);
+				if (ni->ni_tx_ft_ie)
+					memcpy(ni->ni_tx_ft_ie, data, ielen);
+				break;
+			default:
+				break;
+			}
+			data += ielen;
+		}
+		ieee80211_node_join(ni, IEEE80211_FC0_SUBTYPE_REASSOC_RESP);
+	} else {
+		IEEE80211_SEND_MGMT(ni,	IEEE80211_FC0_SUBTYPE_DEAUTH, assoc->assoc_status_code);
+	}
+	ieee80211_free_node(ni);
+END:
+	FREE(buf, M_DEVBUF);
+	return retval;
+}
+
+/*
+ * This function will be called when processes write following command:
+ *  get <unit> assoc_info.
+ * into file in sysfs.
+ *
+ * This function is to report all nodes in the table. User can do further filtering.
+ * E.g. user only need MAC address on designated WiFi interface.
+ *
+ * This function works for both AP and STA.
+ */
+void
+get_node_info(void *s, struct ieee80211_node *ni)
+{
+	struct seq_file *sq = (struct seq_file *)s;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	uint8_t *mac = ni->ni_macaddr;
+	uint8_t assoc_bw = 20;
+	uint8_t bw = 0;
+	uint8_t sgi = 0;
+	uint8_t max_mcs = 0;
+	uint8_t max_nss = 0;
+	uint8_t vht;
+	uint32_t achievable_tx_phy_rate;
+	uint32_t achievable_rx_phy_rate;  /* Unit: in Kbps */
+	uint32_t time_associated = 0;		/* Unit: second, 32bits should be up to 136 years */
+	uint32_t combined_ba_state = 0;
+	int32_t i;
+	uint32_t current_tx_phy_rate = 0;
+	uint32_t current_rx_phy_rate = 0;
+	int32_t current_rssi = ic->ic_rssi(ni);
+	int32_t current_snr = ic->ic_snr(ni);
+	int32_t current_max_queue = ic->ic_max_queue(ni);
+	uint32_t tx_failed = ic->ic_tx_failed(ni);
+
+	/* Query current TX (dir 0) and RX (dir 1) PHY rates from the MAC */
+	ic->ic_rxtx_phy_rate(ni, 0, NULL, NULL, &current_tx_phy_rate);
+	ic->ic_rxtx_phy_rate(ni, 1, NULL, NULL, &current_rx_phy_rate);
+
+	if (IS_IEEE80211_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+		vht = 1;
+		get_node_vht_bw_and_sgi(ni, &bw, &assoc_bw, &sgi);
+	} else {
+		vht = 0;
+		get_node_ht_bw_and_sgi(ic, ni, &bw, &assoc_bw, &sgi);
+	}
+
+	/* Achievable rates only make sense once the node is authorized */
+	if (ieee80211_node_is_authorized(ni)) {
+		if (vht == 0)
+			get_node_ht_max_mcs(ic, ni, &max_mcs);
+		else
+			get_node_vht_max_nss_mcs(ni, &max_nss, &max_mcs);
+
+		achievable_tx_phy_rate = ic->ic_mcs_to_phyrate(bw, sgi, max_mcs, max_nss, vht);
+		achievable_rx_phy_rate = achievable_tx_phy_rate;
+	} else {
+		achievable_tx_phy_rate = 0;
+		achievable_rx_phy_rate = 0;
+	}
+
+	/* NOTE(review): (-1200, -1) is treated as the valid range here —
+	 * presumably RSSI is in 0.1 dBm units; confirm against ic_rssi(). */
+	if (current_rssi < -1 && current_rssi > -1200) {
+		ni->ni_rssi = current_rssi;
+	} else if (ni->ni_rssi > 0) {
+		/* correct pseudo RSSIs that apparently still get into the node table */
+		ni->ni_rssi = (ni->ni_rssi * 10) - 900;
+	}
+
+	ni->ni_smthd_rssi = ic->ic_smoothed_rssi(ni);
+	if ((ni->ni_smthd_rssi > -1) || (ni->ni_smthd_rssi < -1200)) {
+		ni->ni_smthd_rssi = ni->ni_rssi;
+	}
+
+	if (current_tx_phy_rate > 0) {
+		ni->ni_linkqual = (uint16_t) current_tx_phy_rate;
+	}
+
+	if (current_rx_phy_rate > 0) {
+		ni->ni_rx_phy_rate = (uint16_t) current_rx_phy_rate;
+	}
+
+	if (current_snr < -1) {
+		ni->ni_snr = current_snr;
+	}
+
+	if (current_max_queue > -1) {
+		ni->ni_max_queue = current_max_queue;
+	}
+
+	/* The local BSS node (AP side) reports 0 as its association time */
+	if(vap->iv_opmode == IEEE80211_M_STA) {
+		time_associated = (vap->iv_state == IEEE80211_S_RUN) ?
+				(u_int32_t)div_u64(get_jiffies_64() - ni->ni_start_time_assoc, HZ) : 0;
+	} else {
+		time_associated = (vap->iv_bss == ni) ?
+				0 : (u_int32_t)div_u64(get_jiffies_64() - ni->ni_start_time_assoc, HZ);
+	}
+
+	COMPILE_TIME_ASSERT(WME_NUM_TID <= 16);
+
+	/* Bitmap: bit i = TX BA established for TID i,
+	 * bit (WME_NUM_TID + i) = RX BA established for TID i */
+	for (i = 0; i < WME_NUM_TID; i++) {
+		combined_ba_state |= (int) (ni->ni_ba_rx[i].state == IEEE80211_BA_ESTABLISHED)
+				<< (WME_NUM_TID + i);
+		combined_ba_state |= (int) (ni->ni_ba_tx[i].state == IEEE80211_BA_ESTABLISHED) << i;
+	}
+
+	/* Refresh ni_shared_stats / ni_stats from the MAC before printing */
+	if (ic->ic_get_shared_node_stats)
+		ic->ic_get_shared_node_stats(ni);
+
+	if (sq != NULL) {
+		/* NOTE: if this output format changes, there are flow-on effects to qcsapi. */
+		seq_printf(sq,
+			"%02X:%02X:%02X:%02X:%02X:%02X "
+			"%4u %3u %10s %5u %5u %6u %6u %5d %5d %10u %12llu %10u %12llu "
+			"%5u %5u %5u %5u %7u %7u %7u %7u %7u %7u %5d %6u %2u %8u %4u %08x %s\n",
+			mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+			IEEE80211_AID(ni->ni_associd),
+			IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+			ieee80211_tdls_status_string_get(ni->tdls_status),
+			ni->ni_linkqual,
+			ni->ni_rx_phy_rate,
+			achievable_tx_phy_rate,
+			achievable_rx_phy_rate,
+			ni->ni_smthd_rssi,
+			ni->ni_snr,
+			ni->ni_stats.ns_rx_data,
+			ni->ni_stats.ns_rx_bytes,
+			ni->ni_stats.ns_tx_data,
+			ni->ni_stats.ns_tx_bytes,
+			ni->ni_stats.ns_rx_errors,
+			ni->ni_stats.ns_rx_dropped,
+			ni->ni_stats.ns_tx_errors,
+			ni->ni_stats.ns_tx_dropped,
+			ni->ni_stats.ns_tx_ucast,
+			ni->ni_stats.ns_rx_ucast,
+			ni->ni_stats.ns_tx_mcast,
+			ni->ni_stats.ns_rx_mcast,
+			ni->ni_stats.ns_tx_bcast,
+			ni->ni_stats.ns_rx_bcast,
+			ni->ni_max_queue,
+			tx_failed,
+			assoc_bw,
+			time_associated,
+			ieee80211_node_is_authorized(ni),
+			combined_ba_state,
+			vap->iv_dev->name
+		);
+	}
+}
+EXPORT_SYMBOL(get_node_info);
+
+/*
+ * Node-iterator callback: print one line of association state for a node
+ * to the seq_file passed as 's' (MAC, node index, AID, node type, wifi
+ * mode, vendor, BW, association time, auth flag, BA bitmap, TDLS state,
+ * interface name and power-save scheme).
+ */
+void get_node_assoc_state(void *s, struct ieee80211_node *ni)
+{
+	struct seq_file *sq = (struct seq_file *)s;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t *mac = ni->ni_macaddr;
+	uint8_t assoc_bw = 20;
+	uint8_t bw = 0;
+	uint8_t sgi = 0;
+	uint32_t time_associated = 0;	/* Unit: second, 32bits should be up to 136 years */
+	uint32_t combined_ba_state = 0;
+	uint32_t i;
+
+	const char *wifi_modes_strings[] = WLAN_WIFI_MODES_STRINGS;
+	COMPILE_TIME_ASSERT(ARRAY_SIZE(wifi_modes_strings) == IEEE80211_WIFI_MODE_MAX);
+
+	if (!sq)
+		return;
+
+	if (IS_IEEE80211_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT))
+		get_node_vht_bw_and_sgi(ni, &bw, &assoc_bw, &sgi);
+	else
+		get_node_ht_bw_and_sgi(ic, ni, &bw, &assoc_bw, &sgi);
+
+	/* The local BSS node (AP side) reports 0 as its association time */
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		time_associated = (vap->iv_state == IEEE80211_S_RUN) ?
+			(u_int32_t)div_u64(get_jiffies_64()-ni->ni_start_time_assoc, HZ) : 0;
+	else
+		time_associated = (vap->iv_bss == ni) ?
+			0 : (u_int32_t)div_u64(get_jiffies_64()-ni->ni_start_time_assoc, HZ);
+
+	COMPILE_TIME_ASSERT(WME_NUM_TID <= 16);
+
+	/* Bitmap: bit i = TX BA established for TID i,
+	 * bit (WME_NUM_TID + i) = RX BA established for TID i */
+	for (i = 0; i < WME_NUM_TID; i++) {
+		combined_ba_state |= (int) (ni->ni_ba_rx[i].state == IEEE80211_BA_ESTABLISHED)
+				<< (WME_NUM_TID + i);
+		combined_ba_state |= (int) (ni->ni_ba_tx[i].state == IEEE80211_BA_ESTABLISHED) << i;
+	}
+
+	seq_printf(sq,
+		"%pM %4u %4u %6s %4s %8s %4u %7u %6u   %08x %12s %10s %16u\n",
+		mac,
+		IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+		IEEE80211_AID(ni->ni_associd),
+		ieee80211_get_node_type_str(ni->ni_node_type),
+		wifi_modes_strings[ni->ni_wifi_mode],
+		ieee80211_get_vendor_str(ni->ni_vendor),
+		assoc_bw,
+		time_associated,
+		ieee80211_node_is_authorized(ni),
+		combined_ba_state,
+		ieee80211_tdls_status_string_get(ni->tdls_status),
+		vap->iv_dev->name,
+		ieee80211_node_power_save_scheme(ni));
+}
+EXPORT_SYMBOL(get_node_assoc_state);
+
+/*
+ * Node-iterator callback: print software/hardware version information for
+ * one node to the seq_file passed as 's'.  AID 0 identifies the local
+ * device, whose versions come from the ieee80211com; any other AID prints
+ * the peer's advertised versions cached on the node.
+ */
+void get_node_ver(void *s, struct ieee80211_node *ni)
+{
+	struct seq_file *sq = (struct seq_file *)s;
+#define IEEE80211_VER_SW_STR_LEN 15
+	char sw_str[IEEE80211_VER_SW_STR_LEN];
+	struct ieee80211com *ic = ni->ni_ic;
+	uint32_t *ver_sw;
+	uint16_t ver_platform_id;
+	uint16_t ver_hw;
+	uint32_t timestamp;
+	uint32_t flags;
+
+	if (sq == NULL) {
+		return;
+	}
+
+	if (IEEE80211_AID(ni->ni_associd) == 0) {
+		/* Local device: report our own versions */
+		ver_sw = &ic->ic_ver_sw;
+		ver_platform_id = ic->ic_ver_platform_id;
+		ver_hw = ic->ic_ver_hw;
+		timestamp = ic->ic_ver_timestamp;
+		flags = ic->ic_ver_flags;
+	} else {
+		/* Peer: report what the node advertised */
+		ver_sw = &ni->ni_ver_sw;
+		ver_platform_id = ni->ni_ver_platform_id;
+		ver_hw = ni->ni_ver_hw;
+		timestamp = ni->ni_ver_timestamp;
+		flags = ni->ni_ver_flags;
+	}
+
+	/* Format the packed 4-byte software version as dotted fields */
+	snprintf(sw_str, sizeof(sw_str),
+		DBGFMT_BYTEFLD4_P,
+		DBGFMT_BYTEFLD4_V(*ver_sw));
+
+	/* A zero software version is printed as "-" (unknown) */
+	seq_printf(sq, "%pM %4u %-*s %-8u %-6u %-10u 0x%08x\n",
+		ni->ni_macaddr,
+		IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+		IEEE80211_VER_SW_STR_LEN, *ver_sw ? sw_str : "-",
+		ver_platform_id,
+		ver_hw,
+		timestamp,
+		flags);
+}
+EXPORT_SYMBOL(get_node_ver);
+
+/*
+ * Refresh the cached link-quality fields on a node (ni_rssi,
+ * ni_smthd_rssi, ni_linkqual == current TX PHY rate, ni_snr) from the
+ * current MAC/PHY readings, discarding out-of-range samples.
+ *
+ * NOTE(review): values outside (-1200, -1) are treated as invalid —
+ * presumably RSSI/SNR are reported in 0.1 dBm units; confirm against
+ * ic_rssi()/ic_snr().
+ */
+void
+ieee80211_update_node_assoc_qual(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint32_t current_tx_phy_rate = 0;
+	int32_t current_rssi = ic->ic_rssi(ni);
+	int32_t current_snr = ic->ic_snr(ni);
+
+	/* Direction 0 = TX rate */
+	ic->ic_rxtx_phy_rate(ni, 0, NULL, NULL, &current_tx_phy_rate);
+
+	if (current_rssi < -1 && current_rssi > -1200)
+		ni->ni_rssi = current_rssi;
+	else if (ni->ni_rssi > 0)
+		/* correct pseudo RSSIs that apparently still get into the node table */
+		ni->ni_rssi = (ni->ni_rssi * 10) - 900;
+
+	ni->ni_smthd_rssi = ic->ic_smoothed_rssi(ni);
+	if ((ni->ni_smthd_rssi > -1) || (ni->ni_smthd_rssi < -1200))
+		ni->ni_smthd_rssi = ni->ni_rssi;
+
+	if (current_tx_phy_rate > 0)
+		ni->ni_linkqual = (uint16_t) current_tx_phy_rate;
+
+	if (current_snr < -1)
+		ni->ni_snr = current_snr;
+}
+EXPORT_SYMBOL(ieee80211_update_node_assoc_qual);
+
+/*
+ * Node-iterator callback: print one line of TX statistics for a node to
+ * the seq_file passed as 's' (MAC, node index, achievable TX PHY rate in
+ * Kbps, TX packet/byte/error/drop and cast counters, max queue depth and
+ * failed-TX count).
+ */
+void
+get_node_tx_stats(void *s, struct ieee80211_node *ni)
+{
+	struct seq_file *sq = (struct seq_file *)s;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t *mac = ni->ni_macaddr;
+	int32_t current_max_queue = ic->ic_max_queue(ni);
+	uint32_t tx_failed = ic->ic_tx_failed(ni);
+	uint32_t achievable_tx_phy_rate; /* Kbps */
+	uint8_t assoc_bw = 20;
+	uint8_t bw = 0;
+	uint8_t sgi = 0;
+	uint8_t max_mcs = 0;
+	uint8_t max_nss = 0;
+	uint8_t vht;
+
+	if (!sq)
+		return;
+
+	if (IS_IEEE80211_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+		vht = 1;
+		get_node_vht_bw_and_sgi(ni, &bw, &assoc_bw, &sgi);
+	} else {
+		vht = 0;
+		get_node_ht_bw_and_sgi(ic, ni, &bw, &assoc_bw, &sgi);
+	}
+
+	/* Achievable rate only makes sense once the node is authorized */
+	if (ieee80211_node_is_authorized(ni)) {
+		if (vht == 0)
+			get_node_ht_max_mcs(ic, ni, &max_mcs);
+		else
+			get_node_vht_max_nss_mcs(ni, &max_nss, &max_mcs);
+
+		achievable_tx_phy_rate = ic->ic_mcs_to_phyrate(bw, sgi, max_mcs, max_nss, vht);
+	} else {
+		achievable_tx_phy_rate = 0;
+	}
+
+	if (current_max_queue > -1)
+		ni->ni_max_queue = current_max_queue;
+
+	seq_printf(sq,
+		"%pM %4u %8u %10u %12llu %8u %8u %10u %10u %10u %5u %7u\n",
+		mac,
+		IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+		achievable_tx_phy_rate,
+		ni->ni_stats.ns_tx_data,
+		ni->ni_stats.ns_tx_bytes,
+		ni->ni_stats.ns_tx_errors,
+		ni->ni_stats.ns_tx_dropped,
+		ni->ni_stats.ns_tx_ucast,
+		ni->ni_stats.ns_tx_mcast,
+		ni->ni_stats.ns_tx_bcast,
+		ni->ni_max_queue,
+		tx_failed);
+}
+EXPORT_SYMBOL(get_node_tx_stats);
+
+/*
+ * Node-iterator callback: print one line of RX statistics for a node to
+ * the seq_file passed as 's' (MAC, node index, achievable and current RX
+ * PHY rates in Kbps, RX packet/byte/error/drop and cast counters).
+ */
+void
+get_node_rx_stats(void *s, struct ieee80211_node *ni)
+{
+	struct seq_file *sq = (struct seq_file *)s;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint8_t *mac = ni->ni_macaddr;
+	uint32_t achievable_rx_phy_rate; /* Kbps */
+	uint32_t current_rx_phy_rate = 0;
+	uint8_t assoc_bw = 20;
+	uint8_t bw = 0;
+	uint8_t sgi = 0;
+	uint8_t max_mcs = 0;
+	uint8_t max_nss = 0;
+	uint8_t vht;
+
+	if (!sq)
+		return;
+
+	/* Direction 1 = RX rate */
+	ic->ic_rxtx_phy_rate(ni, 1, NULL, NULL, &current_rx_phy_rate);
+
+	if (IS_IEEE80211_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+		vht = 1;
+		get_node_vht_bw_and_sgi(ni, &bw, &assoc_bw, &sgi);
+	} else {
+		vht = 0;
+		get_node_ht_bw_and_sgi(ic, ni, &bw, &assoc_bw, &sgi);
+	}
+
+	/* Achievable rate only makes sense once the node is authorized */
+	if (ieee80211_node_is_authorized(ni)) {
+		if (vht == 0)
+			get_node_ht_max_mcs(ic, ni, &max_mcs);
+		else
+			get_node_vht_max_nss_mcs(ni, &max_nss, &max_mcs);
+
+		achievable_rx_phy_rate = ic->ic_mcs_to_phyrate(bw, sgi, max_mcs, max_nss, vht);
+	} else {
+		achievable_rx_phy_rate = 0;
+	}
+
+	if (current_rx_phy_rate > 0)
+		ni->ni_rx_phy_rate = (uint16_t) current_rx_phy_rate;
+
+	seq_printf(sq,
+		"%pM %4u %8d %8d %10u %12llu %8u %8u %10u %10u %10u\n",
+		mac,
+		IEEE80211_NODE_IDX_UNMAP(ni->ni_node_idx),
+		achievable_rx_phy_rate,
+		ni->ni_rx_phy_rate,
+		ni->ni_stats.ns_rx_data,
+		ni->ni_stats.ns_rx_bytes,
+		ni->ni_stats.ns_rx_errors,
+		ni->ni_stats.ns_rx_dropped,
+		ni->ni_stats.ns_rx_ucast,
+		ni->ni_stats.ns_rx_mcast,
+		ni->ni_stats.ns_rx_bcast);
+}
+EXPORT_SYMBOL(get_node_rx_stats);
+
+/*
+ * Node-iterator callback: track the strongest RSSI among associated
+ * Broadcom peers.  'arg' points at an int32_t accumulator which is only
+ * raised, never lowered; zero RSSI readings are ignored.
+ */
+static void
+get_node_max_rssi (void *arg, struct ieee80211_node *ni) {
+
+	int32_t *best_rssi = (int32_t *)arg;
+	int32_t node_rssi = (ni->ni_ic->ic_rssi(ni)/IEEE80211_RSSI_FACTOR);
+
+	/* Skip invalid readings and nodes that are not associated */
+	if (node_rssi == 0 || ni->ni_associd == 0)
+		return;
+
+	/* Only Broadcom peers participate in this work-around */
+	if (ni->ni_vendor != PEER_VENDOR_BRCM)
+		return;
+
+	if (node_rssi > *best_rssi)
+		*best_rssi = node_rssi;
+}
+
+/*
+ * Periodic PCO (power constraint offset) timer work-around.
+ *
+ * When exactly one STA is associated and it is a Broadcom peer whose RSSI
+ * rises above the primary (then secondary) threshold on high 5 GHz
+ * channels, advertise an 802.11h power constraint in the beacon so the
+ * STA backs its TX power down; restore the saved constraint once the
+ * RSSI falls again.  Re-arms itself every 1 s, or every 60 s while a
+ * constraint is being applied.
+ */
+static void
+ieee80211_pco_timer_func ( unsigned long arg ) {
+	struct ieee80211vap *vap = (struct ieee80211vap *) arg;
+	struct ieee80211com *ic = vap->iv_ic;
+	uint16_t pwr_constraint = ic->ic_pco.pco_pwr_constraint;
+	uint8_t rssi_threshold = ic->ic_pco.pco_rssi_threshold;
+	/* Secondary (less aggressive) constraint; clamped at zero */
+	uint16_t pwr_constraint_sec = (ic->ic_pco.pco_pwr_constraint > ic->ic_pco.pco_sec_offset) ? (ic->ic_pco.pco_pwr_constraint-ic->ic_pco.pco_sec_offset):0;
+	uint8_t rssi_threshold_sec = ic->ic_pco.pco_rssi_threshold+ic->ic_pco.pco_sec_offset;
+	uint8_t next_update = 1;
+	int32_t max_rssi = -100;
+
+	// Apply WAR for single STA only
+	if( ic->ic_sta_assoc == 1 ) {
+		// check for brcm node with RSSI > rssi_threshold
+		ieee80211_iterate_nodes(&ic->ic_sta, get_node_max_rssi, &max_rssi, 1);
+	}
+
+	/*
+	 * Apply contraint WAR for higher channels only.
+	 * The backoff dB value comparing with max reg tx power is a safety check
+	 * to make sure the STAs local_tx_power won't be less than 0dBm
+	 */
+	if ((max_rssi > -rssi_threshold) && !(ic->ic_pco.pco_set) &&
+		(pwr_constraint < ic->ic_bsschan->ic_maxregpower) &&
+		(ic->ic_curchan->ic_ieee > QTN_5G_LAST_UNII2_OPERATING_CHAN)) {
+		/* Back down the STA power using Power constraint */
+		ic->ic_pwr_constraint = pwr_constraint;
+		if (vap->iv_state == IEEE80211_S_RUN)
+			ic->ic_beacon_update(vap);
+		ic->ic_pco.pco_set = 1;
+		next_update = 60;
+	} else if ((max_rssi > -rssi_threshold_sec) && !(ic->ic_pco.pco_set) &&
+		(pwr_constraint_sec < ic->ic_bsschan->ic_maxregpower) &&
+		(ic->ic_curchan->ic_ieee > QTN_5G_LAST_UNII2_OPERATING_CHAN)) {
+		/* Back down the STA power using Secondary Power constraint */
+		ic->ic_pwr_constraint = pwr_constraint_sec;
+		if (vap->iv_state == IEEE80211_S_RUN)
+			ic->ic_beacon_update(vap);
+		ic->ic_pco.pco_set = 1;
+		next_update = 60;
+	} else if ((ic->ic_pco.pco_set) && (max_rssi <= -rssi_threshold_sec)) {
+		/* RSSI dropped below the secondary threshold: restore */
+		ic->ic_pwr_constraint = ic->ic_pco.pco_pwr_constraint_save;
+		if (vap->iv_state == IEEE80211_S_RUN)
+			ic->ic_beacon_update(vap);
+		ic->ic_pco.pco_set = 0;
+		next_update = 1;
+	}
+
+	mod_timer(&ic->ic_pco.pco_timer,
+		jiffies + (next_update * HZ));
+}
+/*
+ * Implementation of ioctl command: IEEE80211_IOCTL_GET_ASSOC_TBL
+ * This command reports all nodes in the table. The user can do further filtering of the nodes.
+ * E.g. user only need MAC address on designated WiFi interface.
+ *
+ * This function works for both AP and STA.
+ */
+/*
+ * Node-iterator callback: append one assoc_info_report record for this
+ * node to the user-space assoc_info_table referenced by the iwreq in 's',
+ * and bump the table's 'cnt' field.  Stops silently once QTN_ASSOC_LIMIT
+ * records have been written or on any user-copy fault (void callback).
+ *
+ * BUG FIXES:
+ *  - 'record' was copied to user space without being zeroed, leaking
+ *    kernel stack bytes through struct padding and the unwritten tail of
+ *    ai_ifname; it is now memset first.
+ *  - strncpy() was bounded by strlen(source) + 1 (i.e. effectively
+ *    strcpy), overflowing ai_ifname for long interface names; it is now
+ *    bounded by the destination size.
+ *  - the final copy_to_user() result was ignored.
+ */
+static void get_node_info_ioctl(void *s, struct ieee80211_node *ni)
+{
+	struct iwreq *iwr = (struct iwreq *)s;
+	struct assoc_info_report __user	*u_record;
+	struct assoc_info_report record;
+	uint16_t __user	*u_cnt;
+	uint16_t cnt;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic	= vap->iv_ic;
+	uint8_t *mac = ni->ni_macaddr;
+	uint8_t assoc_bw = 20;
+	uint8_t bw = 0;
+	uint8_t sgi = 0;
+	uint8_t max_mcs = 0;
+	uint8_t max_nss = 0;
+	uint8_t tx_mcs = 0;
+	uint8_t rx_mcs = 0;
+	uint8_t vht;
+	uint32_t max_tx_rate;
+	uint32_t max_rx_rate;		/* Unit: Kbps */
+	uint32_t assoc_time = 0;		/* Unit: second, 32bits should be up to 136 years */
+	uint32_t cur_tx_rate = 0;
+	uint32_t cur_rx_rate = 0;
+	int32_t rssi = ic->ic_rssi(ni);
+	int32_t snr = ic->ic_snr(ni);
+	int32_t max_queue = ic->ic_max_queue(ni);
+	uint32_t tx_failed = ic->ic_tx_failed(ni);
+	int i;
+
+	ni->ni_hw_noise = ic->ic_hw_noise(ni);
+	/* Direction 0 = TX, 1 = RX */
+	ic->ic_rxtx_phy_rate(ni, 0, NULL, &tx_mcs, &cur_tx_rate);
+	ic->ic_rxtx_phy_rate(ni, 1, NULL, &rx_mcs, &cur_rx_rate);
+
+	if (IS_IEEE80211_VHT_ENABLED(ic) && (ni->ni_flags & IEEE80211_NODE_VHT)) {
+		vht = 1;
+		get_node_vht_bw_and_sgi(ni, &bw, &assoc_bw, &sgi);
+	} else {
+		vht = 0;
+		get_node_ht_bw_and_sgi(ic, ni, &bw, &assoc_bw, &sgi);
+	}
+
+	/* Achievable rates only make sense once the node is authorized */
+	if (ieee80211_node_is_authorized(ni)) {
+		if (vht == 0)
+			get_node_ht_max_mcs(ic, ni, &max_mcs);
+		else
+			get_node_vht_max_nss_mcs(ni, &max_nss, &max_mcs);
+
+		max_tx_rate = ic->ic_mcs_to_phyrate(bw, sgi, max_mcs, max_nss, vht);
+		max_rx_rate = max_tx_rate;
+	} else {
+		max_tx_rate = 0;
+		max_rx_rate = 0;
+	}
+
+	if (rssi < -1 && rssi > -1200)
+		ni->ni_rssi = rssi;
+	else if (ni->ni_rssi > 0) {
+		/* Correct pseudo RSSIs that apparently still get into the node table */
+		ni->ni_rssi = (ni->ni_rssi * 10) - 900;
+	}
+
+	ni->ni_smthd_rssi = ic->ic_smoothed_rssi(ni);
+	if ((ni->ni_smthd_rssi > -1) || (ni->ni_smthd_rssi < -1200)) {
+		ni->ni_smthd_rssi = ni->ni_rssi;
+	}
+
+	if (cur_tx_rate > 0)
+		ni->ni_linkqual    = (uint16_t) cur_tx_rate;
+
+	if (cur_rx_rate > 0)
+		ni->ni_rx_phy_rate = (uint16_t) cur_rx_rate;
+
+	if (snr < -1)
+		ni->ni_snr	   = snr;
+
+	if (max_queue > -1)
+		ni->ni_max_queue   = max_queue;
+
+	/* The local BSS node (AP side) reports 0 as its association time */
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		assoc_time = (vap->iv_state == IEEE80211_S_RUN) ?
+			(u_int32_t)div_u64(get_jiffies_64() - ni->ni_start_time_assoc, HZ) : 0;
+	} else {
+		assoc_time = (vap->iv_bss == ni) ?
+			0 : (u_int32_t)div_u64(get_jiffies_64() - ni->ni_start_time_assoc, HZ);
+	}
+
+	if (ic->ic_get_shared_node_stats)
+		ic->ic_get_shared_node_stats(ni);
+
+	/* Zero the whole record so no uninitialized stack bytes (padding,
+	 * ifname tail) can reach user space */
+	memset(&record, 0, sizeof(record));
+
+	memcpy(record.ai_mac_addr, mac, IEEE80211_ADDR_LEN);
+
+	record.ai_assoc_id		= IEEE80211_AID(ni->ni_associd);
+	record.ai_link_quality		= (uint16_t) ni->ni_linkqual;
+	record.ai_rx_phy_rate		= ni->ni_rx_phy_rate;
+	record.ai_tx_phy_rate		= (uint16_t) ni->ni_linkqual;
+	record.ai_achievable_tx_phy_rate = max_tx_rate;
+	record.ai_achievable_rx_phy_rate = max_rx_rate;
+	record.ai_rssi			= ni->ni_rssi;
+	record.ai_smthd_rssi		= ni->ni_smthd_rssi;
+	record.ai_snr			= ni->ni_snr;
+	record.ai_rx_packets		= ni->ni_stats.ns_rx_data;
+	record.ai_rx_bytes		= ni->ni_stats.ns_rx_bytes;
+	record.ai_tx_packets		= ni->ni_stats.ns_tx_data;
+	record.ai_tx_bytes		= ni->ni_stats.ns_tx_bytes;
+	record.ai_rx_errors		= ni->ni_stats.ns_rx_errors;
+	record.ai_rx_dropped		= ni->ni_stats.ns_rx_dropped;
+	record.ai_tx_ucast		= ni->ni_stats.ns_tx_ucast;
+	record.ai_rx_ucast		= ni->ni_stats.ns_rx_ucast;
+	record.ai_tx_mcast		= ni->ni_stats.ns_tx_mcast;
+	record.ai_rx_mcast		= ni->ni_stats.ns_rx_mcast;
+	record.ai_tx_bcast		= ni->ni_stats.ns_tx_bcast;
+	record.ai_rx_bcast		= ni->ni_stats.ns_rx_bcast;
+	record.ai_tx_errors		= ni->ni_stats.ns_tx_errors;
+	record.ai_tx_dropped		= ni->ni_stats.ns_tx_dropped;
+	for (i = 0; i < WME_AC_NUM; i++) {
+		record.ai_tx_wifi_drop[i] = ni->ni_stats.ns_tx_wifi_drop[i];
+		record.ai_tx_wifi_sent[i] = ni->ni_shared_stats->tx[STATS_SU].tx_sent_data_msdu[i];
+	}
+	record.ai_rx_fragment_pkts	= ni->ni_stats.ns_rx_fragment_pkts;
+	record.ai_rx_vlan_pkts		= ni->ni_stats.ns_rx_vlan_pkts;
+	record.ai_max_queued		= ni->ni_max_queue;
+	record.ai_tx_failed		= tx_failed;
+	record.ai_bw			= assoc_bw;
+	record.ai_tx_mcs		= tx_mcs;
+	record.ai_rx_mcs		= rx_mcs;
+	record.ai_time_associated	= assoc_time;
+	record.ai_auth			= ieee80211_node_is_authorized(ni);
+	record.ai_ip_addr		= ni->ni_ip_addr;
+	record.ai_hw_noise		= ni->ni_hw_noise;
+	record.ai_is_qtn_node		= (ni->ni_qtn_assoc_ie) ? 1 : 0;
+
+	/* Bound by the destination; the memset above guarantees NUL termination */
+	strncpy(record.ai_ifname, vap->iv_dev->name, sizeof(record.ai_ifname) - 1);
+
+	/* Update record to user space  */
+	u_cnt = &(( (struct assoc_info_table *)iwr->u.data.pointer )->cnt);
+	if (copy_from_user(&cnt, u_cnt, sizeof(cnt)))
+		return;
+
+	if (cnt >= QTN_ASSOC_LIMIT)
+		return;
+
+	u_record = ( (struct assoc_info_table *)iwr->u.data.pointer )->array + cnt;
+	cnt++;
+
+	if (copy_to_user(u_record, &record, sizeof(record)))
+		return;
+
+	if (copy_to_user(u_cnt, &cnt, sizeof(cnt)))
+		return;
+}
+
+/*
+ * Implementation of IEEE80211_IOCTL_GET_ASSOC_TBL: validate that user
+ * space agrees on sizeof(struct assoc_info_report), then fill the user
+ * table with one record per node via get_node_info_ioctl().
+ *
+ * BUG FIX: unit_size was read by dereferencing iwr->u.data.pointer (a
+ * user-space pointer) directly in the kernel; it is now fetched with
+ * get_user().
+ */
+static int
+ieee80211_ioctl_getassoctbl(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	struct assoc_info_table __user *u_table =
+			(struct assoc_info_table __user *)iwr->u.data.pointer;
+	uint16_t unit_size;
+
+	if (get_user(unit_size, &u_table->unit_size))
+		return -EFAULT;
+
+	if (unit_size != sizeof(struct assoc_info_report)) {
+		printk(KERN_ERR "The size of structure assoc_info_report doesn't match\n");
+		return -EPERM;
+	}
+
+	ic->ic_iterate_nodes(&ic->ic_sta, get_node_info_ioctl, iwr, 1);
+
+	return 0;
+}
+
+/*
+ * Reset the hardware/driver TX queue of the station identified by the
+ * user-supplied MAC address.
+ *
+ * Returns 0 on success, -EFAULT if the address cannot be copied in,
+ * -ENOENT if no such station exists.
+ */
+static int
+ieee80211_subioctl_rst_queue(struct net_device *dev, char __user* mac)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *node;
+	uint8_t peer_addr[IEEE80211_ADDR_LEN];
+
+	if (copy_from_user(peer_addr, mac, sizeof(peer_addr)))
+		return -EFAULT;
+
+	node = ieee80211_find_node(&ic->ic_sta, peer_addr);
+	if (!node)
+		return -ENOENT;
+
+	ic->ic_queue_reset(node);
+	ieee80211_free_node(node);	/* drop the find_node reference */
+
+	return 0;
+}
+
+/*
+ * Report radar status for the channel named in the request: whether the
+ * channel currently carries the RADAR flag and how many radar detections
+ * it has accumulated.
+ *
+ * Returns 0 on success, -EINVAL if the channel is unknown or not a DFS
+ * channel, -EFAULT on copy failures.
+ */
+static int
+ieee80211_subioctl_radar_status(struct net_device *dev, struct ieee80211req_radar_status __user* status)
+{
+	int retval = 0;
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_radar_status rdstatus;
+	int chan_idx=0;
+
+	if (copy_from_user(&rdstatus, status, sizeof(rdstatus)) != 0)
+		return -EFAULT;
+
+	/* Locate the channel table entry for the requested channel number */
+	for (chan_idx = 0; chan_idx < ic->ic_nchans; chan_idx++) {
+		if (ic->ic_channels[chan_idx].ic_ieee == rdstatus.channel)
+			break;
+	}
+
+	if (chan_idx >= ic->ic_nchans) {
+		retval = -EINVAL;
+	} else {
+		/* Radar status is only meaningful on DFS channels */
+		if (!(ic->ic_channels[chan_idx].ic_flags & IEEE80211_CHAN_DFS))
+			retval = -EINVAL;
+	}
+
+	if (retval >= 0) {
+		rdstatus.flags = ((ic->ic_channels[chan_idx].ic_flags) & (IEEE80211_CHAN_RADAR)) ? 1 : 0;
+		rdstatus.ic_radardetected = ic->ic_channels[chan_idx].ic_radardetected;
+		if (copy_to_user(status, &rdstatus, sizeof(rdstatus)) != 0)
+			retval = -EFAULT;
+	}
+
+	return retval;
+}
+
+static int
+ieee80211_subioctl_get_phy_stats(struct net_device *dev, struct ieee80211_phy_stats __user* ps)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	struct ieee80211_phy_stats stats;
+	int ret;
+
+	/* The lower layer must have registered a PHY-stats hook */
+	if (!ic->ic_get_phy_stats)
+		return -EINVAL;
+
+	ret = ic->ic_get_phy_stats(dev, ic, &stats, 1);
+
+	/* Only copy out on success; negative codes pass through untouched */
+	if (ret >= 0 && copy_to_user(ps, &stats, sizeof(struct ieee80211_phy_stats)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+static int
+ieee80211_subioctl_get_dscp2ac_map(struct net_device *dev, uint8_t __user* ps)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	uint8_t map[IP_DSCP_NUM] = {0};
+	uint8_t vap_idx = ic->ic_get_vap_idx(vap);
+
+	if (!ic->ic_get_dscp2ac_map)
+		return -EINVAL;
+
+	/* Fetch this VAP's DSCP -> WMM access-category table */
+	ic->ic_get_dscp2ac_map(vap_idx, map);
+
+	/* -EIO (not -EFAULT) kept for compatibility with existing callers */
+	return copy_to_user(ps, map, IP_DSCP_NUM) ? -EIO : 0;
+}
+
+static int
+ieee80211_subioctl_set_dscp2ac_map(struct net_device *dev, struct ieeee80211_dscp2ac __user* ps)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	struct ieeee80211_dscp2ac dscp2ac;
+	uint8_t vap_idx = ic->ic_get_vap_idx(vap);
+
+	if (!ps)
+		return -EFAULT;
+
+	/* -EIO kept for compatibility with existing callers */
+	if (copy_from_user(&dscp2ac, ps, sizeof(struct ieeee80211_dscp2ac)))
+		return -EIO;
+
+	/* Reject oversized lists before handing them to the lower layer */
+	if (dscp2ac.list_len > IP_DSCP_NUM) {
+		/* fixed typo: "larger then" -> "larger than" */
+		printk(KERN_WARNING "%s: DSCP list size %u larger than max allowed %d\n",
+			 dev->name, dscp2ac.list_len, IP_DSCP_NUM);
+		return -EINVAL;
+	}
+
+	if (!ic->ic_set_dscp2ac_map)
+		return -EINVAL;
+
+	/* Program the per-VAP DSCP -> WMM access-category table */
+	ic->ic_set_dscp2ac_map(vap_idx, dscp2ac.dscp, dscp2ac.list_len, dscp2ac.ac);
+
+	return 0;
+}
+
+
+static int
+ieee80211_subioctl_brcm(struct net_device *dev, struct ieee80211req_brcm __user* ps)
+{
+	struct ieee80211vap	*vap = netdev_priv(dev);
+	struct ieee80211com	*ic  = vap->iv_ic;
+	struct ieee80211req_brcm req;
+	struct ieee80211_node *ni;
+
+	if (copy_from_user(&req, ps, sizeof(struct ieee80211req_brcm)))
+		return -EINVAL;
+
+	/* Both sub-operations are only meaningful on an AP interface */
+	if ((req.ib_op == IEEE80211REQ_BRCM_INFO ||
+			req.ib_op == IEEE80211REQ_BRCM_PKT) &&
+			vap->iv_opmode != IEEE80211_M_HOSTAP)
+		return -EINVAL;
+
+	switch (req.ib_op) {
+	case IEEE80211REQ_BRCM_INFO:
+		ni = ieee80211_find_node(&ic->ic_sta, req.ib_macaddr);
+		if (ni == NULL)
+			return -ENOENT;
+		/* Feed the peer's RSSI / rx-glitch report into SCS */
+		ieee80211_scs_brcm_info_report(ic, ni, req.ib_rssi, req.ib_rxglitch);
+		ieee80211_free_node(ni);
+		break;
+	case IEEE80211REQ_BRCM_PKT:
+		/* Forward the user-supplied L2 packet out of this VAP */
+		ieee80211_send_usr_l2_pkt(vap, req.ib_pkt, req.ib_pkt_len);
+		SCSDBG(SCSLOG_INFO, "send brcm info pkt in vap %u\n", vap->iv_unit);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_disconn_info(struct net_device *dev, struct ieee80211req_disconn_info __user* disconn_info)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211req_disconn_info info;
+
+	if (copy_from_user(&info, disconn_info, sizeof(info)) != 0)
+		return -EFAULT;
+
+	/* resetflag means "clear the counters"; nothing is reported back */
+	if (info.resetflag) {
+		vap->iv_disconn_cnt = 0;
+		vap->iv_disconn_seq = 0;
+		return 0;
+	}
+
+	vap->iv_disconn_seq++;
+
+	/* STA: 1 while associated; AP: number of associated stations */
+	if (vap->iv_opmode == IEEE80211_M_STA)
+		info.asso_sta_count = (vap->iv_state == IEEE80211_S_RUN) ? 1 : 0;
+	else if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		info.asso_sta_count = vap->iv_sta_assoc;
+
+	info.disconn_count = vap->iv_disconn_cnt;
+	info.sequence = vap->iv_disconn_seq;
+	info.up_time = (jiffies - INITIAL_JIFFIES) / HZ;
+
+	if (copy_to_user(disconn_info, &info, sizeof(info)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_tdls_operation(struct net_device *dev, char __user* data)
+{
+	struct ieee80211vap	*vap = netdev_priv(dev);
+	struct ieee80211com	*ic  = vap->iv_ic;
+	struct ieee80211_node *peer_ni  = NULL;
+	struct ieee80211_tdls_oper_data oper_data;
+	uint64_t tbtt;
+	uint64_t cur_tsf;
+	unsigned long duration;
+	unsigned long cur_jiffies;
+
+	/* Userspace blob: destination MAC followed by the operation code */
+	if (copy_from_user(&oper_data.dest_mac, data, IEEE80211_ADDR_LEN))
+		return -EFAULT;
+	if (copy_from_user(&oper_data.oper, data + IEEE80211_ADDR_LEN, sizeof(oper_data.oper)))
+		return -EFAULT;
+
+	/* Per-peer operations take a reference on the peer node;
+	 * ENABLE/DISABLE act on the whole VAP and skip the lookup. */
+	if((oper_data.oper != IEEE80211_TDLS_ENABLE) && (oper_data.oper != IEEE80211_TDLS_DISABLE)) {
+		peer_ni = ieee80211_find_node(&ic->ic_sta, oper_data.dest_mac);
+		if (peer_ni == NULL || IEEE80211_NODE_IS_NONE_TDLS(peer_ni)) {
+			if (peer_ni)
+			      ieee80211_free_node(peer_ni);
+			return -ENOENT;
+		}
+	}
+
+	switch (oper_data.oper) {
+	case IEEE80211_TDLS_SETUP:
+		if (IEEE80211_NODE_IS_TDLS_INACTIVE(peer_ni) ||
+				IEEE80211_NODE_IS_TDLS_IDLE(peer_ni)) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: setting up data link with peer %pM\n",
+				__func__, peer_ni->ni_macaddr);
+		}
+		break;
+	case IEEE80211_TDLS_TEARDOWN:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: tearing down data link with peer %pM\n",
+			__func__, peer_ni->ni_macaddr);
+		/*
+		 * Just remove the fdb entry form bridge module,
+		 * delay the node free in later node_expire timer callback.
+		 */
+		ieee80211_tdls_disable_peer_link(peer_ni);
+
+		if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED)
+			ieee80211_tdls_node_leave(vap, peer_ni);
+		break;
+	case IEEE80211_TDLS_ENABLE_LINK:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: data link established successfully with peer %pM\n",
+			__func__, peer_ni->ni_macaddr);
+
+		ieee80211_tdls_enable_peer_link(vap, peer_ni);
+		break;
+	case IEEE80211_TDLS_DISABLE_LINK:
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+			"TDLS %s: data link removed with peer %pM\n",
+			__func__, peer_ni->ni_macaddr);
+
+		/*
+		 * Just remove the fdb entry form bridge module,
+		 * delay the node free in later node_expire timer callback.
+		 */
+		ieee80211_tdls_disable_peer_link(peer_ni);
+
+		if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED)
+			ieee80211_tdls_node_leave(vap, peer_ni);
+		break;
+	case IEEE80211_TDLS_ENABLE:
+		/* No peer_ni reference is held on this path (see lookup above) */
+		if (!ieee80211_swfeat_is_supported(SWFEAT_ID_TDLS, 1))
+			return -EPERM;
+		if (vap->iv_flags_ext & IEEE80211_FEXT_TDLS_PROHIB) {
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: TDLS function is enabled\n", __func__);
+
+			/* attention: must set the flag firstly */
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_TDLS_PROHIB;
+			if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0) {
+				ieee80211_tdls_start_disc_timer(vap);
+				ieee80211_tdls_start_node_expire_timer(vap);
+			}
+		}
+		break;
+	case IEEE80211_TDLS_DISABLE:
+		if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_PROHIB) == 0) {
+			if ((vap->iv_flags_ext & IEEE80211_FEXT_TDLS_DISABLED) == 0) {
+				IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+					"TDLS %s: TDLS function is disabled\n", __func__);
+
+				/* teardown the link and clear timer */
+				ieee80211_tdls_teardown_all_link(vap);
+				ieee80211_tdls_clear_disc_timer(vap);
+				ieee80211_tdls_clear_node_expire_timer(vap);
+				ieee80211_tdls_free_all_inactive_peers(vap);
+			}
+			vap->iv_flags_ext |= IEEE80211_FEXT_TDLS_PROHIB;
+		}
+		break;
+	case IEEE80211_TDLS_SWITCH_CHAN:
+		ic->ic_get_tsf(&cur_tsf);
+		cur_jiffies = jiffies;
+		if (NULL == vap->iv_bss) {
+			/* Fix: this early return used to leak the node
+			 * reference taken on peer_ni above */
+			if (peer_ni)
+				ieee80211_free_node(peer_ni);
+			return -EINVAL;
+		}
+		tbtt = vap->iv_bss->ni_shared_stats->beacon_tbtt;
+
+		/* Wait until the next TBTT before starting the switch */
+		if (tbtt > cur_tsf) {
+			duration = IEEE80211_USEC_TO_MS((uint32_t)(tbtt - cur_tsf));
+		} else {
+			duration = 0;
+			IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
+				"TDLS %s: Get wrong TBTT\n", __func__);
+		}
+
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+				"TDLS %s: cur_tsf = %016llx, tbtt = %016llx, duration = %08lx\n",
+				__func__, cur_tsf, tbtt, duration);
+
+		while (time_before(jiffies, cur_jiffies + duration * HZ / 1000))
+			msleep(5);
+		ieee80211_tdls_start_channel_switch(vap, peer_ni);
+		break;
+	default:
+		break;
+	}
+
+	/* Drop the reference taken for per-peer operations */
+	if (peer_ni) {
+		IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
+			"TDLS %s: TDLS operation: %d, TDLS status: %d\n",
+			__func__, oper_data.oper, peer_ni->tdls_status);
+		ieee80211_free_node(peer_ni);
+	}
+
+	return 0;
+}
+
+/*  app_buf = [struct app_action_frm_buf] + [Action Frame Payload]
+*   Action Frame Payload = IEEE80211_ACTION_CAT_* (u8) + action (u8) + dialog token (u8) +
+*                  status code (u8) + Info
+*   Action Frame Payload is constructed in hostapd
+*/
+static int
+ieee80211_subioctl_send_action_frame(struct net_device *dev, u8 *app_buf,
+					u32 app_buf_len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct app_action_frame_buf *app_action_frm_buf;
+	struct ieee80211_node *ni = NULL;
+	struct ieee80211_action_data action_data;
+	int retval = 0;
+
+	/*
+	 * Fix: app_buf_len is user controlled.  Reject buffers too small to
+	 * hold the fixed header before its fields are dereferenced below
+	 * (the old code could read past a short allocation).
+	 */
+	if (app_buf_len < sizeof(*app_action_frm_buf))
+		return -EINVAL;
+
+	/* kzalloc() allocates kernel memory to copy the user data into */
+	app_action_frm_buf = kzalloc(app_buf_len, GFP_KERNEL);
+	if (!app_action_frm_buf)
+		return -ENOMEM;
+
+	if (copy_from_user(app_action_frm_buf, app_buf, app_buf_len) != 0) {
+		retval = -EFAULT;
+		goto buf_free;
+	}
+
+	memset(&action_data, 0, sizeof(action_data));
+	action_data.cat = app_action_frm_buf->cat;
+	action_data.action = app_action_frm_buf->action;
+
+	/* Pointer to action frm payload passed to next function */
+	action_data.params = &app_action_frm_buf->frm_payload;
+
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		ni = ieee80211_find_node(&ic->ic_sta, app_action_frm_buf->dst_mac_addr);
+
+		/* Public action frames are handled before association */
+		if (ni == NULL && action_data.cat == IEEE80211_ACTION_CAT_PUBLIC) {
+			ni = ieee80211_tmp_node(vap, app_action_frm_buf->dst_mac_addr);
+		}
+
+		if (ni == NULL) {
+			retval = -ENOENT;
+			goto buf_free;
+		}
+
+		/* Only public/FBSS categories may go to unauthorized peers */
+		if ((action_data.cat == IEEE80211_ACTION_CAT_PUBLIC)
+			|| (action_data.cat == IEEE80211_ACTION_CAT_FBSS)
+			|| (ni->ni_associd && ieee80211_node_is_authorized(ni)))
+			/* NOTE(review): pointer smuggled through the int arg of
+			 * IEEE80211_SEND_MGMT -- fine on 32-bit ARC, not 64-bit clean */
+			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_ACTION, (int)&action_data);
+		else
+			retval = -ENOENT;
+
+		ieee80211_free_node(ni);
+	} else {
+		/* Currently sending action frames for AP mode only */
+		retval = -EINVAL;
+	}
+
+buf_free:
+	kfree(app_action_frm_buf);
+
+	return retval;
+}
+
+/*
+ * Function to get the driver capabilities. Currently extended
+ * capabilities IE is sent here
+ **/
+static int
+ieee80211_subioctl_get_driver_capa(struct net_device *dev,
+					uint8_t __user *app_buf,
+					uint32_t app_buf_len)
+{
+#define DRV_EXT_CAPABILITY_LEN 8
+	/* total-length word + IE id/len header + capability bytes + mask */
+	const u_int32_t needed = sizeof(u_int32_t) + 2 + 2 * DRV_EXT_CAPABILITY_LEN;
+	u_int8_t ext_capability[DRV_EXT_CAPABILITY_LEN] = {0};
+	u_int8_t *buf = NULL;
+	u_int8_t *pos;
+	int retval = 0;
+
+	/*
+	 * Fix: validate the user-supplied buffer size BEFORE writing.  The
+	 * old code allocated app_buf_len bytes, wrote a fixed amount into it
+	 * and only then compared lengths, overflowing the heap allocation
+	 * whenever app_buf_len was smaller than the data being written.
+	 */
+	if (app_buf_len < needed) {
+		printk (KERN_WARNING "length is more than app_buf_len\n");
+		return -1;
+	}
+
+	MALLOC(buf, u_int8_t *, needed, M_DEVBUF, M_WAITOK);
+	if(!buf)
+		return -ENOMEM;
+
+	/*Keep buffer in the form of Type(4 bytes):Length:Value format */
+
+	pos = buf;
+	pos += sizeof(u_int32_t); /* reserve room for the total data len */
+
+	*pos++ = IEEE80211_ELEMID_EXTCAP;
+	*pos++ = DRV_EXT_CAPABILITY_LEN;
+
+	ext_capability[0] = IEEE80211_EXTCAP_20_40_COEXISTENCE;
+	/* max msdu in amsdu 0 = unlimited */
+	ext_capability[7] = IEEE80211_EXTCAP_OPMODE_NOTIFICATION;
+
+	/* extended capability supported by driver */
+	memcpy(pos, ext_capability, DRV_EXT_CAPABILITY_LEN);
+	pos +=  DRV_EXT_CAPABILITY_LEN;
+
+	/* extended capability mask */
+	memcpy(pos, ext_capability, DRV_EXT_CAPABILITY_LEN);
+	pos +=  DRV_EXT_CAPABILITY_LEN;
+
+	/* TODO Can send more data to application from here */
+
+	/* First word carries the total length of the payload */
+	*(u_int32_t *)buf = (u_int32_t)(pos - buf);
+
+	if (copy_to_user(app_buf, buf, (u_int32_t)(pos - buf))) {
+		retval = -EIO;
+	}
+
+	FREE(buf, M_DEVBUF);
+
+	return retval;
+
+#undef DRV_EXT_CAPABILITY_LEN
+}
+
+/* Accumulator threaded through the node iterator when computing the best
+ * link quality among authorized stations (excluding one MAC address). */
+struct get_links_max_quality_request {
+	/* running maximum of ni_linkqual over the visited nodes */
+	uint32_t max_quality;
+	/* address to skip (the local interface's own MAC) */
+	uint8_t mac_addr[IEEE80211_ADDR_LEN];
+};
+
+static void get_link_quality_max_callback(void *s, struct ieee80211_node *ni)
+{
+	struct get_links_max_quality_request *req = s;
+
+	/* Consider only authorized nodes with a valid AID that are not
+	 * the excluded (local) address */
+	if (ieee80211_node_is_authorized(ni) == 0)
+		return;
+	if (IEEE80211_AID(ni->ni_associd) == 0)
+		return;
+	if (IEEE80211_ADDR_EQ(ni->ni_macaddr, req->mac_addr))
+		return;
+
+	req->max_quality = MAX(req->max_quality, (uint32_t)ni->ni_linkqual);
+}
+
+static int
+ieee80211_subioctl_get_link_quality_max(struct net_device *dev, uint8_t __user *app_buf,
+	uint32_t app_buf_len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	struct get_links_max_quality_request req;
+
+	/* Caller must supply a buffer of exactly one uint32_t */
+	if (app_buf_len != sizeof(req.max_quality))
+		return -EINVAL;
+
+	req.max_quality = 0;
+	IEEE80211_ADDR_COPY(req.mac_addr, dev->dev_addr);
+
+	/* Scan every node on this device, keeping the best link quality */
+	ic->ic_iterate_dev_nodes(dev, &ic->ic_sta, get_link_quality_max_callback, &req, 1);
+
+	if (copy_to_user(app_buf, &req.max_quality, sizeof(req.max_quality)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Cache AP configuration elements pushed down from hostapd.
+ * app_buf holds a struct app_ie (id/len header plus a per-element union);
+ * only the Interworking element is handled today. */
+static int
+ieee80211_subioctl_set_ap_info(struct net_device *dev, uint8_t __user* app_buf,
+						uint32_t app_buf_len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct app_ie *ie = NULL;
+	int retval = 0;
+
+	/* Minimum sanity: element id/len header plus two payload bytes.
+	 * NOTE(review): the Interworking branch below reads ie->u.interw.*
+	 * even though only app_buf_len bytes were copied into the
+	 * allocation -- a buffer shorter than that layout would be read
+	 * past its end; confirm the minimum size hostapd sends. */
+	if (app_buf_len < 4)
+		return -EINVAL;
+
+	MALLOC(ie, struct app_ie *, app_buf_len, M_DEVBUF, M_WAITOK);
+	if (!ie)
+		return -ENOMEM;
+
+	if (copy_from_user(ie, app_buf, app_buf_len) != 0) {
+		retval = -EFAULT;
+		goto buf_free;
+	}
+
+	/*TODO: This function can be used to handle other Elements from hostapd */
+	switch (ie->id) {
+	case IEEE80211_ELEMID_INTERWORKING:
+		/* Replace any previously cached Interworking info */
+		memset(&vap->interw_info, 0, sizeof(struct interworking_info));
+		vap->interworking = ie->u.interw.interworking;
+		if (vap->interworking) {
+			vap->interw_info.an_type = ie->u.interw.an_type;
+			/* HESSID is optional; present only when len > 2 */
+			if (ie->len > 2) {
+				IEEE80211_ADDR_COPY(vap->interw_info.hessid,
+							ie->u.interw.hessid);
+			}
+		}
+		break;
+	default:
+		retval = -EOPNOTSUPP;
+		break;
+	}
+
+buf_free:
+	FREE(ie, M_DEVBUF);
+	return retval;
+}
+
+static int
+ieee80211_get_supp_chans(struct ieee80211vap *vap, struct iwreq *iwr)
+{
+	int8_t mac_addr[IEEE80211_ADDR_LEN];
+	struct ieee80211_node *ni;
+	const char *errmsg = " [buffer overflow]";
+	void __user* pointer;
+	char *buffer;
+	char *tmp_buf;
+	int buf_len;
+	int chan;
+	int len;
+	int retval;
+
+	pointer = iwr->u.data.pointer;
+	if (copy_from_user(mac_addr, pointer, IEEE80211_ADDR_LEN) != 0)
+		return -EINVAL;
+
+	/*
+	 * Fix: the overflow-marker path below writes errmsg starting at
+	 * buffer + buf_len - strlen(errmsg); a user buffer smaller than
+	 * the marker would make that pointer underflow the allocation.
+	 */
+	buf_len = iwr->u.data.length;
+	if (buf_len <= (int)strlen(errmsg))
+		return -EINVAL;
+
+	ni = ieee80211_find_node(&vap->iv_ic->ic_sta, (const uint8_t*)mac_addr);
+	if (!ni)
+		return -EINVAL;
+
+	buffer = kmalloc(buf_len + 1, GFP_KERNEL);
+	if (!buffer) {
+		ieee80211_free_node(ni);
+		return -EINVAL;
+	}
+	/* Fix: zero the whole allocation (was buf_len, leaving the
+	 * extra terminator byte uninitialized) */
+	memset(buffer, 0, buf_len + 1);
+
+	/* Build a comma-separated list of supported channel numbers */
+	len = 0;
+	for (chan = 0; chan < IEEE80211_CHAN_MAX; chan++) {
+		if (isset(ni->ni_supp_chans, chan)) {
+			/*
+			 * Fix: the old code kept calling snprintf() with
+			 * buf_len - len after len had passed buf_len; the
+			 * negative value became a huge size_t and the write
+			 * landed past the allocation.  Once full, just mark
+			 * the overflow and stop.
+			 */
+			if (len >= buf_len) {
+				len = buf_len + 1;
+				break;
+			}
+			tmp_buf = buffer + len;
+			len += snprintf(tmp_buf, buf_len - len, "%d,", chan);
+		}
+	}
+
+	ieee80211_free_node(ni);
+	if (len > buf_len) {
+		/* Replace the tail (back to a channel boundary) with errmsg */
+		len = strlen(errmsg);
+		tmp_buf = buffer + buf_len - len;
+		while (len < buf_len) {
+			if (tmp_buf[0] == ',')
+				break;
+			tmp_buf--;
+			len++;
+		}
+
+		memset(tmp_buf, 0, len);
+		strcpy(tmp_buf, errmsg);
+		len = strlen(buffer);
+	} else if (len > 0) {
+		buffer[len - 1] = '\0';	/* strip the trailing comma */
+	} else {
+		len = snprintf(buffer, buf_len, "Not available");
+	}
+
+	retval = copy_to_user(pointer, buffer, len);
+	kfree(buffer);
+
+	if (retval)
+		return -EINVAL;
+
+	return 0;
+}
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+void ieee80211_bsa_macfilter_detach(struct ieee80211vap *vap)
+{
+	struct bsa_deny_sta *entry;
+	struct bsa_deny_sta *next;
+
+	/* Nothing to tear down if the deny list was never initialized */
+	if (!vap->deny_sta_list_inited)
+		return;
+
+	/* Unlink and free every entry currently on the deny list */
+	LIST_FOREACH_SAFE(entry, &vap->deny_sta_list, list, next) {
+		if (entry != NULL) {
+			LIST_REMOVE(entry, list);
+			FREE(entry, M_DEVBUF);
+		}
+	}
+}
+
+static int ieee80211_bsa_macfilter_attach(struct ieee80211vap *vap)
+{
+	/* First use: set up an empty deny list; otherwise flush the old one */
+	if (vap->deny_sta_list_inited) {
+		ieee80211_bsa_macfilter_detach(vap);
+	} else {
+		LIST_INIT(&vap->deny_sta_list);
+		vap->deny_sta_list_inited = 1;
+	}
+	return 0;
+}
+
+
+static int
+ieee80211_subioctl_set_bsa_module(struct net_device *dev, uint8_t __user *buf)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	char bsa_status[3] = {0};
+	int bsa_enable_flag = 0;
+
+	if (!buf)
+		return -EFAULT;
+
+	/*
+	 * Fix: copy at most sizeof() - 1 bytes so the zero-initialized last
+	 * byte keeps the string NUL-terminated; the old code filled the whole
+	 * array and could run sscanf() on an unterminated buffer.
+	 */
+	if (copy_from_user(bsa_status, buf, sizeof(bsa_status) - 1) != 0)
+		return -EFAULT;
+
+	sscanf(bsa_status, "%d", &bsa_enable_flag);
+	vap->bsa_status = bsa_enable_flag;
+
+	/* Active -> (re)build the deny list; anything else tears it down */
+	if (vap->bsa_status == BSA_STATUS_ACTIVE)
+		ieee80211_bsa_macfilter_attach(vap);
+	else
+		ieee80211_bsa_macfilter_detach(vap);
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_update_macfilter_table(struct net_device *dev, uint8_t __user *buf)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211_bsa_mac_filter filter;
+
+	/* Only meaningful while the BSA module is active on this VAP */
+	if (!vap->bsa_status)
+		return -EINVAL;
+
+	if (copy_from_user(&filter, buf, sizeof(struct ieee80211_bsa_mac_filter)) != 0)
+		return -EFAULT;
+
+	switch (filter.allow_mac) {
+	case BSA_MACFILTER_ALLOW:
+		/* remove from the deny_list */
+		return ieee80211_bsa_macfilter_remove(vap, filter.sta_mac);
+	case BSA_MACFILTER_DENY:
+		/* add to the deny list */
+		return ieee80211_bsa_macfilter_add(vap, filter.sta_mac);
+	default:
+		return -1;
+	}
+}
+
+/* Send an unsolicited 802.11v BSS Transition Management request to one
+ * associated station, carrying a single Neighbor Report candidate built
+ * from the userspace request. */
+static int
+ieee80211_subioctl_send_btm_req_frm(struct net_device *dev, struct
+		ieee80211_bsa_btm_req_frm __user *buf, uint16_t len)
+{
+	struct ieee80211vap *vap = NULL;
+	struct ieee80211_bsa_btm_req_frm bsa_btm_req_frm;
+	uint8_t sta_mac[IEEE80211_ADDR_LEN];
+	struct ieee80211_node *ni;
+	uint8_t mode;
+	uint16_t disassoc_timer;
+	uint8_t valid_int;
+	uint8_t bss_term_dur = 0;
+	struct ieee80211_ie_neighbor_report *neigh = NULL;
+	int retval = 0;
+	uint8_t *data = NULL;
+
+	/* Request must be at least one full btm_req record */
+	if (!dev || !buf || (len  < sizeof (bsa_btm_req_frm)))
+		return -EINVAL;
+
+	vap = netdev_priv(dev);
+	if (!vap)
+		return -EINVAL;
+
+	if (copy_from_user(&bsa_btm_req_frm, buf, sizeof(bsa_btm_req_frm)) != 0)
+		return -EFAULT;
+
+	/* The target station must currently be in the node table */
+	memcpy(sta_mac, bsa_btm_req_frm.sta_mac, IEEE80211_ADDR_LEN);
+	ni = ieee80211_find_node(&vap->iv_ic->ic_sta, (const uint8_t*)sta_mac);
+	if (!ni)
+		return -EINVAL;
+
+	/* get_unaligned: multi-byte fields of the wire struct may be packed */
+	disassoc_timer = get_unaligned(&bsa_btm_req_frm.dis_assoc_timer);
+	mode = bsa_btm_req_frm.req_mode;
+	valid_int = bsa_btm_req_frm.val_intvl;
+
+	/* Neighbor Report IE plus room for the candidate-preference subelement */
+	MALLOC(neigh, struct ieee80211_ie_neighbor_report *, \
+			(sizeof (struct ieee80211_ie_neighbor_report) + BSA_BTM_CAND_PREF),\
+			M_DEVBUF, M_WAITOK | M_ZERO);
+
+	if (!neigh) {
+		ieee80211_free_node(ni);
+		return -ENOMEM;
+	}
+
+	/* IE "len" excludes the two id/len header octets */
+	neigh->id = IEEE80211_ELEMID_NEIGHBOR_REP;
+	neigh->len = sizeof (struct ieee80211_ie_neighbor_report) + BSA_BTM_CAND_PREF - 2;
+
+	memcpy(neigh->bssid, bsa_btm_req_frm.bssid, IEEE80211_ADDR_LEN);
+	put_unaligned(get_unaligned(&bsa_btm_req_frm.bssid_info), &neigh->bssid_info);
+	neigh->operating_class = bsa_btm_req_frm.opclass;
+	neigh->channel = bsa_btm_req_frm.channel;
+	neigh->phy_type = bsa_btm_req_frm.phytype;
+
+	data = (uint8_t *)&neigh->data;
+
+	/* BSS Transition Candidate Preference sub element */
+	*data++ = BSA_BTM_CAND_PREF_ID;
+	*data++ = BSA_BTM_CAND_PREF_LEN;
+	*data = BSA_BTM_CAND_PREF_VAL;
+	/* neigh->len + 2 = full element size including the id/len header */
+	retval = ieee80211_send_wnm_bss_tm_unsolicited_req(ni, mode, disassoc_timer, valid_int,
+			&bss_term_dur, NULL, (uint8_t *)neigh, neigh->len + 2, 0);
+
+	ieee80211_free_node(ni);
+	FREE(neigh, M_DEVBUF);
+	return retval;
+}
+
+/* Context handed to the node iterator when harvesting per-STA stats:
+ * the VAP being queried plus the userspace stats buffer to fill. */
+struct ieee80211_bsa_sta_statinfo {
+	struct ieee80211vap *vap;	/* restrict iteration to this VAP's nodes */
+	void *bsa_stats;	/* actually struct ieee80211_bsa_sta_stats __user * */
+};
+
+static void get_node_bsa_sta_stats(void *s, struct ieee80211_node *ni)
+{
+	struct ieee80211_bsa_sta_statinfo *bsa_statinfo = (struct ieee80211_bsa_sta_statinfo *)s;
+	struct ieee80211_bsa_sta_info k_stainfo;
+	struct ieee80211_bsa_sta_info __user *u_pstainfo;
+	struct ieee80211_bsa_sta_stats *u_stastats
+			= (struct ieee80211_bsa_sta_stats *)bsa_statinfo->bsa_stats;
+	uint16_t cnt;
+	uint16_t __user *u_pcnt = &u_stastats->num_sta;
+	uint32_t tx_phyrate;
+	uint32_t rx_phyrate;
+	int32_t rssi;
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+
+	/* The running record count lives in the user buffer itself */
+	if (copy_from_user(&cnt, u_pcnt, sizeof(cnt)))
+		return;
+
+	if (cnt >= IEEE80211_AID_DEF)
+		return;		/* table full */
+	if (ni->ni_vap != bsa_statinfo->vap)
+		return;		/* node belongs to another VAP */
+	if ((ni->ni_vap) && (ni->ni_vap->iv_bss == ni))
+		return;		/* skip the BSS (self) node */
+	vap = ni->ni_vap;
+	ic = vap->iv_ic;
+
+	/*
+	 * Fix: zero the record before filling it.  It was copied to
+	 * userspace with some fields (and padding) never written,
+	 * leaking uninitialized kernel stack bytes.
+	 */
+	memset(&k_stainfo, 0, sizeof(k_stainfo));
+
+	u_pstainfo = &u_stastats->ieee80211_bsa_sta_info_var[cnt];
+	ic->ic_rxtx_phy_rate(ni, 0, NULL, NULL, &tx_phyrate);
+	ic->ic_rxtx_phy_rate(ni, 1, NULL, NULL, &rx_phyrate);
+	rssi = ic->ic_rssi(ni);
+	memcpy(k_stainfo.sta_mac, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+	put_unaligned(rx_phyrate, &k_stainfo.rx_phy_rate);
+	put_unaligned(0, &k_stainfo.ts_last_rx_pkt);
+	put_unaligned(tx_phyrate, &k_stainfo.tx_phy_rate);
+	put_unaligned(0, &k_stainfo.ts_last_tx_pkt);
+	/* ic_rssi() presumably reports tenths of a dBm -- TODO confirm */
+	put_unaligned((rssi - 5) / 10, &k_stainfo.rssi_dbm);
+
+	if (copy_to_user(u_pstainfo, &k_stainfo, sizeof(struct ieee80211_bsa_sta_info)))
+		return;
+
+	/* Publish the incremented count only after the record landed */
+	cnt++;
+	if (copy_to_user(u_pcnt, &cnt, sizeof(cnt)))
+		return;
+}
+
+static void get_node_bsa_associated_sta_stats(void *s, struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = s;
+
+	/* Report only stations on this VAP that are not the BSS node
+	 * itself and hold a valid association ID */
+	if (ni->ni_vap != vap || ni->ni_vap->iv_bss == ni || ni->ni_associd == 0)
+		return;
+
+	ieee80211_bsa_connect_complete_event_send(vap, ni);
+}
+
+static int
+ieee80211_subioctl_get_bsa_sta_stats(struct net_device *dev, struct
+		ieee80211_bsa_sta_stats __user *buf, uint16_t len)
+{
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+	struct ieee80211_bsa_sta_statinfo info;
+
+	/* Caller must hand in a buffer large enough for the stats table */
+	if (!dev || !buf || len < sizeof(struct ieee80211_bsa_sta_stats))
+		return -EINVAL;
+
+	vap = netdev_priv(dev);
+	ic = vap->iv_ic;
+	info.vap = vap;
+	info.bsa_stats = buf;
+
+	/* Walk the node table; the callback appends one record per STA */
+	ic->ic_iterate_nodes(&ic->ic_sta, get_node_bsa_sta_stats, &info, 1);
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_bsa_associated_sta_stats(struct net_device *dev,
+		void __user *buf, uint16_t len)
+{
+	struct ieee80211vap *vap;
+	struct ieee80211com *ic;
+
+	if (!dev)
+		return -EINVAL;
+
+	vap = netdev_priv(dev);
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+		return -EFAULT;
+
+	ic = vap->iv_ic;
+
+	/*
+	 * Fix: removed the dead bsa_statinfo local that was populated but
+	 * never used -- the iterator callback receives the VAP directly.
+	 * buf/len are unused: events are delivered asynchronously.
+	 */
+	ic->ic_iterate_nodes(&ic->ic_sta, get_node_bsa_associated_sta_stats, vap, 1);
+
+	return 0;
+}
+
+static int
+ieee80211_subioctl_get_bsa_interface_fat_info(struct net_device *dev, struct
+		ieee80211_bsa_interface_status __user *buf, uint16_t len)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct shared_params *sp = qtn_mproc_sync_shared_params_get();
+	struct ieee80211_bsa_interface_fat_info fat_info;
+	uint16_t fat;
+
+	if (!dev || !buf || (len < sizeof(fat_info)))
+		return -EINVAL;
+
+	if (copy_from_user(&fat_info, buf, sizeof(fat_info)) != 0)
+		return -EFAULT;
+
+	/* Report the current channel and its band */
+	fat_info.channel = ic->ic_curchan->ic_ieee;
+	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
+		fat_info.band = BSA_OPER_BAND_5G;
+	else
+		fat_info.band = BSA_OPER_BAND_2G;
+
+	/* Free airtime comes from the shared-params area; the wire field
+	 * may be unaligned, hence put_unaligned() */
+	fat = sp->free_airtime;
+	put_unaligned(fat, &fat_info.avg_fat);
+
+	if (copy_to_user(buf, &fat_info, sizeof(fat_info)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+static uint16_t
+ieee80211_bsa_get_bss_capability(struct ieee80211vap *vap, struct ieee80211com *ic)
+{
+	/* Start from the network-type bit, then OR in the option bits */
+	uint16_t cap = (vap->iv_opmode == IEEE80211_M_IBSS) ?
+			IEEE80211_CAPINFO_IBSS : IEEE80211_CAPINFO_ESS;
+
+	if (vap->iv_flags & IEEE80211_F_PRIVACY)
+		cap |= IEEE80211_CAPINFO_PRIVACY;
+	/* Short preamble is only advertised on 2.4 GHz channels */
+	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+			IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
+		cap |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+	if (ic->ic_flags & IEEE80211_F_SHSLOT)
+		cap |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+	if (ic->ic_flags & IEEE80211_F_DOTH)
+		cap |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+
+	return cap;
+}
+
+/* Report this AP interface's BSS parameters (BSSID, channel/band,
+ * operating class, capabilities, HT/VHT IEs) to the BSA daemon. */
+static int
+ieee80211_subioctl_get_bsa_interface_info(struct net_device *dev, struct
+			ieee80211_bsa_interface_status __user *buf, uint16_t len)
+
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = ieee80211_get_vap_node(vap);
+	struct ieee80211_bsa_interface_status bsa_intf_req;
+	int bw;
+	int retval = 0;
+
+	if(!dev || !buf || (len < sizeof (bsa_intf_req)))
+		return -EINVAL;
+
+	if (copy_from_user(&bsa_intf_req, buf, sizeof(bsa_intf_req)) != 0)
+		return -EFAULT;
+
+	/* Only an AP interface has BSS info to report; release the node
+	 * reference taken by ieee80211_get_vap_node() before bailing */
+	if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
+		if (ni)
+			ieee80211_free_node(ni);
+		return -EFAULT;
+	}
+
+	IEEE80211_ADDR_COPY(bsa_intf_req.bssid, vap->iv_myaddr);
+	put_unaligned(0, &bsa_intf_req.mdid);	/* MDID */
+	bsa_intf_req.channel = ic->ic_curchan->ic_ieee;
+
+	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
+		bsa_intf_req.band = BSA_OPER_BAND_5G;
+	else
+		bsa_intf_req.band = BSA_OPER_BAND_2G;
+
+	/* Derive the global operating class from country, channel and BW */
+	bw = ieee80211_get_bw(ic);
+	put_unaligned(ieee80211_get_current_operating_class(ic->ic_country_code,
+					ic->ic_bsschan->ic_ieee, bw),
+				&bsa_intf_req.opclass);	/* operating class */
+
+	bsa_intf_req.drivercap = BIT(BSA_DRIVER_CAP_BTM_SHIFT);
+
+	/* IEEE phy-type codes: 9 = VHT, 7 = HT */
+	if (IS_IEEE80211_VHT_ENABLED(ic))
+		bsa_intf_req.phytype = 9; /* vht */
+	else
+		bsa_intf_req.phytype = 7; /* ht */
+	put_unaligned(ieee80211_bsa_get_bss_capability(vap, ic), &bsa_intf_req.capinfo);
+	put_unaligned(ic->ic_lintval, &bsa_intf_req.beacon_interval);
+	/* Append the HT/VHT capability and operation IEs as beaconed */
+	if (ni) {
+		ieee80211_add_htcap(ni, (uint8_t *)&bsa_intf_req.htcap,
+			&ic->ic_htcap, IEEE80211_FC0_SUBTYPE_PROBE_REQ);
+		ieee80211_add_htinfo(ni, (uint8_t *)&bsa_intf_req.htop, &ic->ic_htinfo);
+		if (IS_IEEE80211_VHT_ENABLED(ic)) {
+			ieee80211_add_vhtcap(ni, (uint8_t *)&bsa_intf_req.vhtcap,
+				&ic->ic_vhtcap, IEEE80211_FC0_SUBTYPE_PROBE_REQ);
+			ieee80211_add_vhtop(ni, (uint8_t *)&bsa_intf_req.vhtop, &ic->ic_vhtop);
+		}
+		ieee80211_free_node(ni);
+	}
+
+	if (copy_to_user(buf, &bsa_intf_req, sizeof(bsa_intf_req)) != 0) {
+		return -EFAULT;
+	}
+
+	return retval;
+}
+#endif
+
+static int
+ieee80211_subioctl_get_cca_stats(struct net_device *dev, struct qtn_exp_cca_stats __user* cs)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	struct qtn_exp_cca_stats stats;
+	int ret;
+
+	/* The lower layer must provide a CCA-stats hook */
+	if (!ic->ic_get_cca_stats)
+		return -EINVAL;
+
+	ret = ic->ic_get_cca_stats(dev, ic, &stats);
+
+	/* Only copy out on success; negative codes pass through untouched */
+	if (ret >= 0 && copy_to_user(cs, &stats, sizeof(struct qtn_exp_cca_stats)) != 0)
+		ret = -EFAULT;
+
+	return ret;
+}
+
+
+static int
+ieee80211_subioctl_set_mac_acl(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211_acl_params *params;
+	int retval = 0;
+
+	/*
+	 * Fix: reject requests shorter than the fixed header up front; the
+	 * old code dereferenced params->num_mac_acl / acl_policy even when
+	 * fewer bytes than sizeof(*params) had been copied into the
+	 * allocation, reading uninitialized heap memory.
+	 */
+	if (iwr->u.data.length < sizeof(*params))
+		return -EINVAL;
+
+	MALLOC(params, struct ieee80211_acl_params *, iwr->u.data.length, M_DEVBUF, M_WAITOK);
+	if (!params)
+		return -ENOMEM;
+
+	if (copy_from_user(params, iwr->u.data.pointer, iwr->u.data.length) != 0) {
+		retval = -EFAULT;
+		goto buf_free;
+	}
+
+	/* The variable-length MAC list must fit inside the supplied buffer */
+	if (iwr->u.data.length < (sizeof(*params) + sizeof(params->mac_acl[0]) * params->num_mac_acl)) {
+		retval = -EINVAL;
+		goto buf_free;
+	}
+
+	/* Apply the policy (open/allow/deny) first */
+	retval = ieee80211_mac_acl(vap, params->acl_policy);
+	if (retval < 0)
+		goto buf_free;
+
+	if (params->acl_policy == IEEE80211_MACCMD_POLICY_ALLOW ||
+			params->acl_policy == IEEE80211_MACCMD_POLICY_DENY) {
+		const struct ieee80211_aclator *acl = vap->iv_acl;
+
+		/* Replace (not merge) the current list with the new one */
+		acl->iac_flush(vap);
+		if (params->num_mac_acl > 0)
+			retval = acl->iac_add_mac_list(vap, params->num_mac_acl, params->mac_acl);
+	}
+
+buf_free:
+	FREE(params, M_DEVBUF);
+	return retval;
+}
+
+/**
+ *  ieee80211_ioctl_ext - dispatch function for sub-ioctl commands
+ *  dev:	network device descriptor
+ *  iwr:	request information
+ *
+ *	Three parameters are used to support sub-ioctls:
+ *		iwr->u.data.flags  : the command of sub-ioctl operation
+ *		iwr->u.data.pointer: void* pointer to generic object
+ *		iwr->u.data.length : the size of generic object
+ */
+static int
+ieee80211_ioctl_ext(struct net_device *dev, struct iwreq *iwr)
+{
+	int retval = 0;
+	int16_t sub_io_cmd = iwr->u.data.flags;
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	switch (sub_io_cmd) {
+	case SIOCDEV_SUBIO_RST_QUEUE:
+		retval = ieee80211_subioctl_rst_queue(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_RADAR_STATUS:
+		retval = ieee80211_subioctl_radar_status(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_GET_PHY_STATS:
+		retval = ieee80211_subioctl_get_phy_stats(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_DISCONN_INFO:
+		retval = ieee80211_subioctl_disconn_info(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_SET_BRCM_IOCTL:
+		retval = ieee80211_subioctl_brcm(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_SCS:
+		retval = ieee80211_subioctl_scs(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_WAIT_SCAN_TIMEOUT:
+		retval = ieee80211_subioctl_wait_scan_complete(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_AP_SCAN_RESULTS:
+		retval = ieee80211_subioctl_ap_scan_results(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	case SIOCDEV_SUBIO_SET_SOC_ADDR_IOCTL:
+	{
+		u_int8_t addr_from_user[IEEE80211_ADDR_LEN];
+		if (copy_from_user(addr_from_user, iwr->u.data.pointer, IEEE80211_ADDR_LEN)) {
+			retval = -EFAULT;
+		} else {
+			memcpy(vap->iv_ic->soc_addr, addr_from_user, IEEE80211_ADDR_LEN);
+		}
+		break;
+	}
+#endif
+	case SIOCDEV_SUBIO_SET_TDLS_OPER:
+		retval = ieee80211_subioctl_tdls_operation(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_GET_11H_11K_NODE_INFO:
+		retval = ieee80211_subioctl_get_doth_dotk_report(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_GET_DSCP2AC_MAP:
+		retval = ieee80211_subioctl_get_dscp2ac_map(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_SET_DSCP2AC_MAP:
+		retval = ieee80211_subioctl_set_dscp2ac_map(dev, iwr->u.data.pointer);
+		break;
+
+	case SIOCDEV_SUBIO_SET_MARK_DFS_CHAN:
+		retval = ieee80211_subioctl_set_chan_dfs_required(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_WEATHER_CHAN:
+		retval = ieee80211_subioctl_set_chan_weather_radar(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SETGET_CHAN_DISABLED:
+		retval = ieee80211_subioctl_setget_chan_disabled(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_WOWLAN:
+		retval = ieee80211_subioctl_wowlan_setget(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+
+	case SIOCDEV_SUBIO_GET_STA_AUTH:
+		retval = ieee80211_subioctl_get_sta_auth(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_STA_VENDOR:
+		retval = ieee80211_subioctl_get_sta_vendor(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_GET_STA_TPUT_CAPS:
+		retval = ieee80211_subioctl_get_sta_tput_caps(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_SWFEAT_MAP:
+		retval = ieee80211_subioctl_get_swfeat_map(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_PRINT_SWFEAT_MAP:
+		retval = ieee80211_subioctl_print_swfeat_map(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_DI_DFS_CHANNELS:
+		retval = ieee80211_ioctl_di_dfs_channels(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SEND_ACTION_FRAME:
+		ieee80211_subioctl_send_action_frame(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_DRIVER_CAPABILITY:
+		ieee80211_subioctl_get_driver_capa(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_AP_INFO:
+		ieee80211_subioctl_set_ap_info(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_ACTIVE_CHANNEL_LIST:
+		retval = ieee80211_subioctl_set_active_chanlist_by_bw(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_GET_LINK_QUALITY_MAX:
+		retval = ieee80211_subioctl_get_link_quality_max(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_CHANNEL_POWER_TABLE:
+		retval = ieee80211_subioctl_get_chan_power_table(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_CHANNEL_POWER_TABLE:
+		retval = ieee80211_subioctl_set_chan_power_table(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_SET_SEC_CHAN:
+		retval = ieee80211_subioctl_set_sec_chan(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_SEC_CHAN:
+		retval = ieee80211_subioctl_get_sec_chan(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_DSCP2TID_MAP:
+		retval = ieee80211_subioctl_get_dscp2tid_map(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_DSCP2TID_MAP:
+		retval = ieee80211_subioctl_set_dscp2tid_map(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_TX_AIRTIME:
+		retval = ieee80211_subioctl_get_txrx_airtime(dev, iwr);
+		break;
+	case SIOCDEV_SUBIO_GET_CHAN_PRI_INACT:
+		retval = ieee80211_subioctl_get_chan_pri_inact(dev, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_SUPP_CHAN:
+		retval = ieee80211_get_supp_chans(vap, iwr);
+		break;
+	case SIOCDEV_SUBIO_GET_CLIENT_MACS:
+		retval = ieee80211_subioctl_get_client_mac_list(dev, iwr->u.data.pointer,
+							iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SAMPLE_ALL_DATA:
+		retval = ieee80211_subioctl_sample_all_clients(vap, iwr);
+		break;
+	case SIOCDEV_SUBIO_GET_ASSOC_DATA:
+		retval = ieee80211_subioctl_get_assoc_data(vap, iwr->u.data.pointer, iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_INTERFACE_WMMAC_STATS:
+		retval = ieee80211_subioctl_get_interface_wmmac_stats(dev, iwr);
+		break;
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+	case SIOCDEV_SUBIO_SET_BSA_STATUS:
+		retval = ieee80211_subioctl_set_bsa_module(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_GET_BSA_INTF_INFO:
+		retval = ieee80211_subioctl_get_bsa_interface_info(dev, iwr->u.data.pointer,
+							iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_BSA_FAT_INFO:
+		retval = ieee80211_subioctl_get_bsa_interface_fat_info(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_UPDATE_MACFILTER_LIST:
+		retval = ieee80211_subioctl_update_macfilter_table(dev, iwr->u.data.pointer);
+		break;
+	case SIOCDEV_SUBIO_GET_BSA_STA_STATS:
+		retval = ieee80211_subioctl_get_bsa_sta_stats(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_BSA_ASSOC_STA_STATS:
+		retval = ieee80211_subioctl_get_bsa_associated_sta_stats(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SEND_BTM_REQ_FRM:
+		retval = ieee80211_subioctl_send_btm_req_frm(dev, iwr->u.data.pointer,
+				iwr->u.data.length);
+		break;
+#endif
+	case SIOCDEV_SUBIO_GET_FREQ_RANGE:
+		retval = ieee80211_subioctl_get_freq_range(dev, iwr);
+		break;
+#ifdef CONFIG_NAC_MONITOR
+	case SIOCDEV_SUBIO_GET_NAC_STATS:
+		retval = ieee80211_subioctl_get_nac_stats(dev, iwr);
+		break;
+#endif
+	case SIOCDEV_SUBIO_SET_MAC_ADDR_ACL:
+		retval = ieee80211_subioctl_set_mac_acl(dev, iwr);
+		break;
+	case SIOCDEV_SUBIO_SET_FT_ASSOC_RESP:
+		retval = ieee80211_subioctl_send_ft_assoc_response(dev, iwr->u.data.pointer,
+									iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_FT_REASSOC_RESP:
+		retval = ieee80211_subioctl_send_ft_reassoc_response(dev, iwr->u.data.pointer,
+									iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_FT_AUTH_RESP:
+		retval = ieee80211_subioctl_send_ft_auth_response(dev, iwr->u.data.pointer,
+									iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_SET_FT_ADD_NODE:
+		retval = ieee80211_subioctl_ft_add_node(dev, iwr->u.data.pointer,
+									iwr->u.data.length);
+		break;
+	case SIOCDEV_SUBIO_GET_CCA_STATS:
+		retval = ieee80211_subioctl_get_cca_stats(dev, iwr->u.data.pointer);
+		break;
+	default:
+		retval = -EOPNOTSUPP;
+	}
+
+	return retval;
+}
+
+/* Upper bound on entries returned by the get-rates ioctl below. */
+#define MAX_NUM_SUPPORTED_RATES		512
+/* Values carried in iwr->u.data.flags selecting which rate set to act on. */
+#define GET_SET_BASIC_RATE		1
+#define GET_SET_OPERATIONAL_RATE	2
+#define GET_SET_MCS_RATE		3
+#define MAX_MCS_SIZE			77
+/* Used to convert rates in 500 kbps / 1 Mbps units to bits per second. */
+#define USEC_PER_SECOND			1000000
+
+/* Return 1 when rate already occurs in rate_set[0..num-1], else 0. */
+static int
+is_duplicate_rate(const uint32_t *rate_set, int num, uint32_t rate)
+{
+	const uint32_t *end = rate_set + num;
+
+	while (rate_set < end) {
+		if (*rate_set++ == rate)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Append the achievable HT PHY rates (in bits/sec) for every MCS advertised
+ * in ic->ic_htcap.mcsset[] to ht_rates[] starting at index pos, skipping
+ * duplicates.  Returns the updated element count.
+ *
+ * NOTE(review): chan_20 is never assigned and stays 0, and chan_40 doubles
+ * as both the "40 MHz capable" flag and the bandwidth argument passed to
+ * ieee80211_mcs2rate() — confirm mcs2rate()'s bandwidth encoding.
+ * NOTE(review): no bound check against the caller's array capacity; the
+ * caller must size ht_rates[] for the worst case.
+ */
+static int ieee80211_ioctl_get_ht_rates(struct ieee80211com *ic,
+					uint32_t *ht_rates, int pos)
+{
+        int i = 0;
+        int k = 0, r = 0;
+	u_int16_t chan_20 = 0;
+	u_int16_t mask;
+	u_int16_t chan_40 = 0;
+	int sgi_40 = 0;
+	int sgi_20 = 0;
+	uint32_t rate;
+
+	/* HT 20 MHz band LGI and SGI rates */
+	sgi_20 = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI20 ? 1 : 0;
+	chan_40 = ic->ic_htcap.cap & IEEE80211_HTCAP_C_CHWIDTH40 ? 1 : 0;
+	sgi_40 = ic->ic_htcap.cap & IEEE80211_HTCAP_C_SHORTGI40 ? 1 : 0;
+
+	/* Walk every MCS bitmap word; r is the running MCS index (one bit each). */
+	for (r = 0, i = IEEE80211_HT_MCSSET_20_40_NSS1;
+			i <= IEEE80211_HT_MCSSET_20_40_UEQM6; i++) {
+		mask = 1;
+		for (k = 0; k < 8; k++, r++) {
+			if (ic->ic_htcap.mcsset[i] & mask) {
+				/* mcs2rate() yields units of 500 kbps; scale to bps. */
+				rate = ieee80211_mcs2rate(r,
+						chan_20, 0, 0) * (USEC_PER_SECOND / 2);
+				if (!is_duplicate_rate(ht_rates, pos, rate)) {
+					ht_rates[pos++] = rate;
+				}
+
+				/* Short-GI variant of the 20 MHz rate. */
+				if (sgi_20) {
+					rate = ieee80211_mcs2rate(r,
+						chan_20, sgi_20, 0) * (USEC_PER_SECOND / 2);
+					if (!is_duplicate_rate(ht_rates, pos, rate)) {
+						ht_rates[pos++] = rate;
+					}
+				}
+
+				/* 40 MHz rates, LGI then SGI, when 40 MHz is supported. */
+				if (chan_40) {
+					rate = ieee80211_mcs2rate(r,
+							chan_40, 0, 0) * (USEC_PER_SECOND / 2);
+					if (!is_duplicate_rate(ht_rates, pos, rate)) {
+						ht_rates[pos++] = rate;
+					}
+
+					if (sgi_40) {
+						rate = ieee80211_mcs2rate(r,
+							chan_40, sgi_40, 0) * (USEC_PER_SECOND / 2);
+						if (!is_duplicate_rate(ht_rates, pos, rate)) {
+							ht_rates[pos++] = rate;
+						}
+					}
+
+				}
+			}
+			mask = mask << 1;
+		}
+	}
+
+	return pos;
+}
+
+/*
+ * Append the achievable VHT PHY rates (in bits/sec) derived from the TX MCS
+ * map in ic->ic_vhtcap to vht_rates[] starting at index pos, skipping
+ * duplicates.  Returns the updated element count.
+ *
+ * The VHT MCS map carries one 2-bit field per spatial stream:
+ * 0 -> MCS 0-7, 1 -> MCS 0-8, 2 -> MCS 0-9, 3 -> stream not supported.
+ * The loop stops at the first unsupported stream.
+ *
+ * NOTE(review): chan_80 is never assigned and stays 0; the per-stream rate
+ * is approximated by multiplying the single-stream rate by (k+1) — confirm
+ * against ieee80211_mcs2rate()'s contract.  Also no bound check against the
+ * caller's array capacity.
+ */
+static int ieee80211_ioctl_get_vht_rates(struct ieee80211com *ic,
+					uint32_t *vht_rates, int pos)
+{
+        int k = 0, r = 0;
+	u_int16_t mcsmap = 0;
+	int sgi_80;
+	int sgi_160;
+	u_int16_t mask;
+	u_int16_t chan_80 = 0;
+	u_int16_t chan_160 = 0;
+	uint32_t rate;
+
+	if (ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_CHWIDTH) {
+		chan_160 = 1;
+	}
+	sgi_80 = (ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_SHORT_GI_80) ? 1 : 0;
+	sgi_160 = (ic->ic_vhtcap.cap_flags & IEEE80211_VHTCAP_C_SHORT_GI_160) ? 1 : 0;
+	mask = 0x3;
+	mcsmap = ic->ic_vhtcap.txmcsmap;
+	/* k indexes the spatial stream (NSS = k+1); mask selects its 2-bit field. */
+	for (k = 0; k < 8; k++) {
+		if ((mcsmap & mask) != mask) {
+			int m;
+			int val = (mcsmap & mask)>>(k * 2);
+			/* Highest MCS supported for this stream count. */
+			r = (val == 2) ? 9: (val == 1) ? 8 : 7;
+			for (m = 0; m <= r; m++) {
+				/* 80 MHz LGI rate, scaled by stream count. */
+				rate = (ieee80211_mcs2rate(m,
+						chan_80, 0, 1) * (USEC_PER_SECOND / 2)) * (k+1);
+				if (!is_duplicate_rate(vht_rates, pos, rate)) {
+					vht_rates[pos++] = rate;
+				}
+				if (sgi_80) {
+					rate = (ieee80211_mcs2rate(m, chan_80,
+						sgi_80, 1) * (USEC_PER_SECOND / 2)) * (k+1);
+					if (!is_duplicate_rate(vht_rates, pos, rate)) {
+						vht_rates[pos++] = rate;
+					}
+				}
+
+				/* 160/80+80 MHz rates */
+				if (chan_160) {
+					rate  = (ieee80211_mcs2rate(m,
+							chan_160, 0, 1) * (USEC_PER_SECOND / 2)) * (k+1);
+					if (!is_duplicate_rate(vht_rates, pos, rate)) {
+						vht_rates[pos++] = rate;
+					}
+					if (sgi_160) {
+						rate = (ieee80211_mcs2rate(m,
+							chan_160, sgi_160, 1) * (USEC_PER_SECOND / 2)) * (k+1);
+						if (!is_duplicate_rate(vht_rates, pos, rate)) {
+							vht_rates[pos++] = rate;
+						}
+					}
+
+				}
+			}
+			mask = mask << 2;
+		} else {
+			break;
+		}
+	}
+
+	return pos;
+}
+
+/**
+ *  ieee80211_ioctl_get_rates - copy the selected rate set to user space.
+ *
+ *  iwr->u.data.flags selects the set: GET_SET_BASIC_RATE,
+ *  GET_SET_OPERATIONAL_RATE or GET_SET_MCS_RATE.  Rates are written as an
+ *  array of uint32_t (bits/sec) to iwr->u.data.pointer and
+ *  iwr->u.data.length is updated to the number of bytes reported.
+ *
+ *  Returns 0 on success, -EFAULT if the copy-out fails.
+ *
+ *  Bug fix: the original unconditionally recomputed iwr->u.data.length
+ *  after both branches from the *unclamped* caller-supplied length,
+ *  clobbering the correct value set by the basic/operational branch.
+ *  The dead "#if 0" copy of this logic has also been removed.
+ */
+static int
+ieee80211_ioctl_get_rates(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	uint32_t achievable_tx_phy_rates[MAX_NUM_SUPPORTED_RATES];
+	uint32_t achievable_rates_num = iwr->u.data.length / sizeof(uint32_t);
+	int mode, nrates, i = 0, j = 0;
+	int flags = iwr->u.data.flags;
+	struct ieee80211_rateset *rs;
+
+	mode = ic->ic_curmode;		/* current PHY mode */
+	rs = &ic->ic_sup_rates[mode];	/* legacy rates supported in that mode */
+	nrates = rs->rs_legacy_nrates;
+
+	if (flags == GET_SET_MCS_RATE) {
+		/* NB: not sorted */
+		/* Legacy rates are stored in 500 kbps units; convert to bps. */
+		for (i = 0; i < nrates ; i++) {
+			achievable_tx_phy_rates[j] = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+			achievable_tx_phy_rates[j] *= (USEC_PER_SECOND / 2);
+			j++;
+		}
+
+		/* Possible HT rates */
+		j = ieee80211_ioctl_get_ht_rates(ic, &achievable_tx_phy_rates[0], j);
+
+		/* Possible VHT rates */
+		j = ieee80211_ioctl_get_vht_rates(ic, &achievable_tx_phy_rates[0], j);
+
+		/* Never report more entries than were actually collected. */
+		if (achievable_rates_num > j) {
+			achievable_rates_num = j;
+		}
+
+		iwr->u.data.length = achievable_rates_num * sizeof(achievable_rates_num);
+	} else { /* Basic or operational rates */
+		for (i = 0; i < nrates ; i++) {
+			if ( (flags == GET_SET_BASIC_RATE) && !(rs->rs_rates[i] & IEEE80211_RATE_BASIC) ) {
+				continue;
+			}
+
+			achievable_tx_phy_rates[j] = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+			/* Keep the rates in Mbps. Multiply the rate by 1M */
+			achievable_tx_phy_rates[j] *= USEC_PER_SECOND;
+			j++;
+		}
+
+		iwr->u.data.length = j * sizeof(int32_t);
+	}
+
+	if (copy_to_user(iwr->u.data.pointer, achievable_tx_phy_rates,
+			MIN(sizeof(achievable_tx_phy_rates), iwr->u.data.length)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * ieee80211_ioctl_set_rates - mark legacy rates as basic or operational.
+ *
+ * iwr->u.data.pointer holds NUL-separated decimal strings (rates in
+ * 500 kbps units); iwr->u.data.length is the number of strings, capped to
+ * IEEE80211_AG_RATE_MAXSIZE.  flags selects GET_SET_BASIC_RATE (set the
+ * basic bit) or GET_SET_OPERATIONAL_RATE (clear it); MCS rates cannot be
+ * modified.  On success the beacon is regenerated so probe responses and
+ * beacons advertise the new set.
+ *
+ * NOTE(review): the buffer is parsed in place without copy_from_user() —
+ * confirm callers always hand over a kernel-accessible copy.
+ */
+static int
+ieee80211_ioctl_set_rates(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic  = vap->iv_ic;
+	struct ieee80211_rateset *rs;
+	char *ptr = ((char *)iwr->u.data.pointer);
+	uint32_t num_rates = iwr->u.data.length;
+	uint32_t flags = iwr->u.data.flags;
+	unsigned long rate;
+	enum ieee80211_phymode mode;
+	int retval = 0;
+	int i = 0;
+
+	mode = ic->ic_curmode;
+	rs = &ic->ic_sup_rates[mode];
+
+	/* Set rates is allowed for only Basic and operational rates (non 11n) */
+	if (num_rates > IEEE80211_AG_RATE_MAXSIZE) {
+		num_rates = IEEE80211_AG_RATE_MAXSIZE;
+	}
+
+	while ( num_rates-- ) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+		if (kstrtoul(ptr, 0, &rate)) {
+#else
+		if (strict_strtoul(ptr, 0, &rate)) {
+#endif
+			/* Bug fix: message typo ("Invalide") corrected */
+			printk(KERN_WARNING "Invalid input string\n");
+			retval = -EINVAL;
+			break;
+		}
+
+		for (i = 0; i < rs->rs_legacy_nrates; i++) {
+			if (rate != (rs->rs_rates[i] & IEEE80211_RATE_VAL))
+				continue;
+
+			if (flags == GET_SET_BASIC_RATE) {
+				rs->rs_rates[i] |= IEEE80211_RATE_BASIC;
+			} else if (flags == GET_SET_OPERATIONAL_RATE) {
+				rs->rs_rates[i] &= ~IEEE80211_RATE_BASIC;
+			} else {
+				printk(KERN_WARNING "Not supported to change the MCS rates\n");
+				retval = -EINVAL;
+				break;
+			}
+		}
+
+		/* Bug fix: stop parsing once an error has been recorded instead
+		 * of continuing to process the remaining strings. */
+		if (retval)
+			break;
+
+		ptr += strlen(ptr) + 1;
+	}
+
+	/* Update the beacon. This will dynamically change the rates
+	 * in probe response and beacons */
+	if (!retval) {
+		ic->ic_beacon_update(vap);
+	}
+
+	return retval;
+}
+
+/* Record a pending channel switch; the next beacon update will include
+ * the channel-switch announcement IE. */
+static void
+pre_announced_chanswitch(struct net_device *dev, u_int32_t channel, u_int32_t tbtt) {
+	struct ieee80211com *ic = ((struct ieee80211vap *)netdev_priv(dev))->iv_ic;
+
+	ic->ic_chanchange_chan = channel;
+	ic->ic_chanchange_tbtt = tbtt;
+	ic->ic_flags |= IEEE80211_F_CHANSWITCH;
+}
+
+/* Private ioctl: schedule a channel switch (channel, tbtt count in extra).
+ * Silently ignored unless 802.11h operation is enabled. */
+static int
+ieee80211_ioctl_chanswitch(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct ieee80211com *ic = vap->iv_ic;
+	int *args = (int *) extra;
+
+	if (ic->ic_flags & IEEE80211_F_DOTH)
+		pre_announced_chanswitch(dev, args[0], args[1]);
+
+	return 0;
+}
+
+/* SIOCSIWMLME handler: translate a wireless-extensions MLME request
+ * (deauth/disassoc) into the private setmlme ioctl format. */
+static int
+ieee80211_ioctl_siwmlme(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *data)
+{
+	struct iw_mlme *wextmlme = (struct iw_mlme *)data;
+	struct ieee80211req_mlme mlme;
+
+	memset(&mlme, 0, sizeof(mlme));
+
+	if (wextmlme->cmd == IW_MLME_DEAUTH)
+		mlme.im_op = IEEE80211_MLME_DEAUTH;
+	else if (wextmlme->cmd == IW_MLME_DISASSOC)
+		mlme.im_op = IEEE80211_MLME_DISASSOC;
+	else
+		return -EINVAL;
+
+	mlme.im_reason = wextmlme->reason_code;
+	memcpy(mlme.im_macaddr, wextmlme->addr.sa_data, IEEE80211_ADDR_LEN);
+
+	return ieee80211_ioctl_setmlme(dev, NULL, NULL, (char*)&mlme);
+}
+
+
+/* SIOCGIWGENIE handler: return the stored optional IE, refusing (rather
+ * than truncating) when the caller's buffer is too small. */
+static int
+ieee80211_ioctl_giwgenie(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *out, char *buf)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	return (out->length < vap->iv_opt_ie_len) ?
+		-E2BIG : ieee80211_ioctl_getoptie(dev, info, out, buf);
+}
+
+/* SIOCSIWGENIE handler: store the caller-supplied generic IE via the
+ * private "set optional IE" ioctl. */
+static int
+ieee80211_ioctl_siwgenie(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *data)
+{
+	return ieee80211_ioctl_setoptie(dev, info, erq, data);
+}
+
+
+/* Map IW_AUTH_WPA_VERSION flags onto the private WPA mode parameter:
+ * 0 = disabled, 1 = WPA, 2 = WPA2, 3 = both. */
+static int
+siwauth_wpa_version(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	const int both = IW_AUTH_WPA_VERSION_WPA | IW_AUTH_WPA_VERSION_WPA2;
+	int ver = erq->value & both;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_WPA;
+	if (ver == both)
+		args[1] = 3;
+	else if (ver == IW_AUTH_WPA_VERSION_WPA2)
+		args[1] = 2;
+	else if (ver == IW_AUTH_WPA_VERSION_WPA)
+		args[1] = 1;
+	else
+		args[1] = 0;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Translate a wireless-extensions cipher code into the 802.11 layer's
+ * cipher id; returns -1 for an unrecognized code. */
+static int
+iwcipher2ieee80211cipher(int iwciph)
+{
+	if (iwciph == IW_AUTH_CIPHER_NONE)
+		return IEEE80211_CIPHER_NONE;
+	if (iwciph == IW_AUTH_CIPHER_WEP40 || iwciph == IW_AUTH_CIPHER_WEP104)
+		return IEEE80211_CIPHER_WEP;
+	if (iwciph == IW_AUTH_CIPHER_TKIP)
+		return IEEE80211_CIPHER_TKIP;
+	if (iwciph == IW_AUTH_CIPHER_CCMP)
+		return IEEE80211_CIPHER_AES_CCM;
+	return -1;
+}
+
+/* Inverse of iwcipher2ieee80211cipher(); WEP always reads back as the
+ * 104-bit variant.  Returns -1 for an unrecognized cipher id. */
+static int
+ieee80211cipher2iwcipher(int ieee80211ciph)
+{
+	if (ieee80211ciph == IEEE80211_CIPHER_NONE)
+		return IW_AUTH_CIPHER_NONE;
+	if (ieee80211ciph == IEEE80211_CIPHER_WEP)
+		return IW_AUTH_CIPHER_WEP104;
+	if (ieee80211ciph == IEEE80211_CIPHER_TKIP)
+		return IW_AUTH_CIPHER_TKIP;
+	if (ieee80211ciph == IEEE80211_CIPHER_AES_CCM)
+		return IW_AUTH_CIPHER_CCMP;
+	return -1;
+}
+
+/* TODO We don't enforce wep key lengths. */
+/* Set the unicast (pairwise) cipher from an IW_AUTH_CIPHER_* code. */
+static int
+siwauth_cipher_pairwise(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2] = { IEEE80211_PARAM_UCASTCIPHER,
+			iwcipher2ieee80211cipher(erq->value) };
+
+	if (args[1] < 0) {
+		printk(KERN_WARNING "%s: unknown pairwise cipher %d\n",
+		       dev->name, erq->value);
+		return -EINVAL;
+	}
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* TODO We don't enforce wep key lengths. */
+/* Set the multicast (group) cipher from an IW_AUTH_CIPHER_* code. */
+static int
+siwauth_cipher_group(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2] = { IEEE80211_PARAM_MCASTCIPHER,
+			iwcipher2ieee80211cipher(erq->value) };
+
+	if (args[1] < 0) {
+		printk(KERN_WARNING "%s: unknown group cipher %d\n",
+		       dev->name, erq->value);
+		return -EINVAL;
+	}
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Translate IW_AUTH_KEY_MGMT_* bits into the private WPA_ASE_* mask. */
+static int
+siwauth_key_mgmt(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int algs = WPA_ASE_NONE;
+	int args[2];
+
+	if (erq->value & IW_AUTH_KEY_MGMT_802_1X)
+		algs |= WPA_ASE_8021X_UNSPEC;
+	if (erq->value & IW_AUTH_KEY_MGMT_PSK)
+		algs |= WPA_ASE_8021X_PSK;
+
+	args[0] = IEEE80211_PARAM_KEYMGTALGS;
+	args[1] = algs;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Enable/disable TKIP countermeasures (value passed through verbatim). */
+static int
+siwauth_tkip_countermeasures(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2] = { IEEE80211_PARAM_COUNTERMEASURES, erq->value };
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Control dropping of unencrypted frames (value passed through verbatim). */
+static int
+siwauth_drop_unencrypted(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2] = { IEEE80211_PARAM_DROPUNENCRYPTED, erq->value };
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+
+/* Map IW_AUTH_ALG_* flags onto the private auth mode: LEAP -> 802.1X,
+ * shared+open -> auto, shared alone -> shared key, otherwise open. */
+static int
+siwauth_80211_auth_alg(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+#define VALID_ALGS_MASK (IW_AUTH_ALG_OPEN_SYSTEM|IW_AUTH_ALG_SHARED_KEY|IW_AUTH_ALG_LEAP)
+	const int shared_and_open = IW_AUTH_ALG_SHARED_KEY | IW_AUTH_ALG_OPEN_SYSTEM;
+	int alg = erq->value;
+	int args[2];
+
+	if (alg & ~VALID_ALGS_MASK)
+		return -EINVAL;
+
+	args[0] = IEEE80211_PARAM_AUTHMODE;
+	if (alg & IW_AUTH_ALG_LEAP)
+		args[1] = IEEE80211_AUTH_8021X;
+	else if ((alg & shared_and_open) == shared_and_open)
+		args[1] = IEEE80211_AUTH_AUTO;
+	else if (alg & IW_AUTH_ALG_SHARED_KEY)
+		args[1] = IEEE80211_AUTH_SHARED;
+	else
+		args[1] = IEEE80211_AUTH_OPEN;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Enable (WPA1+WPA2, mode 3) or disable (mode 0) WPA as a whole. */
+static int
+siwauth_wpa_enabled(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_WPA;
+	args[1] = erq->value ? 3 : 0;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Control dropping of unencrypted EAPOL frames; value normalized to 0/1. */
+static int
+siwauth_rx_unencrypted_eapol(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_DROPUNENC_EAPOL;
+	args[1] = erq->value ? 1 : 0;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Map IW_AUTH_ROAMING_{ENABLE,DISABLE} onto the private roaming modes. */
+static int
+siwauth_roaming_control(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_ROAMING;
+	if (erq->value == IW_AUTH_ROAMING_ENABLE)
+		args[1] = IEEE80211_ROAMING_AUTO;
+	else if (erq->value == IW_AUTH_ROAMING_DISABLE)
+		args[1] = IEEE80211_ROAMING_MANUAL;
+	else
+		return -EINVAL;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Set the privacy-invoked flag (value passed through verbatim). */
+static int
+siwauth_privacy_invoked(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2] = { IEEE80211_PARAM_PRIVACY, erq->value };
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/*
+ * If this function is invoked it means someone is using the wireless extensions
+ * API instead of the private madwifi ioctls.  That's fine.  We translate their
+ * request into the format used by the private ioctls.  Note that the
+ * iw_request_info and iw_param structures are not the same ones as the
+ * private ioctl handler expects.  Luckily, the private ioctl handler doesn't
+ * do anything with those at the moment.  We pass NULL for those, because in
+ * case someone does modify the ioctl handler to use those values, a null
+ * pointer will be easier to debug than other bad behavior.
+ */
+static int
+ieee80211_ioctl_siwauth(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	/* Unrecognized IW_AUTH_INDEX flags fall through to -EINVAL. */
+	int rc = -EINVAL;
+
+	switch(erq->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+		rc = siwauth_wpa_version(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_PAIRWISE:
+		rc = siwauth_cipher_pairwise(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_GROUP:
+		rc = siwauth_cipher_group(dev, info, erq, buf);
+		break;
+	case IW_AUTH_KEY_MGMT:
+		rc = siwauth_key_mgmt(dev, info, erq, buf);
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		rc = siwauth_tkip_countermeasures(dev, info, erq, buf);
+		break;
+	case IW_AUTH_DROP_UNENCRYPTED:
+		rc = siwauth_drop_unencrypted(dev, info, erq, buf);
+		break;
+	case IW_AUTH_80211_AUTH_ALG:
+		rc = siwauth_80211_auth_alg(dev, info, erq, buf);
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		rc = siwauth_wpa_enabled(dev, info, erq, buf);
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		rc = siwauth_rx_unencrypted_eapol(dev, info, erq, buf);
+		break;
+	case IW_AUTH_ROAMING_CONTROL:
+		rc = siwauth_roaming_control(dev, info, erq, buf);
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		rc = siwauth_privacy_invoked(dev, info, erq, buf);
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown SIOCSIWAUTH flag %d\n",
+			dev->name, erq->flags);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Report the configured WPA version(s) as IW_AUTH_WPA_VERSION_* flags.
+ * The private WPA parameter encodes: 1 = WPA, 2 = WPA2, 3 = both.
+ *
+ * Bug fix: the "both" case previously OR'd IW_AUTH_WPA_VERSION (the
+ * SIOCSIWAUTH parameter index, not a version flag) with the WPA2 flag;
+ * it must report both version flags.
+ */
+static int
+giwauth_wpa_version(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int ver;
+	int rc;
+	int arg = IEEE80211_PARAM_WPA;
+
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+
+	switch(arg) {
+	case 1:
+		ver = IW_AUTH_WPA_VERSION_WPA;
+		break;
+	case 2:
+		ver = IW_AUTH_WPA_VERSION_WPA2;
+		break;
+	case 3:
+		ver = IW_AUTH_WPA_VERSION_WPA|IW_AUTH_WPA_VERSION_WPA2;
+		break;
+	default:
+		ver = IW_AUTH_WPA_VERSION_DISABLED;
+		break;
+	}
+
+	erq->value = ver;
+	return rc;
+}
+
+/* Report the configured unicast (pairwise) cipher as an IW_AUTH_CIPHER_*
+ * code; -EINVAL if the driver's cipher id has no WE equivalent. */
+static int
+giwauth_cipher_pairwise(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_UCASTCIPHER;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+
+	if (rc)
+		return rc;
+
+	erq->value = ieee80211cipher2iwcipher(arg);
+	return (erq->value < 0) ? -EINVAL : 0;
+}
+
+
+/* Report the configured multicast (group) cipher as an IW_AUTH_CIPHER_*
+ * code; -EINVAL if the driver's cipher id has no WE equivalent. */
+static int
+giwauth_cipher_group(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_MCASTCIPHER;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+
+	if (rc)
+		return rc;
+
+	erq->value = ieee80211cipher2iwcipher(arg);
+	return (erq->value < 0) ? -EINVAL : 0;
+}
+
+/* Report the configured key-management suites as IW_AUTH_KEY_MGMT_* bits. */
+static int
+giwauth_key_mgmt(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_KEYMGTALGS;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	int km = 0;
+
+	if (rc)
+		return rc;
+
+	if (arg & WPA_ASE_8021X_UNSPEC)
+		km |= IW_AUTH_KEY_MGMT_802_1X;
+	if (arg & WPA_ASE_8021X_PSK)
+		km |= IW_AUTH_KEY_MGMT_PSK;
+
+	erq->value = km;
+	return 0;
+}
+
+/* Report whether TKIP countermeasures are currently active. */
+static int
+giwauth_tkip_countermeasures(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_COUNTERMEASURES;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+
+	if (rc == 0)
+		erq->value = arg;
+	return rc;
+}
+
+/* Report whether unencrypted frames are being dropped. */
+static int
+giwauth_drop_unencrypted(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_DROPUNENCRYPTED;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+
+	if (rc == 0)
+		erq->value = arg;
+	return rc;
+}
+
+/* Reading back the 802.11 authentication algorithm is not implemented. */
+static int
+giwauth_80211_auth_alg(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	return -EOPNOTSUPP;
+}
+
+/* Report the raw WPA mode value (0 disabled, 1 WPA, 2 WPA2, 3 both). */
+static int
+giwauth_wpa_enabled(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_WPA;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+
+	if (rc == 0)
+		erq->value = arg;
+	return rc;
+}
+
+/* Reading back the unencrypted-EAPOL policy is not implemented. */
+static int
+giwauth_rx_unencrypted_eapol(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	return -EOPNOTSUPP;
+}
+
+/* Report roaming state: DEVICE and AUTO both count as enabled, anything
+ * else (including MANUAL) as disabled. */
+static int
+giwauth_roaming_control(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_ROAMING;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+
+	if (rc)
+		return rc;
+
+	erq->value = (arg == IEEE80211_ROAMING_DEVICE ||
+			arg == IEEE80211_ROAMING_AUTO) ?
+		IW_AUTH_ROAMING_ENABLE : IW_AUTH_ROAMING_DISABLE;
+
+	return 0;
+}
+
+/* Report the privacy-invoked flag. */
+static int
+giwauth_privacy_invoked(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg = IEEE80211_PARAM_PRIVACY;
+	int rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+
+	if (rc == 0)
+		erq->value = arg;
+	return rc;
+}
+
+/* SIOCGIWAUTH dispatcher: route each IW_AUTH_INDEX flag to its getter;
+ * unhandled flags are logged and reported as -EOPNOTSUPP. */
+static int
+ieee80211_ioctl_giwauth(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc = -EOPNOTSUPP;
+
+	switch(erq->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+		rc = giwauth_wpa_version(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_PAIRWISE:
+		rc = giwauth_cipher_pairwise(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_GROUP:
+		rc = giwauth_cipher_group(dev, info, erq, buf);
+		break;
+	case IW_AUTH_KEY_MGMT:
+		rc = giwauth_key_mgmt(dev, info, erq, buf);
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		rc = giwauth_tkip_countermeasures(dev, info, erq, buf);
+		break;
+	case IW_AUTH_DROP_UNENCRYPTED:
+		rc = giwauth_drop_unencrypted(dev, info, erq, buf);
+		break;
+	case IW_AUTH_80211_AUTH_ALG:
+		rc = giwauth_80211_auth_alg(dev, info, erq, buf);
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		rc = giwauth_wpa_enabled(dev, info, erq, buf);
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		rc = giwauth_rx_unencrypted_eapol(dev, info, erq, buf);
+		break;
+	case IW_AUTH_ROAMING_CONTROL:
+		rc = giwauth_roaming_control(dev, info, erq, buf);
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		rc = giwauth_privacy_invoked(dev, info, erq, buf);
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown SIOCGIWAUTH flag %d\n",
+			dev->name, erq->flags);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Retrieve information about a key.  Open question: should we allow
+ * callers to retrieve unicast keys based on a supplied MAC address?
+ * The ipw2200 reference implementation doesn't, so we don't either.
+ *
+ * Not currently used
+ */
+static int
+ieee80211_ioctl_giwencodeext(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *extra)
+{
+#ifndef IEEE80211_UNUSED_CRYPTO_COMMANDS
+	/* Compiled out by default; see the matching #ifdef in siwencodeext. */
+	return -EOPNOTSUPP;
+#else
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct iw_encode_ext *ext;
+	struct ieee80211_key *wk;
+	int error;
+	int kid;
+	int max_key_len;
+
+	/* Key material is privileged information. */
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Room left for key bytes after the iw_encode_ext header. */
+	max_key_len = erq->length - sizeof(*ext);
+	if (max_key_len < 0)
+		return -EINVAL;
+	ext = (struct iw_encode_ext *)extra;
+
+	error = getiwkeyix(vap, erq, &kid);
+	if (error < 0)
+		return error;
+
+	wk = &vap->iv_nw_keys[kid];
+	if (wk->wk_keylen > max_key_len)
+		return -E2BIG;
+
+	/* Wireless extensions report key indices 1-based. */
+	erq->flags = kid+1;
+	memset(ext, 0, sizeof(*ext));
+
+	ext->key_len = wk->wk_keylen;
+	memcpy(ext->key, wk->wk_key, wk->wk_keylen);
+
+	/* flags */
+	if (wk->wk_flags & IEEE80211_KEY_GROUP)
+		ext->ext_flags |= IW_ENCODE_EXT_GROUP_KEY;
+
+	/* algorithm */
+	switch(wk->wk_cipher->ic_cipher) {
+	case IEEE80211_CIPHER_NONE:
+		ext->alg = IW_ENCODE_ALG_NONE;
+		erq->flags |= IW_ENCODE_DISABLED;
+		break;
+	case IEEE80211_CIPHER_WEP:
+		ext->alg = IW_ENCODE_ALG_WEP;
+		break;
+	case IEEE80211_CIPHER_TKIP:
+		ext->alg = IW_ENCODE_ALG_TKIP;
+		break;
+	case IEEE80211_CIPHER_AES_OCB:
+	case IEEE80211_CIPHER_AES_CCM:
+	case IEEE80211_CIPHER_CKIP:
+		/* NOTE(review): OCB and CKIP are also reported as CCMP here —
+		 * confirm this collapse is intentional. */
+		ext->alg = IW_ENCODE_ALG_CCMP;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+#endif /* IEEE80211_UNUSED_CRYPTO_COMMANDS */
+}
+
+/*
+ * SIOCSIWENCODEEXT handler: translate the wireless-extensions "extended
+ * encode" request into the matching private ioctl — delete-key for
+ * IW_ENCODE_ALG_NONE, the legacy SIOCSIWENCODE path for broadcast WEP,
+ * and set-key for everything else.  Compiled out by default.
+ */
+static int
+ieee80211_ioctl_siwencodeext(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *extra)
+{
+#ifndef IEEE80211_UNUSED_CRYPTO_COMMANDS
+	return -EOPNOTSUPP;
+#else
+	struct ieee80211vap *vap = netdev_priv(dev);
+	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+	struct ieee80211req_key kr;
+	int error;
+	int kid;
+
+	error = getiwkeyix(vap, erq, &kid);
+	if (error < 0)
+		return error;
+
+	/* Key bytes must fit in the buffer after the iw_encode_ext header. */
+	if (ext->key_len > (erq->length - sizeof(struct iw_encode_ext)))
+		return -EINVAL;
+
+	if (ext->alg == IW_ENCODE_ALG_NONE) {
+		/* convert to the format used by IEEE_80211_IOCTL_DELKEY */
+		struct ieee80211req_del_key dk;
+
+		memset(&dk, 0, sizeof(dk));
+		dk.idk_keyix = kid;
+		memcpy(&dk.idk_macaddr, ext->addr.sa_data, IEEE80211_ADDR_LEN);
+
+		return ieee80211_ioctl_delkey(dev, NULL, NULL, (char*)&dk);
+	}
+
+	/* TODO This memcmp for the broadcast address seems hackish, but
+	 * mimics what wpa supplicant was doing.  The wpa supplicant comments
+	 * make it sound like they were having trouble with
+	 * IEEE80211_IOCTL_SETKEY and static WEP keys.  It might be worth
+	 * figuring out what their trouble was so the rest of this function
+	 * can be implemented in terms of ieee80211_ioctl_setkey */
+	if (ext->alg == IW_ENCODE_ALG_WEP &&
+	    memcmp(ext->addr.sa_data, "\xff\xff\xff\xff\xff\xff",
+		   IEEE80211_ADDR_LEN) == 0) {
+		/* convert to the format used by SIOCSIWENCODE.  The old
+		 * format just had the key in the extra buf, whereas the
+		 * new format has the key tacked on to the end of the
+		 * iw_encode_ext structure */
+		struct iw_request_info oldinfo;
+		struct iw_point olderq;
+		char *key;
+
+		memset(&oldinfo, 0, sizeof(oldinfo));
+		oldinfo.cmd = SIOCSIWENCODE;
+		oldinfo.flags = info->flags;
+
+		memset(&olderq, 0, sizeof(olderq));
+		olderq.flags = erq->flags;
+		olderq.pointer = erq->pointer;
+		olderq.length = ext->key_len;
+
+		key = (char *)ext->key;
+
+		return ieee80211_ioctl_siwencode(dev, &oldinfo, &olderq, key);
+	}
+
+	/* convert to the format used by IEEE_80211_IOCTL_SETKEY */
+	memset(&kr, 0, sizeof(kr));
+
+	/* Map the WE algorithm code to the 802.11 cipher id. */
+	switch(ext->alg) {
+	case IW_ENCODE_ALG_WEP:
+		kr.ik_type = IEEE80211_CIPHER_WEP;
+		break;
+	case IW_ENCODE_ALG_TKIP:
+		kr.ik_type = IEEE80211_CIPHER_TKIP;
+		break;
+	case IW_ENCODE_ALG_CCMP:
+		kr.ik_type = IEEE80211_CIPHER_AES_CCM;
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown algorithm %d\n",
+		       dev->name, ext->alg);
+		return -EINVAL;
+	}
+
+	kr.ik_keyix = kid;
+
+	if (ext->key_len > sizeof(kr.ik_keydata)) {
+		printk(KERN_WARNING "%s: key size %d is too large\n",
+		       dev->name, ext->key_len);
+		return -E2BIG;
+	}
+	memcpy(kr.ik_keydata, ext->key, ext->key_len);
+	kr.ik_keylen = ext->key_len;
+
+	/* All keys can receive; TX/default status depends on ext_flags below. */
+	kr.ik_flags = IEEE80211_KEY_RECV;
+
+	if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
+		kr.ik_flags |= IEEE80211_KEY_GROUP;
+
+	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+		kr.ik_flags |= IEEE80211_KEY_XMIT | IEEE80211_KEY_DEFAULT;
+		memcpy(kr.ik_macaddr, ext->addr.sa_data, IEEE80211_ADDR_LEN);
+	}
+
+	/* Seed the replay counter from the supplied RX sequence, if valid. */
+	if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+		memcpy(&kr.ik_keyrsc, ext->rx_seq, sizeof(kr.ik_keyrsc));
+	}
+
+	return ieee80211_ioctl_setkey(dev, NULL, NULL, (char*)&kr);
+#endif /* IEEE80211_UNUSED_CRYPTO_COMMANDS */
+}
+
+/*
+ * iwpriv argument descriptors: each combines the wireless-extensions
+ * byte-array type flag with the payload size of the matching ioctl
+ * request structure.  Every expansion is fully parenthesized so the
+ * bitwise OR cannot re-associate when a macro is combined with other
+ * flags at the use site (e.g. "IW_PRIV_TYPE_KEY | IW_PRIV_SIZE_FIXED").
+ */
+#define	IW_PRIV_TYPE_OPTIE	(IW_PRIV_TYPE_BYTE | IEEE80211_MAX_OPT_IE)
+#define	IW_PRIV_TYPE_KEY \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_key))
+#define	IW_PRIV_TYPE_DELKEY \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_del_key))
+#define	IW_PRIV_TYPE_MLME \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_mlme))
+#define	IW_PRIV_TYPE_CHANLIST \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_chanlist))
+#define	IW_PRIV_TYPE_CHANINFO \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_chaninfo))
+/* App IE buffer carries a variable-length IE after the fixed header. */
+#define IW_PRIV_TYPE_APPIEBUF \
+	(IW_PRIV_TYPE_BYTE | (sizeof(struct ieee80211req_getset_appiebuf) + IEEE80211_APPIE_MAX))
+#define IW_PRIV_TYPE_FILTER \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_set_filter))
+#define IW_PRIV_TYPE_CCA \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct qtn_cca_args))
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+/* 802.11k remote STA statistics request/response payloads. */
+#define IW_PRIV_TYPE_STASTATISTIC_SETPARA \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_qtn_rmt_sta_stats_setpara))
+#define IW_PRIV_TYPE_STASTATISTIC_GETPARA \
+	(IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_qtn_rmt_sta_stats))
+#endif
+
+/*
+ * Scratch payload for iwpriv "block data" transfers: sized to the
+ * largest of the structures below.  Make sure the size of each
+ * block_data member is larger than IFNAMSIZ — NOTE(review): presumably
+ * the same user buffer first carries an interface name; confirm against
+ * the callers that use IW_PRIV_BLOCK_DATASIZE.
+ */
+union block_data {
+	struct ieee80211req_csw_record record;
+	struct ieee80211_assoc_history assoc_history;
+	uint32_t pm_state[QTN_PM_IOCTL_MAX];
+};
+/* Size of the largest block-data payload exchanged through iwpriv. */
+#define IW_PRIV_BLOCK_DATASIZE (sizeof(union block_data))
+
+static const struct iw_priv_args ieee80211_priv_args[] =
+{
+	/* NB: setoptie & getoptie are !IW_PRIV_SIZE_FIXED */
+	{ IEEE80211_IOCTL_SETOPTIE,
+	  IW_PRIV_TYPE_OPTIE, 0,			"setoptie" },
+	{ IEEE80211_IOCTL_GETOPTIE,
+	  0, IW_PRIV_TYPE_OPTIE,			"getoptie" },
+	{ IEEE80211_IOCTL_SETKEY,
+	  IW_PRIV_TYPE_KEY | IW_PRIV_SIZE_FIXED, 0,	"setkey" },
+	{ IEEE80211_IOCTL_DELKEY,
+	  IW_PRIV_TYPE_DELKEY | IW_PRIV_SIZE_FIXED, 0,	"delkey" },
+	{ IEEE80211_IOCTL_SETMLME,
+	  IW_PRIV_TYPE_MLME | IW_PRIV_SIZE_FIXED, 0,	"setmlme" },
+	{ IEEE80211_IOCTL_ADDMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"setbssid" },
+	{ IEEE80211_IOCTL_DELMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"delmac" },
+	{ IEEE80211_IOCTL_KICKMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickmac"},
+	{ IEEE80211_IOCTL_WDSADDMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"wds_add" },
+	{ IEEE80211_IOCTL_WDSDELMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"wds_del" },
+	{ IEEE80211_IOCTL_SETCHANLIST,
+	  IW_PRIV_TYPE_CHANLIST | IW_PRIV_SIZE_FIXED, 0,"setchanlist" },
+	{ IEEE80211_IOCTL_GETCHANLIST,
+	  0, IW_PRIV_TYPE_CHANLIST | IW_PRIV_SIZE_FIXED,"getchanlist" },
+	{ IEEE80211_IOCTL_STARTCCA,
+	  IW_PRIV_TYPE_CCA | IW_PRIV_SIZE_FIXED | 1, 0, "startcca" },
+	{ IEEE80211_IOCTL_GETCHANINFO,
+	  0, IW_PRIV_TYPE_CHANINFO | IW_PRIV_SIZE_FIXED,"getchaninfo" },
+	{ IEEE80211_IOCTL_SETMODE,
+	  IW_PRIV_TYPE_CHAR |  12, 0, "mode" },
+	{ IEEE80211_IOCTL_GETMODE,
+	  0, IW_PRIV_TYPE_CHAR | 6, "get_mode" },
+	{ IEEE80211_IOCTL_POSTEVENT,
+	  IW_PRIV_TYPE_CHAR | 256, 0,			"postevent" },
+	{ IEEE80211_IOCTL_TXEAPOL,
+	  IW_PRIV_TYPE_BYTE | 2047, 0,			"txeapol" },
+	{ IEEE80211_IOCTL_SETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 4, 0,"setwmmparams" },
+	{ IEEE80211_IOCTL_GETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "getwmmparams" },
+	{ IEEE80211_IOCTL_RADAR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "doth_radar" },
+	{ IEEE80211_IOCTL_DFSACTSCAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dfsactscan" },
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	{ IEEE80211_IOCTL_GETSTASTATISTIC,
+	  IW_PRIV_TYPE_STASTATISTIC_SETPARA,
+	  IW_PRIV_TYPE_STASTATISTIC_GETPARA | IW_PRIV_SIZE_FIXED, "getstastatistic" },
+#endif
+	/*
+	 * These depends on sub-ioctl support which added in version 12.
+	 */
+	{ IEEE80211_IOCTL_SETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"" },
+	{ IEEE80211_IOCTL_GETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "" },
+	/* sub-ioctl handlers */
+	{ IEEE80211_WMMPARAMS_CWMIN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"cwmin" },
+	{ IEEE80211_WMMPARAMS_CWMIN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_cwmin" },
+	{ IEEE80211_WMMPARAMS_CWMAX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"cwmax" },
+	{ IEEE80211_WMMPARAMS_CWMAX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_cwmax" },
+	{ IEEE80211_WMMPARAMS_AIFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"aifs" },
+	{ IEEE80211_WMMPARAMS_AIFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_aifs" },
+	{ IEEE80211_WMMPARAMS_TXOPLIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"txoplimit" },
+	{ IEEE80211_WMMPARAMS_TXOPLIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_txoplimit" },
+	{ IEEE80211_WMMPARAMS_ACM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"acm" },
+	{ IEEE80211_WMMPARAMS_ACM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_acm" },
+	{ IEEE80211_WMMPARAMS_NOACKPOLICY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"noackpolicy" },
+	{ IEEE80211_WMMPARAMS_NOACKPOLICY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_noackpolicy" },
+
+	{ IEEE80211_IOCTL_SETPARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "setparam" },
+	/*
+	 * These depends on sub-ioctl support which added in version 12.
+	 */
+	{ IEEE80211_IOCTL_GETPARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,	"getparam" },
+
+	/* sub-ioctl handlers */
+	{ IEEE80211_IOCTL_SETPARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "" },
+	{ IEEE80211_IOCTL_GETPARAM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "" },
+
+	/* sub-ioctl definitions */
+	{ IEEE80211_PARAM_AUTHMODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "authmode" },
+	{ IEEE80211_PARAM_AUTHMODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_authmode" },
+	{ IEEE80211_PARAM_PROTMODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "protmode" },
+	{ IEEE80211_PARAM_PROTMODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_protmode" },
+	{ IEEE80211_PARAM_MCASTCIPHER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcastcipher" },
+	{ IEEE80211_PARAM_MCASTCIPHER,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcastcipher" },
+	{ IEEE80211_PARAM_MCASTKEYLEN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcastkeylen" },
+	{ IEEE80211_PARAM_MCASTKEYLEN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcastkeylen" },
+	{ IEEE80211_PARAM_UCASTCIPHERS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ucastciphers" },
+	{ IEEE80211_PARAM_UCASTCIPHERS,
+	/*
+	 * NB: can't use "get_ucastciphers" because iwpriv command names
+	 *     must be shorter than IFNAMSIZ, which is 16.
+	 */
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_uciphers" },
+	{ IEEE80211_PARAM_UCASTCIPHER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ucastcipher" },
+	{ IEEE80211_PARAM_UCASTCIPHER,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ucastcipher" },
+	{ IEEE80211_PARAM_UCASTKEYLEN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ucastkeylen" },
+	{ IEEE80211_PARAM_UCASTKEYLEN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ucastkeylen" },
+	{ IEEE80211_PARAM_KEYMGTALGS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "keymgtalgs" },
+	{ IEEE80211_PARAM_KEYMGTALGS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_keymgtalgs" },
+	{ IEEE80211_PARAM_RSNCAPS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rsncaps" },
+	{ IEEE80211_PARAM_RSNCAPS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rsncaps" },
+	{ IEEE80211_PARAM_ROAMING,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostroaming" },
+	{ IEEE80211_PARAM_ROAMING,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_hostroaming" },
+	{ IEEE80211_PARAM_PRIVACY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "privacy" },
+	{ IEEE80211_PARAM_PRIVACY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_privacy" },
+	{ IEEE80211_PARAM_COUNTERMEASURES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "countermeasures" },
+	{ IEEE80211_PARAM_COUNTERMEASURES,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_countermeas" },
+	{ IEEE80211_PARAM_DROPUNENCRYPTED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dropunencrypted" },
+	{ IEEE80211_PARAM_DROPUNENCRYPTED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dropunencry" },
+	{ IEEE80211_PARAM_WPA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa" },
+	{ IEEE80211_PARAM_WPA,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wpa" },
+	{ IEEE80211_PARAM_DRIVER_CAPS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "driver_caps" },
+	{ IEEE80211_PARAM_DRIVER_CAPS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_driver_caps" },
+	{ IEEE80211_PARAM_WMM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wmm" },
+	{ IEEE80211_PARAM_WMM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wmm" },
+	{ IEEE80211_PARAM_HIDESSID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hide_ssid" },
+	{ IEEE80211_PARAM_HIDESSID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_hide_ssid" },
+	{ IEEE80211_PARAM_APBRIDGE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_bridge" },
+	{ IEEE80211_PARAM_APBRIDGE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ap_bridge" },
+	{ IEEE80211_PARAM_INACT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inact" },
+	{ IEEE80211_PARAM_INACT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_inact" },
+	{ IEEE80211_PARAM_INACT_AUTH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inact_auth" },
+	{ IEEE80211_PARAM_INACT_AUTH,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_inact_auth" },
+	{ IEEE80211_PARAM_INACT_INIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inact_init" },
+	{ IEEE80211_PARAM_INACT_INIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_inact_init" },
+	{ IEEE80211_PARAM_ABOLT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "abolt" },
+	{ IEEE80211_PARAM_ABOLT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_abolt" },
+	{ IEEE80211_PARAM_DTIM_PERIOD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dtim_period" },
+	{ IEEE80211_PARAM_DTIM_PERIOD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dtim_period" },
+	{ IEEE80211_PARAM_ASSOC_LIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "assoc_limit" },
+	{ IEEE80211_PARAM_ASSOC_LIMIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_assoc_limit" },
+	{ IEEE80211_PARAM_BSS_ASSOC_LIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bss_assoc_limit" },
+	{ IEEE80211_PARAM_BSS_ASSOC_LIMIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bss_assolmt" },
+	/* XXX bintval chosen to avoid 16-char limit */
+	{ IEEE80211_PARAM_BEACON_INTERVAL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bintval" },
+	{ IEEE80211_PARAM_BEACON_INTERVAL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bintval" },
+	{ IEEE80211_PARAM_DOTH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "doth" },
+	{ IEEE80211_PARAM_DOTH,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_doth" },
+	{ IEEE80211_PARAM_PWRCONSTRAINT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "doth_pwrcst" },
+	{ IEEE80211_PARAM_PWRCONSTRAINT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_doth_pwrcst" },
+	{ IEEE80211_PARAM_GENREASSOC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "doth_reassoc" },
+#ifdef MATS
+	{ IEEE80211_PARAM_COMPRESSION,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "compression" },
+	{ IEEE80211_PARAM_COMPRESSION,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_compression" },
+	{ IEEE80211_PARAM_FF,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ff" },
+	{ IEEE80211_PARAM_FF,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ff" },
+	{ IEEE80211_PARAM_TURBO,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "turbo" },
+	{ IEEE80211_PARAM_TURBO,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_turbo" },
+	{ IEEE80211_PARAM_XR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "xr" },
+	{ IEEE80211_PARAM_XR,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_xr" },
+	{ IEEE80211_PARAM_BURST,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "burst" },
+	{ IEEE80211_PARAM_BURST,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_burst" },
+#endif
+	{ IEEE80211_IOCTL_CHANSWITCH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0,	"doth_chanswitch" },
+	{ IEEE80211_PARAM_PUREG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pureg" },
+	{ IEEE80211_PARAM_PUREG,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_pureg" },
+	{ IEEE80211_PARAM_REPEATER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "repeater" },
+	{ IEEE80211_PARAM_REPEATER,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_repeater" },
+	{ IEEE80211_PARAM_WDS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wds" },
+	{ IEEE80211_PARAM_WDS,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wds" },
+	{ IEEE80211_PARAM_BGSCAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bgscan" },
+	{ IEEE80211_PARAM_BGSCAN,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bgscan" },
+	{ IEEE80211_PARAM_BGSCAN_IDLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bgscanidle" },
+	{ IEEE80211_PARAM_BGSCAN_IDLE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bgscanidle" },
+	{ IEEE80211_PARAM_BGSCAN_INTERVAL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bgscanintvl" },
+	{ IEEE80211_PARAM_BGSCAN_INTERVAL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bgscanintvl" },
+	{ IEEE80211_PARAM_MCAST_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcast_rate" },
+	{ IEEE80211_PARAM_MCAST_RATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcast_rate" },
+	{ IEEE80211_PARAM_COVERAGE_CLASS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "coverageclass" },
+	{ IEEE80211_PARAM_COVERAGE_CLASS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_coveragecls" },
+	{ IEEE80211_PARAM_COUNTRY_IE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "countryie" },
+	{ IEEE80211_PARAM_COUNTRY_IE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_countryie" },
+	{ IEEE80211_PARAM_SCANVALID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scanvalid" },
+	{ IEEE80211_PARAM_SCANVALID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scanvalid" },
+	{ IEEE80211_PARAM_REGCLASS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "regclass" },
+	{ IEEE80211_PARAM_REGCLASS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_regclass" },
+	{ IEEE80211_PARAM_DROPUNENC_EAPOL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dropunenceapol" },
+	{ IEEE80211_PARAM_DROPUNENC_EAPOL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dropunencea" },
+	{ IEEE80211_PARAM_SHPREAMBLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "shpreamble" },
+	{ IEEE80211_PARAM_SHPREAMBLE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_shpreamble" },
+	/*
+	 * NB: these should be roamrssi* etc, but iwpriv usurps all
+	 *     strings that start with roam!
+	 */
+	{ IEEE80211_PARAM_ROAM_RSSI_11A,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rssi11a" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11A,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rssi11a" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11B,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rssi11b" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11B,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rssi11b" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11G,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rssi11g" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11G,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rssi11g" },
+	{ IEEE80211_PARAM_ROAM_RATE_11A,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rate11a" },
+	{ IEEE80211_PARAM_ROAM_RATE_11A,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rate11a" },
+	{ IEEE80211_PARAM_ROAM_RATE_11B,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rate11b" },
+	{ IEEE80211_PARAM_ROAM_RATE_11B,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rate11b" },
+	{ IEEE80211_PARAM_ROAM_RATE_11G,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rate11g" },
+	{ IEEE80211_PARAM_ROAM_RATE_11G,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rate11g" },
+	{ IEEE80211_PARAM_UAPSDINFO,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "uapsd" },
+	{ IEEE80211_PARAM_UAPSDINFO,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_uapsd" },
+	{ IEEE80211_PARAM_SLEEP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sleep" },
+	{ IEEE80211_PARAM_SLEEP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_sleep" },
+	{ IEEE80211_PARAM_QOSNULL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "qosnull" },
+	{ IEEE80211_PARAM_PSPOLL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pspoll" },
+	{ IEEE80211_PARAM_EOSPDROP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "eospdrop" },
+	{ IEEE80211_PARAM_EOSPDROP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_eospdrop" },
+	{ IEEE80211_PARAM_STA_DFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sta_dfs"},
+	{ IEEE80211_PARAM_STA_DFS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_sta_dfs"},
+	{ IEEE80211_PARAM_MARKDFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "markdfs" },
+	{ IEEE80211_PARAM_MARKDFS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_markdfs" },
+	{ IEEE80211_PARAM_RADAR_BW,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "radar_bw" },
+	{ IEEE80211_IOCTL_SET_APPIEBUF,
+	  IW_PRIV_TYPE_APPIEBUF, 0, "setiebuf" },
+	{ IEEE80211_IOCTL_GET_APPIEBUF,
+	  0, IW_PRIV_TYPE_APPIEBUF, "getiebuf" },
+	{ IEEE80211_IOCTL_FILTERFRAME,
+	  IW_PRIV_TYPE_FILTER , 0, "setfilter" },
+	{ IEEE80211_PARAM_FIXED_TX_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "fixedtxrate" },
+	{ IEEE80211_PARAM_FIXED_TX_RATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_fixedtxrate" },
+	{ IEEE80211_PARAM_MIMOMODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mimomode" },
+	{ IEEE80211_PARAM_MIMOMODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mimomode" },
+	{ IEEE80211_PARAM_AGGREGATION,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "aggregation" },
+	{ IEEE80211_PARAM_AGGREGATION,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_aggregation" },
+	{ IEEE80211_PARAM_RETRY_COUNT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "retrycount" },
+	{ IEEE80211_PARAM_RETRY_COUNT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_retrycount" },
+	{ IEEE80211_PARAM_VAP_DBG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "vapdebug" },
+	{ IEEE80211_PARAM_VAP_DBG,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vapdebug" },
+	{ IEEE80211_PARAM_NODEREF_DBG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "noderef_dbg" },
+	{ IEEE80211_PARAM_EXP_MAT_SEL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "expmattype" },
+	{ IEEE80211_PARAM_EXP_MAT_SEL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_expmattype" },
+	{ IEEE80211_PARAM_BW_SEL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bwselect" },
+	{ IEEE80211_PARAM_BW_SEL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bwselect" },
+	{ IEEE80211_PARAM_RG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rgselect" },
+	{ IEEE80211_PARAM_RG,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rgselect" },
+	{ IEEE80211_PARAM_BW_SEL_MUC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bwselect_muc" },
+	{ IEEE80211_PARAM_BW_SEL_MUC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bwselect_muc" },
+	{ IEEE80211_PARAM_ACK_POLICY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ackpolicy" },
+	{ IEEE80211_PARAM_ACK_POLICY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ackpolicy" },
+	{ IEEE80211_PARAM_LEGACY_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "legacyselect" },
+	{ IEEE80211_PARAM_LEGACY_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_legacyselect" },
+	{ IEEE80211_PARAM_MAX_AGG_SIZE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_aggsize" },
+	{ IEEE80211_PARAM_MAX_AGG_SIZE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_maxaggsize" },
+	{ IEEE80211_PARAM_TXBF_CTRL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txbf_ctrl" },
+	{ IEEE80211_PARAM_TXBF_CTRL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_txbfctrl" },
+	{ IEEE80211_PARAM_TXBF_PERIOD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txbf_period" },
+	{ IEEE80211_PARAM_TXBF_PERIOD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_txbfperiod" },
+	{ IEEE80211_PARAM_HTBA_SEQ_CTRL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "htba_seq" },
+	{ IEEE80211_PARAM_HTBA_SEQ_CTRL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_htba_seq" },
+	{ IEEE80211_PARAM_HTBA_SIZE_CTRL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "htba_size" },
+	{ IEEE80211_PARAM_HTBA_SIZE_CTRL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_htba_size" },
+	{ IEEE80211_PARAM_HTBA_TIME_CTRL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "htba_time" },
+	{ IEEE80211_PARAM_HTBA_TIME_CTRL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_htba_time" },
+	{ IEEE80211_PARAM_HT_ADDBA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "htba_addba" },
+	{ IEEE80211_PARAM_HT_ADDBA,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_htba_addba" },
+	{ IEEE80211_PARAM_HT_DELBA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "htba_delba" },
+	{ IEEE80211_PARAM_HT_DELBA,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_htba_delba" },
+	{ IEEE80211_PARAM_CHANNEL_NOSCAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "disablescan" },
+	{ IEEE80211_PARAM_CHANNEL_NOSCAN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_disablescan" },
+	{ IEEE80211_PARAM_MUC_PROFILE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "muc_profile" },
+	{ IEEE80211_PARAM_MUC_PROFILE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "muc_profile" },
+	{ IEEE80211_PARAM_MUC_PHY_STATS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "muc_set_phystat" },
+	{ IEEE80211_PARAM_MUC_PHY_STATS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "muc_get_phystat" },
+	{ IEEE80211_PARAM_MUC_SET_PARTNUM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "muc_set_partnum" },
+	{ IEEE80211_PARAM_MUC_SET_PARTNUM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "muc_get_partnum" },
+	{ IEEE80211_PARAM_ENABLE_GAIN_ADAPT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ena_gain_adapt" },
+	{ IEEE80211_PARAM_ENABLE_GAIN_ADAPT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_gain_adapt" },
+	{ IEEE80211_PARAM_GET_RFCHIP_ID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rfchipid" },
+	{ IEEE80211_PARAM_GET_RFCHIP_ID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rfchipid" },
+	{ IEEE80211_PARAM_GET_RFCHIP_VERID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rfchip_verid" },
+	{ IEEE80211_PARAM_GET_RFCHIP_VERID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rfchip_verid" },
+	{ IEEE80211_PARAM_SHORT_GI,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "short_gi" },
+	{ IEEE80211_PARAM_SHORT_GI,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_short_gi" },
+	{ IEEE80211_PARAM_FORCE_SMPS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcesmps" },
+	{ IEEE80211_PARAM_FORCEMICERROR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcemicerror" },
+	{ IEEE80211_PARAM_ENABLECOUNTERMEASURES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "en_cmsures" },
+	{ IEEE80211_PARAM_IMPLICITBA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "implicit_ba" },
+	{ IEEE80211_PARAM_IMPLICITBA,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_implicit_ba" },
+	{ IEEE80211_PARAM_CLIENT_REMOVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "cl_remove" },
+	{ IEEE80211_PARAM_SHOWMEM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "showmem" },
+	{ IEEE80211_PARAM_SCANSTATUS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scanstatus" },
+	{ IEEE80211_PARAM_SCANSTATUS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scanstatus" },
+	{ IEEE80211_PARAM_CACSTATUS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_cacstatus" },
+	{ IEEE80211_PARAM_GLOBAL_BA_CONTROL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ba_control" },
+	{ IEEE80211_PARAM_GLOBAL_BA_CONTROL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ba_control" },
+	{ IEEE80211_PARAM_NO_SSID_ASSOC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "no_ssid_assoc" },
+	{ IEEE80211_PARAM_CONFIG_TXPOWER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "cfg_txpower" },
+	{ IEEE80211_PARAM_INITIATE_TXPOWER_TABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "init_txpower" },
+	{ IEEE80211_PARAM_CONFIG_BW_TXPOWER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "cfg_bw_power" },
+	{ IEEE80211_PARAM_CONFIG_TPC_INTERVAL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tpc_intvl" },
+	{ IEEE80211_PARAM_CONFIG_TPC_INTERVAL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tpc_interval" },
+	{ IEEE80211_PARAM_TPC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tpc" },
+	{ IEEE80211_PARAM_TPC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tpc" },
+	{ IEEE80211_PARAM_TPC_QUERY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tpc_query" },
+	{ IEEE80211_PARAM_TPC_QUERY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tpc_query" },
+	{ IEEE80211_PARAM_CONFIG_REGULATORY_TXPOWER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "cfg_reg_txpower" },
+	{ IEEE80211_PARAM_SKB_LIST_MAX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "skb_list_max" },
+	{ IEEE80211_PARAM_VAP_STATS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "vapstats" },
+	{ IEEE80211_PARAM_RATE_CTRL_FLAGS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rate_ctrl_flags" },
+	{ IEEE80211_PARAM_LDPC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_ldpc" },
+	{ IEEE80211_PARAM_LDPC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ldpc" },
+	{ IEEE80211_PARAM_DFS_FAST_SWITCH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dfs_fast_switch" },
+	{ IEEE80211_PARAM_DFS_FAST_SWITCH,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dfs_switch" },
+	{ IEEE80211_PARAM_BLACKLIST_GET,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_blacklist" },
+	{ IEEE80211_PARAM_SCAN_NO_DFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_no_dfs" },
+	{ IEEE80211_PARAM_SCAN_NO_DFS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scan_dfs" },
+	{ IEEE80211_PARAM_SAMPLE_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sample_rate" },
+	{ IEEE80211_PARAM_SAMPLE_RATE, 0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_sample_rate" },
+	{ IEEE80211_PARAM_11N_40_ONLY_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_40only_mode" },
+	{ IEEE80211_PARAM_11N_40_ONLY_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_40only_mode" },
+	{ IEEE80211_PARAM_AMPDU_DENSITY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_ampdu_dens" },
+	{ IEEE80211_PARAM_AMPDU_DENSITY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ampdu_dens" },
+	{ IEEE80211_PARAM_REGULATORY_REGION,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "region" },
+	{ IEEE80211_PARAM_REGULATORY_REGION,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_region" },
+	{ IEEE80211_PARAM_SPEC_COUNTRY_CODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "country_code" },
+	{ IEEE80211_PARAM_SPEC_COUNTRY_CODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_country_code" },
+	{ IEEE80211_PARAM_MCS_CAP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcs_cap" },
+	{ IEEE80211_PARAM_MCS_CAP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcs_cap" },
+	{ IEEE80211_PARAM_MAX_MGMT_FRAMES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_mgmtfrms" },
+	{ IEEE80211_PARAM_MAX_MGMT_FRAMES,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_max_mgtfrms" },
+	{ IEEE80211_PARAM_MCS_ODD_EVEN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcs_odd_even" },
+	{ IEEE80211_PARAM_MCS_ODD_EVEN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcs_oddeven" },
+	{ IEEE80211_PARAM_BA_MAX_WIN_SIZE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ba_max" },
+	{ IEEE80211_PARAM_BA_MAX_WIN_SIZE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ba_max" },
+	{ IEEE80211_PARAM_RESTRICTED_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tx_restrict" },
+	{ IEEE80211_PARAM_RESTRICTED_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tx_restrict" },
+	{ IEEE80211_PARAM_PHY_STATS_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mode_phy_stats" },
+	{ IEEE80211_PARAM_MIN_DWELL_TIME_ACTIVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "min_dt_act" },
+	{ IEEE80211_PARAM_MIN_DWELL_TIME_ACTIVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_min_dt_act" },
+	{ IEEE80211_PARAM_MIN_DWELL_TIME_PASSIVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "min_dt_pas" },
+	{ IEEE80211_PARAM_MIN_DWELL_TIME_PASSIVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_min_dt_pas" },
+	{ IEEE80211_PARAM_MAX_DWELL_TIME_ACTIVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_dt_act" },
+	{ IEEE80211_PARAM_MAX_DWELL_TIME_ACTIVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_max_dt_act" },
+	{ IEEE80211_PARAM_MAX_DWELL_TIME_PASSIVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_dt_pas" },
+	{ IEEE80211_PARAM_MAX_DWELL_TIME_PASSIVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_max_dt_pas" },
+#ifdef QTN_BG_SCAN
+	{ IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_ACTIVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_dt_act" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_ACTIVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_dt_act" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_PASSIVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_dt_pas" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DWELL_TIME_PASSIVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_dt_pas" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_ACTIVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_dur_act" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_ACTIVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_dur_act" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_FAST,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_dur_pf" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_FAST,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_dur_pf" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_NORMAL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_dur_pn" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_NORMAL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_dur_pn" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_SLOW,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_dur_ps" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DURATION_PASSIVE_SLOW,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_dur_ps" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_FAST,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_thr_fst" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_FAST,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_thr_fst" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_NORMAL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_thr_nor" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_THRSHLD_PASSIVE_NORMAL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_thr_nor" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DEBUG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bg_debug" },
+	{ IEEE80211_PARAM_QTN_BGSCAN_DEBUG,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bg_debug" },
+#endif /* QTN_BG_SCAN */
+	{ IEEE80211_PARAM_LEGACY_RETRY_LIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "legacy_retry" },
+	{ IEEE80211_PARAM_LEGACY_RETRY_LIMIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_legacyretry" },
+#ifdef QSCS_ENABLED
+	{ IEEE80211_PARAM_SCS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scs_set" },
+	{ IEEE80211_PARAM_SCS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "scs_get" },
+	{ IEEE80211_PARAM_SCS_DFS_REENTRY_REQUEST,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "scs_get_reent" },
+#endif /* QSCS_ENABLED */
+	{ IEEE80211_PARAM_TRAINING_COUNT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "training_count" },
+	{ IEEE80211_PARAM_DYNAMIC_AC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dynamic_ac" },
+	{ IEEE80211_PARAM_DUMP_TRIGGER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dbg_dump" },
+	{ IEEE80211_PARAM_DUMP_TCM_FD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dbg_dump_tcm_fd" },
+	{ IEEE80211_PARAM_RXCSR_ERR_ALLOW,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rxcsr_err_allow" },
+	{ IEEE80211_PARAM_STOP_FLAGS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dbg_stop" },
+	{ IEEE80211_PARAM_CHECK_FLAGS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dbg_check" },
+	{ IEEE80211_PARAM_RX_CTRL_FILTER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ctrl_filter" },
+	{ IEEE80211_PARAM_ALT_CHAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_alt_chan" },
+	{ IEEE80211_PARAM_ALT_CHAN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_alt_chan" },
+	{ IEEE80211_PARAM_QTN_BCM_WAR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bcm_fixup" },
+	{ IEEE80211_PARAM_GI_SELECT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "gi_select" },
+	{ IEEE80211_PARAM_GI_SELECT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_gi_select" },
+	{ IEEE80211_PARAM_FIXED_SGI,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "gi_fixed" },
+	{ IEEE80211_PARAM_FIXED_SGI,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_gi_fixed" },
+	{ IEEE80211_PARAM_FIXED_BW,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bw_fixed" },
+	{ IEEE80211_PARAM_FIXED_BW,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bw_fixed" },
+	{ IEEE80211_PARAM_LDPC_ALLOW_NON_QTN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_ldpc_nonqtn" },
+	{ IEEE80211_PARAM_LDPC_ALLOW_NON_QTN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ldpc_nonqtn" },
+	{ IEEE80211_PARAM_FWD_UNKNOWN_MC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "fwd_unknown_mc" },
+	{ IEEE80211_PARAM_FWD_UNKNOWN_MC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_unknown_mc" },
+	{ IEEE80211_PARAM_BCST_4,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reliable_bc" },
+	{ IEEE80211_PARAM_BCST_4,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_reliable_bc" },
+	{ IEEE80211_PARAM_AP_FWD_LNCB,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_fwd_lncb" },
+	{ IEEE80211_PARAM_AP_FWD_LNCB,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ap_fwd_lncb" },
+	{ IEEE80211_PARAM_PPPC_SELECT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pppc" },
+	{ IEEE80211_PARAM_PPPC_SELECT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_pppc" },
+	{ IEEE80211_PARAM_PPPC_STEP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pppc_step" },
+	{ IEEE80211_PARAM_PPPC_STEP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_pppc_step" },
+	{ IEEE80211_PARAM_TEST_LNCB,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "test_lncb" },
+	{ IEEE80211_PARAM_STBC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_stbc" },
+	{ IEEE80211_PARAM_STBC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_stbc" },
+	{ IEEE80211_PARAM_RTS_CTS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_rtscts" },
+	{ IEEE80211_PARAM_RTS_CTS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rtscts" },
+	{ IEEE80211_PARAM_TX_QOS_SCHED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_txqos_sched" },
+	{ IEEE80211_PARAM_TX_QOS_SCHED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_txqos_sched" },
+	{ IEEE80211_PARAM_GET_DFS_CCE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dfs_cce" },
+#ifdef QSCS_ENABLED
+	{ IEEE80211_PARAM_GET_SCS_CCE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scs_cce" },
+#endif /* QSCS_ENABLED */
+	{ IEEE80211_PARAM_RX_AGG_TIMEOUT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rx_agg_to" },
+	{ IEEE80211_PARAM_RX_AGG_TIMEOUT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rx_agg_to" },
+	{ IEEE80211_PARAM_FORCE_MUC_HALT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "muc_halt" },
+	{ IEEE80211_PARAM_FORCE_ENABLE_TRIGGERS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tr_trig" },
+	{ IEEE80211_PARAM_FORCE_MUC_TRACE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "muc_tb" },
+	{ IEEE80211_PARAM_BK_BITMAP_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_bkbitmap" },
+	{ IEEE80211_PARAM_MUC_FLAGS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "muc_flags" },
+	{ IEEE80211_PARAM_HT_NSS_CAP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_ht_nss_cap" },
+	{ IEEE80211_PARAM_HT_NSS_CAP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ht_nss_cap" },
+	{ IEEE80211_PARAM_VHT_NSS_CAP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vht_nss_cap" },
+	{ IEEE80211_PARAM_VHT_NSS_CAP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vht_nss_cap" },
+	{ IEEE80211_PARAM_UNKNOWN_DEST_ARP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "unknown_dst_arp" },
+	{ IEEE80211_PARAM_UNKNOWN_DEST_FWD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "unknown_dst_fwd" },
+	{ IEEE80211_PARAM_ASSOC_HISTORY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reset_assoc_his" },
+	{ IEEE80211_PARAM_CSW_RECORD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "clean_csw" },
+
+	{ IEEE80211_PARAM_UPDATE_MU_GRP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_grp_upd" },
+	{ IEEE80211_PARAM_FIXED_11AC_MU_TX_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_tx_rate_set" },
+	{ IEEE80211_PARAM_MU_DEBUG_LEVEL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_dbg_lvl_set" },
+	{ IEEE80211_PARAM_MU_ENABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_enable_set" },
+	{ IEEE80211_PARAM_MU_ENABLE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "mu_enable_get" },
+	{ IEEE80211_PARAM_INST_MU_GRP_QMAT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_grp_qmt_inst" },
+	{ IEEE80211_PARAM_DELE_MU_GRP_QMAT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_grp_qmt_del" },
+	{ IEEE80211_PARAM_GET_MU_GRP_QMAT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "mu_grp_qmt_get" },
+	{ IEEE80211_PARAM_EN_MU_GRP_QMAT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_grp_qmt_ena" },
+	{ IEEE80211_PARAM_DIS_MU_GRP_QMAT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_grp_qmt_dis" },
+	{ IEEE80211_PARAM_MU_DEBUG_FLAG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_dbg_flg_set" },
+	{ IEEE80211_PARAM_MU_AIRTIME_PADDING,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_airtime_pad" },
+	{ IEEE80211_PARAM_DSP_DEBUG_LEVEL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dsp_dbg_lvl_set" },
+	{ IEEE80211_PARAM_DSP_DEBUG_FLAG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dsp_dbg_flg_set" },
+	{ IEEE80211_PARAM_SET_CRC_ERR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_crc_error" },
+	{ IEEE80211_PARAM_MU_SWITCH_USR_POS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_sw_usr_pos" },
+	{ IEEE80211_PARAM_SET_GRP_SND_PERIOD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_grp_snd_per" },
+	{ IEEE80211_PARAM_SET_PREC_SND_PERIOD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_prec_snd_per" },
+	{ IEEE80211_PARAM_SET_MU_RANK_TOLERANCE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_rank_toleran" },
+	{ IEEE80211_PARAM_DSP_PRECODING_ALGORITHM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dsp_prc_alg_set" },
+	{ IEEE80211_PARAM_DSP_RANKING_ALGORITHM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dsp_rnk_alg_set" },
+	{ IEEE80211_PARAM_MU_USE_EQ,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_set_use_eq" },
+	{ IEEE80211_PARAM_MU_USE_EQ,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "mu_get_use_eq" },
+	{ IEEE80211_PARAM_MU_AMSDU_SIZE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mu_amsdu_size" },
+#if defined(QBMPS_ENABLE)
+	{ IEEE80211_PARAM_STA_BMPS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_bmps" },
+	{ IEEE80211_PARAM_STA_BMPS,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bmps" },
+#endif
+
+	{ IEEE80211_IOCTL_GETBLOCK,
+	  0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | IW_PRIV_BLOCK_DATASIZE, "" },
+	/* sub-ioctl */
+	{ IEEE80211_PARAM_ASSOC_HISTORY,
+	  0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | IW_PRIV_BLOCK_DATASIZE, "assoc_history" },
+	{ IEEE80211_PARAM_CSW_RECORD,
+	  0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | IW_PRIV_BLOCK_DATASIZE, "get_csw_record" },
+	{ IEEE80211_PARAM_RESTRICT_RTS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "restrict_rts" },
+	{ IEEE80211_PARAM_RESTRICT_RTS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rstrict_rts" },
+	{ IEEE80211_PARAM_RESTRICT_LIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "restrict_max" },
+	{ IEEE80211_PARAM_RESTRICT_LIMIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rstrict_max" },
+	{ IEEE80211_PARAM_SWRETRY_AGG_MAX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "swret_agg" },
+	{ IEEE80211_PARAM_SWRETRY_AGG_MAX,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_swret_agg" },
+	{ IEEE80211_PARAM_SWRETRY_NOAGG_MAX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "swret_noagg" },
+	{ IEEE80211_PARAM_SWRETRY_NOAGG_MAX,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_swret_noagg" },
+	{ IEEE80211_PARAM_CCA_PRI,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_cca_pri" },
+	{ IEEE80211_PARAM_CCA_SEC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_cca_sec" },
+	{ IEEE80211_PARAM_CCA_SEC40,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_cca_sec40" },
+	{ IEEE80211_PARAM_CCA_FIXED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_cca_fixed" },
+	{ IEEE80211_PARAM_CCA_FIXED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_cca_fixed" },
+	{ IEEE80211_PARAM_PWR_SAVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pm" },
+	{ IEEE80211_PARAM_PWR_SAVE,
+	  0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | IW_PRIV_BLOCK_DATASIZE, "get_pm" },
+	{ IEEE80211_PARAM_PS_CMD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ps_cmd" },
+	{ IEEE80211_PARAM_FAST_REASSOC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "fast_reassoc" },
+	{ IEEE80211_PARAM_TEST_TRAFFIC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "test_traffic" },
+	{ IEEE80211_PARAM_QCAT_STATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "qcat_state" },
+	{ IEEE80211_PARAM_RALG_DBG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ralg_dbg" },
+	{ IEEE80211_PARAM_CSA_FLAG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "csa_flag" },
+	{ IEEE80211_PARAM_CSA_FLAG,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_csa_flag" },
+	{ IEEE80211_PARAM_DEF_MATRIX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "def_matrix" },
+	{ IEEE80211_PARAM_DEF_MATRIX,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_def_matrix" },
+	{ IEEE80211_PARAM_TUNEPD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tunepd" },
+	{ IEEE80211_PARAM_TUNEPD_DONE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tunepd_done" },
+	{ IEEE80211_PARAM_RTSTHRESHOLD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rtshold" },
+	{ IEEE80211_PARAM_CARRIER_ID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "carrier_id" },
+	{ IEEE80211_PARAM_CARRIER_ID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_carrier_id" },
+	{ IEEE80211_PARAM_BA_THROT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ba_throt" },
+	{ IEEE80211_PARAM_TX_QUEUING_ALG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "queuing_alg" },
+	{ IEEE80211_PARAM_TX_QUEUING_ALG,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_queuing_alg" },
+	{ IEEE80211_PARAM_WME_THROT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wme_throt" },
+	{ IEEE80211_PARAM_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vht" },
+	{ IEEE80211_PARAM_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vht" },
+	{ IEEE80211_PARAM_ENABLE_11AC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "enable_11ac" },
+	{ IEEE80211_PARAM_ENABLE_11AC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_11ac_status" },
+	{ IEEE80211_PARAM_FIXED_11AC_TX_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_11ac_mcs" },
+	{ IEEE80211_PARAM_FIXED_11AC_TX_RATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_11ac_mcs" },
+	{ IEEE80211_PARAM_FIXED_11AC_MU_TX_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_mu_mcs" },
+	{ IEEE80211_PARAM_AUC_RX_DBG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "auc_rx_dbg" },
+	{ IEEE80211_PARAM_AUC_TX_DBG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "auc_tx_dbg" },
+	{ IEEE80211_PARAM_TX_MAXMPDU,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tx_maxmpdu" },
+	{ IEEE80211_PARAM_TX_MAXMPDU,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tx_maxmpdu" },
+	{ IEEE80211_PARAM_RX_ACCELERATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rx_accel" },
+	{ IEEE80211_PARAM_RX_ACCEL_LOOKUP_SA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rx_accel_lu_sa" },
+	{ IEEE80211_PARAM_TACMAP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tacmap" },
+	{ IEEE80211_PARAM_VAP_PRI,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "vap_pri" },
+	{ IEEE80211_PARAM_VAP_PRI,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vap_pri" },
+	{ IEEE80211_PARAM_AIRFAIR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "airfair" },
+	{ IEEE80211_PARAM_AIRFAIR,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_airfair" },
+	{ IEEE80211_PARAM_AUC_QOS_SCH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "auc_qos_sch" },
+	{ IEEE80211_PARAM_VAP_PRI_WME,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "vap_pri_wme" },
+	{ IEEE80211_PARAM_EMI_POWER_SWITCHING,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "emi_pwr_sw" },
+	{ IEEE80211_PARAM_EMI_POWER_SWITCHING,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_emi_pwr" },
+	{ IEEE80211_PARAM_AGGRESSIVE_AGG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "aggressive_agg" },
+	{ IEEE80211_PARAM_TQEW_DESCR_LIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tqew_descrs" },
+	{ IEEE80211_PARAM_TQEW_DESCR_LIMIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tqew_descrs" },
+	{ IEEE80211_PARAM_BR_IP_ADDR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "br_ip" },
+	{ IEEE80211_PARAM_GENPCAP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "genpcap" },
+	{ IEEE80211_PARAM_TDLS_DISC_INT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tdls_disc_int" },
+	{ IEEE80211_PARAM_TDLS_PATH_SEL_WEIGHT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tdls_path_wgt" },
+	{ IEEE80211_PARAM_TDLS_MIN_RSSI,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tdls_min_rssi" },
+	{ IEEE80211_PARAM_TDLS_SWITCH_INTS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tdls_swit_ints" },
+	{ IEEE80211_PARAM_TDLS_RATE_WEIGHT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tdls_rate_wgt" },
+	{ IEEE80211_PARAM_TDLS_OFF_CHAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tdls_offchan" },
+	{ IEEE80211_PARAM_TDLS_OFF_CHAN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tdls_offchan" },
+	{ IEEE80211_PARAM_TDLS_OFF_CHAN_BW,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tdls_offchbw" },
+	{ IEEE80211_PARAM_TDLS_OFF_CHAN_BW,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_tdls_offchbw" },
+	{ IEEE80211_PARAM_OCAC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ocac_set" },
+	{ IEEE80211_PARAM_OCAC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "ocac_get" },
+	{ IEEE80211_PARAM_SDFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sdfs_set" },
+	{ IEEE80211_PARAM_SDFS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "sdfs_get" },
+	{ IEEE80211_PARAM_DEACTIVE_CHAN_PRI,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "deact_chan_pri" },
+	{ IEEE80211_PARAM_RESTRICT_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "restrict_rt" },
+	{ IEEE80211_PARAM_RESTRICT_RATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_restrict_rt" },
+	{ IEEE80211_PARAM_TRAINING_START,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "training_restart" },
+    { IEEE80211_PARAM_VCO_LOCK_DETECT_MODE,
+      IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vco_lock" },
+    { IEEE80211_PARAM_VCO_LOCK_DETECT_MODE,
+      0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vco_lock" },
+	{ IEEE80211_PARAM_CONFIG_PMF,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pmf_set" },
+	{ IEEE80211_PARAM_CONFIG_PMF,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "pmf_get" },
+	{ IEEE80211_PARAM_SCAN_CANCEL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_cancel" },
+	{ IEEE80211_PARAM_AUTO_CCA_ENABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "auto_cca_enable" },
+	{ IEEE80211_PARAM_AUTO_CCA_ENABLE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_auto_cca_en" },
+	{ IEEE80211_PARAM_AUTO_CCA_PARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "auto_cca_param" },
+	{ IEEE80211_PARAM_AUTO_CCA_DEBUG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "auto_cca_dbg" },
+	{ IEEE80211_PARAM_AUTO_CS_ENABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "autocs_enable" },
+	{ IEEE80211_PARAM_AUTO_CS_PARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "autocs_param" },
+	{ IEEE80211_PARAM_INTRA_BSS_ISOLATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "intra_bss" },
+	{ IEEE80211_PARAM_INTRA_BSS_ISOLATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_intra_bss" },
+	{ IEEE80211_PARAM_BSS_ISOLATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bss_isolate" },
+	{ IEEE80211_PARAM_BSS_ISOLATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bss_iso" },
+	{ IEEE80211_PARAM_BF_RX_STS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bf_rxsts" },
+	{ IEEE80211_PARAM_BF_RX_STS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bf_rxsts" },
+	{ IEEE80211_PARAM_PC_OVERRIDE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pc_override" },
+	{ IEEE80211_PARAM_PC_OVERRIDE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_pc_override" },
+	{ IEEE80211_PARAM_WOWLAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wowlan_set" },
+	{ IEEE80211_PARAM_WOWLAN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "wowlan_get" },
+	{ IEEE80211_PARAM_RX_AMSDU_ENABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rx_amsdu" },
+	{ IEEE80211_PARAM_RX_AMSDU_ENABLE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rx_amsdu" },
+	{ IEEE80211_PARAM_DISASSOC_REASON,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "disassoc_reason" },
+	{ IEEE80211_PARAM_PEER_RTS_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "peer_rts" },
+	{ IEEE80211_PARAM_PEER_RTS_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_peer_rts" },
+	{ IEEE80211_PARAM_DYN_WMM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dyn_wmm" },
+	{ IEEE80211_PARAM_DYN_WMM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dyn_wmm" },
+	{ IEEE80211_PARAM_BA_SETUP_ENABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rssi_for_ba_set" },
+        { IEEE80211_PARAM_BB_PARAM,
+          IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_bb_param" },
+        { IEEE80211_PARAM_BB_PARAM,
+          0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bb_param" },
+	{ IEEE80211_PARAM_VAP_TX_AMSDU,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "vap_txamsdu" },
+	{ IEEE80211_PARAM_VAP_TX_AMSDU,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vap_txamsdu" },
+	{ IEEE80211_PARAM_VAP_TX_AMSDU_11N,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txamsdu_11n" },
+	{ IEEE80211_PARAM_VAP_TX_AMSDU_11N,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_txamsdu_11n" },
+	{ IEEE80211_PARAM_CS_THRESHOLD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "cs_thres" },
+	{ IEEE80211_PARAM_CS_THRESHOLD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_cs_thres" },
+	{ IEEE80211_PARAM_CS_THRESHOLD_DBM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "cs_thres_dbm" },
+	{ IEEE80211_PARAM_SCAN_RESULTS_CHECK_INV,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scan_inv" },
+	{ IEEE80211_PARAM_SCAN_RESULTS_CHECK_INV,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_scan_inv" },
+	{ IEEE80211_PARAM_SFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sfs" },
+	{ IEEE80211_PARAM_INST_1SS_DEF_MAT_ENABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "def_1ss_mat_en" },
+	{ IEEE80211_PARAM_INST_1SS_DEF_MAT_THRESHOLD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "def_1ss_mat_th" },
+	{ IEEE80211_PARAM_SWFEAT_DISABLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "swfeat_disable"},
+	{ IEEE80211_PARAM_FLUSH_SCAN_ENTRY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "flush_scan" },
+	{ IEEE80211_PARAM_SCAN_OPCHAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_scan_opch" },
+	{ IEEE80211_PARAM_SCAN_OPCHAN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scan_opch" },
+	{ IEEE80211_PARAM_DUMP_PPPC_TX_SCALE_BASES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dump_scale_base" },
+	{ IEEE80211_PARAM_GLOBAL_FIXED_TX_SCALE_INDEX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tx_scale_index" },
+	{ IEEE80211_PARAM_QTN_HAL_PM_CORRUPT_DEBUG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pm_corrupt_debug"},
+	{ IEEE80211_PARAM_L2_EXT_FILTER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "l2_ext_filter" },
+	{ IEEE80211_PARAM_L2_EXT_FILTER,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_l2_ext_filt" },
+        { IEEE80211_PARAM_ENABLE_RX_OPTIM_STATS,
+          IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_optim_stats" },
+	{ IEEE80211_PARAM_SET_UNICAST_QUEUE_NUM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "unicast_qcnt" },
+	{ IEEE80211_PARAM_OBSS_EXEMPT_REQ,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "obss_exe_req" },
+	{ IEEE80211_PARAM_OBSS_TRIGG_SCAN_INT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_obss_int" },
+	{ IEEE80211_PARAM_OBSS_TRIGG_SCAN_INT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_obss_int" },
+	{ IEEE80211_PARAM_ALLOW_VHT_TKIP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vht_tkip" },
+	{ IEEE80211_PARAM_ALLOW_VHT_TKIP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vht_tkip" },
+	{ IEEE80211_PARAM_VHT_2_4GHZ,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vht_24g" },
+	{ IEEE80211_PARAM_VHT_2_4GHZ,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vht_24g" },
+	{ IEEE80211_PARAM_BEACONING_SCHEME,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bcn_scheme" },
+	{ IEEE80211_PARAM_BEACONING_SCHEME,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bcn_scheme" },
+	{ IEEE80211_PARAM_40MHZ_INTOLERANT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "40mhz_intol" },
+	{ IEEE80211_PARAM_40MHZ_INTOLERANT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_40mhz_intol" },
+	{ IEEE80211_PARAM_VAP_STATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "vap_state" },
+	{ IEEE80211_PARAM_VAP_STATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vap_state" },
+	{ IEEE80211_PARAM_ANTENNA_USAGE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_antenna_usg" },
+	{ IEEE80211_PARAM_ANTENNA_USAGE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_antenna_usg" },
+	{ IEEE80211_PARAM_SET_RTS_BW_DYN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_rts_bw" },
+	{ IEEE80211_PARAM_SET_RTS_BW_DYN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rts_bw" },
+	{ IEEE80211_PARAM_SET_DUP_RTS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_dup_rts" },
+	{ IEEE80211_PARAM_SET_DUP_RTS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dup_rts" },
+	{ IEEE80211_PARAM_SET_CTS_BW,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_cts_bw" },
+	{ IEEE80211_PARAM_SET_CTS_BW,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_cts_bw" },
+	{ IEEE80211_PARAM_VHT_MCS_CAP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vht_mcs_cap" },
+	{ IEEE80211_PARAM_VHT_MCS_CAP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vht_mcs_cap" },
+	{ IEEE80211_PARAM_VHT_OPMODE_NOTIF,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vht_opmntf" },
+	{ IEEE80211_PARAM_VHT_OPMODE_NOTIF,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vht_opmntf" },
+	{ IEEE80211_PARAM_USE_NON_HT_DUPLICATE_MU,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_mu_non_ht" },
+	{ IEEE80211_PARAM_USE_NON_HT_DUPLICATE_MU,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mu_non_ht" },
+	{ IEEE80211_PARAM_BG_PROTECT,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "11g_protect" },
+	{ IEEE80211_PARAM_BG_PROTECT, 0,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_11g_protect" },
+	{ IEEE80211_PARAM_MU_NDPA_BW_SIGNALING_SUPPORT,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_rx_bws_ndpa" },
+	{ IEEE80211_PARAM_MU_NDPA_BW_SIGNALING_SUPPORT,
+	 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rx_bws_ndpa" },
+	{ IEEE80211_PARAM_RESTRICT_WLAN_IP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_rstrict_wip" },
+	{ IEEE80211_PARAM_RESTRICT_WLAN_IP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rstrict_wip" },
+	{ IEEE80211_PARAM_MC_TO_UC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_mc_to_uc" },
+	{ IEEE80211_PARAM_MC_TO_UC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mc_to_uc" },
+	{ IEEE80211_PARAM_HOSTAP_STARTED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostap_state" },
+	{ IEEE80211_PARAM_HOSTAP_STARTED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_hostap_state" },
+	{ IEEE80211_PARAM_WPA_STARTED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa_state" },
+	{ IEEE80211_PARAM_WPA_STARTED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wpa_state" },
+	{ IEEE80211_PARAM_EP_STATUS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ep_status" },
+	{ IEEE80211_PARAM_MAX_BCAST_PPS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_bcast_pps" },
+	{ IEEE80211_PARAM_BSS_GROUP_ID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_ssid_grpid" },
+	{ IEEE80211_PARAM_BSS_GROUP_ID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ssid_grpid" },
+	{ IEEE80211_PARAM_BSS_ASSOC_RESERVE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_assoc_rsrv" },
+	{ IEEE80211_PARAM_BSS_ASSOC_RESERVE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_assoc_rsrv" },
+	{ IEEE80211_PARAM_MAX_BOOT_CAC_DURATION,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_boot_cac" },
+	{ IEEE80211_PARAM_RX_BAR_SYNC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_rx_bar_sync" },
+	{ IEEE80211_PARAM_RX_BAR_SYNC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rx_bar_sync" },
+	{ IEEE80211_PARAM_GET_REG_DOMAIN_IS_EU,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_reg_is_eu" },
+	{ IEEE80211_PARAM_AUC_TX_AGG_DURATION,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "auc_tx_agg_dur" },
+	{ IEEE80211_PARAM_GET_CHAN_AVAILABILITY_STATUS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "chan_avail_stat" },
+	{ IEEE80211_PARAM_STOP_ICAC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1 , 0, "stop_icac" },
+	{ IEEE80211_PARAM_STA_DFS_STRICT_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sta_dfs_strict" },
+	{ IEEE80211_PARAM_STA_DFS_STRICT_MEASUREMENT_IN_CAC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sta_dfs_msr_cac" },
+	{ IEEE80211_PARAM_STA_DFS_STRICT_TX_CHAN_CLOSE_TIME,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sta_dfs_end_tx" },
+	{ IEEE80211_PARAM_NEIGHBORHOOD_THRSHD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,	"set_nbr_thrshd" },
+	{ IEEE80211_PARAM_NEIGHBORHOOD_TYPE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,	"get_nbr_type" },
+	{ IEEE80211_PARAM_NEIGHBORHOOD_COUNT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,	"get_nbr_cnt" },
+	{ IEEE80211_PARAM_RADAR_NONOCCUPY_PERIOD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "radar_nop_time" },
+	{ IEEE80211_PARAM_RADAR_NONOCCUPY_PERIOD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gt_radar_nop" },
+	{ IEEE80211_PARAM_DFS_CSA_CNT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dfs_csa_cnt" },
+	{ IEEE80211_PARAM_DFS_CSA_CNT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dfs_csa_cnt" },
+	{ IEEE80211_PARAM_STA_DFS_STRICT_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gt_stdfs_strict" },
+	{ IEEE80211_PARAM_STA_DFS_STRICT_MEASUREMENT_IN_CAC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gt_stdfs_msrcac" },
+	{ IEEE80211_PARAM_STA_DFS_STRICT_TX_CHAN_CLOSE_TIME,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gt_stdfs_end_tx" },
+	{ IEEE80211_PARAM_COEX_20_40_SUPPORT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "20_40_bss_coex" },
+	{ IEEE80211_PARAM_MIN_CAC_PERIOD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_min_cac" },
+#ifdef CONFIG_NAC_MONITOR
+	{ IEEE80211_PARAM_NAC_MONITOR_MODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,	"nac_mode" },
+	{ IEEE80211_PARAM_NAC_MONITOR_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,	"get_nac_mode" },
+#endif
+	{ IEEE80211_PARAM_DEVICE_MODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dev_mode" },
+	{ IEEE80211_PARAM_MAX_DEVICE_BW,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_max_bw" },
+	{ IEEE80211_PARAM_MAX_DEVICE_BW,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_max_bw" },
+	{ IEEE80211_PARAM_BW_AUTO_SELECT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_bw_autosel" },
+	{ IEEE80211_PARAM_BW_AUTO_SELECT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bw_autosel" },
+        { IEEE80211_PARAM_IGNORE_ICAC_SELECTION,
+          IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1 , 0, "ignr_icac_sel" },
+        { IEEE80211_PARAM_DFS_CHANS_AVAILABLE_FOR_DFS_REENTRY,
+          0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "dfs_chan_avail" },
+
+	{ IEEE80211_PARAM_CUR_CHAN_CHECK_REQUIRED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "check_cur_chan" },
+#ifdef CONFIG_QHOP
+	{ IEEE80211_PARAM_RBS_MBS_ALLOW_TX_FRMS_IN_CAC,
+          IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rbs_dfs_msr_cac" },
+        { IEEE80211_PARAM_RBS_MBS_ALLOW_TX_FRMS_IN_CAC,
+          0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "g_rbsdfs_msrcac" },
+	{ IEEE80211_PARAM_RBS_DFS_TX_CHAN_CLOSE_TIME,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rbs_dfs_end_tx" },
+	{ IEEE80211_PARAM_RBS_DFS_TX_CHAN_CLOSE_TIME,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "g_rbsdfs_end_tx" },
+#endif
+	{ IEEE80211_PARAM_80211K_NEIGH_REPORT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "neigh_repo" },
+	{ IEEE80211_PARAM_80211K_NEIGH_REPORT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_neigh_repo" },
+	{ IEEE80211_PARAM_DYNAMIC_SIFS_TIMING,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_dyn_sifs" },
+	{ IEEE80211_PARAM_WEATHERCHAN_CAC_ALLOWED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wea_cac_en" },
+	{ IEEE80211_PARAM_WEATHERCHAN_CAC_ALLOWED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wea_cac_en" },
+	{ IEEE80211_PARAM_VOPT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_vopt" },
+	{ IEEE80211_PARAM_VOPT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_vopt" },
+	{ IEEE80211_PARAM_BB_DEAFNESS_WAR_EN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bbdf_war_en" },
+	{ IEEE80211_PARAM_BB_DEAFNESS_WAR_EN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bbdfwar_en" },
+	{ IEEE80211_PARAM_COC_MOVE_TO_NONDFS_CHANNEL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "coc_mv_to_ndfs" },
+	{ IEEE80211_PARAM_COC_MOVE_TO_NONDFS_CHANNEL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "g_coc_mv_to_ndfs" },
+	{ IEEE80211_PARAM_80211V_BTM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bss_tm" },
+	{ IEEE80211_PARAM_80211V_BTM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bss_tm" },
+	{ IEEE80211_PARAM_MOBILITY_DOMAIN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mdid" },
+	{ IEEE80211_PARAM_MOBILITY_DOMAIN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mdid" },
+	{ IEEE80211_PARAM_FT_OVER_DS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ft_over_ds" },
+	{ IEEE80211_PARAM_FT_OVER_DS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ft_over_ds" },
+	{ IEEE80211_PARAM_SHORT_RETRY_LIMIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_sretry_lmt" },
+	{ IEEE80211_PARAM_LONG_RETRY_LIMIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_lretry_lmt" },
+#ifdef PLATFORM_QFDR
+	{ IEEE80211_PARAM_REJECT_AUTH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reject_auth" },
+	{ IEEE80211_PARAM_SCAN_ONLY_FREQ,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_only_freq" },
+#endif
+	{ IEEE80211_PARAM_FIX_LEGACY_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "fix_legacy_rate" },
+};
+
+/*
+ * Standard Wireless Extensions handler table, indexed by WE ioctl
+ * ordinal (SIOCSIWCOMMIT .. SIOCGIWENCODEEXT).  NULL entries are
+ * ioctls that are unused here or serviced by generic kernel code;
+ * "-- hole --" marks gaps in the WE ioctl numbering.  Order must
+ * match the WE ioctl numbering exactly.
+ */
+static const iw_handler ieee80211_handlers[] = {
+	(iw_handler) NULL,				/* SIOCSIWCOMMIT */
+	(iw_handler) ieee80211_ioctl_giwname,		/* SIOCGIWNAME */
+	(iw_handler) NULL,				/* SIOCSIWNWID */
+	(iw_handler) NULL,				/* SIOCGIWNWID */
+	(iw_handler) ieee80211_ioctl_siwfreq,		/* SIOCSIWFREQ */
+	(iw_handler) ieee80211_ioctl_giwfreq,		/* SIOCGIWFREQ */
+	(iw_handler) ieee80211_ioctl_siwmode,		/* SIOCSIWMODE */
+	(iw_handler) ieee80211_ioctl_giwmode,		/* SIOCGIWMODE */
+	(iw_handler) ieee80211_ioctl_siwsens,		/* SIOCSIWSENS */
+	(iw_handler) ieee80211_ioctl_giwsens,		/* SIOCGIWSENS */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWRANGE */
+	(iw_handler) ieee80211_ioctl_giwrange,		/* SIOCGIWRANGE */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWPRIV */
+	(iw_handler) NULL /* kernel code */,		/* SIOCGIWPRIV */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWSTATS */
+	(iw_handler) NULL /* kernel code */,		/* SIOCGIWSTATS */
+	(iw_handler) ieee80211_ioctl_setspy,		/* SIOCSIWSPY */
+	(iw_handler) ieee80211_ioctl_getspy,		/* SIOCGIWSPY */
+	(iw_handler) ieee80211_ioctl_setthrspy,		/* SIOCSIWTHRSPY */
+	(iw_handler) ieee80211_ioctl_getthrspy,		/* SIOCGIWTHRSPY */
+	(iw_handler) ieee80211_ioctl_siwap,		/* SIOCSIWAP */
+	(iw_handler) ieee80211_ioctl_giwap,		/* SIOCGIWAP */
+#ifdef SIOCSIWMLME
+	(iw_handler) ieee80211_ioctl_siwmlme,		/* SIOCSIWMLME */
+#else
+	(iw_handler) NULL,				/* -- hole -- */
+#endif
+	(iw_handler) ieee80211_ioctl_iwaplist,		/* SIOCGIWAPLIST */
+#ifdef SIOCGIWSCAN
+	(iw_handler) ieee80211_ioctl_siwscan,		/* SIOCSIWSCAN */
+	(iw_handler) ieee80211_ioctl_giwscan,		/* SIOCGIWSCAN */
+#else
+	(iw_handler) NULL,				/* SIOCSIWSCAN */
+	(iw_handler) NULL,				/* SIOCGIWSCAN */
+#endif /* SIOCGIWSCAN */
+	(iw_handler) ieee80211_ioctl_siwessid,		/* SIOCSIWESSID */
+	(iw_handler) ieee80211_ioctl_giwessid,		/* SIOCGIWESSID */
+	(iw_handler) ieee80211_ioctl_siwnickn,		/* SIOCSIWNICKN */
+	(iw_handler) ieee80211_ioctl_giwnickn,		/* SIOCGIWNICKN */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) ieee80211_ioctl_siwrate,		/* SIOCSIWRATE */
+	(iw_handler) ieee80211_ioctl_giwrate,		/* SIOCGIWRATE */
+	(iw_handler) ieee80211_ioctl_siwrts,		/* SIOCSIWRTS */
+	(iw_handler) ieee80211_ioctl_giwrts,		/* SIOCGIWRTS */
+	(iw_handler) ieee80211_ioctl_siwfrag,		/* SIOCSIWFRAG */
+	(iw_handler) ieee80211_ioctl_giwfrag,		/* SIOCGIWFRAG */
+	(iw_handler) ieee80211_ioctl_siwtxpow,		/* SIOCSIWTXPOW */
+	(iw_handler) ieee80211_ioctl_giwtxpow,		/* SIOCGIWTXPOW */
+	(iw_handler) ieee80211_ioctl_siwretry,		/* SIOCSIWRETRY */
+	(iw_handler) ieee80211_ioctl_giwretry,		/* SIOCGIWRETRY */
+	(iw_handler) ieee80211_ioctl_siwencode,		/* SIOCSIWENCODE */
+	(iw_handler) ieee80211_ioctl_giwencode,		/* SIOCGIWENCODE */
+	(iw_handler) ieee80211_ioctl_siwpower,		/* SIOCSIWPOWER */
+	(iw_handler) ieee80211_ioctl_giwpower,		/* SIOCGIWPOWER */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) ieee80211_ioctl_siwgenie,		/* SIOCSIWGENIE */
+	(iw_handler) ieee80211_ioctl_giwgenie,		/* SIOCGIWGENIE */
+	(iw_handler) ieee80211_ioctl_siwauth,		/* SIOCSIWAUTH */
+	(iw_handler) ieee80211_ioctl_giwauth,		/* SIOCGIWAUTH */
+	(iw_handler) ieee80211_ioctl_siwencodeext,	/* SIOCSIWENCODEEXT */
+	(iw_handler) ieee80211_ioctl_giwencodeext,	/* SIOCGIWENCODEEXT */
+};
+
+/*
+ * Private (driver-specific) WE handler table, indexed by the offset
+ * from SIOCIWFIRSTPRIV.  Slot order is part of the userspace ABI
+ * (iwpriv tools select handlers by ordinal) -- do not reorder.
+ */
+static const iw_handler ieee80211_priv_handlers[] = {
+	(iw_handler) ieee80211_ioctl_setparam,		/* SIOCIWFIRSTPRIV+0 */
+	(iw_handler) ieee80211_ioctl_getparam,		/* SIOCIWFIRSTPRIV+1 */
+	(iw_handler) ieee80211_ioctl_setmode,		/* SIOCIWFIRSTPRIV+2 */
+	(iw_handler) ieee80211_ioctl_getmode,		/* SIOCIWFIRSTPRIV+3 */
+	(iw_handler) ieee80211_ioctl_setwmmparams,	/* SIOCIWFIRSTPRIV+4 */
+	(iw_handler) ieee80211_ioctl_getwmmparams,	/* SIOCIWFIRSTPRIV+5 */
+	(iw_handler) ieee80211_ioctl_setchanlist,	/* SIOCIWFIRSTPRIV+6 */
+	(iw_handler) ieee80211_ioctl_getchanlist,	/* SIOCIWFIRSTPRIV+7 */
+	(iw_handler) ieee80211_ioctl_chanswitch,	/* SIOCIWFIRSTPRIV+8 */
+	(iw_handler) ieee80211_ioctl_getappiebuf,	/* SIOCIWFIRSTPRIV+9 */
+	(iw_handler) ieee80211_ioctl_setappiebuf,	/* SIOCIWFIRSTPRIV+10 */
+	(iw_handler) ieee80211_ioctl_getscanresults,	/* SIOCIWFIRSTPRIV+11 */
+	(iw_handler) ieee80211_ioctl_setfilter,		/* SIOCIWFIRSTPRIV+12 */
+	(iw_handler) ieee80211_ioctl_getchaninfo,	/* SIOCIWFIRSTPRIV+13 */
+	(iw_handler) ieee80211_ioctl_setoptie,		/* SIOCIWFIRSTPRIV+14 */
+	(iw_handler) ieee80211_ioctl_getoptie,		/* SIOCIWFIRSTPRIV+15 */
+	(iw_handler) ieee80211_ioctl_setmlme,		/* SIOCIWFIRSTPRIV+16 */
+	(iw_handler) ieee80211_ioctl_radar,		/* SIOCIWFIRSTPRIV+17 */
+	(iw_handler) ieee80211_ioctl_setkey,		/* SIOCIWFIRSTPRIV+18 */
+	(iw_handler) ieee80211_ioctl_postevent,		/* SIOCIWFIRSTPRIV+19 */
+	(iw_handler) ieee80211_ioctl_delkey,		/* SIOCIWFIRSTPRIV+20 */
+	(iw_handler) ieee80211_ioctl_txeapol,		/* SIOCIWFIRSTPRIV+21 */
+	(iw_handler) ieee80211_ioctl_addmac,		/* SIOCIWFIRSTPRIV+22 */
+	(iw_handler) ieee80211_ioctl_startcca,		/* SIOCIWFIRSTPRIV+23 */
+	(iw_handler) ieee80211_ioctl_delmac,		/* SIOCIWFIRSTPRIV+24 */
+#if defined(CONFIG_QTN_80211K_SUPPORT)
+	(iw_handler) ieee80211_ioctl_getstastatistic,	/* SIOCIWFIRSTPRIV+25 */
+#else
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+25 */
+#endif
+	(iw_handler) ieee80211_ioctl_wdsmac,		/* SIOCIWFIRSTPRIV+26 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+27 */
+	(iw_handler) ieee80211_ioctl_wdsdelmac,		/* SIOCIWFIRSTPRIV+28 */
+	(iw_handler) ieee80211_ioctl_getblockdata,	/* SIOCIWFIRSTPRIV+29 */
+	(iw_handler) ieee80211_ioctl_kickmac,		/* SIOCIWFIRSTPRIV+30 */
+	(iw_handler) ieee80211_ioctl_dfsactscan,	/* SIOCIWFIRSTPRIV+31 */
+};
+/*
+ * Wireless Extensions dispatch descriptor wired into the VAP's
+ * net_device in ieee80211_ioctl_vattach().  Uses the kernel's
+ * type-checked ARRAY_SIZE() instead of a local N() macro.
+ */
+static struct iw_handler_def ieee80211_iw_handler_def = {
+	.standard		= (iw_handler *) ieee80211_handlers,
+	.num_standard		= ARRAY_SIZE(ieee80211_handlers),
+	.private		= (iw_handler *) ieee80211_priv_handlers,
+	.num_private		= ARRAY_SIZE(ieee80211_priv_handlers),
+	.private_args		= (struct iw_priv_args *) ieee80211_priv_args,
+	.num_private_args	= ARRAY_SIZE(ieee80211_priv_args),
+#if IW_HANDLER_VERSION >= 7
+	.get_wireless_stats	= ieee80211_iw_getstats,
+#endif
+};
+
+static	void ieee80211_delete_wlanunit(u_int);
+
+/**
+ * ieee80211_rst_dev_stats - reset a VAP's interface and node statistics.
+ *
+ * Clears the net-device style counters in vap->iv_devstats, selected
+ * protocol error counters in vap->iv_stats, the shared (MuC-side)
+ * VAP statistics, and the per-node statistics of every node in the
+ * station table except the VAP's own entry.
+ *
+ * Returns 0 unconditionally.
+ */
+int ieee80211_rst_dev_stats(struct ieee80211vap *vap)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+	struct rtnl_link_stats64 *stats = &vap->iv_devstats;
+#else
+	struct net_device_stats *stats = &vap->iv_devstats;
+#endif
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni, *next;
+	struct ieee80211_node_table *nt = &ic->ic_sta;
+
+	/* The interface's statistics will be cleared */
+	stats->rx_packets = 0;
+	stats->tx_packets = 0;
+	stats->rx_bytes = 0;
+	stats->tx_bytes = 0;
+	stats->rx_unicast_packets = 0;
+	stats->tx_unicast_packets = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+	/*
+	 * Pre-4.7 kernels keep 32-bit counters; presumably watch64
+	 * extends them to 64-bit wrap-watchers that must be reset in
+	 * step with the raw counters -- confirm against watch64 impl.
+	 */
+	watch64_reset(&stats->rx_packets, NULL);
+	watch64_reset(&stats->tx_packets, NULL);
+	watch64_reset(&stats->rx_bytes, NULL);
+	watch64_reset(&stats->tx_bytes, NULL);
+	watch64_reset(&stats->rx_unicast_packets, NULL);
+	watch64_reset(&stats->tx_unicast_packets, NULL);
+#endif
+
+	stats->tx_multicast_packets = 0;
+	stats->multicast = 0;
+	stats->rx_broadcast_packets = 0;
+	stats->tx_broadcast_packets = 0;
+	stats->rx_unknown_packets = 0;
+	stats->rx_dropped = 0;
+
+	/* tx_errors and its contributing protocol counters. */
+	vap->iv_stats.is_tx_nodefkey = 0;
+	vap->iv_stats.is_tx_noheadroom = 0;
+	vap->iv_stats.is_crypto_enmicfail = 0;
+	stats->tx_errors = 0;
+
+	/* tx_dropped and its contributing protocol counters. */
+	vap->iv_stats.is_tx_nobuf = 0;
+	vap->iv_stats.is_tx_nonode = 0;
+	vap->iv_stats.is_tx_unknownmgt = 0;
+	vap->iv_stats.is_tx_badcipher = 0;
+	vap->iv_stats.is_tx_nodefkey = 0;
+	stats->tx_dropped = 0;
+
+	/* rx_errors and its contributing protocol counters. */
+	vap->iv_stats.is_rx_tooshort = 0;
+	vap->iv_stats.is_rx_wepfail = 0;
+	vap->iv_stats.is_rx_decap = 0;
+	vap->iv_stats.is_rx_nobuf = 0;
+	vap->iv_stats.is_rx_decryptcrc = 0;
+	vap->iv_stats.is_rx_ccmpmic = 0;
+	vap->iv_stats.is_rx_tkipmic = 0;
+	vap->iv_stats.is_rx_tkipicv = 0;
+	stats->rx_errors = 0;
+
+	ic->ic_reset_shared_vap_stats(vap);
+
+	/* The statistics of local nodes in the node table will be cleared */
+	IEEE80211_NODE_LOCK_IRQ(nt);
+	TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) {
+		/* Skip the VAP's own node entry. */
+		if (!IEEE80211_ADDR_EQ(ni->ni_macaddr, vap->iv_myaddr)){
+			memset(&ni->ni_stats, 0, sizeof(struct ieee80211_nodestats));
+			ic->ic_reset_shared_node_stats(ni);
+		}
+	}
+	IEEE80211_NODE_UNLOCK_IRQ(nt);
+
+	return 0;
+}
+
+/*
+ * Handle private (non-Wireless-Extensions) ioctl requests on a VAP
+ * net_device.
+ *
+ * Returns 0 on success, a negative errno on failure, or -EOPNOTSUPP
+ * for commands this layer does not implement.
+ *
+ * FIX: removed the set-but-never-used local 'unit' (dead store that
+ * also triggers a compiler warning); unit reclamation is handled by
+ * ieee80211_ioctl_vdetach() during vap deletion.
+ */
+static int
+ieee80211_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct ieee80211vap *vap = netdev_priv(dev);
+
+	switch (cmd) {
+	case SIOCG80211STATS:
+		/* Copy the raw 802.11 protocol stats block to userspace. */
+		return copy_to_user(ifr->ifr_data, &vap->iv_stats,
+			sizeof (vap->iv_stats)) ? -EFAULT : 0;
+	case SIOCR80211STATS:
+		return ieee80211_rst_dev_stats(vap);
+	case SIOC80211IFDESTROY:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		ieee80211_stop(vap->iv_dev);	/* force state before cleanup */
+		vap->iv_ic->ic_vap_delete(vap);
+		return 0;
+	case IEEE80211_IOCTL_GETKEY:
+		return ieee80211_ioctl_getkey(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_GETWPAIE:
+		return ieee80211_ioctl_getwpaie(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_STA_STATS:
+		return ieee80211_ioctl_getstastats(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_STA_INFO:
+		return ieee80211_ioctl_getstainfo(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_SCAN_RESULTS:
+		return ieee80211_ioctl_getscanresults(dev, (struct iwreq *)ifr);
+	case IEEE80211_IOCTL_GET_ASSOC_TBL:
+		return ieee80211_ioctl_getassoctbl(dev, (struct iwreq *)ifr);
+	case IEEE80211_IOCTL_GET_RATES:
+		return ieee80211_ioctl_get_rates(dev, (struct iwreq *)ifr);
+	case IEEE80211_IOCTL_SET_RATES:
+		return ieee80211_ioctl_set_rates(dev, (struct iwreq *)ifr);
+	case IEEE80211_IOCTL_EXT:
+		return ieee80211_ioctl_ext(dev, (struct iwreq *)ifr);
+	}
+	return -EOPNOTSUPP;
+}
+
+static u_int8_t wlan_units[32];		/* allocation bitmap: room for 256 units */
+
+/*
+ * Allocate a new wlan unit number.  Returns the allocated unit, or
+ * -1 when the bitmap is exhausted.
+ */
+static int
+ieee80211_new_wlanunit(void)
+{
+	/* NB: covered by rtnl_lock */
+	u_int unit = 0;
+	u_int8_t bits;
+	int i;
+
+	/* Skip over bytes in which all eight unit bits are taken. */
+	for (i = 0; i < sizeof(wlan_units) && wlan_units[i] == 0xff; i++)
+		unit += NBBY;
+	if (i == sizeof(wlan_units))
+		return -1;
+
+	/* Locate the first clear bit inside the partially-used byte. */
+	for (bits = wlan_units[i]; bits & 1; bits >>= 1)
+		unit++;
+
+	setbit(wlan_units, unit);
+	return unit;
+}
+
+/*
+ * Reclaim the specified unit number.
+ * The two KASSERTs guard against out-of-range units and against
+ * releasing a unit that was never allocated (double free of the bit).
+ */
+static void
+ieee80211_delete_wlanunit(u_int unit)
+{
+	/* NB: covered by rtnl_lock */
+	KASSERT(unit < sizeof(wlan_units) * NBBY, ("invalid wlan unit %u", unit));
+	KASSERT(isset(wlan_units, unit), ("wlan unit %u not allocated", unit));
+	clrbit(wlan_units, unit);
+}
+
+/*
+ * Create a virtual ap.  This is public as it must be implemented
+ * outside our control (e.g. in the driver).
+ *
+ * Copies an ieee80211_clone_params from userspace, allocates a unit
+ * number, and asks the driver to create the vap.  On success the
+ * final device name is written back into ifr->ifr_name.
+ * Returns 0, -EPERM, -EFAULT, or -EIO.
+ */
+int
+ieee80211_ioctl_create_vap(struct ieee80211com *ic, struct ifreq *ifr, struct net_device *mdev)
+{
+	struct ieee80211_clone_params cp;
+	struct ieee80211vap *vap;
+	char name[IFNAMSIZ];
+	int unit;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (copy_from_user(&cp, ifr->ifr_data, sizeof(cp)))
+		return -EFAULT;
+
+	unit = ieee80211_new_wlanunit();
+	if (unit == -1)
+		return -EIO;		/* XXX */
+	/* Copy at most IFNAMSIZ-1 bytes and force NUL termination. */
+	memcpy(name, cp.icp_name, IFNAMSIZ - 1);
+	name[IFNAMSIZ - 1] = '\0';
+
+	vap = ic->ic_vap_create(ic, name, unit, cp.icp_opmode, cp.icp_flags, mdev);
+	if (vap == NULL) {
+		/* Creation failed: hand the unit number back. */
+		ieee80211_delete_wlanunit(unit);
+		return -EIO;
+	}
+	/* return final device name */
+	memcpy(ifr->ifr_name, vap->iv_dev->name, IFNAMSIZ - 1);
+	ifr->ifr_name[IFNAMSIZ - 1] = '\0';
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_ioctl_create_vap);
+
+/*
+ * Create a virtual ap.  This is public as it must be implemented
+ * outside our control (e.g. in the driver).
+ * Must be called with rtnl_lock held.
+ *
+ * Returns 0 on success or -EIO when no unit number is available or
+ * the driver refuses to create the vap.
+ */
+int
+ieee80211_create_vap(struct ieee80211com *ic, char *name,
+	struct net_device *mdev, int opmode, int opflags)
+{
+	int unit = ieee80211_new_wlanunit();
+
+	if (unit == -1)
+		return -EIO;		/* unit number space exhausted */
+
+	if (ic->ic_vap_create(ic, name, unit, opmode, opflags, mdev) == NULL) {
+		/* Driver rejected the vap: hand the unit number back. */
+		ieee80211_delete_wlanunit(unit);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_create_vap);
+
+
+/*
+ * Attach the ioctl and Wireless Extensions entry points to a VAP's
+ * net_device.
+ *
+ * NOTE(review): dev->netdev_ops is const in the kernel; the cast
+ * below deliberately strips const so ndo_do_ioctl can be patched in
+ * place.  This assumes the ops table is writable (driver-owned) --
+ * confirm.
+ */
+void
+ieee80211_ioctl_vattach(struct ieee80211vap *vap)
+{
+	struct net_device *dev = vap->iv_dev;
+	struct net_device_ops *pndo = (struct net_device_ops *)dev->netdev_ops;
+
+	pndo->ndo_do_ioctl = ieee80211_ioctl;
+#if IW_HANDLER_VERSION < 7
+	dev->get_wireless_stats = ieee80211_iw_getstats;
+#endif
+	dev->wireless_handlers = &ieee80211_iw_handler_def;
+}
+
+/*
+ * Detach-time cleanup: release the VAP's wlan unit number, if one
+ * was ever allocated and is still marked in the bitmap.
+ */
+void
+ieee80211_ioctl_vdetach(struct ieee80211vap *vap)
+{
+	if (vap->iv_unit == -1)
+		return;
+	if (isset(wlan_units, vap->iv_unit))
+		ieee80211_delete_wlanunit(vap->iv_unit);
+}
+
+/*
+ * Input function to send a message via the wireless_event kernel
+ * mechanism (ieee80211_eventf).
+ *
+ * @mac_bssid: peer/BSSID address, or NULL when none is available.
+ * @message: main message text (one of the d11_m strings, typically).
+ * @message_code / @message_description: optional detail strings.
+ * @message_reason: 802.11 reason code, or negative when not relevant.
+ * @auth / @crypto: security descriptions for "connected" messages.
+ *
+ * FIX: removed the dead local 'buf[384]' -- it was declared and had
+ * its last byte zeroed but was never otherwise read or written.
+ */
+void
+ieee80211_dot11_msg_send(struct ieee80211vap *vap,
+			const char *mac_bssid,
+			const char *message,
+			const char *message_code,
+			int message_reason,
+			const char *message_description,
+			const char *auth,
+			const char *crypto)
+{
+	/*
+	 * NOTE(review): the fallback is six ASCII '0' characters (0x30
+	 * bytes), not an all-zero MAC, so a NULL mac_bssid renders as
+	 * 30:30:30:30:30:30.  Preserved as-is -- confirm intent.
+	 */
+	char mac[6] = {'0', '0', '0', '0', '0', '0'};
+	const char *tag = QEVT_COMMON_PREFIX;
+
+	/* Ensure we always have a valid address */
+	if (mac_bssid)
+	{
+		IEEE80211_ADDR_COPY(mac, mac_bssid);
+	}
+
+	/* Message includes a reason code - disassoc, deauth to/from AP/STA */
+	if (message_reason >= 0) {
+		ieee80211_eventf(vap->iv_dev, "%s%s [" DBGMAC "] [%s - %d - %s]",
+			tag,
+			message,
+			ETHERFMT(mac),
+			message_code ? message_code : "no code",
+			message_reason,
+			message_description ? message_description : "no description"
+			);
+
+	}
+	/* STA/AP connected including auth details */
+	else if (auth) {
+		ieee80211_eventf(vap->iv_dev, "%s%s [" DBGMAC "] [%s/%s]",
+			tag,
+			message,
+			ETHERFMT(mac),
+			auth,
+			crypto ? crypto : "unspecified"
+			);
+	}
+	/* Further details included in the message */
+	else if (message_description) {
+		ieee80211_eventf(vap->iv_dev, "%s%s [" DBGMAC "] [%s - %s]",
+			tag,
+			message,
+			ETHERFMT(mac),
+			message_code ? message_code : "no code",
+			message_description
+			);
+	}
+	/* Single descriptive text */
+	else {
+		ieee80211_eventf(vap->iv_dev, "%s%s [" DBGMAC "] [%s]",
+			tag,
+			message,
+			ETHERFMT(mac),
+			message_code ? message_code : "no code"
+			);
+	}
+}
+EXPORT_SYMBOL(ieee80211_dot11_msg_send);
+
+#if defined(CONFIG_QTN_BSA_SUPPORT)
+/*
+ * Notify userspace, via an IWEVCUSTOM wireless event, that a peer was
+ * disassociated or deauthenticated.
+ *
+ * The payload is a qtn_bsa_peer_event_data header immediately
+ * followed by a qtn_bsa_disconnect_event_info, packed into
+ * event_data; put_unaligned() is used because the info struct starts
+ * at an arbitrary (possibly unaligned) offset in the byte buffer.
+ *
+ * Always returns 0.
+ */
+int ieee80211_bsa_disconnect_event_send(struct ieee80211vap *vap, struct ieee80211_node *ni,
+		uint16_t reason_code, uint8_t fc_subtype, uint8_t direction)
+{
+	struct qtn_bsa_peer_event_data *p_data;
+	struct qtn_bsa_disconnect_event_info *pevent;
+	uint8_t event_data[IEEE80211_MAX_EVENT_DATA_LEN];
+	union iwreq_data wreq;
+	memset(&wreq, 0, sizeof(union iwreq_data));
+
+	p_data = (void *)event_data;
+	pevent = (void *)(event_data + sizeof(struct qtn_bsa_peer_event_data));
+	/* NOTE(review): strncpy leaves bsa_name unterminated if the
+	 * literal fills the field -- confirm the field size. */
+	strncpy(p_data->bsa_name, "BSA-PEER-EVENT", sizeof(p_data->bsa_name));
+	/* Map the 802.11 frame subtype to the BSA event id. */
+	if (fc_subtype == IEEE80211_FC0_SUBTYPE_DISASSOC)
+		put_unaligned(BSA_EVENT_DISASSOC, &p_data->bsa_event_id);
+	else
+		put_unaligned(BSA_EVENT_DEAUTH, &p_data->bsa_event_id);
+
+	memcpy(p_data->bsa_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	/* offset tells the consumer where the event info begins. */
+	put_unaligned(sizeof(struct qtn_bsa_peer_event_data), &p_data->offset);
+	memcpy(pevent->bsa_sta_mac, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+
+	put_unaligned(reason_code, &pevent->reason_code);
+	pevent->direction = direction;
+	wreq.data.length = sizeof(*pevent) + sizeof(*p_data);
+
+	wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, (char *)&event_data);
+	return 0;
+}
+
+/*
+ * Send a BSA "connect complete" IWEVCUSTOM event for a newly
+ * associated station: RSSI, band, channel, bandwidth, max PHY rate,
+ * spatial streams and HT/VHT/MU-MIMO capability bits.
+ *
+ * Always returns 0.
+ */
+int ieee80211_bsa_connect_complete_event_send(struct ieee80211vap *vap,struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct qtn_bsa_peer_event_data *p_data;
+	struct qtn_bsa_assoc_compl_event_info *pevent;
+	uint8_t event_data[IEEE80211_MAX_EVENT_DATA_LEN];
+	struct ieee80211_ie_htcap *htcap_ie = NULL;
+	struct ieee80211_ie_vhtcap *vhtcap_ie = NULL;
+	/* Defaults describe a legacy (non-HT) single-stream peer. */
+	uint32_t max_ht_phy_rate = 54;
+	uint32_t max_vht_phy_rate = 0;
+	uint32_t max_ht_ss = 1;
+	uint32_t max_vht_ss = 0;
+	uint32_t band_width = 1;
+
+	union iwreq_data wreq;
+	p_data = (void *)event_data;
+	pevent = (void *)(event_data + sizeof(struct qtn_bsa_peer_event_data));
+	strncpy(p_data->bsa_name, "BSA-PEER-EVENT", sizeof(p_data->bsa_name));
+	put_unaligned(BSA_CONNECT_COMPLETE_EVENT, &p_data->bsa_event_id);
+	memcpy(p_data->bsa_bssid, ni->ni_bssid, IEEE80211_ADDR_LEN);
+	put_unaligned(sizeof(struct qtn_bsa_peer_event_data), &p_data->offset);
+	memcpy(pevent->bsa_sta_mac, ni->ni_macaddr, IEEE80211_ADDR_LEN);
+
+	put_unaligned(ni->ni_rssi, &pevent->bsa_rssi);
+
+	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
+		put_unaligned(BSA_OPER_BAND_5G, &pevent->bsa_curr_band);
+	else
+		put_unaligned(BSA_OPER_BAND_2G, &pevent->bsa_curr_band);
+
+	put_unaligned(ic->ic_curchan->ic_ieee, &pevent->bsa_channel);
+	put_unaligned(0, &pevent->bsa_band_width);
+	pevent->bsa_vht_capab = 0;
+	pevent->bsa_mu_mimo_capab = 0;
+
+	if (ni->ni_ext_flags & IEEE80211_NODE_BSS_TRANSITION)
+		pevent->bsa_bss_transition_support = 1;
+	else
+		pevent->bsa_bss_transition_support = 0;
+	/*
+	 * NOTE(review): the bandwidth/NSS/MU-MIMO bits below are pulled
+	 * straight out of raw HT/VHT capability IE bytes (hc_cap[0]
+	 * bit 1, vht_cap[0] bits 2-3, vht_cap[2] bits 3-4); verify the
+	 * masks and shifts against the IE layouts in the headers.
+	 */
+	if (ni->ni_flags & IEEE80211_NODE_HT) {
+		htcap_ie = (struct ieee80211_ie_htcap *)&ni->ni_ie_htcap;
+		max_ht_phy_rate = ieee80211_wlan_ht_rx_maxrate(htcap_ie, &max_ht_ss);
+		band_width = ((htcap_ie->hc_cap[0] & 0x2) >> 1);
+		pevent->bsa_vht_capab |= BSA_HT_SUPPORTED;
+	}
+
+	if (ni->ni_flags & IEEE80211_NODE_VHT) {
+		vhtcap_ie = (struct ieee80211_ie_vhtcap * )&ni->ni_ie_vhtcap;
+		max_vht_phy_rate = ieee80211_wlan_vht_rx_maxrate(vhtcap_ie);
+		pevent->bsa_vht_capab |= BSA_VHT_SUPPORTED;
+		pevent->bsa_mu_mimo_capab = ((vhtcap_ie->vht_cap[2] & 0x18)>>3);
+		max_vht_ss = ieee80211_wlan_vht_rxstreams(vhtcap_ie);
+		band_width |= ((vhtcap_ie->vht_cap[0] & 0xc) >> 1);
+	}
+	put_unaligned(band_width, &pevent->bsa_band_width);
+	/* Report the better of the HT and VHT figures. */
+	put_unaligned((max_vht_phy_rate > max_ht_phy_rate) ? max_vht_phy_rate : max_ht_phy_rate,
+		&pevent->bsa_max_phy_rate);
+	put_unaligned((max_ht_ss > max_vht_ss) ? max_ht_ss : max_vht_ss, &pevent->bsa_nss);
+
+	memset(&wreq, 0, sizeof(wreq));
+	wreq.data.length = sizeof(*pevent) + sizeof(*p_data);
+	wireless_send_event(vap->iv_dev, IWEVCUSTOM, &wreq, (char *)&event_data);
+
+	return 0;
+}
+
+/*
+ * Look up @macaddr on the VAP's BSA deny list.
+ * Returns the matching entry, or NULL when the address is not listed.
+ */
+static struct bsa_deny_sta * ieee80211_bsa_find_macfilter_list(struct ieee80211vap *vap,
+								uint8_t *macaddr)
+{
+	struct bsa_deny_sta *entry;
+
+	LIST_FOREACH(entry, &vap->deny_sta_list, list) {
+		if (IEEE80211_ADDR_EQ(entry->bsa_macaddr, macaddr))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/* Return 1 when @mac is present on the VAP's BSA deny list, else 0. */
+int ieee80211_bsa_macfilter_check(struct ieee80211vap *vap, uint8_t *mac)
+{
+	return (ieee80211_bsa_find_macfilter_list(vap, mac) != NULL) ? 1 : 0;
+}
+
+/*
+ * Add @mac to the VAP's BSA deny list.
+ * Returns 0 on success, -EEXIST if already listed, -ENOMEM on
+ * allocation failure.
+ *
+ * FIX: the duplicate check now reuses ieee80211_bsa_find_macfilter_list()
+ * instead of open-coding the same scan, and the log prefix is unified
+ * to "BSA_MF:" to match the other messages in this module.
+ */
+static int ieee80211_bsa_macfilter_add(struct ieee80211vap *vap, uint8_t *mac)
+{
+	struct bsa_deny_sta *new;
+
+	if (ieee80211_bsa_find_macfilter_list(vap, mac) != NULL) {
+		printk("BSA_MF: add %s failed, already present\n", ether_sprintf(mac));
+		return -EEXIST;
+	}
+
+	MALLOC(new, struct bsa_deny_sta *, sizeof(struct bsa_deny_sta), M_80211_ACL, M_NOWAIT | M_ZERO);
+	if (new == NULL) {
+		printk("BSA_MF: add %s failed, no memory\n", ether_sprintf(mac));
+		return -ENOMEM;
+	}
+
+	IEEE80211_ADDR_COPY(new->bsa_macaddr, mac);
+	LIST_INSERT_HEAD(&vap->deny_sta_list, new, list);
+
+	return 0;
+}
+
+/*
+ * Remove @mac from the VAP's BSA deny list.
+ * Returns 0 on success, -EFAULT when the address is not listed.
+ *
+ * FIXES: (1) FREE now uses M_80211_ACL to match the type passed to
+ * MALLOC in ieee80211_bsa_macfilter_add (was M_80211_BSA_MF);
+ * (2) the debug print no longer tests 'sta == NULL' after the early
+ * return made it dead -- and after FREE had already released sta.
+ */
+static int ieee80211_bsa_macfilter_remove(struct ieee80211vap *vap, uint8_t *mac)
+{
+	struct bsa_deny_sta *sta;
+
+	sta = ieee80211_bsa_find_macfilter_list(vap, mac);
+	if (sta == NULL)
+		return -EFAULT;
+
+	LIST_REMOVE(sta, list);
+	FREE(sta, M_80211_ACL);
+
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+		"BSA_MF: remove %s\n", ether_sprintf(mac));
+
+	return 0;
+}
+#endif
+
+/* Dot11Msg messages */
+/*
+ * NOTE(review): these strings appear to be indexed by a message id
+ * used elsewhere in the driver -- keep order and count in sync with
+ * the consumers of d11_m.
+ */
+char *d11_m[] = {
+	"Client connected",
+	"Client disconnected",
+	"Client authentication failed",
+	"Client removed",
+	"Connected to AP",
+	"Connection to AP failed",
+	"Disconnected from AP",
+};
+EXPORT_SYMBOL(d11_m);
+
+/* Dot11Msg details */
+/*
+ * NOTE(review): indexed by a detail/code id elsewhere in the driver;
+ * keep order and count in sync with the consumers of d11_c.
+ */
+char *d11_c[] = {
+	"Disassociated",
+	"Deauthenticated",
+	"TKIP countermeasures invoked",
+	"Client timeout",
+	"WPA password failure",
+	"WPA timeout",
+	"Beacon loss",
+	"Client sent deauthentication",
+	"Client sent disassociation"
+};
+EXPORT_SYMBOL(d11_c);
+
+/* Dot11Msg reason fields - directly taken from the Reason field in the 802.11 spec(s). */
+/*
+ * FIX: a comma was missing after the "excessive number of frames"
+ * entry, which silently concatenated it with the next string literal
+ * and shifted every subsequent reason code off by one (and dropped
+ * the final entry).  Also fixed user-visible typos "Tranistion" and
+ * "hte".
+ */
+char *d11_r[] = {
+	"Reserved",
+	"Unspecified reason",
+	"Previous authentication no longer valid",
+	"Deauthenticated because sending STA is leaving (or has left) IBSS or ESS",
+	"Disassociated due to inactivity",
+	"Disassociated because AP is unable to handle all currently associated STAs",
+	"Class 2 frame received from nonauthenticated STA",
+	"Class 3 frame received from nonassociated STA",
+	"Disassociated because sending STA is leaving (or has left) BSS",
+	"STA requesting (re)association is not authenticated with responding STA",
+	"Disassociated because the information in the Power Capability element is unacceptable",
+	"Disassociated because the information in the Supported Channels element is unacceptable",
+	"Disassociated due to BSS Transition Management",
+	"Invalid information element",
+	"Message integrity code (MIC) failure",
+	"4-Way Handshake timeout",
+	"Group Key Handshake timeout",
+	"Information element in 4-Way Handshake different from (Re)Association Request/Probe Response/Beacon frame",
+	"Invalid group cipher",
+	"Invalid pairwise cipher",
+	"Invalid AKMP",
+	"Unsupported RSN information element version",
+	"Invalid RSN information element capabilities",
+	"IEEE 802.1X authentication failed",
+	"Cipher suite rejected because of the security policy",
+	"Reserved",
+	"Reserved",
+	"Reserved",
+	"Reserved",
+	"Reserved",
+	"Reserved",
+	"TS deleted because QoS AP lacks sufficient bandwidth due to change in BSS or operational mode",
+	"Disassociated for unspecified, QoS-related reason",
+	"Disassociated because QoS AP lacks sufficient bandwidth for this QoS STA",
+	"Disassociated because excessive number of frames need to be acknowledged, but are not acknowledged",
+	"Disassociated because STA is transmitting outside the limits of its TXOPs",
+	"Requested from peer STA as the STA is leaving the BSS (or resetting)",
+	"Requested from peer STA as it does not want to use the mechanism",
+	"Requested from peer STA as the STA received frames using the mechanism for which a setup is required",
+	"Requested from peer STA due to timeout",
+	"Reserved",
+	"Reserved",
+	"Reserved",
+	"Reserved",
+	"Reserved",
+	"Peer STA does not support the requested cipher suite"
+};
+EXPORT_SYMBOL(d11_r);
+
+/*
+ * Return a node representing @vap: its BSS node when set, otherwise
+ * (for WDS vaps) the WDS peer node, otherwise the first vap's BSS
+ * node.  Returns NULL when no node can be found.
+ *
+ * NOTE(review): on success the node is returned referenced --
+ * ieee80211_find_wds_node presumably takes its own reference (hence
+ * ref_node is cleared when it succeeds), every other path takes one
+ * explicitly.  Callers appear expected to release it; confirm.
+ */
+static struct ieee80211_node *ieee80211_get_vap_node(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic;
+	struct ieee80211_node *ni;
+	int ref_node = 1;	/* set when we must take the reference ourselves */
+
+	if ((vap == NULL) || (vap->iv_ic == NULL))
+		return NULL;
+
+	ic = vap->iv_ic;
+	ni = vap->iv_bss;
+
+	if ((ni == NULL) && (vap->iv_opmode == IEEE80211_M_WDS)) {
+		ni = ieee80211_find_wds_node(&ic->ic_sta, vap->wds_mac);
+		ref_node = (ni == NULL);
+	}
+
+	/* Last resort: borrow the first vap's BSS node. */
+	if ((ni == NULL) && (TAILQ_FIRST(&ic->ic_vaps) != NULL))
+		ni = TAILQ_FIRST(&ic->ic_vaps)->iv_bss;
+
+	if ((ni != NULL) && ref_node)
+		ieee80211_ref_node(ni);
+
+	return ni;
+}
+
diff --git a/drivers/qtn/wlan/ieee80211_xauth.c b/drivers/qtn/wlan/ieee80211_xauth.c
new file mode 100644
index 0000000..d2fb83b
--- /dev/null
+++ b/drivers/qtn/wlan/ieee80211_xauth.c
@@ -0,0 +1,103 @@
+/*-
+ * Copyright (c) 2004 Video54 Technologies, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: ieee80211_xauth.c 1721 2006-09-20 08:45:13Z mentor $
+ */
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+/*
+ * External authenticator placeholder module.
+ *
+ * This support is optional; it is only used when the 802.11 layer's
+ * authentication mode is set to use 802.1x or WPA is enabled separately
+ * (for WPA-PSK).  If compiled as a module this code does not need
+ * to be present unless 802.1x/WPA is in use.
+ *
+ * The authenticator hooks into the 802.11 layer.  At present we use none
+ * of the available callbacks--the user mode authenticator process works
+ * entirely from messages about stations joining and leaving.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter.h>
+#include <linux/sysctl.h>
+#include <linux/in.h>
+
+#include "net80211/if_media.h"
+#include "net80211/if_llc.h"
+#include "net80211/if_ethersubr.h"
+
+#include "net80211/ieee80211_var.h"
+
+/*
+ * Module glue.
+ */
+MODULE_AUTHOR("Errno Consulting, Sam Leffler");
+MODULE_DESCRIPTION("802.11 wireless support: external (user mode) authenticator");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
+
+/*
+ * One module handles everything for now.  May want
+ * to split things up for embedded applications.
+ *
+ * All callbacks are NULL: the user-space authenticator drives
+ * everything from station join/leave messages, so this struct exists
+ * only to claim the 802.1x/WPA auth methods in the 802.11 layer.
+ */
+static const struct ieee80211_authenticator xauth = {
+	.ia_name	= "external",
+	.ia_attach	= NULL,
+	.ia_detach	= NULL,
+	.ia_node_join	= NULL,
+	.ia_node_leave	= NULL,
+};
+
+/*
+ * Module init: register the placeholder authenticator for both the
+ * 802.1X and WPA auth methods.  Always returns 0.
+ */
+static int __init
+init_ieee80211_xauth(void)
+{
+	ieee80211_authenticator_register(IEEE80211_AUTH_8021X, &xauth);
+	ieee80211_authenticator_register(IEEE80211_AUTH_WPA, &xauth);
+	return 0;
+}
+module_init(init_ieee80211_xauth);
+
+/* Module exit: unregister both auth methods claimed at init. */
+static void __exit
+exit_ieee80211_xauth(void)
+{
+	ieee80211_authenticator_unregister(IEEE80211_AUTH_8021X);
+	ieee80211_authenticator_unregister(IEEE80211_AUTH_WPA);
+}
+module_exit(exit_ieee80211_xauth);
diff --git a/drivers/qtn/wlan/if_media.c b/drivers/qtn/wlan/if_media.c
new file mode 100644
index 0000000..5a90355
--- /dev/null
+++ b/drivers/qtn/wlan/if_media.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 1997
+ *	Jonathan Stone and Jason R. Thorpe.  All rights reserved.
+ *
+ * This software is derived from information provided by Matt Thomas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed by Jonathan Stone
+ *	and Jason R. Thorpe for the NetBSD Project.
+ * 4. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: if_media.c 1721 2006-09-20 08:45:13Z mentor $
+ */
+
+/*
+ * BSD/OS-compatible network interface media selection.
+ *
+ * Where it is safe to do so, this code strays slightly from the BSD/OS
+ * design.  Software which uses the API (device drivers, basically)
+ * shouldn't notice any difference.
+ *
+ * Many thanks to Matt Thomas for providing the information necessary
+ * to implement this interface.
+ */
+
+#ifndef EXPORT_SYMTAB
+#define	EXPORT_SYMTAB
+#endif
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#ifndef ifr_media
+#define	ifr_media	ifr_ifru.ifru_ivalue
+#endif
+
+#include <asm/uaccess.h>
+
+#include "net80211/if_media.h"
+
+/*
+ * Compile-time options:
+ * IFMEDIA_DEBUG:
+ *	turn on implementation-level debug printfs.
+ * 	Useful for debugging newly-ported drivers.
+ */
+
+struct ifmedia_entry *ifmedia_match(struct ifmedia *, int, int);
+
+#ifdef IFMEDIA_DEBUG
+int ifmedia_debug = 0;
+static void ifmedia_printword(int);
+#endif
+
+/*
+ * Initialize if_media struct for a specific interface instance.
+ */
+void
+ifmedia_init(struct ifmedia *ifm, int dontcare_mask,
+	ifm_change_cb_t change_callback, ifm_stat_cb_t status_callback)
+{
+	LIST_INIT(&ifm->ifm_list);
+	ifm->ifm_cur = NULL;
+	ifm->ifm_media = 0;
+	ifm->ifm_mask = dontcare_mask;		/* IF don't-care bits */
+	ifm->ifm_change = change_callback;
+	ifm->ifm_status = status_callback;
+}
+
+void
+ifmedia_removeall(struct ifmedia *ifm)
+{
+	struct ifmedia_entry *entry;
+
+	for (entry = LIST_FIRST(&ifm->ifm_list); entry;
+	     entry = LIST_FIRST(&ifm->ifm_list)) {
+		LIST_REMOVE(entry, ifm_list);
+		kfree(entry);
+	}
+}
+
+/*
+ * Add a media configuration to the list of supported media
+ * for a specific interface instance.
+ */
+void
+ifmedia_add(struct ifmedia *ifm, int mword, int data, void *aux)
+{
+	register struct ifmedia_entry *entry;
+
+#ifdef IFMEDIA_DEBUG
+	if (ifmedia_debug) {
+		if (ifm == NULL) {
+			printk("ifmedia_add: null ifm\n");
+			return;
+		}
+		printk("Adding entry for ");
+		ifmedia_printword(mword);
+	}
+#endif
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL)
+		panic("ifmedia_add: can't malloc entry");
+
+	entry->ifm_media = mword;
+	entry->ifm_data = data;
+	entry->ifm_aux = aux;
+
+	LIST_INSERT_HEAD(&ifm->ifm_list, entry, ifm_list);
+}
+
+/*
+ * Add an array of media configurations to the list of
+ * supported media for a specific interface instance.
+ */
+void
+ifmedia_list_add(struct ifmedia *ifm, struct ifmedia_entry *lp, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		ifmedia_add(ifm, lp[i].ifm_media, lp[i].ifm_data, lp[i].ifm_aux);
+}
+
+/*
+ * Set the default active media. 
+ *
+ * Called by device-specific code which is assumed to have already
+ * selected the default media in hardware.  We do _not_ call the
+ * media-change callback.
+ */
+void
+ifmedia_set(struct ifmedia *ifm, int target)
+{
+	struct ifmedia_entry *match;
+
+	match = ifmedia_match(ifm, target, ifm->ifm_mask);
+
+	if (match == NULL) {
+		printk("ifmedia_set: no match for 0x%x/0x%x\n",
+			target, ~ifm->ifm_mask);
+		panic("ifmedia_set");
+	}
+	ifm->ifm_cur = match;
+
+#ifdef IFMEDIA_DEBUG
+	if (ifmedia_debug) {
+		printk("ifmedia_set: target ");
+		ifmedia_printword(target);
+		printk("ifmedia_set: setting to ");
+		ifmedia_printword(ifm->ifm_cur->ifm_media);
+	}
+#endif
+}
+
+/*
+ * Device-independent media ioctl support function.
+ */
+int
+ifmedia_ioctl(struct net_device *dev, struct ifreq *ifr,
+	struct ifmedia *ifm, u_long cmd)
+{
+	struct ifmedia_entry *match;
+	struct ifmediareq *ifmr = (struct ifmediareq *) ifr;
+	int error = 0, sticky;
+
+	if (dev == NULL || ifr == NULL || ifm == NULL)
+		return -EINVAL;
+
+	switch (cmd) {
+	/*
+	 * Set the current media.
+	 */
+	case  SIOCSIFMEDIA:
+	{
+		struct ifmedia_entry *oldentry;
+		int oldmedia;
+		int newmedia = ifr->ifr_media;
+
+		match = ifmedia_match(ifm, newmedia, ifm->ifm_mask);
+		if (match == NULL) {
+#ifdef IFMEDIA_DEBUG
+			if (ifmedia_debug) {
+				printk("ifmedia_ioctl: no media found for 0x%x\n", 
+					newmedia);
+			}
+#endif
+			return -ENXIO;
+		}
+
+		/*
+		 * If no change, we're done.
+		 * XXX Automedia may involve software intervention.
+		 *     Keep going in case the connected media changed.
+		 *     Similarly, if best match changed (kernel debugger?).
+		 */
+		if ((IFM_SUBTYPE(newmedia) != IFM_AUTO) &&
+		    (newmedia == ifm->ifm_media) &&
+		    (match == ifm->ifm_cur))
+			return 0;
+
+		/*
+		 * We found a match, now make the driver switch to it.
+		 * Make sure to preserve our old media type in case the
+		 * driver can't switch.
+		 */
+#ifdef IFMEDIA_DEBUG
+		if (ifmedia_debug) {
+			printk("ifmedia_ioctl: switching %s to ", dev->name);
+			ifmedia_printword(match->ifm_media);
+		}
+#endif
+		oldentry = ifm->ifm_cur;
+		oldmedia = ifm->ifm_media;
+		ifm->ifm_cur = match;
+		ifm->ifm_media = newmedia;
+		error = (*ifm->ifm_change)(netdev_priv(dev));
+		if ((error < 0) && (error != -ENETRESET)) {
+			ifm->ifm_cur = oldentry;
+			ifm->ifm_media = oldmedia;
+		}
+		break;
+	}
+
+	/*
+	 * Get list of available media and current media on interface.
+	 */
+	case  SIOCGIFMEDIA: 
+	{
+		struct ifmedia_entry *ep;
+		int *kptr, count;
+		int usermax;	/* user requested max */
+
+		kptr = NULL;		/* XXX gcc */
+
+		ifmr->ifm_active = ifmr->ifm_current = ifm->ifm_cur ?
+			ifm->ifm_cur->ifm_media : IFM_NONE;
+		ifmr->ifm_mask = ifm->ifm_mask;
+		ifmr->ifm_status = 0;
+		(*ifm->ifm_status)(netdev_priv(dev), ifmr);
+
+		count = 0;
+		usermax = 0;
+
+		/*
+		 * If there are more interfaces on the list, count
+		 * them.  This allows the caller to set ifmr->ifm_count
+		 * to 0 on the first call to know how much space to
+		 * allocate.
+		 */
+		LIST_FOREACH(ep, &ifm->ifm_list, ifm_list)
+			usermax++;
+
+		/*
+		 * Don't allow the user to ask for too many
+		 * or a negative number.
+		 */
+		if (ifmr->ifm_count > usermax)
+			ifmr->ifm_count = usermax;
+		else if (ifmr->ifm_count < 0)
+			return (-EINVAL);
+
+		if (ifmr->ifm_count != 0) {
+			kptr = (int *)kmalloc(ifmr->ifm_count * sizeof(int),
+			    GFP_KERNEL);
+
+			if (kptr == NULL)
+				return (-ENOMEM);
+			/*
+			 * Get the media words from the interface's list.
+			 */
+			ep = LIST_FIRST(&ifm->ifm_list);
+			for (; ep != NULL && count < ifmr->ifm_count;
+			    ep = LIST_NEXT(ep, ifm_list), count++)
+				kptr[count] = ep->ifm_media;
+
+			if (ep != NULL)
+				error = -E2BIG;	/* oops! */
+		} else
+			count = usermax;
+
+		/*
+		 * We do the copyout on E2BIG, because that's
+		 * just our way of telling userland that there
+		 * are more.  This is the behavior I've observed
+		 * under BSD/OS 3.0
+		 */
+		sticky = error;
+		if ((error == 0 || error == -E2BIG) && ifmr->ifm_count != 0) {
+			error = copy_to_user(ifmr->ifm_ulist,
+				kptr, ifmr->ifm_count * sizeof(int));
+		}
+
+		if (error == 0)
+			error = sticky;
+
+		if (ifmr->ifm_count != 0)
+			kfree(kptr);
+
+		ifmr->ifm_count = count;
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	return error;
+}
+
+/*
+ * Find media entry matching a given ifm word.
+ *
+ */
+struct ifmedia_entry *
+ifmedia_match(struct ifmedia *ifm, int target, int mask)
+{
+	struct ifmedia_entry *match, *next;
+
+	match = NULL;
+	mask = ~mask;
+
+	LIST_FOREACH(next, &ifm->ifm_list, ifm_list) {
+		if ((next->ifm_media & mask) == (target & mask)) {
+#if defined(IFMEDIA_DEBUG) || defined(DIAGNOSTIC)
+			if (match)
+				printk("ifmedia_match: multiple match for "
+					"0x%x/0x%x\n", target, mask);
+#endif
+			match = next;
+		}
+	}
+
+	return match;
+}
+
+#ifdef IFMEDIA_DEBUG
+struct ifmedia_description ifm_type_descriptions[] =
+    IFM_TYPE_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ethernet_descriptions[] =
+    IFM_SUBTYPE_ETHERNET_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ethernet_option_descriptions[] =
+    IFM_SUBTYPE_ETHERNET_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_tokenring_descriptions[] =
+    IFM_SUBTYPE_TOKENRING_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_tokenring_option_descriptions[] =
+    IFM_SUBTYPE_TOKENRING_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_fddi_descriptions[] =
+    IFM_SUBTYPE_FDDI_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_fddi_option_descriptions[] =
+    IFM_SUBTYPE_FDDI_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ieee80211_descriptions[] =
+    IFM_SUBTYPE_IEEE80211_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ieee80211_option_descriptions[] =
+    IFM_SUBTYPE_IEEE80211_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ieee80211_mode_descriptions[] =
+    IFM_SUBTYPE_IEEE80211_MODE_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_shared_descriptions[] =
+    IFM_SUBTYPE_SHARED_DESCRIPTIONS;
+
+struct ifmedia_description ifm_shared_option_descriptions[] =
+    IFM_SHARED_OPTION_DESCRIPTIONS;
+
+struct ifmedia_type_to_subtype {		/* XXX: right place for declaration? */
+	struct ifmedia_description *subtypes;
+	struct ifmedia_description *options;
+	struct ifmedia_description *modes;
+};
+
+/* must be in the same order as IFM_TYPE_DESCRIPTIONS */
+struct ifmedia_type_to_subtype ifmedia_types_to_subtypes[] = {
+	{ &ifm_subtype_ethernet_descriptions[0],
+	  &ifm_subtype_ethernet_option_descriptions[0],
+	  NULL, },
+	{ &ifm_subtype_tokenring_descriptions[0],
+	  &ifm_subtype_tokenring_option_descriptions[0],
+	  NULL, },
+	{ &ifm_subtype_fddi_descriptions[0],
+	  &ifm_subtype_fddi_option_descriptions[0],
+	  NULL, },
+	{ &ifm_subtype_ieee80211_descriptions[0],
+	  &ifm_subtype_ieee80211_option_descriptions[0],
+	  &ifm_subtype_ieee80211_mode_descriptions[0] },
+};
+
+/*
+ * print a media word.
+ */
+static void
+ifmedia_printword(int ifmw)
+{
+	struct ifmedia_description *desc;
+	struct ifmedia_type_to_subtype *ttos;
+	int seen_option = 0;
+
+	/* Find the top-level interface type. */
+	for (desc = ifm_type_descriptions, ttos = ifmedia_types_to_subtypes;
+	    desc->ifmt_string != NULL; desc++, ttos++)
+		if (IFM_TYPE(ifmw) == desc->ifmt_word)
+			break;
+	if (desc->ifmt_string == NULL) {
+		printk("<unknown type>\n");
+		return;
+	}
+	printk(desc->ifmt_string);
+
+	/* Any mode. */
+	for (desc = ttos->modes; desc && desc->ifmt_string != NULL; desc++)
+		if (IFM_MODE(ifmw) == desc->ifmt_word) {
+			if (desc->ifmt_string != NULL)
+				printk(" mode %s", desc->ifmt_string);
+			break;
+		}
+
+	/*
+	 * Check for the shared subtype descriptions first, then the
+	 * type-specific ones.
+	 */
+	for (desc = ifm_subtype_shared_descriptions;
+	    desc->ifmt_string != NULL; desc++)
+		if (IFM_SUBTYPE(ifmw) == desc->ifmt_word)
+			goto got_subtype;
+
+	for (desc = ttos->subtypes; desc->ifmt_string != NULL; desc++)
+		if (IFM_SUBTYPE(ifmw) == desc->ifmt_word)
+			break;
+	if (desc->ifmt_string == NULL) {
+		printk(" <unknown subtype>\n");
+		return;
+	}
+
+ got_subtype:
+	printk(" %s", desc->ifmt_string);
+
+	/*
+	 * Look for shared options.
+	 */
+	for (desc = ifm_shared_option_descriptions;
+	    desc->ifmt_string != NULL; desc++) {
+		if (ifmw & desc->ifmt_word) {
+			if (seen_option == 0)
+				printk(" <");
+			printk("%s%s", seen_option++ ? "," : "",
+			    desc->ifmt_string);
+		}
+	}
+
+	/*
+	 * Look for subtype-specific options.
+	 */
+	for (desc = ttos->options; desc->ifmt_string != NULL; desc++) {
+		if (ifmw & desc->ifmt_word) {
+			if (seen_option == 0)
+				printk(" <");
+			printk("%s%s", seen_option++ ? "," : "",
+			    desc->ifmt_string); 
+		}
+	}
+	printk("%s\n", seen_option ? ">" : "");
+}
+#endif /* IFMEDIA_DEBUG */
+
+EXPORT_SYMBOL(ifmedia_ioctl);
diff --git a/drivers/qtn/wlan/release.h b/drivers/qtn/wlan/release.h
new file mode 100644
index 0000000..ae835cf
--- /dev/null
+++ b/drivers/qtn/wlan/release.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: release.h 2763 2007-10-18 04:23:09Z mrenzmann $
+ */
+
+#define	RELEASE_TYPE	"RELEASE"
+
+#ifdef SVNVERSION
+#define RELEASE_VERSION	SVNVERSION
+#else
+#define RELEASE_VERSION	"0.9.3.3"
+#endif
diff --git a/drivers/qtn/wlan/version.h b/drivers/qtn/wlan/version.h
new file mode 100644
index 0000000..dbe23d8
--- /dev/null
+++ b/drivers/qtn/wlan/version.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: version.h 1426 2006-02-01 20:07:11Z mrenzmann $
+ */
+#define	WLAN_VERSION	"0.8.4.2"