pfe_ctrl-10.00.6.tar.gz from sdk-comcerto-openwrt-c2k_3.1-rc4
diff --git a/pfe_ctrl/Makefile b/pfe_ctrl/Makefile
new file mode 100644
index 0000000..03d9bae
--- /dev/null
+++ b/pfe_ctrl/Makefile
@@ -0,0 +1,148 @@
+PLATFORM?=C2000
+UNIT_TEST?=n
+UNIT_TEST_HIF?=n
+#PLATFORM?=EMULATION
+#PLATFORM?=PCI
+#UNIT_TEST?=y
+#UNIT_TEST_HIF?=y
+
+ifeq ($(PLATFORM),C2000)
+CROSS_COMPILE?=/home/upputuv/work/C2000/WiFi/porting_lsdk-10.1.45/sdk-cpe/staging_dir/toolchain-arm_v7-a_gcc-4.5-linaro_glibc-2.14_eabi/bin/arm-openwrt-linux-
+KERNELDIR?=/home/upputuv/work/C2000/WiFi/porting_lsdk-10.1.45/sdk-cpe/build_dir/linux-comcerto2000_hgw/linux-3.2.26
+ARCH=arm
+else
+ARCH=x86
+KERNELDIR=/lib/modules/`uname -r`/build
+endif
+
+# The following must be a relative path, or the kernel build system will fail
+PFE_DIR?=../pfe
+
+PFE_CTRL_VERSION_FILE:=version.h
+
+
+all: version config modules
+
+modules clean:
+	$(MAKE) CROSS_COMPILE=$(CROSS_COMPILE) V=1 ARCH=$(ARCH) -C $(KERNELDIR) M=`pwd` $@
+
+EXTRA_CFLAGS += -Werror -include $(src)/$(PFE_DIR)/c2000/version.h -I$(src)/$(PFE_DIR)/c2000 -I$(src)/$(PFE_DIR)/common -I$(src) -DENDIAN_LITTLE -DGCC_TOOLCHAIN -DCOMCERTO_2000 -DCOMCERTO_2000_CONTROL
+
+EXTRA_LDFLAGS += -T$(src)/control_link.lds
+
+obj-m += pfe.o
+
+pfe_ctrl_objs = $(PFE_DIR)/c2000/__pfe_ctrl.o \
+	$(PFE_DIR)/c2000/pfe.o \
+	$(PFE_DIR)/common/control_ipv4.o \
+	$(PFE_DIR)/common/control_ipv6.o \
+	$(PFE_DIR)/common/ipv6.o \
+	$(PFE_DIR)/common/control_mc4.o \
+	$(PFE_DIR)/common/control_mc6.o \
+	$(PFE_DIR)/common/multicast.o \
+	$(PFE_DIR)/common/control_pppoe.o \
+	$(PFE_DIR)/common/control_vlan.o \
+	$(PFE_DIR)/common/control_macvlan.o \
+	$(PFE_DIR)/common/query_ipv4.o \
+	$(PFE_DIR)/common/query_ipv6.o \
+	$(PFE_DIR)/common/query_mc.o \
+	$(PFE_DIR)/common/query_pppoe.o \
+	$(PFE_DIR)/common/query_vlan.o \
+	$(PFE_DIR)/common/query_tunnel.o \
+	$(PFE_DIR)/common/query_ipsec.o \
+	$(PFE_DIR)/common/query_Rx.o \
+	$(PFE_DIR)/c2000/control_expt.o \
+	$(PFE_DIR)/common/control_tunnel.o \
+	$(PFE_DIR)/common/module_hidrv.o \
+	$(PFE_DIR)/c2000/control_storage.o \
+	$(PFE_DIR)/common/layer2.o \
+	$(PFE_DIR)/common/alt_conf.o \
+	$(PFE_DIR)/common/control_stat.o \
+	$(PFE_DIR)/c2000/control_rx.o \
+	$(PFE_DIR)/c2000/control_tx.o \
+	$(PFE_DIR)/common/control_socket.o \
+	$(PFE_DIR)/common/control_rtp_relay.o \
+	$(PFE_DIR)/common/control_common.o \
+	$(PFE_DIR)/c2000/control_qm.o \
+	$(PFE_DIR)/common/fppdiag_lib.o \
+	$(PFE_DIR)/c2000/control_icc.o \
+	$(PFE_DIR)/common/control_voicebuf.o \
+	$(PFE_DIR)/common/control_ipsec.o \
+	$(PFE_DIR)/c2000/control_ipsec.o \
+	$(PFE_DIR)/c2000/control_ipsec_standalone.o \
+	$(PFE_DIR)/common/control_bridge.o \
+	$(PFE_DIR)/common/control_wifi_rx.o \
+	\
+	$(PFE_DIR)/common/control_natpt.o \
+	$(PFE_DIR)/common/control_ipv4frag.o \
+	$(PFE_DIR)/common/control_ipv6frag.o \
+	$(PFE_DIR)/common/control_capture.o \
+	$(PFE_DIR)/c2000/control_l2tp.o \
+
+pfe-y += pfe_mod.o \
+ pfe_hw.o \
+ pfe_firmware.o \
+ pfe_ctrl.o \
+ pfe_ctrl_hal.o \
+ pfe_hif.o \
+ pfe_hif_lib.o\
+ pfe_eth.o \
+ pfe_pcap.o \
+ pfe_vwd.o \
+ pfe_perfmon.o \
+ pfe_tso.o \
+ pfe_sysfs.o \
+ pfe_diags.o \
+ $(pfe_ctrl_objs) \
+
+pfe-$(CONFIG_COMCERTO_MSP) += pfe_mspsync.o
+
+ifeq ($(UNIT_TEST),y)
+pfe-y += pfe_unit_test.o
+EXTRA_CFLAGS+=-DCONFIG_UNIT_TEST
+
+ifeq ($(UNIT_TEST_HIF),y)
+EXTRA_CFLAGS+=-DCONFIG_UNIT_TEST_HIF
+endif
+
+endif
+
+ifeq ($(PLATFORM),C2000)
+pfe-y += pfe_platform.o
+EXTRA_CFLAGS+=-DCONFIG_PLATFORM_C2000
+else ifeq ($(PLATFORM),PCI)
+pfe-y += pfe_pci.o
+EXTRA_CFLAGS+=-DCONFIG_PLATFORM_PCI
+EXTRA_CFLAGS+=-DCONFIG_UTIL_DISABLED
+#EXTRA_CFLAGS+=-DCONFIG_TMU_DUMMY
+else ifeq ($(PLATFORM),EMULATION)
+pfe-y += pfe_emulation.o
+EXTRA_CFLAGS+=-DCONFIG_PLATFORM_EMULATION
+endif
+
+distclean:
+	rm -rf pfe_ctrl-src.tar.gz pfe-src.tar.gz pfe-src.tar
+
+dist: all pfe_release_tar
+	pfe_ctrl_files="`find . -name '*.h'; find . -name '*.c'; find . -name '*.lds'; find . -name 'Makefile'`"; tar -czf pfe_ctrl-src.tar.gz $$pfe_ctrl_files
+
+pfe_release_tar:
+	pfe_files="`find $(PFE_DIR)/c2000 -name '*.h'; find $(PFE_DIR)/common -name '*.h'` $(PFE_DIR)/Makefile $(PFE_DIR)/config.mk $(PFE_DIR)/c2000/config.mk $(PFE_DIR)/toolchain.mk $(PFE_DIR)/license.txt $(PFE_DIR)/license_full.txt $(pfe_ctrl_objs:.o=.c)"; tar -cf pfe-src.tar $$pfe_files; tar -xf pfe-src.tar;bash pfe_licence.sh `basename $(PFE_DIR)`; rm `basename $(PFE_DIR)`/license.txt `basename $(PFE_DIR)`/license_full.txt;tar -czf pfe-src.tar.gz `basename $(PFE_DIR)`
+
+version:
+	if [ -d .git ]; then \
+	$(MAKE) -C $(PFE_DIR) version ; \
+	PFE_CTRL_GIT_VERSION=$$(git describe --always --tags --dirty) ; \
+	printf "/*Auto-generated file. Do not edit !*/\n#ifndef VERSION_H\n#define VERSION_H\n\n#define PFE_CTRL_VERSION \"$${PFE_CTRL_GIT_VERSION}\"\n\n#endif /* VERSION_H */\n" > $(PFE_CTRL_VERSION_FILE) ; \
+	fi
+
+config:
+	$(MAKE) -C $(PFE_DIR) config_check
+
+PFE_BIN_DIR=$(PFE_DIR)/obj_c2000/
+CLASS_BIN=$(PFE_BIN_DIR)/class/class_c2000_debug.elf
+UTIL_BIN=$(PFE_BIN_DIR)/util/util_c2000_debug.elf
+TMU_BIN=$(PFE_BIN_DIR)/tmu/tmu_c2000_debug.elf
+check_dmem:
+ ./check_shared_mem.py pfe.ko $(CLASS_BIN) $(TMU_BIN) $(UTIL_BIN)
+
diff --git a/pfe_ctrl/check_shared_mem.py b/pfe_ctrl/check_shared_mem.py
new file mode 100755
index 0000000..d6832a3
--- /dev/null
+++ b/pfe_ctrl/check_shared_mem.py
@@ -0,0 +1,100 @@
+#! /usr/bin/python
+
+import os, sys, string
+
+if sys.stdout.isatty():
+ OKBLUE = '\033[94m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+ BOLD = "\033[1m"
+else:
+ OKBLUE = ''
+ OKGREEN = ''
+ WARNING = ''
+ FAIL = ''
+ ENDC = ''
+ BOLD = ''
+
+
+def hex_conv(s):
+ return int(s, 16)
+
+def parse_objdump(objdump_cmd):
+ syms = {}
+ for line in os.popen(objdump_cmd):
+ fields = line.split()
+ if len(fields) == 6:
+ if fields[3] == fields[5]:
+ start = hex_conv(fields[0])
+ else:
+ symbol = fields[5].replace("class_", "")
+ symbol = symbol.replace("util_", "")
+ symbol = symbol.replace("tmu_", "")
+ symbol = symbol.replace("mailbox", "mbox")
+ syms[symbol] = (hex_conv(fields[0]), hex_conv(fields[4]))
+ for sym in syms.keys():
+ (addr, size) = syms[sym]
+ syms[sym] = (addr-start, size)
+
+ return syms
+
+def one_way_compare(filename1, syms1, filename2, syms2):
+ list1 = syms1.keys()
+ list2 = syms2.keys()
+ err = 0
+
+ for sym in list1:
+ if sym not in list2:
+ print(WARNING + "ERROR: symbol " + sym + " present in " + filename1 + " but not in " +filename2 + ENDC)
+ err += 1
+ else:
+ (addr1, size1) = syms1[sym]
+ (addr2, size2) = syms2[sym]
+ if addr1 != addr2:
+ print(WARNING + "ERROR: symbol "+ sym + " at address " + str(addr1) + " in " + filename1 + ", at address " + str(addr2) + " in " + filename2 + ENDC)
+ syms2.pop(sym)
+ err += 1
+ if size1 != size2:
+ print(WARNING + "ERROR: symbol "+ sym + " has size " + str(size1) + " in " + filename1 + ", but size " + str(size2) + " in " + filename2 + ENDC)
+ syms2.pop(sym)
+ err += 1
+ return err
+
+
+
+def compare_shared_mem(ctrl_filename, pe, filename):
+
+ print("Comparing memory for "+ pe + " shared memory...")
+ sym_ctrl = parse_objdump("objdump -j ." + pe + "_dmem_sh -t " + sys.argv[1])
+
+ sym_pfe = parse_objdump("objdump -j .dmem_sh -t "+ filename)
+
+ err1 = one_way_compare(ctrl_filename, sym_ctrl, filename, sym_pfe)
+ err2 = one_way_compare(filename, sym_pfe, ctrl_filename, sym_ctrl)
+
+ if (err1 + err2) != 0:
+ print(FAIL + BOLD + pe + " ERROR: " + ctrl_filename + " and " + filename + " don't match." + ENDC)
+ else:
+ print(OKGREEN + BOLD + pe + " OK." + ENDC)
+ print("")
+
+ return err1 + err2
+
+# Main
+if __name__=="__main__":
+ if len(sys.argv)<3:
+ print("Usage: " + sys.argv[0] + " <pfe.ko file> <class ELF file> <tmu ELF file> <util ELF file>")
+ sys.exit(1)
+
+ compare_list = []
+ compare_list.append(("class", sys.argv[2]))
+ compare_list.append(("tmu", sys.argv[3]))
+ compare_list.append(("util", sys.argv[4]))
+
+ ret = 0
+ for (pe, filename) in compare_list:
+ ret += compare_shared_mem(sys.argv[1], pe, filename)
+
+ exit(ret)
diff --git a/pfe_ctrl/control_link.lds b/pfe_ctrl/control_link.lds
new file mode 100644
index 0000000..5771659
--- /dev/null
+++ b/pfe_ctrl/control_link.lds
@@ -0,0 +1,27 @@
+SECTIONS
+{
+	.class_dmem_sh : SUBALIGN(8) {	/* class PEs' shared DMEM image; 8-byte sub-alignment */
+		__class_dmem_sh = .;	/* section-start marker symbol */
+		*(SORT(.class_dmem_sh_*))	/* SORT gives a deterministic symbol order */
+	}
+
+	.class_pe_lmem_sh : SUBALIGN(8) {	/* class PEs' shared PE-local memory */
+		__class_pe_lmem_sh = .;
+		*(SORT(.class_pe_lmem_sh_*))
+	}
+
+	.tmu_dmem_sh : SUBALIGN(8) {	/* TMU shared DMEM */
+		__tmu_dmem_sh = .;
+		*(SORT(.tmu_dmem_sh_*))
+	}
+
+	.util_dmem_sh : SUBALIGN(8) {	/* UTIL shared DMEM */
+		__util_dmem_sh = .;
+		*(SORT(.util_dmem_sh_*))
+	}
+
+	.util_ddr_sh : SUBALIGN(8) {	/* UTIL shared DDR area */
+		__util_ddr_sh = .;
+		*(SORT(.util_ddr_sh_*))
+	}
+}
diff --git a/pfe_ctrl/doxygen/Doxyfile b/pfe_ctrl/doxygen/Doxyfile
new file mode 100644
index 0000000..2a8106e
--- /dev/null
+++ b/pfe_ctrl/doxygen/Doxyfile
@@ -0,0 +1,1661 @@
+# Doxyfile 1.7.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = pfe
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = .
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen to replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespace are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = YES
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 28
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page. This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = ..
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS = *.c \
+ *.cc \
+ *.cxx \
+ *.cpp \
+ *.c++ \
+ *.d \
+ *.java \
+ *.ii \
+ *.ixx \
+ *.ipp \
+ *.i++ \
+ *.inl \
+ *.h \
+ *.hh \
+ *.hxx \
+ *.hpp \
+ *.h++ \
+ *.idl \
+ *.odl \
+ *.cs \
+ *.php \
+ *.php3 \
+ *.inc \
+ *.m \
+ *.mm \
+ *.dox \
+ *.py \
+ *.f90 \
+ *.f \
+ *.vhd \
+ *.vhdl
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = YES
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = YES
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantage is that it is more difficult to set up
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = YES
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = NO
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a differently looking font) you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans.ttf
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/pfe_ctrl/pfe_ctrl.c b/pfe_ctrl/pfe_ctrl.c
new file mode 100644
index 0000000..3af2e61
--- /dev/null
+++ b/pfe_ctrl/pfe_ctrl.c
@@ -0,0 +1,627 @@
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#else
+#include "platform.h"
+#endif
+
+#include "pfe_mod.h"
+#include "pfe_ctrl.h"
+
+#include "pfe_ctrl_hal.h"
+#include "__pfe_ctrl.h"
+
+static struct pe_sync_mailbox CLASS_DMEM_SH2(sync_mailbox);
+static struct pe_sync_mailbox TMU_DMEM_SH2(sync_mailbox);
+static struct pe_sync_mailbox UTIL_DMEM_SH2(sync_mailbox);
+
+static struct pe_msg_mailbox CLASS_DMEM_SH2(msg_mailbox);
+static struct pe_msg_mailbox TMU_DMEM_SH2(msg_mailbox);
+static struct pe_msg_mailbox UTIL_DMEM_SH2(msg_mailbox);
+
+static int initialized = 0;
+
+#define TIMEOUT_MS 1000
+
+/** Polling-loop helper for the mailbox busy-wait loops.
+* In kernel builds: once the deadline 'end' (in jiffies) has passed, keeps
+* tolerating the overrun for up to TIMEOUT_MS before reporting a timeout,
+* and yields the CPU if the scheduler requests it. In non-kernel builds it
+* simply delays for 1us and never times out.
+*
+* @param end	Deadline in jiffies; polling may continue for up to
+*		TIMEOUT_MS past this value
+*
+* @return	0 to keep polling, -1 on timeout (kernel builds only)
+*/
+int relax(unsigned long end)
+{
+#ifdef __KERNEL__
+	if (time_after(jiffies, end)) {
+		/* grace period of TIMEOUT_MS past the deadline before giving up */
+		if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000)) {
+			return -1;
+		}
+
+		if (need_resched())
+			schedule();
+	}
+#else
+	udelay(1);
+#endif
+
+	return 0;
+}
+
+/** PE sync stop.
+* Stops packet processing for a list of PE's (specified using a bitmask).
+* The caller must hold ctrl->mutex.
+*
+* @param ctrl		Control context
+* @param pe_mask	Mask of PE id's to stop
+*
+* @return		0 on success, -EIO if some PE did not acknowledge the
+*			stop request before the timeout
+*/
+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
+{
+	struct pe_sync_mailbox *mbox;
+	int pe_stopped = 0;
+	unsigned long end = jiffies + 2;
+	int i;
+
+	/* Raise the stop request in each selected PE's sync mailbox */
+	for (i = 0; i < MAX_PE; i++)
+		if (pe_mask & (1 << i)) {
+			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+			pe_dmem_write(i, cpu_to_be32(0x1), (unsigned long)&mbox->stop, 4);
+		}
+
+	/* Poll each pending PE's 'stopped' flag until every selected PE has
+	 * acknowledged, or relax() reports a timeout */
+	while (pe_stopped != pe_mask) {
+		for (i = 0; i < MAX_PE; i++)
+			if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
+				mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+				if (pe_dmem_read(i, (unsigned long)&mbox->stopped, 4) & cpu_to_be32(0x1))
+					pe_stopped |= (1 << i);
+			}
+
+		if (relax(end) < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	printk(KERN_ERR "%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
+
+	/* Withdraw the stop request from every selected PE so none is left
+	 * stalled by a partially-applied stop */
+	for (i = 0; i < MAX_PE; i++)
+		if (pe_mask & (1 << i)) {
+			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+			pe_dmem_write(i, cpu_to_be32(0x0), (unsigned long)&mbox->stop, 4);
+		}
+
+	return -EIO;
+}
+
+/** PE start.
+* Starts packet processing for a list of PE's (specified using a bitmask).
+* The caller must hold ctrl->mutex.
+*
+* @param ctrl		Control context
+* @param pe_mask	Mask of PE id's to start
+*
+*/
+void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
+{
+	struct pe_sync_mailbox *mbox;
+	int i;
+
+	/* Clear the stop request in each selected PE's sync mailbox, letting
+	 * the PE resume packet processing (counterpart of pe_sync_stop()) */
+	for (i = 0; i < MAX_PE; i++)
+		if (pe_mask & (1 << i)) {
+
+			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+			pe_dmem_write(i, cpu_to_be32(0x0), (unsigned long)&mbox->stop, 4);
+		}
+}
+
+
+/** Sends a control request to a given PE (to copy data to/from internal memory from/to DDR).
+* The caller must hold ctrl->mutex.
+*
+* @param ctrl		Control context
+* @param id		PE id
+* @param cmd_type	Command type, placed in the upper 16 bits of the
+*			mailbox request word
+* @param dst		Physical destination address of data
+* @param src		Physical source address of data
+* @param len		Data length
+*
+* @return		Upper 16 bits of the PE's response word on
+*			completion, -EIO on timeout
+*/
+int pe_request(struct pfe_ctrl *ctrl, int id, unsigned short cmd_type, unsigned long dst, unsigned long src, int len)
+{
+	struct pe_msg_mailbox mbox = {
+		.dst = cpu_to_be32(dst),
+		.src = cpu_to_be32(src),
+		.len = cpu_to_be32(len),
+		.request = cpu_to_be32((cmd_type << 16) | 0x1),
+	};
+	struct pe_msg_mailbox *pmbox = (void *)ctrl->msg_mailbox_baseaddr[id];
+	unsigned long end = jiffies + 2;
+	u32 rc;
+
+	/* This works because .request is written last */
+	pe_dmem_memcpy_to32(id, (unsigned long)pmbox, &mbox, sizeof(mbox));
+
+	/* The request word was armed with 0x1 in its low half; poll until the
+	 * PE clears those bits (command consumed) or relax() times out */
+	while ((rc = pe_dmem_read(id, (unsigned long)&pmbox->request, 4)) & cpu_to_be32(0xffff)) {
+		if (relax(end) < 0)
+			goto err;
+	}
+
+	rc = be32_to_cpu(rc);
+
+	/* the PE's response code is carried in the upper 16 bits */
+	return rc >> 16;
+
+err:
+	printk(KERN_ERR "%s: timeout, %x\n", __func__, be32_to_cpu(rc));
+	pe_dmem_write(id, cpu_to_be32(0), (unsigned long)&pmbox->request, 4);
+	return -EIO;
+}
+
+
+
+/** Sends a command to the PFE control code and collects its response.
+* Serializes access to the control command handler with ctrl->mutex.
+*
+* @param fcode		Function code of the command
+* @param length	Length in bytes of the command payload
+* @param payload	Command payload
+* @param rlen		Set to the response length in bytes
+* @param rbuf		Buffer receiving the response
+*
+* @return		Always 0; failures are reported through rbuf[0]/rlen
+*/
+int comcerto_fpp_send_command(u16 fcode, u16 length, u16 *payload, u16 *rlen, u16 *rbuf)
+{
+	struct pfe_ctrl *ctrl;
+
+	if (!initialized) {
+		printk(KERN_ERR "pfe control not initialized\n");
+		/* synthesize an error response for the caller */
+		*rlen = 2;
+		rbuf[0] = 1; /* ERR_UNKNOWN_COMMAND */
+		goto out;
+	}
+
+	ctrl = &pfe->ctrl;
+
+	mutex_lock(&ctrl->mutex);
+
+	/* dispatch to the command handler under the control path mutex */
+	__pfe_ctrl_cmd_handler(fcode, length, payload, rlen, rbuf);
+
+	mutex_unlock(&ctrl->mutex);
+
+out:
+	return 0;
+}
+EXPORT_SYMBOL(comcerto_fpp_send_command);
+
+/**
+ * comcerto_fpp_send_command_simple -
+ *
+ * This function is used to send command to FPP in a synchronous way. Calls to the function blocks until a response
+ * from FPP is received. This API can not be used to query data from FPP
+ *
+ * Parameters
+ * fcode: Function code. FPP function code associated to the specified command payload
+ * length: Command length. Length in bytes of the command payload
+ * payload: Command payload. Payload of the command sent to the FPP. 16bits buffer allocated by the client's code and sized up to 256 bytes
+ *
+ * Return values
+ * 0: Success
+ * <0: Linux system failure (check errno for detailed error condition)
+ * >0: FPP returned code
+ */
+int comcerto_fpp_send_command_simple(u16 fcode, u16 length, u16 *payload)
+{
+	u16 rbuf[128];
+	u16 rlen;
+	int rc;
+
+	if (!initialized) {
+		printk(KERN_ERR "pfe control not initialized\n");
+		/* fake an FPP error response so the shared exit path below
+		 * reports the failure to the caller */
+		rbuf[0] = 1; /* ERR_UNKNOWN_COMMAND */
+		goto out;
+	}
+
+	rc = comcerto_fpp_send_command(fcode, length, payload, &rlen, rbuf);
+
+	/* if a command delivery error is detected, do not check command returned code */
+	if (rc < 0)
+		return rc;
+
+out:
+	/* retrieve FPP command returned code. Could be error or acknowledgment */
+	rc = rbuf[0];
+
+	return rc;
+}
+EXPORT_SYMBOL(comcerto_fpp_send_command_simple);
+
+
+/** Work handler draining the asynchronous FPP command queue.
+* Takes messages queued by comcerto_fpp_send_command_atomic() off
+* ctrl->msg_list one at a time, sends each synchronously, and hands the
+* response to the caller-supplied callback (if any) before freeing the
+* message.
+*
+* @param work	Work struct embedded in the pfe_ctrl context
+*/
+static void comcerto_fpp_workqueue(struct work_struct *work)
+{
+	struct pfe_ctrl *ctrl = container_of(work, struct pfe_ctrl, work);
+	struct fpp_msg *msg;
+	unsigned long flags;
+	u16 rbuf[128];
+	u16 rlen;
+	int rc;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+
+	while (!list_empty(&ctrl->msg_list)) {
+
+		msg = list_entry(ctrl->msg_list.next, struct fpp_msg, list);
+
+		list_del(&msg->list);
+
+		/* drop the spinlock while sending: the synchronous send takes
+		 * ctrl->mutex and may sleep */
+		spin_unlock_irqrestore(&ctrl->lock, flags);
+
+		rc = comcerto_fpp_send_command(msg->fcode, msg->length, msg->payload, &rlen, rbuf);
+
+		/* send command response to caller's callback */
+		if (msg->callback != NULL)
+			msg->callback(msg->data, rc, rlen, rbuf);
+
+		pfe_kfree(msg);
+
+		spin_lock_irqsave(&ctrl->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+}
+
+/**
+ * comcerto_fpp_send_command_atomic -
+ *
+ * This function is used to send command to FPP in an asynchronous way. The Caller specifies a function pointer
+ * that is called by the FPP Comcerto driver when a command response from the FPP engine is received. This API can also be
+ * used to query data from FPP. Queried data are returned through the specified client's callback function
+ *
+ * Parameters
+ * fcode: Function code. FPP function code associated to the specified command payload
+ * length: Command length. Length in bytes of the command payload
+ * payload: Command payload. Payload of the command sent to the FPP. 16bits buffer allocated by the client's code and sized up to 256 bytes
+ * callback: Client's callback handler for FPP response processing
+ * data: Client's private data. Not interpreted by the FPP driver and sent back to the Client as a reference (client's code own usage)
+ *
+ * Return values
+ * 0: Success
+ * <0: Linux system failure (check errno for detailed error condition)
+ **/
+int comcerto_fpp_send_command_atomic(u16 fcode, u16 length, u16 *payload, void (*callback)(unsigned long, int, u16, u16 *), unsigned long data)
+{
+	struct pfe_ctrl *ctrl;
+	struct fpp_msg *msg;
+	unsigned long flags;
+	int rc;
+
+	if (!initialized) {
+		printk(KERN_ERR "pfe control not initialized\n");
+		rc = -EIO;
+		goto err0;
+	}
+
+	ctrl = &pfe->ctrl;
+
+	/* the payload is copied into the message allocation below, so it
+	 * must fit in the space reserved for it */
+	if (length > FPP_MAX_MSG_LENGTH) {
+		rc = -EINVAL;
+		goto err0;
+	}
+
+	/* single allocation holding the descriptor followed by the payload
+	 * copy; GFP_ATOMIC since this API may be called from atomic context */
+	msg = pfe_kmalloc(sizeof(struct fpp_msg) + length, GFP_ATOMIC);
+	if (!msg) {
+		rc = -ENOMEM;
+		goto err0;
+	}
+
+	/* set caller's callback function */
+	msg->callback = callback;
+	msg->data = data;
+
+	/* payload lives immediately after the descriptor */
+	msg->payload = (u16 *)(msg + 1);
+
+	msg->fcode = fcode;
+	msg->length = length;
+	memcpy(msg->payload, payload, length);
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+
+	/* Queue at the tail so commands reach the PFE in submission (FIFO)
+	 * order; list_add() would insert at the head and reverse the order
+	 * whenever several commands are pending at once */
+	list_add_tail(&msg->list, &ctrl->msg_list);
+
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	schedule_work(&ctrl->work);
+
+	return 0;
+
+err0:
+	return rc;
+}
+
+EXPORT_SYMBOL(comcerto_fpp_send_command_atomic);
+
+
+
+/** Sends a control request to TMU PE ).
+* The caller must hold ctrl->mutex.
+*
+* @param ctrl Control context
+* @param id TMU id
+* @param tmu_cmd_bitmask Bitmask of commands sent to TMU
+*
+*/
+int tmu_pe_request(struct pfe_ctrl *ctrl, int id, unsigned int tmu_cmd_bitmask)
+{
+	/* Only ids within the TMU PE range are accepted */
+	if (id >= TMU0_ID && id <= TMU_MAX_ID)
+		return pe_request(ctrl, id, 0, tmu_cmd_bitmask, 0, 0);
+
+	return -EIO;
+}
+
+
+
+
+
+static int pfe_ctrl_send_command_simple(u16 fcode, u16 length, u16 *payload)
+{
+	u16 rbuf[128];
+	u16 rlen;
+
+	/* deliver the command to the FE */
+	comcerto_fpp_send_command(fcode, length, payload, &rlen, rbuf);
+
+	/* the first response word carries the FE status code, which may be
+	 * an error or an acknowledgment */
+	return rbuf[0];
+}
+
+
+/**
+ * comcerto_fpp_register_event_cb -
+ *
+ */
+int comcerto_fpp_register_event_cb(int (*event_cb)(u16, u16, u16*))
+{
+	if (!initialized) {
+		printk(KERN_ERR "pfe control not initialized\n");
+		return -EIO;
+	}
+
+	/* remember the FCI callback used to deliver asynchronous events */
+	pfe->ctrl.event_cb = event_cb;
+
+	return 0;
+}
+EXPORT_SYMBOL(comcerto_fpp_register_event_cb);
+
+
+/** Enables or disables TX for an Ethernet interface in the PFE.
+*
+* @param id		Interface id (first byte of the command payload)
+* @param state		Non-zero to enable TX, zero to disable
+* @param mac_addr	MAC address copied into the enable command
+*			(only used when state is non-zero)
+*
+* @return		0 on success, -1 if the PFE rejected the command
+*/
+int pfe_ctrl_set_eth_state(int id, unsigned int state, unsigned char *mac_addr)
+{
+	unsigned char msg[20];
+	int rc;
+
+	memset(msg, 0, 20);
+
+	msg[0] = id;
+
+	if (state) {
+		/* MAC address occupies bytes 14..19 of the TX_ENABLE payload
+		 * -- NOTE(review): layout must match the PE-side command
+		 * structure; confirm against the firmware headers */
+		memcpy(msg + 14, mac_addr, 6);
+
+		if ((rc = pfe_ctrl_send_command_simple(CMD_TX_ENABLE, 20, (unsigned short *)msg)) != 0)
+			goto err;
+
+	} else {
+		/* disable only needs the interface id (2 bytes) */
+		if ((rc = pfe_ctrl_send_command_simple(CMD_TX_DISABLE, 2, (unsigned short *)msg)) != 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	return -1;
+}
+
+
+int pfe_ctrl_set_lro(char enable)
+{
+	/* the FE expects a 2-byte on/off flag */
+	unsigned short msg = (unsigned short)enable;
+
+	if (pfe_ctrl_send_command_simple(CMD_RX_LRO, 2, &msg) != 0)
+		return -1;
+
+	return 0;
+}
+
+#ifdef CFG_PCAP
+/* Payload of the CMD_QM_EXPT_RATE command: rate limit applied to one
+ * exception path traffic type */
+typedef struct _tQosExptRateCommand {
+	unsigned short expt_iftype; /* exception path type (EXPT_TYPE_PCAP here) */
+	unsigned short pkts_per_msec; /* allowed packets per millisecond */
+}QosExptRateCommand, *PQosExptRateCommand;
+
+int pfe_ctrl_set_pcap(char enable)
+{
+	/* the FE expects a 2-byte on/off flag */
+	unsigned short msg = (unsigned short)enable;
+
+	if (pfe_ctrl_send_command_simple(CMD_PKTCAP_ENABLE, 2, &msg) != 0)
+		return -1;
+
+	return 0;
+}
+
+/** Sets the packet-capture exception path rate limit.
+*
+* @param pkts_per_msec	Allowed packets per millisecond.
+*			NOTE(review): stored into an unsigned short field,
+*			so values above 65535 are silently truncated
+*
+* @return		0 on success, -1 if the PFE rejected the command
+*/
+int pfe_ctrl_set_pcap_ratelimit(u32 pkts_per_msec)
+{
+	QosExptRateCommand pcap_ratelimit;
+	int rc;
+
+	pcap_ratelimit.expt_iftype = EXPT_TYPE_PCAP;
+	pcap_ratelimit.pkts_per_msec = pkts_per_msec;
+
+	if ((rc = pfe_ctrl_send_command_simple(CMD_QM_EXPT_RATE, sizeof(QosExptRateCommand), (unsigned short *)&pcap_ratelimit)) != 0)
+		return -1;
+
+	return 0;
+
+}
+#endif
+/** Control code timer thread.
+*
+* A kernel thread is used so that the timer code can be run under the control path mutex.
+* The thread wakes up regularly and checks if any timer in the timer list has expired.
+* The timers are re-started automatically.
+* The code tries to keep the number of times a timer runs per unit time constant on average,
+* if the thread scheduling is delayed, it's possible for a particular timer to be scheduled in
+* quick succession to make up for the lost time.
+*
+* @param data Pointer to the control context structure
+*
+* @return 0 on success, a negative value on error
+*
+*/
+static int pfe_ctrl_timer(void *data)
+{
+	struct pfe_ctrl *ctrl = data;
+	TIMER_ENTRY *timer, *next;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	while (1)
+	{
+		/* sleep for one timer tick */
+		schedule_timeout_uninterruptible(ctrl->timer_period);
+
+		/* timer handlers run under the control path mutex */
+		mutex_lock(&ctrl->mutex);
+
+		list_for_each_entry_safe(timer, next, &ctrl->timer_list, list)
+		{
+			if (time_after(jiffies, timer->timeout))
+			{
+				/* re-arm relative to the previous deadline, not
+				 * 'now', to keep the average rate constant even
+				 * when scheduling is delayed */
+				timer->timeout += timer->period;
+
+				timer->handler();
+			}
+		}
+
+		mutex_unlock(&ctrl->mutex);
+
+		if (kthread_should_stop())
+			break;
+	}
+
+	printk(KERN_INFO "%s exiting\n", __func__);
+
+	return 0;
+}
+
+
+/** Initializes the PFE control context.
+* Sets up the locks, the timer and async-message lists, the per-PE mailbox
+* addresses, the DMA pools, the timer kthread and the fci control
+* interface. On success the module-wide 'initialized' flag is set.
+*
+* @param pfe	Global pfe context
+*
+* @return	0 on success, a negative error code on failure
+*/
+int pfe_ctrl_init(struct pfe *pfe)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+	int id;
+	int rc;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	mutex_init(&ctrl->mutex);
+	spin_lock_init(&ctrl->lock);
+
+	ctrl->timer_period = HZ / TIMER_TICKS_PER_SEC;
+
+	INIT_LIST_HEAD(&ctrl->timer_list);
+
+	INIT_WORK(&ctrl->work, comcerto_fpp_workqueue);
+
+	INIT_LIST_HEAD(&ctrl->msg_list);
+
+	/* all class PEs (resp. all TMU PEs) share the same mailbox addresses
+	 * inside their DMEM */
+	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
+		ctrl->sync_mailbox_baseaddr[id] = virt_to_class_dmem(&class_sync_mailbox);
+		ctrl->msg_mailbox_baseaddr[id] = virt_to_class_dmem(&class_msg_mailbox);
+	}
+
+	for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
+		ctrl->sync_mailbox_baseaddr[id] = virt_to_tmu_dmem(&tmu_sync_mailbox);
+		ctrl->msg_mailbox_baseaddr[id] = virt_to_tmu_dmem(&tmu_msg_mailbox);
+	}
+
+#if !defined(CONFIG_UTIL_DISABLED)
+	ctrl->sync_mailbox_baseaddr[UTIL_ID] = virt_to_util_dmem(&util_sync_mailbox);
+	ctrl->msg_mailbox_baseaddr[UTIL_ID] = virt_to_util_dmem(&util_msg_mailbox);
+#endif
+
+	ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
+	ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr + ROUTE_TABLE_BASEADDR;
+	ctrl->ipsec_lmem_phys_baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR + IPSEC_LMEM_BASEADDR);
+	ctrl->ipsec_lmem_baseaddr = (LMEM_BASE_ADDR + IPSEC_LMEM_BASEADDR);
+
+	/* created sleeping; only woken up below once everything is ready */
+	ctrl->timer_thread = kthread_create(pfe_ctrl_timer, ctrl, "pfe_ctrl_timer");
+	if (IS_ERR(ctrl->timer_thread))
+	{
+		printk (KERN_ERR "%s: kthread_create() failed\n", __func__);
+		rc = PTR_ERR(ctrl->timer_thread);
+		goto err0;
+	}
+
+	ctrl->dma_pool = dma_pool_create("pfe_dma_pool_256B", pfe->dev, DMA_BUF_SIZE_256, DMA_BUF_MIN_ALIGNMENT, DMA_BUF_BOUNDARY);
+	if (!ctrl->dma_pool)
+	{
+		printk (KERN_ERR "%s: dma_pool_create() failed\n", __func__);
+		rc = -ENOMEM;
+		goto err1;
+	}
+
+	ctrl->dma_pool_512 = dma_pool_create("pfe_dma_pool_512B", pfe->dev, DMA_BUF_SIZE_512, DMA_BUF_MIN_ALIGNMENT, DMA_BUF_BOUNDARY);
+	if (!ctrl->dma_pool_512)
+	{
+		printk (KERN_ERR "%s: dma_pool_create() failed\n", __func__);
+		rc = -ENOMEM;
+		goto err2;
+	}
+
+	ctrl->dev = pfe->dev;
+
+	mutex_lock(&ctrl->mutex);
+
+	/* Initialize interface to fci */
+	rc = __pfe_ctrl_init();
+
+	mutex_unlock(&ctrl->mutex);
+
+	if (rc < 0)
+		goto err3;
+
+	wake_up_process(ctrl->timer_thread);
+
+	printk(KERN_INFO "%s finished\n", __func__);
+
+	initialized = 1;
+
+	return 0;
+
+	/* error unwinding: undo the completed steps in reverse order */
+err3:
+	dma_pool_destroy(ctrl->dma_pool_512);
+
+err2:
+	dma_pool_destroy(ctrl->dma_pool);
+
+err1:
+	kthread_stop(ctrl->timer_thread);
+
+err0:
+	return rc;
+}
+
+
+/** Tears down the PFE control context.
+* Reverses pfe_ctrl_init(): clears the module-wide 'initialized' flag,
+* shuts down the fci interface, destroys both DMA pools and stops the
+* timer thread.
+*
+* @param pfe	Global pfe context
+*/
+void pfe_ctrl_exit(struct pfe *pfe)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	/* reject new commands before tearing anything down */
+	initialized = 0;
+
+	__pfe_ctrl_exit();
+
+	dma_pool_destroy(ctrl->dma_pool);
+
+	dma_pool_destroy(ctrl->dma_pool_512);
+
+	kthread_stop(ctrl->timer_thread);
+}
diff --git a/pfe_ctrl/pfe_ctrl.h b/pfe_ctrl/pfe_ctrl.h
new file mode 100644
index 0000000..83def7e
--- /dev/null
+++ b/pfe_ctrl/pfe_ctrl.h
@@ -0,0 +1,88 @@
+#ifndef _PFE_CTRL_H_
+#define _PFE_CTRL_H_
+
+#include <linux/dmapool.h>
+
+#include "config.h"
+#include "pfe/pfe.h"
+
+#define DMA_BUF_SIZE_256 0x100 /* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
+#define DMA_BUF_SIZE_512 0x200 /* 512bytes dma allocated buffers used by rtp relay feature */
+#define DMA_BUF_MIN_ALIGNMENT 8
+#define DMA_BUF_BOUNDARY (4 * 1024) /* bursts can not cross 4k boundary */
+
+#define CMD_TX_ENABLE 0x0501
+#define CMD_TX_DISABLE 0x0502
+
+#define CMD_RX_LRO 0x0011
+#define CMD_PKTCAP_ENABLE 0x0d01
+#define CMD_QM_EXPT_RATE 0x020c
+
+#define EXPT_TYPE_PCAP 0x3
+
+/* Control path context, embedded in the global pfe structure */
+struct pfe_ctrl {
+	struct mutex mutex;			/* serializes the control command path */
+	spinlock_t lock;			/* protects msg_list (async command queue) */
+
+	void *dma_pool;				/* 256-byte DMA buffer pool */
+	void *dma_pool_512;			/* 512-byte DMA buffer pool */
+
+	struct device *dev;
+
+	void *hash_array_baseaddr;		/** Virtual base address of the conntrack hash array */
+	unsigned long hash_array_phys_baseaddr; /** Physical base address of the conntrack hash array */
+
+	struct task_struct *timer_thread;	/* kthread running the control timers */
+	struct list_head timer_list;		/* list of TIMER_ENTRY, run under mutex */
+	unsigned long timer_period;		/* timer tick, in jiffies */
+
+	int (*event_cb)(u16, u16, u16*);	/* FCI callback for asynchronous events */
+
+	unsigned long sync_mailbox_baseaddr[MAX_PE]; /* Sync mailbox PFE internal address, initialized when parsing elf images */
+	unsigned long msg_mailbox_baseaddr[MAX_PE]; /* Msg mailbox PFE internal address, initialized when parsing elf images */
+
+	/* PE-side base addresses of the shared data sections -- presumably
+	 * set while loading the PE ELF images (set outside this file) */
+	unsigned long class_dmem_sh;
+	unsigned long class_pe_lmem_sh;
+	unsigned long tmu_dmem_sh;
+	unsigned long util_dmem_sh;
+	unsigned long util_ddr_sh;
+	struct clk *clk_axi;
+	unsigned int sys_clk;			// AXI clock value, in KHz
+	void *ipsec_lmem_baseaddr;
+	unsigned long ipsec_lmem_phys_baseaddr;
+
+	/* used for asynchronous message transfer to PFE */
+	struct list_head msg_list;
+	struct work_struct work;
+};
+
+int pfe_ctrl_init(struct pfe *pfe);
+void pfe_ctrl_exit(struct pfe *pfe);
+
+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
+void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
+int pe_request(struct pfe_ctrl *ctrl, int id,unsigned short cmd_type, unsigned long dst, unsigned long src, int len);
+int pe_read(struct pfe_ctrl *ctrl, int id, u32 *dst, unsigned long src, int len, int clear_flag);
+int tmu_pe_request(struct pfe_ctrl *ctrl, int id, unsigned int tmu_cmd_bitmask);
+
+int pfe_ctrl_set_eth_state(int id, unsigned int state, unsigned char *mac_addr);
+int pfe_ctrl_set_lro(char enable);
+#ifdef CFG_PCAP
+int pfe_ctrl_set_pcap(char enable);
+int pfe_ctrl_set_pcap_ratelimit(u32 pkts_per_msec);
+#endif
+
+int relax(unsigned long end);
+
+/* used for asynchronous message transfer to PFE */
+#define FPP_MAX_MSG_LENGTH 256 /* expressed in U8 -> 256 bytes*/
+struct fpp_msg {
+ struct list_head list;
+ void (*callback)(unsigned long, int, u16, u16 *);
+ unsigned long data;
+ u16 fcode;
+ u16 length;
+ u16 *payload;
+};
+
+#endif /* _PFE_CTRL_H_ */
diff --git a/pfe_ctrl/pfe_ctrl_hal.c b/pfe_ctrl/pfe_ctrl_hal.c
new file mode 100644
index 0000000..bbeb60e
--- /dev/null
+++ b/pfe_ctrl/pfe_ctrl_hal.c
@@ -0,0 +1,212 @@
+
+/* OS abstraction functions used by PFE control code */
+
+#include <linux/slab.h>
+
+#include "pfe_ctrl_hal.h"
+
+#include "pfe_mod.h"
+
+extern char *__class_dmem_sh;
+extern char *__class_pe_lmem_sh;
+extern char *__tmu_dmem_sh;
+extern char *__util_dmem_sh;
+extern char *__util_ddr_sh;
+
+HostMessage msg_buf;
+static int msg_buf_used = 0;
+
+/* Translate a pointer into either class shared area to its PE-side address:
+ * addresses at/above the PE-LMEM section symbol go to LMEM, the rest to DMEM. */
+unsigned long virt_to_class(void *p)
+{
+	if (!p)
+		return 0;
+
+	return (p >= (void *)&__class_pe_lmem_sh) ?
+		virt_to_class_pe_lmem(p) : virt_to_class_dmem(p);
+}
+
+/* Translate a kernel pointer in the class DMEM shared area to its PE DMEM address
+ * (0 for NULL). */
+unsigned long virt_to_class_dmem(void *p)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	if (!p)
+		return 0;
+
+	return ctrl->class_dmem_sh + ((unsigned long)p - (unsigned long)&__class_dmem_sh);
+}
+
+/* Translate a kernel pointer in the class PE-LMEM shared area to its PE LMEM
+ * address (0 for NULL). */
+unsigned long virt_to_class_pe_lmem(void *p)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	if (!p)
+		return 0;
+
+	return ctrl->class_pe_lmem_sh + ((unsigned long)p - (unsigned long)&__class_pe_lmem_sh);
+}
+
+/* Translate a kernel pointer in the TMU DMEM shared area to its PE DMEM address
+ * (0 for NULL). */
+unsigned long virt_to_tmu_dmem(void *p)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	if (!p)
+		return 0;
+
+	return ctrl->tmu_dmem_sh + ((unsigned long)p - (unsigned long)&__tmu_dmem_sh);
+}
+
+
+/* Translate a kernel pointer in the util DMEM shared area to its PE DMEM address
+ * (0 for NULL). */
+unsigned long virt_to_util_dmem(void *p)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	if (!p)
+		return 0;
+
+	return ctrl->util_dmem_sh + ((unsigned long)p - (unsigned long)&__util_dmem_sh);
+}
+
+/** Returns the DDR physical address of a Util PE shared DDR variable.
+ *
+ * @param p pointer (kernel space, virtual) to be converted to a physical address.
+ * @return physical address, or 0 when p is NULL
+ */
+unsigned long virt_to_util_ddr(void *p)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	if (!p)
+		return 0;
+
+	return ctrl->util_ddr_sh + ((unsigned long)p - (unsigned long)&__util_ddr_sh);
+}
+/** Returns the virtual address of a Util PE shared DDR variable.
+ *
+ * @param p pointer (kernel space, virtual) to be converted to a pointer (usable in kernel space)
+ * pointing to the actual data.
+ * @return kernel-usable pointer, or NULL when p is NULL
+ */
+void * virt_to_util_virt(void *p)
+{
+	return p ? DDR_PHYS_TO_VIRT(virt_to_util_ddr(p)) : NULL;
+}
+
+/* Convert a virtual IRAM pointer to its physical address (0 for NULL). */
+unsigned long virt_to_phys_iram(void *p)
+{
+	if (!p)
+		return 0;
+
+	return pfe->iram_phys_baseaddr + (p - pfe->iram_baseaddr);
+}
+
+/* Convert a virtual IPsec LMEM pointer to its physical address (0 for NULL). */
+unsigned long virt_to_phys_ipsec_lmem(void *p)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	if (!p)
+		return 0;
+
+	return ctrl->ipsec_lmem_phys_baseaddr + (p - ctrl->ipsec_lmem_baseaddr);
+}
+
+/* Convert a virtual IPsec AXI pointer to its physical address (0 for NULL). */
+unsigned long virt_to_phys_ipsec_axi(void *p)
+{
+	if (!p)
+		return 0;
+
+	return pfe->ipsec_phys_baseaddr + (p - pfe->ipsec_baseaddr);
+}
+
+
+/* Hand out the single static host-message buffer, or NULL if it is already
+ * in use (only one message can be in flight at a time). */
+HostMessage *msg_alloc(void)
+{
+	if (msg_buf_used) {
+		printk(KERN_ERR "%s: failed\n", __func__);
+		return NULL;
+	}
+
+	msg_buf_used = 1;
+	return &msg_buf;
+}
+
+/** Releases the shared host message buffer obtained from msg_alloc().
+ *
+ * @param msg  message buffer (unused: there is a single static buffer)
+ */
+void msg_free(HostMessage *msg)
+{
+	/* Double-free is only reported, not fatal. */
+	if (!msg_buf_used)
+		printk(KERN_ERR "%s: freeing already free msg buffer\n", __func__);
+
+	msg_buf_used = 0;
+}
+
+/* Deliver a host message through the registered event callback (if any) and
+ * release the buffer.  Returns 0 on successful delivery, -1 otherwise. */
+int msg_send(HostMessage *msg)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+	int rc = -1;
+
+	if (ctrl->event_cb && ctrl->event_cb(msg->code, msg->length, msg->data) >= 0)
+		rc = 0;
+
+	msg_free(msg);
+
+	return rc;
+}
+
+
+/* Prepare a timer entry: record its handler and mark it as not running. */
+void timer_init(TIMER_ENTRY *timer, TIMER_HANDLER handler)
+{
+	timer->running = 0;
+	timer->handler = handler;
+}
+
+
+/* (Re)arm a software timer with the given period; safe to call while the
+ * timer is already queued (only the period/timeout are refreshed then). */
+void timer_add(TIMER_ENTRY *timer, u16 granularity)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	timer->period = granularity;
+	timer->timeout = jiffies + timer->period;
+
+	/* Link into the running list only once. */
+	if (!timer->running) {
+		timer->running = 1;
+		list_add(&timer->list, &ctrl->timer_list);
+	}
+}
+
+
+/* Remove a software timer from the running list; no-op if it is not queued. */
+void timer_del(TIMER_ENTRY *timer)
+{
+	if (!timer->running)
+		return;
+
+	list_del(&timer->list);
+	timer->running = 0;
+}
+
+
+/* Allocate a buffer for control-path use.
+ * FIXME we may want to use dma API's and use non cacheable memory */
+void *Heap_Alloc(int size)
+{
+	return pfe_kmalloc(size, GFP_KERNEL);
+}
+
+
+/* Release a buffer obtained from Heap_Alloc(). */
+void Heap_Free(void *p)
+{
+	pfe_kfree(p);
+}
diff --git a/pfe_ctrl/pfe_ctrl_hal.h b/pfe_ctrl/pfe_ctrl_hal.h
new file mode 100644
index 0000000..34d39da
--- /dev/null
+++ b/pfe_ctrl/pfe_ctrl_hal.h
@@ -0,0 +1,111 @@
+
+#ifndef _PFE_CTRL_HAL_H_
+#define _PFE_CTRL_HAL_H_
+
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/elf.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#include "pfe_mod.h"
+
+/* Place a shared variable into the dedicated linker section that mirrors the
+ * corresponding PFE memory (class DMEM / class PE-LMEM / TMU DMEM / util
+ * DMEM / util DDR).  The variable name is encoded into the section name so
+ * the control link script can lay the variables out individually. */
+#define CLASS_DMEM_SH(var) __attribute__((section(".class_dmem_sh_" #var))) var
+#define CLASS_PE_LMEM_SH(var) __attribute__((section(".class_pe_lmem_sh_" #var))) var
+#define TMU_DMEM_SH(var) __attribute__((section(".tmu_dmem_sh_" #var))) var
+#define UTIL_DMEM_SH(var) __attribute__((section(".util_dmem_sh_" #var))) var
+#define UTIL_DDR_SH(var) __attribute__((section(".util_ddr_sh_" #var))) var
+
+/* Same as above, but also prefix the host-side symbol (class_/tmu_/util_) so
+ * the same variable name can be shadowed for several PE types in one file. */
+#define CLASS_DMEM_SH2(var) __attribute__((section(".class_dmem_sh_" #var))) class_##var
+#define CLASS_PE_LMEM_SH2(var) __attribute__((section(".class_pe_lmem_sh_" #var))) class_##var
+#define TMU_DMEM_SH2(var) __attribute__((section(".tmu_dmem_sh_" #var))) tmu_##var
+#define UTIL_DMEM_SH2(var) __attribute__((section(".util_dmem_sh_" #var))) util_##var
+
+/** Translate the name of a shared variable to its PFE counterpart.
+ * Those macros may be used to determine the address of a shared variable,
+ * and will work even if the variable is accessed through a macro, as is the case
+ * with most fields of gFppGlobals.
+ */
+#define CONCAT(str, var) str##var
+#define CLASS_VARNAME2(var) CONCAT(class_, var)
+#define UTIL_VARNAME2(var) CONCAT(util_, var)
+#define TMU_VARNAME2(var) CONCAT(tmu_, var)
+
+/* Message handed to the host through the control event callback (see
+ * msg_send()); a single static instance exists (msg_buf in pfe_ctrl_hal.c). */
+typedef struct tHostMessage {
+	u16 length;	/* payload length, passed to the event callback */
+	u16 code;	/* message/function code */
+	u16 data[128];	/* payload */
+} HostMessage;
+
+HostMessage *msg_alloc(void);
+void msg_free(HostMessage *msg);
+int msg_send(HostMessage *msg);
+
+
+unsigned long virt_to_class(void *p);
+unsigned long virt_to_class_dmem(void *p);
+unsigned long virt_to_class_pe_lmem(void *p);
+unsigned long virt_to_tmu_dmem(void *p);
+unsigned long virt_to_util_dmem(void *p);
+unsigned long virt_to_util_ddr(void *p);
+void * virt_to_util_virt(void *p);
+unsigned long virt_to_phys_iram(void *p);
+unsigned long virt_to_phys_ipsec_lmem(void *p);
+unsigned long virt_to_phys_ipsec_axi(void *p);
+
+
+#define TIMER_TICKS_PER_SEC 100
+
+#if TIMER_TICKS_PER_SEC > HZ
+#error TIMER_TICKS_PER_SEC is too high
+#endif
+
+
+typedef void (* TIMER_HANDLER)(void);
+
+/* Software timer serviced by the control timer thread; callers of the
+ * timer_* API must hold ctrl->mutex (see the function docs below). */
+typedef struct {
+	struct list_head list;	/* linkage in pfe_ctrl.timer_list while running */
+	unsigned long timeout;	/* next expiry, in jiffies (set by timer_add) */
+	unsigned long period;	/* reload period, in timer tick units */
+	TIMER_HANDLER handler;	/* invoked on expiry */
+	char running;	/* non-zero while linked into the timer list */
+} TIMER_ENTRY;
+
+
+/** Initializes a timer structure.
+* Must be called once for each TIMER_ENTRY structure.
+* The caller must be holding the ctrl->mutex.
+*
+* @param timer pointer to the timer to be initialized
+* @param handler timer handler function pointer
+*
+*/
+void timer_init(TIMER_ENTRY *timer, TIMER_HANDLER handler);
+
+/** Adds a timer to the running timer list.
+* It's safe to call even if the timer was already running. In this case we just update the granularity.
+* The caller must be holding the ctrl->mutex.
+*
+* @param timer pointer to the timer to be added
+* @param granularity granularity of the timer (in timer tick units)
+*
+*/
+void timer_add(TIMER_ENTRY *timer, u16 granularity);
+
+/** Deletes a timer from the running timer list.
+* It's safe to call even if the timer is no longer running.
+* The caller must be holding the ctrl->mutex.
+*
+* @param timer pointer to the timer to be removed
+*/
+void timer_del(TIMER_ENTRY *timer);
+
+void *Heap_Alloc(int size);
+
+#define Heap_Alloc_ARAM(s) Heap_Alloc(s)
+#define __Heap_Alloc(h, s) Heap_Alloc(s)
+void Heap_Free(void *p);
+
+#endif /* _PFE_CTRL_HAL_H_ */
diff --git a/pfe_ctrl/pfe_diags.c b/pfe_ctrl/pfe_diags.c
new file mode 100644
index 0000000..cc13ed1
--- /dev/null
+++ b/pfe_ctrl/pfe_diags.c
@@ -0,0 +1,874 @@
+#include<linux/ioctl.h>
+
+/*
+ * fppdiag_driver.c
+ *
+ * Copyright (C) 2004,2005 Mindspeed Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+
+#ifdef FPP_DIAGNOSTICS
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/stat.h> /* TODO check if needed */
+#include <linux/cdev.h> /* TODO check if needed */
+#include <linux/ioctl.h>
+#include <linux/semaphore.h>
+#include <linux/cpumask.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/memory.h>
+
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
+#include <mach/hardware.h>
+#include <mach/debug.h>
+#include <mach/memory.h>
+#include <linux/sched.h>
+#endif
+
+#include "pfe_ctrl_hal.h"
+#include "pfe_mod.h"
+#include "pfe_diags.h"
+
+/* Host-side shadows of the PE fppdiag configuration.  The SH2 section
+ * attributes place them in the shared class/util DMEM layout so their
+ * addresses can be translated with virt_to_class_dmem()/virt_to_util_dmem(). */
+struct fppdiag_config CLASS_DMEM_SH2(fppdiagconfig) __attribute__((aligned(8)));
+struct fppdiag_config UTIL_DMEM_SH2(fppdiagconfig) __attribute__((aligned(8)));
+
+/* Hardware PE id for each diag slot: 6 class PEs plus the util PE (the TMU
+ * PEs have no diags, see NUM_PE_DIAGS). */
+static int pe_id[NUM_PE_DIAGS] = { CLASS0_ID, CLASS1_ID, CLASS2_ID, CLASS3_ID, CLASS4_ID, CLASS5_ID, UTIL_ID };
+
+/* Printable name per diag slot, indexed like pe_id[]. */
+static char pe_names[7][4] = {
+	"PE0",
+	"PE1",
+	"PE2",
+	"PE3",
+	"PE4",
+	"PE5",
+	"UPE"};
+
+
+
+/** Enables the FPP diagnostics for a single PE by allocating and filling the ring buffers with the given ring size.
+ *
+ * Allocates ring_size one-page ring buffers, DMA-maps them, publishes the
+ * resulting (big-endian) configuration into the PE's DMEM with the PE
+ * stopped, then restarts the PE.
+ *
+ * @param pe_index   index into pfe->diags.fppdiag_drv_data[] / pe_id[]
+ * @param ring_size  number of one-page ring buffers to allocate (must be > 0)
+ * @return 0 on success, -1 on any allocation failure
+ */
+static int pfediag_enable(int pe_index, int ring_size)
+{
+	int i, j;
+	struct fppdiag_drv_dat *fppdiag_driver_data = &pfe->diags.fppdiag_drv_data[pe_index];
+	struct fppdiag_config fppconfig;
+
+	if(ring_size <= 0)
+		return -1;
+
+	/* Allocate memory to store a ring_size of pages */
+	/* pfe_rng_baseaddr holds big-endian bus addresses (for the PE),
+	 * virt_rng_baseaddr the matching kernel-virtual page pointers. */
+	fppdiag_driver_data->pfe_rng_baseaddr = pfe_kzalloc((sizeof(u32) * ring_size), GFP_KERNEL);
+	if(!fppdiag_driver_data->pfe_rng_baseaddr)
+		return -1; /* Failure */
+
+	fppdiag_driver_data->virt_rng_baseaddr = pfe_kzalloc((sizeof(void *) * ring_size), GFP_KERNEL);
+	if(!fppdiag_driver_data->virt_rng_baseaddr)
+		goto err_virt;
+
+
+	/* Allocate pages */
+	for( i = 0; i < ring_size; i++)
+	{
+		void *ring_entry;
+		dma_addr_t phys_ring_entry;
+		ring_entry = (void *) get_zeroed_page(GFP_KERNEL);
+
+		if(!ring_entry)
+			break;
+		fppdiag_driver_data->virt_rng_baseaddr[i] = ring_entry;
+		/* Invalidate the cache for later use by FPP */
+		phys_ring_entry = dma_map_single(pfe->dev, ring_entry, PAGE_SIZE, DMA_FROM_DEVICE);
+		/* Stored big-endian: this table is read by the PE. */
+		fppdiag_driver_data->pfe_rng_baseaddr[i] = cpu_to_be32((u32) phys_ring_entry);
+	}
+
+	/* In Failure case freeing the other pages */
+	if (i != ring_size)
+		goto err_ring;
+
+	/* Map the address table itself so the PE can fetch it. */
+	fppdiag_driver_data->fpp_config.rng_baseaddr = (u32 *) dma_map_single(pfe->dev, fppdiag_driver_data->pfe_rng_baseaddr, (sizeof(void **) * ring_size), DMA_TO_DEVICE);
+
+	/* Start with empty ring indexes. */
+	writel(0, &fppdiag_driver_data->virt_fppdiagctl->read_index);
+	writel(0, &fppdiag_driver_data->virt_fppdiagctl->write_index);
+
+	fppdiag_driver_data->fpp_config.rng_size = ring_size;
+	fppdiag_driver_data->fpp_config.diag_ctl_flag |= FPPDIAG_CTL_ENABLE;
+
+	/* Byte-swap a full copy of the config for the (big-endian) PE. */
+	fppconfig.rng_baseaddr = (u32 *) cpu_to_be32((u32) fppdiag_driver_data->fpp_config.rng_baseaddr);
+	fppconfig.rng_size = cpu_to_be32(fppdiag_driver_data->fpp_config.rng_size);
+	fppconfig.diag_ctl_flag = cpu_to_be16(fppdiag_driver_data->fpp_config.diag_ctl_flag);
+	fppconfig.diag_log_flag = cpu_to_be16(fppdiag_driver_data->fpp_config.diag_log_flag);
+	fppconfig.diag_mod_flag = cpu_to_be32(fppdiag_driver_data->fpp_config.diag_mod_flag);
+	fppconfig.fppdiagctl = (fppdiag_ctl_t *) cpu_to_be32((u32) fppdiag_driver_data->fpp_config.fppdiagctl);
+
+	// Copy fpp_config to PE DMEM
+	/* The PE is stopped while its DMEM is written, then restarted. */
+	pe_sync_stop(&pfe->ctrl, pe_id[pe_index]);
+	if (pe_id[pe_index] != UTIL_ID) {
+		pe_dmem_memcpy_to32(pe_id[pe_index], virt_to_class_dmem(&class_fppdiagconfig), &fppconfig, sizeof(struct fppdiag_config));
+	}
+	else {
+		pe_dmem_memcpy_to32(pe_id[pe_index], virt_to_util_dmem(&util_fppdiagconfig), &fppconfig, sizeof(struct fppdiag_config));
+	}
+	pe_start(&pfe->ctrl, pe_id[pe_index]);
+
+	/* NOTE(review): format string runs the PE name and "diagctl" together
+	 * (missing separator after %s). */
+	printk(KERN_INFO "PFE diags enabled for %sdiagctl = 0x%08x, rng_base_addr = 0x%08x\n",
+			pe_names[pe_index],
+			(u32) fppdiag_driver_data->fpp_config.fppdiagctl,
+			(u32) fppdiag_driver_data->fpp_config.rng_baseaddr);
+
+
+	return 0;
+
+err_ring:
+	/* Memory allocation failed above */
+	/* Unwind only the i pages that were successfully allocated/mapped. */
+	for (j = 0; j < i; j++) {
+		dma_unmap_single(pfe->dev, be32_to_cpu(fppdiag_driver_data->pfe_rng_baseaddr[j]), PAGE_SIZE, DMA_FROM_DEVICE);
+		free_page((u32) fppdiag_driver_data->virt_rng_baseaddr[j]);
+	}
+
+	pfe_kfree(fppdiag_driver_data->virt_rng_baseaddr);
+	fppdiag_driver_data->virt_rng_baseaddr = NULL;
+err_virt:
+	pfe_kfree(fppdiag_driver_data->pfe_rng_baseaddr);
+	fppdiag_driver_data->pfe_rng_baseaddr = NULL;
+	fppdiag_driver_data->fpp_config.rng_baseaddr = NULL;
+
+
+	return -1; /* Failure */
+}
+
+
+/** Enables FPP diagnostics on every diag-capable PE with the same ring size.
+ * Individual per-PE failures are ignored, as in the per-PE enable path.
+ */
+static void pfediag_enable_all(int ring_size)
+{
+	int pe_idx;
+
+	for (pe_idx = 0; pe_idx < NUM_PE_DIAGS; pe_idx++)
+		pfediag_enable(pe_idx, ring_size);
+}
+
+
+/** Disables the FPP diagnostics for a single PE and frees the ring memory.
+ *
+ * Pushes a "disabled" configuration into the PE's DMEM (with the PE stopped),
+ * resets the ring indexes, then unmaps and frees all ring pages and tables.
+ *
+ * @param pe_index  index into pfe->diags.fppdiag_drv_data[] / pe_id[]
+ * @return 0 (also when diags were already disabled)
+ */
+static int pfediag_disable(int pe_index)
+{
+	struct fppdiag_drv_dat *fppdiag_driver_data = &pfe->diags.fppdiag_drv_data[pe_index];
+	int i;
+	struct fppdiag_config fppconfig;
+
+	/* if fppdiag has already been disabled, we wouldn't want to disable it again,
+	   to mark this we will check the validity of the rng array */
+	if (!fppdiag_driver_data->fpp_config.rng_baseaddr)
+		return 0;
+
+	/* Build the full "disabled" configuration for the PE.  Every field is
+	 * initialized: the original only set three of them and copied
+	 * uninitialized stack bytes to PE DMEM.  The 16-bit flags use
+	 * cpu_to_be16() to match the enable path (the original wrongly applied
+	 * cpu_to_be32() to diag_ctl_flag). */
+	fppconfig.rng_baseaddr = NULL;
+	fppconfig.rng_size = cpu_to_be32(DEFAULT_RING_SIZE);
+	fppconfig.diag_ctl_flag = cpu_to_be16(fppdiag_driver_data->fpp_config.diag_ctl_flag & ~FPPDIAG_CTL_ENABLE);
+	fppconfig.diag_log_flag = cpu_to_be16(fppdiag_driver_data->fpp_config.diag_log_flag);
+	fppconfig.diag_mod_flag = cpu_to_be32(fppdiag_driver_data->fpp_config.diag_mod_flag);
+	fppconfig.fppdiagctl = (fppdiag_ctl_t *) cpu_to_be32((u32) fppdiag_driver_data->fpp_config.fppdiagctl);
+
+	// Copy fpp_config to PE DMEM
+	pe_sync_stop(&pfe->ctrl, pe_id[pe_index]);
+	if (pe_id[pe_index] != UTIL_ID) {
+		pe_dmem_memcpy_to32(pe_id[pe_index], virt_to_class_dmem(&class_fppdiagconfig), &fppconfig, sizeof(struct fppdiag_config));
+	}
+	else {
+		/* Fix: the util PE variable lives in util DMEM; the original
+		 * translated it with virt_to_class_dmem() (compare the enable path). */
+		pe_dmem_memcpy_to32(pe_id[pe_index], virt_to_util_dmem(&util_fppdiagconfig), &fppconfig, sizeof(struct fppdiag_config));
+	}
+	pe_start(&pfe->ctrl, pe_id[pe_index]);
+
+	/* Reset the ring indexes now that the PE no longer produces entries. */
+	writel(0, &fppdiag_driver_data->virt_fppdiagctl->read_index);
+	writel(0, &fppdiag_driver_data->virt_fppdiagctl->write_index);
+
+	/* Unmap and free every ring page, then the address table and both arrays. */
+	for( i = 0; i < fppdiag_driver_data->fpp_config.rng_size; i++)
+	{
+		if ( fppdiag_driver_data->pfe_rng_baseaddr[i])
+		{
+			/* Invalidate the cache */
+			dma_unmap_single(pfe->dev, be32_to_cpu(fppdiag_driver_data->pfe_rng_baseaddr[i]), PAGE_SIZE, DMA_FROM_DEVICE);
+			free_page((u32) fppdiag_driver_data->virt_rng_baseaddr[i]);
+		}
+	}
+	dma_unmap_single(pfe->dev, (dma_addr_t) fppdiag_driver_data->fpp_config.rng_baseaddr,
+			(sizeof(void **) * fppdiag_driver_data->fpp_config.rng_size), DMA_TO_DEVICE);
+	pfe_kfree(fppdiag_driver_data->pfe_rng_baseaddr);
+	fppdiag_driver_data->pfe_rng_baseaddr = NULL;
+	fppdiag_driver_data->fpp_config.rng_baseaddr = NULL;
+
+	pfe_kfree(fppdiag_driver_data->virt_rng_baseaddr);
+	fppdiag_driver_data->virt_rng_baseaddr = NULL;
+
+	fppdiag_driver_data->fpp_config.rng_size = DEFAULT_RING_SIZE;
+	fppdiag_driver_data->fpp_config.diag_ctl_flag &= ~FPPDIAG_CTL_ENABLE;
+
+	printk(KERN_INFO "PFE diagnostics for %s disabled.\n", pe_names[pe_index]);
+	return 0;
+}
+
+/** Disables FPP diagnostics on every diag-capable PE.
+ */
+static void pfediag_disable_all(void)
+{
+	int pe_idx;
+
+	for (pe_idx = 0; pe_idx < NUM_PE_DIAGS; pe_idx++)
+		pfediag_disable(pe_idx);
+}
+
+#if 0 // TODO update code to work with PFE.
+/* This function checks for available entries written into the common buffers */
+static int fppdiag_get_avail_entries(int* fppdiag_read_index)
+{
+ int read_index, write_index;
+ fppdiag_ctl_t *fppdiagctl= (fppdiag_ctl_t *)(ARAM_DIAG_CTL_ADDR);
+
+ write_index = readl( &fppdiagctl->write_index);
+ read_index = readl( &fppdiagctl->read_index);
+ *fppdiag_read_index = read_index;
+ if(write_index < read_index )
+ {
+ return (fppdiag_drv_data->fpp_config.rng_size * FPPDIAG_ENTRIES_PER_PAGE)
+ - (read_index - write_index);
+ }
+ else
+ return (write_index - read_index);
+}
+
+static int fppdiag_update_fpp(struct fppdiag_config * fpp_config)
+{
+ short rlen = 1;
+ unsigned char rmsg[2] = {};
+ struct fppdiag_config msg = {} ;
+ msg.diag_mod_flag = fpp_config->diag_mod_flag;
+ msg.diag_log_flag = fpp_config->diag_log_flag;
+
+ if(comcerto_fpp_send_command(CMD_FPPDIAG_UPDATE, sizeof(msg),
+ (unsigned short *)&msg,
+ &rlen, rmsg))
+ return -1;
+
+ return 0;
+
+}
+
+static int fppdiag_update_diagmodule(pFPPDIAGCMD pFppCmd)
+{
+ if (pFppCmd->flags_enable)
+ fppdiag_drv_data->fpp_config.diag_mod_flag |= pFppCmd->flags.module;
+ else
+ fppdiag_drv_data->fpp_config.diag_mod_flag &= ~(pFppCmd->flags.module);
+ fppdiag_update_fpp(&fppdiag_drv_data->fpp_config);
+ return 0;
+}
+
+static int fppdiag_update_diaglog(pFPPDIAGCMD pFppCmd)
+{
+ if (pFppCmd->flags_enable)
+ fppdiag_drv_data->fpp_config.diag_log_flag |= pFppCmd->flags.log;
+ else
+ fppdiag_drv_data->fpp_config.diag_log_flag &= ~(pFppCmd->flags.log);
+ fppdiag_update_fpp(&fppdiag_drv_data->fpp_config);
+ return 0;
+}
+
+/* This function gets the diag information to present it to the user */
+static int fppdiag_get_diaginfo(pFPPDIAGINFO p_fppdiag_info)
+{
+ p_fppdiag_info->state = fppdiag_drv_data->fpp_config.diag_ctl_flag;
+ p_fppdiag_info->module = fppdiag_drv_data->fpp_config.diag_mod_flag;
+ p_fppdiag_info->log = fppdiag_drv_data->fpp_config.diag_log_flag;
+
+ return 0;
+}
+
+
+
+/* This function updates the read index once the information is updated from
+ * the user */
+static int fppdiag_update_read_index(pFPPDIAGDATA pFppData)
+{
+ fppdiag_ctl_t *fppdiagctl= (fppdiag_ctl_t *)(ARAM_DIAG_CTL_ADDR);
+ int read_index = readl( &fppdiagctl->read_index);
+ int diag_total_entries = fppdiag_drv_data->fpp_config.rng_size * FPPDIAG_ENTRIES_PER_PAGE;
+ unsigned long diag_data_addr;
+ unsigned int diag_page_addr;
+ int i = 0;
+
+ if (!fppdiag_drv_data->fpp_config.rng_baseaddr)
+ {
+ printk(KERN_INFO "ring base address is NULL\n");
+ return 0;
+ }
+
+ // TODO: This should be possible in a more efficient manner.
+ /* Invalidate the cache here for the entries read
+ * (old read_index to new read_index */
+
+ for (i = 0; i < pFppData->entries_to_read; i++)
+ {
+ diag_page_addr = (void *)phys_to_virt(be32_to_cpu(
+ fppdiag_drv_data->fpp_config.rng_baseaddr[read_index/FPPDIAG_ENTRIES_PER_PAGE]));
+ diag_data_addr = diag_page_addr +
+ ((read_index & FPPDIAG_ENTRY_MASK) << FPPDIAG_ENTRY_SHIFT );
+ dma_sync_single_for_cpu(pfe->dev, diag_data_addr, FPPDIAG_ENTRY_SIZE, DMA_FROM_DEVICE);
+
+ read_index++;
+ if (read_index == diag_total_entries)
+ read_index = 0;
+ }
+
+
+ writel(pFppData->read_index, &fppdiagctl->read_index);
+
+ return 0;
+}
+
+static unsigned int fppdiag_poll(struct file *filp, poll_table *wait)
+{
+ int read_index = 0;
+ int avail_entries = fppdiag_get_avail_entries(&read_index);
+
+ avail_entries = fppdiag_get_avail_entries(&read_index);
+ if(avail_entries == 0)
+ {
+ schedule();
+ }
+ return POLLIN;
+}
+
+
+static int fppdiag_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int fppdiag_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static long fppdiag_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long param)
+{
+ int rc = 0;
+ mutex_lock(&fppdiag_drv_data->lock);
+ switch (cmd)
+ {
+ case FPPDIAG_DRV_GET_RINGSIZE:
+ {
+ FPPCONFIG fppconfig;
+ pFPPCONFIG pFppConfig = (FPPCONFIG*) param;
+ fppconfig.ring_size = fppdiag_drv_data->fpp_config.rng_size;;
+ if (copy_to_user(pFppConfig, &fppconfig ,sizeof(FPPCONFIG))) {
+ printk(KERN_INFO " FPPDIAG_CONFIG Error");
+ rc = -EINVAL;
+ break;
+ }
+ }
+ break;
+ case FPPDIAG_DRV_GETDATA:
+ {
+ int read_index = 0;
+ int avail_entries = 0;
+ FPPDIAGDATA fppdiag_start_data;
+ pFPPDIAGDATA pFppDiagData = (FPPDIAGDATA*) param;
+ avail_entries = fppdiag_get_avail_entries(&read_index);
+ if (avail_entries > FPPDIAG_MAX_AVBL_ENTRIES)
+ avail_entries = FPPDIAG_MAX_AVBL_ENTRIES;
+
+ fppdiag_start_data.read_index = read_index;
+ fppdiag_start_data.entries_to_read = avail_entries;
+
+ if (copy_to_user(pFppDiagData, &fppdiag_start_data,
+ sizeof(FPPDIAGDATA))) {
+ printk(KERN_INFO " FPPDIAG_GETDATA:Error");
+ rc = -EINVAL;
+ break;
+ }
+ }
+ break;
+ case FPPDIAG_DRV_FINDATA:
+ {
+ FPPDIAGDATA fppdiag_fin_data;
+ pFPPDIAGDATA pFppDiagData = (FPPDIAGDATA*) param;
+
+ if (copy_from_user(&fppdiag_fin_data,pFppDiagData,sizeof(FPPDIAGDATA))) {
+ printk(KERN_INFO " FPPDIAG_FINDATA:Error");
+ rc = -EINVAL;
+ break;
+ }
+ fppdiag_update_read_index(&fppdiag_fin_data);
+
+ }
+ break;
+ case FPPDIAG_DRV_SET_STATE:
+ {
+
+ FPPDIAGCMD fppdiag_state_cmd;
+ pFPPDIAGCMD pFppDiagCmd = (FPPDIAGCMD*) param;
+
+ if (copy_from_user(&fppdiag_state_cmd,pFppDiagCmd,
+ sizeof(FPPDIAGCMD))) {
+ printk(KERN_INFO " FPPDIAG_STATE:Error");
+ rc = -EINVAL;
+ break;
+ }
+ pfediag_disable();
+ if(fppdiag_state_cmd.flags.state_size & 0xFF)
+ {
+ /* state_size holds MS 2 bytes of the ring buffer size and the LS 1 byte
+ holds enable/disable flag. */
+ if(pfediag_enable(&fppdiag_drv_data[0] /* FIXME */, fppdiag_state_cmd.flags.state_size >> 8 ) < 0)
+ {
+ printk(KERN_INFO " FPPDIAG_DRV_SET_STATE Error");
+ rc = -EINVAL;
+ break;
+ }
+ }
+ //fppdiag_update_diagstate(&fppdiag_state_cmd);
+ }
+ break;
+ case FPPDIAG_DRV_SET_MODULE:
+ {
+
+ FPPDIAGCMD fppdiag_state_cmd;
+ pFPPDIAGCMD pFppDiagCmd = (FPPDIAGCMD*) param;
+
+ if (copy_from_user(&fppdiag_state_cmd,pFppDiagCmd,
+ sizeof(FPPDIAGCMD))) {
+ printk(KERN_INFO " FPPDIAG_MODULE:Error");
+ rc = -EINVAL;
+ break;
+ }
+ fppdiag_update_diagmodule(&fppdiag_state_cmd);
+ }
+ break;
+ case FPPDIAG_DRV_SET_LOG:
+ {
+ FPPDIAGCMD fppdiag_state_cmd;
+ pFPPDIAGCMD pFppDiagCmd = (FPPDIAGCMD*) param;
+
+ if (copy_from_user(&fppdiag_state_cmd,pFppDiagCmd,
+ sizeof(FPPDIAGCMD))) {
+ printk(KERN_INFO " FPPDIAG_LOG:Error");
+ rc = -EINVAL;
+ break;
+ }
+ fppdiag_update_diaglog(&fppdiag_state_cmd);
+ }
+ break;
+ case FPPDIAG_DRV_GET_INFO:
+ {
+ FPPDIAGINFO fppdiag_info;
+ pFPPDIAGINFO p_fppdiag_info = (pFPPDIAGINFO) param;
+
+ fppdiag_get_diaginfo(&fppdiag_info);
+
+ if (copy_to_user(p_fppdiag_info, &fppdiag_info ,
+ sizeof(FPPDIAGINFO))) {
+ printk(KERN_INFO " FPPDIAG_DRV_GET_INFO Error");
+ rc = -EINVAL;
+ break;
+ }
+ }
+ break;
+ case FPPDIAG_DRV_DUMP_COUNTERS:
+ {
+ short rlen = 1;
+ unsigned char rmsg[2] = {};
+ struct fppdiag_config msg = {} ;
+ if(comcerto_fpp_send_command(CMD_FPPDIAG_DUMP_CTRS, sizeof(msg),
+ (unsigned short *)&msg,
+ &rlen, rmsg))
+ return -1;
+
+ }
+ break;
+ default:
+ {
+ printk(KERN_INFO "fppdiag_ioctl: cmd : %d is not implemented", cmd);
+ rc = -EINVAL;
+ break;
+ }
+ }
+ mutex_unlock(&fppdiag_drv_data->lock);
+ return rc;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
+
+
+static int fppdiag_vma_fault(struct vm_area_struct *vma , struct vm_fault *vmf)
+{
+ struct page *pageptr = NULL;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long physaddr = (unsigned long)vmf->virtual_address - vma->vm_start +
+ offset;
+ unsigned long pageframe = physaddr >> PAGE_SHIFT;
+ void* page_ptr = NULL;
+
+ //printk (KERN_DEBUG "diag_vm_nopage:start: %lx, end: %lx, address: %lx offset: %lx pageframe : %x\n", vma->vm_start, vma->vm_end, (unsigned long)vmf->virtual_address, offset, pageframe);
+
+ page_ptr = (void*)phys_to_virt(be32_to_cpu(fppdiag_drv_data->fpp_config.rng_baseaddr[pageframe]));
+ pageptr = virt_to_page(page_ptr);
+ get_page(pageptr);
+ vmf->page = pageptr;
+ if(pageptr)
+ return 0;
+ return VM_FAULT_SIGBUS;
+}
+
+#else
+
+static struct page *fppdiag_vma_nopage (struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ struct page *pageptr = NOPAGE_SIGBUS;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long physaddr = address - vma->vm_start + offset;
+ unsigned long pageframe = physaddr >> PAGE_SHIFT;
+ void* page_ptr = NULL;
+
+ //printk (KERN_DEBUG "diag_vm_nopage:start: %lx, end: %lx, address: %lx offset: %lx pageframe : %x\n", vma->vm_start, vma->vm_end, address,offset,pageframe);
+
+ page_ptr = (void*)phys_to_virt(be32_to_cpu(fppdiag_drv_data->fpp_config.rng_baseaddr[pageframe]));
+ pageptr = virt_to_page(page_ptr);
+ get_page(pageptr);
+ if (type)
+ *type = VM_FAULT_MINOR;
+ return pageptr;
+}
+
+#endif
+
+void fppdiag_vma_open(struct vm_area_struct *vma)
+{
+ printk(KERN_NOTICE "Simple VMA open, virt %lx, phys %lx\n",
+ vma->vm_start, vma->vm_pgoff << PAGE_SHIFT);
+}
+
+void fppdiag_vma_close(struct vm_area_struct *vma)
+{
+ printk(KERN_NOTICE "Simple VMA close.\n");
+}
+
+
+static struct vm_operations_struct fppdiag_vm_ops = {
+ .open = fppdiag_vma_open,
+ .close = fppdiag_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
+ .fault = fppdiag_vma_fault,
+#else
+ .nopage = fppdiag_vma_nopage,
+#endif
+};
+
+
+
+static int fppdiag_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+
+
+ printk (KERN_DEBUG "start: %lx, end: %lx off: %lx, vsize: %lx, \n",
+ vma->vm_start, vma->vm_end, offset, vsize);
+
+ if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
+ {
+ printk(KERN_INFO "High Memory \n");
+ vma->vm_flags |= VM_IO;
+ }
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &fppdiag_vm_ops;
+ fppdiag_vma_open(vma);
+
+ return 0;
+}
+
+
+struct file_operations fppdiag_fops = {
+ .owner = THIS_MODULE,
+ .open = fppdiag_open,
+ .release = fppdiag_release,
+ .unlocked_ioctl = fppdiag_ioctl,
+ .mmap = fppdiag_mmap,
+ .poll = fppdiag_poll,
+};
+#endif // #if 0
+
+
+/** Prints one firmware log entry.
+ *
+ * The entry buffer starts with a big-endian offset into the firmware's string
+ * table; the trailing FPPDIAG_MAX_ARGS words are the (big-endian) printf
+ * arguments.
+ *
+ * @param entry   raw diag ring entry
+ * @param pe_num  diag slot index (selects class vs util string table)
+ */
+void pfe_diags_print(fppdiag_entry_t *entry, unsigned int pe_num)
+{
+	FPPDIAG_ARG_TYPE *args = (FPPDIAG_ARG_TYPE *)(entry->buffer + FPPDIAG_BUFFER_SIZE-FPPDIAG_MAX_ARGS*sizeof(FPPDIAG_ARG_TYPE));
+	struct pfe_diags_info * pfe_diags_info;
+	unsigned long str_offset = be32_to_cpu(*(u32 *)entry->buffer);
+	char string_buffer[256];
+
+	if (pe_id[pe_num] == UTIL_ID)
+		pfe_diags_info = &pfe->diags.util_diags_info;
+	else
+		pfe_diags_info = &pfe->diags.class_diags_info;
+
+	/* Fix: the original tested !pfe_diags_info, which is always false (it is
+	 * the address of an embedded member).  Check the string table instead. */
+	if (!pfe_diags_info->diags_str_array) {
+		printk(KERN_WARNING "PFE diags: print message was received but diags strings could not be extracted from firmware, skipping.\n");
+		return;
+	}
+
+	str_offset -= pfe_diags_info->diags_str_base;
+	if (str_offset >= pfe_diags_info->diags_str_size) {
+		printk(KERN_WARNING "PFE diags: string offset passed by PFE %s is out of bounds: %ld", pe_names[pe_num], str_offset);
+		return;
+	}
+
+	/* The firmware string is the printk format; up to 4 args are passed. */
+	snprintf(string_buffer, 256, "%s %s: %s", KERN_INFO, pe_names[pe_num], (char *)(pfe_diags_info->diags_str_array+str_offset));
+	printk(string_buffer, ntohl(args[0]), ntohl(args[1]), ntohl(args[2]), ntohl(args[3]));
+}
+
+/* Hex-dump a raw diag entry buffer, 8 bytes per printed line, each line
+ * tagged with the PE name and the starting offset. */
+void pfe_diags_dump(fppdiag_entry_t *entry, char *pe_name)
+{
+	unsigned int offset;
+
+	for (offset = 0; offset < FPPDIAG_BUFFER_SIZE; offset++)
+	{
+		if (!(offset & 0x7))
+			printk(KERN_INFO "\n%s: 0x%02x ", pe_name, offset);
+		printk(KERN_INFO "%02x ", entry->buffer[offset]);
+	}
+	printk(KERN_INFO " \n");
+}
+
+/* Pretty-print a PE exception record.  The entry buffer holds 15 big-endian
+ * 32-bit values: EPC, ECAS, EID, ED followed by registers r0..r10. */
+void pfe_diags_exception_dump(fppdiag_entry_t *entry, char *pe_name)
+{
+	u32 *registers = (u32 *) entry->buffer;
+
+	printk(KERN_INFO "%s: Exception: EPC: %8x ECAS: %8x EID: %8x ED: %8x\n%s: r0/sp: %8x r1/ra: %8x r2/fp: %8x r3: %8x\n%s: r4: %8x r5: %8x r6: %8x r7: %8x\n%s: r8: %8x r9: %8x r10: %8x\n",
+			pe_name, ntohl(registers[0]), ntohl(registers[1]), ntohl(registers[2]), ntohl(registers[3]),
+			pe_name, ntohl(registers[4]), ntohl(registers[5]), ntohl(registers[6]), ntohl(registers[7]),
+			pe_name, ntohl(registers[8]), ntohl(registers[9]), ntohl(registers[10]), ntohl(registers[11]),
+			pe_name, ntohl(registers[12]), ntohl(registers[13]), ntohl(registers[14]) );
+}
+
+/** Returns the next diag entry of a PE's ring and the advanced read index.
+ *
+ * @param pe_num       diag slot index
+ * @param pread_index  out: read index advanced by one (wraps at ring end)
+ * @return pointer to the entry at the new read position
+ */
+fppdiag_entry_t * pfe_diags_get_entry(unsigned int pe_num, unsigned int *pread_index)
+{
+	unsigned int read_index, page_index, previous_page_index, total_size;
+	fppdiag_ctl_t *fppdiagctl = pfe->diags.fppdiag_drv_data[pe_num].virt_fppdiagctl;
+	void *pageaddr;
+
+	total_size = pfe->diags.fppdiag_drv_data[pe_num].fpp_config.rng_size * FPPDIAG_ENTRIES_PER_PAGE;
+
+	read_index = be32_to_cpu(fppdiagctl->read_index);
+	previous_page_index = read_index/FPPDIAG_ENTRIES_PER_PAGE;
+	read_index++;
+	if (read_index == total_size)
+		read_index = 0;
+	page_index = read_index/FPPDIAG_ENTRIES_PER_PAGE;
+
+	/* When stepping off a page, re-sync it for the CPU.  pfe_rng_baseaddr[]
+	 * stores big-endian bus addresses (see pfediag_enable()), so convert
+	 * before handing the value to the DMA API — the original passed the raw
+	 * big-endian word (the unmap paths do use be32_to_cpu()). */
+	if (read_index % FPPDIAG_ENTRIES_PER_PAGE == 0)
+		dma_sync_single_for_cpu(pfe->dev, be32_to_cpu(pfe->diags.fppdiag_drv_data[pe_num].pfe_rng_baseaddr[previous_page_index]), PAGE_SIZE, DMA_FROM_DEVICE);
+
+	pageaddr = pfe->diags.fppdiag_drv_data[pe_num].virt_rng_baseaddr[page_index];
+	*pread_index = read_index;
+	return (fppdiag_entry_t *) (pageaddr+(read_index % FPPDIAG_ENTRIES_PER_PAGE)*FPPDIAG_ENTRY_SIZE);
+}
+
+/* Decode and print the entry at the current read position of a PE's ring,
+ * dispatching on the entry type.  Returns the advanced read index. */
+unsigned int pfe_diags_show_current(unsigned int pe_num)
+{
+	unsigned int read_index;
+	fppdiag_entry_t *entry = pfe_diags_get_entry(pe_num, &read_index);
+
+	switch (entry->flags)
+	{
+	case FPPDIAG_EXPT_ENTRY:
+		pfe_diags_exception_dump(entry, pe_names[pe_num]);
+		break;
+	case FPPDIAG_DUMP_ENTRY:
+		pfe_diags_dump(entry, pe_names[pe_num]);
+		break;
+	default:
+		pfe_diags_print(entry, pe_num);
+		break;
+	}
+
+	return read_index;
+}
+
+/* Busy-polls the PEs in [pe_start, pe_end] until one has a pending diag
+ * entry, then prints it and advances that PE's read index.  Always returns 0.
+ * NOTE(review): this spins with no cpu_relax()/schedule() and never returns
+ * if no PE ever produces an entry — confirm it is only used in debug paths. */
+unsigned int fppdiag_show_one(unsigned int pe_start, unsigned int pe_end)
+{
+	fppdiag_ctl_t *fppdiagctl;
+	int pe_num = pe_start;
+
+	fppdiagctl = pfe->diags.fppdiag_drv_data[pe_num].virt_fppdiagctl;
+
+	/* Ring is empty while read_index == write_index; cycle through the PEs. */
+	while (be32_to_cpu(fppdiagctl->read_index) == be32_to_cpu(fppdiagctl->write_index))
+	{
+		pe_num++;
+		if (pe_num > pe_end)
+			pe_num = pe_start;
+		fppdiagctl = pfe->diags.fppdiag_drv_data[pe_num].virt_fppdiagctl;
+	}
+
+	fppdiagctl->read_index = cpu_to_be32(pfe_diags_show_current(pe_num));
+
+	return 0;
+}
+
+
+/** Timer callback: drains up to 40 pending diag entries per PE, then re-arms.
+ *
+ * @param arg  unused timer argument
+ */
+void pfe_diags_loop(unsigned long arg)
+{
+	int pe_num, count;
+	fppdiag_ctl_t *fppdiagctl;
+
+	for (pe_num=0; pe_num < NUM_PE_DIAGS; pe_num++) {
+		fppdiagctl = pfe->diags.fppdiag_drv_data[pe_num].virt_fppdiagctl;
+		count = 40;	/* bound the work done per timer expiry */
+		while (count && (be32_to_cpu(fppdiagctl->read_index) != be32_to_cpu(fppdiagctl->write_index)))
+		{
+			fppdiagctl->read_index = cpu_to_be32(pfe_diags_show_current(pe_num));
+			count--;
+		}
+	}
+	/* Re-arm with a fresh timeout.  The original called add_timer() without
+	 * updating ->expires, so after the first expiry the timer was always in
+	 * the past and fired again immediately, every tick. */
+	mod_timer(&pfe->diags.pfe_diags_timer, jiffies + 2);
+}
+
+
+
+/** Initializes the PFE diagnostics subsystem.
+ *
+ * Allocates per-PE driver data and a DMA-coherent array of ring control
+ * structures, resets all ring indexes, enables diagnostics on every PE and
+ * starts a kernel timer that periodically drains the rings.
+ *
+ * @param pfe  global PFE driver state
+ * @return 0 on success, -ENOMEM on allocation failure
+ */
+int pfe_diags_init(struct pfe *pfe)
+{
+	int i;
+	int ret = 0;
+	fppdiag_ctl_t *phys_fppdiagctl, *virt_fppdiagctl;
+	struct fppdiag_drv_dat *fppdiag_drv_data;
+
+	printk(KERN_INFO "\n Fppdiag Driver initializing.\n");
+
+	fppdiag_drv_data = pfe_kzalloc(NUM_PE_DIAGS*sizeof(struct fppdiag_drv_dat), GFP_KERNEL);
+	if(!fppdiag_drv_data)
+	{
+		ret = -ENOMEM;
+		goto err0;
+	}
+	pfe->diags.fppdiag_drv_data = fppdiag_drv_data;
+
+	/* One control structure (read/write index pair) per PE, DMA-coherent so
+	 * host and PEs see updates without explicit cache maintenance. */
+	virt_fppdiagctl = dma_alloc_coherent(pfe->dev, NUM_PE_DIAGS*sizeof(fppdiag_ctl_t), (dma_addr_t *) &phys_fppdiagctl, GFP_KERNEL);
+	printk(KERN_INFO "PFE diags ctrl_phys_address = 0x%08x\n", (u32) phys_fppdiagctl);
+	if(!virt_fppdiagctl)
+	{
+		ret = -ENOMEM;
+		goto err1;	/* fix: the original leaked fppdiag_drv_data here */
+	}
+
+	/* Initialize control structures */
+	for (i=0; i<NUM_PE_DIAGS; i++) {
+		fppdiag_drv_data[i].fpp_config.rng_baseaddr = NULL;
+		fppdiag_drv_data[i].fpp_config.diag_ctl_flag = FPPDIAG_CTL_FREERUN;
+		fppdiag_drv_data[i].fpp_config.diag_log_flag = 0xff;
+		fppdiag_drv_data[i].fpp_config.diag_mod_flag = 0xff;
+		fppdiag_drv_data[i].fpp_config.rng_size = DEFAULT_RING_SIZE;
+		fppdiag_drv_data[i].fpp_config.fppdiagctl = &phys_fppdiagctl[i];
+		fppdiag_drv_data[i].virt_fppdiagctl = &virt_fppdiagctl[i];
+		fppdiag_drv_data[i].virt_rng_baseaddr = NULL;
+
+		mutex_init(&fppdiag_drv_data[i].lock);
+
+		/* Reset the read and write index */
+		writel(0, &fppdiag_drv_data[i].virt_fppdiagctl->read_index);
+		writel(0, &fppdiag_drv_data[i].virt_fppdiagctl->write_index);
+	}
+
+	/* Until full diags control path is ready, enable diags now, and poll and dump diags from kernel-space */
+	pfediag_enable_all(8);
+
+	/* Start timer to dump diags periodically */
+	init_timer(&pfe->diags.pfe_diags_timer);
+	pfe->diags.pfe_diags_timer.function = pfe_diags_loop;
+	pfe->diags.pfe_diags_timer.expires = jiffies + 2;
+	add_timer(&pfe->diags.pfe_diags_timer);
+
+	return 0;
+
+err1:
+	/* Was unreachable dead code after `return 0` in the original. */
+	pfe_kfree(fppdiag_drv_data);
+	pfe->diags.fppdiag_drv_data = NULL;
+err0:
+	return ret;
+
+}
+
+/* Tear down the diagnostics subsystem: stop the poll timer, disable all PE
+ * rings (freeing their pages), then release the per-PE driver data. */
+void pfe_diags_exit(struct pfe *pfe)
+{
+	del_timer(&pfe->diags.pfe_diags_timer);
+
+	pfediag_disable_all();
+
+	pfe_kfree(pfe->diags.fppdiag_drv_data);
+}
+
+#else
+#include "pfe_mod.h"
+/* No-op stubs used when FPP_DIAGNOSTICS is not compiled in. */
+int pfe_diags_init(struct pfe *pfe)
+{
+	return 0;
+}
+
+void pfe_diags_exit(struct pfe *pfe)
+{
+}
+#endif
diff --git a/pfe_ctrl/pfe_diags.h b/pfe_ctrl/pfe_diags.h
new file mode 100644
index 0000000..7c02b87
--- /dev/null
+++ b/pfe_ctrl/pfe_diags.h
@@ -0,0 +1,114 @@
+/*
+ * diagdrv.h
+ *
+ * Copyright (C) 2004,2005 Mindspeed Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PFE_DIAGS_H_
+#define _PFE_DIAGS_H_
+
+#ifdef FPP_DIAGNOSTICS
+#include "fppdiag_lib.h"
+
+#define NUM_PE_DIAGS 7 /**< No diags for the TMU PEs */
+
+
+/* Diag ring sizes -- TODO confirm unit (entries vs pages) against fppdiag_lib.h */
+#define DEFAULT_RING_SIZE 32
+#define MAX_RING_SIZE 256
+
+#define FPPDIAG_MAX_AVBL_ENTRIES 100
+
+/* Diag command codes (presumably sent via comcerto_fpp_send_command(),
+ * declared below -- usage not visible in this chunk) */
+#define CMD_FPPDIAG_ENABLE 0x1201
+#define CMD_FPPDIAG_DISABLE 0x1202
+#define CMD_FPPDIAG_UPDATE 0x1203
+#define CMD_FPPDIAG_DUMP_CTRS 0x1204
+
+
+/* Ring size exchanged with user space through the ioctls below */
+typedef struct fppdiag_config_host
+{
+	int ring_size;
+}FPPCONFIG , *pFPPCONFIG;
+
+/* Read cursor handed to/from user space when draining the diag ring */
+typedef struct fppdiag_data
+{
+	int read_index;
+	int entries_to_read;
+}FPPDIAGDATA, *pFPPDIAGDATA;
+
+/* Command payload carrying one of several flag kinds plus an enable mask */
+typedef struct fppdiag_cmd
+{
+	union{
+		unsigned short log;
+		unsigned int module;
+		unsigned int state_size;
+	}flags;
+	unsigned int flags_enable;
+}FPPDIAGCMD , *pFPPDIAGCMD;
+
+/* Snapshot of the current diag state / log / module masks */
+typedef struct fppdiag_drv_info
+{
+	unsigned char state;
+	unsigned short log;
+	unsigned int module;
+}FPPDIAGINFO ,*pFPPDIAGINFO;
+
+/* Location/size of a PE diag string table plus a host-side copy of it */
+struct pfe_diags_info {
+	unsigned long diags_str_base;
+	unsigned long diags_str_size;
+	char * diags_str_array;
+};
+
+
+#define FPPDIAG_MAJOR_NUMBER 200
+#define FPPDIAG_DRIVER_NAME "/dev/fppdiag/"
+
+
+/* ioctl numbers for the (currently compiled-out) /dev/fppdiag chardev */
+#define FPPDIAG_DRV_GET_RINGSIZE _IOR(FPPDIAG_MAJOR_NUMBER,1,FPPCONFIG)
+#define FPPDIAG_DRV_GETDATA _IOR(FPPDIAG_MAJOR_NUMBER,2,FPPDIAGDATA)
+#define FPPDIAG_DRV_FINDATA _IOW(FPPDIAG_MAJOR_NUMBER,3,FPPDIAGDATA)
+#define FPPDIAG_DRV_SET_STATE _IOW(FPPDIAG_MAJOR_NUMBER,4,FPPDIAGCMD)
+#define FPPDIAG_DRV_SET_MODULE _IOW(FPPDIAG_MAJOR_NUMBER,5,FPPDIAGCMD)
+#define FPPDIAG_DRV_SET_LOG _IOW(FPPDIAG_MAJOR_NUMBER,6,FPPDIAGCMD)
+#define FPPDIAG_DRV_GET_INFO _IOR(FPPDIAG_MAJOR_NUMBER,7,FPPDIAGINFO)
+#define FPPDIAG_DRV_DUMP_COUNTERS _IOR(FPPDIAG_MAJOR_NUMBER,8,FPPDIAGINFO)
+
+
+
+/* Per-PE driver bookkeeping; one instance per PE (NUM_PE_DIAGS total) */
+struct fppdiag_drv_dat
+{
+	struct fppdiag_config fpp_config;
+	struct mutex lock;	/* per-PE lock (lock/unlock sites not visible in this chunk) */
+	fppdiag_ctl_t *virt_fppdiagctl; /**< FPP diags control structure (points to same area as fpp_config.fppdiagctl, but with kernel space address) */
+	void **virt_rng_baseaddr; /**< Pointer to an array of page buffer addresses (same as in fpp_config but pointer AND content are kernel space addresses) */
+	dma_addr_t *pfe_rng_baseaddr; /**< Pointer to fpp_config.rng_baseaddr array (but with kernel space address) */
+};
+
+/* Top-level diags state embedded in struct pfe (pfe->diags) */
+struct pfe_diags
+{
+	struct timer_list pfe_diags_timer;	/* periodic dump timer armed in pfe_diags_init() */
+	struct fppdiag_drv_dat *fppdiag_drv_data;
+	struct pfe_diags_info class_diags_info;
+	struct pfe_diags_info util_diags_info;
+};
+
+extern int comcerto_fpp_send_command(u16 fcode, u16 length, u16 *payload, u16 *resp_length, u16 *resp_payload);
+
+#endif
+
+int pfe_diags_init(struct pfe *pfe);
+void pfe_diags_exit(struct pfe *pfe);
+#endif /* _PFE_DIAGS_H_ */
diff --git a/pfe_ctrl/pfe_emulation.c b/pfe_ctrl/pfe_emulation.c
new file mode 100644
index 0000000..2203cde
--- /dev/null
+++ b/pfe_ctrl/pfe_emulation.c
@@ -0,0 +1,160 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "pfe_mod.h"
+
+static u64 dma_mask = 0xffffffff;
+
+/**
+ * pfe_emulation_probe -
+ *
+ *
+ */
+/**
+ * pfe_emulation_probe - allocate a fake PFE memory map and run the common probe
+ * @pdev: emulated platform device
+ *
+ * vmalloc()s buffers standing in for the DDR, CBUS, APB and IRAM windows that
+ * a real BSP would ioremap, then calls pfe_probe().  All allocations are
+ * unwound in reverse order on failure.
+ */
+static int pfe_emulation_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	pfe = kzalloc(sizeof(struct pfe), GFP_KERNEL);
+	if (!pfe) {
+		rc = -ENOMEM;
+		goto err_alloc;
+	}
+
+	platform_set_drvdata(pdev, pfe);
+
+	pfe->ddr_phys_baseaddr = 0x00020000;	/* should match the value used on the BSP */
+	pfe->ddr_size = 12 * SZ_1M;		/* should match the value used on the BSP */
+	pfe->ddr_baseaddr = vmalloc(pfe->ddr_size);
+	if (!pfe->ddr_baseaddr) {
+		printk(KERN_INFO "vmalloc() ddr failed\n");
+		rc = -ENOMEM;
+		goto err_ddr;
+	}
+
+	pfe->cbus_baseaddr = vmalloc(16 * SZ_1M);
+	if (!pfe->cbus_baseaddr) {
+		printk(KERN_INFO "vmalloc() cbus failed\n");
+		rc = -ENOMEM;
+		goto err_cbus;
+	}
+
+	pfe->apb_baseaddr = vmalloc(16 * SZ_1M);
+	/* BUGFIX: this used to re-test cbus_baseaddr (copy/paste), so a failed
+	 * APB allocation went undetected and would be dereferenced later */
+	if (!pfe->apb_baseaddr) {
+		printk(KERN_INFO "vmalloc() apb failed\n");
+		rc = -ENOMEM;
+		goto err_apb;
+	}
+
+	pfe->iram_baseaddr = vmalloc(128 * SZ_1K);
+	if (!pfe->iram_baseaddr) {
+		printk(KERN_INFO "vmalloc() iram failed\n");
+		rc = -ENOMEM;
+		goto err_iram;
+	}
+
+	pfe->hif_irq = 0;
+	pfe->dev = &pdev->dev;
+	pfe->dev->dma_mask = &dma_mask;
+
+	pfe->ctrl.sys_clk = 250000;	/* kHz (HIF_RX_COAL_CLKS_PER_USEC divides by 1000) */
+	rc = pfe_probe(pfe);
+	if (rc < 0)
+		goto err_probe;
+
+	return 0;
+
+/* Error unwind: reverse order of the allocations above */
+err_probe:
+	vfree(pfe->iram_baseaddr);
+
+err_iram:
+	vfree(pfe->apb_baseaddr);
+
+err_apb:
+	vfree(pfe->cbus_baseaddr);
+
+err_cbus:
+	vfree(pfe->ddr_baseaddr);
+
+err_ddr:
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(pfe);
+
+err_alloc:
+	return rc;
+}
+
+
+/**
+ * pfe_emulation_remove -
+ *
+ *
+ */
+/**
+ * pfe_emulation_remove - undo pfe_emulation_probe
+ * @pdev: emulated platform device
+ *
+ * Runs the common pfe_remove() and frees the vmalloc'ed memory windows.
+ * Returns whatever pfe_remove() returned.
+ */
+static int pfe_emulation_remove(struct platform_device *pdev)
+{
+	struct pfe *pfe = platform_get_drvdata(pdev);
+	int rc;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	rc = pfe_remove(pfe);
+
+	/* Free in reverse order of allocation in probe */
+	vfree(pfe->iram_baseaddr);
+	vfree(pfe->apb_baseaddr);
+	vfree(pfe->cbus_baseaddr);
+	vfree(pfe->ddr_baseaddr);
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(pfe);
+
+	return rc;
+}
+
+
+/* Platform driver bound to the statically registered "pfe" device below */
+static struct platform_driver pfe_platform_driver = {
+	.probe = pfe_emulation_probe,
+	.remove = pfe_emulation_remove,
+	.driver = {
+		.name = "pfe",
+	},
+};
+
+
+/* Empty release callback: the device object is static, nothing to free */
+static void pfe_device_release(struct device *dev)
+{
+
+}
+
+
+/* Device is declared here (not by board code) so the emulation is self-contained */
+static struct platform_device pfe_platform_device = {
+	.name = "pfe",
+	.id = 0,
+	.dev.release = pfe_device_release,
+};
+
+/**
+ * pfe_module_init - module entry: register the emulated device, then its driver.
+ *
+ * BUGFIX: the original ignored platform_device_register()'s return value and
+ * did not unregister the device when driver registration failed, leaving an
+ * orphan device behind on error.  Returns 0 on success, negative errno on failure.
+ */
+static int __init pfe_module_init(void)
+{
+	int rc;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	rc = platform_device_register(&pfe_platform_device);
+	if (rc < 0)
+		return rc;
+
+	rc = platform_driver_register(&pfe_platform_driver);
+	if (rc < 0)
+		platform_device_unregister(&pfe_platform_device);
+
+	return rc;
+}
+
+
+/* Module unload: unregister driver then device (reverse of pfe_module_init) */
+static void __exit pfe_module_exit(void)
+{
+	platform_driver_unregister(&pfe_platform_driver);
+
+	platform_device_unregister(&pfe_platform_device);
+
+	printk(KERN_INFO "%s\n", __func__);
+}
+
+MODULE_LICENSE("GPL");
+module_init(pfe_module_init);
+module_exit(pfe_module_exit);
diff --git a/pfe_ctrl/pfe_eth.c b/pfe_ctrl/pfe_eth.c
new file mode 100644
index 0000000..d6681ac
--- /dev/null
+++ b/pfe_ctrl/pfe_eth.c
@@ -0,0 +1,2727 @@
+
+/** @pfe_eth.c.
+ * Ethernet driver for to handle exception path for PFE.
+ * - uses HIF functions to send/receive packets.
+ * - uses ctrl function to start/stop interfaces.
+ * - uses direct register accesses to control phy operation.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+
+#include <net/ip.h>
+#include <net/sock.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+#include <net/xfrm.h>
+#endif
+
+#include "pfe_mod.h"
+#include "pfe_eth.h"
+
+const char comcerto_eth_driver_version[]="1.0";
+/* Per-GEMAC register base pointers -- presumably filled at init; not visible in this chunk */
+static void *cbus_emac_base[3];
+static void *cbus_gpi_base[3];
+
+/* Forward declarations for teardown/flush helpers defined later in this file */
+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv, int force);
+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int txQ_num, int from_tx, int n_desc);
+
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+extern struct xfrm_state *xfrm_state_lookup_byhandle(struct net *net, u16 handle);
+#endif
+
+/* GEMAC register offsets dumped by pfe_eth_gemac_get_regs() (ethtool -d) */
+unsigned int gemac_regs[] = {
+	0x0000, /* Network control */
+	0x0004, /* Network configuration */
+	0x0008, /* Network status */
+	0x0010, /* DMA configuration */
+	0x0014, /* Transmit status */
+	0x0020, /* Receive status */
+	0x0024, /* Interrupt status */
+	0x0030, /* Interrupt mask */
+	0x0038, /* Received pause quantum */
+	0x003c, /* Transmit pause quantum */
+	0x0080, /* Hash register bottom [31:0] */
+	0x0084, /* Hash register bottom [63:32] */
+	0x0088, /* Specific address 1 bottom [31:0] */
+	0x008c, /* Specific address 1 top [47:32] */
+	0x0090, /* Specific address 2 bottom [31:0] */
+	0x0094, /* Specific address 2 top [47:32] */
+	0x0098, /* Specific address 3 bottom [31:0] */
+	0x009c, /* Specific address 3 top [47:32] */
+	0x00a0, /* Specific address 4 bottom [31:0] */
+	0x00a4, /* Specific address 4 top [47:32] */
+	0x00a8, /* Type ID Match 1 */
+	0x00ac, /* Type ID Match 2 */
+	0x00b0, /* Type ID Match 3 */
+	0x00b4, /* Type ID Match 4 */
+	0x00b8, /* Wake Up ON LAN  */
+	0x00bc, /* IPG stretch register */
+	0x00c0, /* Stacked VLAN Register */
+	0x00fc, /* Module ID */
+	0x07a0  /* EMAC Control register */
+};
+
+/********************************************************************/
+/* SYSFS INTERFACE */
+/********************************************************************/
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+/** pfe_eth_show_rx_cpu_affinity - sysfs read: CPU handling this port's rx.
+ *
+ */
+static ssize_t pfe_eth_show_rx_cpu_affinity(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+	return sprintf(buf, "%d\n", priv->cpu_id);
+}
+
+/** pfe_eth_set_rx_cpu_affinity - sysfs write: steer rx processing to a CPU.
+ *
+ * Rejects non-numeric input and CPU ids >= NR_CPUS.
+ */
+static ssize_t pfe_eth_set_rx_cpu_affinity(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	unsigned int cpu_id = 0;
+
+	/* BUGFIX: %d with an unsigned int is a format mismatch; also check that
+	 * a value was parsed at all instead of silently acting on 0 */
+	if (sscanf(buf, "%u", &cpu_id) != 1) {
+		printk(KERN_ERR "%s: Invalid input\n", __func__);
+		return count;
+	}
+
+	if (cpu_id < NR_CPUS) {
+		priv->cpu_id = (int)cpu_id;
+		hif_lib_set_rx_cpu_affinity(&priv->client, priv->cpu_id);
+	}
+	else
+		printk(KERN_ERR "%s: Invalid CPU (%u)\n", __func__, cpu_id);
+
+	return count;
+}
+#endif
+
+#ifdef PFE_ETH_TSO_STATS
+/** pfe_eth_show_tso_stats - sysfs read: TSO packet-size histogram.
+ *
+ * One line per 2KB bucket (32 buckets: >0KB .. >62KB) from tso.len_counters.
+ */
+static ssize_t pfe_eth_show_tso_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < 32; i++)
+		len += sprintf(buf + len, "TSO packets > %dKBytes = %u\n", i * 2, priv->tso.len_counters[i]);
+
+	return len;
+}
+
+/** pfe_eth_set_tso_stats - sysfs write: any write resets the TSO histogram.
+ *
+ */
+static ssize_t pfe_eth_set_tso_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+	memset(priv->tso.len_counters, 0, sizeof(priv->tso.len_counters));
+
+	return count;
+}
+#endif
+
+#ifdef PFE_ETH_LRO_STATS
+/*
+ * pfe_eth_show_lro_nb_stats - sysfs read: histogram of fragments per LRO packet.
+ */
+static ssize_t pfe_eth_show_lro_nb_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < LRO_NB_COUNT_MAX; i++)
+		len += sprintf(buf + len, "%d fragments packets = %u\n", i, priv->lro_nb_counters[i]);
+
+	return len;
+}
+
+/*
+ * pfe_eth_set_lro_nb_stats - sysfs write: any write resets the fragment histogram.
+ */
+static ssize_t pfe_eth_set_lro_nb_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+	memset(priv->lro_nb_counters, 0, sizeof(priv->lro_nb_counters));
+
+	return count;
+}
+
+/*
+ * pfe_eth_show_lro_len_stats - sysfs read: histogram of LRO rx sizes, 2KB buckets.
+ */
+static ssize_t pfe_eth_show_lro_len_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < LRO_LEN_COUNT_MAX; i++)
+		len += sprintf(buf + len, "RX packets > %dKBytes = %u\n", i * 2, priv->lro_len_counters[i]);
+
+	return len;
+}
+
+/*
+ * pfe_eth_set_lro_len_stats - sysfs write: any write resets the size histogram.
+ */
+static ssize_t pfe_eth_set_lro_len_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+	memset(priv->lro_len_counters, 0, sizeof(priv->lro_len_counters));
+
+	return count;
+}
+#endif
+
+#ifdef PFE_ETH_NAPI_STATS
+/*
+ * pfe_eth_show_napi_stats - sysfs read: dump the five NAPI event counters.
+ */
+static ssize_t pfe_eth_show_napi_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "sched:  %u\n", priv->napi_counters[NAPI_SCHED_COUNT]);
+	len += sprintf(buf + len, "poll:   %u\n", priv->napi_counters[NAPI_POLL_COUNT]);
+	len += sprintf(buf + len, "packet: %u\n", priv->napi_counters[NAPI_PACKET_COUNT]);
+	len += sprintf(buf + len, "budget: %u\n", priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
+	len += sprintf(buf + len, "desc:   %u\n", priv->napi_counters[NAPI_DESC_COUNT]);
+
+	return len;
+}
+
+/*
+ * pfe_eth_set_napi_stats - sysfs write: any write resets all NAPI counters.
+ */
+static ssize_t pfe_eth_set_napi_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+	memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
+
+	return count;
+}
+#endif
+#ifdef PFE_ETH_TX_STATS
+/** pfe_eth_show_tx_stats - sysfs read: per-TX-queue credit and stall counters.
+ *
+ * Takes the netdev tx queue lock plus the HIF tx lock around the credit
+ * reads so the numbers for one queue are self-consistent.
+ */
+static ssize_t pfe_eth_show_tx_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+	int i;
+
+	len += sprintf(buf + len, "TX queues stats:\n");
+
+	for (i = 0; i < EMAC_TXQ_CNT; i++) {
+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
+
+		len += sprintf(buf + len, "\n");
+		__netif_tx_lock_bh(tx_queue);
+
+		hif_tx_lock(&pfe->hif);
+		len += sprintf(buf + len, "Queue %2d :  credits               = %10d\n", i, hif_lib_tx_credit_avail(pfe, priv->id, i));
+		len += sprintf(buf + len, "            tx packets            = %10d\n", pfe->tmu_credit.tx_packets[priv->id][i]);
+		hif_tx_unlock(&pfe->hif);
+
+		/* Don't output additionnal stats if queue never used */
+		if (!pfe->tmu_credit.tx_packets[priv->id][i])
+			goto skip;
+
+		len += sprintf(buf + len, "            clean_fail            = %10d\n", priv->clean_fail[i]);
+		len += sprintf(buf + len, "            stop_queue            = %10d\n", priv->stop_queue_total[i]);
+		len += sprintf(buf + len, "            stop_queue_hif        = %10d\n", priv->stop_queue_hif[i]);
+		len += sprintf(buf + len, "            stop_queue_hif_client = %10d\n", priv->stop_queue_hif_client[i]);
+		len += sprintf(buf + len, "            stop_queue_credit     = %10d\n", priv->stop_queue_credit[i]);
+skip:
+		__netif_tx_unlock_bh(tx_queue);
+	}
+	return len;
+}
+
+/** pfe_eth_set_tx_stats - sysfs write: any write resets the per-queue counters.
+ *
+ * Clears under the tx queue lock so it doesn't race the tx path.
+ */
+static ssize_t pfe_eth_set_tx_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	int i;
+
+	for (i = 0; i < EMAC_TXQ_CNT; i++) {
+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
+
+		__netif_tx_lock_bh(tx_queue);
+		priv->clean_fail[i] = 0;
+		priv->stop_queue_total[i] = 0;
+		priv->stop_queue_hif[i] = 0;
+		priv->stop_queue_hif_client[i]= 0;
+		priv->stop_queue_credit[i] = 0;
+		__netif_tx_unlock_bh(tx_queue);
+	}
+
+	return count;
+}
+#endif
+/** pfe_eth_show_txavail - sysfs read: free tx slots per queue, space-separated
+ * on one line, read under each queue's tx lock.
+ */
+static ssize_t pfe_eth_show_txavail(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < EMAC_TXQ_CNT; i++) {
+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, i);
+
+		__netif_tx_lock_bh(tx_queue);
+
+		len += sprintf(buf + len, "%d", hif_lib_tx_avail(&priv->client, i));
+
+		__netif_tx_unlock_bh(tx_queue);
+
+		/* newline after the last queue, space between the others */
+		if (i == (EMAC_TXQ_CNT - 1))
+			len += sprintf(buf + len, "\n");
+		else
+			len += sprintf(buf + len, " ");
+	}
+
+	return len;
+}
+
+
+/** pfe_eth_show_default_priority - sysfs read: current default tx priority.
+ *
+ * Read under priv->lock so it pairs with the setter below.
+ */
+static ssize_t pfe_eth_show_default_priority(struct device *dev,
+					struct device_attribute *attr,
+						char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	rc = sprintf(buf, "%d\n", priv->default_priority);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return rc;
+}
+
+/** pfe_eth_set_default_priority - sysfs write: set the default tx priority.
+ *
+ * NOTE(review): value is not range-checked here -- confirm downstream users
+ * tolerate out-of-range priorities.
+ */
+
+static ssize_t pfe_eth_set_default_priority(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->default_priority = simple_strtoul(buf, NULL, 0);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return count;
+}
+
+/* sysfs attribute descriptors (0644 = rw, 0444 = ro); created/removed in
+ * pfe_eth_sysfs_init() / pfe_eth_sysfs_exit() below */
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+static DEVICE_ATTR(rx_cpu_affinity, 0644, pfe_eth_show_rx_cpu_affinity, pfe_eth_set_rx_cpu_affinity);
+#endif
+
+static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
+static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority, pfe_eth_set_default_priority);
+
+#ifdef PFE_ETH_NAPI_STATS
+static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats, pfe_eth_set_napi_stats);
+#endif
+
+#ifdef PFE_ETH_TX_STATS
+static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats, pfe_eth_set_tx_stats);
+#endif
+
+#ifdef PFE_ETH_TSO_STATS
+static DEVICE_ATTR(tso_stats, 0644, pfe_eth_show_tso_stats, pfe_eth_set_tso_stats);
+#endif
+
+#ifdef PFE_ETH_LRO_STATS
+static DEVICE_ATTR(lro_nb_stats, 0644, pfe_eth_show_lro_nb_stats, pfe_eth_set_lro_nb_stats);
+static DEVICE_ATTR(lro_len_stats, 0644, pfe_eth_show_lro_len_stats, pfe_eth_set_lro_len_stats);
+#endif
+
+/** pfe_eth_sysfs_init - create the per-netdev sysfs attribute files.
+ *
+ * Creates default_priority, txavail and the optional stats/affinity files.
+ * On any failure the already-created files are removed by the reverse-order
+ * goto chain below (the #ifdef pairs mirror the creation order exactly).
+ * Returns 0 on success, -1 on failure.
+ */
+static int pfe_eth_sysfs_init(struct net_device *dev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
+	int err;
+
+	/* Initialize the default values */
+	/* By default, packets without conntrack will use this default high priority queue */
+	priv->default_priority = 15;
+
+	/* Create our sysfs files */
+	err = device_create_file(&dev->dev, &dev_attr_default_priority);
+	if (err) {
+		netdev_err(dev, "failed to create default_priority sysfs files\n");
+		goto err_priority;
+	}
+
+	err = device_create_file(&dev->dev, &dev_attr_txavail);
+	if (err) {
+		/* BUGFIX: message used to say "default_priority" (copy/paste) */
+		netdev_err(dev, "failed to create txavail sysfs files\n");
+		goto err_txavail;
+	}
+
+#ifdef PFE_ETH_NAPI_STATS
+	err = device_create_file(&dev->dev, &dev_attr_napi_stats);
+	if (err) {
+		netdev_err(dev, "failed to create napi stats sysfs files\n");
+		goto err_napi;
+	}
+#endif
+
+#ifdef PFE_ETH_TX_STATS
+	err = device_create_file(&dev->dev, &dev_attr_tx_stats);
+	if (err) {
+		netdev_err(dev, "failed to create tx stats sysfs files\n");
+		goto err_tx;
+	}
+#endif
+
+#ifdef PFE_ETH_TSO_STATS
+	err = device_create_file(&dev->dev, &dev_attr_tso_stats);
+	if (err) {
+		netdev_err(dev, "failed to create tso stats sysfs files\n");
+		goto err_tso;
+	}
+#endif
+
+#ifdef PFE_ETH_LRO_STATS
+	err = device_create_file(&dev->dev, &dev_attr_lro_nb_stats);
+	if (err) {
+		netdev_err(dev, "failed to create lro nb stats sysfs files\n");
+		goto err_lro_nb;
+	}
+
+	err = device_create_file(&dev->dev, &dev_attr_lro_len_stats);
+	if (err) {
+		netdev_err(dev, "failed to create lro len stats sysfs files\n");
+		goto err_lro_len;
+	}
+#endif
+
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+	err = device_create_file(&dev->dev, &dev_attr_rx_cpu_affinity);
+	if (err) {
+		netdev_err(dev, "failed to create rx cpu affinity sysfs file\n");
+		goto err_rx_affinity;
+	}
+#endif
+
+	return 0;
+
+/* Unwind: each label removes the file created just before the failed one */
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+err_rx_affinity:
+#ifdef PFE_ETH_LRO_STATS
+	device_remove_file(&dev->dev, &dev_attr_lro_len_stats);
+#endif
+#endif
+
+#ifdef PFE_ETH_LRO_STATS
+err_lro_len:
+	device_remove_file(&dev->dev, &dev_attr_lro_nb_stats);
+
+err_lro_nb:
+#endif
+
+#ifdef PFE_ETH_TSO_STATS
+	device_remove_file(&dev->dev, &dev_attr_tso_stats);
+
+err_tso:
+#endif
+#ifdef PFE_ETH_TX_STATS
+	device_remove_file(&dev->dev, &dev_attr_tx_stats);
+
+err_tx:
+#endif
+#ifdef PFE_ETH_NAPI_STATS
+	device_remove_file(&dev->dev, &dev_attr_napi_stats);
+
+err_napi:
+#endif
+	device_remove_file(&dev->dev, &dev_attr_txavail);
+
+err_txavail:
+	device_remove_file(&dev->dev, &dev_attr_default_priority);
+
+err_priority:
+	return -1;
+}
+
+/** pfe_eth_sysfs_exit - remove every sysfs file created by pfe_eth_sysfs_init.
+ *
+ */
+void pfe_eth_sysfs_exit(struct net_device *dev)
+{
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+	device_remove_file(&dev->dev, &dev_attr_rx_cpu_affinity);
+#endif
+
+#ifdef PFE_ETH_LRO_STATS
+	device_remove_file(&dev->dev, &dev_attr_lro_nb_stats);
+	device_remove_file(&dev->dev, &dev_attr_lro_len_stats);
+#endif
+
+#ifdef PFE_ETH_TSO_STATS
+	device_remove_file(&dev->dev, &dev_attr_tso_stats);
+#endif
+
+#ifdef PFE_ETH_TX_STATS
+	device_remove_file(&dev->dev, &dev_attr_tx_stats);
+#endif
+
+#ifdef PFE_ETH_NAPI_STATS
+	device_remove_file(&dev->dev, &dev_attr_napi_stats);
+#endif
+	device_remove_file(&dev->dev, &dev_attr_txavail);
+	device_remove_file(&dev->dev, &dev_attr_default_priority);
+}
+
+/*************************************************************************/
+/* ETHTOOL INTERCAE */
+/*************************************************************************/
+/* ethtool statistic names; order matches the RMON counter block read in
+ * pfe_eth_fill_stats().  Note there are EMAC_RMON_LEN - 2 names because the
+ * tx/rx byte counters each merge two 32-bit registers into one 64-bit stat
+ * (see pfe_eth_fill_stats / pfe_eth_stats_count). */
+static char stat_gstrings[][ETH_GSTRING_LEN] = {
+			"tx- octets",
+			"tx- packets",
+			"tx- broadcast",
+			"tx- multicast",
+			"tx- pause",
+			"tx- 64 bytes packets",
+			"tx- 64 - 127 bytes packets",
+			"tx- 128 - 255 bytes packets",
+			"tx- 256 - 511 bytes packets",
+			"tx- 512 - 1023 bytes packets",
+			"tx- 1024 - 1518 bytes packets",
+			"tx- > 1518 bytes packets",
+			"tx- underruns  - errors",
+			"tx- single collision",
+			"tx- multi collision",
+			"tx- exces. collision  - errors",
+			"tx- late collision  - errors",
+			"tx- deferred",
+			"tx- carrier sense - errors",
+			"rx- octets",
+			"rx- packets",
+			"rx- broadcast",
+			"rx- multicast",
+			"rx- pause",
+			"rx- 64 bytes packets",
+			"rx- 64 - 127 bytes packets",
+			"rx- 128 - 255 bytes packets",
+			"rx- 256 - 511 bytes packets",
+			"rx- 512 - 1023 bytes packets",
+			"rx- 1024 - 1518 bytes packets",
+			"rx- > 1518 bytes packets",
+			"rx- undersize -errors",
+			"rx- oversize  - errors ",
+			"rx- jabbers - errors",
+			"rx- fcs - errors",
+			"rx- length - errors",
+			"rx- symbol - errors",
+			"rx- align - errors",
+			"rx- ressource - errors",
+			"rx- overrun - errors",
+			"rx- IP cksum - errors",
+			"rx- TCP cksum - errors",
+			"rx- UDP cksum - errors"
+};
+
+
+/**
+ * pfe_eth_gstrings - Fill in a buffer with the strings which correspond to
+ *                    the stats.  Only ETH_SS_STATS is supported; the count
+ *                    (EMAC_RMON_LEN - 2) matches pfe_eth_stats_count().
+ *
+ */
+static void pfe_eth_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, stat_gstrings, (EMAC_RMON_LEN - 2) * ETH_GSTRING_LEN);
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/**
+ * pfe_eth_fill_stats - Fill in an array of 64-bit statistics from
+ *			various sources. This array will be appended
+ *			to the end of the ethtool_stats* structure, and
+ *			returned to user space
+ */
+static void pfe_eth_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
+	int i;
+	/* Walk the RMON counter block.  At the tx/rx byte-counter positions
+	 * the next register holds the upper 32 bits: it is OR'ed into the
+	 * same u64 slot and skipped (inner i++ plus the loop's i++), which is
+	 * why there are two fewer stats than registers. */
+	for (i=0;i<EMAC_RMON_LEN;i++, buf++) {
+		*buf = readl(priv->EMAC_baseaddr + EMAC_RMON_BASE_OFST + (i << 2));
+		if ( ( i == EMAC_RMON_TXBYTES_POS ) || ( i == EMAC_RMON_RXBYTES_POS ) ){
+			i++;
+			*buf |= (u64)readl(priv->EMAC_baseaddr + EMAC_RMON_BASE_OFST + (i << 2)) << 32;
+		}
+	}
+
+}
+
+/**
+ * pfe_eth_stats_count - number of ethtool stats (and matching strings).
+ *
+ * Two fewer than EMAC_RMON_LEN because the tx/rx byte counters each fold
+ * two 32-bit registers into a single 64-bit stat.
+ */
+static int pfe_eth_stats_count(struct net_device *dev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return EMAC_RMON_LEN - 2;
+
+	return -EOPNOTSUPP;
+}
+
+/**
+ * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
+ *
+ * BUGFIX: uses strlcpy instead of strncpy so every field is guaranteed to be
+ * NUL-terminated even if a source string reaches COMCERTO_INFOSTR_LEN.
+ */
+static void pfe_eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, DRV_NAME, COMCERTO_INFOSTR_LEN);
+	strlcpy(drvinfo->version, comcerto_eth_driver_version, COMCERTO_INFOSTR_LEN);
+	strlcpy(drvinfo->fw_version, "N/A", COMCERTO_INFOSTR_LEN);
+	strlcpy(drvinfo->bus_info, "N/A", COMCERTO_INFOSTR_LEN);
+	drvinfo->testinfo_len = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+/**
+ * pfe_eth_set_settings - forward an ethtool settings change to the PHY layer.
+ *
+ * Returns -ENODEV when no PHY is attached.
+ */
+static int pfe_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
+
+	if (!priv->phydev)
+		return -ENODEV;
+
+	return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+
+/**
+ * pfe_eth_get_settings - report the current PHY settings via ethtool_cmd.
+ *
+ * Returns -ENODEV when no PHY is attached.
+ */
+static int pfe_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
+
+	if (!priv->phydev)
+		return -ENODEV;
+
+	return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+/**
+ * pfe_eth_gemac_reglen - number of 32-bit words dumped by get_regs.
+ *
+ * The fixed gemac_regs list plus the extra specific-address bottom/top
+ * register pairs (registers 4..MAX_UC_SPEC_ADDR_REG).
+ */
+static int pfe_eth_gemac_reglen(struct net_device *dev)
+{
+	int fixed_regs = sizeof(gemac_regs) / sizeof(u32);
+	int spec_addr_regs = (MAX_UC_SPEC_ADDR_REG - 3) * 2;
+
+	return fixed_regs + spec_addr_regs;
+}
+
+/**
+ * pfe_eth_gemac_get_regs - Return the gemac register structure.
+ *
+ * Dumps every offset in gemac_regs[], then the specific-address bottom/top
+ * pairs starting at EMAC_SPEC5_ADD_BOT; total word count matches
+ * pfe_eth_gemac_reglen().
+ */
+static void pfe_eth_gemac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+	int i,j;
+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
+	u32 *buf = (u32 *) regbuf;
+
+	for (i = 0; i < sizeof (gemac_regs) / sizeof (u32); i++)
+		buf[i] = readl( priv->EMAC_baseaddr + gemac_regs[i] );
+
+	/* i keeps counting so the second loop appends after the fixed dump */
+	for (j = 0; j < (( MAX_UC_SPEC_ADDR_REG - 3 ) * 2); j++,i++)
+		buf[i] = readl( priv->EMAC_baseaddr + EMAC_SPEC5_ADD_BOT + (j<<2) );
+
+}
+
+/**
+ * pfe_eth_get_msglevel - report the netif debug message mask.
+ *
+ */
+static uint32_t pfe_eth_get_msglevel(struct net_device *dev)
+{
+	struct pfe_eth_priv_s *priv;
+
+	priv = netdev_priv(dev);
+	return priv->msg_enable;
+}
+
+/**
+ * pfe_eth_set_msglevel - store a new netif debug message mask.
+ *
+ */
+static void pfe_eth_set_msglevel(struct net_device *dev, uint32_t data)
+{
+	struct pfe_eth_priv_s *priv;
+
+	priv = netdev_priv(dev);
+	priv->msg_enable = data;
+}
+
+/* Coalescing timer field mask (bit 31 is the enable flag), clock/usec
+ * conversion (sys_clk is in kHz), and the resulting maximum in usecs */
+#define HIF_RX_COAL_MAX_CLKS		(~(1<<31))
+#define HIF_RX_COAL_CLKS_PER_USEC	(pfe->ctrl.sys_clk/1000)
+#define HIF_RX_COAL_MAX_USECS		(HIF_RX_COAL_MAX_CLKS/HIF_RX_COAL_CLKS_PER_USEC)
+
+/**
+ * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
+ *
+ * 0 usecs disables coalescing entirely; otherwise the usec value is
+ * converted to clocks and written with the enable bit set.
+ */
+static int pfe_eth_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *ec)
+{
+	if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
+		return -EINVAL;
+
+	if (!ec->rx_coalesce_usecs) {
+		writel(0, HIF_INT_COAL);
+		return 0;
+	}
+
+	writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) | HIF_INT_COAL_ENABLE, HIF_INT_COAL);
+
+	return 0;
+}
+
+/**
+ * pfe_eth_get_coalesce - report the rx coalescing timer in usecs.
+ *
+ * Reports 0 when the enable bit is clear, otherwise converts the clock
+ * count back to microseconds.
+ */
+static int pfe_eth_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *ec)
+{
+	int coal = readl(HIF_INT_COAL);
+
+	ec->rx_coalesce_usecs = (coal & HIF_INT_COAL_ENABLE) ?
+		(coal & HIF_RX_COAL_MAX_CLKS) / HIF_RX_COAL_CLKS_PER_USEC : 0;
+
+	return 0;
+}
+
+/**
+ * pfe_eth_pause_rx_enabled - test whether rx pause is enabled on the GEM.
+ *
+ * Returns 1 if the pause-rx bit is set in the network config register, else 0.
+ */
+static int pfe_eth_pause_rx_enabled(struct pfe_eth_priv_s *priv)
+{
+	u32 netcfg = readl(priv->EMAC_baseaddr + EMAC_NETWORK_CONFIG);
+
+	return !!(netcfg & EMAC_ENABLE_PAUSE_RX);
+}
+
+/**
+ * pfe_eth_set_pauseparam - Sets pause parameters
+ *
+ * Only rx pause is configurable (tx/autoneg are ignored, see get_pauseparam).
+ * Updates both the GEM pause-rx bit and, when a PHY is attached, the PHY's
+ * advertised pause capabilities.
+ */
+static int pfe_eth_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(dev);
+
+	if (epause->rx_pause)
+	{
+		gemac_enable_pause_rx(priv->EMAC_baseaddr);
+		if (priv->phydev)
+			priv->phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+	}
+	else
+	{
+		gemac_disable_pause_rx(priv->EMAC_baseaddr);
+		if (priv->phydev)
+			priv->phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+	}
+
+	return 0;
+}
+
+/**
+ * pfe_eth_get_pauseparam - report pause settings; only rx pause can be on.
+ *
+ */
+static void pfe_eth_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	/* autoneg and tx pause are never enabled by this driver */
+	epause->autoneg = 0;
+	epause->tx_pause = 0;
+	epause->rx_pause = pfe_eth_pause_rx_enabled(netdev_priv(dev));
+}
+
+
+/* ethtool entry points for the PFE ethernet ports (non-static: referenced
+ * from netdev setup elsewhere in this file) */
+struct ethtool_ops pfe_ethtool_ops = {
+	.get_settings = pfe_eth_get_settings,
+	.set_settings = pfe_eth_set_settings,
+	.get_drvinfo = pfe_eth_get_drvinfo,
+	.get_regs_len = pfe_eth_gemac_reglen,
+	.get_regs = pfe_eth_gemac_get_regs,
+	.get_link = ethtool_op_get_link,
+	.get_strings = pfe_eth_gstrings,
+	.get_sset_count = pfe_eth_stats_count,
+	.get_ethtool_stats = pfe_eth_fill_stats,
+	.get_msglevel = pfe_eth_get_msglevel,
+	.set_msglevel = pfe_eth_set_msglevel,
+	.set_coalesce = pfe_eth_set_coalesce,
+	.get_coalesce = pfe_eth_get_coalesce,
+	.set_pauseparam = pfe_eth_set_pauseparam,
+	.get_pauseparam = pfe_eth_get_pauseparam,
+};
+
+
+
+/** pfe_eth_mdio_reset - reset the MII management interface.
+ *
+ * Programs the MDC clock divider and sets the MDIO enable bit, then waits
+ * for the management interface to go idle.
+ * NOTE(review): the idle wait is unbounded (no timeout), unlike
+ * pfe_eth_gemac_phy_timeout() used by read/write -- confirm this cannot hang.
+ */
+static int pfe_eth_mdio_reset(struct mii_bus *bus)
+{
+	struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
+
+	netif_info(priv, drv, priv->dev, "%s\n", __func__);
+
+#if !defined(CONFIG_PLATFORM_EMULATION)
+	mutex_lock(&bus->mdio_lock);
+
+	/* Setup the MII Mgmt clock speed */
+	gemac_set_mdc_div(priv->EMAC_baseaddr, priv->mdc_div);
+
+	/* Reset the management interface */
+	__raw_writel(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_CONTROL) | EMAC_MDIO_EN,
+			priv->EMAC_baseaddr + EMAC_NETWORK_CONTROL);
+
+	/* Wait until the bus is free */
+	while(!(__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_STATUS) & EMAC_PHY_IDLE));
+
+	mutex_unlock(&bus->mdio_lock);
+#endif
+
+	return 0;
+}
+
+
+/** pfe_eth_gemac_phy_timeout - wait for the MII mgmt interface to go idle.
+ *
+ * Polls the PHY idle bit, sleeping 10us between attempts; gives up after
+ * @timeout iterations.  Returns 0 when idle, -1 on timeout.
+ */
+static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
+{
+	for (;;) {
+		if (__raw_readl(priv->EMAC_baseaddr + EMAC_NETWORK_STATUS) & EMAC_PHY_IDLE)
+			return 0;
+
+		if (timeout-- <= 0)
+			return -1;
+
+		udelay(10);
+	}
+}
+
+
+/** pfe_eth_mdio_write - write one 16-bit value to a PHY register.
+ *
+ * Builds an MII management frame in the PHY management register:
+ * 0x50020000 presumably encodes the clause-22 start/write-opcode bits
+ * (TODO confirm against the GEM datasheet); phy addr goes in bits 27:23,
+ * register in 22:18, data in 15:0.  Returns 0 on success, -1 on timeout.
+ */
+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
+{
+	struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
+	u32 write_data;
+
+#if !defined(CONFIG_PLATFORM_EMULATION)
+
+	netif_info(priv, hw, priv->dev, "%s: phy %d\n", __func__, mii_id);
+
+//	netif_info(priv, hw, priv->dev, "%s %d %d %x\n", bus->id, mii_id, regnum, value);
+
+	write_data = 0x50020000;
+	write_data |= ((mii_id << 23) | (regnum << 18) | value);
+	__raw_writel(write_data, priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT);
+
+	if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)){
+		netdev_err(priv->dev, "%s: phy MDIO write timeout\n", __func__);
+		return -1;
+	}
+
+#endif
+
+	return 0;
+}
+
+
+/** pfe_eth_mdio_read - read one 16-bit value from a PHY register.
+ *
+ * Same frame layout as pfe_eth_mdio_write but with the read opcode
+ * (0x60020000 -- TODO confirm against the GEM datasheet); the result is
+ * read back from the low 16 bits of the PHY management register.
+ * Returns the value, or -1 on timeout.
+ */
+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
+	u16 value = 0;
+	u32 write_data;
+
+#if !defined(CONFIG_PLATFORM_EMULATION)
+	netif_info(priv, hw, priv->dev, "%s: phy %d\n", __func__, mii_id);
+
+	write_data = 0x60020000;
+	write_data |= ((mii_id << 23) | (regnum << 18));
+
+	__raw_writel(write_data, priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT);
+
+	if (pfe_eth_gemac_phy_timeout( priv, EMAC_MDIO_TIMEOUT))	{
+		netdev_err(priv->dev, "%s: phy MDIO read timeout\n", __func__);
+		return -1;
+	}
+
+	value = __raw_readl(priv->EMAC_baseaddr + EMAC_PHY_MANAGEMENT) & 0xFFFF;
+#endif
+
+//	netif_info(priv, hw, priv->dev, "%s %d %d %x\n", bus->id, mii_id, regnum, value);
+
+	return value;
+}
+
+
+/** pfe_eth_mdio_init - allocate and register the MDIO bus for one GEMAC.
+ * @priv:  per-port driver state (provides EMAC base and parent device)
+ * @minfo: platform data carrying phy_mask, irq table and MDC divider
+ *
+ * Under CONFIG_PLATFORM_EMULATION this is a no-op returning 0.
+ * Returns 0 on success, negative errno on allocation/registration failure.
+ */
+static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv, struct comcerto_mdio_platform_data *minfo)
+{
+	struct mii_bus *bus;
+	int rc;
+
+	netif_info(priv, drv, priv->dev, "%s\n", __func__);
+
+#if !defined(CONFIG_PLATFORM_EMULATION)
+	bus = mdiobus_alloc();
+	if (!bus) {
+		netdev_err(priv->dev, "mdiobus_alloc() failed\n");
+		rc = -ENOMEM;
+		goto err0;
+	}
+
+	bus->name = "Comcerto MDIO Bus";
+	bus->read = &pfe_eth_mdio_read;
+	bus->write = &pfe_eth_mdio_write;
+	bus->reset = &pfe_eth_mdio_reset;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "comcerto-%x", priv->id);
+	bus->priv = priv;
+
+	bus->phy_mask = minfo->phy_mask;
+	priv->mdc_div = minfo->mdc_div;
+
+	/* Fall back to a safe divider when platform data leaves it unset */
+	if (!priv->mdc_div)
+		priv->mdc_div = 64;
+
+	bus->irq = minfo->irq;
+
+	bus->parent = priv->pfe->dev;
+
+	netif_info(priv, drv, priv->dev, "%s: mdc_div: %d, phy_mask: %x \n", __func__, priv->mdc_div, bus->phy_mask);
+
+	rc = mdiobus_register(bus);
+	if (rc) {
+		netdev_err(priv->dev, "mdiobus_register(%s) failed\n", bus->name);
+		goto err1;
+	}
+
+	priv->mii_bus = bus;
+
+	return 0;
+
+err1:
+	mdiobus_free(bus);
+err0:
+	return rc;
+#else
+	return 0;
+#endif
+
+}
+
+/** pfe_eth_mdio_exit
+ */
+static void pfe_eth_mdio_exit(struct mii_bus *bus)
+{
+ if (!bus)
+ return;
+
+ netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct pfe_eth_priv_s *)(bus->priv))->dev, "%s\n", __func__);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+}
+
+/** pfe_get_interface
+ */
+static phy_interface_t pfe_get_interface(struct net_device *dev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+ u32 mii_mode = priv->einfo->mii_config;
+
+ netif_info(priv, drv, dev, "%s\n", __func__);
+
+ if (priv->einfo->gemac_mode & (GEMAC_SW_CONF)) {
+ switch (mii_mode) {
+ case CONFIG_COMCERTO_USE_GMII:
+ return PHY_INTERFACE_MODE_GMII;
+ break;
+ case CONFIG_COMCERTO_USE_RGMII:
+ return PHY_INTERFACE_MODE_RGMII;
+ break;
+ case CONFIG_COMCERTO_USE_RMII:
+ return PHY_INTERFACE_MODE_RMII;
+ break;
+ case CONFIG_COMCERTO_USE_SGMII:
+ return PHY_INTERFACE_MODE_SGMII;
+ break;
+
+ default :
+ case CONFIG_COMCERTO_USE_MII:
+ return PHY_INTERFACE_MODE_MII;
+ break;
+
+ }
+ } else {
+ // Bootstrap config read from controller
+ BUG();
+ return 0;
+ }
+}
+
+/** pfe_get_phydev_speed
+ */
+static int pfe_get_phydev_speed(struct phy_device *phydev)
+{
+ switch (phydev->speed) {
+ case 10:
+ return SPEED_10M;
+ case 100:
+ return SPEED_100M;
+ case 1000:
+ default:
+ return SPEED_1000M;
+ }
+
+}
+
+/** pfe_get_phydev_duplex
+ */
+static int pfe_get_phydev_duplex(struct phy_device *phydev)
+{
+ return ( phydev->duplex == DUPLEX_HALF ) ? DUP_HALF:DUP_FULL ;
+}
+
/** pfe_eth_adjust_link - phylib callback invoked on link state changes.
 *
 * Propagates duplex/speed changes from the PHY into the GEMAC and caches
 * the last-seen link parameters in priv->oldlink/oldspeed/oldduplex so
 * unchanged notifications are cheap. All state is updated under priv->lock
 * with interrupts disabled.
 */
static void pfe_eth_adjust_link(struct net_device *dev)
{
	struct pfe_eth_priv_s *priv = netdev_priv(dev);
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;	/* set when anything changed, to trigger the log line */

	netif_info(priv, drv, dev, "%s\n", __func__);

	spin_lock_irqsave(&priv->lock, flags);
	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			gemac_set_duplex(priv->EMAC_baseaddr, pfe_get_phydev_duplex(phydev));
			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			gemac_set_speed(priv->EMAC_baseaddr, pfe_get_phydev_speed(phydev));
			priv->oldspeed = phydev->speed;
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}

	} else if (priv->oldlink) {
		/* Link went down: forget the cached parameters so the next
		 * link-up reprograms the MAC */
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);
}
+
+
+/** pfe_phy_exit
+ */
+static void pfe_phy_exit(struct net_device *dev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+
+ netif_info(priv, drv, dev, "%s\n", __func__);
+
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+}
+
+/** pfe_eth_stop
+ */
+static void pfe_eth_stop( struct net_device *dev )
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+
+ netif_info(priv, drv, dev, "%s\n", __func__);
+
+ gemac_disable(priv->EMAC_baseaddr);
+ gpi_disable(priv->GPI_baseaddr);
+
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+}
+
+/** pfe_eth_start
+ */
+static int pfe_eth_start( struct pfe_eth_priv_s *priv )
+{
+ netif_info(priv, drv, priv->dev, "%s\n", __func__);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ gpi_enable(priv->GPI_baseaddr);
+ gemac_enable(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+/** pfe_phy_init
+ *
+ */
+static int pfe_phy_init(struct net_device *dev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
+ phy_interface_t interface;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ sprintf(bus_id, "comcerto-%d", priv->einfo->bus_id);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, priv->einfo->phy_id);
+
+ netif_info(priv, drv, dev, "%s: %s\n", __func__, phy_id);
+
+ interface = pfe_get_interface(dev);
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phydev = phy_connect(dev, phy_id, &pfe_eth_adjust_link, 0, interface);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(dev, "phy_connect() failed\n");
+ return PTR_ERR(phydev);
+ }
+
+ priv->phydev = phydev;
+
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ if (pfe_eth_pause_rx_enabled(priv))
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ else
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ return 0;
+}
+
/** pfe_gemac_init - program the GEMAC from the platform's software config.
 *
 * Derives MII mode, speed and duplex from priv->einfo, applies them, then
 * sets the fixed rx policy (broadcast allowed, unicast/multicast filters
 * disabled, FCS rx disabled, 1536-byte and jumbo rx enabled, pause rx on,
 * 64-bit bus width). RX checksum offload mirrors the NETIF_F_RXCSUM
 * feature flag. Always returns 0.
 */
static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
{
	GEMAC_CFG cfg;

	netif_info(priv, ifup, priv->dev, "%s\n", __func__);

	/* software config */
	/* MII interface mode selection */
	switch (priv->einfo->mii_config) {
	case CONFIG_COMCERTO_USE_GMII:
		cfg.mode = GMII;
		break;

	case CONFIG_COMCERTO_USE_MII:
		cfg.mode = MII;
		break;

	case CONFIG_COMCERTO_USE_RGMII:
		cfg.mode = RGMII;
		break;

	case CONFIG_COMCERTO_USE_RMII:
		cfg.mode = RMII;
		break;

	case CONFIG_COMCERTO_USE_SGMII:
		cfg.mode = SGMII;
		break;

	default:
		cfg.mode = RGMII;
	}

	/* Speed selection */
	/* NOTE(review): the switch expression masks with GEMAC_SW_SPEED_1G only,
	 * yet there are cases for GEMAC_SW_SPEED_100M/10M - those can only match
	 * if their values are bit-subsets of the 1G mask. Confirm the bit
	 * definitions; otherwise a 100M/10M software config silently falls into
	 * the default (1G). */
	switch (priv->einfo->gemac_mode & GEMAC_SW_SPEED_1G ) {
	case GEMAC_SW_SPEED_1G:
		cfg.speed = SPEED_1000M;
		break;

	case GEMAC_SW_SPEED_100M:
		cfg.speed = SPEED_100M;
		break;

	case GEMAC_SW_SPEED_10M:
		cfg.speed = SPEED_10M;
		break;

	default:
		cfg.speed = SPEED_1000M;
	}

	/* Duplex selection */
	cfg.duplex = ( priv->einfo->gemac_mode & GEMAC_SW_FULL_DUPLEX ) ? DUPLEX_FULL : DUPLEX_HALF;

	gemac_set_config( priv->EMAC_baseaddr, &cfg);
	gemac_allow_broadcast( priv->EMAC_baseaddr );
	gemac_disable_unicast( priv->EMAC_baseaddr );
	gemac_disable_multicast( priv->EMAC_baseaddr );
	gemac_disable_fcs_rx( priv->EMAC_baseaddr );
	gemac_enable_1536_rx( priv->EMAC_baseaddr );
	gemac_enable_rx_jmb( priv->EMAC_baseaddr );
	gemac_enable_pause_rx( priv->EMAC_baseaddr );
	gemac_set_bus_width(priv->EMAC_baseaddr, 64);

	/*GEM will perform checksum verifications*/
	if (priv->dev->features & NETIF_F_RXCSUM)
		gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
	else
		gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);

	return 0;
}
+
+/** pfe_eth_event_handler
+ */
+static int pfe_eth_event_handler(void *data, int event, int qno)
+{
+ struct pfe_eth_priv_s *priv = data;
+
+ switch (event) {
+ case EVENT_RX_PKT_IND:
+
+ if (qno == 0) {
+ if (napi_schedule_prep(&priv->high_napi)) {
+ netif_info(priv, intr, priv->dev, "%s: schedule high prio poll\n", __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+
+ __napi_schedule(&priv->high_napi);
+ }
+ }
+ else if (qno == 1) {
+ if (napi_schedule_prep(&priv->low_napi)) {
+ netif_info(priv, intr, priv->dev, "%s: schedule low prio poll\n", __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+ __napi_schedule(&priv->low_napi);
+ }
+ }
+ else if (qno == 2) {
+ if (napi_schedule_prep(&priv->lro_napi)) {
+ netif_info(priv, intr, priv->dev, "%s: schedule lro prio poll\n", __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+ __napi_schedule(&priv->lro_napi);
+ }
+ }
+
+ break;
+
+ case EVENT_TXDONE_IND:
+ case EVENT_HIGH_RX_WM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/** pfe_eth_open - netdev hook: bring the interface up.
 *
 * Registers this port as a HIF client, enables the gemac tx clock,
 * programs the MAC, connects the PHY (unless GEMAC_NO_PHY), enables NAPI,
 * starts the datapath, tells the PFE firmware the port is up and arms the
 * slow tx-recovery timer. Returns 0 or a negative errno; error paths
 * unwind in reverse order.
 */
static int pfe_eth_open( struct net_device *dev )
{
	struct pfe_eth_priv_s *priv = netdev_priv(dev);
	struct hif_client_s *client;
	int rc;

	netif_info(priv, ifup, dev, "%s\n", __func__);

	/* Register client driver with HIF */
	client = &priv->client;
	memset(client, 0, sizeof(*client));
	client->id = PFE_CL_GEM0 + priv->id;
	client->tx_qn = EMAC_TXQ_CNT;
	client->rx_qn = EMAC_RXQ_CNT;
	client->priv = priv;
	client->pfe = priv->pfe;
	client->event_handler = pfe_eth_event_handler;
	client->user_cpu_id = priv->cpu_id;

	/* FIXME : For now hif lib sets all tx and rx queues to same size */
	client->tx_qsize = EMAC_TXQ_DEPTH;
	client->rx_qsize = EMAC_RXQ_DEPTH;

	if ((rc = hif_lib_client_register(client))) {
		netdev_err(dev, "%s: hif_lib_client_register(%d) failed\n", __func__, client->id);
		goto err0;
	}

	netif_info(priv, drv, dev, "%s: registered client: %p\n", __func__, client);

	/* Enable gemac tx clock */
	clk_enable(priv->gemtx_clk);

	pfe_gemac_init(priv);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		netdev_err(dev, "%s: invalid MAC address\n", __func__);
		rc = -EADDRNOTAVAIL;
		goto err1;
	}

	/* Program the station address into specific-address register 1 */
	gemac_set_laddrN( priv->EMAC_baseaddr, ( MAC_ADDR *)dev->dev_addr, 1 );


	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
		rc = pfe_phy_init(dev);
		if (rc) {
			netdev_err(dev, "%s: pfe_phy_init() failed\n", __func__);
			goto err2;
		}
	}

	napi_enable(&priv->high_napi);
	napi_enable(&priv->low_napi);
	napi_enable(&priv->lro_napi);

	rc = pfe_eth_start( priv );
	netif_tx_wake_all_queues(dev);

	/* Tell the PFE firmware this port is now up */
	pfe_ctrl_set_eth_state(priv->id, 1, dev->dev_addr);

	/* Arm the periodic tx-recovery timer (see pfe_eth_tx_timeout) */
	priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
	add_timer(&priv->tx_timer);

	return 0;
err2:
	/* NOTE(review): the "up" state is only set later in this function, so
	 * clearing it here looks redundant (though harmless) - confirm. */
	pfe_ctrl_set_eth_state(priv->id, 0, NULL);

err1:
	/* err2 falls through here: drop the HIF client and the tx clock */
	hif_lib_client_unregister(&priv->client);
	clk_disable(priv->gemtx_clk);

err0:
	return rc;
}
/* Upper bound on how long close() waits for the TMU to drain pending tx */
#define TX_POLL_TIMEOUT_MS 1000
/** pfe_eth_close - netdev hook: bring the interface down.
 *
 * Reverses pfe_eth_open(): stops the timers and tx queues, force-flushes
 * completed tx buffers, tells the PFE firmware the port is down, then
 * polls the TMU (bounded by TX_POLL_TIMEOUT_MS) until it has drained,
 * before stopping the hardware, NAPI, the PHY, the tx clock and finally
 * unregistering the HIF client. Always returns 0.
 */
static int pfe_eth_close( struct net_device *dev )
{
	struct pfe_eth_priv_s *priv = netdev_priv(dev);
	int i, qstatus;
	unsigned long next_poll = jiffies + 1, end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
	int tx_pkts, prv_tx_pkts;

	netif_info(priv, ifdown, dev, "%s\n", __func__);

	del_timer_sync(&priv->tx_timer);

	for(i = 0; i < EMAC_TXQ_CNT; i++)
		hrtimer_cancel(&priv->fast_tx_timeout[i].timer);

	netif_tx_stop_all_queues(dev);

	/* Reclaim every completed tx buffer on all queues (force flush) */
	pfe_eth_flush_tx(priv, 1);

	/*Disable transmit in PFE before disabling GEMAC */
	pfe_ctrl_set_eth_state(priv->id, 0, NULL);

	prv_tx_pkts = tmu_pkts_processed(priv->id);
	/*Wait till TMU transmits all pending packets
	 * poll tmu_qstatus and pkts processed by TMU for every 10ms
	 * Consider TMU is busy, If we see TMU qeueu pending or any packets processed by TMU
	 */
	while(1) {

		/* Re-sample at most once per jiffy */
		if (time_after(jiffies, next_poll)) {

			tx_pkts = tmu_pkts_processed(priv->id);
			qstatus = tmu_qstatus(priv->id) & 0x7ffff;

			/* Idle: queue empty and no packets processed since the
			 * previous poll */
			if(!qstatus && (tx_pkts == prv_tx_pkts)) {
				break;
			}
			/*Don't wait forever, break if we cross max timeout(TX_POLL_TIMEOUT_MS) */
			if (time_after(jiffies, end)) {
				printk(KERN_ERR "TMU%d is busy after %dmsec\n", priv->id, TX_POLL_TIMEOUT_MS);
				break;
			}
			prv_tx_pkts = tx_pkts;
			next_poll++;
		}
		if (need_resched())
			schedule();


	}
	/* Wait for some more time to complete transmitting packet if any */
	next_poll = jiffies + 1;
	while(1) {
		if (time_after(jiffies, next_poll))
			break;
		if (need_resched())
			schedule();
	}

	pfe_eth_stop(dev);

	napi_disable(&priv->lro_napi);
	napi_disable(&priv->low_napi);
	napi_disable(&priv->high_napi);

	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
		pfe_phy_exit(dev);


	/* Disable gemac tx clock */
	clk_disable(priv->gemtx_clk);

	hif_lib_client_unregister(&priv->client);

	return 0;
}
+
+
/** pfe_eth_get_queuenum - choose the tx queue/priority for a packet.
 *
 * Priority source, in order: the conntrack CONNMARK (when conntrack
 * marking is compiled in and the skb carries conntrack state), then
 * skb->mark, then the interface's default_priority. The result is always
 * masked with EMAC_QUEUENUM_MASK.
 */
static int pfe_eth_get_queuenum( struct pfe_eth_priv_s *priv, struct sk_buff *skb )
{
	int queuenum;
	unsigned long flags;

	/* Get the FPP queue number from the CONNMARK value */
#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
	if (skb->nfct) {
		enum ip_conntrack_info cinfo;
		struct nf_conn *ct;
		ct = nf_ct_get(skb, &cinfo);

		if (ct) {
			u_int32_t connmark;
			connmark = ct->mark;

			/* NOTE(review): bit 31 appears to flag a second queue
			 * number carried in the upper half-word, used by ports
			 * other than 0 - confirm against the fast-path CONNMARK
			 * convention. */
			if ((connmark & 0x80000000) && priv->id != 0)
				connmark >>= 16;

			queuenum = connmark & EMAC_QUEUENUM_MASK;
		}
		else
			queuenum = 0;
	}
	else /* continued after #endif ... */
#endif
	/* No conntrack support/state: fall back to skb->mark, then to the
	 * configured default priority (read under the lock) */
	if (skb->mark & EMAC_QUEUENUM_MASK)
		queuenum = skb->mark & EMAC_QUEUENUM_MASK;
	else{
		spin_lock_irqsave(&priv->lock, flags);
		queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return queuenum;
}
+
+
+
/** pfe_eth_might_stop_tx - stop the tx queue when resources are exhausted.
 *
 * Called with the HIF tx lock held. Checks three resources: HIF
 * descriptors, client queue space (both against @n_desc) and tx credit
 * (against @n_segs). On the first shortage it drops the lock, reclaims
 * completed buffers, refreshes the credit and retries once. If still
 * short, it stops @tx_queue, arms the fast-recovery hrtimer and returns
 * -1. Returns 0 when there is room to transmit.
 */
static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum, struct netdev_queue *tx_queue, unsigned int n_desc, unsigned int n_segs)
{
	int tried = 0;
	ktime_t kt;

try_again:
	if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc)
	|| (hif_lib_tx_avail(&priv->client, queuenum) < n_desc)
	|| (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {

		if (!tried) {
			/* Release the lock around the flush, reclaim, retry once */
			hif_tx_unlock(&pfe->hif);
			pfe_eth_flush_txQ(priv, queuenum, 1, n_desc);
			hif_lib_update_credit(&priv->client, queuenum);
			tried = 1;
			hif_tx_lock(&pfe->hif);
			goto try_again;
		}
#ifdef PFE_ETH_TX_STATS
		/* Record which resource ran out */
		if (__hif_tx_avail(&pfe->hif) < n_desc)
			priv->stop_queue_hif[queuenum]++;
		else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
			priv->stop_queue_hif_client[queuenum]++;
		}
		else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs) {
			priv->stop_queue_credit[queuenum]++;
		}
		priv->stop_queue_total[queuenum]++;
#endif
		netif_tx_stop_queue(tx_queue);

		/* The queue is woken again by pfe_eth_fast_tx_timeout() */
		kt = ktime_set(0, COMCERTO_TX_FAST_RECOVERY_TIMEOUT_MS * NSEC_PER_MSEC);
		hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt, HRTIMER_MODE_REL);
		return -1;
	}
	else {
		return 0;
	}
}
+
/* Maximum number of IPsec SA handles carried in the hif header */
#define SA_MAX_OP 2
/** pfe_hif_send_packet
 *
 * At this level if TX fails we drop the packet
 *
 * Pushes one skb to the HIF: GSO packets are segmented via pfe_tso();
 * otherwise the head and page fragments are queued as a HIF buffer chain.
 * Handles software checksum for oversized frames, optional IPsec offload
 * metadata, tx statistics and credit accounting (one credit per packet, or
 * one per GSO segment). Caller must hold the HIF tx lock.
 */
static void pfe_hif_send_packet( struct sk_buff *skb, struct pfe_eth_priv_s *priv, int queuenum)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int nr_frags, nr_bytes;
	u32 ctrl = 0;
#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
	int i;
	u16 sah[SA_MAX_OP] = {0};
	struct hif_ipsec_hdr *hif_ipsec;
#endif

	netif_info(priv, tx_queued, priv->dev, "%s\n", __func__);

	/* GSO: pfe_tso() segments and queues; consume one credit per segment */
	if (skb_is_gso(skb)) {
		if(likely(nr_bytes = pfe_tso(skb, &priv->client, &priv->tso, queuenum, 0))) {

			hif_lib_tx_credit_use(pfe, priv->id, queuenum, sh->gso_segs);
			priv->stats.tx_packets += sh->gso_segs;
			priv->stats.tx_bytes += nr_bytes;
		}
		else
			priv->stats.tx_dropped++;

		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* NOTE(review): frames above 1522 bytes are checksummed in
		 * software - presumably a hardware limit on the tx checksum
		 * engine; confirm against the GEMAC documentation. */
		if (skb->len > 1522) {
			*(u16*)(skb_transport_header(skb) + skb->csum_offset) = csum_fold(csum_partial(skb_transport_header(skb), skb->len - skb_transport_offset(skb), 0));
			skb->ip_summed = 0;
			ctrl = 0;
		}
		else
			ctrl = HIF_CTRL_TX_CHECKSUM;
	}

#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
	/* check if packet sent from Host to PFE needs IPsec processing */
	if (skb->ipsec_offload)
	{
		if (skb->sp)
		{
			/* Collect up to SA_MAX_OP SA handles from the secpath */
			for (i = skb->sp->len-1; i >= 0; i--)
			{
				struct xfrm_state *x = skb->sp->xvec[i];
				sah[i] = htons(x->handle);
			}

			ctrl |= HIF_CTRL_TX_IPSEC_OUT;

			/* add SA info to the hif header*/
			hif_ipsec = (struct hif_ipsec_hdr *)(skb->data - sizeof(struct hif_ipsec_hdr));
			hif_ipsec->sa_handle[0] = sah[0];
			hif_ipsec->sa_handle[1] = sah[1];

			/* Prepend the hif_ipsec header to the packet data */
			skb->data -= sizeof(struct hif_ipsec_hdr);
			skb->len += sizeof(struct hif_ipsec_hdr);
		}
		else
			printk(KERN_ERR "%s: secure path data not found\n", __func__);
	}
#endif

	nr_frags = sh->nr_frags;

	if (nr_frags) {
		skb_frag_t *f;
		int i;

		/* Head first; only the final buffer carries HIF_DATA_VALID so
		 * completion frees the skb once (see pfe_eth_flush_txQ) */
		__hif_lib_xmit_pkt(&priv->client, queuenum, skb->data, skb_headlen(skb), ctrl, HIF_FIRST_BUFFER, skb);

		for (i = 0; i < nr_frags - 1; i++) {
			f = &sh->frags[i];
			__hif_lib_xmit_pkt(&priv->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, 0x0, skb);
		}

		f = &sh->frags[i];

		__hif_lib_xmit_pkt(&priv->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, HIF_LAST_BUFFER|HIF_DATA_VALID, skb);

		netif_info(priv, tx_queued, priv->dev, "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n", __func__, skb, nr_frags, skb->len);
	}
	else {
		__hif_lib_xmit_pkt(&priv->client, queuenum, skb->data, skb->len, ctrl, HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID, skb);
		netif_info(priv, tx_queued, priv->dev, "%s: pkt sent successfully skb:%p len:%d\n", __func__, skb, skb->len);
	}
	hif_tx_dma_start();
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
}
+
/** pfe_eth_flush_txQ - reclaim completed tx buffers from one queue.
 *
 * @txQ_num: queue to clean
 * @from_tx: non-zero when called from the transmit path (caller already
 *           holds the tx queue lock); zero when called from timer context,
 *           in which case the lock is taken here
 * @n_desc: descriptor budget hint; the work bound is max(TX_FREE_MAX_COUNT,
 *          n_desc) and only applies on the from_tx path
 */
static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int txQ_num, int from_tx, int n_desc)
{
	struct sk_buff *skb;
	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, txQ_num);
	int count = max(TX_FREE_MAX_COUNT, n_desc);
	unsigned int flags;

	netif_info(priv, tx_done, priv->dev, "%s\n", __func__);

	if (!from_tx)
		__netif_tx_lock(tx_queue, smp_processor_id());

	/* Clean HIF and client queue */
	while (count && (skb = hif_lib_tx_get_next_complete(&priv->client, txQ_num, &flags, count))) {

		/* FIXME : Invalid data can be skipped in hif_lib itself */
		if (flags & HIF_DATA_VALID) {
#ifdef ETH_HIF_NODMA_MAP
			if (flags & HIF_DONT_DMA_MAP) {
				pfe_tx_skb_unmap(skb);
			}
#endif
			dev_kfree_skb_any(skb);

		}
		// When called from the timer, flush all descriptors
		/* (i.e. count is only decremented on the from_tx path, so the
		 * timer path loops until the queue is fully drained) */
		if (from_tx)
			count--;
	}

	if (!from_tx)
		__netif_tx_unlock(tx_queue);
}
+
+/** pfe_eth_flush_tx
+ */
+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv, int force)
+{
+ int ii;
+
+ netif_info(priv, tx_done, priv->dev, "%s\n", __func__);
+
+ for (ii = 0; ii < EMAC_TXQ_CNT; ii++) {
+ if (force || (time_after(jiffies, priv->client.tx_q[ii].jiffies_last_packet + (COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ)/1000))) {
+ pfe_eth_flush_txQ(priv, ii, 0, 0); //We will release everything we can based on from_tx param, so the count param can be set to any value
+ hif_lib_update_credit(&priv->client, ii);
+ }
+ }
+}
+
+
/** pfe_eth_send_packet - netdev transmit hook.
 *
 * Ensures enough headroom for the HIF/PFE header, checks tx resources
 * (possibly stopping the queue), hands the packet to pfe_hif_send_packet()
 * under the HIF tx lock, then opportunistically reclaims completed
 * buffers. Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when resources ran out
 * and the queue was stopped.
 */
static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct pfe_eth_priv_s *priv = netdev_priv(dev);
	int txQ_num = skb_get_queue_mapping(skb);
	int n_desc, n_segs, count;
	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, txQ_num);

	netif_info(priv, tx_queued, dev, "%s\n", __func__);

	/* Non-GSO packets need headroom for the HIF header; reallocate the
	 * head if the skb does not have enough */
	if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ + sizeof(unsigned long)))) {

		netif_warn(priv, tx_err, priv->dev, "%s: copying skb\n", __func__);

		if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned long)), 0, GFP_ATOMIC)) {
			/* No need to re-transmit, no way to recover*/
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	pfe_tx_get_req_desc(skb, &n_desc, &n_segs);

	hif_tx_lock(&pfe->hif);
	if(unlikely(pfe_eth_might_stop_tx(priv, txQ_num, tx_queue, n_desc, n_segs))) {
#ifdef PFE_ETH_TX_STATS
		if(priv->was_stopped[txQ_num]) {
			priv->clean_fail[txQ_num]++;
			priv->was_stopped[txQ_num] = 0;
		}
#endif
		hif_tx_unlock(&pfe->hif);
		return NETDEV_TX_BUSY;
	}

	pfe_hif_send_packet(skb, priv, txQ_num);

	hif_tx_unlock(&pfe->hif);

	dev->trans_start = jiffies;

	// Recycle buffers if a socket's send buffer becomes half full or if the HIF client queue starts filling up
	if (((count = (hif_lib_tx_pending(&priv->client, txQ_num) - HIF_CL_TX_FLUSH_MARK)) > 0)
	|| (skb->sk && ((sk_wmem_alloc_get(skb->sk) << 1) > skb->sk->sk_sndbuf)))
		pfe_eth_flush_txQ(priv, txQ_num, 1, count);

#ifdef PFE_ETH_TX_STATS
	priv->was_stopped[txQ_num] = 0;
#endif

	return NETDEV_TX_OK;
}
+
+/** pfe_eth_select_queue
+ *
+ */
+static u16 pfe_eth_select_queue( struct net_device *dev, struct sk_buff *skb )
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+
+ return pfe_eth_get_queuenum(priv, skb);
+}
+
+
+/** pfe_eth_get_stats
+ */
+static struct net_device_stats *pfe_eth_get_stats(struct net_device *dev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+
+ netif_info(priv, drv, dev, "%s\n", __func__);
+
+ return &priv->stats;
+}
+
+
+/** pfe_eth_change_mtu
+ */
+static int pfe_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+ int oldsize = dev->mtu ;
+ int frame_size = new_mtu + ETH_HLEN +4;
+
+ netif_info(priv, drv, dev, "%s\n", __func__);
+
+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ netif_err(priv, drv, dev, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ if ((new_mtu > 1500) && (dev->features & NETIF_F_TSO))
+ {
+ netdev_err(dev, "MTU cannot be set to more than 1500 while TSO is enabled. TSO must be disabled first.\n");
+ return -EINVAL;
+ }
+
+ /* Only stop and start the controller if it isn't already
+ * stopped, and we changed something */
+ if ((oldsize != new_mtu) && (dev->flags & IFF_UP)){
+ netdev_err(dev, "Can not change MTU - fast_path must be disabled and ifconfig down must be issued first\n");
+
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/** pfe_eth_set_mac_address
+ */
+static int pfe_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+ struct sockaddr *sa = addr;
+
+ netif_info(priv, drv, dev, "%s\n", __func__);
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+
+ gemac_set_laddrN(priv->EMAC_baseaddr, (MAC_ADDR *)dev->dev_addr, 1);
+
+ return 0;
+
+}
+
+/** pfe_eth_enet_addr_byte_mac
+ */
+int pfe_eth_enet_addr_byte_mac(u8 * enet_byte_addr, MAC_ADDR *enet_addr)
+{
+ if ((enet_byte_addr == NULL) || (enet_addr == NULL))
+ {
+ return -1;
+ }
+ else
+ {
+ enet_addr->bottom = enet_byte_addr[0] |
+ (enet_byte_addr[1] << 8) |
+ (enet_byte_addr[2] << 16) |
+ (enet_byte_addr[3] << 24);
+ enet_addr->top = enet_byte_addr[4] |
+ (enet_byte_addr[5] << 8);
+ return 0;
+ }
+}
+
+/** pfe_eth_get_hash
+ */
+static int pfe_eth_get_hash(u8 * addr)
+{
+ u8 temp1,temp2,temp3,temp4,temp5,temp6,temp7,temp8;
+ temp1 = addr[0] & 0x3F ;
+ temp2 = ((addr[0] & 0xC0) >> 6)| ((addr[1] & 0x0F) << 2);
+ temp3 = ((addr[1] & 0xF0) >> 4) | ((addr[2] & 0x03) << 4);
+ temp4 = (addr[2] & 0xFC) >> 2;
+ temp5 = addr[3] & 0x3F;
+ temp6 = ((addr[3] & 0xC0) >> 6) | ((addr[4] & 0x0F) << 2);
+ temp7 = ((addr[4] & 0xF0) >>4 ) | ((addr[5] & 0x03) << 4);
+ temp8 = ((addr[5] &0xFC) >> 2);
+ return (temp1 ^ temp2 ^ temp3 ^ temp4 ^ temp5 ^ temp6 ^ temp7 ^ temp8);
+}
+
/** pfe_eth_set_multi - netdev hook: program rx filtering from dev->flags
 * and the unicast/multicast address lists.
 *
 * Applies promiscuous and broadcast policy, then either opens the hash
 * filter fully (IFF_ALLMULTI) or builds the 64-bit hash from the
 * multicast list. Up to MAX_UC_SPEC_ADDR_REG extra unicast addresses are
 * placed in the GEM specific-address registers (slots 2 and up); overflow
 * unicast addresses are folded into the hash filter instead. Unused
 * specific-address slots are cleared at the end.
 */
static void pfe_eth_set_multi(struct net_device *dev)
{
	struct pfe_eth_priv_s *priv = netdev_priv(dev);
	MAC_ADDR hash_addr; /* hash register structure */
	MAC_ADDR spec_addr; /* specific mac address register structure */
	int result; /* index into hash register to set.. */
	int uc_count = 0;
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		netif_info(priv, drv, dev, "entering promiscuous mode\n");

		priv->promisc = 1;
		gemac_enable_copy_all(priv->EMAC_baseaddr);
	} else {
		priv->promisc = 0;
		gemac_disable_copy_all(priv->EMAC_baseaddr);
	}

	/* Enable broadcast frame reception if required. */
	if (dev->flags & IFF_BROADCAST) {
		gemac_allow_broadcast(priv->EMAC_baseaddr);
	} else {
		netif_info(priv, drv, dev, "disabling broadcast frame reception\n");

		gemac_no_broadcast(priv->EMAC_baseaddr);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		hash_addr.bottom = 0xFFFFFFFF;
		hash_addr.top = 0xFFFFFFFF;
		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
		gemac_enable_multicast(priv->EMAC_baseaddr);
		/* Unicast addresses still use the exact-match registers */
		netdev_for_each_uc_addr(ha, dev) {
			if(uc_count >= MAX_UC_SPEC_ADDR_REG) break;
			pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
			gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, uc_count + 2);
			uc_count++;
		}
	} else if ((netdev_mc_count(dev) > 0) || (netdev_uc_count(dev))) {
		u8 *addr;

		hash_addr.bottom = 0;
		hash_addr.top = 0;

		/* Fold every multicast address into the 64-bit hash filter */
		netdev_for_each_mc_addr(ha, dev) {
			addr = ha->addr;

			netif_info(priv, drv, dev, "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
				addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

			result = pfe_eth_get_hash(addr);

			if (result >= EMAC_HASH_REG_BITS) {
				break;
			} else {
				if (result < 32) {
					hash_addr.bottom |= (1 << result);
				} else {
					hash_addr.top |= (1 << (result - 32));
				}
			}

		}

		uc_count = -1;
		/* First MAX_UC_SPEC_ADDR_REG unicast addresses get exact-match
		 * registers; the rest fall back to the hash filter */
		netdev_for_each_uc_addr(ha, dev) {
			addr = ha->addr;

			if(++uc_count < MAX_UC_SPEC_ADDR_REG)
			{
				netdev_info(dev, "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
					addr[0], addr[1], addr[2],
					addr[3], addr[4], addr[5]);

				pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
				gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, uc_count + 2);
			}
			else
			{
				netif_info(priv, drv, dev, "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
					addr[0], addr[1], addr[2],
					addr[3], addr[4], addr[5]);

				result = pfe_eth_get_hash(addr);
				if (result >= EMAC_HASH_REG_BITS) {
					break;
				} else {
					if (result < 32)
						hash_addr.bottom |= (1 << result);
					else
						hash_addr.top |= (1 << (result - 32));
				}


			}
		}

		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
		if(netdev_mc_count(dev))
			gemac_enable_multicast(priv->EMAC_baseaddr);
		else
			gemac_disable_multicast(priv->EMAC_baseaddr);
	}

	if(netdev_uc_count(dev) >= MAX_UC_SPEC_ADDR_REG)
		gemac_enable_unicast(priv->EMAC_baseaddr);
	else
	{
		/* Check if there are any specific address HW registers that need
		 * to be flushed
		 * */
		for(uc_count = netdev_uc_count(dev); uc_count < MAX_UC_SPEC_ADDR_REG; uc_count++)
			gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);

		gemac_disable_unicast(priv->EMAC_baseaddr);
	}

	if (dev->flags & IFF_LOOPBACK) {
		gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
	}

	return;
}
+
+/** pfe_eth_set_features
+ */
+static int pfe_eth_set_features(struct net_device *dev, u32 features)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(dev);
+ int rc = 0;
+
+ if (features & NETIF_F_RXCSUM)
+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
+ else
+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
+
+ if (features & NETIF_F_LRO) {
+ if (pfe_ctrl_set_lro(1) < 0)
+ rc = -1;
+ } else {
+ if (pfe_ctrl_set_lro(0) < 0)
+ rc = -1;
+ }
+
+ return rc;
+}
+
+/** pfe_eth_fix_features
+ */
+static u32 pfe_eth_fix_features(struct net_device *dev, u32 features)
+{
+ if (dev->mtu > 1500)
+ {
+ if (features & (NETIF_F_TSO))
+ {
+ features &= ~(NETIF_F_TSO);
+ netdev_err(dev, "TSO cannot be enabled when the MTU is larger than 1500. Please set the MTU to 1500 or lower first.\n");
+ }
+ }
+
+ return features;
+}
+
+/** pfe_eth_tx_timeout
+ */
+void pfe_eth_tx_timeout(unsigned long data )
+{
+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)data;
+
+ netif_info(priv, timer, priv->dev, "%s\n", __func__);
+
+ pfe_eth_flush_tx(priv, 0);
+
+ priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
+ add_timer(&priv->tx_timer);
+}
+
+/** pfe_eth_fast_tx_timeout
+ */
+static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
+{
+ struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct pfe_eth_fast_timer, timer);
+ struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base, struct pfe_eth_priv_s, fast_tx_timeout);
+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->dev, fast_tx_timeout->queuenum);
+
+ if(netif_tx_queue_stopped(tx_queue)) {
+#ifdef PFE_ETH_TX_STATS
+ priv->was_stopped[fast_tx_timeout->queuenum] = 1;
+#endif
+ netif_tx_wake_queue(tx_queue);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/** pfe_eth_fast_tx_timeout_init
+ */
+static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
+{
+ int i;
+ for (i = 0; i < EMAC_TXQ_CNT; i++) {
+ priv->fast_tx_timeout[i].queuenum = i;
+ hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->fast_tx_timeout[i].timer.function = pfe_eth_fast_tx_timeout;
+ priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
+ }
+}
+
/** pfe_eth_rx_skb - reassemble one packet from the HIF rx queue into an skb.
 *
 * Pulls descriptors until one flagged CL_DESC_LAST completes the packet.
 * The first fragment becomes the skb head (zero-copy via alloc_skb_header
 * where the platform allows it); later fragments are chained on
 * skb_shinfo(skb)->frag_list. A partially received packet is parked in
 * priv->skb_inflight[qno] and resumed on the next call. Returns the
 * completed skb, or NULL when the packet is incomplete or was dropped.
 */
static struct sk_buff *pfe_eth_rx_skb(struct net_device *dev, struct pfe_eth_priv_s *priv, unsigned int qno)
{
	void *buf_addr;
	unsigned int rx_ctrl;
	unsigned int desc_ctrl = 0;
	struct hif_ipsec_hdr *ipsec_hdr;
	struct sk_buff *skb;
	struct sk_buff *skb_frag, *skb_frag_last = NULL;
	int length = 0, offset;
#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
	struct timespec ktime;
#endif

	/* Resume a packet whose earlier fragments already arrived */
	skb = priv->skb_inflight[qno];

	if (skb && (skb_frag_last = skb_shinfo(skb)->frag_list)) {
		/* Find the current tail of the frag_list chain */
		while (skb_frag_last->next)
			skb_frag_last = skb_frag_last->next;
	}

	while (!(desc_ctrl & CL_DESC_LAST)) {

		buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length, &offset, &rx_ctrl, &desc_ctrl, (void **)&ipsec_hdr);
		if (!buf_addr)
			goto incomplete;

#ifdef PFE_ETH_NAPI_STATS
		priv->napi_counters[NAPI_DESC_COUNT]++;
#endif

		/* First frag */
		if (desc_ctrl & CL_DESC_FIRST) {
#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI)
			skb = dev_alloc_skb(PFE_BUF_SIZE);
			if (unlikely(!skb)) {
				goto pkt_drop;
			}

			skb_copy_to_linear_data(skb, buf_addr, length + offset);
			kfree(buf_addr);
#else
#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			skb = alloc_skb(length + offset + 32, GFP_ATOMIC);
#else
			/* Zero-copy: wrap the HIF buffer in an skb header */
			skb = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC);
#endif
			if (unlikely(!skb)) {
				goto pkt_drop;
			}
#endif
			skb_reserve(skb, offset);
#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			__memcpy(skb->data, buf_addr + offset, length);
			kfree(buf_addr);
#endif
			skb_put(skb, length);
			skb->dev = dev;

			if ((dev->features & NETIF_F_RXCSUM) && (rx_ctrl & HIF_CTRL_RX_CHECKSUMMED))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb_checksum_none_assert(skb);

#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
			/* Rebuild the xfrm secpath from the SA handles the PFE
			 * attached to the packet */
			if (rx_ctrl & HIF_CTRL_RX_IPSEC_IN) {
				if (ipsec_hdr) {
					struct sec_path *sp;
					struct xfrm_state *x;
					unsigned short *sah = &ipsec_hdr->sa_handle[0];
					int i = 0;

					sp = secpath_dup(skb->sp);

					if (!sp)
					{
						/* NOTE(review): this and the lookup-failure
						 * path below free skb and then jump to
						 * pkt_drop, which frees it again because
						 * skb != NULL - looks like a double free;
						 * confirm and fix. */
						kfree_skb(skb);
						goto pkt_drop;
					}

					skb->sp = sp;

					/* at maximum 2 SA are expected */
					while (i <= 1)
					{
						if(!*sah)
							break;

						if ((x = xfrm_state_lookup_byhandle(dev_net(dev), ntohs(*sah))) == NULL)
						{
							kfree_skb(skb);
							goto pkt_drop;
						}

						sp->xvec[i] = x;

						/* Stamp first-use time on the SA */
						if (!x->curlft.use_time)
						{
							ktime = current_kernel_time();
							x->curlft.use_time = (unsigned long)ktime.tv_sec;
						}

						i++; sah++;
					}

					sp->len = i;
				}
			}
#endif
		} else {

			/* Next frags */
			if (unlikely(!skb)) {
				printk(KERN_ERR "%s: NULL skb_inflight\n", __func__);
				goto pkt_drop;
			}

#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			skb_frag = alloc_skb(length + offset + 32, GFP_ATOMIC);
#else
			skb_frag = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC);
#endif
			if (unlikely(!skb_frag)) {
				kfree(buf_addr);
				goto pkt_drop;
			}

			skb_reserve(skb_frag, offset);
#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
			__memcpy(skb_frag->data, buf_addr + offset, length);
			kfree(buf_addr);
#endif
			skb_put(skb_frag, length);

			skb_frag->dev = dev;

			/* Append the fragment to the frag_list chain */
			if (skb_shinfo(skb)->frag_list)
				skb_frag_last->next = skb_frag;
			else
				skb_shinfo(skb)->frag_list = skb_frag;

			skb->truesize += skb_frag->truesize;
			skb->data_len += length;
			skb->len += length;
			skb_frag_last = skb_frag;
		}
	}

	priv->skb_inflight[qno] = NULL;
	return skb;

incomplete:
	/* Park the partial packet until more descriptors arrive */
	priv->skb_inflight[qno] = skb;
	return NULL;

pkt_drop:
	priv->skb_inflight[qno] = NULL;

	if (skb) {
		kfree_skb(skb);
	} else {
		kfree(buf_addr);
	}

	priv->stats.rx_errors++;

	return NULL;
}
+
+
/** pfe_eth_rx_page
 *
 * Pull buffers for one packet off HIF RX queue @qno (page mode) and
 * assemble them into a single skb: a small linear part is copied out of
 * the first buffer, the rest of the data is attached as page fragments.
 *
 * Packets split across several HIF buffers are accumulated across calls
 * in priv->skb_inflight[].  For qno == 2 the slot is additionally indexed
 * by the class PE id taken from rx_ctrl, so several PEs can interleave
 * packets on that queue.
 *
 * Returns the completed skb, or NULL when the queue is empty, the packet
 * is still in flight, or an error occurred (counted in stats.rx_errors).
 */
static struct sk_buff *pfe_eth_rx_page(struct net_device *dev, struct pfe_eth_priv_s *priv, unsigned int qno)
{
	struct page *p;
	void *buf_addr;
	unsigned int rx_ctrl;
	unsigned int desc_ctrl;
	struct sk_buff *skb;
	int length, offset, data_offset;
	struct hif_lro_hdr *lro_hdr;
	u32 pe_id;

	while (1) {
		buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length, &offset, &rx_ctrl, &desc_ctrl, (void **)&lro_hdr);
		if (!buf_addr)
			goto empty;

		/* Only the LRO queue (2) carries a PE id in rx_ctrl */
		if (qno == 2)
			pe_id = (rx_ctrl >> HIF_CTRL_RX_PE_ID_OFST) & 0xf;
		else
			pe_id = 0;

		/* Partially assembled packet from a previous call, if any */
		skb = priv->skb_inflight[qno + pe_id];

#ifdef PFE_ETH_NAPI_STATS
		priv->napi_counters[NAPI_DESC_COUNT]++;
#endif

		/* First frag */
		if ((desc_ctrl & CL_DESC_FIRST) && !skb) {
			p = virt_to_page(buf_addr);

			/* Linear skb only holds the copied header part */
			skb = dev_alloc_skb(MAX_HDR_SIZE + PFE_PKT_HEADROOM + 2);
			if (unlikely(!skb)) {
				goto pkt_drop;
			}

			skb_reserve(skb, PFE_PKT_HEADROOM + 2);

			if (lro_hdr) {
				/* LRO: firmware tells us how much header to copy
				 * and the gso size to advertise.
				 * NOTE(review): data_offset is trusted to be
				 * <= MAX_HDR_SIZE here -- confirm the firmware
				 * guarantees this, otherwise the memcpy below
				 * can overrun the linear skb. */
				data_offset = lro_hdr->data_offset;
				if (lro_hdr->mss)
					skb_shinfo(skb)->gso_size = lro_hdr->mss;

			} else {
				data_offset = MAX_HDR_SIZE;
			}

			/* We don't need the fragment if the whole packet */
			/* has been copied in the first linear skb */
			if (length <= data_offset) {
				__memcpy(skb->data, buf_addr + offset, length);
				skb_put(skb, length);
				free_page((unsigned long)buf_addr);
			} else {
				__memcpy(skb->data, buf_addr + offset, data_offset);
				skb_put(skb, data_offset);
				/* Remaining payload stays in the page (zero copy) */
				skb_add_rx_frag(skb, 0, p, offset + data_offset, length - data_offset);
			}

			skb->dev = dev;

			if ((dev->features & NETIF_F_RXCSUM) && (rx_ctrl & HIF_CTRL_RX_CHECKSUMMED))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb_checksum_none_assert(skb);

		} else {
			/* Next frags */
			if (unlikely(!skb)) {
				printk(KERN_ERR "%s: NULL skb_inflight\n", __func__);
				goto pkt_drop;
			}

			p = virt_to_page(buf_addr);

			/* Continuation buffers are appended as page frags */
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, p, offset, length);
		}

		/* Last buffer in a software chain */
		if ((desc_ctrl & CL_DESC_LAST) && !(rx_ctrl & HIF_CTRL_RX_CONTINUED))
			break;

		/* Keep track of skb for this queue/pe */
		priv->skb_inflight[qno + pe_id] = skb;
	}

	priv->skb_inflight[qno + pe_id] = NULL;

	return skb;

pkt_drop:
	priv->skb_inflight[qno + pe_id] = NULL;

	/* If no skb was assembled yet the raw page must be freed instead */
	if (skb) {
		kfree_skb(skb);
	} else {
		free_page((unsigned long)buf_addr);
	}

	priv->stats.rx_errors++;

	return NULL;

empty:
	return NULL;
}
+
+/** pfe_eth_poll
+ */
+static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi, unsigned int qno, int budget)
+{
+ struct net_device *dev = priv->dev;
+ struct sk_buff *skb;
+ int work_done = 0;
+ unsigned int len;
+
+ netif_info(priv, intr, priv->dev, "%s\n", __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_POLL_COUNT]++;
+#endif
+
+ do {
+ if (page_mode)
+ skb = pfe_eth_rx_page(dev, priv, qno);
+ else
+ skb = pfe_eth_rx_skb(dev, priv, qno);
+
+ if (!skb)
+ break;
+
+ len = skb->len;
+
+ /* Packet will be processed */
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef PFE_ETH_LRO_STATS
+ priv->lro_len_counters[((u32)skb->len >> 11) & (LRO_LEN_COUNT_MAX - 1)]++;
+ priv->lro_nb_counters[skb_shinfo(skb)->nr_frags & (LRO_NB_COUNT_MAX - 1)]++;
+#endif
+
+ netif_receive_skb(skb);
+
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+
+ dev->last_rx = jiffies;
+
+ work_done++;
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_PACKET_COUNT]++;
+#endif
+
+ } while (work_done < budget);
+
+ /* If no Rx receive nor cleanup work was done, exit polling mode.
+ * No more netif_running(dev) check is required here , as this is checked in
+ * net/core/dev.c ( 2.6.33.5 kernel specific).
+ */
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND, qno);
+ }
+#ifdef PFE_ETH_NAPI_STATS
+ else
+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
+#endif
+
+ return work_done;
+}
+
+/** pfe_eth_lro_poll
+ */
+static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
+{
+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, lro_napi);
+
+ netif_info(priv, intr, priv->dev, "%s\n", __func__);
+
+ return pfe_eth_poll(priv, napi, 2, budget);
+}
+
+
+/** pfe_eth_low_poll
+ */
+static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
+{
+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, low_napi);
+
+ netif_info(priv, intr, priv->dev, "%s\n", __func__);
+
+ return pfe_eth_poll(priv, napi, 1, budget);
+}
+
+/** pfe_eth_high_poll
+ */
+static int pfe_eth_high_poll(struct napi_struct *napi, int budget )
+{
+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s, high_napi);
+
+ netif_info(priv, intr, priv->dev, "%s\n", __func__);
+
+ return pfe_eth_poll(priv, napi, 0, budget);
+}
+
/* net_device callbacks for a PFE GEMAC interface; installed on each
 * device in pfe_eth_init_one() via dev->netdev_ops. */
static const struct net_device_ops pfe_netdev_ops = {
	.ndo_open = pfe_eth_open,
	.ndo_stop = pfe_eth_close,
	.ndo_start_xmit = pfe_eth_send_packet,
	.ndo_select_queue = pfe_eth_select_queue,
	.ndo_get_stats = pfe_eth_get_stats,
	.ndo_change_mtu = pfe_eth_change_mtu,
	.ndo_set_mac_address = pfe_eth_set_mac_address,
	.ndo_set_rx_mode = pfe_eth_set_multi,
	.ndo_set_features = pfe_eth_set_features,
	.ndo_fix_features = pfe_eth_fix_features,
	.ndo_validate_addr = eth_validate_addr,
};
+
+
+/** pfe_eth_init_one
+ */
+
+static int pfe_eth_init_one( struct pfe *pfe, int id )
+{
+ struct net_device *dev = NULL;
+ struct pfe_eth_priv_s *priv = NULL;
+ struct comcerto_eth_platform_data *einfo;
+ struct comcerto_mdio_platform_data *minfo;
+ struct comcerto_pfe_platform_data *pfe_info;
+ int err;
+
+ /* Extract pltform data */
+#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI)
+ pfe_info = (struct comcerto_pfe_platform_data *) &comcerto_pfe_pdata;
+#else
+ pfe_info = (struct comcerto_pfe_platform_data *) pfe->dev->platform_data;
+#endif
+ if (!pfe_info) {
+ printk(KERN_ERR "%s: pfe missing additional platform data\n", __func__);
+ err = -ENODEV;
+ goto err0;
+ }
+
+ einfo = (struct comcerto_eth_platform_data *) pfe_info->comcerto_eth_pdata;
+
+ /* einfo never be NULL, but no harm in having this check */
+ if (!einfo) {
+ printk(KERN_ERR "%s: pfe missing additional gemacs platform data\n", __func__);
+ err = -ENODEV;
+ goto err0;
+ }
+
+ minfo = (struct comcerto_mdio_platform_data *) pfe_info->comcerto_mdio_pdata;
+
+ /* einfo never be NULL, but no harm in having this check */
+ if (!minfo) {
+ printk(KERN_ERR "%s: pfe missing additional mdios platform data\n", __func__);
+ err = -ENODEV;
+ goto err0;
+ }
+
+ /*
+ * FIXME: Need to check some flag in "einfo" to know whether
+ * GEMAC is enabled Or not.
+ */
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev_mq(sizeof (*priv), EMAC_TXQ_CNT);
+
+ if (!dev) {
+ printk(KERN_ERR "%s: gemac %d device allocation failed\n", __func__, einfo[id].gem_id);
+ err = -ENOMEM;
+ goto err0;
+ }
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->id = einfo[id].gem_id;
+ priv->pfe = pfe;
+ /* get gemac tx clock */
+ priv->gemtx_clk = clk_get(NULL, "gemtx");
+
+ if (IS_ERR(priv->gemtx_clk)) {
+ printk(KERN_ERR "%s: Unable to get the clock for gemac %d\n", __func__, priv->id);
+ err = -ENODEV;
+ goto err1;
+ }
+
+ pfe->eth.eth_priv[id] = priv;
+
+ /* Set the info in the priv to the current info */
+ priv->einfo = &einfo[id];
+ priv->EMAC_baseaddr = cbus_emac_base[id];
+ priv->GPI_baseaddr = cbus_gpi_base[id];
+
+ /* FIXME : For now TMU queue numbers hardcoded, later should be taken from pfe.h */
+#define HIF_GEMAC_TMUQ_BASE 6
+ priv->low_tmuQ = HIF_GEMAC_TMUQ_BASE + (id * 2);
+ priv->high_tmuQ = priv->low_tmuQ + 1;
+
+ spin_lock_init(&priv->lock);
+ priv->tx_timer.data = (unsigned long)priv;
+ priv->tx_timer.function = pfe_eth_tx_timeout;
+ priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
+ init_timer(&priv->tx_timer);
+ priv->cpu_id = -1;
+
+ pfe_eth_fast_tx_timeout_init(priv);
+
+ /* Copy the station address into the dev structure, */
+ memcpy(dev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
+
+ err = dev_alloc_name(dev, einfo[id].name);
+
+ if (err < 0) {
+ netdev_err(dev, "%s: dev_alloc_name(%s) failed\n", __func__, einfo[id].name);
+ err = -EINVAL;
+ goto err2;
+ }
+
+ /* Initialize mdio */
+ if (minfo[id].enabled) {
+
+ if ((err = pfe_eth_mdio_init(priv, &minfo[id]))) {
+ netdev_err(dev, "%s: pfe_eth_mdio_init() failed\n", __func__);
+ goto err2;
+ }
+ }
+
+ dev->mtu = 1500;
+
+ /* supported features */
+ dev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO;
+
+ /* enabled by default */
+ dev->features = dev->hw_features;
+
+ if (lro_mode) {
+ dev->hw_features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_LRO;
+ pfe_ctrl_set_lro(1);
+ }
+
+ dev->netdev_ops = &pfe_netdev_ops;
+
+ dev->ethtool_ops = &pfe_ethtool_ops;
+
+ /* Enable basic messages by default */
+ priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+ err = register_netdev(dev);
+
+ if (err) {
+ netdev_err(dev, "register_netdev() failed\n");
+ goto err3;
+ }
+
+ netif_napi_add(dev, &priv->low_napi, pfe_eth_low_poll, HIF_RX_POLL_WEIGHT - 16);
+ netif_napi_add(dev, &priv->high_napi, pfe_eth_high_poll, HIF_RX_POLL_WEIGHT - 16);
+ netif_napi_add(dev, &priv->lro_napi, pfe_eth_lro_poll, HIF_RX_POLL_WEIGHT - 16);
+
+ /* Create all the sysfs files */
+ if(pfe_eth_sysfs_init(dev))
+ goto err4;
+
+ netif_info(priv, probe, dev, "%s: created interface, baseaddr: %p\n", __func__, priv->EMAC_baseaddr);
+
+ return 0;
+err4:
+ unregister_netdev(dev);
+err3:
+ pfe_eth_mdio_exit(priv->mii_bus);
+err2:
+ clk_put(priv->gemtx_clk);
+err1:
+ free_netdev(priv->dev);
+
+err0:
+ return err;
+}
+
+/** pfe_eth_init
+ */
+int pfe_eth_init(struct pfe *pfe)
+{
+ int ii = 0;
+ int err;
+
+ printk(KERN_INFO "%s\n", __func__);
+
+ cbus_emac_base[0] = EMAC1_BASE_ADDR;
+ cbus_emac_base[1] = EMAC2_BASE_ADDR;
+ cbus_emac_base[2] = EMAC3_BASE_ADDR;
+
+ cbus_gpi_base[0] = EGPI1_BASE_ADDR;
+ cbus_gpi_base[1] = EGPI2_BASE_ADDR;
+ cbus_gpi_base[2] = EGPI3_BASE_ADDR;
+
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
+ if ((err = pfe_eth_init_one(pfe, ii)))
+ goto err0;
+ }
+
+ return 0;
+
+err0:
+ while(ii--){
+ pfe_eth_exit_one( pfe->eth.eth_priv[ii] );
+ }
+
+ /* Register three network devices in the kernel */
+ return err;
+}
+
+/** pfe_eth_exit_one
+ */
/** pfe_eth_exit_one
 *
 * Tear down one GEMAC network device: remove its sysfs files, release
 * the tx clock, unregister the netdev, shut down mdio and free the
 * device structure.
 *
 * NOTE(review): the clock is released before unregister_netdev(); if
 * any ndo callback can run in between and touch gemtx_clk, the order
 * should be swapped -- confirm against pfe_eth_close()/ethtool paths.
 */
static void pfe_eth_exit_one( struct pfe_eth_priv_s *priv )
{
	netif_info(priv, probe, priv->dev, "%s\n", __func__);

	pfe_eth_sysfs_exit(priv->dev);

	clk_put(priv->gemtx_clk);

	unregister_netdev(priv->dev);

	pfe_eth_mdio_exit(priv->mii_bus);


	free_netdev(priv->dev);
}
+
+/** pfe_eth_exit
+ */
+void pfe_eth_exit(struct pfe *pfe)
+{
+ int ii;
+
+ printk(KERN_INFO "%s\n", __func__);
+
+ for(ii = 0; ii < NUM_GEMAC_SUPPORT; ii++ ) {
+ /*
+ * FIXME: Need to check some flag in "einfo" to know whether
+ * GEMAC is enabled Or not.
+ */
+
+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
+ }
+}
+
diff --git a/pfe_ctrl/pfe_eth.h b/pfe_ctrl/pfe_eth.h
new file mode 100644
index 0000000..76d8e44
--- /dev/null
+++ b/pfe_ctrl/pfe_eth.h
@@ -0,0 +1,305 @@
+#ifndef _PFE_ETH_H_
+#define _PFE_ETH_H_
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+
+#define PFE_ETH_TSO_STATS
+#define PFE_ETH_LRO_STATS
+#define PFE_ETH_NAPI_STATS
+#define PFE_ETH_TX_STATS
+
+#define LRO_LEN_COUNT_MAX 32
+#define LRO_NB_COUNT_MAX 32
+
+#if defined(CONFIG_PLATFORM_PCI) || defined(CONFIG_PLATFORM_EMULATION)
+
+#define CONFIG_COMCERTO_GEMAC 1
+
+#define CONFIG_COMCERTO_USE_MII 1
+#define CONFIG_COMCERTO_USE_RMII 2
+#define CONFIG_COMCERTO_USE_GMII 4
+#define CONFIG_COMCERTO_USE_RGMII 8
+#define CONFIG_COMCERTO_USE_SGMII 16
+
+#define GEMAC_SW_CONF (1 << 8) | (1 << 11) // GEMAC configured by SW
+#define GEMAC_PHY_CONF 0 // GEMAC configured by phy lines (not for MII/GMII)
+#define GEMAC_SW_FULL_DUPLEX (1 << 9)
+#define GEMAC_SW_SPEED_10M (0 << 12)
+#define GEMAC_SW_SPEED_100M (1 << 12)
+#define GEMAC_SW_SPEED_1G (2 << 12)
+
+#define GEMAC_NO_PHY (1 << 0) // set if no phy connected to MAC (ex ethernet switch). In this case use MAC fixed configuration
+#define GEMAC_PHY_RGMII_ADD_DELAY (1 << 1)
+
+/* gemac to interface name assignment */
+#define GEMAC0_ITF_NAME "eth5"
+#define GEMAC1_ITF_NAME "eth6"
+#define GEMAC2_ITF_NAME "eth7"
+
+#define GEMAC0_MAC { 0x00, 0xED, 0xCD, 0xEF, 0xAA, 0xCC }
+#define GEMAC1_MAC { 0x00, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E }
+
/* Per-GEMAC configuration; this local definition is only compiled for
 * emulation/PCI builds, where no board file supplies platform data. */
struct comcerto_eth_platform_data {
	/* device specific information */
	u32 device_flags;	/* e.g. CONFIG_COMCERTO_GEMAC */
	char name[16];		/* requested interface name ("eth5", ...) */


	/* board specific information */
	u32 mii_config;		/* CONFIG_COMCERTO_USE_* MII mode flags */
	u32 gemac_mode;		/* GEMAC_SW_CONF / duplex / speed flags */
	u32 phy_flags;		/* GEMAC_NO_PHY, GEMAC_PHY_RGMII_ADD_DELAY */
	u32 gem_id;		/* GEMAC index */
	u32 bus_id;		/* MDIO bus id */
	u32 phy_id;		/* PHY address on the MDIO bus */
	u8 *mac_addr;		/* station MAC address (ETH_ALEN bytes) */
};
+
/* Per-MDIO-bus configuration (emulation/PCI builds only). */
struct comcerto_mdio_platform_data {
	int enabled;	/* non-zero if this mdio bus should be initialized */
	int irq[32];	/* per-PHY irq; PHY_POLL selects polling */
	u32 phy_mask;	/* bitmask of PHY addresses NOT to probe */
	int mdc_div;	/* MDC clock divider */
};
+
/* Aggregate platform data: one eth and one mdio entry per GEMAC. */
struct comcerto_pfe_platform_data
{
	struct comcerto_eth_platform_data comcerto_eth_pdata[3];
	struct comcerto_mdio_platform_data comcerto_mdio_pdata[3];
};
+
/* Built-in default platform data, used instead of board-file data when
 * building for emulation or PCI (see the surrounding #if). */
static struct comcerto_pfe_platform_data comcerto_pfe_pdata = {
	.comcerto_eth_pdata[0] = {
		.name = GEMAC0_ITF_NAME,
		.device_flags = CONFIG_COMCERTO_GEMAC,
		.mii_config = CONFIG_COMCERTO_USE_MII,
		.gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_100M,
#if defined(CONFIG_PLATFORM_EMULATION) || defined(CONFIG_PLATFORM_PCI)
		.phy_flags = GEMAC_NO_PHY,
#else
		.phy_flags = GEMAC_PHY_RGMII_ADD_DELAY,
#endif
		.bus_id = 0,
		.phy_id = 0,
		.gem_id = 0,
		.mac_addr = (u8[])GEMAC0_MAC,
	},

	.comcerto_eth_pdata[1] = {
		.name = GEMAC1_ITF_NAME,
		.device_flags = CONFIG_COMCERTO_GEMAC,
		.mii_config = CONFIG_COMCERTO_USE_RGMII,
		.gemac_mode = GEMAC_SW_CONF | GEMAC_SW_FULL_DUPLEX | GEMAC_SW_SPEED_1G,
		.phy_flags = GEMAC_NO_PHY,
		.gem_id = 1,
		.mac_addr = (u8[])GEMAC1_MAC,
	},

	/* GEMAC2: name only, everything else left zeroed (disabled) */
	.comcerto_eth_pdata[2] = {
		.name = GEMAC2_ITF_NAME,
	},

	/* Only the first mdio bus is enabled; probe PHY address 0 only */
	.comcerto_mdio_pdata[0] = {
		.enabled = 1,
		.phy_mask = 0xFFFFFFFE,
		.mdc_div = 96,
		.irq = {
			[0] = PHY_POLL,
		},
	},
};
+#endif
+
+#define NUM_GEMAC_SUPPORT 3
+#define DRV_NAME "c2000-geth"
+#define COMCERTO_INFOSTR_LEN 32
+#define COMCERTO_TX_RECOVERY_TIMEOUT_MS 500
+#define COMCERTO_TX_FAST_RECOVERY_TIMEOUT_MS 3
+
+#define EMAC_TXQ_CNT 16
+#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
+
+#define JUMBO_FRAME_SIZE 10258
+/**
+ * Client Tx queue threshold, for txQ flush condition.
+ * It must be smaller than the queue size (in case we ever change it in the future).
+ */
+#define HIF_CL_TX_FLUSH_MARK 32
+
+/**
+ * Max number of TX resources (HIF descriptors or skbs) that will be released
+ * in a single go during batch recycling.
+ * Should be lower than the flush mark so the SW can provide the HW with a
+ * continuous stream of packets instead of bursts.
+ */
+#define TX_FREE_MAX_COUNT 16
+#define EMAC_RXQ_CNT 3
+#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT /* make sure clients can receive a full burst of packets */
+#define EMAC_RMON_TXBYTES_POS 0x00
+#define EMAC_RMON_RXBYTES_POS 0x14
+
+#define EMAC_QUEUENUM_MASK (EMAC_TXQ_CNT - 1)
+#define EMAC_MDIO_TIMEOUT 1000
+#define MAX_UC_SPEC_ADDR_REG 31
+
+#define ETH_HIF_NODMA_MAP 1
+
+#ifdef ETH_HIF_NODMA_MAP
/* DMA mapping of a single skb fragment handed to the HIF, kept so it
 * can be unmapped on tx completion (only used with ETH_HIF_NODMA_MAP). */
struct hif_frag_dma_map_s {
	dma_addr_t data;	/* DMA address of the mapped fragment */
	int len;		/* mapped length in bytes */
};

/* Set of fragment mappings belonging to one transmitted packet. */
struct hif_frag_info_s {
	struct hif_frag_dma_map_s *map;	/* array of frag_count entries */
	int frag_count;
};
+#endif
+
+/* The set of statistics registers implemented in the Cadence MAC.
+ * The statistics registers implemented are a subset of all the statistics
+ * available, but contains all the compulsory ones.
+ * For full descriptions on the registers, refer to the Cadence MAC programmers
+ * guide or the IEEE 802.3 specifications.
+ */
struct gemac_stats{
	u32 octets_tx_bot;      /* Lower 32-bits for number of octets tx'd */
	u32 octets_tx_top;      /* Upper 16-bits for number of octets tx'd */
	u32 frames_tx;          /* Number of frames transmitted OK */
	u32 broadcast_tx;       /* Number of broadcast frames transmitted */
	u32 multicast_tx;       /* Number of multicast frames transmitted */
	u32 pause_tx;           /* Number of pause frames transmitted. */
	u32 frame64_tx;         /* Number of 64byte frames transmitted */
	u32 frame65_127_tx;     /* Number of 65-127 byte frames transmitted */
	u32 frame128_255_tx;    /* Number of 128-255 byte frames transmitted */
	u32 frame256_511_tx;    /* Number of 256-511 byte frames transmitted */
	u32 frame512_1023_tx;   /* Number of 512-1023 byte frames transmitted */
	u32 frame1024_1518_tx;  /* Number of 1024-1518 byte frames transmitted*/
	u32 frame1519_tx;       /* Number of frames greater than 1518 bytes tx*/
	u32 tx_urun;            /* Transmit underrun errors due to DMA */
	u32 single_col;         /* Number of single collision frames */
	u32 multi_col;          /* Number of multi collision frames */
	u32 excess_col;         /* Number of excessive collision frames. */
	u32 late_col;           /* Collisions occurring after slot time */
	u32 def_tx;             /* Frames deferred due to crs */
	u32 crs_errors;         /* Errors caused by crs not being asserted. */
	u32 octets_rx_bot;      /* Lower 32-bits for number of octets rx'd */
	u32 octets_rx_top;      /* Upper 16-bits for number of octets rx'd */
	u32 frames_rx;          /* Number of frames received OK */
	u32 broadcast_rx;       /* Number of broadcast frames received */
	u32 multicast_rx;       /* Number of multicast frames received */
	u32 pause_rx;           /* Number of pause frames received. */
	u32 frame64_rx;         /* Number of 64byte frames received */
	u32 frame65_127_rx;     /* Number of 65-127 byte frames received */
	u32 frame128_255_rx;    /* Number of 128-255 byte frames received */
	u32 frame256_511_rx;    /* Number of 256-511 byte frames received */
	u32 frame512_1023_rx;   /* Number of 512-1023 byte frames received */
	u32 frame1024_1518_rx;  /* Number of 1024-1518 byte frames received*/
	u32 frame1519_rx;       /* Number of frames greater than 1518 bytes rx*/
	u32 usize_frames;       /* Frames received less than min of 64 bytes */
	u32 excess_length;      /* Number of excessive length frames rx */
	u32 jabbers;            /* Excessive length + crc or align errors. */
	u32 fcs_errors;         /* Number of frames received with crc errors */
	u32 length_check_errors;/* Number of frames with incorrect length */
	u32 rx_symbol_errors;   /* Number of times rx_er asserted during rx */
	u32 align_errors;       /* Frames received without integer no. bytes */
	u32 rx_res_errors;      /* Number of times buffers ran out during rx */
	u32 rx_orun;            /* Receive overrun errors due to DMA */
	u32 ip_cksum;           /* IP header checksum errors */
	u32 tcp_cksum;          /* TCP checksum errors */
	u32 udp_cksum;          /* UDP checksum errors */
};
+
+#define EMAC_REG_SPACE sizeof(struct gemac_reg)
+#define EMAC_RMON_LEN (sizeof(struct gemac_stats)/sizeof(u32))
+
+
/* Per-TX-queue hrtimer state used for fast tx timeout handling
 * (one entry per queue in pfe_eth_priv_s.fast_tx_timeout). */
struct pfe_eth_fast_timer {
	int queuenum;		/* TX queue this timer belongs to */
	struct hrtimer timer;
	void * base;		/* opaque back-reference, presumably set by
				 * pfe_eth_fast_tx_timeout_init() -- confirm */
};
+
+#include "pfe_tso.h"
#include "pfe_tso.h"
/* Per-GEMAC driver state, stored as the netdev private area
 * (allocated by alloc_etherdev_mq() in pfe_eth_init_one()). */
typedef struct pfe_eth_priv_s
{
	struct pfe *pfe;		/* back-pointer to the global pfe context */
	struct hif_client_s client;	/* HIF client for this interface */
	struct napi_struct lro_napi;	/* NAPI context for queue 2 (LRO) */
	struct napi_struct low_napi;	/* NAPI context for queue 1 */
	struct napi_struct high_napi;	/* NAPI context for queue 0 */
	int low_tmuQ;			/* TMU queue for low priority tx */
	int high_tmuQ;			/* TMU queue for high priority tx */
	struct net_device_stats stats;
	struct net_device *dev;
	int id;				/* GEMAC id (einfo[id].gem_id) */
	int promisc;
	unsigned int msg_enable;	/* netif message level bitmask */

	spinlock_t lock;
	unsigned int event_status;
	int irq;
	void* EMAC_baseaddr;		/* mapped EMAC register base */
	void* GPI_baseaddr;		/* mapped GPI register base */
	/* PHY stuff */
	struct phy_device *phydev;
	int oldspeed;
	int oldduplex;
	int oldlink;
	/* mdio info */
	int mdc_div;
	struct mii_bus *mii_bus;
	/* gemac tx clock */
	struct clk *gemtx_clk;

	int default_priority;
	struct timer_list tx_timer;	/* slow tx recovery timer */
	struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
	int cpu_id;

#ifdef ETH_HIF_NODMA_MAP
	/* This array is used to store the dma mapping for skb fragments */
	struct hif_frag_info_s dma_map_array[EMAC_TXQ_CNT][EMAC_TXQ_DEPTH];
#endif
	struct comcerto_eth_platform_data *einfo;
	/* Partially received packets, indexed by rx queue (+ PE id for the
	 * LRO queue); see pfe_eth_rx_page()/pfe_eth_rx_skb() */
	struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];

#ifdef PFE_ETH_LRO_STATS
	unsigned int lro_len_counters[LRO_LEN_COUNT_MAX];
	unsigned int lro_nb_counters[LRO_NB_COUNT_MAX]; //TODO change to exact max number when RX scatter done
#endif

	struct tso_cb_s tso;		/* TSO state (see pfe_tso.h) */

#ifdef PFE_ETH_TX_STATS
	unsigned int stop_queue_total[EMAC_TXQ_CNT];
	unsigned int stop_queue_hif[EMAC_TXQ_CNT];
	unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
	unsigned int stop_queue_credit[EMAC_TXQ_CNT];
	unsigned int clean_fail[EMAC_TXQ_CNT];
	unsigned int was_stopped[EMAC_TXQ_CNT];
#endif

#ifdef PFE_ETH_NAPI_STATS
	unsigned int napi_counters[NAPI_MAX_COUNT];
#endif

}pfe_eth_priv_t;
+
/* Container for the private state of all GEMAC interfaces;
 * filled in by pfe_eth_init_one(). */
struct pfe_eth {
	struct pfe_eth_priv_s *eth_priv[3];
};
+
+int pfe_eth_init(struct pfe *pfe);
+void pfe_eth_exit(struct pfe *pfe);
+
+
+
+#endif /* _PFE_ETH_H_ */
diff --git a/pfe_ctrl/pfe_firmware.c b/pfe_ctrl/pfe_firmware.c
new file mode 100644
index 0000000..2f2146e
--- /dev/null
+++ b/pfe_ctrl/pfe_firmware.c
@@ -0,0 +1,299 @@
+
+
+/** @file
+ * Contains all the functions to handle parsing and loading of PE firmware files.
+ */
+#include <linux/firmware.h>
+
+#include "pfe_mod.h"
+#include "pfe_firmware.h"
+#include "pfe/pfe.h"
+
+static Elf32_Shdr * get_elf_section_header(const struct firmware *fw, const char *section)
+{
+ Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *)fw->data;
+ Elf32_Shdr *shdr, *shdr_shstr;
+ Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
+ Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
+ Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
+ Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
+ Elf32_Off shstr_offset;
+ Elf32_Word sh_name;
+ const char *name;
+ int i;
+
+ /* Section header strings */
+ shdr_shstr = (Elf32_Shdr *)(fw->data + e_shoff + e_shstrndx * e_shentsize);
+ shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
+
+ for (i = 0; i < e_shnum; i++) {
+ shdr = (Elf32_Shdr *)(fw->data + e_shoff + i * e_shentsize);
+
+ sh_name = be32_to_cpu(shdr->sh_name);
+
+ name = (const char *)(fw->data + shstr_offset + sh_name);
+
+ if (!strcmp(name, section))
+ return shdr;
+ }
+
+ printk(KERN_ERR "%s: didn't find section %s\n", __func__, section);
+
+ return NULL;
+}
+
+static unsigned long get_elf_section(const struct firmware *fw, const char *section)
+{
+ Elf32_Shdr *shdr = get_elf_section_header(fw, section);
+
+ if (shdr)
+ return be32_to_cpu(shdr->sh_addr);
+ else
+ return -1;
+}
+
#if defined(FPP_DIAGNOSTICS)
/** pfe_get_diags_info
 *
 * Copy the ".pfe_diags_str" section of a PE firmware image into a
 * freshly allocated buffer and record its PE-side base address/size.
 *
 * @return 0 on success, -1 if the section is missing or the
 *         allocation fails
 */
static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info *diags_info)
{
	Elf32_Shdr *shdr;
	unsigned long offset, size;

	shdr = get_elf_section_header(fw, ".pfe_diags_str");
	if (!shdr)
		return -1;

	offset = be32_to_cpu(shdr->sh_offset);
	size = be32_to_cpu(shdr->sh_size);
	diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
	diags_info->diags_str_size = size;
	diags_info->diags_str_array = pfe_kmalloc(size, GFP_KERNEL);

	/* BUGFIX: the allocation result was previously passed to memcpy()
	 * without a NULL check */
	if (!diags_info->diags_str_array)
		return -1;

	memcpy(diags_info->diags_str_array, fw->data + offset, size);

	return 0;
}
#endif
+
+static void pfe_check_version_info(const struct firmware *fw)
+{
+ static char *version = NULL;
+
+ Elf32_Shdr *shdr = get_elf_section_header(fw, ".version");
+
+ if (shdr)
+ {
+ if(!version)
+ {
+ /* this is the first fw we load, use its version string as reference (whatever it is) */
+ version = (char *)(fw->data + be32_to_cpu(shdr->sh_offset));
+
+ printk(KERN_INFO "PFE binary version: %s\n", version);
+ }
+ else
+ {
+ /* already have loaded at least one firmware, check sequence can start now */
+ if(strcmp(version, (char *)(fw->data + be32_to_cpu(shdr->sh_offset))))
+ {
+ printk(KERN_INFO "WARNING: PFE firmware binaries from incompatible version\n");
+ }
+ }
+ }
+ else
+ {
+ /* version cannot be verified, a potential issue that should be reported */
+ printk(KERN_INFO "WARNING: PFE firmware binaries from incompatible version\n");
+ }
+}
+
+/** PFE elf firmware loader.
+* Loads an elf firmware image into a list of PE's (specified using a bitmask)
+*
+* @param pe_mask Mask of PE id's to load firmware to
+* @param fw Pointer to the firmware image
+*
+* @return 0 on sucess, a negative value on error
+*
+*/
+int pfe_load_elf(int pe_mask, const struct firmware *fw)
+{
+ Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *)fw->data;
+ Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
+ Elf32_Shdr *shdr = (Elf32_Shdr *) (fw->data + be32_to_cpu(elf_hdr->e_shoff));
+ int id, section;
+ int rc;
+
+ printk(KERN_INFO "%s\n", __func__);
+
+ /* Some sanity checks */
+ if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG))
+ {
+ printk(KERN_ERR "%s: incorrect elf magic number\n", __func__);
+ return -EINVAL;
+ }
+
+ if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32)
+ {
+ printk(KERN_ERR "%s: incorrect elf class(%x)\n", __func__, elf_hdr->e_ident[EI_CLASS]);
+ return -EINVAL;
+ }
+
+ if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB)
+ {
+ printk(KERN_ERR "%s: incorrect elf data(%x)\n", __func__, elf_hdr->e_ident[EI_DATA]);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC)
+ {
+ printk(KERN_ERR "%s: incorrect elf file type(%x)\n", __func__, be16_to_cpu(elf_hdr->e_type));
+ return -EINVAL;
+ }
+
+ for (section = 0; section < sections; section++, shdr++)
+ {
+ if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR)))
+ continue;
+
+ for (id = 0; id < MAX_PE; id++)
+ if (pe_mask & (1 << id))
+ {
+ rc = pe_load_elf_section(id, fw->data, shdr);
+ if (rc < 0)
+ goto err;
+ }
+ }
+
+ pfe_check_version_info(fw);
+
+ return 0;
+
+err:
+ return rc;
+}
+
+
+/** PFE firmware initialization.
+* Loads different firmware files from filesystem.
+* Initializes PE IMEM/DMEM and UTIL-PE DDR
+* Initializes control path symbol addresses (by looking them up in the elf firmware files
+* Takes PE's out of reset
+*
+* @return 0 on sucess, a negative value on error
+*
+*/
/** PFE firmware initialization.
* Loads different firmware files from filesystem.
* Initializes PE IMEM/DMEM and UTIL-PE DDR
* Initializes control path symbol addresses (by looking them up in the elf firmware files
* Takes PE's out of reset
*
* @return 0 on sucess, a negative value on error
*
*/
int pfe_firmware_init(struct pfe *pfe)
{
	const struct firmware *class_fw, *tmu_fw, *util_fw;
	int rc = 0;
#if !defined(CONFIG_UTIL_DISABLED)
	const char* util_fw_name;
#endif
	printk(KERN_INFO "%s\n", __func__);

	/* NOTE(review): request_firmware() failures are mapped to -ETIMEDOUT
	 * rather than the usual -ENOENT/-ENODEV -- confirm callers rely on it */
	if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
		printk(KERN_ERR "%s: request firmware %s failed\n", __func__, CLASS_FIRMWARE_FILENAME);
		rc = -ETIMEDOUT;
		goto err0;
	}

	if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
		printk(KERN_ERR "%s: request firmware %s failed\n", __func__, TMU_FIRMWARE_FILENAME);
		rc = -ETIMEDOUT;
		goto err1;
	}
#if !defined(CONFIG_UTIL_DISABLED)
	/* Rev A0 silicon uses a dedicated UTIL-PE image */
	util_fw_name = (system_rev == 0) ? UTIL_REVA0_FIRMWARE_FILENAME : UTIL_FIRMWARE_FILENAME;

	if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
		printk(KERN_ERR "%s: request firmware %s failed\n", __func__, util_fw_name);
		rc = -ETIMEDOUT;
		goto err2;
	}
#endif
	rc = pfe_load_elf(CLASS_MASK, class_fw);
	if (rc < 0) {
		printk(KERN_ERR "%s: class firmware load failed\n", __func__);
		goto err3;
	}

	/* Control path symbol addresses, looked up in the elf images */
	pfe->ctrl.class_dmem_sh = get_elf_section(class_fw, ".dmem_sh");
	pfe->ctrl.class_pe_lmem_sh = get_elf_section(class_fw, ".pe_lmem_sh");

#if defined(FPP_DIAGNOSTICS)
	/* Diagnostics are optional: a failure only disables them */
	rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
	if (rc < 0) {
		printk (KERN_WARNING "PFE diags won't be available for class PEs\n");
		rc = 0;
	}
#endif

	printk(KERN_INFO "%s: class firmware loaded %#lx %#lx\n", __func__, pfe->ctrl.class_dmem_sh, pfe->ctrl.class_pe_lmem_sh);

	rc = pfe_load_elf(TMU_MASK, tmu_fw);
	if (rc < 0) {
		printk(KERN_ERR "%s: tmu firmware load failed\n", __func__);
		goto err3;
	}

	pfe->ctrl.tmu_dmem_sh = get_elf_section(tmu_fw, ".dmem_sh");

	printk(KERN_INFO "%s: tmu firmware loaded %#lx\n", __func__, pfe->ctrl.tmu_dmem_sh);

#if !defined(CONFIG_UTIL_DISABLED)
	rc = pfe_load_elf(UTIL_MASK, util_fw);
	if (rc < 0) {
		printk(KERN_ERR "%s: util firmware load failed\n", __func__);
		goto err3;
	}

	pfe->ctrl.util_dmem_sh = get_elf_section(util_fw, ".dmem_sh");
	pfe->ctrl.util_ddr_sh = get_elf_section(util_fw, ".ddr_sh");

#if defined(FPP_DIAGNOSTICS)
	rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
	if (rc < 0) {
		printk(KERN_WARNING "PFE diags won't be available for util PE\n");
		rc = 0;
	}
#endif

	printk(KERN_INFO "%s: util firmware loaded %#lx\n", __func__, pfe->ctrl.util_dmem_sh);

	util_enable();
#endif

	tmu_enable(0xf);
	class_enable();

	/* Intentional fall-through on success (rc == 0): once the images are
	 * loaded into the PEs, the firmware blobs are no longer needed and
	 * are released through the error labels below. */
err3:
#if !defined(CONFIG_UTIL_DISABLED)
	release_firmware(util_fw);

err2:
#endif
	release_firmware(tmu_fw);

err1:
	release_firmware(class_fw);

err0:
	return rc;
}
+
+/** PFE firmware cleanup
+* Puts PE's in reset
+*
+*
+*/
/** PFE firmware cleanup
* Puts PE's in reset
*
*
*/
void pfe_firmware_exit(struct pfe *pfe)
{
	printk(KERN_INFO "%s\n", __func__);

	/* Disable in the reverse order of pfe_firmware_init() enabling */
	class_disable();
	tmu_disable(0xf);
#if !defined(CONFIG_UTIL_DISABLED)
	util_disable();
#endif
}
diff --git a/pfe_ctrl/pfe_firmware.h b/pfe_ctrl/pfe_firmware.h
new file mode 100644
index 0000000..0d5db73
--- /dev/null
+++ b/pfe_ctrl/pfe_firmware.h
@@ -0,0 +1,17 @@
+#ifndef _PFE_FIRMWARE_H_
+#define _PFE_FIRMWARE_H_
+
+#define CLASS_FIRMWARE_FILENAME "class_c2000.elf"
+#define TMU_FIRMWARE_FILENAME "tmu_c2000.elf"
+#define UTIL_FIRMWARE_FILENAME "util_c2000.elf"
+#define UTIL_REVA0_FIRMWARE_FILENAME "util_c2000_revA0.elf"
+
+#define PFE_FW_CHECK_PASS 0
+#define PFE_FW_CHECK_FAIL 1
+#define NUM_PFE_FW 3
+
+int pfe_firmware_init(struct pfe *pfe);
+void pfe_firmware_exit(struct pfe *pfe);
+
+#endif /* _PFE_FIRMWARE_H_ */
+
diff --git a/pfe_ctrl/pfe_hif.c b/pfe_ctrl/pfe_hif.c
new file mode 100644
index 0000000..03e612f
--- /dev/null
+++ b/pfe_ctrl/pfe_hif.c
@@ -0,0 +1,897 @@
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#else
+#include "platform.h"
+#endif
+
+
+#include "pfe_mod.h"
+
+#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT)
+
+#define inc_cl_idx(idxname) idxname = (idxname+1) & (queue->size-1)
+#define inc_hif_rxidx(idxname) idxname = (idxname+1) & (hif->RxRingSize-1)
+#define inc_hif_txidx(idxname) idxname = (idxname+1) & (hif->TxRingSize-1)
+
+unsigned char napi_first_batch = 0;
+
+/** Allocate the HIF descriptor rings and the per-packet TSO header pool.
+ *
+ * On C2000 the Rx and Tx rings share one coherent DMA allocation; in the
+ * PCI configuration the rings are carved out of the shared DDR window.
+ *
+ * @param[in] hif	HIF context to fill in
+ * @return		0 on success, negative errno on failure
+ */
+static int pfe_hif_alloc_descr(struct pfe_hif *hif)
+{
+	/* Declared unconditionally: the TSO header allocation below needs
+	 * these even when CONFIG_PLATFORM_PCI is set (they used to be
+	 * declared inside the non-PCI branch only, which could not compile
+	 * in the PCI configuration). */
+	void *addr;
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+#if !defined(CONFIG_PLATFORM_PCI)
+	/* One coherent allocation: Rx ring followed by Tx ring */
+	addr = dma_alloc_coherent(pfe->dev,
+			HIF_RX_DESC_NT * sizeof(struct hif_desc) + HIF_TX_DESC_NT * sizeof(struct hif_desc),
+			&dma_addr, GFP_KERNEL);
+
+	if (!addr) {
+		printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", __func__);
+		err = -ENOMEM;
+		goto err0;
+	}
+
+	hif->descr_baseaddr_p = dma_addr;
+	hif->descr_baseaddr_v = addr;
+#else
+	/* PCI: rings live at a fixed offset in the shared DDR window */
+	hif->descr_baseaddr_p = pfe->ddr_phys_baseaddr + HIF_DESC_BASEADDR;
+	hif->descr_baseaddr_v = pfe->ddr_baseaddr + HIF_DESC_BASEADDR;
+#endif
+	hif->RxRingSize = HIF_RX_DESC_NT;
+	hif->TxRingSize = HIF_TX_DESC_NT;
+
+	/* One hif_tso_hdr per Tx descriptor, used by the TSO transmit path */
+	addr = dma_alloc_coherent(pfe->dev, HIF_TX_DESC_NT * sizeof(struct hif_tso_hdr),
+			&dma_addr, GFP_KERNEL);
+
+	if (!addr) {
+		printk(KERN_ERR "%s: Could not allocate buffer per-packet tx header!\n", __func__);
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	hif->tso_hdr_p = dma_addr;
+	hif->tso_hdr_v = addr;
+
+	return 0;
+
+err1:
+#if !defined(CONFIG_PLATFORM_PCI)
+	/* Only undo the ring allocation when we actually made one above
+	 * (the old code freed unallocated memory in the PCI config) */
+	dma_free_coherent(pfe->dev,
+			hif->RxRingSize * sizeof(struct hif_desc) + hif->TxRingSize * sizeof(struct hif_desc),
+			hif->descr_baseaddr_v, hif->descr_baseaddr_p);
+
+err0:
+#endif
+	return err;
+}
+
+/* Free the descriptor rings and TSO header pool allocated by
+ * pfe_hif_alloc_descr().
+ * NOTE(review): the TSO header pool is allocated outside any config guard
+ * in pfe_hif_alloc_descr() but is only freed here when CONFIG_PLATFORM_PCI
+ * is NOT set — confirm the PCI build never reaches that allocation.
+ */
+static void pfe_hif_free_descr(struct pfe_hif *hif)
+{
+	printk(KERN_INFO "%s\n", __func__);
+#if !defined(CONFIG_PLATFORM_PCI)
+	/* Free in reverse allocation order: TSO headers, then both rings
+	 * (Rx + Tx came from a single coherent allocation) */
+	dma_free_coherent(pfe->dev, hif->TxRingSize * sizeof(struct hif_tso_hdr), hif->tso_hdr_v, hif->tso_hdr_p);
+	dma_free_coherent(pfe->dev,
+			hif->RxRingSize * sizeof(struct hif_desc) + hif->TxRingSize * sizeof(struct hif_desc),
+			hif->descr_baseaddr_v, hif->descr_baseaddr_p);
+#endif
+}
+/* Debug helper: dump every Rx and Tx hardware descriptor, together with
+ * the physical base address of each ring. */
+void pfe_hif_desc_dump(struct pfe_hif *hif)
+{
+	struct hif_desc *bd;
+	unsigned long phys;
+	int i;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	bd = hif->RxBase;
+	phys = hif->descr_baseaddr_p + ((u32)bd - (u32)hif->descr_baseaddr_v);
+	printk("HIF Rx desc base %p physical %x\n", bd, (u32)phys);
+	for (i = 0; i < hif->RxRingSize; i++, bd++)
+		printk(KERN_INFO "status: %08x, ctrl: %08x, data: %08x, next: %x\n",
+			bd->status, bd->ctrl, bd->data, bd->next);
+
+	bd = hif->TxBase;
+	phys = hif->descr_baseaddr_p + ((u32)bd - (u32)hif->descr_baseaddr_v);
+	printk("HIF Tx desc base %p physical %x\n", bd, (u32)phys);
+	for (i = 0; i < hif->TxRingSize; i++, bd++)
+		printk(KERN_INFO "status: %08x, ctrl: %08x, data: %08x, next: %x\n",
+			bd->status, bd->ctrl, bd->data, bd->next);
+}
+
+/* pfe_hif_release_buffers
+ * Detach every Rx buffer from the ring, unmap it and return it to the
+ * shared-memory pool so the client side can free it. Also zeroes each
+ * descriptor so the ring is left in a clean state.
+ */
+static void pfe_hif_release_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc *desc;
+	int i = 0;
+
+	hif->RxBase = hif->descr_baseaddr_v;
+
+	printk(KERN_INFO "%s\n", __func__);
+	/*Free Rx buffers */
+#if !defined(CONFIG_PLATFORM_PCI)
+	desc = hif->RxBase;
+	for (i = 0; i < hif->RxRingSize; i++) {
+		/* desc->data == 0 means this slot never got a buffer */
+		if (desc->data) {
+			/* Only return the buffer if its pool slot is empty,
+			 * i.e. ownership really is with the HIF driver */
+			if ((i < hif->shm->rx_buf_pool_cnt) && (hif->shm->rx_buf_pool[i] == NULL)) {
+				dma_unmap_single(hif->dev, desc->data, pfe_pkt_size, DMA_FROM_DEVICE);
+				hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
+			}
+			else {
+				/*TODO This should not happen*/
+				printk(KERN_ERR "%s: buffer pool already full\n", __func__);
+			}
+		}
+
+		/* Clear the descriptor so a later re-init starts clean */
+		desc->data = 0;
+		desc->status = 0;
+		desc->ctrl = 0;
+		desc++;
+	}
+#endif
+}
+
+
+/*
+ * pfe_hif_init_buffers
+ * Initializes the HIF Rx/Tx ring descriptors and attaches an Rx buffer
+ * (taken from the shared-memory pool) to every Rx descriptor.
+ *
+ * @return 0 on success, -ENOMEM if the pool is too small or a DMA
+ *         mapping fails (all buffers are returned to the pool on error).
+ */
+static int pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc *desc, *first_desc_p;
+	u32 data;
+	int i = 0;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	/* Check enough Rx buffers available in the shared memory */
+	if (hif->shm->rx_buf_pool_cnt < hif->RxRingSize)
+		return -ENOMEM;
+
+	hif->RxBase = hif->descr_baseaddr_v;
+	memset(hif->RxBase, 0, hif->RxRingSize * sizeof(struct hif_desc));
+
+	/*Initialize Rx descriptors */
+	desc = hif->RxBase;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+	for (i = 0; i < hif->RxRingSize; i++) {
+		/* Attach an Rx buffer to this descriptor */
+#if defined(CONFIG_PLATFORM_PCI)
+		/* Fixed slot in the shared DDR window. This is a plain bus
+		 * address, not a streaming mapping, so dma_mapping_error()
+		 * does not apply here (the old code wrongly checked it). */
+		desc->data = pfe->ddr_phys_baseaddr + HIF_RX_PKT_DDR_BASEADDR + i * DDR_BUF_SIZE;
+#else
+		data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i], pfe_pkt_size, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(hif->dev, data))) {
+			/* Buffer stays in the shared pool, so the cleanup
+			 * path below does not leak it */
+			printk(KERN_ERR "%s : low on mem\n", __func__);
+			goto err;
+		}
+
+		/* Mapping succeeded: take ownership of the pool buffer */
+		hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
+		hif->shm->rx_buf_pool[i] = NULL;
+		desc->data = data;
+#endif
+		desc->status = 0;
+		/* Descriptor contents must be visible before DESC_EN is set */
+		wmb();
+		desc->ctrl = BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
+			BD_CTRL_DESC_EN | BD_BUF_LEN(pfe_pkt_size);
+		/* Chain descriptors */
+		desc->next = (u32)(first_desc_p + i + 1);
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to first one*/
+	desc--;
+	desc->next = (u32)first_desc_p;
+
+	/*Initialize Rx buffer descriptor ring base address */
+	writel(hif->descr_baseaddr_p, HIF_RX_BDP_ADDR);
+
+	/* Tx ring immediately follows the Rx ring in the same allocation */
+	hif->TxBase = hif->RxBase + hif->RxRingSize;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p + hif->RxRingSize;
+	memset(hif->TxBase, 0, hif->TxRingSize * sizeof(struct hif_desc));
+
+	/*Initialize tx descriptors */
+	desc = hif->TxBase;
+
+	for (i = 0; i < hif->TxRingSize; i++) {
+		/* Chain descriptors */
+		desc->next = (u32)(first_desc_p + i + 1);
+#if defined(CONFIG_PLATFORM_PCI)
+		desc->data = pfe->ddr_phys_baseaddr + HIF_TX_PKT_DDR_BASEADDR + i * DDR_BUF_SIZE;
+#endif
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to first one */
+	desc--;
+	desc->next = (u32)first_desc_p;
+	hif->TxAvail = hif->TxRingSize;
+	hif->Txtosend = 0;
+	hif->Txtoclean = 0;
+
+	/*Initialize Tx buffer descriptor ring base address */
+	writel((u32)first_desc_p, HIF_TX_BDP_ADDR);
+
+	return 0;
+
+err:
+	pfe_hif_release_buffers(hif);
+	return -ENOMEM;
+}
+
+/* pfe_hif_client_register
+ *
+ * Registers a client driver with the HIF driver: records the client's
+ * Rx/Tx queue base addresses and sizes (taken from the shared-memory
+ * descriptor) and marks the client live in gClient_status.
+ * Runs under tx_lock so registration is atomic w.r.t. the Tx path.
+ *
+ * Return value:
+ * 0 - on Successful registration, -1 if the client id is already in use
+ */
+static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id, struct hif_client_shm *client_shm)
+{
+	struct hif_client *client = &hif->client[client_id];
+	u32 i, cnt;
+	struct rx_queue_desc *rx_qbase;
+	struct tx_queue_desc *tx_qbase;
+	struct hif_rx_queue *rx_queue;
+	struct hif_tx_queue *tx_queue;
+	int err = 0;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	spin_lock_bh(&hif->tx_lock);
+
+	/* Refuse double registration */
+	if (test_bit(client_id, &hif->shm->gClient_status[0])) {
+		printk(KERN_ERR "%s: client %d already registered\n", __func__, client_id);
+		err = -1;
+		goto unlock;
+	}
+
+	memset(client, 0, sizeof(struct hif_client));
+
+	/*Initialize client Rx queues baseaddr, size */
+
+	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
+	/*Check if client is requesting for more queues than supported */
+	if (cnt > HIF_CLIENT_QUEUES_MAX)
+		cnt = HIF_CLIENT_QUEUES_MAX;
+
+	client->rx_qn = cnt;
+	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
+	for (i = 0; i < cnt; i++)
+	{
+		/* Queues are laid out back-to-back in the client's shm area */
+		rx_queue = &client->rx_q[i];
+		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
+		rx_queue->size = client_shm->rx_qsize;
+		rx_queue->write_idx = 0;
+	}
+
+	/*Initialize client Tx queues baseaddr, size */
+	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
+
+	/*Check if client is requesting for more queues than supported */
+	if (cnt > HIF_CLIENT_QUEUES_MAX)
+		cnt = HIF_CLIENT_QUEUES_MAX;
+
+	client->tx_qn = cnt;
+	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
+	for (i = 0; i < cnt; i++)
+	{
+		tx_queue = &client->tx_q[i];
+		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
+		tx_queue->size = client_shm->tx_qsize;
+		tx_queue->ack_idx = 0;
+	}
+
+	/* Publish the client: the Rx path checks this bit before delivery */
+	set_bit(client_id, &hif->shm->gClient_status[0]);
+
+unlock:
+	spin_unlock_bh(&hif->tx_lock);
+
+	return err;
+}
+
+
+/* pfe_hif_client_unregister
+ *
+ * Removes a client from the HIF driver. Clearing the status bit under
+ * tx_lock stops any further packet delivery to this client.
+ */
+static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
+{
+	printk(KERN_INFO "%s\n", __func__);
+
+	spin_lock_bh(&hif->tx_lock);
+
+	if (test_bit(client_id, &hif->shm->gClient_status[0]))
+		clear_bit(client_id, &hif->shm->gClient_status[0]);
+	else
+		printk(KERN_ERR "%s: client %d not registered\n", __func__, client_id);
+
+	spin_unlock_bh(&hif->tx_lock);
+}
+
+
+/* client_put_rxpacket-
+ * This functions puts the Rx pkt in the given client Rx queue.
+ * It actually swap the Rx pkt in the client Rx descriptor buffer
+ * and returns the free buffer from it.
+ *
+ * If the funtion returns NULL means client Rx queue is full and
+ * packet couldn't send to client queue.
+ *
+ * NOTE: the inc_cl_idx() macro references the local named 'queue';
+ * this parameter name must not be changed.
+ */
+static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len, u32 flags, u32 client_ctrl)
+{
+	void *free_pkt = NULL;
+	struct rx_queue_desc *desc = queue->base + queue->write_idx;
+
+	/* CL_DESC_OWN set means the slot is free for the driver to fill */
+	if (desc->ctrl & CL_DESC_OWN) {
+#if defined(CONFIG_PLATFORM_PCI)
+		/* PCI: copy into the client's buffer and recycle the source */
+		memcpy(desc->data, pkt, len);
+		free_pkt = PFE_HOST_TO_PCI(pkt);
+		smp_wmb();
+		desc->ctrl = CL_DESC_BUF_LEN(len) | flags;
+		inc_cl_idx(queue->write_idx);
+#else
+		//TODO: move allocations after Rx loop to improve instruction cache locality
+		/* Allocate a replacement buffer; the current packet buffer is
+		 * handed to the client, so the swap is zero-copy */
+		if (page_mode)
+			free_pkt = (void *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
+		else
+			free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC | GFP_DMA_PFE);
+
+		if (free_pkt) {
+			desc->data = pkt;
+			desc->client_ctrl = client_ctrl;
+			/* Publish data/client_ctrl before handing over the slot */
+			smp_wmb();
+			desc->ctrl = CL_DESC_BUF_LEN(len) | flags;
+			inc_cl_idx(queue->write_idx);
+			/* Return the buffer pre-advanced past the headroom */
+			free_pkt += pfe_pkt_headroom;
+		}
+#endif
+	}
+
+	return free_pkt;
+}
+
+
+/* pfe_hif_rx_process-
+ * This function does pfe hif rx queue processing.
+ * Dequeue packet from Rx queue and send it to corresponding client queue
+ *
+ * Called from NAPI poll with the given budget; returns the number of
+ * complete packets delivered (multi-descriptor packets count once).
+ */
+static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
+{
+	struct hif_desc *desc;
+	struct hif_hdr *pkt_hdr;
+	struct __hif_hdr hif_hdr;
+	void *free_buf;
+	int rtc, len, rx_processed = 0;
+	struct __hif_desc local_desc;
+	/* NOTE(review): in the CONFIG_PLATFORM_PCI build 'flags' is never
+	 * assigned before use below — confirm that configuration is dead. */
+	int flags;
+	unsigned int desc_p;
+
+	spin_lock_bh(&hif->lock);
+
+	rtc = hif->RxtocleanIndex;
+
+	while (rx_processed < budget)
+	{
+		/*TODO may need to implement rx process budget */
+		desc = hif->RxBase + rtc;
+
+		/* Snapshot the descriptor (ctrl/status/data) in one burst */
+		__memcpy12(&local_desc, desc);
+
+		/* ACK pending Rx interrupt */
+		if (local_desc.ctrl & BD_CTRL_DESC_EN) {
+			writel(HIF_INT_MASK, HIF_INT_SRC);
+
+			if(rx_processed == 0)
+			{
+				if(napi_first_batch == 1)
+				{
+					/* First pass after the IRQ: invalidate the
+					 * outer cache for this descriptor before
+					 * re-reading it */
+					desc_p = hif->descr_baseaddr_p + ((u32)(desc) -(u32)hif->descr_baseaddr_v);
+					outer_inv_range(desc_p, (desc_p + 16));
+					napi_first_batch = 0;
+				}
+			}
+
+			/* Re-read after the ACK/invalidate; if still owned by
+			 * hardware there is nothing more to process */
+			__memcpy12(&local_desc, desc);
+
+			if (local_desc.ctrl & BD_CTRL_DESC_EN)
+				break;
+		}
+
+		napi_first_batch = 0;
+
+#ifdef HIF_NAPI_STATS
+		hif->napi_counters[NAPI_DESC_COUNT]++;
+#endif
+		len = BD_BUF_LEN(local_desc.ctrl);
+#if defined(CONFIG_PLATFORM_PCI)
+		pkt_hdr = &hif_hdr;
+		memcpy(pkt_hdr, (void *)PFE_PCI_TO_HOST(local_desc.data), sizeof(struct hif_hdr));
+#else
+		dma_unmap_single(hif->dev, local_desc.data, pfe_pkt_size, DMA_FROM_DEVICE);
+
+		pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
+
+		/* Track last HIF header received */
+		if (!hif->started) {
+			/* First descriptor of a packet: parse the HIF header
+			 * to learn the destination client/queue */
+			hif->started = 1;
+
+			__memcpy8(&hif_hdr, pkt_hdr);
+
+			hif->qno = hif_hdr.hdr.qNo;
+			hif->client_id = hif_hdr.hdr.client_id;
+			hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) | hif_hdr.hdr.client_ctrl;
+			flags = CL_DESC_FIRST;
+
+//			printk(KERN_INFO "start of packet: id %d, q %d, len %d, flags %x %x\n", hif->client_id, hif->qno, len, local_desc.ctrl, hif->client_ctrl);
+		}
+		else {
+//			printk(KERN_INFO "continuation: id %d, q %d, len %d, flags %x\n", hif->client_id, hif->qno, len, local_desc.ctrl);
+			flags = 0;
+		}
+
+		if (local_desc.ctrl & BD_CTRL_LIFM)
+			flags |= CL_DESC_LAST;
+#endif
+		/* Check for valid client id and still registered */
+		if ((hif->client_id >= HIF_CLIENTS_MAX) || !(test_bit(hif->client_id, &hif->shm->gClient_status[0]))) {
+			if (printk_ratelimit())
+				printk(KERN_ERR "%s: packet with invalid client id %d qNo %d\n", __func__, hif->client_id, hif->qno);
+
+#if defined(CONFIG_PLATFORM_PCI)
+			free_buf = local_desc.data;
+#else
+			free_buf = pkt_hdr;
+#endif
+			goto pkt_drop;
+		}
+
+		/* Check to valid queue number */
+		if (hif->client[hif->client_id].rx_qn <= hif->qno) {
+			printk(KERN_INFO "%s: packet with invalid queue: %d\n", __func__, hif->qno);
+			hif->qno = 0;
+		}
+
+		/* Hand the buffer to the client; get a replacement back */
+#if defined(CONFIG_PLATFORM_PCI)
+		free_buf = client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
+				(void *)PFE_PCI_TO_HOST(desc->data), len, flags, hif->client_ctrl);
+#else
+		free_buf = client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
+				(void *)pkt_hdr, len, flags, hif->client_ctrl);
+#endif
+
+		hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND, hif->qno);
+
+		if (unlikely(!free_buf)) {
+#ifdef HIF_NAPI_STATS
+			hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
+#endif
+			/* If we want to keep in polling mode to retry later, we need to tell napi that we consumed
+			the full budget or we will hit a livelock scenario. The core code keeps this napi instance
+			at the head of the list and none of the other instances get to run */
+			rx_processed = budget;
+
+			if (flags & CL_DESC_FIRST)
+				hif->started = 0;
+
+			break;
+		}
+
+	pkt_drop:
+#if defined(CONFIG_PLATFORM_PCI)
+		desc->data = (u32)free_buf;
+#else
+		/*Fill free buffer in the descriptor */
+		/* NOTE(review): dma_map_single() return value is not checked
+		 * with dma_mapping_error() here, unlike the init path —
+		 * confirm whether a failed refill mapping is possible. */
+		hif->rx_buf_addr[rtc] = free_buf;
+		desc->data = (u32)dma_map_single(hif->dev, free_buf, pfe_pkt_size, DMA_FROM_DEVICE);
+#endif
+		/* Publish the new buffer before giving the slot to hardware */
+		wmb();
+		desc->ctrl = BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
+			BD_CTRL_DESC_EN | BD_BUF_LEN(pfe_pkt_size);
+
+		inc_hif_rxidx(rtc);
+
+		if (local_desc.ctrl & BD_CTRL_LIFM) {
+			/* Count a packet only when this frame is not continued
+			 * in a follow-up HIF frame */
+			if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
+				rx_processed++;
+
+#ifdef HIF_NAPI_STATS
+				hif->napi_counters[NAPI_PACKET_COUNT]++;
+#endif
+			}
+			hif->started = 0;
+		}
+	}
+
+	hif->RxtocleanIndex = rtc;
+	spin_unlock_bh(&hif->lock);
+
+	/* we made some progress, re-start rx dma in case it stopped */
+	hif_rx_dma_start();
+
+	return rx_processed;
+}
+
+
+/* client_ack_txpacket-
+ * Acknowledges one transmitted packet in the given client Tx queue by
+ * clearing the ownership bit of the descriptor at ack_idx.
+ * Returns 0 on success, 1 (after BUG()) if the descriptor was not owned.
+ * The local variable MUST be named 'queue': inc_cl_idx() refers to it.
+ */
+static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no)
+{
+	struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
+	struct tx_queue_desc *entry = queue->base + queue->ack_idx;
+
+	if (!(entry->ctrl & CL_DESC_OWN)) {
+		/* Ring accounting is corrupt — dump state and crash loudly */
+		printk(KERN_ERR "%s: %d %d %d %d %d %p %d\n", __func__, hif->Txtosend, hif->Txtoclean, hif->TxAvail, client_id, q_no, queue, queue->ack_idx);
+		BUG();
+		return 1;
+	}
+
+	/*TODO Do we need to match the pkt address also? */
+	entry->ctrl &= ~CL_DESC_OWN;
+	inc_cl_idx(queue->ack_idx);
+
+	return 0;
+}
+
+/* Reclaim up to 'count' completed Tx descriptors: unmap each buffer and
+ * ack it back to the owning client. Stops early at the first descriptor
+ * the hardware still owns. Caller must hold hif->tx_lock.
+ */
+void __hif_tx_done_process(struct pfe_hif *hif, int count)
+{
+	struct hif_desc *hw_desc;
+	struct hif_desc_sw *sw_desc;
+	int ttc = hif->Txtoclean;
+	int tx_avl = hif->TxAvail;
+
+	while ((tx_avl < hif->TxRingSize) && count--) {
+		hw_desc = hif->TxBase + ttc;
+
+		/* DESC_EN set: hardware has not finished this one yet */
+		if (hw_desc->ctrl & BD_CTRL_DESC_EN)
+			break;
+
+		sw_desc = &hif->tx_sw_queue[ttc];
+
+#if !defined(CONFIG_PLATFORM_PCI)
+		/* data == 0 means the buffer was queued with HIF_DONT_DMA_MAP */
+		if (sw_desc->data)
+			dma_unmap_single(hif->dev, sw_desc->data, sw_desc->len, DMA_TO_DEVICE);
+#endif
+		client_ack_txpacket(hif, sw_desc->client_id, sw_desc->q_no);
+
+		inc_hif_txidx(ttc);
+		tx_avl++;
+	}
+
+	hif->Txtoclean = ttc;
+	hif->TxAvail = tx_avl;
+}
+
+
+/* __hif_xmit_pkt -
+ * This function puts one packet (or packet fragment) in the HIF Tx queue.
+ * Caller must hold hif->tx_lock and must have checked __hif_tx_avail().
+ * For TSO, descriptors are staged and only handed to hardware (DESC_EN)
+ * once the last buffer arrives, so a single wmb() covers the whole burst.
+ */
+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, u32 len, unsigned int flags)
+{
+	struct hif_desc *desc;
+	struct hif_desc_sw *desc_sw;
+
+#if defined(CONFIG_PLATFORM_EMULATION)
+	{
+		/* Emulation: loop the packet straight back into the client's
+		 * Rx queue 0. Fixed to use the real queue/descriptor types
+		 * and the 3-argument client_ack_txpacket() signature (the
+		 * old code did not compile in this configuration). */
+		struct hif_rx_queue *queue = &hif->client[client_id].rx_q[0];
+		struct rx_queue_desc *qdesc = queue->base + queue->write_idx;
+		void *buf;
+
+		printk("%s: packet loop backed client_id:%d qno:%d data : %p len:%d\n", __func__, client_id, q_no, data, len);
+#if 1
+		if (qdesc->ctrl & CL_DESC_OWN) {
+			buf = (void *)qdesc->data;
+			memcpy(buf, data, len);
+			wmb();
+			qdesc->ctrl = CL_DESC_BUF_LEN(len);
+			inc_cl_idx(queue->write_idx);
+			printk("%s: packet loop backed..\n", __func__);
+			hif_lib_indicate_client(client_id, EVENT_RX_PKT_IND, q_no);
+			client_ack_txpacket(hif, client_id, q_no);
+		}
+#endif
+	}
+
+#else
+	desc = hif->TxBase + hif->Txtosend;
+	desc_sw = &hif->tx_sw_queue[hif->Txtosend];
+
+	/* Record per-descriptor bookkeeping for the tx-done path */
+	desc_sw->len = len;
+	desc_sw->client_id = client_id;
+	desc_sw->q_no = q_no;
+	desc_sw->flags = flags;
+
+#if !defined(CONFIG_PLATFORM_PCI)
+	if (flags & HIF_DONT_DMA_MAP) {
+		/* Caller already provided a device-visible address */
+		desc_sw->data = 0;
+		desc->data = (u32)data;
+	} else {
+		desc_sw->data = dma_map_single(hif->dev, data, len, DMA_TO_DEVICE);
+		desc->data = (u32)desc_sw->data;
+	}
+#else
+#define ALIGN32(x)	((x) & ~0x3)
+	memcpy(PFE_PCI_TO_HOST(desc->data), data, ALIGN32(len+0x3));
+#endif
+
+	inc_hif_txidx(hif->Txtosend);
+	hif->TxAvail--;
+
+	/* For TSO we skip actual TX until the last descriptor */
+	/* This reduce the number of required wmb() */
+	if ((flags & HIF_TSO) && (!((flags & HIF_DATA_VALID) && (flags & HIF_LAST_BUFFER))))
+		goto skip_tx;
+
+	/* Make all staged descriptor writes visible before enabling them */
+	wmb();
+
+	do {
+		desc_sw = &hif->tx_sw_queue[hif->Txtoflush];
+		desc = hif->TxBase + hif->Txtoflush;
+
+		if (desc_sw->flags & HIF_LAST_BUFFER) {
+			/* Non-VWD clients bypass bridge/route/parse lookups */
+			if ((desc_sw->client_id < PFE_CL_VWD0) || (desc_sw->client_id > (PFE_CL_VWD0 + MAX_VAP_SUPPORT)))
+				desc->ctrl = BD_CTRL_LIFM | BD_CTRL_BRFETCH_DISABLE |
+					BD_CTRL_RTFETCH_DISABLE | BD_CTRL_PARSE_DISABLE |
+					BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len);
+			else
+				desc->ctrl = BD_CTRL_LIFM | BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len);
+		}
+		else
+			desc->ctrl = BD_CTRL_DESC_EN | BD_BUF_LEN(desc_sw->len);
+
+		inc_hif_txidx(hif->Txtoflush);
+	}
+	while (hif->Txtoflush != hif->Txtosend);
+
+skip_tx:
+	return;
+
+#endif
+}
+
+
+/* Locked single-buffer transmit wrapper around __hif_xmit_pkt().
+ * Returns 0 if the packet was queued, 1 if the Tx ring was full.
+ * Also opportunistically reclaims completed descriptors once the ring
+ * drops below half capacity.
+ */
+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, unsigned int len)
+{
+	int busy = 0;
+
+	spin_lock_bh(&hif->tx_lock);
+
+	if (hif->TxAvail) {
+		__hif_xmit_pkt(hif, client_id, q_no, data, len, HIF_FIRST_BUFFER | HIF_LAST_BUFFER);
+		hif_tx_dma_start();
+	} else {
+		busy = 1;
+	}
+
+	if (hif->TxAvail < (hif->TxRingSize >> 1))
+		__hif_tx_done_process(hif, TX_FREE_MAX_COUNT);
+
+	spin_unlock_bh(&hif->tx_lock);
+
+	return busy;
+}
+
+/* hif_isr-
+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block.
+ * On an Rx packet interrupt it masks further HIF interrupts and hands
+ * processing off to NAPI; interrupts are re-enabled by pfe_hif_rx_poll()
+ * once the poll completes under budget.
+ */
+static irqreturn_t hif_isr(int irq, void *dev_id)
+{
+	struct pfe_hif *hif = (struct pfe_hif *) dev_id;
+	int int_status;
+
+	/*Read hif interrupt source register */
+	int_status = readl_relaxed(HIF_INT_SRC);
+
+	/* Not our interrupt (the line may be shared) */
+	if ((int_status & HIF_INT) == 0)
+		return(IRQ_NONE);
+
+	int_status &= ~(HIF_INT);
+
+	if (int_status & HIF_RXPKT_INT) {
+		int_status &= ~(HIF_RXPKT_INT);
+
+		/* Disable interrupts */
+		writel_relaxed(0, HIF_INT_ENABLE);
+
+		/* Tell the poll loop to invalidate the outer cache on its
+		 * first descriptor read (see pfe_hif_rx_process) */
+		napi_first_batch = 1;
+
+		if (napi_schedule_prep(&hif->napi))
+		{
+#ifdef HIF_NAPI_STATS
+			hif->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+			__napi_schedule(&hif->napi);
+		}
+	}
+
+	/* Any remaining bits are unexpected: log and ack them */
+	if (int_status) {
+		printk(KERN_INFO "%s : Invalid interrupt : %d\n", __func__, int_status);
+		writel(int_status, HIF_INT_SRC);
+	}
+
+	return IRQ_HANDLED;
+}
+
+
+/* Dispatch a control request from a HIF client.
+ * data1 carries the client id; data2 is currently unused but kept for
+ * future request types.
+ */
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
+{
+	unsigned int client_id = data1;
+
+	if (client_id >= HIF_CLIENTS_MAX)
+	{
+		printk(KERN_ERR "%s: client id %d out of bounds\n", __func__, client_id);
+		return;
+	}
+
+	if (req == REQUEST_CL_REGISTER) {
+		printk(KERN_INFO "%s: register client_id %d\n", __func__, client_id);
+		pfe_hif_client_register(hif, client_id, (struct hif_client_shm *)&hif->shm->client[client_id]);
+	} else if (req == REQUEST_CL_UNREGISTER) {
+		printk(KERN_INFO "%s: unregister client_id %d\n", __func__, client_id);
+		pfe_hif_client_unregister(hif, client_id);
+	} else {
+		printk(KERN_ERR "%s: unsupported request %d\n", __func__, req);
+	}
+
+	/*TODO check for TMU queue resume request */
+
+	/*Process client Tx queues
+	 * Currently we don't have checking for tx pending*/
+}
+
+/** pfe_hif_rx_poll
+ * This function is NAPI poll function to process HIF Rx queue.
+ * Re-enables HIF interrupts only when the ring was drained within the
+ * budget; otherwise NAPI keeps polling.
+ */
+static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
+	int work_done;
+
+#ifdef HIF_NAPI_STATS
+	hif->napi_counters[NAPI_POLL_COUNT]++;
+#endif
+
+	work_done = pfe_hif_rx_process(hif, budget);
+
+	if (work_done < budget)
+	{
+		/* Ring drained: exit polling mode and unmask interrupts
+		 * (masked by hif_isr) */
+		napi_complete(napi);
+		writel_relaxed(HIF_INT_MASK, HIF_INT_ENABLE);
+	}
+#ifdef HIF_NAPI_STATS
+	else
+		hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
+#endif
+
+	return work_done;
+}
+
+/* pfe_hif_init
+ * Allocates descriptor rings and Rx buffers, registers the NAPI poll
+ * handler, brings up the HIF Rx/Tx datapath and requests the HIF IRQ.
+ *
+ * @return 0 on success, negative errno on failure (everything set up
+ *         before the failure point is torn down again).
+ */
+int pfe_hif_init(struct pfe *pfe)
+{
+	struct pfe_hif *hif = &pfe->hif;
+	int err;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	hif->dev = pfe->dev;
+	hif->irq = pfe->hif_irq;
+
+	if ((err = pfe_hif_alloc_descr(hif))) {
+		goto err0;
+	}
+
+	if (pfe_hif_init_buffers(hif)) {
+		printk(KERN_ERR "%s: Could not initialize buffer descriptors\n", __func__);
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	/* Initilize NAPI for Rx processing */
+	init_dummy_netdev(&hif->dummy_dev);
+	netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll, HIF_RX_POLL_WEIGHT);
+	napi_enable(&hif->napi);
+
+	spin_lock_init(&hif->tx_lock);
+	spin_lock_init(&hif->lock);
+
+	hif_init();
+	hif_rx_enable();
+	hif_tx_enable();
+
+	/* Unmask Rx interrupts only; tx done interrupt stays disabled and
+	 * reclaim is done synchronously from the transmit path */
+	writel(HIF_INT_MASK, HIF_INT_ENABLE);
+
+	gpi_enable(HGPI_BASE_ADDR);
+
+#ifdef __KERNEL__
+	err = request_irq(hif->irq, hif_isr, IRQF_DISABLED, "pfe_hif", hif);
+	if (err) {
+		printk(KERN_ERR "%s: failed to get the hif IRQ = %d\n", __func__, hif->irq);
+		goto err2;
+	}
+#else
+	/*TODO register interrupts */
+#endif
+
+	return 0;
+
+#ifdef __KERNEL__
+err2:
+	/* Undo everything done after buffer init so a failed probe does not
+	 * leave the datapath enabled or leak NAPI state and Rx buffers
+	 * (previously this path jumped straight to err1 and leaked them) */
+	gpi_disable(HGPI_BASE_ADDR);
+	hif_tx_disable();
+	hif_rx_disable();
+	napi_disable(&hif->napi);
+	netif_napi_del(&hif->napi);
+	pfe_hif_release_buffers(hif);
+#endif
+err1:
+	pfe_hif_free_descr(hif);
+err0:
+	return err;
+}
+
+/* pfe_hif_exit-
+ * Tears down the HIF driver: disables all clients, stops the Rx/Tx
+ * datapath, unregisters NAPI, frees the IRQ and releases all buffers
+ * and descriptor rings.
+ */
+void pfe_hif_exit(struct pfe *pfe)
+{
+	struct pfe_hif *hif = &pfe->hif;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	spin_lock_bh(&hif->lock);
+	hif->shm->gClient_status[0] = 0;
+	hif->shm->gClient_status[1] = 0; /* Make sure all clients are disabled */
+
+	spin_unlock_bh(&hif->lock);
+
+	/*Disable Rx/Tx */
+	gpi_disable(HGPI_BASE_ADDR);
+	hif_rx_disable();
+	hif_tx_disable();
+
+	napi_disable(&hif->napi);
+	netif_napi_del(&hif->napi);
+
+#ifdef __KERNEL__
+	free_irq(hif->irq, hif);
+#endif
+	/* Return Rx buffers to the shared pool, then free the rings */
+	pfe_hif_release_buffers(hif);
+	pfe_hif_free_descr(hif);
+}
diff --git a/pfe_ctrl/pfe_hif.h b/pfe_ctrl/pfe_hif.h
new file mode 100644
index 0000000..f331e0a
--- /dev/null
+++ b/pfe_ctrl/pfe_hif.h
@@ -0,0 +1,279 @@
+#ifndef _PFE_HIF_H_
+#define _PFE_HIF_H_
+
+#include <linux/netdevice.h>
+
+#define HIF_NAPI_STATS
+
+#define HIF_CLIENT_QUEUES_MAX 16
+#define HIF_RX_POLL_WEIGHT 64
+
+enum {
+ NAPI_SCHED_COUNT = 0,
+ NAPI_POLL_COUNT,
+ NAPI_PACKET_COUNT,
+ NAPI_DESC_COUNT,
+ NAPI_FULL_BUDGET_COUNT,
+ NAPI_CLIENT_FULL_COUNT,
+ NAPI_MAX_COUNT
+};
+
+
+/* XXX HIF_TX_DESC_NT value should be always greter than 4,
+ * Otherwise HIF_TX_POLL_MARK will become zero.
+ */
+#if defined(CONFIG_PLATFORM_PCI)
+#define HIF_RX_DESC_NT 4
+#define HIF_TX_DESC_NT 4
+#else
+#if defined(CONFIG_COMCERTO_64K_PAGES)
+#define HIF_RX_DESC_NT 64
+#else
+#define HIF_RX_DESC_NT 256
+#endif
+#define HIF_TX_DESC_NT 2048
+#endif
+
+#define HIF_FIRST_BUFFER (1 << 0)
+#define HIF_LAST_BUFFER (1 << 1)
+#define HIF_DONT_DMA_MAP (1 << 2) //TODO merge it with TSO
+#define HIF_DATA_VALID (1 << 3)
+#define HIF_TSO (1 << 4)
+
+#define MAX_VAP_SUPPORT 3
+#define MAX_WIFI_VAPS MAX_VAP_SUPPORT
+
+enum {
+ PFE_CL_GEM0 = 0,
+ PFE_CL_GEM1,
+ PFE_CL_GEM2,
+ PFE_CL_VWD0,
+ PFE_CL_VWD_LAST = PFE_CL_VWD0 + MAX_VAP_SUPPORT,
+ PFE_CL_PCAP0,
+ HIF_CLIENTS_MAX
+};
+
+/*structure to store client queue info */
+struct hif_rx_queue {
+ struct rx_queue_desc *base;
+ u32 size;
+ u32 write_idx;
+};
+
+struct hif_tx_queue {
+ struct tx_queue_desc *base;
+ u32 size;
+ u32 ack_idx;
+};
+
+/*Structure to store the client info */
+struct hif_client {
+ int rx_qn;
+ struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+ int tx_qn;
+ struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+};
+
+/*HIF hardware buffer descriptor */
+struct hif_desc {
+ volatile u32 ctrl;
+ volatile u32 status;
+ volatile u32 data;
+ volatile u32 next;
+};
+
+struct __hif_desc {
+ u32 ctrl;
+ u32 status;
+ u32 data;
+};
+
+struct hif_desc_sw {
+ dma_addr_t data;
+ u16 len;
+ u8 client_id;
+ u8 q_no;
+ u16 flags;
+};
+
+struct hif_hdr {
+ u8 client_id;
+ u8 qNo;
+ u16 client_ctrl;
+ u16 client_ctrl1;
+};
+
+struct __hif_hdr {
+ union {
+ struct hif_hdr hdr;
+ u32 word[2];
+ };
+};
+
+struct hif_lro_hdr {
+ u16 data_offset;
+ u16 mss;
+};
+
+struct hif_ipsec_hdr {
+ u16 sa_handle[2];
+}__attribute__((packed));
+
+struct hif_tso_hdr {
+ struct hif_hdr pkt_hdr;
+ u16 ip_off;
+ u16 ip_id;
+ u16 ip_len;
+ u16 tcp_off;
+ u32 tcp_seq;
+} __attribute__((packed));
+
+struct hif_pcap_hdr {
+ u8 ifindex;
+ u8 unused;
+ u16 seqno;
+ u32 timestamp;
+}__attribute__((packed));
+
+/* HIF_CTRL_TX... defines */
+#define HIF_CTRL_TX_IPSEC_OUT (1 << 7)
+#define HIF_CTRL_TX_OWN_MAC (1 << 6)
+#define HIF_CTRL_TX_TSO_END (1 << 5)
+#define HIF_CTRL_TX_TSO6 (1 << 4)
+#define HIF_CTRL_TX_TSO (1 << 3)
+#define HIF_CTRL_TX_CHECKSUM (1 << 2)
+#define HIF_CTRL_TX_CSUM_VALIDATE (1 << 1)
+#define HIF_CTRL_TX_WIFI (1 << 0)
+
+/* HIF_CTRL_RX... defines */
+#define HIF_CTRL_RX_OFFSET_OFST (24)
+#define HIF_CTRL_RX_PE_ID_OFST (16)
+#define HIF_CTRL_RX_IPSEC_IN (1 << 4)
+#define HIF_CTRL_RX_WIFI_EXPT (1 << 3)
+#define HIF_CTRL_RX_CHECKSUMMED (1 << 2)
+#define HIF_CTRL_RX_CONTINUED (1 << 1)
+#define HIF_CTRL_RX_WIFI_HEADROOM (1 << 0)
+
+#define HIF_CTRL_VAPID_OFST (8)
+
+/* Top-level HIF driver state: client table, descriptor rings with their
+ * software bookkeeping, and the NAPI/IRQ plumbing. */
+struct pfe_hif {
+	/* To store registered clients in hif layer */
+	struct hif_client client[HIF_CLIENTS_MAX];
+	struct hif_shm *shm;		/* shared memory with client drivers */
+	int irq;			/* HIF interrupt line */
+
+	/* Descriptor area (Rx ring followed by Tx ring) */
+	void *descr_baseaddr_v;		/* virtual base */
+	unsigned long descr_baseaddr_p;	/* physical/bus base */
+
+	struct hif_desc *RxBase;	/* first Rx descriptor */
+	u32 RxRingSize;
+	u32 RxtocleanIndex;		/* next Rx descriptor to process */
+	void *rx_buf_addr[HIF_RX_DESC_NT];	/* virtual addr per Rx slot */
+	/* State of the packet currently being reassembled from the ring */
+	unsigned int qno;
+	unsigned int client_id;
+	unsigned int client_ctrl;
+	unsigned int started;		/* non-zero while mid-packet */
+
+	struct hif_desc *TxBase;	/* first Tx descriptor */
+	u32 TxRingSize;
+	u32 Txtosend;			/* next free Tx slot */
+	u32 Txtoclean;			/* next Tx slot to reclaim */
+	u32 TxAvail;			/* free Tx slots remaining */
+	u32 Txtoflush;			/* first staged slot not yet enabled */
+	struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];	/* per-slot bookkeeping */
+	struct hif_tso_hdr *tso_hdr_v;	/* per-slot TSO headers (virtual) */
+	dma_addr_t tso_hdr_p;
+
+	spinlock_t tx_lock;		/* protects Tx ring + client table */
+	spinlock_t lock;		/* protects Rx processing */
+	struct net_device dummy_dev;	/* carrier for the NAPI context */
+	struct napi_struct napi;
+	struct device *dev;		/* device used for DMA mappings */
+
+#ifdef CONFIG_HOTPLUG_CPU
+	struct notifier_block cpu_notify;
+#endif
+
+#ifdef HIF_NAPI_STATS
+	unsigned int napi_counters[NAPI_MAX_COUNT];
+#endif
+};
+
+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, u32 len, unsigned int flags);
+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no, void *data, unsigned int len);
+void __hif_tx_done_process(struct pfe_hif *hif, int count);
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2);
+int pfe_hif_init(struct pfe *pfe);
+void pfe_hif_exit(struct pfe *pfe);
+
+/* Locked wrapper around __hif_tx_done_process() for callers that do not
+ * already hold tx_lock. */
+static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
+{
+	spin_lock_bh(&hif->tx_lock);
+	__hif_tx_done_process(hif, count);
+	spin_unlock_bh(&hif->tx_lock);
+}
+
+/* Take the Tx lock; pair with hif_tx_unlock() around __hif_xmit_pkt() */
+static inline void hif_tx_lock(struct pfe_hif *hif)
+{
+	spin_lock_bh(&hif->tx_lock);
+}
+
+static inline void hif_tx_unlock(struct pfe_hif *hif)
+{
+	spin_unlock_bh(&hif->tx_lock);
+}
+
+/* Free Tx descriptors remaining; caller should hold tx_lock for a
+ * stable answer */
+static inline int __hif_tx_avail(struct pfe_hif *hif)
+{
+	return hif->TxAvail;
+}
+
+/* Fixed-size copy helpers using ARM load/store-multiple instructions.
+ * NOTE(review): ldm/stm here presumably require 4-byte-aligned dst/src —
+ * confirm all callers guarantee alignment. */
+
+/* Copy exactly 8 bytes from src to dst */
+static inline void __memcpy8(void *dst, void *src)
+{
+	asm volatile (	"ldm %1, {r9, r10}\n\t"
+			"stm %0, {r9, r10}\n\t"
+			:
+			: "r" (dst), "r" (src)
+			: "r9", "r10", "memory"
+		);
+}
+
+/* Copy exactly 12 bytes from src to dst (used to snapshot ctrl/status/
+ * data of a hardware descriptor in one burst) */
+static inline void __memcpy12(void *dst, void *src)
+{
+	asm volatile (	"ldm %1, {r8, r9, r10}\n\t"
+			"stm %0, {r8, r9, r10}\n\t"
+			:
+			: "r" (dst), "r" (src)
+			: "r8", "r9", "r10", "memory"
+		);
+}
+
+/* Copy exactly 16 bytes from src to dst */
+static inline void __memcpy16(void *dst, void *src)
+{
+	asm volatile (	"ldm %1, {r7, r8, r9, r10}\n\t"
+			"stm %0, {r7, r8, r9, r10}\n\t"
+			:
+			: "r"(dst), "r"(src)
+			: "r7", "r8", "r9", "r10", "memory"
+		);
+}
+
+#define HIF_MEMCPY_BURSTSIZE 32	/*__memcpy copy 32byte in a burst*/
+/* Bulk copy in 32-byte ldm/stm bursts.
+ * CAUTION: both pointers are rounded DOWN to 4-byte alignment and the
+ * loop always copies whole 32-byte bursts, so up to 31 bytes past
+ * src+len may be read and the same may be written past dst+len —
+ * callers must size their buffers accordingly.
+ */
+static inline void __memcpy(void *dst, void *src, unsigned int len)
+{
+	void *end = src + len;
+
+	dst = (void *)((unsigned long)dst & ~0x3);
+	src = (void *)((unsigned long)src & ~0x3);
+
+	while (src < end) {
+		asm volatile (	"ldm %1!, {r3, r4, r5, r6, r7, r8, r9, r10}\n\t"
+				"stm %0!, {r3, r4, r5, r6, r7, r8, r9, r10}\n\t"
+				: "+r"(dst), "+r"(src)
+				:
+				: "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "memory"
+			);
+	}
+}
+
+#endif /* _PFE_HIF_H_ */
diff --git a/pfe_ctrl/pfe_hif_lib.c b/pfe_ctrl/pfe_hif_lib.c
new file mode 100644
index 0000000..f12f0d7
--- /dev/null
+++ b/pfe_ctrl/pfe_hif_lib.c
@@ -0,0 +1,759 @@
+#include <linux/version.h>
+#include <asm/system.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/moduleparam.h>
+#include <linux/cpu.h>
+
+#include "pfe_mod.h"
+#include "pfe_hif.h"
+#include "pfe_hif_lib.h"
+#include "pfe_ctrl_hal.h"
+
+
+unsigned int lro_mode = 0;
+unsigned int page_mode = 0;
+module_param(lro_mode, uint, S_IRUGO);
+MODULE_PARM_DESC(lro_mode,
+ "0: disable lro support(default), 1: enable lro support, skb data is kept in pages, class is configured in toe mode");
+unsigned int tx_qos = 0;
+module_param(tx_qos, uint, S_IRUGO);
+MODULE_PARM_DESC(tx_qos,
+ "0: disable tx qos (default), 1: enable tx qos, guarantee no packet drop at TMU level, only works if forwarding is disabled");
+unsigned int pfe_pkt_size;
+unsigned int pfe_pkt_headroom;
+
+
+/** @file pfe_hif_lib.c
+ * Common functions used by HIF client drivers
+ */
+
+/*HIF shared memory Global variable */
+struct hif_shm ghif_shm;
+
+/* TMU tx transmitted packets counter, 1 per TMU */
+unsigned int TMU_DMEM_SH(tx_trans)[EMAC_TXQ_CNT];
+
+/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit
+ *
+ * @param[in] hif_shm		Shared memory address location in DDR
+ */
+static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+	int i;
+	void *pkt;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		pkt = hif_shm->rx_buf_pool[i];
+		if (pkt) {
+			hif_shm->rx_buf_pool[i] = NULL;
+			/* Pool entries point past the headroom (see
+			 * pfe_hif_shm_init); step back to the allocation start
+			 * before freeing. */
+			pkt -= pfe_pkt_headroom;
+
+			/* page_mode buffers come from __get_free_page,
+			 * others from kmalloc -- free accordingly. */
+			if (page_mode)
+				free_page((unsigned long)pkt);
+			else
+				kfree(pkt);
+		}
+	}
+}
+
+/* Initialize shared memory used between HIF driver and clients,
+ * allocate rx_buffer_pool required for HIF Rx descriptors.
+ * This function should be called before initializing HIF driver.
+ *
+ * @param[in] hif_shm		Shared memory address location in DDR
+ * @return 0 - on success, <0 on fail to initialize
+ */
+static int pfe_hif_shm_init(struct hif_shm *hif_shm)
+{
+	int i;
+	void *pkt;
+
+	memset(hif_shm, 0, sizeof(struct hif_shm));
+	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		/* One whole page per buffer in page (LRO) mode, otherwise a
+		 * fixed-size kmalloc buffer. */
+		if (page_mode)
+			pkt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA_PFE);
+		else
+			pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
+
+		/* Store the pointer advanced past the headroom; the matching
+		 * subtraction is done in pfe_hif_shm_clean(). */
+		if (pkt)
+			hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
+		else
+			goto err0;
+	}
+
+	return 0;
+
+err0:
+	printk(KERN_ERR "%s Low memory\n", __func__);
+	pfe_hif_shm_clean(hif_shm);
+	return -ENOMEM;
+}
+
+/*This function sends indication to HIF driver
+ *
+ * @param[in] hif	hif context
+ * @param[in] req	request code (REQUEST_CL_REGISTER / _UNREGISTER)
+ * @param[in] data1	request-specific data (client id)
+ * @param[in] data2	request-specific data
+ **/
+static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int data2)
+{
+	//TODO : If we separate HIF and HIF LIB, then send req and data through shared memory.
+
+	hif_process_client_req(hif, req, data1, data2);
+}
+
+/** hif_lib_set_rx_cpu_affinity
+ * Select which CPU runs the client's rx event processing.
+ * cpu_id < 0 means "same CPU as HIF rx".  An offline CPU is recorded in
+ * user_cpu_id only and applied later by the CPU hotplug notifier.
+ *
+ * @param[in] client	Client control block.
+ * @param[in] cpu_id	cpu number.
+ */
+void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id)
+{
+	spin_lock_bh(&pfe->hif.lock);
+
+
+	if (cpu_id < 0) {
+		printk(KERN_INFO "%s:Client (%d) and HIF Rx processing will use same cpu\n", __func__, client->id);
+		client->user_cpu_id = client->cpu_id = cpu_id;
+	}
+	else if ( cpu_online(cpu_id)) {
+		printk(KERN_INFO "%s:Client (%d) rx processing is moved to cpu#%d\n", __func__, client->id, cpu_id);
+		client->user_cpu_id = client->cpu_id = cpu_id;
+	}
+	else {
+		/* Requested CPU is offline: remember the wish, keep cpu_id as-is. */
+		client->user_cpu_id = cpu_id;
+		printk(KERN_INFO "%s : CPU (%d) is offline, applied automatically after cpu online\n", __func__, cpu_id);
+	}
+	spin_unlock_bh(&pfe->hif.lock);
+}
+
+/** hif_lib_load_balance
+ * Cross-CPU callback (invoked via smp_call_function_single from
+ * hif_lib_indicate_client): unpacks the event and forwards it to the
+ * client's event handler on the target CPU.
+ * @data packs (event_type << 24) | (qno << 16) | client_id.
+ */
+static void hif_lib_load_balance(void *data)
+{
+	int cl_id = (u32)data & 0xffff;
+	int qno = ((u32)data >> 16) & 0xff;
+	int event_type = ((u32)data >> 24) & 0xff;
+	struct hif_client_s *client = pfe->hif_client[cl_id];
+
+	client->event_handler(client->priv, event_type, qno);
+}
+
+/* Deliver an event to a client's handler, at most once per (event, queue)
+ * until the client re-arms it via hif_lib_event_handler_start().
+ * EVENT_RX_PKT_IND may be redirected to the client's preferred CPU. */
+void hif_lib_indicate_client(int client_id, int event_type, int qno)
+{
+	struct hif_client_s *client = pfe->hif_client[client_id];
+
+	/*
+	 * TODO : Right now, all events are queue number based. So we are masking events per queue
+	 * basis. Later if we add any events those do not depend on queue number, then we
+	 * may need to add masking per event.
+	 */
+	if (!client  || (event_type >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX) )
+		return;
+
+	/* queue_mask bit set => event already pending; skip duplicate delivery. */
+	if (!test_and_set_bit(qno, &client->queue_mask[event_type])) {
+
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+		/* Bounce rx events to the client's chosen CPU (wait=0, async);
+		 * fall through to local delivery if the IPI cannot be sent. */
+		if ((event_type == EVENT_RX_PKT_IND) && ((client->cpu_id != smp_processor_id()) && (client->cpu_id >= 0)))
+			if(!smp_call_function_single(client->cpu_id, hif_lib_load_balance, (void *)((event_type << 24) | (qno << 16) | client_id), 0))
+				return;
+
+#endif
+		client->event_handler(client->priv, event_type, qno);
+	}
+
+}
+
+
+/*This function releases Rx queue descriptors memory and pre-filled buffers
+ *
+ * @param[in] client	hif_client context
+ */
+static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
+{
+	struct rx_queue_desc *desc;
+	int qno, ii;
+	void *buf;
+
+	for (qno = 0; qno < client->rx_qn; qno++) {
+		desc = client->rx_q[qno].base;
+
+		for (ii = 0; ii < client->rx_q[qno].size; ii++) {
+			buf = (void *)desc->data;
+			if (buf) {
+				/* desc->data points past the headroom; rewind to
+				 * the allocation start before freeing. */
+				buf -= pfe_pkt_headroom;
+
+				if (page_mode)
+					free_page((unsigned long)buf);
+				else
+					kfree(buf);
+
+				desc->ctrl = 0;
+			}
+
+			desc++;
+		}
+	}
+
+	/* All queue descriptor arrays live in this single allocation. */
+	kfree(client->rx_qbase);
+}
+
+
+/*This function allocates memory for the rxq descriptors and pre-fill rx queues
+ * with buffers.
+ * @param[in] client	client context
+ * @param[in] q_size	size of the rxQ, all queues are of same size
+ * @return 0 on success, 1 on allocation failure (not an errno).
+ * NOTE(review): inc_cl_idx masks with (size - 1), so q_size is presumably
+ * required to be a power of two -- confirm at call sites.
+ */
+static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int q_size)
+{
+	struct rx_queue_desc *desc;
+	struct hif_client_rx_queue *queue;
+	int ii, qno;
+
+	/*Allocate memory for the client queues */
+	client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct rx_queue_desc), GFP_KERNEL);
+	if (!client->rx_qbase){
+		goto err;
+	}
+
+	/* Carve the single allocation into per-queue descriptor arrays. */
+	for (qno = 0; qno < client->rx_qn; qno++) {
+		queue = &client->rx_q[qno];
+
+		queue->base = client->rx_qbase + qno * q_size * sizeof(struct rx_queue_desc);
+		queue->size = q_size;
+		queue->read_idx = 0;
+		queue->write_idx = 0;
+
+		dbg_print_info("rx queue: %d, base: %p, size: %d \n", qno, queue->base, queue->size);
+	}
+
+	/* Hand every descriptor to HIF (CL_DESC_OWN) with the buffer length. */
+	for (qno = 0; qno < client->rx_qn; qno++) {
+		queue = &client->rx_q[qno];
+		desc = queue->base;
+
+		for (ii = 0; ii < queue->size; ii++) {
+			desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
+			desc++;
+		}
+	}
+
+	return 0;
+
+err:
+	return 1;
+}
+
+/* Advance a client queue index with power-of-two wrap-around.  Relies on a
+ * local variable named `queue` being in scope at the expansion site. */
+#define inc_cl_idx(idxname) idxname = (idxname+1) & (queue->size-1)
+
+/* Sanity check on a tx queue at teardown time; only reports leaks, it
+ * cannot reclaim them. */
+static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
+{
+	dbg_print_info( "%s\n", __func__);
+
+	/* Check if there are any pending packets. Client must flush the tx queues
+	before unregistering, by calling hif_lib_tx_get_next_complete() */
+	/* Hif no longer calls since we are no longer registered */
+
+	if (queue->tx_pending)
+		printk(KERN_ERR "%s: pending transmit packets\n", __func__);
+}
+
+/* Release the client's tx descriptor memory after checking every queue
+ * has been drained (see hif_lib_client_cleanup_tx_queue). */
+static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
+{
+	int qno;
+
+	dbg_print_info("%s\n", __func__);
+
+	for (qno = 0; qno < client->tx_qn; qno++) {
+		hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
+	}
+
+	/* All queue descriptor arrays live in this single allocation. */
+	kfree(client->tx_qbase);
+}
+
+/* Allocate and initialize the client's tx queue descriptor arrays.
+ * @param[in] client	client context
+ * @param[in] q_size	size of each txQ, all queues are of same size
+ * @return 0 on success, 1 on allocation failure (not an errno).
+ */
+static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int q_size)
+{
+	struct hif_client_tx_queue *queue;
+	int qno;
+
+	client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct tx_queue_desc), GFP_KERNEL);
+	if (!client->tx_qbase) {
+		return 1;
+	}
+
+	/* Carve the single allocation into per-queue descriptor arrays. */
+	for (qno = 0; qno < client->tx_qn; qno++) {
+		queue = &client->tx_q[qno];
+
+		queue->base = client->tx_qbase + qno * q_size * sizeof(struct tx_queue_desc);
+		queue->size = q_size;
+		queue->read_idx = 0;
+		queue->write_idx = 0;
+		queue->tx_pending = 0;
+
+		dbg_print_info("tx queue: %d, base: %p, size: %d \n", qno, queue->base, queue->size);
+	}
+
+	return 0;
+}
+
+/* No-op event handler installed for clients that register without one. */
+static int hif_lib_event_dummy( void *priv, int event_type, int qno)
+{
+	return 0;
+}
+
+/* Register a HIF client: allocate its rx/tx queues, publish them in the
+ * client's slot of the HIF shared memory, and notify the HIF driver.
+ * Caller must have filled client->pfe, id, rx_qn/tx_qn, rx_qsize/tx_qsize
+ * and (optionally) event_handler / user_cpu_id.
+ * @return 0 on success, -EINVAL for a bad/duplicate client, -ENOMEM. */
+int hif_lib_client_register(struct hif_client_s *client)
+{
+	struct hif_shm *hif_shm;
+	struct hif_client_shm *client_shm;
+	int err, i;
+//	int loop_cnt = 0;
+
+	dbg_print_info("%s\n", __func__);
+
+	spin_lock_bh(&pfe->hif.lock);
+	if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) || (pfe->hif_client[client->id])) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	hif_shm = client->pfe->hif.shm;
+
+	if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
+		err = -ENOMEM;
+		goto err_rx;
+	}
+
+	if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
+		err = -ENOMEM;
+		goto err_tx;	/* unwinds the rx buffers allocated above */
+	}
+
+	if (!client->event_handler)
+		client->event_handler = hif_lib_event_dummy;
+
+	/*Initialize client specific shared memory */
+	client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
+	client_shm->rx_qbase = (u32)client->rx_qbase;
+	client_shm->rx_qsize = client->rx_qsize;
+	client_shm->tx_qbase = (u32)client->tx_qbase;
+	client_shm->tx_qsize = client->tx_qsize;
+	client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) | (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
+//	spin_lock_init(&client->rx_lock);
+
+	for (i = 0; i < HIF_EVENT_MAX; i++) {
+		client->queue_mask[i] = 0; /* By default all events are unmasked */
+	}
+
+	/*Indicate to HIF driver*/
+	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
+
+	dbg_print_info("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
+			__func__, client, client->id, client->tx_qsize, client->rx_qsize);
+
+	client->cpu_id = -1;
+
+	pfe->hif_client[client->id] = client;
+	spin_unlock_bh(&pfe->hif.lock);
+
+	/*This function need to be called with out holding hif.lock */
+	hif_lib_set_rx_cpu_affinity(client, client->user_cpu_id);
+
+	return 0;
+
+err_tx:
+	hif_lib_client_release_rx_buffers(client);
+
+err_rx:
+err:
+	spin_unlock_bh(&pfe->hif.lock);
+	return err;
+}
+
+/* Unregister a HIF client: notify the HIF driver first (so it stops using
+ * the queues), then free tx/rx resources and clear the client slot.
+ * @return always 0. */
+int hif_lib_client_unregister(struct hif_client_s *client)
+{
+	struct pfe *pfe = client->pfe;
+	u32 client_id = client->id;
+
+	printk(KERN_INFO "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n",
+			__func__, client, client->id, client->tx_qsize, client->rx_qsize);
+
+
+	spin_lock_bh(&pfe->hif.lock);
+	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
+
+	hif_lib_client_release_tx_buffers(client);
+	hif_lib_client_release_rx_buffers(client);
+	pfe->hif_client[client_id] = NULL;
+	spin_unlock_bh(&pfe->hif.lock);
+
+	return 0;
+}
+
+/* Re-arm an event for a client queue (clears the pending bit set by
+ * hif_lib_indicate_client) and, for rx, immediately re-deliver the event
+ * if a completed descriptor is already waiting -- otherwise that packet
+ * would sit unnoticed until the next interrupt.
+ *
+ * @param[in] client	client context
+ * @param[in] event	event type (EVENT_*)
+ * @param[in] qno	client queue number
+ * @return 0 on success, -1 on invalid event/queue number.
+ *
+ * Fix: the original computed &client->rx_q[qno] and dereferenced it to
+ * build `desc` BEFORE validating qno, reading out of bounds for a bad
+ * queue number.  The lookup now happens only after validation. */
+int hif_lib_event_handler_start(struct hif_client_s *client, int event, int qno)
+{
+	struct hif_client_rx_queue *queue;
+	struct rx_queue_desc *desc;
+
+	if ((event >= HIF_EVENT_MAX) || ( qno >= HIF_CLIENT_QUEUES_MAX)) {
+		dbg_print_info("%s: Unsupported event : %d  queue number : %d\n", __func__, event, qno);
+		return -1;
+	}
+
+	queue = &client->rx_q[qno];
+	desc = queue->base + queue->read_idx;
+
+	test_and_clear_bit(qno, &client->queue_mask[event]);
+
+	switch (event) {
+	case EVENT_RX_PKT_IND:
+		/* Descriptor already completed (ownership returned to the
+		 * client) => report it right away. */
+		if (!(desc->ctrl & CL_DESC_OWN))
+			hif_lib_indicate_client(client->id, EVENT_RX_PKT_IND, qno);
+		break;
+
+	case EVENT_HIGH_RX_WM:
+	case EVENT_TXDONE_IND:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+
+/*This function gets one packet from the specified client queue
+ * It also re-arms the rx descriptor so HIF can refill it.
+ *
+ * @param[in]  client	 client context
+ * @param[in]  qno	 client rx queue number
+ * @param[out] len	 payload length
+ * @param[out] ofst	 payload offset within the returned buffer
+ * @param[out] rx_ctrl	 client control word from the descriptor
+ * @param[out] desc_ctrl raw descriptor control word
+ * @param[out] priv_data PFE private header pointer, or NULL if none
+ * @return buffer start (headroom included), or NULL if queue is empty.
+ *
+ * Fixes vs. original:
+ *  - the WiFi headroom adjustment bumped the local `priv_data` double
+ *    pointer instead of the caller-visible *priv_data;
+ *  - the EMULATION debug printk used `desc` before it was assigned.
+ */
+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int *ofst, unsigned int *rx_ctrl, unsigned int *desc_ctrl, void **priv_data)
+{
+	struct hif_client_rx_queue *queue = &client->rx_q[qno];
+	struct rx_queue_desc *desc = queue->base + queue->read_idx;
+	void *pkt = NULL;
+
+#if defined(CONFIG_PLATFORM_EMULATION)
+	printk(KERN_INFO "%s:qno:%d cid:%d desc:%p rdidx:%d \n",
+			__func__, qno, client->id, desc,
+			queue->read_idx);
+#endif
+
+	/* Following lock is to protect rx queue access from, hif_lib_event_handler_start.
+	 * In general below lock is not required, because hif_lib_xmit_pkt and
+	 * hif_lib_event_handler_start are called from napi poll and which is not
+	 * re-entrant. But if some client use in different way this lock is required.
+	 */
+	//spin_lock_irqsave(&client->rx_lock, flags);
+	if (!(desc->ctrl & CL_DESC_OWN)) {
+		pkt = desc->data - pfe_pkt_headroom;
+
+		*rx_ctrl = desc->client_ctrl;
+		*desc_ctrl = desc->ctrl;
+
+		if (desc->ctrl & CL_DESC_FIRST) {
+			/* First buffer of a packet: strip the HIF header and
+			 * optional private data area. */
+			u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
+
+			if (size) {
+				*len = CL_DESC_BUF_LEN(desc->ctrl) - PFE_PKT_HEADER_SZ - size;
+				*ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ + size;
+				*priv_data = desc->data + PFE_PKT_HEADER_SZ;
+			} else {
+				*len = CL_DESC_BUF_LEN(desc->ctrl) - PFE_PKT_HEADER_SZ;
+				*ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
+				*priv_data = NULL;
+			}
+
+			if(*rx_ctrl & HIF_CTRL_RX_WIFI_HEADROOM) {
+				/* PFE inserts empty head room for WiFi Tx packets */
+				*ofst += PFE_WIFI_PKT_HEADROOM;
+				*len -= PFE_WIFI_PKT_HEADROOM;
+				/* Advance the returned private-data pointer too
+				 * (original advanced the local void** only). */
+				if (*priv_data)
+					*priv_data += PFE_WIFI_PKT_HEADROOM;
+			}
+		} else {
+			/* Continuation buffer: full payload, no HIF header. */
+			*len = CL_DESC_BUF_LEN(desc->ctrl);
+			*ofst = pfe_pkt_headroom;
+		}
+
+		desc->data = NULL;	// Needed so we don't free a buffer/page twice on module_exit
+		smp_wmb();
+
+		/* Re-arm the descriptor and move on. */
+		desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
+		inc_cl_idx(queue->read_idx);
+	}
+
+	//spin_unlock_irqrestore(&client->rx_lock, flags);
+	return pkt;
+}
+
+/* Queue a TSO header segment ahead of the data buffers of a TSO packet.
+ * Fills the next slot of the preallocated DMA-coherent tso_hdr array
+ * (indexed by hif.Txtosend) and submits its PHYSICAL address to HIF with
+ * HIF_DONT_DMA_MAP, since it is already DMA-visible.
+ * Caller is expected to hold the HIF tx lock (see callers of
+ * __hif_xmit_pkt) -- TODO confirm. */
+void __hif_lib_xmit_tso_hdr(struct hif_client_s *client, unsigned int qno, u32 client_ctrl, unsigned int ip_off,
+				unsigned int ip_id, unsigned int ip_len, unsigned int tcp_off, unsigned int tcp_seq)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+	struct tx_queue_desc *desc = queue->base + queue->write_idx;
+	struct hif_tso_hdr *tso_hdr;
+	u32 tso_hdr_p;
+
+//	printk(KERN_INFO "%s\n",__func__);
+
+	tso_hdr = pfe->hif.tso_hdr_v + pfe->hif.Txtosend;
+	tso_hdr_p = (u32)pfe->hif.tso_hdr_p + (pfe->hif.Txtosend * sizeof(struct hif_tso_hdr));
+
+	tso_hdr->ip_off = ip_off;
+	tso_hdr->ip_id = ip_id;
+	tso_hdr->ip_len = ip_len;
+	tso_hdr->tcp_off = tcp_off;
+	tso_hdr->tcp_seq = tcp_seq;
+
+	tso_hdr->pkt_hdr.client_id = client->id;
+	tso_hdr->pkt_hdr.qNo = qno;
+	tso_hdr->pkt_hdr.client_ctrl = client_ctrl;
+//	printk(KERN_DEBUG "%s : seq: %x id: %x desc_n: %d desc_addr: %p tx_hdr_v : %p tx_hdr_p: %x, ip_off: %d tcp_off: %d\n",__func__, tso_hdr->tcp_seq, tso_hdr->ip_id, pfe->hif.Txtosend, desc, tso_hdr, tso_hdr_p, tso_hdr->ip_off, tso_hdr->tcp_off );
+
+	desc->data = (void *)0x1; /* bogus non null value to avoid stopping tx done processing */
+	desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(0);
+
+	__hif_xmit_pkt(&pfe->hif, client->id, qno, (void *)tso_hdr_p, sizeof(struct hif_tso_hdr), HIF_DONT_DMA_MAP | HIF_TSO);
+
+	inc_cl_idx(queue->write_idx);
+	queue->tx_pending++;
+}
+
+/* Write the 4-byte HIF packet header {client_id, qNo, client_ctrl}.
+ * A 4-byte aligned destination is written with one u32 store (layout
+ * matches little-endian, per -DENDIAN_LITTLE in the build flags);
+ * otherwise two u16 stores are used (2-byte alignment still assumed). */
+static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int client_id, unsigned int qno, u32 client_ctrl)
+{
+	/* Optimize the write since the destinaton may be non-cacheable */
+	if (!((unsigned long)pkt_hdr & 0x3)) {
+		((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) | client_id;
+	} else {
+		((u16 *)pkt_hdr)[0] = (qno << 8) | client_id;
+		((u16 *)pkt_hdr)[1] = client_ctrl;
+	}
+}
+
+/*This function puts the given packet in the specific client queue
+ * Scatter-gather variant: for the first buffer it prepends the HIF header
+ * into the headroom in front of `data`.  No queue-full check here -- the
+ * caller must ensure space (cf. hif_lib_xmit_pkt, which checks). */
+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, unsigned int flags, void *client_data)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+	struct tx_queue_desc *desc = queue->base + queue->write_idx;
+
+	//printk(KERN_INFO "%s\n",__func__);
+
+	/* First buffer */
+	if (flags & HIF_FIRST_BUFFER)
+	{
+		/* Headroom in front of `data` must hold a struct hif_hdr. */
+		data -= sizeof(struct hif_hdr);
+		len += sizeof(struct hif_hdr);
+
+		hif_hdr_write(data, client->id, qno, client_ctrl);
+	}
+
+	desc->data = client_data;
+	desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
+
+	__hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
+
+	inc_cl_idx(queue->write_idx);
+	queue->tx_pending++;
+	queue->jiffies_last_packet = jiffies;
+}
+
+/*This function puts the given packet in the specific client queue
+ * Single-buffer variant with a queue-full check.
+ * @return 0 on success, 1 if the client queue or HIF is full. */
+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, void *client_data)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+	struct tx_queue_desc *desc = queue->base + queue->write_idx;
+
+	//printk(KERN_INFO "%s\n",__func__);
+
+	if (queue->tx_pending < queue->size) {
+		/*Construct pkt header */
+
+		/* Headroom in front of `data` must hold a struct hif_hdr. */
+		data -= sizeof(struct hif_hdr);
+		len += sizeof(struct hif_hdr);
+
+		hif_hdr_write(data, client->id, qno, client_ctrl);
+
+		desc->data = client_data;
+		desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID);
+
+		if (hif_xmit_pkt(&pfe->hif, client->id, qno, data, len))
+			return 1;
+
+		inc_cl_idx(queue->write_idx);
+		queue->tx_pending++;
+		queue->jiffies_last_packet = jiffies;
+
+		return 0;
+	}
+
+	dbg_print_info("%s Tx client %d qno %d is full\n",__func__, client->id, qno);
+	return 1;
+}
+
+/* Return the client_data of the next completed tx descriptor, or NULL if
+ * nothing has completed.  If the head descriptor is still owned by HIF,
+ * triggers a tx-done reclaim (up to `count` descriptors) and re-checks.
+ * On success, *flags receives the descriptor's CL_DESC flags. */
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, unsigned int *flags, int count)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+	struct tx_queue_desc *desc = queue->base + queue->read_idx;
+
+	dbg_print_info("%s: qno : %d rd_indx: %d pending:%d\n",__func__, qno, queue->read_idx, queue->tx_pending);
+
+	if (!queue->tx_pending)
+		return NULL;
+
+	if (desc->ctrl & CL_DESC_OWN) {
+		hif_tx_done_process(&pfe->hif, count);
+
+		//Check again, if packets done in tx queue.
+		if (desc->ctrl & CL_DESC_OWN)
+			return NULL;
+	}
+
+	inc_cl_idx(queue->read_idx);
+	queue->tx_pending--;
+
+	*flags = CL_DESC_GET_FLAGS(desc->ctrl);
+
+	return desc->data;
+}
+
+//FIXME: TMU queues length mapping needs to be declared in shared PFE/PFE_CTRL header
+/* Reset every per-GEMAC / per-queue TMU tx credit to its maximum depth
+ * (queue 0 uses its own depth, other queues share DEFAULT_MAX_QDEPTH). */
+static void hif_lib_tmu_credit_init(struct pfe *pfe)
+{
+	int i, q;
+
+	for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
+		for (q = 0; q < EMAC_TXQ_CNT; q++) {
+			pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ? DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
+			pfe->tmu_credit.tx_credit[i][q] = pfe->tmu_credit.tx_credit_max[i][q];
+		}
+}
+/** __hif_lib_update_credit
+ * Recompute the tx credit for a TMU queue from the TMU's transmitted-packet
+ * counter read out of PE DMEM:
+ *   credit = credit_max - (packets we queued - packets TMU sent).
+ * Only active when tx_qos is enabled.  Caller must hold hif.tx_lock
+ * (cf. hif_lib_update_credit).
+ *
+ * @param[in] client	hif client context
+ * @param[in] queue	queue number in match with TMU
+ */
+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
+{
+	unsigned int tmu_tx_packets, tmp;
+
+	if (tx_qos) {
+		/* TMU PE id is derived from the client id -- presumably one TMU
+		 * per EMAC client; TODO confirm mapping. */
+		tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID + client->id, virt_to_tmu_dmem(&tx_trans[queue]), 4));
+
+		// tx_packets counter overflowed
+		if (tmu_tx_packets > pfe->tmu_credit.tx_packets[client->id][queue]) {
+			tmp = UINT_MAX - tmu_tx_packets + pfe->tmu_credit.tx_packets[client->id][queue];
+			pfe->tmu_credit.tx_credit[client->id][queue] = pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
+		}
+		// TMU tx <= pfe_eth tx, normal case or both OF since last time
+		else
+			pfe->tmu_credit.tx_credit[client->id][queue] = pfe->tmu_credit.tx_credit_max[client->id][queue] - (pfe->tmu_credit.tx_packets[client->id][queue] - tmu_tx_packets);
+	}
+}
+
+/** hif_lib_update_credit
+ * Locked wrapper around __hif_lib_update_credit (takes hif.tx_lock).
+ *
+ * @param[in] client	hif client context
+ * @param[in] queue	queue number in match with TMU
+ */
+void hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
+{
+	spin_lock_bh(&pfe->hif.tx_lock);
+	__hif_lib_update_credit(client, queue);
+	spin_unlock_bh(&pfe->hif.tx_lock);
+}
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+/** pfe_hif_lib_cpu_notifier
+ * CPU hotplug callback: when a CPU comes online, restore the rx affinity of
+ * clients that had requested it (user_cpu_id); when it is about to go down,
+ * fall back to cpu_id = -1 ("same CPU as HIF rx") for affected clients.
+ * Uses irqsave locking since notifier context differs from the BH paths.
+ *
+ * @param[in] nfb	cpu notifier control block.
+ * @param[in] action	event to notify.
+ * @param[in] hcpu	cpu id.
+ */
+static int pfe_hif_lib_cpu_notifier(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+	int ii;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pfe->hif.lock, flags);
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		printk(KERN_INFO "%s : CPU (%d)  is up\n", __func__, cpu);
+
+		for (ii = 0; ii < HIF_CLIENTS_MAX; ii++) {
+			if (pfe->hif_client[ii] && (pfe->hif_client[ii]->user_cpu_id == cpu))
+				pfe->hif_client[ii]->cpu_id = pfe->hif_client[ii]->user_cpu_id;
+		}
+		break;
+
+	case CPU_DOWN_PREPARE:
+		printk(KERN_INFO "%s : CPU (%d) is down\n", __func__, cpu);
+
+		for (ii = 0; ii < HIF_CLIENTS_MAX; ii++) {
+			if (pfe->hif_client[ii] && (pfe->hif_client[ii]->user_cpu_id == cpu))
+				pfe->hif_client[ii]->cpu_id = -1;
+		}
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&pfe->hif.lock, flags);
+
+	return NOTIFY_OK;
+}
+#endif
+
+
+/* Initialize the HIF library layer: pick packet geometry from lro_mode
+ * (page-per-buffer vs. kmalloc with headroom), reset TMU credits, set up
+ * the HIF shared memory pool and (if hotplug is enabled) register the CPU
+ * notifier.  Must run before pfe_hif_init.
+ * @return 0 on success, negative errno on failure. */
+int pfe_hif_lib_init(struct pfe *pfe)
+{
+	int rc;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	if (lro_mode) {
+		/* LRO/toe mode: whole pages, no headroom, size capped to what
+		 * the HIF hardware supports. */
+		page_mode = 1;
+		pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
+		pfe_pkt_headroom = 0;
+	} else {
+		page_mode = 0;
+		pfe_pkt_size = PFE_PKT_SIZE;
+		pfe_pkt_headroom = PFE_PKT_HEADROOM;
+	}
+	hif_lib_tmu_credit_init(pfe);
+	pfe->hif.shm = &ghif_shm;
+	rc = pfe_hif_shm_init(pfe->hif.shm);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	if (rc)
+		goto err_shm_init;
+
+	pfe->hif.cpu_notify.notifier_call = pfe_hif_lib_cpu_notifier;
+	pfe->hif.cpu_notify.priority = 0;
+
+	rc = register_cpu_notifier(&pfe->hif.cpu_notify);
+
+	/* Notifier registration failed: undo the shm init before returning. */
+	if (rc)
+		pfe_hif_shm_clean(pfe->hif.shm);
+
+
+err_shm_init:
+#endif
+
+	return rc;
+}
+
+
+/* Tear down the HIF library layer: drop the CPU notifier and free the
+ * shared-memory rx buffer pool.  Per pfe_hif_shm_clean's contract, this
+ * should run after pfe_hif_exit. */
+void pfe_hif_lib_exit(struct pfe *pfe)
+{
+	printk(KERN_INFO "%s\n", __func__);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	unregister_cpu_notifier(&pfe->hif.cpu_notify);
+#endif
+	pfe_hif_shm_clean(pfe->hif.shm);
+}
diff --git a/pfe_ctrl/pfe_hif_lib.h b/pfe_ctrl/pfe_hif_lib.h
new file mode 100644
index 0000000..bae74e8
--- /dev/null
+++ b/pfe_ctrl/pfe_hif_lib.h
@@ -0,0 +1,194 @@
+#ifndef _PFE_HIF_LIB_H_
+#define _PFE_HIF_LIB_H_
+
+#include "pfe_hif.h"
+
+#ifdef HIF_LIB_DEBUG
+#define dbg_print_info( fmt, args...) \
+ printk(KERN_INFO fmt, ##args)
+#else
+#define dbg_print_info( fmt, args...)
+#endif
+
+#define HIF_CL_REQ_TIMEOUT 10
+
+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
+#define GFP_DMA_PFE (GFP_DMA_NCNB | __GFP_NOWARN)
+#else
+#define GFP_DMA_PFE 0
+#endif
+
+enum {
+ REQUEST_CL_REGISTER = 0,
+ REQUEST_CL_UNREGISTER,
+ HIF_REQUEST_MAX
+};
+
+enum {
+ EVENT_HIGH_RX_WM = 0, /* Event to indicate that client rx queue is reached water mark level */
+ EVENT_RX_PKT_IND, /* Event to indicate that, packet recieved for client */
+ EVENT_TXDONE_IND, /* Event to indicate that, packet tx done for client */
+ HIF_EVENT_MAX
+};
+
+/*structure to store client queue info */
+struct hif_client_rx_queue {
+ struct rx_queue_desc *base;
+ u32 size;
+ u32 read_idx;
+ u32 write_idx;
+};
+
+struct hif_client_tx_queue {
+ struct tx_queue_desc *base;
+ u32 size;
+ u32 read_idx;
+ u32 write_idx;
+ u32 tx_pending;
+ unsigned long jiffies_last_packet;
+};
+
+/* Per-client control block for a HIF library client (e.g. an ethernet or
+ * WiFi driver).  Filled by the client before hif_lib_client_register(). */
+struct hif_client_s
+{
+	int	id;			/* client id, index into pfe->hif_client[] */
+	int	tx_qn;			/* number of tx queues */
+	int	rx_qn;			/* number of rx queues */
+	void	*rx_qbase;		/* single allocation backing all rx queue descriptors */
+	void	*tx_qbase;		/* single allocation backing all tx queue descriptors */
+	/* FIXME tx/rx_qsize fields can be removed after per queue depth is supported*/
+	int	tx_qsize;
+	int	rx_qsize;
+	int	cpu_id;			/* CPU currently used for rx events; -1 = same as HIF */
+	int	user_cpu_id;		/* CPU requested by the user (applied when online) */
+
+//	spinlock_t 	rx_lock;
+	struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+	struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+	int (*event_handler)(void *priv, int event, int data);	/* see EVENT_* */
+	unsigned long queue_mask[HIF_EVENT_MAX];	/* per-queue pending-event bits */
+	struct pfe *pfe;
+	void *priv;			/* opaque cookie passed back to event_handler */
+};
+
+
+/* Client specific shared memory
+ * It contains number of Rx/Tx queues, base addresses and queue sizes */
+struct hif_client_shm {
+ u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
+ u32 rx_qbase; /*Rx queue base address */
+ u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
+ u32 tx_qbase; /* Tx queue base address */
+ u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
+};
+
+/*Client shared memory ctrl bit description */
+#define CLIENT_CTRL_RX_Q_CNT_OFST 0
+#define CLIENT_CTRL_TX_Q_CNT_OFST 8
+#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) & 0xFF)
+#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) & 0xFF)
+
+
+
+/*Shared memory used to communicate between HIF driver and host/client drivers.
+ * Before starting the hif driver, rx_buf_pool and rx_buf_pool_cnt should be
+ * initialized with host buffers and buffers count in the pool.
+ * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
+ *
+ */
+/* Top-level HIF shared-memory layout; one instance (ghif_shm) shared by the
+ * HIF driver and all clients. */
+struct hif_shm {
+	u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
+	void *rx_buf_pool[HIF_RX_DESC_NT];/*Rx buffers required to initialize HIF rx descriptors */
+	unsigned long gClient_status[2]; /*Global client status bit mask */
+	u32 hif_qfull; /*TODO Client-id that caused for the TMU3 queue stop */
+	u32 hif_qresume; /*TODO */
+	struct hif_client_shm client[HIF_CLIENTS_MAX]; /* Client specific shared memory */
+};
+
+
+#define CL_DESC_OWN (1 << 31) /* This sets owner ship to HIF driver */
+#define CL_DESC_LAST (1 << 30) /* This indicates last packet for multi buffers handling */
+#define CL_DESC_FIRST (1 << 29) /* This indicates first packet for multi buffers handling */
+#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
+#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
+#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
+
+struct rx_queue_desc {
+ void *data;
+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
+ u32 client_ctrl;
+};
+
+struct tx_queue_desc {
+ void *data;
+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
+};
+
+/* HIF Rx is not working properly for 2-byte aligned buffers and
+ * ip_header should be 4byte aligned for better performance.
+ * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4byte aligned.
+ */
+#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
+#define PFE_BUF_SIZE 2048 /* must be big enough for headroom, pkt size and skb shared info */
+#define PFE_PKT_HEADROOM 128
+#define SKB_SHARED_INFO_SIZE 256 /* At least sizeof(struct skb_shared_info) bytes */
+
+//#define PFE_PKT_SIZE 1544 /* maximum ethernet packet size */
+#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM - SKB_SHARED_INFO_SIZE) /* maximum ethernet packet size after reassembly offload*/
+#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
+#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
+#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
+#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE + MAX_L4_HDR_SIZE)
+#define MAX_WIFI_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE + 6)
+#define MAX_PFE_PKT_SIZE 16380UL /* Used in page mode to clamp packet size to the maximum supported by the hif hw interface (<16KiB) */
+
+extern unsigned int pfe_pkt_size;
+extern unsigned int pfe_pkt_headroom;
+extern unsigned int page_mode;
+extern unsigned int lro_mode;
+extern unsigned int tx_qos;
+
+int pfe_hif_lib_init(struct pfe *pfe);
+void pfe_hif_lib_exit(struct pfe *pfe);
+int hif_lib_client_register(struct hif_client_s *client);
+int hif_lib_client_unregister(struct hif_client_s *client);
+void __hif_lib_xmit_tso_hdr(struct hif_client_s *client, unsigned int qno, u32 client_ctrl, unsigned int ip_off, unsigned int ip_id, unsigned int ip_len, unsigned int tcp_off, unsigned int tcp_seq);
+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, unsigned int flags, void *client_data);
+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data, unsigned int len, u32 client_ctrl, void *client_data);
+void hif_lib_indicate_client(int cl_id, int event, int data);
+int hif_lib_event_handler_start( struct hif_client_s *client, int event, int data );
+int hif_lib_tmu_queue_start( struct hif_client_s *client, int qno );
+int hif_lib_tmu_queue_stop( struct hif_client_s *client, int qno );
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno, unsigned int *flags, int count);
+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int *ofst, unsigned int *rx_ctrl, unsigned int *desc_ctrl, void **priv_data);
+void hif_lib_update_credit(struct hif_client_s *client, unsigned int qno);
+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
+void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
+/* Number of free slots remaining in the client's tx queue @qno. */
+static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int qno)
+{
+	struct hif_client_tx_queue *q = &client->tx_q[qno];
+
+	return q->size - q->tx_pending;
+}
+
+/* Current write index of the client's tx queue @qno. */
+static inline int hif_lib_get_tx_wrIndex(struct hif_client_s *client, unsigned int qno)
+{
+	return client->tx_q[qno].write_idx;
+}
+
+
+/* Number of packets queued but not yet completed on tx queue @qno. */
+static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int qno)
+{
+	return client->tx_q[qno].tx_pending;
+}
+
+#define hif_lib_tx_credit_avail(pfe, id, qno) pfe->tmu_credit.tx_credit[id][qno]
+#define hif_lib_tx_credit_max(pfe, id, qno) pfe->tmu_credit.tx_credit_max[id][qno]
+#define hif_lib_tx_credit_use(pfe, id, qno, credit) do {if (tx_qos) {pfe->tmu_credit.tx_credit[id][qno]-= credit; pfe->tmu_credit.tx_packets[id][qno]+=credit;}} while (0)
+
+#endif /* _PFE_HIF_LIB_H_ */
diff --git a/pfe_ctrl/pfe_hw.c b/pfe_ctrl/pfe_hw.c
new file mode 100644
index 0000000..383ba14
--- /dev/null
+++ b/pfe_ctrl/pfe_hw.c
@@ -0,0 +1,153 @@
+#include "pfe_mod.h"
+#include "pfe_hw.h"
+
+/* Functions to handle most of pfe hw register initialization */
+
+/* One-shot PFE hardware bring-up: build the config blocks for each engine
+ * (class, TMU, util, BMUs, GPIs), dump engine versions, then initialize in
+ * order BMU -> class -> TMU -> util -> GPIs and finally enable the BMUs.
+ * The ordering mirrors the hardware dependencies -- do not reorder.
+ * @return always 0. */
+int pfe_hw_init(struct pfe *pfe)
+{
+	CLASS_CFG class_cfg = {
+		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+		.route_table_baseaddr = pfe->ddr_phys_baseaddr + ROUTE_TABLE_BASEADDR,
+		.route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
+	};
+
+	TMU_CFG tmu_cfg = {
+		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+		.llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
+		.llm_queue_len = TMU_LLM_QUEUE_LEN,
+	};
+
+#if !defined(CONFIG_UTIL_DISABLED)
+	UTIL_CFG util_cfg = {
+		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+	};
+#endif
+
+	/* BMU1 buffers live in on-chip LMEM, BMU2 buffers in DDR. */
+	BMU_CFG bmu1_cfg = {
+		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR + BMU1_LMEM_BASEADDR),
+		.count = BMU1_BUF_COUNT,
+		.size = BMU1_BUF_SIZE,
+	};
+
+	BMU_CFG bmu2_cfg = {
+		.baseaddr = pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR,
+		.count = BMU2_BUF_COUNT,
+		.size = BMU2_BUF_SIZE,
+	};
+
+	GPI_CFG egpi1_cfg = {
+		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
+		.tmlf_txthres = EGPI1_TMLF_TXTHRES,
+		.aseq_len = EGPI1_ASEQ_LEN,
+	};
+
+	GPI_CFG egpi2_cfg = {
+		.lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
+		.tmlf_txthres = EGPI2_TMLF_TXTHRES,
+		.aseq_len = EGPI2_ASEQ_LEN,
+	};
+
+	GPI_CFG egpi3_cfg = {
+		.lmem_rtry_cnt = EGPI3_LMEM_RTRY_CNT,
+		.tmlf_txthres = EGPI3_TMLF_TXTHRES,
+		.aseq_len = EGPI3_ASEQ_LEN,
+	};
+
+	GPI_CFG hgpi_cfg = {
+		.lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
+		.tmlf_txthres = HGPI_TMLF_TXTHRES,
+		.aseq_len = HGPI_ASEQ_LEN,
+	};
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	/* LRO requires the classifier in TCP offload (toe) mode. */
+	if (lro_mode)
+		class_cfg.toe_mode = 1;
+
+	/* Version dumps double as a basic register-access sanity check. */
+	printk(KERN_INFO "CLASS version: %x\n", readl(CLASS_VERSION));
+	printk(KERN_INFO "TMU version: %x\n", readl(TMU_VERSION));
+
+	printk(KERN_INFO "BMU1 version: %x\n", readl(BMU1_BASE_ADDR + BMU_VERSION));
+	printk(KERN_INFO "BMU2 version: %x\n", readl(BMU2_BASE_ADDR + BMU_VERSION));
+
+	printk(KERN_INFO "EMAC1 network cfg: %x\n", readl(EMAC1_BASE_ADDR + EMAC_NETWORK_CONFIG));
+	printk(KERN_INFO "EMAC2 network cfg: %x\n", readl(EMAC2_BASE_ADDR + EMAC_NETWORK_CONFIG));
+#if !defined(CONFIG_PLATFORM_PCI)
+	printk(KERN_INFO "EMAC3 network cfg: %x\n", readl(EMAC3_BASE_ADDR + EMAC_NETWORK_CONFIG));
+#endif
+
+	printk(KERN_INFO "EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR + GPI_VERSION));
+	printk(KERN_INFO "EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR + GPI_VERSION));
+#if !defined(CONFIG_PLATFORM_PCI)
+	printk(KERN_INFO "EGPI3 version: %x\n", readl(EGPI3_BASE_ADDR + GPI_VERSION));
+#endif
+	printk(KERN_INFO "HGPI version: %x\n", readl(HGPI_BASE_ADDR + GPI_VERSION));
+
+#if !defined(CONFIG_PLATFORM_PCI)
+	printk(KERN_INFO "GPT version: %x\n", readl(CBUS_GPT_VERSION));
+#endif
+
+	printk(KERN_INFO "HIF version: %x\n", readl(HIF_VERSION));
+	printk(KERN_INFO "HIF NOPCY version: %x\n", readl(HIF_NOCPY_VERSION));
+
+#if !defined(CONFIG_UTIL_DISABLED)
+	printk(KERN_INFO "UTIL version: %x\n", readl(UTIL_VERSION));
+#endif
+
+	bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
+
+	printk(KERN_INFO "bmu_init(1) done\n");
+
+	bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
+
+	printk(KERN_INFO "bmu_init(2) done\n");
+
+	class_init(&class_cfg);
+
+	printk(KERN_INFO "class_init() done\n");
+
+	tmu_init(&tmu_cfg);
+
+	printk(KERN_INFO "tmu_init() done\n");
+#if !defined(CONFIG_UTIL_DISABLED)
+	util_init(&util_cfg);
+
+	printk(KERN_INFO "util_init() done\n");
+#endif
+	gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
+
+	printk(KERN_INFO "gpi_init(1) done\n");
+
+	gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
+
+	printk(KERN_INFO "gpi_init(2) done\n");
+#if !defined(CONFIG_PLATFORM_PCI)
+	gpi_init(EGPI3_BASE_ADDR, &egpi3_cfg);
+
+	printk(KERN_INFO "gpi_init(3) done\n");
+#endif
+	gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
+
+	printk(KERN_INFO "gpi_init(hif) done\n");
+
+	/* BMUs are enabled last, once everything that uses them is set up. */
+	bmu_enable(BMU1_BASE_ADDR);
+
+	printk(KERN_INFO "bmu_enable(1) done\n");
+
+	bmu_enable(BMU2_BASE_ADDR);
+
+	printk(KERN_INFO "bmu_enable(2) done\n");
+
+	return 0;
+}
+
+/* Counterpart of pfe_hw_init: disable and reset both buffer management
+ * units (other engines are left as-is). */
+void pfe_hw_exit(struct pfe *pfe)
+{
+	printk(KERN_INFO "%s\n", __func__);
+
+	bmu_disable(BMU1_BASE_ADDR);
+	bmu_reset(BMU1_BASE_ADDR);
+
+	bmu_disable(BMU2_BASE_ADDR);
+	bmu_reset(BMU2_BASE_ADDR);
+}
diff --git a/pfe_ctrl/pfe_hw.h b/pfe_ctrl/pfe_hw.h
new file mode 100644
index 0000000..9df71c1
--- /dev/null
+++ b/pfe_ctrl/pfe_hw.h
@@ -0,0 +1,13 @@
+#ifndef _PFE_HW_H_
+#define _PFE_HW_H_
+
+/* Ratio between the PE (HFE) clock and the system/AXI clock, selected per
+ * platform at compile time. */
+#if !defined(CONFIG_PLATFORM_PCI)
+#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
+#else
+#define PE_SYS_CLK_RATIO 0 /* SYS = 40MHz, HFE = 40MHz */
+#endif
+
+int pfe_hw_init(struct pfe *pfe);
+void pfe_hw_exit(struct pfe *pfe);
+
+#endif /* _PFE_HW_H_ */
diff --git a/pfe_ctrl/pfe_licence.sh b/pfe_ctrl/pfe_licence.sh
new file mode 100755
index 0000000..324482c
--- /dev/null
+++ b/pfe_ctrl/pfe_licence.sh
@@ -0,0 +1,83 @@
+#! /bin/sh
+
+if [ -z "$1" ]; then
+ echo "Invalid PFE directory path"
+ exit
+fi
+PFE_PATH=`pwd`/$1
+if [ ! -d "$PFE_PATH" ]; then
+ echo "PFE directory path does not exist : $PFE_PATH"
+ exit
+fi
+pfe_files=$(find ${PFE_PATH} -type f)
+#find ${PFE_PATH} -type f > pfe_files.log
+confidential_files_no=$(grep -nr "THIS FILE IS CONFIDENTIAL" ${PFE_PATH} | wc -l)
+authorized_files_no=$(grep -nr "AUTHORIZED USE IS GOVERNED BY CONFIDENTIALITY AND LICENSE AGREEMENTS WITH MINDSPEED TECHNOLOGIES, INC" ${PFE_PATH} | wc -l)
+unauthorized_files_no=$(grep -nr "UNAUTHORIZED COPIES AND USE ARE STRICTLY PROHIBITED AND MAY RESULT IN CRIMINAL AND/OR CIVIL PROSECUTION" ${PFE_PATH} | wc -l)
+copyright_files_no=$(grep -nr "Copyright" ${PFE_PATH} --exclude="licencse.txt" --exclude="license_full.txt" | wc -l)
+if [ -z "$confidential_files_no" -o -z "$authorized_files_no" -o -z "$unauthorized_files_no" -o -z "$copyright_files_no" ]; then
+ echo "Invalid pattern search. Please check it"
+ exit
+fi
+if [ $confidential_files_no -ne $authorized_files_no -o $confidential_files_no -ne $unauthorized_files_no -o $confidential_files_no -ne $copyright_files_no ]; then
+ echo "Removed text pattern not matched properly. $confidential_files_no: $authorized_files_no : $unauthorized_files_no : $copyright_files_no . Please check the patterns"
+ exit
+fi
+confidential_files=$(grep -nr "THIS FILE IS CONFIDENTIAL" ${PFE_PATH} | cut -d':' -f 1)
+#grep -nr "THIS FILE IS CONFIDENTIAL" ${PFE_PATH} | cut -d':' -f 1 > confidential_files.log
+#no_license_files=$(diff pfe_files_list pfe_conf_files_list --new-line-format="" --old-line-format="%L" --unchanged-line-format="")
+
+for pfe_file in $pfe_files ; do
+ matched=0
+ for conf_file in $confidential_files ; do
+ if [ $pfe_file = $conf_file ]; then
+ matched=1
+ break
+ fi
+ done
+ if [ $matched -eq 0 ]; then
+ no_license_files="$no_license_files $pfe_file"
+ fi
+done
+#echo $no_license_files > no_license_files.log
+DEBUG=""
+for conf_file in $confidential_files ; do
+ if [ `basename $conf_file` = "license.txt" -o `basename $conf_file` = "license_full.txt" ]; then
+ continue
+ fi
+ sed -i '/THIS FILE IS CONFIDENTIAL/,+4 {d}' $conf_file
+ copy_right_line_no=$(grep -nr "Copyright" $conf_file | cut -d':' -f 1)
+# echo "copy_right_line_no=$copy_right_line_no file $conf_file"
+ if [ -z "$copy_right_line_no" -o $copy_right_line_no -eq 0 ]; then
+ echo "Invalid copy right line number of file : $conf_file. Please check it"
+ exit
+ fi
+ if [ $copy_right_line_no -ge 4 ]; then
+ echo "copy right line present after line no 4 in file : $conf_file. Please check it"
+ exit
+ fi
+ $DEBUG head -n $copy_right_line_no $conf_file > ${conf_file}.tmp
+ file_ext=$(basename $conf_file | cut -d'.' -f 2)
+ if [ "$file_ext" = "Makefile" -o "$file_ext" = "makefile" -o "$file_ext" = "mk" ]; then
+ cat ${PFE_PATH}/license.txt | sed -r 's/\*/#/g' >>${conf_file}.tmp
+ else
+ cat ${PFE_PATH}/license.txt >>${conf_file}.tmp
+ fi
+ tail_line_no=`expr $copy_right_line_no + 1`
+# echo "conf_file=$conf_file tail_line_no=$tail_line_no"
+ $DEBUG tail -n +$tail_line_no $conf_file >>${conf_file}.tmp
+ $DEBUG mv ${conf_file}.tmp ${conf_file}
+done
+for no_license_file in $no_license_files ; do
+ if [ `basename $no_license_file` = "license.txt" -o `basename $no_license_file` = "license_full.txt" ]; then
+ continue
+ fi
+ file_ext=$(basename $no_license_file | cut -d'.' -f 2)
+ if [ "$file_ext" = "Makefile" -o "$file_ext" = "makefile" -o "$file_ext" = "mk" ]; then
+ cat ${PFE_PATH}/license_full.txt | sed -r 's/\/\*/#/g' | sed -r 's/\*\//#/g' | sed -r 's/\*/#/g' >${no_license_file}.tmp
+ else
+ cat ${PFE_PATH}/license_full.txt >${no_license_file}.tmp
+ fi
+ cat $no_license_file >>${no_license_file}.tmp
+ mv ${no_license_file}.tmp ${no_license_file}
+done
diff --git a/pfe_ctrl/pfe_mod.c b/pfe_ctrl/pfe_mod.c
new file mode 100644
index 0000000..2ffc879
--- /dev/null
+++ b/pfe_ctrl/pfe_mod.c
@@ -0,0 +1,177 @@
+#include <linux/dma-mapping.h>
+#include "pfe_mod.h"
+#include "version.h"
+
+/* Global driver state, allocated by the bus-specific probe (platform/PCI). */
+struct pfe *pfe;
+
+#if defined(CONFIG_UNIT_TEST)
+extern void pfe_unit_test(struct pfe *pfe);
+/* NOTE(review): this guard lacks the CONFIG_ prefix used by the outer one -
+ * confirm the build actually defines UNIT_TEST_HIF. */
+#if defined(UNIT_TEST_HIF)
+void hif_unit_test(struct pfe *pfe);
+#endif
+#endif
+
+/**
+ * pfe_probe - common probe: bring up all PFE sub-modules in dependency order
+ * @pfe: driver state; the bus-specific probe must have filled in the base
+ *       addresses, ddr_size, irqs, sys_clk and dev before calling this.
+ *
+ * Order is hw -> hif_lib -> hif -> firmware -> ctrl -> eth -> vwd -> pcap ->
+ * perfmon -> sysfs -> diags [-> msp]. Returns 0 on success or a negative
+ * errno; on failure everything initialized so far is torn down in reverse
+ * order via the goto chain below.
+ */
+int pfe_probe(struct pfe *pfe)
+{
+ int rc;
+
+ printk(KERN_INFO "%s\n", __func__);
+
+ printk(KERN_INFO "PFE Driver version:\n%s\nbuilt with pfe sources version: %s\n", PFE_CTRL_VERSION, PFE_VERSION);
+
+ /* The DDR map in pfe_mod.h must fit inside the region the platform gave us. */
+ if (DDR_MAX_SIZE > pfe->ddr_size) {
+ printk(KERN_ERR "%s: required DDR memory (%x) above platform ddr memory (%x)\n", __func__, DDR_MAX_SIZE, pfe->ddr_size);
+ rc = -ENOMEM;
+ goto err_hw;
+ }
+
+ /* Hardware constraint: the BMU2 buffer pool must start on an 8MB boundary. */
+ if (((int) (pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) & (8*SZ_1M - 1)) != 0) {
+ printk(KERN_ERR "%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n", __func__, (int) pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR);
+ rc = -ENOMEM;
+ goto err_hw;
+ }
+
+
+ printk(KERN_INFO "cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
+ (unsigned long)pfe->cbus_baseaddr, (unsigned long)pfe->ddr_baseaddr,
+ pfe->ddr_phys_baseaddr, pfe->ddr_size);
+
+ pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr, pfe->ddr_phys_baseaddr, pfe->ddr_size);
+
+ rc = pfe_hw_init(pfe);
+ if (rc < 0)
+ goto err_hw;
+
+ rc = pfe_hif_lib_init(pfe);
+ if (rc < 0)
+ goto err_hif_lib;
+
+ rc = pfe_hif_init(pfe);
+ if (rc < 0)
+ goto err_hif;
+
+ rc = pfe_firmware_init(pfe);
+ if (rc < 0)
+ goto err_firmware;
+
+ rc = pfe_ctrl_init(pfe);
+ if (rc < 0)
+ goto err_ctrl;
+
+ rc = pfe_eth_init(pfe);
+ if (rc < 0)
+ goto err_eth;
+
+ rc = pfe_vwd_init(pfe);
+ if (rc < 0)
+ goto err_vwd;
+
+ rc = pfe_pcap_init(pfe);
+ if (rc < 0)
+ goto err_pcap;
+
+ rc = pfe_perfmon_init(pfe);
+ if(rc < 0)
+ goto err_perfmon;
+
+ rc = pfe_sysfs_init(pfe);
+ if(rc < 0)
+ goto err_sysfs;
+
+ rc = pfe_diags_init(pfe);
+ if(rc < 0)
+ goto err_diags;
+
+#if defined(CONFIG_COMCERTO_MSP)
+ rc = pfe_msp_sync_init(pfe);
+ if(rc < 0)
+ goto err_msp;
+#endif
+
+#if defined(CONFIG_UNIT_TEST)
+ pfe_unit_test(pfe);
+#endif
+
+ return 0;
+
+/* Unwind chain: each label undoes everything that succeeded before the
+ * failure point, in strict reverse order of initialization. */
+#if defined(CONFIG_COMCERTO_MSP)
+err_msp:
+ pfe_diags_exit(pfe);
+#endif
+
+err_diags:
+ pfe_sysfs_exit(pfe);
+
+err_sysfs:
+ pfe_perfmon_exit(pfe);
+
+err_perfmon:
+ pfe_pcap_exit(pfe);
+
+err_pcap:
+ pfe_vwd_exit(pfe);
+
+err_vwd:
+ pfe_eth_exit(pfe);
+
+err_eth:
+ pfe_ctrl_exit(pfe);
+
+err_ctrl:
+ pfe_firmware_exit(pfe);
+
+err_firmware:
+ pfe_hif_exit(pfe);
+
+err_hif:
+ pfe_hif_lib_exit(pfe);
+
+err_hif_lib:
+ pfe_hw_exit(pfe);
+
+err_hw:
+ return rc;
+}
+
+
+/**
+ * pfe_remove - tear down every sub-module in reverse of pfe_probe() order
+ * @pfe: driver state previously passed to pfe_probe()
+ *
+ * Always returns 0.
+ */
+int pfe_remove(struct pfe *pfe)
+{
+ printk(KERN_INFO "%s\n", __func__);
+
+#if defined(CONFIG_COMCERTO_MSP)
+ pfe_msp_sync_exit(pfe);
+#endif
+ pfe_diags_exit(pfe);
+
+ pfe_sysfs_exit(pfe);
+
+ pfe_perfmon_exit(pfe);
+
+ pfe_pcap_exit(pfe);
+
+ pfe_vwd_exit(pfe);
+
+ pfe_eth_exit(pfe);
+
+ pfe_ctrl_exit(pfe);
+
+ pfe_firmware_exit(pfe);
+
+ pfe_hif_exit(pfe);
+
+ pfe_hif_lib_exit(pfe);
+
+ pfe_hw_exit(pfe);
+
+ return 0;
+}
diff --git a/pfe_ctrl/pfe_mod.h b/pfe_ctrl/pfe_mod.h
new file mode 100644
index 0000000..f32c717
--- /dev/null
+++ b/pfe_ctrl/pfe_mod.h
@@ -0,0 +1,144 @@
+#ifndef _PFE_MOD_H_
+#define _PFE_MOD_H_
+
+#include <linux/device.h>
+#include <linux/elf.h>
+
+struct pfe;
+
+#include "pfe_hw.h"
+#include "pfe_firmware.h"
+#include "pfe_ctrl.h"
+#include "pfe_hif.h"
+#include "pfe_hif_lib.h"
+#include "pfe_eth.h"
+#include "pfe_vwd.h"
+#include "pfe_pcap.h"
+#include "pfe_sysfs.h"
+#include "pfe_mspsync.h"
+#include "pfe_diags.h"
+#include "pfe_perfmon.h"
+
+/* Per-GEMAC, per-queue TX flow-control bookkeeping. */
+struct pfe_tmu_credit {
+ /* Number of allowed TX packet in-flight, matches TMU queue size */
+ unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+ unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+ unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+};
+
+/* Top-level driver state; one instance, shared through the global 'pfe'.
+ * The base addresses/irqs are filled in by the bus-specific probe, the
+ * sub-module state by the respective *_init() calls from pfe_probe(). */
+struct pfe {
+ unsigned long ddr_phys_baseaddr;
+ void *ddr_baseaddr;
+ unsigned int ddr_size;
+ void *cbus_baseaddr;
+ void *apb_baseaddr;
+ unsigned long iram_phys_baseaddr;
+ void *iram_baseaddr;
+ unsigned long ipsec_phys_baseaddr;
+ void *ipsec_baseaddr;
+ int hif_irq;
+ int hif_client_irq;
+ struct device *dev;
+ struct pfe_ctrl ctrl;
+ struct pfe_hif hif;
+ struct pfe_eth eth;
+ struct pfe_vwd_priv_s vwd;
+ struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
+#if defined(FPP_DIAGNOSTICS)
+ struct pfe_diags diags;
+#endif
+ struct pfe_tmu_credit tmu_credit;
+ struct pcap_priv_s pcap;
+ struct pfe_cpumon cpumon;
+ struct pfe_memmon memmon;
+};
+
+extern struct pfe *pfe;
+
+int pfe_probe(struct pfe *pfe);
+int pfe_remove(struct pfe *pfe);
+
+#ifndef SZ_1K
+#define SZ_1K 1024
+#endif
+
+#ifndef SZ_1M
+#define SZ_1M (1024 * 1024)
+#endif
+
+/* DDR Mapping
+ * Consecutive regions laid out from offset 0 of the PFE DDR window; each
+ * BASEADDR is derived from the previous region's base + size, so the map
+ * can be re-arranged by editing sizes only. */
+#if !defined(CONFIG_PLATFORM_PCI)
+#define ROUTE_TABLE_BASEADDR 0
+#define ROUTE_TABLE_HASH_BITS 15 /**< 32K entries */
+#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) * CLASS_ROUTE_SIZE)
+#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
+#define BMU2_BUF_COUNT (4096 - 256) /**< This is to get a total DDR size of 12MiB */
+#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
+#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
+#define UTIL_CODE_SIZE (128 * SZ_1K)
+#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
+#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
+#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
+#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
+#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
+#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
+#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
+#define TMU_LLM_QUEUE_LEN (8 * 512) /**< Must be power of two and at least 16 * 8 = 128 bytes */
+#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN) /**< (4 TMU's x 16 queues x queue_len) */
+
+#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
+
+#else
+
+/* Shrunk layout for the PCI/FPGA emulation platform (64KiB DDR window). */
+#define UTIL_CODE_BASEADDR 0
+#if defined(CONFIG_UTIL_DISABLED)
+#define UTIL_CODE_SIZE (0 * SZ_1K)
+#else
+#define UTIL_CODE_SIZE (8 * SZ_1K)
+#endif
+#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
+#define UTIL_DDR_DATA_SIZE (0 * SZ_1K)
+#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
+#define CLASS_DDR_DATA_SIZE (0 * SZ_1K)
+#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
+#define TMU_DDR_DATA_SIZE (0 * SZ_1K)
+#define ROUTE_TABLE_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
+#define ROUTE_TABLE_HASH_BITS 5 /**< 32 entries */
+#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) * CLASS_ROUTE_SIZE)
+#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
+#define BMU2_BUF_COUNT 16
+#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
+#define TMU_LLM_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
+#define TMU_LLM_QUEUE_LEN (16 * 8) /**< Must be power of two and at least 16 * 8 = 128 bytes */
+#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN) /**< (4 TMU's x 16 queues x queue_len) */
+#define HIF_DESC_BASEADDR (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
+#define HIF_RX_DESC_SIZE (16*HIF_RX_DESC_NT)
+#define HIF_TX_DESC_SIZE (16*HIF_TX_DESC_NT)
+#define HIF_DESC_SIZE (HIF_RX_DESC_SIZE + HIF_TX_DESC_SIZE)
+#define HIF_RX_PKT_DDR_BASEADDR (HIF_DESC_BASEADDR + HIF_DESC_SIZE)
+#define HIF_RX_PKT_DDR_SIZE (HIF_RX_DESC_NT * DDR_BUF_SIZE)
+#define HIF_TX_PKT_DDR_BASEADDR (HIF_RX_PKT_DDR_BASEADDR + HIF_RX_PKT_DDR_SIZE)
+#define HIF_TX_PKT_DDR_SIZE (HIF_TX_DESC_NT * DDR_BUF_SIZE)
+#define ROUTE_BASEADDR (HIF_TX_PKT_DDR_BASEADDR + HIF_TX_PKT_DDR_SIZE)
+#define ROUTE_SIZE (2 * CLASS_ROUTE_SIZE)
+
+#define DDR_MAX_SIZE (ROUTE_BASEADDR + ROUTE_SIZE)
+
+/* Translate between host-visible and PFE-visible DDR addresses on PCI. */
+#define PFE_HOST_TO_PCI(addr) (((u32)addr)- ((u32)DDR_BASE_ADDR))
+#define PFE_PCI_TO_HOST(addr) (((u32)addr)+ ((u32)DDR_BASE_ADDR))
+#endif
+
+/* IRAM Mapping */
+#define IPSEC_IRAM_BASEADDR 0
+#define IPSEC_IRAM_SIZE 0x2000
+
+/* LMEM Mapping */
+#define BMU1_LMEM_BASEADDR 0
+#define BMU1_BUF_COUNT 256
+#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
+#define IPSEC_LMEM_BASEADDR (BMU1_LMEM_BASEADDR + BMU1_LMEM_SIZE)
+#define IPSEC_LMEM_SIZE (30 * 1024)
+
+
+
+#endif /* _PFE_MOD_H_ */
diff --git a/pfe_ctrl/pfe_mspsync.c b/pfe_ctrl/pfe_mspsync.c
new file mode 100644
index 0000000..3e788ea
--- /dev/null
+++ b/pfe_ctrl/pfe_mspsync.c
@@ -0,0 +1,34 @@
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#else
+#include "platform.h"
+#endif
+
+
+#include "pfe_mod.h"
+
+/* Provided by the MSP driver elsewhere in the kernel. */
+int msp_register_pfe(struct pfe_info *pfe_sync_info);
+void msp_unregister_pfe(void);
+
+/* Base addresses shared with the MSP; filled in by pfe_msp_sync_init(). */
+struct pfe_info pfe_sync_info;
+
+/* pfe_msp_sync_init - publish our cbus/ddr base addresses to the MSP driver.
+ * Returns 0 on success, -EIO if msp_register_pfe() rejects the registration. */
+int pfe_msp_sync_init(struct pfe *pfe)
+{
+ pfe_sync_info.owner = THIS_MODULE;
+ pfe_sync_info.cbus_baseaddr = pfe->cbus_baseaddr;
+ pfe_sync_info.ddr_baseaddr = pfe->ddr_baseaddr;
+
+ if (msp_register_pfe(&pfe_sync_info)) {
+ printk(KERN_ERR "%s: Failed to register with msp\n",__func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* pfe_msp_sync_exit - drop the MSP registration made by pfe_msp_sync_init(). */
+void pfe_msp_sync_exit(struct pfe *pfe)
+{
+ msp_unregister_pfe();
+}
diff --git a/pfe_ctrl/pfe_mspsync.h b/pfe_ctrl/pfe_mspsync.h
new file mode 100644
index 0000000..ef19df4
--- /dev/null
+++ b/pfe_ctrl/pfe_mspsync.h
@@ -0,0 +1,12 @@
+#ifndef _PFE_MSPSYNC_H_
+#define _PFE_MSPSYNC_H_
+
+struct pfe_info {
+ void *ddr_baseaddr;
+ void *cbus_baseaddr;
+ void *owner;
+};
+
+int pfe_msp_sync_init(struct pfe *pfe);
+void pfe_msp_sync_exit(struct pfe *pfe);
+#endif /* _PFE_MSPSYNC_H_ */
diff --git a/pfe_ctrl/pfe_pcap.c b/pfe_ctrl/pfe_pcap.c
new file mode 100644
index 0000000..f100366
--- /dev/null
+++ b/pfe_ctrl/pfe_pcap.c
@@ -0,0 +1,414 @@
+#include <linux/platform_device.h>
+#include <linux/kthread.h>
+#include "config.h"
+#include "pfe_mod.h"
+#include "pfe_pcap.h"
+#include "pfe_hif_lib.h"
+
+#ifdef CFG_PCAP
+
+/* PFE packet capture:
+ - uses HIF functions to receive packets
+ - uses ctrl function to control packet capture
+ */
+
+/* sysfs show/store callbacks for the pcap_stats attribute, defined below. */
+static ssize_t pcap_stats_get(struct device *, struct device_attribute *, char *);
+static ssize_t pcap_stats_clear(struct device *, struct device_attribute *, const char *, size_t );
+
+/* ndo_open: the capture netdev never transmits, so stop its TX queue as
+ * soon as it is brought up. */
+static int pcap_open(struct net_device *dev)
+{
+ printk(KERN_INFO "%s: %s\n", dev->name, __func__);
+ netif_stop_queue(dev);
+ return 0;
+}
+/* ndo_stop: nothing to tear down for the dummy capture device. */
+static int pcap_close(struct net_device *dev)
+{
+ printk(KERN_INFO "%s: %s\n", dev->name, __func__);
+ return 0;
+}
+
+/* ndo_start_xmit: TX is unsupported on capture devices - free the skb,
+ * keep the queue stopped, and report NETDEV_TX_OK so the stack does not
+ * retry. */
+static int pcap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev )
+{
+ netif_stop_queue(dev);
+ printk(KERN_INFO "%s() Dropping pkt!!!\n",__func__);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+
+/* sysfs attribute "pcap_stats": read = per-GEMAC RX counters, write = clear. */
+static DEVICE_ATTR(pcap_stats, 0644, pcap_stats_get, pcap_stats_clear);
+
+
+
+/* Create the pcap_stats sysfs attribute on @dev.
+ * Returns 0 on success, -1 on failure. */
+static int pcap_sysfs_init(struct device *dev)
+{
+ if (device_create_file(dev, &dev_attr_pcap_stats))
+ {
+ printk(KERN_ERR "Failed to create attr capture stats\n");
+ goto err_stats;
+ }
+
+ return 0;
+
+err_stats:
+ return -1;
+}
+
+/** pcap_sysfs_exit
+ * Remove the pcap_stats attribute created by pcap_sysfs_init().
+ */
+static void pcap_sysfs_exit(struct pfe* pfe)
+{
+ device_remove_file(pfe->dev, &dev_attr_pcap_stats);
+}
+
+/* netdev callbacks for the dummy cap_gemacX capture devices. */
+static const struct net_device_ops pcap_netdev_ops = {
+ .ndo_open = pcap_open,
+ .ndo_stop = pcap_close,
+ .ndo_start_xmit = pcap_hard_start_xmit,
+};
+
+/* Allocate and register one dummy ethernet device named @dev_name.
+ * Returns the netdev on success, NULL on allocation/registration failure.
+ * Callers pass names produced with snprintf(..., IFNAMSIZ, ...), which keeps
+ * the strcpy into dev->name within bounds. */
+static struct net_device *pcap_register_capdev(char *dev_name)
+{
+ struct net_device *dev=NULL;
+
+ printk("%s:\n", __func__);
+
+ /* Create an ethernet device instance */
+ dev = (struct net_device *)alloc_etherdev(sizeof (int));
+ if (!dev) {
+ printk(KERN_ERR "%s: cap device allocation failed\n", __func__);
+ goto err0;
+ }
+
+ strcpy(dev->name, dev_name);
+ //dev->irq = priv->irq;
+
+ /* Fill in the dev structure */
+ dev->mtu = 1500;
+ dev->features = 0;
+ dev->netdev_ops = &pcap_netdev_ops;
+ /* Fixed placeholder MAC - the device never transmits. TODO: real address */
+ dev->dev_addr[0] = 0x0;
+ dev->dev_addr[1] = 0x21;
+ dev->dev_addr[2] = 0x32;
+ dev->dev_addr[3] = 0x43;
+ dev->dev_addr[4] = 0x54;
+ dev->dev_addr[5] = 0x65;
+
+ if(register_netdev(dev)) {
+ printk(KERN_ERR "%s: cannot register net device, aborting.\n", dev->name);
+ free_netdev(dev);
+ dev = NULL;
+ }
+
+err0:
+ return dev;
+}
+/* Unregister and free a capture netdev created by pcap_register_capdev(). */
+static void pcap_unregister_capdev(struct net_device *dev)
+{
+ unregister_netdev(dev);
+ free_netdev(dev);
+}
+
+/* sysfs show: print the RX packet counter of every GEMAC capture device. */
+static ssize_t pcap_stats_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pfe* pfe = platform_get_drvdata(pdev);
+ struct pcap_priv_s *priv = &pfe->pcap;
+ int ii, len = 0;
+
+ //printk("rtc %d rtf %d\n", priv->rxQ.rxToCleanIndex,priv->rxQ.rxToFillIndex);
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++)
+ len += sprintf(&buf[len], "GEMAC%d Rx pkts : %lu\n", ii, priv->stats[ii].rx_packets);
+
+ return len;
+}
+
+/* sysfs store: any write clears every GEMAC RX packet counter. */
+static ssize_t pcap_stats_clear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pfe* pfe = platform_get_drvdata(pdev);
+ struct pcap_priv_s *priv = &pfe->pcap;
+ int ii;
+
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++)
+ priv->stats[ii].rx_packets = 0;
+ return count;
+}
+
+
+
+/** pfe_pcap_rx_poll
+ * NAPI poll: drain up to @budget captured packets from HIF queue 0, wrap
+ * each buffer in an skb header and deliver it to the PF_PACKET capture
+ * path. Buffers that cannot be delivered are released with pfe_kfree().
+ * Re-enables HIF RX events once the queue is drained below budget.
+ */
+static int pfe_pcap_rx_poll (struct napi_struct *napi, int budget)
+{
+ struct sk_buff *skb;
+ void *data_addr;
+ int length,offset;
+ unsigned int idx = 0, work_done=0, qno=0;
+ unsigned int desc_ctrl = 0,rx_ctrl;
+ struct hif_pcap_hdr *pcap_hdr;
+ u32 tstamp;
+ struct pcap_priv_s *priv = container_of(napi, struct pcap_priv_s, low_napi);
+
+
+ do{
+
+ data_addr = hif_lib_receive_pkt(&priv->client, qno, &length, &offset, &rx_ctrl, &desc_ctrl, (void **)&pcap_hdr);
+ if (!data_addr)
+ break;
+
+ /* the HIF pcap header identifies which GEMAC captured the packet */
+ idx = pcap_hdr->ifindex;
+
+ //printk("%s: data:%x len:%d idx:%d \n", __func__, (u32)data_addr, length, idx);
+
+ if(idx < NUM_GEMAC_SUPPORT && priv->dev[idx]) {
+ struct net_device *dev = priv->dev[idx];
+ if(length > 1514) {
+ printk(KERN_ERR "Dropping big packet\n");
+ goto pkt_drop;
+ }
+ skb = alloc_skb_header(PFE_BUF_SIZE, data_addr, GFP_ATOMIC);
+
+ if (unlikely(!skb)) {
+ printk(KERN_ERR "Failed to allocate skb header\n");
+ goto pkt_drop;
+ }
+
+ skb_reserve(skb, offset);
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, dev);
+ /* hardware timestamp arrives big-endian, in microseconds */
+ tstamp = ntohl(pcap_hdr->timestamp);
+ skb->tstamp = ktime_set( tstamp/USEC_PER_SEC, (tstamp%USEC_PER_SEC)*1000 );
+
+ /* Send packet to PF_PACKET socket queue */
+ capture_receive_skb(skb);
+
+ priv->stats[idx].rx_packets++;
+ priv->stats[idx].rx_bytes += length;
+ //dev->last_rx = jiffies;
+ work_done++;
+ }
+ else
+ {
+/* NOTE(review): pkt_drop is jumped to from inside the if-branch above,
+ * i.e. a goto into the else block - legal C, but fragile to edit. */
+pkt_drop:
+ printk(KERN_ERR "Received with wrong dev\n");
+ pfe_kfree(data_addr);
+ }
+
+ }while(work_done < budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND, qno);
+ }
+ return (work_done);
+
+}
+
+
+
+
+
+/* HIF client callback: schedule the NAPI poll on RX indications for queue 0;
+ * TX-done and high-watermark events are ignored. */
+static int pfe_pcap_event_handler(void *data, int event, int qno)
+{
+ struct pcap_priv_s *priv = data;
+
+
+ switch (event) {
+ case EVENT_RX_PKT_IND:
+ /* qno is always 0 */
+ if (qno == 0) {
+ if (napi_schedule_prep(&priv->low_napi)) {
+ //printk(KERN_INFO "%s: schedule high prio poll\n", __func__);
+
+ __napi_schedule(&priv->low_napi);
+ }
+ }
+
+ break;
+
+ case EVENT_TXDONE_IND:
+ case EVENT_HIGH_RX_WM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
+/* pfe_pcap_up - bring capture up: register one cap_gemacN netdev per GEMAC,
+ * create the sysfs attribute, register as a HIF client and enable capture
+ * (plus its default rate limit) in the PFE firmware.
+ * Returns 0 on success or a negative errno; partial setup is unwound via
+ * the err labels in reverse order. */
+int pfe_pcap_up( struct pcap_priv_s *priv)
+{
+ struct hif_client_s *client;
+ int err = 0,rc, ii;
+
+ char ifname[IFNAMSIZ];
+
+ printk(KERN_INFO "%s\n", __func__);
+
+
+ priv->pfe = pfe;
+
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++)
+ {
+ snprintf(ifname, IFNAMSIZ, "cap_gemac%d", ii);
+ priv->dev[ii] = pcap_register_capdev(ifname);
+
+ if(priv->dev[ii]==NULL) {
+ printk(KERN_ERR "%s: Failed to register capture device\n",__func__);
+ err = -EAGAIN;
+ goto err0;
+ }
+ }
+
+ if (pcap_sysfs_init(pfe->dev) < 0 )
+ {
+ printk(KERN_ERR "%s: Failed to register to sysfs\n",__func__);
+ err = -EAGAIN;
+ goto err0;
+ }
+
+ /* Register pktcap client driver to HIF */
+ client = &priv->client;
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_PCAP0;
+ client->tx_qn = PCAP_TXQ_CNT;
+ client->rx_qn = PCAP_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->event_handler = pfe_pcap_event_handler;
+
+ /* FIXME : For now hif lib sets all tx and rx queues to same size */
+ client->tx_qsize = PCAP_TXQ_DEPTH;
+ client->rx_qsize = PCAP_RXQ_DEPTH;
+
+ printk(KERN_INFO "%s Registering client \n", __func__);
+ if ((rc = hif_lib_client_register(client))) {
+ printk(KERN_ERR"%s: hif_lib_client_register(%d) failed\n", __func__, client->id);
+ goto err1;
+ }
+
+ printk(KERN_INFO "%s Enable PCAP in pfe \n", __func__);
+ /* Enable Packet capture in PFE */
+ if ((rc = pfe_ctrl_set_pcap(1)) != 0)
+ {
+ printk("%s: Failed to send command(enable) to pfe\n",__func__);
+ err = -EAGAIN;
+ goto err2;
+ }
+
+ printk(KERN_INFO "%s Enable PCAP ratelimit in pfe \n", __func__);
+ /* Set the default values for the configurable parameters*/
+ priv->rate_limit = COMCERTO_CAP_DFL_RATELIMIT;
+ priv->pkts_per_msec = priv->rate_limit/1000;
+
+ if ((rc = pfe_ctrl_set_pcap_ratelimit(priv->pkts_per_msec)) != 0)
+ {
+ printk("%s: Failed to send ratelimit command to pfe\n",__func__);
+ err = -EAGAIN;
+ goto err2;
+ }
+
+
+
+ return 0;
+
+err2:
+ hif_lib_client_unregister(client);
+err1:
+ pcap_sysfs_exit(pfe);
+err0:
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++)
+ if(priv->dev[ii])
+ pcap_unregister_capdev(priv->dev[ii]);
+
+ return err;
+
+}
+
+/* pfe_pcap_down - reverse of pfe_pcap_up: disable capture in the firmware,
+ * unregister the HIF client, remove the sysfs attribute and tear down every
+ * capture netdev. Always returns 0. */
+static int pfe_pcap_down(struct pcap_priv_s* priv)
+{
+ struct hif_client_s *client = &priv->client;
+ int ii;
+
+ printk("%s()\n", __func__);
+
+ /* Disable Packet capture module in PFE */
+ if(pfe_ctrl_set_pcap(0)!= 0)
+ printk(KERN_ERR "%s: Failed while sending command CMD_PKTCAP_ENABLE\n", __func__ );
+ hif_lib_client_unregister(client);
+ pcap_sysfs_exit(pfe);
+
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++)
+ if(priv->dev[ii])
+ pcap_unregister_capdev(priv->dev[ii]);
+
+ return 0;
+
+}
+
+/* Set up NAPI on a dummy netdev and bring capture up.
+ * Returns 0 or the negative errno from pfe_pcap_up(). */
+static int pcap_driver_init(struct pfe* pfe)
+{
+ struct pcap_priv_s *priv = &pfe->pcap;
+ int err;
+
+ /* Initialize NAPI for Rx processing */
+ init_dummy_netdev(&priv->dummy_dev);
+ netif_napi_add(&priv->dummy_dev, &priv->low_napi, pfe_pcap_rx_poll, PCAP_RX_POLL_WEIGHT);
+ napi_enable(&priv->low_napi);
+
+ priv->pfe = pfe;
+
+ err = pfe_pcap_up(priv);
+
+ /* NOTE(review): on failure the napi is only disabled, not removed with
+ * netif_napi_del - confirm that is acceptable here. */
+ if (err < 0)
+ napi_disable(&priv->low_napi);
+
+ return err;
+}
+
+/* Module entry point for packet capture, called from pfe_probe().
+ * Returns 0 or the error from pcap_driver_init(). */
+int pfe_pcap_init(struct pfe *pfe)
+{
+ int rc ;
+ printk(KERN_INFO "%s\n",__func__);
+
+ rc = pcap_driver_init(pfe);
+ if(rc) goto err0;
+ return 0;
+err0:
+ return rc;
+}
+
+/* Module exit point for packet capture, called from pfe_remove(). */
+void pfe_pcap_exit(struct pfe *pfe)
+{
+ struct pcap_priv_s *priv = &pfe->pcap;
+
+ printk(KERN_INFO "%s\n", __func__);
+ pfe_pcap_down(priv);
+
+}
+
+#else /* !CFG_PCAP */
+
+/* CFG_PCAP disabled: the capture entry points become logging no-ops so the
+ * rest of the driver links unchanged. */
+int pfe_pcap_init(struct pfe *pfe)
+{
+ printk(KERN_INFO "%s\n", __func__);
+
+ return 0;
+}
+
+void pfe_pcap_exit(struct pfe *pfe)
+{
+ printk(KERN_INFO "%s\n", __func__);
+}
+
+#endif /* !CFG_PCAP */
+
diff --git a/pfe_ctrl/pfe_pcap.h b/pfe_ctrl/pfe_pcap.h
new file mode 100644
index 0000000..54553b5
--- /dev/null
+++ b/pfe_ctrl/pfe_pcap.h
@@ -0,0 +1,40 @@
+#ifndef _PFE_PCAP_H_
+#define _PFE_PCAP_H_
+
+
+
+/* Capture tunables: descriptor count, rate limits (packets per second) and
+ * processing budgets. */
+#define COMCERTO_CAP_RX_DESC_NT (1024)
+#define COMCERTO_CAP_DFL_RATELIMIT 10000 //pps
+#define COMCERTO_CAP_MIN_RATELIMIT 1000 //pps
+#define COMCERTO_CAP_MAX_RATELIMIT 800000 //pps
+#define COMCERTO_CAP_DFL_BUDGET 32 //packets processed in tasklet
+#define COMCERTO_CAP_MAX_BUDGET 64
+#define COMCERTO_CAP_POLL_MS 100
+
+int pfe_pcap_init(struct pfe *pfe);
+void pfe_pcap_exit(struct pfe *pfe);
+
+/* HIF client queue geometry: a single RX queue, and a single (unused) TX
+ * queue of depth 1. */
+#define PCAP_RXQ_CNT 1
+#define PCAP_TXQ_CNT 1
+
+#define PCAP_RXQ_DEPTH 1024
+#define PCAP_TXQ_DEPTH 1
+
+#define PCAP_RX_POLL_WEIGHT (HIF_RX_POLL_WEIGHT - 16)
+
+
+/* Per-driver capture state, embedded in struct pfe as pfe->pcap. */
+typedef struct pcap_priv_s {
+ struct pfe* pfe;
+ unsigned char name[12];
+
+ struct net_device_stats stats[NUM_GEMAC_SUPPORT];
+ struct net_device *dev[NUM_GEMAC_SUPPORT];
+ struct hif_client_s client;
+ u32 rate_limit;
+ u32 pkts_per_msec;
+ struct net_device dummy_dev;  /* anchor for the NAPI context below */
+ struct napi_struct low_napi;
+}pcap_priv_t;
+
+
+#endif /* _PFE_PCAP_H_ */
diff --git a/pfe_ctrl/pfe_pci.c b/pfe_ctrl/pfe_pci.c
new file mode 100644
index 0000000..0bab3da
--- /dev/null
+++ b/pfe_ctrl/pfe_pci.c
@@ -0,0 +1,199 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+
+#include "pfe_mod.h"
+
+/* PCI IDs of the FPGA emulation board; the disabled variant (device 0x1108)
+ * is kept for reference. NOTE(review): the table is not exported with
+ * MODULE_DEVICE_TABLE - confirm auto-loading is not expected. */
+#if 0
+static struct pci_device_id pfe_pci_tbl[] =
+{
+ { 0x0700, 0x1108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 },
+};
+#else
+static struct pci_device_id pfe_pci_tbl[] =
+{
+ { 0x0700, 0x1107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 },
+};
+#endif
+
+
+/* PCI probe for the FPGA emulation platform: map BAR0 (cbus; the DDR window
+ * sits 8MB into the same BAR), fake apb/iram with vmalloc, then run the
+ * common pfe_probe(). Returns 0 or a negative errno, unwinding in reverse
+ * order on failure. */
+static int __devinit pfe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+ unsigned long mem_start, mem_end, mem_len;
+
+ printk(KERN_INFO "%s\n", __func__);
+
+ pfe = kzalloc(sizeof(struct pfe), GFP_KERNEL);
+ if (!pfe) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ pci_set_drvdata(pdev, pfe);
+
+ rc = pci_enable_device(pdev);
+ if (rc < 0)
+ {
+ printk(KERN_INFO "pci_enable_device() failed\n");
+ goto err_pci_enable;
+ }
+
+ printk(KERN_INFO "PCI device enabled\n");
+
+ mem_start = pci_resource_start(pdev, 0);
+ mem_end = pci_resource_end(pdev, 0);
+ mem_len = pci_resource_len(pdev, 0);
+
+ printk(KERN_INFO "PCI resource 0 %#lx:%#lx (%lx)\n", mem_start, mem_end, mem_len);
+
+/* Dead debug code kept from bring-up; resource-flag and DMA-mask checks. */
+#if 0
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
+ {
+ printk("\n cann't read PCI resource flags");
+ goto err_out_disable_pdev;
+ }
+
+ printk("\n PCI resource flags read as MEM mapped");
+
+
+ io_start = pci_resource_start(pdev, 1);
+ printk("\n PCI resource start address:%x ",io_start);
+ io_end = pci_resource_end(pdev, 1);
+ printk("\n PCI resource end address:%x ",io_end);
+ io_len = pci_resource_len(pdev, 1);
+ printk("\n PCI resource end address:%x ",io_len);
+
+ if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO))
+ {
+ printk("\n cann't read PCI resource flags");
+ goto err_out_disable_pdev;
+ }
+
+ printk("\n PCI resource flags read as IO mapped");
+#endif
+ if ((rc = pci_request_regions(pdev, "pfe-pci"))) {
+ printk(KERN_INFO "pci_request_regions() failed\n");
+ goto err_pci_request;
+ }
+#if 0
+ printk("\n PCI acquired regions");
+ if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK))){
+ printk("\n No usable DMA configuration");
+ goto err_out_free_res;
+ }
+
+ printk("using i/o access mode\n");
+#endif
+ pfe->cbus_baseaddr = ioremap(mem_start, mem_len);
+ if (!pfe->cbus_baseaddr) {
+ printk(KERN_INFO "ioremap() cbus failed\n");
+ rc = -ENOMEM;
+ goto err_cbus;
+ }
+
+ /* Emulation-board constants for the DDR window. */
+ pfe->ddr_baseaddr = pfe->cbus_baseaddr + 8 * SZ_1M;
+ pfe->ddr_phys_baseaddr = 0x20000; /* This is the physical address seen by the FPGA ... */
+ pfe->ddr_size = 0x10000;
+
+ pfe->apb_baseaddr = vmalloc(16 * SZ_1M);
+ if (!pfe->apb_baseaddr) {
+ printk(KERN_INFO "vmalloc() apb failed\n");
+ rc = -ENOMEM;
+ goto err_apb;
+ }
+
+ pfe->iram_baseaddr = vmalloc(128 * SZ_1K);
+ if (!pfe->iram_baseaddr) {
+ printk(KERN_INFO "vmalloc() iram failed\n");
+ rc = -ENOMEM;
+ goto err_iram;
+ }
+
+ pfe->hif_irq = pdev->irq;
+ pfe->dev = &pdev->dev;
+
+ pfe->ctrl.sys_clk = 40000;
+
+ rc = pfe_probe(pfe);
+ if (rc < 0)
+ goto err_probe;
+
+ return 0;
+
+err_probe:
+ vfree(pfe->iram_baseaddr);
+
+err_iram:
+ vfree(pfe->apb_baseaddr);
+
+err_apb:
+ iounmap(pfe->cbus_baseaddr);
+
+err_cbus:
+ pci_release_regions(pdev);
+
+err_pci_request:
+ pci_disable_device(pdev);
+
+err_pci_enable:
+
+ pci_set_drvdata(pdev, NULL);
+
+ kfree(pfe);
+
+err_alloc:
+ return rc;
+}
+
+
+/* PCI remove: tear down the common driver then release every resource
+ * acquired in pfe_pci_probe(), in reverse order. */
+static void __devexit pfe_pci_remove (struct pci_dev *pdev)
+{
+ struct pfe *pfe = pci_get_drvdata(pdev);
+
+ printk(KERN_INFO "%s\n", __func__);
+
+ pfe_remove(pfe);
+
+ vfree(pfe->iram_baseaddr);
+ vfree(pfe->apb_baseaddr);
+
+ iounmap(pfe->cbus_baseaddr);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ kfree(pfe);
+}
+
+
+/* PCI driver glue for the emulation platform. */
+static struct pci_driver pfe_pci_driver = {
+ .name = "pfe-pci",
+ .id_table = pfe_pci_tbl,
+ .probe = pfe_pci_probe,
+ .remove = __devexit_p(pfe_pci_remove),
+};
+
+
+/* Module entry/exit: just (un)register the PCI driver. */
+static int __init pfe_module_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+
+ return pci_register_driver(&pfe_pci_driver);
+}
+
+static void __exit pfe_module_exit(void)
+{
+ pci_unregister_driver(&pfe_pci_driver);
+
+ printk(KERN_INFO "%s\n", __func__);
+}
+
+MODULE_LICENSE("GPL");
+module_init(pfe_module_init);
+module_exit(pfe_module_exit);
diff --git a/pfe_ctrl/pfe_perfmon.c b/pfe_ctrl/pfe_perfmon.c
new file mode 100644
index 0000000..50c0bce
--- /dev/null
+++ b/pfe_ctrl/pfe_perfmon.c
@@ -0,0 +1,151 @@
+/* PFE performance monitoring functions */
+
+#include "pfe_ctrl_hal.h"
+#include "pfe_perfmon.h"
+
+static TIMER_ENTRY cpumon_timer;
+
+/* Per-PE tick counters kept in each PE's DMEM: [0] = total, [1] = active. */
+u32 CLASS_DMEM_SH2(cpu_ticks[2]);
+u32 TMU_DMEM_SH2(cpu_ticks[2]);
+u32 UTIL_DMEM_SH2(cpu_ticks[2]);
+
+/* Percentage of active ticks, rounded to nearest (adds total/2 before the
+ * divide). Caller must guarantee total_ticks != 0. Parameters are
+ * parenthesized so expression arguments expand with correct precedence. */
+#define compute_active_pct(total_ticks, active_ticks) (((active_ticks) * 100 + ((total_ticks) >> 1)) / (total_ticks))
+
+/* Periodic CPU-load sampler: for every PE (class, TMU, util) stop the PE,
+ * read and clear its total/active tick counters from DMEM, restart it, and
+ * convert the ticks into a usage percentage in pfe->cpumon. PEs that cannot
+ * be stopped are skipped for this interval (usage reported as 0). */
+static void cpumon_timer_handler(void)
+{
+ int id;
+ u32 dmem_addr;
+ u32 ticks[2];
+ u32 total, active;
+ struct pfe_ctrl *ctrl = &pfe->ctrl;
+ struct pfe_cpumon *cpumon = &pfe->cpumon;
+
+ // Process class PE's
+ total = active = 0;
+ dmem_addr = virt_to_class_dmem(&class_cpu_ticks[0]);
+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
+ {
+ cpumon->cpu_usage_pct[id] = 0;
+ if (pe_sync_stop(ctrl, (1 << id)) < 0)
+ continue;
+ ticks[0] = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
+ ticks[1] = be32_to_cpu(pe_dmem_read(id, dmem_addr + 4, 4));
+ pe_dmem_write(id, 0, dmem_addr, 4);
+ pe_dmem_write(id, 0, dmem_addr + 4, 4);
+ pe_start(ctrl, (1 << id));
+ ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow
+ ticks[1] >>= 8;
+ total += ticks[0];
+ active += ticks[1];
+ if (ticks[0] != 0)
+ cpumon->cpu_usage_pct[id] = compute_active_pct(ticks[0], ticks[1]);
+ }
+ if (total != 0)
+ cpumon->class_usage_pct = compute_active_pct(total, active);
+ else
+ cpumon->class_usage_pct = 0;
+
+ // Process TMU PE's
+ /* NOTE(review): total/active are reset here but never accumulated or
+ * consumed in the TMU loop - dead code. */
+ total = active = 0;
+ dmem_addr = virt_to_tmu_dmem(&tmu_cpu_ticks[0]);
+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++)
+ {
+ cpumon->cpu_usage_pct[id] = 0;
+ if (pe_sync_stop(ctrl, (1 << id)) < 0)
+ continue;
+ ticks[0] = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
+ ticks[1] = be32_to_cpu(pe_dmem_read(id, dmem_addr + 4, 4));
+ pe_dmem_write(id, 0, dmem_addr, 4);
+ pe_dmem_write(id, 0, dmem_addr + 4, 4);
+ pe_start(ctrl, (1 << id));
+ /* NOTE(review): leftover debug line - x is a fresh local each call,
+ * so ++x == 10 is never true and this printk never fires. */
+ if (id == TMU0_ID) { int x=0; if (++x == 10){x=0; printk("total=%x, active=%x\n",ticks[0],ticks[1]);}} //zzz
+ ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow
+ ticks[1] >>= 8;
+ if (ticks[0] != 0)
+ cpumon->cpu_usage_pct[id] = compute_active_pct(ticks[0], ticks[1]);
+ }
+
+ // Process Util PE
+ dmem_addr = virt_to_util_dmem(&util_cpu_ticks[0]);
+ cpumon->cpu_usage_pct[UTIL_ID] = 0;
+ if (pe_sync_stop(ctrl, (1 << UTIL_ID)) < 0)
+ return;
+ ticks[0] = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
+ ticks[1] = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr + 4, 4));
+ pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
+ pe_dmem_write(UTIL_ID, 0, dmem_addr + 4, 4);
+ pe_start(ctrl, (1 << UTIL_ID));
+ ticks[0] >>= 8; // divide both values by 256, so multiply by 100 won't overflow
+ ticks[1] >>= 8;
+ if (ticks[0] != 0)
+ cpumon->cpu_usage_pct[UTIL_ID] = compute_active_pct(ticks[0], ticks[1]);
+}
+
+/* Start/stop the periodic CPU-load sampling timer (fires every
+ * CT_CPUMON_INTERVAL, i.e. once per second). */
+static int pfe_cpumon_init(struct pfe *pfe)
+{
+ timer_init(&cpumon_timer, cpumon_timer_handler);
+ timer_add(&cpumon_timer, CT_CPUMON_INTERVAL);
+ return 0;
+}
+
+static void pfe_cpumon_exit(struct pfe *pfe)
+{
+ timer_del(&cpumon_timer);
+}
+
+
+/*********************************************************************************/
+
+// Memory monitor functions
+
+/* kmalloc wrapper that accounts the allocation (actual ksize) in the memory
+ * monitor. NOTE(review): the counter update is a plain +=, not atomic. */
+void * pfe_kmalloc(size_t size, int flags)
+{
+ struct pfe_memmon *memmon = &pfe->memmon;
+ void *ptr;
+ ptr = kmalloc(size, flags);
+ if (ptr)
+ memmon->kernel_memory_allocated += ksize(ptr);
+ return ptr;
+}
+
+/* kzalloc wrapper with the same accounting as pfe_kmalloc(). */
+void * pfe_kzalloc(size_t size, int flags)
+{
+ struct pfe_memmon *memmon = &pfe->memmon;
+ void *ptr;
+ ptr = kzalloc(size, flags);
+ if (ptr)
+ memmon->kernel_memory_allocated += ksize(ptr);
+ return ptr;
+}
+
+/* Free memory obtained from pfe_kmalloc/pfe_kzalloc and keep the usage
+ * counter in sync. NULL is accepted and ignored: kfree(NULL) is legal, but
+ * ksize(NULL) is not, so guard before accounting. */
+void pfe_kfree(void *ptr)
+{
+ struct pfe_memmon *memmon = &pfe->memmon;
+
+ if (!ptr)
+ return;
+
+ memmon->kernel_memory_allocated -= ksize(ptr);
+ kfree(ptr);
+}
+
+/* Memory monitor has no state to set up or tear down; the accounting is
+ * done inline by the pfe_k*alloc/pfe_kfree wrappers above. */
+static int pfe_memmon_init(struct pfe *pfe)
+{
+ return 0;
+}
+
+static void pfe_memmon_exit(struct pfe *pfe)
+{
+}
+
+/*********************************************************************************/
+
+
+/* Aggregate init/exit for both performance monitors (CPU load + memory). */
+int pfe_perfmon_init(struct pfe *pfe)
+{
+ pfe_cpumon_init(pfe);
+ pfe_memmon_init(pfe);
+ return 0;
+}
+
+void pfe_perfmon_exit(struct pfe *pfe)
+{
+ pfe_cpumon_exit(pfe);
+ pfe_memmon_exit(pfe);
+}
diff --git a/pfe_ctrl/pfe_perfmon.h b/pfe_ctrl/pfe_perfmon.h
new file mode 100644
index 0000000..12240bd
--- /dev/null
+++ b/pfe_ctrl/pfe_perfmon.h
@@ -0,0 +1,22 @@
+#ifndef _PFE_PERFMON_H_
+#define _PFE_PERFMON_H_
+
+/* CPU load sampling period: once per second. */
+#define CT_CPUMON_INTERVAL	(1 * TIMER_TICKS_PER_SEC)
+
+/* Per-PE CPU utilisation snapshot, refreshed by the cpumon timer. */
+struct pfe_cpumon {
+	/* busy percentage for each PE, indexed by PE id */
+	u32 cpu_usage_pct[MAX_PE];
+	/* aggregate busy percentage across the class PEs */
+	u32 class_usage_pct;
+};
+
+/* Running total of kernel memory obtained via the pfe_k*alloc wrappers. */
+struct pfe_memmon {
+	u32 kernel_memory_allocated;
+};
+
+/* Accounting wrappers around kmalloc/kzalloc/kfree (see pfe_perfmon.c). */
+void * pfe_kmalloc(size_t size, int flags);
+void * pfe_kzalloc(size_t size, int flags);
+void pfe_kfree(void *ptr);
+
+int pfe_perfmon_init(struct pfe *pfe);
+void pfe_perfmon_exit(struct pfe *pfe);
+
+#endif /* _PFE_PERFMON_H_ */
diff --git a/pfe_ctrl/pfe_platform.c b/pfe_ctrl/pfe_platform.c
new file mode 100644
index 0000000..ee5361f
--- /dev/null
+++ b/pfe_ctrl/pfe_platform.c
@@ -0,0 +1,233 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include "pfe_mod.h"
+
+/**
+ * pfe_platform_probe -
+ * Allocate the global pfe context, map the PFE memory regions
+ * (ddr/axi/apb/iram/ipsec), fetch the HIF interrupt, pulse the PFE block
+ * reset, acquire the AXI clock and hand over to pfe_probe().
+ * On failure every previously acquired resource is released through the
+ * unwind ladder at the bottom (each label unmaps the region acquired just
+ * before the failing step).
+ */
+static int pfe_platform_probe(struct platform_device *pdev)
+{
+	struct resource *r;
+	int rc;
+	struct clk *clk_axi;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	pfe = kzalloc(sizeof(struct pfe), GFP_KERNEL);
+	if (!pfe) {
+		rc = -ENOMEM;
+		goto err_alloc;
+	}
+
+	platform_set_drvdata(pdev, pfe);
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ddr");
+	if (!r) {
+		printk(KERN_INFO "platform_get_resource_byname(ddr) failed\n");
+		rc = -ENXIO;
+		goto err_ddr;
+	}
+
+	pfe->ddr_phys_baseaddr = r->start;
+	pfe->ddr_size = resource_size(r);
+
+	pfe->ddr_baseaddr = ioremap(r->start, resource_size(r));
+	if (!pfe->ddr_baseaddr) {
+		printk(KERN_INFO "ioremap() ddr failed\n");
+		rc = -ENOMEM;
+		goto err_ddr;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "axi");
+	if (!r) {
+		printk(KERN_INFO "platform_get_resource_byname(axi) failed\n");
+		rc = -ENXIO;
+		goto err_axi;
+	}
+
+	pfe->cbus_baseaddr = ioremap(r->start, resource_size(r));
+	if (!pfe->cbus_baseaddr) {
+		printk(KERN_INFO "ioremap() axi failed\n");
+		rc = -ENOMEM;
+		goto err_axi;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
+	if (!r) {
+		printk(KERN_INFO "platform_get_resource_byname(apb) failed\n");
+		rc = -ENXIO;
+		goto err_apb;
+	}
+
+	pfe->apb_baseaddr = ioremap(r->start, resource_size(r));
+	if (!pfe->apb_baseaddr) {
+		printk(KERN_INFO "ioremap() apb failed\n");
+		rc = -ENOMEM;
+		goto err_apb;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iram");
+	if (!r) {
+		printk(KERN_INFO "platform_get_resource_byname(iram) failed\n");
+		rc = -ENXIO;
+		goto err_iram;
+	}
+
+	pfe->iram_phys_baseaddr = r->start;
+	pfe->iram_baseaddr = ioremap(r->start, resource_size(r));
+	if (!pfe->iram_baseaddr) {
+		printk(KERN_INFO "ioremap() iram failed\n");
+		rc = -ENOMEM;
+		goto err_iram;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipsec");
+	if (!r) {
+		printk(KERN_INFO "platform_get_resource_byname(ipsec) failed\n");
+		rc = -ENXIO;
+		goto err_ipsec;
+	}
+
+	pfe->ipsec_phys_baseaddr = r->start;
+	/* Just map only initial 1MB , as its enough to access espah engine
+	*/
+	/* NOTE(review): deliberately maps less than the full resource; the
+	 * full-size mapping is kept below for reference. */
+	//pfe->ipsec_baseaddr = ioremap(r->start, resource_size(r));
+	pfe->ipsec_baseaddr = ioremap(r->start, 1*1024*1024);
+	if (!pfe->ipsec_baseaddr) {
+		printk(KERN_INFO "ioremap() ipsec failed\n");
+		rc = -ENOMEM;
+		goto err_ipsec;
+	}
+
+	printk(KERN_INFO "ipsec: baseaddr :%x --- %x\n", (u32)pfe->ipsec_phys_baseaddr, (u32)pfe->ipsec_baseaddr);
+
+	pfe->hif_irq = platform_get_irq_byname(pdev, "hif");
+	if (pfe->hif_irq < 0) {
+		printk(KERN_INFO "platform_get_irq_byname(hif) failed\n");
+		rc = pfe->hif_irq;
+		goto err_hif_irq;
+	}
+
+#if 0
+	pfe->hif_client_irq = platform_get_irq_byname(pdev, "hif_client");
+	if (pfe->hif_client_irq < 0) {
+		printk(KERN_INFO "platform_get_irq_byname(hif_client) failed\n");
+		rc = pfe->hif_client_irq;
+		goto err_hif_irq;
+	}
+#endif
+
+	pfe->dev = &pdev->dev;
+
+	/* Pulse bit 3 of AXI_RESET_1 to reset the PFE block before use. */
+	/* FIXME this needs to be done at the BSP level with proper locking */
+	writel(readl(AXI_RESET_1) | (1 << 3), AXI_RESET_1);
+	mdelay(1);
+	writel(readl(AXI_RESET_1) & ~(1 << 3), AXI_RESET_1);
+
+	/* Get the system clock */
+	clk_axi = clk_get(NULL,"axi");
+	if (IS_ERR(clk_axi)) {
+		printk(KERN_INFO "clk_get call failed\n");
+		rc = -ENXIO;
+		goto err_clk;
+	}
+	pfe->ctrl.clk_axi = clk_axi;
+	pfe->ctrl.sys_clk = clk_get_rate(clk_axi) / 1000; // save sys_clk value as KHz
+
+	rc = pfe_probe(pfe);
+	if (rc < 0)
+		goto err_probe;
+
+	return 0;
+
+/* Unwind ladder: each label releases the resource acquired immediately
+ * before the step that failed, then falls through to the next label. */
+err_probe:
+	clk_put(clk_axi);
+
+err_clk:
+err_hif_irq:
+	iounmap(pfe->ipsec_baseaddr);
+err_ipsec:
+	iounmap(pfe->iram_baseaddr);
+err_iram:
+	iounmap(pfe->apb_baseaddr);
+
+err_apb:
+	iounmap(pfe->cbus_baseaddr);
+
+err_axi:
+	iounmap(pfe->ddr_baseaddr);
+
+err_ddr:
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(pfe);
+
+err_alloc:
+	return rc;
+}
+
+
+/**
+ * pfe_platform_remove -
+ * Reverse of pfe_platform_probe: shut down the PFE core, hold the block
+ * in reset, release the AXI clock, unmap all regions and free the
+ * context. Returns whatever pfe_remove() returned.
+ */
+static int pfe_platform_remove(struct platform_device *pdev)
+{
+	struct pfe *pfe = platform_get_drvdata(pdev);
+	int rc;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	rc = pfe_remove(pfe);
+
+	/* Assert PFE reset (bit 3) and leave it asserted. */
+	/* FIXME this needs to be done at the BSP level with proper locking */
+	writel(readl(AXI_RESET_1) | (1 << 3), AXI_RESET_1);
+
+	clk_put(pfe->ctrl.clk_axi);
+	iounmap(pfe->ipsec_baseaddr);
+	iounmap(pfe->iram_baseaddr);
+	iounmap(pfe->apb_baseaddr);
+	iounmap(pfe->cbus_baseaddr);
+	iounmap(pfe->ddr_baseaddr);
+
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(pfe);
+
+	return rc;
+}
+
+
+/* Platform driver glue; binds against a platform device named "pfe". */
+static struct platform_driver pfe_platform_driver = {
+	.probe = pfe_platform_probe,
+	.remove = pfe_platform_remove,
+	.driver = {
+		.name = "pfe",
+	},
+};
+
+
+/* Module entry point: register the platform driver. */
+static int __init pfe_module_init(void)
+{
+	printk(KERN_INFO "%s\n", __func__);
+
+	return platform_driver_register(&pfe_platform_driver);
+}
+
+
+/* Module exit: unregister the driver (invokes remove() on bound devices). */
+static void __exit pfe_module_exit(void)
+{
+	platform_driver_unregister(&pfe_platform_driver);
+
+	printk(KERN_INFO "%s\n", __func__);
+}
+
+MODULE_LICENSE("GPL");
+module_init(pfe_module_init);
+module_exit(pfe_module_exit);
diff --git a/pfe_ctrl/pfe_sysfs.c b/pfe_ctrl/pfe_sysfs.c
new file mode 100644
index 0000000..2c3018e
--- /dev/null
+++ b/pfe_ctrl/pfe_sysfs.c
@@ -0,0 +1,831 @@
+/*
+ * (C) Copyright 2011
+ * Author : Mindspeed Technologies
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ * */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pfe_mod.h"
+#include "pfe_ctrl_hal.h"
+
+/* Clear-on-read flags: writing a non-zero value to the matching sysfs
+ * attribute makes subsequent reads clear the PE counters as they dump. */
+static unsigned long class_do_clear = 0;
+static unsigned long tmu_do_clear = 0;
+static unsigned long util_do_clear = 0;
+
+/* Dump the common per-PE status area starting at dmem_addr for PE 'id':
+ * a 4-char ASCII state tag, a heartbeat counter, rx and tx (or queue
+ * status for TMU PEs) counters, a drop counter and the sampled CPU load,
+ * followed by an optional "DBUG" scratch area of 16 words. Non-zero
+ * counters are cleared when do_clear is set. Returns bytes appended. */
+static ssize_t pe_status(char *buf, int id, u32 dmem_addr, unsigned long do_clear)
+{
+	ssize_t len = 0;
+	u32 val;
+	char statebuf[5];
+	struct pfe_cpumon *cpumon = &pfe->cpumon;
+	u32 debug_indicator;
+	u32 debug[16];
+
+	/* first word is the raw 4-byte state string */
+	*(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
+	dmem_addr += 4;
+
+	statebuf[4] = '\0';
+	len += sprintf(buf + len, "state=%4s ", statebuf);
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	dmem_addr += 4;
+	len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	if (do_clear && val)
+		pe_dmem_write(id, 0, dmem_addr, 4);
+	dmem_addr += 4;
+	len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	if (do_clear && val)
+		pe_dmem_write(id, 0, dmem_addr, 4);
+	dmem_addr += 4;
+	/* TMU PEs report queue status in this slot instead of a tx count */
+	if (id >= TMU0_ID && id <= TMU_MAX_ID)
+		len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
+	else
+		len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	if (do_clear && val)
+		pe_dmem_write(id, 0, dmem_addr, 4);
+	dmem_addr += 4;
+	if (val)
+		len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
+
+	len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
+
+	len += sprintf(buf + len, "\n");
+
+	/* Optional debug area: present when firmware wrote the "DBUG" tag. */
+	debug_indicator = pe_dmem_read(id, dmem_addr, 4);
+	dmem_addr += 4;
+	if (!strncmp((char *)&debug_indicator, "DBUG", 4))
+	{
+		int j, last = 0;
+		for (j = 0; j < 16; j++)
+		{
+			debug[j] = pe_dmem_read(id, dmem_addr, 4);
+			if (debug[j])
+			{
+				if (do_clear)
+					pe_dmem_write(id, 0, dmem_addr, 4);
+				last = j + 1;
+			}
+			dmem_addr += 4;
+		}
+		/* print words up to the last non-zero one, 8 per line */
+		for (j = 0; j < last; j++)
+		{
+			len += sprintf(buf + len, "%08x%s", cpu_to_be32(debug[j]),
+					(j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
+		}
+	}
+
+	return len;
+}
+
+/* Append classifier per-PHY statistics. Two register strides are in play:
+ * 0x28 for packet/error counters, 0x10 for protocol counters; PHY4's
+ * packet counters are not part of the regular stride, hence the phy == 3
+ * special case. */
+static ssize_t class_phy_stats(char *buf, int phy)
+{
+	ssize_t len = 0;
+	int off1 = phy * 0x28;
+	int off2 = phy * 0x10;
+
+	if (phy == 3)
+		off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
+
+	len += sprintf(buf + len, "phy: %d\n", phy);
+	len += sprintf(buf + len, "  rx:   %10u, tx:   %10u, intf:    %10u, ipv4:    %10u, ipv6: %10u\n",
+			readl(CLASS_PHY1_RX_PKTS + off1), readl(CLASS_PHY1_TX_PKTS + off1),
+			readl(CLASS_PHY1_INTF_MATCH_PKTS + off1), readl(CLASS_PHY1_V4_PKTS + off1),
+			readl(CLASS_PHY1_V6_PKTS + off1));
+
+	len += sprintf(buf + len, "  icmp: %10u, igmp: %10u, tcp:     %10u, udp:     %10u\n",
+			readl(CLASS_PHY1_ICMP_PKTS + off2), readl(CLASS_PHY1_IGMP_PKTS + off2),
+			readl(CLASS_PHY1_TCP_PKTS + off2), readl(CLASS_PHY1_UDP_PKTS + off2));
+
+	len += sprintf(buf + len, "  err\n");
+	len += sprintf(buf + len, "  lp:   %10u, intf: %10u, l3:      %10u, chcksum: %10u, ttl:  %10u\n",
+			readl(CLASS_PHY1_LP_FAIL_PKTS + off1), readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
+			readl(CLASS_PHY1_L3_FAIL_PKTS + off1), readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
+			readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
+
+	return len;
+}
+
+/* Append TEQ and LLM statistics for one TMU queue. Writing the TMU/queue
+ * selector into TMU_TEQ_CTRL / TMU_LLM_CTRL latches which queue the
+ * subsequent status registers report on (stateful hardware side effect). */
+static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
+{
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
+
+	/* Select queue */
+	writel((tmu << 8) | queue, TMU_TEQ_CTRL);
+	writel((tmu << 8) | queue, TMU_LLM_CTRL);
+
+	len += sprintf(buf + len, "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
+			readl(TMU_TEQ_DROP_STAT), readl(TMU_TEQ_TRANS_STAT),
+			readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
+			readl(TMU_LLM_QUE_DROPCNT));
+
+	return len;
+}
+
+
+/* Append per-queue statistics for all 16 queues of the given TMU. */
+static ssize_t tmu_queues(char *buf, int tmu)
+{
+	ssize_t n = 0;
+	int q;
+
+	for (q = 0; q < 16; q++)
+		n += tmu_queue_stats(buf + n, tmu, q);
+
+	return n;
+}
+
+/* Hex-dump 160 words of a TMU PE's context area from DMEM, interleaving
+ * section headers at the fixed word offsets where the shaper, scheduler,
+ * queue, qlenmask and qresultmap structures begin. */
+static ssize_t tmu_ctx(char *buf, int tmu)
+{
+	ssize_t len = 0;
+	int i;
+	u32 val, tmu_context_addr = TMU_CONTEXT_ADDR;
+
+	len += sprintf(buf+len, " TMU %d \n", TMU0_ID+tmu);
+	for (i = 1; i <= 160 ; i++, tmu_context_addr += 4)
+	{
+		val = pe_dmem_read(TMU0_ID+tmu, tmu_context_addr , 4);
+		if (i == 5)
+			len += sprintf(buf+len, "\nShapers: Each shaper structure is 8 bytes and there are 10 shapers\n");
+
+		if (i == 25)
+			len += sprintf(buf+len, "\nScheduler: Each scheduler structure is 48 bytes and there are 8 schedulers\n");
+		if (i == 121)
+			len += sprintf(buf+len, "\nQueue: Each queue structure is 2 bytes and there are 16 queues\n");
+
+		if (i == 129)
+			len += sprintf(buf+len, "\nqlenmasks array for 16 queues\n");
+		if (i == 145)
+			len += sprintf(buf+len, "\nqresultmap array for 16 queues\n");
+		/* 8 words per output line */
+		if (i%8 == 0)
+			len += sprintf(buf+len, "%08x \n", cpu_to_be32(val));
+		else
+			len += sprintf(buf+len, "%08x ", cpu_to_be32(val));
+	}
+
+	len += sprintf(buf+len, "\n");
+
+	return len;
+}
+
+/* Decode a hardware block version register — [31:24] revision,
+ * [23:16] version, [15:0] id — and append one formatted line to buf. */
+static ssize_t block_version(char *buf, void *addr)
+{
+	u32 reg = readl(addr);
+
+	return sprintf(buf, "revision: %x, version: %x, id: %x\n",
+			(reg >> 24) & 0xff, (reg >> 16) & 0xff, reg & 0xffff);
+}
+
+/* Append buffer-manager (BMU) status: version plus buffer size/counters
+ * read from the given register base. */
+static ssize_t bmu(char *buf, int id, void *base)
+{
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "bmu: %d\n  ", id);
+
+	len += block_version(buf + len, base + BMU_VERSION);
+
+	/* BMU_BUF_SIZE holds log2 of the buffer size */
+	len += sprintf(buf + len, "  buf size:  %x\n", (1 << readl(base + BMU_BUF_SIZE)));
+	len += sprintf(buf + len, "  buf count: %x\n", readl(base + BMU_BUF_CNT));
+	len += sprintf(buf + len, "  buf rem:   %x\n", readl(base + BMU_REM_BUF_CNT));
+	len += sprintf(buf + len, "  buf curr:  %x\n", readl(base + BMU_CURR_BUF_CNT));
+	len += sprintf(buf + len, "  free err:  %x\n", readl(base + BMU_FREE_ERR_ADDR));
+
+	return len;
+}
+
+/* Append GPI (generic packet interface) status: version, FIFO status and
+ * the packed tx/rx FIFO debug fields, plus the overrun drop counter. */
+static ssize_t gpi(char *buf, int id, void *base)
+{
+	ssize_t len = 0;
+	u32 val;
+
+	len += sprintf(buf + len, "gpi%d:\n  ", id);
+	len += block_version(buf + len, base + GPI_VERSION);
+
+	len += sprintf(buf + len, "  tx under stick: %x\n", readl(base + GPI_FIFO_STATUS));
+	/* GPI_FIFO_DEBUG packs four FIFO occupancy fields into one word */
+	val = readl(base + GPI_FIFO_DEBUG);
+	len += sprintf(buf + len, "  tx pkts:        %x\n", (val >> 23) & 0x3f);
+	len += sprintf(buf + len, "  rx pkts:        %x\n", (val >> 18) & 0x3f);
+	len += sprintf(buf + len, "  tx bytes:       %x\n", (val >> 9) & 0x1ff);
+	len += sprintf(buf + len, "  rx bytes:       %x\n", (val >> 0) & 0x1ff);
+	len += sprintf(buf + len, "  overrun:        %x\n", readl(base + GPI_OVERRUN_DROPCNT));
+
+	return len;
+}
+
+/* sysfs 'class' store handler: set the clear-on-read flag (any numeric
+ * base accepted; non-zero enables clearing on the next read). */
+static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	class_do_clear = simple_strtoul(buf, NULL, 0);
+	return count;
+}
+
+/* sysfs 'class' show handler: per-PE program counter, status area and
+ * load for every class PE, then global classifier registers and per-PHY
+ * statistics. */
+static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int id;
+	u32 val;
+	struct pfe_cpumon *cpumon = &pfe->cpumon;
+
+	len += block_version(buf + len, CLASS_VERSION);
+
+	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
+	{
+		len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
+
+		val = readl(CLASS_PE0_DEBUG + id * 4);
+		/* NOTE(review): the literal '1' prefix before the 16-bit PC
+		 * looks intentional but is undocumented here — confirm. */
+		len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
+
+		len += pe_status(buf + len, id, PESTATUS_ADDR_CLASS, class_do_clear);
+	}
+	len += sprintf(buf + len, "aggregate load=%d%%\n\n", cpumon->class_usage_pct);
+
+	len += sprintf(buf + len, "pe status:   0x%x\n", readl(CLASS_PE_STATUS));
+	len += sprintf(buf + len, "max buf cnt: 0x%x   afull thres: 0x%x\n", readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
+	len += sprintf(buf + len, "tsq max cnt: 0x%x   tsq fifo thres: 0x%x\n", readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
+	len += sprintf(buf + len, "state:       0x%x\n", readl(CLASS_STATE));
+
+	len += class_phy_stats(buf + len, 0);
+	len += class_phy_stats(buf + len, 1);
+	len += class_phy_stats(buf + len, 2);
+	len += class_phy_stats(buf + len, 3);
+
+	return len;
+}
+
+/* sysfs 'tmu' store handler: set the TMU clear-on-read flag. */
+static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	tmu_do_clear = simple_strtoul(buf, NULL, 0);
+	return count;
+}
+
+/* sysfs 'tmu' show handler: status area for every TMU PE, then the TMU
+ * input-queue registers (wr/rd pointers are packed into TMU_INQ_STAT). */
+static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int id;
+	u32 val;
+
+	len += block_version(buf + len, TMU_VERSION);
+
+	for (id = TMU0_ID; id <= TMU_MAX_ID; id++)
+	{
+		len += sprintf(buf + len, "%d: ", id - TMU0_ID);
+
+		len += pe_status(buf + len, id, PESTATUS_ADDR_TMU, tmu_do_clear);
+	}
+
+	len += sprintf(buf + len, "pe status:    %x\n", readl(TMU_PE_STATUS));
+	len += sprintf(buf + len, "inq fifo cnt: %x\n", readl(TMU_PHY_INQ_FIFO_CNT));
+	val = readl(TMU_INQ_STAT);
+	len += sprintf(buf + len, "inq wr ptr:     %x\n", val & 0x3ff);
+	len += sprintf(buf + len, "inq rd ptr:     %x\n", val >> 10);
+
+
+	return len;
+}
+
+
+/* Drop accounting: the DMEM_SH2 macros declare host-side shadows of the
+ * firmware's shared drop-counter arrays so their DMEM addresses can be
+ * computed with virt_to_*_dmem() in pfe_show_drops(). */
+static unsigned long drops_do_clear = 0;
+static u32 CLASS_DMEM_SH2(drop_counter)[CLASS_NUM_DROP_COUNTERS];
+static u32 UTIL_DMEM_SH2(drop_counter)[UTIL_NUM_DROP_COUNTERS];
+
+/* Human-readable labels, index-matched to the class PE drop counters. */
+char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
+	"ICC",
+	"Host Pkt Error",
+	"Rx Error",
+	"IPsec Outbound",
+	"IPsec Inbound",
+	"EXPT IPsec Error",
+	"Reassembly",
+	"Fragmenter",
+	"NAT-T",
+	"Socket",
+	"Multicast",
+	"NAT-PT",
+	"Tx Disabled",
+};
+
+/* Human-readable labels, index-matched to the util PE drop counters. */
+char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
+	"IPsec Outbound",
+	"IPsec Inbound",
+	"IPsec Rate Limiter",
+	"Fragmenter",
+	"Socket",
+	"Tx Disabled",
+	"Rx Error",
+};
+
+/* sysfs 'drops' store handler: non-zero arms clear-on-read for the next
+ * read of the drops attribute. */
+static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	drops_do_clear = simple_strtoul(buf, NULL, 0);
+	return count;
+}
+
+/* sysfs 'drops' show handler: aggregate drop counters from all class PEs,
+ * the util PE and the four TMUs, printing only the non-zero ones. When
+ * drops_do_clear is armed, PE counters are cleared as they are read
+ * (PEs are stopped/restarted around the access for consistency).
+ *
+ * Fix: num_tmu_drops was accumulated as `+= tmu_drops[tmu]` inside the
+ * 16-iteration queue loop, adding the running total on every pass and
+ * inflating the count up to 16x per TMU; each TMU total is now added
+ * exactly once, after its queue loop. */
+static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int id, dropnum;
+	int tmu, queue;
+	u32 val;
+	u32 dmem_addr;
+	int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+	/* static: TMU drop stats accumulate across reads until cleared */
+	static u32 tmu_drops[4];
+
+	/* Class PEs: sum each counter across all PEs into the shadow array. */
+	memset(class_drop_counter, 0, sizeof(class_drop_counter));
+	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
+	{
+		if (drops_do_clear)
+			pe_sync_stop(ctrl, (1 << id));
+		for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS; dropnum++)
+		{
+			dmem_addr = virt_to_class_dmem(&class_drop_counter[dropnum]);
+			val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
+			class_drop_counter[dropnum] += val;
+			num_class_drops += val;
+			if (drops_do_clear)
+				pe_dmem_write(id, 0, dmem_addr, 4);
+		}
+		if (drops_do_clear)
+			pe_start(ctrl, (1 << id));
+	}
+
+	/* Util PE: single PE, so plain assignment instead of accumulation. */
+	if (drops_do_clear)
+		pe_sync_stop(ctrl, (1 << UTIL_ID));
+	for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++)
+	{
+		dmem_addr = virt_to_util_dmem(&util_drop_counter[dropnum]);
+		val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
+		util_drop_counter[dropnum] = val;
+		num_util_drops += val;
+		if (drops_do_clear)
+			pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
+	}
+	if (drops_do_clear)
+		pe_start(ctrl, (1 << UTIL_ID));
+
+	for (tmu = 0; tmu < 4; tmu++)
+	{
+		for (queue = 0; queue < 16; queue++)
+		{
+			writel((tmu << 8) | queue, TMU_TEQ_CTRL);	/* Select queue */
+			val = readl(TMU_TEQ_DROP_STAT);
+			tmu_drops[tmu] += val;
+		}
+		/* count each TMU's cumulative total exactly once */
+		num_tmu_drops += tmu_drops[tmu];
+	}
+
+	if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
+		len += sprintf(buf + len, "No PE drops\n\n");
+
+	if (num_class_drops > 0)
+	{
+		len += sprintf(buf + len, "Class PE drops --\n");
+		for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS; dropnum++)
+		{
+			if (class_drop_counter[dropnum] > 0)
+				len += sprintf(buf + len, "  %s: %d\n", class_drop_description[dropnum], class_drop_counter[dropnum]);
+		}
+		len += sprintf(buf + len, "\n");
+	}
+
+	if (num_util_drops > 0)
+	{
+		len += sprintf(buf + len, "Util PE drops --\n");
+		for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++)
+		{
+			if (util_drop_counter[dropnum] > 0)
+				len += sprintf(buf + len, "  %s: %d\n", util_drop_description[dropnum], util_drop_counter[dropnum]);
+		}
+		len += sprintf(buf + len, "\n");
+	}
+
+	if (num_tmu_drops > 0)
+	{
+		len += sprintf(buf + len, "TMU drops --\n");
+		for (tmu = 0; tmu < 4; tmu++)
+		{
+			if (tmu_drops[tmu] > 0)
+				len += sprintf(buf + len, "  TMU%d: %d\n", tmu, tmu_drops[tmu]);
+			if (drops_do_clear)
+				tmu_drops[tmu] = 0;
+		}
+		len += sprintf(buf + len, "\n");
+	}
+
+	return len;
+}
+
+/* Per-TMU sysfs show wrappers around tmu_queues() (one attribute each,
+ * since device attributes carry no index argument). */
+static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_queues(buf, 0);
+}
+
+static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_queues(buf, 1);
+}
+
+static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_queues(buf, 2);
+}
+
+static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_queues(buf, 3);
+}
+
+/* Per-TMU sysfs show wrappers around tmu_ctx() (context hex dumps). */
+static ssize_t pfe_show_tmu0_ctx(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_ctx(buf, 0);
+}
+static ssize_t pfe_show_tmu1_ctx(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_ctx(buf, 1);
+}
+static ssize_t pfe_show_tmu2_ctx(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_ctx(buf, 2);
+}
+
+static ssize_t pfe_show_tmu3_ctx(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return tmu_ctx(buf, 3);
+}
+
+
+#if !defined(CONFIG_UTIL_DISABLED)
+/* sysfs 'util' store handler: set the util-PE clear-on-read flag. */
+static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	util_do_clear = simple_strtoul(buf, NULL, 0);
+	return count;
+}
+
+/* sysfs 'util' show handler: the util PE must be stopped while its
+ * status area is read, then restarted; global util registers follow. */
+static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+
+	len += block_version(buf + len, UTIL_VERSION);
+
+	pe_sync_stop(ctrl, (1 << UTIL_ID));
+	len += pe_status(buf + len, UTIL_ID, PESTATUS_ADDR_UTIL, util_do_clear);
+	pe_start(ctrl, (1 << UTIL_ID));
+
+	len += sprintf(buf + len, "pe status:   %x\n", readl(UTIL_PE_STATUS));
+	len += sprintf(buf + len, "max buf cnt: %x\n", readl(UTIL_MAX_BUF_CNT));
+	len += sprintf(buf + len, "tsq max cnt: %x\n", readl(UTIL_TSQ_MAX_CNT));
+
+	return len;
+}
+#endif
+
+/* sysfs 'bmu' show handler: dump both buffer-manager units. */
+static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t n;
+
+	n = bmu(buf, 1, BMU1_BASE_ADDR);
+	n += bmu(buf + n, 2, BMU2_BASE_ADDR);
+
+	return n;
+}
+
+/* sysfs 'hif' show handler: version, buffer-descriptor pointers and DMA
+ * status for both the copying HIF and the no-copy HIF. */
+static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "hif:\n  ");
+	len += block_version(buf + len, HIF_VERSION);
+
+	len += sprintf(buf + len, "  tx curr bd:    %x\n", readl(HIF_TX_CURR_BD_ADDR));
+	len += sprintf(buf + len, "  tx status:     %x\n", readl(HIF_TX_STATUS));
+	len += sprintf(buf + len, "  tx dma status: %x\n", readl(HIF_TX_DMA_STATUS));
+
+	len += sprintf(buf + len, "  rx curr bd:    %x\n", readl(HIF_RX_CURR_BD_ADDR));
+	len += sprintf(buf + len, "  rx status:     %x\n", readl(HIF_RX_STATUS));
+	len += sprintf(buf + len, "  rx dma status: %x\n", readl(HIF_RX_DMA_STATUS));
+
+	len += sprintf(buf + len, "hif nocopy:\n  ");
+	len += block_version(buf + len, HIF_NOCPY_VERSION);
+
+	len += sprintf(buf + len, "  tx curr bd:    %x\n", readl(HIF_NOCPY_TX_CURR_BD_ADDR));
+	len += sprintf(buf + len, "  tx status:     %x\n", readl(HIF_NOCPY_TX_STATUS));
+	len += sprintf(buf + len, "  tx dma status: %x\n", readl(HIF_NOCPY_TX_DMA_STATUS));
+
+	len += sprintf(buf + len, "  rx curr bd:    %x\n", readl(HIF_NOCPY_RX_CURR_BD_ADDR));
+	len += sprintf(buf + len, "  rx status:     %x\n", readl(HIF_NOCPY_RX_STATUS));
+	len += sprintf(buf + len, "  rx dma status: %x\n", readl(HIF_NOCPY_RX_DMA_STATUS));
+
+	return len;
+}
+
+
+/* sysfs 'gpi' show handler: the three ethernet GPIs plus the host GPI. */
+static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t n;
+
+	n = gpi(buf, 0, EGPI1_BASE_ADDR);
+	n += gpi(buf + n, 1, EGPI2_BASE_ADDR);
+	n += gpi(buf + n, 2, EGPI3_BASE_ADDR);
+	n += gpi(buf + n, 3, HGPI_BASE_ADDR);
+
+	return n;
+}
+void ipsec_standalone_init(void);
+/* sysfs 'ipsec_cmd' handler. Despite the _set_ name this has a show()
+ * signature and is registered read-only (see DEVICE_ATTR below), so
+ * READING the attribute triggers ipsec_standalone_init().
+ * NOTE(review): repeated reads call the init again — confirm that is
+ * safe/idempotent in ipsec_standalone_init(). */
+static ssize_t pfe_set_ipsec_cmd(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+
+	ipsec_standalone_init();
+	len += sprintf(buf + len, " ipsec details added ");
+	return len;
+}
+
+/* sysfs 'ipsec_cntrs' show handler: dump the util PE's IPsec counter
+ * block — four outbound counters, four inbound counters and the final
+ * workaround-drop counter — reading consecutive 32-bit DMEM words
+ * starting at IPSEC_CNTRS_ADDR. */
+static ssize_t pfe_show_ipsec_cntrs(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/* Label tables index-matched to the DMEM word order; the last
+	 * inbound entry terminates its output line. */
+	static const char *out_fmt[] = {
+		"  rcvd: %08x ", "  xmit: %08x ", "  tohost: %08x ", "  dropped: %08x ",
+	};
+	static const char *in_fmt[] = {
+		"  rcvd: %08x ", "  xmit: %08x ", "  tohost: %08x ", "  dropped: %08x\n ",
+	};
+	u32 addr = IPSEC_CNTRS_ADDR;
+	ssize_t n = 0;
+	int i;
+
+	n += sprintf(buf + n, " \noutbound cntrs:\n ");
+	for (i = 0; i < 4; i++, addr += 4)
+		n += sprintf(buf + n, out_fmt[i], cpu_to_be32(pe_dmem_read(UTIL_ID, addr, 4)));
+
+	n += sprintf(buf + n, " \ninbound cntrs:\n ");
+	for (i = 0; i < 4; i++, addr += 4)
+		n += sprintf(buf + n, in_fmt[i], cpu_to_be32(pe_dmem_read(UTIL_ID, addr, 4)));
+
+	n += sprintf(buf + n, "  wa_drops: %08x\n ", cpu_to_be32(pe_dmem_read(UTIL_ID, addr, 4)));
+
+	return n;
+}
+
+/* sysfs 'pfemem' show handler: report driver kernel-memory usage in
+ * bytes and KB (rounded up). */
+static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	u32 bytes = pfe->memmon.kernel_memory_allocated;
+
+	return sprintf(buf, "Kernel Memory: %d Bytes (%d KB)\n",
+			bytes, (bytes + 1023) / 1024);
+}
+
+#ifdef HIF_NAPI_STATS
+/* sysfs 'hif_napi_stats' show handler: dump the HIF NAPI event counters. */
+static ssize_t pfe_show_hif_napi_stats(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pfe *pfe = platform_get_drvdata(pdev);
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "sched:  %u\n", pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
+	len += sprintf(buf + len, "poll:   %u\n", pfe->hif.napi_counters[NAPI_POLL_COUNT]);
+	len += sprintf(buf + len, "packet: %u\n", pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
+	len += sprintf(buf + len, "budget: %u\n", pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
+	len += sprintf(buf + len, "desc:   %u\n", pfe->hif.napi_counters[NAPI_DESC_COUNT]);
+	len += sprintf(buf + len, "full:   %u\n", pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
+
+	return len;
+}
+
+/* sysfs 'hif_napi_stats' store handler: any write zeroes the counters. */
+static ssize_t pfe_set_hif_napi_stats(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pfe *pfe = platform_get_drvdata(pdev);
+
+	memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
+
+	return count;
+}
+
+static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats, pfe_set_hif_napi_stats);
+#endif
+
+
+/* sysfs attribute declarations. 0644 entries support clear-on-read
+ * arming via write; 0444 entries are read-only. Note ipsec_cmd wires the
+ * "set"-named handler as its SHOW routine (it has a show signature). */
+static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
+static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
+#if !defined(CONFIG_UTIL_DISABLED)
+static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
+#endif
+static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
+static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
+static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
+static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
+static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
+static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
+static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
+static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
+static DEVICE_ATTR(tmu0_ctx, 0444, pfe_show_tmu0_ctx, NULL);
+static DEVICE_ATTR(tmu1_ctx, 0444, pfe_show_tmu1_ctx, NULL);
+static DEVICE_ATTR(tmu2_ctx, 0444, pfe_show_tmu2_ctx, NULL);
+static DEVICE_ATTR(tmu3_ctx, 0444, pfe_show_tmu3_ctx, NULL);
+static DEVICE_ATTR(ipsec_cntrs, 0444, pfe_show_ipsec_cntrs, NULL);
+static DEVICE_ATTR(ipsec_cmd, 0444, pfe_set_ipsec_cmd, NULL);
+static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
+
+
+/* Create every PFE sysfs attribute on pfe->dev. On any failure the
+ * unwind ladder removes, in reverse order, every attribute created so
+ * far, and -1 is returned. Returns 0 on success. */
+int pfe_sysfs_init(struct pfe *pfe)
+{
+	if (device_create_file(pfe->dev, &dev_attr_class))
+		goto err_class;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu))
+		goto err_tmu;
+
+#if !defined(CONFIG_UTIL_DISABLED)
+	if (device_create_file(pfe->dev, &dev_attr_util))
+		goto err_util;
+#endif
+
+	if (device_create_file(pfe->dev, &dev_attr_bmu))
+		goto err_bmu;
+
+	if (device_create_file(pfe->dev, &dev_attr_hif))
+		goto err_hif;
+
+	if (device_create_file(pfe->dev, &dev_attr_gpi))
+		goto err_gpi;
+
+	if (device_create_file(pfe->dev, &dev_attr_drops))
+		goto err_drops;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
+		goto err_tmu0_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
+		goto err_tmu1_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
+		goto err_tmu2_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
+		goto err_tmu3_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu0_ctx))
+		goto err_tmu0_ctx;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu1_ctx))
+		goto err_tmu1_ctx;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu2_ctx))
+		goto err_tmu2_ctx;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu3_ctx))
+		goto err_tmu3_ctx;
+
+	if (device_create_file(pfe->dev, &dev_attr_ipsec_cmd))
+		goto err_ipsec_cmd;
+
+	if (device_create_file(pfe->dev, &dev_attr_ipsec_cntrs))
+		goto err_ipsec_cntrs;
+
+	if (device_create_file(pfe->dev, &dev_attr_pfemem))
+		goto err_pfemem;
+
+#ifdef HIF_NAPI_STATS
+	if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
+		goto err_hif_napi_stats;
+#endif
+
+	return 0;
+
+/* Unwind ladder: each label removes the attribute created just before
+ * the failing step and falls through to the next one. */
+#ifdef HIF_NAPI_STATS
+err_hif_napi_stats:
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+#endif
+
+err_pfemem:
+	device_remove_file(pfe->dev, &dev_attr_ipsec_cntrs);
+
+err_ipsec_cntrs:
+	device_remove_file(pfe->dev, &dev_attr_ipsec_cmd);
+
+err_ipsec_cmd:
+	device_remove_file(pfe->dev, &dev_attr_tmu3_ctx);
+
+err_tmu3_ctx:
+	device_remove_file(pfe->dev, &dev_attr_tmu2_ctx);
+
+err_tmu2_ctx:
+	device_remove_file(pfe->dev, &dev_attr_tmu1_ctx);
+
+err_tmu1_ctx:
+	device_remove_file(pfe->dev, &dev_attr_tmu0_ctx);
+
+err_tmu0_ctx:
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+
+err_tmu3_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+
+err_tmu2_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+
+err_tmu1_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+
+err_tmu0_queues:
+	device_remove_file(pfe->dev, &dev_attr_drops);
+
+err_drops:
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+
+err_gpi:
+	device_remove_file(pfe->dev, &dev_attr_hif);
+
+err_hif:
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+
+err_bmu:
+#if !defined(CONFIG_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+
+err_util:
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+
+err_tmu:
+	device_remove_file(pfe->dev, &dev_attr_class);
+
+err_class:
+	return -1;
+}
+
+
+/* Remove all PFE sysfs attributes (reverse order of creation). */
+void pfe_sysfs_exit(struct pfe *pfe)
+{
+#ifdef HIF_NAPI_STATS
+	device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
+#endif
+
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+	device_remove_file(pfe->dev, &dev_attr_ipsec_cntrs);
+	device_remove_file(pfe->dev, &dev_attr_ipsec_cmd);
+	device_remove_file(pfe->dev, &dev_attr_tmu3_ctx);
+	device_remove_file(pfe->dev, &dev_attr_tmu2_ctx);
+	device_remove_file(pfe->dev, &dev_attr_tmu1_ctx);
+	device_remove_file(pfe->dev, &dev_attr_tmu0_ctx);
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+	device_remove_file(pfe->dev, &dev_attr_drops);
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+	device_remove_file(pfe->dev, &dev_attr_hif);
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+#if !defined(CONFIG_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+	device_remove_file(pfe->dev, &dev_attr_class);
+}
+
diff --git a/pfe_ctrl/pfe_sysfs.h b/pfe_ctrl/pfe_sysfs.h
new file mode 100644
index 0000000..5fe02a5
--- /dev/null
+++ b/pfe_ctrl/pfe_sysfs.h
@@ -0,0 +1,15 @@
+#ifndef _PFE_SYSFS_H_
+#define _PFE_SYSFS_H_
+
+#include <linux/proc_fs.h>
+
+/* DMEM offsets of the per-PE status area for each PE family.
+ * NOTE(review): these must match the firmware's memory layout — confirm
+ * against the PE firmware headers when updating either side. */
+#define	PESTATUS_ADDR_CLASS	0x800
+#define	PESTATUS_ADDR_TMU	0x80
+#define	PESTATUS_ADDR_UTIL	0x0
+
+/* DMEM offsets of the TMU context block and util IPsec counter block. */
+#define TMU_CONTEXT_ADDR	0x3c8
+#define IPSEC_CNTRS_ADDR	0x840
+
+int pfe_sysfs_init(struct pfe *pfe);
+void pfe_sysfs_exit(struct pfe *pfe);
+#endif /* _PFE_SYSFS_H_ */
diff --git a/pfe_ctrl/pfe_tso.c b/pfe_ctrl/pfe_tso.c
new file mode 100644
index 0000000..566b25f
--- /dev/null
+++ b/pfe_ctrl/pfe_tso.c
@@ -0,0 +1,316 @@
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/timer.h>
+
+#include <net/ip.h>
+#include <net/sock.h>
+#include "pfe_mod.h"
+#include "pfe_tso.h"
+
+
+/** pfe_tx_skb_unmap
+ *
+ * Release every DMA mapping recorded for this skb by the transmit path
+ * (pfe_tso() stores a struct hif_frag_info_s — entry count plus a pointer
+ * into the per-queue dma_map_array — in skb->cb).  Called when the skb is
+ * finally freed after transmission.
+ */
+void pfe_tx_skb_unmap( struct sk_buff *skb)
+{
+	struct hif_frag_info_s *fi = (struct hif_frag_info_s *)skb->cb;
+	int ii;
+	//printk("frag count : %d, map :%p\n", fi->frag_count, fi->map);
+
+	/* one unmap per scatter entry recorded at map time */
+	for (ii = 0; ii < fi->frag_count; ii++) {
+		struct hif_frag_dma_map_s *dma = fi->map + ii;
+
+		// printk("frag addr : %x len : %d\n", dma->data, dma->len);
+		dma_unmap_single(pfe->hif.dev, dma->data, dma->len, DMA_TO_DEVICE);
+	}
+}
+
+/** pfe_tso_to_desc
+ *
+ * Compute the number of HIF payload descriptors needed to carry the TCP
+ * payload of a GSO skb, by slicing the linear area and then every page
+ * fragment into gso_size-sized segments (a partial slice still costs one
+ * descriptor).  Headers are accounted for separately by the caller
+ * (pfe_tx_get_req_desc adds 2 * gso_segs for them).
+ *
+ * Fix: the rate-limited debug messages read "remainig leanier"; corrected
+ * to "remaining linear".
+ */
+static unsigned int pfe_tso_to_desc(struct sk_buff *skb)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	skb_frag_t *f;		/* current fragment */
+	int len;
+	int f_size;		/* size of the current fragment */
+	unsigned int s_size;	/* segment size */
+	unsigned int n_desc = 0;
+	int i;
+
+	/* linear part */
+	len = skb_headlen(skb) - sh_len;
+	s_size = sh->gso_size;
+
+	while (len >= s_size) {
+		if (printk_ratelimit())
+			printk(KERN_INFO "%s: remaining linear len : %d\n", __func__, len);
+
+		len -= s_size;
+		n_desc++;
+	}
+
+	if (len) {
+		if (printk_ratelimit())
+			printk(KERN_INFO "%s: remaining linear len : %d\n", __func__, len);
+
+		n_desc++;
+		/* the tail of the linear area starts a segment; the first
+		 * fragment only needs to supply the remainder of it */
+		s_size -= len;
+	}
+
+	/* fragment part */
+	for (i = 0; i < sh->nr_frags; i++) {
+		f = &sh->frags[i];
+		f_size = skb_frag_size(f);
+
+		while (f_size >= s_size) {
+			f_size -= s_size;
+			n_desc++;
+			s_size = sh->gso_size;
+		}
+
+		if (f_size) {
+			/* partial segment continues into the next fragment */
+			n_desc++;
+			s_size -= f_size;
+		}
+	}
+
+	return n_desc;
+
+}
+
+/** pfe_tx_get_req_desc
+ * Work out how many HIF descriptors and how many on-wire packets a given
+ * skb will consume.
+ * n_desc : number of HIF descriptors
+ * n_segs : number of segments on wire
+ */
+void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int *n_segs)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (skb_is_gso(skb)) {
+		/* TSO: payload scatters plus, for every segment, one header
+		 * copy and one hif/tso header descriptor (the 2x term).
+		 * gso_segs is also the on-wire packet count. */
+		*n_segs = shinfo->gso_segs;
+		*n_desc = pfe_tso_to_desc(skb) + 2 * shinfo->gso_segs;
+		return;
+	}
+
+	/* non-GSO: exactly one packet on the wire */
+	*n_segs = 1;
+
+	if (shinfo->nr_frags)
+		/* scattered data: one descriptor per fragment + linear part */
+		*n_desc = shinfo->nr_frags + 1;
+	else
+		/* plain linear skb */
+		*n_desc = 1;
+}
+
+/* advance a TX queue index with wrap-around (qdepth must be a power of 2) */
+#define inc_txq_idx(idxname) idxname = (idxname+1) & (qdepth-1)
+/** pfe_tso
+ *
+ * Software-assisted TSO: slices a GSO skb into gso_size-sized wire packets
+ * and pushes them to HIF queue @qno.  For each segment it emits a hif/tso
+ * header descriptor, a descriptor for the replicated eth/ip/tcp header,
+ * then payload scatters taken first from the linear area and then from the
+ * page fragments.  DMA mappings are done once per fragment (not per
+ * scatter) and recorded in skb->cb so pfe_tx_skb_unmap() can undo them.
+ *
+ * Returns the total number of bytes queued, or 0 if the protocol is
+ * neither IPv4 nor IPv6 (in which case the skb is freed here).
+ */
+int pfe_tso( struct sk_buff *skb, struct hif_client_s *client, struct tso_cb_s *tso, int qno, u32 init_ctrl)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	skb_frag_t *f;
+	struct tcphdr *th;
+	unsigned int tcp_off = skb_transport_offset(skb);
+	unsigned int ip_off = skb_network_offset(skb);
+	unsigned int sh_len = tcp_off + tcp_hdrlen(skb); /* segment header length (link + network + transport) */
+	unsigned int data_len = skb->len - sh_len;
+	unsigned int f_id; /* id of the current fragment */
+	unsigned int f_len; /* size of the current fragment */
+	unsigned int s_len; /* size of the current segment */
+	unsigned int ip_tcp_hdr_len;
+	unsigned int l_len; /* skb linear size */
+	unsigned int len;
+	int segment;
+	unsigned int off, f_off;
+	u32 ctrl;
+	unsigned int id;
+	unsigned int seq;
+	int wrIndx;
+	struct hif_frag_info_s *f_info;
+	struct hif_frag_dma_map_s *f_dma, *q_dma_base, *f_dma_first;
+	unsigned int tx_bytes = 0;
+	int qdepth = client->tx_qsize;
+
+	/* checksum offload is unconditional for TSO frames */
+	// ctrl = (skb->ip_summed == CHECKSUM_PARTIAL) ? HIF_CTRL_TX_CHECKSUM : 0;
+	ctrl = (HIF_CTRL_TX_CHECKSUM | init_ctrl);
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *ih = ip_hdr(skb);
+
+		/* seed the per-segment IP id from the original header */
+		id = ntohs(ih->id);
+
+		ctrl |= HIF_CTRL_TX_TSO;
+
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		id = 0;
+		ctrl |= HIF_CTRL_TX_TSO6;
+	}
+	else {
+		/* not IP: drop silently, report zero bytes queued */
+		kfree_skb(skb);
+		return 0;
+	}
+
+#ifdef PFE_TSO_STATS
+	/* histogram of skb lengths in 2KB buckets */
+	tso->len_counters[((u32)skb->len >> 11) & 0x1f]++;
+#endif
+
+	th = tcp_hdr(skb);
+	ip_tcp_hdr_len = sh_len - ip_off;
+
+	seq = ntohl(th->seq);
+
+	/* linear part */
+	l_len = skb_headlen(skb) - sh_len;
+	off = sh_len;
+
+	/* fragment part */
+	f_id = 0;
+	f_len = 0;
+	f = NULL;
+	f_off = 0;
+
+	f_info = (struct hif_frag_info_s *)skb->cb;
+
+	/* In case of TSO segmentation, we might need to transmit single skb->fragment
+	 * in multiple segments/scatters. So hif will perform cache flush, for each scatter
+	 * of same fragment separatly. Since cahche flush is expensive operatrion we will
+	 * perform dma mapping for full fragment here and provide physical addresses to HIF.
+	 *
+	 * Preserve dmamapping information corresponding to skb fragments in array, this
+	 * information is used while freeing SKB.
+	 */
+	wrIndx = hif_lib_get_tx_wrIndex(client, qno);
+	q_dma_base = (struct hif_frag_dma_map_s *)&tso->dma_map_array[qno][0];
+	f_dma = f_info->map = q_dma_base + wrIndx;
+	f_dma_first = f_dma;
+	/* map the whole linear area once; segments reference offsets into it */
+	f_dma->data = dma_map_single(pfe->hif.dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	f_dma->len = skb_headlen(skb);
+	f_info->frag_count = 1;
+	//printk("data : %x, len :%d base : %p\n",f_dma->data, f_dma->len, f_info->map);
+
+
+// printk("%s: orig_seq: %x, orig_id: %x, seg_size: %d, seg_n: %d, data_len: %d, hdr_len: %d skb_fras: %d\n", __func__, seq, id, s_len, sh->gso_segs, data_len, sh_len, sh->nr_frags);
+
+	/* one iteration per wire packet */
+	for (segment = 0; segment < sh->gso_segs; segment++) {
+		unsigned int hif_flags = HIF_TSO;
+
+		if (segment == (sh->gso_segs - 1)) {
+			ctrl |= HIF_CTRL_TX_TSO_END;
+		}
+
+		/* The last segment may be less than gso_size. */
+		if (data_len < sh->gso_size)
+			s_len = data_len;
+		else
+			s_len = sh->gso_size;
+
+		data_len -= s_len;
+
+// printk("%s: seq: %x, id: %x, ctrl: %x\n", __func__, seq, id, ctrl);
+
+		/* hif/tso header */
+		__hif_lib_xmit_tso_hdr(client, qno, ctrl, ip_off, id, ip_tcp_hdr_len + s_len, tcp_off, seq);
+
+		/* per-segment IP id and TCP sequence advance */
+		id++;
+		seq += s_len;
+
+		/* ethernet/ip/tcp header */
+		__hif_lib_xmit_pkt(client, qno, (void *)f_dma_first->data, sh_len, 0, HIF_DONT_DMA_MAP | hif_flags, skb);
+		tx_bytes += sh_len;
+
+		/* payload from the linear area first */
+		while (l_len && s_len)
+		{
+			len = s_len;
+			if (len > l_len)
+				len = l_len;
+
+			l_len -= len;
+			s_len -= len;
+
+			if (!s_len)
+				hif_flags |= HIF_LAST_BUFFER;
+
+			/* NOTE(review): "lienear" typo kept — runtime string */
+			if (printk_ratelimit())
+				printk(KERN_INFO "%s: Extra lienear data addr: %p, len: %d, flags: %x\n", __func__,
+						skb->data + off, len, hif_flags);
+
+			__hif_lib_xmit_pkt(client, qno, (void *)((u32)f_dma->data + off),
+					len, 0, hif_flags | HIF_DONT_DMA_MAP, skb);
+			off += len;
+			tx_bytes += len;
+		}
+
+		/* then payload from the page fragments */
+		while (s_len) {
+			hif_flags = HIF_TSO | HIF_DONT_DMA_MAP;
+
+			/* Advance as needed. */
+			if (!f_len) {
+				f = &sh->frags[f_id];
+				f_len = skb_frag_size(f);
+				f_off = 0;
+				f_id++;
+
+				/* In case of TSO segmentation, we might need to transmit single skb->fragment
+				 * in multiple segments/scatters. So hif will perform cache flush, for each scatter
+				 * of same fragment separatly. Since cahche flush is expensive operatrion we will
+				 * perform dma mapping for full fragment here and provide physical addresses to HIF.
+				 *
+				 * Preserve dmamapping information corresponding to skb fragments in array, this
+				 * information is used while freeing SKB.
+				 */
+				inc_txq_idx(wrIndx);
+				f_dma = q_dma_base + wrIndx;
+				f_dma->data = dma_map_single(pfe->hif.dev, skb_frag_address(f), f_len, DMA_TO_DEVICE);
+				f_dma->len = f_len;
+				f_info->frag_count++;
+				//printk("data : %x, len :%d\n",f_dma->data, f_dma->len);
+				//printk("%s: frag addr: %p, len: %d\n", __func__, f, f_len);
+			}
+
+			/* Use bytes from the current fragment. */
+			len = s_len;
+			if (len > f_len)
+				len = f_len;
+
+			f_len -= len;
+			s_len -= len;
+
+			if (!s_len) {
+				hif_flags |= HIF_LAST_BUFFER;
+
+				/* very last scatter of the very last segment */
+				if (segment == (sh->gso_segs - 1))
+					hif_flags |= HIF_DATA_VALID;
+			}
+
+			//printk("%s: scatter addr: %p, len: %d, flags: %x\n", __func__,
+			// skb_frag_address(f) + f_off, len, hif_flags);
+
+			__hif_lib_xmit_pkt(client, qno, (void *)((u32)f_dma->data + f_off), len, 0, hif_flags, skb);
+
+			f_off += len;
+			tx_bytes += len;
+		}
+	}
+
+	/* kick the HIF DMA once all descriptors are queued */
+	hif_tx_dma_start();
+
+	return tx_bytes;
+
+}
diff --git a/pfe_ctrl/pfe_tso.h b/pfe_ctrl/pfe_tso.h
new file mode 100644
index 0000000..473b97d
--- /dev/null
+++ b/pfe_ctrl/pfe_tso.h
@@ -0,0 +1,16 @@
+#ifndef _PFE_TSO_H_
+#define _PFE_TSO_H_
+
+/* enable the per-length TX histogram kept in tso_cb_s.len_counters */
+#define PFE_TSO_STATS
+
+/* Per-client software-TSO state. */
+struct tso_cb_s
+{
+	/* histogram of transmitted skb lengths, 2KB buckets (see pfe_tso) */
+	unsigned int len_counters[32];
+	/* This array is used to store the dma mapping for skb fragments */
+	/* NOTE(review): pfe_tso() casts rows of this array to
+	 * struct hif_frag_dma_map_s* and indexes by TX write index —
+	 * confirm the element type matches what pfe_tso() expects. */
+	struct hif_frag_info_s dma_map_array[EMAC_TXQ_CNT][EMAC_TXQ_DEPTH];
+};
+
+int pfe_tso( struct sk_buff *skb, struct hif_client_s *client, struct tso_cb_s *tso, int qno, u32 ctrl);
+void pfe_tx_skb_unmap( struct sk_buff *skb);
+void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int *n_segs);
+#endif /* _PFE_TSO_H_ */
diff --git a/pfe_ctrl/pfe_unit_test.c b/pfe_ctrl/pfe_unit_test.c
new file mode 100644
index 0000000..52cf994
--- /dev/null
+++ b/pfe_ctrl/pfe_unit_test.c
@@ -0,0 +1,571 @@
+#include "pfe_mod.h"
+#include "pfe_ctrl.h"
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+#if defined(CONFIG_UNIT_TEST_HIF)
+void hif_unit_test(struct pfe *pfe);
+#endif
+
+#define DMEM_TEST_BASE_ADDR 0x0 /* !!! For class overlaps with packets in dmem, for tmu overwrites exception vectors !!! */
+
+
+#define PMEM_TEST_BASE_ADDR 0x10000
+
+/* size of the test pattern, in 32-bit words (24 bytes total) */
+#define BUF_SIZE 6
+
+/* w: pattern written to PE memory; zero: clear pattern; r: read-back buffer */
+static u32 w[BUF_SIZE] = {0x01234567, 0x8988cdef, 0x00cc2233, 0x4455dd77, 0x8899aabb, 0xccddeeff};
+static u32 zero[BUF_SIZE] = {0, };
+static u32 r[BUF_SIZE];
+
+/* DMA-coherent bounce buffer shared with the PEs (virtual + bus address) */
+volatile void *buf_coherent;
+unsigned long buf_coherent_phys;
+
+/* memcmp_io - memcmp() usable on PCI BAR-mapped memory.
+ *
+ * On the PCI platform buffers may live in mapped device memory, so words
+ * are fetched with readl() instead of plain loads.  The original code
+ * dropped the trailing (len % 4) bytes even though callers pass lengths
+ * 1..24; compare them with readb() (assumes the mapping allows byte
+ * access — TODO confirm on target hardware).
+ * Returns 0 when equal, non-zero otherwise (callers only test truthiness).
+ */
+static int memcmp_io(void *a, void *b, int len)
+{
+#if defined(CONFIG_PLATFORM_PCI)
+	int rc;
+
+	for (;len >= 4; len -= 4)
+	{
+		rc = readl(a) - readl(b);
+		if (rc)
+			return rc;
+
+		a += 4;
+		b += 4;
+	}
+
+	/* trailing 1-3 bytes */
+	for (; len > 0; len--)
+	{
+		rc = readb(a) - readb(b);
+		if (rc)
+			return rc;
+
+		a++;
+		b++;
+	}
+
+	return 0;
+#else
+	return memcmp(a, b, len);
+#endif
+}
+
+/* memcpy_io - memcpy() usable on PCI BAR-mapped memory.
+ *
+ * Mirrors memcmp_io(): words are stored with writel() on the PCI
+ * platform.  The original code silently dropped the trailing (len % 4)
+ * bytes; copy them with writeb() so odd lengths are fully written
+ * (assumes the mapping allows byte access — TODO confirm on target).
+ */
+static void memcpy_io(void *dst, void *src, int len)
+{
+#if defined(CONFIG_PLATFORM_PCI)
+	for (;len >= 4; len -= 4)
+	{
+		writel(*(u32 *)src, dst);
+		dst += 4;
+		src += 4;
+	}
+
+	/* trailing 1-3 bytes */
+	for (; len > 0; len--)
+	{
+		writeb(*(u8 *)src, dst);
+		dst++;
+		src++;
+	}
+#else
+	memcpy(dst, src, len);
+#endif
+}
+
+/* pe_request_write - exercise PE-initiated DMEM -> host copies.
+ *
+ * For every length 1..24 bytes: write the test pattern into the PE's
+ * DMEM, clear the coherent bounce buffer, then ask the PE (pe_request)
+ * to copy DMEM back out to the bounce buffer and verify the result.
+ * Logs success/failure plus a byte-by-byte got/expected dump.
+ */
+static void pe_request_write(struct pfe_ctrl *ctrl, unsigned int id)
+{
+	int len, i;
+	int rc;
+
+	for (len = 1; len <= BUF_SIZE * sizeof(u32); len += 1) {
+		/* Copy to dmem memory */
+		pe_dmem_memcpy_to32(id, DMEM_TEST_BASE_ADDR, &w[0], len);
+
+		memset_io(buf_coherent, 0, BUF_SIZE * sizeof(u32));
+
+		/* Request PE to copy it back */
+		rc = pe_request(ctrl, id, buf_coherent_phys, DMEM_TEST_BASE_ADDR, len);
+		if (rc) {
+			printk(KERN_ERR "PE %d: pe_request() failed: %d %#x\n", id, rc, readl(CLASS_PE0_DEBUG));
+			continue;
+		}
+
+		if (memcmp_io(buf_coherent, w, len))
+			printk(KERN_ERR "PE %d: %s failed: %d", id, __func__, len);
+		else
+			printk(KERN_ERR "PE %d: %s success: %d", id, __func__, len);
+
+		/* dump got/expected pairs on the same log line */
+		for (i = 0; i < len; i++)
+			printk(" %x/%x", ((volatile u8 *)buf_coherent)[i], ((u8 *)w)[i]);
+
+		printk("\n");
+	}
+}
+
+/* pe_request_read - exercise PE-initiated host -> DMEM copies.
+ *
+ * For every length 1..24 bytes: clear the PE's DMEM test area, place the
+ * pattern in the coherent bounce buffer, ask the PE (pe_request) to copy
+ * it into DMEM, then read DMEM back byte-by-byte and verify.
+ */
+static void pe_request_read(struct pfe_ctrl *ctrl, unsigned int id)
+{
+	u8 *rb = (u8 *)&r[0];
+	int len, i;
+	int rc;
+
+	for (len = 1; len <= BUF_SIZE * sizeof(u32); len += 1) {
+		/* Zero memory */
+		pe_dmem_memcpy_to32(id, DMEM_TEST_BASE_ADDR, &zero[0], len);
+
+		/* Request PE to copy to internal memory */
+		memcpy_io(buf_coherent, w, BUF_SIZE * sizeof(u32));
+		rc = pe_request(ctrl, id, DMEM_TEST_BASE_ADDR, buf_coherent_phys, len);
+		if (rc) {
+			printk(KERN_ERR "PE %d: pe_request() failed: %d %#x\n", id, rc, readl(CLASS_PE0_DEBUG));
+			continue;
+		}
+
+		/* Read back and compare */
+		for (i = 0; i < len; i++)
+			rb[i] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR + i, 1);
+
+		if (memcmp_io(rb, buf_coherent, len))
+			printk(KERN_ERR "PE %d: %s failed: %d", id, __func__, len);
+		else
+			printk(KERN_ERR "PE %d: %s success: %d", id, __func__, len);
+
+		/* dump got/expected pairs on the same log line */
+		for (i = 0; i < len; i++)
+			printk(" %x/%x", rb[i], ((volatile u8 *)buf_coherent)[i]);
+
+		printk("\n");
+	}
+}
+
+/* Dump the first 64KB of PFE DDR (PCI platform debug aid).
+ * Prints runs of 16 words: one run from offset 0, and a new run whenever
+ * the 0x67452301 marker word is seen. */
+static void pcie_mem_dump(struct pfe *pfe)
+{
+	int off;
+	u32 word;
+	int printing = 1;	/* start printing from offset 0 */
+	int printed = 0;	/* words printed in the current run */
+
+	for (off = 0; off < 64 *SZ_1K; off += 4) {
+		word = readl(pfe->ddr_baseaddr + off);
+
+		/* marker re-arms printing */
+		if (word == 0x67452301)
+			printing = 1;
+
+		if (!printing)
+			continue;
+
+		printed++;
+		printk(KERN_ERR "%#x %#x\n", off, word);
+
+		if (printed == 16) {
+			printed = 0;
+			printing = 0;
+		}
+	}
+}
+
+/* pcie_mem - PCI shared-memory access debug aid.
+ *
+ * The byte/word/long read-back loops are currently disabled (#if 0);
+ * only the repeated address/value dump at the bottom runs.
+ *
+ * Fix: `r` was declared unconditionally but only used inside the #if 0
+ * blocks, producing an unused-variable warning that breaks the build
+ * under the Makefile's -Werror.  Its declaration now lives inside the
+ * disabled blocks.
+ */
+static void pcie_mem(struct pfe_ctrl *ctrl)
+{
+	int i;
+#if 0
+	int r;
+
+	for (i = 0; i < 100; i++) {
+		writeb(i, buf_coherent + i);
+		if ((r = readb(buf_coherent + i)) != i)
+			printk(KERN_ERR "%s: readb() %d %d\n", __func__, i, r);
+	}
+
+	for (i = 0; i < 100/2; i++) {
+		writew(i, buf_coherent + i * 2);
+		if ((r = readw(buf_coherent + i * 2)) != i)
+			printk(KERN_ERR "%s: readw() %d %d\n", __func__, i, r);
+	}
+#endif
+
+#if 0
+	for (i = 0; i < 100/4; i++) {
+		writel(i, buf_coherent + i * 4);
+		if ((r = readl(buf_coherent + i * 4)) != i)
+			printk(KERN_ERR "%s: readl() %d %d\n", __func__, i, r);
+	}
+#endif
+	/* repeatedly log the bounce buffer addresses and live register/DDR
+	 * values to watch for PCI access instability */
+	for (i = 0; i < 256; i++)
+		printk(KERN_ERR "%lx %lx %x %x %x\n", buf_coherent_phys, (unsigned long)buf_coherent, readl(pfe->ddr_baseaddr), readl(buf_coherent), readl(CLASS_PE0_DEBUG));
+}
+
+/* Synchronously stop the PEs selected by @pe_mask, then restart them,
+ * logging each phase so a hang can be localized. */
+static void pe_stop_start(struct pfe_ctrl *ctrl, int pe_mask)
+{
+	printk(KERN_INFO "%s\n", __func__);
+
+	/* blocks until the selected PEs acknowledge the stop */
+	pe_sync_stop(ctrl, pe_mask);
+	printk(KERN_INFO "%s stopped\n", __func__);
+
+	pe_start(ctrl, pe_mask);
+	printk(KERN_INFO "%s re-started\n", __func__);
+}
+
+
+/* pe_running - dump live PE state for debugging.
+ *
+ * Polls CLASS0's sync mailbox "stopped" word alongside the four CLASS
+ * debug registers, then dumps every PE's stopped flag and message-mailbox
+ * request word, and finally reads back mixed-width values from DMEM
+ * 0x800..0x810 to sanity-check 4/2/1-byte indirect reads.
+ */
+static void pe_running(struct pfe_ctrl *ctrl)
+{
+	struct pe_sync_mailbox *mbox;
+	struct pe_msg_mailbox *msg;
+	u32 val;
+	u32 r32[2];
+	u16 r16[2];
+	u8 r8[4];
+	int i;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	mbox = (void *)ctrl->sync_mailbox_baseaddr[CLASS0_ID];
+
+	/* sample CLASS0's stopped flag and debug registers 100 times */
+	for (i = 0; i < 100; i++) {
+		val = pe_dmem_read(CLASS0_ID, (unsigned long)&mbox->stopped, 4);
+		printk(KERN_ERR "%s: %#lx %#x %#10x %#10x %#10x %#10x\n", __func__, (unsigned long)&mbox->stopped, be32_to_cpu(val),
+			readl(CLASS_PE0_DEBUG), readl(CLASS_PE1_DEBUG), readl(CLASS_PE2_DEBUG), readl(CLASS_PE3_DEBUG));
+	}
+
+	printk(KERN_ERR "%s: stopped", __func__);
+
+	/* stopped flag of every PE, on one log line */
+	for (i = 0; i < MAX_PE; i++) {
+		mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+		val = pe_dmem_read(i, (unsigned long)&mbox->stopped, 4);
+
+		printk(" %x", val);
+	}
+
+	printk("\n");
+
+	printk(KERN_ERR "%s: request", __func__);
+
+	/* pending message-mailbox request of every PE */
+	for (i = 0; i < MAX_PE; i++) {
+		msg = (void *)ctrl->msg_mailbox_baseaddr[i];
+		val = pe_dmem_read(i, (unsigned long)&msg->request, 4);
+
+		printk(" %x", val);
+	}
+
+	printk("\n");
+
+
+	/* mixed-width read-back of the CLASS0 status area */
+	r32[0] = pe_dmem_read(CLASS0_ID, 0x800, 4);
+	r32[1] = pe_dmem_read(CLASS0_ID, 0x804, 4);
+	r16[0] = pe_dmem_read(CLASS0_ID, 0x808, 2);
+	r16[1] = pe_dmem_read(CLASS0_ID, 0x80a, 2);
+
+	r8[0] = pe_dmem_read(CLASS0_ID, 0x80c, 1);
+	r8[1] = pe_dmem_read(CLASS0_ID, 0x80d, 1);
+	r8[2] = pe_dmem_read(CLASS0_ID, 0x80e, 1);
+	r8[3] = pe_dmem_read(CLASS0_ID, 0x80f, 1);
+
+
+	printk(KERN_ERR "%x %x\n", r32[0], r32[1]);
+	printk(KERN_ERR "%x %x\n", r16[0], r16[1]);
+	printk(KERN_ERR "%x %x %x %x\n", r8[0], r8[1], r8[2], r8[3]);
+	printk(KERN_ERR "%x %x %x %x\n", pe_dmem_read(CLASS0_ID, 0x810, 4), pe_dmem_read(CLASS1_ID, 0x810, 4),
+			pe_dmem_read(CLASS2_ID, 0x810, 4), pe_dmem_read(CLASS3_ID, 0x810, 4));
+}
+
+
+/* pmem_writeN_readN - PMEM write/read-back test for lengths 1..24 bytes.
+ *
+ * For each length: clear the PMEM test area, write the pattern, read it
+ * back one byte at a time and compare.  The CLASS cores are halted for
+ * the duration because PMEM cannot be modified while they execute.
+ */
+static void pmem_writeN_readN(unsigned int id)
+{
+	u8 *rb = (u8 *)&r[0];
+	int len, i;
+
+	/* PMEM can not be modified if CPU is running */
+	class_disable();
+
+	for (len = 1; len <= BUF_SIZE * sizeof(u32); len++) {
+		pe_pmem_memcpy_to32(id, PMEM_TEST_BASE_ADDR, &zero[0], len);
+
+		pe_pmem_memcpy_to32(id, PMEM_TEST_BASE_ADDR, &w[0], len);
+
+		/* byte-granular read-back through the indirect interface */
+		for (i = 0; i < len; i++)
+			rb[i] = pe_pmem_read(id, PMEM_TEST_BASE_ADDR + i, 1);
+
+		if (memcmp(rb, w, len))
+			printk(KERN_ERR "PE %d: %s failed: %d\n", id, __func__, len);
+		else
+			printk(KERN_ERR "PE %d: %s success: %d\n", id, __func__, len);
+	}
+
+	class_enable();
+}
+
+
+/* Write a single 32-bit word to PMEM and read it straight back.
+ * CLASS cores are halted around the access: PMEM cannot be modified
+ * while they run. */
+static void pmem_write4_read4(unsigned int id)
+{
+	class_disable();
+
+	pe_pmem_memcpy_to32(id, PMEM_TEST_BASE_ADDR, &w[0], 4);
+	r[0] = pe_pmem_read(id, PMEM_TEST_BASE_ADDR, 4);
+
+	if (r[0] == w[0])
+		printk(KERN_ERR "PE %d: %s success\n", id, __func__);
+	else
+		printk(KERN_ERR "PE %d: %s failed: %#x %#x\n", id, __func__, w[0], r[0]);
+
+	class_enable();
+}
+
+/* dmem_writeN_readN - DMEM write/read-back test for lengths 1..24 bytes.
+ *
+ * For each length: clear the DMEM test area, write the pattern, read it
+ * back one byte at a time and compare.
+ *
+ * Fix: the function shadowed the file-scope zero[BUF_SIZE] with a local
+ * "u32 zero[3]" (12 bytes), while the loop copies up to
+ * BUF_SIZE * sizeof(u32) == 24 bytes from it — an out-of-bounds read.
+ * Use the correctly-sized file-scope buffer (as pmem_writeN_readN does).
+ */
+static void dmem_writeN_readN(unsigned int id)
+{
+	u8 *rb = (u8 *)&r[0];
+	int len, i;
+
+	for (len = 1; len <= BUF_SIZE * sizeof(u32); len++)
+	{
+		pe_dmem_memcpy_to32(id, DMEM_TEST_BASE_ADDR, &zero[0], len);
+
+		pe_dmem_memcpy_to32(id, DMEM_TEST_BASE_ADDR, &w[0], len);
+
+		/* byte-granular read-back through the indirect interface */
+		for (i = 0; i < len; i++)
+			rb[i] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR + i, 1);
+
+		if (memcmp(rb, w, len))
+			printk(KERN_ERR "PE %d: %s failed: %d\n", id, __func__, len);
+		else
+			printk(KERN_ERR "PE %d: %s success: %d\n", id, __func__, len);
+	}
+}
+
+
+/* Write one 32-bit DMEM word, read it back as two 16-bit halves, and
+ * verify the reassembled value matches. */
+static void dmem_write4_read2(unsigned int id)
+{
+	u16 *halves = (u16 *)&r[0];
+
+	pe_dmem_write(id, w[0], DMEM_TEST_BASE_ADDR + 0, 4);
+
+	halves[0] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR + 0, 2);
+	halves[1] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR + 2, 2);
+
+	if (r[0] == w[0])
+		printk(KERN_ERR "PE %d: %s success\n", id, __func__);
+	else
+		printk(KERN_ERR "PE %d: %s failed: %#x %#x\n", id, __func__, w[0], r[0]);
+}
+
+
+/* Write one 32-bit DMEM word, read it back as four bytes, and verify
+ * the reassembled value matches. */
+static void dmem_write4_read1(unsigned int id)
+{
+	u8 *bytes = (u8 *)&r[0];
+	int i;
+
+	pe_dmem_write(id, w[0], DMEM_TEST_BASE_ADDR + 0, 4);
+
+	for (i = 0; i < 4; i++)
+		bytes[i] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR + i, 1);
+
+	if (r[0] == w[0])
+		printk(KERN_ERR "PE %d: %s success\n", id, __func__);
+	else
+		printk(KERN_ERR "PE %d: %s failed: %#x %#x\n", id, __func__, w[0], r[0]);
+}
+
+/* Zero a DMEM word, write it back as four individual bytes, then read
+ * it as one 32-bit word and verify. */
+static void dmem_write1_read4(unsigned int id)
+{
+	u8 *bytes = (u8 *)&w[0];
+	int i;
+
+	/* clear first so stale contents cannot mask a failed byte write */
+	pe_dmem_write(id, 0x0, DMEM_TEST_BASE_ADDR, 4);
+
+	for (i = 0; i < 4; i++)
+		pe_dmem_write(id, bytes[i], DMEM_TEST_BASE_ADDR + i, 1);
+
+	r[0] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR, 4);
+
+	if (r[0] == w[0])
+		printk(KERN_ERR "PE %d: %s success\n", id, __func__);
+	else
+		printk(KERN_ERR "PE %d: %s failed: %#x %#x\n", id, __func__, w[0], r[0]);
+}
+
+
+/* Zero a DMEM word, write it back as two 16-bit halves, then read it
+ * as one 32-bit word and verify. */
+static void dmem_write2_read4(unsigned int id)
+{
+	u16 *halves = (u16 *)&w[0];
+
+	/* clear first so stale contents cannot mask a failed half write */
+	pe_dmem_write(id, 0x0, DMEM_TEST_BASE_ADDR, 4);
+
+	pe_dmem_write(id, halves[0], DMEM_TEST_BASE_ADDR + 0, 2);
+	pe_dmem_write(id, halves[1], DMEM_TEST_BASE_ADDR + 2, 2);
+
+	r[0] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR, 4);
+
+	if (r[0] == w[0])
+		printk(KERN_ERR "PE %d: %s success\n", id, __func__);
+	else
+		printk(KERN_ERR "PE %d: %s failed: %#x %#x\n", id, __func__, w[0], r[0]);
+}
+
+
+/* Simplest DMEM round-trip: write one 32-bit word, read it back, verify. */
+static void dmem_read4_write4(unsigned int id)
+{
+	pe_dmem_write(id, w[0], DMEM_TEST_BASE_ADDR, 4);
+	r[0] = pe_dmem_read(id, DMEM_TEST_BASE_ADDR, 4);
+
+	if (r[0] == w[0])
+		printk(KERN_ERR "PE %d: %s success\n", id, __func__);
+	else
+		printk(KERN_ERR "PE %d: %s failed: %#x %#x\n", id, __func__, w[0], r[0]);
+}
+
+
+/* bmu_unit_test - exhaustively allocate and free every BMU buffer.
+ *
+ * Runs three passes (loop starts at 2, post-decrement while).  Each pass
+ * drains the pool one buffer at a time, checking alignment, bounds and
+ * the hardware's remaining/current counters, using a bitmap to detect
+ * duplicate allocations; then frees every buffer and checks the bitmap
+ * the other way.
+ *
+ * Fix: the bitmap was allocated as (count + 7) / 8 bytes, but
+ * test_and_set_bit()/test_and_clear_bit() read-modify-write whole
+ * unsigned-long words, so the last word could extend past the allocation
+ * (heap overrun for counts not a multiple of BITS_PER_LONG).  Allocate
+ * BITS_TO_LONGS(count) longs instead.
+ */
+void bmu_unit_test(void *base, BMU_CFG *cfg)
+{
+	unsigned long buf;
+	int i;
+	int loop = 2;
+	unsigned long *bitmap;
+	unsigned int bitoff;
+
+	bitmap = kzalloc(BITS_TO_LONGS(cfg->count) * sizeof(unsigned long), GFP_KERNEL);
+	if (!bitmap)
+		return;
+
+	bmu_enable(base);
+
+	do {
+		printk(KERN_INFO "%s: testing %d\n", __func__, loop);
+
+		for (i = 0; i < cfg->count; i++) {
+			/* a read of BMU_ALLOC_CTRL pops one buffer address */
+			buf = readl(base + BMU_ALLOC_CTRL);
+			if (!buf) {
+				printk(KERN_ERR "%s: allocation failed %d\n", __func__, i);
+				continue;
+			}// else
+			// printk(KERN_ERR "%s: allocated %lx\n", __func__, buf);
+
+			/* buffers must be naturally aligned to their size */
+			if (buf & ((1 << cfg->size) - 1))
+				printk(KERN_ERR "%s: non aligned buffer %lx\n", __func__, buf);
+
+			if (buf < cfg->baseaddr)
+				printk(KERN_ERR "%s: out of bounds buffer %lx\n", __func__, buf);
+
+			if (buf >= (cfg->baseaddr + cfg->count * (1 << cfg->size)))
+				printk(KERN_ERR "%s: out of bounds buffer %lx\n", __func__, buf);
+
+//			if ((readl(base + BMU_BUF_CNT) & 0xffff) != (i + 1))
+//				printk(KERN_ERR "%s: used buffer count wrong %d %d\n", __func__, readl(base + BMU_BUF_CNT) & 0xffff, i + 1);
+
+			if (readl(base + BMU_REM_BUF_CNT) != (cfg->count - i - 1))
+				printk(KERN_ERR "%s: remaining buffer count wrong %d %d\n", __func__, readl(base + BMU_REM_BUF_CNT), cfg->count - i - 1);
+
+			if (readl(base + BMU_CURR_BUF_CNT) != (i + 1))
+				printk(KERN_ERR "%s: allocate buffer count wrong %d %d\n", __func__, readl(base + BMU_CURR_BUF_CNT), i + 1);
+
+			bitoff = (buf - cfg->baseaddr) >> cfg->size;
+
+			/* bit already set => the BMU handed out the same buffer twice */
+			if (test_and_set_bit(bitoff, bitmap))
+				printk(KERN_ERR "%s: duplicated buffer %lx\n", __func__, buf);
+		}
+
+		/* pool should now be empty */
+		if (readl(base + BMU_ALLOC_CTRL) != 0)
+			printk(KERN_ERR "%s: too many buffers in pool\n", __func__);
+
+		for (i = 0; i < cfg->count; i++) {
+			buf = cfg->baseaddr + i * (1 << cfg->size);
+			/* a write to BMU_FREE_CTRL pushes the buffer back */
+			writel(buf, base + BMU_FREE_CTRL);
+
+			bitoff = (buf - cfg->baseaddr) >> cfg->size;
+
+			/* bit clear => we are freeing a buffer we never got */
+			if (!test_and_clear_bit(bitoff, bitmap))
+				printk(KERN_ERR "%s: not allocated buffer %lx\n", __func__, buf);
+		}
+
+	} while (loop--);
+
+	bmu_disable(base);
+
+	kfree(bitmap);
+}
+
+
+/* pfe_unit_test - top-level PE memory/mailbox self-test driver.
+ *
+ * Runs the DMEM mixed-width tests on every PE (CLASS-only when
+ * CONFIG_TMU_DUMMY), dumps PE state, stop/starts the PEs, optionally
+ * runs the HIF loopback test, then exercises PE-initiated DMA in both
+ * directions through a coherent bounce buffer.
+ *
+ * NOTE: the for-loop brace is opened under #if/#else on purpose — the
+ * two variants share one body; do not "fix" the bracing.
+ */
+void pfe_unit_test(struct pfe *pfe)
+{
+	int i;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+#if defined(CONFIG_TMU_DUMMY)
+	for (i = 0; i <= CLASS_MAX_ID; i++) {
+#else
+	for (i = 0; i < MAX_PE; i++) {
+#endif
+		dmem_read4_write4(i);
+		dmem_write2_read4(i);
+		dmem_write1_read4(i);
+		dmem_write4_read1(i);
+		dmem_write4_read2(i);
+		dmem_writeN_readN(i);
+#if 0
+		pmem_write4_read4(i);
+		pmem_writeN_readN(i);
+#endif
+	}
+
+	pe_running(&pfe->ctrl);
+
+	/* Skip TMU, UTIL testing for now */
+#if defined(CONFIG_TMU_DUMMY)
+	pe_stop_start(&pfe->ctrl, CLASS_MASK);
+#else
+	pe_stop_start(&pfe->ctrl, CLASS_MASK | TMU_MASK);
+#endif
+
+#if defined(CONFIG_UNIT_TEST_HIF)
+	hif_unit_test(pfe);
+#endif
+
+#if !defined(CONFIG_PLATFORM_PCI)
+	/* NOTE(review): buf_coherent_phys is unsigned long but
+	 * dma_alloc_coherent expects a dma_addr_t* — confirm they match
+	 * on this platform */
+	buf_coherent = dma_alloc_coherent(pfe->dev, BUF_SIZE * sizeof(u32), &buf_coherent_phys, GFP_KERNEL);
+	if (!buf_coherent) {
+		printk(KERN_ERR "%s: dma_alloc_coherent() failed\n", __func__);
+		goto out;
+	}
+#else
+	/* on PCI the bounce buffer lives directly in mapped PFE DDR */
+	buf_coherent = pfe->ddr_baseaddr + 0x8;
+	buf_coherent_phys = pfe->ddr_phys_baseaddr + 0x8;
+
+	pcie_mem_dump(pfe);
+	pcie_mem(&pfe->ctrl);
+#endif
+
+	/* PE-driven copies host<->DMEM, CLASS PEs only */
+	for (i = CLASS0_ID; i <= CLASS_MAX_ID; i++) {
+		pe_request_read(&pfe->ctrl, i);
+
+		pe_request_write(&pfe->ctrl, i);
+	}
+
+#if !defined(CONFIG_PLATFORM_PCI)
+	dma_free_coherent(pfe->dev, BUF_SIZE * sizeof(u32), buf_coherent, buf_coherent_phys);
+
+out:
+#endif
+
+	return;
+}
+
+#if defined(CONFIG_UNIT_TEST_HIF)
+/* hif_unit_test - minimal HIF client smoke test.
+ *
+ * Registers a HIF client, transmits one (uninitialized) 100-byte buffer
+ * on queue 0 and reports the result, then deregisters.  The packet
+ * contents are irrelevant; only the register/xmit/deregister plumbing
+ * is being exercised.
+ */
+void hif_unit_test(struct pfe *pfe)
+{
+	struct hif_client_s *client;
+	unsigned char *pkt;
+
+	printk(KERN_INFO "%s\n", __func__);
+	client = hif_lib_client_register(pfe, NULL, PFE_CL_EVENT, 64, 2, 2);
+	if(client==NULL) {
+		printk("Failed to register client\n");
+		return;
+	}
+
+	printk("hif client registered successfully \n");
+
+	pkt = kmalloc(FPP_SKB_SIZE, GFP_KERNEL);
+	if(!pkt) goto client_dereg;
+
+	/* payload starts after the reserved headroom */
+	if(hif_lib_xmit_pkt(client, 0, pkt+PKT_HEADROOM, 100)==0)
+		printk("%s Packet successfully transmitted\n",__func__);
+	else
+		printk("%s Failed to transmit packet\n",__func__);
+
+	kfree(pkt);
+
+client_dereg:
+	if(hif_lib_client_deregister(pfe, client) != 0) {
+		printk("Failed to deregister client\n");
+		return;
+	}
+	printk("hif client deregistered successfully \n");
+}
+
+#endif
diff --git a/pfe_ctrl/pfe_vwd.c b/pfe_ctrl/pfe_vwd.c
new file mode 100644
index 0000000..da3f879
--- /dev/null
+++ b/pfe_ctrl/pfe_vwd.c
@@ -0,0 +1,2912 @@
+#include <linux/version.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+
+#include <net/pkt_sched.h>
+#include <linux/rcupdate.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/irqnr.h>
+#include <linux/ppp_defs.h>
+
+#include <linux/rculist.h>
+#include <../../../net/bridge/br_private.h>
+
+#include "config.h"
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+#include <net/xfrm.h>
+#endif
+
+#include "pfe_mod.h"
+#include "pfe_tso.h"
+#include "pfe_vwd.h"
+
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+#include <net/xfrm.h>
+#endif
+
+#ifdef CFG_WIFI_OFFLOAD
+
+//#define VWD_DEBUG
+
+/* module knob: allow local TX offload through offload interfaces */
+unsigned int vwd_tx_ofld = 0;
+module_param(vwd_tx_ofld, uint, S_IRUGO);
+MODULE_PARM_DESC(vwd_tx_ofld,
+		"0: Local Tx offload is not supported for offload interfaces, 1: Local Tx offload is supported for offload interfaces");
+
+/* forward declarations for the VWD NAPI pollers, hooks and VAP control */
+static int pfe_vwd_rx_low_poll(struct napi_struct *napi, int budget);
+static int pfe_vwd_rx_high_poll(struct napi_struct *napi, int budget);
+static void pfe_vwd_sysfs_exit(void);
+static void pfe_vwd_vap_down(struct vap_desc_s *vap);
+static unsigned int pfe_vwd_nf_route_hook_fn( unsigned int hook, struct sk_buff *skb,
+		const struct net_device *in, const struct net_device *out,
+		int (*okfn)(struct sk_buff *));
+static unsigned int pfe_vwd_nf_bridge_hook_fn( unsigned int hook, struct sk_buff *skb,
+		const struct net_device *in, const struct net_device *out,
+		int (*okfn)(struct sk_buff *));
+static int pfe_vwd_handle_vap( struct pfe_vwd_priv_s *vwd, struct vap_cmd_s *cmd );
+static int pfe_vwd_event_handler(void *data, int event, int qno);
+/* provided by the platform WiFi fast-path glue */
+extern int comcerto_wifi_rx_fastpath_register(int (*hdlr)(struct sk_buff *skb));
+extern void comcerto_wifi_rx_fastpath_unregister(void);
+
+extern unsigned int page_mode;
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+extern struct xfrm_state *xfrm_state_lookup_byhandle(struct net *net, u16 handle);
+#endif
+
+
+/* IPV4 route hook, receive the packet and forward to VWD driver */
+static struct nf_hook_ops vwd_hook = {
+	.hook = pfe_vwd_nf_route_hook_fn,
+	.pf = PF_INET,
+	.hooknum = NF_INET_PRE_ROUTING,
+	.priority = NF_IP_PRI_FIRST,
+};
+
+/* IPV6 route hook, receive the packet and forward to VWD driver */
+static struct nf_hook_ops vwd_hook_ipv6 = {
+	.hook = pfe_vwd_nf_route_hook_fn,
+	.pf = PF_INET6,
+	.hooknum = NF_INET_PRE_ROUTING,
+	.priority = NF_IP6_PRI_FIRST,
+};
+
+/* Bridge hook, receive the packet and forward to VWD driver */
+static struct nf_hook_ops vwd_hook_bridge = {
+	.hook = pfe_vwd_nf_bridge_hook_fn,
+	.pf = PF_BRIDGE,
+	.hooknum = NF_BR_PRE_ROUTING,
+	.priority = NF_BR_PRI_FIRST,
+};
+
+/* single global VWD driver state */
+struct pfe_vwd_priv_s glbl_pfe_vwd_priv;
+
+#ifdef VWD_DEBUG
+/* Hex-dump an skb's payload to the console, 16 bytes per line
+ * (debug builds only). */
+static void pfe_vwd_dump_skb( struct sk_buff *skb )
+{
+	int off;
+
+	for (off = 0; off < skb->len; off++)
+	{
+		/* new line every 16 bytes */
+		if ((off % 16) == 0)
+			printk("\n");
+
+		printk(" %02x", skb->data[off]);
+	}
+}
+#endif
+
+/** get_vap_by_name
+ *
+ * Linear search of priv->vaps[] for an active VAP whose interface name
+ * matches @name.  Returns the descriptor, or NULL if none matches.
+ */
+static struct vap_desc_s *get_vap_by_name(struct pfe_vwd_priv_s *priv, const char *name)
+{
+	int idx;
+
+	for (idx = 0; idx < MAX_VAP_SUPPORT; idx++) {
+		struct vap_desc_s *candidate = priv->vaps[idx];
+
+		if (candidate && (strcmp(candidate->ifname, name) == 0))
+			return candidate;
+	}
+
+	return NULL;
+}
+
+
+/**
+ * vwd_vap_device_event_notifier
+ *
+ * netdevice notifier: when a pre-configured VAP interface comes up, or a
+ * wifi-offload device goes down, schedule the driver's event work item
+ * (priv->event) to reconcile VAP state in process context.
+ */
+static int vwd_vap_device_event_notifier(struct notifier_block *unused,
+		unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	int ii;
+
+
+	switch (event) {
+	case NETDEV_UP:
+		spin_lock_bh(&priv->vaplock);
+
+		/* look for a configured name whose VAP slot is not yet active */
+		for (ii = 0; ii < MAX_VAP_SUPPORT; ii++) {
+			if(!strcmp(priv->conf_vap_names[ii], dev->name) && !pfe->vwd.vaps[ii])
+				break;
+		}
+		spin_unlock_bh(&priv->vaplock);
+
+		if (ii < MAX_VAP_SUPPORT) {
+			/* NOTE(review): message wording looks inverted — this
+			 * branch means the name is configured but the VAP is not
+			 * yet instantiated; confirm intended text */
+			printk(KERN_INFO"%s : VAP name(%s) is already configured\n", __func__, dev->name);
+			schedule_work(&priv->event);
+		}
+		break;
+
+	case NETDEV_DOWN:
+		/* only react to devices marked as wifi-offload capable */
+		if (!dev->wifi_offload_dev)
+			goto done;
+
+		if ( !(dev->flags & IFF_UP)){
+			schedule_work(&priv->event);
+		}
+		break;
+
+	}
+
+done:
+
+	return NOTIFY_DONE;
+}
+
+
+static struct notifier_block vwd_vap_notifier = {
+ .notifier_call = vwd_vap_device_event_notifier,
+};
+
+/** pfe_vwd_vap_create
+ *
+ * sysfs store handler: parse an interface name from @buf and attach the
+ * named netdevice as a new VAP in the first free slot.
+ *
+ * Fix: the copy of @buf was terminated with '\n' instead of '\0', so
+ * tmp_name was not NUL-terminated before sscanf("%s") — a read past the
+ * end of the buffer for a name of exactly IFNAMSIZ-1 characters with no
+ * trailing whitespace.  Terminate with '\0' (sscanf stops at the end of
+ * string just as it did at whitespace).
+ */
+static int pfe_vwd_vap_create(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	struct net_device *wifi_dev;
+	struct vap_cmd_s vap_cmd;
+	char name[IFNAMSIZ];
+	char tmp_name[IFNAMSIZ];
+	int ii, len;
+
+	len = IFNAMSIZ - 1;
+
+	if (len > count)
+		len = count;
+
+	memcpy(tmp_name, buf, len);
+	tmp_name[len] = '\0';
+	sscanf(tmp_name, "%s", name);
+
+	spin_lock(&priv->conf_lock);
+	spin_lock_bh(&priv->vaplock);
+	/* reject duplicate names */
+	for (ii = 0; ii < MAX_VAP_SUPPORT; ii++) {
+		if (!strcmp(priv->conf_vap_names[ii], name)) {
+			printk("%s: VAP with same name already exist\n", __func__);
+			goto done;
+		}
+	}
+
+
+	wifi_dev = dev_get_by_name(&init_net, name);
+	if(wifi_dev) {
+		/* find a free VAP slot */
+		for (ii = 0; ii < MAX_VAP_SUPPORT; ii++) {
+			if (!priv->vaps[ii])
+				break;
+		}
+
+		if (ii < MAX_VAP_SUPPORT) {
+			vap_cmd.action = ADD;
+			vap_cmd.vapid = ii;
+			vap_cmd.ifindex = wifi_dev->ifindex;
+			strcpy(vap_cmd.ifname, name);
+			memcpy(vap_cmd.macaddr, wifi_dev->dev_addr, 6);
+			vap_cmd.direct_rx_path = 0;
+
+			if (!pfe_vwd_handle_vap(priv, &vap_cmd)){
+				/* remember the name only after the VAP is live */
+				strcpy(priv->conf_vap_names[ii], name);
+
+				printk(KERN_INFO"VAP added successfully\n");
+			}
+		}
+		else
+			printk("%s: All VAPs are used.. No space.\n",__func__);
+
+
+		dev_put(wifi_dev);
+	}
+	else {
+		printk(KERN_ERR "%s: %s is invalid interface Or not created...\n",__func__, name);
+	}
+
+done:
+	spin_unlock_bh(&priv->vaplock);
+	spin_unlock(&priv->conf_lock);
+
+	return count;
+}
+
+/** pfe_vwd_vap_remove
+ *
+ * sysfs store handler: parse an interface name from @buf and detach the
+ * matching VAP.
+ *
+ * Fix: as in pfe_vwd_vap_create(), the copy of @buf was terminated with
+ * '\n' instead of '\0', leaving tmp_name un-terminated before
+ * sscanf("%s").  Terminate with '\0'.
+ */
+static int pfe_vwd_vap_remove(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	struct vap_desc_s *vap;
+	struct net_device *wifi_dev;
+	struct vap_cmd_s vap_cmd;
+	char name[IFNAMSIZ];
+	char tmp_name[IFNAMSIZ];
+	int len;
+
+	len = IFNAMSIZ - 1;
+
+	if (len > count)
+		len = count;
+
+	memcpy(tmp_name, buf, len);
+	tmp_name[len] = '\0';
+
+	sscanf(tmp_name, "%s", name);
+
+	wifi_dev = dev_get_by_name(&init_net, name);
+
+	spin_lock(&priv->conf_lock);
+	spin_lock_bh(&priv->vaplock);
+
+	if (wifi_dev) {
+		vap = get_vap_by_name(priv, name);
+
+		if (!vap) {
+			printk(KERN_ERR "%s: %s is not valid VAP\n", __func__, name);
+			dev_put(wifi_dev);
+			goto done;
+		}
+
+		vap_cmd.action = REMOVE;
+		vap_cmd.vapid = vap->vapid;
+		strcpy(vap_cmd.ifname, name);
+		if (!pfe_vwd_handle_vap(priv, &vap_cmd)){
+			printk(KERN_INFO"VAP removed successfully\n");
+			/* free the configured-name slot for reuse */
+			priv->conf_vap_names[vap->vapid][0] = '\0';
+		}
+		dev_put(wifi_dev);
+	}
+
+done:
+	spin_unlock_bh(&priv->vaplock);
+	spin_unlock(&priv->conf_lock);
+
+	return count;
+}
+
+
+#ifdef PFE_VWD_LRO_STATS
+/*
+ * pfe_vwd_show_lro_nb_stats
+ * sysfs show: print the LRO fragment-count histogram (packets seen per
+ * number of aggregated fragments).
+ */
+static ssize_t pfe_vwd_show_lro_nb_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < LRO_NB_COUNT_MAX; i++)
+		len += sprintf(buf + len, "%d fragments packets = %d\n", i, priv->lro_nb_counters[i]);
+
+	return len;
+}
+
+/*
+ * pfe_vwd_set_lro_nb_stats
+ * sysfs store: any write resets the fragment-count histogram.
+ */
+static ssize_t pfe_vwd_set_lro_nb_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+
+	memset(priv->lro_nb_counters, 0, sizeof(priv->lro_nb_counters));
+
+	return count;
+}
+
+/*
+ * pfe_vwd_show_lro_len_stats
+ * sysfs show: print the LRO aggregated-length histogram (2KB buckets).
+ */
+static ssize_t pfe_vwd_show_lro_len_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < LRO_LEN_COUNT_MAX; i++)
+		len += sprintf(buf + len, "RX packets > %dKBytes = %d\n", i * 2, priv->lro_len_counters[i]);
+
+	return len;
+}
+
+/*
+ * pfe_vwd_set_lro_len_stats
+ * sysfs store: any write resets the aggregated-length histogram.
+ */
+static ssize_t pfe_vwd_set_lro_len_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+
+	memset(priv->lro_len_counters, 0, sizeof(priv->lro_len_counters));
+
+	return count;
+}
+#endif
+
+#ifdef PFE_VWD_NAPI_STATS
+/*
+ * pfe_vwd_show_napi_stats
+ *
+ * sysfs "show": dump the NAPI bookkeeping counters (schedules, polls,
+ * packets, full-budget polls, descriptors).
+ */
+static ssize_t pfe_vwd_show_napi_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_vwd_priv_s *vwd = &pfe->vwd;
+	ssize_t n = 0;
+
+	n += sprintf(buf + n, "sched: %d\n", vwd->napi_counters[NAPI_SCHED_COUNT]);
+	n += sprintf(buf + n, "poll: %d\n", vwd->napi_counters[NAPI_POLL_COUNT]);
+	n += sprintf(buf + n, "packet: %d\n", vwd->napi_counters[NAPI_PACKET_COUNT]);
+	n += sprintf(buf + n, "budget: %d\n", vwd->napi_counters[NAPI_FULL_BUDGET_COUNT]);
+	n += sprintf(buf + n, "desc: %d\n", vwd->napi_counters[NAPI_DESC_COUNT]);
+
+	return n;
+}
+
+/*
+ * pfe_vwd_set_napi_stats
+ *
+ * sysfs "store": any write clears all NAPI counters.
+ */
+static ssize_t pfe_vwd_set_napi_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *vwd = &pfe->vwd;
+
+	memset(&vwd->napi_counters[0], 0, sizeof(vwd->napi_counters));
+
+	return count;
+}
+#endif
+
+/** pfe_vwd_show_dump_stats
+ *
+ * sysfs "show": dump the VWD debug packet counters (only compiled in with
+ * VWD_DEBUG_STATS) followed by the current enable state of the fast path
+ * and of the route/bridge/TSO hooks.
+ */
+static ssize_t pfe_vwd_show_dump_stats(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+
+#ifdef VWD_DEBUG_STATS
+	/* "To PFE": packets taken from the WiFi side and queued towards PFE */
+	len += sprintf(buf, "\nTo PFE\n");
+	len += sprintf(buf + len, "	WiFi Rx pkts : %d\n", priv->pkts_transmitted);
+	len += sprintf(buf + len, "	WiFi Tx pkts : %d\n", priv->pkts_total_local_tx);
+	len += sprintf(buf + len, "	WiFi Tx SG pkts : %d\n", priv->pkts_local_tx_sgs);
+	len += sprintf(buf + len, "	Drops : %d\n", priv->pkts_tx_dropped);
+
+	/* "From PFE": slow (exception) and fast forwarded packets, per queue */
+	len += sprintf(buf + len, "From PFE\n");
+	len += sprintf(buf + len, "	WiFi Rx pkts : %d %d %d\n", priv->pkts_slow_forwarded[0],
+			priv->pkts_slow_forwarded[1], priv->pkts_slow_forwarded[2]);
+	len += sprintf(buf + len, "	WiFi Tx pkts : %d %d %d\n", priv->pkts_rx_fast_forwarded[0],
+			priv->pkts_rx_fast_forwarded[1], priv->pkts_rx_fast_forwarded[2]);
+	len += sprintf(buf + len, "	Skb Alloc fails : %d\n", priv->rx_skb_alloc_fail);
+#endif
+	len += sprintf(buf + len, "\nStatus\n");
+	len += sprintf(buf + len, "	Fast path - %s\n", priv->fast_path_enable ? "Enable" : "Disable");
+	len += sprintf(buf + len, "	Route hook - %s\n", priv->fast_routing_enable ? "Enable" : "Disable");
+	len += sprintf(buf + len, "	Bridge hook - %s\n", priv->fast_bridging_enable ? "Enable" : "Disable");
+	len += sprintf(buf + len, "	TSO hook - %s\n", priv->tso_hook_enable ? "Enable" : "Disable");
+
+
+	return len;
+}
+
+
+/** pfe_vwd_show_fast_path_enable
+ *
+ * sysfs "show": report whether the VWD fast path is enabled (0/1).
+ */
+static ssize_t pfe_vwd_show_fast_path_enable(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "\n%d\n", pfe->vwd.fast_path_enable);
+}
+
+/** pfe_vwd_set_fast_path_enable
+ *
+ * sysfs "store": enable (non-zero) or disable (0) the VWD fast path.
+ * Returns @count on success, -EINVAL when the input is not a number.
+ */
+static ssize_t pfe_vwd_set_fast_path_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	unsigned int fast_path = 0;
+
+	/* FIX: fast_path is unsigned, so "%d" was an undefined format/argument
+	 * mismatch; use "%u" and reject unparsable input instead of silently
+	 * treating it as 0 (which disabled the fast path). */
+	if (sscanf(buf, "%u", &fast_path) != 1)
+		return -EINVAL;
+
+	printk("%s: Wifi fast path %d\n", __func__, fast_path);
+
+	/* Only log and flip the flag on an actual state change */
+	if (fast_path && !priv->fast_path_enable)
+	{
+		printk("%s: Wifi fast path enabled \n", __func__);
+
+		priv->fast_path_enable = 1;
+
+	}
+	else if (!fast_path && priv->fast_path_enable)
+	{
+		printk("%s: Wifi fast path disabled \n", __func__);
+
+		priv->fast_path_enable = 0;
+
+	}
+
+	return count;
+}
+
+/** pfe_vwd_show_route_hook_enable
+ *
+ * sysfs "show": report whether the netfilter route hooks are installed (0/1).
+ */
+static ssize_t pfe_vwd_show_route_hook_enable(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "\n%d\n", pfe->vwd.fast_routing_enable);
+}
+
+/** pfe_vwd_set_route_hook_enable
+ *
+ * sysfs "store": install (non-zero) or remove (0) the IPv4/IPv6 netfilter
+ * route hooks. The route and bridge hooks are mutually exclusive: enabling
+ * routing tears down the bridge hook first.
+ *
+ * NOTE(review): user_val is unsigned but scanned with "%d" (should be "%u"),
+ * and the nf_register_hook() return values are ignored — confirm and fix
+ * together with the other hook setters.
+ */
+static ssize_t pfe_vwd_set_route_hook_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	unsigned int user_val = 0;
+
+	sscanf(buf, "%d", &user_val);
+
+	if (user_val && !priv->fast_routing_enable)
+	{
+		printk("%s: Wifi fast routing enabled \n", __func__);
+		priv->fast_routing_enable = 1;
+
+		/* routing and bridging hooks are mutually exclusive */
+		if (priv->fast_bridging_enable)
+		{
+			nf_unregister_hook(&vwd_hook_bridge);
+			priv->fast_bridging_enable = 0;
+		}
+
+		nf_register_hook(&vwd_hook);
+		nf_register_hook(&vwd_hook_ipv6);
+
+
+	}
+	else if (!user_val && priv->fast_routing_enable)
+	{
+		printk("%s: Wifi fast routing disabled \n", __func__);
+		priv->fast_routing_enable = 0;
+
+		nf_unregister_hook(&vwd_hook);
+		nf_unregister_hook(&vwd_hook_ipv6);
+
+	}
+
+	return count;
+}
+
+/** pfe_vwd_show_bridge_hook_enable
+ *
+ * sysfs "show": report whether the netfilter bridge hook is installed (0/1).
+ *
+ * FIX: return ssize_t instead of int, matching the device_attribute .show
+ * prototype and every other show handler in this file.
+ */
+static ssize_t pfe_vwd_show_bridge_hook_enable(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+
+	return sprintf(buf, "%d", priv->fast_bridging_enable);
+}
+
+/** pfe_vwd_set_bridge_hook_enable
+ *
+ * sysfs "store": install (non-zero) or remove (0) the netfilter bridge
+ * hook. Enabling bridging tears down the route hooks first (the two are
+ * mutually exclusive).
+ *
+ * FIX: return ssize_t instead of int (device_attribute .store prototype),
+ * and scan the unsigned value with "%u" instead of "%d".
+ */
+static ssize_t pfe_vwd_set_bridge_hook_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	unsigned int user_val = 0;
+
+	sscanf(buf, "%u", &user_val);
+
+	if ( user_val && !priv->fast_bridging_enable )
+	{
+		printk("%s: Wifi fast bridging enabled \n", __func__);
+		priv->fast_bridging_enable = 1;
+
+		/* bridging and routing hooks are mutually exclusive */
+		if(priv->fast_routing_enable)
+		{
+			nf_unregister_hook(&vwd_hook);
+			nf_unregister_hook(&vwd_hook_ipv6);
+			priv->fast_routing_enable = 0;
+		}
+
+		nf_register_hook(&vwd_hook_bridge);
+	}
+	else if ( !user_val && priv->fast_bridging_enable )
+	{
+		printk("%s: Wifi fast bridging disabled \n", __func__);
+		priv->fast_bridging_enable = 0;
+
+		nf_unregister_hook(&vwd_hook_bridge);
+	}
+
+	return count;
+}
+
+/** pfe_vwd_show_direct_tx_path
+ *
+ * Per-VAP sysfs "show": report whether the direct VWD=>WiFi transmit path
+ * is enabled for the VAP named by @kobj.
+ *
+ * FIX: return ssize_t instead of int (kobj_attribute .show prototype).
+ */
+static ssize_t pfe_vwd_show_direct_tx_path(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct vap_desc_s *vap;
+	ssize_t rc;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+	rc = sprintf(buf, "%d\n", vap->direct_tx_path);
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return rc;
+}
+
+/** pfe_vwd_set_direct_tx_path
+ *
+ * Per-VAP sysfs "store": enable/disable the direct VWD=>WiFi transmit path.
+ * Returns @count on success, -EINVAL when the input is not a number.
+ *
+ * FIX: return ssize_t (kobj_attribute .store prototype) and reject
+ * unparsable input — "enable" was used uninitialized when sscanf failed.
+ */
+static ssize_t pfe_vwd_set_direct_tx_path(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct vap_desc_s *vap;
+	int enable;
+
+	if (sscanf(buf, "%d", &enable) != 1)
+		return -EINVAL;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+	printk(KERN_INFO "%s: VWD => WiFi direct path is %s for %s\n",
+			__func__, enable ? "enabled":"disabled", vap->ifname);
+	vap->direct_tx_path = enable;
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return count;
+}
+
+/** pfe_vwd_show_direct_rx_path
+ *
+ * Per-VAP sysfs "show": report whether the direct WiFi=>VWD receive path
+ * is enabled for the VAP named by @kobj.
+ *
+ * FIX: return ssize_t instead of int (kobj_attribute .show prototype).
+ */
+static ssize_t pfe_vwd_show_direct_rx_path(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct vap_desc_s *vap;
+	ssize_t rc;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+	rc = sprintf(buf, "%d\n", vap->direct_rx_path);
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return rc;
+}
+
+/** pfe_vwd_set_direct_rx_path
+ *
+ * Per-VAP sysfs "store": enable/disable the direct WiFi=>VWD receive path.
+ * Returns @count on success, -EINVAL when the input is not a number.
+ *
+ * FIX: return ssize_t (kobj_attribute .store prototype) and reject
+ * unparsable input — "enable" was used uninitialized when sscanf failed.
+ */
+static ssize_t pfe_vwd_set_direct_rx_path(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct vap_desc_s *vap;
+	int enable;
+
+	if (sscanf(buf, "%d", &enable) != 1)
+		return -EINVAL;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+	printk(KERN_INFO "%s: WiFi => VWD direct path is %s for %s\n",
+			__func__, enable ? "enabled":"disabled", vap->ifname);
+	vap->direct_rx_path = enable;
+
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return count;
+}
+
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+/** pfe_vwd_show_rx_cpu_affinity
+ *
+ * Per-VAP sysfs "show": report the CPU that handles this VAP's rx traffic.
+ *
+ * FIX: return ssize_t instead of int (kobj_attribute .show prototype).
+ */
+static ssize_t pfe_vwd_show_rx_cpu_affinity(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct vap_desc_s *vap;
+	ssize_t rc;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+	rc = sprintf(buf, "%d\n", vap->cpu_id);
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return rc;
+}
+
+/** pfe_vwd_set_rx_cpu_affinity
+ *
+ * Per-VAP sysfs "store": steer this VAP's rx processing to the given CPU.
+ * Out-of-range CPU numbers are logged and ignored.
+ *
+ * FIX: return ssize_t (kobj_attribute .store prototype); scan the unsigned
+ * value with "%u"; reject unparsable input — cpu_id was used uninitialized
+ * when sscanf failed.
+ */
+static ssize_t pfe_vwd_set_rx_cpu_affinity(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct vap_desc_s *vap;
+	unsigned int cpu_id;
+
+	if (sscanf(buf, "%u", &cpu_id) != 1)
+		return -EINVAL;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+
+	if (cpu_id < NR_CPUS) {
+		vap->cpu_id = cpu_id;
+		hif_lib_set_rx_cpu_affinity(&vap->client, cpu_id);
+	}
+	else
+		printk(KERN_ERR "%s: Invalid cpu#%d \n", __func__, cpu_id);
+
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return count;
+}
+#endif
+
+#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
+/** pfe_vwd_show_custom_skb_enable
+ *
+ * Per-VAP sysfs "show": report whether the custom skb layout is enabled.
+ *
+ * FIX: return ssize_t instead of int (kobj_attribute .show prototype).
+ */
+static ssize_t pfe_vwd_show_custom_skb_enable(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct vap_desc_s *vap;
+	ssize_t rc;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+	rc = sprintf(buf, "%d\n", vap->custom_skb);
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return rc;
+}
+
+/** pfe_vwd_set_custom_skb_enable
+ *
+ * Per-VAP sysfs "store": enable/disable the custom skb layout feature.
+ * Returns @count on success, -EINVAL when the input is not a number.
+ *
+ * FIX: return ssize_t (kobj_attribute .store prototype); reject unparsable
+ * input — "enable" was used uninitialized when sscanf failed; fix the
+ * "Custun" typo in the log message.
+ */
+static ssize_t pfe_vwd_set_custom_skb_enable(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct vap_desc_s *vap;
+	int enable;
+
+	if (sscanf(buf, "%d", &enable) != 1)
+		return -EINVAL;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+	BUG_ON(!vap);
+	printk(KERN_INFO "%s: Custom skb feature is %s for %s\n", __func__, enable ? "enabled":"disabled", vap->ifname);
+	vap->custom_skb = enable;
+	spin_unlock_bh(&pfe->vwd.vaplock);
+
+	return count;
+}
+#endif
+
+#ifdef PFE_VWD_TX_STATS
+/** pfe_vwd_show_vap_tx_stats
+ *
+ * Per-VAP sysfs "show": dump the per-tx-queue stall/cleanup counters.
+ *
+ * FIX: return ssize_t instead of int (kobj_attribute .show prototype).
+ */
+static ssize_t pfe_vwd_show_vap_tx_stats(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct vap_desc_s *vap;
+	ssize_t len = 0;
+	int ii;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+
+	BUG_ON(!vap);
+
+	len = sprintf(buf, "TX queues stats:\n");
+
+	for (ii = 0; ii < VWD_TXQ_CNT; ii++) {
+		len += sprintf(buf + len, "Queue #%02d\n", ii);
+		len += sprintf(buf + len, "		clean_fail            = %10d\n", vap->clean_fail[ii]);
+		len += sprintf(buf + len, "		stop_queue            = %10d\n", vap->stop_queue_total[ii]);
+		len += sprintf(buf + len, "		stop_queue_hif        = %10d\n", vap->stop_queue_hif[ii]);
+		len += sprintf(buf + len, "		stop_queue_hif_client = %10d\n", vap->stop_queue_hif_client[ii]);
+	}
+
+	spin_unlock_bh(&pfe->vwd.vaplock);
+	return len;
+}
+
+/** pfe_vwd_set_vap_tx_stats
+ *
+ * Per-VAP sysfs "store": any write clears the per-tx-queue counters.
+ * Each queue's counters are cleared under that queue's tx_lock.
+ *
+ * FIX: return ssize_t instead of int (kobj_attribute .store prototype).
+ */
+static ssize_t pfe_vwd_set_vap_tx_stats(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct vap_desc_s *vap;
+	int ii;
+
+	spin_lock_bh(&pfe->vwd.vaplock);
+	vap = get_vap_by_name(&pfe->vwd, kobject_name(kobj));
+
+	BUG_ON(!vap);
+
+	for (ii = 0; ii < VWD_TXQ_CNT; ii++) {
+		spin_lock_bh(&vap->tx_lock[ii]);
+		vap->clean_fail[ii] = 0;
+		vap->stop_queue_total[ii] = 0;
+		vap->stop_queue_hif[ii] = 0;
+		vap->stop_queue_hif_client[ii] = 0;
+		spin_unlock_bh(&vap->tx_lock[ii]);
+	}
+
+	spin_unlock_bh(&pfe->vwd.vaplock);
+	return count;
+}
+#endif
+
+/*
+ * pfe_vwd_show_tso_stats
+ *
+ * sysfs "show": dump the TSO transmit-length histogram (2 KiB buckets).
+ */
+static ssize_t pfe_vwd_show_tso_stats(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	ssize_t len = 0;
+	int i;
+
+	/* FIX: derive the bucket count from the array instead of the magic
+	 * constant 32, keeping the loop in sync with the histogram size
+	 * (pfe_vwd_set_tso_stats already uses sizeof() on the same array). */
+	for (i = 0; i < ARRAY_SIZE(priv->tso.len_counters); i++)
+		len += sprintf(buf + len, "TSO packets > %dKBytes = %u\n", i * 2, priv->tso.len_counters[i]);
+
+	return len;
+}
+
+/*
+ * pfe_vwd_set_tso_stats
+ *
+ * sysfs "store": any write clears the TSO length histogram.
+ */
+static ssize_t pfe_vwd_set_tso_stats(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pfe_vwd_priv_s *vwd = &pfe->vwd;
+
+	memset(&vwd->tso.len_counters[0], 0, sizeof(vwd->tso.len_counters));
+	return count;
+}
+
+/** pfe_vwd_classify_packet
+ *
+ * Decide whether @skb (seen by a netfilter hook) should be diverted to the
+ * PFE fast path.
+ *
+ * Returns 0 when the packet belongs to a registered VAP and may be queued
+ * to PFE; *vapid is then the VAP id and *own_mac tells whether the packet
+ * is addressed to a local MAC. Returns 1 when the packet must stay on the
+ * normal stack path (multicast/broadcast, runt, already-seen, unknown VAP,
+ * unsupported protocol, or a VAP using the direct rx API).
+ */
+static int pfe_vwd_classify_packet( struct pfe_vwd_priv_s *priv, struct sk_buff *skb,
+					int bridge_hook, int route_hook, int *vapid, int *own_mac)
+{
+	unsigned short type;
+	struct vap_desc_s *vap;
+	int rc = 1, ii, length;
+	unsigned char *data_ptr;
+	struct ethhdr *hdr;
+#if defined (CONFIG_COMCERTO_VWD_MULTI_MAC)
+	struct net_bridge_fdb_entry *dst = NULL;
+	struct net_bridge_port *p = NULL;
+	const unsigned char *dest = eth_hdr(skb)->h_dest;
+#endif
+	*own_mac = 0;
+
+	/* Move to packet network header: length includes the MAC header even
+	 * if skb->data has already been pulled past it */
+	data_ptr = skb_mac_header(skb);
+	length = skb->len + (skb->data - data_ptr);
+
+	spin_lock_bh(&priv->vaplock);
+	/* Broadcasts and MC are handled by stack */
+	if( (eth_hdr(skb)->h_dest[0] & 0x1) ||
+			( length <= ETH_HLEN ) )
+	{
+		rc = 1;
+		goto done;
+	}
+
+	/* FIXME: This packet is VWD slow path packet, and already seen by VWD */
+	/* 0xdead at skb->head is a marker set on packets re-injected by VWD;
+	 * clear it and leave them to the stack to avoid looping */
+	if (*(unsigned long *)skb->head == 0xdead)
+	{
+		//printk(KERN_INFO "%s:This is dead packet....\n", __func__);
+		*(unsigned long *)skb->head = 0x0;
+		rc = 1;
+		goto done;
+	}
+
+#ifdef VWD_DEBUG
+	printk(KERN_INFO "%s: skb cur len:%d skb orig len:%d\n", __func__, skb->len, length );
+#endif
+
+	/* FIXME: We need to check the route table for the route entry. If route
+	 * entry found for the current packet, send the packet to PFE. Otherwise
+	 * REJECT the packet.
+	 */
+	/* Match the ingress interface against the registered VAPs */
+	for ( ii = 0; ii < MAX_VAP_SUPPORT; ii++ )
+	{
+		vap = priv->vaps[ii];
+		if ((vap) && (vap->ifindex == skb->skb_iif) )
+		{
+			/* This interface packets need to be processed by direct API */
+			if (vap->direct_rx_path) {
+				rc = 1;
+				goto done;
+			}
+
+			hdr = (struct ethhdr *)data_ptr;
+			type = htons(hdr->h_proto);
+			data_ptr += ETH_HLEN;
+			length -= ETH_HLEN;
+			rc = 0;
+
+			*vapid = vap->vapid;
+
+			/* FIXME send only IPV4 and IPV6 packets to PFE */
+			//Determain final protocol type
+			//FIXME : This multi level parsing is not required for
+			//        Bridged packets.
+			/* Peel a single VLAN tag, then a PPPoE session header,
+			 * to find the innermost protocol type */
+			if( type == ETH_P_8021Q )
+			{
+				struct vlan_hdr *vhdr = (struct vlan_hdr *)data_ptr;
+
+				data_ptr += VLAN_HLEN;
+				length -= VLAN_HLEN;
+				type = htons(vhdr->h_vlan_encapsulated_proto);
+			}
+
+			if( type == ETH_P_PPP_SES )
+			{
+				struct pppoe_hdr *phdr = (struct pppoe_hdr *)data_ptr;
+
+				data_ptr += PPPOE_SES_HLEN;
+				length -= PPPOE_SES_HLEN;
+
+				if (htons(*(u16 *)(phdr+1)) == PPP_IP)
+					type = ETH_P_IP;
+				else if (htons(*(u16 *)(phdr+1)) == PPP_IPV6)
+					type = ETH_P_IPV6;
+			}
+
+
+			if (bridge_hook)
+			{
+#if defined (CONFIG_COMCERTO_VWD_MULTI_MAC)
+				/* check if destination MAC matches one of the interfaces attached to the bridge */
+				if((p = rcu_dereference(skb->dev->br_port)) != NULL)
+					dst = __br_fdb_get(p->br, dest);
+
+				if (skb->pkt_type == PACKET_HOST || (dst && dst->is_local))
+#else
+				if (skb->pkt_type == PACKET_HOST)
+#endif
+				{
+					/* locally addressed: only IPv4/IPv6 go to PFE */
+					*own_mac = 1;
+
+					if ((type != ETH_P_IP) && (type != ETH_P_IPV6))
+					{
+						rc = 1;
+						goto done;
+					}
+				}
+				else if (!memcmp(vap->macaddr, eth_hdr(skb)->h_dest, ETH_ALEN))
+				{
+					//WiFi management packets received with dst address
+					//as bssid
+					rc = 1;
+					goto done;
+				}
+			}
+			else
+				*own_mac = 1;
+
+			break;
+		}
+
+	}
+
+done:
+	spin_unlock_bh(&priv->vaplock);
+	return rc;
+
+}
+
+/** pfe_vwd_flush_txQ
+ *
+ * Reap completed transmit buffers on @vap's queue @queuenum and free their
+ * skbs.
+ *
+ * @from_tx: non-zero when called from the transmit path — the caller then
+ *	already holds tx_lock and at most max(TX_FREE_MAX_COUNT, @n_desc)
+ *	completions are reclaimed; zero when called from the recovery
+ *	timer — the lock is taken here and the queue is drained fully
+ *	(count is never decremented, so n_desc is effectively ignored).
+ */
+static void pfe_vwd_flush_txQ(struct vap_desc_s *vap, int queuenum, int from_tx, int n_desc)
+{
+	struct sk_buff *skb;
+	int count = max(TX_FREE_MAX_COUNT, n_desc);
+	unsigned int flags;
+
+	//printk(KERN_INFO "%s\n", __func__);
+
+	if (!from_tx)
+		spin_lock_bh(&vap->tx_lock[queuenum]);
+
+	while (count && (skb = hif_lib_tx_get_next_complete(&vap->client, queuenum, &flags, count))) {
+
+		/* FIXME : Invalid data can be skipped in hif_lib itself */
+		/* Only entries carrying real data own an skb to free; unmap
+		 * first when the buffer was DMA-mapped by the tx path */
+		if (flags & HIF_DATA_VALID) {
+			if (flags & HIF_DONT_DMA_MAP)
+				pfe_tx_skb_unmap(skb);
+			dev_kfree_skb_any(skb);
+		}
+		// When called from the timer, flush all descriptors
+		if (from_tx)
+			count--;
+	}
+
+	if (!from_tx)
+		spin_unlock_bh(&vap->tx_lock[queuenum]);
+
+
+}
+
+/** pfe_vwd_flush_tx
+ *
+ * Release completed tx buffers on every queue of @vap. When @force is not
+ * set, only queues idle for longer than COMCERTO_TX_RECOVERY_TIMEOUT_MS
+ * are flushed.
+ */
+static void pfe_vwd_flush_tx(struct vap_desc_s *vap, int force)
+{
+	int qno;
+
+	for (qno = 0; qno < VWD_TXQ_CNT; qno++) {
+		unsigned long deadline = vap->client.tx_q[qno].jiffies_last_packet +
+					(COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ) / 1000;
+
+		/* count argument is ignored when from_tx == 0: drain fully */
+		if (force || time_after(jiffies, deadline))
+			pfe_vwd_flush_txQ(vap, qno, 0, 0);
+	}
+}
+
+
+/** pfe_vwd_tx_timeout
+ *
+ * Periodic recovery timer: flush stale tx buffers of every active VAP,
+ * then re-arm the timer for the next interval.
+ */
+void pfe_vwd_tx_timeout(unsigned long data )
+{
+	struct pfe_vwd_priv_s *priv = (struct pfe_vwd_priv_s *)data;
+	struct vap_desc_s *vap;
+	int idx;
+
+	spin_lock_bh(&priv->vaplock);
+
+	for (idx = 0; idx < MAX_VAP_SUPPORT; idx++) {
+		vap = priv->vaps[idx];
+		if (vap)
+			pfe_vwd_flush_tx(vap, 0);
+	}
+
+	priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
+	add_timer(&priv->tx_timer);
+	spin_unlock_bh(&priv->vaplock);
+}
+
+/** pfe_vwd_send_packet
+ *
+ * Queue one skb (linear or scattered) from VAP @vap towards PFE on HIF
+ * client queue @queuenum, under the queue's tx_lock. On any drop the skb
+ * is freed here. After queueing, completed buffers may be recycled.
+ *
+ * @own_mac: packet is addressed to a local MAC (sets HIF_CTRL_TX_OWN_MAC)
+ * @ctrl:    extra HIF control bits to OR in
+ */
+static void pfe_vwd_send_packet( struct sk_buff *skb, struct pfe_vwd_priv_s *priv, int queuenum, struct vap_desc_s *vap, int own_mac, u32 ctrl)
+{
+	void *data;
+	int count;
+	unsigned int nr_frags;
+	struct skb_shared_info *sh;
+	unsigned int nr_desc, nr_segs;
+
+	spin_lock_bh(&vap->tx_lock[queuenum]);
+
+	/* Make room in front of the data for the PFE packet header */
+	if (skb_headroom(skb) < (PFE_PKT_HEADER_SZ + sizeof(unsigned long))) {
+
+		if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned long)), 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			skb = NULL;
+#ifdef VWD_DEBUG_STATS
+			priv->pkts_tx_dropped += 1;
+#endif
+			/* BUG FIX: the original jumped to the common exit here,
+			 * calling hif_tx_unlock() although hif_tx_lock() had
+			 * not been taken yet on this path. */
+			goto flush;
+		}
+	}
+
+	pfe_tx_get_req_desc(skb, &nr_desc, &nr_segs);
+	hif_tx_lock(&pfe->hif);
+
+	/* Drop when either the HIF ring or the client queue lacks descriptors */
+	if ((__hif_tx_avail(&pfe->hif) < nr_desc) || (hif_lib_tx_avail(&vap->client, queuenum) < nr_desc)) {
+
+		kfree_skb(skb);
+		/* BUG FIX: clear the pointer so the flush condition below does
+		 * not dereference a freed skb */
+		skb = NULL;
+#ifdef VWD_DEBUG_STATS
+		priv->pkts_tx_dropped++;
+#endif
+		goto out;
+	}
+
+	/* Send vap_id to PFE */
+	if (own_mac)
+		ctrl |= ((vap->vapid << HIF_CTRL_VAPID_OFST) | HIF_CTRL_TX_OWN_MAC);
+	else
+		ctrl |= (vap->vapid << HIF_CTRL_VAPID_OFST);
+
+	/* Ask PFE to validate the checksum when the WiFi driver did not */
+	if ((vap->wifi_dev->features & NETIF_F_RXCSUM) && (skb->ip_summed == CHECKSUM_NONE))
+		ctrl |= HIF_CTRL_TX_CSUM_VALIDATE;
+
+	sh = skb_shinfo(skb);
+	nr_frags = sh->nr_frags;
+
+	/* if nr_frags, then skb is scattered, otherwise linear skb */
+	if (nr_frags) {
+		skb_frag_t *f;
+		int i;
+
+		__hif_lib_xmit_pkt(&vap->client, queuenum, skb->data, skb_headlen(skb), ctrl, HIF_FIRST_BUFFER, skb);
+
+		for (i = 0; i < nr_frags - 1; i++) {
+			f = &sh->frags[i];
+
+			__hif_lib_xmit_pkt(&vap->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, 0x0, skb);
+		}
+
+		/* last fragment carries the LAST/DATA_VALID flags */
+		f = &sh->frags[i];
+
+		__hif_lib_xmit_pkt(&vap->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, HIF_LAST_BUFFER|HIF_DATA_VALID, skb);
+
+
+#ifdef VWD_DEBUG_STATS
+		priv->pkts_local_tx_sgs += 1;
+#endif
+	}
+	else
+	{
+#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
+		/* custom layout: tail of the payload already lives in the mspd
+		 * buffer; prepend the linear head so it is one contiguous area */
+		if (skb->mspd_data && skb->mspd_len) {
+			int len = skb->len - skb->mspd_len;
+
+			data = (skb->mspd_data + skb->mspd_ofst) - len;
+			memcpy(data, skb->data, len);
+		}
+		else
+#endif
+
+			data = skb->data;
+
+		__hif_lib_xmit_pkt(&vap->client, queuenum, data, skb->len, ctrl, HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID, skb);
+	}
+
+	hif_tx_dma_start();
+
+#ifdef VWD_DEBUG_STATS
+	priv->pkts_transmitted += 1;
+#endif
+	vap->stats.tx_packets++;
+	vap->stats.tx_bytes += skb->len;
+
+
+out:
+	hif_tx_unlock(&pfe->hif);
+flush:
+	// Recycle buffers if a socket's send buffer becomes half full or if the HIF client queue starts filling up
+	if (((count = (hif_lib_tx_pending(&vap->client, queuenum) - HIF_CL_TX_FLUSH_MARK)) > 0)
+			|| (skb && skb->sk && ((sk_wmem_alloc_get(skb->sk) << 1) > skb->sk->sk_sndbuf)))
+		pfe_vwd_flush_txQ(vap, queuenum, 1, count);
+
+	spin_unlock_bh(&vap->tx_lock[queuenum]);
+
+	return;
+}
+
+/*
+ * vwd_wifi_if_send_pkt
+ *
+ * Direct-API entry called by the WiFi driver with a received packet.
+ * Returns 0 when the packet was consumed (queued to PFE), -1 when the
+ * caller must process it on the normal path (fast path off, multicast,
+ * unknown interface, or the VAP has the direct rx path disabled).
+ */
+static int vwd_wifi_if_send_pkt(struct sk_buff *skb)
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	int ii;
+	unsigned int dst_mac[2];
+
+	if (!priv->fast_path_enable)
+		goto end;
+
+	/* Copy destination mac into cacheable memory */
+	/* NOTE(review): __memcpy8 copies 8 bytes — 2 bytes past the 6-byte
+	 * MAC, which presumably stays within the Ethernet header; confirm. */
+	if (!((unsigned long)skb->data & 0x3))
+		__memcpy8(dst_mac, skb->data);
+	else
+		memcpy(dst_mac, skb->data, 6);
+
+	/* multicast/broadcast stays on the stack path */
+	if (dst_mac[0] & 0x1)
+		goto end;
+
+	spin_lock_bh(&priv->vaplock);
+
+	for (ii = 0; ii < MAX_VAP_SUPPORT; ii++)
+	{
+		struct vap_desc_s *vap;
+
+		vap = priv->vaps[ii];
+
+		if (vap && (vap->ifindex == skb->dev->ifindex))
+		{
+			if (unlikely(!vap->direct_rx_path)) {
+				spin_unlock_bh(&priv->vaplock);
+				goto end;
+			}
+
+			/* own_mac=1 when addressed to the VAP's own MAC */
+			if (!memcmp(vap->macaddr, dst_mac, ETH_ALEN))
+				pfe_vwd_send_packet( skb, priv, 0, vap, 1, 0);
+			else
+				pfe_vwd_send_packet( skb, priv, 0, vap, 0, 0);
+
+			break;
+		}
+	}
+
+	spin_unlock_bh(&priv->vaplock);
+
+	/* no VAP matched the ingress interface */
+	if (unlikely(ii == MAX_VAP_SUPPORT))
+		goto end;
+
+	return 0;
+
+end:
+	return -1;
+}
+
+
+/** pfe_vwd_nf_bridge_hook_fn
+ *
+ * Netfilter bridge hook: divert bridged WiFi packets to the PFE fast path.
+ * Returns NF_STOLEN when the packet was queued to PFE, NF_ACCEPT otherwise.
+ *
+ * NOTE(review): priv->vaps[vapid] is dereferenced after
+ * pfe_vwd_classify_packet() has released vaplock — assumes the VAP cannot
+ * be removed in between; confirm against the VAP removal path.
+ */
+static unsigned int pfe_vwd_nf_bridge_hook_fn( unsigned int hook, struct sk_buff *skb,
+		const struct net_device *in, const struct net_device *out,
+		int (*okfn)(struct sk_buff *))
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	int vapid = -1;
+	int own_mac = 0;
+
+#ifdef VWD_DEBUG
+	printk("%s: protocol : 0x%04x\n", __func__, htons(skb->protocol));
+#endif
+
+	if( !priv->fast_path_enable )
+		goto done;
+
+	if( !pfe_vwd_classify_packet(priv, skb, 1, 0, &vapid, &own_mac) )
+	{
+#ifdef VWD_DEBUG
+		printk("%s: Accepted\n", __func__);
+		// pfe_vwd_dump_skb( skb );
+#endif
+		/* restore the MAC header before handing the frame to PFE */
+		skb_push(skb, ETH_HLEN);
+		pfe_vwd_send_packet( skb, priv, 0, priv->vaps[vapid], own_mac, 0);
+		return NF_STOLEN;
+	}
+
+done:
+
+	return NF_ACCEPT;
+
+}
+
+/** pfe_vwd_nf_route_hook_fn
+ *
+ * Netfilter route (IPv4/IPv6) hook: divert routed WiFi packets to the PFE
+ * fast path. Returns NF_STOLEN when the packet was queued to PFE,
+ * NF_ACCEPT otherwise.
+ *
+ * NOTE(review): as in the bridge hook, priv->vaps[vapid] is read after
+ * vaplock has been dropped — confirm against concurrent VAP removal.
+ */
+static unsigned int pfe_vwd_nf_route_hook_fn( unsigned int hook, struct sk_buff *skb,
+		const struct net_device *in, const struct net_device *out,
+		int (*okfn)(struct sk_buff *))
+{
+	struct pfe_vwd_priv_s *priv = &pfe->vwd;
+	int vapid = -1;
+	int own_mac = 0;
+
+#ifdef VWD_DEBUG
+	printk("%s: protocol : 0x%04x\n", __func__, htons(skb->protocol));
+#endif
+
+	if (!priv->fast_path_enable)
+		goto done;
+
+	if (!pfe_vwd_classify_packet(priv, skb, 0, 1, &vapid, &own_mac))
+	{
+#ifdef VWD_DEBUG
+		printk("%s: Accepted\n", __func__);
+//		pfe_vwd_dump_skb( skb );
+#endif
+		/* restore the MAC header before handing the frame to PFE */
+		skb_push(skb, ETH_HLEN);
+		pfe_vwd_send_packet( skb, priv, 0, priv->vaps[vapid], own_mac, 0);
+		return NF_STOLEN;
+	}
+
+done:
+	return NF_ACCEPT;
+
+}
+
+/* Device-wide attributes created under the PFE platform device */
+static DEVICE_ATTR(vwd_debug_stats, 0444, pfe_vwd_show_dump_stats, NULL);
+static DEVICE_ATTR(vwd_fast_path_enable, 0644, pfe_vwd_show_fast_path_enable, pfe_vwd_set_fast_path_enable);
+static DEVICE_ATTR(vwd_route_hook_enable, 0644, pfe_vwd_show_route_hook_enable, pfe_vwd_set_route_hook_enable);
+static DEVICE_ATTR(vwd_bridge_hook_enable, 0644, pfe_vwd_show_bridge_hook_enable, pfe_vwd_set_bridge_hook_enable);
+static DEVICE_ATTR(vwd_tso_stats, 0644, pfe_vwd_show_tso_stats, pfe_vwd_set_tso_stats);
+
+/* Per-VAP attributes, registered on each VAP's kobject via vap_attr_group */
+static struct kobj_attribute direct_rx_attr =
+	__ATTR(direct_rx_path, 0644, pfe_vwd_show_direct_rx_path, pfe_vwd_set_direct_rx_path);
+static struct kobj_attribute direct_tx_attr =
+	__ATTR(direct_tx_path, 0644, pfe_vwd_show_direct_tx_path, pfe_vwd_set_direct_tx_path);
+#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
+static struct kobj_attribute custom_skb_attr =
+	__ATTR(custom_skb_enable, 0644, pfe_vwd_show_custom_skb_enable, pfe_vwd_set_custom_skb_enable);
+#endif
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+static struct kobj_attribute rx_cpu_affinity_attr =
+	__ATTR(rx_cpu_affinity, 0644, pfe_vwd_show_rx_cpu_affinity, pfe_vwd_set_rx_cpu_affinity);
+#endif
+#ifdef PFE_VWD_TX_STATS
+static struct kobj_attribute tx_stats_attr =
+	__ATTR(tx_stats, 0644, pfe_vwd_show_vap_tx_stats, pfe_vwd_set_vap_tx_stats);
+#endif
+static struct attribute *vap_attrs[] = {
+	&direct_rx_attr.attr,
+	&direct_tx_attr.attr,
+#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
+	&custom_skb_attr.attr,
+#endif
+#if defined(CONFIG_SMP) && (NR_CPUS > 1)
+	&rx_cpu_affinity_attr.attr,
+#endif
+#ifdef PFE_VWD_TX_STATS
+	&tx_stats_attr.attr,
+#endif
+	NULL,
+};
+
+static struct attribute_group vap_attr_group = {
+	.attrs = vap_attrs,
+};
+
+#ifdef PFE_VWD_NAPI_STATS
+static DEVICE_ATTR(vwd_napi_stats, 0644, pfe_vwd_show_napi_stats, pfe_vwd_set_napi_stats);
+#endif
+/* VAP add/remove controls; only registered when vwd_tx_ofld is set */
+static DEVICE_ATTR(vwd_vap_create, 0644, NULL, pfe_vwd_vap_create);
+static DEVICE_ATTR(vwd_vap_remove, 0644, NULL, pfe_vwd_vap_remove);
+#ifdef PFE_VWD_LRO_STATS
+static DEVICE_ATTR(vwd_lro_nb_stats, 0644, pfe_vwd_show_lro_nb_stats, pfe_vwd_set_lro_nb_stats);
+static DEVICE_ATTR(vwd_lro_len_stats, 0644, pfe_vwd_show_lro_len_stats, pfe_vwd_set_lro_len_stats);
+#endif
+/** pfe_vwd_sysfs_init
+ *
+ * Create all VWD device attributes. Returns 0 on success; on failure every
+ * attribute created so far is removed again and -1 is returned.
+ *
+ * The error unwinding relies on *fallthrough*: each label removes the
+ * attribute created just before the one that failed and then falls into
+ * the next removal, so the preprocessor conditionals below must mirror the
+ * creation order exactly.
+ */
+static int pfe_vwd_sysfs_init( struct pfe_vwd_priv_s *priv )
+{
+	struct pfe *pfe = priv->pfe;
+
+	if (device_create_file(pfe->dev, &dev_attr_vwd_debug_stats))
+		goto err_dbg_sts;
+
+	if (device_create_file(pfe->dev, &dev_attr_vwd_fast_path_enable))
+		goto err_fp_en;
+
+	if (device_create_file(pfe->dev, &dev_attr_vwd_route_hook_enable))
+		goto err_rt;
+
+	if (device_create_file(pfe->dev, &dev_attr_vwd_bridge_hook_enable))
+		goto err_br;
+
+	/* VAP add/remove attributes only exist in tx-offload mode */
+	if (vwd_tx_ofld && device_create_file(pfe->dev, &dev_attr_vwd_vap_create))
+		goto err_vap_add;
+
+	if (vwd_tx_ofld && device_create_file(pfe->dev, &dev_attr_vwd_vap_remove))
+		goto err_vap_del;
+
+	if (device_create_file(pfe->dev, &dev_attr_vwd_tso_stats))
+		goto err_tso_stats;
+
+#ifdef PFE_VWD_NAPI_STATS
+	if (device_create_file(pfe->dev, &dev_attr_vwd_napi_stats))
+		goto err_napi;
+#endif
+
+#ifdef PFE_VWD_LRO_STATS
+	if (device_create_file(pfe->dev, &dev_attr_vwd_lro_nb_stats))
+		goto err_lro_nb;
+
+	if (device_create_file(pfe->dev, &dev_attr_vwd_lro_len_stats))
+		goto err_lro_len;
+#endif
+
+	return 0;
+
+	/* Unwind: each label falls through to undo the earlier creations */
+#ifdef PFE_VWD_LRO_STATS
+err_lro_len:
+	device_remove_file(pfe->dev, &dev_attr_vwd_lro_nb_stats);
+err_lro_nb:
+#endif
+
+#ifdef PFE_VWD_NAPI_STATS
+	device_remove_file(pfe->dev, &dev_attr_vwd_napi_stats);
+err_napi:
+#endif
+
+	/* tso_stats was created whenever any of the stats blocks is reached
+	 * by fallthrough; a direct goto err_tso_stats skips this removal */
+#if defined(PFE_VWD_LRO_STATS) || defined(PFE_VWD_NAPI_STATS)
+	device_remove_file(pfe->dev, &dev_attr_vwd_tso_stats);
+#endif
+
+err_tso_stats:
+	if (vwd_tx_ofld)
+		device_remove_file(pfe->dev, &dev_attr_vwd_vap_remove);
+err_vap_del:
+	if (vwd_tx_ofld)
+		device_remove_file(pfe->dev, &dev_attr_vwd_vap_create);
+err_vap_add:
+	device_remove_file(pfe->dev, &dev_attr_vwd_bridge_hook_enable);
+err_br:
+	device_remove_file(pfe->dev, &dev_attr_vwd_route_hook_enable);
+err_rt:
+	device_remove_file(pfe->dev, &dev_attr_vwd_fast_path_enable);
+err_fp_en:
+	device_remove_file(pfe->dev, &dev_attr_vwd_debug_stats);
+err_dbg_sts:
+	return -1;
+}
+
+
+/** pfe_vwd_sysfs_exit
+ *
+ * Remove every device attribute registered by pfe_vwd_sysfs_init().
+ */
+static void pfe_vwd_sysfs_exit(void)
+{
+	device_remove_file(pfe->dev, &dev_attr_vwd_tso_stats);
+#ifdef PFE_VWD_LRO_STATS
+	device_remove_file(pfe->dev, &dev_attr_vwd_lro_len_stats);
+	device_remove_file(pfe->dev, &dev_attr_vwd_lro_nb_stats);
+#endif
+#ifdef PFE_VWD_NAPI_STATS
+	device_remove_file(pfe->dev, &dev_attr_vwd_napi_stats);
+#endif
+	/* VAP add/remove attributes only exist in tx-offload mode */
+	if (vwd_tx_ofld) {
+		device_remove_file(pfe->dev, &dev_attr_vwd_vap_remove);
+		device_remove_file(pfe->dev, &dev_attr_vwd_vap_create);
+	}
+	device_remove_file(pfe->dev, &dev_attr_vwd_bridge_hook_enable);
+	device_remove_file(pfe->dev, &dev_attr_vwd_route_hook_enable);
+	device_remove_file(pfe->dev, &dev_attr_vwd_fast_path_enable);
+	device_remove_file(pfe->dev, &dev_attr_vwd_debug_stats);
+}
+
+/** pfe_vwd_rx_page
+ *
+ * Receive one (possibly multi-buffer) packet from HIF queue @qno as a
+ * paged skb: the first buffer's header bytes are copied into a small
+ * linear skb and the rest attached as page fragments. Partial chains are
+ * parked in vap->skb_inflight[], indexed by queue (plus PE id on queue 2,
+ * where buffers from several PEs can interleave).
+ *
+ * Returns the completed skb, or NULL when the queue is empty or the
+ * packet was dropped. *ctrl receives the last buffer's HIF rx control.
+ */
+static struct sk_buff *pfe_vwd_rx_page(struct vap_desc_s *vap, int qno, unsigned int *ctrl)
+{
+	struct page *p;
+	void *buf_addr;
+	unsigned int rx_ctrl;
+	unsigned int desc_ctrl = 0;
+	struct sk_buff *skb;
+	int length, offset, data_offset;
+	struct hif_lro_hdr *lro_hdr;
+	u32 pe_id;
+	struct pfe_vwd_priv_s *priv = vap->priv;
+
+
+	while (1) {
+		buf_addr = hif_lib_receive_pkt(&vap->client, qno, &length, &offset, &rx_ctrl, &desc_ctrl, (void **)&lro_hdr);
+
+		if (!buf_addr)
+			goto empty;
+
+		/* queue 2 interleaves buffers from several PEs; demux on PE id */
+		if (qno == 2)
+			pe_id = (rx_ctrl >> HIF_CTRL_RX_PE_ID_OFST) & 0xf;
+		else
+			pe_id = 0;
+
+		skb = vap->skb_inflight[qno + pe_id];
+
+#ifdef PFE_VWD_NAPI_STATS
+		priv->napi_counters[NAPI_DESC_COUNT]++;
+#endif
+
+		*ctrl = rx_ctrl;
+
+		/* First frag */
+		if ((desc_ctrl & CL_DESC_FIRST) && !skb) {
+			p = virt_to_page(buf_addr);
+
+			skb = dev_alloc_skb(MAX_HDR_SIZE + PFE_PKT_HEADROOM + 2);
+			if (unlikely(!skb)) {
+				goto pkt_drop;
+			}
+
+			skb_reserve(skb, PFE_PKT_HEADROOM + 2);
+
+			/* LRO header (if present) tells how many header bytes
+			 * to pull into the linear area and the gso size */
+			if (lro_hdr) {
+				data_offset = lro_hdr->data_offset;
+				if (lro_hdr->mss)
+					skb_shinfo(skb)->gso_size = lro_hdr->mss;
+
+				//	printk(KERN_INFO "mss: %d, offset: %d, data_offset: %d, len: %d\n", lro_hdr->mss, offset, lro_hdr->data_offset, length);
+			} else {
+				data_offset = MAX_HDR_SIZE;
+			}
+
+			/* We don't need the fragment if the whole packet */
+			/* has been copied in the first linear skb */
+			if (length <= data_offset) {
+				__memcpy(skb->data, buf_addr + offset, length);
+				skb_put(skb, length);
+				free_page((unsigned long)buf_addr);
+			} else {
+				__memcpy(skb->data, buf_addr + offset, data_offset);
+				skb_put(skb, data_offset);
+				skb_add_rx_frag(skb, 0, p, offset + data_offset, length - data_offset);
+			}
+
+			if ((vap->wifi_dev->features & NETIF_F_RXCSUM) && (rx_ctrl & HIF_CTRL_RX_CHECKSUMMED))
+			{
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+#ifdef VWD_DEBUG_STATS
+				priv->rx_csum_correct++;
+#endif
+			}
+
+		} else {
+			/* Next frags */
+			if (unlikely(!skb)) {
+				printk(KERN_ERR "%s: NULL skb_inflight\n", __func__);
+				goto pkt_drop;
+			}
+
+			p = virt_to_page(buf_addr);
+
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, p, offset, length);
+		}
+
+		/* Last buffer in a software chain */
+		if ((desc_ctrl & CL_DESC_LAST) && !(rx_ctrl & HIF_CTRL_RX_CONTINUED))
+			break;
+
+		/* Keep track of skb for this queue/pe */
+		vap->skb_inflight[qno + pe_id] = skb;
+	}
+
+	vap->skb_inflight[qno + pe_id] = NULL;
+
+	return skb;
+
+pkt_drop:
+	vap->skb_inflight[qno + pe_id] = NULL;
+
+	/* drop either the partially assembled skb or the raw page buffer */
+	if (skb) {
+		kfree_skb(skb);
+	} else {
+		free_page((unsigned long)buf_addr);
+	}
+
+	return NULL;
+
+empty:
+	return NULL;
+}
+
+/** pfe_vwd_rx_skb
+ *
+ * Receive one linear packet from HIF queue @qno and wrap it in an skb.
+ * Three paths, chosen from the rx control flags and the VAP config:
+ *  - custom skb layout: small copy of the WiFi header into a fresh skb,
+ *    remainder left in the original buffer via the mspd_* fields;
+ *  - exception (HIF_CTRL_RX_WIFI_EXPT) traffic headed to the stack:
+ *    fresh skb (copied out of NCNB memory when that zone is used), plus
+ *    checksum and inbound-IPsec secpath handling;
+ *  - fast-path traffic: a header-only skb wrapped around the HIF buffer.
+ *
+ * Returns the skb or NULL (queue empty or drop). *ctrl receives the HIF
+ * rx control word.
+ */
+static struct sk_buff *pfe_vwd_rx_skb(struct vap_desc_s *vap, int qno, unsigned int *ctrl)
+{
+	void *buf_addr;
+	struct hif_ipsec_hdr *ipsec_hdr;
+	unsigned int rx_ctrl;
+	unsigned int desc_ctrl = 0;
+	struct sk_buff *skb = NULL;
+	int length = 0, offset;
+	struct pfe_vwd_priv_s *priv = vap->priv;
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+	struct timespec ktime;
+#endif
+
+	buf_addr = hif_lib_receive_pkt(&vap->client, qno, &length, &offset, &rx_ctrl, &desc_ctrl,(void **) &ipsec_hdr);
+	if (!buf_addr)
+		goto out;
+
+	*ctrl = rx_ctrl;
+#ifdef PFE_VWD_NAPI_STATS
+	priv->napi_counters[NAPI_DESC_COUNT]++;
+#endif
+
+#if defined(CONFIG_COMCERTO_CUSTOM_SKB_LAYOUT)
+	if ((vap->custom_skb) && !(rx_ctrl & HIF_CTRL_RX_WIFI_EXPT)) {
+		/* Even we use smaller area allocate bigger buffer, to meet skb helper function's requirements */
+		skb = dev_alloc_skb(length + offset + 32);
+
+		if (unlikely(!skb)) {
+#ifdef VWD_DEBUG_STATS
+			priv->rx_skb_alloc_fail += 1;
+#endif
+			goto pkt_drop;
+		}
+
+		/**
+		 * __memcpy expects src and dst need to be same alignment. So make sure that
+		 * skb->data starts at same alignement as buf_addr + offset.
+		 */
+		skb_reserve(skb, offset);
+		if (length <= MAX_WIFI_HDR_SIZE) {
+			/* whole packet fits in the linear skb; HIF buffer freed */
+			__memcpy(skb->data, buf_addr + offset, length);
+			skb_put(skb, length);
+			kfree(buf_addr);
+		}
+		else {
+			/* only the WiFi header is copied; the payload tail stays
+			 * in the HIF buffer, referenced by the mspd_* fields
+			 * (custom skb layout) even though skb->len covers it */
+			__memcpy(skb->data, buf_addr + offset, MAX_WIFI_HDR_SIZE);
+			skb_put(skb, length);
+			skb->mspd_data = buf_addr;
+			skb->mspd_len = length - MAX_WIFI_HDR_SIZE;
+			skb->mspd_ofst = offset + MAX_WIFI_HDR_SIZE;
+		}
+	}
+
+	else
+#endif
+	if (rx_ctrl & HIF_CTRL_RX_WIFI_EXPT) {
+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
+		skb = dev_alloc_skb(length + offset + 32);
+#else
+		skb = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC);
+#endif
+
+		if (unlikely(!skb)) {
+#ifdef VWD_DEBUG_STATS
+			priv->rx_skb_alloc_fail += 1;
+#endif
+			goto pkt_drop;
+		}
+
+		skb_reserve(skb, offset);
+#if defined(CONFIG_COMCERTO_ZONE_DMA_NCNB)
+		/* Since, these packets are going to linux stack,
+		 * to avoid NCNB access overhead copy NCNB to CB buffer.
+		 */
+		__memcpy(skb->data, buf_addr + offset, length);
+		kfree(buf_addr);
+#endif
+		skb_put(skb, length);
+
+
+		if ((vap->wifi_dev->features & NETIF_F_RXCSUM) && (rx_ctrl & HIF_CTRL_RX_CHECKSUMMED))
+		{
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+#ifdef VWD_DEBUG_STATS
+			priv->rx_csum_correct++;
+#endif
+		}
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+		/* rebuild the secpath for packets PFE already decrypted, so
+		 * the stack's XFRM policy checks see the right states */
+		if (rx_ctrl & HIF_CTRL_RX_IPSEC_IN) {
+			if (ipsec_hdr) {
+				struct sec_path *sp;
+				struct xfrm_state *x;
+				unsigned short *sah = &ipsec_hdr->sa_handle[0];
+				int i = 0;
+
+				sp = secpath_dup(skb->sp);
+
+				if (!sp)
+				{
+					goto pkt_drop;
+				}
+
+				skb->sp = sp;
+
+				/* at maximum 2 SA are expected */
+				while (i <= 1)
+				{
+					if(!*sah)
+						break;
+
+					if ((x = xfrm_state_lookup_byhandle(dev_net(vap->dev), ntohs(*sah))) == NULL)
+					{
+						goto pkt_drop;
+					}
+
+					sp->xvec[i] = x;
+
+					/* stamp first-use time for SA lifetime accounting */
+					if (!x->curlft.use_time)
+					{
+						ktime = current_kernel_time();
+						x->curlft.use_time = (unsigned long)ktime.tv_sec;
+					}
+
+					i++; sah++;
+				}
+
+				sp->len = i;
+			}
+		}
+#endif
+
+	}
+	else
+	{
+		/* fast path: wrap the HIF buffer without copying */
+		skb = alloc_skb_header(PFE_BUF_SIZE, buf_addr, GFP_ATOMIC);
+
+		if (unlikely(!skb)) {
+#ifdef VWD_DEBUG_STATS
+			priv->rx_skb_alloc_fail += 1;
+#endif
+			goto pkt_drop;
+		}
+
+		skb_reserve(skb, offset);
+		skb_put(skb, length);
+	}
+
+
+	return skb;
+
+pkt_drop:
+	if (skb) {
+		kfree_skb(skb);
+	} else {
+		kfree(buf_addr);
+	}
+
+out:
+	return NULL;
+}
+
+/** pfe_vwd_send_to_vap
+ *
+ * Deliver a fast-path skb to the VAP's underlying WiFi driver.  When the
+ * direct tx path is enabled the skb is handed straight to the driver's
+ * ndo_start_xmit (bypassing the qdisc layer); otherwise, and on any
+ * contention (multiqueue device, stopped queue), it falls back to the
+ * normal dev_queue_xmit() path.  The skb is always consumed.
+ */
+
+/* The most of the logic inside this function is copied from dev_queue_xmit() in linux/net/core/dev.c.*/
+
+static void pfe_vwd_send_to_vap(struct vap_desc_s *vap, struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_queue *txq;
+ int cpu, rc;
+
+ if (!vap->direct_tx_path) {
+ original_dev_queue_xmit(skb);
+ return;
+ }
+
+ /* Disable soft irqs for various locks below. Also
+ * stops preemption for RCU.
+ */
+ rcu_read_lock_bh();
+
+ /* Direct path only handles single-queue devices. */
+ if (unlikely(dev->real_num_tx_queues != 1)) {
+ goto deliver_slow;
+ }
+
+ if (dev->flags & IFF_UP) {
+ skb_set_queue_mapping(skb, 0);
+ txq = netdev_get_tx_queue(dev, 0);
+
+ cpu = smp_processor_id();
+
+ /* Guard against xmit recursion on this cpu (same check as
+  * dev_queue_xmit()); if we already own the lock, drop below. */
+ if (txq->xmit_lock_owner != cpu) {
+ HARD_TX_LOCK(dev, txq, cpu);
+
+ if (unlikely(netif_tx_queue_stopped(txq))) {
+ HARD_TX_UNLOCK(dev, txq);
+ goto deliver_slow;
+ }
+
+ rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
+
+ /* BUGFIX: the tx lock must be released on the busy path
+  * too.  Previously HARD_TX_UNLOCK was only called when
+  * dev_xmit_complete(rc) was true, so a driver returning
+  * NETDEV_TX_BUSY left the queue lock held forever. */
+ HARD_TX_UNLOCK(dev, txq);
+
+ if (dev_xmit_complete(rc))
+ goto done;
+ }
+ }
+
+ /* Driver busy, device down, or lock recursion: drop the packet. */
+ rcu_read_unlock_bh();
+ kfree_skb(skb);
+
+ return;
+
+done:
+ /* delivered packet through fast path */
+ rcu_read_unlock_bh();
+ return;
+
+deliver_slow:
+ rcu_read_unlock_bh();
+
+ /* deliver packet to vap through stack */
+ original_dev_queue_xmit(skb);
+ return;
+}
+
+/** pfe_vwd_rx_poll
+ *
+ * Common NAPI poll body shared by the low/high/LRO pollers.  Drains up to
+ * @budget packets from HIF queue @qno: exception packets go up the local
+ * stack via netif_receive_skb(), fast-path packets are handed to the VAP's
+ * WiFi driver via pfe_vwd_send_to_vap().  Returns the number of packets
+ * processed; when it is below @budget, NAPI is completed and the HIF rx
+ * interrupt for this queue is re-armed.
+ */
+static int pfe_vwd_rx_poll( struct vap_desc_s *vap, struct napi_struct *napi, int qno, int budget)
+{
+ struct sk_buff *skb;
+ int work_done = 0;
+ struct net_device *dev;
+ struct pfe_vwd_priv_s *priv = vap->priv;
+
+ /* Re-resolve the WiFi netdev each poll; it may have been unregistered
+  * since the VAP was created (dev is NULL then and packets are dropped). */
+ dev = dev_get_by_index(&init_net, vap->ifindex);
+
+#ifdef PFE_VWD_NAPI_STATS
+ priv->napi_counters[NAPI_POLL_COUNT]++;
+#endif
+ do {
+ unsigned int ctrl = 0;
+
+ if (page_mode)
+ skb = pfe_vwd_rx_page(vap, qno, &ctrl);
+ else
+ skb = pfe_vwd_rx_skb(vap, qno, &ctrl);
+
+ if (!skb)
+ break;
+ if(!dev) {
+ /*VAP got disappeared, simply drop the packet */
+ kfree_skb(skb);
+ work_done++;
+ continue;
+ }
+
+ skb->dev = dev;
+ dev->last_rx = jiffies;
+
+#ifdef PFE_VWD_LRO_STATS
+ priv->lro_len_counters[((u32)skb->len >> 11) & (LRO_LEN_COUNT_MAX - 1)]++;
+ priv->lro_nb_counters[skb_shinfo(skb)->nr_frags & (LRO_NB_COUNT_MAX - 1)]++;
+#endif
+ vap->stats.rx_packets++;
+ vap->stats.rx_bytes += skb->len;
+
+ /*FIXME: Need to handle WiFi to WiFi fast path */
+ if (ctrl & HIF_CTRL_RX_WIFI_EXPT) {
+ /* Exception traffic: deliver to the local stack. */
+
+ /* NOTE(review): 0xdead marker written at skb->head -- looks
+  * like a tag consumed elsewhere in the driver; confirm. */
+ *(unsigned long *)skb->head = 0xdead;
+ skb->protocol = eth_type_trans(skb, dev);
+#ifdef VWD_DEBUG_STATS
+ priv->pkts_slow_forwarded[qno] += 1;
+#endif
+ netif_receive_skb(skb);
+ }
+ else {
+ /* Fast path: fill in protocol/headers by hand and hand the
+  * frame directly to the WiFi driver. */
+ struct ethhdr *hdr;
+
+ hdr = (struct ethhdr *)skb->data;
+ skb->protocol = hdr->h_proto;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
+ skb->mac.raw = skb->data;
+ skb->nh.raw = skb->data + sizeof(struct ethhdr);
+#else
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, sizeof(struct ethhdr));
+#endif
+
+#ifdef VWD_DEBUG_STATS
+ priv->pkts_rx_fast_forwarded[qno] += 1;
+#endif
+ skb->priority = 0;
+
+
+ pfe_vwd_send_to_vap(vap, skb, dev);
+ }
+
+
+ work_done++;
+#ifdef PFE_VWD_NAPI_STATS
+ priv->napi_counters[NAPI_PACKET_COUNT]++;
+#endif
+ } while (work_done < budget);
+
+ if(dev)
+ dev_put(dev);
+
+ /* If no Rx receive nor cleanup work was done, exit polling mode.
+ * No more netif_running(dev) check is required here , as this is checked in
+ * net/core/dev.c ( 2.6.33.5 kernel specific).
+ */
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ /* Re-arm the HIF rx event for this queue. */
+ hif_lib_event_handler_start(&vap->client, EVENT_RX_PKT_IND, qno);
+ }
+#ifdef PFE_VWD_NAPI_STATS
+ else
+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
+#endif
+
+ return work_done;
+}
+
+/** pfe_vwd_lro_poll
+ *
+ * NAPI poll handler for the LRO rx queue (HIF queue 2).
+ */
+static int pfe_vwd_lro_poll(struct napi_struct *napi, int budget)
+{
+ return pfe_vwd_rx_poll(container_of(napi, struct vap_desc_s, lro_napi),
+ napi, 2, budget);
+}
+
+
+/** pfe_vwd_rx_high_poll
+ *
+ * NAPI poll handler for the high-priority rx queue (HIF queue 1).
+ * (Original header comment said "pfe_eth_low_poll" -- mislabeled.)
+ */
+static int pfe_vwd_rx_high_poll(struct napi_struct *napi, int budget)
+{
+ struct vap_desc_s *vap = container_of(napi, struct vap_desc_s, high_napi);
+
+ return pfe_vwd_rx_poll(vap, napi, 1, budget);
+}
+
+/** pfe_vwd_rx_low_poll
+ *
+ * NAPI poll handler for the low-priority rx queue (HIF queue 0).
+ * (Original header comment said "pfe_eth_high_poll" -- mislabeled.)
+ */
+static int pfe_vwd_rx_low_poll(struct napi_struct *napi, int budget )
+{
+ struct vap_desc_s *vap = container_of(napi, struct vap_desc_s, low_napi);
+
+ return pfe_vwd_rx_poll(vap, napi, 0, budget);
+}
+
+/** pfe_vwd_event_handler
+ *
+ * HIF client callback for a VAP.  On an rx-packet indication, schedule the
+ * NAPI context that services the given queue (0 = low prio, 1 = high prio,
+ * 2 = LRO).  Tx-done and watermark events are ignored.
+ */
+static int pfe_vwd_event_handler(void *data, int event, int qno)
+{
+ struct vap_desc_s *vap = data;
+ struct napi_struct *napi = NULL;
+
+ switch (event) {
+ case EVENT_RX_PKT_IND:
+ /* Map the HIF queue number to its NAPI context. */
+ if (qno == 0)
+ napi = &vap->low_napi;
+ else if (qno == 1)
+ napi = &vap->high_napi;
+ else if (qno == 2)
+ napi = &vap->lro_napi;
+
+ if (napi && napi_schedule_prep(napi))
+ __napi_schedule(napi);
+
+#ifdef PFE_VWD_NAPI_STATS
+ vap->priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+ break;
+
+ case EVENT_TXDONE_IND:
+ case EVENT_HIGH_RX_WM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/** pfe_vwd_fast_tx_timeout
+ *
+ * hrtimer callback armed by pfe_vwd_might_stop_tx() when a tx queue fills
+ * up.  Wakes the VAP's netdev queue so transmission can resume after the
+ * short recovery interval.  One-shot: never restarts itself.
+ */
+static enum hrtimer_restart pfe_vwd_fast_tx_timeout(struct hrtimer *timer)
+{
+ struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct pfe_eth_fast_timer, timer);
+ struct vap_desc_s *vap = container_of(fast_tx_timeout->base, struct vap_desc_s, fast_tx_timeout);
+
+ if(netif_queue_stopped(vap->dev)) {
+#ifdef PFE_VWD_TX_STATS
+ vap->was_stopped[fast_tx_timeout->queuenum] = 1;
+#endif
+ netif_wake_queue(vap->dev);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/** pfe_vwd_fast_tx_timeout_init
+ *
+ * Set up one tx-recovery hrtimer per tx queue of the VAP.  The timers are
+ * armed later by pfe_vwd_might_stop_tx() when a queue stalls.
+ */
+static void pfe_vwd_fast_tx_timeout_init(struct vap_desc_s *vap)
+{
+ int q;
+
+ for (q = 0; q < VWD_TXQ_CNT; q++) {
+ struct pfe_eth_fast_timer *ft = &vap->fast_tx_timeout[q];
+
+ ft->queuenum = q;
+ hrtimer_init(&ft->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ft->timer.function = pfe_vwd_fast_tx_timeout;
+ /* base lets the callback recover the owning vap via container_of */
+ ft->base = vap->fast_tx_timeout;
+ }
+}
+
+/* pfe_vwd_might_stop_tx
+ *
+ * Check that both the shared HIF tx ring and the VAP's client queue have
+ * room for @n_desc descriptors.  Called with the HIF tx lock held.  If
+ * space is short, the lock is dropped once to flush completed descriptors
+ * and the check is retried; if still short, the netdev queue is stopped
+ * and a fast-recovery hrtimer is armed.  Returns 0 when tx may proceed,
+ * -1 when the caller must back off (NETDEV_TX_BUSY).
+ */
+static int pfe_vwd_might_stop_tx(struct vap_desc_s *vap, int queuenum, unsigned int n_desc)
+{
+ int tried = 0;
+ ktime_t kt;
+
+try_again:
+ if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc)
+ || (hif_lib_tx_avail(&vap->client, queuenum) < n_desc))) {
+
+ if (!tried) {
+ /* Drop the HIF lock so the flush can reclaim completed
+  * descriptors, then retake it and retry exactly once. */
+ hif_tx_unlock(&pfe->hif);
+ pfe_vwd_flush_txQ(vap, queuenum, 1, n_desc);
+ tried = 1;
+ hif_tx_lock(&pfe->hif);
+ goto try_again;
+ }
+#ifdef PFE_VWD_TX_STATS
+ if (__hif_tx_avail(&pfe->hif) < n_desc)
+ vap->stop_queue_hif[queuenum]++;
+ else if (hif_lib_tx_avail(&vap->client, queuenum) < n_desc) {
+ vap->stop_queue_hif_client[queuenum]++;
+ }
+ vap->stop_queue_total[queuenum]++;
+#endif
+ netif_stop_queue(vap->dev);
+
+ /* Arm the recovery timer that will wake the queue shortly. */
+ kt = ktime_set(0, COMCERTO_TX_FAST_RECOVERY_TIMEOUT_MS * NSEC_PER_MSEC);
+ hrtimer_start(&vap->fast_tx_timeout[queuenum].timer, kt, HRTIMER_MODE_REL);
+ return -1;
+ }
+ else {
+ return 0;
+ }
+}
+
+#define SA_MAX_OP 2
+
+/**
+ * pfe_vwd_vap_xmit_local_packet()
+ *
+ * ndo_start_xmit for the vwdN netdev: injects a locally-generated packet
+ * into the PFE over HIF queue 0.  Handles TSO, scatter-gather frags,
+ * checksum-offload flagging and (when IPsec offload is on) prepending the
+ * SA handles for outbound encryption.  Lock order: vaplock -> per-queue
+ * tx_lock -> HIF tx lock.
+ */
+static int pfe_vwd_vap_xmit_local_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vap_desc_s *vap = netdev_priv(dev);
+ struct skb_shared_info *sh;
+ int queuenum = 0, n_desc = 0, n_segs;
+ int count = 0, nr_frags;
+ int ii;
+ u32 ctrl = HIF_CTRL_TX_WIFI;
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+ u16 sah[SA_MAX_OP] = {0};
+ struct hif_ipsec_hdr *hif_ipsec;
+#endif
+
+ pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
+
+ spin_lock_bh(&pfe->vwd.vaplock);
+ spin_lock_bh(&vap->tx_lock[queuenum]);
+ hif_tx_lock(&pfe->hif);
+
+ if (pfe_vwd_might_stop_tx(vap, queuenum, n_desc)){
+ hif_tx_unlock(&pfe->hif);
+ spin_unlock_bh(&vap->tx_lock[queuenum]);
+ spin_unlock_bh(&pfe->vwd.vaplock);
+#ifdef PFE_VWD_TX_STATS
+ if(vap->was_stopped[queuenum]) {
+ vap->clean_fail[queuenum]++;
+ vap->was_stopped[queuenum] = 0;
+ }
+#endif
+ return NETDEV_TX_BUSY;
+ }
+
+
+ /* PFE prepends its packet header in the skb headroom; grow it if the
+  * (non-GSO) skb doesn't have enough. */
+ if ( !(skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ + sizeof(unsigned long)))) {
+
+ if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned long)), 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ /* BUGFIX: skb is freed here; clear the pointer so the
+  * flush heuristic at "out:" does not dereference
+  * skb->sk on a freed buffer (use-after-free). */
+ skb = NULL;
+ goto out;
+ }
+ }
+
+ /* Send vap_id to PFE */
+ ctrl |= vap->vapid << HIF_CTRL_VAPID_OFST;
+ sh = skb_shinfo(skb);
+
+ if (skb_is_gso(skb)) {
+ int nr_bytes;
+
+ /* TSO path: pfe_tso() consumes the skb either way. */
+ if(likely(nr_bytes = pfe_tso(skb, &vap->client, &pfe->vwd.tso, queuenum, ctrl))) {
+ vap->stats.tx_packets += sh->gso_segs;
+ vap->stats.tx_bytes += nr_bytes;
+ }
+ else
+ vap->stats.tx_dropped++;
+
+ goto out;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ ctrl |= HIF_CTRL_TX_CHECKSUM;
+
+ nr_frags = sh->nr_frags;
+
+#if defined(CONFIG_INET_IPSEC_OFFLOAD) || defined(CONFIG_INET6_IPSEC_OFFLOAD)
+ /* check if packet sent from Host to PFE needs IPsec processing */
+ if (skb->ipsec_offload)
+ {
+ if (skb->sp)
+ {
+ for (ii = skb->sp->len-1; ii >= 0; ii--)
+ {
+ struct xfrm_state *x = skb->sp->xvec[ii];
+ sah[ii] = htons(x->handle);
+ }
+
+ ctrl |= HIF_CTRL_TX_IPSEC_OUT;
+
+ /* add SA info to the hif header*/
+ hif_ipsec = (struct hif_ipsec_hdr *)(skb->data - sizeof(struct hif_ipsec_hdr));
+ hif_ipsec->sa_handle[0] = sah[0];
+ hif_ipsec->sa_handle[1] = sah[1];
+
+ skb->data -= sizeof(struct hif_ipsec_hdr);
+ skb->len += sizeof(struct hif_ipsec_hdr);
+ }
+ else
+ printk(KERN_ERR "%s: secure path data not found\n", __func__);
+ }
+#endif
+
+ if (nr_frags) {
+ skb_frag_t *f;
+
+ /* Scatter-gather: linear part first, then each frag; the last
+  * one carries HIF_LAST_BUFFER so the PFE closes the packet. */
+ __hif_lib_xmit_pkt(&vap->client, queuenum, skb->data, skb_headlen(skb), ctrl, HIF_FIRST_BUFFER, skb);
+
+ for (ii = 0; ii < nr_frags - 1; ii++) {
+ f = &sh->frags[ii];
+
+ __hif_lib_xmit_pkt(&vap->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, 0x0, skb);
+ }
+
+ f = &sh->frags[ii];
+
+ __hif_lib_xmit_pkt(&vap->client, queuenum, skb_frag_address(f), skb_frag_size(f), 0x0, HIF_LAST_BUFFER|HIF_DATA_VALID, skb);
+
+#ifdef VWD_DEBUG_STATS
+ pfe->vwd.pkts_local_tx_sgs += 1;
+#endif
+ }
+ else
+ {
+ __hif_lib_xmit_pkt(&vap->client, queuenum, skb->data, skb->len, ctrl, HIF_FIRST_BUFFER | HIF_LAST_BUFFER | HIF_DATA_VALID, skb);
+ }
+
+ hif_tx_dma_start();
+
+#ifdef VWD_DEBUG_STATS
+ pfe->vwd.pkts_transmitted += 1;
+#endif
+ vap->stats.tx_packets++;
+ vap->stats.tx_bytes += skb->len;
+
+out:
+ hif_tx_unlock(&pfe->hif);
+
+ dev->trans_start = jiffies;
+
+ // Recycle buffers if a socket's send buffer becomes half full or if the HIF client queue starts filling up
+ if (((count = (hif_lib_tx_pending(&vap->client, queuenum) - HIF_CL_TX_FLUSH_MARK)) > 0)
+ || (skb && skb->sk && ((sk_wmem_alloc_get(skb->sk) << 1) > skb->sk->sk_sndbuf)))
+ pfe_vwd_flush_txQ(vap, queuenum, 1, count);
+
+ spin_unlock_bh(&vap->tx_lock[queuenum]);
+ spin_unlock_bh(&pfe->vwd.vaplock);
+
+ return 0;
+}
+
+/**
+ * pfe_vwd_vap_get_stats()
+ *
+ * ndo_get_stats: the per-VAP counters live in the netdev private area.
+ */
+static struct net_device_stats *pfe_vwd_vap_get_stats(struct net_device *dev)
+{
+ struct vap_desc_s *vap;
+
+ vap = netdev_priv(dev);
+ return &vap->stats;
+}
+
+/* netdev callbacks for the vwdN devices: local tx is injected into the PFE,
+ * stats are the per-VAP counters. */
+static const struct net_device_ops vwd_netdev_ops = {
+ .ndo_start_xmit = pfe_vwd_vap_xmit_local_packet,
+ .ndo_get_stats = pfe_vwd_vap_get_stats,
+};
+
+/**
+ * pfe_vwd_vap_get_drvinfo - Fills in the drvinfo structure with some basic info
+ *
+ * All source strings are short literals, so strncpy() NUL-pads the
+ * destination buffers.
+ */
+static void pfe_vwd_vap_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, "VWD", COMCERTO_INFOSTR_LEN);
+ strncpy(drvinfo->version, "1.0", COMCERTO_INFOSTR_LEN);
+ strncpy(drvinfo->fw_version, "N/A", COMCERTO_INFOSTR_LEN);
+ strncpy(drvinfo->bus_info, "N/A", COMCERTO_INFOSTR_LEN);
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+/* Minimal ethtool ops, also installed on WiFi devices that have none of
+ * their own (see pfe_vwd_vap_up). */
+struct ethtool_ops pfe_vwd_vap_ethtool_ops = {
+ .get_drvinfo = pfe_vwd_vap_get_drvinfo,
+};
+
+/** pfe_vwd_vap_up
+ *
+ * Create and bring up one VAP: allocate the vwdN netdev, mirror the WiFi
+ * device's identity (name/mac/ifindex), optionally steal offload features
+ * onto the WiFi device (NAS / vwd_tx_ofld mode), register NAPI contexts,
+ * sysfs entries and the HIF client, and start the tx-recovery timer when
+ * this is the first VAP.  Called with vwd->vaplock held (temporarily
+ * dropped around register_netdev, which may sleep).  Returns the new VAP
+ * or NULL on failure.
+ */
+
+static struct vap_desc_s *pfe_vwd_vap_up(struct pfe_vwd_priv_s *vwd, struct vap_cmd_s *cmd, struct net_device *wifi_dev)
+{
+ struct vap_desc_s *vap = NULL;
+ struct net_device *dev;
+ struct hif_client_s *client;
+ int ii, rc;
+ int vap_unlocked = 0;
+ unsigned char name[IFNAMSIZ];
+
+ printk("%s:%d\n", __func__, __LINE__);
+
+ sprintf(name, "vwd%d", cmd->vapid);
+
+ dev = alloc_etherdev(sizeof(*vap));
+
+ if (!dev) {
+ printk(KERN_ERR "%s : Unable to allocate device structure for %s\n", __func__, cmd->ifname);
+ goto err0;
+ }
+
+ vap = netdev_priv(dev);
+ vap->vapid = cmd->vapid;
+ vap->ifindex = wifi_dev->ifindex;
+ vap->direct_rx_path = cmd->direct_rx_path;
+ vap->direct_tx_path = 0;
+ /* NOTE(review): ifname holds only 12 bytes; a WiFi name of 12+ chars
+  * is stored without a NUL terminator -- later dev_get_by_name() on
+  * vap->ifname relies on shorter names.  Confirm. */
+ memcpy(vap->ifname, wifi_dev->name, 12);
+ memcpy(vap->macaddr, wifi_dev->dev_addr, 6);
+
+ vap->wifi_dev = wifi_dev;
+ vap->dev = dev;
+ vap->priv = vwd;
+ vap->cpu_id = -1;
+ dev->netdev_ops = &vwd_netdev_ops;
+
+ dev->mtu = 1500;
+ dev->flags |= IFF_NOARP;
+
+ if (dev_alloc_name(dev, name) < 0) {
+ netdev_err(dev, "%s: dev_alloc_name(%s) failed\n", __func__, name);
+ goto err1;
+ }
+
+ /* FIXME: Assuming that vwd_tx_ofld is NAS mode. But tx_ofld_mode is required for HGW also
+ * to perform encryption for WiFi locat Tx IPSec packets.
+ */
+ if (vwd_tx_ofld) {
+ /* FIXME : We need to backup wifi device ethtool ops and override required functions
+ * with our own function which will enable required functionality for both WiFi
+ * and vwd network device.
+ */
+ vap->wifi_ethtool_ops = wifi_dev->ethtool_ops;
+
+ if (!wifi_dev->ethtool_ops)
+ wifi_dev->ethtool_ops = &pfe_vwd_vap_ethtool_ops;
+
+ /* supported features */
+ dev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO;
+
+ /* enabled by default */
+ dev->features = dev->hw_features;
+ if (lro_mode) {
+ dev->hw_features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_LRO;
+ }
+
+ /* Enable offloading features to offload device/interface.
+  * diff_hw_features records what we add so pfe_vwd_vap_down()
+  * can strip exactly those bits again. */
+ if (!(wifi_dev->hw_features & NETIF_F_RXCSUM))
+ vap->diff_hw_features |= NETIF_F_RXCSUM;
+
+ if (!(wifi_dev->hw_features & NETIF_F_IP_CSUM))
+ vap->diff_hw_features |= NETIF_F_IP_CSUM;
+
+ if (!(wifi_dev->hw_features & NETIF_F_IPV6_CSUM))
+ vap->diff_hw_features |= NETIF_F_IPV6_CSUM;
+
+ if (!(wifi_dev->hw_features & NETIF_F_SG))
+ vap->diff_hw_features |= NETIF_F_SG;
+
+ if (!(wifi_dev->hw_features & NETIF_F_TSO))
+ vap->diff_hw_features |= NETIF_F_TSO;
+
+ if (lro_mode) {
+ if (!(wifi_dev->hw_features & NETIF_F_LRO))
+ vap->diff_hw_features |= NETIF_F_LRO;
+ }
+
+ wifi_dev->hw_features |= vap->diff_hw_features;
+ }
+
+ /* register_netdev() may sleep; drop the caller's vaplock (a BH
+  * spinlock) around it and retake it afterwards. */
+ if (spin_is_locked(&vwd->vaplock)) {
+ spin_unlock_bh(&vwd->vaplock);
+ vap_unlocked = 1;
+ }
+
+ rc = register_netdev(dev);
+
+ if (vap_unlocked)
+ spin_lock_bh(&vwd->vaplock);
+
+ if (rc) {
+ netdev_err(dev, "register_netdev() failed\n");
+ goto err1;
+ }
+
+ /* Initilize NAPI for Rx processing */
+ netif_napi_add(vap->dev, &vap->low_napi, pfe_vwd_rx_low_poll, VWD_RX_POLL_WEIGHT);
+ netif_napi_add(vap->dev, &vap->high_napi, pfe_vwd_rx_high_poll, VWD_RX_POLL_WEIGHT);
+ netif_napi_add(vap->dev, &vap->lro_napi, pfe_vwd_lro_poll, VWD_RX_POLL_WEIGHT);
+ napi_enable(&vap->high_napi);
+ napi_enable(&vap->low_napi);
+ napi_enable(&vap->lro_napi);
+ pfe_vwd_fast_tx_timeout_init(vap);
+
+ /* Per-VAP sysfs directory under the pfe device. */
+ vap->vap_kobj = kobject_create_and_add(vap->ifname, &pfe->dev->kobj);
+ if (!vap->vap_kobj) {
+ printk(KERN_ERR "%s : Failed to create kobject\n", __func__);
+ goto err2;
+ }
+
+ if (sysfs_create_group(vap->vap_kobj, &vap_attr_group)) {
+ printk(KERN_ERR "%s : Failed to create sysfs entries \n", __func__);
+ goto err3;
+ }
+
+
+ /* Register VWD Client driver with HIF */
+ client = &vap->client;
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_VWD0 + vap->vapid;
+ client->tx_qn = VWD_TXQ_CNT;
+ client->rx_qn = VWD_RXQ_CNT;
+ client->priv = vap;
+ client->pfe = pfe;
+ client->event_handler = pfe_vwd_event_handler;
+ client->user_cpu_id = vap->cpu_id;
+
+ /* FIXME : For now hif lib sets all tx and rx queues to same size */
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ if (hif_lib_client_register(client)) {
+ printk(KERN_ERR"%s: hif_lib_client_register(%d) failed\n", __func__, client->id);
+ goto err4;
+ }
+
+ for (ii = 0; ii < VWD_TXQ_CNT; ii++)
+ spin_lock_init(&vap->tx_lock[ii]);
+
+ if (vwd_tx_ofld) {
+ /* Hold a reference on our own netdev for the offload linkage;
+  * released in pfe_vwd_vap_down(). */
+ dev_get_by_index(&init_net, dev->ifindex);
+ wifi_dev->wifi_offload_dev = dev;
+
+ /* Bring the vwd device administratively up by hand. */
+ rtnl_lock();
+ wifi_dev->flags |= IFF_WIFI_OFLD;
+ dev->flags |= IFF_UP;
+ netif_tx_wake_all_queues(dev);
+ set_bit(__LINK_STATE_START, &dev->state);
+ dev_activate(dev);
+ rtnl_unlock();
+ }
+
+ vwd->vaps[cmd->vapid] = vap;
+ /* First VAP: start the global tx-recovery timer. */
+ if (!vwd->vap_count) {
+ printk("%s: Tx recover Timer started...\n", __func__);
+ add_timer(&vwd->tx_timer);
+ }
+
+ vwd->vap_count++;
+
+ return vap;
+
+err4:
+ sysfs_remove_group(vap->vap_kobj, &vap_attr_group);
+
+err3:
+ kobject_put(vap->vap_kobj);
+
+err2:
+ napi_disable(&vap->high_napi);
+ napi_disable(&vap->low_napi);
+ napi_disable(&vap->lro_napi);
+ /* unregister_netdev() may sleep: drop vaplock around it as above. */
+ spin_unlock_bh(&pfe->vwd.vaplock);
+ unregister_netdev(dev);
+ spin_lock_bh(&pfe->vwd.vaplock);
+err1:
+ free_netdev(dev);
+err0:
+ return NULL;
+}
+
+/** pfe_vwd_vap_down
+ *
+ * Tear down one VAP: stop tx, cancel the per-queue recovery timers, flush
+ * pending tx, unregister the HIF client, NAPI contexts and sysfs entries,
+ * undo the feature/ethtool changes made to the WiFi device in offload
+ * mode, then unregister and free the vwdN netdev.  Called with
+ * pfe->vwd.vaplock held (temporarily dropped around unregister_netdev,
+ * which may sleep).
+ */
+static void pfe_vwd_vap_down(struct vap_desc_s *vap)
+{
+ int i;
+ int vap_unlocked = 0;
+ struct net_device *dev = vap->dev;
+
+ printk("%s:%d\n", __func__, __LINE__);
+ pfe->vwd.vap_count--;
+ pfe->vwd.vaps[vap->vapid] = NULL;
+ netif_stop_queue(vap->dev);
+
+ for (i = 0; i < VWD_TXQ_CNT; i++)
+ hrtimer_cancel(&vap->fast_tx_timeout[i].timer);
+
+ pfe_vwd_flush_tx(vap, 1);
+ hif_lib_client_unregister(&vap->client);
+ napi_disable(&vap->high_napi);
+ napi_disable(&vap->low_napi);
+ napi_disable(&vap->lro_napi);
+ sysfs_remove_group(vap->vap_kobj, &vap_attr_group);
+ kobject_put(vap->vap_kobj);
+
+ /* FIXME Assuming that vwd_tx_ofld is NAS mode */
+ if (vwd_tx_ofld) {
+ struct net_device *wifi_dev = dev_get_by_name(&init_net, vap->ifname);
+
+ if (wifi_dev) {
+ /* Restore the WiFi device's own ethtool ops and strip the
+  * feature bits we added in pfe_vwd_vap_up(). */
+ wifi_dev->ethtool_ops = vap->wifi_ethtool_ops;
+ wifi_dev->wifi_offload_dev = NULL;
+ rtnl_lock();
+ wifi_dev->flags &= ~IFF_WIFI_OFLD;
+ wifi_dev->hw_features &= ~vap->diff_hw_features;
+ wifi_dev->features &= ~vap->diff_hw_features;
+ rtnl_unlock();
+ dev_put(wifi_dev);
+ }
+
+ rtnl_lock();
+ vap->dev->flags &= ~(IFF_UP);
+ rtnl_unlock();
+ vap->diff_hw_features = 0;
+ /* Drop the self-reference taken in pfe_vwd_vap_up(). */
+ dev_put(dev);
+ }
+
+ /* unregister_netdev()/del_timer_sync() may sleep; drop the caller's
+  * vaplock (BH spinlock) and retake it afterwards. */
+ if (spin_is_locked(&pfe->vwd.vaplock)) {
+ spin_unlock_bh(&pfe->vwd.vaplock);
+ vap_unlocked = 1;
+ }
+
+ /* Last VAP gone: stop the global tx-recovery timer. */
+ if (!pfe->vwd.vap_count) {
+ printk("%s: Tx recover Timer stopped...\n", __func__);
+ del_timer_sync(&pfe->vwd.tx_timer);
+ }
+
+
+ unregister_netdev(dev);
+
+ if (vap_unlocked)
+ spin_lock_bh(&pfe->vwd.vaplock);
+
+ free_netdev(dev);
+}
+
+/**
+ * pfe_vwd_vap_event_hanler
+ *
+ * Workqueue handler (scheduled from the netdevice notifier in offload
+ * mode) that reconciles the configured VAP name list with reality: brings
+ * up a VAP for every configured interface that exists and is IFF_UP, and
+ * tears down VAPs whose interface went down or disappeared.
+ * (Name typo "hanler" kept: referenced at the INIT_WORK call site.)
+ */
+static void pfe_vwd_vap_event_hanler(struct work_struct *work)
+{
+ struct pfe_vwd_priv_s *priv = container_of(work, struct pfe_vwd_priv_s, event);
+ struct net_device *wifi_dev;
+ struct vap_cmd_s vap_cmd;
+ struct vap_desc_s *vap;
+ int ii;
+
+ printk("%s: %s\n", __func__, priv->name);
+
+ spin_lock(&priv->conf_lock);
+ spin_lock_bh(&priv->vaplock);
+ /* Pass 1: create VAPs for configured interfaces that are up. */
+ for (ii = 0; ii < MAX_VAP_SUPPORT; ii++) {
+ if (!strlen(priv->conf_vap_names[ii]))
+ continue;
+
+ wifi_dev = dev_get_by_name(&init_net, priv->conf_vap_names[ii]);
+ if (wifi_dev && !priv->vaps[ii] && (wifi_dev->flags & IFF_UP)) {
+
+ vap_cmd.vapid = ii;
+ strcpy(vap_cmd.ifname, priv->conf_vap_names[ii]);
+ vap_cmd.direct_rx_path = 0;
+ if ((vap = pfe_vwd_vap_up(priv, &vap_cmd, wifi_dev))) {
+ printk("%s:ADD: name:%s, vapid:%d, direct_rx_path : %s, ifindex:%d, mac:%x:%x:%x:%x:%x:%x\n",
+ __func__, vap->ifname, vap->vapid,
+ vap->direct_rx_path ? "ON":"OFF", wifi_dev->ifindex,
+ wifi_dev->dev_addr[0], wifi_dev->dev_addr[1],
+ wifi_dev->dev_addr[2], wifi_dev->dev_addr[3],
+ wifi_dev->dev_addr[4], wifi_dev->dev_addr[5] );
+
+ }
+ else {
+ printk(KERN_ERR "%s: Unable to add VAP (%s)\n", __func__, vap_cmd.ifname);
+ }
+ }
+
+ if (wifi_dev)
+ dev_put(wifi_dev);
+ }
+
+
+ /* Pass 2: tear down VAPs whose interface is gone or down. */
+ for (ii = 0; ii < MAX_VAP_SUPPORT; ii++) {
+ if (!priv->vaps[ii])
+ continue;
+
+ wifi_dev = dev_get_by_name(&init_net, priv->conf_vap_names[ii]);
+
+ if ( (wifi_dev && !(wifi_dev->flags & IFF_UP)) || !wifi_dev)
+ pfe_vwd_vap_down(priv->vaps[ii]);
+
+ if (wifi_dev)
+ dev_put(wifi_dev);
+ }
+ spin_unlock_bh(&priv->vaplock);
+ spin_unlock(&priv->conf_lock);
+
+}
+
+
+/** pfe_vwd_handle_vap
+ *
+ * Execute one ioctl-supplied VAP command: ADD (create a VAP for the named
+ * interface), REMOVE (tear it down), UPDATE (replace the cached MAC
+ * address), RESET (tear down every VAP).  Called with vwd->vaplock held.
+ * Returns 0 on success or -EINVAL on a bad vapid/action; ADD failure is
+ * only logged, not returned (original behaviour kept).
+ */
+static int pfe_vwd_handle_vap( struct pfe_vwd_priv_s *vwd, struct vap_cmd_s *cmd )
+{
+ struct vap_desc_s *vap;
+ int rc = 0, ii;
+ struct net_device *dev;
+
+
+ printk("%s function called %d: %s\n", __func__, cmd->action, cmd->ifname);
+
+ dev = dev_get_by_name(&init_net, cmd->ifname);
+
+ /* ADD/UPDATE require the named interface to exist. */
+ if ((!dev) && ((cmd->action != REMOVE) && (cmd->action != RESET)))
+ return -EINVAL;
+
+
+ switch( cmd->action )
+ {
+ case ADD:
+ /* Find free VAP */
+
+ if( cmd->vapid >= MAX_VAP_SUPPORT )
+ {
+ rc = -EINVAL;
+ goto done;
+ }
+
+
+ /* Slot already in use. */
+ if (vwd->vaps[cmd->vapid])
+ {
+ rc = -EINVAL;
+ break;
+ }
+
+ if ((vap = pfe_vwd_vap_up(vwd, cmd, dev))) {
+ printk("%s:ADD: name:%s, vapid:%d, direct_rx_path : %s, ifindex:%d, mac:%x:%x:%x:%x:%x:%x\n",
+ __func__, vap->ifname, vap->vapid,
+ vap->direct_rx_path ? "ON":"OFF", vap->ifindex,
+ vap->macaddr[0], vap->macaddr[1],
+ vap->macaddr[2], vap->macaddr[3],
+ vap->macaddr[4], vap->macaddr[5] );
+ }
+ else {
+ printk(KERN_ERR "%s: Unable to add VAP (%s)\n", __func__, cmd->ifname);
+ }
+ break;
+
+ case REMOVE:
+ /* Find VAP to be removed*/
+ if (cmd->vapid >= MAX_VAP_SUPPORT)
+ {
+ rc = -EINVAL;
+ goto done;
+ }
+
+ vap = vwd->vaps[cmd->vapid];
+
+ /* Removing a non-existent VAP is not an error. */
+ if (!vap)
+ {
+ rc = 0;
+ goto done;
+ }
+
+ printk("%s:REMOVE: name:%s, vapid:%d ifindex:%d mac:%x:%x:%x:%x:%x:%x\n", __func__,
+ vap->ifname, vap->vapid, vap->ifindex,
+ vap->macaddr[0], vap->macaddr[1],
+ vap->macaddr[2], vap->macaddr[3],
+ vap->macaddr[4], vap->macaddr[5] );
+
+ pfe_vwd_vap_down(vap);
+
+
+ break;
+
+ case UPDATE:
+ /* Find VAP to be updated */
+
+ if( cmd->vapid >= MAX_VAP_SUPPORT )
+ {
+ rc = -EINVAL;
+ goto done;
+ }
+
+ vap = vwd->vaps[cmd->vapid];
+
+ if (!vap)
+ {
+ rc = -EINVAL;
+ goto done;
+ }
+
+ printk("%s:UPDATE: old mac:%x:%x:%x:%x:%x:%x\n", __func__,
+ vap->macaddr[0], vap->macaddr[1],
+ vap->macaddr[2], vap->macaddr[3],
+ vap->macaddr[4], vap->macaddr[5] );
+
+ /* Not yet implemented */
+ memcpy(vap->macaddr, cmd->macaddr, 6);
+
+ printk("%s:UPDATE: name:%s, vapid:%d ifindex:%d mac:%x:%x:%x:%x:%x:%x\n", __func__,
+ vap->ifname, vap->vapid, vap->ifindex,
+ vap->macaddr[0], vap->macaddr[1],
+ vap->macaddr[2], vap->macaddr[3],
+ vap->macaddr[4], vap->macaddr[5] );
+ break;
+
+ case RESET:
+ /* Remove all VAPs.  pfe_vwd_vap_down() decrements vap_count,
+  * which the loop condition also checks for early exit. */
+ printk("%s: Removing fastpath vaps\n", __func__);
+ for (ii = 0; (ii < MAX_VAP_SUPPORT) && vwd->vap_count; ii++) {
+ vap = vwd->vaps[ii];
+ if (vap) {
+ pfe_vwd_vap_down(vap);
+ }
+ }
+ break;
+
+
+ default:
+ rc = -EINVAL;
+
+ }
+done:
+
+ if(dev)
+ dev_put(dev);
+
+
+ return rc;
+
+}
+
+#define SIOCVAPUPDATE ( 0x6401 )
+
+/** pfe_vwd_ioctl
+ *
+ * Character-device ioctl entry point.  SIOCVAPUPDATE copies a vap_cmd_s
+ * from userspace and applies it under the VAP lock; any other command is
+ * rejected with -EOPNOTSUPP.
+ */
+static long
+pfe_vwd_ioctl(struct file * file, unsigned int cmd, unsigned long arg)
+{
+ struct pfe_vwd_priv_s *priv = (struct pfe_vwd_priv_s *)file->private_data;
+ void __user *argp = (void __user *)arg;
+ struct vap_cmd_s vap_cmd;
+ int rc;
+
+ printk("%s: start\n", __func__);
+
+ if (cmd == SIOCVAPUPDATE) {
+ if (copy_from_user(&vap_cmd, argp, sizeof(struct vap_cmd_s)))
+ return -EFAULT;
+
+ spin_lock_bh(&priv->vaplock);
+ rc = pfe_vwd_handle_vap(priv, &vap_cmd);
+ spin_unlock_bh(&priv->vaplock);
+
+ return rc;
+ }
+
+ printk("%s: end\n", __func__);
+
+ return -EOPNOTSUPP;
+}
+
+
+/** vwd_open
+ *
+ * Character-device open: only minor 0 exists; stash the driver private
+ * data on the file for later ioctls.
+ */
+ static int
+pfe_vwd_open(struct inode *inode, struct file *file)
+{
+ unsigned int dev_minor = MINOR(inode->i_rdev);
+
+#if defined (CONFIG_COMCERTO_VWD_MULTI_MAC)
+ printk( "%s : Multi MAC mode enabled\n", __func__);
+#endif
+ printk( "%s : minor device -> %d\n", __func__, dev_minor);
+
+ if (dev_minor != 0)
+ {
+ printk(KERN_ERR ": trying to access unknown minor device -> %d\n", dev_minor);
+ return -ENODEV;
+ }
+
+ file->private_data = &pfe->vwd;
+ return 0;
+}
+
+/** vwd_close
+ *
+ * Character-device release: nothing to clean up, just trace the call.
+ */
+ static int
+pfe_vwd_close(struct inode * inode, struct file * file)
+{
+ printk( "%s \n", __func__);
+ return 0;
+}
+
+/* File operations for the vwd control character device.  Converted from
+ * the obsolete GNU "label:" initializer syntax to C99 designated
+ * initializers; .owner added so the module cannot be unloaded while the
+ * device node is open. */
+struct file_operations vwd_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = pfe_vwd_ioctl,
+ .open = pfe_vwd_open,
+ .release = pfe_vwd_close,
+};
+
+
+/** pfe_vwd_up
+ *
+ * Bring the VWD driver online: install the IPv4/IPv6 netfilter hooks that
+ * capture candidate fast-path traffic, create the sysfs entries, register
+ * the WiFi rx fast-path callback, and in offload mode set up the VAP
+ * event workqueue and netdevice notifier.  Returns 0 on success, -1 when
+ * sysfs setup fails (hooks are unwound in that case).
+ */
+static int pfe_vwd_up(struct pfe_vwd_priv_s *priv )
+{
+ int ii;
+
+ printk("%s: start\n", __func__);
+
+ nf_register_hook(&vwd_hook);
+ nf_register_hook(&vwd_hook_ipv6);
+
+ if (pfe_vwd_sysfs_init(priv))
+ goto err0;
+
+ /* Defaults: routing fast path on, bridging off. */
+ priv->fast_path_enable = 0;
+ priv->fast_bridging_enable = 0;
+ priv->fast_routing_enable = 1;
+
+ for (ii = 0; ii < MAX_VAP_SUPPORT; ii++)
+ priv->vaps[ii] = NULL;
+
+ comcerto_wifi_rx_fastpath_register(vwd_wifi_if_send_pkt);
+
+ if (vwd_tx_ofld) {
+ priv->event_queue = create_workqueue("vwd_events");
+ INIT_WORK(&priv->event, pfe_vwd_vap_event_hanler);
+
+ register_netdevice_notifier(&vwd_vap_notifier);
+ }
+ return 0;
+
+err0:
+ nf_unregister_hook(&vwd_hook);
+ nf_unregister_hook(&vwd_hook_ipv6);
+
+ return -1;
+}
+
+/** pfe_vwd_down
+ *
+ * Shut the VWD driver down: unregister the fast-path callback and the
+ * netfilter hooks that are currently installed, stop the tx-recovery
+ * timer, tear down every remaining VAP, drain the event workqueue and
+ * remove the sysfs entries.  Always returns 0.
+ *
+ * NOTE(review): the hooks are unregistered conditionally on the current
+ * enable flags, while pfe_vwd_up() registered vwd_hook/vwd_hook_ipv6
+ * unconditionally -- if fast_routing_enable was cleared at runtime these
+ * hooks would leak here; confirm against the sysfs toggle code.
+ */
+static int pfe_vwd_down( struct pfe_vwd_priv_s *priv )
+{
+ int ii;
+
+ printk(KERN_INFO "%s: %s\n", priv->name, __func__);
+
+ comcerto_wifi_rx_fastpath_unregister();
+
+ if( priv->fast_bridging_enable )
+ {
+ nf_unregister_hook(&vwd_hook_bridge);
+ }
+
+ if( priv->fast_routing_enable )
+ {
+ nf_unregister_hook(&vwd_hook);
+ nf_unregister_hook(&vwd_hook_ipv6);
+ }
+
+ /*Stop Tx recovery timer and cleanup all vaps*/
+ if (priv->vap_count) {
+ printk("%s: Tx recover Timer stopped...\n", __func__);
+ del_timer_sync(&priv->tx_timer);
+ }
+
+ for (ii = 0; ii < MAX_VAP_SUPPORT; ii++)
+ {
+ if (priv->vaps[ii]) {
+ pfe_vwd_vap_down(priv->vaps[ii]);
+ }
+ }
+
+ if (vwd_tx_ofld) {
+ unregister_netdevice_notifier(&vwd_vap_notifier);
+ flush_workqueue(priv->event_queue);
+ destroy_workqueue(priv->event_queue);
+ }
+
+
+ priv->vap_count = 0;
+ pfe_vwd_sysfs_exit();
+
+ return 0;
+}
+
+/** pfe_vwd_driver_init
+ *
+ * PFE wifi offload:
+ * - uses HIF functions to receive/send packets
+ *
+ * Initialize locks and bring the driver up.  Returns 0 on success or
+ * non-zero when pfe_vwd_up() fails.
+ */
+
+static int pfe_vwd_driver_init( struct pfe_vwd_priv_s *priv )
+{
+ printk("%s: start\n", __func__);
+
+ strcpy(priv->name, "vwd");
+
+ spin_lock_init(&priv->vaplock);
+ spin_lock_init(&priv->conf_lock);
+ priv->pfe = pfe;
+
+ /* BUGFIX: propagate pfe_vwd_up() failure.  The return value was
+  * previously ignored, so the error check in pfe_vwd_init() could
+  * never fire. */
+ if (pfe_vwd_up(priv))
+ return -1;
+
+ printk("%s: end\n", __func__);
+ return 0;
+}
+
+/** vwd_driver_remove
+ *
+ * Counterpart of pfe_vwd_driver_init(): shuts the VWD driver down.
+ */
+static int pfe_vwd_driver_remove(void)
+{
+ pfe_vwd_down(&pfe->vwd);
+ return 0;
+}
+
+/** pfe_vwd_init
+ *
+ * Module-level entry: allocate the vwd control character device region,
+ * register the cdev, set up the tx-recovery timer and bring the driver
+ * up.  Returns 0 on success or a negative errno, unwinding the cdev and
+ * the chrdev region on failure.
+ */
+int pfe_vwd_init(struct pfe *pfe)
+{
+ struct pfe_vwd_priv_s *priv ;
+ int rc = 0;
+
+ printk(KERN_INFO "%s\n", __func__);
+ priv = &pfe->vwd;
+ memset(priv, 0, sizeof(*priv));
+
+
+ rc = alloc_chrdev_region(&priv->char_devno,VWD_MINOR, VWD_MINOR_COUNT, VWD_DRV_NAME);
+ if (rc < 0) {
+ printk(KERN_ERR "%s: alloc_chrdev_region() failed\n", __func__);
+ goto err0;
+ }
+
+ cdev_init(&priv->char_dev, &vwd_fops);
+ priv->char_dev.owner = THIS_MODULE;
+
+ rc = cdev_add (&priv->char_dev, priv->char_devno, VWD_DEV_COUNT);
+ if (rc < 0) {
+ printk(KERN_ERR "%s: cdev_add() failed\n", __func__);
+ goto err1;
+ }
+
+ printk(KERN_INFO "%s: created vwd device(%d, %d)\n", __func__, MAJOR(priv->char_devno),
+ MINOR(priv->char_devno));
+
+ priv->pfe = pfe;
+
+ /* BUGFIX: init_timer() must run before the callback fields are set --
+  * the previous order populated data/function/expires and then
+  * (re)initialized the timer. */
+ init_timer(&priv->tx_timer);
+ priv->tx_timer.data = (unsigned long)priv;
+ priv->tx_timer.function = pfe_vwd_tx_timeout;
+ priv->tx_timer.expires = jiffies + ( COMCERTO_TX_RECOVERY_TIMEOUT_MS * HZ )/1000;
+
+ if( pfe_vwd_driver_init( priv ) ) {
+ /* BUGFIX: rc still held cdev_add()'s 0 here, so failure was
+  * reported as success to the caller. */
+ rc = -EINVAL;
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ /* BUGFIX: the cdev was added; delete it before releasing the region
+  * (previously skipped on this path). */
+ cdev_del(&priv->char_dev);
+
+err1:
+ unregister_chrdev_region(priv->char_devno, VWD_MINOR_COUNT);
+
+err0:
+ return rc;
+}
+
+/** pfe_vwd_exit
+ *
+ * Module-level teardown: shut the driver down, then remove the control
+ * character device and release its region.
+ */
+void pfe_vwd_exit(struct pfe *pfe)
+{
+ struct pfe_vwd_priv_s *priv = &pfe->vwd;
+
+ printk(KERN_INFO "%s\n", __func__);
+
+ pfe_vwd_driver_remove();
+
+ /* undo pfe_vwd_init()'s chardev setup, in reverse order */
+ cdev_del(&priv->char_dev);
+ unregister_chrdev_region(priv->char_devno, VWD_MINOR_COUNT);
+}
+
+#else /* !CFG_WIFI_OFFLOAD */
+
+/** pfe_vwd_init
+ *
+ * Stub used when CFG_WIFI_OFFLOAD is disabled: no-op, always succeeds.
+ */
+int pfe_vwd_init(struct pfe *pfe)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return 0;
+}
+
+/** pfe_vwd_exit
+ *
+ * Stub used when CFG_WIFI_OFFLOAD is disabled: no-op.
+ */
+void pfe_vwd_exit(struct pfe *pfe)
+{
+ printk(KERN_INFO "%s\n", __func__);
+}
+
+#endif /* !CFG_WIFI_OFFLOAD */
+
diff --git a/pfe_ctrl/pfe_vwd.h b/pfe_ctrl/pfe_vwd.h
new file mode 100644
index 0000000..7d7aed4
--- /dev/null
+++ b/pfe_ctrl/pfe_vwd.h
@@ -0,0 +1,128 @@
+#ifndef _PFE_VWD_H_
+#define _PFE_VWD_H_
+
+#include <linux/cdev.h>
+#include <linux/interrupt.h>
+#include "pfe_tso.h"
+
+
+#define PFE_VWD_TX_STATS
+#define PFE_VWD_LRO_STATS
+#define PFE_VWD_NAPI_STATS
+#define VWD_DEBUG_STATS
+#define VWD_TXQ_CNT 16
+#define VWD_RXQ_CNT 3
+
+#define PFE_WIFI_PKT_HEADROOM 96 /*PFE inserts this headroom for WiFi tx packets only in lro mode */
+
+#define VWD_MINOR 0
+#define VWD_MINOR_COUNT 1
+#define VWD_DRV_NAME "vwd"
+#define VWD_DEV_COUNT 1
+/* BUGFIX: parenthesize the macro body so expressions such as
+ * VWD_RX_POLL_WEIGHT * 2 expand correctly. */
+#define VWD_RX_POLL_WEIGHT (HIF_RX_POLL_WEIGHT - 16)
+
+/* Per-VAP state: one instance lives in the netdev_priv() area of each
+ * vwdN network device. */
+struct vap_desc_s {
+ struct kobject *vap_kobj; /* per-VAP sysfs directory */
+ struct net_device *dev; /* the vwdN netdev itself */
+ struct net_device *wifi_dev; /* underlying WiFi interface */
+ const struct ethtool_ops *wifi_ethtool_ops; /* saved to restore on teardown */
+ struct pfe_vwd_priv_s *priv;
+ unsigned int ifindex; /* ifindex of wifi_dev */
+ unsigned int diff_hw_features; /* feature bits we added to wifi_dev */
+ struct hif_client_s client; /* HIF client for this VAP's queues */
+ struct net_device dummy_dev;
+ struct napi_struct low_napi; /* rx queue 0 */
+ struct napi_struct high_napi; /* rx queue 1 */
+ struct napi_struct lro_napi; /* rx queue 2 */
+ spinlock_t tx_lock[VWD_TXQ_CNT];
+ struct sk_buff *skb_inflight[VWD_RXQ_CNT + 6];
+ struct pfe_eth_fast_timer fast_tx_timeout[VWD_TXQ_CNT]; /* tx-stall recovery timers */
+ struct net_device_stats stats;
+ int cpu_id;
+ unsigned char ifname[12]; /* NOTE: shorter than IFNAMSIZ; long names are truncated */
+ unsigned char macaddr[6];
+ unsigned short vapid;
+ unsigned short programmed;
+ unsigned short bridged;
+ unsigned short direct_rx_path; /* Direct path support from offload device=>VWD */
+ unsigned short direct_tx_path; /* Direct path support from offload VWD=>device */
+ unsigned short custom_skb; /* Customized skb model from VWD=>offload_device */
+#ifdef PFE_VWD_TX_STATS
+ unsigned int stop_queue_total[VWD_TXQ_CNT];
+ unsigned int stop_queue_hif[VWD_TXQ_CNT];
+ unsigned int stop_queue_hif_client[VWD_TXQ_CNT];
+ unsigned int clean_fail[VWD_TXQ_CNT];
+ unsigned int was_stopped[VWD_TXQ_CNT];
+#endif
+};
+
+
+
+/* Command block passed through the SIOCVAPUPDATE ioctl. */
+struct vap_cmd_s {
+ int action;
+#define ADD 0
+#define REMOVE 1
+#define UPDATE 2
+#define RESET 3
+ int ifindex;
+ unsigned short vapid;
+ unsigned short direct_rx_path;
+ unsigned char ifname[12];
+ unsigned char macaddr[6];
+};
+
+
+
+/* Driver-global VWD state (one instance, embedded in struct pfe). */
+struct pfe_vwd_priv_s {
+
+ struct pfe *pfe;
+ unsigned char name[12];
+
+ struct cdev char_dev;
+ /* BUGFIX: alloc_chrdev_region()/unregister_chrdev_region() take a
+  * dev_t; this was declared int, yielding an incompatible pointer at
+  * the alloc_chrdev_region(&char_devno, ...) call. */
+ dev_t char_devno;
+
+ struct vap_desc_s *vaps[MAX_VAP_SUPPORT];
+ char conf_vap_names[MAX_VAP_SUPPORT][IFNAMSIZ]; /* configured VAP interface names */
+ int vap_count;
+ int vap_programmed_count;
+ int vap_bridged_count;
+ spinlock_t vaplock; /* protects vaps[] and vap_count */
+ spinlock_t conf_lock; /* protects conf_vap_names[] */
+ struct timer_list tx_timer; /* global tx recovery timer */
+ struct tso_cb_s tso;
+ struct workqueue_struct *event_queue; /* VAP add/remove work (offload mode) */
+ struct work_struct event;
+
+#ifdef PFE_VWD_LRO_STATS
+ unsigned int lro_len_counters[LRO_LEN_COUNT_MAX];
+ unsigned int lro_nb_counters[LRO_NB_COUNT_MAX]; //TODO change to exact max number when RX scatter done
+#endif
+#ifdef PFE_VWD_NAPI_STATS
+ unsigned int napi_counters[NAPI_MAX_COUNT];
+#endif
+ int fast_path_enable;
+ int fast_bridging_enable;
+ int fast_routing_enable;
+ int tso_hook_enable;
+
+#ifdef VWD_DEBUG_STATS
+ u32 pkts_local_tx_sgs;
+ u32 pkts_total_local_tx;
+ u32 pkts_local_tx_csum;
+ u32 pkts_transmitted;
+ u32 pkts_slow_forwarded[VWD_RXQ_CNT];
+ u32 pkts_tx_dropped;
+ u32 pkts_rx_fast_forwarded[VWD_RXQ_CNT];
+ u32 rx_skb_alloc_fail;
+ u32 rx_csum_correct;
+#endif
+
+ u32 msg_enable;
+
+};
+
+
+int pfe_vwd_init(struct pfe *pfe);
+void pfe_vwd_exit(struct pfe *pfe);
+
+#endif /* _PFE_VWD_H_ */
diff --git a/pfe_ctrl/platform.h b/pfe_ctrl/platform.h
new file mode 100644
index 0000000..4f0ded4
--- /dev/null
+++ b/pfe_ctrl/platform.h
@@ -0,0 +1,6 @@
+#ifndef _PLATFORM_H_
+#define _PLATFORM_H_
+
+/* Identity mapping for the control build: the "physical" address is the
+ * CPU address itself.  BUGFIX: parenthesize the macro argument so an
+ * expression argument (e.g. virt_to_phys(p + off)) is cast as a whole
+ * rather than only its first operand. */
+#define virt_to_phys(virt) ((unsigned long)(virt))
+
+#endif /* _PLATFORM_H_ */
diff --git a/pfe_ctrl/version.h b/pfe_ctrl/version.h
new file mode 100644
index 0000000..102974d
--- /dev/null
+++ b/pfe_ctrl/version.h
@@ -0,0 +1,7 @@
+/* Auto-generated file. Do not edit! */
+#ifndef VERSION_H
+#define VERSION_H
+
+/* Version string compiled into the pfe_ctrl module. */
+#define PFE_CTRL_VERSION "pfe_ctrl_10_00_6"
+
+#endif /* VERSION_H */