/*******************************************************************************
Copyright (C) Marvell International Ltd. and its affiliates
This software file (the "File") is owned and distributed by Marvell
International Ltd. and/or its affiliates ("Marvell") under the following
alternative licensing terms. Once you have made an election to distribute the
File under one of the following license alternatives, please (i) delete this
introductory statement regarding license alternatives, (ii) delete the two
license alternatives that you have not elected to use and (iii) preserve the
Marvell copyright notice above.
********************************************************************************
Marvell GPL License Option
If you received this File from Marvell, you may opt to use, redistribute and/or
modify this File in accordance with the terms and conditions of the General
Public License Version 2, June 1991 (the "GPL License"), a copy of which is
available along with the File in the license.txt file or by writing to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
DISCLAIMED. The GPL License provides additional details about this warranty
disclaimer.
*******************************************************************************/
#include "mvCommon.h"
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mv_neta.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include "mvOs.h"
#include "mvDebug.h"
#include "dbg-trace.h"
#include "mvSysHwConfig.h"
#include "boardEnv/mvBoardEnvLib.h"
#include "ctrlEnv/mvCtrlEnvLib.h"
#include "eth-phy/mvEthPhy.h"
#include "mvSysEthPhyApi.h"
#include "mvSysNetaApi.h"
#include "gbe/mvNeta.h"
#include "bm/mvBm.h"
#include "pnc/mvPnc.h"
#include "pnc/mvTcam.h"
#include "pmt/mvPmt.h"
#include "mv_switch.h"
#include "mv_netdev.h"
#include "mv_eth_tool.h"
#include "cpu/mvCpuCntrs.h"
#ifdef CONFIG_MV_CPU_PERF_CNTRS
MV_CPU_CNTRS_EVENT *event0 = NULL;
MV_CPU_CNTRS_EVENT *event1 = NULL;
MV_CPU_CNTRS_EVENT *event2 = NULL;
MV_CPU_CNTRS_EVENT *event3 = NULL;
MV_CPU_CNTRS_EVENT *event4 = NULL;
MV_CPU_CNTRS_EVENT *event5 = NULL;
#endif /* CONFIG_MV_CPU_PERF_CNTRS */
unsigned int ext_switch_port_mask = 0;
void handle_group_affinity(int port);
void set_rxq_affinity(struct eth_port *pp, MV_U32 rxqAffinity, int group);
static inline int mv_eth_tx_policy(struct eth_port *pp, struct sk_buff *skb);
/* uncomment if you want to debug the SKB recycle feature */
/* #define ETH_SKB_DEBUG */
#ifdef CONFIG_MV_ETH_PNC
unsigned int mv_eth_pnc_ctrl_en = 1;
#else
unsigned int mv_eth_pnc_ctrl_en = 0;
#endif /* CONFIG_MV_ETH_PNC */
int mv_eth_ctrl_pnc(int en)
{
mv_eth_pnc_ctrl_en = en;
return 0;
}
int mv_eth_ctrl_pnc_get(void)
{
return mv_eth_pnc_ctrl_en;
}
#ifdef CONFIG_NET_SKB_RECYCLE
int mv_ctrl_recycle = CONFIG_NET_SKB_RECYCLE_DEF;
EXPORT_SYMBOL(mv_ctrl_recycle);
int mv_eth_ctrl_recycle(int en)
{
mv_ctrl_recycle = en;
return 0;
}
#else
int mv_eth_ctrl_recycle(int en)
{
printk(KERN_ERR "SKB recycle is not supported\n");
return 1;
}
#endif /* CONFIG_NET_SKB_RECYCLE */
extern u8 mvMacAddr[CONFIG_MV_ETH_PORTS_NUM][MV_MAC_ADDR_SIZE];
extern u16 mvMtu[CONFIG_MV_ETH_PORTS_NUM];
extern unsigned int switch_enabled_ports;
struct bm_pool mv_eth_pool[MV_ETH_BM_POOLS];
struct eth_port **mv_eth_ports;
struct net_device **mv_net_devs;
int mv_net_devs_num = 0;
int mv_ctrl_txdone = CONFIG_MV_ETH_TXDONE_COAL_PKTS;
EXPORT_SYMBOL(mv_ctrl_txdone);
/*
* Static declarations
*/
static int mv_eth_ports_num = 0;
static int mv_net_devs_max = 0;
static int mv_eth_initialized = 0;
/*
* Local functions
*/
static void mv_eth_txq_delete(struct eth_port *pp, struct tx_queue *txq_ctrl);
static void mv_eth_tx_timeout(struct net_device *dev);
static int mv_eth_tx(struct sk_buff *skb, struct net_device *dev);
static void mv_eth_tx_frag_process(struct eth_port *pp, struct sk_buff *skb, struct tx_queue *txq_ctrl, u16 flags);
static void mv_eth_config_show(void);
static int mv_eth_priv_init(struct eth_port *pp, int port);
static void mv_eth_priv_cleanup(struct eth_port *pp);
static int mv_eth_config_get(struct eth_port *pp, u8 *mac);
static int mv_eth_hal_init(struct eth_port *pp);
struct net_device *mv_eth_netdev_init(struct eth_port *pp, int mtu, u8 *mac);
static void mv_eth_netdev_set_features(struct net_device *dev);
static void mv_eth_netdev_update_features(struct net_device *dev);
static MV_STATUS mv_eth_pool_create(int pool, int capacity);
static int mv_eth_pool_add(int pool, int buf_num);
static int mv_eth_pool_free(int pool, int num);
static int mv_eth_pool_destroy(int pool);
#ifdef CONFIG_MV_ETH_TSO
int mv_eth_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mv_eth_tx_spec *tx_spec,
struct tx_queue *txq_ctrl);
#endif
/* Get the configuration string from the Kernel Command Line */
static char *port0_config_str = NULL, *port1_config_str = NULL, *port2_config_str = NULL, *port3_config_str = NULL;
int mv_eth_cmdline_port0_config(char *s);
__setup("mv_port0_config=", mv_eth_cmdline_port0_config);
int mv_eth_cmdline_port1_config(char *s);
__setup("mv_port1_config=", mv_eth_cmdline_port1_config);
int mv_eth_cmdline_port2_config(char *s);
__setup("mv_port2_config=", mv_eth_cmdline_port2_config);
int mv_eth_cmdline_port3_config(char *s);
__setup("mv_port3_config=", mv_eth_cmdline_port3_config);
#if defined(CONFIG_MV_ETH_NFP) || defined(CONFIG_MV_ETH_NFP_MODULE)
struct nfpHookMgr *nfpHookMgr = NULL;
int nfp_hook_mgr_register(mv_eth_nfp_func_t *func)
{
nfpHookMgr = kmalloc(sizeof(struct nfpHookMgr), GFP_ATOMIC);
if (nfpHookMgr == NULL) {
printk(KERN_ERR "%s: Error allocating memory for nfp Hook Mgr\n", __func__);
return -ENOMEM;
}
nfpHookMgr->mv_eth_nfp = func;
return 0;
}
EXPORT_SYMBOL(nfp_hook_mgr_register);
void nfp_hook_mgr_unregister(void)
{
kfree(nfpHookMgr);
nfpHookMgr = NULL;
}
EXPORT_SYMBOL(nfp_hook_mgr_unregister);
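/*
 * Usage sketch (hypothetical handler name; the exact prototype is defined by
 * mv_eth_nfp_func_t): an NFP module registers its RX fast-path hook at init
 * time and removes it on exit, e.g.
 *
 *     MV_STATUS my_nfp_rx(struct eth_port *pp, int rxq, struct neta_rx_desc *rx_desc,
 *                         struct eth_pbuf *pkt, struct bm_pool *pool);
 *     ...
 *     nfp_hook_mgr_register(my_nfp_rx);
 *     ...
 *     nfp_hook_mgr_unregister();
 *
 * The hook is invoked from mv_eth_rx() when MV_ETH_F_NFP_EN is set: MV_OK means
 * the hook consumed the packet, MV_FAIL means it consumed the packet without
 * refilling the RX descriptor, and MV_TERMINATE returns the packet to the
 * slow path.
 */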
#endif /* CONFIG_MV_ETH_NFP || CONFIG_MV_ETH_NFP_MODULE */
int mv_eth_cmdline_port0_config(char *s)
{
port0_config_str = s;
return 1;
}
int mv_eth_cmdline_port1_config(char *s)
{
port1_config_str = s;
return 1;
}
int mv_eth_cmdline_port2_config(char *s)
{
port2_config_str = s;
return 1;
}
int mv_eth_cmdline_port3_config(char *s)
{
port3_config_str = s;
return 1;
}
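/*
 * Move the CPUs set in cpuAffinity to NAPI group "group": each such CPU gets
 * the group's napi context, inherits the group's current RXQ access mask and
 * is granted access to all TXQs.
 */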
void set_cpu_affinity(struct eth_port *pp, MV_U32 cpuAffinity, int group)
{
int cpu;
MV_U32 rxqAffinity = 0;
/* nothing to do when cpuAffinity == 0 */
if (cpuAffinity == 0)
return;
/* First, read affinity of the target group, in case it contains CPUs */
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
if (!(MV_BIT_CHECK(pp->cpuMask, cpu)))
continue;
if (pp->napiCpuGroup[cpu] == group) {
rxqAffinity = MV_REG_READ(NETA_CPU_MAP_REG(pp->port, cpu)) & 0xff;
break;
}
}
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
if (cpuAffinity & 1) {
pp->napi[cpu] = pp->napiGroup[group];
pp->napiCpuGroup[cpu] = group;
/* set rxq affinity of the target group */
MV_REG_WRITE(NETA_CPU_MAP_REG(pp->port, cpu), rxqAffinity | NETA_CPU_TXQ_ACCESS_ALL_MASK);
}
cpuAffinity >>= 1;
}
}
int group_has_cpus(struct eth_port *pp, int group)
{
int cpu;
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
if (!(MV_BIT_CHECK(pp->cpuMask, cpu)))
continue;
if (pp->napiCpuGroup[cpu] == group)
return 1;
}
/* the group contains no CPU */
return 0;
}
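/*
 * Assign the RXQs set in rxqAffinity to the CPUs belonging to "group": those
 * CPUs get RXQ access enabled for the selected queues, while CPUs of other
 * groups have their access to the same queues disabled.
 */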
void set_rxq_affinity(struct eth_port *pp, MV_U32 rxqAffinity, int group)
{
int rxq, cpu;
MV_U32 regVal;
MV_U32 tmpRxqAffinity;
int groupHasCpus;
int cpuInGroup;
/* nothing to do when rxqAffinity == 0 */
if (rxqAffinity == 0)
return;
groupHasCpus = group_has_cpus(pp, group);
if (!groupHasCpus) {
printk(KERN_ERR "%s: operation not performed; group %d has no cpu \n", __func__, group);
return;
}
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
if (!(MV_BIT_CHECK(pp->cpuMask, cpu)))
continue;
tmpRxqAffinity = rxqAffinity;
regVal = MV_REG_READ(NETA_CPU_MAP_REG(pp->port, cpu));
if (pp->napiCpuGroup[cpu] == group) {
cpuInGroup = 1;
/* init TXQ Access Enable bits */
regVal = regVal & 0xff00;
} else {
cpuInGroup = 0;
}
for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
/* set rxq affinity for this cpu */
if (tmpRxqAffinity & 1) {
if (cpuInGroup)
regVal |= NETA_CPU_RXQ_ACCESS_MASK(rxq);
else
regVal &= ~NETA_CPU_RXQ_ACCESS_MASK(rxq);
}
tmpRxqAffinity >>= 1;
}
MV_REG_WRITE(NETA_CPU_MAP_REG(pp->port, cpu), regVal);
}
}
static int mv_eth_port_config_parse(struct eth_port *pp)
{
char *str;
printk(KERN_ERR "\n");
if (pp == NULL) {
printk(KERN_ERR " o mv_eth_port_config_parse: got NULL pp\n");
return -1;
}
switch (pp->port) {
case 0:
str = port0_config_str;
break;
case 1:
str = port1_config_str;
break;
case 2:
str = port2_config_str;
break;
case 3:
str = port3_config_str;
break;
default:
printk(KERN_ERR " o mv_eth_port_config_parse: got unknown port %d\n", pp->port);
return -1;
}
if (str != NULL) {
if ((!strcmp(str, "disconnected")) || (!strcmp(str, "Disconnected"))) {
printk(KERN_ERR " o Port %d is disconnected from Linux netdevice\n", pp->port);
clear_bit(MV_ETH_F_CONNECT_LINUX_BIT, &(pp->flags));
return 0;
}
}
printk(KERN_ERR " o Port %d is connected to Linux netdevice\n", pp->port);
set_bit(MV_ETH_F_CONNECT_LINUX_BIT, &(pp->flags));
return 0;
}
#ifdef ETH_SKB_DEBUG
struct sk_buff *mv_eth_skb_debug[MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS];
static spinlock_t skb_debug_lock;
void mv_eth_skb_check(struct sk_buff *skb)
{
int i;
struct sk_buff *temp;
unsigned long flags;
if (skb == NULL)
printk(KERN_ERR "mv_eth_skb_check: got NULL SKB\n");
spin_lock_irqsave(&skb_debug_lock, flags);
i = *((u32 *)&skb->cb[0]);
if ((i >= 0) && (i < MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS)) {
temp = mv_eth_skb_debug[i];
if (mv_eth_skb_debug[i] != skb) {
printk(KERN_ERR "mv_eth_skb_check: Unexpected skb: %p (%d) != %p (%d)\n",
skb, i, temp, *((u32 *)&temp->cb[0]));
}
mv_eth_skb_debug[i] = NULL;
} else {
printk(KERN_ERR "mv_eth_skb_check: skb->cb=%d is out of range\n", i);
}
spin_unlock_irqrestore(&skb_debug_lock, flags);
}
void mv_eth_skb_save(struct sk_buff *skb, const char *s)
{
int i;
int saved = 0;
unsigned long flags;
spin_lock_irqsave(&skb_debug_lock, flags);
for (i = 0; i < MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS; i++) {
if (mv_eth_skb_debug[i] == skb) {
printk(KERN_ERR "%s: mv_eth_skb_debug Duplicate: i=%d, skb=%p\n", s, i, skb);
mv_eth_skb_print(skb);
}
if ((!saved) && (mv_eth_skb_debug[i] == NULL)) {
mv_eth_skb_debug[i] = skb;
*((u32 *)&skb->cb[0]) = i;
saved = 1;
}
}
spin_unlock_irqrestore(&skb_debug_lock, flags);
if ((i == MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS) && (!saved))
printk(KERN_ERR "mv_eth_skb_debug is FULL, skb=%p\n", skb);
}
#endif /* ETH_SKB_DEBUG */
struct eth_port *mv_eth_port_by_id(unsigned int port)
{
if (port < mv_eth_ports_num)
return mv_eth_ports[port];
return NULL;
}
struct net_device *mv_eth_netdev_by_id(unsigned int idx)
{
if (idx < mv_net_devs_num)
return mv_net_devs[idx];
return NULL;
}
static inline int mv_eth_skb_mh_add(struct sk_buff *skb, u16 mh)
{
/* Sanity check: make sure there is room for the MH in the buffer */
if (skb_headroom(skb) < MV_ETH_MH_SIZE) {
printk(KERN_ERR "%s: skb (%p) doesn't have room for MH, head=%p, data=%p\n",
__func__, skb, skb->head, skb->data);
return 1;
}
/* Prepare place for MH header */
skb->len += MV_ETH_MH_SIZE;
skb->data -= MV_ETH_MH_SIZE;
*((u16 *) skb->data) = mh;
return 0;
}
void mv_eth_ctrl_txdone(int num)
{
mv_ctrl_txdone = num;
}
int mv_eth_ctrl_flag(int port, u32 flag, u32 val)
{
struct eth_port *pp = mv_eth_port_by_id(port);
u32 bit_flag = (fls(flag) - 1);
if (!pp)
return -ENODEV;
if ((flag == MV_ETH_F_MH) && (pp->flags & MV_ETH_F_SWITCH)) {
printk(KERN_ERR "Error: cannot change Marvell Header on a port used by the Gateway driver\n");
return -EPERM;
}
if (val)
set_bit(bit_flag, &(pp->flags));
else
clear_bit(bit_flag, &(pp->flags));
if (flag == MV_ETH_F_MH)
mvNetaMhSet(pp->port, val ? MV_NETA_MH : MV_NETA_MH_NONE);
return 0;
}
int mv_eth_ctrl_port_buf_num_set(int port, int long_num, int short_num)
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (pp == NULL) {
printk(KERN_INFO "port doens not exist (%d) in %s\n" , port, __func__);
return -EINVAL;
}
if (pp->flags & MV_ETH_F_STARTED) {
printk(KERN_ERR "Port %d must be stopped before\n", port);
return -EINVAL;
}
if (pp->pool_long != NULL) {
/* Update number of buffers in existing pool (allocate or free) */
if (pp->pool_long_num > long_num)
mv_eth_pool_free(pp->pool_long->pool, pp->pool_long_num - long_num);
else if (long_num > pp->pool_long_num)
mv_eth_pool_add(pp->pool_long->pool, long_num - pp->pool_long_num);
}
pp->pool_long_num = long_num;
#ifdef CONFIG_MV_ETH_BM_CPU
if (pp->pool_short != NULL) {
/* Update number of buffers in existing pool (allocate or free) */
if (pp->pool_short_num > short_num)
mv_eth_pool_free(pp->pool_short->pool, pp->pool_short_num - short_num);
else if (short_num > pp->pool_short_num)
mv_eth_pool_add(pp->pool_short->pool, short_num - pp->pool_short_num);
}
pp->pool_short_num = short_num;
#endif /* CONFIG_MV_ETH_BM_CPU */
return 0;
}
#ifdef CONFIG_MV_ETH_BM
/* Set pkt_size for the pool: check that the pool is not in use (all ports
 * using it are stopped), free all buffers from the pool and detach the pool
 * from all ports.
 */
int mv_eth_ctrl_pool_size_set(int pool, int pkt_size)
{
#ifdef CONFIG_MV_ETH_BM_CPU
int port;
struct bm_pool *ppool;
struct eth_port *pp;
if (mvNetaMaxCheck(pool, MV_ETH_BM_POOLS))
return -EINVAL;
ppool = &mv_eth_pool[pool];
for (port = 0; port < mv_eth_ports_num; port++) {
/* Check that all ports using this pool are stopped */
if (ppool->port_map & (1 << port)) {
pp = mv_eth_port_by_id(port);
if (pp->flags & MV_ETH_F_STARTED) {
printk(KERN_ERR "Port %d use pool #%d and must be stopped before change pkt_size\n",
port, pool);
return -EINVAL;
}
}
}
for (port = 0; port < mv_eth_ports_num; port++) {
/* Free all buffers and detach pool */
if (ppool->port_map & (1 << port)) {
pp = mv_eth_port_by_id(port);
if (ppool == pp->pool_long) {
mv_eth_pool_free(pool, pp->pool_long_num);
ppool->port_map &= ~(1 << pp->port);
pp->pool_long = NULL;
}
if (ppool == pp->pool_short) {
mv_eth_pool_free(pool, pp->pool_short_num);
ppool->port_map &= ~(1 << pp->port);
pp->pool_short = NULL;
}
}
}
ppool->pkt_size = pkt_size;
#endif /* CONFIG_MV_ETH_BM_CPU */
mv_eth_bm_config_pkt_size_set(pool, pkt_size);
if (pkt_size == 0)
mvBmPoolBufSizeSet(pool, 0);
else
mvBmPoolBufSizeSet(pool, RX_BUF_SIZE(pkt_size));
return 0;
}
#endif /* CONFIG_MV_ETH_BM */
int mv_eth_ctrl_set_poll_rx_weight(int port, u32 weight)
{
struct eth_port *pp = mv_eth_port_by_id(port);
int cpu;
if (pp == NULL) {
printk(KERN_INFO "port doens not exist (%d) in %s\n" , port, __func__);
return -EINVAL;
}
if (pp->flags & MV_ETH_F_STARTED) {
printk(KERN_ERR "Port %d must be stopped before\n", port);
return -EINVAL;
}
if (weight > 255)
weight = 255;
pp->weight = weight;
for_each_possible_cpu(cpu) {
if (pp->napi[cpu])
pp->napi[cpu]->weight = pp->weight;
}
return 0;
}
int mv_eth_ctrl_rxq_size_set(int port, int rxq, int value)
{
struct eth_port *pp = mv_eth_port_by_id(port);
struct rx_queue *rxq_ctrl;
if (pp == NULL) {
printk(KERN_INFO "port doens not exist (%d) in %s\n" , port, __func__);
return -EINVAL;
}
if (pp->flags & MV_ETH_F_STARTED) {
printk(KERN_ERR "Port %d must be stopped before\n", port);
return -EINVAL;
}
rxq_ctrl = &pp->rxq_ctrl[rxq];
if ((rxq_ctrl->q) && (rxq_ctrl->rxq_size != value)) {
/* Reset is required when RXQ ring size is changed */
mv_eth_rx_reset(pp->port);
mvNetaRxqDelete(pp->port, rxq);
rxq_ctrl->q = NULL;
}
pp->rxq_ctrl[rxq].rxq_size = value;
/* New RXQ will be created during mv_eth_start_internals */
return 0;
}
int mv_eth_ctrl_txq_size_set(int port, int txp, int txq, int value)
{
struct tx_queue *txq_ctrl;
struct eth_port *pp = mv_eth_port_by_id(port);
if (pp == NULL) {
printk(KERN_INFO "port doens not exist (%d) in %s\n" , port, __func__);
return -EINVAL;
}
if (pp->flags & MV_ETH_F_STARTED) {
printk(KERN_ERR "Port %d must be stopped before\n", port);
return -EINVAL;
}
txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
if ((txq_ctrl->q) && (txq_ctrl->txq_size != value)) {
/* Reset of port/txp is required to change TXQ ring size */
if ((mvNetaTxqNextIndexGet(pp->port, txq_ctrl->txp, txq_ctrl->txq) != 0) ||
(mvNetaTxqPendDescNumGet(pp->port, txq_ctrl->txp, txq_ctrl->txq) != 0) ||
(mvNetaTxqSentDescNumGet(pp->port, txq_ctrl->txp, txq_ctrl->txq) != 0)) {
printk(KERN_ERR "%s: port=%d, txp=%d, txq=%d must be in its initial state\n",
__func__, port, txq_ctrl->txp, txq_ctrl->txq);
return -EINVAL;
}
mv_eth_txq_delete(pp, txq_ctrl);
}
txq_ctrl->txq_size = value;
/* New TXQ will be created during mv_eth_start_internals */
return 0;
}
int mv_eth_ctrl_txq_mode_get(int port, int txp, int txq, int *value)
{
int mode = MV_ETH_TXQ_FREE, val = 0;
struct tx_queue *txq_ctrl;
struct eth_port *pp = mv_eth_port_by_id(port);
if (pp == NULL)
return -ENODEV;
txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
if (txq_ctrl->cpu_owner) {
mode = MV_ETH_TXQ_CPU;
val = txq_ctrl->cpu_owner;
} else if (txq_ctrl->hwf_rxp < (MV_U8) mv_eth_ports_num) {
mode = MV_ETH_TXQ_HWF;
val = txq_ctrl->hwf_rxp;
}
if (value)
*value = val;
return mode;
}
/* Increment/Decrement CPU ownership for this TXQ */
int mv_eth_ctrl_txq_cpu_own(int port, int txp, int txq, int add)
{
int mode;
struct tx_queue *txq_ctrl;
struct eth_port *pp = mv_eth_port_by_id(port);
if ((pp == NULL) || (pp->txq_ctrl == NULL))
return -ENODEV;
/* Check that new txp/txq can be allocated for CPU */
mode = mv_eth_ctrl_txq_mode_get(port, txp, txq, NULL);
txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
if (add) {
if ((mode != MV_ETH_TXQ_CPU) && (mode != MV_ETH_TXQ_FREE))
return -EINVAL;
txq_ctrl->cpu_owner++;
} else {
if (mode != MV_ETH_TXQ_CPU)
return -EINVAL;
txq_ctrl->cpu_owner--;
}
return 0;
}
/* Set TXQ ownership to HWF from the RX port. rxp=-1 - free TXQ ownership */
int mv_eth_ctrl_txq_hwf_own(int port, int txp, int txq, int rxp)
{
int mode;
struct tx_queue *txq_ctrl;
struct eth_port *pp = mv_eth_port_by_id(port);
if ((pp == NULL) || (pp->txq_ctrl == NULL))
return -ENODEV;
/* Check that new txp/txq can be allocated for HWF */
mode = mv_eth_ctrl_txq_mode_get(port, txp, txq, NULL);
txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
if (rxp == -1) {
if (mode != MV_ETH_TXQ_HWF)
return -EINVAL;
} else {
if ((mode != MV_ETH_TXQ_HWF) && (mode != MV_ETH_TXQ_FREE))
return -EINVAL;
}
txq_ctrl->hwf_rxp = (MV_U8) rxp;
return 0;
}
/* Set TXQ for CPU originated packets */
int mv_eth_ctrl_txq_cpu_def(int port, int txp, int txq, int cpu)
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (cpu >= CONFIG_NR_CPUS) {
printk(KERN_ERR "cpu #%d is out of range: from 0 to %d\n",
cpu, CONFIG_NR_CPUS - 1);
return -EINVAL;
}
if (mvNetaTxpCheck(port, txp))
return -EINVAL;
if ((pp == NULL) || (pp->txq_ctrl == NULL))
return -ENODEV;
/* Decrement CPU ownership for old txq */
mv_eth_ctrl_txq_cpu_own(port, pp->txp, pp->txq[cpu], 0);
if (txq != -1) {
if (mvNetaMaxCheck(txq, CONFIG_MV_ETH_TXQ))
return -EINVAL;
/* Increment CPU ownership for new txq */
if (mv_eth_ctrl_txq_cpu_own(port, txp, txq, 1))
return -EINVAL;
}
pp->txp = txp;
pp->txq[cpu] = txq;
return 0;
}
/* Get default TXQ for CPU originated packets */
int mv_eth_get_txq_cpu_def(int port, int *txp, int *txq, int cpu)
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (!pp) {
return -ENODEV;
}
*txp = pp->txp;
*txq = pp->txq[cpu];
return 0;
}
int mv_eth_ctrl_tx_cmd(int port, u32 tx_cmd)
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (!pp)
return -ENODEV;
pp->hw_cmd = tx_cmd;
return 0;
}
int mv_eth_ctrl_tx_mh(int port, u16 mh)
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (!pp)
return -ENODEV;
pp->tx_mh = mh;
return 0;
}
#ifdef CONFIG_MV_ETH_TX_SPECIAL
/* Register special transmit check function */
void mv_eth_tx_special_check_func(int port,
int (*func)(int port, struct net_device *dev, struct sk_buff *skb,
struct mv_eth_tx_spec *tx_spec_out))
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (pp)
pp->tx_special_check = func;
}
#endif /* CONFIG_MV_ETH_TX_SPECIAL */
#ifdef CONFIG_MV_ETH_RX_SPECIAL
/* Register special receive processing function */
void mv_eth_rx_special_proc_func(int port, void (*func)(int port, int rxq, struct net_device *dev,
struct eth_pbuf *pkt, struct neta_rx_desc *rx_desc))
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (pp)
pp->rx_special_proc = func;
}
#endif /* CONFIG_MV_ETH_RX_SPECIAL */
#ifdef CONFIG_MV_MAC_LEARN
/* Register mac learn parse function */
void mv_eth_rx_mac_learn_func(int port, void (*func)(int port, int rxq, struct net_device *dev,
struct sk_buff *skb, struct neta_rx_desc *rx_desc))
{
struct eth_port *pp = mv_eth_port_by_id(port);
if (pp != NULL)
pp->rx_mc_mac_learn = func;
}
#endif /* CONFIG_MV_MAC_LEARN */
static inline u16 mv_eth_select_txq(struct net_device *dev, struct sk_buff *skb)
{
struct eth_port *pp = MV_ETH_PRIV(dev);
return mv_eth_tx_policy(pp, skb);
}
static const struct net_device_ops mv_eth_netdev_ops = {
.ndo_open = mv_eth_open,
.ndo_stop = mv_eth_stop,
.ndo_start_xmit = mv_eth_tx,
.ndo_set_multicast_list = mv_eth_set_multicast_list,
.ndo_set_mac_address = mv_eth_set_mac_addr,
.ndo_change_mtu = mv_eth_change_mtu,
.ndo_tx_timeout = mv_eth_tx_timeout,
.ndo_select_queue = mv_eth_select_txq,
};
#ifdef CONFIG_MV_ETH_SWITCH
static const struct net_device_ops mv_switch_netdev_ops = {
.ndo_open = mv_eth_switch_start,
.ndo_stop = mv_eth_switch_stop,
.ndo_start_xmit = mv_eth_tx,
.ndo_set_multicast_list = mv_eth_switch_set_multicast_list,
.ndo_set_mac_address = mv_eth_switch_set_mac_addr,
.ndo_change_mtu = mv_eth_switch_change_mtu,
.ndo_tx_timeout = mv_eth_tx_timeout,
};
int mv_eth_switch_netdev_first = 0;
int mv_eth_switch_netdev_last = 0;
static inline struct net_device *mv_eth_switch_netdev_get(struct eth_port *pp, struct eth_pbuf *pkt)
{
MV_U8 *data;
int db_num;
if (pp->flags & MV_ETH_F_SWITCH) {
data = pkt->pBuf + pkt->offset;
/* bits[4-7] of MSB in Marvell header */
db_num = ((*data) >> 4);
return mv_net_devs[mv_eth_switch_netdev_first + db_num];
}
return pp->dev;
}
void mv_eth_switch_priv_update(struct net_device *netdev, int i)
{
struct eth_netdev *dev_priv;
struct eth_port *pp = MV_ETH_PRIV(netdev);
int print_flag, port, switch_port;
/* Update dev_priv structure */
dev_priv = MV_DEV_PRIV(netdev);
dev_priv->port_map = 0;
dev_priv->link_map = 0;
print_flag = 1;
for (port = 0; port < BOARD_ETH_SWITCH_PORT_NUM; port++) {
if (switch_net_config[pp->port].board_port_map[i] & (1 << port)) {
if (print_flag) {
printk(KERN_CONT ". Interface ports: ");
print_flag = 0;
}
printk(KERN_CONT "%d ", port);
switch_port = mvBoardSwitchPortGet(MV_SWITCH_ID_0, port);
if (switch_port >= 0) {
dev_priv->port_map |= (1 << switch_port);
switch_enabled_ports |= (1 << switch_port);
}
}
}
printk(KERN_CONT "\n");
dev_priv->group = i;
dev_priv->vlan_grp_id = MV_SWITCH_GROUP_VLAN_ID(i); /* e.g. 0x100, 0x200... */
dev_priv->tx_vlan_mh = cpu_to_be16((i << 12) | dev_priv->port_map);
dev_priv->cpu_port = mvBoardSwitchCpuPortGet(MV_SWITCH_ID_0);
mv_eth_switch_vlan_set(dev_priv->vlan_grp_id, dev_priv->port_map, dev_priv->cpu_port);
}
int mv_eth_switch_netdev_init(struct eth_port *pp, int dev_i)
{
int i;
struct net_device *netdev;
switch_enabled_ports = 0;
for (i = 0; i < switch_net_config[pp->port].netdev_max; i++) {
netdev = mv_eth_netdev_init(pp, switch_net_config[pp->port].mtu, switch_net_config[pp->port].mac_addr[i]);
if (netdev == NULL) {
printk(KERN_ERR "mv_eth_switch_netdev_init: can't create netdevice\n");
break;
}
mv_net_devs[dev_i++] = netdev;
mv_eth_switch_priv_update(netdev, i);
}
return dev_i;
}
#endif /* CONFIG_MV_ETH_SWITCH */
void mv_eth_link_status_print(int port)
{
MV_ETH_PORT_STATUS link;
mvNetaLinkStatus(port, &link);
#ifdef CONFIG_MV_PON
if (MV_PON_PORT(port))
link.linkup = mv_pon_link_status();
#endif /* CONFIG_MV_PON */
if (link.linkup) {
printk(KERN_CONT "link up");
printk(KERN_CONT ", %s duplex", (link.duplex == MV_ETH_DUPLEX_FULL) ? "full" : "half");
printk(KERN_CONT ", speed ");
if (link.speed == MV_ETH_SPEED_1000)
printk(KERN_CONT "1 Gbps\n");
else if (link.speed == MV_ETH_SPEED_100)
printk(KERN_CONT "100 Mbps\n");
else
printk(KERN_CONT "10 Mbps\n");
} else
printk(KERN_CONT "link down\n");
}
static void mv_eth_rx_error(struct eth_port *pp, struct neta_rx_desc *rx_desc)
{
STAT_ERR(pp->stats.rx_error++);
if (pp->dev)
pp->dev->stats.rx_errors++;
#ifdef CONFIG_MV_ETH_DEBUG_CODE
if ((pp->flags & MV_ETH_F_DBG_RX) == 0)
return;
if (!printk_ratelimit())
return;
if ((rx_desc->status & NETA_RX_FL_DESC_MASK) != NETA_RX_FL_DESC_MASK) {
printk(KERN_ERR "giga #%d: bad rx status %08x (buffer oversize), size=%d\n",
pp->port, rx_desc->status, rx_desc->dataSize);
return;
}
switch (rx_desc->status & NETA_RX_ERR_CODE_MASK) {
case NETA_RX_ERR_CRC:
printk(KERN_ERR "giga #%d: bad rx status %08x (crc error), size=%d\n",
pp->port, rx_desc->status, rx_desc->dataSize);
break;
case NETA_RX_ERR_OVERRUN:
printk(KERN_ERR "giga #%d: bad rx status %08x (overrun error), size=%d\n",
pp->port, rx_desc->status, rx_desc->dataSize);
break;
case NETA_RX_ERR_LEN:
printk(KERN_ERR "giga #%d: bad rx status %08x (max frame length error), size=%d\n",
pp->port, rx_desc->status, rx_desc->dataSize);
break;
case NETA_RX_ERR_RESOURCE:
printk(KERN_ERR "giga #%d: bad rx status %08x (resource error), size=%d\n",
pp->port, rx_desc->status, rx_desc->dataSize);
break;
}
mv_eth_rx_desc_print(rx_desc);
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
}
void mv_eth_skb_print(struct sk_buff *skb)
{
printk(KERN_ERR "skb=%p: head=%p, data=%p, tail=%p, end=%p\n", skb, skb->head, skb->data, skb->tail, skb->end);
printk(KERN_ERR "\t mac=%p, network=%p, transport=%p\n",
skb->mac_header, skb->network_header, skb->transport_header);
printk(KERN_ERR "\t truesize=%d, len=%d, data_len=%d, mac_len=%d\n",
skb->truesize, skb->len, skb->data_len, skb->mac_len);
printk(KERN_ERR "\t users=%d, dataref=%d, nr_frags=%d, gso_size=%d, gso_segs=%d\n",
atomic_read(&skb->users), atomic_read(&skb_shinfo(skb)->dataref),
skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_segs);
printk(KERN_ERR "\t proto=%d, ip_summed=%d, priority=%d\n", ntohs(skb->protocol), skb->ip_summed, skb->priority);
#ifdef CONFIG_NET_SKB_RECYCLE
printk(KERN_ERR "\t skb_recycle=%p, hw_cookie=%p\n", skb->skb_recycle, skb->hw_cookie);
#endif /* CONFIG_NET_SKB_RECYCLE */
}
void mv_eth_rx_desc_print(struct neta_rx_desc *desc)
{
int i;
u32 *words = (u32 *) desc;
printk(KERN_ERR "RX desc - %p: ", desc);
for (i = 0; i < 8; i++)
printk(KERN_CONT "%8.8x ", *words++);
printk(KERN_CONT "\n");
if (desc->status & NETA_RX_IP4_FRAG_MASK)
printk(KERN_ERR "Frag, ");
printk(KERN_CONT "size=%d, L3_offs=%d, IP_hlen=%d, L4_csum=%s, L3=",
desc->dataSize,
(desc->status & NETA_RX_L3_OFFSET_MASK) >> NETA_RX_L3_OFFSET_OFFS,
(desc->status & NETA_RX_IP_HLEN_MASK) >> NETA_RX_IP_HLEN_OFFS,
(desc->status & NETA_RX_L4_CSUM_OK_MASK) ? "Ok" : "Bad");
if (NETA_RX_L3_IS_IP4(desc->status))
printk(KERN_CONT "IPv4, ");
else if (NETA_RX_L3_IS_IP4_ERR(desc->status))
printk(KERN_CONT "IPv4 bad, ");
else if (NETA_RX_L3_IS_IP6(desc->status))
printk(KERN_CONT "IPv6, ");
else
printk(KERN_CONT "Unknown, ");
printk(KERN_CONT "L4=");
if (NETA_RX_L4_IS_TCP(desc->status))
printk(KERN_CONT "TCP");
else if (NETA_RX_L4_IS_UDP(desc->status))
printk(KERN_CONT "UDP");
else
printk(KERN_CONT "Unknown");
printk(KERN_CONT "\n");
#ifdef CONFIG_MV_ETH_PNC
printk(KERN_ERR "RINFO: ");
if (desc->pncInfo & NETA_PNC_DA_MC)
printk(KERN_CONT "DA_MC, ");
if (desc->pncInfo & NETA_PNC_DA_BC)
printk(KERN_CONT "DA_BC, ");
if (desc->pncInfo & NETA_PNC_DA_UC)
printk(KERN_CONT "DA_UC, ");
if (desc->pncInfo & NETA_PNC_VLAN)
printk(KERN_CONT "VLAN, ");
if (desc->pncInfo & NETA_PNC_PPPOE)
printk(KERN_CONT "PPPOE, ");
if (desc->pncInfo & NETA_PNC_RX_SPECIAL)
printk(KERN_CONT "RX_SPEC, ");
#endif /* CONFIG_MV_ETH_PNC */
printk(KERN_CONT "\n");
}
EXPORT_SYMBOL(mv_eth_rx_desc_print);
void mv_eth_tx_desc_print(struct neta_tx_desc *desc)
{
int i;
u32 *words = (u32 *) desc;
printk(KERN_ERR "TX desc - %p: ", desc);
for (i = 0; i < 8; i++)
printk(KERN_CONT "%8.8x ", *words++);
printk(KERN_CONT "\n");
}
EXPORT_SYMBOL(mv_eth_tx_desc_print);
void mv_eth_pkt_print(struct eth_pbuf *pkt)
{
printk(KERN_ERR "pkt: len=%d off=%d pool=%d "
"skb=%p pa=%lx buf=%p\n",
pkt->bytes, pkt->offset, pkt->pool,
pkt->osInfo, pkt->physAddr, pkt->pBuf);
mvDebugMemDump(pkt->pBuf + pkt->offset, 64, 1);
mvOsCacheInvalidate(NULL, pkt->pBuf + pkt->offset, 64);
}
EXPORT_SYMBOL(mv_eth_pkt_print);
static inline void mv_eth_rx_csum(struct eth_port *pp, struct neta_rx_desc *rx_desc, struct sk_buff *skb)
{
#if defined(CONFIG_MV_ETH_RX_CSUM_OFFLOAD)
if (pp->rx_csum_offload &&
((NETA_RX_L3_IS_IP4(rx_desc->status) ||
NETA_RX_L3_IS_IP6(rx_desc->status)) && (rx_desc->status & NETA_RX_L4_CSUM_OK_MASK))) {
skb->csum = 0;
skb->ip_summed = CHECKSUM_UNNECESSARY;
STAT_DBG(pp->stats.rx_csum_hw++);
return;
}
#endif /* CONFIG_MV_ETH_RX_CSUM_OFFLOAD */
skb->ip_summed = CHECKSUM_NONE;
STAT_DBG(pp->stats.rx_csum_sw++);
}
static inline int mv_eth_tx_done_policy(u32 cause)
{
return fls(cause >> NETA_CAUSE_TXQ_SENT_DESC_OFFS) - 1;
}
inline int mv_eth_rx_policy(u32 cause)
{
return fls(cause >> NETA_CAUSE_RXQ_OCCUP_DESC_OFFS) - 1;
}
static inline int mv_eth_txq_tos_map_get(struct eth_port *pp, MV_U8 tos)
{
MV_U8 q = pp->txq_tos_map[tos];
if (q == MV_ETH_TXQ_INVALID)
return pp->txq[smp_processor_id()];
return q;
}
static inline int mv_eth_tx_policy(struct eth_port *pp, struct sk_buff *skb)
{
int txq = pp->txq[smp_processor_id()];
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
txq = mv_eth_txq_tos_map_get(pp, iph->tos);
}
return txq;
}
#ifdef CONFIG_NET_SKB_RECYCLE
int mv_eth_skb_recycle(struct sk_buff *skb)
{
struct eth_pbuf *pkt = skb->hw_cookie;
struct bm_pool *pool = &mv_eth_pool[pkt->pool];
int status = 0;
if (skb_recycle_check(skb, pool->pkt_size)) {
#ifdef CONFIG_MV_ETH_DEBUG_CODE
/* Sanity check */
if (skb->truesize != ((skb->end - skb->head) + sizeof(struct sk_buff)))
mv_eth_skb_print(skb);
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
STAT_DBG(pool->stats.skb_recycled_ok++);
mvOsCacheInvalidate(NULL, skb->head, RX_BUF_SIZE(pool->pkt_size));
status = mv_eth_pool_put(pool, pkt);
#ifdef ETH_SKB_DEBUG
if (status == 0)
mv_eth_skb_save(skb, "recycle");
#endif /* ETH_SKB_DEBUG */
return 0;
}
/* printk(KERN_ERR "mv_eth_skb_recycle failed: pool=%d, pkt=%p, skb=%p\n", pkt->pool, pkt, skb); */
mvOsFree(pkt);
skb->hw_cookie = NULL;
STAT_DBG(pool->stats.skb_recycled_err++);
return 1;
}
EXPORT_SYMBOL(mv_eth_skb_recycle);
#endif /* CONFIG_NET_SKB_RECYCLE */
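/*
 * Allocate an skb of pool->pkt_size and bind it to the given eth_pbuf: when
 * the HW buffer manager is used, the eth_pbuf pointer is stored in the first
 * 4 bytes of the data buffer; pkt->physAddr is the DMA address returned by
 * the cache invalidate of the RX area.
 */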
static struct sk_buff *mv_eth_skb_alloc(struct bm_pool *pool, struct eth_pbuf *pkt)
{
struct sk_buff *skb;
skb = dev_alloc_skb(pool->pkt_size);
if (!skb) {
STAT_ERR(pool->stats.skb_alloc_oom++);
return NULL;
}
STAT_DBG(pool->stats.skb_alloc_ok++);
#ifdef ETH_SKB_DEBUG
mv_eth_skb_save(skb, "alloc");
#endif /* ETH_SKB_DEBUG */
#ifdef CONFIG_MV_ETH_BM_CPU
/* Save pkt as first 4 bytes in the buffer */
#if !defined(CONFIG_MV_ETH_BE_WA)
*((MV_U32 *) skb->head) = MV_32BIT_LE((MV_U32)pkt);
#else
*((MV_U32 *) skb->head) = (MV_U32)pkt;
#endif /* !CONFIG_MV_ETH_BE_WA */
mvOsCacheLineFlush(NULL, skb->head);
#endif /* CONFIG_MV_ETH_BM_CPU */
pkt->osInfo = (void *)skb;
pkt->pBuf = skb->head;
pkt->physAddr = mvOsCacheInvalidate(NULL, skb->head, RX_BUF_SIZE(pool->pkt_size));
pkt->offset = NET_SKB_PAD;
pkt->pool = pool->pool;
return skb;
}
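/*
 * Free one TX buffer according to its shadow queue entry: MV_ETH_SHADOW_SKB
 * marks an skb pointer, MV_ETH_SHADOW_EXT marks a buffer from the "extra"
 * pool (e.g. TSO headers), otherwise the entry is an eth_pbuf (NFP without
 * BM) that is returned to its BM pool or SW pool.
 */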
static inline void mv_eth_txq_buf_free(struct eth_port *pp, u32 shadow)
{
if (!shadow)
return;
if (shadow & MV_ETH_SHADOW_SKB) {
shadow &= ~MV_ETH_SHADOW_SKB;
dev_kfree_skb_any((struct sk_buff *)shadow);
STAT_DBG(pp->stats.tx_skb_free++);
} else {
if (shadow & MV_ETH_SHADOW_EXT) {
shadow &= ~MV_ETH_SHADOW_EXT;
mv_eth_extra_pool_put(pp, (void *)shadow);
} else {
/* packet from NFP without BM */
struct eth_pbuf *pkt = (struct eth_pbuf *)shadow;
struct bm_pool *pool = &mv_eth_pool[pkt->pool];
if (mv_eth_pool_bm(pool)) {
/* Refill BM pool */
STAT_DBG(pool->stats.bm_put++);
mvBmPoolPut(pkt->pool, (MV_ULONG) pkt->physAddr);
} else {
mv_eth_pool_put(pool, pkt);
}
}
}
}
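/*
 * Release all descriptors of a CPU-owned TXQ that the HW has not transmitted
 * yet: walk from the HW next-to-transmit index up to the shadow put index and
 * free the attached buffers.
 */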
static inline void mv_eth_txq_cpu_clean(struct eth_port *pp, struct tx_queue *txq_ctrl)
{
int hw_txq_i, last_txq_i, i, count;
u32 shadow;
hw_txq_i = mvNetaTxqNextIndexGet(pp->port, txq_ctrl->txp, txq_ctrl->txq);
last_txq_i = txq_ctrl->shadow_txq_put_i;
i = hw_txq_i;
count = 0;
while (i != last_txq_i) {
shadow = txq_ctrl->shadow_txq[i];
mv_eth_txq_buf_free(pp, shadow);
txq_ctrl->shadow_txq[i] = (u32)NULL;
i = MV_NETA_QUEUE_NEXT_DESC(&txq_ctrl->q->queueCtrl, i);
count++;
}
printk(KERN_INFO "\n%s: port=%d, txp=%d, txq=%d, mode=CPU\n",
__func__, pp->port, txq_ctrl->txp, txq_ctrl->txq);
printk(KERN_INFO "Free %d buffers: from desc=%d to desc=%d, tx_count=%d\n",
count, hw_txq_i, last_txq_i, txq_ctrl->txq_count);
}
static inline void mv_eth_txq_hwf_clean(struct eth_port *pp, struct tx_queue *txq_ctrl, int rx_port)
{
int pool = 0, hw_txq_i, last_txq_i, i, count;
struct neta_tx_desc *tx_desc;
hw_txq_i = mvNetaTxqNextIndexGet(pp->port, txq_ctrl->txp, txq_ctrl->txq);
if (mvNetaHwfTxqNextIndexGet(rx_port, pp->port, txq_ctrl->txp, txq_ctrl->txq, &last_txq_i) != MV_OK) {
printk(KERN_ERR "%s: mvNetaHwfTxqNextIndexGet failed\n", __func__);
return;
}
i = hw_txq_i;
count = 0;
while (i != last_txq_i) {
tx_desc = (struct neta_tx_desc *)MV_NETA_QUEUE_DESC_PTR(&txq_ctrl->q->queueCtrl, i);
if (mvNetaTxqDescIsValid(tx_desc)) {
mvNetaTxqDescInv(tx_desc);
mv_eth_tx_desc_flush(tx_desc);
pool = (tx_desc->command & NETA_TX_BM_POOL_ID_ALL_MASK) >> NETA_TX_BM_POOL_ID_OFFS;
mvBmPoolPut(pool, (MV_ULONG)tx_desc->bufPhysAddr);
count++;
}
i = MV_NETA_QUEUE_NEXT_DESC(&txq_ctrl->q->queueCtrl, i);
}
printk(KERN_DEBUG "\n%s: port=%d, txp=%d, txq=%d, mode=HWF-%d\n",
__func__, pp->port, txq_ctrl->txp, txq_ctrl->txq, rx_port);
printk(KERN_DEBUG "Free %d buffers to BM pool %d: from desc=%d to desc=%d\n",
count, pool, hw_txq_i, last_txq_i);
}
int mv_eth_txq_clean(int port, int txp, int txq)
{
int mode, rx_port;
struct eth_port *pp;
struct tx_queue *txq_ctrl;
if (mvNetaTxpCheck(port, txp))
return -EINVAL;
if (mvNetaMaxCheck(txq, CONFIG_MV_ETH_TXQ))
return -EINVAL;
pp = mv_eth_port_by_id(port);
if ((pp == NULL) || (pp->txq_ctrl == NULL))
return -ENODEV;
txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
mode = mv_eth_ctrl_txq_mode_get(pp->port, txq_ctrl->txp, txq_ctrl->txq, &rx_port);
if (mode == MV_ETH_TXQ_CPU)
mv_eth_txq_cpu_clean(pp, txq_ctrl);
else if (mode == MV_ETH_TXQ_HWF)
mv_eth_txq_hwf_clean(pp, txq_ctrl, rx_port);
/*
else
printk(KERN_ERR "%s: port=%d, txp=%d, txq=%d is not in use\n",
__func__, pp->port, txp, txq);
*/
return 0;
}
static inline void mv_eth_txq_bufs_free(struct eth_port *pp, struct tx_queue *txq_ctrl, int num)
{
u32 shadow;
int i;
/* Free buffers that were not freed automatically by BM */
for (i = 0; i < num; i++) {
shadow = txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_get_i];
mv_eth_shadow_inc_get(txq_ctrl);
mv_eth_txq_buf_free(pp, shadow);
}
}
inline u32 mv_eth_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl)
{
int tx_done;
tx_done = mvNetaTxqSentDescProc(pp->port, txq_ctrl->txp, txq_ctrl->txq);
if (!tx_done)
return tx_done;
/*
printk(KERN_ERR "tx_done: txq_count=%d, port=%d, txp=%d, txq=%d, tx_done=%d\n",
txq_ctrl->txq_count, pp->port, txq_ctrl->txp, txq_ctrl->txq, tx_done);
*/
if (!mv_eth_txq_bm(txq_ctrl))
mv_eth_txq_bufs_free(pp, txq_ctrl, tx_done);
txq_ctrl->txq_count -= tx_done;
STAT_DBG(txq_ctrl->stats.txq_txdone += tx_done);
return tx_done;
}
EXPORT_SYMBOL(mv_eth_txq_done);
inline struct eth_pbuf *mv_eth_pool_get(struct bm_pool *pool)
{
struct eth_pbuf *pkt = NULL;
struct sk_buff *skb;
unsigned long flags = 0;
MV_ETH_LOCK(&pool->lock, flags);
if (mvStackIndex(pool->stack) > 0) {
STAT_DBG(pool->stats.stack_get++);
pkt = (struct eth_pbuf *)mvStackPop(pool->stack);
} else
STAT_ERR(pool->stats.stack_empty++);
MV_ETH_UNLOCK(&pool->lock, flags);
if (pkt)
return pkt;
/* Try to allocate new pkt + skb */
pkt = mvOsMalloc(sizeof(struct eth_pbuf));
if (pkt) {
skb = mv_eth_skb_alloc(pool, pkt);
if (!skb) {
mvOsFree(pkt);
pkt = NULL;
}
}
return pkt;
}
/* Reuse pkt if possible, allocate new skb and move BM pool or RXQ ring */
inline int mv_eth_refill(struct eth_port *pp, int rxq,
struct eth_pbuf *pkt, struct bm_pool *pool, struct neta_rx_desc *rx_desc)
{
if (pkt == NULL) {
pkt = mv_eth_pool_get(pool);
if (pkt == NULL)
return 1;
} else {
struct sk_buff *skb;
/* No recycle - alloc new skb */
skb = mv_eth_skb_alloc(pool, pkt);
if (!skb) {
mvOsFree(pkt);
pool->missed++;
mv_eth_add_cleanup_timer(pp);
return 1;
}
}
mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
return 0;
}
EXPORT_SYMBOL(mv_eth_refill);
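/*
 * Build the L3/L4 checksum part of the TX descriptor command: for
 * CHECKSUM_PARTIAL IPv4/IPv6 packets the checksum is offloaded to HW via
 * mvNetaTxqDescCsum(); otherwise (or when TX csum offload is compiled out)
 * NETA_TX_L4_CSUM_NOT is returned and checksumming is left to software.
 */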
static inline MV_U32 mv_eth_skb_tx_csum(struct eth_port *pp, struct sk_buff *skb)
{
#ifdef CONFIG_MV_ETH_TX_CSUM_OFFLOAD
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
MV_U8 l4_proto;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
/* If not IPv4 - must be ETH_P_IPV6 - Calculate only L4 checksum */
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* TODO: l4_proto should be taken from the last IPv6 extension header when extension headers are present */
if (skb_network_header_len(skb) > 0)
ip_hdr_len = (skb_network_header_len(skb) >> 2);
l4_proto = ip6h->nexthdr;
} else {
STAT_DBG(pp->stats.tx_csum_sw++);
return NETA_TX_L4_CSUM_NOT;
}
STAT_DBG(pp->stats.tx_csum_hw++);
return mvNetaTxqDescCsum(skb_network_offset(skb), skb->protocol, ip_hdr_len, l4_proto);
}
#endif /* CONFIG_MV_ETH_TX_CSUM_OFFLOAD */
STAT_DBG(pp->stats.tx_csum_sw++);
return NETA_TX_L4_CSUM_NOT;
}
#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
inline struct neta_rx_desc *mv_eth_rx_prefetch(struct eth_port *pp, MV_NETA_RXQ_CTRL *rx_ctrl,
int rx_done, int rx_todo)
{
struct neta_rx_desc *rx_desc, *next_desc;
rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
if (rx_done == 0) {
/* First descriptor in the NAPI loop */
mvOsCacheLineInv(NULL, rx_desc);
prefetch(rx_desc);
}
if ((rx_done + 1) == rx_todo) {
/* Last descriptor in the NAPI loop - prefetch are not needed */
return rx_desc;
}
/* Prefetch next descriptor */
next_desc = mvNetaRxqDescGet(rx_ctrl);
mvOsCacheLineInv(NULL, next_desc);
prefetch(next_desc);
return rx_desc;
}
#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
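/*
 * RX processing loop (called from NAPI poll): for each received descriptor,
 * check for errors, hand the packet to the special-RX hook / NFP / GRO /
 * netif_receive_skb() as appropriate, and refill the descriptor with a new
 * buffer; finally update the RXQ occupied/non-occupied counters.
 */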
static inline int mv_eth_rx(struct eth_port *pp, int rx_todo, int rxq)
{
struct net_device *dev;
MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
int rx_done, rx_filled, err;
struct neta_rx_desc *rx_desc;
u32 rx_status;
int rx_bytes;
struct eth_pbuf *pkt;
struct sk_buff *skb;
struct bm_pool *pool;
/* Get number of received packets */
rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
mvOsCacheIoSync();
if (rx_todo > rx_done)
rx_todo = rx_done;
rx_done = 0;
rx_filled = 0;
/* Fairness NAPI loop */
while (rx_done < rx_todo) {
#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
rx_desc = mv_eth_rx_prefetch(pp, rx_ctrl, rx_done, rx_todo);
#else
rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
mvOsCacheLineInv(NULL, rx_desc);
prefetch(rx_desc);
#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
rx_done++;
rx_filled++;
#if defined(MV_CPU_BE)
mvNetaRxqDescSwap(rx_desc);
#endif /* MV_CPU_BE */
#ifdef CONFIG_MV_ETH_DEBUG_CODE
if (pp->flags & MV_ETH_F_DBG_RX) {
printk(KERN_ERR "\n%s: port=%d, cpu=%d\n", __func__, pp->port, smp_processor_id());
mv_eth_rx_desc_print(rx_desc);
}
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
rx_status = rx_desc->status;
pkt = (struct eth_pbuf *)rx_desc->bufCookie;
pool = &mv_eth_pool[pkt->pool];
if (((rx_status & NETA_RX_FL_DESC_MASK) != NETA_RX_FL_DESC_MASK) ||
(rx_status & NETA_RX_ES_MASK)) {
mv_eth_rx_error(pp, rx_desc);
mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
continue;
}
/* Speculative ICache prefetch WA: should be replaced with dma_unmap_single (invalidate l2) */
mvOsCacheMultiLineInv(NULL, pkt->pBuf + pkt->offset, rx_desc->dataSize);
#ifdef CONFIG_MV_ETH_RX_PKT_PREFETCH
prefetch(pkt->pBuf + pkt->offset);
prefetch(pkt->pBuf + pkt->offset + CPU_D_CACHE_LINE_SIZE);
#endif /* CONFIG_MV_ETH_RX_PKT_PREFETCH */
#ifdef CONFIG_MV_ETH_SWITCH
dev = mv_eth_switch_netdev_get(pp, pkt);
#else
dev = pp->dev;
#endif /* CONFIG_MV_ETH_SWITCH */
#ifdef CONFIG_MV_ETH_6601_LB_WA
/* This workaround is for the 6601 w/ loopback.
All RX traffic originating from GMAC1 is treated as if coming from the GMAC0 netdev */
if (pp->port == 1) {
struct eth_port *new_pp = mv_eth_port_by_id(0);
dev = new_pp->dev;
}
#endif
STAT_DBG(pp->stats.rxq[rxq]++);
dev->stats.rx_packets++;
rx_bytes = rx_desc->dataSize - (MV_ETH_CRC_SIZE + MV_ETH_MH_SIZE);
dev->stats.rx_bytes += rx_bytes;
#ifndef CONFIG_MV_ETH_PNC
/* Update IP offset and IP header len in RX descriptor */
if (NETA_RX_L3_IS_IP4(rx_desc->status)) {
int ip_offset;
if ((rx_desc->status & ETH_RX_VLAN_TAGGED_FRAME_MASK))
ip_offset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER) + MV_VLAN_HLEN;
else
ip_offset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER);
NETA_RX_SET_IPHDR_OFFSET(rx_desc, ip_offset);
NETA_RX_SET_IPHDR_HDRLEN(rx_desc, 5);
}
#endif /* !CONFIG_MV_ETH_PNC */
#ifdef CONFIG_MV_ETH_DEBUG_CODE
if (pp->flags & MV_ETH_F_DBG_RX)
mvDebugMemDump(pkt->pBuf + pkt->offset, 64, 1);
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
#if defined(CONFIG_MV_ETH_PNC) && defined(CONFIG_MV_ETH_RX_SPECIAL)
/* Special RX processing */
#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
if ((MV_PON_PORT_ID == pp->port) || (rx_desc->pncInfo & NETA_PNC_RX_SPECIAL)) {
#else
if (rx_desc->pncInfo & NETA_PNC_RX_SPECIAL) {
#endif
if (pp->rx_special_proc) {
pp->rx_special_proc(pp->port, rxq, dev, pkt, rx_desc);
STAT_INFO(pp->stats.rx_special++);
if (rx_desc->pncInfo & NETA_PNC_RX_SPECIAL) {
/* Refill processing */
err = mv_eth_refill(pp, rxq, pkt, pool, rx_desc);
if (err) {
printk(KERN_ERR "Linux processing - Can't refill\n");
pp->rxq_ctrl[rxq].missed++;
rx_filled--;
}
continue;
}
}
}
#endif /* CONFIG_MV_ETH_PNC && CONFIG_MV_ETH_RX_SPECIAL */
#if defined(CONFIG_MV_ETH_NFP) || defined(CONFIG_MV_ETH_NFP_MODULE)
if (pp->flags & MV_ETH_F_NFP_EN) {
MV_STATUS status;
pkt->bytes = rx_bytes + MV_ETH_MH_SIZE;
pkt->offset = NET_SKB_PAD;
status = nfpHookMgr->mv_eth_nfp(pp, rxq, rx_desc, pkt, pool);
if (status == MV_OK)
continue;
if (status == MV_FAIL) {
rx_filled--;
continue;
}
/* MV_TERMINATE - packet returned to slow path */
}
#endif /* CONFIG_MV_ETH_NFP || CONFIG_MV_ETH_NFP_MODULE */
/* Linux processing */
skb = (struct sk_buff *)(pkt->osInfo);
skb->data += MV_ETH_MH_SIZE;
skb->tail += (rx_bytes + MV_ETH_MH_SIZE);
skb->len = rx_bytes;
#ifdef ETH_SKB_DEBUG
mv_eth_skb_check(skb);
#endif /* ETH_SKB_DEBUG */
skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_SKB_RECYCLE
if (mv_eth_is_recycle()) {
skb->skb_recycle = mv_eth_skb_recycle;
skb->hw_cookie = pkt;
pkt = NULL;
}
#endif /* CONFIG_NET_SKB_RECYCLE */
if (skb)
mv_eth_rx_csum(pp, rx_desc, skb);
#ifdef CONFIG_MV_ETH_GRO
if (skb && (dev->features & NETIF_F_GRO)) {
STAT_DBG(pp->stats.rx_gro++);
STAT_DBG(pp->stats.rx_gro_bytes += skb->len);
rx_status = napi_gro_receive(pp->napi[smp_processor_id()], skb);
skb = NULL;
}
#endif /* CONFIG_MV_ETH_GRO */
#if defined(CONFIG_MV_ETH_PNC) && defined(CONFIG_MV_MAC_LEARN)
if (skb && (rx_desc->pncInfo & NETA_PNC_MAC_LEARN)) {
if (pp->rx_mc_mac_learn) {
pp->rx_mc_mac_learn(pp->port, rxq, dev, skb, rx_desc);
STAT_INFO(pp->stats.rx_mac_learn++);
}
}
#endif /* CONFIG_MV_ETH_PNC && CONFIG_MV_MAC_LEARN */
if (skb) {
STAT_DBG(pp->stats.rx_netif++);
rx_status = netif_receive_skb(skb);
STAT_DBG((rx_status == 0) ? : pp->stats.rx_drop_sw++);
}
/* Refill processing: */
err = mv_eth_refill(pp, rxq, pkt, pool, rx_desc);
if (err) {
printk(KERN_ERR "Linux processing - Can't refill\n");
pp->rxq_ctrl[rxq].missed++;
mv_eth_add_cleanup_timer(pp);
rx_filled--;
}
}
/* Update RxQ management counters */
mvOsCacheIoSync();
mvNetaRxqDescNumUpdate(pp->port, rxq, rx_done, rx_filled);
return rx_done;
}
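/*
 * ndo_start_xmit callback: choose txp/txq (possibly via the special-TX hook),
 * optionally prepend the Marvell Header, fill one TX descriptor per skb
 * fragment (TSO skbs go through mv_eth_tx_tso()), and trigger transmission.
 * TX-done processing is performed inline once the queue holds at least
 * mv_ctrl_txdone descriptors (unless TX-done interrupts are used).
 */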
static int mv_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
struct eth_port *pp = MV_ETH_PRIV(dev);
struct eth_netdev *dev_priv = MV_DEV_PRIV(dev);
int frags = 0;
bool tx_spec_ready = false;
struct mv_eth_tx_spec tx_spec;
u32 tx_cmd;
u16 mh;
struct tx_queue *txq_ctrl = NULL;
struct neta_tx_desc *tx_desc;
read_lock(&pp->rwlock);
if (!(netif_running(dev))) {
printk(KERN_ERR "!netif_running() in %s\n", __func__);
goto out;
}
#if defined(CONFIG_MV_ETH_TX_SPECIAL)
if (pp->tx_special_check) {
if (pp->tx_special_check(pp->port, dev, skb, &tx_spec)) {
STAT_INFO(pp->stats.tx_special++);
if (tx_spec.tx_func) {
tx_spec.tx_func(skb->data, skb->len, &tx_spec);
goto out;
} else {
/* Check validity of tx_spec: txp/txq must be CPU owned */
tx_spec_ready = true;
}
}
}
#endif /* CONFIG_MV_ETH_TX_SPECIAL */
/* Get TXQ (without BM) to send packet generated by Linux */
if (tx_spec_ready == false) {
tx_spec.txp = pp->txp;
tx_spec.txq = mv_eth_tx_policy(pp, skb);
tx_spec.hw_cmd = pp->hw_cmd;
tx_spec.flags = pp->flags;
}
if (tx_spec.txq == MV_ETH_TXQ_INVALID)
goto out;
txq_ctrl = &pp->txq_ctrl[tx_spec.txp * CONFIG_MV_ETH_TXQ + tx_spec.txq];
if (txq_ctrl == NULL) {
printk(KERN_ERR "%s: invalidate txp/txq (%d/%d)\n", __func__, tx_spec.txp, tx_spec.txq);
goto out;
}
spin_lock(&txq_ctrl->queue_lock);
#ifdef CONFIG_MV_ETH_TSO
/* GSO/TSO */
if (skb_is_gso(skb)) {
frags = mv_eth_tx_tso(skb, dev, &tx_spec, txq_ctrl);
goto out;
}
#endif /* CONFIG_MV_ETH_TSO */
frags = skb_shinfo(skb)->nr_frags + 1;
if (tx_spec.flags & MV_ETH_F_MH) {
if (tx_spec.flags & MV_ETH_F_SWITCH)
mh = dev_priv->tx_vlan_mh;
else
mh = pp->tx_mh;
if (mv_eth_skb_mh_add(skb, mh)) {
frags = 0;
goto out;
}
}
tx_desc = mv_eth_tx_desc_get(txq_ctrl, frags);
if (tx_desc == NULL) {
frags = 0;
goto out;
}
/* Don't use BM for Linux packets: NETA_TX_BM_ENABLE_MASK = 0 */
/* NETA_TX_PKT_OFFSET_MASK = 0 - for all descriptors */
tx_cmd = mv_eth_skb_tx_csum(pp, skb);
#ifdef CONFIG_MV_PON
tx_desc->hw_cmd = tx_spec.hw_cmd;
#endif
/* FIXME: beware of nonlinear --BK */
tx_desc->dataSize = skb_headlen(skb);
tx_desc->bufPhysAddr = mvOsCacheFlush(NULL, skb->data, tx_desc->dataSize);
if (frags == 1) {
/*
* First and Last descriptor
*/
if (tx_spec.flags & MV_ETH_F_NO_PAD)
tx_cmd |= NETA_TX_F_DESC_MASK | NETA_TX_L_DESC_MASK;
else
tx_cmd |= NETA_TX_FLZ_DESC_MASK;
tx_desc->command = tx_cmd;
mv_eth_tx_desc_flush(tx_desc);
txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
mv_eth_shadow_inc_put(txq_ctrl);
} else {
/* First but not Last */
tx_cmd |= NETA_TX_F_DESC_MASK;
txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = 0;
mv_eth_shadow_inc_put(txq_ctrl);
tx_desc->command = tx_cmd;
mv_eth_tx_desc_flush(tx_desc);
/* Continue with other skb fragments */
mv_eth_tx_frag_process(pp, skb, txq_ctrl, tx_spec.flags);
STAT_DBG(pp->stats.tx_sg++);
}
/*
printk(KERN_ERR "tx: frags=%d, tx_desc[0x0]=%x [0xc]=%x, wr_id=%d, rd_id=%d, skb=%p\n",
frags, tx_desc->command,tx_desc->hw_cmd,
txq_ctrl->shadow_txq_put_i, txq_ctrl->shadow_txq_get_i, skb);
*/
txq_ctrl->txq_count += frags;
#ifdef CONFIG_MV_ETH_DEBUG_CODE
if (pp->flags & MV_ETH_F_DBG_TX) {
printk(KERN_ERR "\n");
printk(KERN_ERR "%s - eth_tx_%lu: port=%d, txp=%d, txq=%d, skb=%p, head=%p, data=%p, size=%d\n",
dev->name, dev->stats.tx_packets, pp->port, tx_spec.txp, tx_spec.txq, skb,
skb->head, skb->data, skb->len);
mv_eth_tx_desc_print(tx_desc);
/*mv_eth_skb_print(skb);*/
mvDebugMemDump(skb->data, 64, 1);
}
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
#ifdef CONFIG_MV_PON
if (MV_PON_PORT(pp->port))
mvNetaPonTxqBytesAdd(pp->port, tx_spec.txp, tx_spec.txq, skb->len);
#endif /* CONFIG_MV_PON */
/* Enable transmit */
mvNetaTxqPendDescAdd(pp->port, tx_spec.txp, tx_spec.txq, frags);
STAT_DBG(txq_ctrl->stats.txq_tx += frags);
out:
if (frags > 0) {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
}
#ifndef CONFIG_MV_ETH_TXDONE_ISR
if (txq_ctrl) {
if (txq_ctrl->txq_count >= mv_ctrl_txdone) {
u32 tx_done = mv_eth_txq_done(pp, txq_ctrl);
STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
}
/* If after calling mv_eth_txq_done, txq_ctrl->txq_count equals frags, we need to set the timer */
if ((txq_ctrl->txq_count == frags) && (frags > 0))
mv_eth_add_tx_done_timer(pp);
}
#endif /* CONFIG_MV_ETH_TXDONE_ISR */
if (txq_ctrl)
spin_unlock(&txq_ctrl->queue_lock);
read_unlock(&pp->rwlock);
return NETDEV_TX_OK;
}
#ifdef CONFIG_MV_ETH_TSO
/* Validate TSO */
static inline int mv_eth_tso_validate(struct sk_buff *skb, struct net_device *dev)
{
if (!(dev->features & NETIF_F_TSO)) {
printk(KERN_ERR "error: (skb_is_gso(skb) returns true but features is not NETIF_F_TSO\n");
return 1;
}
if (skb_shinfo(skb)->frag_list != NULL) {
printk(KERN_ERR "***** ERROR: frag_list is not null\n");
return 1;
}
if (skb_shinfo(skb)->gso_segs == 1) {
printk(KERN_ERR "***** ERROR: only one TSO segment\n");
return 1;
}
if (skb->len <= skb_shinfo(skb)->gso_size) {
printk(KERN_ERR "***** ERROR: total_len (%d) less than gso_size (%d)\n", skb->len, skb_shinfo(skb)->gso_size);
return 1;
}
if ((htons(ETH_P_IP) != skb->protocol) || (ip_hdr(skb)->protocol != IPPROTO_TCP) || (tcp_hdr(skb) == NULL)) {
printk(KERN_ERR "***** ERROR: Protocol is not TCP over IP\n");
return 1;
}
return 0;
}
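/*
 * Build the header descriptor of one TSO segment: copy the MAC/IP/TCP headers
 * into a buffer from the "extra" pool, patch the IP id/tot_len and TCP
 * sequence number, clear PSH/FIN/RST on all but the last segment, and
 * optionally prepend the Marvell Header.
 */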
static inline int mv_eth_tso_build_hdr_desc(struct neta_tx_desc *tx_desc, struct eth_port *priv, struct sk_buff *skb,
struct tx_queue *txq_ctrl, u16 *mh, int hdr_len, int size,
MV_U32 tcp_seq, MV_U16 ip_id, int left_len)
{
struct iphdr *iph;
struct tcphdr *tcph;
MV_U8 *data, *mac;
int mac_hdr_len = skb_network_offset(skb);
data = mv_eth_extra_pool_get(priv);
if (!data)
return 0;
txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG)data | MV_ETH_SHADOW_EXT);
/* Reserve 2 bytes for IP header alignment */
mac = data + MV_ETH_MH_SIZE;
iph = (struct iphdr *)(mac + mac_hdr_len);
memcpy(mac, skb->data, hdr_len);
if (iph) {
iph->id = htons(ip_id);
iph->tot_len = htons(size + hdr_len - mac_hdr_len);
}
tcph = (struct tcphdr *)(mac + skb_transport_offset(skb));
tcph->seq = htonl(tcp_seq);
if (left_len) {
/* Clear all special flags for not last packet */
tcph->psh = 0;
tcph->fin = 0;
tcph->rst = 0;
}
if (mh) {
/* Start transmit from MH - add 2 bytes to size */
*((MV_U16 *)data) = *mh;
/* increment ip_offset field in TX descriptor by 2 bytes */
mac_hdr_len += MV_ETH_MH_SIZE;
hdr_len += MV_ETH_MH_SIZE;
} else {
/* Start transmit from MAC */
data = mac;
}
tx_desc->dataSize = hdr_len;
tx_desc->command = mvNetaTxqDescCsum(mac_hdr_len, skb->protocol, ((u8 *)tcph - (u8 *)iph) >> 2, IPPROTO_TCP);
tx_desc->command |= NETA_TX_F_DESC_MASK;
tx_desc->bufPhysAddr = mvOsCacheFlush(NULL, data, tx_desc->dataSize);
mv_eth_shadow_inc_put(txq_ctrl);
mv_eth_tx_desc_flush(tx_desc);
return hdr_len;
}
static inline int mv_eth_tso_build_data_desc(struct neta_tx_desc *tx_desc, struct sk_buff *skb,
struct tx_queue *txq_ctrl, char *frag_ptr,
int frag_size, int data_left, int total_left)
{
int size;
size = MV_MIN(frag_size, data_left);
tx_desc->dataSize = size;
tx_desc->bufPhysAddr = mvOsCacheFlush(NULL, frag_ptr, size);
tx_desc->command = 0;
txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = 0;
if (size == data_left) {
/* last descriptor in the TCP packet */
tx_desc->command = NETA_TX_L_DESC_MASK;
if (total_left == 0) {
/* last descriptor in SKB */
txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
}
}
mv_eth_shadow_inc_put(txq_ctrl);
mv_eth_tx_desc_flush(tx_desc);
return size;
}
/***********************************************************
 * mv_eth_tx_tso --                                         *
 *   Transmit a GSO/TSO skb: build a header descriptor and  *
 *   data descriptors for each TCP segment of the skb.      *
 ***********************************************************/
int mv_eth_tx_tso(struct sk_buff *skb, struct net_device *dev,
struct mv_eth_tx_spec *tx_spec, struct tx_queue *txq_ctrl)
{
int frag = 0;
int total_len, hdr_len, size, frag_size, data_left;
char *frag_ptr;
int totalDescNum, totalBytes = 0;
struct neta_tx_desc *tx_desc;
MV_U16 ip_id;
MV_U32 tcp_seq = 0;
skb_frag_t *skb_frag_ptr;
const struct tcphdr *th = tcp_hdr(skb);
struct eth_port *priv = MV_ETH_PRIV(dev);
struct eth_netdev *dev_priv = MV_DEV_PRIV(dev);
MV_U16 *mh = NULL;
int i;
STAT_DBG(priv->stats.tx_tso++);
/*
printk(KERN_ERR "mv_eth_tx_tso_%d ENTER: skb=%p, total_len=%d\n", priv->stats.tx_tso, skb, skb->len);
*/
if (mv_eth_tso_validate(skb, dev))
return 0;
/* Calculate expected number of TX descriptors */
totalDescNum = skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
if ((txq_ctrl->txq_count + totalDescNum) >= txq_ctrl->txq_size) {
/*
printk(KERN_ERR "%s: no TX descriptors - txq_count=%d, len=%d, nr_frags=%d, gso_segs=%d\n",
__func__, txq_ctrl->txq_count, skb->len, skb_shinfo(skb)->nr_frags,
skb_shinfo(skb)->gso_segs);
*/
STAT_ERR(txq_ctrl->stats.txq_err++);
return 0;
}
total_len = skb->len;
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
total_len -= hdr_len;
ip_id = ntohs(ip_hdr(skb)->id);
tcp_seq = ntohl(th->seq);
frag_size = skb_headlen(skb);
frag_ptr = skb->data;
if (frag_size < hdr_len) {
printk(KERN_ERR "***** ERROR: frag_size=%d, hdr_len=%d\n", frag_size, hdr_len);
return 0;
}
frag_size -= hdr_len;
frag_ptr += hdr_len;
if (frag_size == 0) {
skb_frag_ptr = &skb_shinfo(skb)->frags[frag];
/* Move to next segment */
frag_size = skb_frag_ptr->size;
frag_ptr = page_address(skb_frag_ptr->page) + skb_frag_ptr->page_offset;
frag++;
}
totalDescNum = 0;
while (total_len > 0) {
data_left = MV_MIN(skb_shinfo(skb)->gso_size, total_len);
tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
if (tx_desc == NULL)
goto outNoTxDesc;
totalDescNum++;
total_len -= data_left;
txq_ctrl->txq_count++;
if (tx_spec->flags & MV_ETH_F_MH) {
if (tx_spec->flags & MV_ETH_F_SWITCH)
mh = &dev_priv->tx_vlan_mh;
else
mh = &priv->tx_mh;
}
/* prepare packet headers: MAC + IP + TCP */
size = mv_eth_tso_build_hdr_desc(tx_desc, priv, skb, txq_ctrl, mh,
hdr_len, data_left, tcp_seq, ip_id, total_len);
if (size == 0)
goto outNoTxDesc;
totalBytes += size;
/*
printk(KERN_ERR "Header desc: tx_desc=%p, skb=%p, hdr_len=%d, data_left=%d\n",
tx_desc, skb, hdr_len, data_left);
*/
ip_id++;
while (data_left > 0) {
tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
if (tx_desc == NULL)
goto outNoTxDesc;
totalDescNum++;
txq_ctrl->txq_count++;
size = mv_eth_tso_build_data_desc(tx_desc, skb, txq_ctrl,
frag_ptr, frag_size, data_left, total_len);
totalBytes += size;
/*
printk(KERN_ERR "Data desc: tx_desc=%p, skb=%p, size=%d, frag_size=%d, data_left=%d\n",
tx_desc, skb, size, frag_size, data_left);
*/
data_left -= size;
tcp_seq += size;
frag_size -= size;
frag_ptr += size;
if ((frag_size == 0) && (frag < skb_shinfo(skb)->nr_frags)) {
skb_frag_ptr = &skb_shinfo(skb)->frags[frag];
/* Move to next segment */
frag_size = skb_frag_ptr->size;
frag_ptr = page_address(skb_frag_ptr->page) + skb_frag_ptr->page_offset;
frag++;
}
} /* of while data_left > 0 */
} /* of while (total_len > 0) */
#ifdef CONFIG_MV_PON
if (MV_PON_PORT(priv->port))
mvNetaPonTxqBytesAdd(priv->port, txq_ctrl->txp, txq_ctrl->txq, totalBytes);
#endif /* CONFIG_MV_PON */
STAT_DBG(priv->stats.tx_tso_bytes += totalBytes);
STAT_DBG(txq_ctrl->stats.txq_tx += totalDescNum);
mvNetaTxqPendDescAdd(priv->port, txq_ctrl->txp, txq_ctrl->txq, totalDescNum);
/*
printk(KERN_ERR "mv_eth_tx_tso EXIT: totalDescNum=%d\n", totalDescNum);
*/
return totalDescNum;
outNoTxDesc:
/* Not enough TX descriptors for the whole skb - rollback */
printk(KERN_ERR "%s: No TX descriptors - rollback %d, txq_count=%d, nr_frags=%d, skb=%p, len=%d, gso_segs=%d\n",
__func__, totalDescNum, txq_ctrl->txq_count, skb_shinfo(skb)->nr_frags,
skb, skb->len, skb_shinfo(skb)->gso_segs);
for (i = 0; i < totalDescNum; i++) {
txq_ctrl->txq_count--;
mv_eth_shadow_dec_put(txq_ctrl);
mvNetaTxqPrevDescGet(txq_ctrl->q);
}
return 0;
}
#endif /* CONFIG_MV_ETH_TSO */
/* Drop packets received by the RXQ and free buffers */
static void mv_eth_rxq_drop_pkts(struct eth_port *pp, int rxq)
{
struct neta_rx_desc *rx_desc;
struct eth_pbuf *pkt;
struct bm_pool *pool;
int rx_done, i;
MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
if (rx_ctrl == NULL)
return;
rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
mvOsCacheIoSync();
for (i = 0; i < rx_done; i++) {
rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
mvOsCacheLineInv(NULL, rx_desc);
#if defined(MV_CPU_BE)
mvNetaRxqDescSwap(rx_desc);
#endif /* MV_CPU_BE */
pkt = (struct eth_pbuf *)rx_desc->bufCookie;
pool = &mv_eth_pool[pkt->pool];
mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
}
if (rx_done) {
mvOsCacheIoSync();
mvNetaRxqDescNumUpdate(pp->port, rxq, rx_done, rx_done);
}
}
static void mv_eth_txq_done_force(struct eth_port *pp, struct tx_queue *txq_ctrl)
{
int tx_done = txq_ctrl->txq_count;
mv_eth_txq_bufs_free(pp, txq_ctrl, tx_done);
txq_ctrl->txq_count -= tx_done;
STAT_DBG(txq_ctrl->stats.txq_txdone += tx_done);
}
inline u32 mv_eth_tx_done_pon(struct eth_port *pp, int *tx_todo)
{
int txp, txq;
struct tx_queue *txq_ctrl;
u32 tx_done = 0;
*tx_todo = 0;
STAT_INFO(pp->stats.tx_done++);
/* simply go over all TX ports and TX queues */
txp = pp->txp_num;
while (txp--) {
txq = CONFIG_MV_ETH_TXQ;
while (txq--) {
txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
spin_lock(&txq_ctrl->queue_lock);
if ((txq_ctrl) && (txq_ctrl->txq_count)) {
tx_done += mv_eth_txq_done(pp, txq_ctrl);
*tx_todo += txq_ctrl->txq_count;
}
spin_unlock(&txq_ctrl->queue_lock);
}
}
STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
return tx_done;
}
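/* tx-done processing for GbE ports: reclaim completed descriptors of every TX queue indicated in cause_tx_done and return their number; *tx_todo returns the number of descriptors still pending */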
inline u32 mv_eth_tx_done_gbe(struct eth_port *pp, u32 cause_tx_done, int *tx_todo)
{
int txp, txq;
struct tx_queue *txq_ctrl;
u32 tx_done = 0;
*tx_todo = 0;
STAT_INFO(pp->stats.tx_done++);
while (cause_tx_done != 0) {
/* For GbE ports, a TX Buffers Threshold Cross indication is reported per queue in bits [7:0] */
txp = pp->txp_num; /* 1 for GbE ports */
while (txp--) {
txq = mv_eth_tx_done_policy(cause_tx_done);
if (txq == -1)
break;
txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
spin_lock(&txq_ctrl->queue_lock);
if ((txq_ctrl) && (txq_ctrl->txq_count)) {
tx_done += mv_eth_txq_done(pp, txq_ctrl);
*tx_todo += txq_ctrl->txq_count;
}
spin_unlock(&txq_ctrl->queue_lock);
cause_tx_done &= ~((1 << txq) << NETA_CAUSE_TXQ_SENT_DESC_OFFS);
}
}
STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
return tx_done;
}
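/* Build one TX descriptor per SKB fragment: flush the fragment data to DRAM, mark the last descriptor as Last (with optional zero padding) and save the skb in the shadow queue so it can be freed on tx-done */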
static void mv_eth_tx_frag_process(struct eth_port *pp, struct sk_buff *skb, struct tx_queue *txq_ctrl, u16 flags)
{
int i;
struct neta_tx_desc *tx_desc;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tx_desc = mvNetaTxqNextDescGet(txq_ctrl->q);
/* NETA_TX_BM_ENABLE_MASK = 0 */
/* NETA_TX_PKT_OFFSET_MASK = 0 */
tx_desc->dataSize = frag->size;
tx_desc->bufPhysAddr = mvOsCacheFlush(NULL, page_address(frag->page) + frag->page_offset,
tx_desc->dataSize);
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
if (flags & MV_ETH_F_NO_PAD)
tx_desc->command = NETA_TX_L_DESC_MASK;
else
tx_desc->command = (NETA_TX_L_DESC_MASK | NETA_TX_Z_PAD_MASK);
txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
mv_eth_shadow_inc_put(txq_ctrl);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = 0;
mv_eth_shadow_inc_put(txq_ctrl);
}
mv_eth_tx_desc_flush(tx_desc);
}
}
/* Free "num" buffers from the pool */
static int mv_eth_pool_free(int pool, int num)
{
struct eth_pbuf *pkt;
int i = 0;
struct bm_pool *ppool = &mv_eth_pool[pool];
unsigned long flags = 0;
bool free_all = false;
MV_ETH_LOCK(&ppool->lock, flags);
if (num >= ppool->buf_num) {
/* Free all buffers from the pool */
free_all = true;
num = ppool->buf_num;
}
#ifdef CONFIG_MV_ETH_BM_CPU
if (mv_eth_pool_bm(ppool)) {
if (free_all)
mvBmConfigSet(MV_BM_EMPTY_LIMIT_MASK);
while (i < num) {
MV_U32 *va;
MV_U32 pa = mvBmPoolGet(pool);
if (pa == 0)
break;
va = phys_to_virt(pa);
pkt = (struct eth_pbuf *)*va;
#if !defined(CONFIG_MV_ETH_BE_WA)
pkt = (struct eth_pbuf *)MV_32BIT_LE((MV_U32)pkt);
#endif /* !CONFIG_MV_ETH_BE_WA */
if (pkt) {
mv_eth_pkt_free(pkt);
#ifdef ETH_SKB_DEBUG
mv_eth_skb_check((struct sk_buff *)pkt->osInfo);
#endif /* ETH_SKB_DEBUG */
}
i++;
}
printk(KERN_ERR "bm pool #%d: pkt_size=%d, buf_size=%d - %d of %d buffers free\n",
pool, ppool->pkt_size, RX_BUF_SIZE(ppool->pkt_size), i, num);
if (free_all)
mvBmConfigClear(MV_BM_EMPTY_LIMIT_MASK);
}
#endif /* CONFIG_MV_ETH_BM_CPU */
ppool->buf_num -= num;
/* Free buffers from the pool stack too */
if (free_all)
num = mvStackIndex(ppool->stack);
else if (mv_eth_pool_bm(ppool))
num = 0;
i = 0;
while (i < num) {
/* sanity check */
if (mvStackIndex(ppool->stack) == 0) {
printk(KERN_ERR "%s: No more buffers in the stack\n", __func__);
break;
}
pkt = (struct eth_pbuf *)mvStackPop(ppool->stack);
if (pkt) {
mv_eth_pkt_free(pkt);
#ifdef ETH_SKB_DEBUG
mv_eth_skb_check((struct sk_buff *)pkt->osInfo);
#endif /* ETH_SKB_DEBUG */
}
i++;
}
if (i > 0)
printk(KERN_ERR "stack pool #%d: pkt_size=%d, buf_size=%d - %d of %d buffers free\n",
pool, ppool->pkt_size, RX_BUF_SIZE(ppool->pkt_size), i, num);
MV_ETH_UNLOCK(&ppool->lock, flags);
return i;
}
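/* Free all buffers of the pool, delete its SW stack and, when HW BM is used, disable the BM pool */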
static int mv_eth_pool_destroy(int pool)
{
int num, status = 0;
struct bm_pool *ppool = &mv_eth_pool[pool];
num = mv_eth_pool_free(pool, ppool->buf_num);
if (num != ppool->buf_num) {
printk(KERN_ERR "Warning: could not free all buffers in pool %d while destroying pool\n", pool);
return MV_ERROR;
}
status = mvStackDelete(ppool->stack);
#ifdef CONFIG_MV_ETH_BM_CPU
mvBmPoolDisable(pool);
/* Note: we don't free the bm_pool here ! */
if (ppool->bm_pool)
mvOsFree(ppool->bm_pool);
#endif /* CONFIG_MV_ETH_BM_CPU */
memset(ppool, 0, sizeof(struct bm_pool));
return status;
}
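/* Allocate buf_num buffers (skb + eth_pbuf descriptor) and add them to the pool: to the HW BM pool when CONFIG_MV_ETH_BM_CPU is set, otherwise to the pool's SW stack; returns the number of buffers actually added */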
static int mv_eth_pool_add(int pool, int buf_num)
{
struct bm_pool *bm_pool;
struct sk_buff *skb;
struct eth_pbuf *pkt;
int i;
unsigned long flags = 0;
if ((pool < 0) || (pool >= MV_ETH_BM_POOLS)) {
printk(KERN_ERR "%s: invalid pool number %d\n", __func__, pool);
return 0;
}
bm_pool = &mv_eth_pool[pool];
/* Check buffer size */
if (bm_pool->pkt_size == 0) {
printk(KERN_ERR "%s: invalid pool #%d state: pkt_size=%d, buf_size=%d, buf_num=%d\n",
__func__, pool, bm_pool->pkt_size, RX_BUF_SIZE(bm_pool->pkt_size), bm_pool->buf_num);
return 0;
}
/* Ensure buf_num does not exceed the remaining pool capacity */
if ((buf_num < 0) || ((buf_num + bm_pool->buf_num) > (bm_pool->capacity))) {
printk(KERN_ERR "%s: can't add %d buffers into bm_pool=%d: capacity=%d, buf_num=%d\n",
__func__, buf_num, pool, bm_pool->capacity, bm_pool->buf_num);
return 0;
}
MV_ETH_LOCK(&bm_pool->lock, flags);
for (i = 0; i < buf_num; i++) {
pkt = mvOsMalloc(sizeof(struct eth_pbuf));
if (!pkt) {
printk(KERN_ERR "%s: can't allocate %d bytes\n", __func__, sizeof(struct eth_pbuf));
break;
}
skb = mv_eth_skb_alloc(bm_pool, pkt);
if (!skb) {
kfree(pkt);
break;
}
/*
printk(KERN_ERR "skb_alloc_%d: pool=%d, skb=%p, pkt=%p, head=%p (%lx), skb->truesize=%d\n",
i, bm_pool->pool, skb, pkt, pkt->pBuf, pkt->physAddr, skb->truesize);
*/
#ifdef CONFIG_MV_ETH_BM_CPU
mvBmPoolPut(pool, (MV_ULONG) pkt->physAddr);
STAT_DBG(bm_pool->stats.bm_put++);
#else
mvStackPush(bm_pool->stack, (MV_U32) pkt);
STAT_DBG(bm_pool->stats.stack_put++);
#endif /* CONFIG_MV_ETH_BM_CPU */
}
bm_pool->buf_num += i;
printk(KERN_ERR "pool #%d: pkt_size=%d, buf_size=%d - %d of %d buffers added\n",
pool, bm_pool->pkt_size, RX_BUF_SIZE(bm_pool->pkt_size), i, buf_num);
MV_ETH_UNLOCK(&bm_pool->lock, flags);
return i;
}
#ifdef CONFIG_MV_ETH_BM
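/* Allocate an uncached, MV_BM_POOL_PTR_ALIGN aligned array of buffer pointers, initialize the HW BM pool with it, configure the pool's target address window and enable the pool; returns the virtual address of the array and, optionally, its physical address via pPhysAddr */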
void *mv_eth_bm_pool_create(int pool, int capacity, MV_ULONG *pPhysAddr)
{
MV_ULONG physAddr;
MV_UNIT_WIN_INFO winInfo;
void *pVirt;
MV_STATUS status;
pVirt = mvOsIoUncachedMalloc(NULL, sizeof(MV_U32) * capacity, &physAddr, NULL);
if (pVirt == NULL) {
mvOsPrintf("%s: Can't allocate %d bytes for Long pool #%d\n",
__func__, MV_BM_POOL_CAP_MAX * sizeof(MV_U32), pool);
return NULL;
}
/* Pool address must be MV_BM_POOL_PTR_ALIGN bytes aligned */
if (MV_IS_NOT_ALIGN((unsigned)pVirt, MV_BM_POOL_PTR_ALIGN)) {
mvOsPrintf("memory allocated for BM pool #%d is not %d bytes aligned\n",
pool, MV_BM_POOL_PTR_ALIGN);
mvOsIoCachedFree(NULL, sizeof(MV_U32) * capacity, physAddr, pVirt, 0);
return NULL;
}
status = mvBmPoolInit(pool, pVirt, physAddr, capacity);
if (status != MV_OK) {
mvOsPrintf("%s: Can't init #%d BM pool. status=%d\n", __func__, pool, status);
mvOsIoCachedFree(NULL, sizeof(MV_U32) * capacity, physAddr, pVirt, 0);
return NULL;
}
status = mvCtrlAddrWinInfoGet(&winInfo, physAddr);
if (status != MV_OK) {
printk(KERN_ERR "%s: Can't map BM pool #%d. phys_addr=0x%x, status=%d\n",
__func__, pool, (unsigned)physAddr, status);
mvOsIoCachedFree(NULL, sizeof(MV_U32) * capacity, physAddr, pVirt, 0);
return NULL;
}
mvBmPoolTargetSet(pool, winInfo.targetId, winInfo.attrib);
mvBmPoolEnable(pool);
if (pPhysAddr != NULL)
*pPhysAddr = physAddr;
return pVirt;
}
#endif /* CONFIG_MV_ETH_BM */
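/* Initialize the SW state of a buffer pool: create the HW BM pool when supported and create the stack used for SKB recycling and for RXQs working without BM */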
static MV_STATUS mv_eth_pool_create(int pool, int capacity)
{
struct bm_pool *bm_pool;
if ((pool < 0) || (pool >= MV_ETH_BM_POOLS)) {
printk(KERN_ERR "%s: pool=%d is out of range\n", __func__, pool);
return MV_BAD_VALUE;
}
bm_pool = &mv_eth_pool[pool];
memset(bm_pool, 0, sizeof(struct bm_pool));
#ifdef CONFIG_MV_ETH_BM_CPU
bm_pool->bm_pool = mv_eth_bm_pool_create(pool, capacity, NULL);
if (bm_pool->bm_pool == NULL)
return MV_FAIL;
#endif /* CONFIG_MV_ETH_BM_CPU */
/* Create a stack as the container of allocated skbs, used for SKB_RECYCLE and for RXQs working without BM support */
bm_pool->stack = mvStackCreate(capacity);
if (bm_pool->stack == NULL) {
printk(KERN_ERR "Can't create MV_STACK structure for %d elements\n", capacity);
return MV_OUT_OF_CPU_MEM;
}
bm_pool->pool = pool;
bm_pool->capacity = capacity;
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
spin_lock_init(&bm_pool->lock);
return MV_OK;
}
/* Interrupt handling */
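/* mv_eth_isr: mask all port interrupts and schedule the per-CPU NAPI context; RX, tx-done and link processing is done in mv_eth_poll() */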
irqreturn_t mv_eth_isr(int irq, void *dev_id)
{
struct eth_port *pp = (struct eth_port *)dev_id;
struct napi_struct *napi = pp->napi[smp_processor_id()];
#ifdef CONFIG_MV_ETH_DEBUG_CODE
if (pp->flags & MV_ETH_F_DBG_ISR) {
printk(KERN_ERR "%s: port=%d, cpu=%d, mask=0x%x, cause=0x%x\n",
__func__, pp->port, smp_processor_id(),
MV_REG_READ(NETA_INTR_NEW_MASK_REG(pp->port)), MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)));
}
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
STAT_INFO(pp->stats.irq++);
/* Mask all interrupts */
MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port), 0);
/* A dummy read is required to make sure the interrupt is really masked */
/* MV_REG_READ(NETA_INTR_NEW_MASK_REG(pp->port));*/
/* Verify that the device is not already on the polling list */
if (napi_schedule_prep(napi)) {
/* Schedule the work (RX + tx-done + link) out of interrupt context */
__napi_schedule(napi);
} else {
STAT_INFO(pp->stats.irq_err++);
#ifdef CONFIG_MV_ETH_DEBUG_CODE
printk(KERN_ERR "mv_eth_isr ERROR: port=%d, cpu=%d\n", pp->port, smp_processor_id());
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
}
return IRQ_HANDLED;
}
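/* Handle a link status change: bring the port and the network interface up or down according to the current link state and optionally print the new status */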
void mv_eth_link_event(struct eth_port *pp, int print)
{
struct net_device *dev = pp->dev;
bool link_is_up;
STAT_INFO(pp->stats.link++);
/* Check Link status on ethernet port */
#ifdef CONFIG_MV_PON
if (MV_PON_PORT(pp->port))
link_is_up = mv_pon_link_status();
else
#endif /* CONFIG_MV_PON */
link_is_up = mvNetaLinkIsUp(pp->port);
if (link_is_up) {
mvNetaPortUp(pp->port);
set_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
if (mv_eth_ctrl_is_tx_enabled(pp)) {
if (dev) {
netif_carrier_on(dev);
netif_wake_queue(dev);
}
}
} else {
if (dev) {
netif_carrier_off(dev);
netif_stop_queue(dev);
}
mvNetaPortDown(pp->port);
clear_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
}
if (print) {
if (dev)
printk(KERN_ERR "%s: ", dev->name);
else
printk(KERN_ERR "%s: ", "none");
mv_eth_link_status_print(pp->port);
}
}
/***********************************************************************************************/
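/* NAPI poll handler: read the port's cause register and process MISC (link), tx-done and RX events within the given budget */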
int mv_eth_poll(struct napi_struct *napi, int budget)
{
int rx_done = 0;
MV_U32 causeRxTx;
struct eth_port *pp = MV_ETH_PRIV(napi->dev);
#ifdef CONFIG_MV_ETH_DEBUG_CODE
if (pp->flags & MV_ETH_F_DBG_POLL) {
printk(KERN_ERR "%s ENTER: port=%d, cpu=%d, mask=0x%x, cause=0x%x\n",
__func__, pp->port, smp_processor_id(),
MV_REG_READ(NETA_INTR_NEW_MASK_REG(pp->port)), MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)));
}
#endif /* CONFIG_MV_ETH_DEBUG_CODE */
read_lock(&pp->rwlock);
STAT_INFO(pp->stats.poll[smp_processor_id()]++);
/* Read cause register */
causeRxTx = MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)) &
(MV_ETH_MISC_SUM_INTR_MASK | MV_ETH_TXDONE_INTR_MASK | MV_ETH_RX_INTR_MASK);
if ((pp->flags & MV_ETH_F_STARTED) == 0) {
printk(KERN_ERR "%s: port #%d is not started", __func__, pp->port);
}
if (causeRxTx & MV_ETH_MISC_SUM_INTR_MASK) {
MV_U32 causeMisc;
/* Process MISC events - link change, etc. */
causeRxTx &= ~MV_ETH_MISC_SUM_INTR_MASK;
causeMisc = MV_REG_READ(NETA_INTR_MISC_CAUSE_REG(pp->port));
if (causeMisc & NETA_CAUSE_LINK_CHANGE_MASK)