blob: 39fd6f0e6dc31b8ac18d3f4b6b2a6037e348e718 [file] [log] [blame]
/*
* Copyright (c) 2002-2008 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*
* File Name : bcmgenet.c
*
* Description: This is Linux driver for the broadcom GENET ethernet MAC core.
*/
#define CARDNAME "bcmgenet"
#define VERSION "2.0"
#define VER_STR "v" VERSION " " __DATE__ " " __TIME__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/brcmstb/brcmstb.h>
#include <asm/brcmstb/brcmapi.h>
#include "bcmmii.h"
#include "bcmgenet.h"
#include "if_net.h"
#define MY_BUG_ON(c) do { if ((c)) { printk(KERN_EMERG "MY_BUG_ON(%s) at %s:%d\n", #c, __FILE__, __LINE__); BUG(); } } while (0)
#ifdef CONFIG_NET_SCH_MULTIQ
#if CONFIG_BRCM_GENET_VERSION == 1
#error "This version of GENET doesn't support tx multi queue"
#endif
/* Default # of tx queues for multi queue support */
#define GENET_TX_MQ_CNT 1
/* Default # of bds for each priority tx queue for multi queue support */
#define GENET_TX_MQ_BD_CNT 128
/* Default # of additional rx queues for multi queue support */
#define GENET_RX_MQ_CNT 1
/* Default # of bds for each priority rx queue for multi queue support */
#define GENET_RX_MQ_BD_CNT 128
/* Highest priority is given to the tx descriptor ring 0.
* All other rings are mapped to lower priorities (higher numerical values) */
#define GENET_TX_Q0_PRIORITY 0
static void bcmgenet_init_multiq_tx(struct net_device *dev);
static void bcmgenet_init_multiq_rx(struct net_device *dev);
#else
#define GENET_RX_MQ_CNT 0
#define GENET_RX_MQ_BD_CNT 0
#define GENET_TX_MQ_CNT 0
#define GENET_TX_MQ_BD_CNT 0
#endif /*CONFIG_NET_SCH_MULTIQ */
/* Total number or priority descriptors must be less than TOTAL_DESC */
#define GENET_RX_TOTAL_MQ_BD (GENET_RX_MQ_CNT * GENET_RX_MQ_BD_CNT)
#define GENET_TX_TOTAL_MQ_BD (GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT)
#if GENET_RX_TOTAL_MQ_BD > TOTAL_DESC
#error Total number or rx priority descriptors must be less than TOTAL_DESC.
#else
#define GENET_RX_DEFAULT_BD_CNT (TOTAL_DESC - GENET_RX_TOTAL_MQ_BD)
#endif
#if GENET_TX_TOTAL_MQ_BD > TOTAL_DESC
#error Total number or tx priority descriptors must be less than TOTAL_DESC.
#else
#define GENET_TX_DEFAULT_BD_CNT (TOTAL_DESC - GENET_TX_TOTAL_MQ_BD)
#endif
#define RX_BUF_LENGTH 2048
#define RX_BUF_BITS 12
#define SKB_ALIGNMENT 32
#define DMA_DESC_THRES 4
#define HFB_ARP_LEN 21
/* NAPI budget for the default queue (queue 16) */
#define DEFAULT_DESC_BUDGET GENET_RX_DEFAULT_BD_CNT
#define THROTTLED_DESC_BUDGET 2
/*
* Length in bytes that we will match in the filter for 802.1Q packets
* This includes the source and destination mac addresses (6 bytes each)
* and the 802.1Q frame (4 bytes), for a total of 16 bytes.
*/
#define HFB_8021Q_LEN 16
/*
* Per IEEE 802.1Q, Tag Protocol Identifier (TPID): a 16-bit field set to a
* value of 0x8100 in order to identify the frame as an IEEE 802.1Q-tagged
* frame. This field is located at the same position as the EtherType/Length
* field in untagged frames, and is thus used to distinguish the frame from
* untagged frames.
*/
#define TPID 0x8100
/*
* Priority Code Point (PCP): a 3-bit field which refers to the IEEE 802.1p
* priority. It indicates the frame priority level. Values are from 0 (best
* effort) to 7 (highest); 1 represents the lowest priority. These values can be
* used to prioritize different classes of traffic (voice, video, data, etc.).
* See also Class of Service or CoS.
*/
#define PCP_COUNT 8 /* Represented by 3 bits. */
/* The first and last enabled priority. */
#define PCP_START 1 /* First PCP to enable. */
#define PCP_END 7 /* Last PCP to enable. */
/*
* Combination of interrupts that we process as a group.
*/
#define UMAC_IRQ_HFB_OR_DONE \
(UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \
UMAC_IRQ_RXDMA_BDONE| UMAC_IRQ_RXDMA_PDONE)
/* Tx/Rx DMA register offset, skip 256 descriptors */
#define GENET_TDMA_REG_OFF (GENET_TDMA_OFF + \
2 * TOTAL_DESC * sizeof(unsigned long))
#define GENET_RDMA_REG_OFF (GENET_RDMA_OFF + \
2 * TOTAL_DESC * sizeof(unsigned long))
#ifdef CONFIG_BRUNO
#define GENET0_DEVICE_NAME "eth0"
/*
* GENET MDIO Configuration Register.
*
* 31 10 9 4 3 1 0
* --------------------------------------------------
* | Reserved |mdio_clk_divider|Reserved|mdio_clause|
* --------------------------------------------------
* MDIO clock (MDC) = system clock / 2 * (MDIO_CLK_DIVIDER + 1)
* With system clock = 108Mhz, mdio_clk_divider = 0x4, MDC = 10.8MHz.
*/
#define CLOCK_DIVIDER_SHIFT 4
#define CLOCK_DIVIDER_MASK 0x3F
#define CLOCK_DIVIDER_10MHZ 0x4
#endif /* CONFIG_BRUNO */
/* --------------------------------------------------------------------------
External, indirect entry points.
--------------------------------------------------------------------------*/
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/* --------------------------------------------------------------------------
Called for "ifconfig ethX up" & "down"
--------------------------------------------------------------------------*/
static int bcmgenet_open(struct net_device *dev);
static int bcmgenet_close(struct net_device *dev);
/* --------------------------------------------------------------------------
Watchdog timeout
--------------------------------------------------------------------------*/
static void bcmgenet_timeout(struct net_device *dev);
/* --------------------------------------------------------------------------
Packet transmission.
--------------------------------------------------------------------------*/
static int bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev);
/* --------------------------------------------------------------------------
Set address filtering mode
--------------------------------------------------------------------------*/
static void bcmgenet_set_multicast_list(struct net_device *dev);
/* --------------------------------------------------------------------------
Set the hardware MAC address.
--------------------------------------------------------------------------*/
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p);
/* --------------------------------------------------------------------------
Interrupt routine, for all interrupts except ring buffer interrupts
--------------------------------------------------------------------------*/
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id);
/*---------------------------------------------------------------------------
IRQ handler for ring buffer interrupt.
--------------------------------------------------------------------------*/
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id);
/* --------------------------------------------------------------------------
dev->poll() method
--------------------------------------------------------------------------*/
static int bcmgenet_poll(struct napi_struct *napi, int budget);
static int bcmgenet_ring_poll(struct napi_struct *napi, int budget);
/* --------------------------------------------------------------------------
Process recived packet for descriptor based DMA
--------------------------------------------------------------------------*/
static unsigned int bcmgenet_desc_rx(void *ptr, unsigned int budget, int index);
/* --------------------------------------------------------------------------
Process recived packet for ring buffer DMA
--------------------------------------------------------------------------*/
static unsigned int bcmgenet_ring_rx(void *ptr, unsigned int budget);
/* --------------------------------------------------------------------------
Internal routines
--------------------------------------------------------------------------*/
/* Allocate and initialize tx/rx buffer descriptor pools */
static int bcmgenet_init_dev(struct BcmEnet_devctrl *pDevCtrl);
static void bcmgenet_uninit_dev(struct BcmEnet_devctrl *pDevCtrl);
/* Assign the Rx descriptor ring */
static int assign_rx_buffers(struct BcmEnet_devctrl *pDevCtrl);
static int assign_rx_buffers_for_queue(struct BcmEnet_devctrl *pDevCtrl, int i);
static int assign_rx_buffers_range(struct BcmEnet_devctrl *pDevCtrl,
unsigned long start_addr, unsigned long end_addr,
unsigned long read_ptr);
/* Initialize the uniMac control registers */
static int init_umac(struct BcmEnet_devctrl *pDevCtrl);
/* Initialize DMA control register */
static void init_edma(struct BcmEnet_devctrl *pDevCtrl);
/* Interrupt bottom-half */
static void bcmgenet_irq_task(struct work_struct *work);
/* power management */
static void bcmgenet_power_down(struct BcmEnet_devctrl *pDevCtrl, int mode);
static void bcmgenet_power_up(struct BcmEnet_devctrl *pDevCtrl, int mode);
/* allocate an skb, the data comes from ring buffer */
static struct sk_buff *__bcmgenet_alloc_skb_from_buf(unsigned char *buf,
int len, int headroom);
/* clock control */
static void bcmgenet_clock_enable(struct BcmEnet_devctrl *pDevCtrl);
static void bcmgenet_clock_disable(struct BcmEnet_devctrl *pDevCtrl);
/* S3 warm boot */
static void save_state(struct BcmEnet_devctrl *pDevCtrl);
static void restore_state(struct BcmEnet_devctrl *pDevCtrl);
/* HFB filtering for IPv4 multicast */
static int bcmgenet_enable_multicast_hfb(struct BcmEnet_devctrl *pDevCtrl);
/* HFB filtering for PCP */
static void bcmgenet_enable_pcp_hfb(struct BcmEnet_devctrl *pDevCtrl);
static void bcmgenet_disable_pcp_hfb(struct BcmEnet_devctrl *pDevCtrl);
static int bcmgenet_update_hfb(struct net_device *dev, unsigned int *data,
int len, int user);
static struct net_device *eth_root_dev;
static int DmaDescThres = DMA_DESC_THRES;
/* Descriptor queue budget. */
static int desc_budget = DEFAULT_DESC_BUDGET;
/* bcmgenet multi-queue budget count variables for debugfs*/
static struct dentry *bcmgenet_debugfs_root = NULL;
#ifdef CONFIG_NET_SCH_MULTIQ
static u32 bcmgenet_rx_mq_bd_cnt = GENET_RX_MQ_BD_CNT;
static u32 bcmgenet_tx_mq_bd_cnt = GENET_TX_MQ_BD_CNT;
static u32 bcmgenet_tx_mq_cnt = GENET_TX_MQ_CNT;
#endif
static u32 bcmgenet_tx_default_bd_cnt = GENET_TX_DEFAULT_BD_CNT;
/* bcmgenet debugfs variable pointer and file name */
typedef struct {
u32 *dbfs_p;
const char *dbfs_name;
} bcmgenet_debugfs;
/* Initialization function of bcmgenet debugfs variable pointer and file name*/
#define BCMGENET_DEBUGFS(x) {.dbfs_p = &x, .dbfs_name = #x}
static int bcmgenet_debugfs_create(struct BcmEnet_devctrl *dev);
static int bcmgenet_debugfs_create_u32(bcmgenet_debugfs bcmgenet_dbfs,
int op, struct dentry *rtdir);
static int bcmgenet_debugfs_create_indexed_u32(unsigned int *var, int index,
int op, struct dentry *rtdir);
#ifdef CONFIG_BCM7429A0
/*
 * Errata workaround for the BCM7429A0 internal EPHY.
 * Writes a fixed value (0x7555) to PHY register 0x13 while the shadow
 * register bank is selected via bit 2 of register 0x1f, then deselects
 * the shadow bank again.  The sequence is order-dependent.
 */
static void bcm7429_ephy_workaround(struct BcmEnet_devctrl *pDevCtrl)
{
	int data;
	/* Select the shadow/expansion register bank (reg 0x1f bit 2). */
	data = pDevCtrl->mii.mdio_read(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f);
	data |= 0x0004;
	pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f, data);
	/* Magic tuning value for shadow register 0x13 (vendor-provided). */
	data = 0x7555;
	pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x13, data);
	/* Deselect the shadow bank to restore normal register access. */
	data = pDevCtrl->mii.mdio_read(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f);
	data &= ~0x0004;
	pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f, data);
}
#endif
/*
* HFB data for ARP request.
* * In WoL (Magic Packet or ACPI) mode, we need to response
* ARP request, so dedicate an HFB to filter the ARP request.
* NOTE: the last two words are to be filled by destination.
*/
static unsigned int hfb_arp[] = {
	/*
	 * Each word appears to hold a per-byte-pair enable nibble (0xF)
	 * in bits 19:16 and 16 bits of match data in bits 15:0; 0x0
	 * in the enable nibble masks that byte pair out of the compare.
	 * (Assumed from the pattern below and hfb_ipv4_multicast —
	 * verify against the HFB hardware spec.)
	 */
	/* dst MAC = ff:ff:ff:ff:ff:ff (broadcast); src MAC ignored */
	0x000FFFFF, 0x000FFFFF, 0x000FFFFF, 0x00000000,
	/* EtherType 0x0806 (ARP), hardware type 1 (Ethernet) */
	0x00000000, 0x00000000, 0x000F0806, 0x000F0001,
	/* protocol 0x0800 (IPv4), hlen 6 / plen 4, opcode 1 (request) */
	0x000F0800, 0x000F0604, 0x000F0001, 0x00000000,
	/* sender MAC/IP ignored */
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	/* trailing words patched at runtime with the target address
	 * (see "filled by destination" note above) */
	0x000F0000, 0x000F0000, 0x000F0000, 0x000F0000,
	0x000F0000
};
/*
* HFB data for IPv4 packets with a multicast address in their dst field.
* Match:
* - Ethernet frame must use Type IP (0x0800)
* - IP version field must be 4
* - First nibble (not byte) of dst IP address must be 0xe.
*/
static unsigned int hfb_ipv4_multicast[] = {
	/* 0x000F#### = compare both bytes; 0x0008#### = compare high
	 * nibble only (version / first dst-IP nibble). */
	/* offset 0x00: */ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	/* offset 0x08: EtherType 0x0800, IP version nibble == 4 */
	0x00000000, 0x00000000, 0x000F0800, 0x00084000,
	/* offset 0x10: */ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	/* offset 0x18: first nibble of dst IP == 0xe (224.0.0.0/4) */
	0x00000000, 0x00000000, 0x00000000, 0x0008e000,
};
/* -------------------------------------------------------------------------
* The following bcmemac_xxxx() functions are legacy netaccel hook, will be
* replaced!
* -----------------------------------------------------------------------*/
struct net_device *bcmemac_get_device(void)
{
	/* Return the root GENET net_device (legacy netaccel hook). */
	return eth_root_dev;
}
EXPORT_SYMBOL(bcmemac_get_device);
/* --------------------------------------------------------------------------
Name: bcmemac_get_free_txdesc
Purpose: Get Current Available TX desc count
-------------------------------------------------------------------------- */
int bcmemac_get_free_txdesc(struct net_device *dev)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	/* Unlocked snapshot of the free tx BD count; may be stale by
	 * the time the caller acts on it. */
	return pDevCtrl->txFreeBds;
}
EXPORT_SYMBOL(bcmemac_get_free_txdesc);
/* --------------------------------------------------------------------------
Name: bcmemac_xmit_check
Purpose: Reclaims TX descriptors
-------------------------------------------------------------------------- */
/*
 * Reclaim tx buffers completed by the DMA since the last call.
 *
 * Walks the tx control blocks from txLastCIndex up to the hardware
 * consumer index, unmapping and freeing any attached skbs and crediting
 * txFreeBds.  Returns 0 if at least one BD is free afterwards (and the
 * queue has been woken), 1 if the ring is still full.
 *
 * Note: the ring index used for the consumer read is
 * GENET_TX_RING_COUNT, i.e. the default descriptor ring.
 */
int bcmemac_xmit_check(struct net_device *dev)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	struct Enet_CB *txCBPtr;
	unsigned long flags;
	int ret;	/* fix: was lumped into "unsigned long flags, ret" but
			 * the function returns int */
	unsigned int c_index;
	int lastTxedCnt = 0, i = 0;
	/*
	 * Obtain exclusive access to transmitter. This is necessary because
	 * we might have more than one stack transmitting at once.
	 */
	spin_lock_irqsave(&pDevCtrl->lock, flags);
	/* Compute how many buffers are transmited since last xmit call */
	c_index = pDevCtrl->txDma->tDmaRings[GENET_TX_RING_COUNT].tdma_consumer_index;
	c_index &= (TOTAL_DESC - 1);
	/* Handle wrap-around of the hardware consumer index. */
	if (c_index >= pDevCtrl->txLastCIndex)
		lastTxedCnt = c_index - pDevCtrl->txLastCIndex;
	else
		lastTxedCnt = TOTAL_DESC - pDevCtrl->txLastCIndex + c_index;
	TRACE(("c_index=%d lastTxedCnt=%d txLastCIndex=%d\n",
		c_index, lastTxedCnt, pDevCtrl->txLastCIndex));
	/* Reclaim transmitted buffers */
	i = pDevCtrl->txLastCIndex;
	while (lastTxedCnt-- > 0) {
		txCBPtr = &pDevCtrl->txCbs[i];
		if (txCBPtr->skb != NULL) {
			dma_unmap_single(&pDevCtrl->dev->dev,
					txCBPtr->dma_addr,
					txCBPtr->skb->len,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(txCBPtr->skb);
			txCBPtr->skb = NULL;
		}
		pDevCtrl->txFreeBds += 1;
		/* Advance with wrap within the descriptor ring. */
		if (i == (TOTAL_DESC - 1))
			i = 0;
		else
			i++;
	}
	pDevCtrl->txLastCIndex = c_index;
	if (pDevCtrl->txFreeBds > 0) {
		/* Disable txdma bdone/pdone interrupt if we have free tx bds */
		pDevCtrl->intrl2_0->cpu_mask_set |= (UMAC_IRQ_TXDMA_BDONE |
				UMAC_IRQ_TXDMA_PDONE);
		netif_wake_queue(dev);
		ret = 0;
	} else {
		ret = 1;
	}
	spin_unlock_irqrestore(&pDevCtrl->lock, flags);
	return ret;
}
EXPORT_SYMBOL(bcmemac_xmit_check);
/* --------------------------------------------------------------------------
Name: bcmemac_xmit_fragment
Purpose: Send ethernet traffic Buffer DESC and submit to UDMA
-------------------------------------------------------------------------- */
/*
 * Queue a single buffer fragment on the default tx descriptor ring.
 * Returns 1 (busy) if no tx BD is free, 0 on success.
 *
 * Caller must hold pDevCtrl->lock; this function does no locking of
 * its own and directly advances the hardware producer index.
 */
int bcmemac_xmit_fragment(int ch, unsigned char *buf, int buf_len,
		unsigned long tx_flags , struct net_device *dev)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	struct Enet_CB *txCBPtr;
	unsigned int write_ptr = 0;
	if (pDevCtrl->txFreeBds == 0)
		return 1;
	/*
	 * We must don't have 64B status block enabled in this case!
	 */
	/* NOTE(review): the write pointer is read from ring
	 * GENET_TX_RING_COUNT here, but the producer index/write pointer
	 * are advanced on ring DESC_INDEX below — verify that both name
	 * the same (default) ring. */
	write_ptr = pDevCtrl->txDma->tDmaRings[GENET_TX_RING_COUNT].tdma_write_pointer;
	/* Convert the byte-based hardware pointer to a BD index
	 * (each BD occupies two words). */
	write_ptr = ((write_ptr & DMA_RW_POINTER_MASK) >> 1);
	/* Obtain transmit control block */
	txCBPtr = &pDevCtrl->txCbs[write_ptr];
	txCBPtr->BdAddr = &pDevCtrl->txBds[write_ptr];
	txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev, buf,
			buf_len, DMA_TO_DEVICE);
	/*
	 * Add the buffer to the ring.
	 * Set addr and length of DMA BD to be transmitted.
	 */
	txCBPtr->BdAddr->address = txCBPtr->dma_addr;
	/* length in the upper halfword, SOP/EOP/CRC flags in the lower */
	txCBPtr->BdAddr->length_status = ((unsigned long)(buf_len)) << 16;
	txCBPtr->BdAddr->length_status |= tx_flags | DMA_TX_APPEND_CRC;
	/* Default QTAG for MoCA */
	txCBPtr->BdAddr->length_status |=
		(DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT);
#ifdef CONFIG_BCMGENET_DUMP_DATA
	TRACE(("%s: len %d", __func__, buf_len));
	print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS,
			16, 1, buf, buf_len, 0);
#endif
	/* Decrement total BD count and advance our write pointer */
	pDevCtrl->txFreeBds -= 1;
	if (write_ptr == pDevCtrl->nrTxBds - 1)
		write_ptr = 0;
	else
		write_ptr++;
	/* advance producer index and write pointer.*/
	pDevCtrl->txDma->tDmaRings[DESC_INDEX].tdma_producer_index += 1;
	/* write pointer advances by 2 words per BD */
	pDevCtrl->txDma->tDmaRings[DESC_INDEX].tdma_write_pointer += 2;
	if (pDevCtrl->txFreeBds == 0) {
		TRACE(("%s: %s no transmit queue space -- stopping queues\n",
			dev->name, __func__));
		/* Enable Tx bdone/pdone interrupt !*/
		pDevCtrl->intrl2_0->cpu_mask_clear |= (UMAC_IRQ_TXDMA_BDONE |
				UMAC_IRQ_TXDMA_PDONE);
		netif_stop_queue(dev);
	}
	/* update stats */
	dev->stats.tx_bytes += buf_len;
	dev->stats.tx_packets++;
	dev->trans_start = jiffies;
	return 0;
}
EXPORT_SYMBOL(bcmemac_xmit_fragment);
/* --------------------------------------------------------------------------
Name: bcmemac_xmit_multibuf
Purpose: Send ethernet traffic in multi buffers (hdr, buf, tail)
-------------------------------------------------------------------------- */
/*
 * Transmit a packet supplied as up to three pieces (header, payload,
 * tail), marking the first fragment DMA_SOP and the last DMA_EOP.
 * Spins until ring space is available.  Always returns 0; a packet
 * with no usable pieces is silently dropped.
 *
 * FIXME(review): bcmemac_xmit_check() takes pDevCtrl->lock with
 * spin_lock_irqsave, but the retry loops below call it while this
 * function is already holding that same (non-recursive) spinlock —
 * that path self-deadlocks if a fragment ever fails to queue.
 * Needs a lock-free reclaim variant for use under the lock.
 */
int bcmemac_xmit_multibuf(int ch, unsigned char *hdr, int hdr_len,
		unsigned char *buf, int buf_len, unsigned char *tail,
		int tail_len, struct net_device *dev)
{
	unsigned long flags;
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	/* Busy-wait (unlocked) until at least one tx BD is free. */
	while (bcmemac_xmit_check(dev))
		;
	/*
	 * Obtain exclusive access to transmitter. This is necessary because
	 * we might have more than one stack transmitting at once.
	 */
	spin_lock_irqsave(&pDevCtrl->lock, flags);
	/* Header + Optional payload in two parts */
	if ((hdr_len > 0) && (buf_len > 0) &&
		(tail_len > 0) && (hdr) &&
		(buf) && (tail)) {
		/* Send Header */
		while (bcmemac_xmit_fragment(ch, hdr, hdr_len, DMA_SOP, dev))
			bcmemac_xmit_check(dev);
		/* Send First Fragment */
		while (bcmemac_xmit_fragment(ch, buf, buf_len, 0, dev))
			bcmemac_xmit_check(dev);
		/* Send 2nd Fragment */
		while (bcmemac_xmit_fragment(ch, tail, tail_len, DMA_EOP, dev))
			bcmemac_xmit_check(dev);
	} else if ((hdr_len > 0) && (buf_len > 0) && (hdr) && (buf)) {
		/* Header + Optional payload */
		/* Send Header */
		while (bcmemac_xmit_fragment(ch, hdr, hdr_len, DMA_SOP, dev))
			bcmemac_xmit_check(dev);
		/* Send First Fragment */
		while (bcmemac_xmit_fragment(ch, buf, buf_len, DMA_EOP, dev))
			bcmemac_xmit_check(dev);
	} else if ((hdr_len > 0) && (hdr)) {
		/* Header Only (includes payload) */
		/* Send Header */
		while (bcmemac_xmit_fragment(ch, hdr, hdr_len,
				DMA_SOP | DMA_EOP, dev))
			bcmemac_xmit_check(dev);
	} else {
		spin_unlock_irqrestore(&pDevCtrl->lock, flags);
		return 0; /* Drop the packet */
	}
	spin_unlock_irqrestore(&pDevCtrl->lock, flags);
	return 0;
}
EXPORT_SYMBOL(bcmemac_xmit_multibuf);
/*
 * Align skb->data to a SKB_ALIGNMENT (32B) boundary and stash a
 * back-pointer to the skb itself in the 4 bytes immediately before
 * the aligned data start, so the buffer can later be mapped back to
 * its owning skb.
 */
static inline void handleAlignment(struct BcmEnet_devctrl *pDevCtrl,
		struct sk_buff *skb)
{
	/*
	 * We request to allocate 2048 + 32 bytes of buffers, and the
	 * dev_alloc_skb() added 16B for NET_SKB_PAD, so we totally
	 * requested 2048+32+16 bytes buffer, the size was aligned to
	 * SMP_CACHE_BYTES, which is 64B.(is it?), so we finnally ended
	 * up got 2112 bytes of buffer! Among which, the first 16B is
	 * reserved for NET_SKB_PAD, to make the skb->data aligned 32B
	 * boundary, we should have enough space to fullfill the 2KB
	 * buffer after alignment!
	 */
	unsigned long boundary32, curData, resHeader;
	curData = (unsigned long) skb->data;
	/* Round curData up to the next 32B boundary. */
	boundary32 = (curData + (SKB_ALIGNMENT - 1)) & ~(SKB_ALIGNMENT - 1);
	resHeader = boundary32 - curData ;
	/* 4 bytes for skb pointer */
	if (resHeader < 4)
		boundary32 += 32;
	resHeader = boundary32 - curData - 4;
	/* We'd have minimum 16B reserved by default. */
	skb_reserve(skb, resHeader);
	/* Store the skb back-pointer just below the aligned boundary. */
	*(unsigned int *)skb->data = (unsigned int)skb;
	skb_reserve(skb, 4);
	/*
	 * Make sure it is on 32B boundary, should never happen if our
	 * calculation was correct.
	 */
	if ((unsigned long) skb->data & (SKB_ALIGNMENT - 1)) {
		printk(KERN_WARNING "skb buffer is NOT aligned on %d boundary!\n",
				SKB_ALIGNMENT);
	}
	/*
	 * we don't reserve 2B for IP Header optimization here,
	 * use skb_pull when receiving packets
	 */
}
/* --------------------------------------------------------------------------
Name: bcmgenet_gphy_link_status
Purpose: GPHY link status monitoring task
-------------------------------------------------------------------------- */
static void bcmgenet_gphy_link_status(struct work_struct *work)
{
	struct BcmEnet_devctrl *pDevCtrl = container_of(work,
			struct BcmEnet_devctrl, bcmgenet_link_work);
	/* Re-sync MAC speed/duplex settings with the current PHY state;
	 * runs in process context (scheduled from the link timer). */
	mii_setup(pDevCtrl->dev);
}
/* --------------------------------------------------------------------------
Name: bcmgenet_gphy_link_timer
Purpose: GPHY link status monitoring timer function
-------------------------------------------------------------------------- */
static void bcmgenet_gphy_link_timer(unsigned long data)
{
	struct BcmEnet_devctrl * pDevCtrl = (struct BcmEnet_devctrl *)data;
	/* Defer the (sleeping) MII poll to a workqueue, then re-arm the
	 * timer to fire again in one second. */
	schedule_work(&pDevCtrl->bcmgenet_link_work);
	mod_timer(&pDevCtrl->timer, jiffies + HZ);
}
#ifdef CONFIG_BRCM_HAS_STANDBY
/*
 * Select the wake-up source mask for this interface: MoCA PHYs always
 * use the MoCA wake source; otherwise device 0 uses the Ethernet
 * source and later instances use the MoCA source.  Factored out of
 * the three wakeup callbacks below, which previously duplicated it.
 */
static u32 bcmgenet_wol_mask(struct BcmEnet_devctrl *pDevCtrl)
{
	if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA)
		return WOL_MOCA_MASK;
	return pDevCtrl->devnum ? WOL_MOCA_MASK : WOL_ENET_MASK;
}
/* Enable this device's wake-up source if userspace allows wakeup. */
static int bcmgenet_wakeup_enable(void *ref)
{
	struct BcmEnet_devctrl *pDevCtrl = (struct BcmEnet_devctrl *)ref;
	if (device_may_wakeup(&pDevCtrl->dev->dev))
		brcm_pm_wakeup_source_enable(bcmgenet_wol_mask(pDevCtrl), 1);
	return 0;
}
/* Disable this device's wake-up source if userspace allows wakeup. */
static int bcmgenet_wakeup_disable(void *ref)
{
	struct BcmEnet_devctrl *pDevCtrl = (struct BcmEnet_devctrl *)ref;
	if (device_may_wakeup(&pDevCtrl->dev->dev))
		brcm_pm_wakeup_source_enable(bcmgenet_wol_mask(pDevCtrl), 0);
	return 0;
}
/*
 * Report whether this device's wake-up source fired.  Returns the
 * status from brcm_pm_wakeup_get_status(), or 0 when wakeup is not
 * permitted (mask is then reported as 0 in the debug print).
 */
static int bcmgenet_wakeup_poll(void *ref)
{
	struct BcmEnet_devctrl *pDevCtrl = (struct BcmEnet_devctrl *)ref;
	int retval = 0;
	u32 mask = 0;
	if (device_may_wakeup(&pDevCtrl->dev->dev)) {
		mask = bcmgenet_wol_mask(pDevCtrl);
		retval = brcm_pm_wakeup_get_status(mask);
	}
	printk(KERN_DEBUG "%s %s(%08x): %d\n", __func__,
		pDevCtrl->dev->name, mask, retval);
	return retval;
}
static struct brcm_wakeup_ops bcmgenet_wakeup_ops = {
	.enable = bcmgenet_wakeup_enable,
	.disable = bcmgenet_wakeup_disable,
	.poll = bcmgenet_wakeup_poll,
};
#endif
/* --------------------------------------------------------------------------
Name: bcmgenet_open
Purpose: Open and Initialize the EMAC on the chip
-------------------------------------------------------------------------- */
/*
 * ndo_open: bring the interface up.
 *
 * Enables clocks, programs the MAC address, handles resume from
 * WOL/deep-sleep states, flushes and re-initializes the DMA rings,
 * requests both interrupt lines, and finally starts the tx queues,
 * NAPI, and the MAC.  Returns 0 on success or -ENODEV if an IRQ
 * cannot be obtained.
 *
 * Fixes over the previous version: the error paths used to
 * double-free irq0 (once inline with cookie pDevCtrl, again at the
 * err1 label with the wrong cookie 'dev') and to free irq1 even when
 * it had never been requested.  The labels below release exactly what
 * was acquired, with the same dev_id cookie passed to request_irq().
 */
static int bcmgenet_open(struct net_device *dev)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	unsigned long dma_ctrl;
	volatile struct uniMacRegs *umac = pDevCtrl->umac;
	TRACE(("%s: bcmgenet_open\n", dev->name));
	bcmgenet_clock_enable(pDevCtrl);
#ifdef CONFIG_BRUNO
	/*
	 * Set the clock divider to 0x04 to generate a 10.8MHz clock.
	 */
	if (strcmp(dev->name, GENET0_DEVICE_NAME) == 0) {
		volatile unsigned long val = pDevCtrl->umac->mdio_cfg;
		val &= ~(CLOCK_DIVIDER_MASK << CLOCK_DIVIDER_SHIFT);
		val |= (CLOCK_DIVIDER_10MHZ << CLOCK_DIVIDER_SHIFT);
		pDevCtrl->umac->mdio_cfg = val;
	}
#endif /* CONFIG_BRUNO */
	GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 0;
	/* disable ethernet MAC while updating its registers */
	pDevCtrl->umac->cmd &= ~(CMD_TX_EN | CMD_RX_EN);
	/* Program the station MAC address (mac_0 holds the high 4 bytes). */
	umac->mac_0 = (dev->dev_addr[0] << 24 |
			dev->dev_addr[1] << 16 |
			dev->dev_addr[2] << 8 |
			dev->dev_addr[3]);
	umac->mac_1 = dev->dev_addr[4] << 8 | dev->dev_addr[5];
	if (pDevCtrl->wol_enabled) {
		/* From WOL-enabled suspend, switch to regular clock */
		clk_disable(pDevCtrl->clk_wol);
		/* init umac registers to synchronize s/w with h/w */
		init_umac(pDevCtrl);
		/* Speed settings must be restored */
		mii_init(dev);
		mii_setup(dev);
	}
	if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT)
		pDevCtrl->ext->ext_pwr_mgmt |= EXT_ENERGY_DET_MASK;
	/* Leave any WOL power-down state entered during suspend. */
	if (test_and_clear_bit(GENET_POWER_WOL_MAGIC, &pDevCtrl->wol_enabled))
		bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_MAGIC);
	if (test_and_clear_bit(GENET_POWER_WOL_ACPI, &pDevCtrl->wol_enabled))
		bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_ACPI);
	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	pDevCtrl->txDma->tdma_ctrl &= ~dma_ctrl;
	pDevCtrl->rxDma->rdma_ctrl &= ~dma_ctrl;
	/* Flush the tx path and receive buffer block. */
	pDevCtrl->umac->tx_flush = 1;
	GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 1;
	udelay(10);
	pDevCtrl->umac->tx_flush = 0;
	GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 0;
	/* reset dma, start from beginning of the ring. */
	init_edma(pDevCtrl);
	/* reset internal book keeping variables. */
	pDevCtrl->txLastCIndex = 0;
	pDevCtrl->rxBdAssignPtr = pDevCtrl->rxBds;
	if (brcm_pm_deep_sleep())
		restore_state(pDevCtrl);
	else
		assign_rx_buffers(pDevCtrl);
	pDevCtrl->txFreeBds = pDevCtrl->nrTxBds;
	/*Always enable ring 16 - descriptor ring */
	pDevCtrl->rxDma->rdma_ctrl |= dma_ctrl;
	pDevCtrl->txDma->tdma_ctrl |= dma_ctrl;
	/* External PHYs are monitored by the 1-second link poll timer. */
	if (pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_MII ||
		pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII ||
		pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII_IBS) {
		mod_timer(&pDevCtrl->timer, jiffies);
	}
	if (request_irq(pDevCtrl->irq0, bcmgenet_isr0, IRQF_SHARED,
			dev->name, pDevCtrl) < 0) {
		printk(KERN_ERR "can't request IRQ %d\n", pDevCtrl->irq0);
		goto err_out;
	}
	if (request_irq(pDevCtrl->irq1, bcmgenet_isr1, IRQF_SHARED,
			dev->name, pDevCtrl) < 0) {
		printk(KERN_ERR "can't request IRQ %d\n", pDevCtrl->irq1);
		goto err_free_irq0;
	}
	/* Start the network engine */
	netif_tx_start_all_queues(dev);
	napi_enable(&pDevCtrl->napi);
	pDevCtrl->umac->cmd |= (CMD_TX_EN | CMD_RX_EN);
#ifdef CONFIG_BRCM_HAS_STANDBY
	brcm_pm_wakeup_register(&bcmgenet_wakeup_ops, pDevCtrl, dev->name);
	device_set_wakeup_capable(&dev->dev, 1);
#endif
	if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT)
		bcmgenet_power_up(pDevCtrl, GENET_POWER_PASSIVE);
	return 0;
err_free_irq0:
	/* dev_id cookie must match the one passed to request_irq() */
	free_irq(pDevCtrl->irq0, pDevCtrl);
err_out:
	del_timer_sync(&pDevCtrl->timer);
	netif_tx_stop_all_queues(dev);
	return -ENODEV;
}
/* --------------------------------------------------------------------------
Name: bcmgenet_close
Purpose: Stop communicating with the outside world
Note: Caused by 'ifconfig ethX down'
-------------------------------------------------------------------------- */
static int bcmgenet_close(struct net_device *dev)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	int timeout = 0;
	TRACE(("%s: bcmgenet_close\n", dev->name));
	/* Stop accepting/processing traffic before touching the hardware. */
	napi_disable(&pDevCtrl->napi);
	netif_tx_stop_all_queues(dev);
	/* Stop Tx DMA */
	pDevCtrl->txDma->tdma_ctrl &= ~DMA_EN;
	/* Poll (up to ~5ms) for disable completion.  NOTE(review): the
	 * loop exits when the status register bit at the DMA_EN position
	 * is SET — this assumes that bit reads as "DMA disabled" in the
	 * status register, unlike the control register; verify against
	 * the DMA register spec. */
	while (timeout < 5000) {
		if (pDevCtrl->txDma->tdma_status & DMA_EN)
			break;
		udelay(1);
		timeout++;
	}
	if (timeout == 5000)
		printk(KERN_ERR "Timed out while shutting down Tx DMA\n");
	/* Disable Rx DMA*/
	pDevCtrl->rxDma->rdma_ctrl &= ~DMA_EN;
	timeout = 0;
	while (timeout < 5000) {
		if (pDevCtrl->rxDma->rdma_status & DMA_EN)
			break;
		udelay(1);
		timeout++;
	}
	if (timeout == 5000)
		printk(KERN_ERR "Timed out while shutting down Rx DMA\n");
	/* Stop the MAC itself. */
	pDevCtrl->umac->cmd &= ~(CMD_RX_EN | CMD_TX_EN);
	/* tx reclaim */
	bcmgenet_xmit(NULL, dev);
	free_irq(pDevCtrl->irq0, (void *)pDevCtrl);
	free_irq(pDevCtrl->irq1, (void *)pDevCtrl);
	/* External PHYs: stop the link-poll timer and its work item. */
	if (pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_MII ||
		pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII ||
		pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII_IBS) {
		del_timer_sync(&pDevCtrl->timer);
		cancel_work_sync(&pDevCtrl->bcmgenet_link_work);
	}
	/*
	 * Wait for pending work items to complete - we are stopping
	 * the clock now. Since interrupts are disabled, no new work
	 * will be scheduled.
	 */
	cancel_work_sync(&pDevCtrl->bcmgenet_irq_work);
	if (brcm_pm_deep_sleep())
		save_state(pDevCtrl);
	/* Enter the appropriate low-power mode: WOL if armed for wakeup,
	 * otherwise power down an internal PHY entirely. */
	if (device_may_wakeup(&dev->dev) && pDevCtrl->dev_asleep) {
		if (pDevCtrl->wolopts & WAKE_MAGIC)
			bcmgenet_power_down(pDevCtrl, GENET_POWER_WOL_MAGIC);
		else if (pDevCtrl->wolopts & WAKE_ARP)
			bcmgenet_power_down(pDevCtrl, GENET_POWER_WOL_ACPI);
	} else if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT)
		bcmgenet_power_down(pDevCtrl, GENET_POWER_PASSIVE);
	if (pDevCtrl->wol_enabled)
		clk_enable(pDevCtrl->clk_wol);
	bcmgenet_clock_disable(pDevCtrl);
	return 0;
}
/* --------------------------------------------------------------------------
Name: bcmgenet_net_timeout
Purpose:
-------------------------------------------------------------------------- */
static void bcmgenet_timeout(struct net_device *dev)
{
	MY_BUG_ON(dev == NULL);
	TRACE(("%s: bcmgenet_timeout\n", dev->name));
	/* Reset the watchdog timestamp, count the error, and let the
	 * stack retry transmission on all queues. */
	dev->trans_start = jiffies;
	dev->stats.tx_errors++;
	netif_tx_wake_all_queues(dev);
}
/* --------------------------------------------------------------------------
Name: bcmgenet_set_multicast_list
Purpose: Set the multicast mode, ie. promiscuous or multicast
-------------------------------------------------------------------------- */
static void bcmgenet_set_multicast_list(struct net_device *dev)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	struct netdev_hw_addr *ha;
#else
	struct dev_mc_list *dmi;
#endif
	/* i indexes mdf_addr word pairs; mc counts filter slots used. */
	int i, mc;
#define MAX_MC_COUNT 16
	TRACE(("%s: bcmgenet_set_multicast_list: %08X\n",
		dev->name, dev->flags));
	/* Promiscous mode */
	if (dev->flags & IFF_PROMISC) {
		pDevCtrl->umac->cmd |= CMD_PROMISC;
		pDevCtrl->umac->mdf_ctrl = 0;
		return;
	} else
		pDevCtrl->umac->cmd &= ~CMD_PROMISC;
	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	/* mdf_addr[i] holds the top 2 address bytes, mdf_addr[i+1] the
	 * remaining 4; enable bit (MAX_MC_COUNT - mc) in mdf_ctrl. */
	pDevCtrl->umac->mdf_addr[i] = 0xFFFF;
	pDevCtrl->umac->mdf_addr[i+1] = 0xFFFFFFFF;
	pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc));
	i += 2;
	mc++;
	/* Unicast*/
	pDevCtrl->umac->mdf_addr[i] = (dev->dev_addr[0]<<8) | dev->dev_addr[1];
	pDevCtrl->umac->mdf_addr[i+1] = dev->dev_addr[2] << 24 |
			dev->dev_addr[3] << 16 |
			dev->dev_addr[4] << 8 |
			dev->dev_addr[5];
	pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc));
	i += 2;
	mc++;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	/* NOTE(review): two slots are already consumed by broadcast and
	 * unicast above; this bound admits up to MAX_MC_COUNT-1 extra
	 * addresses, which only fits if the hardware actually provides
	 * MAX_MC_COUNT+1 filter slots (bit numbering 16..0 suggests 17)
	 * — verify against the MDF register spec. */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= MAX_MC_COUNT)
		return;
	netdev_for_each_mc_addr(ha, dev) {
		pDevCtrl->umac->mdf_addr[i] = ha->addr[0] << 8 | ha->addr[1];
		pDevCtrl->umac->mdf_addr[i+1] = ha->addr[2] << 24 |
			ha->addr[3] << 16 | ha->addr[4] << 8 | ha->addr[5];
		pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc));
		i += 2;
		mc++;
	}
#else
	if (dev->mc_count == 0 || dev->mc_count > (MAX_MC_COUNT - 1))
		return;
	/* Multicast */
	for (dmi = dev->mc_list; dmi; dmi = dmi->next) {
		pDevCtrl->umac->mdf_addr[i] = (dmi->dmi_addr[0] << 8) |
			dmi->dmi_addr[1];
		pDevCtrl->umac->mdf_addr[i+1] = (dmi->dmi_addr[2] << 24) |
			(dmi->dmi_addr[3] << 16) |
			(dmi->dmi_addr[4] << 8) |
			dmi->dmi_addr[5];
		pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc));
		i += 2;
		mc++;
	}
#endif
}
/*
* Set the hardware MAC address.
*/
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
return 0;
}
/* --------------------------------------------------------------------------
 Name: bcmgenet_select_queue
 Purpose: select which xmit queue to use based on skb->queue_mapping.
-------------------------------------------------------------------------- */
static u16 __maybe_unused bcmgenet_select_queue(struct net_device *dev,
		struct sk_buff *skb)
{
	/*
	 * No classification is done here.  With multi-queue support enabled
	 * and NET_ACT_SKBEDIT not configured, whoever produced the skb
	 * (netaccel, for example) is responsible for having set
	 * skb->queue_mapping before transmission; we simply honor it.
	 */
	u16 txq = skb->queue_mapping;

	return txq;
}
/* --------------------------------------------------------------------------
 Name: __bcmgenet_skb_destructor
 Purpose: For ring buffer, called after skb is consumed.
-------------------------------------------------------------------------- */
/*
 * skb destructor installed by bcmgenet_ring_rx() on skbs that wrap rx ring
 * buffers in place.  When the stack drops its last reference, hand the
 * underlying buffer back to the hardware: re-arm it for device DMA and
 * advance the ring's consumer index (16-bit register, wraps at 0xFFFF).
 *
 * The ring index and control-block index were stashed in the 64B status
 * block words status->reserved[0]/[1] by bcmgenet_ring_rx().
 */
static void __bcmgenet_skb_destructor(struct sk_buff *skb)
{
	struct Enet_CB *cb;
	int index, cbi;
	volatile struct rDmaRingRegs *rDma_ring;
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(skb->dev);
	struct status_64 *status = (struct status_64 *)skb->head;

	index = status->reserved[0];	/* rx ring index */
	cbi = status->reserved[1];	/* control block index within ring */
	rDma_ring = &pDevCtrl->rxDma->rDmaRings[index];
	cb = pDevCtrl->rxRingCbs[index] + cbi;
	/* give the buffer back to the device for the next receive */
	dma_sync_single_for_device(&pDevCtrl->dev->dev,
		cb->dma_addr, cb->dma_len, DMA_FROM_DEVICE);
	/* Increment consumer index, if previous skb was not consumed.
	 * this will cause buffer out of sync!! */
	if ((rDma_ring->rdma_consumer_index & DMA_C_INDEX_MASK) == 0xFFFF)
		rDma_ring->rdma_consumer_index = 0;
	else
		rDma_ring->rdma_consumer_index++;
	skb->destructor = NULL;	/* run only once per skb */
}
/* --------------------------------------------------------------------------
 Name: __bcmgenet_alloc_skb_from_buf
 Purpose: Allocate an skb from an existing buffer.
-------------------------------------------------------------------------- */
/*
 * Build an sk_buff whose data area is an existing (ring) buffer rather than
 * a freshly allocated one.  Only the skb head is allocated (from
 * skbuff_head_cache); @buf of @len bytes becomes the data area, with
 * @headroom bytes reserved at the front.
 *
 * The shared-info dataref is preset to 2 so that a later kfree_skb() from
 * the stack releases the skb head but never the buffer itself, which stays
 * owned by the ring.  Returns NULL if the head allocation fails.
 */
static struct sk_buff *__bcmgenet_alloc_skb_from_buf(unsigned char *buf,
		int len, int headroom)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = len + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = buf;
	skb->data = buf;
	skb_reset_tail_pointer(skb);
	/* the shared info area occupies the tail end of the buffer */
	skb->end = skb->tail + len - sizeof(struct skb_shared_info);
	/* FCLONE_ORIG tell kfree_skb() not to release data */
	skb->cloned = SKB_FCLONE_ORIG;
	/* FLONE_ORIG tells kfree_skb to free skb from skb head cache*/
	skb->fclone = SKB_FCLONE_UNAVAILABLE;
	skb_reserve(skb, headroom);
	shinfo = skb_shinfo(skb);
	/* Set dataref to 2, so upper layer won't free the data buffer */
	atomic_set(&shinfo->dataref, 2);
	/* initialize the shared info by hand (no __alloc_skb() here) */
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
	shinfo->tx_flags = 0;
#else
	shinfo->tx_flags.flags = 0;
#endif
	shinfo->frag_list = NULL;
	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
	return skb;
}
/* --------------------------------------------------------------------------
 Name: bcmgenet_alloc_txring_skb
 Purpose: Allocate skb for tx ring buffer.
-------------------------------------------------------------------------- */
/*
 * Hand out an skb built directly on the next tx ring buffer of ring @index
 * so the caller can fill it in place and transmit without a copy
 * (see bcmgenet_ring_xmit()).  64 bytes of headroom are reserved for the
 * tx status block.
 *
 * BUGs if the ring is not enabled.  Returns NULL if the ring has no free
 * BDs — callers are expected to have checked queue state first.
 */
struct sk_buff *bcmgenet_alloc_txring_skb(struct net_device *dev, int index)
{
	unsigned long flags, p_index = 0;
	struct sk_buff *skb = NULL;
	struct Enet_CB *cb;
	volatile struct tDmaRingRegs *tDma_ring;
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);

	if (!(pDevCtrl->txDma->tdma_ctrl &
			(1 << (index + DMA_RING_BUF_EN_SHIFT)))) {
		printk(KERN_ERR "Ring %d is not enabled\n", index);
		BUG();
	}
	spin_lock_irqsave(&pDevCtrl->lock, flags);
	if (pDevCtrl->txRingFreeBds[index] == 0) {
		/*
		 * This shouldn't happen, upper level should
		 * check if the tx queue stopped before calling this.
		 */
		printk(KERN_ERR "%s:%d queue stopped!!\n", __func__, index);
		spin_unlock_irqrestore(&pDevCtrl->lock, flags);
		return skb;
	}
	tDma_ring = &pDevCtrl->txDma->tDmaRings[index];
	p_index = (DMA_P_INDEX_MASK & tDma_ring->tdma_producer_index);
	/* P/C index is 16 bits, we do modulo of RING_SIZE */
	p_index &= (pDevCtrl->txRingSize[index] - 1);
	cb = pDevCtrl->txRingCBs[index] + p_index;
	/* wrap an skb around the ring buffer; 64B headroom for the TSB */
	skb = __bcmgenet_alloc_skb_from_buf((unsigned char *)cb->BdAddr,
		RX_BUF_LENGTH, 64);
	cb->skb = skb;
	spin_unlock_irqrestore(&pDevCtrl->lock, flags);
	return skb;
}
EXPORT_SYMBOL(bcmgenet_alloc_txring_skb);
/* --------------------------------------------------------------------------
 Name: bcmgenet_get_txcb
 Purpose: return tx control data and increment write pointer.
-------------------------------------------------------------------------- */
/*
 * Return the control block for the BD at *@pos on tx ring @index, bind it
 * to its BD in the flat txBds array, and advance *@pos to the next BD with
 * wrap-around at the ring boundary.
 *
 * With multiqueue enabled the flat BD array is partitioned as
 * [ring 0 .. ring GENET_TX_MQ_CNT-1 | default ring], which is why the CB
 * pointer is offset by the rings that precede this one.
 */
static struct Enet_CB *bcmgenet_get_txcb(struct net_device *dev,
		int *pos, int index)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	struct Enet_CB *txCBPtr = NULL;
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
	if (index == DESC_INDEX) {
		/* default ring: its CBs follow all MQ rings' BDs */
		txCBPtr = pDevCtrl->txCbs;
		txCBPtr += (*pos - GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT);
		txCBPtr->BdAddr = &pDevCtrl->txBds[*pos];
		/* wrap within [MQ_CNT*MQ_BD_CNT, TOTAL_DESC) */
		if (*pos == (TOTAL_DESC - 1))
			*pos = (GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT);
		else
			*pos += 1;
	} else {
		txCBPtr = pDevCtrl->txRingCBs[index];
		txCBPtr += (*pos - index * GENET_TX_MQ_BD_CNT);
		txCBPtr->BdAddr = &pDevCtrl->txBds[*pos];
		/* wrap within this ring's BD window */
		if (*pos == (GENET_TX_MQ_BD_CNT * (index + 1) - 1))
			*pos = GENET_TX_MQ_BD_CNT * index;
		else
			*pos += 1;
	}
#else
	/* single queue: one flat ring of TOTAL_DESC BDs */
	txCBPtr = pDevCtrl->txCbs + *pos;
	txCBPtr->BdAddr = &pDevCtrl->txBds[*pos];
	/* Advancing local write pointer */
	if (*pos == (TOTAL_DESC - 1))
		*pos = 0;
	else
		*pos += 1;
#endif
	return txCBPtr;
}
/* --------------------------------------------------------------------------
 Name: bcmgenet_tx_reclaim
 Purpose: reclaim xmited skb
-------------------------------------------------------------------------- */
/*
 * Free skbs and DMA mappings for descriptors the hardware has consumed on
 * tx ring @index (DESC_INDEX is the default ring), then wake the matching
 * queue once enough BDs are free.  Callers hold pDevCtrl->lock (both
 * bcmgenet_xmit() and bcmgenet_isr1() take it before calling).
 */
static void bcmgenet_tx_reclaim(struct net_device *dev, int index)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	unsigned int c_index;
	struct Enet_CB *txCBPtr;
	int lastTxedCnt = 0, lastCIndex = 0, nrTxBds = 0;

	/* Compute how many buffers are transmited since last xmit call */
	c_index = pDevCtrl->txDma->tDmaRings[index].tdma_consumer_index;
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
	if (index == DESC_INDEX) {
		lastCIndex = pDevCtrl->txLastCIndex;
		nrTxBds = GENET_TX_DEFAULT_BD_CNT;
	} else {
		lastCIndex = pDevCtrl->txRingCIndex[index];
		nrTxBds = GENET_TX_MQ_BD_CNT;
	}
#else
	lastCIndex = pDevCtrl->txLastCIndex;
	nrTxBds = TOTAL_DESC;
#endif
	/* hw index is 16 bits: reduce modulo ring size (power of two) */
	c_index &= (nrTxBds - 1);
	if (c_index >= lastCIndex)
		lastTxedCnt = c_index - lastCIndex;
	else
		/* consumer index wrapped around the ring */
		lastTxedCnt = nrTxBds - lastCIndex + c_index;
	TRACE(("%s: %s index=%d c_index=%d "
		"lastTxedCnt=%d txLastCIndex=%d\n",
		__func__, pDevCtrl->dev->name, index,
		c_index, lastTxedCnt, lastCIndex));
	/* Reclaim transmitted buffers */
	while (lastTxedCnt-- > 0) {
		if (index == DESC_INDEX)
			txCBPtr = &pDevCtrl->txCbs[lastCIndex];
		else
			txCBPtr = pDevCtrl->txRingCBs[index] + lastCIndex;
		if (txCBPtr->skb != NULL) {
			/* linear (SOP) buffer: unmap and free the skb */
			dma_unmap_single(&pDevCtrl->dev->dev,
				txCBPtr->dma_addr,
				txCBPtr->skb->len,
				DMA_TO_DEVICE);
			dev_kfree_skb_any(txCBPtr->skb);
			txCBPtr->skb = NULL;
			txCBPtr->dma_addr = 0;
		} else if (txCBPtr->dma_addr) {
			/* fragment buffer: was mapped with dma_map_page() */
			dma_unmap_page(&pDevCtrl->dev->dev,
				txCBPtr->dma_addr,
				txCBPtr->dma_len,
				DMA_TO_DEVICE);
			txCBPtr->dma_addr = 0;
		}
		if (index == DESC_INDEX)
			pDevCtrl->txFreeBds += 1;
		else
			pDevCtrl->txRingFreeBds[index] += 1;
		/* advance with wrap at ring size */
		if (lastCIndex == (nrTxBds - 1))
			lastCIndex = 0;
		else
			lastCIndex++;
	}
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
	if (index == DESC_INDEX) {
		if (pDevCtrl->txFreeBds > (MAX_SKB_FRAGS + 1)
				&& __netif_subqueue_stopped(dev, 0)) {
			/* mask tx done interrupts again; queue can run */
			pDevCtrl->intrl2_0->cpu_mask_set |=
				(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE);
			netif_wake_subqueue(dev, 0);
		}
		pDevCtrl->txLastCIndex = c_index;
	} else {
		/* ring N maps to subqueue N+1 (0 is the default ring) */
		if (pDevCtrl->txRingFreeBds[index] > (MAX_SKB_FRAGS + 1)
				&& __netif_subqueue_stopped(dev, index+1)) {
			pDevCtrl->intrl2_1->cpu_mask_set = (1 << index);
			netif_wake_subqueue(dev, index+1);
		}
		pDevCtrl->txRingCIndex[index] = c_index;
	}
#else
	if (pDevCtrl->txFreeBds > (MAX_SKB_FRAGS + 1)
			&& netif_queue_stopped(dev)) {
		/* Disable txdma bdone/pdone interrupt if we have free tx bds */
		pDevCtrl->intrl2_0->cpu_mask_set |= (UMAC_IRQ_TXDMA_BDONE |
			UMAC_IRQ_TXDMA_PDONE);
		netif_wake_queue(dev);
	}
	pDevCtrl->txLastCIndex = c_index;
#endif
}
/* --------------------------------------------------------------------------
Name: bcmgenet_xmit
Purpose: Send ethernet traffic
-------------------------------------------------------------------------- */
static int bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
volatile struct tDmaRingRegs *tDma_ring;
struct Enet_CB *txCBPtr;
unsigned int write_ptr = 0;
int i = 0;
unsigned long flags;
struct status_64 *Status = NULL;
int nr_frags = 0, index = DESC_INDEX;
spin_lock_irqsave(&pDevCtrl->lock, flags);
if (!pDevCtrl->clock_active) {
printk(KERN_WARNING "%s: transmitting with gated clock!\n",
dev_name(&dev->dev));
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
return NETDEV_TX_OK;
}
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
if (skb) {
index = skb_get_queue_mapping(skb);
/*
* Mapping strategy:
* queue_mapping = 0, unclassified, packet xmited through ring16
* queue_mapping = 1, goes to ring 0. (highest priority queue)
*/
if (index == 0)
index = DESC_INDEX;
else
index -= 1;
if (index != DESC_INDEX && index >= GENET_TX_MQ_CNT) {
printk(KERN_ERR "%s: skb->queue_mapping %d is invalid\n",
__func__, skb_get_queue_mapping(skb));
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
return 1;
}
nr_frags = skb_shinfo(skb)->nr_frags;
if (index == DESC_INDEX) {
if (pDevCtrl->txFreeBds <= nr_frags + 1) {
netif_stop_subqueue(dev, 0);
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
printk(KERN_ERR "%s: tx ring %d full when queue awake\n",
__func__, index);
return 1;
}
} else if (pDevCtrl->txRingFreeBds[index] <= nr_frags + 1) {
netif_stop_subqueue(dev, index + 1);
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
printk(KERN_ERR "%s: tx ring %d full when queue awake\n",
__func__, index);
return 1;
}
}
/* Reclaim xmited skb for each subqueue */
for (i = 0; i < GENET_TX_MQ_CNT; i++)
bcmgenet_tx_reclaim(dev, i);
#else
if (skb) {
nr_frags = skb_shinfo(skb)->nr_frags;
if (pDevCtrl->txFreeBds <= nr_frags + 1) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
printk(KERN_ERR "%s: tx ring full when queue awake\n",
__func__);
return 1;
}
}
#endif
if (!skb) {
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
for (i = 0; i < GENET_TX_MQ_CNT; i++)
bcmgenet_tx_reclaim(dev, i);
#endif
bcmgenet_tx_reclaim(dev, DESC_INDEX);
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
return 0;
}
/*
* reclaim xmited skb every 8 packets.
*/
if ((index == DESC_INDEX) &&
(pDevCtrl->txFreeBds < pDevCtrl->nrTxBds - 8))
bcmgenet_tx_reclaim(dev, index);
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
if ((index != DESC_INDEX) && (pDevCtrl->txRingFreeBds[index]
< GENET_TX_MQ_BD_CNT - 8))
bcmgenet_tx_reclaim(dev, index);
#endif
tDma_ring = &pDevCtrl->txDma->tDmaRings[index];
/*
* If 64 byte status block enabled, must make sure skb has
* enough headroom for us to insert 64B status block.
*/
if (GENET_TBUF_CTRL(pDevCtrl) & RBUF_64B_EN) {
if (likely(skb_headroom(skb) < 64)) {
struct sk_buff *new_skb;
new_skb = skb_realloc_headroom(skb, 64);
if (new_skb == NULL) {
dev_kfree_skb(skb);
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
return 0;
} else if (skb->sk) {
skb_set_owner_w(new_skb, skb->sk);
}
dev_kfree_skb(skb);
skb = new_skb;
}
skb_push(skb, 64);
Status = (struct status_64 *)skb->data;
}
write_ptr = (DMA_RW_POINTER_MASK & tDma_ring->tdma_write_pointer) >> 1;
/* Obtain transmit control block */
txCBPtr = bcmgenet_get_txcb(dev, &write_ptr, index);
if (unlikely(!txCBPtr))
BUG();
txCBPtr->skb = skb;
if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
(GENET_TBUF_CTRL(pDevCtrl) & RBUF_64B_EN)) {
u16 offset;
offset = skb->csum_start - skb_headroom(skb) - 64;
/* Insert 64B TSB and set the flag */
Status->tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
(offset + skb->csum_offset) |
STATUS_TX_CSUM_LV;
}
/*
* Add the buffer to the ring.
* Set addr and length of DMA BD to be transmitted.
*/
if (!nr_frags) {
txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev,
skb->data, skb->len, DMA_TO_DEVICE);
if (!txCBPtr->dma_addr) {
dev_err(&pDevCtrl->dev->dev, "Tx DMA map failed\n");
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
return 0;
}
txCBPtr->dma_len = skb->len;
txCBPtr->BdAddr->address = txCBPtr->dma_addr;
txCBPtr->BdAddr->length_status = (
((unsigned long)((skb->len < ETH_ZLEN) ?
ETH_ZLEN : skb->len)) << 16) | DMA_SOP | DMA_EOP |
(DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT) |
DMA_TX_APPEND_CRC;
if (skb->ip_summed == CHECKSUM_PARTIAL)
txCBPtr->BdAddr->length_status |= DMA_TX_DO_CSUM;
#ifdef CONFIG_BCMGENET_DUMP_DATA
printk(KERN_NOTICE "%s: data 0x%p len %d",
__func__, skb->data, skb->len);
print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS,
16, 1, skb->data, skb->len, 0);
#endif
/* Decrement total BD count and advance our write pointer */
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
if (index == DESC_INDEX)
pDevCtrl->txFreeBds -= 1;
else
pDevCtrl->txRingFreeBds[index] -= 1;
#else
pDevCtrl->txFreeBds -= 1;
#endif
/* advance producer index and write pointer.*/
tDma_ring->tdma_producer_index += 1;
tDma_ring->tdma_write_pointer = (write_ptr << 1);
/* update stats */
dev->stats.tx_bytes += ((skb->len < ETH_ZLEN) ?
ETH_ZLEN : skb->len);
dev->stats.tx_packets++;
} else {
/* xmit head */
txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev,
skb->data, skb_headlen(skb), DMA_TO_DEVICE);
if (!txCBPtr->dma_addr) {
dev_err(&pDevCtrl->dev->dev, "Tx DMA map failed\n");
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
return 0;
}
txCBPtr->dma_len = skb_headlen(skb);
txCBPtr->BdAddr->address = txCBPtr->dma_addr;
txCBPtr->BdAddr->length_status = (skb_headlen(skb) << 16) |
(DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT) |
DMA_SOP | DMA_TX_APPEND_CRC;
if (skb->ip_summed == CHECKSUM_PARTIAL)
txCBPtr->BdAddr->length_status |= DMA_TX_DO_CSUM;
#ifdef CONFIG_BCMGENET_DUMP_DATA
printk(KERN_NOTICE "%s: frag head len %d",
__func__, skb_headlen(skb));
print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS,
16, 1, skb->data, skb_headlen(skb), 0);
#endif
/* Decrement total BD count and advance our write pointer */
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
if (index == DESC_INDEX)
pDevCtrl->txFreeBds -= 1;
else
pDevCtrl->txRingFreeBds[index] -= 1;
#else
pDevCtrl->txFreeBds -= 1;
#endif
/* advance producer index and write pointer.*/
tDma_ring->tdma_producer_index += 1;
tDma_ring->tdma_write_pointer = (write_ptr << 1);
dev->stats.tx_bytes += skb_headlen(skb);
/* xmit fragment */
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
txCBPtr = bcmgenet_get_txcb(dev, &write_ptr, index);
if (unlikely(!txCBPtr))
BUG();
txCBPtr->skb = NULL;
txCBPtr->dma_addr = dma_map_page(&pDevCtrl->dev->dev,
frag->page,
frag->page_offset,
frag->size,
DMA_TO_DEVICE);
if (txCBPtr->dma_addr == 0) {
printk(KERN_ERR "%s: Tx DMA map failed\n",
__func__);
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
return 0;
}
txCBPtr->dma_len = frag->size;
txCBPtr->BdAddr->address = txCBPtr->dma_addr;
#ifdef CONFIG_BCMGENET_DUMP_DATA
printk(KERN_NOTICE "%s: frag%d len %d",
__func__, i, frag->size);
print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS,
16, 1,
page_address(frag->page)+frag->page_offset,
frag->size, 0);
#endif
txCBPtr->BdAddr->length_status =
((unsigned long)frag->size << 16) |
(DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT);
if (i == nr_frags - 1)
txCBPtr->BdAddr->length_status |= DMA_EOP;
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
if (index == DESC_INDEX)
pDevCtrl->txFreeBds -= 1;
else
pDevCtrl->txRingFreeBds[index] -= 1;
#else
pDevCtrl->txFreeBds -= 1;
#endif
/* advance producer index and write pointer.*/
tDma_ring->tdma_producer_index += 1;
tDma_ring->tdma_write_pointer = (write_ptr << 1);
/* update stats */
dev->stats.tx_bytes += frag->size;
}
dev->stats.tx_packets++;
}
#if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ)
if (index == DESC_INDEX) {
if (pDevCtrl->txFreeBds <= (MAX_SKB_FRAGS + 1)) {
netif_stop_subqueue(dev, 0);
pDevCtrl->intrl2_0->cpu_mask_clear =
UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE;
}
} else if (pDevCtrl->txRingFreeBds[index] <= (MAX_SKB_FRAGS + 1)) {
netif_stop_subqueue(dev, index+1);
pDevCtrl->intrl2_1->cpu_mask_clear = (1 << index);
}
#else
if (pDevCtrl->txFreeBds <= (MAX_SKB_FRAGS + 1)) {
/* Enable Tx bdone/pdone interrupt !*/
pDevCtrl->intrl2_0->cpu_mask_clear |= UMAC_IRQ_TXDMA_BDONE |
UMAC_IRQ_TXDMA_PDONE;
netif_stop_queue(dev);
}
#endif
dev->trans_start = jiffies;
spin_unlock_irqrestore(&pDevCtrl->lock, flags);
return 0;
}
/* --------------------------------------------------------------------------
 Name: bcmgenet_tx_ring_reclaim
 Purpose: reclaim xmited skb for a ring buffer
-------------------------------------------------------------------------- */
/*
 * Reclaim consumed BDs on tx ring @index for the in-place ring-buffer xmit
 * path (bcmgenet_ring_xmit()).  @p_index/@c_index are the hardware
 * producer/consumer indices already reduced modulo the ring size.
 *
 * Unlike bcmgenet_tx_reclaim(), the skb data area here IS the ring buffer,
 * so before freeing: the destructor is cancelled (it would recycle the
 * buffer to the rx side) and dataref is forced back to 2 so kfree only
 * releases the skb head, never the buffer.
 */
static void bcmgenet_tx_ring_reclaim(struct net_device *dev, int index,
		unsigned int p_index, unsigned int c_index)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	struct Enet_CB *txCBPtr;
	int lastTxedCnt = 0, lastCIndex = 0;
	struct sk_buff *skb;
	unsigned long flags;

	/* Compute how many buffers are transmited since last xmit call */
	spin_lock_irqsave(&pDevCtrl->lock, flags);
	if (c_index >= pDevCtrl->txRingCIndex[index]) {
		/* index not wrapped */
		lastTxedCnt = c_index - pDevCtrl->txRingCIndex[index];
	} else {
		/* index wrapped */
		lastTxedCnt = pDevCtrl->txRingSize[index] -
			pDevCtrl->txRingCIndex[index] + c_index;
	}
	TRACE(("%s: ring %d: p_index=%d c_index=%d"
		"lastTxedCnt=%d txLastCIndex=%d\n",
		__func__, index, p_index, c_index,
		lastTxedCnt, pDevCtrl->txRingCIndex[index]));
	pDevCtrl->txRingFreeBds[index] += lastTxedCnt;
	lastCIndex = pDevCtrl->txRingCIndex[index];
	pDevCtrl->txRingCIndex[index] = c_index;
	/* free xmited skb */
	while (lastTxedCnt-- > 0) {
		txCBPtr = pDevCtrl->txRingCBs[index] + lastCIndex;
		skb = txCBPtr->skb;
		if (skb != NULL) {
			/*
			 * This will consume skb, we don't want to run
			 * destructor which is to drop the skb.
			 */
			if (skb->destructor != NULL)
				skb->destructor = NULL;
			/* make sure dev_kfree_skb_any() don't free mem. */
			if ((atomic_read(&skb_shinfo(skb)->dataref) &
					SKB_DATAREF_MASK) < 2)
				atomic_set(&(skb_shinfo(skb)->dataref), 2);
			dev_kfree_skb_any(skb);
			txCBPtr->skb = NULL;
		}
		/* advance with wrap at ring size */
		if (lastCIndex == (pDevCtrl->txRingSize[index] - 1))
			lastCIndex = 0;
		else
			lastCIndex++;
	}
	if (pDevCtrl->txRingFreeBds[index] > 0 &&
			netif_queue_stopped(dev)) {
		/*
		 * Disable txdma multibuf done interrupt for this ring
		 * since we have free tx bds.
		 */
		pDevCtrl->intrl2_1->cpu_mask_set |= (1 << index);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&pDevCtrl->lock, flags);
}
/* --------------------------------------------------------------------------
 Name: bcmgenet_ring_xmit
 Purpose: Send ethernet traffic through ring buffer
-------------------------------------------------------------------------- */
/*
 * Zero-copy transmit for skbs allocated by bcmgenet_alloc_txring_skb():
 * the skb data already sits in the next ring buffer, so no BD address
 * programming or DMA mapping into a new buffer is needed — only the 64B
 * TSB is filled in at skb->head and the producer index/write pointer are
 * advanced.  A NULL @skb only runs reclaim.  With @drop set, the hardware
 * is nudged past the packet (consumer index/read pointer advanced with
 * TDMA briefly disabled) instead of transmitting it.
 *
 * NOTE(review): this path relies on ring buffers being laid out so that
 * the buffer's DMA address equals the ring's current write pointer (the
 * else-branch BUG below); confirm against the ring setup code.
 */
int __maybe_unused bcmgenet_ring_xmit(struct sk_buff *skb,
		struct net_device *dev, int index, int drop)
{
	struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev);
	volatile struct tDmaRingRegs *tDma_ring;
	struct Enet_CB *txCBPtr;
	struct status_64 *Status;
	unsigned int p_index = 0, c_index = 0;

	/* Compute how many buffers are transmited since last xmit call */
	tDma_ring = &pDevCtrl->txDma->tDmaRings[index];
	p_index = (DMA_P_INDEX_MASK & tDma_ring->tdma_producer_index);
	c_index = (DMA_C_INDEX_MASK & tDma_ring->tdma_consumer_index);
	/* P/C index is 16 bits, we do modulo of RING_SIZE */
	p_index &= (pDevCtrl->txRingSize[index] - 1);
	c_index &= (pDevCtrl->txRingSize[index] - 1);
	bcmgenet_tx_ring_reclaim(dev, index, p_index, c_index);
	/* NULL skb means reclaim only */
	if (!skb)
		return 0;
	/* Obtain a tx control block */
	txCBPtr = pDevCtrl->txRingCBs[index] + p_index;
	txCBPtr->skb = skb;
	TRACE(("%s: txCBPtr=0x%08lx skb=0x%08lx skb->head=0x%08lx\n",
		__func__,
		(unsigned long)txCBPtr,
		(unsigned long)skb,
		(unsigned long)skb->head));
	/*
	 * Make sure we have headroom for us to insert 64B status block.
	 */
	if (unlikely(skb_headroom(skb) < 64)) {
		printk(KERN_ERR "no enough headroom for TSB (head=0x%08x)\n",
			(unsigned int)skb->head);
		BUG();
	}
	/* BD length field = payload (padded to ETH_ZLEN) + 64B TSB */
	Status = (struct status_64 *)skb->head;
	Status->length_status = ((unsigned long)((skb->len < ETH_ZLEN) ?
		ETH_ZLEN : skb->len)) << 16;
	Status->length_status += (sizeof(struct status_64) << 16);
	Status->length_status |= DMA_SOP | DMA_EOP | DMA_TX_APPEND_CRC;
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
			(GENET_TBUF_CTRL(pDevCtrl) & RBUF_64B_EN)) {
		u16 offset;
		offset = skb->csum_start - skb_headroom(skb) - 64;
		/* Insert 64B TSB and set the flag */
		Status->tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
			(offset + skb->csum_offset) | STATUS_TX_CSUM_LV;
		Status->length_status |= DMA_TX_DO_CSUM;
		TRACE(("Tx Hw Csum: head=0x%08x data=0x%08x "
			"csum_start=%d csum_offset=%d\n",
			(unsigned int)skb->head,
			(unsigned int)skb->data,
			skb->csum_start,
			skb->csum_offset));
	} else {
		Status->tx_csum_info = 0;
	}
	/* Default QTAG for MoCA */
	Status->length_status |= (DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT);
	/* flush TSB + payload to memory for the device */
	txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev,
		skb->head, skb->len + 64, DMA_TO_DEVICE);
#ifdef CONFIG_BCMGENET_DUMP_DATA
	printk(KERN_NOTICE "bcmgenet_xmit: len %d", skb->len);
	print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS,
		16, 1, skb->head, skb->len + 64, 0);
#endif
	/*
	 * Decrement total BD count and advance our
	 * write pointer/producer index
	 */
	pDevCtrl->txRingFreeBds[index] -= 1;
	if (likely(txCBPtr->dma_addr == tDma_ring->tdma_write_pointer)) {
		unsigned long start_addr = tDma_ring->tdma_start_addr;
		if (unlikely(drop)) {
			/*
			 * Don't xmit current packet pointed by read_pointer,
			 * there is no such mechanism in GENET's TDMA, so we
			 * disable TDMA and increment consumer index/read
			 * pointer to skip this packet as a work around.
			 */
			pDevCtrl->txDma->tdma_ctrl &= ~DMA_EN;
			tDma_ring->tdma_consumer_index += 1;
			if ((tDma_ring->tdma_read_pointer + RX_BUF_LENGTH) >
					tDma_ring->tdma_end_addr) {
				tDma_ring->tdma_read_pointer = start_addr;
			} else {
				tDma_ring->tdma_read_pointer += RX_BUF_LENGTH;
			}
		}
		/* advance producer index and write pointer.*/
		tDma_ring->tdma_producer_index += 1;
		if ((tDma_ring->tdma_write_pointer + RX_BUF_LENGTH) >
				tDma_ring->tdma_end_addr) {
			tDma_ring->tdma_write_pointer = start_addr;
		} else {
			tDma_ring->tdma_write_pointer += RX_BUF_LENGTH;
		}
		if (unlikely(drop))
			pDevCtrl->txDma->tdma_ctrl |= DMA_EN;
	} else {
		/* ooops! how can we get here ?*/
		BUG();
	}
	if (pDevCtrl->txRingFreeBds[index] == 0) {
		TRACE(("%s: no xmit queue space, stopping queue\n", dev->name));
		/* Enable Tx bdone/pdone interrupt !*/
		pDevCtrl->intrl2_0->cpu_mask_clear |= (1 << index);
		netif_stop_subqueue(dev, index);
	}
	if (!drop) {
		/* update stats */
		dev->stats.tx_bytes += ((skb->len < ETH_ZLEN) ?
			ETH_ZLEN : skb->len);
		dev->stats.tx_packets++;
	}
	dev->trans_start = jiffies;
	return 0;
}
/* NAPI polling method*/
/*
 * NAPI poll for the descriptor-based rx path.  Drains each priority ring
 * with a per-ring budget of GENET_RX_MQ_BD_CNT, then the default ring
 * (DESC_INDEX) with the module-level desc_budget; the loop index jumps
 * from the last MQ ring straight to DESC_INDEX.  The consumer index is
 * advanced here as packets are handed to the stack.  When total work is
 * under @budget, NAPI completes and rx interrupts are re-enabled.
 */
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct BcmEnet_devctrl *pDevCtrl = container_of(napi,
		struct BcmEnet_devctrl, napi);
	volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_0;
	volatile struct rDmaRingRegs *rDma_desc;
	unsigned int work_done, total_work_done = 0;
	int local_budget;
	int i;

	/* Process the priority queues. */
	for (i = 0, local_budget = GENET_RX_MQ_BD_CNT;
			i < GENET_RX_MQ_CNT || i == DESC_INDEX;) {
		work_done = bcmgenet_desc_rx(pDevCtrl, local_budget, i);
		rDma_desc = &pDevCtrl->rxDma->rDmaRings[i];
		rDma_desc->rdma_consumer_index += work_done;
		total_work_done += work_done;
		if (i == GENET_RX_MQ_CNT - 1) {
			/* Process the default queue. */
			i = DESC_INDEX;
			local_budget = desc_budget;
		} else {
			/* next MQ ring; after DESC_INDEX this increments
			 * past it and terminates the loop */
			i++;
		}
	}
	/*
	 * Per NAPI spec at
	 *
	 * http://www.linuxfoundation.org/collaborate/workgroups/networking/napi
	 *
	 * If packets remain to be processed (i.e. the driver used its entire
	 * quota), poll() should return a value of one.
	 * If, instead, all packets have been processed, your driver should
	 * reenable interrupts, turn off polling, and return zero.
	 */
	if (total_work_done < budget) {
		napi_complete(napi);
		intrl2->cpu_mask_clear |= UMAC_IRQ_HFB_OR_DONE;
		return 0;
	} else {
		return 1;
	}
}
/*
 * NAPI polling for ring buffer.
 */
/*
 * NAPI poll for the ring-buffer rx path: receives up to @budget packets via
 * bcmgenet_ring_rx() and runs tx reclaim by calling bcmgenet_ring_xmit()
 * with a NULL skb.  On completion the per-ring rx interrupt sources
 * (intrl2_1 bits 16..31) are unmasked for every ring enabled in rdma_ctrl.
 */
static int bcmgenet_ring_poll(struct napi_struct *napi, int budget)
{
	struct BcmEnet_devctrl *pDevCtrl = container_of(napi,
		struct BcmEnet_devctrl, ring_napi);
	volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_1;
	unsigned int work_done;

	work_done = bcmgenet_ring_rx(pDevCtrl, budget);
	/* tx reclaim */
	bcmgenet_ring_xmit(NULL, pDevCtrl->dev, 0, 0);
	if (work_done < budget) {
		unsigned long bits;
		napi_complete(napi);
		/* shift ring-enable bits into the rx irq field (16..31) */
		bits = (pDevCtrl->rxDma->rdma_ctrl >> 1) << 16;
		intrl2->cpu_mask_clear |= bits;
	}
	return work_done;
}
/*
 * Interrupt bottom half
 */
/*
 * Workqueue bottom half for irq0 events latched into pDevCtrl->irq0_stat
 * by the isr: cable-sense power up/down, wake-on-LAN wakeups (magic packet
 * or HFB/ACPI pattern match) and PHY link changes (handed to mii_setup()).
 * Each handled event bit is cleared from the saved status word.
 */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct BcmEnet_devctrl *pDevCtrl = container_of(
		work, struct BcmEnet_devctrl, bcmgenet_irq_work);
	struct net_device *dev;

	dev = pDevCtrl->dev;
	TRACE(("%s\n", __func__));
	/* Cable plugged/unplugged event */
	if (pDevCtrl->irq0_stat & UMAC_IRQ_PHY_DET_R) {
		pDevCtrl->irq0_stat &= ~UMAC_IRQ_PHY_DET_R;
		printk(KERN_CRIT "%s cable plugged in, powering up\n",
			pDevCtrl->dev->name);
		bcmgenet_power_up(pDevCtrl, GENET_POWER_CABLE_SENSE);
	} else if (pDevCtrl->irq0_stat & UMAC_IRQ_PHY_DET_F) {
		pDevCtrl->irq0_stat &= ~UMAC_IRQ_PHY_DET_F;
		printk(KERN_CRIT "%s cable unplugged, powering down\n",
			pDevCtrl->dev->name);
		bcmgenet_power_down(pDevCtrl, GENET_POWER_CABLE_SENSE);
	}
	/* Wake-on-LAN events */
	if (pDevCtrl->irq0_stat & UMAC_IRQ_MPD_R) {
		pDevCtrl->irq0_stat &= ~UMAC_IRQ_MPD_R;
		printk(KERN_CRIT "%s magic packet detected, waking up\n",
			pDevCtrl->dev->name);
		/* disable mpd interrupt */
		pDevCtrl->intrl2_0->cpu_mask_set |= UMAC_IRQ_MPD_R;
		/* disable CRC forward.*/
		pDevCtrl->umac->cmd &= ~CMD_CRC_FWD;
		if (pDevCtrl->dev_asleep)
			bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_MAGIC);
	} else if (pDevCtrl->irq0_stat & (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM)) {
		pDevCtrl->irq0_stat &= ~(UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM);
		printk(KERN_CRIT "%s ACPI pattern matched, waking up\n",
			pDevCtrl->dev->name);
		/* disable HFB match interrupts */
		pDevCtrl->intrl2_0->cpu_mask_set |= (UMAC_IRQ_HFB_SM |
			UMAC_IRQ_HFB_MM);
		if (pDevCtrl->dev_asleep)
			bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_ACPI);
	}
	/* Link UP/DOWN event */
	if (pDevCtrl->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN)) {
		pDevCtrl->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
		mii_setup(pDevCtrl->dev);
	}
}
/*
 * bcmgenet_ring_rx: ring buffer rx function.
 */
/*
 * Receive up to @budget packets across all enabled rx rings that have work
 * pending (per irq1_stat).  Buffers are never copied: an skb head is
 * wrapped around the ring buffer in place and handed to the stack;
 * __bcmgenet_skb_destructor() returns the buffer to the hardware when the
 * stack is done, which is also why the consumer index is NOT advanced here.
 *
 * Fixes vs. previous revision:
 *  - the skb NULL check now runs BEFORE the skb is dereferenced;
 *  - error paths free the freshly built skb (the old code freed cb->skb,
 *    which is never assigned in this function, so the skb head leaked and
 *    the buffer was never re-armed); freeing it runs the destructor, which
 *    recycles the buffer and advances the consumer index;
 *  - stray double semicolon removed from the return statement.
 */
static unsigned int bcmgenet_ring_rx(void *ptr, unsigned int budget)
{
	struct BcmEnet_devctrl *pDevCtrl = ptr;
	volatile struct status_64 *status;
	volatile struct rDmaRingRegs *rDma_ring;
	int i, len, rx_discard_flag = 0;
	struct Enet_CB *cb;
	struct sk_buff *skb;
	unsigned long dmaFlag;
	unsigned int rxpktprocessed = 0, pktcnt = 0, retvalue = 0;
	unsigned int read_ptr = 0, write_ptr = 0, p_index = 0, c_index = 0;

	TRACE(("%s: ifindex=%d irq_stat=0x%08x\n",
		__func__, pDevCtrl->dev->ifindex, pDevCtrl->irq1_stat));
	/* loop for each ring */
	for (i = 0; i < GENET_RX_RING_COUNT; i++) {
		/* Skip if this ring is not enabled */
		if (!(pDevCtrl->rxDma->rdma_ctrl &
				(1 << (i + DMA_RING_BUF_EN_SHIFT))))
			continue;
		/* Skip if there is no outstanding packet for this ring */
		if (!(pDevCtrl->irq1_stat & (1 << (16 + i))))
			continue;
		rDma_ring = &pDevCtrl->rxDma->rDmaRings[i];
		write_ptr = rDma_ring->rdma_write_pointer;
		read_ptr = rDma_ring->rdma_read_pointer;
		p_index = rDma_ring->rdma_producer_index;
		p_index &= DMA_P_INDEX_MASK;
		c_index = rDma_ring->rdma_consumer_index;
		c_index &= DMA_C_INDEX_MASK;
		/* detect a completely full ring (hw started discarding) */
		if (p_index < c_index) {
			/* index wrapped */
			if ((DMA_P_INDEX_MASK - c_index + p_index) ==
					(pDevCtrl->rxRingSize[i] - 1))
				rx_discard_flag = 1;
		} else if (p_index > c_index) {
			/* index not wrapped */
			if (p_index - c_index == pDevCtrl->rxRingSize[i])
				rx_discard_flag = 1;
		}
		if (rx_discard_flag) {
			/* discard counter lives in the upper half-word */
			int discard_cnt = rDma_ring->rdma_producer_index >> 16;
			/* Report rx overrun errors */
			pDevCtrl->dev->stats.rx_over_errors += discard_cnt;
			pDevCtrl->rxRingDiscCnt[i] += discard_cnt;
			rDma_ring->rdma_producer_index = 0;
		}
		/*
		 * We can't use produer/consumer index to compute how
		 * many outstanding packets are there, because we are not
		 * advancing consumer index right after packets are moved
		 * out of DMA. So we use read/write pointer for the math.
		 */
		if (write_ptr < read_ptr) {
			/* pointer wrapped */
			pktcnt = (rDma_ring->rdma_end_addr + 1 - read_ptr) >>
				(RX_BUF_BITS - 1);
			pktcnt += (write_ptr - rDma_ring->rdma_start_addr) >>
				(RX_BUF_BITS - 1);
		} else if (write_ptr > read_ptr) {
			/* pointer not wrapped */
			pktcnt = (write_ptr - read_ptr) >> (RX_BUF_BITS - 1);
		} else if (write_ptr == read_ptr && p_index != c_index) {
			/* overflowed, some packets are discarded by DMA */
			pktcnt = rDma_ring->rdma_ring_buf_size >> 16;
		}
		TRACE(("%s: p_index=%d c_index=%d write_ptr=0x%08x "
			"read_ptr=0x%08x pktcnt=%d\n",
			__func__, p_index, c_index, write_ptr,
			read_ptr, pktcnt));
		/*Start processing packets */
		while ((rxpktprocessed < pktcnt) &&
				(rxpktprocessed < budget)) {
			unsigned int cbi;
			/*
			 * Find out Which buffer in the ring are we pointing to.
			 */
			cbi = (read_ptr - rDma_ring->rdma_start_addr) >>
				(RX_BUF_BITS - 1);
			cb = pDevCtrl->rxRingCbs[i] + cbi;
			/* pull the 64B status block first */
			dma_sync_single_for_cpu(&pDevCtrl->dev->dev,
				cb->dma_addr, 64, DMA_FROM_DEVICE);
			status = (struct status_64 *)cb->BdAddr;
			dmaFlag = status->length_status & 0xffff;
			len = status->length_status >> 16;
			/* then the payload, now that the length is known */
			dma_sync_single_for_cpu(&pDevCtrl->dev->dev,
				cb->dma_addr + 64, len,
				DMA_FROM_DEVICE);
			/*
			 * Advancing our read pointer.
			 */
			if (read_ptr + RX_BUF_LENGTH > rDma_ring->rdma_end_addr)
				read_ptr = rDma_ring->rdma_start_addr;
			else
				read_ptr += RX_BUF_LENGTH;
			rDma_ring->rdma_read_pointer = read_ptr;
			/*
			 * per packet processing
			 */
			skb = __bcmgenet_alloc_skb_from_buf(
				(unsigned char *)cb->BdAddr, RX_BUF_LENGTH, 0);
			/* Fix: check before use (the destructor assignment
			 * used to dereference a potentially NULL skb). */
			BUG_ON(skb == NULL);
			skb->destructor = &__bcmgenet_skb_destructor;
			status->reserved[0] = i; /* ring index */
			status->reserved[1] = cbi; /* cb index */
			rxpktprocessed++;
			TRACE(("%s: cbi=%d skb=0x%08x head=0x%08x dataref=%d\n",
				__func__, cbi,
				(unsigned int)skb, (unsigned int)skb->head,
				(atomic_read(&skb_shinfo(skb)->dataref) &
				SKB_DATAREF_MASK)));
			/* report errors */
			if (unlikely(!(dmaFlag & DMA_EOP) ||
					!(dmaFlag & DMA_SOP))) {
				/* probably can't do this for scater gather ?*/
				printk(KERN_WARNING "Droping fragmented packet!\n");
				pDevCtrl->dev->stats.rx_dropped++;
				pDevCtrl->dev->stats.rx_errors++;
				/* Fix: free the skb we just built; its
				 * destructor re-arms the buffer and advances
				 * the consumer index (was freeing cb->skb,
				 * a no-op here). */
				dev_kfree_skb_any(skb);
				cb->skb = NULL;
				continue;
			}
			if (unlikely(dmaFlag & (DMA_RX_CRC_ERROR |
					DMA_RX_OV |
					DMA_RX_NO |
					DMA_RX_LG |
					DMA_RX_RXER))) {
				TRACE(("ERROR: dmaFlag=0x%lx\n", dmaFlag));
				if (dmaFlag & DMA_RX_CRC_ERROR)
					pDevCtrl->dev->stats.rx_crc_errors++;
				if (dmaFlag & DMA_RX_OV)
					pDevCtrl->dev->stats.rx_fifo_errors++;
				if (dmaFlag & DMA_RX_NO)
					pDevCtrl->dev->stats.rx_frame_errors++;
				if (dmaFlag & DMA_RX_LG)
					pDevCtrl->dev->stats.rx_length_errors++;
				pDevCtrl->dev->stats.rx_dropped++;
				pDevCtrl->dev->stats.rx_errors++;
				/* Fix: free the skb we just built (see above) */
				dev_kfree_skb_any(skb);
				cb->skb = NULL;
				continue;
			} /* error packet */
			skb_put(skb, len);
			/* we must have 64B rx status block enabled.*/
			if (pDevCtrl->rbuf->rbuf_chk_ctrl & RBUF_RXCHK_EN) {
				if (status->rx_csum & STATUS_RX_CSUM_OK) {
					skb->csum = status->rx_csum ;
					/*
					 * Should swap bytes based on
					 * rbuf->endian_ctrl?
					 */
					skb->csum = swab16(skb->csum);
				}
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			/*
			 * TODO: check filter index and compare with ring index
			 * Report error if not matched
			 */
			/* strip the 64B status block */
			skb_pull(skb, 64);
			len -= 64;
			if (pDevCtrl->bIPHdrOptimize) {
				/* hw inserted 2 pad bytes before the frame */
				skb_pull(skb, 2);
				len -= 2;
			}
			if (pDevCtrl->umac->cmd & CMD_CRC_FWD) {
				/* strip the forwarded FCS */
				skb_trim(skb, len - 4);
				len -= 4;
			}
#ifdef CONFIG_BCMGENET_DUMP_DATA
			printk(KERN_NOTICE "%s:\n", __func__);
			print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS,
				16, 1, skb->data, skb->len, 0);
#endif
			/*
			 * Finish setting up the received SKB and send it
			 * to the kernel
			 */
			skb->dev = pDevCtrl->dev;
			skb->protocol = eth_type_trans(skb, pDevCtrl->dev);
			pDevCtrl->dev->stats.rx_packets++;
			pDevCtrl->dev->stats.rx_bytes += len;
			if (dmaFlag & DMA_RX_MULT)
				pDevCtrl->dev->stats.multicast++;
			skb->queue_mapping = i;
			/* Notify kernel */
			netif_receive_skb(skb);
			TRACE(("pushed up to kernel\n"));
		} /* packet process loop */
	} /* ring index loop */
	if (retvalue == 0)
		retvalue = rxpktprocessed;
	return retvalue;
}
/*
 * bcmgenet_isr1: interrupt handler for ring buffer.
 *
 * Services the second level-2 interrupt controller (intrl2_1).  From the
 * masks used below: bits 0-15 are per-tx-ring completion interrupts,
 * bits 16-31 are per-rx-ring interrupts.  Runs in hard-IRQ context.
 */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct BcmEnet_devctrl *pDevCtrl = dev_id;
	volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_1;
	unsigned int index;
	unsigned long flags;

	/* Save irq status for bottom-half processing.
	 * Masked-out sources are filtered from the snapshot. */
	pDevCtrl->irq1_stat = intrl2->cpu_stat & ~intrl2->cpu_mask_status;
	/* clear interrupts */
	intrl2->cpu_clear |= pDevCtrl->irq1_stat;
	TRACE(("%s: IRQ=0x%x\n", __func__, pDevCtrl->irq1_stat));
	/*
	 * Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	if (pDevCtrl->irq1_stat & 0x0000ffff) {
		index = 0;
		spin_lock_irqsave(&pDevCtrl->lock, flags);
		/* Reclaim every tx ring whose done bit is set. */
		for (index = 0; index < GENET_TX_RING_COUNT; index++) {
			if (pDevCtrl->irq1_stat & (1 << index)) {
				bcmgenet_tx_reclaim(pDevCtrl->dev, index);
				/* Rings at or above GENET_TX_MQ_CNT are not
				 * expected to interrupt here — flag them. */
				if (index >= GENET_TX_MQ_CNT) {
					pr_warn_ratelimited("bcmgenet_isr1 TX index %d >= %d",
						index, GENET_TX_MQ_CNT);
				}
			}
		}
		spin_unlock_irqrestore(&pDevCtrl->lock, flags);
	}
	if (pDevCtrl->irq1_stat & 0xffff0000) {
		/*
		 * We use NAPI here, because of the fact that we are NOT
		 * advancing consumer index right after data moved out of
		 * DMA, instead we advance it only when we found out upper
		 * level has consumed it.
		 */
		if (likely(napi_schedule_prep(&pDevCtrl->ring_napi))) {
			/* Disable all rx ring interrupts until the poll
			 * method re-enables them. */
			pr_warn_ratelimited("bcmgenet_isr1 ring_napi 0x%08x",
				pDevCtrl->irq1_stat);
			intrl2->cpu_mask_set |= 0xffff0000;
			__napi_schedule(&pDevCtrl->ring_napi);
		}
	}
	return IRQ_HANDLED;
}
/*
 * bcmgenet_isr0: Handle various interrupts.
 *
 * Services the first level-2 interrupt controller (intrl2_0): default-ring
 * rx (HFB/BDONE or MBDONE, depending on the throttling config), tx done,
 * and link/PHY/magic-packet events.  Runs in hard-IRQ context.
 */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct BcmEnet_devctrl *pDevCtrl = dev_id;
	volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_0;

	/* Save irq status for bottom-half processing. */
	pDevCtrl->irq0_stat = intrl2->cpu_stat & ~intrl2->cpu_mask_status;
	/* clear interrupts */
	intrl2->cpu_clear |= pDevCtrl->irq0_stat;
	TRACE(("IRQ=0x%x\n", pDevCtrl->irq0_stat));
	/* If there is tagged traffic, throttle untagged traffic.
	 * NOTE(review): desc_budget is a file-scope variable defined outside
	 * this chunk — written here with no locking; presumably read by the
	 * rx poll path. */
	if (pDevCtrl->irq0_stat & (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM))
		desc_budget = THROTTLED_DESC_BUDGET;
	else
		desc_budget = DEFAULT_DESC_BUDGET;
#ifndef CONFIG_BCMGENET_RX_DESC_THROTTLE
	if (pDevCtrl->irq0_stat & UMAC_IRQ_HFB_OR_DONE) {
		/*
		 * We use NAPI(software interrupt throttling, if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&pDevCtrl->napi))) {
			intrl2->cpu_mask_set |= UMAC_IRQ_HFB_OR_DONE;
			__napi_schedule(&pDevCtrl->napi);
		}
	}
#else
	/* Multiple buffer done event: process rx directly in IRQ context. */
	if (pDevCtrl->irq0_stat & UMAC_IRQ_RXDMA_MBDONE) {
		unsigned int work_done;
		volatile struct rDmaRingRegs *rDma_desc;
		rDma_desc = &pDevCtrl->rxDma->rDmaRings[DESC_INDEX];
		pDevCtrl->irq0_stat &= ~UMAC_IRQ_RXDMA_MBDONE;
		TRACE(("%s: %d packets available\n", __func__, DmaDescThres));
		work_done = bcmgenet_desc_rx(pDevCtrl, DmaDescThres,
				DESC_INDEX);
		/* Return the consumed descriptors to the hardware. */
		rDma_desc->rdma_consumer_index += work_done;
	}
#endif
	if (pDevCtrl->irq0_stat &
			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		/* Tx reclaim: a NULL skb tells bcmgenet_xmit to only
		 * reclaim completed descriptors, not transmit. */
		bcmgenet_xmit(NULL, pDevCtrl->dev);
	}
	if (pDevCtrl->irq0_stat & (UMAC_IRQ_PHY_DET_R |
				UMAC_IRQ_PHY_DET_F |
				UMAC_IRQ_LINK_UP |
				UMAC_IRQ_LINK_DOWN |
				UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&pDevCtrl->bcmgenet_irq_work);
	}
	return IRQ_HANDLED;
}
/*
 * bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 *
 * Consumes up to @budget completed descriptors from rx ring @index and
 * hands good packets to the network stack.  Every consumed buffer gets a
 * replacement skb queued on pDevCtrl->new_skbs[] (count in num_new_skbs);
 * assign_rx_buffers_for_queue() drains that list back onto the ring
 * before the hardware read pointer is advanced.
 *
 * Returns the number of descriptors processed; the caller advances the
 * ring consumer index by this amount.
 */
static unsigned int bcmgenet_desc_rx(void *ptr, unsigned int budget, int index)
{
	struct BcmEnet_devctrl *pDevCtrl = ptr;
	struct net_device *dev = pDevCtrl->dev;
	struct Enet_CB *cb;
	struct sk_buff *skb, *new_skb;
	unsigned long dmaFlag;
	unsigned long len_stat;
	int len, discard_cnt = 0;
	unsigned int rxpktprocessed = 0, rxpkttoprocess = 0;
	unsigned int p_index = 0, c_index = 0, read_ptr = 0;
	unsigned int cur_ptr;
	unsigned long start_addr, end_addr;
	volatile struct rDmaRingRegs *rDma_desc;

	/* new_skbs[] must have been drained by the previous call. */
	MY_BUG_ON(pDevCtrl->num_new_skbs != 0);
	rDma_desc = &pDevCtrl->rxDma->rDmaRings[index];
	p_index = rDma_desc->rdma_producer_index;
	p_index &= DMA_P_INDEX_MASK;
	c_index = rDma_desc->rdma_consumer_index;
	c_index &= DMA_C_INDEX_MASK;
	/* rdma pointers count half-words; >>1 converts to a BD index. */
	read_ptr = rDma_desc->rdma_read_pointer;
	read_ptr = ((read_ptr & DMA_RW_POINTER_MASK) >> 1);
	start_addr = rDma_desc->rdma_start_addr;
	end_addr = rDma_desc->rdma_end_addr;
	/* Handle the producer index wrapping past the consumer index. */
	if (p_index < c_index)
		rxpkttoprocess = (DMA_C_INDEX_MASK+1) - c_index + p_index;
	else
		rxpkttoprocess = p_index - c_index;
	TRACE(("RDMA: rxpkttoprocess=%d\n", rxpkttoprocess));
	/* Upper 16 bits of the producer index register count packets the
	 * hardware discarded for lack of free descriptors. */
	discard_cnt = (rDma_desc->rdma_producer_index >> 16);
	if (discard_cnt) {
		/* Report rx overrun errors */
		pDevCtrl->dev->stats.rx_over_errors += discard_cnt;
		pDevCtrl->rxRingDiscCnt[index] += discard_cnt;
		rDma_desc->rdma_producer_index = 0;
	}
	while ((rxpktprocessed < rxpkttoprocess) &&
			(rxpktprocessed < budget)) {
		/*
		 * Snapshot the descriptor being consumed: read_ptr is
		 * advanced below, so diagnostics must use these copies
		 * rather than re-reading through read_ptr.
		 */
		cur_ptr = read_ptr;
		len_stat = pDevCtrl->rxBds[read_ptr].length_status;
		dmaFlag = len_stat & 0xffff;
		len = len_stat >> 16;
		TRACE(("%s:index=%d, p_index=%d c_index=%d read_ptr=%d "
			"len_stat=0x%08lx\n",
			__func__, index, p_index, c_index, read_ptr,
			len_stat));
		rxpktprocessed++;
		cb = &pDevCtrl->rxCbs[read_ptr];
		skb = cb->skb;
		MY_BUG_ON(skb == NULL);
		cb->skb = NULL;
		dma_unmap_single(&dev->dev, cb->dma_addr,
				pDevCtrl->rxBufLen, DMA_FROM_DEVICE);
		/* A zero address marks the BD free for the refill path. */
		pDevCtrl->rxBds[read_ptr].address = 0;
		if (read_ptr == (end_addr & DMA_RW_POINTER_MASK) >> 1)
			read_ptr = (start_addr & DMA_RW_POINTER_MASK) >> 1;
		else
			read_ptr++;
		if (unlikely(!(dmaFlag & DMA_EOP) || !(dmaFlag & DMA_SOP))) {
			/*
			 * BUGFIX: log the failing descriptor (cur_ptr /
			 * len_stat).  The old code re-read through the
			 * already-advanced read_ptr, reporting the NEXT
			 * descriptor's state instead of the bad one.
			 */
			printk(KERN_WARNING "Dropping fragmented packet: "
				"index=%d, p_index=%d c_index=%d "
				"read_ptr=%d len_stat=0x%08lx\n",
				index, p_index, c_index, cur_ptr, len_stat);
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			MY_BUG_ON(pDevCtrl->num_new_skbs >= TOTAL_DESC * 2);
			/* Recycle the untouched skb as the replacement. */
			pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = skb;
			continue;
		}
		/* report errors */
		if (unlikely(dmaFlag & (DMA_RX_CRC_ERROR |
					DMA_RX_OV |
					DMA_RX_NO |
					DMA_RX_LG |
					DMA_RX_RXER))) {
			TRACE(("ERROR: dmaFlag=0x%x\n", (unsigned int)dmaFlag));
			if (dmaFlag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dmaFlag & DMA_RX_OV)
				dev->stats.rx_fifo_errors++;
			if (dmaFlag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dmaFlag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			/* discard the packet and advance consumer index.*/
			MY_BUG_ON(pDevCtrl->num_new_skbs >= TOTAL_DESC * 2);
			pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = skb;
			continue;
		} /* error packet */
		MY_BUG_ON(pDevCtrl->num_new_skbs >= TOTAL_DESC * 2);
		/* Allocate the replacement first so a failed allocation can
		 * simply recycle the old buffer instead of losing a BD. */
		new_skb = netdev_alloc_skb(pDevCtrl->dev,
				pDevCtrl->rxBufLen + SKB_ALIGNMENT);
		if (!new_skb) {
			pr_err_ratelimited("%s: failed to allocate skb, "
				"dropping old packet.\n", dev->name);
			pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = skb;
			dev->stats.rx_over_errors++;
			dev->stats.rx_dropped++;
			continue;
		}
		handleAlignment(pDevCtrl, new_skb);
		pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = new_skb;
		skb_put(skb, len);
		if (pDevCtrl->rbuf->rbuf_ctrl & RBUF_64B_EN) {
			struct status_64 *status;
			status = (struct status_64 *)skb->data;
			/* we have 64B rx status block enabled.*/
			if (pDevCtrl->rbuf->rbuf_chk_ctrl & RBUF_RXCHK_EN) {
				if (status->rx_csum & STATUS_RX_CSUM_OK)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;
			}
			/* Strip the 64-byte hardware status block. */
			skb_pull(skb, 64);
			len -= 64;
		}
		if (pDevCtrl->bIPHdrOptimize) {
			/* Drop the 2-byte pad inserted for IP alignment. */
			skb_pull(skb, 2);
			len -= 2;
		}
		if (pDevCtrl->umac->cmd & CMD_CRC_FWD) {
			/* MAC forwarded the FCS; trim it off the tail. */
			skb_trim(skb, len - 4);
			len -= 4;
		}
#ifdef CONFIG_BCMGENET_DUMP_DATA
		printk(KERN_NOTICE "bcmgenet_desc_rx : len=%d", skb->len);
		print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS,
				16, 1, skb->data, skb->len, 0);
#endif
		/*Finish setting up the received SKB and send it to the kernel*/
		skb->dev = pDevCtrl->dev;
		skb->protocol = eth_type_trans(skb, pDevCtrl->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dmaFlag & DMA_RX_MULT)
			dev->stats.multicast++;
		/* Notify kernel */
#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
		netif_rx(skb);
#else
		netif_receive_skb(skb);
#endif
		TRACE(("pushed up to kernel\n"));
	}
	if (rxpktprocessed) {
		/*
		 * assign_rx_buffers_for_queue() uses the current
		 * rdma_read_pointer so do not update it until after
		 * assign_rx_buffers_for_queue has been called.
		 */
		MY_BUG_ON(rxpktprocessed != pDevCtrl->num_new_skbs);
		assign_rx_buffers_for_queue(pDevCtrl, index);
		rDma_desc->rdma_read_pointer = (read_ptr << 1) &
			DMA_RW_POINTER_MASK;
	}
	return rxpktprocessed;
}
/*
 * assign_rx_buffers:
 * Attach an skb to every free RX DMA descriptor, under the lock that the
 * active receive scheme requires.  Used during initialization.
 */
static int assign_rx_buffers(struct BcmEnet_devctrl *pDevCtrl)
{
	unsigned short filled;
	unsigned long flags;

#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
	spin_lock_irqsave(&pDevCtrl->lock, flags);
#else
	(void)flags;
	spin_lock_bh(&pDevCtrl->bh_lock);
#endif
	/* Cover the entire descriptor area (pointer units are half-words). */
	filled = assign_rx_buffers_range(pDevCtrl, 0, 2 * TOTAL_DESC - 1, 0);
#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
	spin_unlock_irqrestore(&pDevCtrl->lock, flags);
#else
	spin_unlock_bh(&pDevCtrl->bh_lock);
#endif
	return filled;
}
/*
 * assign_rx_buffers for queue[index]: refill that ring's free descriptors
 * and re-enable rx DMA, under the lock the receive scheme requires.
 */
static int assign_rx_buffers_for_queue(struct BcmEnet_devctrl *pDevCtrl,
		int index)
{
	volatile struct rDmaRingRegs *ring;
	unsigned short filled;
	unsigned long flags;

#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
	spin_lock_irqsave(&pDevCtrl->lock, flags);
#else
	(void)flags;
	spin_lock_bh(&pDevCtrl->bh_lock);
#endif
	ring = &pDevCtrl->rxDma->rDmaRings[index];
	filled = assign_rx_buffers_range(pDevCtrl,
			ring->rdma_start_addr,
			ring->rdma_end_addr,
			ring->rdma_read_pointer);
	/* Enable rx DMA in case it was disabled due to running out of rx BD */
	pDevCtrl->rxDma->rdma_ctrl |= DMA_EN;
#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
	spin_unlock_irqrestore(&pDevCtrl->lock, flags);
#else
	spin_unlock_bh(&pDevCtrl->bh_lock);
#endif
	return filled;
}
/*
 * Assign buffers for addresses between start_addr and end_addr.
 * Walks the ring from read_pointer, attaching an skb to every BD whose
 * address is zero (i.e. consumed by the rx path).  Draws replacement
 * skbs from pDevCtrl->new_skbs[] first, allocating fresh ones only when
 * that stash is empty.  Returns the number of BDs refilled.
 */
static int assign_rx_buffers_range(struct BcmEnet_devctrl *pDevCtrl,
		unsigned long start_addr, unsigned long end_addr,
		unsigned long read_pointer) {
	struct sk_buff *skb;
	struct Enet_CB *cb;
	unsigned short filled = 0;
	unsigned long ptr, first, last;

	/* Hardware pointers count half-words; convert to BD indices. */
	first = (start_addr & DMA_RW_POINTER_MASK) >> 1;
	last = (end_addr & DMA_RW_POINTER_MASK) >> 1;
	ptr = (read_pointer & DMA_RW_POINTER_MASK) >> 1;
	while (pDevCtrl->rxBds[ptr].address == 0) {
		cb = &pDevCtrl->rxCbs[ptr];
		if (pDevCtrl->num_new_skbs > 0) {
			/* Use an skb pre-allocated by the rx path. */
			skb = pDevCtrl->new_skbs[--pDevCtrl->num_new_skbs];
			pDevCtrl->new_skbs[pDevCtrl->num_new_skbs] = NULL;
			MY_BUG_ON(!skb);
		} else {
			skb = netdev_alloc_skb(pDevCtrl->dev,
					pDevCtrl->rxBufLen + SKB_ALIGNMENT);
			if (!skb) {
				pr_err_ratelimited(
					"%s: failed to allocate skb for rx\n",
					pDevCtrl->dev->name);
				break;
			}
			handleAlignment(pDevCtrl, skb);
		}
		/* keep count of any BD's we refill */
		filled++;
		cb->skb = skb;
		/* NOTE(review): dma_map_single() result is not checked with
		 * dma_mapping_error() — pre-dates that convention. */
		cb->dma_addr = dma_map_single(&pDevCtrl->dev->dev,
				skb->data, pDevCtrl->rxBufLen, DMA_FROM_DEVICE);
		/* Publishing a non-zero address hands the BD back to DMA. */
		pDevCtrl->rxBds[ptr].address = cb->dma_addr;
		pDevCtrl->rxBds[ptr].length_status =
				(pDevCtrl->rxBufLen << 16);
		/* Advance, wrapping at the end of the ring. */
		ptr = (ptr == last) ? first : ptr + 1;
	}
	return filled;
}
static void save_state(struct BcmEnet_devctrl *pDevCtrl)
{
int ii;
volatile struct DmaDesc *rxBdAssignPtr = pDevCtrl->rxBds;
for (ii = 0; ii < pDevCtrl->nrRxBds; ++ii, ++rxBdAssignPtr) {
pDevCtrl->saved_rx_desc[ii].length_status =
rxBdAssignPtr->length_status;
pDevCtrl->saved_rx_desc[ii].address = rxBdAssignPtr->address;
}
pDevCtrl->int_mask = pDevCtrl->intrl2_0->cpu_mask_status;
pDevCtrl->rbuf_ctrl = pDevCtrl->rbuf->rbuf_ctrl;
}
static void restore_state(struct BcmEnet_devctrl *pDevCtrl)
{
int ii;
volatile struct DmaDesc *rxBdAssignPtr = pDevCtrl->rxBds;
pDevCtrl->intrl2_0->cpu_mask_clear = 0xFFFFFFFF ^ pDevCtrl->int_mask;
pDevCtrl->rbuf->rbuf_ctrl = pDevCtrl->rbuf_ctrl;
for (ii = 0; ii < pDevCtrl->nrRxBds; ++ii, ++rxBdAssignPtr) {
rxBdAssignPtr->length_status =
pDevCtrl->saved_rx_desc[ii].length_status;
rxBdAssignPtr->address = pDevCtrl->saved_rx_desc[ii].address;
}
pDevCtrl->rxDma->rdma_ctrl |= DMA_EN;
}
/*
 * init_umac: Initializes the uniMac controller
 *
 * Resets the MAC, clears the MIB counters, programs the maximum frame
 * length and configures the level-2 interrupt masks according to the
 * attached PHY type.  The rx/tx engines are NOT enabled here.
 * Always returns 0.
 */
static int init_umac(struct BcmEnet_devctrl *pDevCtrl)
{
	volatile struct uniMacRegs *umac;
	volatile struct intrl2Regs *intrl2;

	umac = pDevCtrl->umac;
	intrl2 = pDevCtrl->intrl2_0;
	TRACE(("bcmgenet: init_umac "));
	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 0;
	udelay(10);
	/* disable MAC while updating its registers */
	umac->cmd = 0;
	/* issue soft reset, wait for it to complete */
	umac->cmd = CMD_SW_RESET;
	udelay(1000);
	umac->cmd = 0;
	/* clear tx/rx counter */
	umac->mib_ctrl = MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT;
	umac->mib_ctrl = 0;
#ifdef MAC_LOOPBACK
	/* Enable GMII/MII loopback */
	umac->cmd |= CMD_LCL_LOOP_EN;
#endif
	umac->max_frame_len = ENET_MAX_MTU_SIZE;
	/*
	 * init rx registers, enable ip header optimization:
	 * a 2-byte pad aligns the IP header on a 4-byte boundary.
	 */
	if (pDevCtrl->bIPHdrOptimize)
		pDevCtrl->rbuf->rbuf_ctrl |= RBUF_ALIGN_2B;
#if CONFIG_BRCM_GENET_VERSION >= 3
	pDevCtrl->rbuf->rbuf_tbuf_size_ctrl = 1;
#endif
	/* Mask all interrupts, then selectively unmask below. */
	intrl2->cpu_mask_set = 0xFFFFFFFF;
	intrl2->cpu_clear = 0xFFFFFFFF;
	intrl2->cpu_mask_clear = 0x0;
	/* Enable HFB single match and multiple match interrupts. */
	intrl2->cpu_mask_clear |= (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM);
#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
	/* Descriptor throttling: interrupt on multiple-buffers-done. */
	intrl2->cpu_mask_clear |= UMAC_IRQ_RXDMA_MBDONE;
#else
	intrl2->cpu_mask_clear |= UMAC_IRQ_RXDMA_BDONE;
	TRACE(("%s:Enabling RXDMA_BDONE interrupt\n", __func__));
#endif /* CONFIG_BCMGENET_RX_DESC_THROTTLE */
	/* Monitor cable plugged/unplugged events for internal PHY */
	if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT) {
		intrl2->cpu_mask_clear |= (UMAC_IRQ_PHY_DET_R |
				UMAC_IRQ_PHY_DET_F);
		intrl2->cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN |
				UMAC_IRQ_LINK_UP);
		/* Turn on ENERGY_DET interrupt in bcmgenet_open()
		 * TODO: fix me for active standby.
		 */
	} else if (pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_MII ||
			pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII) {
		/* External PHYs: only link state changes are of interest. */
		intrl2->cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN |
				UMAC_IRQ_LINK_UP);
	} else if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) {
		/* MoCA: enable back-pressure input. */
		GENET_TBUF_BP_MC(pDevCtrl) |= BIT(GENET_BP_IN_EN_SHIFT);
		/* bp_mask: back pressure mask */
#if defined(CONFIG_NET_SCH_MULTIQ)
		GENET_TBUF_BP_MC(pDevCtrl) |= GENET_BP_MASK;
#else
		GENET_TBUF_BP_MC(pDevCtrl) &= ~GENET_BP_MASK;
#endif
	}
	/* rx/tx engines are enabled elsewhere (after DMA setup). */
	TRACE(("done init umac\n"));
	return 0;
}
/*
* init_edma: Initialize DMA control register
*/
static void init_edma(struct BcmEnet_devctrl *pDevCtrl)
{
#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
int speeds[] = {10, 100, 1000, 2500};
int sid = 1, timeout;
#endif
volatile struct rDmaRingRegs *rDma_desc;
volatile struct tDmaRingRegs *tDma_desc;
TRACE(("bcmgenet: init_edma\n"));
/* init rDma */
pDevCtrl->rxDma->rdma_scb_burst_size = DMA_MAX_BURST_LENGTH;
/* by default, enable ring 16 (descriptor based) */
rDma_desc = &pDevCtrl->rxDma->rDmaRings[DESC_INDEX];
rDma_desc->rdma_producer_index = 0;
rDma_desc->rdma_consumer_index = 0;
/* Initialize default queue. */
MY_BUG_ON(GENET_RX_TOTAL_MQ_BD > TOTAL_DESC);
rDma_desc->rdma_ring_buf_size = ((GENET_RX_DEFAULT_BD_CNT <<
DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH);
rDma_desc->rdma_start_addr = 2 * GENET_RX_TOTAL_MQ_BD;
rDma_desc->rdma_end_addr = 2 * TOTAL_DESC - 1;
rDma_desc->rdma_read_pointer = 2 * GENET_RX_TOTAL_MQ_BD;
rDma_desc->rdma_write_pointer = 2 * GENET_RX_TOTAL_MQ_BD;
rDma_desc->rdma_xon_xoff_threshold = ((DMA_FC_THRESH_LO
<< DMA_XOFF_THRESHOLD_SHIFT) |
DMA_FC_THRESH_HI);
#ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE
/*
* Use descriptor throttle, fire irq when multiple packets are done!
*/
rDma_desc->rdma_mbuf_done_threshold = DMA_DESC_THRES;
/*
* Enable push timer, force the IRQ_DESC_THROT to fire when timeout
* occurs, prevent system slow reponse when handling low throughput.
*/
sid = (pDevCtrl->umac->cmd >> CMD_SPEED_SHIFT) & CMD_SPEED_MASK;
timeout = 2*(DMA_DESC_THRES*ENET_MAX_MTU_SIZE