| |
| /* |
| * Copyright (c) 2002-2008 Broadcom Corporation |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 as |
| * published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public License |
| * along with this program; if not, write to the Free Software |
| * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| * |
| * |
| * File Name : bcmgenet.c |
| * |
| * Description: This is the Linux driver for the Broadcom GENET Ethernet MAC core. |
| |
| */ |
| |
| #define CARDNAME "bcmgenet" |
| #define VERSION "2.0" |
| #define VER_STR "v" VERSION " " __DATE__ " " __TIME__ |
| |
| #include <linux/kernel.h> |
| #include <linux/module.h> |
| #include <linux/sched.h> |
| #include <linux/types.h> |
| #include <linux/fcntl.h> |
| #include <linux/interrupt.h> |
| #include <linux/string.h> |
| #include <linux/init.h> |
| #include <linux/errno.h> |
| #include <linux/delay.h> |
| #include <linux/platform_device.h> |
| #include <linux/dma-mapping.h> |
| #include <linux/pm.h> |
| #include <linux/clk.h> |
| #include <linux/version.h> |
| #include <linux/debugfs.h> |
| |
| #include <linux/mii.h> |
| #include <linux/ethtool.h> |
| #include <linux/netdevice.h> |
| #include <linux/inetdevice.h> |
| #include <linux/etherdevice.h> |
| #include <linux/skbuff.h> |
| #include <linux/in.h> |
| |
| #include <asm/mipsregs.h> |
| #include <asm/cacheflush.h> |
| #include <asm/brcmstb/brcmstb.h> |
| #include <asm/brcmstb/brcmapi.h> |
| #include "bcmmii.h" |
| #include "bcmgenet.h" |
| #include "if_net.h" |
| |
| #define MY_BUG_ON(c) do { if ((c)) { printk(KERN_EMERG "MY_BUG_ON(%s) at %s:%d\n", #c, __FILE__, __LINE__); BUG(); } } while (0) |
| |
| #ifdef CONFIG_NET_SCH_MULTIQ |
| |
| #if CONFIG_BRCM_GENET_VERSION == 1 |
| #error "This version of GENET doesn't support tx multi queue" |
| #endif |
| /* Default # of tx queues for multi queue support */ |
| #define GENET_TX_MQ_CNT 1 |
| /* Default # of bds for each priority tx queue for multi queue support */ |
| #define GENET_TX_MQ_BD_CNT 128 |
| |
| /* Default # of additional rx queues for multi queue support */ |
| #define GENET_RX_MQ_CNT 1 |
| /* Default # of bds for each priority rx queue for multi queue support */ |
| #define GENET_RX_MQ_BD_CNT 128 |
| |
| /* Highest priority is given to the tx descriptor ring 0. |
| * All other rings are mapped to lower priorities (higher numerical values) */ |
| #define GENET_TX_Q0_PRIORITY 0 |
| |
| |
| static void bcmgenet_init_multiq_tx(struct net_device *dev); |
| static void bcmgenet_init_multiq_rx(struct net_device *dev); |
| |
| #else |
| #define GENET_RX_MQ_CNT 0 |
| #define GENET_RX_MQ_BD_CNT 0 |
| |
| #define GENET_TX_MQ_CNT 0 |
| #define GENET_TX_MQ_BD_CNT 0 |
| #endif /* CONFIG_NET_SCH_MULTIQ */ |
| |
| /* The total number of priority descriptors must not exceed TOTAL_DESC */ |
| #define GENET_RX_TOTAL_MQ_BD (GENET_RX_MQ_CNT * GENET_RX_MQ_BD_CNT) |
| #define GENET_TX_TOTAL_MQ_BD (GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT) |
| |
| #if GENET_RX_TOTAL_MQ_BD > TOTAL_DESC |
| #error Total number of rx priority descriptors must not exceed TOTAL_DESC. |
| #else |
| #define GENET_RX_DEFAULT_BD_CNT (TOTAL_DESC - GENET_RX_TOTAL_MQ_BD) |
| #endif |
| |
| #if GENET_TX_TOTAL_MQ_BD > TOTAL_DESC |
| #error Total number of tx priority descriptors must not exceed TOTAL_DESC. |
| #else |
| #define GENET_TX_DEFAULT_BD_CNT (TOTAL_DESC - GENET_TX_TOTAL_MQ_BD) |
| #endif |
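| /* |
| * Worked example (illustrative; assumes TOTAL_DESC is 256, as the "skip |
| * 256 descriptors" comment below implies): with GENET_TX_MQ_CNT = 1 and |
| * GENET_TX_MQ_BD_CNT = 128, the priority rings claim 128 descriptors and |
| * the default ring keeps GENET_TX_DEFAULT_BD_CNT = 256 - 128 = 128. |
| */ |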
| |
| #define RX_BUF_LENGTH 2048 |
| #define RX_BUF_BITS 12 |
| #define SKB_ALIGNMENT 32 |
| #define DMA_DESC_THRES 4 |
| #define HFB_ARP_LEN 21 |
| |
| /* NAPI budget for the default queue (queue 16) */ |
| #define DEFAULT_DESC_BUDGET GENET_RX_DEFAULT_BD_CNT |
| #define THROTTLED_DESC_BUDGET 2 |
| |
| /* |
| * Length in bytes that we will match in the filter for 802.1Q packets |
| * This includes the source and destination mac addresses (6 bytes each) |
| * and the 802.1Q tag (4 bytes), for a total of 16 bytes. |
| */ |
| #define HFB_8021Q_LEN 16 |
| |
| /* |
| * Per IEEE 802.1Q, Tag Protocol Identifier (TPID): a 16-bit field set to a |
| * value of 0x8100 in order to identify the frame as an IEEE 802.1Q-tagged |
| * frame. This field is located at the same position as the EtherType/Length |
| * field in untagged frames, and is thus used to distinguish the frame from |
| * untagged frames. |
| */ |
| #define TPID 0x8100 |
| /* |
| * Priority Code Point (PCP): a 3-bit field which refers to the IEEE 802.1p |
| * priority. It indicates the frame priority level. Values range from 0 (best |
| * effort, the default) to 7 (highest), though 1 (background) is actually the |
| * lowest priority. These values can be |
| * used to prioritize different classes of traffic (voice, video, data, etc.). |
| * See also Class of Service or CoS. |
| */ |
| #define PCP_COUNT 8 /* Represented by 3 bits. */ |
| /* The first and last enabled priority. */ |
| #define PCP_START 1 /* First PCP to enable. */ |
| #define PCP_END 7 /* Last PCP to enable. */ |
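| /* |
| * For reference, the 802.1Q tag is the TPID (16 bits) followed by the |
| * TCI: PCP (3 bits), CFI (1 bit), VLAN ID (12 bits). An illustrative |
| * sketch (not used by this driver) of recovering the PCP from the two |
| * TCI bytes that follow the 0x8100 TPID: |
| * |
| * u16 tci = (tag[2] << 8) | tag[3]; |
| * u8 pcp = (tci >> 13) & 0x7; /* 0..7, see PCP_COUNT above */ |
| */ |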
| |
| /* |
| * Combination of interrupts that we process as a group. |
| */ |
| #define UMAC_IRQ_HFB_OR_DONE \ |
| (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \ |
| UMAC_IRQ_RXDMA_BDONE| UMAC_IRQ_RXDMA_PDONE) |
| |
| /* Tx/Rx DMA register offset, skip 256 descriptors */ |
| #define GENET_TDMA_REG_OFF (GENET_TDMA_OFF + \ |
| 2 * TOTAL_DESC * sizeof(unsigned long)) |
| #define GENET_RDMA_REG_OFF (GENET_RDMA_OFF + \ |
| 2 * TOTAL_DESC * sizeof(unsigned long)) |
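| /* |
| * Worked example (assuming 256 descriptors of two 4-byte words each): |
| * 2 * 256 * 4 = 2048 bytes, i.e. the ring registers start 2KB past the |
| * descriptor area at GENET_TDMA_OFF/GENET_RDMA_OFF. |
| */ |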
| |
| #ifdef CONFIG_BRUNO |
| #define GENET0_DEVICE_NAME "eth0" |
| /* |
| * GENET MDIO Configuration Register. |
| * |
| * 31 10 9 4 3 1 0 |
| * -------------------------------------------------- |
| * | Reserved |mdio_clk_divider|Reserved|mdio_clause| |
| * -------------------------------------------------- |
| * MDIO clock (MDC) = system clock / (2 * (mdio_clk_divider + 1)) |
| * With system clock = 108MHz and mdio_clk_divider = 0x4, MDC = 10.8MHz. |
| */ |
| #define CLOCK_DIVIDER_SHIFT 4 |
| #define CLOCK_DIVIDER_MASK 0x3F |
| #define CLOCK_DIVIDER_10MHZ 0x4 |
| #endif /* CONFIG_BRUNO */ |
| |
| /* -------------------------------------------------------------------------- |
| External, indirect entry points. |
| --------------------------------------------------------------------------*/ |
| static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
| /* -------------------------------------------------------------------------- |
| Called for "ifconfig ethX up" & "down" |
| --------------------------------------------------------------------------*/ |
| static int bcmgenet_open(struct net_device *dev); |
| static int bcmgenet_close(struct net_device *dev); |
| /* -------------------------------------------------------------------------- |
| Watchdog timeout |
| --------------------------------------------------------------------------*/ |
| static void bcmgenet_timeout(struct net_device *dev); |
| /* -------------------------------------------------------------------------- |
| Packet transmission. |
| --------------------------------------------------------------------------*/ |
| static int bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev); |
| /* -------------------------------------------------------------------------- |
| Set address filtering mode |
| --------------------------------------------------------------------------*/ |
| static void bcmgenet_set_multicast_list(struct net_device *dev); |
| /* -------------------------------------------------------------------------- |
| Set the hardware MAC address. |
| --------------------------------------------------------------------------*/ |
| static int bcmgenet_set_mac_addr(struct net_device *dev, void *p); |
| |
| /* -------------------------------------------------------------------------- |
| Interrupt routine, for all interrupts except ring buffer interrupts |
| --------------------------------------------------------------------------*/ |
| static irqreturn_t bcmgenet_isr0(int irq, void *dev_id); |
| /*--------------------------------------------------------------------------- |
| IRQ handler for ring buffer interrupt. |
| --------------------------------------------------------------------------*/ |
| static irqreturn_t bcmgenet_isr1(int irq, void *dev_id); |
| /* -------------------------------------------------------------------------- |
| dev->poll() method |
| --------------------------------------------------------------------------*/ |
| static int bcmgenet_poll(struct napi_struct *napi, int budget); |
| static int bcmgenet_ring_poll(struct napi_struct *napi, int budget); |
| /* -------------------------------------------------------------------------- |
| Process received packets for descriptor-based DMA |
| --------------------------------------------------------------------------*/ |
| static unsigned int bcmgenet_desc_rx(void *ptr, unsigned int budget, int index); |
| /* -------------------------------------------------------------------------- |
| Process received packets for ring buffer DMA |
| --------------------------------------------------------------------------*/ |
| static unsigned int bcmgenet_ring_rx(void *ptr, unsigned int budget); |
| /* -------------------------------------------------------------------------- |
| Internal routines |
| --------------------------------------------------------------------------*/ |
| /* Allocate and initialize tx/rx buffer descriptor pools */ |
| static int bcmgenet_init_dev(struct BcmEnet_devctrl *pDevCtrl); |
| static void bcmgenet_uninit_dev(struct BcmEnet_devctrl *pDevCtrl); |
| /* Assign the Rx descriptor ring */ |
| static int assign_rx_buffers(struct BcmEnet_devctrl *pDevCtrl); |
| static int assign_rx_buffers_for_queue(struct BcmEnet_devctrl *pDevCtrl, int i); |
| static int assign_rx_buffers_range(struct BcmEnet_devctrl *pDevCtrl, |
| unsigned long start_addr, unsigned long end_addr, |
| unsigned long read_ptr); |
| /* Initialize the uniMac control registers */ |
| static int init_umac(struct BcmEnet_devctrl *pDevCtrl); |
| /* Initialize DMA control register */ |
| static void init_edma(struct BcmEnet_devctrl *pDevCtrl); |
| /* Interrupt bottom-half */ |
| static void bcmgenet_irq_task(struct work_struct *work); |
| /* power management */ |
| static void bcmgenet_power_down(struct BcmEnet_devctrl *pDevCtrl, int mode); |
| static void bcmgenet_power_up(struct BcmEnet_devctrl *pDevCtrl, int mode); |
| /* allocate an skb, the data comes from ring buffer */ |
| static struct sk_buff *__bcmgenet_alloc_skb_from_buf(unsigned char *buf, |
| int len, int headroom); |
| /* clock control */ |
| static void bcmgenet_clock_enable(struct BcmEnet_devctrl *pDevCtrl); |
| static void bcmgenet_clock_disable(struct BcmEnet_devctrl *pDevCtrl); |
| |
| /* S3 warm boot */ |
| static void save_state(struct BcmEnet_devctrl *pDevCtrl); |
| static void restore_state(struct BcmEnet_devctrl *pDevCtrl); |
| |
| /* HFB filtering for IPv4 multicast */ |
| static int bcmgenet_enable_multicast_hfb(struct BcmEnet_devctrl *pDevCtrl); |
| |
| /* HFB filtering for PCP */ |
| static void bcmgenet_enable_pcp_hfb(struct BcmEnet_devctrl *pDevCtrl); |
| static void bcmgenet_disable_pcp_hfb(struct BcmEnet_devctrl *pDevCtrl); |
| |
| |
| static int bcmgenet_update_hfb(struct net_device *dev, unsigned int *data, |
| int len, int user); |
| |
| static struct net_device *eth_root_dev; |
| static int DmaDescThres = DMA_DESC_THRES; |
| |
| /* Descriptor queue budget. */ |
| static int desc_budget = DEFAULT_DESC_BUDGET; |
| |
| /* bcmgenet multi-queue budget count variables for debugfs*/ |
| |
| static struct dentry *bcmgenet_debugfs_root = NULL; |
| |
| #ifdef CONFIG_NET_SCH_MULTIQ |
| static u32 bcmgenet_rx_mq_bd_cnt = GENET_RX_MQ_BD_CNT; |
| static u32 bcmgenet_tx_mq_bd_cnt = GENET_TX_MQ_BD_CNT; |
| static u32 bcmgenet_tx_mq_cnt = GENET_TX_MQ_CNT; |
| #endif |
| static u32 bcmgenet_tx_default_bd_cnt = GENET_TX_DEFAULT_BD_CNT; |
| |
| /* bcmgenet debugfs variable pointer and file name */ |
| typedef struct { |
| u32 *dbfs_p; |
| const char *dbfs_name; |
| } bcmgenet_debugfs; |
| |
| /* Initialization function of bcmgenet debugfs variable pointer and file name*/ |
| #define BCMGENET_DEBUGFS(x) {.dbfs_p = &x, .dbfs_name = #x} |
| |
| static int bcmgenet_debugfs_create(struct BcmEnet_devctrl *dev); |
| static int bcmgenet_debugfs_create_u32(bcmgenet_debugfs bcmgenet_dbfs, |
| int op, struct dentry *rtdir); |
| static int bcmgenet_debugfs_create_indexed_u32(unsigned int *var, int index, |
| int op, struct dentry *rtdir); |
| |
| #ifdef CONFIG_BCM7429A0 |
| static void bcm7429_ephy_workaround(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| int data; |
| data = pDevCtrl->mii.mdio_read(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f); |
| data |= 0x0004; |
| pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f, data); |
| data = 0x7555; |
| pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x13, data); |
| data = pDevCtrl->mii.mdio_read(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f); |
| data &= ~0x0004; |
| pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1f, data); |
| } |
| #endif |
| |
| /* |
| * HFB data for ARP request. |
| * In WoL (Magic Packet or ACPI) mode, we still need to respond to |
| * ARP requests, so dedicate an HFB filter to match them. |
| * NOTE: the last two words are filled in at runtime with the target IP. |
| */ |
| static unsigned int hfb_arp[] = { |
| 0x000FFFFF, 0x000FFFFF, 0x000FFFFF, 0x00000000, |
| 0x00000000, 0x00000000, 0x000F0806, 0x000F0001, |
| 0x000F0800, 0x000F0604, 0x000F0001, 0x00000000, |
| 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
| 0x000F0000, 0x000F0000, 0x000F0000, 0x000F0000, |
| 0x000F0000 |
| }; |
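| /* |
| * Each 32-bit HFB word above appears to encode a 4-bit nibble-enable |
| * mask in bits 19:16 and 16 bits of match data in bits 15:0. For |
| * example, 0x000FFFFF matches 0xFFFF (half of the broadcast MAC) with |
| * all four nibbles enabled, and 0x000F0806 matches the ARP EtherType |
| * 0x0806. |
| */ |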
| |
| /* |
| * HFB data for IPv4 packets with a multicast address in their dst field. |
| * Match: |
| * - Ethernet frame must use Type IP (0x0800) |
| * - IP version field must be 4 |
| * - First nibble (not byte) of dst IP address must be 0xe. |
| */ |
| static unsigned int hfb_ipv4_multicast[] = { |
| /* offset 0x00: */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
| /* offset 0x08: */ 0x00000000, 0x00000000, 0x000F0800, 0x00084000, |
| /* offset 0x10: */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
| /* offset 0x18: */ 0x00000000, 0x00000000, 0x00000000, 0x0008e000, |
| }; |
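| /* |
| * Decoding the two non-zero match words above (same assumed format as |
| * the ARP filter): 0x00084000 enables only the high nibble (mask 0x8) |
| * and matches 0x4xxx, i.e. IP version 4; 0x0008e000 likewise matches |
| * the leading 0xe nibble of a class-D (multicast) destination IP. |
| */ |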
| |
| /* ------------------------------------------------------------------------- |
| * The following bcmemac_xxxx() functions are legacy netaccel hooks and |
| * will be replaced! |
| * -----------------------------------------------------------------------*/ |
| struct net_device *bcmemac_get_device(void) |
| { |
| return eth_root_dev; |
| } |
| EXPORT_SYMBOL(bcmemac_get_device); |
| /* -------------------------------------------------------------------------- |
| Name: bcmemac_get_free_txdesc |
| Purpose: Get the current available TX descriptor count |
| -------------------------------------------------------------------------- */ |
| int bcmemac_get_free_txdesc(struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| return pDevCtrl->txFreeBds; |
| } |
| EXPORT_SYMBOL(bcmemac_get_free_txdesc); |
| /* -------------------------------------------------------------------------- |
| Name: bcmemac_xmit_check |
| Purpose: Reclaims TX descriptors |
| -------------------------------------------------------------------------- */ |
| int bcmemac_xmit_check(struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| struct Enet_CB *txCBPtr; |
| unsigned long flags, ret; |
| unsigned int c_index; |
| int lastTxedCnt = 0, i = 0; |
| /* |
| * Obtain exclusive access to transmitter. This is necessary because |
| * we might have more than one stack transmitting at once. |
| */ |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| /* Compute how many buffers have been transmitted since the last xmit call */ |
| c_index = pDevCtrl->txDma->tDmaRings[GENET_TX_RING_COUNT].tdma_consumer_index; |
| c_index &= (TOTAL_DESC - 1); |
| |
| if (c_index >= pDevCtrl->txLastCIndex) |
| lastTxedCnt = c_index - pDevCtrl->txLastCIndex; |
| else |
| lastTxedCnt = TOTAL_DESC - pDevCtrl->txLastCIndex + c_index; |
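| /* |
| * Example (assuming TOTAL_DESC = 256): with txLastCIndex = 250 and |
| * c_index = 5 the index has wrapped, so lastTxedCnt = 256 - 250 + 5 |
| * = 11 descriptors to reclaim. |
| */ |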
| |
| TRACE(("c_index=%d lastTxedCnt=%d txLastCIndex=%d\n", |
| c_index, lastTxedCnt, pDevCtrl->txLastCIndex)); |
| |
| /* Reclaim transmitted buffers */ |
| i = pDevCtrl->txLastCIndex; |
| while (lastTxedCnt-- > 0) { |
| txCBPtr = &pDevCtrl->txCbs[i]; |
| if (txCBPtr->skb != NULL) { |
| dma_unmap_single(&pDevCtrl->dev->dev, |
| txCBPtr->dma_addr, |
| txCBPtr->skb->len, |
| DMA_TO_DEVICE); |
| dev_kfree_skb_any(txCBPtr->skb); |
| txCBPtr->skb = NULL; |
| } |
| pDevCtrl->txFreeBds += 1; |
| if (i == (TOTAL_DESC - 1)) |
| i = 0; |
| else |
| i++; |
| } |
| pDevCtrl->txLastCIndex = c_index; |
| if (pDevCtrl->txFreeBds > 0) { |
| /* Disable txdma bdone/pdone interrupt if we have free tx bds */ |
| pDevCtrl->intrl2_0->cpu_mask_set |= (UMAC_IRQ_TXDMA_BDONE | |
| UMAC_IRQ_TXDMA_PDONE); |
| netif_wake_queue(dev); |
| ret = 0; |
| } else { |
| ret = 1; |
| } |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| |
| return ret; |
| } |
| EXPORT_SYMBOL(bcmemac_xmit_check); |
| /* -------------------------------------------------------------------------- |
| Name: bcmemac_xmit_fragment |
| Purpose: Fill a buffer DESC with an Ethernet fragment and submit it to the DMA |
| -------------------------------------------------------------------------- */ |
| int bcmemac_xmit_fragment(int ch, unsigned char *buf, int buf_len, |
| unsigned long tx_flags , struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| struct Enet_CB *txCBPtr; |
| unsigned int write_ptr = 0; |
| |
| if (pDevCtrl->txFreeBds == 0) |
| return 1; |
| /* |
| * The 64B status block must not be enabled in this case! |
| */ |
| write_ptr = pDevCtrl->txDma->tDmaRings[GENET_TX_RING_COUNT].tdma_write_pointer; |
| write_ptr = ((write_ptr & DMA_RW_POINTER_MASK) >> 1); |
| |
| /* Obtain transmit control block */ |
| txCBPtr = &pDevCtrl->txCbs[write_ptr]; |
| txCBPtr->BdAddr = &pDevCtrl->txBds[write_ptr]; |
| txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev, buf, |
| buf_len, DMA_TO_DEVICE); |
| |
| /* |
| * Add the buffer to the ring. |
| * Set addr and length of DMA BD to be transmitted. |
| */ |
| txCBPtr->BdAddr->address = txCBPtr->dma_addr; |
| txCBPtr->BdAddr->length_status = ((unsigned long)(buf_len)) << 16; |
| txCBPtr->BdAddr->length_status |= tx_flags | DMA_TX_APPEND_CRC; |
| |
| /* Default QTAG for MoCA */ |
| txCBPtr->BdAddr->length_status |= |
| (DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT); |
| |
| #ifdef CONFIG_BCMGENET_DUMP_DATA |
| TRACE(("%s: len %d", __func__, buf_len)); |
| print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, |
| 16, 1, buf, buf_len, 0); |
| #endif |
| |
| /* Decrement total BD count and advance our write pointer */ |
| pDevCtrl->txFreeBds -= 1; |
| |
| if (write_ptr == pDevCtrl->nrTxBds - 1) |
| write_ptr = 0; |
| else |
| write_ptr++; |
| /* advance producer index and write pointer.*/ |
| pDevCtrl->txDma->tDmaRings[DESC_INDEX].tdma_producer_index += 1; |
| pDevCtrl->txDma->tDmaRings[DESC_INDEX].tdma_write_pointer += 2; |
| |
| if (pDevCtrl->txFreeBds == 0) { |
| TRACE(("%s: %s no transmit queue space -- stopping queues\n", |
| dev->name, __func__)); |
| /* Enable Tx bdone/pdone interrupts! */ |
| pDevCtrl->intrl2_0->cpu_mask_clear |= (UMAC_IRQ_TXDMA_BDONE | |
| UMAC_IRQ_TXDMA_PDONE); |
| netif_stop_queue(dev); |
| } |
| /* update stats */ |
| dev->stats.tx_bytes += buf_len; |
| dev->stats.tx_packets++; |
| |
| dev->trans_start = jiffies; |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(bcmemac_xmit_fragment); |
| /* -------------------------------------------------------------------------- |
| Name: bcmemac_xmit_multibuf |
| Purpose: Send Ethernet traffic in multiple buffers (hdr, buf, tail) |
| -------------------------------------------------------------------------- */ |
| int bcmemac_xmit_multibuf(int ch, unsigned char *hdr, int hdr_len, |
| unsigned char *buf, int buf_len, unsigned char *tail, |
| int tail_len, struct net_device *dev) |
| { |
| unsigned long flags; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| while (bcmemac_xmit_check(dev)) |
| ; |
| |
| /* |
| * Obtain exclusive access to transmitter. This is necessary because |
| * we might have more than one stack transmitting at once. |
| */ |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| |
| /* Header + Optional payload in two parts */ |
| if ((hdr_len > 0) && (buf_len > 0) && |
| (tail_len > 0) && (hdr) && |
| (buf) && (tail)) { |
| /* Send Header */ |
| while (bcmemac_xmit_fragment(ch, hdr, hdr_len, DMA_SOP, dev)) |
| bcmemac_xmit_check(dev); |
| /* Send First Fragment */ |
| while (bcmemac_xmit_fragment(ch, buf, buf_len, 0, dev)) |
| bcmemac_xmit_check(dev); |
| /* Send 2nd Fragment */ |
| while (bcmemac_xmit_fragment(ch, tail, tail_len, DMA_EOP, dev)) |
| bcmemac_xmit_check(dev); |
| } else if ((hdr_len > 0) && (buf_len > 0) && (hdr) && (buf)) { |
| /* Header + Optional payload */ |
| /* Send Header */ |
| while (bcmemac_xmit_fragment(ch, hdr, hdr_len, DMA_SOP, dev)) |
| bcmemac_xmit_check(dev); |
| /* Send First Fragment */ |
| while (bcmemac_xmit_fragment(ch, buf, buf_len, DMA_EOP, dev)) |
| bcmemac_xmit_check(dev); |
| } else if ((hdr_len > 0) && (hdr)) { |
| /* Header Only (includes payload) */ |
| /* Send Header */ |
| while (bcmemac_xmit_fragment(ch, hdr, hdr_len, |
| DMA_SOP | DMA_EOP, dev)) |
| bcmemac_xmit_check(dev); |
| } else { |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; /* Drop the packet */ |
| } |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| EXPORT_SYMBOL(bcmemac_xmit_multibuf); |
| static inline void handleAlignment(struct BcmEnet_devctrl *pDevCtrl, |
| struct sk_buff *skb) |
| { |
| /* |
| * We request 2048 + 32 bytes of buffer, and dev_alloc_skb() adds 16B |
| * for NET_SKB_PAD, so in total we ask for 2048+32+16 bytes. That size |
| * is aligned up to SMP_CACHE_BYTES (64B here), so we finally end up |
| * with a 2112-byte buffer. Of that, the first 16B are reserved for |
| * NET_SKB_PAD; after aligning skb->data to a 32B boundary we still |
| * have enough space for the full 2KB buffer. |
| */ |
| |
| unsigned long boundary32, curData, resHeader; |
| |
| curData = (unsigned long) skb->data; |
| boundary32 = (curData + (SKB_ALIGNMENT - 1)) & ~(SKB_ALIGNMENT - 1); |
| resHeader = boundary32 - curData ; |
| /* 4 bytes for skb pointer */ |
| if (resHeader < 4) |
| boundary32 += 32; |
| |
| resHeader = boundary32 - curData - 4; |
| /* We have a minimum of 16B reserved by default. */ |
| skb_reserve(skb, resHeader); |
| |
| *(unsigned int *)skb->data = (unsigned int)skb; |
| skb_reserve(skb, 4); |
| /* |
| * Make sure it is on a 32B boundary; this should never trigger if |
| * our calculation was correct. |
| */ |
| if ((unsigned long) skb->data & (SKB_ALIGNMENT - 1)) { |
| printk(KERN_WARNING "skb buffer is NOT aligned on %d boundary!\n", |
| SKB_ALIGNMENT); |
| } |
| |
| /* |
| * we don't reserve 2B for IP Header optimization here, |
| * use skb_pull when receiving packets |
| */ |
| } |
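| /* |
| * Worked example for handleAlignment(): if skb->data starts at |
| * ...0x1008, the next 32B boundary is ...0x1020, so resHeader = |
| * 0x1020 - 0x1008 - 4 = 20. After the two skb_reserve() calls, |
| * skb->data sits exactly on ...0x1020 and the skb pointer lives in |
| * the 4 bytes just below it, recoverable as |
| * *(struct sk_buff **)(skb->data - 4). |
| */ |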
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_gphy_link_status |
| Purpose: GPHY link status monitoring task |
| -------------------------------------------------------------------------- */ |
| static void bcmgenet_gphy_link_status(struct work_struct *work) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = container_of(work, |
| struct BcmEnet_devctrl, bcmgenet_link_work); |
| |
| mii_setup(pDevCtrl->dev); |
| } |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_gphy_link_timer |
| Purpose: GPHY link status monitoring timer function |
| -------------------------------------------------------------------------- */ |
| static void bcmgenet_gphy_link_timer(unsigned long data) |
| { |
| struct BcmEnet_devctrl * pDevCtrl = (struct BcmEnet_devctrl *)data; |
| schedule_work(&pDevCtrl->bcmgenet_link_work); |
| mod_timer(&pDevCtrl->timer, jiffies + HZ); |
| } |
| |
| #ifdef CONFIG_BRCM_HAS_STANDBY |
| static int bcmgenet_wakeup_enable(void *ref) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = (struct BcmEnet_devctrl *)ref; |
| u32 mask; |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) |
| mask = WOL_MOCA_MASK; |
| else |
| mask = pDevCtrl->devnum ? WOL_MOCA_MASK : WOL_ENET_MASK; |
| if (device_may_wakeup(&pDevCtrl->dev->dev)) |
| brcm_pm_wakeup_source_enable(mask, 1); |
| return 0; |
| } |
| |
| static int bcmgenet_wakeup_disable(void *ref) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = (struct BcmEnet_devctrl *)ref; |
| u32 mask; |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) |
| mask = WOL_MOCA_MASK; |
| else |
| mask = pDevCtrl->devnum ? WOL_MOCA_MASK : WOL_ENET_MASK; |
| if (device_may_wakeup(&pDevCtrl->dev->dev)) |
| brcm_pm_wakeup_source_enable(mask, 0); |
| return 0; |
| } |
| |
| static int bcmgenet_wakeup_poll(void *ref) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = (struct BcmEnet_devctrl *)ref; |
| int retval = 0; |
| u32 mask = 0; |
| |
| if (device_may_wakeup(&pDevCtrl->dev->dev)) { |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) |
| mask = WOL_MOCA_MASK; |
| else |
| mask = pDevCtrl->devnum ? WOL_MOCA_MASK : WOL_ENET_MASK; |
| retval = brcm_pm_wakeup_get_status(mask); |
| } |
| printk(KERN_DEBUG "%s %s(%08x): %d\n", __func__, |
| pDevCtrl->dev->name, mask, retval); |
| return retval; |
| } |
| |
| static struct brcm_wakeup_ops bcmgenet_wakeup_ops = { |
| .enable = bcmgenet_wakeup_enable, |
| .disable = bcmgenet_wakeup_disable, |
| .poll = bcmgenet_wakeup_poll, |
| }; |
| #endif |
| |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_open |
| Purpose: Open and Initialize the EMAC on the chip |
| -------------------------------------------------------------------------- */ |
| static int bcmgenet_open(struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| unsigned long dma_ctrl; |
| volatile struct uniMacRegs *umac = pDevCtrl->umac; |
| |
| TRACE(("%s: bcmgenet_open\n", dev->name)); |
| |
| bcmgenet_clock_enable(pDevCtrl); |
| |
| #ifdef CONFIG_BRUNO |
| /* |
| * Set the clock divider to 0x04 to generate a 10.8MHz clock. |
| */ |
| if (strcmp(dev->name, GENET0_DEVICE_NAME) == 0) { |
| volatile unsigned long val = pDevCtrl->umac->mdio_cfg; |
| val &= ~(CLOCK_DIVIDER_MASK << CLOCK_DIVIDER_SHIFT); |
| val |= (CLOCK_DIVIDER_10MHZ << CLOCK_DIVIDER_SHIFT); |
| pDevCtrl->umac->mdio_cfg = val; |
| } |
| #endif /* CONFIG_BRUNO */ |
| |
| GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 0; |
| |
| /* disable ethernet MAC while updating its registers */ |
| pDevCtrl->umac->cmd &= ~(CMD_TX_EN | CMD_RX_EN); |
| |
| umac->mac_0 = (dev->dev_addr[0] << 24 | |
| dev->dev_addr[1] << 16 | |
| dev->dev_addr[2] << 8 | |
| dev->dev_addr[3]); |
| umac->mac_1 = dev->dev_addr[4] << 8 | dev->dev_addr[5]; |
| |
| if (pDevCtrl->wol_enabled) { |
| /* From WOL-enabled suspend, switch to regular clock */ |
| clk_disable(pDevCtrl->clk_wol); |
| /* init umac registers to synchronize s/w with h/w */ |
| init_umac(pDevCtrl); |
| /* Speed settings must be restored */ |
| mii_init(dev); |
| mii_setup(dev); |
| } |
| |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT) |
| pDevCtrl->ext->ext_pwr_mgmt |= EXT_ENERGY_DET_MASK; |
| |
| if (test_and_clear_bit(GENET_POWER_WOL_MAGIC, &pDevCtrl->wol_enabled)) |
| bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_MAGIC); |
| if (test_and_clear_bit(GENET_POWER_WOL_ACPI, &pDevCtrl->wol_enabled)) |
| bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_ACPI); |
| |
| /* disable DMA */ |
| dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; |
| pDevCtrl->txDma->tdma_ctrl &= ~dma_ctrl; |
| pDevCtrl->rxDma->rdma_ctrl &= ~dma_ctrl; |
| pDevCtrl->umac->tx_flush = 1; |
| GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 1; |
| udelay(10); |
| pDevCtrl->umac->tx_flush = 0; |
| GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 0; |
| |
| /* reset dma, start from beginning of the ring. */ |
| init_edma(pDevCtrl); |
| /* reset internal bookkeeping variables. */ |
| pDevCtrl->txLastCIndex = 0; |
| pDevCtrl->rxBdAssignPtr = pDevCtrl->rxBds; |
| |
| if (brcm_pm_deep_sleep()) |
| restore_state(pDevCtrl); |
| else |
| assign_rx_buffers(pDevCtrl); |
| |
| pDevCtrl->txFreeBds = pDevCtrl->nrTxBds; |
| |
| /* Always enable ring 16, the descriptor ring */ |
| pDevCtrl->rxDma->rdma_ctrl |= dma_ctrl; |
| pDevCtrl->txDma->tdma_ctrl |= dma_ctrl; |
| |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_MII || |
| pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII || |
| pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII_IBS) { |
| mod_timer(&pDevCtrl->timer, jiffies); |
| } |
| |
| if (request_irq(pDevCtrl->irq0, bcmgenet_isr0, IRQF_SHARED, |
| dev->name, pDevCtrl) < 0) { |
| printk(KERN_ERR "can't request IRQ %d\n", pDevCtrl->irq0); |
| goto err2; |
| } |
| if (request_irq(pDevCtrl->irq1, bcmgenet_isr1, IRQF_SHARED, |
| dev->name, pDevCtrl) < 0) { |
| printk(KERN_ERR "can't request IRQ %d\n", pDevCtrl->irq1); |
| goto err1; |
| } |
| /* Start the network engine */ |
| netif_tx_start_all_queues(dev); |
| napi_enable(&pDevCtrl->napi); |
| |
| pDevCtrl->umac->cmd |= (CMD_TX_EN | CMD_RX_EN); |
| |
| #ifdef CONFIG_BRCM_HAS_STANDBY |
| brcm_pm_wakeup_register(&bcmgenet_wakeup_ops, pDevCtrl, dev->name); |
| device_set_wakeup_capable(&dev->dev, 1); |
| #endif |
| |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT) |
| bcmgenet_power_up(pDevCtrl, GENET_POWER_PASSIVE); |
| |
| return 0; |
| err1: |
| free_irq(pDevCtrl->irq0, pDevCtrl); |
| err2: |
| del_timer_sync(&pDevCtrl->timer); |
| netif_tx_stop_all_queues(dev); |
| |
| return -ENODEV; |
| } |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_close |
| Purpose: Stop communicating with the outside world |
| Note: Caused by 'ifconfig ethX down' |
| -------------------------------------------------------------------------- */ |
| static int bcmgenet_close(struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| int timeout = 0; |
| |
| TRACE(("%s: bcmgenet_close\n", dev->name)); |
| |
| napi_disable(&pDevCtrl->napi); |
| netif_tx_stop_all_queues(dev); |
| /* Stop Tx DMA */ |
| pDevCtrl->txDma->tdma_ctrl &= ~DMA_EN; |
| while (timeout < 5000) { |
| if (pDevCtrl->txDma->tdma_status & DMA_EN) |
| break; |
| udelay(1); |
| timeout++; |
| } |
| if (timeout == 5000) |
| printk(KERN_ERR "Timed out while shutting down Tx DMA\n"); |
| |
| /* Disable Rx DMA*/ |
| pDevCtrl->rxDma->rdma_ctrl &= ~DMA_EN; |
| timeout = 0; |
| while (timeout < 5000) { |
| if (pDevCtrl->rxDma->rdma_status & DMA_EN) |
| break; |
| udelay(1); |
| timeout++; |
| } |
| if (timeout == 5000) |
| printk(KERN_ERR "Timed out while shutting down Rx DMA\n"); |
| |
| pDevCtrl->umac->cmd &= ~(CMD_RX_EN | CMD_TX_EN); |
| |
| /* tx reclaim */ |
| bcmgenet_xmit(NULL, dev); |
| free_irq(pDevCtrl->irq0, (void *)pDevCtrl); |
| free_irq(pDevCtrl->irq1, (void *)pDevCtrl); |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_MII || |
| pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII || |
| pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII_IBS) { |
| del_timer_sync(&pDevCtrl->timer); |
| cancel_work_sync(&pDevCtrl->bcmgenet_link_work); |
| } |
| /* |
| * Wait for pending work items to complete - we are stopping |
| * the clock now. Since interrupts are disabled, no new work |
| * will be scheduled. |
| */ |
| cancel_work_sync(&pDevCtrl->bcmgenet_irq_work); |
| |
| if (brcm_pm_deep_sleep()) |
| save_state(pDevCtrl); |
| |
| if (device_may_wakeup(&dev->dev) && pDevCtrl->dev_asleep) { |
| if (pDevCtrl->wolopts & WAKE_MAGIC) |
| bcmgenet_power_down(pDevCtrl, GENET_POWER_WOL_MAGIC); |
| else if (pDevCtrl->wolopts & WAKE_ARP) |
| bcmgenet_power_down(pDevCtrl, GENET_POWER_WOL_ACPI); |
| } else if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT) |
| bcmgenet_power_down(pDevCtrl, GENET_POWER_PASSIVE); |
| |
| if (pDevCtrl->wol_enabled) |
| clk_enable(pDevCtrl->clk_wol); |
| |
| bcmgenet_clock_disable(pDevCtrl); |
| |
| return 0; |
| } |
| |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_timeout |
| Purpose: Transmit watchdog timeout handler |
| -------------------------------------------------------------------------- */ |
| static void bcmgenet_timeout(struct net_device *dev) |
| { |
| MY_BUG_ON(dev == NULL); |
| |
| TRACE(("%s: bcmgenet_timeout\n", dev->name)); |
| |
| dev->trans_start = jiffies; |
| |
| dev->stats.tx_errors++; |
| |
| netif_tx_wake_all_queues(dev); |
| } |
| |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_set_multicast_list |
| Purpose: Set the multicast mode, i.e. promiscuous or multicast |
| -------------------------------------------------------------------------- */ |
| static void bcmgenet_set_multicast_list(struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) |
| struct netdev_hw_addr *ha; |
| #else |
| struct dev_mc_list *dmi; |
| #endif |
| int i, mc; |
| #define MAX_MC_COUNT 16 |
| |
| TRACE(("%s: bcmgenet_set_multicast_list: %08X\n", |
| dev->name, dev->flags)); |
| |
| /* Promiscuous mode */ |
| if (dev->flags & IFF_PROMISC) { |
| pDevCtrl->umac->cmd |= CMD_PROMISC; |
| pDevCtrl->umac->mdf_ctrl = 0; |
| return; |
| } else |
| pDevCtrl->umac->cmd &= ~CMD_PROMISC; |
| |
| |
| /* UniMac doesn't support ALLMULTI */ |
| if (dev->flags & IFF_ALLMULTI) |
| return; |
| |
| /* update MDF filter */ |
| i = 0; |
| mc = 0; |
| /* Broadcast */ |
| pDevCtrl->umac->mdf_addr[i] = 0xFFFF; |
| pDevCtrl->umac->mdf_addr[i+1] = 0xFFFFFFFF; |
| pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc)); |
| i += 2; |
| mc++; |
| /* Unicast*/ |
| pDevCtrl->umac->mdf_addr[i] = (dev->dev_addr[0]<<8) | dev->dev_addr[1]; |
| pDevCtrl->umac->mdf_addr[i+1] = dev->dev_addr[2] << 24 | |
| dev->dev_addr[3] << 16 | |
| dev->dev_addr[4] << 8 | |
| dev->dev_addr[5]; |
| pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc)); |
| i += 2; |
| mc++; |
| |
| #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) |
| if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= MAX_MC_COUNT) |
| return; |
| netdev_for_each_mc_addr(ha, dev) { |
| pDevCtrl->umac->mdf_addr[i] = ha->addr[0] << 8 | ha->addr[1]; |
| pDevCtrl->umac->mdf_addr[i+1] = ha->addr[2] << 24 | |
| ha->addr[3] << 16 | ha->addr[4] << 8 | ha->addr[5]; |
| pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc)); |
| i += 2; |
| mc++; |
| } |
| #else |
| if (dev->mc_count == 0 || dev->mc_count > (MAX_MC_COUNT - 1)) |
| return; |
| /* Multicast */ |
| for (dmi = dev->mc_list; dmi; dmi = dmi->next) { |
| pDevCtrl->umac->mdf_addr[i] = (dmi->dmi_addr[0] << 8) | |
| dmi->dmi_addr[1]; |
| pDevCtrl->umac->mdf_addr[i+1] = (dmi->dmi_addr[2] << 24) | |
| (dmi->dmi_addr[3] << 16) | |
| (dmi->dmi_addr[4] << 8) | |
| dmi->dmi_addr[5]; |
| pDevCtrl->umac->mdf_ctrl |= (1 << (MAX_MC_COUNT - mc)); |
| i += 2; |
| mc++; |
| } |
| #endif |
| } |
| /* |
| * Set the hardware MAC address. |
| */ |
| static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) |
| { |
| struct sockaddr *addr = p; |
| |
| if (netif_running(dev)) |
| return -EBUSY; |
| |
| memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
| |
| return 0; |
| } |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_select_queue |
| Purpose: select which xmit queue to use based on skb->queue_mapping. |
| -------------------------------------------------------------------------- */ |
| static u16 __maybe_unused bcmgenet_select_queue(struct net_device *dev, |
| struct sk_buff *skb) |
| { |
| /* |
| * If multi-queue support is enabled and NET_ACT_SKBEDIT is not |
| * defined, this function simply returns the current queue_mapping |
| * set inside the skb. This means other modules (netaccel, for |
| * example) must provide a mechanism to set the queue_mapping |
| * before trying to send a packet. |
| */ |
| return skb->queue_mapping; |
| } |
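| /* |
| * Illustrative caller-side sketch (an assumed external module such as |
| * netaccel): set the mapping before handing the skb to the stack so |
| * that bcmgenet_xmit() steers it to ring 0, the highest priority queue: |
| * |
| * skb_set_queue_mapping(skb, 1); /* 0 = default ring 16, 1 = ring 0 */ |
| * dev_queue_xmit(skb); |
| */ |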
| /* -------------------------------------------------------------------------- |
| Name: __bcmgenet_skb_destructor |
| Purpose: For ring buffer, called after skb is consumed. |
| -------------------------------------------------------------------------- */ |
| static void __bcmgenet_skb_destructor(struct sk_buff *skb) |
| { |
| struct Enet_CB *cb; |
| int index, cbi; |
| volatile struct rDmaRingRegs *rDma_ring; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(skb->dev); |
| struct status_64 *status = (struct status_64 *)skb->head; |
| index = status->reserved[0]; |
| cbi = status->reserved[1]; |
| |
| rDma_ring = &pDevCtrl->rxDma->rDmaRings[index]; |
| cb = pDevCtrl->rxRingCbs[index] + cbi; |
| dma_sync_single_for_device(&pDevCtrl->dev->dev, |
| cb->dma_addr, cb->dma_len, DMA_FROM_DEVICE); |
| /* Increment the consumer index. skbs must be consumed in order, |
| * otherwise the ring buffer will go out of sync! */ |
| if ((rDma_ring->rdma_consumer_index & DMA_C_INDEX_MASK) == 0xFFFF) |
| rDma_ring->rdma_consumer_index = 0; |
| else |
| rDma_ring->rdma_consumer_index++; |
| skb->destructor = NULL; |
| } |
| /* -------------------------------------------------------------------------- |
| Name: __bcmgenet_alloc_skb_from_buf |
| Purpose: Allocate an skb from an existing buffer. |
| -------------------------------------------------------------------------- */ |
| static struct sk_buff *__bcmgenet_alloc_skb_from_buf(unsigned char *buf, |
| int len, int headroom) |
| { |
| struct skb_shared_info *shinfo; |
| struct sk_buff *skb; |
| |
| skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); |
| if (!skb) |
| return NULL; |
| memset(skb, 0, offsetof(struct sk_buff, tail)); |
| skb->truesize = len + sizeof(struct sk_buff); |
| atomic_set(&skb->users, 1); |
| skb->head = buf; |
| skb->data = buf; |
| skb_reset_tail_pointer(skb); |
| skb->end = skb->tail + len - sizeof(struct skb_shared_info); |
| /* FCLONE_ORIG tells kfree_skb() not to release the data */ |
| skb->cloned = SKB_FCLONE_ORIG; |
| /* FCLONE_UNAVAILABLE tells kfree_skb() to free the skb head from the cache */ |
| skb->fclone = SKB_FCLONE_UNAVAILABLE; |
| |
| skb_reserve(skb, headroom); |
| shinfo = skb_shinfo(skb); |
| |
| /* Set dataref to 2, so upper layer won't free the data buffer */ |
| atomic_set(&shinfo->dataref, 2); |
| shinfo->nr_frags = 0; |
| shinfo->gso_size = 0; |
| shinfo->gso_segs = 0; |
| shinfo->gso_type = 0; |
| shinfo->ip6_frag_id = 0; |
| #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) |
| shinfo->tx_flags = 0; |
| #else |
| shinfo->tx_flags.flags = 0; |
| #endif |
| shinfo->frag_list = NULL; |
| memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); |
| |
| return skb; |
| } |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_alloc_txring_skb |
| Purpose: Allocate an skb for the tx ring buffer. |
| -------------------------------------------------------------------------- */ |
| struct sk_buff *bcmgenet_alloc_txring_skb(struct net_device *dev, int index) |
| { |
| unsigned long flags, p_index = 0; |
| struct sk_buff *skb = NULL; |
| struct Enet_CB *cb; |
| volatile struct tDmaRingRegs *tDma_ring; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| if (!(pDevCtrl->txDma->tdma_ctrl & |
| (1 << (index + DMA_RING_BUF_EN_SHIFT)))) { |
| printk(KERN_ERR "Ring %d is not enabled\n", index); |
| BUG(); |
| } |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| if (pDevCtrl->txRingFreeBds[index] == 0) { |
| /* |
| * This shouldn't happen; the upper layer should check whether |
| * the tx queue is stopped before calling this. |
| */ |
| printk(KERN_ERR "%s:%d queue stopped!!\n", __func__, index); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return skb; |
| } |
| tDma_ring = &pDevCtrl->txDma->tDmaRings[index]; |
| p_index = (DMA_P_INDEX_MASK & tDma_ring->tdma_producer_index); |
| /* The P/C index is 16 bits; reduce it modulo the ring size */ |
| p_index &= (pDevCtrl->txRingSize[index] - 1); |
| |
| cb = pDevCtrl->txRingCBs[index] + p_index; |
| skb = __bcmgenet_alloc_skb_from_buf((unsigned char *)cb->BdAddr, |
| RX_BUF_LENGTH, 64); |
| |
| cb->skb = skb; |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| |
| return skb; |
| } |
| EXPORT_SYMBOL(bcmgenet_alloc_txring_skb); |
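| /* |
| * Assumed caller pattern (sketch only; the real netaccel user is |
| * external to this file): obtain a ring-backed skb, fill it, then |
| * submit it through bcmgenet_ring_xmit(): |
| * |
| * skb = bcmgenet_alloc_txring_skb(dev, ring); |
| * if (skb) { |
| * memcpy(skb_put(skb, len), payload, len); |
| * bcmgenet_ring_xmit(skb, dev, ring, 0); |
| * } |
| */ |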
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_get_txcb |
| Purpose: return tx control data and increment write pointer. |
| -------------------------------------------------------------------------- */ |
| static struct Enet_CB *bcmgenet_get_txcb(struct net_device *dev, |
| int *pos, int index) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| struct Enet_CB *txCBPtr = NULL; |
| |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (index == DESC_INDEX) { |
| txCBPtr = pDevCtrl->txCbs; |
| txCBPtr += (*pos - GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT); |
| txCBPtr->BdAddr = &pDevCtrl->txBds[*pos]; |
| if (*pos == (TOTAL_DESC - 1)) |
| *pos = (GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT); |
| else |
| *pos += 1; |
| |
| } else { |
| txCBPtr = pDevCtrl->txRingCBs[index]; |
| txCBPtr += (*pos - index * GENET_TX_MQ_BD_CNT); |
| txCBPtr->BdAddr = &pDevCtrl->txBds[*pos]; |
| if (*pos == (GENET_TX_MQ_BD_CNT * (index + 1) - 1)) |
| *pos = GENET_TX_MQ_BD_CNT * index; |
| else |
| *pos += 1; |
| } |
| #else |
| txCBPtr = pDevCtrl->txCbs + *pos; |
| txCBPtr->BdAddr = &pDevCtrl->txBds[*pos]; |
| /* Advancing local write pointer */ |
| if (*pos == (TOTAL_DESC - 1)) |
| *pos = 0; |
| else |
| *pos += 1; |
| #endif |
| |
| return txCBPtr; |
| } |
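| /* |
| * Descriptor layout behind the arithmetic above (multi-queue case): |
| * priority ring i owns BDs [i * GENET_TX_MQ_BD_CNT, |
| * (i + 1) * GENET_TX_MQ_BD_CNT), and the default ring 16 owns the |
| * remainder up to TOTAL_DESC - 1, so each ring's write position wraps |
| * within its own slice rather than across the whole table. |
| */ |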
| |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_tx_reclaim |
| Purpose: reclaim transmitted skbs |
| -------------------------------------------------------------------------- */ |
| static void bcmgenet_tx_reclaim(struct net_device *dev, int index) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| unsigned int c_index; |
| struct Enet_CB *txCBPtr; |
| int lastTxedCnt = 0, lastCIndex = 0, nrTxBds = 0; |
| |
| /* Compute how many buffers have been transmitted since the last xmit call */ |
| c_index = pDevCtrl->txDma->tDmaRings[index].tdma_consumer_index; |
| |
| |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (index == DESC_INDEX) { |
| lastCIndex = pDevCtrl->txLastCIndex; |
| nrTxBds = GENET_TX_DEFAULT_BD_CNT; |
| } else { |
| lastCIndex = pDevCtrl->txRingCIndex[index]; |
| nrTxBds = GENET_TX_MQ_BD_CNT; |
| } |
| |
| #else |
| lastCIndex = pDevCtrl->txLastCIndex; |
| nrTxBds = TOTAL_DESC; |
| #endif |
| c_index &= (nrTxBds - 1); |
| |
| if (c_index >= lastCIndex) |
| lastTxedCnt = c_index - lastCIndex; |
| else |
| lastTxedCnt = nrTxBds - lastCIndex + c_index; |
| |
| |
| TRACE(("%s: %s index=%d c_index=%d " |
| "lastTxedCnt=%d txLastCIndex=%d\n", |
| __func__, pDevCtrl->dev->name, index, |
| c_index, lastTxedCnt, lastCIndex)); |
| |
| /* Reclaim transmitted buffers */ |
| while (lastTxedCnt-- > 0) { |
| if (index == DESC_INDEX) |
| txCBPtr = &pDevCtrl->txCbs[lastCIndex]; |
| else |
| txCBPtr = pDevCtrl->txRingCBs[index] + lastCIndex; |
| if (txCBPtr->skb != NULL) { |
| dma_unmap_single(&pDevCtrl->dev->dev, |
| txCBPtr->dma_addr, |
| txCBPtr->skb->len, |
| DMA_TO_DEVICE); |
| dev_kfree_skb_any(txCBPtr->skb); |
| txCBPtr->skb = NULL; |
| txCBPtr->dma_addr = 0; |
| } else if (txCBPtr->dma_addr) { |
| dma_unmap_page(&pDevCtrl->dev->dev, |
| txCBPtr->dma_addr, |
| txCBPtr->dma_len, |
| DMA_TO_DEVICE); |
| txCBPtr->dma_addr = 0; |
| } |
| if (index == DESC_INDEX) |
| pDevCtrl->txFreeBds += 1; |
| else |
| pDevCtrl->txRingFreeBds[index] += 1; |
| |
| if (lastCIndex == (nrTxBds - 1)) |
| lastCIndex = 0; |
| else |
| lastCIndex++; |
| } |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (index == DESC_INDEX) { |
| if (pDevCtrl->txFreeBds > (MAX_SKB_FRAGS + 1) |
| && __netif_subqueue_stopped(dev, 0)) { |
| pDevCtrl->intrl2_0->cpu_mask_set |= |
| (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE); |
| netif_wake_subqueue(dev, 0); |
| } |
| pDevCtrl->txLastCIndex = c_index; |
| } else { |
| if (pDevCtrl->txRingFreeBds[index] > (MAX_SKB_FRAGS + 1) |
| && __netif_subqueue_stopped(dev, index+1)) { |
| pDevCtrl->intrl2_1->cpu_mask_set = (1 << index); |
| netif_wake_subqueue(dev, index+1); |
| } |
| pDevCtrl->txRingCIndex[index] = c_index; |
| } |
| #else |
| if (pDevCtrl->txFreeBds > (MAX_SKB_FRAGS + 1) |
| && netif_queue_stopped(dev)) { |
| /* Disable txdma bdone/pdone interrupt if we have free tx bds */ |
| pDevCtrl->intrl2_0->cpu_mask_set |= (UMAC_IRQ_TXDMA_BDONE | |
| UMAC_IRQ_TXDMA_PDONE); |
| netif_wake_queue(dev); |
| } |
| pDevCtrl->txLastCIndex = c_index; |
| #endif |
| } |
| |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_xmit |
| Purpose: Send ethernet traffic |
| -------------------------------------------------------------------------- */ |
| static int bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| volatile struct tDmaRingRegs *tDma_ring; |
| struct Enet_CB *txCBPtr; |
| unsigned int write_ptr = 0; |
| int i = 0; |
| unsigned long flags; |
| struct status_64 *Status = NULL; |
| int nr_frags = 0, index = DESC_INDEX; |
| |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| |
| if (!pDevCtrl->clock_active) { |
| printk(KERN_WARNING "%s: transmitting with gated clock!\n", |
| dev_name(&dev->dev)); |
| dev_kfree_skb_any(skb); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return NETDEV_TX_OK; |
| } |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (skb) { |
| index = skb_get_queue_mapping(skb); |
| /* |
| * Mapping strategy: |
| * queue_mapping = 0: unclassified, packet transmitted through ring 16 |
| * queue_mapping = 1: goes to ring 0 (the highest priority queue). |
| */ |
| if (index == 0) |
| index = DESC_INDEX; |
| else |
| index -= 1; |
| if (index != DESC_INDEX && index >= GENET_TX_MQ_CNT) { |
| printk(KERN_ERR "%s: skb->queue_mapping %d is invalid\n", |
| __func__, skb_get_queue_mapping(skb)); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| dev->stats.tx_errors++; |
| dev->stats.tx_dropped++; |
| return 1; |
| } |
| nr_frags = skb_shinfo(skb)->nr_frags; |
| if (index == DESC_INDEX) { |
| if (pDevCtrl->txFreeBds <= nr_frags + 1) { |
| netif_stop_subqueue(dev, 0); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| printk(KERN_ERR "%s: tx ring %d full when queue awake\n", |
| __func__, index); |
| return 1; |
| } |
| } else if (pDevCtrl->txRingFreeBds[index] <= nr_frags + 1) { |
| netif_stop_subqueue(dev, index + 1); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| printk(KERN_ERR "%s: tx ring %d full when queue awake\n", |
| __func__, index); |
| return 1; |
| } |
| } |
| /* Reclaim xmited skb for each subqueue */ |
| for (i = 0; i < GENET_TX_MQ_CNT; i++) |
| bcmgenet_tx_reclaim(dev, i); |
| #else |
| if (skb) { |
| nr_frags = skb_shinfo(skb)->nr_frags; |
| if (pDevCtrl->txFreeBds <= nr_frags + 1) { |
| netif_stop_queue(dev); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| printk(KERN_ERR "%s: tx ring full when queue awake\n", |
| __func__); |
| return 1; |
| } |
| } |
| #endif |
| |
| if (!skb) { |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| for (i = 0; i < GENET_TX_MQ_CNT; i++) |
| bcmgenet_tx_reclaim(dev, i); |
| #endif |
| bcmgenet_tx_reclaim(dev, DESC_INDEX); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| /* |
| * Reclaim transmitted skbs every 8 packets. |
| */ |
| if ((index == DESC_INDEX) && |
| (pDevCtrl->txFreeBds < pDevCtrl->nrTxBds - 8)) |
| bcmgenet_tx_reclaim(dev, index); |
| |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if ((index != DESC_INDEX) && (pDevCtrl->txRingFreeBds[index] |
| < GENET_TX_MQ_BD_CNT - 8)) |
| bcmgenet_tx_reclaim(dev, index); |
| #endif |
| |
| tDma_ring = &pDevCtrl->txDma->tDmaRings[index]; |
| /* |
| * If the 64-byte status block is enabled, we must make sure the skb |
| * has enough headroom for us to insert it. |
| */ |
| if (GENET_TBUF_CTRL(pDevCtrl) & RBUF_64B_EN) { |
| if (likely(skb_headroom(skb) < 64)) { |
| struct sk_buff *new_skb; |
| new_skb = skb_realloc_headroom(skb, 64); |
| if (new_skb == NULL) { |
| dev_kfree_skb(skb); |
| dev->stats.tx_errors++; |
| dev->stats.tx_dropped++; |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } else if (skb->sk) { |
| skb_set_owner_w(new_skb, skb->sk); |
| } |
| dev_kfree_skb(skb); |
| skb = new_skb; |
| } |
| skb_push(skb, 64); |
| Status = (struct status_64 *)skb->data; |
| } |
| write_ptr = (DMA_RW_POINTER_MASK & tDma_ring->tdma_write_pointer) >> 1; |
| |
| /* Obtain transmit control block */ |
| txCBPtr = bcmgenet_get_txcb(dev, &write_ptr, index); |
| |
| if (unlikely(!txCBPtr)) |
| BUG(); |
| |
| txCBPtr->skb = skb; |
| |
| if ((skb->ip_summed == CHECKSUM_PARTIAL) && |
| (GENET_TBUF_CTRL(pDevCtrl) & RBUF_64B_EN)) { |
| u16 offset; |
| offset = skb->csum_start - skb_headroom(skb) - 64; |
| /* Insert 64B TSB and set the flag */ |
| Status->tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | |
| (offset + skb->csum_offset) | |
| STATUS_TX_CSUM_LV; |
| } |
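| /* |
| * Example: for a CHECKSUM_PARTIAL UDP/IPv4 packet, skb->csum_start |
| * points at the UDP header, so after the 64B TSB push the start |
| * offset above is relative to the Ethernet header, and the computed |
| * checksum is written csum_offset bytes beyond it. |
| */ |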
| |
| /* |
| * Add the buffer to the ring. |
| * Set addr and length of DMA BD to be transmitted. |
| */ |
| if (!nr_frags) { |
| txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev, |
| skb->data, skb->len, DMA_TO_DEVICE); |
| if (!txCBPtr->dma_addr) { |
| dev_err(&pDevCtrl->dev->dev, "Tx DMA map failed\n"); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| txCBPtr->dma_len = skb->len; |
| txCBPtr->BdAddr->address = txCBPtr->dma_addr; |
| txCBPtr->BdAddr->length_status = ( |
| ((unsigned long)((skb->len < ETH_ZLEN) ? |
| ETH_ZLEN : skb->len)) << 16) | DMA_SOP | DMA_EOP | |
| (DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT) | |
| DMA_TX_APPEND_CRC; |
| |
| if (skb->ip_summed == CHECKSUM_PARTIAL) |
| txCBPtr->BdAddr->length_status |= DMA_TX_DO_CSUM; |
| |
| #ifdef CONFIG_BCMGENET_DUMP_DATA |
| printk(KERN_NOTICE "%s: data 0x%p len %d", |
| __func__, skb->data, skb->len); |
| print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, |
| 16, 1, skb->data, skb->len, 0); |
| #endif |
| /* Decrement total BD count and advance our write pointer */ |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (index == DESC_INDEX) |
| pDevCtrl->txFreeBds -= 1; |
| else |
| pDevCtrl->txRingFreeBds[index] -= 1; |
| #else |
| pDevCtrl->txFreeBds -= 1; |
| #endif |
| /* advance producer index and write pointer.*/ |
| tDma_ring->tdma_producer_index += 1; |
| tDma_ring->tdma_write_pointer = (write_ptr << 1); |
| /* update stats */ |
| dev->stats.tx_bytes += ((skb->len < ETH_ZLEN) ? |
| ETH_ZLEN : skb->len); |
| dev->stats.tx_packets++; |
| |
| } else { |
| /* xmit head */ |
| txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev, |
| skb->data, skb_headlen(skb), DMA_TO_DEVICE); |
| if (!txCBPtr->dma_addr) { |
| dev_err(&pDevCtrl->dev->dev, "Tx DMA map failed\n"); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| txCBPtr->dma_len = skb_headlen(skb); |
| txCBPtr->BdAddr->address = txCBPtr->dma_addr; |
| txCBPtr->BdAddr->length_status = (skb_headlen(skb) << 16) | |
| (DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT) | |
| DMA_SOP | DMA_TX_APPEND_CRC; |
| |
| if (skb->ip_summed == CHECKSUM_PARTIAL) |
| txCBPtr->BdAddr->length_status |= DMA_TX_DO_CSUM; |
| |
| #ifdef CONFIG_BCMGENET_DUMP_DATA |
| printk(KERN_NOTICE "%s: frag head len %d", |
| __func__, skb_headlen(skb)); |
| print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, |
| 16, 1, skb->data, skb_headlen(skb), 0); |
| #endif |
| /* Decrement total BD count and advance our write pointer */ |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (index == DESC_INDEX) |
| pDevCtrl->txFreeBds -= 1; |
| else |
| pDevCtrl->txRingFreeBds[index] -= 1; |
| #else |
| pDevCtrl->txFreeBds -= 1; |
| #endif |
| /* advance producer index and write pointer.*/ |
| tDma_ring->tdma_producer_index += 1; |
| tDma_ring->tdma_write_pointer = (write_ptr << 1); |
| dev->stats.tx_bytes += skb_headlen(skb); |
| |
| /* xmit fragment */ |
| for (i = 0; i < nr_frags; i++) { |
| skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| txCBPtr = bcmgenet_get_txcb(dev, &write_ptr, index); |
| |
| if (unlikely(!txCBPtr)) |
| BUG(); |
| txCBPtr->skb = NULL; |
| txCBPtr->dma_addr = dma_map_page(&pDevCtrl->dev->dev, |
| frag->page, |
| frag->page_offset, |
| frag->size, |
| DMA_TO_DEVICE); |
| if (txCBPtr->dma_addr == 0) { |
| printk(KERN_ERR "%s: Tx DMA map failed\n", |
| __func__); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| txCBPtr->dma_len = frag->size; |
| txCBPtr->BdAddr->address = txCBPtr->dma_addr; |
| #ifdef CONFIG_BCMGENET_DUMP_DATA |
| printk(KERN_NOTICE "%s: frag%d len %d", |
| __func__, i, frag->size); |
| print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, |
| 16, 1, |
| page_address(frag->page)+frag->page_offset, |
| frag->size, 0); |
| #endif |
| txCBPtr->BdAddr->length_status = |
| ((unsigned long)frag->size << 16) | |
| (DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT); |
| if (i == nr_frags - 1) |
| txCBPtr->BdAddr->length_status |= DMA_EOP; |
| |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (index == DESC_INDEX) |
| pDevCtrl->txFreeBds -= 1; |
| else |
| pDevCtrl->txRingFreeBds[index] -= 1; |
| #else |
| pDevCtrl->txFreeBds -= 1; |
| #endif |
| /* advance producer index and write pointer.*/ |
| tDma_ring->tdma_producer_index += 1; |
| tDma_ring->tdma_write_pointer = (write_ptr << 1); |
| /* update stats */ |
| dev->stats.tx_bytes += frag->size; |
| } |
| dev->stats.tx_packets++; |
| } |
| |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| if (index == DESC_INDEX) { |
| if (pDevCtrl->txFreeBds <= (MAX_SKB_FRAGS + 1)) { |
| netif_stop_subqueue(dev, 0); |
| pDevCtrl->intrl2_0->cpu_mask_clear = |
| UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE; |
| |
| } |
| } else if (pDevCtrl->txRingFreeBds[index] <= (MAX_SKB_FRAGS + 1)) { |
| netif_stop_subqueue(dev, index+1); |
| pDevCtrl->intrl2_1->cpu_mask_clear = (1 << index); |
| } |
| #else |
| if (pDevCtrl->txFreeBds <= (MAX_SKB_FRAGS + 1)) { |
| /* Enable Tx bdone/pdone interrupts! */ |
| pDevCtrl->intrl2_0->cpu_mask_clear |= UMAC_IRQ_TXDMA_BDONE | |
| UMAC_IRQ_TXDMA_PDONE; |
| netif_stop_queue(dev); |
| } |
| #endif |
| dev->trans_start = jiffies; |
| |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| |
| return 0; |
| } |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_tx_ring_reclaim |
| Purpose: reclaim xmited skb for a ring buffer |
| -------------------------------------------------------------------------- */ |
| static void bcmgenet_tx_ring_reclaim(struct net_device *dev, int index, |
| unsigned int p_index, unsigned int c_index) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| struct Enet_CB *txCBPtr; |
| int lastTxedCnt = 0, lastCIndex = 0; |
| struct sk_buff *skb; |
| unsigned long flags; |
| |
| /* Compute how many buffers have been transmitted since the last xmit call */ |
| |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| |
| if (c_index >= pDevCtrl->txRingCIndex[index]) { |
| /* index not wrapped */ |
| lastTxedCnt = c_index - pDevCtrl->txRingCIndex[index]; |
| } else { |
| /* index wrapped */ |
| lastTxedCnt = pDevCtrl->txRingSize[index] - |
| pDevCtrl->txRingCIndex[index] + c_index; |
| } |
| TRACE(("%s: ring %d: p_index=%d c_index=%d" |
| "lastTxedCnt=%d txLastCIndex=%d\n", |
| __func__, index, p_index, c_index, |
| lastTxedCnt, pDevCtrl->txRingCIndex[index])); |
| |
| pDevCtrl->txRingFreeBds[index] += lastTxedCnt; |
| |
| lastCIndex = pDevCtrl->txRingCIndex[index]; |
| pDevCtrl->txRingCIndex[index] = c_index; |
| |
| /* free xmited skb */ |
| while (lastTxedCnt-- > 0) { |
| txCBPtr = pDevCtrl->txRingCBs[index] + lastCIndex; |
| skb = txCBPtr->skb; |
| if (skb != NULL) { |
| /* |
| * This will consume the skb; we don't want to run the destructor |
| * here, since it would advance the rx ring's consumer index. |
| */ |
| if (skb->destructor != NULL) |
| skb->destructor = NULL; |
| /* make sure dev_kfree_skb_any() doesn't free the buffer memory. */ |
| if ((atomic_read(&skb_shinfo(skb)->dataref) & |
| SKB_DATAREF_MASK) < 2) |
| atomic_set(&(skb_shinfo(skb)->dataref), 2); |
| dev_kfree_skb_any(skb); |
| txCBPtr->skb = NULL; |
| } |
| if (lastCIndex == (pDevCtrl->txRingSize[index] - 1)) |
| lastCIndex = 0; |
| else |
| lastCIndex++; |
| } |
| if (pDevCtrl->txRingFreeBds[index] > 0 && |
| netif_queue_stopped(dev)) { |
| /* |
| * Disable txdma multibuf done interrupt for this ring |
| * since we have free tx bds. |
| */ |
| pDevCtrl->intrl2_1->cpu_mask_set |= (1 << index); |
| netif_wake_queue(dev); |
| } |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| } |
| /* -------------------------------------------------------------------------- |
| Name: bcmgenet_ring_xmit |
| Purpose: Send ethernet traffic through ring buffer |
| -------------------------------------------------------------------------- */ |
| int __maybe_unused bcmgenet_ring_xmit(struct sk_buff *skb, |
| struct net_device *dev, int index, int drop) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| volatile struct tDmaRingRegs *tDma_ring; |
| struct Enet_CB *txCBPtr; |
| struct status_64 *Status; |
| unsigned int p_index = 0, c_index = 0; |
| |
| /* Compute how many buffers have been transmitted since the last call */ |
| tDma_ring = &pDevCtrl->txDma->tDmaRings[index]; |
| p_index = (DMA_P_INDEX_MASK & tDma_ring->tdma_producer_index); |
| c_index = (DMA_C_INDEX_MASK & tDma_ring->tdma_consumer_index); |
| |
| /* P/C index is 16 bits, we do modulo of RING_SIZE */ |
| p_index &= (pDevCtrl->txRingSize[index] - 1); |
| c_index &= (pDevCtrl->txRingSize[index] - 1); |
| |
| bcmgenet_tx_ring_reclaim(dev, index, p_index, c_index); |
| |
| if (!skb) |
| return 0; |
| /* Obtain a tx control block */ |
| txCBPtr = pDevCtrl->txRingCBs[index] + p_index; |
| txCBPtr->skb = skb; |
| |
| TRACE(("%s: txCBPtr=0x%08lx skb=0x%08lx skb->head=0x%08lx\n", |
| __func__, |
| (unsigned long)txCBPtr, |
| (unsigned long)skb, |
| (unsigned long)skb->head)); |
| |
| /* |
| * Make sure we have headroom for us to insert 64B status block. |
| */ |
| if (unlikely(skb_headroom(skb) < 64)) { |
| printk(KERN_ERR "no enough headroom for TSB (head=0x%08x)\n", |
| (unsigned int)skb->head); |
| BUG(); |
| } |
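| /* |
| * The 64B transmit status block (TSB) is built in the skb headroom; |
| * the length field below therefore accounts for skb->len plus the |
| * 64B block, and the DMA mapping covers both. |
| */ |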
| Status = (struct status_64 *)skb->head; |
| Status->length_status = ((unsigned long)((skb->len < ETH_ZLEN) ? |
| ETH_ZLEN : skb->len)) << 16; |
| Status->length_status += (sizeof(struct status_64) << 16); |
| Status->length_status |= DMA_SOP | DMA_EOP | DMA_TX_APPEND_CRC; |
| if ((skb->ip_summed == CHECKSUM_PARTIAL) && |
| (GENET_TBUF_CTRL(pDevCtrl) & RBUF_64B_EN)) { |
| u16 offset; |
| offset = skb->csum_start - skb_headroom(skb) - 64; |
| /* Insert 64B TSB and set the flag */ |
| Status->tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | |
| (offset + skb->csum_offset) | STATUS_TX_CSUM_LV; |
| Status->length_status |= DMA_TX_DO_CSUM; |
| TRACE(("Tx Hw Csum: head=0x%08x data=0x%08x " |
| "csum_start=%d csum_offset=%d\n", |
| (unsigned int)skb->head, |
| (unsigned int)skb->data, |
| skb->csum_start, |
| skb->csum_offset)); |
| } else { |
| Status->tx_csum_info = 0; |
| } |
| /* Default QTAG for MoCA */ |
| Status->length_status |= (DMA_TX_QTAG_MASK << DMA_TX_QTAG_SHIFT); |
| txCBPtr->dma_addr = dma_map_single(&pDevCtrl->dev->dev, |
| skb->head, skb->len + 64, DMA_TO_DEVICE); |
| |
| #ifdef CONFIG_BCMGENET_DUMP_DATA |
| printk(KERN_NOTICE "bcmgenet_xmit: len %d", skb->len); |
| print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, |
| 16, 1, skb->head, skb->len + 64, 0); |
| #endif |
| |
| /* |
| * Decrement total BD count and advance our |
| * write pointer/producer index |
| */ |
| pDevCtrl->txRingFreeBds[index] -= 1; |
| |
| if (likely(txCBPtr->dma_addr == tDma_ring->tdma_write_pointer)) { |
| unsigned long start_addr = tDma_ring->tdma_start_addr; |
| if (unlikely(drop)) { |
| /* |
| * Don't xmit current packet pointed by read_pointer, |
| * there is no such mechanism in GENET's TDMA, so we |
| * disable TDMA and increment consumer index/read |
| * pointer to skip this packet as a work around. |
| */ |
| pDevCtrl->txDma->tdma_ctrl &= ~DMA_EN; |
| tDma_ring->tdma_consumer_index += 1; |
| if ((tDma_ring->tdma_read_pointer + RX_BUF_LENGTH) > |
| tDma_ring->tdma_end_addr) { |
| tDma_ring->tdma_read_pointer = start_addr; |
| } else { |
| tDma_ring->tdma_read_pointer += RX_BUF_LENGTH; |
| } |
| } |
| /* advance producer index and write pointer.*/ |
| tDma_ring->tdma_producer_index += 1; |
| if ((tDma_ring->tdma_write_pointer + RX_BUF_LENGTH) > |
| tDma_ring->tdma_end_addr) { |
| tDma_ring->tdma_write_pointer = start_addr; |
| } else { |
| tDma_ring->tdma_write_pointer += RX_BUF_LENGTH; |
| } |
| if (unlikely(drop)) |
| pDevCtrl->txDma->tdma_ctrl |= DMA_EN; |
| } else { |
| /* oops! how did we get here? */ |
| BUG(); |
| } |
| |
| if (pDevCtrl->txRingFreeBds[index] == 0) { |
| TRACE(("%s: no xmit queue space, stopping queue\n", dev->name)); |
| /* Enable Tx bdone/pdone interrupt !*/ |
| pDevCtrl->intrl2_0->cpu_mask_clear |= (1 << index); |
| netif_stop_subqueue(dev, index); |
| } |
| |
| if (!drop) { |
| /* update stats */ |
| dev->stats.tx_bytes += ((skb->len < ETH_ZLEN) ? |
| ETH_ZLEN : skb->len); |
| dev->stats.tx_packets++; |
| } |
| dev->trans_start = jiffies; |
| return 0; |
| } |
| /* NAPI polling method*/ |
| static int bcmgenet_poll(struct napi_struct *napi, int budget) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = container_of(napi, |
| struct BcmEnet_devctrl, napi); |
| volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_0; |
| volatile struct rDmaRingRegs *rDma_desc; |
| unsigned int work_done, total_work_done = 0; |
| int local_budget; |
| int i; |
| |
| /* |
| * Process the priority queues (rings 0..GENET_RX_MQ_CNT-1), then |
| * jump to the default queue (DESC_INDEX). The loop terminates once |
| * the default queue has been handled and i advances past DESC_INDEX. |
| */ |
| for (i = 0, local_budget = GENET_RX_MQ_BD_CNT; |
| i < GENET_RX_MQ_CNT || i == DESC_INDEX;) { |
| work_done = bcmgenet_desc_rx(pDevCtrl, local_budget, i); |
| rDma_desc = &pDevCtrl->rxDma->rDmaRings[i]; |
| rDma_desc->rdma_consumer_index += work_done; |
| total_work_done += work_done; |
| if (i == GENET_RX_MQ_CNT - 1) { |
| /* Process the default queue. */ |
| i = DESC_INDEX; |
| local_budget = desc_budget; |
| } else { |
| i++; |
| } |
| } |
| |
| /* |
| * Per NAPI spec at |
| * |
| * http://www.linuxfoundation.org/collaborate/workgroups/networking/napi |
| * |
| * If packets remain to be processed (i.e. the driver used its entire |
| * quota), poll() should return a value of one. |
| * If, instead, all packets have been processed, your driver should |
| * reenable interrupts, turn off polling, and return zero. |
| */ |
| if (total_work_done < budget) { |
| napi_complete(napi); |
| intrl2->cpu_mask_clear |= UMAC_IRQ_HFB_OR_DONE; |
| return 0; |
| } else { |
| return 1; |
| } |
| } |
| /* |
| * NAPI polling for ring buffer. |
| */ |
| static int bcmgenet_ring_poll(struct napi_struct *napi, int budget) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = container_of(napi, |
| struct BcmEnet_devctrl, ring_napi); |
| volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_1; |
| unsigned int work_done; |
| work_done = bcmgenet_ring_rx(pDevCtrl, budget); |
| |
| /* tx reclaim */ |
| bcmgenet_ring_xmit(NULL, pDevCtrl->dev, 0, 0); |
| if (work_done < budget) { |
| unsigned long bits; |
| napi_complete(napi); |
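| /* |
| * Re-enable rx interrupts only for rings enabled in rdma_ctrl; the |
| * per-ring rx interrupts sit in the upper 16 bits of the intrl2_1 |
| * mask (this assumes the ring-enable bits start at bit 1, i.e. |
| * DMA_RING_BUF_EN_SHIFT == 1). |
| */ |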
| bits = (pDevCtrl->rxDma->rdma_ctrl >> 1) << 16; |
| intrl2->cpu_mask_clear |= bits; |
| } |
| return work_done; |
| } |
| /* |
| * Interrupt bottom half |
| */ |
| static void bcmgenet_irq_task(struct work_struct *work) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = container_of( |
| work, struct BcmEnet_devctrl, bcmgenet_irq_work); |
| struct net_device *dev; |
| |
| dev = pDevCtrl->dev; |
| |
| TRACE(("%s\n", __func__)); |
| /* Cable plugged/unplugged event */ |
| if (pDevCtrl->irq0_stat & UMAC_IRQ_PHY_DET_R) { |
| pDevCtrl->irq0_stat &= ~UMAC_IRQ_PHY_DET_R; |
| printk(KERN_CRIT "%s cable plugged in, powering up\n", |
| pDevCtrl->dev->name); |
| bcmgenet_power_up(pDevCtrl, GENET_POWER_CABLE_SENSE); |
| } else if (pDevCtrl->irq0_stat & UMAC_IRQ_PHY_DET_F) { |
| pDevCtrl->irq0_stat &= ~UMAC_IRQ_PHY_DET_F; |
| printk(KERN_CRIT "%s cable unplugged, powering down\n", |
| pDevCtrl->dev->name); |
| bcmgenet_power_down(pDevCtrl, GENET_POWER_CABLE_SENSE); |
| } |
| if (pDevCtrl->irq0_stat & UMAC_IRQ_MPD_R) { |
| pDevCtrl->irq0_stat &= ~UMAC_IRQ_MPD_R; |
| printk(KERN_CRIT "%s magic packet detected, waking up\n", |
| pDevCtrl->dev->name); |
| /* disable mpd interrupt */ |
| pDevCtrl->intrl2_0->cpu_mask_set |= UMAC_IRQ_MPD_R; |
| /* disable CRC forward.*/ |
| pDevCtrl->umac->cmd &= ~CMD_CRC_FWD; |
| if (pDevCtrl->dev_asleep) |
| bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_MAGIC); |
| |
| } else if (pDevCtrl->irq0_stat & (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM)) { |
| pDevCtrl->irq0_stat &= ~(UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM); |
| printk(KERN_CRIT "%s ACPI pattern matched, waking up\n", |
| pDevCtrl->dev->name); |
| /* disable HFB match interrupts */ |
| pDevCtrl->intrl2_0->cpu_mask_set |= (UMAC_IRQ_HFB_SM | |
| UMAC_IRQ_HFB_MM); |
| if (pDevCtrl->dev_asleep) |
| bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_ACPI); |
| } |
| |
| /* Link UP/DOWN event */ |
| if (pDevCtrl->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN)) { |
| pDevCtrl->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); |
| mii_setup(pDevCtrl->dev); |
| } |
| } |
| /* |
| * bcmgenet_ring_rx: ring buffer rx function. |
| */ |
| static unsigned int bcmgenet_ring_rx(void *ptr, unsigned int budget) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = ptr; |
| volatile struct status_64 *status; |
| volatile struct rDmaRingRegs *rDma_ring; |
| int i, len, rx_discard_flag = 0; |
| struct Enet_CB *cb; |
| struct sk_buff *skb; |
| unsigned long dmaFlag; |
| unsigned int rxpktprocessed = 0, pktcnt = 0; |
| unsigned int read_ptr = 0, write_ptr = 0, p_index = 0, c_index = 0; |
| |
| TRACE(("%s: ifindex=%d irq_stat=0x%08x\n", |
| __func__, pDevCtrl->dev->ifindex, pDevCtrl->irq1_stat)); |
| |
| /* loop for each ring */ |
| for (i = 0; i < GENET_RX_RING_COUNT; i++) { |
| /* Skip if this ring is not enabled */ |
| if (!(pDevCtrl->rxDma->rdma_ctrl & |
| (1 << (i + DMA_RING_BUF_EN_SHIFT)))) |
| continue; |
| /* Skip if no outstanding packets for this ring */ |
| if (!(pDevCtrl->irq1_stat & (1 << (16 + i)))) |
| continue; |
| |
| rDma_ring = &pDevCtrl->rxDma->rDmaRings[i]; |
| write_ptr = rDma_ring->rdma_write_pointer; |
| read_ptr = rDma_ring->rdma_read_pointer; |
| p_index = rDma_ring->rdma_producer_index; |
| p_index &= DMA_P_INDEX_MASK; |
| c_index = rDma_ring->rdma_consumer_index; |
| c_index &= DMA_C_INDEX_MASK; |
| |
| if (p_index < c_index) { |
| /* index wrapped */ |
| if ((DMA_P_INDEX_MASK - c_index + p_index) == |
| (pDevCtrl->rxRingSize[i] - 1)) |
| rx_discard_flag = 1; |
| } else if (p_index > c_index) { |
| /* index not wrapped */ |
| if (p_index - c_index == pDevCtrl->rxRingSize[i]) |
| rx_discard_flag = 1; |
| } |
| |
| if (rx_discard_flag) { |
| int discard_cnt = rDma_ring->rdma_producer_index >> 16; |
| /* Report rx overrun errors */ |
| pDevCtrl->dev->stats.rx_over_errors += discard_cnt; |
| pDevCtrl->rxRingDiscCnt[i] += discard_cnt; |
| rDma_ring->rdma_producer_index = 0; |
| } |
| |
| /* |
| * We can't use the producer/consumer indices to compute how many |
| * packets are outstanding, because we don't advance the consumer |
| * index right after packets are moved out of DMA. Use the |
| * read/write pointers for the math instead. |
| */ |
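| /* |
| * E.g. if read_ptr trails write_ptr by four buffers' worth of |
| * address space, the shift by (RX_BUF_BITS - 1) below converts that |
| * address delta into pktcnt == 4. |
| */ |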
| if (write_ptr < read_ptr) { |
| /* pointer wrapped */ |
| pktcnt = (rDma_ring->rdma_end_addr + 1 - read_ptr) >> |
| (RX_BUF_BITS - 1); |
| pktcnt += (write_ptr - rDma_ring->rdma_start_addr) >> |
| (RX_BUF_BITS - 1); |
| } else if (write_ptr > read_ptr) { |
| /* pointer not wrapped */ |
| pktcnt = (write_ptr - read_ptr) >> (RX_BUF_BITS - 1); |
| } else if (write_ptr == read_ptr && p_index != c_index) { |
| /* overflowed, some packets are discarded by DMA */ |
| pktcnt = rDma_ring->rdma_ring_buf_size >> 16; |
| } |
| |
| TRACE(("%s: p_index=%d c_index=%d write_ptr=0x%08x " |
| "read_ptr=0x%08x pktcnt=%d\n", |
| __func__, p_index, c_index, write_ptr, |
| read_ptr, pktcnt)); |
| |
| /*Start processing packets */ |
| while ((rxpktprocessed < pktcnt) && |
| (rxpktprocessed < budget)) { |
| |
| unsigned int cbi; |
| /* |
| * Find out Which buffer in the ring are we pointing to. |
| */ |
| cbi = (read_ptr - rDma_ring->rdma_start_addr) >> |
| (RX_BUF_BITS - 1); |
| cb = pDevCtrl->rxRingCbs[i] + cbi; |
| dma_sync_single_for_cpu(&pDevCtrl->dev->dev, |
| cb->dma_addr, 64, DMA_FROM_DEVICE); |
| |
| status = (struct status_64 *)cb->BdAddr; |
| dmaFlag = status->length_status & 0xffff; |
| len = status->length_status >> 16; |
| dma_sync_single_for_cpu(&pDevCtrl->dev->dev, |
| cb->dma_addr + 64, len, |
| DMA_FROM_DEVICE); |
| |
| /* |
| * Advancing our read pointer. |
| */ |
| if (read_ptr + RX_BUF_LENGTH > rDma_ring->rdma_end_addr) |
| read_ptr = rDma_ring->rdma_start_addr; |
| else |
| read_ptr += RX_BUF_LENGTH; |
| rDma_ring->rdma_read_pointer = read_ptr; |
| /* |
| * per packet processing |
| */ |
| skb = __bcmgenet_alloc_skb_from_buf( |
| (unsigned char *)cb->BdAddr, RX_BUF_LENGTH, 0); |
| /* check before touching skb->destructor below */ |
| BUG_ON(skb == NULL); |
| skb->destructor = &__bcmgenet_skb_destructor; |
| status->reserved[0] = i; /* ring index */ |
| status->reserved[1] = cbi; /* cb index */ |
| |
| rxpktprocessed++; |
| |
| TRACE(("%s: cbi=%d skb=0x%08x head=0x%08x dataref=%d\n", |
| __func__, cbi, |
| (unsigned int)skb, (unsigned int)skb->head, |
| (atomic_read(&skb_shinfo(skb)->dataref) & |
| SKB_DATAREF_MASK))); |
| /* report errors */ |
| if (unlikely(!(dmaFlag & DMA_EOP) || |
| !(dmaFlag & DMA_SOP))) { |
| /* probably can't do this for scatter-gather? */ |
| printk(KERN_WARNING "Dropping fragmented packet!\n"); |
| pDevCtrl->dev->stats.rx_dropped++; |
| pDevCtrl->dev->stats.rx_errors++; |
| /* free the skb we just wrapped around this buffer; |
| * cb->skb is not used on the ring rx path. */ |
| dev_kfree_skb_any(skb); |
| continue; |
| } |
| if (unlikely(dmaFlag & (DMA_RX_CRC_ERROR | |
| DMA_RX_OV | |
| DMA_RX_NO | |
| DMA_RX_LG | |
| DMA_RX_RXER))) { |
| TRACE(("ERROR: dmaFlag=0x%lx\n", dmaFlag)); |
| if (dmaFlag & DMA_RX_CRC_ERROR) |
| pDevCtrl->dev->stats.rx_crc_errors++; |
| if (dmaFlag & DMA_RX_OV) |
| pDevCtrl->dev->stats.rx_fifo_errors++; |
| if (dmaFlag & DMA_RX_NO) |
| pDevCtrl->dev->stats.rx_frame_errors++; |
| if (dmaFlag & DMA_RX_LG) |
| pDevCtrl->dev->stats.rx_length_errors++; |
| |
| pDevCtrl->dev->stats.rx_dropped++; |
| pDevCtrl->dev->stats.rx_errors++; |
| /* as above, free the wrapping skb */ |
| dev_kfree_skb_any(skb); |
| continue; |
| } /* error packet */ |
| |
| skb_put(skb, len); |
| /* we must have 64B rx status block enabled.*/ |
| if (pDevCtrl->rbuf->rbuf_chk_ctrl & RBUF_RXCHK_EN) { |
| if (status->rx_csum & STATUS_RX_CSUM_OK) { |
| skb->csum = status->rx_csum ; |
| /* |
| * Should swap bytes based on |
| * rbuf->endian_ctrl? |
| */ |
| skb->csum = swab16(skb->csum); |
| } |
| skb->ip_summed = CHECKSUM_COMPLETE; |
| } |
| /* |
| * TODO: check filter index and compare with ring index |
| * Report error if not matched |
| */ |
| skb_pull(skb, 64); |
| len -= 64; |
| |
| if (pDevCtrl->bIPHdrOptimize) { |
| skb_pull(skb, 2); |
| len -= 2; |
| } |
| |
| if (pDevCtrl->umac->cmd & CMD_CRC_FWD) { |
| skb_trim(skb, len - 4); |
| len -= 4; |
| } |
| #ifdef CONFIG_BCMGENET_DUMP_DATA |
| printk(KERN_NOTICE "%s:\n", __func__); |
| print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, |
| 16, 1, skb->data, skb->len, 0); |
| #endif |
| /* |
| * Finish setting up the received SKB and send it |
| * to the kernel |
| */ |
| skb->dev = pDevCtrl->dev; |
| skb->protocol = eth_type_trans(skb, pDevCtrl->dev); |
| pDevCtrl->dev->stats.rx_packets++; |
| pDevCtrl->dev->stats.rx_bytes += len; |
| if (dmaFlag & DMA_RX_MULT) |
| pDevCtrl->dev->stats.multicast++; |
| |
| skb->queue_mapping = i; |
| /* Notify kernel */ |
| netif_receive_skb(skb); |
| TRACE(("pushed up to kernel\n")); |
| |
| } /* packet process loop */ |
| |
| } /* ring index loop */ |
| |
| return rxpktprocessed; |
| } |
| /* |
| * bcmgenet_isr1: interrupt handler for ring buffer. |
| */ |
| static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = dev_id; |
| volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_1; |
| unsigned int index; |
| unsigned long flags; |
| |
| /* Save irq status for bottom-half processing. */ |
| pDevCtrl->irq1_stat = intrl2->cpu_stat & ~intrl2->cpu_mask_status; |
| /* clear interrupts */ |
| intrl2->cpu_clear |= pDevCtrl->irq1_stat; |
| |
| TRACE(("%s: IRQ=0x%x\n", __func__, pDevCtrl->irq1_stat)); |
| /* |
| * Check the MBDONE interrupts. |
| * packet is done, reclaim descriptors |
| */ |
| if (pDevCtrl->irq1_stat & 0x0000ffff) { |
| index = 0; |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| for (index = 0; index < GENET_TX_RING_COUNT; index++) { |
| if (pDevCtrl->irq1_stat & (1 << index)) { |
| bcmgenet_tx_reclaim(pDevCtrl->dev, index); |
| if (index >= GENET_TX_MQ_CNT) { |
| pr_warn_ratelimited("bcmgenet_isr1 TX index %d >= %d", |
| index, GENET_TX_MQ_CNT); |
| } |
| } |
| } |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| } |
| |
| if (pDevCtrl->irq1_stat & 0xffff0000) { |
| /* |
| * We use NAPI here because we do NOT advance the consumer index |
| * right after data is moved out of DMA; we advance it only once |
| * the upper layer has consumed it. |
| */ |
| if (likely(napi_schedule_prep(&pDevCtrl->ring_napi))) { |
| /* Disable all rx ring interrupts */ |
| pr_warn_ratelimited("bcmgenet_isr1 ring_napi 0x%08x\n", |
| pDevCtrl->irq1_stat); |
| intrl2->cpu_mask_set |= 0xffff0000; |
| __napi_schedule(&pDevCtrl->ring_napi); |
| } |
| } |
| return IRQ_HANDLED; |
| } |
| /* |
| * bcmgenet_isr0: Handle various interrupts. |
| */ |
| static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = dev_id; |
| volatile struct intrl2Regs *intrl2 = pDevCtrl->intrl2_0; |
| |
| /* Save irq status for bottom-half processing. */ |
| pDevCtrl->irq0_stat = intrl2->cpu_stat & ~intrl2->cpu_mask_status; |
| /* clear interrupts */ |
| intrl2->cpu_clear |= pDevCtrl->irq0_stat; |
| |
| TRACE(("IRQ=0x%x\n", pDevCtrl->irq0_stat)); |
| |
| /* If there is tagged traffic, throttle untagged traffic. */ |
| if (pDevCtrl->irq0_stat & (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM)) |
| desc_budget = THROTTLED_DESC_BUDGET; |
| else |
| desc_budget = DEFAULT_DESC_BUDGET; |
| |
| #ifndef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| if (pDevCtrl->irq0_stat & UMAC_IRQ_HFB_OR_DONE) { |
| /* |
| * We use NAPI (software interrupt mitigation) if Rx descriptor |
| * throttling is not used. |
| * Disable the interrupt here; it is re-enabled in the poll method. |
| */ |
| if (likely(napi_schedule_prep(&pDevCtrl->napi))) { |
| intrl2->cpu_mask_set |= UMAC_IRQ_HFB_OR_DONE; |
| __napi_schedule(&pDevCtrl->napi); |
| } |
| } |
| #else |
| /* Multiple buffer done event. */ |
| if (pDevCtrl->irq0_stat & UMAC_IRQ_RXDMA_MBDONE) { |
| unsigned int work_done; |
| volatile struct rDmaRingRegs *rDma_desc; |
| |
| rDma_desc = &pDevCtrl->rxDma->rDmaRings[DESC_INDEX]; |
| pDevCtrl->irq0_stat &= ~UMAC_IRQ_RXDMA_MBDONE; |
| TRACE(("%s: %d packets available\n", __func__, DmaDescThres)); |
| work_done = bcmgenet_desc_rx(pDevCtrl, DmaDescThres, |
| DESC_INDEX); |
| rDma_desc->rdma_consumer_index += work_done; |
| } |
| #endif |
| if (pDevCtrl->irq0_stat & |
| (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { |
| /* Tx reclaim */ |
| bcmgenet_xmit(NULL, pDevCtrl->dev); |
| } |
| if (pDevCtrl->irq0_stat & (UMAC_IRQ_PHY_DET_R | |
| UMAC_IRQ_PHY_DET_F | |
| UMAC_IRQ_LINK_UP | |
| UMAC_IRQ_LINK_DOWN | |
| UMAC_IRQ_MPD_R)) { |
| /* all other interested interrupts handled in bottom half */ |
| schedule_work(&pDevCtrl->bcmgenet_irq_work); |
| } |
| |
| return IRQ_HANDLED; |
| } |
| /* |
| * bcmgenet_desc_rx - descriptor based rx processing. |
| * This can be called from the bottom half or from the NAPI poll method. |
| */ |
| static unsigned int bcmgenet_desc_rx(void *ptr, unsigned int budget, int index) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = ptr; |
| struct net_device *dev = pDevCtrl->dev; |
| struct Enet_CB *cb; |
| struct sk_buff *skb, *new_skb; |
| unsigned long dmaFlag; |
| int len, discard_cnt = 0; |
| unsigned int rxpktprocessed = 0, rxpkttoprocess = 0; |
| unsigned int p_index = 0, c_index = 0, read_ptr = 0; |
| unsigned long start_addr, end_addr; |
| volatile struct rDmaRingRegs *rDma_desc; |
| |
| MY_BUG_ON(pDevCtrl->num_new_skbs != 0); |
| |
| rDma_desc = &pDevCtrl->rxDma->rDmaRings[index]; |
| |
| p_index = rDma_desc->rdma_producer_index; |
| p_index &= DMA_P_INDEX_MASK; |
| c_index = rDma_desc->rdma_consumer_index; |
| c_index &= DMA_C_INDEX_MASK; |
| read_ptr = rDma_desc->rdma_read_pointer; |
| read_ptr = ((read_ptr & DMA_RW_POINTER_MASK) >> 1); |
| start_addr = rDma_desc->rdma_start_addr; |
| end_addr = rDma_desc->rdma_end_addr; |
| if (p_index < c_index) |
| rxpkttoprocess = (DMA_C_INDEX_MASK+1) - c_index + p_index; |
| else |
| rxpkttoprocess = p_index - c_index; |
| TRACE(("RDMA: rxpkttoprocess=%d\n", rxpkttoprocess)); |
| |
| discard_cnt = (rDma_desc->rdma_producer_index >> 16); |
| if (discard_cnt) { |
| /* Report rx overrun errors */ |
| pDevCtrl->dev->stats.rx_over_errors += discard_cnt; |
| pDevCtrl->rxRingDiscCnt[index] += discard_cnt; |
| rDma_desc->rdma_producer_index = 0; |
| } |
| |
| while ((rxpktprocessed < rxpkttoprocess) && |
| (rxpktprocessed < budget)) { |
| |
| dmaFlag = (pDevCtrl->rxBds[read_ptr].length_status & 0xffff); |
| len = ((pDevCtrl->rxBds[read_ptr].length_status)>>16); |
| |
| TRACE(("%s:index=%d, p_index=%d c_index=%d read_ptr=%d " |
| "len_stat=0x%08lx\n", |
| __func__, index, p_index, c_index, read_ptr, |
| pDevCtrl->rxBds[read_ptr].length_status)); |
| |
| rxpktprocessed++; |
| |
| cb = &pDevCtrl->rxCbs[read_ptr]; |
| skb = cb->skb; |
| MY_BUG_ON(skb == NULL); |
| cb->skb = NULL; |
| dma_unmap_single(&dev->dev, cb->dma_addr, |
| pDevCtrl->rxBufLen, DMA_FROM_DEVICE); |
| |
| pDevCtrl->rxBds[read_ptr].address = 0; |
| |
| if (read_ptr == (end_addr & DMA_RW_POINTER_MASK) >> 1) { |
| read_ptr = (start_addr & DMA_RW_POINTER_MASK) >> 1; |
| } else { |
| read_ptr++; |
| } |
| |
| if (unlikely(!(dmaFlag & DMA_EOP) || !(dmaFlag & DMA_SOP))) { |
| printk(KERN_WARNING "Dropping fragmented packet: " |
| "index=%d, p_index=%d c_index=%d " |
| "read_ptr=%d len_stat=0x%08lx\n", |
| index, p_index, c_index, read_ptr, |
| pDevCtrl->rxBds[read_ptr].length_status); |
| dev->stats.rx_dropped++; |
| dev->stats.rx_errors++; |
| MY_BUG_ON(pDevCtrl->num_new_skbs >= TOTAL_DESC * 2); |
| pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = skb; |
| continue; |
| } |
| /* report errors */ |
| if (unlikely(dmaFlag & (DMA_RX_CRC_ERROR | |
| DMA_RX_OV | |
| DMA_RX_NO | |
| DMA_RX_LG | |
| DMA_RX_RXER))) { |
| TRACE(("ERROR: dmaFlag=0x%x\n", (unsigned int)dmaFlag)); |
| if (dmaFlag & DMA_RX_CRC_ERROR) |
| dev->stats.rx_crc_errors++; |
| if (dmaFlag & DMA_RX_OV) |
| dev->stats.rx_fifo_errors++; |
| if (dmaFlag & DMA_RX_NO) |
| dev->stats.rx_frame_errors++; |
| if (dmaFlag & DMA_RX_LG) |
| dev->stats.rx_length_errors++; |
| dev->stats.rx_dropped++; |
| dev->stats.rx_errors++; |
| |
| /* discard the packet and advance consumer index.*/ |
| MY_BUG_ON(pDevCtrl->num_new_skbs >= TOTAL_DESC * 2); |
| pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = skb; |
| continue; |
| } /* error packet */ |
| |
| MY_BUG_ON(pDevCtrl->num_new_skbs >= TOTAL_DESC * 2); |
| new_skb = netdev_alloc_skb(pDevCtrl->dev, |
| pDevCtrl->rxBufLen + SKB_ALIGNMENT); |
| if (!new_skb) { |
| pr_err_ratelimited("%s: failed to allocate skb, " |
| "dropping old packet.\n", dev->name); |
| pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = skb; |
| dev->stats.rx_over_errors++; |
| dev->stats.rx_dropped++; |
| continue; |
| } |
| handleAlignment(pDevCtrl, new_skb); |
| pDevCtrl->new_skbs[pDevCtrl->num_new_skbs++] = new_skb; |
| |
| skb_put(skb, len); |
| if (pDevCtrl->rbuf->rbuf_ctrl & RBUF_64B_EN) { |
| struct status_64 *status; |
| status = (struct status_64 *)skb->data; |
| /* we have 64B rx status block enabled.*/ |
| if (pDevCtrl->rbuf->rbuf_chk_ctrl & RBUF_RXCHK_EN) { |
| if (status->rx_csum & STATUS_RX_CSUM_OK) |
| skb->ip_summed = CHECKSUM_UNNECESSARY; |
| else |
| skb->ip_summed = CHECKSUM_NONE; |
| } |
| skb_pull(skb, 64); |
| len -= 64; |
| } |
| |
| if (pDevCtrl->bIPHdrOptimize) { |
| skb_pull(skb, 2); |
| len -= 2; |
| } |
| |
| if (pDevCtrl->umac->cmd & CMD_CRC_FWD) { |
| skb_trim(skb, len - 4); |
| len -= 4; |
| } |
| #ifdef CONFIG_BCMGENET_DUMP_DATA |
| printk(KERN_NOTICE "bcmgenet_desc_rx : len=%d", skb->len); |
| print_hex_dump(KERN_NOTICE, "", DUMP_PREFIX_ADDRESS, |
| 16, 1, skb->data, skb->len, 0); |
| #endif |
| |
| /*Finish setting up the received SKB and send it to the kernel*/ |
| skb->dev = pDevCtrl->dev; |
| skb->protocol = eth_type_trans(skb, pDevCtrl->dev); |
| dev->stats.rx_packets++; |
| dev->stats.rx_bytes += len; |
| if (dmaFlag & DMA_RX_MULT) |
| dev->stats.multicast++; |
| |
| /* Notify kernel */ |
| #ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| netif_rx(skb); |
| #else |
| netif_receive_skb(skb); |
| #endif |
| TRACE(("pushed up to kernel\n")); |
| } |
| |
| if (rxpktprocessed) { |
| /* |
| * assign_rx_buffers_for_queue() uses the current |
| * rdma_read_pointer so do not update it until after |
| * assign_rx_buffers_for_queue has been called. |
| */ |
| MY_BUG_ON(rxpktprocessed != pDevCtrl->num_new_skbs); |
| assign_rx_buffers_for_queue(pDevCtrl, index); |
| rDma_desc->rdma_read_pointer = (read_ptr << 1) & |
| DMA_RW_POINTER_MASK; |
| } |
| |
| return rxpktprocessed; |
| } |
| |
| |
| /* |
| * assign_rx_buffers: |
| * Assign skb to RX DMA descriptor. Used during initialization. |
| */ |
| static int assign_rx_buffers(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| unsigned short bdsfilled; |
| unsigned long flags; |
| |
| #ifndef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| (void)flags; |
| spin_lock_bh(&pDevCtrl->bh_lock); |
| #else |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| #endif |
| bdsfilled = |
| assign_rx_buffers_range(pDevCtrl, 0, 2 * TOTAL_DESC - 1, 0); |
| |
| #ifndef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| spin_unlock_bh(&pDevCtrl->bh_lock); |
| #else |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| #endif |
| |
| return bdsfilled; |
| } |
| |
| /* |
| * assign_rx_buffers for queue[index]. |
| */ |
| static int assign_rx_buffers_for_queue(struct BcmEnet_devctrl *pDevCtrl, |
| int index) |
| { |
| unsigned short bdsfilled; |
| unsigned long flags; |
| volatile struct rDmaRingRegs *rDma_desc; |
| |
| #ifndef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| (void)flags; |
| spin_lock_bh(&pDevCtrl->bh_lock); |
| #else |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| #endif |
| rDma_desc = &pDevCtrl->rxDma->rDmaRings[index]; |
| |
| bdsfilled = assign_rx_buffers_range(pDevCtrl, |
| rDma_desc->rdma_start_addr, |
| rDma_desc->rdma_end_addr, |
| rDma_desc->rdma_read_pointer); |
| /* Enable rx DMA in case it was disabled after running out of rx BDs */ |
| pDevCtrl->rxDma->rdma_ctrl |= DMA_EN; |
| |
| #ifndef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| spin_unlock_bh(&pDevCtrl->bh_lock); |
| #else |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| #endif |
| |
| return bdsfilled; |
| } |
| |
| |
| /* |
| * Assign buffers for addresses between start_addr and end_addr. |
| */ |
| static int assign_rx_buffers_range(struct BcmEnet_devctrl *pDevCtrl, |
| unsigned long start_addr, unsigned long end_addr, |
| unsigned long read_pointer) { |
| struct sk_buff *skb; |
| struct Enet_CB *cb; |
| unsigned short bdsfilled = 0; |
| unsigned long read_ptr; |
| |
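| /* |
| * The pointer registers count in units of half a BD (the ring |
| * bounds run 0 .. 2 * TOTAL_DESC - 1), so >> 1 converts a register |
| * value into a BD index. |
| */ |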
| read_ptr = (read_pointer & DMA_RW_POINTER_MASK) >> 1; |
| while (pDevCtrl->rxBds[read_ptr].address == 0) { |
| cb = &pDevCtrl->rxCbs[read_ptr]; |
| if (pDevCtrl->num_new_skbs > 0) { |
| skb = pDevCtrl->new_skbs[--pDevCtrl->num_new_skbs]; |
| pDevCtrl->new_skbs[pDevCtrl->num_new_skbs] = NULL; |
| MY_BUG_ON(!skb); |
| } else { |
| skb = netdev_alloc_skb(pDevCtrl->dev, |
| pDevCtrl->rxBufLen + SKB_ALIGNMENT); |
| if (!skb) { |
| pr_err_ratelimited( |
| "%s: failed to allocate skb for rx\n", |
| pDevCtrl->dev->name); |
| break; |
| } |
| handleAlignment(pDevCtrl, skb); |
| } |
| |
| /* keep count of any BD's we refill */ |
| bdsfilled++; |
| cb->skb = skb; |
| cb->dma_addr = dma_map_single(&pDevCtrl->dev->dev, |
| skb->data, pDevCtrl->rxBufLen, DMA_FROM_DEVICE); |
| /* assign packet, prepare descriptor, and advance pointer */ |
| pDevCtrl->rxBds[read_ptr].address = cb->dma_addr; |
| pDevCtrl->rxBds[read_ptr].length_status = |
| (pDevCtrl->rxBufLen << 16); |
| |
| /* turn on the newly assigned BD for DMA to use */ |
| if (read_ptr == (end_addr & DMA_RW_POINTER_MASK) >> 1) { |
| read_ptr = (start_addr & DMA_RW_POINTER_MASK) >> 1; |
| } else { |
| read_ptr++; |
| } |
| } |
| return bdsfilled; |
| } |
| |
| static void save_state(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| int ii; |
| volatile struct DmaDesc *rxBdAssignPtr = pDevCtrl->rxBds; |
| |
| for (ii = 0; ii < pDevCtrl->nrRxBds; ++ii, ++rxBdAssignPtr) { |
| pDevCtrl->saved_rx_desc[ii].length_status = |
| rxBdAssignPtr->length_status; |
| pDevCtrl->saved_rx_desc[ii].address = rxBdAssignPtr->address; |
| } |
| |
| pDevCtrl->int_mask = pDevCtrl->intrl2_0->cpu_mask_status; |
| pDevCtrl->rbuf_ctrl = pDevCtrl->rbuf->rbuf_ctrl; |
| } |
| |
| static void restore_state(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| int ii; |
| volatile struct DmaDesc *rxBdAssignPtr = pDevCtrl->rxBds; |
| |
| pDevCtrl->intrl2_0->cpu_mask_clear = 0xFFFFFFFF ^ pDevCtrl->int_mask; |
| pDevCtrl->rbuf->rbuf_ctrl = pDevCtrl->rbuf_ctrl; |
| |
| for (ii = 0; ii < pDevCtrl->nrRxBds; ++ii, ++rxBdAssignPtr) { |
| rxBdAssignPtr->length_status = |
| pDevCtrl->saved_rx_desc[ii].length_status; |
| rxBdAssignPtr->address = pDevCtrl->saved_rx_desc[ii].address; |
| } |
| |
| pDevCtrl->rxDma->rdma_ctrl |= DMA_EN; |
| |
| } |
| |
| /* |
| * init_umac: Initializes the uniMac controller |
| */ |
| static int init_umac(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| volatile struct uniMacRegs *umac; |
| volatile struct intrl2Regs *intrl2; |
| |
| umac = pDevCtrl->umac; |
| intrl2 = pDevCtrl->intrl2_0; |
| |
| TRACE(("bcmgenet: init_umac ")); |
| |
| /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ |
| GENET_RBUF_FLUSH_CTRL(pDevCtrl) = 0; |
| udelay(10); |
| |
| /* disable MAC while updating its registers */ |
| umac->cmd = 0; |
| |
| /* issue soft reset, wait for it to complete */ |
| umac->cmd = CMD_SW_RESET; |
| udelay(1000); |
| umac->cmd = 0; |
| /* clear tx/rx counter */ |
| umac->mib_ctrl = MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT; |
| umac->mib_ctrl = 0; |
| |
| #ifdef MAC_LOOPBACK |
| /* Enable GMII/MII loopback */ |
| umac->cmd |= CMD_LCL_LOOP_EN; |
| #endif |
| umac->max_frame_len = ENET_MAX_MTU_SIZE; |
| /* |
| * init rx registers, enable ip header optimization. |
| */ |
| if (pDevCtrl->bIPHdrOptimize) |
| pDevCtrl->rbuf->rbuf_ctrl |= RBUF_ALIGN_2B ; |
| #if CONFIG_BRCM_GENET_VERSION >= 3 |
| pDevCtrl->rbuf->rbuf_tbuf_size_ctrl = 1; |
| #endif |
| |
| /* Mask all interrupts.*/ |
| intrl2->cpu_mask_set = 0xFFFFFFFF; |
| intrl2->cpu_clear = 0xFFFFFFFF; |
| intrl2->cpu_mask_clear = 0x0; |
| |
| /* Enable HFB single match and multiple match interrupts. */ |
| intrl2->cpu_mask_clear |= (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM); |
| |
| #ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| intrl2->cpu_mask_clear |= UMAC_IRQ_RXDMA_MBDONE; |
| #else |
| intrl2->cpu_mask_clear |= UMAC_IRQ_RXDMA_BDONE; |
| TRACE(("%s:Enabling RXDMA_BDONE interrupt\n", __func__)); |
| #endif /* CONFIG_BCMGENET_RX_DESC_THROTTLE */ |
| |
| /* Monitor cable plug/unplug events for the internal PHY */ |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT) { |
| intrl2->cpu_mask_clear |= (UMAC_IRQ_PHY_DET_R | |
| UMAC_IRQ_PHY_DET_F); |
| intrl2->cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | |
| UMAC_IRQ_LINK_UP); |
| /* Turn on ENERGY_DET interrupt in bcmgenet_open() |
| * TODO: fix me for active standby. |
| */ |
| } else if (pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_MII || |
| pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII) { |
| intrl2->cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | |
| UMAC_IRQ_LINK_UP); |
| |
| } else if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) { |
| GENET_TBUF_BP_MC(pDevCtrl) |= BIT(GENET_BP_IN_EN_SHIFT); |
| |
| /* bp_mask: back pressure mask */ |
| #if defined(CONFIG_NET_SCH_MULTIQ) |
| GENET_TBUF_BP_MC(pDevCtrl) |= GENET_BP_MASK; |
| #else |
| GENET_TBUF_BP_MC(pDevCtrl) &= ~GENET_BP_MASK; |
| #endif |
| } |
| |
| /* Enable rx/tx engine.*/ |
| TRACE(("done init umac\n")); |
| return 0; |
| } |
| /* |
| * init_edma: Initialize DMA control register |
| */ |
| static void init_edma(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| #ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| int speeds[] = {10, 100, 1000, 2500}; |
| int sid = 1, timeout; |
| #endif |
| volatile struct rDmaRingRegs *rDma_desc; |
| volatile struct tDmaRingRegs *tDma_desc; |
| TRACE(("bcmgenet: init_edma\n")); |
| |
| /* init rDma */ |
| pDevCtrl->rxDma->rdma_scb_burst_size = DMA_MAX_BURST_LENGTH; |
| /* by default, enable ring 16 (descriptor based) */ |
| rDma_desc = &pDevCtrl->rxDma->rDmaRings[DESC_INDEX]; |
| rDma_desc->rdma_producer_index = 0; |
| rDma_desc->rdma_consumer_index = 0; |
| /* Initialize default queue. */ |
| MY_BUG_ON(GENET_RX_TOTAL_MQ_BD > TOTAL_DESC); |
| rDma_desc->rdma_ring_buf_size = ((GENET_RX_DEFAULT_BD_CNT << |
| DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH); |
| rDma_desc->rdma_start_addr = 2 * GENET_RX_TOTAL_MQ_BD; |
| rDma_desc->rdma_end_addr = 2 * TOTAL_DESC - 1; |
| rDma_desc->rdma_read_pointer = 2 * GENET_RX_TOTAL_MQ_BD; |
| rDma_desc->rdma_write_pointer = 2 * GENET_RX_TOTAL_MQ_BD; |
| rDma_desc->rdma_xon_xoff_threshold = ((DMA_FC_THRESH_LO |
| << DMA_XOFF_THRESHOLD_SHIFT) | |
| DMA_FC_THRESH_HI); |
| |
| #ifdef CONFIG_BCMGENET_RX_DESC_THROTTLE |
| /* |
| * Use descriptor throttle, fire irq when multiple packets are done! |
| */ |
| rDma_desc->rdma_mbuf_done_threshold = DMA_DESC_THRES; |
| /* |
| * Enable the push timer: force IRQ_DESC_THROT to fire on timeout, |
| * to prevent slow system response at low throughput. |
| */ |
| sid = (pDevCtrl->umac->cmd >> CMD_SPEED_SHIFT) & CMD_SPEED_MASK; |
| timeout = 2*(DMA_DESC_THRES*ENET_MAX_MTU_SIZE)/speeds[sid]; |
| pDevCtrl->rxDma->rdma_timeout[DESC_INDEX] = timeout & DMA_TIMEOUT_MASK; |
| #endif /* CONFIG_BCMGENET_RX_DESC_THROTTLE */ |
| |
| |
| /* Init tDma */ |
| pDevCtrl->txDma->tdma_scb_burst_size = DMA_MAX_BURST_LENGTH; |
| /* by default, enable ring DESC_INDEX (descriptor based) */ |
| tDma_desc = &pDevCtrl->txDma->tDmaRings[DESC_INDEX]; |
| tDma_desc->tdma_producer_index = 0; |
| tDma_desc->tdma_consumer_index = 0; |
| tDma_desc->tdma_mbuf_done_threshold = 1; |
| /* Disable rate control for now */ |
| tDma_desc->tdma_flow_period = 0; |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| /* Unclassified traffic goes to ring 16 */ |
| tDma_desc->tdma_ring_buf_size = ((GENET_TX_DEFAULT_BD_CNT << |
| DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH); |
| tDma_desc->tdma_start_addr = |
| 2 * GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT; |
| tDma_desc->tdma_end_addr = 2 * TOTAL_DESC - 1; |
| tDma_desc->tdma_read_pointer = |
| 2 * GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT; |
| tDma_desc->tdma_write_pointer = |
| 2 * GENET_TX_MQ_CNT * GENET_TX_MQ_BD_CNT; |
| pDevCtrl->txFreeBds = GENET_TX_DEFAULT_BD_CNT; |
| |
| /* Initialize priority tx queues */ |
| bcmgenet_init_multiq_tx(pDevCtrl->dev); |
| /* Initialize priority rx queues. */ |
| bcmgenet_init_multiq_rx(pDevCtrl->dev); |
| |
| #else |
| tDma_desc->tdma_ring_buf_size = ((TOTAL_DESC << DMA_RING_SIZE_SHIFT) | |
| RX_BUF_LENGTH); |
| tDma_desc->tdma_start_addr = 0; |
| tDma_desc->tdma_end_addr = 2 * TOTAL_DESC - 1; |
| tDma_desc->tdma_read_pointer = 0; |
| tDma_desc->tdma_write_pointer = 0; |
| #endif |
| |
| } |
| /*----------------------------------------------------------------------------- |
| * Exported function: initialize a ring buffer. |
| * dev: device pointer. |
| * direction: 0 for rx, 1 for tx. |
| * id: ring index. |
| * size: ring size, i.e. the number of buffers in the ring; must be a |
| * power of 2. |
| * buf_len: buffer length; must be 32-byte aligned, assumed to be 2KB here. |
| * buf: pointer to the buffer (contiguous memory); if *buf == NULL, the |
| * buffer will be allocated by this function. |
| *----------------------------------------------------------------------------*/ |
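| /* |
| * Example (a sketch only; "my_rx_buf" is a hypothetical caller-side |
| * pointer, not part of this driver): |
| * |
| * unsigned char *my_rx_buf = NULL; |
| * int err = bcmgenet_init_ringbuf(dev, GENET_ALLOC_RX_RING, |
| * 3, 64, RX_BUF_LENGTH, &my_rx_buf); |
| * if (err) |
| * return err; |
| */ |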
| int bcmgenet_init_ringbuf(struct net_device *dev, int direction, |
| unsigned int id, unsigned int size, |
| int buf_len, unsigned char **buf) |
| { |
| int speeds[] = {10, 100, 1000, 2500}; |
| int sid = 1; |
| int i, dma_enable, timeout; |
| dma_addr_t dma_start; |
| struct Enet_CB *cb; |
| unsigned long flags; |
| volatile struct rDmaRingRegs *rDmaRing; |
| volatile struct tDmaRingRegs *tDmaRing; |
| |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| if (id > 15 || (size & (size - 1))) |
| return -EINVAL; |
| |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| |
| if (direction == GENET_ALLOC_RX_RING) { |
| int buf_allocated = 0; |
| |
| if (*buf == NULL) { |
| /* GFP_ATOMIC: pDevCtrl->lock is held with IRQs off */ |
| *buf = kmalloc(size * buf_len, GFP_ATOMIC); |
| if (*buf == NULL) { |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return -ENOMEM; |
| } |
| buf_allocated = 1; |
| } |
| cb = kmalloc(size*sizeof(struct Enet_CB), GFP_ATOMIC); |
| if (cb == NULL) { |
| /* free the buffer only if we allocated it here */ |
| if (buf_allocated) { |
| kfree(*buf); |
| *buf = NULL; |
| } |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return -ENOMEM; |
| } |
| rDmaRing = &pDevCtrl->rxDma->rDmaRings[id]; |
| pDevCtrl->rxRingCbs[id] = cb; |
| pDevCtrl->rxRingSize[id] = size; |
| pDevCtrl->rxRingCIndex[id] = 0; |
| pDevCtrl->rxRingDiscCnt[id] = 0; |
| |
| dma_enable = pDevCtrl->rxDma->rdma_ctrl & DMA_EN; |
| pDevCtrl->rxDma->rdma_ctrl &= ~DMA_EN; |
| rDmaRing->rdma_producer_index = 0; |
| rDmaRing->rdma_consumer_index = 0; |
| rDmaRing->rdma_ring_buf_size = ((size << DMA_RING_SIZE_SHIFT) | |
| buf_len); |
| dma_start = dma_map_single(&dev->dev, *buf, buf_len * size, |
| DMA_FROM_DEVICE); |
| |
| for (i = 0; i < size; i++) { |
| cb->skb = NULL; |
| cb->BdAddr = (struct DmaDesc *)(*buf + i*buf_len); |
| cb->dma_addr = dma_start + i*buf_len; |
| cb->dma_len = buf_len; |
| cb++; |
| } |
| |
| rDmaRing->rdma_start_addr = dma_start; |
| rDmaRing->rdma_end_addr = dma_start + size * buf_len - 1; |
| rDmaRing->rdma_xon_xoff_threshold = (DMA_FC_THRESH_LO << |
| DMA_XOFF_THRESHOLD_SHIFT) | DMA_FC_THRESH_HI; |
| rDmaRing->rdma_read_pointer = dma_start; |
| rDmaRing->rdma_write_pointer = dma_start; |
| |
| /* |
| * Use descriptor throttle, fire interrupt only when multiple |
| * packets are done! |
| */ |
| rDmaRing->rdma_mbuf_done_threshold = DMA_DESC_THRES; |
| /* |
| * Enable push timer, that is, force the IRQ_DESC_THROT to fire |
| * when timeout occurs, to prevent system slow reponse when |
| * handling low throughput data. |
| */ |
| sid = (pDevCtrl->umac->cmd >> CMD_SPEED_SHIFT) & CMD_SPEED_MASK; |
| timeout = 16*2*(DMA_DESC_THRES*ENET_MAX_MTU_SIZE)/speeds[sid]; |
| /* set large pushtimer value to reduce interrupt rate */ |
| pDevCtrl->rxDma->rdma_timeout[id] = timeout & DMA_TIMEOUT_MASK; |
| |
| /* Enable interrupt for this ring */ |
| pDevCtrl->intrl2_1->cpu_mask_clear |= (1 << (id + 16)); |
| pDevCtrl->rxDma->rdma_ctrl |= |
| (1 << (id + DMA_RING_BUF_EN_SHIFT)); |
| if (!(pDevCtrl->rbuf->rbuf_ctrl & RBUF_64B_EN)) |
| pDevCtrl->rbuf->rbuf_ctrl |= RBUF_64B_EN; |
| if (dma_enable) |
| pDevCtrl->rxDma->rdma_ctrl |= DMA_EN; |
| } else { |
| /* GFP_ATOMIC: pDevCtrl->lock is held with IRQs off */ |
| cb = kmalloc(size*sizeof(struct Enet_CB), GFP_ATOMIC); |
| if (cb == NULL) { |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return -ENOMEM; |
| } |
| tDmaRing = &pDevCtrl->txDma->tDmaRings[id]; |
| pDevCtrl->txRingCBs[id] = cb; |
| pDevCtrl->txRingSize[id] = size; |
| pDevCtrl->txRingCIndex[id] = 0; |
| |
| dma_enable = pDevCtrl->txDma->tdma_ctrl & DMA_EN; |
| pDevCtrl->txDma->tdma_ctrl &= ~DMA_EN; |
| tDmaRing->tdma_producer_index = 0; |
| tDmaRing->tdma_consumer_index = 0; |
| tDmaRing->tdma_ring_buf_size = ((size << DMA_RING_SIZE_SHIFT) | |
| buf_len); |
| dma_start = dma_map_single(&dev->dev, *buf, buf_len * size, |
| DMA_TO_DEVICE); |
| for (i = 0; i < size; i++) { |
| cb->skb = NULL; |
| cb->BdAddr = (struct DmaDesc *)(*buf + i * buf_len); |
| cb->dma_addr = dma_start + i * buf_len; |
| cb++; |
| } |
| tDmaRing->tdma_start_addr = dma_start; |
| tDmaRing->tdma_end_addr = dma_start + size * buf_len - 1; |
| tDmaRing->tdma_mbuf_done_threshold = 1; |
| tDmaRing->tdma_flow_period = ENET_MAX_MTU_SIZE << 16; |
| tDmaRing->tdma_read_pointer = dma_start; |
| tDmaRing->tdma_write_pointer = dma_start; |
| |
| if (!(GENET_TBUF_CTRL(pDevCtrl) & RBUF_64B_EN)) { |
| GENET_TBUF_CTRL(pDevCtrl) |= RBUF_64B_EN; |
| if (dev->needed_headroom < 64) |
| dev->needed_headroom += 64; |
| } |
| pDevCtrl->txDma->tdma_ctrl |= DMA_TSB_SWAP_EN; |
| pDevCtrl->txDma->tdma_ctrl |= (1<<(id+DMA_RING_BUF_EN_SHIFT)); |
| if (dma_enable) |
| pDevCtrl->txDma->tdma_ctrl |= DMA_EN; |
| } |
| |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| EXPORT_SYMBOL(bcmgenet_init_ringbuf); |
| /* |
| * bcmgenet_uninit_ringbuf: clean up a ring buffer. |
| * If "free" is non-zero, the buffer is freed as well. |
| */ |
| int bcmgenet_uninit_ringbuf(struct net_device *dev, int direction, |
| unsigned int id, int free) |
| { |
| int dma_enable, size = 0, buflen, i; |
| struct Enet_CB *cb; |
| unsigned long flags; |
| volatile struct rDmaRingRegs *rDmaRing; |
| volatile struct tDmaRingRegs *tDmaRing; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| if (id > 15) |
| return -EINVAL; |
| |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| if (direction == GENET_ALLOC_RX_RING) { |
| rDmaRing = &pDevCtrl->rxDma->rDmaRings[id]; |
| size = (rDmaRing->rdma_ring_buf_size >> DMA_RING_SIZE_SHIFT); |
| buflen = (0xFFFF & rDmaRing->rdma_ring_buf_size); |
| /* Disable this ring first */ |
| dma_enable = pDevCtrl->rxDma->rdma_ctrl & DMA_EN; |
| pDevCtrl->rxDma->rdma_ctrl &= ~DMA_EN; |
| pDevCtrl->rxDma->rdma_ctrl &= |
| ~(1 << (id + DMA_RING_BUF_EN_SHIFT)); |
| dma_unmap_single(&pDevCtrl->dev->dev, |
| rDmaRing->rdma_start_addr, |
| size * buflen, |
| DMA_FROM_DEVICE); |
| |
| /*release resources */ |
| cb = pDevCtrl->rxRingCbs[id]; |
| |
| for (i = 0; i < size; i++) { |
| if (cb->skb != NULL) |
| dev_kfree_skb_any(cb->skb); |
| cb++; |
| } |
| if (free) |
| kfree((void *)pDevCtrl->rxRingCbs[id]->BdAddr); |
| |
| kfree(pDevCtrl->rxRingCbs[id]); |
| if (dma_enable) |
| pDevCtrl->rxDma->rdma_ctrl |= DMA_EN; |
| } else { |
| tDmaRing = &pDevCtrl->txDma->tDmaRings[id]; |
| size = (tDmaRing->tdma_ring_buf_size >> DMA_RING_SIZE_SHIFT); |
| buflen = (0xFFFF & tDmaRing->tdma_ring_buf_size); |
| dma_enable = pDevCtrl->txDma->tdma_ctrl & DMA_EN; |
| pDevCtrl->txDma->tdma_ctrl &= ~DMA_EN; |
| /* Disable this ring first */ |
| pDevCtrl->txDma->tdma_ctrl &= |
| ~(1 << (id + DMA_RING_BUF_EN_SHIFT)); |
| dma_unmap_single(&pDevCtrl->dev->dev, |
| tDmaRing->tdma_start_addr, |
| size * buflen, |
| DMA_TO_DEVICE); |
| |
| /*release resources */ |
| cb = pDevCtrl->txRingCBs[id]; |
| kfree(cb); |
| /* |
| * if all rings are disabled and tx checksum offloading |
| * is off, disable TSB. |
| */ |
| if (!(pDevCtrl->txDma->tdma_ctrl & (0xFFFF << 1)) |
| && !(dev->features & NETIF_F_IP_CSUM)) { |
| GENET_TBUF_CTRL(pDevCtrl) &= ~RBUF_64B_EN; |
| if (dev->needed_headroom > 64) |
| dev->needed_headroom -= 64; |
| } |
| if (dma_enable) |
| pDevCtrl->txDma->tdma_ctrl |= DMA_EN; |
| } |
| |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| EXPORT_SYMBOL(bcmgenet_uninit_ringbuf); |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| static void bcmgenet_init_multiq_rx(struct net_device *dev) |
| { |
| int i, dma_enable; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| dma_enable = pDevCtrl->rxDma->rdma_ctrl & DMA_EN; |
| pDevCtrl->rxDma->rdma_ctrl &= ~DMA_EN; |
| for (i = 0; i < GENET_RX_MQ_CNT; i++) { |
| volatile struct rDmaRingRegs *rDma_desc; |
| rDma_desc = &pDevCtrl->rxDma->rDmaRings[i]; |
| |
| pDevCtrl->rxRingCbs[i] = pDevCtrl->rxCbs + |
| i * GENET_RX_MQ_BD_CNT; |
| pDevCtrl->rxRingSize[i] = GENET_RX_MQ_BD_CNT; |
| pDevCtrl->rxRingCIndex[i] = 0; |
| |
| rDma_desc->rdma_producer_index = 0; |
| rDma_desc->rdma_consumer_index = 0; |
| rDma_desc->rdma_ring_buf_size = |
| (GENET_RX_MQ_BD_CNT << DMA_RING_SIZE_SHIFT) | |
| RX_BUF_LENGTH; |
| rDma_desc->rdma_start_addr = 2 * i * GENET_RX_MQ_BD_CNT; |
| rDma_desc->rdma_end_addr = 2 * (i + 1)*GENET_RX_MQ_BD_CNT - 1; |
| rDma_desc->rdma_mbuf_done_threshold = 1; |
| rDma_desc->rdma_write_pointer = 2 * i * GENET_RX_MQ_BD_CNT; |
| rDma_desc->rdma_read_pointer = 2 * i * GENET_RX_MQ_BD_CNT; |
| |
| /* Enable this descriptor ring. */ |
| pDevCtrl->rxDma->rdma_ring_cfg |= (1 << i); |
| pDevCtrl->rxDma->rdma_ctrl |= |
| (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
| rDma_desc->rdma_xon_xoff_threshold = |
| (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | |
| DMA_FC_THRESH_HI; |
| } |
| /* |
| * There are a total of 8 rdma_index2ring registers. Each nibble |
| * of each register controls one filter_index->queue_index mapping. |
| * To map filter_index to queue_index, the register is programmed |
| * as follows: |
| * register = filter_index / 8; |
| * nibble = filter_index % 8; |
| * rdma_index2ring[register] &= ~(0xf << (nibble * 4)); |
| * rdma_index2ring[register] |= (queue_index << (nibble * 4)); |
| * If we had 8 filters enabled and wanted to map each filter to the |
| * queue with the same index, we would do: |
| * pDevCtrl->rxDma->rdma_index2ring[0] = 0x76543210; |
| * |
| * Since we have only one priority queue at the moment, we map all |
| * 8 prioritized filters to queue 0. Should the number of priority |
| * queues change, this mapping will need to change as well. |
| */ |
| pDevCtrl->rxDma->rdma_index2ring[0] = 0x0; |
| |
| if (dma_enable) |
| pDevCtrl->rxDma->rdma_ctrl |= DMA_EN; |
| } |
| |
| /* |
| * init multi xmit queues, only available for GENET2 |
| * the queue is partitioned as follows: |
| * - there are GENET_TX_MQ_CNT priority queues, each having GENET_TX_MQ_BD_CNT |
| * descriptors, with queue 0 being the highest priority queue. |
| * - queue 16 is the default tx queue, that has GENET_TX_DEFAULT_BD_CNT |
| * descriptors. |
| */ |
| static void bcmgenet_init_multiq_tx(struct net_device *dev) |
| { |
| int i, dma_enable; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| dma_enable = pDevCtrl->txDma->tdma_ctrl & DMA_EN; |
| pDevCtrl->txDma->tdma_ctrl &= ~DMA_EN; |
| /* Enable strict priority arbiter mode */ |
| pDevCtrl->txDma->tdma_arb_ctrl = 0x2; |
| for (i = 0; i < GENET_TX_MQ_CNT; i++) { |
| /* the first GENET_TX_DEFAULT_BD_CNT BDs are reserved |
| * for the default tx queue (ring 16) */ |
| pDevCtrl->txRingCBs[i] = pDevCtrl->txCbs + |
| GENET_TX_DEFAULT_BD_CNT + i * GENET_TX_MQ_BD_CNT; |
| pDevCtrl->txRingSize[i] = GENET_TX_MQ_BD_CNT; |
| pDevCtrl->txRingCIndex[i] = 0; |
| pDevCtrl->txRingFreeBds[i] = GENET_TX_MQ_BD_CNT; |
| |
| pDevCtrl->txDma->tDmaRings[i].tdma_producer_index = 0; |
| pDevCtrl->txDma->tDmaRings[i].tdma_consumer_index = 0; |
| pDevCtrl->txDma->tDmaRings[i].tdma_ring_buf_size = |
| (GENET_TX_MQ_BD_CNT << DMA_RING_SIZE_SHIFT) | |
| RX_BUF_LENGTH; |
| pDevCtrl->txDma->tDmaRings[i].tdma_start_addr = |
| 2 * i * GENET_TX_MQ_BD_CNT; |
| pDevCtrl->txDma->tDmaRings[i].tdma_end_addr = |
| 2 * (i + 1) * GENET_TX_MQ_BD_CNT - 1; |
| pDevCtrl->txDma->tDmaRings[i].tdma_flow_period = |
| ENET_MAX_MTU_SIZE << 16; |
| pDevCtrl->txDma->tDmaRings[i].tdma_mbuf_done_threshold = 1; |
| pDevCtrl->txDma->tDmaRings[i].tdma_write_pointer = |
| 2 * i * GENET_TX_MQ_BD_CNT; |
| pDevCtrl->txDma->tDmaRings[i].tdma_read_pointer = |
| 2 * i * GENET_TX_MQ_BD_CNT; |
| |
| /* Configure ring as descriptor ring and setup priority */ |
| pDevCtrl->txDma->tdma_ring_cfg |= (1 << i); |
| pDevCtrl->txDma->tdma_priority[0] |= |
| ((GENET_TX_Q0_PRIORITY + i) << 5*i); |
| pDevCtrl->txDma->tdma_ctrl |= |
| (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
| } |
| |
| /* Set ring #16 priority */ |
| pDevCtrl->txDma->tdma_priority[2] |= |
| ((GENET_TX_Q0_PRIORITY + GENET_TX_MQ_CNT) << 20); |
| if (dma_enable) |
| pDevCtrl->txDma->tdma_ctrl |= DMA_EN; |
| } |
| #endif |
| /* |
| * bcmgenet_init_dev: initialize the UniMAC device. |
| * Allocates the Tx/Rx buffer descriptor pools and control block pools. |
| */ |
| */ |
| static int bcmgenet_init_dev(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| int i, ret; |
| unsigned long base; |
| void *ptxCbs, *prxCbs; |
| volatile struct DmaDesc *lastBd; |
| |
| pDevCtrl->clk = clk_get(&pDevCtrl->pdev->dev, "enet"); |
| pDevCtrl->clk_wol = clk_get(&pDevCtrl->pdev->dev, "enet-wol"); |
| bcmgenet_clock_enable(pDevCtrl); |
| |
| TRACE(("%s\n", __func__)); |
| /* setup buffer/pointer relationships here */ |
| pDevCtrl->nrTxBds = pDevCtrl->nrRxBds = TOTAL_DESC; |
| /* Always use 2KB buffer for 7420*/ |
| pDevCtrl->rxBufLen = RX_BUF_LENGTH; |
| |
| /* register block locations */ |
| base = pDevCtrl->dev->base_addr; |
| pDevCtrl->sys = (struct SysRegs *)(base); |
| pDevCtrl->grb = (struct GrBridgeRegs *)(base + GENET_GR_BRIDGE_OFF); |
| pDevCtrl->ext = (struct ExtRegs *)(base + GENET_EXT_OFF); |
| #if CONFIG_BRCM_GENET_VERSION == 1 |
| /* SWLINUX-1813: EXT block is not available on MOCA_GENET */ |
| #if !defined(CONFIG_BCM7125) |
| if (pDevCtrl->devnum == 1) |
| #endif |
| pDevCtrl->ext = NULL; |
| #endif |
| pDevCtrl->intrl2_0 = (struct intrl2Regs *)(base + GENET_INTRL2_0_OFF); |
| pDevCtrl->intrl2_1 = (struct intrl2Regs *)(base + GENET_INTRL2_1_OFF); |
| pDevCtrl->rbuf = (struct rbufRegs *)(base + GENET_RBUF_OFF); |
| pDevCtrl->umac = (struct uniMacRegs *)(base + GENET_UMAC_OFF); |
| pDevCtrl->hfb = (unsigned long *)(base + GENET_HFB_OFF); |
| pDevCtrl->txDma = (struct tDmaRegs *)(base + GENET_TDMA_REG_OFF); |
| pDevCtrl->rxDma = (struct rDmaRegs *)(base + GENET_RDMA_REG_OFF); |
| |
| #if CONFIG_BRCM_GENET_VERSION > 1 |
| pDevCtrl->tbuf = (struct tbufRegs *)(base + GENET_TBUF_OFF); |
| pDevCtrl->hfbReg = (struct hfbRegs *)(base + GENET_HFB_REG_OFF); |
| #endif |
| |
| pDevCtrl->rxBds = (struct DmaDesc *)(base + GENET_RDMA_OFF); |
| pDevCtrl->txBds = (struct DmaDesc *)(base + GENET_TDMA_OFF); |
| |
| TRACE(("%s: rxbds=0x%08x txbds=0x%08x\n", __func__, |
| (unsigned int)pDevCtrl->rxBds, (unsigned int)pDevCtrl->txBds)); |
| |
| /* alloc space for the tx control block pool */ |
| ptxCbs = kmalloc(pDevCtrl->nrTxBds*sizeof(struct Enet_CB), GFP_KERNEL); |
| if (!ptxCbs) { |
| bcmgenet_clock_disable(pDevCtrl); |
| return -ENOMEM; |
| } |
| memset(ptxCbs, 0, pDevCtrl->nrTxBds*sizeof(struct Enet_CB)); |
| pDevCtrl->txCbs = (struct Enet_CB *)ptxCbs; |
| |
| /* initialize rx ring pointer variables. */ |
| pDevCtrl->rxBdAssignPtr = pDevCtrl->rxBds; |
| prxCbs = kmalloc(pDevCtrl->nrRxBds*sizeof(struct Enet_CB), GFP_KERNEL); |
| if (!prxCbs) { |
| ret = -ENOMEM; |
| goto error2; |
| } |
| memset(prxCbs, 0, pDevCtrl->nrRxBds*sizeof(struct Enet_CB)); |
| pDevCtrl->rxCbs = (struct Enet_CB *)prxCbs; |
| |
| /* init the receive buffer descriptor ring */ |
| for (i = 0; i < pDevCtrl->nrRxBds; i++) { |
| (pDevCtrl->rxBds + i)->length_status = (pDevCtrl->rxBufLen<<16); |
| (pDevCtrl->rxBds + i)->address = 0; |
| } |
| lastBd = pDevCtrl->rxBds + pDevCtrl->nrRxBds - 1; |
| |
| /* clear the transmit buffer descriptors */ |
| for (i = 0; i < pDevCtrl->nrTxBds; i++) { |
| (pDevCtrl->txBds + i)->length_status = 0<<16; |
| (pDevCtrl->txBds + i)->address = 0; |
| } |
| lastBd = pDevCtrl->txBds + pDevCtrl->nrTxBds - 1; |
| pDevCtrl->txFreeBds = pDevCtrl->nrTxBds; |
| |
| /* fill receive buffers */ |
| if (assign_rx_buffers(pDevCtrl) == 0) { |
| printk(KERN_ERR "Failed to assign rx buffers\n"); |
| ret = -ENOMEM; |
| goto error1; |
| } |
| |
| TRACE(("%s done!\n", __func__)); |
| /* init umac registers */ |
| if (init_umac(pDevCtrl)) { |
| ret = -EFAULT; |
| goto error1; |
| } |
| |
| /* init dma registers */ |
| init_edma(pDevCtrl); |
| |
| #if (CONFIG_BRCM_GENET_VERSION > 1) && defined(CONFIG_NET_SCH_MULTIQ) |
| pDevCtrl->nrTxBds = GENET_TX_DEFAULT_BD_CNT; |
| #endif |
| |
| TRACE(("%s done!\n", __func__)); |
| /* Enable HFB filtering. */ |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) |
| bcmgenet_enable_pcp_hfb(pDevCtrl); |
| else |
| bcmgenet_enable_multicast_hfb(pDevCtrl); |
| |
| /* if we reach this point, we've init'ed successfully */ |
| return 0; |
| error1: |
| kfree(prxCbs); |
| error2: |
| kfree(ptxCbs); |
| bcmgenet_clock_disable(pDevCtrl); |
| |
| TRACE(("%s Failed!\n", __func__)); |
| return ret; |
| } |
| |
| /* Uninitialize tx/rx buffer descriptor pools */ |
| static void bcmgenet_uninit_dev(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| int i; |
| |
| if (pDevCtrl) { |
| /* Disable HFB filtering. */ |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) |
| bcmgenet_disable_pcp_hfb(pDevCtrl); |
| |
| /* disable DMA */ |
| pDevCtrl->rxDma->rdma_ctrl = 0; |
| pDevCtrl->txDma->tdma_ctrl = 0; |
| |
| for (i = 0; i < pDevCtrl->nrTxBds; i++) { |
| if (pDevCtrl->txCbs[i].skb != NULL) { |
| dev_kfree_skb(pDevCtrl->txCbs[i].skb); |
| pDevCtrl->txCbs[i].skb = NULL; |
| } |
| } |
| for (i = 0; i < pDevCtrl->nrRxBds; i++) { |
| if (pDevCtrl->rxCbs[i].skb != NULL) { |
| dev_kfree_skb(pDevCtrl->rxCbs[i].skb); |
| pDevCtrl->rxCbs[i].skb = NULL; |
| } |
| } |
| |
| /* the BDs live in device I/O memory; just clear the pointers */ |
| pDevCtrl->txBds = NULL; |
| pDevCtrl->rxBds = NULL; |
| /* free the transmit control block pool */ |
| kfree(pDevCtrl->txCbs); |
| /* free the receive control block pool */ |
| kfree(pDevCtrl->rxCbs); |
| |
| clk_put(pDevCtrl->clk_wol); |
| clk_put(pDevCtrl->clk); |
| } |
| } |
| |
| #define FILTER_POS(i) (((HFB_NUM_FLTRS-i-1)>>2)) |
| #define SET_HFB_FILTER_LEN(dev, i, len) \ |
| do { \ |
| u32 tmp = GENET_HFB_FLTR_LEN(dev, FILTER_POS(i)); \ |
| tmp &= ~(RBUF_FLTR_LEN_MASK << (RBUF_FLTR_LEN_SHIFT * (i & 0x03))); \ |
| tmp |= (len << (RBUF_FLTR_LEN_SHIFT * (i & 0x03))); \ |
| GENET_HFB_FLTR_LEN(dev, FILTER_POS(i)) = tmp; \ |
| } while (0) |
| |
| #define GET_HFB_FILTER_LEN(dev, i) \ |
| ((GENET_HFB_FLTR_LEN(dev, FILTER_POS(i)) >> \ |
| (RBUF_FLTR_LEN_SHIFT * (i & 0x03))) & RBUF_FLTR_LEN_MASK) |
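| /* |
| * Each length register packs four filter-length fields, selected by |
| * (i & 0x03); FILTER_POS() maps filter i to its register, counting |
| * from the top filter down. E.g. SET_HFB_FILTER_LEN(dev, 5, 16) |
| * rewrites only the field for filter 5 and leaves its three |
| * neighbours untouched. |
| */ |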
| |
| #if CONFIG_BRCM_GENET_VERSION >= 3 |
| /* The order of GENET_x_HFB_FLT_ENBLE_0/1 is reversed !!! */ |
| #define GET_HFB_FILTER_EN(dev, i) \ |
| (dev->hfbReg->hfb_flt_enable[i < 32] & (1 << (i % 32))) |
| #define HFB_FILTER_ENABLE(dev, i) \ |
| (dev->hfbReg->hfb_flt_enable[i < 32] |= (1 << (i % 32))) |
| #define HFB_FILTER_DISABLE(dev, i) \ |
| (dev->hfbReg->hfb_flt_enable[i < 32] &= ~(1 << (i % 32))) |
| #define HFB_FILTER_DISABLE_ALL(dev) \ |
| do { \ |
| dev->hfbReg->hfb_flt_enable[0] = 0; \ |
| dev->hfbReg->hfb_flt_enable[1] = 0; \ |
| } while (0) |
| #else |
| #define GET_HFB_FILTER_EN(dev, i) \ |
| ((GENET_HFB_CTRL(dev) >> (i + RBUF_HFB_FILTER_EN_SHIFT)) & 0x01) |
| #define HFB_FILTER_ENABLE(dev, i) \ |
| (GENET_HFB_CTRL(dev) |= 1 << (i + RBUF_HFB_FILTER_EN_SHIFT)) |
| #define HFB_FILTER_DISABLE(dev, i) \ |
| (GENET_HFB_CTRL(dev) &= ~(1 << (i + RBUF_HFB_FILTER_EN_SHIFT))) |
| #define HFB_FILTER_DISABLE_ALL(dev) \ |
| (GENET_HFB_CTRL(dev) &= ~(0xffff << (RBUF_HFB_FILTER_EN_SHIFT))) |
| #endif |
| |
| /* |
| * Enable IPv4 Multicast filtering in HFB. |
| */ |
| static int bcmgenet_enable_multicast_hfb(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| struct net_device *dev = pDevCtrl->dev; |
| int filter; |
| |
| filter = bcmgenet_update_hfb(dev, |
| hfb_ipv4_multicast, ARRAY_SIZE(hfb_ipv4_multicast), 0); |
| if (filter < 0) { |
| printk(KERN_ERR "%s: Unable to update multicast HFB\n", |
| __func__); |
| return -1; |
| } |
| GENET_HFB_CTRL(pDevCtrl) |= RBUF_HFB_EN; |
| return 0; |
| } |
| |
| /* |
| * Enable PCP (Priority Code Point) filtering in HFB. |
| */ |
| void bcmgenet_enable_pcp_hfb(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| int filter; |
| int filter_size; /* filter size in unsigned long */ |
| int filter_len = HFB_8021Q_LEN; |
| |
| filter_size = (GENET_HFB_CTRL(pDevCtrl) & RBUF_HFB_256B) ? 128 : 64; |
| |
| /* Clear all filters. */ |
| memset((void*)pDevCtrl->hfb, 0, |
| HFB_NUM_FLTRS * filter_size * sizeof (unsigned long)); |
| |
| /* |
| * Enable PCP. Do not map PCP 0 so that traffic with |
| * PCP set to 0 will go to the default queue. |
| */ |
| for (filter = PCP_START; filter <= PCP_END; filter++) { |
| volatile unsigned long *addr = |
| &pDevCtrl->hfb[filter * filter_size]; |
| |
| MY_BUG_ON(PCP_START < 0 || PCP_START >= PCP_COUNT); |
| MY_BUG_ON(PCP_END < 0 || PCP_END >= PCP_COUNT); |
| /* |
| * Mask out the first 12 packet bytes (destination and source MAC |
| * addresses). Each 32-bit filter word covers two packet bytes plus |
| * a match mask, so this means zeroing the first 24 bytes (NOT 12!) |
| * of the filter, which the memset above already did. Simply advance |
| * the word pointer. Each filter word has the following format: |
| * |
| * 31 23 15 7 0 |
| * ------------------------------- |
| * | unused | Mask | B0 | B1 | |
| * ------------------------------- |
| */ |
| addr += 6; |
| |
| /* Match the next 4 nibbles (TPID). */ |
| *addr++ = 0xf << 16 | TPID; |
| /* Match the next nibble (PCP and CFI). Mask 3 nibbles (VID). */ |
| *addr = 0x8 << 16 | filter << 13; |
| |
| /* Set the filter length. */ |
| SET_HFB_FILTER_LEN(pDevCtrl, filter, filter_len); |
| |
| /* Enable the filter. */ |
| HFB_FILTER_ENABLE(pDevCtrl, filter); |
| } |
| GENET_HFB_CTRL(pDevCtrl) |= RBUF_HFB_EN; |
| } |
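| 
| /*
|  * Worked example of the two filter words written in the loop above,
|  * using the nibble-mask format from the diagram: for PCP 5 the stores
|  * are
|  *
|  *   addr[0] = 0xf << 16 | TPID;      compare all four TPID nibbles
|  *   addr[1] = 0x8 << 16 | 5 << 13;   i.e. 0x0008a000
|  *
|  * so only the top nibble of the VLAN TCI is compared against 0xa
|  * (binary 1010: PCP = 5, CFI = 0) and the 12-bit VID is ignored,
|  * steering every PCP-5 frame, whatever its VID, into this filter.
|  */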
| |
| void bcmgenet_disable_pcp_hfb(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| int filter_size = (GENET_HFB_CTRL(pDevCtrl) & RBUF_HFB_256B) ? 128 : 64; |
| int filter; |
| |
| GENET_HFB_CTRL(pDevCtrl) &= ~RBUF_HFB_EN; |
| |
| for (filter = PCP_START; filter <= PCP_END; filter++) { |
| HFB_FILTER_DISABLE(pDevCtrl, filter); |
| } |
| memset((void *)pDevCtrl->hfb, 0,
| PCP_COUNT * filter_size * sizeof(unsigned long));
| } |
| |
| /* |
| * Program an ACPI pattern into the HFB. Returns the filter index on
| * success. If user == 1, the data will be copied from user space.
| */ |
| int bcmgenet_update_hfb(struct net_device *dev, unsigned int *data, |
| int len, int user) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| int filter, offset, count; |
| unsigned int *tmp; |
| |
| TRACE(("Updating HFB len=%d\n", len));
| |
| count = HFB_NUM_FLTRS; |
| offset = 64; |
| if (GENET_HFB_CTRL(pDevCtrl) & RBUF_HFB_256B) { |
| #if CONFIG_BRCM_GENET_VERSION < 3 |
| count >>= 1; |
| #endif |
| offset = 128; |
| } |
| |
| if (len > offset) |
| return -EINVAL; |
| |
| /* find next unused filter */ |
| for (filter = 0; filter < count; filter++) { |
| if (!GET_HFB_FILTER_EN(pDevCtrl, filter)) |
| break; |
| } |
| if (filter == count) { |
| printk(KERN_ERR "no unused filter available!\n"); |
| return -EINVAL; /* all filters are in use */
| } |
| |
| if (user) { |
| tmp = kmalloc(len * sizeof(unsigned int), GFP_KERNEL);
| if (tmp == NULL) {
| printk(KERN_ERR "%s: kmalloc failed\n", __func__);
| return -ENOMEM;
| }
| /* copy pattern data */
| if (copy_from_user(tmp, data, len * sizeof(unsigned int)) != 0) {
| printk(KERN_ERR "Failed to copy user data: src=%p, dst=%p\n",
| data, pDevCtrl->hfb + filter*offset);
| kfree(tmp);
| return -EFAULT;
| }
| } else { |
| tmp = data; |
| } |
| /* Copy pattern data into HFB registers.*/ |
| for (count = 0; count < offset; count++) { |
| if (count < len) |
| pDevCtrl->hfb[filter * offset + count] = *(tmp + count); |
| else |
| pDevCtrl->hfb[filter * offset + count] = 0; |
| } |
| if (user) |
| kfree(tmp); |
| |
| /* set the filter length, in bytes (each word matches two bytes) */
| SET_HFB_FILTER_LEN(pDevCtrl, filter, len * 2);
| |
| /*enable this filter.*/ |
| HFB_FILTER_ENABLE(pDevCtrl, filter); |
| |
| return filter;
| }
| EXPORT_SYMBOL(bcmgenet_update_hfb); |
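| 
| /*
|  * Minimal kernel-side usage sketch (the pattern words here are
|  * hypothetical; with user == 0 the data is read straight from kernel
|  * memory, as the multicast and ARP helpers in this file do):
|  *
|  *   static unsigned int my_pattern[] = { 0xf8100, ... };
|  *   int f = bcmgenet_update_hfb(dev, my_pattern,
|  *                               ARRAY_SIZE(my_pattern), 0);
|  *   if (f < 0)
|  *           return f;                    no free filter or bad length
|  *   GENET_HFB_CTRL(pDevCtrl) |= RBUF_HFB_EN;     arm the HFB block
|  */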
| /* |
| * read ACPI pattern data for a particular filter. |
| */ |
| static int bcmgenet_read_hfb(struct net_device *dev, struct acpi_data *u_data)
| { |
| int filter, offset, count, len; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| if (get_user(filter, &(u_data->fltr_index))) { |
| printk(KERN_ERR "Failed to get user data\n"); |
| return -EFAULT; |
| } |
| |
| count = HFB_NUM_FLTRS; |
| offset = 128; |
| if (GENET_HFB_CTRL(pDevCtrl) & RBUF_HFB_256B) { |
| #if CONFIG_BRCM_GENET_VERSION < 3 |
| count >>= 1; |
| #endif |
| offset = 256; |
| } |
| |
| if (filter < 0 || filter >= count)
| return -EINVAL;
| |
| /* see if this filter is enabled, if not, return length 0 */ |
| if (!GET_HFB_FILTER_EN(pDevCtrl, filter)) { |
| len = 0; |
| put_user(len, &u_data->count);
| return 0; |
| } |
| /* check the filter length, in bytes */
| len = GET_HFB_FILTER_LEN(pDevCtrl, filter);
| if (get_user(count, &u_data->count))
| return -EFAULT;
| if (count < len)
| return -EINVAL;
| /* copy pattern data */ |
| if (copy_to_user((void *)(u_data->p_data), |
| (void *)(pDevCtrl->hfb + filter*offset), len)) { |
| printk(KERN_ERR "Failed to copy data to user space: src=%p, dst=%p\n", |
| pDevCtrl->hfb+filter*offset, u_data->p_data); |
| return -EFAULT; |
| } |
| return len; |
| } |
| /*
| * Clear the HFB: disable the filter given by the "filter" argument,
| * or all filters when CLEAR_ALL_HFB is passed.
| */
| static inline void bcmgenet_clear_hfb(struct BcmEnet_devctrl *pDevCtrl, |
| int filter) |
| { |
| if (filter == CLEAR_ALL_HFB) { |
| HFB_FILTER_DISABLE_ALL(pDevCtrl); |
| GENET_HFB_CTRL(pDevCtrl) &= ~RBUF_HFB_EN; |
| } else { |
| /* disable this filter */ |
| HFB_FILTER_DISABLE(pDevCtrl, filter); |
| /* clear filter length register */ |
| SET_HFB_FILTER_LEN(pDevCtrl, filter, 0); |
| }
| }
| /* |
| * Utility function to get interface ip address in kernel space. |
| */ |
| static inline unsigned int bcmgenet_getip(struct net_device *dev) |
| { |
| struct net_device *pnet_device; |
| unsigned int ip = 0; |
| |
| read_lock(&dev_base_lock); |
| /* read all devices */ |
| for_each_netdev(&init_net, pnet_device) |
| { |
| if ((netif_running(pnet_device)) && |
| (pnet_device->ip_ptr != NULL) && |
| (!strcmp(pnet_device->name, dev->name))) { |
| struct in_device *pin_dev; |
| pin_dev = (struct in_device *)(pnet_device->ip_ptr); |
| if (pin_dev && pin_dev->ifa_list) |
| ip = htonl(pin_dev->ifa_list->ifa_address); |
| break; |
| } |
| } |
| read_unlock(&dev_base_lock); |
| return ip; |
| } |
| |
| static void bcmgenet_clock_enable(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| clk_enable(pDevCtrl->clk); |
| pDevCtrl->clock_active = 1; |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| } |
| |
| static void bcmgenet_clock_disable(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| pDevCtrl->clock_active = 0; |
| clk_disable(pDevCtrl->clk); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| } |
| |
| /*
| * ethtool function - get WOL (Wake on LAN) settings.
| * Only Magic Packet Detection is supported through ethtool;
| * the ACPI (pattern matching) WOL option is handled in
| * bcmgenet_ioctl().
| */
| static void bcmgenet_get_wol(struct net_device *dev, |
| struct ethtool_wolinfo *wol) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| volatile struct uniMacRegs *umac = pDevCtrl->umac; |
| wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_ARP; |
| |
| if (!netif_running(dev)) |
| return; |
| |
| wol->wolopts = pDevCtrl->wolopts; |
| if (wol->wolopts & WAKE_MAGICSECURE) {
| unsigned short pwd_ms;
| unsigned long pwd_ls;
| /* "wol" is kernel memory here; the ethtool core copies it out */
| pwd_ls = umac->mpd_pw_ls;
| memcpy(&wol->sopass[0], &pwd_ls, 4);
| pwd_ms = umac->mpd_pw_ms & 0xFFFF;
| memcpy(&wol->sopass[4], &pwd_ms, 2);
| } else { |
| memset(&wol->sopass[0], 0, sizeof(wol->sopass)); |
| } |
| } |
| /* |
| * ethtool function - set WOL (Wake on LAN) settings. |
| * Only for magic packet detection mode. |
| */ |
| static int bcmgenet_set_wol(struct net_device *dev, |
| struct ethtool_wolinfo *wol) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| volatile struct uniMacRegs *umac = pDevCtrl->umac; |
| |
| if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_ARP)) |
| return -EINVAL; |
| |
| if (wol->wolopts & WAKE_MAGICSECURE) { |
| umac->mpd_pw_ls = *(unsigned long *)&wol->sopass[0]; |
| umac->mpd_pw_ms = *(unsigned short *)&wol->sopass[4]; |
| umac->mpd_ctrl |= MPD_PW_EN; |
| } |
| |
| device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
| pDevCtrl->wolopts = wol->wolopts; |
| return 0; |
| } |
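| 
| /*
|  * Userspace reaches the two handlers above through the standard
|  * ethtool WOL interface, e.g. (assuming a stock ethtool binary):
|  *
|  *   ethtool -s eth0 wol g                             magic packet
|  *   ethtool -s eth0 wol gs sopass 00:11:22:33:44:55   secure magic packet
|  *
|  * The ACPI pattern-match WOL mode has no ethtool mapping; it is driven
|  * through SIOCSPATTERN/SIOCSACPISET in bcmgenet_ioctl() instead.
|  */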
| /* |
| * ethtool function - get generic settings. |
| */ |
| static int bcmgenet_get_settings(struct net_device *dev, |
| struct ethtool_cmd *cmd) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| int rc = 0; |
| |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) { |
| /* see comments in bcmgenet_set_settings() */ |
| cmd->autoneg = netif_carrier_ok(pDevCtrl->dev); |
| cmd->speed = SPEED_1000; |
| cmd->duplex = DUPLEX_HALF; |
| cmd->port = PORT_BNC; |
| } else { |
| if (!netif_running(dev)) |
| return -EINVAL; |
| rc = mii_ethtool_gset(&pDevCtrl->mii, cmd); |
| } |
| |
| return rc; |
| } |
| /* |
| * ethtool function - set settings. |
| */ |
| static int bcmgenet_set_settings(struct net_device *dev, |
| struct ethtool_cmd *cmd) |
| { |
| int err = 0; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_MOCA) { |
| /* mocad uses cmd->autoneg to control our RUNNING flag */ |
| if (cmd->autoneg) |
| netif_carrier_on(pDevCtrl->dev); |
| else |
| netif_carrier_off(pDevCtrl->dev); |
| } else { |
| if (!netif_running(dev)) |
| return -EINVAL; |
| |
| err = mii_ethtool_sset(&pDevCtrl->mii, cmd); |
| if (err < 0) |
| return err; |
| mii_setup(dev); |
| |
| if (cmd->maxrxpkt != 0) |
| DmaDescThres = cmd->maxrxpkt; |
| } |
| |
| return err; |
| } |
| /* |
| * ethtool function - get driver info. |
| */ |
| static void bcmgenet_get_drvinfo(struct net_device *dev, |
| struct ethtool_drvinfo *info) |
| { |
| strlcpy(info->driver, CARDNAME, sizeof(info->driver));
| strlcpy(info->version, VER_STR, sizeof(info->version));
| }
| static u32 bcmgenet_get_rx_csum(struct net_device *dev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| if (pDevCtrl->rbuf->rbuf_chk_ctrl & RBUF_RXCHK_EN) |
| return 1; |
| |
| return 0; |
| } |
| static int bcmgenet_set_rx_csum(struct net_device *dev, u32 val) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| spin_lock_bh(&pDevCtrl->bh_lock); |
| if (val == 0) { |
| /*pDevCtrl->rbuf->rbuf_endian_ctrl &= ~RBUF_ENDIAN_NOSWAP;*/ |
| pDevCtrl->rbuf->rbuf_ctrl &= ~RBUF_64B_EN; |
| pDevCtrl->rbuf->rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; |
| } else { |
| /*pDevCtrl->rbuf->rbuf_endian_ctrl &= ~RBUF_ENDIAN_NOSWAP;*/ |
| pDevCtrl->rbuf->rbuf_ctrl |= RBUF_64B_EN; |
| pDevCtrl->rbuf->rbuf_chk_ctrl |= RBUF_RXCHK_EN;
| } |
| spin_unlock_bh(&pDevCtrl->bh_lock); |
| return 0; |
| } |
| static u32 bcmgenet_get_tx_csum(struct net_device *dev) |
| { |
| return dev->features & NETIF_F_IP_CSUM; |
| } |
| static int bcmgenet_set_tx_csum(struct net_device *dev, u32 val) |
| { |
| unsigned long flags; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| if (val == 0) { |
| dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
| GENET_TBUF_CTRL(pDevCtrl) &= ~RBUF_64B_EN; |
| if (dev->needed_headroom > 64) |
| dev->needed_headroom -= 64; |
| } else { |
| dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
| GENET_TBUF_CTRL(pDevCtrl) |= RBUF_64B_EN; |
| if (dev->needed_headroom < 64) |
| dev->needed_headroom += 64; |
| } |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
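| 
| /*
|  * Note on the 64-byte headroom adjustment above: pairing the headroom
|  * change with RBUF_64B_EN suggests the reserved space carries a 64B
|  * per-packet transmit status block in front of each frame (inferred
|  * from the paired toggles here, not from hardware documentation).
|  */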
| static int bcmgenet_set_sg(struct net_device *dev, u32 val) |
| { |
| unsigned long flags; |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| if (val && !(dev->features & NETIF_F_IP_CSUM)) { |
| printk(KERN_WARNING "Tx Checksum offloading disabled, not setting SG\n"); |
| return -EINVAL; |
| } |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| if (val) |
| dev->features |= NETIF_F_SG; |
| else |
| dev->features &= ~NETIF_F_SG; |
| /* Note: SG relies on the 64B tx status block, i.e. tx csum enabled */
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| return 0; |
| } |
| static u32 bcmgenet_get_sg(struct net_device *dev) |
| { |
| return dev->features & NETIF_F_SG;
| } |
| /* |
| * standard ethtool support functions. |
| */ |
| static const struct ethtool_ops bcmgenet_ethtool_ops = {
| .get_settings = bcmgenet_get_settings, |
| .set_settings = bcmgenet_set_settings, |
| .get_drvinfo = bcmgenet_get_drvinfo, |
| .get_wol = bcmgenet_get_wol, |
| .set_wol = bcmgenet_set_wol, |
| .get_rx_csum = bcmgenet_get_rx_csum, |
| .set_rx_csum = bcmgenet_set_rx_csum, |
| .get_tx_csum = bcmgenet_get_tx_csum, |
| .set_tx_csum = bcmgenet_set_tx_csum, |
| .get_sg = bcmgenet_get_sg, |
| .set_sg = bcmgenet_set_sg, |
| .get_link = ethtool_op_get_link, |
| }; |
| |
| static int bcmgenet_enable_arp_filter(struct BcmEnet_devctrl *pDevCtrl) |
| { |
| struct net_device *dev = pDevCtrl->dev; |
| unsigned int ip; |
| |
| ip = bcmgenet_getip(dev); |
| if (ip) { |
| /* clear the lower halfwords */ |
| hfb_arp[HFB_ARP_LEN-2] &= ~0xffff; |
| hfb_arp[HFB_ARP_LEN-1] &= ~0xffff; |
| hfb_arp[HFB_ARP_LEN-2] |= (ip >> 16); |
| hfb_arp[HFB_ARP_LEN-1] |= (ip & 0xFFFF); |
| /* Enable the HFB so ARP requests for our IP are matched. */
| if (bcmgenet_update_hfb(dev, hfb_arp, HFB_ARP_LEN, 0) < 0) { |
| printk(KERN_ERR "%s: Unable to update HFB\n", |
| __func__); |
| return -1; |
| } |
| GENET_HFB_CTRL(pDevCtrl) |= RBUF_HFB_EN; |
| return 0; |
| } |
| |
| return -1; |
| } |
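| 
| /*
|  * Example of the IP patching above (this platform is big-endian, so
|  * bcmgenet_getip() yields 0xc0a80102 for 192.168.1.2): the low
|  * halfwords of the last two pattern words become 0xc0a8 and 0x0102,
|  * completing the target-IP field of the canned ARP-request pattern in
|  * hfb_arp[].
|  */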
| /* |
| * Power down the unimac, based on mode. |
| */ |
| static void bcmgenet_power_down(struct BcmEnet_devctrl *pDevCtrl, int mode) |
| { |
| struct net_device *dev; |
| int retries = 0; |
| |
| dev = pDevCtrl->dev; |
| switch (mode) { |
| case GENET_POWER_CABLE_SENSE: |
| #if 0 |
| /* |
| * EPHY bug: setting ext_pwr_down_dll and ext_pwr_down_phy causes
| * link IRQ bouncing. |
| */ |
| pDevCtrl->ext->ext_pwr_mgmt |= (EXT_PWR_DOWN_PHY | |
| EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); |
| #else |
| /* Workaround for putting EPHY in iddq mode. */ |
| pDevCtrl->mii.mdio_write(dev, pDevCtrl->phyAddr, 0x1f, 0x008b); |
| pDevCtrl->mii.mdio_write(dev, pDevCtrl->phyAddr, 0x10, 0x01c0); |
| pDevCtrl->mii.mdio_write(dev, pDevCtrl->phyAddr, 0x14, 0x7000); |
| pDevCtrl->mii.mdio_write(dev, pDevCtrl->phyAddr, 0x1f, 0x000f); |
| pDevCtrl->mii.mdio_write(dev, pDevCtrl->phyAddr, 0x10, 0x20d0); |
| pDevCtrl->mii.mdio_write(dev, pDevCtrl->phyAddr, 0x1f, 0x000b); |
| |
| #endif |
| break; |
| case GENET_POWER_WOL_MAGIC: |
| /* disable RX while turning on MPD_EN */ |
| pDevCtrl->umac->cmd &= ~CMD_RX_EN; |
| mdelay(10); |
| pDevCtrl->umac->mpd_ctrl |= MPD_EN; |
| while (!(pDevCtrl->rbuf->rbuf_status & RBUF_STATUS_WOL)) { |
| retries++; |
| if (retries > 5) { |
| printk(KERN_CRIT "%s: polling " |
| "wol mode timeout\n", dev->name); |
| pDevCtrl->umac->mpd_ctrl &= ~MPD_EN; |
| return; |
| } |
| mdelay(1); |
| } |
| printk(KERN_DEBUG "%s: MP WOL-ready status set after " |
| "%d msec\n", dev->name, retries); |
| |
| /* Enable CRC forward */ |
| pDevCtrl->umac->cmd |= CMD_CRC_FWD; |
| /* Receiver must be enabled for WOL MP detection */ |
| pDevCtrl->umac->cmd |= CMD_RX_EN; |
| |
| if (pDevCtrl->ext && pDevCtrl->dev_asleep) |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_ENERGY_DET_MASK; |
| |
| pDevCtrl->intrl2_0->cpu_mask_clear |= UMAC_IRQ_MPD_R; |
| |
| set_bit(GENET_POWER_WOL_MAGIC, &pDevCtrl->wol_enabled); |
| /* fall-through */ |
| case GENET_POWER_WOL_ACPI: |
| if (bcmgenet_enable_arp_filter(pDevCtrl)) { |
| printk(KERN_CRIT "%s failed to set HFB filter\n", |
| dev->name); |
| return; |
| } |
| pDevCtrl->umac->cmd &= ~CMD_RX_EN; |
| mdelay(10); |
| GENET_HFB_CTRL(pDevCtrl) |= RBUF_ACPI_EN; |
| while (!(pDevCtrl->rbuf->rbuf_status & RBUF_STATUS_WOL)) { |
| retries++; |
| if (retries > 5) { |
| printk(KERN_CRIT "%s polling " |
| "wol mode timeout\n", dev->name); |
| GENET_HFB_CTRL(pDevCtrl) &= ~RBUF_ACPI_EN; |
| return; |
| } |
| mdelay(1); |
| } |
| /* Receiver must be enabled for WOL ACPI detection */ |
| pDevCtrl->umac->cmd |= CMD_RX_EN; |
| printk(KERN_DEBUG "%s: ACPI WOL-ready status set " |
| "after %d msec\n", dev->name, retries); |
| /* Service RX BD until empty */ |
| pDevCtrl->intrl2_0->cpu_mask_clear |= (UMAC_IRQ_HFB_MM | |
| UMAC_IRQ_HFB_SM); |
| set_bit(GENET_POWER_WOL_ACPI, &pDevCtrl->wol_enabled); |
| break; |
| case GENET_POWER_PASSIVE: |
| /* Reset the PHY before powering it down */
| pDevCtrl->mii.mdio_write(pDevCtrl->dev, |
| pDevCtrl->phyAddr, MII_BMCR, BMCR_RESET); |
| if (pDevCtrl->ext) |
| pDevCtrl->ext->ext_pwr_mgmt |= (EXT_PWR_DOWN_PHY | |
| EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); |
| break; |
| default: |
| break; |
| } |
| |
| } |
| static void bcmgenet_power_up(struct BcmEnet_devctrl *pDevCtrl, int mode) |
| { |
| switch (mode) { |
| case GENET_POWER_CABLE_SENSE: |
| #if 0 |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PWR_DOWN_DLL; |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PWR_DOWN_PHY; |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PWR_DOWN_BIAS; |
| #endif |
| /* enable APD */ |
| if (pDevCtrl->ext) { |
| pDevCtrl->ext->ext_pwr_mgmt |= EXT_PWR_DN_EN_LD; |
| pDevCtrl->ext->ext_pwr_mgmt |= EXT_PHY_RESET; |
| udelay(1); |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PHY_RESET; |
| udelay(100); |
| } |
| /* enable 64 clock MDIO */ |
| pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1d, |
| 0x1000); |
| pDevCtrl->mii.mdio_read(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1d); |
| break; |
| case GENET_POWER_WOL_MAGIC: |
| pDevCtrl->umac->mpd_ctrl &= ~MPD_EN; |
| /* Disable CRC Forward */ |
| pDevCtrl->umac->cmd &= ~CMD_CRC_FWD; |
| /* Stop monitoring magic packet IRQ */ |
| pDevCtrl->intrl2_0->cpu_mask_set |= UMAC_IRQ_MPD_R; |
| clear_bit(GENET_POWER_WOL_MAGIC, &pDevCtrl->wol_enabled); |
| /* fall through */ |
| case GENET_POWER_WOL_ACPI: |
| GENET_HFB_CTRL(pDevCtrl) &= ~RBUF_ACPI_EN; |
| bcmgenet_clear_hfb(pDevCtrl, CLEAR_ALL_HFB); |
| /* Stop monitoring ACPI interrupts */ |
| pDevCtrl->intrl2_0->cpu_mask_set |= (UMAC_IRQ_HFB_SM | |
| UMAC_IRQ_HFB_MM); |
| clear_bit(GENET_POWER_WOL_ACPI, &pDevCtrl->wol_enabled); |
| break; |
| case GENET_POWER_PASSIVE: |
| if (pDevCtrl->ext) { |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PWR_DOWN_DLL; |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PWR_DOWN_PHY; |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PWR_DOWN_BIAS; |
| /* enable APD */ |
| pDevCtrl->ext->ext_pwr_mgmt |= EXT_PWR_DN_EN_LD; |
| pDevCtrl->ext->ext_pwr_mgmt |= EXT_PHY_RESET; |
| udelay(1); |
| pDevCtrl->ext->ext_pwr_mgmt &= ~EXT_PHY_RESET; |
| udelay(100); |
| } |
| /* enable 64 clock MDIO */ |
| pDevCtrl->mii.mdio_write(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1d, |
| 0x1000); |
| pDevCtrl->mii.mdio_read(pDevCtrl->dev, pDevCtrl->phyAddr, 0x1d);
| break;
| default:
| break; |
| } |
| #ifdef CONFIG_BCM7429A0 |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_INT) |
| bcm7429_ephy_workaround(pDevCtrl); |
| #endif |
| } |
| |
| /* |
| * ioctl: handle special commands that are not available through ethtool.
| */ |
| static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = netdev_priv(dev); |
| unsigned long flags; |
| struct acpi_data *u_data; |
| int val = 0; |
| |
| if (!netif_running(dev)) |
| return -EINVAL; |
| /* we can add sub-command in ifr_data if we need to in the future */ |
| switch (cmd) { |
| case SIOCSACPISET: |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| bcmgenet_power_down(pDevCtrl, GENET_POWER_WOL_ACPI); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| break; |
| case SIOCSACPICANCEL: |
| spin_lock_irqsave(&pDevCtrl->lock, flags); |
| bcmgenet_power_up(pDevCtrl, GENET_POWER_WOL_ACPI); |
| spin_unlock_irqrestore(&pDevCtrl->lock, flags); |
| break; |
| case SIOCSPATTERN: |
| u_data = (struct acpi_data *)rq->ifr_data; |
| val = bcmgenet_update_hfb(dev, (unsigned int *)u_data->p_data, |
| u_data->count, 1); |
| if (val >= 0) |
| put_user(val, &u_data->fltr_index); |
| break; |
| case SIOCGPATTERN: |
| u_data = (struct acpi_data *)rq->ifr_data; |
| val = bcmgenet_read_hfb(dev, u_data); |
| break; |
| case SIOCGMIIPHY: |
| case SIOCGMIIREG: |
| case SIOCSMIIREG: |
| val = generic_mii_ioctl(&pDevCtrl->mii, if_mii(rq), cmd, NULL); |
| break; |
| default: |
| val = -EINVAL; |
| break; |
| } |
| |
| return val; |
| } |
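| 
| /*
|  * Hypothetical userspace sketch for the pattern ioctls (field names
|  * taken from struct acpi_data as used above; the socket/ifreq
|  * plumbing is standard):
|  *
|  *   struct acpi_data ad = { .p_data = words, .count = nwords };
|  *   struct ifreq ifr;
|  *
|  *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
|  *   ifr.ifr_data = (char *)&ad;
|  *   if (ioctl(fd, SIOCSPATTERN, &ifr) == 0)
|  *           printf("installed as filter %d\n", ad.fltr_index);
|  */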
| static const struct net_device_ops bcmgenet_netdev_ops = { |
| .ndo_open = bcmgenet_open, |
| .ndo_stop = bcmgenet_close, |
| .ndo_start_xmit = bcmgenet_xmit, |
| #ifdef CONFIG_NET_SCH_MULTIQ |
| .ndo_select_queue = bcmgenet_select_queue, |
| #endif |
| .ndo_tx_timeout = bcmgenet_timeout, |
| .ndo_set_multicast_list = bcmgenet_set_multicast_list, |
| .ndo_set_mac_address = bcmgenet_set_mac_addr, |
| .ndo_do_ioctl = bcmgenet_ioctl, |
| }; |
| |
| /* |
| * This function creates debugfs directory and files for run-time debugging, |
| * including: bcmgenet multi-queue rx/tx budget size, multi-queue count, etc. |
| * Read-only for now. |
| */ |
| static int bcmgenet_debugfs_create(struct BcmEnet_devctrl *dev) |
| { |
| /* define debugfs file name and dentry */ |
| struct dentry *interface_dir; |
| int err = -EIO, i; |
| char path[16]; |
| |
| /* init debugfs file names and variables */
| bcmgenet_debugfs dbfs[] = { |
| #ifdef CONFIG_NET_SCH_MULTIQ |
| BCMGENET_DEBUGFS(bcmgenet_rx_mq_bd_cnt), |
| BCMGENET_DEBUGFS(bcmgenet_tx_mq_bd_cnt), |
| BCMGENET_DEBUGFS(bcmgenet_tx_mq_cnt), |
| #endif |
| BCMGENET_DEBUGFS(bcmgenet_tx_default_bd_cnt), |
| }; |
| |
| if (!bcmgenet_debugfs_root) { |
| /* create debugfs root directory */ |
| bcmgenet_debugfs_root = debugfs_create_dir("bcmgenet", NULL); |
| if (IS_ERR_OR_NULL(bcmgenet_debugfs_root)) { |
| err = bcmgenet_debugfs_root ? |
| PTR_ERR(bcmgenet_debugfs_root) : -ENODEV; |
| printk(KERN_ERR "%s: can't create debugfs directory\n", |
| __func__); |
| return err; |
| } |
| /* create debugfs files */ |
| for (i = 0; i < ARRAY_SIZE(dbfs); i++) {
| err = bcmgenet_debugfs_create_u32(dbfs[i], S_IRUGO,
| bcmgenet_debugfs_root);
| if (err)
| return err;
| } |
| } |
| |
| snprintf(path, sizeof(path), "eth%d", dev->devnum); |
| interface_dir = debugfs_create_dir(path, bcmgenet_debugfs_root); |
| |
| if (IS_ERR_OR_NULL(interface_dir)) { |
| err = interface_dir ? PTR_ERR(interface_dir) : -ENODEV; |
| printk(KERN_ERR "%s: can't create debugfs " |
| "interface directory\n", __func__); |
| return err; |
| } |
| |
| for (i = 0; i < GENET_RX_RING_COUNT; i++) { |
| err = bcmgenet_debugfs_create_indexed_u32(&(dev->rxRingDiscCnt[i]), |
| i, S_IRUGO, |
| interface_dir); |
| if (err)
| return err; |
| } |
| |
| return 0; |
| } |
| |
| /* This function creates debugfs file for u32 kernel variable. */ |
| static int bcmgenet_debugfs_create_u32(bcmgenet_debugfs bcmgenet_dbfs, |
| int op, struct dentry *rtdir) |
| { |
| struct dentry *dent; |
| int err = -EIO; |
| |
| dent = debugfs_create_u32(bcmgenet_dbfs.dbfs_name, op, rtdir, |
| bcmgenet_dbfs.dbfs_p); |
| if (IS_ERR_OR_NULL(dent)) {
| err = dent ? PTR_ERR(dent) : -ENODEV; |
| printk(KERN_ERR "%s: can't create debugfs file\n", __func__); |
| return err; |
| } |
| |
| return 0; |
| } |
| |
| static int bcmgenet_debugfs_create_indexed_u32(unsigned int *var, int index, |
| int op, struct dentry *rtdir) |
| { |
| struct dentry *dent; |
| int err = -EIO; |
| char name[32]; |
| |
| snprintf(name, sizeof(name), "bcmgenet_discard_cnt_q%d", index); |
| |
| dent = debugfs_create_u32(name, op, rtdir, var); |
| if (IS_ERR_OR_NULL(dent)) {
| err = dent ? PTR_ERR(dent) : -ENODEV; |
| printk(KERN_ERR "%s: can't create debugfs file\n", __func__); |
| return err; |
| } |
| |
| return 0; |
| } |
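| 
| /*
|  * With debugfs mounted in the usual place, the files created above can
|  * be read as, e.g. (a sketch; it assumes BCMGENET_DEBUGFS() names each
|  * file after its variable and debugfs sits at /sys/kernel/debug):
|  *
|  *   cat /sys/kernel/debug/bcmgenet/bcmgenet_tx_mq_cnt
|  *   cat /sys/kernel/debug/bcmgenet/eth0/bcmgenet_discard_cnt_q0
|  */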
| |
| static int bcmgenet_drv_probe(struct platform_device *pdev) |
| { |
| struct resource *mres, *ires; |
| void __iomem *base; |
| unsigned long res_size; |
| int err = -EIO; |
| |
| /* |
| * bcmemac and bcmgenet use same platform data structure. |
| */ |
| struct bcmemac_platform_data *cfg = pdev->dev.platform_data; |
| struct BcmEnet_devctrl *pDevCtrl; |
| struct net_device *dev; |
| |
| mres = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
| |
| if (!mres || !ires) { |
| printk(KERN_ERR "%s: can't get resources\n", __func__); |
| return -EIO; |
| } |
| res_size = resource_size(mres); |
| if (!request_mem_region(mres->start, res_size, CARDNAME)) { |
| printk(KERN_ERR "%s: can't request mem region: start: 0x%x size: %lu\n", |
| CARDNAME, mres->start, res_size); |
| return -ENODEV; |
| } |
| base = ioremap(mres->start, res_size);
| TRACE(("%s: base=0x%x\n", __func__, (unsigned int)base));
| 
| if (!base) {
| printk(KERN_ERR "%s: can't ioremap\n", __func__);
| err = -EIO;
| goto err0;
| }
| |
| #ifdef CONFIG_NET_SCH_MULTIQ |
| dev = alloc_etherdev_mq(sizeof(*(pDevCtrl)), GENET_TX_MQ_CNT + 1); |
| #else |
| dev = alloc_etherdev(sizeof(*pDevCtrl)); |
| #endif |
| if (dev == NULL) { |
| printk(KERN_ERR "bcmgenet: can't allocate net device\n"); |
| err = -ENOMEM; |
| goto err0; |
| } |
| |
| dev->base_addr = (unsigned long)base; |
| pDevCtrl = (struct BcmEnet_devctrl *)netdev_priv(dev); |
| pDevCtrl->phyType = cfg->phy_type; |
| pDevCtrl->devnum = pdev->id; |
| |
| /* create debugfs for bcmgenet multi-queue*/ |
| if (bcmgenet_debugfs_create(pDevCtrl)) |
| printk(KERN_ERR "%s: debugfs creation fail.\n", __func__); |
| |
| SET_NETDEV_DEV(dev, &pdev->dev); |
| dev_set_drvdata(&pdev->dev, pDevCtrl); |
| memcpy(dev->dev_addr, cfg->macaddr, 6); |
| dev->watchdog_timeo = 2 * HZ;
| SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops); |
| dev->netdev_ops = &bcmgenet_netdev_ops; |
| netif_napi_add(dev, &pDevCtrl->napi, bcmgenet_poll, 64); |
| netif_napi_add(dev, &pDevCtrl->ring_napi, bcmgenet_ring_poll, 64); |
| |
| netdev_boot_setup_check(dev); |
| |
| pDevCtrl->dev = dev; |
| pDevCtrl->irq0 = platform_get_irq(pdev, 0);
| pDevCtrl->irq1 = platform_get_irq(pdev, 1);
| /* dev->irq can only be set once platform_get_irq() has run */
| dev->irq = pDevCtrl->irq0;
| |
| /* NOTE: with fast-bridge, this must be turned off! */
| pDevCtrl->bIPHdrOptimize = 1; |
| |
| spin_lock_init(&pDevCtrl->lock); |
| spin_lock_init(&pDevCtrl->bh_lock); |
| mutex_init(&pDevCtrl->mdio_mutex); |
| /* Mii wait queue */ |
| init_waitqueue_head(&pDevCtrl->wq); |
| |
| pDevCtrl->pdev = pdev; |
| |
| /* Init GENET registers, Tx/Rx buffers */ |
| if (bcmgenet_init_dev(pDevCtrl) < 0) |
| goto err1; |
| |
| if (cfg->phy_id == BRCM_PHY_ID_AUTO) { |
| if (mii_probe(dev, cfg) < 0) { |
| printk(KERN_ERR "No PHY detected, not registering interface:%d\n", |
| pdev->id); |
| bcmgenet_clock_disable(pDevCtrl); |
| goto err1; |
| } else { |
| printk(KERN_CRIT "Found PHY at Address %d\n", |
| pDevCtrl->phyAddr); |
| } |
| |
| } else { |
| pDevCtrl->phyAddr = cfg->phy_id; |
| } |
| mii_init(dev); |
| |
| INIT_WORK(&pDevCtrl->bcmgenet_irq_work, bcmgenet_irq_task); |
| netif_carrier_off(pDevCtrl->dev); |
| |
| if (pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_MII || |
| pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII || |
| pDevCtrl->phyType == BRCM_PHY_TYPE_EXT_RGMII_IBS) { |
| /* No Link status IRQ */ |
| INIT_WORK(&pDevCtrl->bcmgenet_link_work, |
| bcmgenet_gphy_link_status); |
| init_timer(&pDevCtrl->timer); |
| pDevCtrl->timer.data = (unsigned long)pDevCtrl; |
| pDevCtrl->timer.function = bcmgenet_gphy_link_timer; |
| } else { |
| /* check link status */ |
| mii_setup(dev); |
| } |
| dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; |
| err = register_netdev(dev); |
| if (err != 0) |
| goto err2; |
| /* Turn off these features by default */ |
| bcmgenet_set_tx_csum(dev, 0); |
| bcmgenet_set_sg(dev, 0); |
| |
| pDevCtrl->next_dev = eth_root_dev; |
| eth_root_dev = dev; |
| bcmgenet_clock_disable(pDevCtrl); |
| |
| return 0; |
| |
| err2: |
| bcmgenet_clock_disable(pDevCtrl); |
| bcmgenet_uninit_dev(pDevCtrl); |
| err1: |
| iounmap(base); |
| free_netdev(dev); |
| err0: |
| release_mem_region(mres->start, res_size); |
| return err; |
| } |
| |
| static int bcmgenet_drv_remove(struct platform_device *pdev) |
| { |
| struct BcmEnet_devctrl *pDevCtrl = dev_get_drvdata(&pdev->dev); |
| |
| unregister_netdev(pDevCtrl->dev); |
| free_irq(pDevCtrl->irq0, pDevCtrl); |
| free_irq(pDevCtrl->irq1, pDevCtrl); |
| bcmgenet_uninit_dev(pDevCtrl); |
| iounmap((void __iomem *)pDevCtrl->base_addr); |
| free_netdev(pDevCtrl->dev); |
| return 0; |
| } |
| |
| static int bcmgenet_drv_suspend(struct device *dev) |
| { |
| int val = 0; |
| struct BcmEnet_devctrl *pDevCtrl = dev_get_drvdata(dev); |
| |
| cancel_work_sync(&pDevCtrl->bcmgenet_irq_work); |
| |
| /* |
| * Save/restore the interface status across PM modes. |
| * FIXME: Don't use open/close for suspend/resume. |
| */ |
| pDevCtrl->dev_opened = netif_running(pDevCtrl->dev); |
| if (pDevCtrl->dev_opened && !pDevCtrl->dev_asleep) { |
| pDevCtrl->dev_asleep = 1; |
| val = bcmgenet_close(pDevCtrl->dev); |
| } |
| |
| return val; |
| } |
| |
| static int bcmgenet_drv_resume(struct device *dev) |
| { |
| int val = 0; |
| struct BcmEnet_devctrl *pDevCtrl = dev_get_drvdata(dev); |
| |
| if (pDevCtrl->dev_opened) |
| val = bcmgenet_open(pDevCtrl->dev); |
| pDevCtrl->dev_asleep = 0; |
| |
| return val; |
| } |
| |
| static const struct dev_pm_ops bcmgenet_pm_ops = {
| .suspend = bcmgenet_drv_suspend, |
| .resume = bcmgenet_drv_resume, |
| }; |
| |
| |
| static struct platform_driver bcmgenet_plat_drv = { |
| .probe = bcmgenet_drv_probe, |
| .remove = bcmgenet_drv_remove, |
| .driver = { |
| .name = "bcmgenet", |
| .owner = THIS_MODULE, |
| .pm = &bcmgenet_pm_ops, |
| }, |
| }; |
| |
| static int bcmgenet_module_init(void)
| {
| return platform_driver_register(&bcmgenet_plat_drv);
| }
| |
| static void bcmgenet_module_cleanup(void) |
| { |
| platform_driver_unregister(&bcmgenet_plat_drv); |
| } |
| |
| module_init(bcmgenet_module_init); |
| module_exit(bcmgenet_module_cleanup); |
| MODULE_LICENSE("GPL"); |