| /******************************************************************************* |
| |
| Intel 10 Gigabit PCI Express Linux driver |
| Copyright(c) 1999 - 2010 Intel Corporation. |
| |
| This program is free software; you can redistribute it and/or modify it |
| under the terms and conditions of the GNU General Public License, |
| version 2, as published by the Free Software Foundation. |
| |
| This program is distributed in the hope it will be useful, but WITHOUT |
| ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| more details. |
| |
| You should have received a copy of the GNU General Public License along with |
| this program; if not, write to the Free Software Foundation, Inc., |
| 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
| |
| The full GNU General Public License is included in this distribution in |
| the file called "COPYING". |
| |
| Contact Information: |
| e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> |
| Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
| |
| *******************************************************************************/ |
| |
| #include <linux/types.h> |
| #include <linux/module.h> |
| #include <linux/pci.h> |
| #include <linux/netdevice.h> |
| #include <linux/vmalloc.h> |
| #include <linux/string.h> |
| #include <linux/in.h> |
| #include <linux/ip.h> |
| #include <linux/tcp.h> |
| #include <linux/pkt_sched.h> |
| #include <linux/ipv6.h> |
| #include <linux/slab.h> |
| #include <net/checksum.h> |
| #include <net/ip6_checksum.h> |
| #include <linux/ethtool.h> |
| #include <linux/if_vlan.h> |
| #include <scsi/fc/fc_fcoe.h> |
| |
| #include "ixgbe.h" |
| #include "ixgbe_common.h" |
| #include "ixgbe_dcb_82599.h" |
| #include "ixgbe_sriov.h" |
| |
| char ixgbe_driver_name[] = "ixgbe"; |
| static const char ixgbe_driver_string[] = |
| "Intel(R) 10 Gigabit PCI Express Network Driver"; |
| |
| #define DRV_VERSION "2.0.62-k2" |
| const char ixgbe_driver_version[] = DRV_VERSION; |
| static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; |
| |
| static const struct ixgbe_info *ixgbe_info_tbl[] = { |
| [board_82598] = &ixgbe_82598_info, |
| [board_82599] = &ixgbe_82599_info, |
| }; |
| |
| /* ixgbe_pci_tbl - PCI Device ID Table |
| * |
| * Wildcard entries (PCI_ANY_ID) should come last |
| * Last entry must be all 0s |
| * |
| * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, |
| * Class, Class Mask, private data (not used) } |
| */ |
| static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), |
| board_82598 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), |
| board_82599 }, |
| {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), |
| board_82599 }, |
| |
| /* required last entry */ |
| {0, } |
| }; |
| MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); |
| |
| #ifdef CONFIG_IXGBE_DCA |
| static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, |
| void *p); |
| static struct notifier_block dca_notifier = { |
| .notifier_call = ixgbe_notify_dca, |
| .next = NULL, |
| .priority = 0 |
| }; |
| #endif |
| |
| #ifdef CONFIG_PCI_IOV |
| static unsigned int max_vfs; |
| module_param(max_vfs, uint, 0); |
| MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " |
| "per physical function"); |
| #endif /* CONFIG_PCI_IOV */ |
| |
| MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); |
| MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); |
| MODULE_LICENSE("GPL"); |
| MODULE_VERSION(DRV_VERSION); |
| |
| #define DEFAULT_DEBUG_LEVEL_SHIFT 3 |
| |
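/**
 * ixgbe_disable_sriov - disable SR-IOV support and free related resources
 * @adapter: board private structure
 *
 * Turns off VF enablement in hardware, returns the default pool to the PF
 * and frees the per-VF bookkeeping.
 **/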
| static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| u32 gcr; |
| u32 gpie; |
| u32 vmdctl; |
| |
| #ifdef CONFIG_PCI_IOV |
| /* disable iov and allow time for transactions to clear */ |
| pci_disable_sriov(adapter->pdev); |
| #endif |
| |
| /* turn off device IOV mode */ |
| gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); |
| gcr &= ~(IXGBE_GCR_EXT_SRIOV); |
| IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr); |
| gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); |
| gpie &= ~IXGBE_GPIE_VTMODE_MASK; |
| IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); |
| |
| /* set default pool back to 0 */ |
| vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); |
| vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; |
| IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); |
| |
| /* take a breather then clean up driver data */ |
| msleep(100); |
	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;
| |
| adapter->num_vfs = 0; |
| adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; |
| } |
| |
| struct ixgbe_reg_info { |
| u32 ofs; |
| char *name; |
| }; |
| |
| static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { |
| |
| /* General Registers */ |
| {IXGBE_CTRL, "CTRL"}, |
| {IXGBE_STATUS, "STATUS"}, |
| {IXGBE_CTRL_EXT, "CTRL_EXT"}, |
| |
| /* Interrupt Registers */ |
| {IXGBE_EICR, "EICR"}, |
| |
| /* RX Registers */ |
| {IXGBE_SRRCTL(0), "SRRCTL"}, |
| {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, |
| {IXGBE_RDLEN(0), "RDLEN"}, |
| {IXGBE_RDH(0), "RDH"}, |
| {IXGBE_RDT(0), "RDT"}, |
| {IXGBE_RXDCTL(0), "RXDCTL"}, |
| {IXGBE_RDBAL(0), "RDBAL"}, |
| {IXGBE_RDBAH(0), "RDBAH"}, |
| |
| /* TX Registers */ |
| {IXGBE_TDBAL(0), "TDBAL"}, |
| {IXGBE_TDBAH(0), "TDBAH"}, |
| {IXGBE_TDLEN(0), "TDLEN"}, |
| {IXGBE_TDH(0), "TDH"}, |
| {IXGBE_TDT(0), "TDT"}, |
| {IXGBE_TXDCTL(0), "TXDCTL"}, |
| |
| /* List Terminator */ |
| {} |
| }; |
| |
| |
| /* |
| * ixgbe_regdump - register printout routine |
| */ |
| static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) |
| { |
| int i = 0, j = 0; |
| char rname[16]; |
| u32 regs[64]; |
| |
| switch (reginfo->ofs) { |
| case IXGBE_SRRCTL(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); |
| break; |
| case IXGBE_DCA_RXCTRL(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); |
| break; |
| case IXGBE_RDLEN(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); |
| break; |
| case IXGBE_RDH(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); |
| break; |
| case IXGBE_RDT(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); |
| break; |
| case IXGBE_RXDCTL(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); |
| break; |
| case IXGBE_RDBAL(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); |
| break; |
| case IXGBE_RDBAH(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); |
| break; |
| case IXGBE_TDBAL(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); |
| break; |
| case IXGBE_TDBAH(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); |
| break; |
| case IXGBE_TDLEN(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); |
| break; |
| case IXGBE_TDH(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); |
| break; |
| case IXGBE_TDT(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); |
| break; |
| case IXGBE_TXDCTL(0): |
| for (i = 0; i < 64; i++) |
| regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); |
| break; |
| default: |
| printk(KERN_INFO "%-15s %08x\n", reginfo->name, |
| IXGBE_READ_REG(hw, reginfo->ofs)); |
| return; |
| } |
| |
| for (i = 0; i < 8; i++) { |
| snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); |
		printk(KERN_INFO "%-15s ", rname);
| for (j = 0; j < 8; j++) |
| printk(KERN_CONT "%08x ", regs[i*8+j]); |
| printk(KERN_CONT "\n"); |
| } |
| |
| } |
| |
| /* |
| * ixgbe_dump - Print registers, tx-rings and rx-rings |
| */ |
| static void ixgbe_dump(struct ixgbe_adapter *adapter) |
| { |
| struct net_device *netdev = adapter->netdev; |
| struct ixgbe_hw *hw = &adapter->hw; |
| struct ixgbe_reg_info *reginfo; |
| int n = 0; |
| struct ixgbe_ring *tx_ring; |
| struct ixgbe_tx_buffer *tx_buffer_info; |
| union ixgbe_adv_tx_desc *tx_desc; |
| struct my_u0 { u64 a; u64 b; } *u0; |
| struct ixgbe_ring *rx_ring; |
| union ixgbe_adv_rx_desc *rx_desc; |
| struct ixgbe_rx_buffer *rx_buffer_info; |
| u32 staterr; |
| int i = 0; |
| |
| if (!netif_msg_hw(adapter)) |
| return; |
| |
| /* Print netdevice Info */ |
| if (netdev) { |
| dev_info(&adapter->pdev->dev, "Net device Info\n"); |
| printk(KERN_INFO "Device Name state " |
| "trans_start last_rx\n"); |
| printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", |
| netdev->name, |
| netdev->state, |
| netdev->trans_start, |
| netdev->last_rx); |
| } |
| |
| /* Print Registers */ |
| dev_info(&adapter->pdev->dev, "Register Dump\n"); |
| printk(KERN_INFO " Register Name Value\n"); |
| for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; |
| reginfo->name; reginfo++) { |
| ixgbe_regdump(hw, reginfo); |
| } |
| |
| /* Print TX Ring Summary */ |
| if (!netdev || !netif_running(netdev)) |
| goto exit; |
| |
| dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); |
| printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] " |
| "leng ntw timestamp\n"); |
| for (n = 0; n < adapter->num_tx_queues; n++) { |
| tx_ring = adapter->tx_ring[n]; |
| tx_buffer_info = |
| &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; |
| printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", |
| n, tx_ring->next_to_use, tx_ring->next_to_clean, |
| (u64)tx_buffer_info->dma, |
| tx_buffer_info->length, |
| tx_buffer_info->next_to_watch, |
| (u64)tx_buffer_info->time_stamp); |
| } |
| |
| /* Print TX Rings */ |
| if (!netif_msg_tx_done(adapter)) |
| goto rx_ring_summary; |
| |
| dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); |
| |
| /* Transmit Descriptor Formats |
| * |
| * Advanced Transmit Descriptor |
| * +--------------------------------------------------------------+ |
| * 0 | Buffer Address [63:0] | |
| * +--------------------------------------------------------------+ |
| * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | |
| * +--------------------------------------------------------------+ |
| * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 |
| */ |
| |
| for (n = 0; n < adapter->num_tx_queues; n++) { |
| tx_ring = adapter->tx_ring[n]; |
| printk(KERN_INFO "------------------------------------\n"); |
| printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); |
| printk(KERN_INFO "------------------------------------\n"); |
| printk(KERN_INFO "T [desc] [address 63:0 ] " |
| "[PlPOIdStDDt Ln] [bi->dma ] " |
| "leng ntw timestamp bi->skb\n"); |
| |
| for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { |
| tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); |
| tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
| u0 = (struct my_u0 *)tx_desc; |
| printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" |
| " %04X %3X %016llX %p", i, |
| le64_to_cpu(u0->a), |
| le64_to_cpu(u0->b), |
| (u64)tx_buffer_info->dma, |
| tx_buffer_info->length, |
| tx_buffer_info->next_to_watch, |
| (u64)tx_buffer_info->time_stamp, |
| tx_buffer_info->skb); |
| if (i == tx_ring->next_to_use && |
| i == tx_ring->next_to_clean) |
| printk(KERN_CONT " NTC/U\n"); |
| else if (i == tx_ring->next_to_use) |
| printk(KERN_CONT " NTU\n"); |
| else if (i == tx_ring->next_to_clean) |
| printk(KERN_CONT " NTC\n"); |
| else |
| printk(KERN_CONT "\n"); |
| |
| if (netif_msg_pktdata(adapter) && |
| tx_buffer_info->dma != 0) |
| print_hex_dump(KERN_INFO, "", |
| DUMP_PREFIX_ADDRESS, 16, 1, |
| phys_to_virt(tx_buffer_info->dma), |
| tx_buffer_info->length, true); |
| } |
| } |
| |
| /* Print RX Rings Summary */ |
| rx_ring_summary: |
| dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); |
| printk(KERN_INFO "Queue [NTU] [NTC]\n"); |
| for (n = 0; n < adapter->num_rx_queues; n++) { |
| rx_ring = adapter->rx_ring[n]; |
| printk(KERN_INFO "%5d %5X %5X\n", n, |
| rx_ring->next_to_use, rx_ring->next_to_clean); |
| } |
| |
| /* Print RX Rings */ |
| if (!netif_msg_rx_status(adapter)) |
| goto exit; |
| |
| dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); |
| |
| /* Advanced Receive Descriptor (Read) Format |
| * 63 1 0 |
| * +-----------------------------------------------------+ |
| * 0 | Packet Buffer Address [63:1] |A0/NSE| |
| * +----------------------------------------------+------+ |
| * 8 | Header Buffer Address [63:1] | DD | |
| * +-----------------------------------------------------+ |
| * |
| * |
| * Advanced Receive Descriptor (Write-Back) Format |
| * |
| * 63 48 47 32 31 30 21 20 16 15 4 3 0 |
| * +------------------------------------------------------+ |
| * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | |
| * | Checksum Ident | | | | Type | Type | |
| * +------------------------------------------------------+ |
| * 8 | VLAN Tag | Length | Extended Error | Extended Status | |
| * +------------------------------------------------------+ |
| * 63 48 47 32 31 20 19 0 |
| */ |
| for (n = 0; n < adapter->num_rx_queues; n++) { |
| rx_ring = adapter->rx_ring[n]; |
| printk(KERN_INFO "------------------------------------\n"); |
| printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); |
| printk(KERN_INFO "------------------------------------\n"); |
| printk(KERN_INFO "R [desc] [ PktBuf A0] " |
| "[ HeadBuf DD] [bi->dma ] [bi->skb] " |
| "<-- Adv Rx Read format\n"); |
| printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " |
| "[vl er S cks ln] ---------------- [bi->skb] " |
| "<-- Adv Rx Write-Back format\n"); |
| |
| for (i = 0; i < rx_ring->count; i++) { |
| rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
| rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); |
| u0 = (struct my_u0 *)rx_desc; |
| staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
| if (staterr & IXGBE_RXD_STAT_DD) { |
| /* Descriptor Done */ |
| printk(KERN_INFO "RWB[0x%03X] %016llX " |
| "%016llX ---------------- %p", i, |
| le64_to_cpu(u0->a), |
| le64_to_cpu(u0->b), |
| rx_buffer_info->skb); |
| } else { |
| printk(KERN_INFO "R [0x%03X] %016llX " |
| "%016llX %016llX %p", i, |
| le64_to_cpu(u0->a), |
| le64_to_cpu(u0->b), |
| (u64)rx_buffer_info->dma, |
| rx_buffer_info->skb); |
| |
| if (netif_msg_pktdata(adapter)) { |
| print_hex_dump(KERN_INFO, "", |
| DUMP_PREFIX_ADDRESS, 16, 1, |
| phys_to_virt(rx_buffer_info->dma), |
| rx_ring->rx_buf_len, true); |
| |
| if (rx_ring->rx_buf_len |
| < IXGBE_RXBUFFER_2048) |
| print_hex_dump(KERN_INFO, "", |
| DUMP_PREFIX_ADDRESS, 16, 1, |
| phys_to_virt( |
| rx_buffer_info->page_dma + |
| rx_buffer_info->page_offset |
| ), |
| PAGE_SIZE/2, true); |
| } |
| } |
| |
| if (i == rx_ring->next_to_use) |
| printk(KERN_CONT " NTU\n"); |
| else if (i == rx_ring->next_to_clean) |
| printk(KERN_CONT " NTC\n"); |
| else |
| printk(KERN_CONT "\n"); |
| |
| } |
| } |
| |
| exit: |
| return; |
| } |
| |
| static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) |
| { |
| u32 ctrl_ext; |
| |
| /* Let firmware take over control of h/w */ |
| ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, |
| ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); |
| } |
| |
| static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) |
| { |
| u32 ctrl_ext; |
| |
| /* Let firmware know the driver has taken over */ |
| ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, |
| ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); |
| } |
| |
| /* |
| * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors |
| * @adapter: pointer to adapter struct |
| * @direction: 0 for Rx, 1 for Tx, -1 for other causes |
| * @queue: queue to map the corresponding interrupt to |
| * @msix_vector: the vector to map to the corresponding queue |
| * |
| */ |
| static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, |
| u8 queue, u8 msix_vector) |
| { |
| u32 ivar, index; |
| struct ixgbe_hw *hw = &adapter->hw; |
| switch (hw->mac.type) { |
| case ixgbe_mac_82598EB: |
| msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
| if (direction == -1) |
| direction = 0; |
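		/*
		 * On 82598 each IVAR register holds four 8-bit entries;
		 * Rx causes occupy indices 0-63 and Tx causes 64-127,
		 * so fold the direction into the cause index first.
		 */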
| index = (((direction * 64) + queue) >> 2) & 0x1F; |
| ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); |
| ivar &= ~(0xFF << (8 * (queue & 0x3))); |
| ivar |= (msix_vector << (8 * (queue & 0x3))); |
| IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); |
| break; |
| case ixgbe_mac_82599EB: |
| if (direction == -1) { |
| /* other causes */ |
| msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
| index = ((queue & 1) * 8); |
| ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); |
| ivar &= ~(0xFF << index); |
| ivar |= (msix_vector << index); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); |
| break; |
| } else { |
| /* tx or rx causes */ |
| msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
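			/*
			 * Each IVAR register covers two queues: the even
			 * queue uses bits 0-15 and the odd queue bits 16-31,
			 * with the Rx cause in the low byte and the Tx cause
			 * in the high byte of each half.
			 */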
| index = ((16 * (queue & 1)) + (8 * direction)); |
| ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); |
| ivar &= ~(0xFF << index); |
| ivar |= (msix_vector << index); |
| IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); |
| break; |
| } |
| default: |
| break; |
| } |
| } |
| |
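/*
 * ixgbe_irq_rearm_queues - trigger a software interrupt for the given queues
 *
 * Writing the queue bits to EICS raises those interrupt causes again so the
 * rings are polled once more when the work limit was reached.
 */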
| static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, |
| u64 qmask) |
| { |
| u32 mask; |
| |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); |
| } else { |
| mask = (qmask & 0xFFFFFFFF); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); |
| mask = (qmask >> 32); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); |
| } |
| } |
| |
| static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, |
| struct ixgbe_tx_buffer |
| *tx_buffer_info) |
| { |
| if (tx_buffer_info->dma) { |
| if (tx_buffer_info->mapped_as_page) |
| dma_unmap_page(&adapter->pdev->dev, |
| tx_buffer_info->dma, |
| tx_buffer_info->length, |
| DMA_TO_DEVICE); |
| else |
| dma_unmap_single(&adapter->pdev->dev, |
| tx_buffer_info->dma, |
| tx_buffer_info->length, |
| DMA_TO_DEVICE); |
| tx_buffer_info->dma = 0; |
| } |
| if (tx_buffer_info->skb) { |
| dev_kfree_skb_any(tx_buffer_info->skb); |
| tx_buffer_info->skb = NULL; |
| } |
| tx_buffer_info->time_stamp = 0; |
| /* tx_buffer_info must be completely set up in the transmit path */ |
| } |
| |
| /** |
| * ixgbe_tx_xon_state - check the tx ring xon state |
| * @adapter: the ixgbe adapter |
| * @tx_ring: the corresponding tx_ring |
| * |
 * If not in DCB mode, checks TFCS.TXOFF; otherwise, checks the TFCS bit
 * of the traffic class (TC) this tx_ring belongs to.
 *
 * Returns: true if in xon state (currently not paused)
| */ |
| static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, |
| struct ixgbe_ring *tx_ring) |
| { |
| u32 txoff = IXGBE_TFCS_TXOFF; |
| |
| #ifdef CONFIG_IXGBE_DCB |
| if (adapter->dcb_cfg.pfc_mode_enable) { |
| int tc; |
| int reg_idx = tx_ring->reg_idx; |
| int dcb_i = adapter->ring_feature[RING_F_DCB].indices; |
| |
| switch (adapter->hw.mac.type) { |
| case ixgbe_mac_82598EB: |
| tc = reg_idx >> 2; |
| txoff = IXGBE_TFCS_TXOFF0; |
| break; |
| case ixgbe_mac_82599EB: |
| tc = 0; |
| txoff = IXGBE_TFCS_TXOFF; |
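			/*
			 * Map the Tx ring register index to its traffic
			 * class.  With 8 TCs: TC0/TC1 own 32 rings each,
			 * TC2/TC3 own 16 each and TC4-TC7 own 8 each.
			 * With 4 TCs: TC0 owns 64, TC1 owns 32 and
			 * TC2/TC3 own 16 each.
			 */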
| if (dcb_i == 8) { |
| /* TC0, TC1 */ |
| tc = reg_idx >> 5; |
| if (tc == 2) /* TC2, TC3 */ |
| tc += (reg_idx - 64) >> 4; |
| else if (tc == 3) /* TC4, TC5, TC6, TC7 */ |
| tc += 1 + ((reg_idx - 96) >> 3); |
| } else if (dcb_i == 4) { |
| /* TC0, TC1 */ |
| tc = reg_idx >> 6; |
| if (tc == 1) { |
| tc += (reg_idx - 64) >> 5; |
| if (tc == 2) /* TC2, TC3 */ |
| tc += (reg_idx - 96) >> 4; |
| } |
| } |
| break; |
| default: |
| tc = 0; |
| } |
| txoff <<= tc; |
| } |
| #endif |
| return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff; |
| } |
| |
| static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, |
| struct ixgbe_ring *tx_ring, |
| unsigned int eop) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| |
	/* Detect a transmit hang in hardware; this serializes the
	 * check with the clearing of time_stamp and movement of eop */
| adapter->detect_tx_hung = false; |
| if (tx_ring->tx_buffer_info[eop].time_stamp && |
| time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && |
| ixgbe_tx_xon_state(adapter, tx_ring)) { |
| /* detected Tx unit hang */ |
| union ixgbe_adv_tx_desc *tx_desc; |
| tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); |
| DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" |
| " Tx Queue <%d>\n" |
| " TDH, TDT <%x>, <%x>\n" |
| " next_to_use <%x>\n" |
| " next_to_clean <%x>\n" |
| "tx_buffer_info[next_to_clean]\n" |
| " time_stamp <%lx>\n" |
| " jiffies <%lx>\n", |
| tx_ring->queue_index, |
| IXGBE_READ_REG(hw, tx_ring->head), |
| IXGBE_READ_REG(hw, tx_ring->tail), |
| tx_ring->next_to_use, eop, |
| tx_ring->tx_buffer_info[eop].time_stamp, jiffies); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| #define IXGBE_MAX_TXD_PWR 14 |
| #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) |
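/* a single Tx descriptor can thus carry at most 16KB of data */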
| |
| /* Tx Descriptors needed, worst case */ |
| #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ |
| (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) |
| #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ |
| MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ |
| |
| static void ixgbe_tx_timeout(struct net_device *netdev); |
| |
| /** |
| * ixgbe_clean_tx_irq - Reclaim resources after transmit completes |
| * @q_vector: structure containing interrupt and ring information |
| * @tx_ring: tx ring to clean |
| **/ |
| static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, |
| struct ixgbe_ring *tx_ring) |
| { |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct net_device *netdev = adapter->netdev; |
| union ixgbe_adv_tx_desc *tx_desc, *eop_desc; |
| struct ixgbe_tx_buffer *tx_buffer_info; |
| unsigned int i, eop, count = 0; |
| unsigned int total_bytes = 0, total_packets = 0; |
| |
| i = tx_ring->next_to_clean; |
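	/* next_to_watch is the index of the last (EOP) descriptor of the
	 * packet that starts at next_to_clean */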
| eop = tx_ring->tx_buffer_info[i].next_to_watch; |
| eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); |
| |
| while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && |
| (count < tx_ring->work_limit)) { |
| bool cleaned = false; |
| rmb(); /* read buffer_info after eop_desc */ |
| for ( ; !cleaned; count++) { |
| struct sk_buff *skb; |
| tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); |
| tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
| cleaned = (i == eop); |
| skb = tx_buffer_info->skb; |
| |
| if (cleaned && skb) { |
| unsigned int segs, bytecount; |
| unsigned int hlen = skb_headlen(skb); |
| |
| /* gso_segs is currently only valid for tcp */ |
| segs = skb_shinfo(skb)->gso_segs ?: 1; |
| #ifdef IXGBE_FCOE |
| /* adjust for FCoE Sequence Offload */ |
| if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) |
| && (skb->protocol == htons(ETH_P_FCOE)) && |
| skb_is_gso(skb)) { |
| hlen = skb_transport_offset(skb) + |
| sizeof(struct fc_frame_header) + |
| sizeof(struct fcoe_crc_eof); |
| segs = DIV_ROUND_UP(skb->len - hlen, |
| skb_shinfo(skb)->gso_size); |
| } |
| #endif /* IXGBE_FCOE */ |
| /* multiply data chunks by size of headers */ |
| bytecount = ((segs - 1) * hlen) + skb->len; |
| total_packets += segs; |
| total_bytes += bytecount; |
| } |
| |
| ixgbe_unmap_and_free_tx_resource(adapter, |
| tx_buffer_info); |
| |
| tx_desc->wb.status = 0; |
| |
| i++; |
| if (i == tx_ring->count) |
| i = 0; |
| } |
| |
| eop = tx_ring->tx_buffer_info[i].next_to_watch; |
| eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); |
| } |
| |
| tx_ring->next_to_clean = i; |
| |
| #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
| if (unlikely(count && netif_carrier_ok(netdev) && |
| (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { |
| /* Make sure that anybody stopping the queue after this |
| * sees the new next_to_clean. |
| */ |
| smp_mb(); |
| if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && |
| !test_bit(__IXGBE_DOWN, &adapter->state)) { |
| netif_wake_subqueue(netdev, tx_ring->queue_index); |
| ++tx_ring->restart_queue; |
| } |
| } |
| |
| if (adapter->detect_tx_hung) { |
| if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { |
| /* schedule immediate reset if we believe we hung */ |
| DPRINTK(PROBE, INFO, |
| "tx hang %d detected, resetting adapter\n", |
| adapter->tx_timeout_count + 1); |
| ixgbe_tx_timeout(adapter->netdev); |
| } |
| } |
| |
| /* re-arm the interrupt */ |
| if (count >= tx_ring->work_limit) |
| ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx)); |
| |
| tx_ring->total_bytes += total_bytes; |
| tx_ring->total_packets += total_packets; |
| tx_ring->stats.packets += total_packets; |
| tx_ring->stats.bytes += total_bytes; |
| return (count < tx_ring->work_limit); |
| } |
| |
| #ifdef CONFIG_IXGBE_DCA |
| static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, |
| struct ixgbe_ring *rx_ring) |
| { |
| u32 rxctrl; |
| int cpu = get_cpu(); |
| int q = rx_ring->reg_idx; |
| |
| if (rx_ring->cpu != cpu) { |
| rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; |
| rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); |
| } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
| rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; |
| rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << |
| IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); |
| } |
| rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; |
| rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; |
| rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); |
| rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | |
| IXGBE_DCA_RXCTRL_DESC_HSRO_EN); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); |
| rx_ring->cpu = cpu; |
| } |
| put_cpu(); |
| } |
| |
| static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, |
| struct ixgbe_ring *tx_ring) |
| { |
| u32 txctrl; |
| int cpu = get_cpu(); |
| int q = tx_ring->reg_idx; |
| struct ixgbe_hw *hw = &adapter->hw; |
| |
| if (tx_ring->cpu != cpu) { |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); |
| txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; |
| txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); |
| txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; |
| IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); |
| } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
| txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q)); |
| txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; |
| txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << |
| IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); |
| txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; |
| IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl); |
| } |
| tx_ring->cpu = cpu; |
| } |
| put_cpu(); |
| } |
| |
| static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) |
| { |
| int i; |
| |
| if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) |
| return; |
| |
| /* always use CB2 mode, difference is masked in the CB driver */ |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); |
| |
| for (i = 0; i < adapter->num_tx_queues; i++) { |
| adapter->tx_ring[i]->cpu = -1; |
| ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); |
| } |
| for (i = 0; i < adapter->num_rx_queues; i++) { |
| adapter->rx_ring[i]->cpu = -1; |
| ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); |
| } |
| } |
| |
| static int __ixgbe_notify_dca(struct device *dev, void *data) |
| { |
| struct net_device *netdev = dev_get_drvdata(dev); |
| struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| unsigned long event = *(unsigned long *)data; |
| |
| switch (event) { |
| case DCA_PROVIDER_ADD: |
| /* if we're already enabled, don't do it again */ |
| if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
| break; |
| if (dca_add_requester(dev) == 0) { |
| adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
| ixgbe_setup_dca(adapter); |
| break; |
| } |
| /* Fall Through since DCA is disabled. */ |
| case DCA_PROVIDER_REMOVE: |
| if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
| dca_remove_requester(dev); |
| adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); |
| } |
| break; |
| } |
| |
| return 0; |
| } |
| |
| #endif /* CONFIG_IXGBE_DCA */ |
| /** |
| * ixgbe_receive_skb - Send a completed packet up the stack |
| * @adapter: board private structure |
| * @skb: packet to send up |
| * @status: hardware indication of status of receive |
| * @rx_ring: rx descriptor ring (for a specific queue) to setup |
| * @rx_desc: rx descriptor |
| **/ |
| static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, |
| struct sk_buff *skb, u8 status, |
| struct ixgbe_ring *ring, |
| union ixgbe_adv_rx_desc *rx_desc) |
| { |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct napi_struct *napi = &q_vector->napi; |
| bool is_vlan = (status & IXGBE_RXD_STAT_VP); |
| u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); |
| |
| skb_record_rx_queue(skb, ring->queue_index); |
| if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { |
| if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) |
| vlan_gro_receive(napi, adapter->vlgrp, tag, skb); |
| else |
| napi_gro_receive(napi, skb); |
| } else { |
| if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) |
| vlan_hwaccel_rx(skb, adapter->vlgrp, tag); |
| else |
| netif_rx(skb); |
| } |
| } |
| |
| /** |
| * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum |
| * @adapter: address of board private structure |
| * @status_err: hardware indication of status of receive |
| * @skb: skb currently being received and modified |
| **/ |
| static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, |
| union ixgbe_adv_rx_desc *rx_desc, |
| struct sk_buff *skb) |
| { |
| u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); |
| |
| skb->ip_summed = CHECKSUM_NONE; |
| |
| /* Rx csum disabled */ |
| if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) |
| return; |
| |
| /* if IP and error */ |
| if ((status_err & IXGBE_RXD_STAT_IPCS) && |
| (status_err & IXGBE_RXDADV_ERR_IPE)) { |
| adapter->hw_csum_rx_error++; |
| return; |
| } |
| |
| if (!(status_err & IXGBE_RXD_STAT_L4CS)) |
| return; |
| |
| if (status_err & IXGBE_RXDADV_ERR_TCPE) { |
| u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; |
| |
| /* |
| * 82599 errata, UDP frames with a 0 checksum can be marked as |
| * checksum errors. |
| */ |
| if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && |
| (adapter->hw.mac.type == ixgbe_mac_82599EB)) |
| return; |
| |
| adapter->hw_csum_rx_error++; |
| return; |
| } |
| |
| /* It must be a TCP or UDP packet with a valid checksum */ |
| skb->ip_summed = CHECKSUM_UNNECESSARY; |
| } |
| |
| static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, |
| struct ixgbe_ring *rx_ring, u32 val) |
| { |
| /* |
| * Force memory writes to complete before letting h/w |
| * know there are new descriptors to fetch. (Only |
| * applicable for weak-ordered memory model archs, |
| * such as IA-64). |
| */ |
| wmb(); |
| IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); |
| } |
| |
| /** |
| * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split |
| * @adapter: address of board private structure |
| **/ |
| static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, |
| struct ixgbe_ring *rx_ring, |
| int cleaned_count) |
| { |
| struct pci_dev *pdev = adapter->pdev; |
| union ixgbe_adv_rx_desc *rx_desc; |
| struct ixgbe_rx_buffer *bi; |
| unsigned int i; |
| |
| i = rx_ring->next_to_use; |
| bi = &rx_ring->rx_buffer_info[i]; |
| |
| while (cleaned_count--) { |
| rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); |
| |
| if (!bi->page_dma && |
| (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { |
| if (!bi->page) { |
| bi->page = alloc_page(GFP_ATOMIC); |
| if (!bi->page) { |
| adapter->alloc_rx_page_failed++; |
| goto no_buffers; |
| } |
| bi->page_offset = 0; |
| } else { |
| /* use a half page if we're re-using */ |
| bi->page_offset ^= (PAGE_SIZE / 2); |
| } |
| |
| bi->page_dma = dma_map_page(&pdev->dev, bi->page, |
| bi->page_offset, |
| (PAGE_SIZE / 2), |
| DMA_FROM_DEVICE); |
| } |
| |
| if (!bi->skb) { |
| struct sk_buff *skb; |
| /* netdev_alloc_skb reserves 32 bytes up front!! */ |
| uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES; |
| skb = netdev_alloc_skb(adapter->netdev, bufsz); |
| |
| if (!skb) { |
| adapter->alloc_rx_buff_failed++; |
| goto no_buffers; |
| } |
| |
| /* advance the data pointer to the next cache line */ |
| skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES) |
| - skb->data)); |
| |
| bi->skb = skb; |
| bi->dma = dma_map_single(&pdev->dev, skb->data, |
| rx_ring->rx_buf_len, |
| DMA_FROM_DEVICE); |
| } |
| /* Refresh the desc even if buffer_addrs didn't change because |
| * each write-back erases this info. */ |
| if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
| rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); |
| rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); |
| } else { |
| rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); |
| } |
| |
| i++; |
| if (i == rx_ring->count) |
| i = 0; |
| bi = &rx_ring->rx_buffer_info[i]; |
| } |
| |
| no_buffers: |
| if (rx_ring->next_to_use != i) { |
| rx_ring->next_to_use = i; |
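		/* the hardware tail (RDT) must point at the last initialized
		 * descriptor, i.e. one behind next_to_use (with wrap) */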
| if (i-- == 0) |
| i = (rx_ring->count - 1); |
| |
| ixgbe_release_rx_desc(&adapter->hw, rx_ring, i); |
| } |
| } |
| |
| static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) |
| { |
| return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; |
| } |
| |
| static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) |
| { |
| return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; |
| } |
| |
| static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) |
| { |
| return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & |
| IXGBE_RXDADV_RSCCNT_MASK) >> |
| IXGBE_RXDADV_RSCCNT_SHIFT; |
| } |
| |
| /** |
| * ixgbe_transform_rsc_queue - change rsc queue into a full packet |
| * @skb: pointer to the last skb in the rsc queue |
| * @count: pointer to number of packets coalesced in this context |
| * |
| * This function changes a queue full of hw rsc buffers into a completed |
| * packet. It uses the ->prev pointers to find the first packet and then |
| * turns it into the frag list owner. |
| **/ |
| static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, |
| u64 *count) |
| { |
| unsigned int frag_list_size = 0; |
| |
| while (skb->prev) { |
| struct sk_buff *prev = skb->prev; |
| frag_list_size += skb->len; |
| skb->prev = NULL; |
| skb = prev; |
| *count += 1; |
| } |
| |
| skb_shinfo(skb)->frag_list = skb->next; |
| skb->next = NULL; |
| skb->len += frag_list_size; |
| skb->data_len += frag_list_size; |
| skb->truesize += frag_list_size; |
| return skb; |
| } |
| |
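/*
 * Per-skb RSC state kept in skb->cb: the DMA address of the header buffer
 * whose unmap is deferred until the EOP descriptor is reached.
 */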
| struct ixgbe_rsc_cb { |
| dma_addr_t dma; |
| bool delay_unmap; |
| }; |
| |
| #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) |
| |
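/**
 * ixgbe_clean_rx_irq - reclaim completed Rx descriptors and pass packets up
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx ring to clean
 * @work_done: incremented by the number of packets processed
 * @work_to_do: NAPI budget for this pass
 **/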
| static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, |
| struct ixgbe_ring *rx_ring, |
| int *work_done, int work_to_do) |
| { |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct net_device *netdev = adapter->netdev; |
| struct pci_dev *pdev = adapter->pdev; |
| union ixgbe_adv_rx_desc *rx_desc, *next_rxd; |
| struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; |
| struct sk_buff *skb; |
| unsigned int i, rsc_count = 0; |
| u32 len, staterr; |
| u16 hdr_info; |
| bool cleaned = false; |
| int cleaned_count = 0; |
| unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
| #ifdef IXGBE_FCOE |
| int ddp_bytes = 0; |
| #endif /* IXGBE_FCOE */ |
| |
| i = rx_ring->next_to_clean; |
| rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); |
| staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
| rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
| |
| while (staterr & IXGBE_RXD_STAT_DD) { |
| u32 upper_len = 0; |
| if (*work_done >= work_to_do) |
| break; |
| (*work_done)++; |
| |
| rmb(); /* read descriptor and rx_buffer_info after status DD */ |
| if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
| hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); |
| len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> |
| IXGBE_RXDADV_HDRBUFLEN_SHIFT; |
| upper_len = le16_to_cpu(rx_desc->wb.upper.length); |
| if ((len > IXGBE_RX_HDR_SIZE) || |
| (upper_len && !(hdr_info & IXGBE_RXDADV_SPH))) |
| len = IXGBE_RX_HDR_SIZE; |
| } else { |
| len = le16_to_cpu(rx_desc->wb.upper.length); |
| } |
| |
| cleaned = true; |
| skb = rx_buffer_info->skb; |
| prefetch(skb->data); |
| rx_buffer_info->skb = NULL; |
| |
| if (rx_buffer_info->dma) { |
| if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
| (!(staterr & IXGBE_RXD_STAT_EOP)) && |
| (!(skb->prev))) { |
| /* |
| * When HWRSC is enabled, delay unmapping |
| * of the first packet. It carries the |
| * header information, HW may still |
| * access the header after the writeback. |
| * Only unmap it when EOP is reached |
| */ |
| IXGBE_RSC_CB(skb)->delay_unmap = true; |
| IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; |
| } else { |
| dma_unmap_single(&pdev->dev, |
| rx_buffer_info->dma, |
| rx_ring->rx_buf_len, |
| DMA_FROM_DEVICE); |
| } |
| rx_buffer_info->dma = 0; |
| skb_put(skb, len); |
| } |
| |
| if (upper_len) { |
| dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, |
| PAGE_SIZE / 2, DMA_FROM_DEVICE); |
| rx_buffer_info->page_dma = 0; |
| skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
| rx_buffer_info->page, |
| rx_buffer_info->page_offset, |
| upper_len); |
| |
| if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || |
| (page_count(rx_buffer_info->page) != 1)) |
| rx_buffer_info->page = NULL; |
| else |
| get_page(rx_buffer_info->page); |
| |
| skb->len += upper_len; |
| skb->data_len += upper_len; |
| skb->truesize += upper_len; |
| } |
| |
| i++; |
| if (i == rx_ring->count) |
| i = 0; |
| |
| next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); |
| prefetch(next_rxd); |
| cleaned_count++; |
| |
| if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) |
| rsc_count = ixgbe_get_rsc_count(rx_desc); |
| |
| if (rsc_count) { |
| u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> |
| IXGBE_RXDADV_NEXTP_SHIFT; |
| next_buffer = &rx_ring->rx_buffer_info[nextp]; |
| } else { |
| next_buffer = &rx_ring->rx_buffer_info[i]; |
| } |
| |
| if (staterr & IXGBE_RXD_STAT_EOP) { |
| if (skb->prev) |
| skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); |
| if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
| if (IXGBE_RSC_CB(skb)->delay_unmap) { |
| dma_unmap_single(&pdev->dev, |
| IXGBE_RSC_CB(skb)->dma, |
| rx_ring->rx_buf_len, |
| DMA_FROM_DEVICE); |
| IXGBE_RSC_CB(skb)->dma = 0; |
| IXGBE_RSC_CB(skb)->delay_unmap = false; |
| } |
| if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) |
| rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; |
| else |
| rx_ring->rsc_count++; |
| rx_ring->rsc_flush++; |
| } |
| rx_ring->stats.packets++; |
| rx_ring->stats.bytes += skb->len; |
| } else { |
| if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
| rx_buffer_info->skb = next_buffer->skb; |
| rx_buffer_info->dma = next_buffer->dma; |
| next_buffer->skb = skb; |
| next_buffer->dma = 0; |
| } else { |
| skb->next = next_buffer->skb; |
| skb->next->prev = skb; |
| } |
| rx_ring->non_eop_descs++; |
| goto next_desc; |
| } |
| |
| if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { |
| dev_kfree_skb_irq(skb); |
| goto next_desc; |
| } |
| |
| ixgbe_rx_checksum(adapter, rx_desc, skb); |
| |
| /* probably a little skewed due to removing CRC */ |
| total_rx_bytes += skb->len; |
| total_rx_packets++; |
| |
| skb->protocol = eth_type_trans(skb, adapter->netdev); |
| #ifdef IXGBE_FCOE |
| /* if ddp, not passing to ULD unless for FCP_RSP or error */ |
| if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { |
| ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); |
| if (!ddp_bytes) |
| goto next_desc; |
| } |
| #endif /* IXGBE_FCOE */ |
| ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); |
| |
| next_desc: |
| rx_desc->wb.upper.status_error = 0; |
| |
| /* return some buffers to hardware, one at a time is too slow */ |
| if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { |
| ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); |
| cleaned_count = 0; |
| } |
| |
| /* use prefetched values */ |
| rx_desc = next_rxd; |
| rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
| |
| staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
| } |
| |
| rx_ring->next_to_clean = i; |
| cleaned_count = IXGBE_DESC_UNUSED(rx_ring); |
| |
| if (cleaned_count) |
| ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); |
| |
| #ifdef IXGBE_FCOE |
| /* include DDPed FCoE data */ |
| if (ddp_bytes > 0) { |
| unsigned int mss; |
| |
| mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - |
| sizeof(struct fc_frame_header) - |
| sizeof(struct fcoe_crc_eof); |
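		/* round the per-frame payload down to a 512-byte multiple
		 * before estimating how many frames the DDPed bytes held */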
| if (mss > 512) |
| mss &= ~511; |
| total_rx_bytes += ddp_bytes; |
| total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); |
| } |
| #endif /* IXGBE_FCOE */ |
| |
| rx_ring->total_packets += total_rx_packets; |
| rx_ring->total_bytes += total_rx_bytes; |
| netdev->stats.rx_bytes += total_rx_bytes; |
| netdev->stats.rx_packets += total_rx_packets; |
| |
| return cleaned; |
| } |
| |
| static int ixgbe_clean_rxonly(struct napi_struct *, int); |
| /** |
| * ixgbe_configure_msix - Configure MSI-X hardware |
| * @adapter: board private structure |
| * |
| * ixgbe_configure_msix sets up the hardware to properly generate MSI-X |
| * interrupts. |
| **/ |
| static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) |
| { |
| struct ixgbe_q_vector *q_vector; |
| int i, j, q_vectors, v_idx, r_idx; |
| u32 mask; |
| |
| q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
| |
| /* |
| * Populate the IVAR table and set the ITR values to the |
| * corresponding register. |
| */ |
| for (v_idx = 0; v_idx < q_vectors; v_idx++) { |
| q_vector = adapter->q_vector[v_idx]; |
| /* XXX for_each_set_bit(...) */ |
| r_idx = find_first_bit(q_vector->rxr_idx, |
| adapter->num_rx_queues); |
| |
| for (i = 0; i < q_vector->rxr_count; i++) { |
| j = adapter->rx_ring[r_idx]->reg_idx; |
| ixgbe_set_ivar(adapter, 0, j, v_idx); |
| r_idx = find_next_bit(q_vector->rxr_idx, |
| adapter->num_rx_queues, |
| r_idx + 1); |
| } |
| r_idx = find_first_bit(q_vector->txr_idx, |
| adapter->num_tx_queues); |
| |
| for (i = 0; i < q_vector->txr_count; i++) { |
| j = adapter->tx_ring[r_idx]->reg_idx; |
| ixgbe_set_ivar(adapter, 1, j, v_idx); |
| r_idx = find_next_bit(q_vector->txr_idx, |
| adapter->num_tx_queues, |
| r_idx + 1); |
| } |
| |
| if (q_vector->txr_count && !q_vector->rxr_count) |
| /* tx only */ |
| q_vector->eitr = adapter->tx_eitr_param; |
| else if (q_vector->rxr_count) |
| /* rx or mixed */ |
| q_vector->eitr = adapter->rx_eitr_param; |
| |
| ixgbe_write_eitr(q_vector); |
| } |
| |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
| ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, |
| v_idx); |
| else if (adapter->hw.mac.type == ixgbe_mac_82599EB) |
| ixgbe_set_ivar(adapter, -1, 1, v_idx); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); |
| |
| /* set up to autoclear timer, and the vectors */ |
| mask = IXGBE_EIMS_ENABLE_MASK; |
| if (adapter->num_vfs) |
| mask &= ~(IXGBE_EIMS_OTHER | |
| IXGBE_EIMS_MAILBOX | |
| IXGBE_EIMS_LSC); |
| else |
| mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); |
| } |
| |
| enum latency_range { |
| lowest_latency = 0, |
| low_latency = 1, |
| bulk_latency = 2, |
| latency_invalid = 255 |
| }; |
| |
| /** |
| * ixgbe_update_itr - update the dynamic ITR value based on statistics |
| * @adapter: pointer to adapter |
| * @eitr: eitr setting (ints per sec) to give last timeslice |
| * @itr_setting: current throttle rate in ints/second |
| * @packets: the number of packets during this measurement interval |
| * @bytes: the number of bytes during this measurement interval |
| * |
| * Stores a new ITR value based on packets and byte |
| * counts during the last interrupt. The advantage of per interrupt |
| * computation is faster updates and more accurate ITR for the current |
| * traffic pattern. Constants in this function were computed |
| * based on theoretical maximum wire speed and thresholds were set based |
| * on testing data as well as attempting to minimize response time |
| * while increasing bulk throughput. |
| * this functionality is controlled by the InterruptThrottleRate module |
| * parameter (see ixgbe_param.c) |
| **/ |
| static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, |
| u32 eitr, u8 itr_setting, |
| int packets, int bytes) |
| { |
| unsigned int retval = itr_setting; |
| u32 timepassed_us; |
| u64 bytes_perint; |
| |
| if (packets == 0) |
| goto update_itr_done; |
| |
| |
| /* simple throttlerate management |
| * 0-20MB/s lowest (100000 ints/s) |
| * 20-100MB/s low (20000 ints/s) |
| * 100-1249MB/s bulk (8000 ints/s) |
| */ |
| /* what was last interrupt timeslice? */ |
| timepassed_us = 1000000/eitr; |
| bytes_perint = bytes / timepassed_us; /* bytes/usec */ |
| |
| switch (itr_setting) { |
| case lowest_latency: |
| if (bytes_perint > adapter->eitr_low) |
| retval = low_latency; |
| break; |
| case low_latency: |
| if (bytes_perint > adapter->eitr_high) |
| retval = bulk_latency; |
| else if (bytes_perint <= adapter->eitr_low) |
| retval = lowest_latency; |
| break; |
| case bulk_latency: |
| if (bytes_perint <= adapter->eitr_high) |
| retval = low_latency; |
| break; |
| } |
| |
| update_itr_done: |
| return retval; |
| } |
| |
| /** |
| * ixgbe_write_eitr - write EITR register in hardware specific way |
| * @q_vector: structure containing interrupt and ring information |
| * |
| * This function is made to be called by ethtool and by the driver |
| * when it needs to update EITR registers at runtime. Hardware |
| * specific quirks/differences are taken care of here. |
| */ |
| void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) |
| { |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct ixgbe_hw *hw = &adapter->hw; |
| int v_idx = q_vector->v_idx; |
| u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); |
| |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| /* must write high and low 16 bits to reset counter */ |
| itr_reg |= (itr_reg << 16); |
| } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
| /* |
| * 82599 can support a value of zero, so allow it for |
| * max interrupt rate, but there is an errata where it can |
| * not be zero with RSC |
| */ |
| if (itr_reg == 8 && |
| !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) |
| itr_reg = 0; |
| |
| /* |
| * set the WDIS bit to not clear the timer bits and cause an |
| * immediate assertion of the interrupt |
| */ |
| itr_reg |= IXGBE_EITR_CNT_WDIS; |
| } |
| IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); |
| } |
| |
| static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) |
| { |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| u32 new_itr; |
| u8 current_itr, ret_itr; |
| int i, r_idx; |
| struct ixgbe_ring *rx_ring, *tx_ring; |
| |
| r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
| for (i = 0; i < q_vector->txr_count; i++) { |
| tx_ring = adapter->tx_ring[r_idx]; |
| ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
| q_vector->tx_itr, |
| tx_ring->total_packets, |
| tx_ring->total_bytes); |
| /* if the result for this queue would decrease interrupt |
| * rate for this vector then use that result */ |
| q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? |
| q_vector->tx_itr - 1 : ret_itr); |
| r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
| r_idx + 1); |
| } |
| |
| r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
| for (i = 0; i < q_vector->rxr_count; i++) { |
| rx_ring = adapter->rx_ring[r_idx]; |
| ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
| q_vector->rx_itr, |
| rx_ring->total_packets, |
| rx_ring->total_bytes); |
| /* if the result for this queue would decrease interrupt |
| * rate for this vector then use that result */ |
| q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? |
| q_vector->rx_itr - 1 : ret_itr); |
| r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
| r_idx + 1); |
| } |
| |
| current_itr = max(q_vector->rx_itr, q_vector->tx_itr); |
| |
| switch (current_itr) { |
| /* counts and packets in update_itr are dependent on these numbers */ |
| case lowest_latency: |
| new_itr = 100000; |
| break; |
| case low_latency: |
| new_itr = 20000; /* aka hwitr = ~200 */ |
| break; |
| case bulk_latency: |
| default: |
| new_itr = 8000; |
| break; |
| } |
| |
| if (new_itr != q_vector->eitr) { |
| /* do an exponential smoothing */ |
| new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); |
| |
| /* save the algorithm value here, not the smoothed one */ |
| q_vector->eitr = new_itr; |
| |
| ixgbe_write_eitr(q_vector); |
| } |
| } |
| |
| /** |
 * ixgbe_check_overtemp_task - worker thread to check over temperature
| * @work: pointer to work_struct containing our data |
| **/ |
| static void ixgbe_check_overtemp_task(struct work_struct *work) |
| { |
| struct ixgbe_adapter *adapter = container_of(work, |
| struct ixgbe_adapter, |
| check_overtemp_task); |
| struct ixgbe_hw *hw = &adapter->hw; |
| u32 eicr = adapter->interrupt_event; |
| |
| if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { |
| switch (hw->device_id) { |
| case IXGBE_DEV_ID_82599_T3_LOM: { |
| u32 autoneg; |
| bool link_up = false; |
| |
| if (hw->mac.ops.check_link) |
| hw->mac.ops.check_link(hw, &autoneg, &link_up, false); |
| |
| if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || |
| (eicr & IXGBE_EICR_LSC)) |
| /* Check if this is due to overtemp */ |
| if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) |
| break; |
| } |
| return; |
| default: |
| if (!(eicr & IXGBE_EICR_GPI_SDP0)) |
| return; |
| break; |
| } |
		DPRINTK(DRV, ERR, "Network adapter has been stopped because it "
			"has overheated. Restart the computer. If the problem "
			"persists, power off the system and replace the "
			"adapter\n");
| /* write to clear the interrupt */ |
| IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); |
| } |
| } |
| |
| static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| |
| if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && |
| (eicr & IXGBE_EICR_GPI_SDP1)) { |
| DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); |
| /* write to clear the interrupt */ |
| IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); |
| } |
| } |
| |
| static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| |
| if (eicr & IXGBE_EICR_GPI_SDP1) { |
| /* Clear the interrupt */ |
| IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); |
| schedule_work(&adapter->multispeed_fiber_task); |
| } else if (eicr & IXGBE_EICR_GPI_SDP2) { |
| /* Clear the interrupt */ |
| IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); |
| schedule_work(&adapter->sfp_config_module_task); |
| } else { |
| /* Interrupt isn't for us... */ |
| return; |
| } |
| } |
| |
| static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| |
| adapter->lsc_int++; |
| adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
| adapter->link_check_timeout = jiffies; |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
| IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); |
| IXGBE_WRITE_FLUSH(hw); |
| schedule_work(&adapter->watchdog_task); |
| } |
| } |
| |
| static irqreturn_t ixgbe_msix_lsc(int irq, void *data) |
| { |
| struct net_device *netdev = data; |
| struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| struct ixgbe_hw *hw = &adapter->hw; |
| u32 eicr; |
| |
| /* |
| * Workaround for Silicon errata. Use clear-by-write instead |
| * of clear-by-read. Reading with EICS will return the |
| * interrupt causes without clearing, which later be done |
| * with the write to EICR. |
| */ |
| eicr = IXGBE_READ_REG(hw, IXGBE_EICS); |
| IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); |
| |
| if (eicr & IXGBE_EICR_LSC) |
| ixgbe_check_lsc(adapter); |
| |
| if (eicr & IXGBE_EICR_MAILBOX) |
| ixgbe_msg_task(adapter); |
| |
| if (hw->mac.type == ixgbe_mac_82598EB) |
| ixgbe_check_fan_failure(adapter, eicr); |
| |
| if (hw->mac.type == ixgbe_mac_82599EB) { |
| ixgbe_check_sfp_event(adapter, eicr); |
| adapter->interrupt_event = eicr; |
| if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && |
| ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) |
| schedule_work(&adapter->check_overtemp_task); |
| |
| /* Handle Flow Director Full threshold interrupt */ |
| if (eicr & IXGBE_EICR_FLOW_DIR) { |
| int i; |
| IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); |
| /* Disable transmits before FDIR Re-initialization */ |
| netif_tx_stop_all_queues(netdev); |
| for (i = 0; i < adapter->num_tx_queues; i++) { |
| struct ixgbe_ring *tx_ring = |
| adapter->tx_ring[i]; |
| if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, |
| &tx_ring->reinit_state)) |
| schedule_work(&adapter->fdir_reinit_task); |
| } |
| } |
| } |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); |
| |
| return IRQ_HANDLED; |
| } |
| |
| static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, |
| u64 qmask) |
| { |
| u32 mask; |
| |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
| } else { |
| mask = (qmask & 0xFFFFFFFF); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); |
| mask = (qmask >> 32); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); |
| } |
| /* skip the flush */ |
| } |
| |
| static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, |
| u64 qmask) |
| { |
| u32 mask; |
| |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); |
| } else { |
| mask = (qmask & 0xFFFFFFFF); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); |
| mask = (qmask >> 32); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); |
| } |
| /* skip the flush */ |
| } |
| |
| static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) |
| { |
| struct ixgbe_q_vector *q_vector = data; |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct ixgbe_ring *tx_ring; |
| int i, r_idx; |
| |
| if (!q_vector->txr_count) |
| return IRQ_HANDLED; |
| |
| r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
| for (i = 0; i < q_vector->txr_count; i++) { |
| tx_ring = adapter->tx_ring[r_idx]; |
| tx_ring->total_bytes = 0; |
| tx_ring->total_packets = 0; |
| r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
| r_idx + 1); |
| } |
| |
| /* EIAM disabled interrupts (on this vector) for us */ |
| napi_schedule(&q_vector->napi); |
| |
| return IRQ_HANDLED; |
| } |
| |
| /** |
| * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) |
| * @irq: unused |
| * @data: pointer to our q_vector struct for this interrupt vector |
| **/ |
| static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) |
| { |
| struct ixgbe_q_vector *q_vector = data; |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct ixgbe_ring *rx_ring; |
| int r_idx; |
| int i; |
| |
| if (!q_vector->rxr_count) |
| return IRQ_HANDLED; |
| |
| r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
| for (i = 0; i < q_vector->rxr_count; i++) { |
| rx_ring = adapter->rx_ring[r_idx]; |
| rx_ring->total_bytes = 0; |
| rx_ring->total_packets = 0; |
| r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
| r_idx + 1); |
| } |
| |
| /* EIAM disabled interrupts (on this vector) for us */ |
| napi_schedule(&q_vector->napi); |
| |
| return IRQ_HANDLED; |
| } |
| |
| static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) |
| { |
| struct ixgbe_q_vector *q_vector = data; |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct ixgbe_ring *ring; |
| int r_idx; |
| int i; |
| |
| if (!q_vector->txr_count && !q_vector->rxr_count) |
| return IRQ_HANDLED; |
| |
| r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
| for (i = 0; i < q_vector->txr_count; i++) { |
| ring = adapter->tx_ring[r_idx]; |
| ring->total_bytes = 0; |
| ring->total_packets = 0; |
| r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
| r_idx + 1); |
| } |
| |
| r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
| for (i = 0; i < q_vector->rxr_count; i++) { |
| ring = adapter->rx_ring[r_idx]; |
| ring->total_bytes = 0; |
| ring->total_packets = 0; |
| r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
| r_idx + 1); |
| } |
| |
| /* EIAM disabled interrupts (on this vector) for us */ |
| napi_schedule(&q_vector->napi); |
| |
| return IRQ_HANDLED; |
| } |
| |
| /** |
| * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine |
| * @napi: napi struct with our device's info in it |
| * @budget: amount of work driver is allowed to do this pass, in packets |
| * |
| * This function is optimized for cleaning one queue only on a single |
| * q_vector!!! |
| **/ |
| static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) |
| { |
| struct ixgbe_q_vector *q_vector = |
| container_of(napi, struct ixgbe_q_vector, napi); |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct ixgbe_ring *rx_ring = NULL; |
| int work_done = 0; |
| long r_idx; |
| |
| r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
| rx_ring = adapter->rx_ring[r_idx]; |
| #ifdef CONFIG_IXGBE_DCA |
| if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
| ixgbe_update_rx_dca(adapter, rx_ring); |
| #endif |
| |
| ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); |
| |
| /* If all Rx work done, exit the polling mode */ |
| if (work_done < budget) { |
| napi_complete(napi); |
| if (adapter->rx_itr_setting & 1) |
| ixgbe_set_itr_msix(q_vector); |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| ixgbe_irq_enable_queues(adapter, |
| ((u64)1 << q_vector->v_idx)); |
| } |
| |
| return work_done; |
| } |
| |
| /** |
| * ixgbe_clean_rxtx_many - msix (aka one shot) Tx/Rx clean routine |
| * @napi: napi struct with our device's info in it |
| * @budget: amount of work driver is allowed to do this pass, in packets |
| * |
| * This function cleans all of the Tx and Rx queues associated with a |
| * q_vector. |
| **/ |
| static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) |
| { |
| struct ixgbe_q_vector *q_vector = |
| container_of(napi, struct ixgbe_q_vector, napi); |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct ixgbe_ring *ring = NULL; |
| int work_done = 0, i; |
| long r_idx; |
| bool tx_clean_complete = true; |
| |
| r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
| for (i = 0; i < q_vector->txr_count; i++) { |
| ring = adapter->tx_ring[r_idx]; |
| #ifdef CONFIG_IXGBE_DCA |
| if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
| ixgbe_update_tx_dca(adapter, ring); |
| #endif |
| tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); |
| r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
| r_idx + 1); |
| } |
| |
| /* attempt to distribute budget to each queue fairly, but don't allow |
| * the budget to go below 1 because we'll exit polling */ |
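| /* note: (x ?: y) is the GCC shorthand for (x ? x : y), so a vector |
| * with no Rx rings divides by 1 rather than by zero */ |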
| budget /= (q_vector->rxr_count ?: 1); |
| budget = max(budget, 1); |
| r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
| for (i = 0; i < q_vector->rxr_count; i++) { |
| ring = adapter->rx_ring[r_idx]; |
| #ifdef CONFIG_IXGBE_DCA |
| if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
| ixgbe_update_rx_dca(adapter, ring); |
| #endif |
| ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); |
| r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
| r_idx + 1); |
| } |
| |
| /* If all Rx work done, exit the polling mode */ |
| if (work_done < budget) { |
| napi_complete(napi); |
| if (adapter->rx_itr_setting & 1) |
| ixgbe_set_itr_msix(q_vector); |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| ixgbe_irq_enable_queues(adapter, |
| ((u64)1 << q_vector->v_idx)); |
| return 0; |
| } |
| |
| return work_done; |
| } |
| |
| /** |
| * ixgbe_clean_txonly - msix (aka one shot) tx clean routine |
| * @napi: napi struct with our device's info in it |
| * @budget: amount of work driver is allowed to do this pass, in packets |
| * |
| * This function is optimized for cleaning one queue only on a single |
| * q_vector!!! |
| **/ |
| static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) |
| { |
| struct ixgbe_q_vector *q_vector = |
| container_of(napi, struct ixgbe_q_vector, napi); |
| struct ixgbe_adapter *adapter = q_vector->adapter; |
| struct ixgbe_ring *tx_ring = NULL; |
| int work_done = 0; |
| long r_idx; |
| |
| r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
| tx_ring = adapter->tx_ring[r_idx]; |
| #ifdef CONFIG_IXGBE_DCA |
| if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
| ixgbe_update_tx_dca(adapter, tx_ring); |
| #endif |
| |
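| /* ixgbe_clean_tx_irq() returns true when the ring was fully cleaned |
| * within its work limit; if work remains, claim the entire budget so |
| * NAPI keeps polling this vector */ |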
| if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) |
| work_done = budget; |
| |
| /* If all Tx work done, exit the polling mode */ |
| if (work_done < budget) { |
| napi_complete(napi); |
| if (adapter->tx_itr_setting & 1) |
| ixgbe_set_itr_msix(q_vector); |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); |
| } |
| |
| return work_done; |
| } |
| |
| static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, |
| int r_idx) |
| { |
| struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
| |
| set_bit(r_idx, q_vector->rxr_idx); |
| q_vector->rxr_count++; |
| } |
| |
| static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, |
| int t_idx) |
| { |
| struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
| |
| set_bit(t_idx, q_vector->txr_idx); |
| q_vector->txr_count++; |
| } |
| |
| /** |
| * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors |
| * @adapter: board private structure to initialize |
| * @vectors: allotted vector count for descriptor rings |
| * |
| * This function maps descriptor rings to the queue-specific vectors |
| * we were allotted through the MSI-X enabling code. Ideally, we'd have |
| * one vector per ring/queue, but on a constrained vector budget, we |
| * group the rings as efficiently as possible. New mapping |
| * configurations should be added here. |
| **/ |
| static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, |
| int vectors) |
| { |
| int v_start = 0; |
| int rxr_idx = 0, txr_idx = 0; |
| int rxr_remaining = adapter->num_rx_queues; |
| int txr_remaining = adapter->num_tx_queues; |
| int i, j; |
| int rqpv, tqpv; |
| int err = 0; |
| |
| /* No mapping required if MSI-X is disabled. */ |
| if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) |
| goto out; |
| |
| /* |
| * The ideal configuration... |
| * We have enough vectors to map one per queue. |
| */ |
| if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { |
| for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) |
| map_vector_to_rxq(adapter, v_start, rxr_idx); |
| |
| for (; txr_idx < txr_remaining; v_start++, txr_idx++) |
| map_vector_to_txq(adapter, v_start, txr_idx); |
| |
| goto out; |
| } |
| |
| /* |
| * If we don't have enough vectors for a 1-to-1 |
| * mapping, we'll have to group them so there are |
| * multiple queues per vector. |
| */ |
| /* Re-adjusting *qpv takes care of the remainder. */ |
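| /* Example: 10 Rx queues spread over 4 vectors gives |
| * DIV_ROUND_UP(10, 4) = 3, DIV_ROUND_UP(7, 3) = 3, |
| * DIV_ROUND_UP(4, 2) = 2 and DIV_ROUND_UP(2, 1) = 2, |
| * i.e. a 3/3/2/2 split. */ |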
| for (i = v_start; i < vectors; i++) { |
| rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); |
| for (j = 0; j < rqpv; j++) { |
| map_vector_to_rxq(adapter, i, rxr_idx); |
| rxr_idx++; |
| rxr_remaining--; |
| } |
| } |
| for (i = v_start; i < vectors; i++) { |
| tqpv = DIV_ROUND_UP(txr_remaining, vectors - i); |
| for (j = 0; j < tqpv; j++) { |
| map_vector_to_txq(adapter, i, txr_idx); |
| txr_idx++; |
| txr_remaining--; |
| } |
| } |
| |
| out: |
| return err; |
| } |
| |
| /** |
| * ixgbe_request_msix_irqs - Initialize MSI-X interrupts |
| * @adapter: board private structure |
| * |
| * ixgbe_request_msix_irqs allocates MSI-X vectors and requests |
| * interrupts from the kernel. |
| **/ |
| static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) |
| { |
| struct net_device *netdev = adapter->netdev; |
| irqreturn_t (*handler)(int, void *); |
| int i, vector, q_vectors, err; |
| int ri = 0, ti = 0; |
| |
| /* Decrement for Other and TCP Timer vectors */ |
| q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
| |
| /* Map the Tx/Rx rings to the vectors we were allotted. */ |
| err = ixgbe_map_rings_to_vectors(adapter, q_vectors); |
| if (err) |
| goto out; |
| |
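| /* pick the leanest handler for each vector: Tx-only and Rx-only |
| * vectors get dedicated handlers, mixed vectors fall back to |
| * ixgbe_msix_clean_many() */ |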
| #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ |
| (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ |
| &ixgbe_msix_clean_many) |
| for (vector = 0; vector < q_vectors; vector++) { |
| handler = SET_HANDLER(adapter->q_vector[vector]); |
| |
| if (handler == &ixgbe_msix_clean_rx) { |
| sprintf(adapter->name[vector], "%s-%s-%d", |
| netdev->name, "rx", ri++); |
| } else if (handler == &ixgbe_msix_clean_tx) { |
| sprintf(adapter->name[vector], "%s-%s-%d", |
| netdev->name, "tx", ti++); |
| } else { |
| sprintf(adapter->name[vector], "%s-%s-%d", |
| netdev->name, "TxRx", vector); |
| } |
| |
| err = request_irq(adapter->msix_entries[vector].vector, |
| handler, 0, adapter->name[vector], |
| adapter->q_vector[vector]); |
| if (err) { |
| DPRINTK(PROBE, ERR, |
| "request_irq failed for MSIX interrupt " |
| "Error: %d\n", err); |
| goto free_queue_irqs; |
| } |
| } |
| |
| sprintf(adapter->name[vector], "%s:lsc", netdev->name); |
| err = request_irq(adapter->msix_entries[vector].vector, |
| ixgbe_msix_lsc, 0, adapter->name[vector], netdev); |
| if (err) { |
| DPRINTK(PROBE, ERR, |
| "request_irq for msix_lsc failed: %d\n", err); |
| goto free_queue_irqs; |
| } |
| |
| return 0; |
| |
| free_queue_irqs: |
| for (i = vector - 1; i >= 0; i--) |
| free_irq(adapter->msix_entries[i].vector, |
| adapter->q_vector[i]); |
| adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
| pci_disable_msix(adapter->pdev); |
| kfree(adapter->msix_entries); |
| adapter->msix_entries = NULL; |
| out: |
| return err; |
| } |
| |
| static void ixgbe_set_itr(struct ixgbe_adapter *adapter) |
| { |
| struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
| u8 current_itr; |
| u32 new_itr = q_vector->eitr; |
| struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; |
| struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; |
| |
| q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, |
| q_vector->tx_itr, |
| tx_ring->total_packets, |
| tx_ring->total_bytes); |
| q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, |
| q_vector->rx_itr, |
| rx_ring->total_packets, |
| rx_ring->total_bytes); |
| |
| current_itr = max(q_vector->rx_itr, q_vector->tx_itr); |
| |
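| /* translate the latency classification into a target interrupt |
| * rate in interrupts/sec */ |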
| switch (current_itr) { |
| /* counts and packets in update_itr are dependent on these numbers */ |
| case lowest_latency: |
| new_itr = 100000; |
| break; |
| case low_latency: |
| new_itr = 20000; /* aka hwitr = ~200 */ |
| break; |
| case bulk_latency: |
| new_itr = 8000; |
| break; |
| default: |
| break; |
| } |
| |
| if (new_itr != q_vector->eitr) { |
| /* do an exponential smoothing */ |
| new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); |
| |
| /* save the smoothed interrupts/sec value for the next adjustment */ |
| q_vector->eitr = new_itr; |
| |
| ixgbe_write_eitr(q_vector); |
| } |
| } |
| |
| /** |
| * ixgbe_irq_enable - Enable default interrupt generation settings |
| * @adapter: board private structure |
| **/ |
| static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) |
| { |
| u32 mask; |
| |
| mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); |
| if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) |
| mask |= IXGBE_EIMS_GPI_SDP0; |
| if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
| mask |= IXGBE_EIMS_GPI_SDP1; |
| if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
| mask |= IXGBE_EIMS_ECC; |
| mask |= IXGBE_EIMS_GPI_SDP1; |
| mask |= IXGBE_EIMS_GPI_SDP2; |
| if (adapter->num_vfs) |
| mask |= IXGBE_EIMS_MAILBOX; |
| } |
| if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
| adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) |
| mask |= IXGBE_EIMS_FLOW_DIR; |
| |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
| ixgbe_irq_enable_queues(adapter, ~0); |
| IXGBE_WRITE_FLUSH(&adapter->hw); |
| |
| if (adapter->num_vfs > 32) { |
| u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); |
| } |
| } |
| |
| /** |
| * ixgbe_intr - legacy mode Interrupt Handler |
| * @irq: interrupt number |
| * @data: pointer to a network interface device structure |
| **/ |
| static irqreturn_t ixgbe_intr(int irq, void *data) |
| { |
| struct net_device *netdev = data; |
| struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| struct ixgbe_hw *hw = &adapter->hw; |
| struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
| u32 eicr; |
| |
| /* |
| * Workaround for silicon errata. Mask the interrupts |
| * before the read of EICR. |
| */ |
| IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); |
| |
| /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read |
| * therefore no explicit interrupt disable is necessary */ |
| eicr = IXGBE_READ_REG(hw, IXGBE_EICR); |
| if (!eicr) { |
| /* shared interrupt alert! |
| * make sure interrupts are enabled because the read will |
| * have disabled interrupts due to EIAM */ |
| ixgbe_irq_enable(adapter); |
| return IRQ_NONE; /* Not our interrupt */ |
| } |
| |
| if (eicr & IXGBE_EICR_LSC) |
| ixgbe_check_lsc(adapter); |
| |
| if (hw->mac.type == ixgbe_mac_82599EB) |
| ixgbe_check_sfp_event(adapter, eicr); |
| |
| ixgbe_check_fan_failure(adapter, eicr); |
| if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && |
| ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) |
| schedule_work(&adapter->check_overtemp_task); |
| |
| if (napi_schedule_prep(&(q_vector->napi))) { |
| adapter->tx_ring[0]->total_packets = 0; |
| adapter->tx_ring[0]->total_bytes = 0; |
| adapter->rx_ring[0]->total_packets = 0; |
| adapter->rx_ring[0]->total_bytes = 0; |
| /* would disable interrupts here but EIAM disabled it */ |
| __napi_schedule(&(q_vector->napi)); |
| } |
| |
| return IRQ_HANDLED; |
| } |
| |
| static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) |
| { |
| int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
| |
| for (i = 0; i < q_vectors; i++) { |
| struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; |
| bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); |
| bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); |
| q_vector->rxr_count = 0; |
| q_vector->txr_count = 0; |
| } |
| } |
| |
| /** |
| * ixgbe_request_irq - initialize interrupts |
| * @adapter: board private structure |
| * |
| * Attempts to configure interrupts using the best available |
| * capabilities of the hardware and kernel. |
| **/ |
| static int ixgbe_request_irq(struct ixgbe_adapter *adapter) |
| { |
| struct net_device *netdev = adapter->netdev; |
| int err; |
| |
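| /* MSI-X uses per-vector handlers and MSI is exclusive to this |
| * device, so only the legacy INTx path registers a shared handler */ |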
| if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
| err = ixgbe_request_msix_irqs(adapter); |
| } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { |
| err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, |
| netdev->name, netdev); |
| } else { |
| err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, |
| netdev->name, netdev); |
| } |
| |
| if (err) |
| DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); |
| |
| return err; |
| } |
| |
| static void ixgbe_free_irq(struct ixgbe_adapter *adapter) |
| { |
| struct net_device *netdev = adapter->netdev; |
| |
| if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
| int i, q_vectors; |
| |
| q_vectors = adapter->num_msix_vectors; |
| |
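| /* the last entry is the "other causes" (LSC) vector, which was |
| * requested with netdev as its cookie */ |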
| i = q_vectors - 1; |
| free_irq(adapter->msix_entries[i].vector, netdev); |
| |
| i--; |
| for (; i >= 0; i--) { |
| free_irq(adapter->msix_entries[i].vector, |
| adapter->q_vector[i]); |
| } |
| |
| ixgbe_reset_q_vectors(adapter); |
| } else { |
| free_irq(adapter->pdev->irq, netdev); |
| } |
| } |
| |
| /** |
| * ixgbe_irq_disable - Mask off interrupt generation on the NIC |
| * @adapter: board private structure |
| **/ |
| static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) |
| { |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); |
| } else { |
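| /* on 82599 the low 16 EIMC bits alias queue interrupts that the |
| * EIMC_EX writes below already cover, so only the upper "other |
| * cause" bits are masked through EIMC */ |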
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); |
| if (adapter->num_vfs > 32) |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); |
| } |
| IXGBE_WRITE_FLUSH(&adapter->hw); |
| if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
| int i; |
| for (i = 0; i < adapter->num_msix_vectors; i++) |
| synchronize_irq(adapter->msix_entries[i].vector); |
| } else { |
| synchronize_irq(adapter->pdev->irq); |
| } |
| } |
| |
| /** |
| * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts |
| * @adapter: board private structure |
| **/ |
| static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| |
| IXGBE_WRITE_REG(hw, IXGBE_EITR(0), |
| EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); |
| |
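| /* map Rx queue 0 and Tx queue 0 to vector 0 (direction 0 = Rx, |
| * 1 = Tx in ixgbe_set_ivar()) */ |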
| ixgbe_set_ivar(adapter, 0, 0, 0); |
| ixgbe_set_ivar(adapter, 1, 0, 0); |
| |
| map_vector_to_rxq(adapter, 0, 0); |
| map_vector_to_txq(adapter, 0, 0); |
| |
| DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); |
| } |
| |
| /** |
| * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset |
| * @adapter: board private structure |
| * |
| * Configure the Tx unit of the MAC after a reset. |
| **/ |
| static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) |
| { |
| u64 tdba; |
| struct ixgbe_hw *hw = &adapter->hw; |
| u32 i, j, tdlen, txctrl; |
| |
| /* Setup the HW Tx Head and Tail descriptor pointers */ |
| for (i = 0; i < adapter->num_tx_queues; i++) { |
| struct ixgbe_ring *ring = adapter->tx_ring[i]; |
| j = ring->reg_idx; |
| tdba = ring->dma; |
| tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); |
| IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), |
| (tdba & DMA_BIT_MASK(32))); |
| IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); |
| IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); |
| IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); |
| IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); |
| adapter->tx_ring[i]->head = IXGBE_TDH(j); |
| adapter->tx_ring[i]->tail = IXGBE_TDT(j); |
| /* |
| * Disable Tx Head Writeback RO bit, since this hoses |
| * bookkeeping if things aren't delivered in order. |
| */ |
| switch (hw->mac.type) { |
| case ixgbe_mac_82598EB: |
| txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); |
| break; |
| case ixgbe_mac_82599EB: |
| default: |
| txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); |
| break; |
| } |
| txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; |
| switch (hw->mac.type) { |
| case ixgbe_mac_82598EB: |
| IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); |
| break; |
| case ixgbe_mac_82599EB: |
| default: |
| IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); |
| break; |
| } |
| } |
| |
| if (hw->mac.type == ixgbe_mac_82599EB) { |
| u32 rttdcs; |
| u32 mask; |
| |
| /* disable the arbiter while setting MTQC */ |
| rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); |
| rttdcs |= IXGBE_RTTDCS_ARBDIS; |
| IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
| |
| /* set transmit pool layout */ |
| mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); |
| switch (adapter->flags & mask) { |
| case (IXGBE_FLAG_SRIOV_ENABLED): |
| IXGBE_WRITE_REG(hw, IXGBE_MTQC, |
| (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); |
| break; |
| case (IXGBE_FLAG_DCB_ENABLED): |
| /* We enable 8 traffic classes, DCB only */ |
| IXGBE_WRITE_REG(hw, IXGBE_MTQC, |
| (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); |
| break; |
| default: |
| IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); |
| break; |
| } |
| |
| /* re-enable the arbiter */ |
| rttdcs &= ~IXGBE_RTTDCS_ARBDIS; |
| IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
| } |
| } |
| |
| #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 |
| |
| static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, |
| struct ixgbe_ring *rx_ring) |
| { |
| u32 srrctl; |
| int index; |
| struct ixgbe_ring_feature *feature = adapter->ring_feature; |
| |
| index = rx_ring->reg_idx; |
| if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
| unsigned long mask; |
| mask = (unsigned long) feature[RING_F_RSS].mask; |
| index = index & mask; |
| } |
| srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); |
| |
| srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; |
| srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; |
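| /* the packet buffer size field is programmed in 1 KB units |
| * (BSIZEPKT_SHIFT == 10); the header size field uses 64 byte units, |
| * which together with its bit offset reduces to the shift by |
| * IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT below */ |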
| |
| srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & |
| IXGBE_SRRCTL_BSIZEHDR_MASK; |
| |
| if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
| #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER |
| srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
| #else |
| srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
| #endif |
| srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; |
| } else { |
| srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> |
| IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
| srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
| } |
| |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); |
| } |
| |
| static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) |
| { |
| u32 mrqc = 0; |
| int mask; |
| |
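| /* MRQC selects the multiple receive queue mode (RSS, VMDq or DCB). |
| * 82598 also uses MRQC for RSS, but that is handled by the caller, |
| * so only 82599 is programmed here. */ |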
| if (!(adapter->hw.mac.type == ixgbe_mac_82599EB)) |
| return mrqc; |
| |
| mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED |
| #ifdef CONFIG_IXGBE_DCB |
| | IXGBE_FLAG_DCB_ENABLED |
| #endif |
| | IXGBE_FLAG_SRIOV_ENABLED |
| ); |
| |
| switch (mask) { |
| case (IXGBE_FLAG_RSS_ENABLED): |
| mrqc = IXGBE_MRQC_RSSEN; |
| break; |
| case (IXGBE_FLAG_SRIOV_ENABLED): |
| mrqc = IXGBE_MRQC_VMDQEN; |
| break; |
| #ifdef CONFIG_IXGBE_DCB |
| case (IXGBE_FLAG_DCB_ENABLED): |
| mrqc = IXGBE_MRQC_RT8TCEN; |
| break; |
| #endif /* CONFIG_IXGBE_DCB */ |
| default: |
| break; |
| } |
| |
| return mrqc; |
| } |
| |
| /** |
| * ixgbe_configure_rscctl - enable RSC for the indicated ring |
| * @adapter: address of board private structure |
| * @index: index of ring to set |
| **/ |
| static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) |
| { |
| struct ixgbe_ring *rx_ring; |
| struct ixgbe_hw *hw = &adapter->hw; |
| int j; |
| u32 rscctrl; |
| int rx_buf_len; |
| |
| rx_ring = adapter->rx_ring[index]; |
| j = rx_ring->reg_idx; |
| rx_buf_len = rx_ring->rx_buf_len; |
| rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); |
| rscctrl |= IXGBE_RSCCTL_RSCEN; |
| /* |
| * we must limit the number of descriptors so that the |
| * total size of max desc * buf_len is not greater |
| * than 65535 |
| */ |
| if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
| #if (MAX_SKB_FRAGS > 16) |
| rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
| #elif (MAX_SKB_FRAGS > 8) |
| rscctrl |= IXGBE_RSCCTL_MAXDESC_8; |
| #elif (MAX_SKB_FRAGS > 4) |
| rscctrl |= IXGBE_RSCCTL_MAXDESC_4; |
| #else |
| rscctrl |= IXGBE_RSCCTL_MAXDESC_1; |
| #endif |
| } else { |
| if (rx_buf_len < IXGBE_RXBUFFER_4096) |
| rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
| else if (rx_buf_len < IXGBE_RXBUFFER_8192) |
| rscctrl |= IXGBE_RSCCTL_MAXDESC_8; |
| else |
| rscctrl |= IXGBE_RSCCTL_MAXDESC_4; |
| } |
| IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); |
| } |
| |
| /** |
| * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset |
| * @adapter: board private structure |
| * |
| * Configure the Rx unit of the MAC after a reset. |
| **/ |
| static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) |
| { |
| u64 rdba; |
| struct ixgbe_hw *hw = &adapter->hw; |
| struct ixgbe_ring *rx_ring; |
| struct net_device *netdev = adapter->netdev; |
| int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
| int i, j; |
| u32 rdlen, rxctrl, rxcsum; |
| static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, |
| 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, |
| 0x6A3E67EA, 0x14364D17, 0x3BED200D}; |
| u32 fctrl, hlreg0; |
| u32 reta = 0, mrqc = 0; |
| u32 rdrxctl; |
| int rx_buf_len; |
| |
| /* Decide whether to use packet split mode or not */ |
| /* Do not use packet split if we're in SR-IOV Mode */ |
| if (!adapter->num_vfs) |
| adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; |
| |
| /* Disable packet split due to 82599 erratum #45 */ |
| if (hw->mac.type == ixgbe_mac_82599EB) |
| adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; |
| |
| /* Set the RX buffer length according to the mode */ |
| if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
| rx_buf_len = IXGBE_RX_HDR_SIZE; |
| if (hw->mac.type == ixgbe_mac_82599EB) { |
| /* PSRTYPE must be initialized in 82599 */ |
| u32 psrtype = IXGBE_PSRTYPE_TCPHDR | |
| IXGBE_PSRTYPE_UDPHDR | |
| IXGBE_PSRTYPE_IPV4HDR | |
| IXGBE_PSRTYPE_IPV6HDR | |
| IXGBE_PSRTYPE_L2HDR; |
| IXGBE_WRITE_REG(hw, |
| IXGBE_PSRTYPE(adapter->num_vfs), |
| psrtype); |
| } |
| } else { |
| if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
| (netdev->mtu <= ETH_DATA_LEN)) |
| rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
| else |
| rx_buf_len = ALIGN(max_frame, 1024); |
| } |
| |
| fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); |
| fctrl |= IXGBE_FCTRL_BAM; |
| fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ |
| fctrl |= IXGBE_FCTRL_PMCF; |
| IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); |
| |
| hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); |
| if (adapter->netdev->mtu <= ETH_DATA_LEN) |
| hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; |
| else |
| hlreg0 |= IXGBE_HLREG0_JUMBOEN; |
| #ifdef IXGBE_FCOE |
| if (netdev->features & NETIF_F_FCOE_MTU) |
| hlreg0 |= IXGBE_HLREG0_JUMBOEN; |
| #endif |
| IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); |
| |
| rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc); |
| /* disable receives while setting up the descriptors */ |
| rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
| IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); |
| |
| /* |
| * Setup the HW Rx Head and Tail Descriptor Pointers and |
| * the Base and Length of the Rx Descriptor Ring |
| */ |
| for (i = 0; i < adapter->num_rx_queues; i++) { |
| rx_ring = adapter->rx_ring[i]; |
| rdba = rx_ring->dma; |
| j = rx_ring->reg_idx; |
| IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); |
| IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); |
| IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); |
| IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); |
| IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); |
| rx_ring->head = IXGBE_RDH(j); |
| rx_ring->tail = IXGBE_RDT(j); |
| rx_ring->rx_buf_len = rx_buf_len; |
| |
| if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) |
| rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; |
| else |
| rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; |
| |
| #ifdef IXGBE_FCOE |
| if (netdev->features & NETIF_F_FCOE_MTU) { |
| struct ixgbe_ring_feature *f; |
| f = &adapter->ring_feature[RING_F_FCOE]; |
| if ((i >= f->mask) && (i < f->mask + f->indices)) { |
| rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; |
| if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) |
| rx_ring->rx_buf_len = |
| IXGBE_FCOE_JUMBO_FRAME_SIZE; |
| } |
| } |
| |
| #endif /* IXGBE_FCOE */ |
| ixgbe_configure_srrctl(adapter, rx_ring); |
| } |
| |
| if (hw->mac.type == ixgbe_mac_82598EB) { |
| /* |
| * For VMDq support of different descriptor types or |
| * buffer sizes through the use of multiple SRRCTL |
| * registers, RDRXCTL.MVMEN must be set to 1 |
| * |
| * also, the manual doesn't mention it clearly but DCA hints |
| * will only use queue 0's tags unless this bit is set. Side |
| * effects of setting this bit are only that SRRCTL must be |
| * fully programmed [0..15] |
| */ |
| rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); |
| rdrxctl |= IXGBE_RDRXCTL_MVMEN; |
| IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
| } |
| |
| if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
| u32 vt_reg_bits; |
| u32 reg_offset, vf_shift; |
| u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); |
| vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN |
| | IXGBE_VT_CTL_REPLEN; |
| vt_reg_bits |= (adapter->num_vfs << |
| IXGBE_VT_CTL_POOL_SHIFT); |
| IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); |
| IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0); |
| |
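| /* the PF owns the pool immediately after the VF pools, so its |
| * index is num_vfs; VFRE/VFTE are 32-bit bitmaps, hence the |
| * modulo/divide by 32 to pick the bit and register */ |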
| vf_shift = adapter->num_vfs % 32; |
| reg_offset = adapter->num_vfs / 32; |
| IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); |
| IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); |
| IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); |
| IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); |
| /* Enable only the PF's pool for Tx/Rx */ |
| IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); |
| IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); |
| IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); |
| ixgbe_set_vmolr(hw, adapter->num_vfs, true); |
| } |
| |
| /* Program MRQC for the distribution of queues */ |
| mrqc = ixgbe_setup_mrqc(adapter); |
| |
| if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
| /* Fill out redirection table */ |
| for (i = 0, j = 0; i < 128; i++, j++) { |
| if (j == adapter->ring_feature[RING_F_RSS].indices) |
| j = 0; |
| /* RETA is written 32 bits (four entries) at a time; each |
| * byte holds a queue index, and j * 0x11 places that index |
| * in both nibbles of the byte. The pattern cycles through |
| * 0, 1, ..., indices - 1 across all 128 entries. */ |
| reta = (reta << 8) | (j * 0x11); |
| if ((i & 3) == 3) |
| IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); |
| } |
| |
| /* Fill out hash function seeds */ |
| for (i = 0; i < 10; i++) |
| IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); |
| |
| if (hw->mac.type == ixgbe_mac_82598EB) |
| mrqc |= IXGBE_MRQC_RSSEN; |
| /* Perform hash on these packet types */ |
| mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
| | IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
| | IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
| | IXGBE_MRQC_RSS_FIELD_IPV6 |
| | IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
| | IXGBE_MRQC_RSS_FIELD_IPV6_UDP; |
| } |
| IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
| |
| if (adapter->num_vfs) { |
| u32 reg; |
| |
| /* Map PF MAC address in RAR Entry 0 to first pool |
| * following VFs */ |
| hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); |
| |
| /* Set up VF register offsets for selected VT Mode, i.e. |
| * 64 VFs for SR-IOV */ |
| reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); |
| reg |= IXGBE_GCR_EXT_SRIOV; |
| IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg); |
| } |
| |
| rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); |
| |
| if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || |
| adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { |
| /* Setting PCSD disables the packet checksum indication in |
| * the Rx descriptor so that the RSS hash is reported in |
| * its place */ |
| rxcsum |= IXGBE_RXCSUM_PCSD; |
| } |
| if (!(rxcsum & IXGBE_RXCSUM_PCSD)) { |
| /* Enable IPv4 payload checksum for UDP fragments |
| * if PCSD is not set */ |
| rxcsum |= IXGBE_RXCSUM_IPPCSE; |
| } |
| |
| IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); |
| |
| if (hw->mac.type == ixgbe_mac_82599EB) { |
| rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); |
| rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; |
| rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; |
| IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
| } |
| |
| if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
| /* Enable 82599 HW-RSC */ |
| for (i = 0; i < adapter->num_rx_queues; i++) |
| ixgbe_configure_rscctl(adapter, i); |
| |
| /* Disable RSC for ACK packets */ |
| IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, |
| (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); |
| } |
| } |
| |
| static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
| { |
| struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| struct ixgbe_hw *hw = &adapter->hw; |
| int pool_ndx = adapter->num_vfs; |
| |
| /* add VID to filter table */ |
| hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); |
| } |
| |
| static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
| { |
| struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| struct ixgbe_hw *hw = &adapter->hw; |
| int pool_ndx = adapter->num_vfs; |
| |
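| /* mask interrupts while the vlgrp entry is cleared so the Rx clean |
| * path cannot look up a VLAN device that is going away */ |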
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| ixgbe_irq_disable(adapter); |
| |
| vlan_group_set_device(adapter->vlgrp, vid, NULL); |
| |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| ixgbe_irq_enable(adapter); |
| |
| /* remove VID from filter table */ |
| hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); |
| } |
| |
| /** |
| * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering |
| * @adapter: driver data |
| */ |
| static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
| int i, j; |
| |
| switch (hw->mac.type) { |
| case ixgbe_mac_82598EB: |
| vlnctrl &= ~IXGBE_VLNCTRL_VFE; |
| #ifdef CONFIG_IXGBE_DCB |
| if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) |
| vlnctrl &= ~IXGBE_VLNCTRL_VME; |
| #endif |
| vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; |
| IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
| break; |
| case ixgbe_mac_82599EB: |
| vlnctrl &= ~IXGBE_VLNCTRL_VFE; |
| vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; |
| IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
| #ifdef CONFIG_IXGBE_DCB |
| if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
| break; |
| #endif |
| for (i = 0; i < adapter->num_rx_queues; i++) { |
| j = adapter->rx_ring[i]->reg_idx; |
| vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
| vlnctrl &= ~IXGBE_RXDCTL_VME; |
| IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); |
| } |
| break; |
| default: |
| break; |
| } |
| } |
| |
| /** |
| * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering |
| * @adapter: driver data |
| */ |
| static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) |
| { |
| struct ixgbe_hw *hw = &adapter->hw; |
| u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
| int i, j; |
| |
| switch (hw->mac.type) { |
| case ixgbe_mac_82598EB: |
| vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; |
| vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; |
| IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
| break; |
| case ixgbe_mac_82599EB: |
| vlnctrl |= IXGBE_VLNCTRL_VFE; |
| vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; |
| IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
| for (i = 0; i < adapter->num_rx_queues; i++) { |
| j = adapter->rx_ring[i]->reg_idx; |
| vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
| vlnctrl |= IXGBE_RXDCTL_VME; |
| IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); |
| } |
| break; |
| default: |
| break; |
| } |
| } |
| |
| static void ixgbe_vlan_rx_register(struct net_device *netdev, |
| struct vlan_group *grp) |
| { |
| struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| ixgbe_irq_disable(adapter); |
| adapter->vlgrp = grp; |
| |
| /* |
| * For a DCB driver, always enable VLAN tag stripping so we can |
| * still receive traffic from a DCB-enabled host even if we're |
| * not in DCB mode. |
| */ |
| ixgbe_vlan_filter_enable(adapter); |
| |
| ixgbe_vlan_rx_add_vid(netdev, 0); |
| |
| if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
| ixgbe_irq_enable(adapter); |
| } |
| |
| static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) |
| { |
| |