Merge branch 'upstream' of git://lost.foo-projects.org/~ahkok/git/netdev-2.6 into upstream

Jeff Garzik 2006-08-31 17:39:21 -04:00
commit 7c440e7990
10 changed files with 977 additions and 794 deletions


@ -159,7 +159,7 @@
#define DRV_NAME "e100" #define DRV_NAME "e100"
#define DRV_EXT "-NAPI" #define DRV_EXT "-NAPI"
#define DRV_VERSION "3.5.10-k4"DRV_EXT #define DRV_VERSION "3.5.16-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
#define PFX DRV_NAME ": " #define PFX DRV_NAME ": "
@ -1759,11 +1759,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{ {
if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN))) if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
return -ENOMEM; return -ENOMEM;
/* Align, init, and map the RFD. */ /* Align, init, and map the RFD. */
rx->skb->dev = nic->netdev;
skb_reserve(rx->skb, NET_IP_ALIGN); skb_reserve(rx->skb, NET_IP_ALIGN);
memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd)); memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
@ -2139,7 +2138,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
e100_start_receiver(nic, NULL); e100_start_receiver(nic, NULL);
if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) { if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
err = -ENOMEM; err = -ENOMEM;
goto err_loopback_none; goto err_loopback_none;
} }
@ -2791,6 +2790,7 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel
/* Detach; put netif into state similar to hotplug unplug. */ /* Detach; put netif into state similar to hotplug unplug. */
netif_poll_enable(netdev); netif_poll_enable(netdev);
netif_device_detach(netdev); netif_device_detach(netdev);
pci_disable_device(pdev);
/* Request a slot reset. */ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET; return PCI_ERS_RESULT_NEED_RESET;
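The e100 hunks above (and the matching e1000/ixgb hunks further down) swap dev_alloc_skb() for netdev_alloc_skb(), which takes the owning net_device and sets skb->dev itself, so the manual rx->skb->dev = nic->netdev assignment can go away. A minimal sketch of the before/after pattern, using a hypothetical my_rx_refill() helper rather than the driver's actual code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: my_rx_refill() is not part of any of these drivers. */
static int my_rx_refill(struct net_device *netdev, struct sk_buff **pskb,
                        unsigned int bufsz)
{
        struct sk_buff *skb;

        /* Old pattern: allocate anonymously, then attach the device by hand:
         *     skb = dev_alloc_skb(bufsz + NET_IP_ALIGN);
         *     if (!skb)
         *             return -ENOMEM;
         *     skb->dev = netdev;
         */

        /* New pattern: netdev_alloc_skb() records the owning device itself. */
        skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, NET_IP_ALIGN);  /* keep the IP header 16-byte aligned */
        *pskb = skb;
        return 0;
}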


@ -242,12 +242,10 @@ struct e1000_adapter {
struct timer_list watchdog_timer; struct timer_list watchdog_timer;
struct timer_list phy_info_timer; struct timer_list phy_info_timer;
struct vlan_group *vlgrp; struct vlan_group *vlgrp;
uint16_t mng_vlan_id; uint16_t mng_vlan_id;
uint32_t bd_number; uint32_t bd_number;
uint32_t rx_buffer_len; uint32_t rx_buffer_len;
uint32_t part_num;
uint32_t wol; uint32_t wol;
uint32_t ksp3_port_a;
uint32_t smartspeed; uint32_t smartspeed;
uint32_t en_mng_pt; uint32_t en_mng_pt;
uint16_t link_speed; uint16_t link_speed;
@ -342,7 +340,9 @@ struct e1000_adapter {
boolean_t tso_force; boolean_t tso_force;
#endif #endif
boolean_t smart_power_down; /* phy smart power down */ boolean_t smart_power_down; /* phy smart power down */
boolean_t quad_port_a;
unsigned long flags; unsigned long flags;
uint32_t eeprom_wol;
}; };
enum e1000_state_t { enum e1000_state_t {


@ -428,12 +428,12 @@ e1000_get_regs(struct net_device *netdev,
regs_buff[23] = regs_buff[18]; /* mdix mode */ regs_buff[23] = regs_buff[18]; /* mdix mode */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
} else { } else {
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
regs_buff[13] = (uint32_t)phy_data; /* cable length */ regs_buff[13] = (uint32_t)phy_data; /* cable length */
regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */ regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
regs_buff[18] = regs_buff[13]; /* cable polarity */ regs_buff[18] = regs_buff[13]; /* cable polarity */
regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
@ -709,7 +709,6 @@ e1000_set_ringparam(struct net_device *netdev,
} }
clear_bit(__E1000_RESETTING, &adapter->flags); clear_bit(__E1000_RESETTING, &adapter->flags);
return 0; return 0;
err_setup_tx: err_setup_tx:
e1000_free_all_rx_resources(adapter); e1000_free_all_rx_resources(adapter);
@ -894,16 +893,17 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
*data = 0; *data = 0;
/* NOTE: we don't test MSI interrupts here, yet */
/* Hook up test interrupt handler just for this test */ /* Hook up test interrupt handler just for this test */
if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
netdev->name, netdev)) { netdev->name, netdev))
shared_int = FALSE; shared_int = FALSE;
} else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
netdev->name, netdev)){ netdev->name, netdev)) {
*data = 1; *data = 1;
return -1; return -1;
} }
DPRINTK(PROBE,INFO, "testing %s interrupt\n", DPRINTK(HW, INFO, "testing %s interrupt\n",
(shared_int ? "shared" : "unshared")); (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */ /* Disable all the interrupts */
@ -1269,11 +1269,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
/* autoneg off */ /* autoneg off */
e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
} else if (adapter->hw.phy_type == e1000_phy_gg82563) { } else if (adapter->hw.phy_type == e1000_phy_gg82563)
e1000_write_phy_reg(&adapter->hw, e1000_write_phy_reg(&adapter->hw,
GG82563_PHY_KMRN_MODE_CTRL, GG82563_PHY_KMRN_MODE_CTRL,
0x1CC); 0x1CC);
}
ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
@ -1301,9 +1300,9 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
} }
if (adapter->hw.media_type == e1000_media_type_copper && if (adapter->hw.media_type == e1000_media_type_copper &&
adapter->hw.phy_type == e1000_phy_m88) { adapter->hw.phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
} else { else {
/* Set the ILOS bit on the fiber Nic if half /* Set the ILOS bit on the fiber Nic if half
* duplex link is detected. */ * duplex link is detected. */
stat_reg = E1000_READ_REG(&adapter->hw, STATUS); stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
@ -1439,11 +1438,10 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_82546_rev_3: case e1000_82546_rev_3:
default: default:
hw->autoneg = TRUE; hw->autoneg = TRUE;
if (hw->phy_type == e1000_phy_gg82563) { if (hw->phy_type == e1000_phy_gg82563)
e1000_write_phy_reg(hw, e1000_write_phy_reg(hw,
GG82563_PHY_KMRN_MODE_CTRL, GG82563_PHY_KMRN_MODE_CTRL,
0x180); 0x180);
}
e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) { if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK; phy_reg &= ~MII_CR_LOOPBACK;
@ -1677,14 +1675,12 @@ e1000_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000); msleep_interruptible(4 * 1000);
} }
static void static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
int retval = 1; /* fail by default */
switch (adapter->hw.device_id) { switch (hw->device_id) {
case E1000_DEV_ID_82542:
case E1000_DEV_ID_82543GC_FIBER: case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER: case E1000_DEV_ID_82543GC_COPPER:
case E1000_DEV_ID_82544EI_FIBER: case E1000_DEV_ID_82544EI_FIBER:
@ -1692,52 +1688,87 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
case E1000_DEV_ID_82545EM_FIBER: case E1000_DEV_ID_82545EM_FIBER:
case E1000_DEV_ID_82545EM_COPPER: case E1000_DEV_ID_82545EM_COPPER:
case E1000_DEV_ID_82546GB_QUAD_COPPER: case E1000_DEV_ID_82546GB_QUAD_COPPER:
case E1000_DEV_ID_82546GB_PCIE:
/* these don't support WoL at all */
wol->supported = 0; wol->supported = 0;
wol->wolopts = 0; break;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82571EB_COPPER:
/* Wake events not supported on port B */
if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
wol->supported = 0;
break;
}
/* return success for non excluded adapter ports */
retval = 0;
break;
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* quad port adapters only support WoL on port A */
if (!adapter->quad_port_a) {
wol->supported = 0;
break;
}
/* return success for non excluded adapter ports */
retval = 0;
break;
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */
if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;
}
retval = 0;
}
return retval;
}
static void
e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
* supported by this hardware */
if (e1000_wol_exclusion(adapter, wol))
return; return;
/* apply any specific unsupported masks here */
switch (adapter->hw.device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* device id 10B5 port-A supports wol */ /* KSP3 does not support UCAST wake-ups */
if (!adapter->ksp3_port_a) { wol->supported &= ~WAKE_UCAST;
wol->supported = 0;
return;
}
/* KSP3 does not support UCAST wake-ups for any interface */
wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
if (adapter->wol & E1000_WUFC_EX) if (adapter->wol & E1000_WUFC_EX)
DPRINTK(DRV, ERR, "Interface does not support " DPRINTK(DRV, ERR, "Interface does not support "
"directed (unicast) frame wake-up packets\n"); "directed (unicast) frame wake-up packets\n");
wol->wolopts = 0; break;
goto do_defaults;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber */
if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
wol->supported = 0;
wol->wolopts = 0;
return;
}
/* Fall Through */
default: default:
wol->supported = WAKE_UCAST | WAKE_MCAST | break;
WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
do_defaults:
if (adapter->wol & E1000_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if (adapter->wol & E1000_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
if (adapter->wol & E1000_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
return;
} }
if (adapter->wol & E1000_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if (adapter->wol & E1000_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
if (adapter->wol & E1000_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
return;
} }
static int static int
@ -1746,52 +1777,36 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
switch (adapter->hw.device_id) { if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
case E1000_DEV_ID_82542: return -EOPNOTSUPP;
case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER: if (e1000_wol_exclusion(adapter, wol))
case E1000_DEV_ID_82544EI_FIBER:
case E1000_DEV_ID_82546EB_QUAD_COPPER:
case E1000_DEV_ID_82546GB_QUAD_COPPER:
case E1000_DEV_ID_82545EM_FIBER:
case E1000_DEV_ID_82545EM_COPPER:
return wol->wolopts ? -EOPNOTSUPP : 0; return wol->wolopts ? -EOPNOTSUPP : 0;
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* device id 10B5 port-A supports wol */
if (!adapter->ksp3_port_a)
return wol->wolopts ? -EOPNOTSUPP : 0;
if (wol->wolopts & WAKE_UCAST) { if (wol->wolopts & WAKE_UCAST) {
DPRINTK(DRV, ERR, "Interface does not support " DPRINTK(DRV, ERR, "Interface does not support "
"directed (unicast) frame wake-up packets\n"); "directed (unicast) frame wake-up packets\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
break;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber */
if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
return wol->wolopts ? -EOPNOTSUPP : 0;
/* Fall Through */
default: default:
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) break;
return -EOPNOTSUPP;
adapter->wol = 0;
if (wol->wolopts & WAKE_UCAST)
adapter->wol |= E1000_WUFC_EX;
if (wol->wolopts & WAKE_MCAST)
adapter->wol |= E1000_WUFC_MC;
if (wol->wolopts & WAKE_BCAST)
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
} }
/* these settings will always override what we currently have */
adapter->wol = 0;
if (wol->wolopts & WAKE_UCAST)
adapter->wol |= E1000_WUFC_EX;
if (wol->wolopts & WAKE_MCAST)
adapter->wol |= E1000_WUFC_MC;
if (wol->wolopts & WAKE_BCAST)
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
return 0; return 0;
} }
@ -1915,8 +1930,8 @@ static struct ethtool_ops e1000_ethtool_ops = {
.get_regs = e1000_get_regs, .get_regs = e1000_get_regs,
.get_wol = e1000_get_wol, .get_wol = e1000_get_wol,
.set_wol = e1000_set_wol, .set_wol = e1000_set_wol,
.get_msglevel = e1000_get_msglevel, .get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel, .set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset, .nway_reset = e1000_nway_reset,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
.get_eeprom_len = e1000_get_eeprom_len, .get_eeprom_len = e1000_get_eeprom_len,
@ -1924,17 +1939,17 @@ static struct ethtool_ops e1000_ethtool_ops = {
.set_eeprom = e1000_set_eeprom, .set_eeprom = e1000_set_eeprom,
.get_ringparam = e1000_get_ringparam, .get_ringparam = e1000_get_ringparam,
.set_ringparam = e1000_set_ringparam, .set_ringparam = e1000_set_ringparam,
.get_pauseparam = e1000_get_pauseparam, .get_pauseparam = e1000_get_pauseparam,
.set_pauseparam = e1000_set_pauseparam, .set_pauseparam = e1000_set_pauseparam,
.get_rx_csum = e1000_get_rx_csum, .get_rx_csum = e1000_get_rx_csum,
.set_rx_csum = e1000_set_rx_csum, .set_rx_csum = e1000_set_rx_csum,
.get_tx_csum = e1000_get_tx_csum, .get_tx_csum = e1000_get_tx_csum,
.set_tx_csum = e1000_set_tx_csum, .set_tx_csum = e1000_set_tx_csum,
.get_sg = ethtool_op_get_sg, .get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg, .set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO #ifdef NETIF_F_TSO
.get_tso = ethtool_op_get_tso, .get_tso = ethtool_op_get_tso,
.set_tso = e1000_set_tso, .set_tso = e1000_set_tso,
#endif #endif
.self_test_count = e1000_diag_test_count, .self_test_count = e1000_diag_test_count,
.self_test = e1000_diag_test, .self_test = e1000_diag_test,
@ -1942,7 +1957,7 @@ static struct ethtool_ops e1000_ethtool_ops = {
.phys_id = e1000_phys_id, .phys_id = e1000_phys_id,
.get_stats_count = e1000_get_stats_count, .get_stats_count = e1000_get_stats_count,
.get_ethtool_stats = e1000_get_ethtool_stats, .get_ethtool_stats = e1000_get_ethtool_stats,
.get_perm_addr = ethtool_op_get_perm_addr, .get_perm_addr = ethtool_op_get_perm_addr,
}; };
void e1000_set_ethtool_ops(struct net_device *netdev) void e1000_set_ethtool_ops(struct net_device *netdev)
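The get_wol/set_wol hunks above fold two copies of the per-device-ID special casing into a single e1000_wol_exclusion() helper that both entry points call first; it zeroes wol->supported and returns non-zero for ports that cannot wake (no-WoL parts, port B of dual-port parts without eeprom_wol, quad-port parts other than port A). A rough sketch of that shape with hypothetical names and a stripped-down adapter struct, not the driver's own types:

#include <linux/types.h>
#include <linux/ethtool.h>

struct my_adapter {                     /* illustrative subset of fields */
        u16  device_id;
        bool is_func_1;                 /* true on port B of a dual-port part */
        bool quad_port_a;               /* true only on port A of a quad part */
        u32  eeprom_wol;                /* WoL bits read from the EEPROM */
        u32  wol;                       /* currently configured WoL bits */
};

#define MY_DEV_NO_WOL      0x1010       /* placeholder device IDs */
#define MY_DEV_QUAD_PORT   0x10A4

/* Returns non-zero (and clears ->supported) when this port cannot wake. */
static int my_wol_excluded(struct my_adapter *adapter,
                           struct ethtool_wolinfo *wol)
{
        switch (adapter->device_id) {
        case MY_DEV_NO_WOL:
                wol->supported = 0;
                return 1;
        case MY_DEV_QUAD_PORT:
                if (!adapter->quad_port_a) {
                        wol->supported = 0;
                        return 1;
                }
                return 0;
        default:
                if (adapter->is_func_1 && !adapter->eeprom_wol) {
                        wol->supported = 0;
                        return 1;
                }
                return 0;
        }
}

static void my_get_wol(struct my_adapter *adapter, struct ethtool_wolinfo *wol)
{
        wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
        wol->wolopts = 0;
        if (my_wol_excluded(adapter, wol))   /* same check reused by set_wol */
                return;
        /* ...otherwise translate adapter->wol into wol->wolopts, as above... */
}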

File diff suppressed because it is too large


@ -336,9 +336,9 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ #define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
#define E1000_MNG_IAMT_MODE 0x3 #define E1000_MNG_IAMT_MODE 0x3
#define E1000_MNG_ICH_IAMT_MODE 0x2 #define E1000_MNG_ICH_IAMT_MODE 0x2
#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
@ -385,7 +385,7 @@ struct e1000_host_mng_dhcp_cookie{
#endif #endif
int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length); uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw); boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
@ -470,6 +470,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82571EB_COPPER 0x105E #define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F #define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060 #define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
#define E1000_DEV_ID_82572EI_COPPER 0x107D #define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E #define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F #define E1000_DEV_ID_82572EI_SERDES 0x107F
@ -523,7 +524,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
/* 802.1q VLAN Packet Sizes */ /* 802.1q VLAN Packet Sizes */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
/* Ethertype field values */ /* Ethertype field values */
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
@ -697,6 +698,7 @@ union e1000_rx_desc_packet_split {
E1000_RXDEXT_STATERR_CXE | \ E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE) E1000_RXDEXT_STATERR_RXE)
/* Transmit Descriptor */ /* Transmit Descriptor */
struct e1000_tx_desc { struct e1000_tx_desc {
uint64_t buffer_addr; /* Address of the descriptor's data buffer */ uint64_t buffer_addr; /* Address of the descriptor's data buffer */
@ -2086,7 +2088,7 @@ struct e1000_hw {
#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address #define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
* filtering */ * filtering */
#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ #define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ #define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
@ -2172,7 +2174,7 @@ struct e1000_host_command_info {
#define E1000_MDALIGN 4096 #define E1000_MDALIGN 4096
/* PCI-Ex registers */ /* PCI-Ex registers*/
/* PCI-Ex Control Register */ /* PCI-Ex Control Register */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001 #define E1000_GCR_RXD_NO_SNOOP 0x00000001
@ -2224,7 +2226,7 @@ struct e1000_host_command_info {
#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ #define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
/* EEPROM Commands - SPI */ /* EEPROM Commands - SPI */
#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ #define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ #define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ #define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ #define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
@ -3082,10 +3084,10 @@ struct e1000_host_command_info {
/* DSP Distance Register (Page 5, Register 26) */ /* DSP Distance Register (Page 5, Register 26) */
#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M; #define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
1 = 50-80M; 1 = 50-80M;
2 = 80-110M; 2 = 80-110M;
3 = 110-140M; 3 = 110-140M;
4 = >140M */ 4 = >140M */
/* Kumeran Mode Control Register (Page 193, Register 16) */ /* Kumeran Mode Control Register (Page 193, Register 16) */
#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */ #define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */


@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else #else
#define DRIVERNAPI "-NAPI" #define DRIVERNAPI "-NAPI"
#endif #endif
#define DRV_VERSION "7.1.9-k6"DRIVERNAPI #define DRV_VERSION "7.2.7-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION; char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@ -98,6 +98,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x1098), INTEL_E1000_ETHERNET_DEVICE(0x1098),
INTEL_E1000_ETHERNET_DEVICE(0x1099), INTEL_E1000_ETHERNET_DEVICE(0x1099),
INTEL_E1000_ETHERNET_DEVICE(0x109A), INTEL_E1000_ETHERNET_DEVICE(0x109A),
INTEL_E1000_ETHERNET_DEVICE(0x10A4),
INTEL_E1000_ETHERNET_DEVICE(0x10B5), INTEL_E1000_ETHERNET_DEVICE(0x10B5),
INTEL_E1000_ETHERNET_DEVICE(0x10B9), INTEL_E1000_ETHERNET_DEVICE(0x10B9),
INTEL_E1000_ETHERNET_DEVICE(0x10BA), INTEL_E1000_ETHERNET_DEVICE(0x10BA),
@ -681,9 +682,9 @@ e1000_probe(struct pci_dev *pdev,
unsigned long flash_start, flash_len; unsigned long flash_start, flash_len;
static int cards_found = 0; static int cards_found = 0;
static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ static int global_quad_port_a = 0; /* global ksp3 port a indication */
int i, err, pci_using_dac; int i, err, pci_using_dac;
uint16_t eeprom_data; uint16_t eeprom_data = 0;
uint16_t eeprom_apme_mask = E1000_EEPROM_APME; uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
if ((err = pci_enable_device(pdev))) if ((err = pci_enable_device(pdev)))
return err; return err;
@ -695,21 +696,20 @@ e1000_probe(struct pci_dev *pdev,
if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) && if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
(err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
E1000_ERR("No usable DMA configuration, aborting\n"); E1000_ERR("No usable DMA configuration, aborting\n");
return err; goto err_dma;
} }
pci_using_dac = 0; pci_using_dac = 0;
} }
if ((err = pci_request_regions(pdev, e1000_driver_name))) if ((err = pci_request_regions(pdev, e1000_driver_name)))
return err; goto err_pci_reg;
pci_set_master(pdev); pci_set_master(pdev);
err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct e1000_adapter)); netdev = alloc_etherdev(sizeof(struct e1000_adapter));
if (!netdev) { if (!netdev)
err = -ENOMEM;
goto err_alloc_etherdev; goto err_alloc_etherdev;
}
SET_MODULE_OWNER(netdev); SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev); SET_NETDEV_DEV(netdev, &pdev->dev);
@ -724,11 +724,10 @@ e1000_probe(struct pci_dev *pdev,
mmio_start = pci_resource_start(pdev, BAR_0); mmio_start = pci_resource_start(pdev, BAR_0);
mmio_len = pci_resource_len(pdev, BAR_0); mmio_len = pci_resource_len(pdev, BAR_0);
err = -EIO;
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
if (!adapter->hw.hw_addr) { if (!adapter->hw.hw_addr)
err = -EIO;
goto err_ioremap; goto err_ioremap;
}
for (i = BAR_1; i <= BAR_5; i++) { for (i = BAR_1; i <= BAR_5; i++) {
if (pci_resource_len(pdev, i) == 0) if (pci_resource_len(pdev, i) == 0)
@ -773,6 +772,7 @@ e1000_probe(struct pci_dev *pdev,
if ((err = e1000_sw_init(adapter))) if ((err = e1000_sw_init(adapter)))
goto err_sw_init; goto err_sw_init;
err = -EIO;
/* Flash BAR mapping must happen after e1000_sw_init /* Flash BAR mapping must happen after e1000_sw_init
* because it depends on mac_type */ * because it depends on mac_type */
if ((adapter->hw.mac_type == e1000_ich8lan) && if ((adapter->hw.mac_type == e1000_ich8lan) &&
@ -780,24 +780,13 @@ e1000_probe(struct pci_dev *pdev,
flash_start = pci_resource_start(pdev, 1); flash_start = pci_resource_start(pdev, 1);
flash_len = pci_resource_len(pdev, 1); flash_len = pci_resource_len(pdev, 1);
adapter->hw.flash_address = ioremap(flash_start, flash_len); adapter->hw.flash_address = ioremap(flash_start, flash_len);
if (!adapter->hw.flash_address) { if (!adapter->hw.flash_address)
err = -EIO;
goto err_flashmap; goto err_flashmap;
}
} }
if ((err = e1000_check_phy_reset_block(&adapter->hw))) if (e1000_check_phy_reset_block(&adapter->hw))
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
/* if ksp3, indicate if it's port a being setup */
if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
e1000_ksp3_port_a == 0)
adapter->ksp3_port_a = 1;
e1000_ksp3_port_a++;
/* Reset for multiple KP3 adapters */
if (e1000_ksp3_port_a == 4)
e1000_ksp3_port_a = 0;
if (adapter->hw.mac_type >= e1000_82543) { if (adapter->hw.mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG | netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM | NETIF_F_HW_CSUM |
@ -829,7 +818,7 @@ e1000_probe(struct pci_dev *pdev,
if (e1000_init_eeprom_params(&adapter->hw)) { if (e1000_init_eeprom_params(&adapter->hw)) {
E1000_ERR("EEPROM initialization failed\n"); E1000_ERR("EEPROM initialization failed\n");
return -EIO; goto err_eeprom;
} }
/* before reading the EEPROM, reset the controller to /* before reading the EEPROM, reset the controller to
@ -841,7 +830,6 @@ e1000_probe(struct pci_dev *pdev,
if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) { if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom; goto err_eeprom;
} }
@ -854,12 +842,9 @@ e1000_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(netdev->perm_addr)) { if (!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom; goto err_eeprom;
} }
e1000_read_part_num(&adapter->hw, &(adapter->part_num));
e1000_get_bus_info(&adapter->hw); e1000_get_bus_info(&adapter->hw);
init_timer(&adapter->tx_fifo_stall_timer); init_timer(&adapter->tx_fifo_stall_timer);
@ -920,7 +905,38 @@ e1000_probe(struct pci_dev *pdev,
break; break;
} }
if (eeprom_data & eeprom_apme_mask) if (eeprom_data & eeprom_apme_mask)
adapter->wol |= E1000_WUFC_MAG; adapter->eeprom_wol |= E1000_WUFC_MAG;
/* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
* wake on lan on a particular port */
switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0;
break;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
else
adapter->quad_port_a = 1;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
}
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
/* print bus type/speed/width info */ /* print bus type/speed/width info */
{ {
@ -963,16 +979,33 @@ e1000_probe(struct pci_dev *pdev,
return 0; return 0;
err_register: err_register:
e1000_release_hw_control(adapter);
err_eeprom:
if (!e1000_check_phy_reset_block(&adapter->hw))
e1000_phy_hw_reset(&adapter->hw);
if (adapter->hw.flash_address) if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address); iounmap(adapter->hw.flash_address);
err_flashmap: err_flashmap:
#ifdef CONFIG_E1000_NAPI
for (i = 0; i < adapter->num_rx_queues; i++)
dev_put(&adapter->polling_netdev[i]);
#endif
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
kfree(adapter->polling_netdev);
#endif
err_sw_init: err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr); iounmap(adapter->hw.hw_addr);
err_ioremap: err_ioremap:
free_netdev(netdev); free_netdev(netdev);
err_alloc_etherdev: err_alloc_etherdev:
pci_release_regions(pdev); pci_release_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
return err; return err;
} }
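The e1000_probe() hunks above replace scattered early returns with a single goto-based unwind chain (err_dma, err_pci_reg, err_alloc_etherdev, ... err_register), so each failure releases exactly what was acquired before it and pci_disable_device() is reached from every path. A compressed sketch of that idiom with placeholder resources, not the full probe sequence:

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_adapter { int dummy; };       /* placeholder private data */

static int my_probe(struct pci_dev *pdev)
{
        struct net_device *netdev;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;             /* nothing acquired yet */

        err = pci_request_regions(pdev, "mydrv");
        if (err)
                goto err_pci_reg;

        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct my_adapter));
        if (!netdev)
                goto err_alloc_etherdev;

        err = register_netdev(netdev);
        if (err)
                goto err_register;

        return 0;

        /* unwind in strict reverse order of acquisition */
err_register:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
        pci_disable_device(pdev);
        return err;
}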
@ -1207,7 +1240,7 @@ e1000_open(struct net_device *netdev)
err = e1000_request_irq(adapter); err = e1000_request_irq(adapter);
if (err) if (err)
goto err_up; goto err_req_irq;
e1000_power_up_phy(adapter); e1000_power_up_phy(adapter);
@ -1228,6 +1261,9 @@ e1000_open(struct net_device *netdev)
return E1000_SUCCESS; return E1000_SUCCESS;
err_up: err_up:
e1000_power_down_phy(adapter);
e1000_free_irq(adapter);
err_req_irq:
e1000_free_all_rx_resources(adapter); e1000_free_all_rx_resources(adapter);
err_setup_rx: err_setup_rx:
e1000_free_all_tx_resources(adapter); e1000_free_all_tx_resources(adapter);
@ -1380,10 +1416,6 @@ setup_tx_desc_die:
* (Descriptors) for all queues * (Descriptors) for all queues
* @adapter: board private structure * @adapter: board private structure
* *
* If this function returns with an error, then it's possible one or
* more of the rings is populated (while the rest are not). It is the
* callers duty to clean those orphaned rings.
*
* Return 0 on success, negative on failure * Return 0 on success, negative on failure
**/ **/
@ -1397,6 +1429,9 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
if (err) { if (err) {
DPRINTK(PROBE, ERR, DPRINTK(PROBE, ERR,
"Allocation for Tx Queue %u failed\n", i); "Allocation for Tx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_tx_resources(adapter,
&adapter->tx_ring[i]);
break; break;
} }
} }
@ -1636,10 +1671,6 @@ setup_rx_desc_die:
* (Descriptors) for all queues * (Descriptors) for all queues
* @adapter: board private structure * @adapter: board private structure
* *
* If this function returns with an error, then it's possible one or
* more of the rings is populated (while the rest are not). It is the
* callers duty to clean those orphaned rings.
*
* Return 0 on success, negative on failure * Return 0 on success, negative on failure
**/ **/
@ -1653,6 +1684,9 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
if (err) { if (err) {
DPRINTK(PROBE, ERR, DPRINTK(PROBE, ERR,
"Allocation for Rx Queue %u failed\n", i); "Allocation for Rx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_rx_resources(adapter,
&adapter->rx_ring[i]);
break; break;
} }
} }
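Both e1000_setup_all_tx_resources() and e1000_setup_all_rx_resources() above now free the queues that had already been set up when a later ring allocation fails (the new "for (i-- ; i >= 0; i--)" loops), instead of leaving orphaned rings for the caller to clean, which is why the "callers duty to clean those orphaned rings" comment was dropped. A generic sketch of that roll-back with placeholder ring helpers:

#include <linux/slab.h>

struct my_ring { void *desc; };         /* placeholder descriptor ring */

static int my_alloc_ring(struct my_ring *ring, size_t size)
{
        ring->desc = kzalloc(size, GFP_KERNEL);
        return ring->desc ? 0 : -ENOMEM;
}

static void my_free_ring(struct my_ring *ring)
{
        kfree(ring->desc);
        ring->desc = NULL;
}

static int my_setup_all_rings(struct my_ring *rings, int nr, size_t size)
{
        int i, err = 0;

        for (i = 0; i < nr; i++) {
                err = my_alloc_ring(&rings[i], size);
                if (err) {
                        /* roll back the rings that did succeed, newest first */
                        for (i--; i >= 0; i--)
                                my_free_ring(&rings[i]);
                        break;
                }
        }
        return err;
}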
@ -2439,10 +2473,9 @@ e1000_watchdog(unsigned long data)
* disable receives in the ISR and * disable receives in the ISR and
* reset device here in the watchdog * reset device here in the watchdog
*/ */
if (adapter->hw.mac_type == e1000_80003es2lan) { if (adapter->hw.mac_type == e1000_80003es2lan)
/* reset device */ /* reset device */
schedule_work(&adapter->reset_task); schedule_work(&adapter->reset_task);
}
} }
e1000_smartspeed(adapter); e1000_smartspeed(adapter);
@ -3677,7 +3710,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
E1000_DBG("%s: Receive packet consumed multiple" E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name); " buffers\n", netdev->name);
/* recycle */ /* recycle */
buffer_info-> skb = skb; buffer_info->skb = skb;
goto next_desc; goto next_desc;
} }
@ -3708,7 +3741,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
netdev_alloc_skb(netdev, length + NET_IP_ALIGN); netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) { if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN); skb_reserve(new_skb, NET_IP_ALIGN);
new_skb->dev = netdev;
memcpy(new_skb->data - NET_IP_ALIGN, memcpy(new_skb->data - NET_IP_ALIGN,
skb->data - NET_IP_ALIGN, skb->data - NET_IP_ALIGN,
length + NET_IP_ALIGN); length + NET_IP_ALIGN);
@ -3975,13 +4007,13 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i]; buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) { while (cleaned_count--) {
if (!(skb = buffer_info->skb)) skb = buffer_info->skb;
skb = netdev_alloc_skb(netdev, bufsz); if (skb) {
else {
skb_trim(skb, 0); skb_trim(skb, 0);
goto map_skb; goto map_skb;
} }
skb = netdev_alloc_skb(netdev, bufsz);
if (unlikely(!skb)) { if (unlikely(!skb)) {
/* Better luck next round */ /* Better luck next round */
adapter->alloc_rx_buff_failed++; adapter->alloc_rx_buff_failed++;
@ -4006,10 +4038,10 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
dev_kfree_skb(skb); dev_kfree_skb(skb);
dev_kfree_skb(oldskb); dev_kfree_skb(oldskb);
break; /* while !buffer_info->skb */ break; /* while !buffer_info->skb */
} else {
/* Use new allocation */
dev_kfree_skb(oldskb);
} }
/* Use new allocation */
dev_kfree_skb(oldskb);
} }
/* Make buffer alignment 2 beyond a 16 byte boundary /* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after * this will result in a 16 byte aligned IP header after
@ -4017,8 +4049,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
*/ */
skb_reserve(skb, NET_IP_ALIGN); skb_reserve(skb, NET_IP_ALIGN);
skb->dev = netdev;
buffer_info->skb = skb; buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len; buffer_info->length = adapter->rx_buffer_len;
map_skb: map_skb:
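The e1000_alloc_rx_buffers() hunk above reorders the refill loop so the common case, recycling the skb still hanging off buffer_info, is the straight-line path: trim it and jump to the mapping step, and only call netdev_alloc_skb() when there is nothing to reuse. A minimal sketch of that control flow with a hypothetical slot type:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_rx_slot { struct sk_buff *skb; };  /* placeholder per-descriptor state */

static struct sk_buff *my_get_rx_skb(struct net_device *netdev,
                                     struct my_rx_slot *slot,
                                     unsigned int bufsz)
{
        struct sk_buff *skb = slot->skb;

        if (skb) {                      /* recycle: common case, no allocation */
                skb_trim(skb, 0);
                return skb;
        }

        skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
        if (unlikely(!skb))
                return NULL;            /* caller counts alloc_rx_buff_failed */

        skb_reserve(skb, NET_IP_ALIGN);
        slot->skb = skb;
        return skb;                     /* caller maps skb->data for DMA next */
}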
@ -4132,8 +4162,6 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
*/ */
skb_reserve(skb, NET_IP_ALIGN); skb_reserve(skb, NET_IP_ALIGN);
skb->dev = netdev;
buffer_info->skb = skb; buffer_info->skb = skb;
buffer_info->length = adapter->rx_ps_bsize0; buffer_info->length = adapter->rx_ps_bsize0;
buffer_info->dma = pci_map_single(pdev, skb->data, buffer_info->dma = pci_map_single(pdev, skb->data,
@ -4625,7 +4653,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
e1000_set_multi(netdev); e1000_set_multi(netdev);
/* turn on all-multi mode if wake on multicast is enabled */ /* turn on all-multi mode if wake on multicast is enabled */
if (adapter->wol & E1000_WUFC_MC) { if (wufc & E1000_WUFC_MC) {
rctl = E1000_READ_REG(&adapter->hw, RCTL); rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl |= E1000_RCTL_MPE; rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl); E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
@ -4697,11 +4725,14 @@ e1000_resume(struct pci_dev *pdev)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t manc, ret_val; uint32_t manc, err;
pci_set_power_state(pdev, PCI_D0); pci_set_power_state(pdev, PCI_D0);
e1000_pci_restore_state(adapter); e1000_pci_restore_state(adapter);
ret_val = pci_enable_device(pdev); if ((err = pci_enable_device(pdev))) {
printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev); pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3hot, 0);
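e1000_resume() above stops ignoring pci_enable_device()'s return value: if the device cannot be re-enabled coming out of suspend, resume is aborted instead of poking dead hardware. A small sketch of that resume-path shape, using generic PM calls rather than the driver's own wrappers:

#include <linux/pci.h>

static int my_resume(struct pci_dev *pdev)
{
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "cannot re-enable PCI device from suspend\n");
                return err;             /* bail out rather than touch the device */
        }

        pci_set_master(pdev);
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
        return 0;
}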


@ -324,7 +324,6 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, NOTICE, DPRINTK(PROBE, NOTICE,
"Warning: no configuration for board #%i\n", bd); "Warning: no configuration for board #%i\n", bd);
DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
bd = E1000_MAX_NIC;
} }
{ /* Transmit Descriptor Count */ { /* Transmit Descriptor Count */
@ -342,9 +341,14 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ? opt.arg.r.max = mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD; E1000_MAX_TXD : E1000_MAX_82544_TXD;
tx_ring->count = TxDescriptors[bd]; if (num_TxDescriptors > bd) {
e1000_validate_option(&tx_ring->count, &opt, adapter); tx_ring->count = TxDescriptors[bd];
E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); e1000_validate_option(&tx_ring->count, &opt, adapter);
E1000_ROUNDUP(tx_ring->count,
REQ_TX_DESCRIPTOR_MULTIPLE);
} else {
tx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_tx_queues; i++) for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count; tx_ring[i].count = tx_ring->count;
} }
@ -363,9 +367,14 @@ e1000_check_options(struct e1000_adapter *adapter)
opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD; E1000_MAX_82544_RXD;
rx_ring->count = RxDescriptors[bd]; if (num_RxDescriptors > bd) {
e1000_validate_option(&rx_ring->count, &opt, adapter); rx_ring->count = RxDescriptors[bd];
E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); e1000_validate_option(&rx_ring->count, &opt, adapter);
E1000_ROUNDUP(rx_ring->count,
REQ_RX_DESCRIPTOR_MULTIPLE);
} else {
rx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_rx_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count; rx_ring[i].count = rx_ring->count;
} }
@ -377,9 +386,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_ENABLED .def = OPTION_ENABLED
}; };
int rx_csum = XsumRX[bd]; if (num_XsumRX > bd) {
e1000_validate_option(&rx_csum, &opt, adapter); int rx_csum = XsumRX[bd];
adapter->rx_csum = rx_csum; e1000_validate_option(&rx_csum, &opt, adapter);
adapter->rx_csum = rx_csum;
} else {
adapter->rx_csum = opt.def;
}
} }
{ /* Flow Control */ { /* Flow Control */
@ -399,9 +412,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.p = fc_list }} .p = fc_list }}
}; };
int fc = FlowControl[bd]; if (num_FlowControl > bd) {
e1000_validate_option(&fc, &opt, adapter); int fc = FlowControl[bd];
adapter->hw.fc = adapter->hw.original_fc = fc; e1000_validate_option(&fc, &opt, adapter);
adapter->hw.fc = adapter->hw.original_fc = fc;
} else {
adapter->hw.fc = adapter->hw.original_fc = opt.def;
}
} }
{ /* Transmit Interrupt Delay */ { /* Transmit Interrupt Delay */
struct e1000_option opt = { struct e1000_option opt = {
@ -413,8 +430,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXDELAY }} .max = MAX_TXDELAY }}
}; };
adapter->tx_int_delay = TxIntDelay[bd]; if (num_TxIntDelay > bd) {
e1000_validate_option(&adapter->tx_int_delay, &opt, adapter); adapter->tx_int_delay = TxIntDelay[bd];
e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
}
} }
{ /* Transmit Absolute Interrupt Delay */ { /* Transmit Absolute Interrupt Delay */
struct e1000_option opt = { struct e1000_option opt = {
@ -426,9 +448,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_TXABSDELAY }} .max = MAX_TXABSDELAY }}
}; };
adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; if (num_TxAbsIntDelay > bd) {
e1000_validate_option(&adapter->tx_abs_int_delay, &opt, adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
adapter); e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
} }
{ /* Receive Interrupt Delay */ { /* Receive Interrupt Delay */
struct e1000_option opt = { struct e1000_option opt = {
@ -440,8 +466,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXDELAY }} .max = MAX_RXDELAY }}
}; };
adapter->rx_int_delay = RxIntDelay[bd]; if (num_RxIntDelay > bd) {
e1000_validate_option(&adapter->rx_int_delay, &opt, adapter); adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
}
} }
{ /* Receive Absolute Interrupt Delay */ { /* Receive Absolute Interrupt Delay */
struct e1000_option opt = { struct e1000_option opt = {
@ -453,9 +484,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_RXABSDELAY }} .max = MAX_RXABSDELAY }}
}; };
adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; if (num_RxAbsIntDelay > bd) {
e1000_validate_option(&adapter->rx_abs_int_delay, &opt, adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
adapter); e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
} }
{ /* Interrupt Throttling Rate */ { /* Interrupt Throttling Rate */
struct e1000_option opt = { struct e1000_option opt = {
@ -467,18 +502,24 @@ e1000_check_options(struct e1000_adapter *adapter)
.max = MAX_ITR }} .max = MAX_ITR }}
}; };
adapter->itr = InterruptThrottleRate[bd]; if (num_InterruptThrottleRate > bd) {
switch (adapter->itr) { adapter->itr = InterruptThrottleRate[bd];
case 0: switch (adapter->itr) {
DPRINTK(PROBE, INFO, "%s turned off\n", opt.name); case 0:
break; DPRINTK(PROBE, INFO, "%s turned off\n",
case 1: opt.name);
DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", break;
opt.name); case 1:
break; DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
default: opt.name);
e1000_validate_option(&adapter->itr, &opt, adapter); break;
break; default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
break;
}
} else {
adapter->itr = opt.def;
} }
} }
{ /* Smart Power Down */ { /* Smart Power Down */
@ -489,9 +530,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_DISABLED .def = OPTION_DISABLED
}; };
int spd = SmartPowerDownEnable[bd]; if (num_SmartPowerDownEnable > bd) {
e1000_validate_option(&spd, &opt, adapter); int spd = SmartPowerDownEnable[bd];
adapter->smart_power_down = spd; e1000_validate_option(&spd, &opt, adapter);
adapter->smart_power_down = spd;
} else {
adapter->smart_power_down = opt.def;
}
} }
{ /* Kumeran Lock Loss Workaround */ { /* Kumeran Lock Loss Workaround */
struct e1000_option opt = { struct e1000_option opt = {
@ -501,9 +546,13 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = OPTION_ENABLED .def = OPTION_ENABLED
}; };
if (num_KumeranLockLoss > bd) {
int kmrn_lock_loss = KumeranLockLoss[bd]; int kmrn_lock_loss = KumeranLockLoss[bd];
e1000_validate_option(&kmrn_lock_loss, &opt, adapter); e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss; adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
} else {
adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
}
} }
switch (adapter->hw.media_type) { switch (adapter->hw.media_type) {
@ -530,18 +579,17 @@ static void __devinit
e1000_check_fiber_options(struct e1000_adapter *adapter) e1000_check_fiber_options(struct e1000_adapter *adapter)
{ {
int bd = adapter->bd_number; int bd = adapter->bd_number;
bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; if (num_Speed > bd) {
if ((Speed[bd] != OPTION_UNSET)) {
DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
"parameter ignored\n"); "parameter ignored\n");
} }
if ((Duplex[bd] != OPTION_UNSET)) { if (num_Duplex > bd) {
DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
"parameter ignored\n"); "parameter ignored\n");
} }
if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) { if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
"not valid for fiber adapters, " "not valid for fiber adapters, "
"parameter ignored\n"); "parameter ignored\n");
@ -560,7 +608,6 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
{ {
int speed, dplx, an; int speed, dplx, an;
int bd = adapter->bd_number; int bd = adapter->bd_number;
bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
{ /* Speed */ { /* Speed */
struct e1000_opt_list speed_list[] = {{ 0, "" }, struct e1000_opt_list speed_list[] = {{ 0, "" },
@ -577,8 +624,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = speed_list }} .p = speed_list }}
}; };
speed = Speed[bd]; if (num_Speed > bd) {
e1000_validate_option(&speed, &opt, adapter); speed = Speed[bd];
e1000_validate_option(&speed, &opt, adapter);
} else {
speed = opt.def;
}
} }
{ /* Duplex */ { /* Duplex */
struct e1000_opt_list dplx_list[] = {{ 0, "" }, struct e1000_opt_list dplx_list[] = {{ 0, "" },
@ -600,11 +651,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
"Speed/Duplex/AutoNeg parameter ignored.\n"); "Speed/Duplex/AutoNeg parameter ignored.\n");
return; return;
} }
dplx = Duplex[bd]; if (num_Duplex > bd) {
e1000_validate_option(&dplx, &opt, adapter); dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
} else {
dplx = opt.def;
}
} }
if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) { if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
DPRINTK(PROBE, INFO, DPRINTK(PROBE, INFO,
"AutoNeg specified along with Speed or Duplex, " "AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n"); "parameter ignored\n");
@ -653,15 +708,19 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = an_list }} .p = an_list }}
}; };
an = AutoNeg[bd]; if (num_AutoNeg > bd) {
e1000_validate_option(&an, &opt, adapter); an = AutoNeg[bd];
e1000_validate_option(&an, &opt, adapter);
} else {
an = opt.def;
}
adapter->hw.autoneg_advertised = an; adapter->hw.autoneg_advertised = an;
} }
switch (speed + dplx) { switch (speed + dplx) {
case 0: case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg = adapter->fc_autoneg = 1;
if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET) if ((num_Speed > bd) && (speed != 0 || dplx != 0))
DPRINTK(PROBE, INFO, DPRINTK(PROBE, INFO,
"Speed and duplex autonegotiation enabled\n"); "Speed and duplex autonegotiation enabled\n");
break; break;
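Every option block in e1000_check_options() above now consults the count kept by module_param_array() (num_TxDescriptors, num_Speed, num_AutoNeg, ...) to distinguish "the user supplied a value for this board" from "the slot was never written", falling back to the option's default otherwise; the old bd = E1000_MAX_NIC clamping trick goes away. A minimal sketch of that idiom with a made-up parameter name:

#include <linux/module.h>

#define MY_MAX_BOARDS    32
#define MY_DEFAULT_RING  256

static int RingSize[MY_MAX_BOARDS];
static int num_RingSize;                /* filled in by module_param_array() */
module_param_array(RingSize, int, &num_RingSize, 0);
MODULE_PARM_DESC(RingSize, "Descriptor ring size, one value per board");

static int my_ring_size_for_board(int bd)
{
        /* Slots at or beyond num_RingSize were never set on the command
         * line, so they carry no user intent: use the default instead of
         * validating a stale zero. */
        if (num_RingSize > bd)
                return RingSize[bd];    /* would normally be range-checked too */
        return MY_DEFAULT_RING;
}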


@ -110,9 +110,6 @@ struct ixgb_adapter;
#define IXGB_RXBUFFER_8192 8192 #define IXGB_RXBUFFER_8192 8192
#define IXGB_RXBUFFER_16384 16384 #define IXGB_RXBUFFER_16384 16384
/* How many Tx Descriptors do we need to call netif_wake_queue? */
#define IXGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */ #define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */
@ -173,7 +170,7 @@ struct ixgb_adapter {
unsigned long led_status; unsigned long led_status;
/* TX */ /* TX */
struct ixgb_desc_ring tx_ring; struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
unsigned long timeo_start; unsigned long timeo_start;
uint32_t tx_cmd_type; uint32_t tx_cmd_type;
uint64_t hw_csum_tx_good; uint64_t hw_csum_tx_good;


@ -654,11 +654,7 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data)
mod_timer(&adapter->blink_timer, jiffies); mod_timer(&adapter->blink_timer, jiffies);
if (data) msleep_interruptible(data * 1000);
schedule_timeout_interruptible(data * HZ);
else
schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
del_timer_sync(&adapter->blink_timer); del_timer_sync(&adapter->blink_timer);
ixgb_led_off(&adapter->hw); ixgb_led_off(&adapter->hw);
clear_bit(IXGB_LED_ON, &adapter->led_status); clear_bit(IXGB_LED_ON, &adapter->led_status);
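ixgb_phys_id() above drops the open-coded schedule_timeout_interruptible() pair (including the blink-forever branch for data == 0) in favour of a single msleep_interruptible(data * 1000). A tiny sketch of an ethtool phys_id handler using that call; the blink start/stop helpers are placeholders:

#include <linux/delay.h>
#include <linux/netdevice.h>

static void my_blink_start(struct net_device *netdev) { /* arm the LED timer */ }
static void my_blink_stop(struct net_device *netdev)  { /* LED off, timer off */ }

static int my_phys_id(struct net_device *netdev, u32 data)
{
        my_blink_start(netdev);

        /* Sleep for the requested number of seconds; a signal (e.g. ^C on
         * "ethtool -p ethX") ends the sleep early. */
        msleep_interruptible(data * 1000);

        my_blink_stop(netdev);
        return 0;
}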


@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#else #else
#define DRIVERNAPI "-NAPI" #define DRIVERNAPI "-NAPI"
#endif #endif
#define DRV_VERSION "1.0.109-k4"DRIVERNAPI #define DRV_VERSION "1.0.112-k2"DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION; char ixgb_driver_version[] = DRV_VERSION;
static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@ -118,15 +118,26 @@ static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
static void ixgb_netpoll(struct net_device *dev); static void ixgb_netpoll(struct net_device *dev);
#endif #endif
/* Exported from other modules */ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
static void ixgb_io_resume (struct pci_dev *pdev);
/* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter); extern void ixgb_check_options(struct ixgb_adapter *adapter);
static struct pci_error_handlers ixgb_err_handler = {
.error_detected = ixgb_io_error_detected,
.slot_reset = ixgb_io_slot_reset,
.resume = ixgb_io_resume,
};
static struct pci_driver ixgb_driver = { static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name, .name = ixgb_driver_name,
.id_table = ixgb_pci_tbl, .id_table = ixgb_pci_tbl,
.probe = ixgb_probe, .probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove), .remove = __devexit_p(ixgb_remove),
.err_handler = &ixgb_err_handler
}; };
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@ -1174,6 +1185,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
int err; int err;
if (likely(skb_is_gso(skb))) { if (likely(skb_is_gso(skb))) {
struct ixgb_buffer *buffer_info;
if (skb_header_cloned(skb)) { if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err) if (err)
@ -1196,6 +1208,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
i = adapter->tx_ring.next_to_use; i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
buffer_info = &adapter->tx_ring.buffer_info[i];
WARN_ON(buffer_info->dma != 0);
context_desc->ipcss = ipcss; context_desc->ipcss = ipcss;
context_desc->ipcso = ipcso; context_desc->ipcso = ipcso;
@ -1233,11 +1247,14 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint8_t css, cso; uint8_t css, cso;
if(likely(skb->ip_summed == CHECKSUM_HW)) { if(likely(skb->ip_summed == CHECKSUM_HW)) {
struct ixgb_buffer *buffer_info;
css = skb->h.raw - skb->data; css = skb->h.raw - skb->data;
cso = (skb->h.raw + skb->csum) - skb->data; cso = (skb->h.raw + skb->csum) - skb->data;
i = adapter->tx_ring.next_to_use; i = adapter->tx_ring.next_to_use;
context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
buffer_info = &adapter->tx_ring.buffer_info[i];
WARN_ON(buffer_info->dma != 0);
context_desc->tucss = css; context_desc->tucss = css;
context_desc->tucso = cso; context_desc->tucso = cso;
@ -1283,6 +1300,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info = &tx_ring->buffer_info[i]; buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_DATA_PER_TXD); size = min(len, IXGB_MAX_DATA_PER_TXD);
buffer_info->length = size; buffer_info->length = size;
WARN_ON(buffer_info->dma != 0);
buffer_info->dma = buffer_info->dma =
pci_map_single(adapter->pdev, pci_map_single(adapter->pdev,
skb->data + offset, skb->data + offset,
@ -1543,6 +1561,11 @@ void
ixgb_update_stats(struct ixgb_adapter *adapter) ixgb_update_stats(struct ixgb_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
/* Prevent stats update while adapter is being reset */
if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
return;
if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
@ -1787,7 +1810,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
if (unlikely(netif_queue_stopped(netdev))) { if (unlikely(netif_queue_stopped(netdev))) {
spin_lock(&adapter->tx_lock); spin_lock(&adapter->tx_lock);
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
(IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
netif_wake_queue(netdev); netif_wake_queue(netdev);
spin_unlock(&adapter->tx_lock); spin_unlock(&adapter->tx_lock);
} }
@ -1948,10 +1971,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#define IXGB_CB_LENGTH 256 #define IXGB_CB_LENGTH 256
if (length < IXGB_CB_LENGTH) { if (length < IXGB_CB_LENGTH) {
struct sk_buff *new_skb = struct sk_buff *new_skb =
dev_alloc_skb(length + NET_IP_ALIGN); netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
if (new_skb) { if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN); skb_reserve(new_skb, NET_IP_ALIGN);
new_skb->dev = netdev;
memcpy(new_skb->data - NET_IP_ALIGN, memcpy(new_skb->data - NET_IP_ALIGN,
skb->data - NET_IP_ALIGN, skb->data - NET_IP_ALIGN,
length + NET_IP_ALIGN); length + NET_IP_ALIGN);
@ -2031,14 +2053,14 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
/* leave three descriptors unused */ /* leave three descriptors unused */
while(--cleancount > 2) { while(--cleancount > 2) {
/* recycle! its good for you */ /* recycle! its good for you */
if (!(skb = buffer_info->skb)) skb = buffer_info->skb;
skb = dev_alloc_skb(adapter->rx_buffer_len if (skb) {
+ NET_IP_ALIGN);
else {
skb_trim(skb, 0); skb_trim(skb, 0);
goto map_skb; goto map_skb;
} }
skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
+ NET_IP_ALIGN);
if (unlikely(!skb)) { if (unlikely(!skb)) {
/* Better luck next round */ /* Better luck next round */
adapter->alloc_rx_buff_failed++; adapter->alloc_rx_buff_failed++;
@ -2051,8 +2073,6 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
*/ */
skb_reserve(skb, NET_IP_ALIGN); skb_reserve(skb, NET_IP_ALIGN);
skb->dev = netdev;
buffer_info->skb = skb; buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len; buffer_info->length = adapter->rx_buffer_len;
map_skb: map_skb:
@ -2190,7 +2210,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
static void ixgb_netpoll(struct net_device *dev) static void ixgb_netpoll(struct net_device *dev)
{ {
struct ixgb_adapter *adapter = dev->priv; struct ixgb_adapter *adapter = netdev_priv(dev);
disable_irq(adapter->pdev->irq); disable_irq(adapter->pdev->irq);
ixgb_intr(adapter->pdev->irq, dev, NULL); ixgb_intr(adapter->pdev->irq, dev, NULL);
@ -2198,4 +2218,98 @@ static void ixgb_netpoll(struct net_device *dev)
} }
#endif #endif
/**
* ixgb_io_error_detected() - called when PCI error is detected
* @pdev pointer to pci device with error
* @state pci channel state after error
*
* This callback is called by the PCI subsystem whenever
* a PCI bus error is detected.
*/
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
enum pci_channel_state state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev->priv;
if(netif_running(netdev))
ixgb_down(adapter, TRUE);
pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/**
* ixgb_io_slot_reset - called after the pci bus has been reset.
* @pdev pointer to pci device with error
*
* This callback is called after the PCI bus has been reset.
* Basically, this tries to restart the card from scratch.
* This is a shortened version of the device probe/discovery code,
* it resembles the first-half of the ixgb_probe() routine.
*/
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev->priv;
if(pci_enable_device(pdev)) {
DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
/* Perform card reset only on one instance of the card */
if (0 != PCI_FUNC (pdev->devfn))
return PCI_ERS_RESULT_RECOVERED;
pci_set_master(pdev);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
ixgb_reset(adapter);
/* Make sure the EEPROM is good */
if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if(!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_RECOVERED;
}
/**
* ixgb_io_resume - called when it's OK to resume normal operations
* @pdev pointer to pci device with error
*
* The error recovery driver tells us that it's OK to resume
* normal operation. Implementation resembles the second-half
* of the ixgb_probe() routine.
*/
static void ixgb_io_resume (struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev->priv;
pci_set_master(pdev);
if(netif_running(netdev)) {
if(ixgb_up(adapter)) {
printk ("ixgb: can't bring device back up after reset\n");
return;
}
}
netif_device_attach(netdev);
mod_timer(&adapter->watchdog_timer, jiffies);
}
/* ixgb_main.c */ /* ixgb_main.c */
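The block added above wires ixgb into the PCI error-recovery framework: a struct pci_error_handlers with error_detected / slot_reset / resume callbacks is hung off the pci_driver; the first quiesces and disables the device and asks for a slot reset, the second re-enables it after the reset, and the third restarts traffic. A compressed sketch of that wiring with placeholder names; the real callbacks above also re-validate the EEPROM and MAC address:

#include <linux/pci.h>

static pci_ers_result_t mydrv_io_error_detected(struct pci_dev *pdev,
                                                enum pci_channel_state state)
{
        /* stop the interface, then release the device and request a reset */
        pci_disable_device(pdev);
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mydrv_io_slot_reset(struct pci_dev *pdev)
{
        if (pci_enable_device(pdev))
                return PCI_ERS_RESULT_DISCONNECT;   /* card is unrecoverable */
        pci_set_master(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

static void mydrv_io_resume(struct pci_dev *pdev)
{
        /* bring the interface back up and reattach it to the network stack */
}

static struct pci_error_handlers mydrv_err_handler = {
        .error_detected = mydrv_io_error_detected,
        .slot_reset     = mydrv_io_slot_reset,
        .resume         = mydrv_io_resume,
};

static struct pci_driver mydrv_driver = {
        .name        = "mydrv",
        /* .id_table, .probe and .remove omitted in this sketch */
        .err_handler = &mydrv_err_handler,
};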