Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (77 commits)
  [IPV6]: Reorg struct ifmcaddr6 to save some bytes
  [INET_TIMEWAIT_SOCK]: Reorganize struct inet_timewait_sock to save some bytes
  [DCCP]: Reorganize struct dccp_sock to save 8 bytes
  [INET6]: Reorganize struct inet6_dev to save 8 bytes
  [SOCK] proto: Add hashinfo member to struct proto
  EMAC driver: Fix bug: The clock divisor is set to all ones at reset.
  EMAC driver: fix bug - invalidate data cache of new_skb->data range when cache is WB
  EMAC driver: add power down mode
  EMAC driver: ADSP-BF52x arch/mach support
  EMAC driver: use simpler comment headers and strip out information that is maintained in the scm's log
  EMAC driver: bf537 MAC multicast hash filtering patch
  EMAC driver: define MDC_CLK=2.5MHz and calculate mdc_div according to SCLK.
  EMAC driver: shorten the mdelay value to solve netperf performance issue
  [netdrvr] sis190: build fix
  sky2: fix Wake On Lan interaction with BIOS
  sky2: restore multicast addresses after recovery
  pci-skeleton: Misc fixes to build neatly
  phylib: Add Realtek 821x eth PHY support
  natsemi: Update locking documentation
  PHYLIB: Locking fixes for PHY I/O potentially sleeping
  ...
Linus Torvalds 2008-02-04 07:43:36 -08:00
commit a2e4e108c5
84 changed files with 1293 additions and 1138 deletions

View file

@ -84,13 +84,6 @@ S: Status, one of the following:
it has been replaced by a better system and you it has been replaced by a better system and you
should be using that. should be using that.
3C359 NETWORK DRIVER
P: Mike Phillips
M: mikep@linuxtr.net
L: netdev@vger.kernel.org
W: http://www.linuxtr.net
S: Maintained
3C505 NETWORK DRIVER 3C505 NETWORK DRIVER
P: Philip Blundell P: Philip Blundell
M: philb@gnu.org M: philb@gnu.org
@ -939,8 +932,6 @@ M: maxk@qualcomm.com
S: Maintained S: Maintained
BONDING DRIVER BONDING DRIVER
P: Chad Tindel
M: ctindel@users.sourceforge.net
P: Jay Vosburgh P: Jay Vosburgh
M: fubar@us.ibm.com M: fubar@us.ibm.com
L: bonding-devel@lists.sourceforge.net L: bonding-devel@lists.sourceforge.net
@ -2864,15 +2855,6 @@ L: ocfs2-devel@oss.oracle.com
W: http://oss.oracle.com/projects/ocfs2/ W: http://oss.oracle.com/projects/ocfs2/
S: Supported S: Supported
OLYMPIC NETWORK DRIVER
P: Peter De Shrijver
M: p2@ace.ulyssis.student.kuleuven.ac.be
P: Mike Phillips
M: mikep@linuxtr.net
L: netdev@vger.kernel.org
W: http://www.linuxtr.net
S: Maintained
OMNIKEY CARDMAN 4000 DRIVER OMNIKEY CARDMAN 4000 DRIVER
P: Harald Welte P: Harald Welte
M: laforge@gnumonks.org M: laforge@gnumonks.org
@ -3788,13 +3770,6 @@ L: tlan-devel@lists.sourceforge.net (subscribers-only)
W: http://sourceforge.net/projects/tlan/ W: http://sourceforge.net/projects/tlan/
S: Maintained S: Maintained
TOKEN-RING NETWORK DRIVER
P: Mike Phillips
M: mikep@linuxtr.net
L: netdev@vger.kernel.org
W: http://www.linuxtr.net
S: Maintained
TOSHIBA ACPI EXTRAS DRIVER TOSHIBA ACPI EXTRAS DRIVER
P: John Belmonte P: John Belmonte
M: toshiba_acpi@memebeam.org M: toshiba_acpi@memebeam.org

View file

@ -814,8 +814,8 @@ config ULTRA32
will be called smc-ultra32. will be called smc-ultra32.
config BFIN_MAC config BFIN_MAC
tristate "Blackfin 536/537 on-chip mac support" tristate "Blackfin 527/536/537 on-chip mac support"
depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H) depends on NET_ETHERNET && (BF527 || BF537 || BF536) && (!BF537_PORT_H)
select CRC32 select CRC32
select MII select MII
select PHYLIB select PHYLIB
@ -828,7 +828,7 @@ config BFIN_MAC
config BFIN_MAC_USE_L1 config BFIN_MAC_USE_L1
bool "Use L1 memory for rx/tx packets" bool "Use L1 memory for rx/tx packets"
depends on BFIN_MAC && BF537 depends on BFIN_MAC && (BF527 || BF537)
default y default y
help help
To get maximum network performance, you should use L1 memory as rx/tx buffers. To get maximum network performance, you should use L1 memory as rx/tx buffers.
@ -855,7 +855,8 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_RMII config BFIN_MAC_RMII
bool "RMII PHY Interface (EXPERIMENTAL)" bool "RMII PHY Interface (EXPERIMENTAL)"
depends on BFIN_MAC && EXPERIMENTAL depends on BFIN_MAC && EXPERIMENTAL
default n default y if BFIN527_EZKIT
default n if BFIN537_STAMP
help help
Use Reduced PHY MII Interface Use Reduced PHY MII Interface
@ -1199,7 +1200,7 @@ config NE2_MCA
config IBMLANA config IBMLANA
tristate "IBM LAN Adapter/A support" tristate "IBM LAN Adapter/A support"
depends on MCA && MCA_LEGACY depends on MCA
---help--- ---help---
This is a Micro Channel Ethernet adapter. You need to set This is a Micro Channel Ethernet adapter. You need to set
CONFIG_MCA to use this driver. It is both available as an in-kernel CONFIG_MCA to use this driver. It is both available as an in-kernel

View file

@ -384,7 +384,7 @@ static void reset_phy(struct net_device *dev)
/* Wait until PHY reset is complete */ /* Wait until PHY reset is complete */
do { do {
read_phy(lp->phy_address, MII_BMCR, &bmcr); read_phy(lp->phy_address, MII_BMCR, &bmcr);
} while (!(bmcr && BMCR_RESET)); } while (!(bmcr & BMCR_RESET));
disable_mdi(); disable_mdi();
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
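The at91_ether hunk above is a one-character fix with a real behavioural difference: bmcr && BMCR_RESET is a logical AND, which is true whenever the register reads nonzero, while bmcr & BMCR_RESET actually tests the reset bit. A minimal stand-alone sketch of the difference (plain user-space C, not kernel code; the 0x8000 mask is only an illustrative value for the BMCR reset bit):

/*
 * Illustration only: logical AND vs bitwise AND when polling a status bit.
 * BMCR_RESET here is an example mask, not taken from linux/mii.h.
 */
#include <stdio.h>

#define BMCR_RESET 0x8000               /* example mask for the PHY reset bit */

int main(void)
{
	unsigned int bmcr = 0x0100;     /* a register value with the bit clear */

	printf("bmcr && BMCR_RESET = %d\n", bmcr && BMCR_RESET);    /* 1: true while bmcr != 0 */
	printf("bmcr &  BMCR_RESET = %d\n", !!(bmcr & BMCR_RESET)); /* 0: the bit really is clear */
	return 0;
}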

View file

@ -137,11 +137,12 @@ static int ax_initial_check(struct net_device *dev)
static void ax_reset_8390(struct net_device *dev) static void ax_reset_8390(struct net_device *dev)
{ {
struct ei_device *ei_local = netdev_priv(dev); struct ei_device *ei_local = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
unsigned long reset_start_time = jiffies; unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr; void __iomem *addr = (void __iomem *)dev->base_addr;
if (ei_debug > 1) if (ei_debug > 1)
printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
@ -151,7 +152,7 @@ static void ax_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */ /* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
if (jiffies - reset_start_time > 2*HZ/100) { if (jiffies - reset_start_time > 2*HZ/100) {
printk(KERN_WARNING "%s: %s did not complete.\n", dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
__FUNCTION__, dev->name); __FUNCTION__, dev->name);
break; break;
} }
@ -165,11 +166,13 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page) int ring_page)
{ {
struct ei_device *ei_local = netdev_priv(dev); struct ei_device *ei_local = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem; void __iomem *nic_base = ei_local->mem;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) { if (ei_status.dmaing) {
printk(KERN_EMERG "%s: DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n", dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
dev->name, __FUNCTION__, dev->name, __FUNCTION__,
ei_status.dmaing, ei_status.irqlock); ei_status.dmaing, ei_status.irqlock);
return; return;
@ -204,13 +207,16 @@ static void ax_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset) struct sk_buff *skb, int ring_offset)
{ {
struct ei_device *ei_local = netdev_priv(dev); struct ei_device *ei_local = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem; void __iomem *nic_base = ei_local->mem;
char *buf = skb->data; char *buf = skb->data;
if (ei_status.dmaing) { if (ei_status.dmaing) {
printk(KERN_EMERG "%s: DMAing conflict in ax_block_input " dev_err(&ax->dev->dev,
"%s: DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n", "[DMAstat:%d][irqlock:%d].\n",
dev->name, ei_status.dmaing, ei_status.irqlock); dev->name, __FUNCTION__,
ei_status.dmaing, ei_status.irqlock);
return; return;
} }
@ -239,6 +245,7 @@ static void ax_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page) const unsigned char *buf, const int start_page)
{ {
struct ei_device *ei_local = netdev_priv(dev); struct ei_device *ei_local = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem; void __iomem *nic_base = ei_local->mem;
unsigned long dma_start; unsigned long dma_start;
@ -251,7 +258,7 @@ static void ax_block_output(struct net_device *dev, int count,
/* This *shouldn't* happen. If it does, it's the last thing you'll see */ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) { if (ei_status.dmaing) {
printk(KERN_EMERG "%s: DMAing conflict in %s." dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
"[DMAstat:%d][irqlock:%d]\n", "[DMAstat:%d][irqlock:%d]\n",
dev->name, __FUNCTION__, dev->name, __FUNCTION__,
ei_status.dmaing, ei_status.irqlock); ei_status.dmaing, ei_status.irqlock);
@ -281,7 +288,8 @@ static void ax_block_output(struct net_device *dev, int count,
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); dev_warn(&ax->dev->dev,
"%s: timeout waiting for Tx RDC.\n", dev->name);
ax_reset_8390(dev); ax_reset_8390(dev);
ax_NS8390_init(dev,1); ax_NS8390_init(dev,1);
break; break;
@ -424,9 +432,10 @@ static void
ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value) ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
{ {
struct ei_device *ei = (struct ei_device *) netdev_priv(dev); struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
unsigned long flags; unsigned long flags;
printk(KERN_DEBUG "%s: %p, %04x, %04x %04x\n", dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
__FUNCTION__, dev, phy_addr, reg, value); __FUNCTION__, dev, phy_addr, reg, value);
spin_lock_irqsave(&ei->page_lock, flags); spin_lock_irqsave(&ei->page_lock, flags);
@ -750,14 +759,11 @@ static int ax_init_dev(struct net_device *dev, int first_init)
ax_NS8390_init(dev, 0); ax_NS8390_init(dev, 0);
if (first_init) { if (first_init) {
printk("AX88796: %dbit, irq %d, %lx, MAC: ", DECLARE_MAC_BUF(mac);
ei_status.word16 ? 16:8, dev->irq, dev->base_addr);
for (i = 0; i < ETHER_ADDR_LEN; i++) dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %s\n",
printk("%2.2x%c", dev->dev_addr[i], ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
(i < (ETHER_ADDR_LEN-1) ? ':' : ' ')); print_mac(mac, dev->dev_addr));
printk("\n");
} }
ret = register_netdev(dev); ret = register_netdev(dev);

View file

@ -1,34 +1,11 @@
/* /*
* File: drivers/net/bfin_mac.c * Blackfin On-Chip MAC Driver
* Based on:
* Maintainer:
* Bryan Wu <bryan.wu@analog.com>
* *
* Original author: * Copyright 2004-2007 Analog Devices Inc.
* Luke Yang <luke.yang@analog.com>
* *
* Created: * Enter bugs at http://blackfin.uclinux.org/
* Description:
* *
* Modified: * Licensed under the GPL-2 or later.
* Copyright 2004-2006 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program ; see the file COPYING.
* If not, write to the Free Software Foundation,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ */
#include <linux/init.h> #include <linux/init.h>
@ -65,7 +42,7 @@
#define DRV_NAME "bfin_mac" #define DRV_NAME "bfin_mac"
#define DRV_VERSION "1.1" #define DRV_VERSION "1.1"
#define DRV_AUTHOR "Bryan Wu, Luke Yang" #define DRV_AUTHOR "Bryan Wu, Luke Yang"
#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver" #define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver"
MODULE_AUTHOR(DRV_AUTHOR); MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
@ -296,7 +273,7 @@ static void mdio_poll(void)
/* poll the STABUSY bit */ /* poll the STABUSY bit */
while ((bfin_read_EMAC_STAADD()) & STABUSY) { while ((bfin_read_EMAC_STAADD()) & STABUSY) {
mdelay(10); udelay(1);
if (timeout_cnt-- < 0) { if (timeout_cnt-- < 0) {
printk(KERN_ERR DRV_NAME printk(KERN_ERR DRV_NAME
": wait MDC/MDIO transaction to complete timeout\n"); ": wait MDC/MDIO transaction to complete timeout\n");
@ -412,20 +389,26 @@ static void bf537_adjust_link(struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags); spin_unlock_irqrestore(&lp->lock, flags);
} }
/* MDC = 2.5 MHz */
#define MDC_CLK 2500000
static int mii_probe(struct net_device *dev) static int mii_probe(struct net_device *dev)
{ {
struct bf537mac_local *lp = netdev_priv(dev); struct bf537mac_local *lp = netdev_priv(dev);
struct phy_device *phydev = NULL; struct phy_device *phydev = NULL;
unsigned short sysctl; unsigned short sysctl;
int i; int i;
u32 sclk, mdc_div;
/* Enable PHY output early */ /* Enable PHY output early */
if (!(bfin_read_VR_CTL() & PHYCLKOE)) if (!(bfin_read_VR_CTL() & PHYCLKOE))
bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
/* MDC = 2.5 MHz */ sclk = get_sclk();
mdc_div = ((sclk / MDC_CLK) / 2) - 1;
sysctl = bfin_read_EMAC_SYSCTL(); sysctl = bfin_read_EMAC_SYSCTL();
sysctl |= SET_MDCDIV(24); sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
bfin_write_EMAC_SYSCTL(sysctl); bfin_write_EMAC_SYSCTL(sysctl);
/* search for connect PHY device */ /* search for connect PHY device */
@ -477,8 +460,10 @@ static int mii_probe(struct net_device *dev)
lp->phydev = phydev; lp->phydev = phydev;
printk(KERN_INFO "%s: attached PHY driver [%s] " printk(KERN_INFO "%s: attached PHY driver [%s] "
"(mii_bus:phy_addr=%s, irq=%d)\n", "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq); "@sclk=%dMHz)\n",
DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq,
MDC_CLK, mdc_div, sclk/1000000);
return 0; return 0;
} }
@ -551,7 +536,7 @@ static void adjust_tx_list(void)
*/ */
if (current_tx_ptr->next->next == tx_list_head) { if (current_tx_ptr->next->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) { while (tx_list_head->status.status_word == 0) {
mdelay(10); mdelay(1);
if (tx_list_head->status.status_word != 0 if (tx_list_head->status.status_word != 0
|| !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) { || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
goto adjust_head; goto adjust_head;
@ -666,6 +651,12 @@ static void bf537mac_rx(struct net_device *dev)
current_rx_ptr->skb = new_skb; current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
/* Invidate the data cache of skb->data range when it is write back
* cache. It will prevent overwritting the new data from DMA
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
(unsigned long)new_skb->end);
len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN); len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
skb_put(skb, len); skb_put(skb, len);
blackfin_dcache_invalidate_range((unsigned long)skb->head, blackfin_dcache_invalidate_range((unsigned long)skb->head,
@ -767,7 +758,7 @@ static void bf537mac_enable(void)
#if defined(CONFIG_BFIN_MAC_RMII) #if defined(CONFIG_BFIN_MAC_RMII)
opmode |= RMII; /* For Now only 100MBit are supported */ opmode |= RMII; /* For Now only 100MBit are supported */
#ifdef CONFIG_BF_REV_0_2 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
opmode |= TE; opmode |= TE;
#endif #endif
#endif #endif
@ -792,6 +783,39 @@ static void bf537mac_timeout(struct net_device *dev)
netif_wake_queue(dev); netif_wake_queue(dev);
} }
static void bf537mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
struct dev_mc_list *dmi = dev->mc_list;
char *addrs;
int i;
u32 crc;
emac_hashhi = emac_hashlo = 0;
for (i = 0; i < dev->mc_count; i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
/* skip non-multicast addresses */
if (!(*addrs & 1))
continue;
crc = ether_crc(ETH_ALEN, addrs);
crc >>= 26;
if (crc & 0x20)
emac_hashhi |= 1 << (crc & 0x1f);
else
emac_hashlo |= 1 << (crc & 0x1f);
}
bfin_write_EMAC_HASHHI(emac_hashhi);
bfin_write_EMAC_HASHLO(emac_hashlo);
return;
}
/* /*
* This routine will, depending on the values passed to it, * This routine will, depending on the values passed to it,
* either make it accept multicast packets, go into * either make it accept multicast packets, go into
@ -807,11 +831,17 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
sysctl = bfin_read_EMAC_OPMODE(); sysctl = bfin_read_EMAC_OPMODE();
sysctl |= RAF; sysctl |= RAF;
bfin_write_EMAC_OPMODE(sysctl); bfin_write_EMAC_OPMODE(sysctl);
} else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { } else if (dev->flags & IFF_ALLMULTI) {
/* accept all multicast */ /* accept all multicast */
sysctl = bfin_read_EMAC_OPMODE(); sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM; sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl); bfin_write_EMAC_OPMODE(sysctl);
} else if (dev->mc_count) {
/* set up multicast hash table */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= HM;
bfin_write_EMAC_OPMODE(sysctl);
bf537mac_multicast_hash(dev);
} else { } else {
/* clear promisc or multicast mode */ /* clear promisc or multicast mode */
sysctl = bfin_read_EMAC_OPMODE(); sysctl = bfin_read_EMAC_OPMODE();
@ -860,10 +890,10 @@ static int bf537mac_open(struct net_device *dev)
return retval; return retval;
phy_start(lp->phydev); phy_start(lp->phydev);
phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev); setup_system_regs(dev);
bf537mac_disable(); bf537mac_disable();
bf537mac_enable(); bf537mac_enable();
pr_debug("hardware init finished\n"); pr_debug("hardware init finished\n");
netif_start_queue(dev); netif_start_queue(dev);
netif_carrier_on(dev); netif_carrier_on(dev);
@ -886,6 +916,7 @@ static int bf537mac_close(struct net_device *dev)
netif_carrier_off(dev); netif_carrier_off(dev);
phy_stop(lp->phydev); phy_stop(lp->phydev);
phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
/* clear everything */ /* clear everything */
bf537mac_shutdown(dev); bf537mac_shutdown(dev);
@ -970,7 +1001,7 @@ static int __init bf537mac_probe(struct net_device *dev)
/* register irq handler */ /* register irq handler */
if (request_irq if (request_irq
(IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
"BFIN537_MAC_RX", dev)) { "EMAC_RX", dev)) {
printk(KERN_WARNING DRV_NAME printk(KERN_WARNING DRV_NAME
": Unable to attach BlackFin MAC RX interrupt\n"); ": Unable to attach BlackFin MAC RX interrupt\n");
return -EBUSY; return -EBUSY;
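The mii_probe() change above stops hard-coding SET_MDCDIV(24) and instead derives the divisor from the system clock, keeping MDC at the 2.5 MHz target. A worked sketch of that arithmetic (user-space C; the 125 MHz SCLK is only an assumed example value, the driver reads the real one via get_sclk()):

/*
 * Sketch of the divisor calculation used in mii_probe() above.  Assuming
 * the EMAC MDC clock is SCLK / (2 * (divisor + 1)), solving for a 2.5 MHz
 * target gives the expression from the patch.
 */
#include <stdio.h>

#define MDC_CLK 2500000                         /* target MDC clock: 2.5 MHz */

int main(void)
{
	unsigned int sclk = 125000000;          /* assumed SCLK of 125 MHz */
	unsigned int mdc_div = ((sclk / MDC_CLK) / 2) - 1;

	printf("mdc_div = %u\n", mdc_div);      /* 24, matching the old constant */
	printf("MDC     = %u Hz\n", sclk / (2 * (mdc_div + 1)));    /* 2500000 */
	return 0;
}

With a different SCLK the divisor changes accordingly, which is the point of the patch: the old constant only produced 2.5 MHz for one particular clock setting.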

View file

@ -1,34 +1,11 @@
/* /*
* File: drivers/net/bfin_mac.c * Blackfin On-Chip MAC Driver
* Based on:
* Maintainer:
* Bryan Wu <bryan.wu@analog.com>
* *
* Original author: * Copyright 2004-2007 Analog Devices Inc.
* Luke Yang <luke.yang@analog.com>
* *
* Created: * Enter bugs at http://blackfin.uclinux.org/
* Description:
* *
* Modified: * Licensed under the GPL-2 or later.
* Copyright 2004-2006 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program ; see the file COPYING.
* If not, write to the Free Software Foundation,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ */
#define BFIN_MAC_CSUM_OFFLOAD #define BFIN_MAC_CSUM_OFFLOAD

View file

@ -1464,10 +1464,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
dev_set_allmulti(slave_dev, 1); dev_set_allmulti(slave_dev, 1);
} }
netif_tx_lock_bh(bond_dev);
/* upload master's mc_list to new slave */ /* upload master's mc_list to new slave */
for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
} }
netif_tx_unlock_bh(bond_dev);
} }
if (bond->params.mode == BOND_MODE_8023AD) { if (bond->params.mode == BOND_MODE_8023AD) {
@ -1821,7 +1823,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
} }
/* flush master's mc_list from slave */ /* flush master's mc_list from slave */
netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev); bond_mc_list_flush(bond_dev, slave_dev);
netif_tx_unlock_bh(bond_dev);
} }
netdev_set_master(slave_dev, NULL); netdev_set_master(slave_dev, NULL);
@ -1942,7 +1946,9 @@ static int bond_release_all(struct net_device *bond_dev)
} }
/* flush master's mc_list from slave */ /* flush master's mc_list from slave */
netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev); bond_mc_list_flush(bond_dev, slave_dev);
netif_tx_unlock_bh(bond_dev);
} }
netdev_set_master(slave_dev, NULL); netdev_set_master(slave_dev, NULL);
@ -2795,14 +2801,11 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
} }
if (do_failover) { if (do_failover) {
rtnl_lock();
write_lock_bh(&bond->curr_slave_lock); write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond); bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock); write_unlock_bh(&bond->curr_slave_lock);
rtnl_unlock();
} }
re_arm: re_arm:
@ -2859,8 +2862,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
slave->link = BOND_LINK_UP; slave->link = BOND_LINK_UP;
rtnl_lock();
write_lock_bh(&bond->curr_slave_lock); write_lock_bh(&bond->curr_slave_lock);
if ((!bond->curr_active_slave) && if ((!bond->curr_active_slave) &&
@ -2896,7 +2897,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
} }
write_unlock_bh(&bond->curr_slave_lock); write_unlock_bh(&bond->curr_slave_lock);
rtnl_unlock();
} }
} else { } else {
read_lock(&bond->curr_slave_lock); read_lock(&bond->curr_slave_lock);
@ -2966,7 +2966,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond->dev->name, bond->dev->name,
slave->dev->name); slave->dev->name);
rtnl_lock();
write_lock_bh(&bond->curr_slave_lock); write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond); bond_select_active_slave(bond);
@ -2974,8 +2973,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
write_unlock_bh(&bond->curr_slave_lock); write_unlock_bh(&bond->curr_slave_lock);
rtnl_unlock();
bond->current_arp_slave = slave; bond->current_arp_slave = slave;
if (slave) { if (slave) {
@ -2993,13 +2990,10 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond->primary_slave->dev->name); bond->primary_slave->dev->name);
/* primary is up so switch to it */ /* primary is up so switch to it */
rtnl_lock();
write_lock_bh(&bond->curr_slave_lock); write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, bond->primary_slave); bond_change_active_slave(bond, bond->primary_slave);
write_unlock_bh(&bond->curr_slave_lock); write_unlock_bh(&bond->curr_slave_lock);
rtnl_unlock();
slave = bond->primary_slave; slave = bond->primary_slave;
slave->jiffies = jiffies; slave->jiffies = jiffies;
} else { } else {
@ -3769,42 +3763,45 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
{ {
struct bonding *bond = bond_dev->priv; struct bonding *bond = bond_dev->priv;
struct net_device_stats *stats = &(bond->stats), *sstats; struct net_device_stats *stats = &(bond->stats), *sstats;
struct net_device_stats local_stats;
struct slave *slave; struct slave *slave;
int i; int i;
memset(stats, 0, sizeof(struct net_device_stats)); memset(&local_stats, 0, sizeof(struct net_device_stats));
read_lock_bh(&bond->lock); read_lock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) { bond_for_each_slave(bond, slave, i) {
sstats = slave->dev->get_stats(slave->dev); sstats = slave->dev->get_stats(slave->dev);
stats->rx_packets += sstats->rx_packets; local_stats.rx_packets += sstats->rx_packets;
stats->rx_bytes += sstats->rx_bytes; local_stats.rx_bytes += sstats->rx_bytes;
stats->rx_errors += sstats->rx_errors; local_stats.rx_errors += sstats->rx_errors;
stats->rx_dropped += sstats->rx_dropped; local_stats.rx_dropped += sstats->rx_dropped;
stats->tx_packets += sstats->tx_packets; local_stats.tx_packets += sstats->tx_packets;
stats->tx_bytes += sstats->tx_bytes; local_stats.tx_bytes += sstats->tx_bytes;
stats->tx_errors += sstats->tx_errors; local_stats.tx_errors += sstats->tx_errors;
stats->tx_dropped += sstats->tx_dropped; local_stats.tx_dropped += sstats->tx_dropped;
stats->multicast += sstats->multicast; local_stats.multicast += sstats->multicast;
stats->collisions += sstats->collisions; local_stats.collisions += sstats->collisions;
stats->rx_length_errors += sstats->rx_length_errors; local_stats.rx_length_errors += sstats->rx_length_errors;
stats->rx_over_errors += sstats->rx_over_errors; local_stats.rx_over_errors += sstats->rx_over_errors;
stats->rx_crc_errors += sstats->rx_crc_errors; local_stats.rx_crc_errors += sstats->rx_crc_errors;
stats->rx_frame_errors += sstats->rx_frame_errors; local_stats.rx_frame_errors += sstats->rx_frame_errors;
stats->rx_fifo_errors += sstats->rx_fifo_errors; local_stats.rx_fifo_errors += sstats->rx_fifo_errors;
stats->rx_missed_errors += sstats->rx_missed_errors; local_stats.rx_missed_errors += sstats->rx_missed_errors;
stats->tx_aborted_errors += sstats->tx_aborted_errors; local_stats.tx_aborted_errors += sstats->tx_aborted_errors;
stats->tx_carrier_errors += sstats->tx_carrier_errors; local_stats.tx_carrier_errors += sstats->tx_carrier_errors;
stats->tx_fifo_errors += sstats->tx_fifo_errors; local_stats.tx_fifo_errors += sstats->tx_fifo_errors;
stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors;
stats->tx_window_errors += sstats->tx_window_errors; local_stats.tx_window_errors += sstats->tx_window_errors;
} }
memcpy(stats, &local_stats, sizeof(struct net_device_stats));
read_unlock_bh(&bond->lock); read_unlock_bh(&bond->lock);
return stats; return stats;
@ -3937,8 +3934,6 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
struct bonding *bond = bond_dev->priv; struct bonding *bond = bond_dev->priv;
struct dev_mc_list *dmi; struct dev_mc_list *dmi;
write_lock_bh(&bond->lock);
/* /*
* Do promisc before checking multicast_mode * Do promisc before checking multicast_mode
*/ */
@ -3959,6 +3954,8 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond_set_allmulti(bond, -1); bond_set_allmulti(bond, -1);
} }
read_lock(&bond->lock);
bond->flags = bond_dev->flags; bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */ /* looking for addresses to add to slaves' mc list */
@ -3979,7 +3976,7 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond_mc_list_destroy(bond); bond_mc_list_destroy(bond);
bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
write_unlock_bh(&bond->lock); read_unlock(&bond->lock);
} }
/* /*
@ -4526,7 +4523,9 @@ static void bond_free_all(void)
struct net_device *bond_dev = bond->dev; struct net_device *bond_dev = bond->dev;
bond_work_cancel_all(bond); bond_work_cancel_all(bond);
netif_tx_lock_bh(bond_dev);
bond_mc_list_destroy(bond); bond_mc_list_destroy(bond);
netif_tx_unlock_bh(bond_dev);
/* Release the bonded slaves */ /* Release the bonded slaves */
bond_release_all(bond_dev); bond_release_all(bond_dev);
bond_deinit(bond_dev); bond_deinit(bond_dev);
@ -4549,14 +4548,19 @@ static void bond_free_all(void)
int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl) int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl)
{ {
int mode = -1, i, rv; int mode = -1, i, rv;
char modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, }; char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
rv = sscanf(buf, "%d", &mode); for (p = (char *)buf; *p; p++)
if (!rv) { if (!(isdigit(*p) || isspace(*p)))
break;
if (*p)
rv = sscanf(buf, "%20s", modestr); rv = sscanf(buf, "%20s", modestr);
else
rv = sscanf(buf, "%d", &mode);
if (!rv) if (!rv)
return -1; return -1;
}
for (i = 0; tbl[i].modename; i++) { for (i = 0; tbl[i].modename; i++) {
if (mode == tbl[i].mode) if (mode == tbl[i].mode)
@ -4883,6 +4887,7 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
down_write(&bonding_rwsem); down_write(&bonding_rwsem);
/* Check to see if the bond already exists. */ /* Check to see if the bond already exists. */
if (name) {
list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) { if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
printk(KERN_ERR DRV_NAME printk(KERN_ERR DRV_NAME
@ -4891,6 +4896,7 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
res = -EPERM; res = -EPERM;
goto out_rtnl; goto out_rtnl;
} }
}
bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
ether_setup); ether_setup);
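The bond_parse_parm() hunk above lets a mode be given either as a number or as a mode name: it scans the buffer first and only falls back to %d when every character is a digit or whitespace. A user-space sketch of that decision, with a made-up two-entry table standing in for the driver's bond_parm_tbl:

/*
 * Sketch of the scan-then-parse logic from bond_parse_parm() above.
 * The table is a stand-in; the real mode names and values live in the
 * bonding driver.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

struct parm_tbl { const char *modename; int mode; };

static const struct parm_tbl tbl[] = {
	{ "balance-rr",    0 },
	{ "active-backup", 1 },
	{ NULL,           -1 },
};

static int parse_parm(const char *buf)
{
	int mode = -1, i, rv;
	char modestr[16] = { 0 };
	const char *p;

	for (p = buf; *p; p++)          /* numeric only if every character is */
		if (!(isdigit((unsigned char)*p) || isspace((unsigned char)*p)))
			break;

	if (*p)                         /* hit a non-digit: treat as a name  */
		rv = sscanf(buf, "%15s", modestr);
	else                            /* all digits/whitespace: a number   */
		rv = sscanf(buf, "%d", &mode);
	if (!rv)
		return -1;

	for (i = 0; tbl[i].modename; i++)
		if (mode == tbl[i].mode || strcmp(modestr, tbl[i].modename) == 0)
			return tbl[i].mode;
	return -1;
}

int main(void)
{
	printf("%d\n", parse_parm("1"));              /* numeric form -> 1 */
	printf("%d\n", parse_parm("active-backup"));  /* name form    -> 1 */
	return 0;
}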

View file

@ -22,8 +22,8 @@
#include "bond_3ad.h" #include "bond_3ad.h"
#include "bond_alb.h" #include "bond_alb.h"
#define DRV_VERSION "3.2.3" #define DRV_VERSION "3.2.4"
#define DRV_RELDATE "December 6, 2007" #define DRV_RELDATE "January 28, 2008"
#define DRV_NAME "bonding" #define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"

View file

@ -452,7 +452,7 @@ void t3_mc5_intr_handler(struct mc5 *mc5)
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
} }
void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
{ {
#define K * 1024 #define K * 1024

View file

@ -2836,7 +2836,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
* defaults for the assorted SGE parameters, which admins can change until * defaults for the assorted SGE parameters, which admins can change until
* they are used to initialize the SGE. * they are used to initialize the SGE.
*/ */
void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p) void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{ {
int i; int i;

View file

@ -2675,7 +2675,7 @@ void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size)); V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
} }
static void __devinit init_mtus(unsigned short mtus[]) static void init_mtus(unsigned short mtus[])
{ {
/* /*
* See draft-mathis-plpmtud-00.txt for the values. The min is 88 so * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
@ -2703,7 +2703,7 @@ static void __devinit init_mtus(unsigned short mtus[])
/* /*
* Initial congestion control parameters. * Initial congestion control parameters.
*/ */
static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{ {
a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
a[9] = 2; a[9] = 2;
@ -3354,8 +3354,7 @@ out_err:
* Determines a card's PCI mode and associated parameters, such as speed * Determines a card's PCI mode and associated parameters, such as speed
* and width. * and width.
*/ */
static void __devinit get_pci_mode(struct adapter *adapter, static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
struct pci_params *p)
{ {
static unsigned short speed_map[] = { 33, 66, 100, 133 }; static unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode, pcie_cap; u32 pci_mode, pcie_cap;
@ -3395,8 +3394,7 @@ static void __devinit get_pci_mode(struct adapter *adapter,
* capabilities and default speed/duplex/flow-control/autonegotiation * capabilities and default speed/duplex/flow-control/autonegotiation
* settings. * settings.
*/ */
static void __devinit init_link_config(struct link_config *lc, static void init_link_config(struct link_config *lc, unsigned int caps)
unsigned int caps)
{ {
lc->supported = caps; lc->supported = caps;
lc->requested_speed = lc->speed = SPEED_INVALID; lc->requested_speed = lc->speed = SPEED_INVALID;
@ -3419,7 +3417,7 @@ static void __devinit init_link_config(struct link_config *lc,
* Calculates the size of an MC7 memory in bytes from the value of its * Calculates the size of an MC7 memory in bytes from the value of its
* configuration register. * configuration register.
*/ */
static unsigned int __devinit mc7_calc_size(u32 cfg) static unsigned int mc7_calc_size(u32 cfg)
{ {
unsigned int width = G_WIDTH(cfg); unsigned int width = G_WIDTH(cfg);
unsigned int banks = !!(cfg & F_BKS) + 1; unsigned int banks = !!(cfg & F_BKS) + 1;
@ -3430,7 +3428,7 @@ static unsigned int __devinit mc7_calc_size(u32 cfg)
return MBs << 20; return MBs << 20;
} }
static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7, static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
unsigned int base_addr, const char *name) unsigned int base_addr, const char *name)
{ {
u32 cfg; u32 cfg;
@ -3517,7 +3515,7 @@ static int t3_reset_adapter(struct adapter *adapter)
return 0; return 0;
} }
static int __devinit init_parity(struct adapter *adap) static int init_parity(struct adapter *adap)
{ {
int i, err, addr; int i, err, addr;
@ -3552,8 +3550,8 @@ static int __devinit init_parity(struct adapter *adap)
* for some adapter tunables, take PHYs out of reset, and initialize the MDIO * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
* interface. * interface.
*/ */
int __devinit t3_prep_adapter(struct adapter *adapter, int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
const struct adapter_info *ai, int reset) int reset)
{ {
int ret; int ret;
unsigned int i, j = 0; unsigned int i, j = 0;

View file

@ -94,7 +94,7 @@
* enabled. 82557 pads with 7Eh, while the later controllers pad * enabled. 82557 pads with 7Eh, while the later controllers pad
* with 00h. * with 00h.
* *
* IV. Recieve * IV. Receive
* *
* The Receive Frame Area (RFA) comprises a ring of Receive Frame * The Receive Frame Area (RFA) comprises a ring of Receive Frame
* Descriptors (RFD) + data buffer, thus forming the simplified mode * Descriptors (RFD) + data buffer, thus forming the simplified mode
@ -120,7 +120,7 @@
* and Rx indication and re-allocation happen in the same context, * and Rx indication and re-allocation happen in the same context,
* therefore no locking is required. A software-generated interrupt * therefore no locking is required. A software-generated interrupt
* is generated from the watchdog to recover from a failed allocation * is generated from the watchdog to recover from a failed allocation
* senario where all Rx resources have been indicated and none re- * scenario where all Rx resources have been indicated and none re-
* placed. * placed.
* *
* V. Miscellaneous * V. Miscellaneous
@ -1497,7 +1497,7 @@ static void e100_update_stats(struct nic *nic)
&s->complete; &s->complete;
/* Device's stats reporting may take several microseconds to /* Device's stats reporting may take several microseconds to
* complete, so where always waiting for results of the * complete, so we're always waiting for results of the
* previous command. */ * previous command. */
if(*complete == cpu_to_le32(cuc_dump_reset_complete)) { if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
@ -1958,7 +1958,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
if(restart_required) { if(restart_required) {
// ack the rnr? // ack the rnr?
writeb(stat_ack_rnr, &nic->csr->scb.stat_ack); iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
e100_start_receiver(nic, nic->rx_to_clean); e100_start_receiver(nic, nic->rx_to_clean);
if(work_done) if(work_done)
(*work_done)++; (*work_done)++;
@ -2774,7 +2774,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
struct nic *nic = netdev_priv(netdev); struct nic *nic = netdev_priv(netdev);
unregister_netdev(netdev); unregister_netdev(netdev);
e100_free(nic); e100_free(nic);
iounmap(nic->csr); pci_iounmap(pdev, nic->csr);
free_netdev(netdev); free_netdev(netdev);
pci_release_regions(pdev); pci_release_regions(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
@ -2858,17 +2858,17 @@ static void e100_shutdown(struct pci_dev *pdev)
/** /**
* e100_io_error_detected - called when PCI error is detected. * e100_io_error_detected - called when PCI error is detected.
* @pdev: Pointer to PCI device * @pdev: Pointer to PCI device
* @state: The current pci conneection state * @state: The current pci connection state
*/ */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev); struct nic *nic = netdev_priv(netdev);
/* Similar to calling e100_down(), but avoids adpater I/O. */ /* Similar to calling e100_down(), but avoids adapter I/O. */
netdev->stop(netdev); netdev->stop(netdev);
/* Detach; put netif into state similar to hotplug unplug. */ /* Detach; put netif into a state similar to hotplug unplug. */
napi_enable(&nic->napi); napi_enable(&nic->napi);
netif_device_detach(netdev); netif_device_detach(netdev);
pci_disable_device(pdev); pci_disable_device(pdev);

View file

@ -853,7 +853,7 @@ e1000_reset(struct e1000_adapter *adapter)
/** /**
* Dump the eeprom for users having checksum issues * Dump the eeprom for users having checksum issues
**/ **/
void e1000_dump_eeprom(struct e1000_adapter *adapter) static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct ethtool_eeprom eeprom; struct ethtool_eeprom eeprom;

View file

@ -63,6 +63,7 @@
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ #define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
/* Extended Device Control */ /* Extended Device Control */
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */

View file

@ -690,7 +690,7 @@ err_setup:
return err; return err;
} }
bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
int reg, int offset, u32 mask, u32 write) int reg, int offset, u32 mask, u32 write)
{ {
int i; int i;
@ -1632,7 +1632,8 @@ static void e1000_get_wol(struct net_device *netdev,
return; return;
wol->supported = WAKE_UCAST | WAKE_MCAST | wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC; WAKE_BCAST | WAKE_MAGIC |
WAKE_PHY | WAKE_ARP;
/* apply any specific unsupported masks here */ /* apply any specific unsupported masks here */
if (adapter->flags & FLAG_NO_WAKE_UCAST) { if (adapter->flags & FLAG_NO_WAKE_UCAST) {
@ -1651,6 +1652,10 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST; wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG) if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC; wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & E1000_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
if (adapter->wol & E1000_WUFC_ARP)
wol->wolopts |= WAKE_ARP;
} }
static int e1000_set_wol(struct net_device *netdev, static int e1000_set_wol(struct net_device *netdev,
@ -1658,7 +1663,7 @@ static int e1000_set_wol(struct net_device *netdev,
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) if (wol->wolopts & WAKE_MAGICSECURE)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!(adapter->flags & FLAG_HAS_WOL)) if (!(adapter->flags & FLAG_HAS_WOL))
@ -1675,6 +1680,10 @@ static int e1000_set_wol(struct net_device *netdev,
adapter->wol |= E1000_WUFC_BC; adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC) if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG; adapter->wol |= E1000_WUFC_MAG;
if (wol->wolopts & WAKE_PHY)
adapter->wol |= E1000_WUFC_LNKC;
if (wol->wolopts & WAKE_ARP)
adapter->wol |= E1000_WUFC_ARP;
return 0; return 0;
} }

View file

@ -945,11 +945,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
int irq_flags = IRQF_SHARED; int irq_flags = IRQF_SHARED;
int err; int err;
err = pci_enable_msi(adapter->pdev); if (!pci_enable_msi(adapter->pdev)) {
if (err) {
ndev_warn(netdev,
"Unable to allocate MSI interrupt Error: %d\n", err);
} else {
adapter->flags |= FLAG_MSI_ENABLED; adapter->flags |= FLAG_MSI_ENABLED;
handler = e1000_intr_msi; handler = e1000_intr_msi;
irq_flags = 0; irq_flags = 0;
@ -958,10 +954,12 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev); netdev);
if (err) { if (err) {
ndev_err(netdev,
"Unable to allocate %s interrupt (return: %d)\n",
adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
err);
if (adapter->flags & FLAG_MSI_ENABLED) if (adapter->flags & FLAG_MSI_ENABLED)
pci_disable_msi(adapter->pdev); pci_disable_msi(adapter->pdev);
ndev_err(netdev,
"Unable to allocate interrupt Error: %d\n", err);
} }
return err; return err;

View file

@ -458,4 +458,7 @@ void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port); int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed); int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
extern u64 ehea_driver_flags;
extern struct work_struct ehea_rereg_mr_task;
#endif /* __EHEA_H__ */ #endif /* __EHEA_H__ */

View file

@ -54,11 +54,11 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ; static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 0; static int use_mcs;
static int use_lro = 0; static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR; static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP; static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state = 0; static int prop_carrier_state;
module_param(msg_level, int, 0); module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0); module_param(rq1_entries, int, 0);
@ -94,9 +94,9 @@ MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
"Default = 0"); "Default = 0");
static int port_name_cnt = 0; static int port_name_cnt;
static LIST_HEAD(adapter_list); static LIST_HEAD(adapter_list);
u64 ehea_driver_flags = 0; u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task; struct work_struct ehea_rereg_mr_task;
struct semaphore dlpar_mem_lock; struct semaphore dlpar_mem_lock;
@ -121,7 +121,8 @@ static struct of_platform_driver ehea_driver = {
.remove = ehea_remove, .remove = ehea_remove,
}; };
void ehea_dump(void *adr, int len, char *msg) { void ehea_dump(void *adr, int len, char *msg)
{
int x; int x;
unsigned char *deb = adr; unsigned char *deb = adr;
for (x = 0; x < len; x += 16) { for (x = 0; x < len; x += 16) {
@ -518,7 +519,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
last_wqe_index = wqe_index; last_wqe_index = wqe_index;
rmb(); rmb();
if (!ehea_check_cqe(cqe, &rq)) { if (!ehea_check_cqe(cqe, &rq)) {
if (rq == 1) { /* LL RQ1 */ if (rq == 1) {
/* LL RQ1 */
skb = get_skb_by_index_ll(skb_arr_rq1, skb = get_skb_by_index_ll(skb_arr_rq1,
skb_arr_rq1_len, skb_arr_rq1_len,
wqe_index); wqe_index);
@ -534,7 +536,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb_copy_to_linear_data(skb, ((char *)cqe) + 64, skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4); cqe->num_bytes_transfered - 4);
ehea_fill_skb(dev, skb, cqe); ehea_fill_skb(dev, skb, cqe);
} else if (rq == 2) { /* RQ2 */ } else if (rq == 2) {
/* RQ2 */
skb = get_skb_by_index(skb_arr_rq2, skb = get_skb_by_index(skb_arr_rq2,
skb_arr_rq2_len, cqe); skb_arr_rq2_len, cqe);
if (unlikely(!skb)) { if (unlikely(!skb)) {
@ -544,7 +547,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
} }
ehea_fill_skb(dev, skb, cqe); ehea_fill_skb(dev, skb, cqe);
processed_rq2++; processed_rq2++;
} else { /* RQ3 */ } else {
/* RQ3 */
skb = get_skb_by_index(skb_arr_rq3, skb = get_skb_by_index(skb_arr_rq3,
skb_arr_rq3_len, cqe); skb_arr_rq3_len, cqe);
if (unlikely(!skb)) { if (unlikely(!skb)) {
@ -643,7 +647,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
static int ehea_poll(struct napi_struct *napi, int budget) static int ehea_poll(struct napi_struct *napi, int budget)
{ {
struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi); struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
napi);
struct net_device *dev = pr->port->netdev; struct net_device *dev = pr->port->netdev;
struct ehea_cqe *cqe; struct ehea_cqe *cqe;
struct ehea_cqe *cqe_skb = NULL; struct ehea_cqe *cqe_skb = NULL;
@ -743,8 +748,9 @@ int ehea_sense_port_attr(struct ehea_port *port)
u64 hret; u64 hret;
struct hcp_ehea_port_cb0 *cb0; struct hcp_ehea_port_cb0 *cb0;
cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */ /* may be called via ehea_neq_tasklet() */
if (!cb0) { /* ehea_neq_tasklet() */ cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!cb0) {
ehea_error("no mem for cb0"); ehea_error("no mem for cb0");
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
@ -1791,11 +1797,10 @@ static void ehea_set_multicast_list(struct net_device *dev)
goto out; goto out;
} }
for (i = 0, k_mcl_entry = dev->mc_list; for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
i < dev->mc_count; k_mcl_entry = k_mcl_entry->next)
i++, k_mcl_entry = k_mcl_entry->next) {
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
}
} }
out: out:
return; return;
@ -1925,12 +1930,12 @@ static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
if ((skb->protocol == htons(ETH_P_IP)) && if ((skb->protocol == htons(ETH_P_IP)) &&
(ip_hdr(skb)->protocol == IPPROTO_TCP)) { (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
tcp = (struct tcphdr*)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4)); tcp = (struct tcphdr *)(skb_network_header(skb) +
(ip_hdr(skb)->ihl * 4));
tmp = (tcp->source + (tcp->dest << 16)) % 31; tmp = (tcp->source + (tcp->dest << 16)) % 31;
tmp += ip_hdr(skb)->daddr % 31; tmp += ip_hdr(skb)->daddr % 31;
return tmp % num_qps; return tmp % num_qps;
} } else
else
return 0; return 0;
} }
@ -2804,34 +2809,6 @@ static void __devinit logical_port_release(struct device *dev)
of_node_put(port->ofdev.node); of_node_put(port->ofdev.node);
} }
static int ehea_driver_sysfs_add(struct device *dev,
struct device_driver *driver)
{
int ret;
ret = sysfs_create_link(&driver->kobj, &dev->kobj,
kobject_name(&dev->kobj));
if (ret == 0) {
ret = sysfs_create_link(&dev->kobj, &driver->kobj,
"driver");
if (ret)
sysfs_remove_link(&driver->kobj,
kobject_name(&dev->kobj));
}
return ret;
}
static void ehea_driver_sysfs_remove(struct device *dev,
struct device_driver *driver)
{
struct device_driver *drv = driver;
if (drv) {
sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
sysfs_remove_link(&dev->kobj, "driver");
}
}
static struct device *ehea_register_port(struct ehea_port *port, static struct device *ehea_register_port(struct ehea_port *port,
struct device_node *dn) struct device_node *dn)
{ {
@ -2856,16 +2833,8 @@ static struct device *ehea_register_port(struct ehea_port *port,
goto out_unreg_of_dev; goto out_unreg_of_dev;
} }
ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
if (ret) {
ehea_error("failed to register sysfs driver link");
goto out_rem_dev_file;
}
return &port->ofdev.dev; return &port->ofdev.dev;
out_rem_dev_file:
device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
out_unreg_of_dev: out_unreg_of_dev:
of_device_unregister(&port->ofdev); of_device_unregister(&port->ofdev);
out: out:
@ -2874,7 +2843,6 @@ out:
static void ehea_unregister_port(struct ehea_port *port) static void ehea_unregister_port(struct ehea_port *port)
{ {
ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
of_device_unregister(&port->ofdev); of_device_unregister(&port->ofdev);
} }
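The ehea_hash_skb() lines reformatted above spread TCP flows across the available TX queues by mixing the two port numbers and the destination address. A small user-space sketch of that arithmetic (the port and address values are arbitrary examples; in the driver they are read from the packet headers):

/*
 * Sketch of the flow-to-queue hash from ehea_hash_skb() above; values are
 * illustrative only.
 */
#include <stdio.h>

static unsigned int hash_flow(unsigned int source, unsigned int dest,
			      unsigned int daddr, int num_qps)
{
	unsigned int tmp = (source + (dest << 16)) % 31;    /* mix the ports */

	tmp += daddr % 31;              /* fold in the destination address  */
	return tmp % num_qps;           /* select one of the TX queues      */
}

int main(void)
{
	/* e.g. source port 40000, dest port 80, 192.168.0.10, 4 TX queues */
	printf("queue %u\n", hash_flow(40000, 80, 0xc0a8000aU, 4));
	return 0;
}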

View file

@ -33,8 +33,6 @@
struct ehea_busmap ehea_bmap = { 0, 0, NULL }; struct ehea_busmap ehea_bmap = { 0, 0, NULL };
extern u64 ehea_driver_flags;
extern struct work_struct ehea_rereg_mr_task;
static void *hw_qpageit_get_inc(struct hw_queue *queue) static void *hw_qpageit_get_inc(struct hw_queue *queue)
@ -235,8 +233,8 @@ int ehea_destroy_cq(struct ehea_cq *cq)
return 0; return 0;
hcp_epas_dtor(&cq->epas); hcp_epas_dtor(&cq->epas);
hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) { if (hret == H_R_STATE) {
ehea_error_data(cq->adapter, cq->fw_handle); ehea_error_data(cq->adapter, cq->fw_handle);
hret = ehea_destroy_cq_res(cq, FORCE_FREE); hret = ehea_destroy_cq_res(cq, FORCE_FREE);
} }
@ -301,13 +299,13 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
if (i == (eq->attr.nr_pages - 1)) { if (i == (eq->attr.nr_pages - 1)) {
/* last page */ /* last page */
vpage = hw_qpageit_get_inc(&eq->hw_queue); vpage = hw_qpageit_get_inc(&eq->hw_queue);
if ((hret != H_SUCCESS) || (vpage)) { if ((hret != H_SUCCESS) || (vpage))
goto out_kill_hwq; goto out_kill_hwq;
}
} else { } else {
if ((hret != H_PAGE_REGISTERED) || (!vpage)) { if ((hret != H_PAGE_REGISTERED) || (!vpage))
goto out_kill_hwq; goto out_kill_hwq;
}
} }
} }
@ -364,7 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq)
hcp_epas_dtor(&eq->epas); hcp_epas_dtor(&eq->epas);
if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) { hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
if (hret == H_R_STATE) {
ehea_error_data(eq->adapter, eq->fw_handle); ehea_error_data(eq->adapter, eq->fw_handle);
hret = ehea_destroy_eq_res(eq, FORCE_FREE); hret = ehea_destroy_eq_res(eq, FORCE_FREE);
} }
@ -546,7 +545,8 @@ int ehea_destroy_qp(struct ehea_qp *qp)
hcp_epas_dtor(&qp->epas); hcp_epas_dtor(&qp->epas);
if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) { hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
if (hret == H_R_STATE) {
ehea_error_data(qp->adapter, qp->fw_handle); ehea_error_data(qp->adapter, qp->fw_handle);
hret = ehea_destroy_qp_res(qp, FORCE_FREE); hret = ehea_destroy_qp_res(qp, FORCE_FREE);
} }

View file

@ -41,8 +41,8 @@
#define EHEA_SECTSIZE (1UL << 24) #define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE #if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module can't work if kernel sectionsize < ehea sectionsize #error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif #endif
/* Some abbreviations used here: /* Some abbreviations used here:

View file

@ -13,7 +13,7 @@
* Copyright (C) 2004 Andrew de Quincey (wol support) * Copyright (C) 2004 Andrew de Quincey (wol support)
* Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
* IRQ rate fixes, bigendian fixes, cleanups, verification) * IRQ rate fixes, bigendian fixes, cleanups, verification)
* Copyright (c) 2004,5,6 NVIDIA Corporation * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
@ -226,7 +226,7 @@ enum {
#define NVREG_MISC1_HD 0x02 #define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c #define NVREG_MISC1_FORCE 0x3b0f3c
NvRegMacReset = 0x3c, NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3 #define NVREG_MAC_RESET_ASSERT 0x0F3
NvRegTransmitterControl = 0x084, NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01 #define NVREG_XMITCTL_START 0x01
@ -277,7 +277,9 @@ enum {
#define NVREG_MCASTADDRA_FORCE 0x01 #define NVREG_MCASTADDRA_FORCE 0x01
NvRegMulticastAddrB = 0xB4, NvRegMulticastAddrB = 0xB4,
NvRegMulticastMaskA = 0xB8, NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
NvRegMulticastMaskB = 0xBC, NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff
NvRegPhyInterface = 0xC0, NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000 #define PHY_RGMII 0x10000000
@ -316,8 +318,8 @@ enum {
NvRegTxRingPhysAddrHigh = 0x148, NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C, NvRegRxRingPhysAddrHigh = 0x14C,
NvRegTxPauseFrame = 0x170, NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 #define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 #define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010
NvRegMIIStatus = 0x180, NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001 #define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008 #define NVREG_MIISTAT_LINKCHANGE 0x0008
@ -471,9 +473,9 @@ union ring_type {
#define NV_RX_AVAIL (1<<31) #define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000) #define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUMOK1 (0x10000000) #define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUMOK2 (0x14000000) #define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUMOK3 (0x18000000) #define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29) #define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25) #define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18) #define NV_RX2_ERROR1 (1<<18)
@ -2375,14 +2377,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
goto next_pkt; goto next_pkt;
} }
} }
if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
(flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
} else { } else {
dev_kfree_skb(skb); dev_kfree_skb(skb);
goto next_pkt; goto next_pkt;
@ -2474,14 +2471,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
} }
} }
if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
(flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
/* got a valid packet - forward it to the network core */ /* got a valid packet - forward it to the network core */
skb_put(skb, len); skb_put(skb, len);
@ -2703,6 +2695,9 @@ static void nv_set_multicast(struct net_device *dev)
addr[1] = alwaysOn[1]; addr[1] = alwaysOn[1];
mask[0] = alwaysOn[0] | alwaysOff[0]; mask[0] = alwaysOn[0] | alwaysOff[0];
mask[1] = alwaysOn[1] | alwaysOff[1]; mask[1] = alwaysOn[1] | alwaysOff[1];
} else {
mask[0] = NVREG_MCASTMASKA_NONE;
mask[1] = NVREG_MCASTMASKB_NONE;
} }
} }
addr[0] |= NVREG_MCASTADDRA_FORCE; addr[0] |= NVREG_MCASTADDRA_FORCE;
@ -4813,8 +4808,8 @@ static int nv_open(struct net_device *dev)
nv_mac_reset(dev); nv_mac_reset(dev);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB); writel(0, base + NvRegMulticastAddrB);
writel(0, base + NvRegMulticastMaskA); writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
writel(0, base + NvRegMulticastMaskB); writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(0, base + NvRegPacketFilterFlags); writel(0, base + NvRegPacketFilterFlags);
writel(0, base + NvRegTransmitterControl); writel(0, base + NvRegTransmitterControl);
@ -4908,8 +4903,8 @@ static int nv_open(struct net_device *dev)
spin_lock_irq(&np->lock); spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB); writel(0, base + NvRegMulticastAddrB);
writel(0, base + NvRegMulticastMaskA); writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
writel(0, base + NvRegMulticastMaskB); writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
/* One manual link speed update: Interrupts are enabled, future link /* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq(). * speed changes cause interrupts and are handled by nv_link_irq().
@ -5603,35 +5598,35 @@ static struct pci_device_id pci_tbl[] = {
}, },
{ /* MCP77 Ethernet Controller */ { /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{ /* MCP77 Ethernet Controller */ { /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{ /* MCP77 Ethernet Controller */ { /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{ /* MCP77 Ethernet Controller */ { /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{ /* MCP79 Ethernet Controller */ { /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
}, },
{0,}, {0,},
}; };
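
As a side note on the checksum rework above: with the NV_RX2_CHECKSUMOK* constants renamed to NV_RX2_CHECKSUM_IP/_IP_TCP/_IP_UDP, the receive path now sets CHECKSUM_UNNECESSARY only for the two L4-validated encodings. A minimal standalone sketch of that test (constants copied from the driver, the main() harness is purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define NV_RX2_CHECKSUMMASK     0x1C000000u
#define NV_RX2_CHECKSUM_IP      0x10000000u
#define NV_RX2_CHECKSUM_IP_TCP  0x14000000u
#define NV_RX2_CHECKSUM_IP_UDP  0x18000000u

/* Mirrors the new test: checksum offload is trusted only when the NIC
 * validated an IP+TCP or IP+UDP checksum. */
static int nv_rx_csum_ok(uint32_t flags)
{
    uint32_t csum = flags & NV_RX2_CHECKSUMMASK;

    return csum == NV_RX2_CHECKSUM_IP_TCP || csum == NV_RX2_CHECKSUM_IP_UDP;
}

int main(void)
{
    printf("tcp=%d udp=%d ip-only=%d\n",
           nv_rx_csum_ok(NV_RX2_CHECKSUM_IP_TCP),   /* 1 */
           nv_rx_csum_ok(NV_RX2_CHECKSUM_IP_UDP),   /* 1 */
           nv_rx_csum_ok(NV_RX2_CHECKSUM_IP));      /* 0: IP-only no longer qualifies */
    return 0;
}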

View file

@ -901,12 +901,12 @@ static short ibmlana_adapter_ids[] __initdata = {
0x0000 0x0000
}; };
static char *ibmlana_adapter_names[] __initdata = { static char *ibmlana_adapter_names[] __devinitdata = {
"IBM LAN Adapter/A", "IBM LAN Adapter/A",
NULL NULL
}; };
static int ibmlana_init_one(struct device *kdev) static int __devinit ibmlana_init_one(struct device *kdev)
{ {
struct mca_device *mdev = to_mca_device(kdev); struct mca_device *mdev = to_mca_device(kdev);
struct net_device *dev; struct net_device *dev;

View file

@ -438,7 +438,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (adapter->msix_entries) { if (adapter->msix_entries) {
err = igb_request_msix(adapter); err = igb_request_msix(adapter);
if (!err) { if (!err) {
struct e1000_hw *hw = &adapter->hw;
/* enable IAM, auto-mask, /* enable IAM, auto-mask,
* DO NOT USE EIAME or IAME in legacy mode */ * DO NOT USE EIAME or IAME in legacy mode */
wr32(E1000_IAM, IMS_ENABLE_MASK); wr32(E1000_IAM, IMS_ENABLE_MASK);

View file

@ -1084,7 +1084,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, if_mii(rq), cmd); return phy_mii_ioctl(phydev, if_mii(rq), cmd);
} }
static int __devinit macb_probe(struct platform_device *pdev) static int __init macb_probe(struct platform_device *pdev)
{ {
struct eth_platform_data *pdata; struct eth_platform_data *pdata;
struct resource *regs; struct resource *regs;
@ -1248,7 +1248,7 @@ err_out:
return err; return err;
} }
static int __devexit macb_remove(struct platform_device *pdev) static int __exit macb_remove(struct platform_device *pdev)
{ {
struct net_device *dev; struct net_device *dev;
struct macb *bp; struct macb *bp;
@ -1276,8 +1276,7 @@ static int __devexit macb_remove(struct platform_device *pdev)
} }
static struct platform_driver macb_driver = { static struct platform_driver macb_driver = {
.probe = macb_probe, .remove = __exit_p(macb_remove),
.remove = __devexit_p(macb_remove),
.driver = { .driver = {
.name = "macb", .name = "macb",
}, },
@ -1285,7 +1284,7 @@ static struct platform_driver macb_driver = {
static int __init macb_init(void) static int __init macb_init(void)
{ {
return platform_driver_register(&macb_driver); return platform_driver_probe(&macb_driver, macb_probe);
} }
static void __exit macb_exit(void) static void __exit macb_exit(void)
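
The macb change above switches from platform_driver_register() to platform_driver_probe(), which allows the probe routine to be marked __init and discarded after boot, since the driver can never bind to a device that appears later. A minimal sketch of that registration pattern, using a hypothetical "demo" device name:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init demo_probe(struct platform_device *pdev)
{
    /* one-shot probing; resources would be claimed here */
    return 0;
}

static int __exit demo_remove(struct platform_device *pdev)
{
    return 0;
}

static struct platform_driver demo_driver = {
    /* note: no .probe member; the probe routine is passed at register time */
    .remove = __exit_p(demo_remove),
    .driver = {
        .name = "demo",
    },
};

static int __init demo_init(void)
{
    /* binds only to already-registered devices; probe() is then discarded */
    return platform_driver_probe(&demo_driver, demo_probe);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
    platform_driver_unregister(&demo_driver);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");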

View file

@ -4,8 +4,6 @@
* for more details. * for more details.
*/ */
#define DEBUG
#include <linux/init.h> #include <linux/init.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
@ -15,11 +13,93 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <asm/mips-boards/simint.h> #include <asm/mips-boards/simint.h>
#include "mipsnet.h" /* actual device IO mapping */ #define MIPSNET_VERSION "2007-11-17"
#define MIPSNET_VERSION "2005-06-20" /*
* Net status/control block as seen by sw in the core.
*/
struct mipsnet_regs {
/*
* Device info for probing, reads as MIPSNET%d where %d is some
* form of version.
*/
u64 devId; /*0x00 */
#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) /*
* read only busy flag.
* Set and cleared by the Net Device to indicate that an rx or a tx
* is in progress.
*/
u32 busy; /*0x08 */
/*
* Set by the Net Device.
* The device will set it once data has been received.
* The value is the number of bytes that should be read from
* rxDataBuffer. The value decreases towards 0 as the data
* from rxDataBuffer is read.
*/
u32 rxDataCount; /*0x0c */
#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
/*
* Settable from the MIPS core, cleared by the Net Device.
* The core should set the number of bytes it wants to send,
* then it should write those bytes of data to txDataBuffer.
* The device will clear txDataCount once the data has been
* processed (not necessarily sent).
*/
u32 txDataCount; /*0x10 */
/*
* Interrupt control
*
* Used to clear the interrupt generated by this dev.
* Write a 1 to clear the interrupt. (except bit31).
*
* Bit0 is set if it was a tx-done interrupt.
* Bit1 is set when new rx-data is available.
* Until this bit is cleared there will be no other RXs.
*
* Bit31 is used for testing, it clears after a read.
* Writing 1 to this bit will cause an interrupt to be generated.
* To clear the test interrupt, write 0 to this register.
*/
u32 interruptControl; /*0x14 */
#define MIPSNET_INTCTL_TXDONE (1u << 0)
#define MIPSNET_INTCTL_RXDONE (1u << 1)
#define MIPSNET_INTCTL_TESTBIT (1u << 31)
/*
* Readonly core-specific interrupt info for the device to signal
* the core. The meaning of the contents of this field might change.
*/
/* XXX: the whole memIntf interrupt scheme is messy: the device
* should have no control what so ever of what VPE/register set is
* being used.
* The MemIntf should only expose interrupt lines, and something in
* the config should be responsible for the line<->core/vpe bindings.
*/
u32 interruptInfo; /*0x18 */
/*
* This is where the received data is read out.
* There is more data to read until rxDataReady is 0.
* Only 1 byte at this regs offset is used.
*/
u32 rxDataBuffer; /*0x1c */
/*
* This is where the data to transmit is written.
* Data should be written for the amount specified in the
* txDataCount register.
* Only 1 byte at this regs offset is used.
*/
u32 txDataBuffer; /*0x20 */
};
#define regaddr(dev, field) \
(dev->base_addr + offsetof(struct mipsnet_regs, field))
static char mipsnet_string[] = "mipsnet"; static char mipsnet_string[] = "mipsnet";
@ -29,32 +109,27 @@ static char mipsnet_string[] = "mipsnet";
static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
int len) int len)
{ {
uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
if (available_len < len)
return -EFAULT;
for (; len > 0; len--, kdata++) for (; len > 0; len--, kdata++)
*kdata = inb(mipsnet_reg_address(dev, rxDataBuffer)); *kdata = inb(regaddr(dev, rxDataBuffer));
return inl(mipsnet_reg_address(dev, rxDataCount)); return inl(regaddr(dev, rxDataCount));
} }
static inline ssize_t mipsnet_put_todevice(struct net_device *dev, static inline void mipsnet_put_todevice(struct net_device *dev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
int count_to_go = skb->len; int count_to_go = skb->len;
char *buf_ptr = skb->data; char *buf_ptr = skb->data;
outl(skb->len, mipsnet_reg_address(dev, txDataCount)); outl(skb->len, regaddr(dev, txDataCount));
for (; count_to_go; buf_ptr++, count_to_go--) for (; count_to_go; buf_ptr++, count_to_go--)
outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); outb(*buf_ptr, regaddr(dev, txDataBuffer));
dev->stats.tx_packets++; dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len; dev->stats.tx_bytes += skb->len;
return skb->len; dev_kfree_skb(skb);
} }
static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
@ -69,18 +144,20 @@ static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
return 0; return 0;
} }
static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
{ {
struct sk_buff *skb; struct sk_buff *skb;
size_t len = count;
skb = alloc_skb(len + 2, GFP_KERNEL); if (!len)
return len;
skb = dev_alloc_skb(len + NET_IP_ALIGN);
if (!skb) { if (!skb) {
dev->stats.rx_dropped++; dev->stats.rx_dropped++;
return -ENOMEM; return -ENOMEM;
} }
skb_reserve(skb, 2); skb_reserve(skb, NET_IP_ALIGN);
if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
return -EFAULT; return -EFAULT;
@ -92,50 +169,42 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
dev->stats.rx_packets++; dev->stats.rx_packets++;
dev->stats.rx_bytes += len; dev->stats.rx_bytes += len;
return count; return len;
} }
static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
{ {
struct net_device *dev = dev_id; struct net_device *dev = dev_id;
u32 int_flags;
irqreturn_t ret = IRQ_NONE;
irqreturn_t retval = IRQ_NONE; if (irq != dev->irq)
uint64_t interruptFlags; goto out_badirq;
if (irq == dev->irq) { /* TESTBIT is cleared on read. */
retval = IRQ_HANDLED; int_flags = inl(regaddr(dev, interruptControl));
if (int_flags & MIPSNET_INTCTL_TESTBIT) {
interruptFlags = /* TESTBIT takes effect after a write with 0. */
inl(mipsnet_reg_address(dev, interruptControl)); outl(0, regaddr(dev, interruptControl));
ret = IRQ_HANDLED;
if (interruptFlags & MIPSNET_INTCTL_TXDONE) { } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
outl(MIPSNET_INTCTL_TXDONE, /* Only one packet at a time, we are done. */
mipsnet_reg_address(dev, interruptControl)); dev->stats.tx_packets++;
/* only one packet at a time, we are done. */
netif_wake_queue(dev); netif_wake_queue(dev);
} else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { outl(MIPSNET_INTCTL_TXDONE,
mipsnet_get_fromdev(dev, regaddr(dev, interruptControl));
inl(mipsnet_reg_address(dev, rxDataCount))); ret = IRQ_HANDLED;
outl(MIPSNET_INTCTL_RXDONE, } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
mipsnet_reg_address(dev, interruptControl)); mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
} else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { ret = IRQ_HANDLED;
/*
* TESTBIT is cleared on read.
* And takes effect after a write with 0
*/
outl(0, mipsnet_reg_address(dev, interruptControl));
} else {
/* Maybe shared IRQ, just ignore, no clearing. */
retval = IRQ_NONE;
} }
return ret;
} else { out_badirq:
printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
dev->name, __FUNCTION__, irq); dev->name, __FUNCTION__, irq);
retval = IRQ_NONE; return ret;
}
return retval;
} }
static int mipsnet_open(struct net_device *dev) static int mipsnet_open(struct net_device *dev)
@ -144,18 +213,15 @@ static int mipsnet_open(struct net_device *dev)
err = request_irq(dev->irq, &mipsnet_interrupt, err = request_irq(dev->irq, &mipsnet_interrupt,
IRQF_SHARED, dev->name, (void *) dev); IRQF_SHARED, dev->name, (void *) dev);
if (err) { if (err) {
release_region(dev->base_addr, MIPSNET_IO_EXTENT); release_region(dev->base_addr, sizeof(struct mipsnet_regs));
return err; return err;
} }
netif_start_queue(dev); netif_start_queue(dev);
/* test interrupt handler */ /* test interrupt handler */
outl(MIPSNET_INTCTL_TESTBIT, outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
mipsnet_reg_address(dev, interruptControl));
return 0; return 0;
} }
@ -163,7 +229,7 @@ static int mipsnet_open(struct net_device *dev)
static int mipsnet_close(struct net_device *dev) static int mipsnet_close(struct net_device *dev)
{ {
netif_stop_queue(dev); netif_stop_queue(dev);
free_irq(dev->irq, dev);
return 0; return 0;
} }
@ -194,10 +260,11 @@ static int __init mipsnet_probe(struct device *dev)
*/ */
netdev->base_addr = 0x4200; netdev->base_addr = 0x4200;
netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
inl(mipsnet_reg_address(netdev, interruptInfo)); inl(regaddr(netdev, interruptInfo));
/* Get the io region now, get irq on open() */ /* Get the io region now, get irq on open() */
if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
"mipsnet")) {
err = -EBUSY; err = -EBUSY;
goto out_free_netdev; goto out_free_netdev;
} }
@ -217,7 +284,7 @@ static int __init mipsnet_probe(struct device *dev)
return 0; return 0;
out_free_region: out_free_region:
release_region(netdev->base_addr, MIPSNET_IO_EXTENT); release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
out_free_netdev: out_free_netdev:
free_netdev(netdev); free_netdev(netdev);
@ -231,7 +298,7 @@ static int __devexit mipsnet_device_remove(struct device *device)
struct net_device *dev = dev_get_drvdata(device); struct net_device *dev = dev_get_drvdata(device);
unregister_netdev(dev); unregister_netdev(dev);
release_region(dev->base_addr, MIPSNET_IO_EXTENT); release_region(dev->base_addr, sizeof(struct mipsnet_regs));
free_netdev(dev); free_netdev(dev);
dev_set_drvdata(device, NULL); dev_set_drvdata(device, NULL);
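
The rewritten mipsnet driver above replaces the hand-maintained field_offset() table from the deleted header with a regaddr() macro built on offsetof() over struct mipsnet_regs, so register offsets follow the structure layout automatically. A small hosted-C sketch of the idea, written over a plain base address rather than a net_device; the offsets match the /*0xNN*/ annotations on common ABIs, and 0x4200 is the base address used by the probe routine:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mipsnet_regs {
    uint64_t devId;             /* 0x00 */
    uint32_t busy;              /* 0x08 */
    uint32_t rxDataCount;       /* 0x0c */
    uint32_t txDataCount;       /* 0x10 */
    uint32_t interruptControl;  /* 0x14 */
    uint32_t interruptInfo;     /* 0x18 */
    uint32_t rxDataBuffer;      /* 0x1c */
    uint32_t txDataBuffer;      /* 0x20 */
};

#define regaddr(base, field) ((base) + offsetof(struct mipsnet_regs, field))

int main(void)
{
    unsigned long base = 0x4200;

    printf("rxDataCount at %#lx\n", regaddr(base, rxDataCount));   /* 0x420c */
    printf("txDataBuffer at %#lx\n", regaddr(base, txDataBuffer)); /* 0x4220 */
    return 0;
}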

View file

@ -1,112 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __MIPSNET_H
#define __MIPSNET_H
/*
* Id of this Net device, as seen by the core.
*/
#define MIPS_NET_DEV_ID ((uint64_t) \
((uint64_t) 'M' << 0)| \
((uint64_t) 'I' << 8)| \
((uint64_t) 'P' << 16)| \
((uint64_t) 'S' << 24)| \
((uint64_t) 'N' << 32)| \
((uint64_t) 'E' << 40)| \
((uint64_t) 'T' << 48)| \
((uint64_t) '0' << 56))
/*
* Net status/control block as seen by sw in the core.
* (Why not use bit fields? can't be bothered with cross-platform struct
* packing.)
*/
struct net_control_block {
/*
* dev info for probing
* reads as MIPSNET%d where %d is some form of version
*/
uint64_t devId; /* 0x00 */
/*
* read only busy flag.
* Set and cleared by the Net Device to indicate that an rx or a tx
* is in progress.
*/
uint32_t busy; /* 0x08 */
/*
* Set by the Net Device.
* The device will set it once data has been received.
* The value is the number of bytes that should be read from
* rxDataBuffer. The value will decrease till 0 until all the data
* from rxDataBuffer has been read.
*/
uint32_t rxDataCount; /* 0x0c */
#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
/*
* Settable from the MIPS core, cleared by the Net Device. The core
* should set the number of bytes it wants to send, then it should
* write those bytes of data to txDataBuffer. The device will clear
* txDataCount has been processed (not necessarily sent).
*/
uint32_t txDataCount; /* 0x10 */
/*
* Interrupt control
*
* Used to clear the interrupted generated by this dev.
* Write a 1 to clear the interrupt. (except bit31).
*
* Bit0 is set if it was a tx-done interrupt.
* Bit1 is set when new rx-data is available.
* Until this bit is cleared there will be no other RXs.
*
* Bit31 is used for testing, it clears after a read.
* Writing 1 to this bit will cause an interrupt to be generated.
* To clear the test interrupt, write 0 to this register.
*/
uint32_t interruptControl; /*0x14 */
#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0))
#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1))
#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31))
#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \
MIPSNET_INTCTL_RXDONE | \
MIPSNET_INTCTL_TESTBIT)
/*
* Readonly core-specific interrupt info for the device to signal the
* core. The meaning of the contents of this field might change.
*
* TODO: the whole memIntf interrupt scheme is messy: the device should
* have no control what so ever of what VPE/register set is being
* used. The MemIntf should only expose interrupt lines, and
* something in the config should be responsible for the
* line<->core/vpe bindings.
*/
uint32_t interruptInfo; /* 0x18 */
/*
* This is where the received data is read out.
* There is more data to read until rxDataReady is 0.
* Only 1 byte at this regs offset is used.
*/
uint32_t rxDataBuffer; /* 0x1c */
/*
* This is where the data to transmit is written. Data should be
* written for the amount specified in the txDataCount register. Only
* 1 byte at this regs offset is used.
*/
uint32_t txDataBuffer; /* 0x20 */
};
#define MIPSNET_IO_EXTENT 0x40 /* being generous */
#define field_offset(field) (offsetof(struct net_control_block, field))
#endif /* __MIPSNET_H */

View file

@ -203,22 +203,8 @@ skbuff at an offset of "+2", 16-byte aligning the IP header.
IIId. Synchronization IIId. Synchronization
Most operations are synchronized on the np->lock irq spinlock, except the Most operations are synchronized on the np->lock irq spinlock, except the
performance critical codepaths: receive and transmit paths which are synchronised using a combination of
hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
The rx process only runs in the interrupt handler. Access from outside
the interrupt handler is only permitted after disable_irq().
The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
is set, then access is permitted under spin_lock_irq(&np->lock).
Thus configuration functions that want to access everything must call
disable_irq(dev->irq);
netif_tx_lock_bh(dev);
spin_lock_irq(&np->lock);
IV. Notes
NatSemi PCI network controllers are very uncommon.
IVb. References IVb. References

View file

@ -62,6 +62,10 @@
#define LRO_MAX_AGGR 64 #define LRO_MAX_AGGR 64
#define PE_MIN_MTU 64
#define PE_MAX_MTU 1500
#define PE_DEF_MTU ETH_DATA_LEN
#define DEFAULT_MSG_ENABLE \ #define DEFAULT_MSG_ENABLE \
(NETIF_MSG_DRV | \ (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \ NETIF_MSG_PROBE | \
@ -82,8 +86,6 @@
& ((ring)->size - 1)) & ((ring)->size - 1))
#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
@ -175,6 +177,24 @@ static int mac_to_intf(struct pasemi_mac *mac)
return -1; return -1;
} }
static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
{
unsigned int flags;
flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
flags &= ~PAS_MAC_CFG_PCFG_PE;
write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
{
unsigned int flags;
flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
flags |= PAS_MAC_CFG_PCFG_PE;
write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static int pasemi_get_mac_addr(struct pasemi_mac *mac) static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{ {
struct pci_dev *pdev = mac->pdev; struct pci_dev *pdev = mac->pdev;
@ -221,6 +241,33 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return 0; return 0;
} }
static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
{
struct pasemi_mac *mac = netdev_priv(dev);
struct sockaddr *addr = p;
unsigned int adr0, adr1;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
adr0 = dev->dev_addr[2] << 24 |
dev->dev_addr[3] << 16 |
dev->dev_addr[4] << 8 |
dev->dev_addr[5];
adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
adr1 &= ~0xffff;
adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];
pasemi_mac_intf_disable(mac);
write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
pasemi_mac_intf_enable(mac);
return 0;
}
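
The ADR0/ADR1 packing done by the new pasemi_mac_set_mac_addr() above is plain byte-shifting: the low four octets of the MAC address go into ADR0 and the top two replace the low 16 bits of ADR1. A standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    uint32_t adr0, adr1 = 0xdead0000;   /* pretend old ADR1 contents */

    adr0 = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
    adr1 = (adr1 & ~0xffffu) | (mac[0] << 8) | mac[1];

    printf("ADR0 = %08x\n", adr0);      /* 22334455 */
    printf("ADR1 = %08x\n", adr1);      /* dead0011 */
    return 0;
}
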
static int get_skb_hdr(struct sk_buff *skb, void **iphdr, static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
void **tcph, u64 *hdr_flags, void *data) void **tcph, u64 *hdr_flags, void *data)
{ {
@ -453,7 +500,7 @@ static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
} }
static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
{ {
struct pasemi_mac_rxring *rx = rx_ring(mac); struct pasemi_mac_rxring *rx = rx_ring(mac);
unsigned int i; unsigned int i;
@ -473,7 +520,12 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
} }
for (i = 0; i < RX_RING_SIZE; i++) for (i = 0; i < RX_RING_SIZE; i++)
RX_DESC(rx, i) = 0; RX_BUFF(rx, i) = 0;
}
static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
pasemi_mac_free_rx_buffers(mac);
dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);
@ -503,14 +555,14 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
/* Entry in use? */ /* Entry in use? */
WARN_ON(*buff); WARN_ON(*buff);
skb = dev_alloc_skb(BUF_SIZE); skb = dev_alloc_skb(mac->bufsz);
skb_reserve(skb, LOCAL_SKB_ALIGN); skb_reserve(skb, LOCAL_SKB_ALIGN);
if (unlikely(!skb)) if (unlikely(!skb))
break; break;
dma = pci_map_single(mac->dma_pdev, skb->data, dma = pci_map_single(mac->dma_pdev, skb->data,
BUF_SIZE - LOCAL_SKB_ALIGN, mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(dma))) { if (unlikely(dma_mapping_error(dma))) {
@ -520,7 +572,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
info->skb = skb; info->skb = skb;
info->dma = dma; info->dma = dma;
*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
fill++; fill++;
} }
@ -650,7 +702,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN, pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
if (macrx & XCT_MACRX_CRC) { if (macrx & XCT_MACRX_CRC) {
@ -874,24 +926,6 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
{
unsigned int flags;
flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
flags &= ~PAS_MAC_CFG_PCFG_PE;
write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
{
unsigned int flags;
flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
flags |= PAS_MAC_CFG_PCFG_PE;
write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static void pasemi_adjust_link(struct net_device *dev) static void pasemi_adjust_link(struct net_device *dev)
{ {
struct pasemi_mac *mac = netdev_priv(dev); struct pasemi_mac *mac = netdev_priv(dev);
@ -1148,11 +1182,71 @@ out_rx_resources:
#define MAX_RETRIES 5000 #define MAX_RETRIES 5000
static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
{
unsigned int sta, retries;
int txch = tx_ring(mac)->chan.chno;
write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
PAS_DMA_TXCHAN_TCMDSTA_ST);
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
break;
cond_resched();
}
if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
dev_err(&mac->dma_pdev->dev,
"Failed to stop tx channel, tcmdsta %08x\n", sta);
write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
}
static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
{
unsigned int sta, retries;
int rxch = rx_ring(mac)->chan.chno;
write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
PAS_DMA_RXCHAN_CCMDSTA_ST);
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
break;
cond_resched();
}
if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
dev_err(&mac->dma_pdev->dev,
"Failed to stop rx channel, ccmdsta 08%x\n", sta);
write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
}
static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
{
unsigned int sta, retries;
write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
PAS_DMA_RXINT_RCMDSTA_ST);
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
break;
cond_resched();
}
if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
dev_err(&mac->dma_pdev->dev,
"Failed to stop rx interface, rcmdsta %08x\n", sta);
write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
}
static int pasemi_mac_close(struct net_device *dev) static int pasemi_mac_close(struct net_device *dev)
{ {
struct pasemi_mac *mac = netdev_priv(dev); struct pasemi_mac *mac = netdev_priv(dev);
unsigned int sta; unsigned int sta;
int retries;
int rxch, txch; int rxch, txch;
rxch = rx_ring(mac)->chan.chno; rxch = rx_ring(mac)->chan.chno;
@ -1190,51 +1284,10 @@ static int pasemi_mac_close(struct net_device *dev)
pasemi_mac_clean_tx(tx_ring(mac)); pasemi_mac_clean_tx(tx_ring(mac));
pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
/* Disable interface */ pasemi_mac_pause_txchan(mac);
write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), pasemi_mac_pause_rxint(mac);
PAS_DMA_TXCHAN_TCMDSTA_ST); pasemi_mac_pause_rxchan(mac);
write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if), pasemi_mac_intf_disable(mac);
PAS_DMA_RXINT_RCMDSTA_ST);
write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
PAS_DMA_RXCHAN_CCMDSTA_ST);
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch));
if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
break;
cond_resched();
}
if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
break;
cond_resched();
}
if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
break;
cond_resched();
}
if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
/* Then, disable the channel. This must be done separately from
* stopping, since you can't disable when active.
*/
write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
free_irq(mac->tx->chan.irq, mac->tx); free_irq(mac->tx->chan.irq, mac->tx);
free_irq(mac->rx->chan.irq, mac->rx); free_irq(mac->rx->chan.irq, mac->rx);
@ -1388,6 +1441,62 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
return pkts; return pkts;
} }
static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
{
struct pasemi_mac *mac = netdev_priv(dev);
unsigned int reg;
unsigned int rcmdsta;
int running;
if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
return -EINVAL;
running = netif_running(dev);
if (running) {
/* Need to stop the interface, clean out all already
* received buffers, free all unused buffers on the RX
* interface ring, then finally re-fill the rx ring with
* the new-size buffers and restart.
*/
napi_disable(&mac->napi);
netif_tx_disable(dev);
pasemi_mac_intf_disable(mac);
rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
pasemi_mac_pause_rxint(mac);
pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
pasemi_mac_free_rx_buffers(mac);
}
/* Change maxf, i.e. what size frames are accepted.
* Need room for ethernet header and CRC word
*/
reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
dev->mtu = new_mtu;
/* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
if (running) {
write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
rx_ring(mac)->next_to_fill = 0;
pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);
napi_enable(&mac->napi);
netif_start_queue(dev);
pasemi_mac_intf_enable(mac);
}
return 0;
}
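
The sizing arithmetic used by pasemi_mac_change_mtu() above is worth spelling out: the MAXF field must cover the Ethernet header plus the 4-byte FCS, while the receive buffer additionally reserves the skb alignment slack and two 64-byte cachelines. A quick sketch, assuming LOCAL_SKB_ALIGN is 2 (its definition is not shown in this hunk):

#include <stdio.h>

#define ETH_HLEN        14
#define ETH_FCS_LEN     4
#define LOCAL_SKB_ALIGN 2   /* assumption for illustration */

int main(void)
{
    int mtu = 1500;         /* PE_DEF_MTU */
    int maxf = mtu + ETH_HLEN + 4;
    int bufsz = mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

    printf("maxf  = %d\n", maxf);   /* 1518 */
    printf("bufsz = %d\n", bufsz);  /* 1648 */
    return 0;
}
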
static int __devinit static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
@ -1475,6 +1584,12 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stop = pasemi_mac_close; dev->stop = pasemi_mac_close;
dev->hard_start_xmit = pasemi_mac_start_tx; dev->hard_start_xmit = pasemi_mac_start_tx;
dev->set_multicast_list = pasemi_mac_set_rx_mode; dev->set_multicast_list = pasemi_mac_set_rx_mode;
dev->set_mac_address = pasemi_mac_set_mac_addr;
dev->mtu = PE_DEF_MTU;
/* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
dev->change_mtu = pasemi_mac_change_mtu;
if (err) if (err)
goto out; goto out;

View file

@ -59,6 +59,7 @@ struct pasemi_mac {
struct phy_device *phydev; struct phy_device *phydev;
struct napi_struct napi; struct napi_struct napi;
int bufsz; /* RX ring buffer size */
u8 type; u8 type;
#define MAC_TYPE_GMAC 1 #define MAC_TYPE_GMAC 1
#define MAC_TYPE_XAUI 2 #define MAC_TYPE_XAUI 2
@ -96,6 +97,9 @@ struct pasemi_mac_buffer {
/* MAC CFG register offsets */ /* MAC CFG register offsets */
enum { enum {
PAS_MAC_CFG_PCFG = 0x80, PAS_MAC_CFG_PCFG = 0x80,
PAS_MAC_CFG_MACCFG = 0x84,
PAS_MAC_CFG_ADR0 = 0x8c,
PAS_MAC_CFG_ADR1 = 0x90,
PAS_MAC_CFG_TXP = 0x98, PAS_MAC_CFG_TXP = 0x98,
PAS_MAC_IPC_CHNL = 0x208, PAS_MAC_IPC_CHNL = 0x208,
}; };
@ -130,6 +134,18 @@ enum {
#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001 #define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002 #define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003 #define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
#define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000
#define PAS_MAC_CFG_MACCFG_TXT_S 28
#define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000
#define PAS_MAC_CFG_MACCFG_PRES_S 24
#define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00
#define PAS_MAC_CFG_MACCFG_MAXF_S 8
#define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \
PAS_MAC_CFG_MACCFG_MAXF_M)
#define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff
#define PAS_MAC_CFG_MACCFG_MINF_S 0
#define PAS_MAC_CFG_TXP_FCF 0x01000000 #define PAS_MAC_CFG_TXP_FCF 0x01000000
#define PAS_MAC_CFG_TXP_FCE 0x00800000 #define PAS_MAC_CFG_TXP_FCE 0x00800000
#define PAS_MAC_CFG_TXP_FC 0x00400000 #define PAS_MAC_CFG_TXP_FC 0x00400000

View file

@ -541,7 +541,7 @@ static void netdrv_hw_start (struct net_device *dev);
#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0) #define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
#if MMIO_FLUSH_AUDIT_COMPLETE #ifdef MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */ /* write MMIO register */
#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg)) #define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg))
@ -603,7 +603,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
return -ENOMEM; return -ENOMEM;
} }
SET_NETDEV_DEV(dev, &pdev->dev); SET_NETDEV_DEV(dev, &pdev->dev);
tp = dev->priv; tp = netdev_priv(dev);
/* enable device (incl. PCI PM wakeup), and bus-mastering */ /* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device (pdev); rc = pci_enable_device (pdev);
@ -759,7 +759,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
return i; return i;
} }
tp = dev->priv; tp = netdev_priv(dev);
assert (ioaddr != NULL); assert (ioaddr != NULL);
assert (dev != NULL); assert (dev != NULL);
@ -783,7 +783,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
dev->base_addr = (unsigned long) ioaddr; dev->base_addr = (unsigned long) ioaddr;
/* dev->priv/tp zeroed and aligned in alloc_etherdev */ /* dev->priv/tp zeroed and aligned in alloc_etherdev */
tp = dev->priv; tp = netdev_priv(dev);
/* note: tp->chipset set in netdrv_init_board */ /* note: tp->chipset set in netdrv_init_board */
tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@ -841,7 +841,7 @@ static void __devexit netdrv_remove_one (struct pci_dev *pdev)
assert (dev != NULL); assert (dev != NULL);
np = dev->priv; np = netdev_priv(dev);
assert (np != NULL); assert (np != NULL);
unregister_netdev (dev); unregister_netdev (dev);
@ -974,7 +974,7 @@ static void mdio_sync (void *mdio_addr)
static int mdio_read (struct net_device *dev, int phy_id, int location) static int mdio_read (struct net_device *dev, int phy_id, int location)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4; void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int retval = 0; int retval = 0;
@ -1017,7 +1017,7 @@ static int mdio_read (struct net_device *dev, int phy_id, int location)
static void mdio_write (struct net_device *dev, int phy_id, int location, static void mdio_write (struct net_device *dev, int phy_id, int location,
int value) int value)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4; void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd = int mii_cmd =
(0x5002 << 16) | (phy_id << 23) | (location << 18) | value; (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
@ -1060,7 +1060,7 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
static int netdrv_open (struct net_device *dev) static int netdrv_open (struct net_device *dev)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
int retval; int retval;
#ifdef NETDRV_DEBUG #ifdef NETDRV_DEBUG
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
@ -1121,7 +1121,7 @@ static int netdrv_open (struct net_device *dev)
/* Start the hardware at open or resume. */ /* Start the hardware at open or resume. */
static void netdrv_hw_start (struct net_device *dev) static void netdrv_hw_start (struct net_device *dev)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
u32 i; u32 i;
@ -1191,7 +1191,7 @@ static void netdrv_hw_start (struct net_device *dev)
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void netdrv_init_ring (struct net_device *dev) static void netdrv_init_ring (struct net_device *dev)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
int i; int i;
DPRINTK ("ENTER\n"); DPRINTK ("ENTER\n");
@ -1213,7 +1213,7 @@ static void netdrv_init_ring (struct net_device *dev)
static void netdrv_timer (unsigned long data) static void netdrv_timer (unsigned long data)
{ {
struct net_device *dev = (struct net_device *) data; struct net_device *dev = (struct net_device *) data;
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
int next_tick = 60 * HZ; int next_tick = 60 * HZ;
int mii_lpa; int mii_lpa;
@ -1252,9 +1252,10 @@ static void netdrv_timer (unsigned long data)
} }
static void netdrv_tx_clear (struct netdrv_private *tp) static void netdrv_tx_clear (struct net_device *dev)
{ {
int i; int i;
struct netdrv_private *tp = netdev_priv(dev);
atomic_set (&tp->cur_tx, 0); atomic_set (&tp->cur_tx, 0);
atomic_set (&tp->dirty_tx, 0); atomic_set (&tp->dirty_tx, 0);
@ -1278,7 +1279,7 @@ static void netdrv_tx_clear (struct netdrv_private *tp)
static void netdrv_tx_timeout (struct net_device *dev) static void netdrv_tx_timeout (struct net_device *dev)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
int i; int i;
u8 tmp8; u8 tmp8;
@ -1311,7 +1312,7 @@ static void netdrv_tx_timeout (struct net_device *dev)
/* Stop a shared interrupt from scavenging while we are. */ /* Stop a shared interrupt from scavenging while we are. */
spin_lock_irqsave (&tp->lock, flags); spin_lock_irqsave (&tp->lock, flags);
netdrv_tx_clear (tp); netdrv_tx_clear (dev);
spin_unlock_irqrestore (&tp->lock, flags); spin_unlock_irqrestore (&tp->lock, flags);
@ -1325,7 +1326,7 @@ static void netdrv_tx_timeout (struct net_device *dev)
static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
int entry; int entry;
@ -1525,7 +1526,7 @@ static void netdrv_rx_interrupt (struct net_device *dev,
DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x," DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x,"
" cur %4.4x.\n", dev->name, rx_status, " cur %4.4x.\n", dev->name, rx_status,
rx_size, cur_rx); rx_size, cur_rx);
#if NETDRV_DEBUG > 2 #if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
{ {
int i; int i;
DPRINTK ("%s: Frame contents ", dev->name); DPRINTK ("%s: Frame contents ", dev->name);
@ -1648,7 +1649,7 @@ static void netdrv_weird_interrupt (struct net_device *dev,
static irqreturn_t netdrv_interrupt (int irq, void *dev_instance) static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
{ {
struct net_device *dev = (struct net_device *) dev_instance; struct net_device *dev = (struct net_device *) dev_instance;
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
int boguscnt = max_interrupt_work; int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */ int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
@ -1711,7 +1712,7 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
static int netdrv_close (struct net_device *dev) static int netdrv_close (struct net_device *dev)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
unsigned long flags; unsigned long flags;
@ -1738,10 +1739,10 @@ static int netdrv_close (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags); spin_unlock_irqrestore (&tp->lock, flags);
synchronize_irq (); synchronize_irq (dev->irq);
free_irq (dev->irq, dev); free_irq (dev->irq, dev);
netdrv_tx_clear (tp); netdrv_tx_clear (dev);
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma); tp->rx_ring, tp->rx_ring_dma);
@ -1762,7 +1763,7 @@ static int netdrv_close (struct net_device *dev)
static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq); struct mii_ioctl_data *data = if_mii(rq);
unsigned long flags; unsigned long flags;
int rc = 0; int rc = 0;
@ -1805,7 +1806,7 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
static void netdrv_set_rx_mode (struct net_device *dev) static void netdrv_set_rx_mode (struct net_device *dev)
{ {
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */ u32 mc_filter[2]; /* Multicast hash filter */
int i, rx_mode; int i, rx_mode;
@ -1862,7 +1863,7 @@ static void netdrv_set_rx_mode (struct net_device *dev)
static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
{ {
struct net_device *dev = pci_get_drvdata (pdev); struct net_device *dev = pci_get_drvdata (pdev);
struct netdrv_private *tp = dev->priv; struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr; void *ioaddr = tp->mmio_addr;
unsigned long flags; unsigned long flags;
@ -1892,7 +1893,7 @@ static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
static int netdrv_resume (struct pci_dev *pdev) static int netdrv_resume (struct pci_dev *pdev)
{ {
struct net_device *dev = pci_get_drvdata (pdev); struct net_device *dev = pci_get_drvdata (pdev);
struct netdrv_private *tp = dev->priv; /*struct netdrv_private *tp = netdev_priv(dev);*/
if (!netif_running(dev)) if (!netif_running(dev))
return 0; return 0;
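
The pci-skeleton changes above convert every dev->priv dereference to netdev_priv(). The private area is co-allocated by alloc_etherdev(), and netdev_priv() simply returns a pointer just past the (aligned) net_device, so no separate pointer has to be kept in sync. A short kernel-style sketch with a hypothetical private structure:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

struct demo_private {
    spinlock_t lock;
    int opened;
};

static struct net_device *demo_alloc(void)
{
    struct net_device *dev;
    struct demo_private *dp;

    /* private area is allocated together with struct net_device */
    dev = alloc_etherdev(sizeof(struct demo_private));
    if (!dev)
        return NULL;

    dp = netdev_priv(dev);  /* preferred over dev->priv */
    spin_lock_init(&dp->lock);
    dp->opened = 0;
    return dev;
}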

View file

@ -60,6 +60,11 @@ config ICPLUS_PHY
---help--- ---help---
Currently supports the IP175C PHY. Currently supports the IP175C PHY.
config REALTEK_PHY
tristate "Drivers for Realtek PHYs"
---help---
Supports the Realtek 821x PHY.
config FIXED_PHY config FIXED_PHY
bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
---help--- ---help---

View file

@ -12,5 +12,6 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_FIXED_PHY) += fixed.o obj-$(CONFIG_FIXED_PHY) += fixed.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o

View file

@ -141,6 +141,20 @@ static struct phy_driver bcm5461_driver = {
.driver = { .owner = THIS_MODULE }, .driver = { .owner = THIS_MODULE },
}; };
static struct phy_driver bcm5482_driver = {
.phy_id = 0x0143bcb0,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
};
static int __init broadcom_init(void) static int __init broadcom_init(void)
{ {
int ret; int ret;
@ -154,8 +168,13 @@ static int __init broadcom_init(void)
ret = phy_driver_register(&bcm5461_driver); ret = phy_driver_register(&bcm5461_driver);
if (ret) if (ret)
goto out_5461; goto out_5461;
ret = phy_driver_register(&bcm5482_driver);
if (ret)
goto out_5482;
return ret; return ret;
out_5482:
phy_driver_unregister(&bcm5461_driver);
out_5461: out_5461:
phy_driver_unregister(&bcm5421_driver); phy_driver_unregister(&bcm5421_driver);
out_5421: out_5421:
@ -166,6 +185,7 @@ out_5411:
static void __exit broadcom_exit(void) static void __exit broadcom_exit(void)
{ {
phy_driver_unregister(&bcm5482_driver);
phy_driver_unregister(&bcm5461_driver); phy_driver_unregister(&bcm5461_driver);
phy_driver_unregister(&bcm5421_driver); phy_driver_unregister(&bcm5421_driver);
phy_driver_unregister(&bcm5411_driver); phy_driver_unregister(&bcm5411_driver);

View file

@ -49,7 +49,7 @@ int mdiobus_register(struct mii_bus *bus)
int i; int i;
int err = 0; int err = 0;
spin_lock_init(&bus->mdio_lock); mutex_init(&bus->mdio_lock);
if (NULL == bus || NULL == bus->name || if (NULL == bus || NULL == bus->name ||
NULL == bus->read || NULL == bus->read ||

View file

@ -26,7 +26,6 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mii.h> #include <linux/mii.h>
@ -72,9 +71,11 @@ int phy_read(struct phy_device *phydev, u16 regnum)
int retval; int retval;
struct mii_bus *bus = phydev->bus; struct mii_bus *bus = phydev->bus;
spin_lock_bh(&bus->mdio_lock); BUG_ON(in_interrupt());
mutex_lock(&bus->mdio_lock);
retval = bus->read(bus, phydev->addr, regnum); retval = bus->read(bus, phydev->addr, regnum);
spin_unlock_bh(&bus->mdio_lock); mutex_unlock(&bus->mdio_lock);
return retval; return retval;
} }
@ -95,9 +96,11 @@ int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
int err; int err;
struct mii_bus *bus = phydev->bus; struct mii_bus *bus = phydev->bus;
spin_lock_bh(&bus->mdio_lock); BUG_ON(in_interrupt());
mutex_lock(&bus->mdio_lock);
err = bus->write(bus, phydev->addr, regnum, val); err = bus->write(bus, phydev->addr, regnum, val);
spin_unlock_bh(&bus->mdio_lock); mutex_unlock(&bus->mdio_lock);
return err; return err;
} }
@ -428,7 +431,7 @@ int phy_start_aneg(struct phy_device *phydev)
{ {
int err; int err;
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
if (AUTONEG_DISABLE == phydev->autoneg) if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev); phy_sanitize_settings(phydev);
@ -449,13 +452,14 @@ int phy_start_aneg(struct phy_device *phydev)
} }
out_unlock: out_unlock:
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
return err; return err;
} }
EXPORT_SYMBOL(phy_start_aneg); EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work); static void phy_change(struct work_struct *work);
static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data); static void phy_timer(unsigned long data);
/** /**
@ -476,6 +480,7 @@ void phy_start_machine(struct phy_device *phydev,
{ {
phydev->adjust_state = handler; phydev->adjust_state = handler;
INIT_WORK(&phydev->state_queue, phy_state_machine);
init_timer(&phydev->phy_timer); init_timer(&phydev->phy_timer);
phydev->phy_timer.function = &phy_timer; phydev->phy_timer.function = &phy_timer;
phydev->phy_timer.data = (unsigned long) phydev; phydev->phy_timer.data = (unsigned long) phydev;
@ -493,11 +498,12 @@ void phy_start_machine(struct phy_device *phydev,
void phy_stop_machine(struct phy_device *phydev) void phy_stop_machine(struct phy_device *phydev)
{ {
del_timer_sync(&phydev->phy_timer); del_timer_sync(&phydev->phy_timer);
cancel_work_sync(&phydev->state_queue);
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
if (phydev->state > PHY_UP) if (phydev->state > PHY_UP)
phydev->state = PHY_UP; phydev->state = PHY_UP;
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
phydev->adjust_state = NULL; phydev->adjust_state = NULL;
} }
@ -541,9 +547,9 @@ static void phy_force_reduction(struct phy_device *phydev)
*/ */
void phy_error(struct phy_device *phydev) void phy_error(struct phy_device *phydev)
{ {
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED; phydev->state = PHY_HALTED;
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
} }
/** /**
@ -705,10 +711,10 @@ static void phy_change(struct work_struct *work)
if (err) if (err)
goto phy_err; goto phy_err;
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
phydev->state = PHY_CHANGELINK; phydev->state = PHY_CHANGELINK;
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
atomic_dec(&phydev->irq_disable); atomic_dec(&phydev->irq_disable);
enable_irq(phydev->irq); enable_irq(phydev->irq);
@ -735,7 +741,7 @@ phy_err:
*/ */
void phy_stop(struct phy_device *phydev) void phy_stop(struct phy_device *phydev)
{ {
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
if (PHY_HALTED == phydev->state) if (PHY_HALTED == phydev->state)
goto out_unlock; goto out_unlock;
@ -751,7 +757,7 @@ void phy_stop(struct phy_device *phydev)
phydev->state = PHY_HALTED; phydev->state = PHY_HALTED;
out_unlock: out_unlock:
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
/* /*
* Cannot call flush_scheduled_work() here as desired because * Cannot call flush_scheduled_work() here as desired because
@ -773,7 +779,7 @@ out_unlock:
*/ */
void phy_start(struct phy_device *phydev) void phy_start(struct phy_device *phydev)
{ {
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
switch (phydev->state) { switch (phydev->state) {
case PHY_STARTING: case PHY_STARTING:
@ -787,19 +793,26 @@ void phy_start(struct phy_device *phydev)
default: default:
break; break;
} }
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
} }
EXPORT_SYMBOL(phy_stop); EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start); EXPORT_SYMBOL(phy_start);
/* PHY timer which handles the state machine */ /**
static void phy_timer(unsigned long data) * phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
*
* Description: Scheduled by the state_queue workqueue each time
* phy_timer is triggered.
*/
static void phy_state_machine(struct work_struct *work)
{ {
struct phy_device *phydev = (struct phy_device *)data; struct phy_device *phydev =
container_of(work, struct phy_device, state_queue);
int needs_aneg = 0; int needs_aneg = 0;
int err = 0; int err = 0;
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
if (phydev->adjust_state) if (phydev->adjust_state)
phydev->adjust_state(phydev->attached_dev); phydev->adjust_state(phydev->attached_dev);
@ -965,7 +978,7 @@ static void phy_timer(unsigned long data)
break; break;
} }
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
if (needs_aneg) if (needs_aneg)
err = phy_start_aneg(phydev); err = phy_start_aneg(phydev);
@ -976,3 +989,14 @@ static void phy_timer(unsigned long data)
mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
} }
/* PHY timer which schedules the state machine work */
static void phy_timer(unsigned long data)
{
struct phy_device *phydev = (struct phy_device *)data;
/*
* PHY I/O operations can potentially sleep so we ensure that
* it's done from a process context
*/
schedule_work(&phydev->state_queue);
}
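The hunks above move the PHY state machine out of timer (softirq) context into a workqueue and replace the bh-spinlock with a mutex, because MDIO accesses can sleep on some buses; the phy_device.c hunks below convert the same lock where it is initialised and taken in probe/remove. A minimal sketch of the defer-to-process-context pattern, with illustrative names (poll_dev, poll_work, poll_timer) that are not phylib symbols:

	#include <linux/kernel.h>
	#include <linux/jiffies.h>
	#include <linux/mutex.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct poll_dev {
		struct mutex lock;		/* may be held across sleeping MDIO I/O */
		struct work_struct poll_work;	/* runs the state machine in process context */
		struct timer_list poll_timer;	/* periodic tick, only queues the work */
	};

	static void poll_work_fn(struct work_struct *work)
	{
		struct poll_dev *pd = container_of(work, struct poll_dev, poll_work);

		mutex_lock(&pd->lock);		/* sleeping is allowed here */
		/* ... read/write PHY registers, advance the state machine ... */
		mutex_unlock(&pd->lock);

		mod_timer(&pd->poll_timer, jiffies + HZ);	/* re-arm the tick */
	}

	static void poll_timer_fn(unsigned long data)
	{
		struct poll_dev *pd = (struct poll_dev *)data;

		/* Timer callbacks run in softirq context and must not sleep,
		 * so hand the real work off to keventd. */
		schedule_work(&pd->poll_work);
	}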

View file

@ -25,7 +25,6 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mii.h> #include <linux/mii.h>
@ -80,7 +79,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->state = PHY_DOWN; dev->state = PHY_DOWN;
spin_lock_init(&dev->lock); mutex_init(&dev->lock);
return dev; return dev;
} }
@ -656,7 +655,7 @@ static int phy_probe(struct device *dev)
if (!(phydrv->flags & PHY_HAS_INTERRUPT)) if (!(phydrv->flags & PHY_HAS_INTERRUPT))
phydev->irq = PHY_POLL; phydev->irq = PHY_POLL;
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
/* Start out supporting everything. Eventually, /* Start out supporting everything. Eventually,
* a controller will attach, and may modify one * a controller will attach, and may modify one
@ -670,7 +669,7 @@ static int phy_probe(struct device *dev)
if (phydev->drv->probe) if (phydev->drv->probe)
err = phydev->drv->probe(phydev); err = phydev->drv->probe(phydev);
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
return err; return err;
@ -682,9 +681,9 @@ static int phy_remove(struct device *dev)
phydev = to_phy_device(dev); phydev = to_phy_device(dev);
spin_lock_bh(&phydev->lock); mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN; phydev->state = PHY_DOWN;
spin_unlock_bh(&phydev->lock); mutex_unlock(&phydev->lock);
if (phydev->drv->remove) if (phydev->drv->remove)
phydev->drv->remove(phydev); phydev->drv->remove(phydev);

drivers/net/phy/realtek.c (new file, 80 lines added)
View file

@ -0,0 +1,80 @@
/*
* drivers/net/phy/realtek.c
*
* Driver for Realtek PHYs
*
* Author: Johnson Leung <r58129@freescale.com>
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/phy.h>
#define RTL821x_PHYSR 0x11
#define RTL821x_PHYSR_DUPLEX 0x2000
#define RTL821x_PHYSR_SPEED 0xc000
#define RTL821x_INER 0x12
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
static int rtl821x_ack_interrupt(struct phy_device *phydev)
{
int err;
err = phy_read(phydev, RTL821x_INSR);
return (err < 0) ? err : 0;
}
static int rtl821x_config_intr(struct phy_device *phydev)
{
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
RTL821x_INER_INIT);
else
err = phy_write(phydev, RTL821x_INER, 0);
return err;
}
/* RTL8211B */
static struct phy_driver rtl821x_driver = {
.phy_id = 0x001cc912,
.name = "RTL821x Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl821x_config_intr,
.driver = { .owner = THIS_MODULE,},
};
static int __init realtek_init(void)
{
int ret;
ret = phy_driver_register(&rtl821x_driver);
return ret;
}
static void __exit realtek_exit(void)
{
phy_driver_unregister(&rtl821x_driver);
}
module_init(realtek_init);
module_exit(realtek_exit);
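For context, phylib binds a specific driver to a discovered PHY by comparing the ID read from the PHY's identifier registers against the driver's phy_id under phy_id_mask, falling back to the generic driver when nothing matches. A simplified stand-alone illustration of that comparison (this is not the phylib matching code itself):

	#include <stdbool.h>
	#include <stdint.h>

	static bool phy_id_matches(uint32_t dev_id, uint32_t drv_id, uint32_t mask)
	{
		return (dev_id & mask) == (drv_id & mask);
	}

	/* An RTL8211B reporting 0x001cc912 matches the driver above, while a
	 * part with different low bits (another revision, say 0x001cc915)
	 * would not, and would be handled by the generic PHY driver. */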

View file

@ -8118,7 +8118,7 @@ static void initiate_new_session(struct lro *lro, u8 *l2h,
lro->iph = ip; lro->iph = ip;
lro->tcph = tcp; lro->tcph = tcp;
lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
lro->tcp_ack = ntohl(tcp->ack_seq); lro->tcp_ack = tcp->ack_seq;
lro->sg_num = 1; lro->sg_num = 1;
lro->total_len = ntohs(ip->tot_len); lro->total_len = ntohs(ip->tot_len);
lro->frags_len = 0; lro->frags_len = 0;
@ -8127,10 +8127,10 @@ static void initiate_new_session(struct lro *lro, u8 *l2h,
* already been done. * already been done.
*/ */
if (tcp->doff == 8) { if (tcp->doff == 8) {
u32 *ptr; __be32 *ptr;
ptr = (u32 *)(tcp+1); ptr = (__be32 *)(tcp+1);
lro->saw_ts = 1; lro->saw_ts = 1;
lro->cur_tsval = *(ptr+1); lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr+2); lro->cur_tsecr = *(ptr+2);
} }
lro->in_use = 1; lro->in_use = 1;
@ -8156,7 +8156,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
/* Update tsecr field if this session has timestamps enabled */ /* Update tsecr field if this session has timestamps enabled */
if (lro->saw_ts) { if (lro->saw_ts) {
u32 *ptr = (u32 *)(tcp + 1); __be32 *ptr = (__be32 *)(tcp + 1);
*(ptr+2) = lro->cur_tsecr; *(ptr+2) = lro->cur_tsecr;
} }
@ -8181,10 +8181,10 @@ static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
lro->window = tcp->window; lro->window = tcp->window;
if (lro->saw_ts) { if (lro->saw_ts) {
u32 *ptr; __be32 *ptr;
/* Update tsecr and tsval from this packet */ /* Update tsecr and tsval from this packet */
ptr = (u32 *) (tcp + 1); ptr = (__be32 *)(tcp+1);
lro->cur_tsval = *(ptr + 1); lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr + 2); lro->cur_tsecr = *(ptr + 2);
} }
} }
@ -8235,11 +8235,11 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
/* Ensure timestamp value increases monotonically */ /* Ensure timestamp value increases monotonically */
if (l_lro) if (l_lro)
if (l_lro->cur_tsval > *((u32 *)(ptr+2))) if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
return -1; return -1;
/* timestamp echo reply should be non-zero */ /* timestamp echo reply should be non-zero */
if (*((u32 *)(ptr+6)) == 0) if (*((__be32 *)(ptr+6)) == 0)
return -1; return -1;
} }
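The LRO fixes above are byte-order corrections: the stored tsval is converted with ntohl() because it is compared and advanced numerically, while tsecr (and the raw ack_seq) stay in network byte order because they are only copied back onto the wire when the aggregated header is rebuilt. A small stand-alone example of that rule, assuming a TCP timestamp option laid out as it appears on the wire:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t wire_tsval = htonl(1000);	/* as read from the option */
		uint32_t wire_tsecr = htonl(42);

		uint32_t cur_tsval = ntohl(wire_tsval);	/* host order: safe to compare */
		uint32_t cur_tsecr = wire_tsecr;	/* left big-endian: echoed back verbatim */

		if (cur_tsval > 999)
			printf("newer timestamp, tsecr on wire = 0x%08x\n", cur_tsecr);
		return 0;
	}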

View file

@ -809,7 +809,7 @@ struct lro {
int in_use; int in_use;
__be16 window; __be16 window;
u32 cur_tsval; u32 cur_tsval;
u32 cur_tsecr; __be32 cur_tsecr;
u8 saw_ts; u8 saw_ts;
}; };

View file

@ -326,7 +326,7 @@ static const struct {
{ "SiS 191 PCI Gigabit Ethernet adapter" }, { "SiS 191 PCI Gigabit Ethernet adapter" },
}; };
static struct pci_device_id sis190_pci_tbl[] __devinitdata = { static struct pci_device_id sis190_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, }, { 0, },

View file

@ -623,6 +623,7 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
/* Turn on/off phy power saving */ /* Turn on/off phy power saving */
if (onoff) if (onoff)
@ -634,7 +635,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
reg1 |= coma_mode[port]; reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1); sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_pci_read32(hw, PCI_DEV_REG1);
udelay(100); udelay(100);
} }
@ -1422,6 +1424,7 @@ static int sky2_up(struct net_device *dev)
imask |= portirq_msk[port]; imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask); sky2_write32(hw, B0_IMSK, imask);
sky2_set_multicast(dev);
return 0; return 0;
err_out: err_out:
@ -2436,6 +2439,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err; u16 pci_err;
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS); pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit()) if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@ -2443,12 +2447,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
sky2_pci_write16(hw, PCI_STATUS, sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS); pci_err | PCI_STATUS_ERROR_BITS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
} }
if (status & Y2_IS_PCI_EXP) { if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */ /* PCI-Express uncorrectable Error occurred */
u32 err; u32 err;
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful); 0xfffffffful);
@ -2456,6 +2462,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
} }
if (status & Y2_HWE_L1_MASK) if (status & Y2_HWE_L1_MASK)
@ -2831,6 +2838,7 @@ static void sky2_reset(struct sky2_hw *hw)
} }
sky2_power_on(hw); sky2_power_on(hw);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) { for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@ -3554,8 +3562,6 @@ static int sky2_set_ringparam(struct net_device *dev,
err = sky2_up(dev); err = sky2_up(dev);
if (err) if (err)
dev_close(dev); dev_close(dev);
else
sky2_set_multicast(dev);
} }
return err; return err;
@ -4389,8 +4395,6 @@ static int sky2_resume(struct pci_dev *pdev)
dev_close(dev); dev_close(dev);
goto out; goto out;
} }
sky2_set_multicast(dev);
} }
} }
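Two independent sky2 fixes are visible above: sky2_set_multicast() now runs inside sky2_up(), so the filter is reprogrammed on every recovery path rather than only in the callers that remembered to do it, and every access to the chip's PCI configuration window is bracketed with B2_TST_CTRL1 writes because the Yukon-2 write-protects those registers unless the test-control bit is set. A sketch of the bracketing pattern using the driver's existing helpers (the function itself is made up):

	static void yukon2_set_dev_reg1_bits(struct sky2_hw *hw, u32 bits)
	{
		u32 reg1;

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);	/* unlock config writes */
		reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
		sky2_pci_write32(hw, PCI_DEV_REG1, reg1 | bits);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);	/* lock them again */
		sky2_pci_read32(hw, PCI_DEV_REG1);			/* flush the posted write */
	}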

View file

@ -1075,7 +1075,7 @@ static const struct ethtool_ops bigmac_ethtool_ops = {
.get_link = bigmac_get_link, .get_link = bigmac_get_link,
}; };
static int __init bigmac_ether_init(struct sbus_dev *qec_sdev) static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev)
{ {
struct net_device *dev; struct net_device *dev;
static int version_printed; static int version_printed;
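This hunk, like the sunqe, sunvnet and via-rhine hunks further down, retags probe-path helpers from __init to __devinit. __init code is discarded once booting completes, but a probe routine can still be called afterwards (a driver bound late, or a hot-added device), so it must be __devinit, which is only discarded when CONFIG_HOTPLUG is disabled. A kernel-style sketch of the rule; the function name is illustrative:

	/* Wrong with __init: .init.text is freed after boot, yet the bus layer
	 * may invoke probe() much later.  __devinit keeps the code around
	 * whenever hotplug is possible. */
	static int __devinit example_ether_init(struct sbus_dev *sdev)
	{
		/* safe to run after init memory has been reclaimed */
		return 0;
	}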

View file

@ -747,7 +747,7 @@ static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
qecp->gregs + GLOB_RSIZE); qecp->gregs + GLOB_RSIZE);
} }
static u8 __init qec_get_burst(struct device_node *dp) static u8 __devinit qec_get_burst(struct device_node *dp)
{ {
u8 bsizes, bsizes_more; u8 bsizes, bsizes_more;
@ -767,7 +767,7 @@ static u8 __init qec_get_burst(struct device_node *dp)
return bsizes; return bsizes;
} }
static struct sunqec * __init get_qec(struct sbus_dev *child_sdev) static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev)
{ {
struct sbus_dev *qec_sdev = child_sdev->parent; struct sbus_dev *qec_sdev = child_sdev->parent;
struct sunqec *qecp; struct sunqec *qecp;
@ -823,7 +823,7 @@ fail:
return NULL; return NULL;
} }
static int __init qec_ether_init(struct sbus_dev *sdev) static int __devinit qec_ether_init(struct sbus_dev *sdev)
{ {
static unsigned version_printed; static unsigned version_printed;
struct net_device *dev; struct net_device *dev;

View file

@ -1130,7 +1130,7 @@ static struct vio_driver_ops vnet_vio_ops = {
.handshake_complete = vnet_handshake_complete, .handshake_complete = vnet_handshake_complete,
}; };
static void print_version(void) static void __devinit print_version(void)
{ {
static int version_printed; static int version_printed;

View file

@ -434,7 +434,7 @@ static int __devinit olympic_init(struct net_device *dev)
} }
static int olympic_open(struct net_device *dev) static int __devinit olympic_open(struct net_device *dev)
{ {
struct olympic_private *olympic_priv=netdev_priv(dev); struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb; u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;

View file

@ -2084,8 +2084,10 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
if (!ugeth) if (!ugeth)
return; return;
if (ugeth->uccf) if (ugeth->uccf) {
ucc_fast_free(ugeth->uccf); ucc_fast_free(ugeth->uccf);
ugeth->uccf = NULL;
}
if (ugeth->p_thread_data_tx) { if (ugeth->p_thread_data_tx) {
qe_muram_free(ugeth->thread_dat_tx_offset); qe_muram_free(ugeth->thread_dat_tx_offset);
@ -2305,10 +2307,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info; ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info; uf_info = &ug_info->uf_info;
/* Create CQs for hash tables */
INIT_LIST_HEAD(&ugeth->group_hash_q);
INIT_LIST_HEAD(&ugeth->ind_hash_q);
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) { (uf_info->bd_mem_part == MEM_PART_MURAM))) {
if (netif_msg_probe(ugeth)) if (netif_msg_probe(ugeth))
@ -3668,6 +3666,23 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void ucc_netpoll(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
int irq = ugeth->ug_info->uf_info.irq;
disable_irq(irq);
ucc_geth_irq_handler(irq, dev);
enable_irq(irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/* Called when something needs to use the ethernet device */ /* Called when something needs to use the ethernet device */
/* Returns 0 for success. */ /* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev) static int ucc_geth_open(struct net_device *dev)
@ -3990,6 +4005,10 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ugeth = netdev_priv(dev); ugeth = netdev_priv(dev);
spin_lock_init(&ugeth->lock); spin_lock_init(&ugeth->lock);
/* Create CQs for hash tables */
INIT_LIST_HEAD(&ugeth->group_hash_q);
INIT_LIST_HEAD(&ugeth->ind_hash_q);
dev_set_drvdata(device, dev); dev_set_drvdata(device, dev);
/* Set the dev->base_addr to the gfar reg region */ /* Set the dev->base_addr to the gfar reg region */
@ -4006,6 +4025,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
#ifdef CONFIG_UGETH_NAPI #ifdef CONFIG_UGETH_NAPI
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
#endif /* CONFIG_UGETH_NAPI */ #endif /* CONFIG_UGETH_NAPI */
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ucc_netpoll;
#endif
dev->stop = ucc_geth_close; dev->stop = ucc_geth_close;
// dev->change_mtu = ucc_geth_change_mtu; // dev->change_mtu = ucc_geth_change_mtu;
dev->mtu = 1500; dev->mtu = 1500;
@ -4040,9 +4062,10 @@ static int ucc_geth_remove(struct of_device* ofdev)
struct net_device *dev = dev_get_drvdata(device); struct net_device *dev = dev_get_drvdata(device);
struct ucc_geth_private *ugeth = netdev_priv(dev); struct ucc_geth_private *ugeth = netdev_priv(dev);
dev_set_drvdata(device, NULL); unregister_netdev(dev);
ucc_geth_memclean(ugeth);
free_netdev(dev); free_netdev(dev);
ucc_geth_memclean(ugeth);
dev_set_drvdata(device, NULL);
return 0; return 0;
} }
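The ucc_netpoll() hook added above follows the usual poll_controller recipe: netconsole and friends may need to push packets when normal interrupt delivery cannot be relied on, so the hook masks the device interrupt and calls the handler directly. In generic form (the names are placeholders, not ucc_geth symbols):

	#ifdef CONFIG_NET_POLL_CONTROLLER
	static void example_netpoll(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);

		disable_irq(priv->irq);			/* keep the real handler from racing us */
		example_irq_handler(priv->irq, dev);	/* drain pending rx/tx events */
		enable_irq(priv->irq);
	}
	#endif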

View file

@ -926,7 +926,6 @@ static int rtl8150_probe(struct usb_interface *intf,
netdev->set_multicast_list = rtl8150_set_multicast; netdev->set_multicast_list = rtl8150_set_multicast;
netdev->set_mac_address = rtl8150_set_mac_address; netdev->set_mac_address = rtl8150_set_mac_address;
netdev->get_stats = rtl8150_netdev_stats; netdev->get_stats = rtl8150_netdev_stats;
netdev->mtu = RTL8150_MTU;
SET_ETHTOOL_OPS(netdev, &ops); SET_ETHTOOL_OPS(netdev, &ops);
dev->intr_interval = 100; /* 100ms */ dev->intr_interval = 100; /* 100ms */

View file

@ -606,7 +606,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
} }
#endif #endif
static void rhine_hw_init(struct net_device *dev, long pioaddr) static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{ {
struct rhine_private *rp = netdev_priv(dev); struct rhine_private *rp = netdev_priv(dev);

View file

@ -8,7 +8,6 @@
* for 64bit hardware platforms. * for 64bit hardware platforms.
* *
* TODO * TODO
* Big-endian support
* rx_copybreak/alignment * rx_copybreak/alignment
* Scatter gather * Scatter gather
* More testing * More testing
@ -681,7 +680,7 @@ static void velocity_rx_reset(struct velocity_info *vptr)
* Init state, all RD entries belong to the NIC * Init state, all RD entries belong to the NIC
*/ */
for (i = 0; i < vptr->options.numrx; ++i) for (i = 0; i < vptr->options.numrx; ++i)
vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
writew(vptr->options.numrx, &regs->RBRDU); writew(vptr->options.numrx, &regs->RBRDU);
writel(vptr->rd_pool_dma, &regs->RDBaseLo); writel(vptr->rd_pool_dma, &regs->RDBaseLo);
@ -777,7 +776,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
vptr->int_mask = INT_MASK_DEF; vptr->int_mask = INT_MASK_DEF;
writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo); writel(vptr->rd_pool_dma, &regs->RDBaseLo);
writew(vptr->options.numrx - 1, &regs->RDCSize); writew(vptr->options.numrx - 1, &regs->RDCSize);
mac_rx_queue_run(regs); mac_rx_queue_run(regs);
mac_rx_queue_wake(regs); mac_rx_queue_wake(regs);
@ -785,7 +784,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
writew(vptr->options.numtx - 1, &regs->TDCSize); writew(vptr->options.numtx - 1, &regs->TDCSize);
for (i = 0; i < vptr->num_txq; i++) { for (i = 0; i < vptr->num_txq; i++) {
writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
mac_tx_queue_run(regs, i); mac_tx_queue_run(regs, i);
} }
@ -1195,7 +1194,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
dirty = vptr->rd_dirty - unusable; dirty = vptr->rd_dirty - unusable;
for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
} }
writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
@ -1210,7 +1209,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
struct rx_desc *rd = vptr->rd_ring + dirty; struct rx_desc *rd = vptr->rd_ring + dirty;
/* Fine for an all zero Rx desc at init time as well */ /* Fine for an all zero Rx desc at init time as well */
if (rd->rdesc0.owner == OWNED_BY_NIC) if (rd->rdesc0.len & OWNED_BY_NIC)
break; break;
if (!vptr->rd_info[dirty].skb) { if (!vptr->rd_info[dirty].skb) {
@ -1413,7 +1412,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
if (!vptr->rd_info[rd_curr].skb) if (!vptr->rd_info[rd_curr].skb)
break; break;
if (rd->rdesc0.owner == OWNED_BY_NIC) if (rd->rdesc0.len & OWNED_BY_NIC)
break; break;
rmb(); rmb();
@ -1421,7 +1420,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
/* /*
* Don't drop CE or RL error frame although RXOK is off * Don't drop CE or RL error frame although RXOK is off
*/ */
if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
if (velocity_receive_frame(vptr, rd_curr) < 0) if (velocity_receive_frame(vptr, rd_curr) < 0)
stats->rx_dropped++; stats->rx_dropped++;
} else { } else {
@ -1433,7 +1432,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
stats->rx_dropped++; stats->rx_dropped++;
} }
rd->inten = 1; rd->size |= RX_INTEN;
vptr->dev->last_rx = jiffies; vptr->dev->last_rx = jiffies;
@ -1554,7 +1553,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
struct net_device_stats *stats = &vptr->stats; struct net_device_stats *stats = &vptr->stats;
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
struct rx_desc *rd = &(vptr->rd_ring[idx]); struct rx_desc *rd = &(vptr->rd_ring[idx]);
int pkt_len = rd->rdesc0.len; int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb; struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
@ -1637,8 +1636,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
*/ */
*((u32 *) & (rd->rdesc0)) = 0; *((u32 *) & (rd->rdesc0)) = 0;
rd->len = cpu_to_le32(vptr->rx_buf_sz); rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
rd->inten = 1;
rd->pa_low = cpu_to_le32(rd_info->skb_dma); rd->pa_low = cpu_to_le32(rd_info->skb_dma);
rd->pa_high = 0; rd->pa_high = 0;
return 0; return 0;
@ -1674,7 +1672,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
td = &(vptr->td_rings[qnum][idx]); td = &(vptr->td_rings[qnum][idx]);
tdinfo = &(vptr->td_infos[qnum][idx]); tdinfo = &(vptr->td_infos[qnum][idx]);
if (td->tdesc0.owner == OWNED_BY_NIC) if (td->tdesc0.len & OWNED_BY_NIC)
break; break;
if ((works++ > 15)) if ((works++ > 15))
@ -1874,7 +1872,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
for (i = 0; i < tdinfo->nskb_dma; i++) { for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT #ifdef VELOCITY_ZERO_COPY_SUPPORT
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE); pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
#else #else
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif #endif
@ -2067,8 +2065,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
struct velocity_td_info *tdinfo; struct velocity_td_info *tdinfo;
unsigned long flags; unsigned long flags;
int index; int index;
int pktlen = skb->len; int pktlen = skb->len;
__le16 len = cpu_to_le16(pktlen);
#ifdef VELOCITY_ZERO_COPY_SUPPORT #ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
@ -2083,9 +2081,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
td_ptr = &(vptr->td_rings[qnum][index]); td_ptr = &(vptr->td_rings[qnum][index]);
tdinfo = &(vptr->td_infos[qnum][index]); tdinfo = &(vptr->td_infos[qnum][index]);
td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
td_ptr->tdesc1.TCR = TCR0_TIC; td_ptr->tdesc1.TCR = TCR0_TIC;
td_ptr->td_buf[0].queue = 0; td_ptr->td_buf[0].size &= ~TD_QUEUE;
/* /*
* Pad short frames. * Pad short frames.
@ -2093,16 +2090,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
if (pktlen < ETH_ZLEN) { if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */ /* Cannot occur until ZC support */
pktlen = ETH_ZLEN; pktlen = ETH_ZLEN;
len = cpu_to_le16(ETH_ZLEN);
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
tdinfo->skb = skb; tdinfo->skb = skb;
tdinfo->skb_dma[0] = tdinfo->buf_dma; tdinfo->skb_dma[0] = tdinfo->buf_dma;
td_ptr->tdesc0.pktsize = pktlen; td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0; td_ptr->td_buf[0].pa_high = 0;
td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1; tdinfo->nskb_dma = 1;
td_ptr->tdesc1.CMDZ = 2;
} else } else
#ifdef VELOCITY_ZERO_COPY_SUPPORT #ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 0) { if (skb_shinfo(skb)->nr_frags > 0) {
@ -2111,36 +2108,35 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
if (nfrags > 6) { if (nfrags > 6) {
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma; tdinfo->skb_dma[0] = tdinfo->buf_dma;
td_ptr->tdesc0.pktsize = td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0; td_ptr->td_buf[0].pa_high = 0;
td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1; tdinfo->nskb_dma = 1;
td_ptr->tdesc1.CMDZ = 2;
} else { } else {
int i = 0; int i = 0;
tdinfo->nskb_dma = 0; tdinfo->nskb_dma = 0;
tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE); tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
td_ptr->tdesc0.pktsize = pktlen; td_ptr->tdesc0.len = len;
/* FIXME: support 48bit DMA later */ /* FIXME: support 48bit DMA later */
td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
td_ptr->td_buf[i].pa_high = 0; td_ptr->td_buf[i].pa_high = 0;
td_ptr->td_buf[i].bufsize = skb->len - skb->data_len; td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
for (i = 0; i < nfrags; i++) { for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = ((void *) page_address(frag->page + frag->page_offset)); void *addr = (void *)page_address(frag->page) + frag->page_offset;
tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0; td_ptr->td_buf[i + 1].pa_high = 0;
td_ptr->td_buf[i + 1].bufsize = frag->size; td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
} }
tdinfo->nskb_dma = i - 1; tdinfo->nskb_dma = i - 1;
td_ptr->tdesc1.CMDZ = i;
} }
} else } else
@ -2152,18 +2148,16 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
*/ */
tdinfo->skb = skb; tdinfo->skb = skb;
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
td_ptr->tdesc0.pktsize = pktlen; td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0; td_ptr->td_buf[0].pa_high = 0;
td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; td_ptr->td_buf[0].size = len;
tdinfo->nskb_dma = 1; tdinfo->nskb_dma = 1;
td_ptr->tdesc1.CMDZ = 2;
} }
td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
if (vptr->vlgrp && vlan_tx_tag_present(skb)) { if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb); td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
td_ptr->tdesc1.pqinf.priority = 0;
td_ptr->tdesc1.pqinf.CFI = 0;
td_ptr->tdesc1.TCR |= TCR0_VETAG; td_ptr->tdesc1.TCR |= TCR0_VETAG;
} }
@ -2185,7 +2179,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
if (prev < 0) if (prev < 0)
prev = vptr->options.numtx - 1; prev = vptr->options.numtx - 1;
td_ptr->tdesc0.owner = OWNED_BY_NIC; td_ptr->tdesc0.len |= OWNED_BY_NIC;
vptr->td_used[qnum]++; vptr->td_used[qnum]++;
vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
@ -2193,7 +2187,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev); netif_stop_queue(dev);
td_ptr = &(vptr->td_rings[qnum][prev]); td_ptr = &(vptr->td_rings[qnum][prev]);
td_ptr->td_buf[0].queue = 1; td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum); mac_tx_queue_wake(vptr->mac_regs, qnum);
} }
dev->trans_start = jiffies; dev->trans_start = jiffies;
@ -3410,7 +3404,7 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
velocity_save_context(vptr, &vptr->context); velocity_save_context(vptr, &vptr->context);
velocity_shutdown(vptr); velocity_shutdown(vptr);
velocity_set_wol(vptr); velocity_set_wol(vptr);
pci_enable_wake(pdev, 3, 1); pci_enable_wake(pdev, PCI_D3hot, 1);
pci_set_power_state(pdev, PCI_D3hot); pci_set_power_state(pdev, PCI_D3hot);
} else { } else {
velocity_save_context(vptr, &vptr->context); velocity_save_context(vptr, &vptr->context);

View file

@ -70,40 +70,27 @@
* Bits in the RSR0 register * Bits in the RSR0 register
*/ */
#define RSR_DETAG 0x0080 #define RSR_DETAG cpu_to_le16(0x0080)
#define RSR_SNTAG 0x0040 #define RSR_SNTAG cpu_to_le16(0x0040)
#define RSR_RXER 0x0020 #define RSR_RXER cpu_to_le16(0x0020)
#define RSR_RL 0x0010 #define RSR_RL cpu_to_le16(0x0010)
#define RSR_CE 0x0008 #define RSR_CE cpu_to_le16(0x0008)
#define RSR_FAE 0x0004 #define RSR_FAE cpu_to_le16(0x0004)
#define RSR_CRC 0x0002 #define RSR_CRC cpu_to_le16(0x0002)
#define RSR_VIDM 0x0001 #define RSR_VIDM cpu_to_le16(0x0001)
/* /*
* Bits in the RSR1 register * Bits in the RSR1 register
*/ */
#define RSR_RXOK 0x8000 // rx OK #define RSR_RXOK cpu_to_le16(0x8000) // rx OK
#define RSR_PFT 0x4000 // Perfect filtering address match #define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match
#define RSR_MAR 0x2000 // MAC accept multicast address packet #define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet
#define RSR_BAR 0x1000 // MAC accept broadcast address packet #define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet
#define RSR_PHY 0x0800 // MAC accept physical address packet #define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet
#define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator #define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator
#define RSR_STP 0x0200 // start of packet #define RSR_STP cpu_to_le16(0x0200) // start of packet
#define RSR_EDP 0x0100 // end of packet #define RSR_EDP cpu_to_le16(0x0100) // end of packet
/*
* Bits in the RSR1 register
*/
#define RSR1_RXOK 0x80 // rx OK
#define RSR1_PFT 0x40 // Perfect filtering address match
#define RSR1_MAR 0x20 // MAC accept multicast address packet
#define RSR1_BAR 0x10 // MAC accept broadcast address packet
#define RSR1_PHY 0x08 // MAC accept physical address packet
#define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator
#define RSR1_STP 0x02 // start of packet
#define RSR1_EDP 0x01 // end of packet
/* /*
* Bits in the CSM register * Bits in the CSM register
@ -120,33 +107,21 @@
* Bits in the TSR0 register * Bits in the TSR0 register
*/ */
#define TSR0_ABT 0x0080 // Tx abort because of excessive collision #define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision
#define TSR0_OWT 0x0040 // Jumbo frame Tx abort #define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort
#define TSR0_OWC 0x0020 // Out of window collision #define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision
#define TSR0_COLS 0x0010 // experience collision in this transmit event #define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event
#define TSR0_NCR3 0x0008 // collision retry counter[3] #define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3]
#define TSR0_NCR2 0x0004 // collision retry counter[2] #define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2]
#define TSR0_NCR1 0x0002 // collision retry counter[1] #define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1]
#define TSR0_NCR0 0x0001 // collision retry counter[0] #define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0]
#define TSR0_TERR 0x8000 // #define TSR0_TERR cpu_to_le16(0x8000) //
#define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode #define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode
#define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode #define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode
#define TSR0_LNKFL 0x1000 // packet serviced during link down #define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down
#define TSR0_SHDN 0x0400 // shutdown case #define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case
#define TSR0_CRS 0x0200 // carrier sense lost #define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost
#define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat) #define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat)
/*
* Bits in the TSR1 register
*/
#define TSR1_TERR 0x80 //
#define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode
#define TSR1_GMII 0x20 // current transaction is serviced by GMII mode
#define TSR1_LNKFL 0x10 // packet serviced during link down
#define TSR1_SHDN 0x04 // shutdown case
#define TSR1_CRS 0x02 // carrier sense lost
#define TSR1_CDH 0x01 // AQE test fail (CD heartbeat)
// //
// Bits in the TCR0 register // Bits in the TCR0 register
@ -197,25 +172,26 @@
*/ */
struct rdesc0 { struct rdesc0 {
u16 RSR; /* Receive status */ __le16 RSR; /* Receive status */
u16 len:14; /* Received packet length */ __le16 len; /* bits 0--13; bit 15 - owner */
u16 reserved:1;
u16 owner:1; /* Who owns this buffer ? */
}; };
struct rdesc1 { struct rdesc1 {
u16 PQTAG; __le16 PQTAG;
u8 CSM; u8 CSM;
u8 IPKT; u8 IPKT;
}; };
enum {
RX_INTEN = __constant_cpu_to_le16(0x8000)
};
struct rx_desc { struct rx_desc {
struct rdesc0 rdesc0; struct rdesc0 rdesc0;
struct rdesc1 rdesc1; struct rdesc1 rdesc1;
u32 pa_low; /* Low 32 bit PCI address */ __le32 pa_low; /* Low 32 bit PCI address */
u16 pa_high; /* Next 16 bit PCI address (48 total) */ __le16 pa_high; /* Next 16 bit PCI address (48 total) */
u16 len:15; /* Frame size */ __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
u16 inten:1; /* Enable interrupt */
} __attribute__ ((__packed__)); } __attribute__ ((__packed__));
/* /*
@ -223,32 +199,24 @@ struct rx_desc {
*/ */
struct tdesc0 { struct tdesc0 {
u16 TSR; /* Transmit status register */ __le16 TSR; /* Transmit status register */
u16 pktsize:14; /* Size of frame */ __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */
u16 reserved:1;
u16 owner:1; /* Who owns the buffer */
}; };
struct pqinf { /* Priority queue info */ struct tdesc1 {
u16 VID:12; __le16 vlan;
u16 CFI:1; u8 TCR;
u16 priority:3; u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
} __attribute__ ((__packed__)); } __attribute__ ((__packed__));
struct tdesc1 { enum {
struct pqinf pqinf; TD_QUEUE = __constant_cpu_to_le16(0x8000)
u8 TCR; };
u8 TCPLS:2;
u8 reserved:2;
u8 CMDZ:4;
} __attribute__ ((__packed__));
struct td_buf { struct td_buf {
u32 pa_low; __le32 pa_low;
u16 pa_high; __le16 pa_high;
u16 bufsize:14; __le16 size; /* bits 0--13 - size, bit 15 - queue */
u16 reserved:1;
u16 queue:1;
} __attribute__ ((__packed__)); } __attribute__ ((__packed__));
struct tx_desc { struct tx_desc {
@ -276,7 +244,7 @@ struct velocity_td_info {
enum velocity_owner { enum velocity_owner {
OWNED_BY_HOST = 0, OWNED_BY_HOST = 0,
OWNED_BY_NIC = 1 OWNED_BY_NIC = __constant_cpu_to_le16(0x8000)
}; };
@ -1012,45 +980,45 @@ struct mac_regs {
volatile u8 RCR; volatile u8 RCR;
volatile u8 TCR; volatile u8 TCR;
volatile u32 CR0Set; /* 0x08 */ volatile __le32 CR0Set; /* 0x08 */
volatile u32 CR0Clr; /* 0x0C */ volatile __le32 CR0Clr; /* 0x0C */
volatile u8 MARCAM[8]; /* 0x10 */ volatile u8 MARCAM[8]; /* 0x10 */
volatile u32 DecBaseHi; /* 0x18 */ volatile __le32 DecBaseHi; /* 0x18 */
volatile u16 DbfBaseHi; /* 0x1C */ volatile __le16 DbfBaseHi; /* 0x1C */
volatile u16 reserved_1E; volatile __le16 reserved_1E;
volatile u16 ISRCTL; /* 0x20 */ volatile __le16 ISRCTL; /* 0x20 */
volatile u8 TXESR; volatile u8 TXESR;
volatile u8 RXESR; volatile u8 RXESR;
volatile u32 ISR; /* 0x24 */ volatile __le32 ISR; /* 0x24 */
volatile u32 IMR; volatile __le32 IMR;
volatile u32 TDStatusPort; /* 0x2C */ volatile __le32 TDStatusPort; /* 0x2C */
volatile u16 TDCSRSet; /* 0x30 */ volatile __le16 TDCSRSet; /* 0x30 */
volatile u8 RDCSRSet; volatile u8 RDCSRSet;
volatile u8 reserved_33; volatile u8 reserved_33;
volatile u16 TDCSRClr; volatile __le16 TDCSRClr;
volatile u8 RDCSRClr; volatile u8 RDCSRClr;
volatile u8 reserved_37; volatile u8 reserved_37;
volatile u32 RDBaseLo; /* 0x38 */ volatile __le32 RDBaseLo; /* 0x38 */
volatile u16 RDIdx; /* 0x3C */ volatile __le16 RDIdx; /* 0x3C */
volatile u16 reserved_3E; volatile __le16 reserved_3E;
volatile u32 TDBaseLo[4]; /* 0x40 */ volatile __le32 TDBaseLo[4]; /* 0x40 */
volatile u16 RDCSize; /* 0x50 */ volatile __le16 RDCSize; /* 0x50 */
volatile u16 TDCSize; /* 0x52 */ volatile __le16 TDCSize; /* 0x52 */
volatile u16 TDIdx[4]; /* 0x54 */ volatile __le16 TDIdx[4]; /* 0x54 */
volatile u16 tx_pause_timer; /* 0x5C */ volatile __le16 tx_pause_timer; /* 0x5C */
volatile u16 RBRDU; /* 0x5E */ volatile __le16 RBRDU; /* 0x5E */
volatile u32 FIFOTest0; /* 0x60 */ volatile __le32 FIFOTest0; /* 0x60 */
volatile u32 FIFOTest1; /* 0x64 */ volatile __le32 FIFOTest1; /* 0x64 */
volatile u8 CAMADDR; /* 0x68 */ volatile u8 CAMADDR; /* 0x68 */
volatile u8 CAMCR; /* 0x69 */ volatile u8 CAMCR; /* 0x69 */
@ -1063,18 +1031,18 @@ struct mac_regs {
volatile u8 PHYSR1; volatile u8 PHYSR1;
volatile u8 MIICR; volatile u8 MIICR;
volatile u8 MIIADR; volatile u8 MIIADR;
volatile u16 MIIDATA; volatile __le16 MIIDATA;
volatile u16 SoftTimer0; /* 0x74 */ volatile __le16 SoftTimer0; /* 0x74 */
volatile u16 SoftTimer1; volatile __le16 SoftTimer1;
volatile u8 CFGA; /* 0x78 */ volatile u8 CFGA; /* 0x78 */
volatile u8 CFGB; volatile u8 CFGB;
volatile u8 CFGC; volatile u8 CFGC;
volatile u8 CFGD; volatile u8 CFGD;
volatile u16 DCFG; /* 0x7C */ volatile __le16 DCFG; /* 0x7C */
volatile u16 MCFG; volatile __le16 MCFG;
volatile u8 TBIST; /* 0x80 */ volatile u8 TBIST; /* 0x80 */
volatile u8 RBIST; volatile u8 RBIST;
@ -1086,9 +1054,9 @@ struct mac_regs {
volatile u8 rev_id; volatile u8 rev_id;
volatile u8 PORSTS; volatile u8 PORSTS;
volatile u32 MIBData; /* 0x88 */ volatile __le32 MIBData; /* 0x88 */
volatile u16 EEWrData; volatile __le16 EEWrData;
volatile u8 reserved_8E; volatile u8 reserved_8E;
volatile u8 BPMDWr; volatile u8 BPMDWr;
@ -1098,7 +1066,7 @@ struct mac_regs {
volatile u8 EECHKSUM; /* 0x92 */ volatile u8 EECHKSUM; /* 0x92 */
volatile u8 EECSR; volatile u8 EECSR;
volatile u16 EERdData; /* 0x94 */ volatile __le16 EERdData; /* 0x94 */
volatile u8 EADDR; volatile u8 EADDR;
volatile u8 EMBCMD; volatile u8 EMBCMD;
@ -1112,22 +1080,22 @@ struct mac_regs {
volatile u8 DEBUG; volatile u8 DEBUG;
volatile u8 CHIPGCR; volatile u8 CHIPGCR;
volatile u16 WOLCRSet; /* 0xA0 */ volatile __le16 WOLCRSet; /* 0xA0 */
volatile u8 PWCFGSet; volatile u8 PWCFGSet;
volatile u8 WOLCFGSet; volatile u8 WOLCFGSet;
volatile u16 WOLCRClr; /* 0xA4 */ volatile __le16 WOLCRClr; /* 0xA4 */
volatile u8 PWCFGCLR; volatile u8 PWCFGCLR;
volatile u8 WOLCFGClr; volatile u8 WOLCFGClr;
volatile u16 WOLSRSet; /* 0xA8 */ volatile __le16 WOLSRSet; /* 0xA8 */
volatile u16 reserved_AA; volatile __le16 reserved_AA;
volatile u16 WOLSRClr; /* 0xAC */ volatile __le16 WOLSRClr; /* 0xAC */
volatile u16 reserved_AE; volatile __le16 reserved_AE;
volatile u16 PatternCRC[8]; /* 0xB0 */ volatile __le16 PatternCRC[8]; /* 0xB0 */
volatile u32 ByteMask[4][4]; /* 0xC0 */ volatile __le32 ByteMask[4][4]; /* 0xC0 */
} __attribute__ ((__packed__)); } __attribute__ ((__packed__));
@ -1238,12 +1206,12 @@ typedef u8 MCAM_ADDR[ETH_ALEN];
struct arp_packet { struct arp_packet {
u8 dest_mac[ETH_ALEN]; u8 dest_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN];
u16 type; __be16 type;
u16 ar_hrd; __be16 ar_hrd;
u16 ar_pro; __be16 ar_pro;
u8 ar_hln; u8 ar_hln;
u8 ar_pln; u8 ar_pln;
u16 ar_op; __be16 ar_op;
u8 ar_sha[ETH_ALEN]; u8 ar_sha[ETH_ALEN];
u8 ar_sip[4]; u8 ar_sip[4];
u8 ar_tha[ETH_ALEN]; u8 ar_tha[ETH_ALEN];
@ -1253,7 +1221,7 @@ struct arp_packet {
struct _magic_packet { struct _magic_packet {
u8 dest_mac[6]; u8 dest_mac[6];
u8 src_mac[6]; u8 src_mac[6];
u16 type; __be16 type;
u8 MAC[16][6]; u8 MAC[16][6];
u8 password[6]; u8 password[6];
} __attribute__ ((__packed__)); } __attribute__ ((__packed__));
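The descriptor rework above is the big-endian fix promised by the removed TODO entry: C bitfields such as len:14 / owner:1 have a compiler- and endian-dependent layout, so on big-endian hosts the owner and interrupt-enable bits did not land where the NIC expects them. The fields are therefore collapsed into whole __le16/__le32 words, with the flag bits defined as pre-swapped masks (OWNED_BY_NIC, RX_INTEN, TD_QUEUE) that can be ORed in and tested without further conversion. A stand-alone illustration of the idiom, with the cpu_to_le16() wrapping left out for brevity:

	#include <stdint.h>
	#include <stdio.h>

	#define OWNED_BY_NIC	0x8000u		/* bit 15 of the length word */

	int main(void)
	{
		uint16_t len = 1514;		/* frame size lives in bits 0..13 */

		len |= OWNED_BY_NIC;		/* hand the descriptor to the NIC */
		printf("nic owns it: %s, size: %u\n",
		       (len & OWNED_BY_NIC) ? "yes" : "no", len & 0x3fff);

		len &= (uint16_t)~OWNED_BY_NIC;	/* the NIC clears the bit when done */
		return 0;
	}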

View file

@ -153,7 +153,7 @@ static int ath5k_pci_resume(struct pci_dev *pdev);
#define ath5k_pci_resume NULL #define ath5k_pci_resume NULL
#endif /* CONFIG_PM */ #endif /* CONFIG_PM */
static struct pci_driver ath5k_pci_drv_id = { static struct pci_driver ath5k_pci_driver = {
.name = "ath5k_pci", .name = "ath5k_pci",
.id_table = ath5k_pci_id_table, .id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe, .probe = ath5k_pci_probe,
@ -329,7 +329,7 @@ init_ath5k_pci(void)
ath5k_debug_init(); ath5k_debug_init();
ret = pci_register_driver(&ath5k_pci_drv_id); ret = pci_register_driver(&ath5k_pci_driver);
if (ret) { if (ret) {
printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
return ret; return ret;
@ -341,7 +341,7 @@ init_ath5k_pci(void)
static void __exit static void __exit
exit_ath5k_pci(void) exit_ath5k_pci(void)
{ {
pci_unregister_driver(&ath5k_pci_drv_id); pci_unregister_driver(&ath5k_pci_driver);
ath5k_debug_finish(); ath5k_debug_finish();
} }

View file

@ -238,7 +238,8 @@ void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_b
priv->last_statistics_time = jiffies; priv->last_statistics_time = jiffies;
} }
void iwl3945_add_radiotap(struct iwl3945_priv *priv, struct sk_buff *skb, static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
struct sk_buff *skb,
struct iwl3945_rx_frame_hdr *rx_hdr, struct iwl3945_rx_frame_hdr *rx_hdr,
struct ieee80211_rx_status *stats) struct ieee80211_rx_status *stats)
{ {

View file

@ -4658,17 +4658,30 @@ void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
struct ieee80211_ht_info *sta_ht_inf) struct ieee80211_ht_info *sta_ht_inf)
{ {
__le32 sta_flags; __le32 sta_flags;
u8 mimo_ps_mode;
if (!sta_ht_inf || !sta_ht_inf->ht_supported) if (!sta_ht_inf || !sta_ht_inf->ht_supported)
goto done; goto done;
mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
sta_flags = priv->stations[index].sta.station_flags; sta_flags = priv->stations[index].sta.station_flags;
if (((sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS >> 2)) sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
== IWL_MIMO_PS_DYNAMIC)
switch (mimo_ps_mode) {
case WLAN_HT_CAP_MIMO_PS_STATIC:
sta_flags |= STA_FLG_MIMO_DIS_MSK;
break;
case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
else break;
sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK; case WLAN_HT_CAP_MIMO_PS_DISABLED:
break;
default:
IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
break;
}
sta_flags |= cpu_to_le32( sta_flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
@ -4679,7 +4692,7 @@ void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf)) if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
sta_flags |= STA_FLG_FAT_EN_MSK; sta_flags |= STA_FLG_FAT_EN_MSK;
else else
sta_flags &= (~STA_FLG_FAT_EN_MSK); sta_flags &= ~STA_FLG_FAT_EN_MSK;
priv->stations[index].sta.station_flags = sta_flags; priv->stations[index].sta.station_flags = sta_flags;
done: done:
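The replaced test had an operator-precedence bug on top of the missing cases: '&' binds more loosely than '>>', so cap & IEEE80211_HT_CAP_MIMO_PS >> 2 masked with the shifted constant instead of shifting the masked field. The new code extracts the two-bit MIMO power-save field first and then switches over all four defined values. A small stand-alone demonstration, assuming the field occupies bits 2..3 of the HT capability word as the WLAN_HT_CAP_MIMO_PS_* values imply:

	#include <stdio.h>

	#define HT_CAP_MIMO_PS	0x000c		/* two-bit field in bits 2..3 */

	int main(void)
	{
		unsigned cap = 0x0004;				/* example: MIMO PS = dynamic (1) */
		unsigned field = (cap & HT_CAP_MIMO_PS) >> 2;	/* 1, as intended */
		unsigned buggy = cap & (HT_CAP_MIMO_PS >> 2);	/* 0: the mask got shifted, not the field */

		printf("field=%u buggy=%u\n", field, buggy);
		return 0;
	}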

View file

@ -147,9 +147,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf(
#define QOS_CONTROL_LEN 2 #define QOS_CONTROL_LEN 2
#define IEEE80211_STYPE_BACK_REQ 0x0080
#define IEEE80211_STYPE_BACK 0x0090
static inline int ieee80211_is_management(u16 fc) static inline int ieee80211_is_management(u16 fc)
{ {

View file

@ -6330,6 +6330,11 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
return -ENODEV; return -ENODEV;
} }
if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
IWL_ERROR("ucode not available for device bringup\n");
return -EIO;
}
/* If platform's RF_KILL switch is NOT set to KILL */ /* If platform's RF_KILL switch is NOT set to KILL */
if (iwl3945_read32(priv, CSR_GP_CNTRL) & if (iwl3945_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
@ -6342,11 +6347,6 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
} }
} }
if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
IWL_ERROR("ucode not available for device bringup\n");
return -EIO;
}
iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF); iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl3945_hw_nic_init(priv); rc = iwl3945_hw_nic_init(priv);

View file

@ -6755,6 +6755,11 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
return -ENODEV; return -ENODEV;
} }
if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
IWL_ERROR("ucode not available for device bringup\n");
return -EIO;
}
/* If platform's RF_KILL switch is NOT set to KILL */ /* If platform's RF_KILL switch is NOT set to KILL */
if (iwl4965_read32(priv, CSR_GP_CNTRL) & if (iwl4965_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
@ -6767,11 +6772,6 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
} }
} }
if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
IWL_ERROR("ucode not available for device bringup\n");
return -EIO;
}
iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF); iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl4965_hw_nic_init(priv); rc = iwl4965_hw_nic_init(priv);

View file

@ -525,6 +525,7 @@ struct dccp_sock {
__u64 dccps_gsr; __u64 dccps_gsr;
__u64 dccps_gar; __u64 dccps_gar;
__be32 dccps_service; __be32 dccps_service;
__u32 dccps_mss_cache;
struct dccp_service_list *dccps_service_list; struct dccp_service_list *dccps_service_list;
__u32 dccps_timestamp_echo; __u32 dccps_timestamp_echo;
__u32 dccps_timestamp_time; __u32 dccps_timestamp_time;
@ -533,7 +534,6 @@ struct dccp_sock {
__u16 dccps_pcslen; __u16 dccps_pcslen;
__u16 dccps_pcrlen; __u16 dccps_pcrlen;
unsigned long dccps_ndp_count; unsigned long dccps_ndp_count;
__u32 dccps_mss_cache;
unsigned long dccps_rate_last; unsigned long dccps_rate_last;
struct dccp_minisock dccps_minisock; struct dccp_minisock dccps_minisock;
struct dccp_ackvec *dccps_hc_rx_ackvec; struct dccp_ackvec *dccps_hc_rx_ackvec;

View file

@ -287,6 +287,12 @@ struct ieee80211_ht_addt_info {
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
/* MIMO Power Save Modes */
#define WLAN_HT_CAP_MIMO_PS_STATIC 0
#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1
#define WLAN_HT_CAP_MIMO_PS_INVALID 2
#define WLAN_HT_CAP_MIMO_PS_DISABLED 3
/* Authentication algorithms */ /* Authentication algorithms */
#define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1 #define WLAN_AUTH_SHARED_KEY 1

View file

@ -88,7 +88,7 @@ struct mii_bus {
/* A lock to ensure that only one thing can read/write /* A lock to ensure that only one thing can read/write
* the MDIO bus at a time */ * the MDIO bus at a time */
spinlock_t mdio_lock; struct mutex mdio_lock;
struct device *dev; struct device *dev;
@ -284,10 +284,11 @@ struct phy_device {
/* Interrupt and Polling infrastructure */ /* Interrupt and Polling infrastructure */
struct work_struct phy_queue; struct work_struct phy_queue;
struct work_struct state_queue;
struct timer_list phy_timer; struct timer_list phy_timer;
atomic_t irq_disable; atomic_t irq_disable;
spinlock_t lock; struct mutex lock;
struct net_device *attached_dev; struct net_device *attached_dev;

View file

@ -112,13 +112,13 @@ struct ifmcaddr6
struct ip6_sf_list *mca_sources; struct ip6_sf_list *mca_sources;
struct ip6_sf_list *mca_tomb; struct ip6_sf_list *mca_tomb;
unsigned int mca_sfmode; unsigned int mca_sfmode;
unsigned char mca_crcount;
unsigned long mca_sfcount[2]; unsigned long mca_sfcount[2];
struct timer_list mca_timer; struct timer_list mca_timer;
unsigned mca_flags; unsigned mca_flags;
int mca_users; int mca_users;
atomic_t mca_refcnt; atomic_t mca_refcnt;
spinlock_t mca_lock; spinlock_t mca_lock;
unsigned char mca_crcount;
unsigned long mca_cstamp; unsigned long mca_cstamp;
unsigned long mca_tstamp; unsigned long mca_tstamp;
}; };
@ -166,11 +166,11 @@ struct inet6_dev
struct ifmcaddr6 *mc_list; struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb; struct ifmcaddr6 *mc_tomb;
rwlock_t mc_lock; rwlock_t mc_lock;
unsigned long mc_v1_seen;
unsigned long mc_maxdelay;
unsigned char mc_qrv; unsigned char mc_qrv;
unsigned char mc_gq_running; unsigned char mc_gq_running;
unsigned char mc_ifc_count; unsigned char mc_ifc_count;
unsigned long mc_v1_seen;
unsigned long mc_maxdelay;
struct timer_list mc_gq_timer; /* general query timer */ struct timer_list mc_gq_timer; /* general query timer */
struct timer_list mc_ifc_timer; /* interface change timer */ struct timer_list mc_ifc_timer; /* interface change timer */
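These ifmcaddr6 and inet6_dev hunks, like the dccp_sock and inet_timewait_sock reorderings elsewhere in this merge, change no behaviour: members are only reordered so that narrow fields share what would otherwise be alignment padding, which is where the saved bytes come from. A toy model of the effect on a 64-bit build:

	#include <stdio.h>

	struct before {
		unsigned long a;
		unsigned char flag;	/* followed by 7 bytes of padding */
		unsigned long b;
		unsigned char c, d, e;
	};

	struct after {
		unsigned long a;
		unsigned long b;
		unsigned char flag;	/* packed together with the other narrow fields */
		unsigned char c, d, e;
	};

	int main(void)
	{
		printf("before=%zu after=%zu\n",
		       sizeof(struct before), sizeof(struct after));
		return 0;
	}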

View file

@ -49,7 +49,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk)
return inet6_ehashfn(laddr, lport, faddr, fport); return inet6_ehashfn(laddr, lport, faddr, fport);
} }
extern void __inet6_hash(struct inet_hashinfo *hashinfo, struct sock *sk); extern void __inet6_hash(struct sock *sk);
/* /*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so

View file

@ -29,7 +29,6 @@
#undef INET_CSK_CLEAR_TIMERS #undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket; struct inet_bind_bucket;
struct inet_hashinfo;
struct tcp_congestion_ops; struct tcp_congestion_ops;
/* /*
@ -59,6 +58,8 @@ struct inet_connection_sock_af_ops {
int level, int optname, int level, int optname,
char __user *optval, int __user *optlen); char __user *optval, int __user *optlen);
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
int (*bind_conflict)(const struct sock *sk,
const struct inet_bind_bucket *tb);
}; };
/** inet_connection_sock - INET connection oriented sock /** inet_connection_sock - INET connection oriented sock
@ -244,10 +245,7 @@ extern struct request_sock *inet_csk_search_req(const struct sock *sk,
const __be32 laddr); const __be32 laddr);
extern int inet_csk_bind_conflict(const struct sock *sk, extern int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb); const struct inet_bind_bucket *tb);
extern int inet_csk_get_port(struct inet_hashinfo *hashinfo, extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
struct sock *sk, unsigned short snum,
int (*bind_conflict)(const struct sock *sk,
const struct inet_bind_bucket *tb));
extern struct dst_entry* inet_csk_route_req(struct sock *sk, extern struct dst_entry* inet_csk_route_req(struct sock *sk,
const struct request_sock *req); const struct request_sock *req);

View file

@ -221,9 +221,9 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
} }
/* Caller must disable local BH processing. */ /* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table, static inline void __inet_inherit_port(struct sock *sk, struct sock *child)
struct sock *sk, struct sock *child)
{ {
struct inet_hashinfo *table = sk->sk_prot->hashinfo;
const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
struct inet_bind_hashbucket *head = &table->bhash[bhash]; struct inet_bind_hashbucket *head = &table->bhash[bhash];
struct inet_bind_bucket *tb; struct inet_bind_bucket *tb;
@ -235,15 +235,14 @@ static inline void __inet_inherit_port(struct inet_hashinfo *table,
spin_unlock(&head->lock); spin_unlock(&head->lock);
} }
static inline void inet_inherit_port(struct inet_hashinfo *table, static inline void inet_inherit_port(struct sock *sk, struct sock *child)
struct sock *sk, struct sock *child)
{ {
local_bh_disable(); local_bh_disable();
__inet_inherit_port(table, sk, child); __inet_inherit_port(sk, child);
local_bh_enable(); local_bh_enable();
} }
extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk); extern void inet_put_port(struct sock *sk);
extern void inet_listen_wlock(struct inet_hashinfo *hashinfo); extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);
@ -266,41 +265,9 @@ static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
wake_up(&hashinfo->lhash_wait); wake_up(&hashinfo->lhash_wait);
} }
extern void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk); extern void __inet_hash_nolisten(struct sock *sk);
extern void __inet_hash_nolisten(struct inet_hashinfo *hinfo, struct sock *sk); extern void inet_hash(struct sock *sk);
extern void inet_unhash(struct sock *sk);
static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
__inet_hash(hashinfo, sk);
local_bh_enable();
}
}
static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
rwlock_t *lock;
if (sk_unhashed(sk))
goto out;
if (sk->sk_state == TCP_LISTEN) {
local_bh_disable();
inet_listen_wlock(hashinfo);
lock = &hashinfo->lhash_lock;
} else {
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
write_lock_bh(lock);
}
if (__sk_del_node_init(sk))
sock_prot_inuse_add(sk->sk_prot, -1);
write_unlock_bh(lock);
out:
if (sk->sk_state == TCP_LISTEN)
wake_up(&hashinfo->lhash_wait);
}
extern struct sock *__inet_lookup_listener(struct net *net, extern struct sock *__inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo, struct inet_hashinfo *hashinfo,
@ -425,7 +392,7 @@ extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, struct sock *sk,
int (*check_established)(struct inet_timewait_death_row *, int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **), struct sock *, __u16, struct inet_timewait_sock **),
void (*hash)(struct inet_hashinfo *, struct sock *)); void (*hash)(struct sock *sk));
extern int inet_hash_connect(struct inet_timewait_death_row *death_row, extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk); struct sock *sk);
#endif /* _INET_HASHTABLES_H */ #endif /* _INET_HASHTABLES_H */

View file

@ -116,6 +116,7 @@ struct inet_timewait_sock {
#define tw_hash __tw_common.skc_hash #define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot #define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net #define tw_net __tw_common.skc_net
int tw_timeout;
volatile unsigned char tw_substate; volatile unsigned char tw_substate;
/* 3 bits hole, try to pack */ /* 3 bits hole, try to pack */
unsigned char tw_rcv_wscale; unsigned char tw_rcv_wscale;
@ -130,7 +131,6 @@ struct inet_timewait_sock {
__u8 tw_ipv6only:1; __u8 tw_ipv6only:1;
/* 15 bits hole, try to pack */ /* 15 bits hole, try to pack */
__u16 tw_ipv6_offset; __u16 tw_ipv6_offset;
int tw_timeout;
unsigned long tw_ttd; unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb; struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node; struct hlist_node tw_death_node;

View file

@ -496,6 +496,7 @@ extern int sk_wait_data(struct sock *sk, long *timeo);
struct request_sock_ops; struct request_sock_ops;
struct timewait_sock_ops; struct timewait_sock_ops;
struct inet_hashinfo;
/* Networking protocol blocks we attach to sockets. /* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface * socket layer -> transport layer interface
@ -578,6 +579,8 @@ struct proto {
struct request_sock_ops *rsk_prot; struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot; struct timewait_sock_ops *twsk_prot;
struct inet_hashinfo *hashinfo;
struct module *owner; struct module *owner;
char name[32]; char name[32];
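Adding the hashinfo pointer to struct proto is what allows the later inet/dccp hunks to drop their per-protocol wrappers: generic helpers can reach the correct hash table through the socket itself and be wired straight into the proto ops. Roughly, as a sketch rather than the exact kernel code:

	static void example_unhash(struct sock *sk)
	{
		struct inet_hashinfo *hinfo = sk->sk_prot->hashinfo;	/* tcp_hashinfo, dccp_hashinfo, ... */

		/* ... unlink sk from hinfo->ehash or the listening hash ... */
	}

	/* which in turn permits protocol definitions of the form
	 *	.hash     = inet_hash,
	 *	.unhash   = inet_unhash,
	 *	.get_port = inet_csk_get_port,
	 *	.hashinfo = &dccp_hashinfo,
	 * instead of dccp_hash()/dccp_unhash()/dccp_v4_get_port() wrappers. */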

View file

@@ -271,8 +271,6 @@ extern struct sk_buff *dccp_make_response(struct sock *sk,
 extern int dccp_connect(struct sock *sk);
 extern int dccp_disconnect(struct sock *sk, int flags);
-extern void dccp_hash(struct sock *sk);
-extern void dccp_unhash(struct sock *sk);
 extern int dccp_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen);
 extern int dccp_setsockopt(struct sock *sk, int level, int optname,

View file

@@ -38,12 +38,6 @@
  */
 static struct socket *dccp_v4_ctl_socket;

-static int dccp_v4_get_port(struct sock *sk, const unsigned short snum)
-{
-        return inet_csk_get_port(&dccp_hashinfo, sk, snum,
-                                 inet_csk_bind_conflict);
-}
-
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
         struct inet_sock *inet = inet_sk(sk);
@@ -408,8 +402,8 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
         dccp_sync_mss(newsk, dst_mtu(dst));

-        __inet_hash_nolisten(&dccp_hashinfo, newsk);
-        __inet_inherit_port(&dccp_hashinfo, sk, newsk);
+        __inet_hash_nolisten(newsk);
+        __inet_inherit_port(sk, newsk);

         return newsk;
@@ -898,6 +892,7 @@ static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
         .getsockopt = ip_getsockopt,
         .addr2sockaddr = inet_csk_addr2sockaddr,
         .sockaddr_len = sizeof(struct sockaddr_in),
+        .bind_conflict = inet_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_ip_setsockopt,
         .compat_getsockopt = compat_ip_getsockopt,
@@ -937,10 +932,10 @@ static struct proto dccp_v4_prot = {
         .sendmsg = dccp_sendmsg,
         .recvmsg = dccp_recvmsg,
         .backlog_rcv = dccp_v4_do_rcv,
-        .hash = dccp_hash,
-        .unhash = dccp_unhash,
+        .hash = inet_hash,
+        .unhash = inet_unhash,
         .accept = inet_csk_accept,
-        .get_port = dccp_v4_get_port,
+        .get_port = inet_csk_get_port,
         .shutdown = dccp_shutdown,
         .destroy = dccp_destroy_sock,
         .orphan_count = &dccp_orphan_count,
@@ -948,6 +943,7 @@ static struct proto dccp_v4_prot = {
         .obj_size = sizeof(struct dccp_sock),
         .rsk_prot = &dccp_request_sock_ops,
         .twsk_prot = &dccp_timewait_sock_ops,
+        .hashinfo = &dccp_hashinfo,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_dccp_setsockopt,
         .compat_getsockopt = compat_dccp_getsockopt,

View file

@@ -39,21 +39,15 @@ static struct socket *dccp_v6_ctl_socket;
 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

-static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
-{
-        return inet_csk_get_port(&dccp_hashinfo, sk, snum,
-                                 inet6_csk_bind_conflict);
-}
-
 static void dccp_v6_hash(struct sock *sk)
 {
         if (sk->sk_state != DCCP_CLOSED) {
                 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
-                        dccp_hash(sk);
+                        inet_hash(sk);
                         return;
                 }
                 local_bh_disable();
-                __inet6_hash(&dccp_hashinfo, sk);
+                __inet6_hash(sk);
                 local_bh_enable();
         }
 }
@@ -630,8 +624,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
         newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

-        __inet6_hash(&dccp_hashinfo, newsk);
-        inet_inherit_port(&dccp_hashinfo, sk, newsk);
+        __inet6_hash(newsk);
+        inet_inherit_port(sk, newsk);

         return newsk;
@@ -1054,6 +1048,7 @@ static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
         .getsockopt = ipv6_getsockopt,
         .addr2sockaddr = inet6_csk_addr2sockaddr,
         .sockaddr_len = sizeof(struct sockaddr_in6),
+        .bind_conflict = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_ipv6_setsockopt,
         .compat_getsockopt = compat_ipv6_getsockopt,
@@ -1123,9 +1118,9 @@ static struct proto dccp_v6_prot = {
         .recvmsg = dccp_recvmsg,
         .backlog_rcv = dccp_v6_do_rcv,
         .hash = dccp_v6_hash,
-        .unhash = dccp_unhash,
+        .unhash = inet_unhash,
         .accept = inet_csk_accept,
-        .get_port = dccp_v6_get_port,
+        .get_port = inet_csk_get_port,
         .shutdown = dccp_shutdown,
         .destroy = dccp_v6_destroy_sock,
         .orphan_count = &dccp_orphan_count,
@@ -1133,6 +1128,7 @@ static struct proto dccp_v6_prot = {
         .obj_size = sizeof(struct dccp6_sock),
         .rsk_prot = &dccp6_request_sock_ops,
         .twsk_prot = &dccp6_timewait_sock_ops,
+        .hashinfo = &dccp_hashinfo,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_dccp_setsockopt,
         .compat_getsockopt = compat_dccp_getsockopt,

View file

@@ -78,7 +78,7 @@ void dccp_set_state(struct sock *sk, const int state)
                 sk->sk_prot->unhash(sk);
                 if (inet_csk(sk)->icsk_bind_hash != NULL &&
                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
-                        inet_put_port(&dccp_hashinfo, sk);
+                        inet_put_port(sk);
                 /* fall through */
         default:
                 if (oldstate == DCCP_OPEN)
@@ -173,20 +173,6 @@ const char *dccp_state_name(const int state)

 EXPORT_SYMBOL_GPL(dccp_state_name);

-void dccp_hash(struct sock *sk)
-{
-        inet_hash(&dccp_hashinfo, sk);
-}
-
-EXPORT_SYMBOL_GPL(dccp_hash);
-
-void dccp_unhash(struct sock *sk)
-{
-        inet_unhash(&dccp_hashinfo, sk);
-}
-
-EXPORT_SYMBOL_GPL(dccp_unhash);
-
 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
 {
         struct dccp_sock *dp = dccp_sk(sk);
@@ -268,7 +254,7 @@ int dccp_destroy_sock(struct sock *sk)
         /* Clean up a referenced DCCP bind bucket. */
         if (inet_csk(sk)->icsk_bind_hash != NULL)
-                inet_put_port(&dccp_hashinfo, sk);
+                inet_put_port(sk);

         kfree(dp->dccps_service_list);
         dp->dccps_service_list = NULL;

View file

@@ -78,11 +78,9 @@ EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
 /* Obtain a reference to a local port for the given sock,
  * if snum is zero it means select any available local port.
  */
-int inet_csk_get_port(struct inet_hashinfo *hashinfo,
-                      struct sock *sk, unsigned short snum,
-                      int (*bind_conflict)(const struct sock *sk,
-                                           const struct inet_bind_bucket *tb))
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
 {
+        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
         struct inet_bind_hashbucket *head;
         struct hlist_node *node;
         struct inet_bind_bucket *tb;
@@ -142,7 +140,7 @@ tb_found:
                         goto success;
                 } else {
                         ret = 1;
-                        if (bind_conflict(sk, tb))
+                        if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb))
                                 goto fail_unlock;
                 }
         }

View file

@@ -66,8 +66,9 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 /*
  * Get rid of any references to a local port held by the given sock.
  */
-static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+static void __inet_put_port(struct sock *sk)
 {
+        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
         const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
         struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
         struct inet_bind_bucket *tb;
@@ -81,10 +82,10 @@ static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
         spin_unlock(&head->lock);
 }

-void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+void inet_put_port(struct sock *sk)
 {
         local_bh_disable();
-        __inet_put_port(hashinfo, sk);
+        __inet_put_port(sk);
         local_bh_enable();
 }
@@ -317,8 +318,9 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
                                           inet->dport);
 }

-void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk)
+void __inet_hash_nolisten(struct sock *sk)
 {
+        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
         struct hlist_head *list;
         rwlock_t *lock;
         struct inet_ehash_bucket *head;
@@ -337,13 +339,14 @@ void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

-void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
+static void __inet_hash(struct sock *sk)
 {
+        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
         struct hlist_head *list;
         rwlock_t *lock;

         if (sk->sk_state != TCP_LISTEN) {
-                __inet_hash_nolisten(hashinfo, sk);
+                __inet_hash_nolisten(sk);
                 return;
         }
@@ -357,13 +360,48 @@ void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
         write_unlock(lock);
         wake_up(&hashinfo->lhash_wait);
 }
-EXPORT_SYMBOL_GPL(__inet_hash);
+
+void inet_hash(struct sock *sk)
+{
+        if (sk->sk_state != TCP_CLOSE) {
+                local_bh_disable();
+                __inet_hash(sk);
+                local_bh_enable();
+        }
+}
+EXPORT_SYMBOL_GPL(inet_hash);
+
+void inet_unhash(struct sock *sk)
+{
+        rwlock_t *lock;
+        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
+
+        if (sk_unhashed(sk))
+                goto out;
+
+        if (sk->sk_state == TCP_LISTEN) {
+                local_bh_disable();
+                inet_listen_wlock(hashinfo);
+                lock = &hashinfo->lhash_lock;
+        } else {
+                lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+                write_lock_bh(lock);
+        }
+
+        if (__sk_del_node_init(sk))
+                sock_prot_inuse_add(sk->sk_prot, -1);
+        write_unlock_bh(lock);
+out:
+        if (sk->sk_state == TCP_LISTEN)
+                wake_up(&hashinfo->lhash_wait);
+}
+EXPORT_SYMBOL_GPL(inet_unhash);

 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                 struct sock *sk,
                 int (*check_established)(struct inet_timewait_death_row *,
                         struct sock *, __u16, struct inet_timewait_sock **),
-                void (*hash)(struct inet_hashinfo *, struct sock *))
+                void (*hash)(struct sock *sk))
 {
         struct inet_hashinfo *hinfo = death_row->hashinfo;
         const unsigned short snum = inet_sk(sk)->num;
@@ -427,7 +465,7 @@ ok:
                 inet_bind_hash(sk, tb, port);
                 if (sk_unhashed(sk)) {
                         inet_sk(sk)->sport = htons(port);
-                        hash(hinfo, sk);
+                        hash(sk);
                 }
                 spin_unlock(&head->lock);
@@ -444,7 +482,7 @@ ok:
         tb = inet_csk(sk)->icsk_bind_hash;
         spin_lock_bh(&head->lock);
         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-                hash(hinfo, sk);
+                hash(sk);
                 spin_unlock_bh(&head->lock);
                 return 0;
         } else {

View file

@@ -1669,7 +1669,7 @@ void tcp_set_state(struct sock *sk, int state)
                 sk->sk_prot->unhash(sk);
                 if (inet_csk(sk)->icsk_bind_hash &&
                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
-                        inet_put_port(&tcp_hashinfo, sk);
+                        inet_put_port(sk);
                 /* fall through */
         default:
                 if (oldstate==TCP_ESTABLISHED)

View file

@@ -108,22 +108,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
         .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };

-static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
-{
-        return inet_csk_get_port(&tcp_hashinfo, sk, snum,
-                                 inet_csk_bind_conflict);
-}
-
-static void tcp_v4_hash(struct sock *sk)
-{
-        inet_hash(&tcp_hashinfo, sk);
-}
-
-void tcp_unhash(struct sock *sk)
-{
-        inet_unhash(&tcp_hashinfo, sk);
-}
-
 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
@@ -1478,8 +1462,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         }
 #endif

-        __inet_hash_nolisten(&tcp_hashinfo, newsk);
-        __inet_inherit_port(&tcp_hashinfo, sk, newsk);
+        __inet_hash_nolisten(newsk);
+        __inet_inherit_port(sk, newsk);

         return newsk;
@@ -1827,6 +1811,7 @@ struct inet_connection_sock_af_ops ipv4_specific = {
         .getsockopt = ip_getsockopt,
         .addr2sockaddr = inet_csk_addr2sockaddr,
         .sockaddr_len = sizeof(struct sockaddr_in),
+        .bind_conflict = inet_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_ip_setsockopt,
         .compat_getsockopt = compat_ip_getsockopt,
@@ -1926,7 +1911,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
         /* Clean up a referenced TCP bind bucket. */
         if (inet_csk(sk)->icsk_bind_hash)
-                inet_put_port(&tcp_hashinfo, sk);
+                inet_put_port(sk);

         /*
          * If sendmsg cached page exists, toss it.
@@ -2435,9 +2420,9 @@ struct proto tcp_prot = {
         .getsockopt = tcp_getsockopt,
         .recvmsg = tcp_recvmsg,
         .backlog_rcv = tcp_v4_do_rcv,
-        .hash = tcp_v4_hash,
-        .unhash = tcp_unhash,
-        .get_port = tcp_v4_get_port,
+        .hash = inet_hash,
+        .unhash = inet_unhash,
+        .get_port = inet_csk_get_port,
         .enter_memory_pressure = tcp_enter_memory_pressure,
         .sockets_allocated = &tcp_sockets_allocated,
         .orphan_count = &tcp_orphan_count,
@@ -2450,6 +2435,7 @@ struct proto tcp_prot = {
         .obj_size = sizeof(struct tcp_sock),
         .twsk_prot = &tcp_timewait_sock_ops,
         .rsk_prot = &tcp_request_sock_ops,
+        .hashinfo = &tcp_hashinfo,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_tcp_setsockopt,
         .compat_getsockopt = compat_tcp_getsockopt,
@@ -2467,7 +2453,6 @@ void __init tcp_v4_init(struct net_proto_family *ops)
 EXPORT_SYMBOL(ipv4_specific);
 EXPORT_SYMBOL(tcp_hashinfo);
 EXPORT_SYMBOL(tcp_prot);
-EXPORT_SYMBOL(tcp_unhash);
 EXPORT_SYMBOL(tcp_v4_conn_request);
 EXPORT_SYMBOL(tcp_v4_connect);
 EXPORT_SYMBOL(tcp_v4_do_rcv);

View file

@@ -22,9 +22,9 @@
 #include <net/inet6_hashtables.h>
 #include <net/ip.h>

-void __inet6_hash(struct inet_hashinfo *hashinfo,
-                  struct sock *sk)
+void __inet6_hash(struct sock *sk)
 {
+        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
         struct hlist_head *list;
         rwlock_t *lock;

View file

@@ -86,12 +86,6 @@ static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 #endif

-static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
-{
-        return inet_csk_get_port(&tcp_hashinfo, sk, snum,
-                                 inet6_csk_bind_conflict);
-}
-
 static void tcp_v6_hash(struct sock *sk)
 {
         if (sk->sk_state != TCP_CLOSE) {
@@ -100,7 +94,7 @@ static void tcp_v6_hash(struct sock *sk)
                         return;
                 }
                 local_bh_disable();
-                __inet6_hash(&tcp_hashinfo, sk);
+                __inet6_hash(sk);
                 local_bh_enable();
         }
 }
@@ -1504,8 +1498,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         }
 #endif

-        __inet6_hash(&tcp_hashinfo, newsk);
-        inet_inherit_port(&tcp_hashinfo, sk, newsk);
+        __inet6_hash(newsk);
+        inet_inherit_port(sk, newsk);

         return newsk;
@@ -1833,6 +1827,7 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
         .getsockopt = ipv6_getsockopt,
         .addr2sockaddr = inet6_csk_addr2sockaddr,
         .sockaddr_len = sizeof(struct sockaddr_in6),
+        .bind_conflict = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_ipv6_setsockopt,
         .compat_getsockopt = compat_ipv6_getsockopt,
@@ -1864,6 +1859,7 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
         .getsockopt = ipv6_getsockopt,
         .addr2sockaddr = inet6_csk_addr2sockaddr,
         .sockaddr_len = sizeof(struct sockaddr_in6),
+        .bind_conflict = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_ipv6_setsockopt,
         .compat_getsockopt = compat_ipv6_getsockopt,
@@ -2127,8 +2123,8 @@ struct proto tcpv6_prot = {
         .recvmsg = tcp_recvmsg,
         .backlog_rcv = tcp_v6_do_rcv,
         .hash = tcp_v6_hash,
-        .unhash = tcp_unhash,
-        .get_port = tcp_v6_get_port,
+        .unhash = inet_unhash,
+        .get_port = inet_csk_get_port,
         .enter_memory_pressure = tcp_enter_memory_pressure,
         .sockets_allocated = &tcp_sockets_allocated,
         .memory_allocated = &tcp_memory_allocated,
@@ -2141,6 +2137,7 @@ struct proto tcpv6_prot = {
         .obj_size = sizeof(struct tcp6_sock),
         .twsk_prot = &tcp6_timewait_sock_ops,
         .rsk_prot = &tcp6_request_sock_ops,
+        .hashinfo = &tcp_hashinfo,
 #ifdef CONFIG_COMPAT
         .compat_setsockopt = compat_tcp_setsockopt,
         .compat_getsockopt = compat_tcp_getsockopt,

View file

@@ -98,6 +98,18 @@ config MAC80211_DEBUGFS
           Say N unless you know you need this.

+config MAC80211_DEBUG_PACKET_ALIGNMENT
+        bool "Enable packet alignment debugging"
+        depends on MAC80211
+        help
+          This option is recommended for driver authors and strongly
+          discouraged for everybody else, it will trigger a warning
+          when a driver hands mac80211 a buffer that is aligned in
+          a way that will cause problems with the IP stack on some
+          architectures.
+
+          Say N unless you're writing a mac80211 based driver.
+
 config MAC80211_DEBUG
         bool "Enable debugging output"
         depends on MAC80211

View file

@@ -1344,17 +1344,17 @@ static int __init ieee80211_init(void)
         ret = rc80211_simple_init();
         if (ret)
-                goto fail;
+                goto out;

         ret = rc80211_pid_init();
         if (ret)
-                goto fail_simple;
+                goto out_cleanup_simple;

         ret = ieee80211_wme_register();
         if (ret) {
                 printk(KERN_DEBUG "ieee80211_init: failed to "
                        "initialize WME (err=%d)\n", ret);
-                goto fail_pid;
+                goto out_cleanup_pid;
         }

         ieee80211_debugfs_netdev_init();
@@ -1362,11 +1362,11 @@ static int __init ieee80211_init(void)
         return 0;

- fail_pid:
-        rc80211_simple_exit();
- fail_simple:
+ out_cleanup_pid:
         rc80211_pid_exit();
- fail:
+ out_cleanup_simple:
+        rc80211_simple_exit();
+ out:
         return ret;
 }

View file

@@ -538,7 +538,7 @@ int __init rc80211_pid_init(void)
         return ieee80211_rate_control_register(&mac80211_rcpid);
 }

-void __exit rc80211_pid_exit(void)
+void rc80211_pid_exit(void)
 {
         ieee80211_rate_control_unregister(&mac80211_rcpid);
 }

View file

@@ -389,7 +389,7 @@ int __init rc80211_simple_init(void)
         return ieee80211_rate_control_register(&mac80211_rcsimple);
 }

-void __exit rc80211_simple_exit(void)
+void rc80211_simple_exit(void)
 {
         ieee80211_rate_control_unregister(&mac80211_rcsimple);
 }

View file

@@ -340,11 +340,15 @@ static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
         return load;
 }

+#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
 static ieee80211_txrx_result
 ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
 {
         int hdrlen;

+        if (!WLAN_FC_DATA_PRESENT(rx->fc))
+                return TXRX_CONTINUE;
+
         /*
          * Drivers are required to align the payload data in a way that
          * guarantees that the contained IP header is aligned to a four-
@@ -371,11 +375,14 @@ ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
         return TXRX_CONTINUE;
 }
+#endif

 ieee80211_rx_handler ieee80211_rx_pre_handlers[] =
 {
         ieee80211_rx_h_parse_qos,
+#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
         ieee80211_rx_h_verify_ip_alignment,
+#endif
         NULL
 };