This is the 4.4.73 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllEst4ACgkQONu9yGCS
 aT5RJQ//UBkwmDInzy8BbfZk7pXY4IXzbXYfZ/nS5QbmPQNcQArKeIsOH+C1ZTKq
 suo4wR4yWUzATr8BBBxbUKyyrAsA85oDq/fl3EyPVLYOHdChnM/sS9H/eFrQhJIN
 7PM9j1YiqyTohkagB2tkWSb8tO1r/wgTSTLmIE+RzsWn6rvMCPMPVY+3OGgC1fuT
 LJrgtXKGBl9zNxmrns3VlSou1MhJvh/y8ESIrhdYHdZKos0zsgQaISXJjhZtyQcV
 OnEzsuz9NMXWE9XGjNpyAm88Nh8T41Ey/vwOjt6mkvWac3r77IgI5NWaLV/QDyqm
 d3jpVWK5BSPcLsmeN4LwQC5aYayvHlh8CfP8ZlBx1xkB5TpclnqXGgQA9BYpXAKw
 XoeFl8n8xLaPrgX8gp3kw/f6C6443OC2JVeRvgnH/0ZM7M0+rZxE6DstRcUHGqf6
 K8PN+AssQpBLIjXSGHnzDVHME/1xWUSmJZfLd5bd6NJ1zSZqZOy1gkf5dx77p0Ka
 UaGVOg6UzOojr3GeUTE62bRO2ZuAno0QO1NQJJkUK1CbNMYmE61vYLM8i0pLKWZJ
 3mDlhcoGK8aJH8chNLU3mgkXECU/9zOVKveWZFoghhMlv8ImgeTuiqZhvztzzT38
 42DxdXPfMzxCwBF02zYu4qn+WDJbNyOqMQrlMEHwwb88wnKiOUg=
 =Ic1J
 -----END PGP SIGNATURE-----

Merge 4.4.73 into android-4.4

Changes in 4.4.73
	s390/vmem: fix identity mapping
	partitions/msdos: FreeBSD UFS2 file systems are not recognized
	ARM: dts: imx6dl: Fix the VDD_ARM_CAP voltage for 396MHz operation
	staging: rtl8192e: rtl92e_fill_tx_desc fix write to mapped out memory.
	Call echo service immediately after socket reconnect
	net: xilinx_emaclite: fix freezes due to unordered I/O
	net: xilinx_emaclite: fix receive buffer overflow
	ipv6: Handle IPv4-mapped src to in6addr_any dst.
	ipv6: Inhibit IPv4-mapped src address on the wire.
	NET: Fix /proc/net/arp for AX.25
	NET: mkiss: Fix panic
	net: hns: Fix the device being used for dma mapping during TX
	sierra_net: Skip validating irrelevant fields for IDLE LSIs
	sierra_net: Add support for IPv6 and Dual-Stack Link Sense Indications
	i2c: piix4: Fix request_region size
	ipv6: Fix IPv6 packet loss in scenarios involving roaming + snooping switches
	PM / runtime: Avoid false-positive warnings from might_sleep_if()
	jump label: pass kbuild_cflags when checking for asm goto support
	kasan: respect /proc/sys/kernel/traceoff_on_warning
	log2: make order_base_2() behave correctly on const input value zero
	ethtool: do not vzalloc(0) on registers dump
	fscache: Fix dead object requeue
	fscache: Clear outstanding writes when disabling a cookie
	FS-Cache: Initialise stores_lock in netfs cookie
	ipv6: fix flow labels when the traffic class is non-0
	drm/nouveau: prevent userspace from deleting client object
	drm/nouveau/fence/g84-: protect against concurrent access to semaphore buffers
	net/mlx4_core: Avoid command timeouts during VF driver device shutdown
	gianfar: synchronize DMA API usage by free_skb_rx_queue w/ gfar_new_page
	pinctrl: berlin-bg4ct: fix the value for "sd1a" of pin SCRD0_CRD_PRES
	net: adaptec: starfire: add checks for dma mapping errors
	parisc, parport_gsc: Fixes for printk continuation lines
	drm/nouveau: Don't enabling polling twice on runtime resume
	drm/ast: Fixed system hanged if disable P2A
	ravb: unmap descriptors when freeing rings
	nfs: Fix "Don't increment lock sequence ID after NFS4ERR_MOVED"
	r8152: re-schedule napi for tx
	r8152: fix rtl8152_post_reset function
	r8152: avoid start_xmit to schedule napi when napi is disabled
	sctp: sctp_addr_id2transport should verify the addr before looking up assoc
	romfs: use different way to generate fsid for BLOCK or MTD
	proc: add a schedule point in proc_pid_readdir()
	tipc: ignore requests when the connection state is not CONNECTED
	xtensa: don't use linux IRQ #0
	s390/kvm: do not rely on the ILC on kvm host protection faults
	sparc64: make string buffers large enough
	Linux 4.4.73

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2017-06-27 09:47:36 +02:00
commit 5672779e72
56 changed files with 609 additions and 324 deletions

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 72
SUBLEVEL = 73
EXTRAVERSION =
NAME = Blurry Fish Butt
@ -789,7 +789,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
KBUILD_ARFLAGS := $(call ar-option,D)
# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
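
The point of the Makefile change above is that the 'asm goto' probe now runs with the same KBUILD_CFLAGS as the real build, so CC_HAVE_ASM_GOTO is only defined when the feature actually survives those flags. A minimal, compilable sketch of the kind of construct the probe exercises (an assumed shape, not the literal contents of scripts/gcc-goto.sh):

/* Illustrative 'asm goto' smoke test; compile it with the same flags the
 * kernel build would use, e.g. "gcc <KBUILD_CFLAGS> -c probe.c".
 */
int main(void)
{
	asm goto("" : : : : failed);	/* needs compiler 'asm goto' support */
	return 0;
failed:
	return 1;
}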

View file

@ -30,7 +30,7 @@
/* kHz uV */
996000 1250000
792000 1175000
396000 1075000
396000 1150000
>;
fsl,soc-operating-points = <
/* ARM kHz SOC-PU uV */

View file

@ -229,12 +229,17 @@ ENTRY(sie64a)
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad:
nop 0
.Lrewind_pad6:
nopr 7
.Lrewind_pad4:
nopr 7
.Lrewind_pad2:
nopr 7
.globl sie_exit
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@ -247,7 +252,9 @@ sie_exit:
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit
EX_TABLE(.Lrewind_pad,.Lsie_fault)
EX_TABLE(.Lrewind_pad6,.Lsie_fault)
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
#endif

View file

@ -372,7 +372,7 @@ void __init vmem_map_init(void)
ro_end = (unsigned long)&_eshared & PAGE_MASK;
for_each_memblock(memory, reg) {
start = reg->base;
end = reg->base + reg->size - 1;
end = reg->base + reg->size;
if (start >= ro_end || end <= ro_start)
vmem_add_mem(start, end - start, 0);
else if (start >= ro_start && end <= ro_end)
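
The one-character change above turns 'end' into a half-open bound, so the length passed to vmem_add_mem() (end - start) covers the whole memblock instead of stopping one byte short. A tiny check of the arithmetic, with made-up values:

/* Hypothetical memblock; only the end-calculation difference matters. */
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x80000000UL, size = 0x10000000UL;
	unsigned long end_old = base + size - 1;	/* closed interval, before the fix */
	unsigned long end_new = base + size;		/* half-open interval, after the fix */

	printf("old length: %#lx\n", end_old - base);	/* 0xfffffff, one byte short */
	printf("new length: %#lx\n", end_new - base);	/* 0x10000000, the full block */
	return 0;
}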

View file

@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
void bad_trap(struct pt_regs *regs, long lvl)
{
char buffer[32];
char buffer[36];
siginfo_t info;
if (notify_die(DIE_TRAP, "bad trap", regs,
@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
char buffer[32];
char buffer[36];
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)

View file

@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }

View file

@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
int irq = irq_find_mapping(NULL, hwirq);
if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, hwirq);
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{

View file

@ -24,16 +24,18 @@
/* Interrupt configuration. */
#define PLATFORM_NR_IRQS 10
#define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM
#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM
#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif
/*
@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10
#define C67X00_IRQ 5
#endif /* __XTENSA_XTAVNET_HARDWARE_H */

View file

@ -209,8 +209,8 @@ static struct resource ethoc_res[] = {
.flags = IORESOURCE_MEM,
},
[2] = { /* IRQ number */
.start = OETH_IRQ,
.end = OETH_IRQ,
.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@ -246,8 +246,8 @@ static struct resource c67x00_res[] = {
.flags = IORESOURCE_MEM,
},
[1] = { /* IRQ number */
.start = C67X00_IRQ,
.end = C67X00_IRQ,
.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@ -280,7 +280,7 @@ static struct resource serial_resource = {
static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
.irq = DUART16552_INTNUM,
.irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM32,

View file

@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
if (memcmp(flavour, "bsd\0", 4) == 0)
bsd_start += offset;
if (offset == bsd_start && size == bsd_size)
/* full parent partition, we have it already */
continue;

View file

@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_idle(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
unsigned long flags;
int retval;
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
dev->power.runtime_status != RPM_ACTIVE);
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);

View file

@ -113,6 +113,7 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
bool DisableP2A;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;

View file

@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
} else
*need_post = false;
/* Check P2A Access */
ast->DisableP2A = true;
data = ast_read32(ast, 0xf004);
if (data != 0xFFFFFFFF)
ast->DisableP2A = false;
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
data &= 0x300;
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
ast->support_wide_screen = true;
if (ast->DisableP2A == false) {
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
data &= 0x300;
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
ast->support_wide_screen = true;
}
}
break;
}
@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
uint32_t data, data2;
uint32_t denum, num, div, ref_pll;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
ast_write32(ast, 0x10000, 0xfc600309);
do {
if (pci_channel_offline(dev->pdev))
return -EIO;
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
if (data & 0x40)
if (ast->DisableP2A)
{
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
ast->mclk = 396;
}
else
ast->dram_bus_width = 32;
{
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x10004);
if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (data & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_1Gx16;
if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (data & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_1Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (data & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (data & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
break;
}
}
data = ast_read32(ast, 0x10120);
data2 = ast_read32(ast, 0x10170);
if (data2 & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
denum = data & 0x1f;
num = (data & 0x3fe0) >> 5;
data = (data & 0xc000) >> 14;
switch (data) {
case 3:
div = 0x4;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
case 1:
div = 0x2;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (data & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (data & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
default:
div = 0x1;
break;
}
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
data = ast_read32(ast, 0x10120);
data2 = ast_read32(ast, 0x10170);
if (data2 & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
denum = data & 0x1f;
num = (data & 0x3fe0) >> 5;
data = (data & 0xc000) >> 14;
switch (data) {
case 3:
div = 0x4;
break;
case 2:
case 1:
div = 0x2;
break;
default:
div = 0x1;
break;
}
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}

View file

@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev)
ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);
if (ast->DisableP2A == false)
{
if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);
ast_init_3rdtx(dev);
ast_init_3rdtx(dev);
}
else
{
if (ast->tx_chip_type != AST_TX_NONE)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
}
/* AST 2300 DRAM settings */

View file

@ -370,7 +370,8 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable polling for external displays */
drm_kms_helper_poll_enable(dev);
if (!dev->mode_config.poll_enabled)
drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

View file

@ -743,7 +743,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
drm_kms_helper_poll_enable(drm_dev);
if (!drm_dev->mode_config.poll_enabled)
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);

View file

@ -99,6 +99,7 @@ struct nv84_fence_priv {
struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend;
struct mutex mutex;
};
u64 nv84_fence_crtc(struct nouveau_channel *, int);

View file

@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
if (nvif_unpack(argv->v0, 0, 0, true)) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
if (argv->v0.object == 0ULL)
if (argv->v0.object == 0ULL &&
argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;

View file

@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
}
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
mutex_lock(&priv->mutex);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
nouveau_bo_vma_del(priv->bo, &fctx->vma);
mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
mutex_lock(&priv->mutex);
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
mutex_unlock(&priv->mutex);
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
mutex_init(&priv->mutex);
/* Use VRAM if there is any ; otherwise fallback to system memory */
domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*

View file

@ -54,7 +54,7 @@
#define SMBSLVDAT (0xC + piix4_smba)
/* count for request_region */
#define SMBIOSIZE 8
#define SMBIOSIZE 9
/* PCI Address Constants */
#define SMBBA 0x090

View file

@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
irq_set_default_host(root_domain);

View file

@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_host(root_domain);
return 0;

View file

@ -1153,6 +1153,12 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(np->pci_dev,
np->rx_info[i].mapping)) {
dev_kfree_skb(skb);
np->rx_info[i].skb = NULL;
break;
}
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
unsigned int prev_tx;
u32 status;
int i;
int i, j;
/*
* be cautious here, wrapping the queue has weird semantics
@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
prev_tx = np->cur_tx;
entry = np->cur_tx % TX_RING_SIZE;
for (i = 0; i < skb_num_frags(skb); i++) {
int wrap_ring = 0;
@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
if (pci_dma_mapping_error(np->pci_dev,
np->tx_info[entry].mapping)) {
dev->stats.tx_dropped++;
goto err_out;
}
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
np->tx_ring[entry].status = cpu_to_le32(status);
@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
return NETDEV_TX_OK;
}
err_out:
entry = prev_tx % TX_RING_SIZE;
np->tx_info[entry].skb = NULL;
if (i > 0) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
skb_first_frag_len(skb),
PCI_DMA_TODEVICE);
np->tx_info[entry].mapping = 0;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
for (j = 1; j < i; j++) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
skb_frag_size(
&skb_shinfo(skb)->frags[j-1]),
PCI_DMA_TODEVICE);
entry++;
}
}
dev_kfree_skb_any(skb);
np->cur_tx = prev_tx;
return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_device *dev)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(np->pci_dev,
np->rx_info[entry].mapping)) {
dev_kfree_skb(skb);
np->rx_info[entry].skb = NULL;
break;
}
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}

View file

@ -1999,8 +1999,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
if (!rxb->page)
continue;
dma_unmap_single(rx_queue->dev, rxb->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_page(rx_queue->dev, rxb->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;

View file

@ -105,8 +105,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct device *dev = priv->dev;
struct hnae_ring *ring = ring_data->ring;
struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int buf_num;

View file

@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
return -ETIMEDOUT;
}
static int mlx4_comm_internal_err(u32 slave_read)
int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;

View file

@ -218,6 +218,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_interface *intf;
mlx4_stop_catas_poll(dev);
if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
mlx4_is_slave(dev)) {
/* In mlx4_remove_one on a VF */
u32 slave_read =
swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
if (mlx4_comm_internal_err(slave_read)) {
mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
__func__);
mlx4_enter_error_state(dev->persist);
}
}
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)

View file

@ -1205,6 +1205,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
int mlx4_comm_internal_err(u32 slave_read);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);

View file

@ -171,6 +171,49 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
struct ravb_private *priv = netdev_priv(ndev);
struct net_device_stats *stats = &priv->stats[q];
struct ravb_tx_desc *desc;
int free_num = 0;
int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
bool txed;
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
NUM_TX_DESC);
desc = &priv->tx_ring[q][entry];
txed = desc->die_dt == DT_FEMPTY;
if (free_txed_only && !txed)
break;
/* Descriptor type must be checked before all other reads */
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
/* Last packet descriptor? */
if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
entry /= NUM_TX_DESC;
dev_kfree_skb_any(priv->tx_skb[q][entry]);
priv->tx_skb[q][entry] = NULL;
if (txed)
stats->tx_packets++;
}
free_num++;
}
if (txed)
stats->tx_bytes += size;
desc->die_dt = DT_EEMPTY;
}
return free_num;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
@ -186,19 +229,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
/* Free TX skb ringbuffer */
if (priv->tx_skb[q]) {
for (i = 0; i < priv->num_tx_ring[q]; i++)
dev_kfree_skb(priv->tx_skb[q][i]);
}
kfree(priv->tx_skb[q]);
priv->tx_skb[q] = NULL;
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
PKT_BUF_SZ,
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@ -207,12 +252,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
}
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * NUM_TX_DESC + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
/* Free TX skb ringbuffer.
* SKBs are freed by ravb_tx_free() call above.
*/
kfree(priv->tx_skb[q]);
priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@ -420,44 +473,6 @@ static int ravb_dmac_init(struct net_device *ndev)
return 0;
}
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct net_device_stats *stats = &priv->stats[q];
struct ravb_tx_desc *desc;
int free_num = 0;
int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
NUM_TX_DESC);
desc = &priv->tx_ring[q][entry];
if (desc->die_dt != DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
/* Last packet descriptor? */
if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
entry /= NUM_TX_DESC;
dev_kfree_skb_any(priv->tx_skb[q][entry]);
priv->tx_skb[q][entry] = NULL;
stats->tx_packets++;
}
free_num++;
}
stats->tx_bytes += size;
desc->die_dt = DT_EEMPTY;
}
return free_num;
}
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@ -797,7 +812,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
ravb_tx_free(ndev, q);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@ -1393,7 +1408,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
(priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
(priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
!ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:

View file

@ -100,6 +100,14 @@
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
#define xemaclite_writel iowrite32be
#else
#define xemaclite_readl ioread32
#define xemaclite_writel iowrite32
#endif
/**
* struct net_local - Our private per device data
* @ndev: instance of the network device
@ -158,15 +166,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Enable the Tx interrupts for the first Buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
drvdata->base_addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@ -181,17 +189,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Disable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
drvdata->base_addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
drvdata->base_addr + XEL_TSR_OFFSET);
/* Disable the Rx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
drvdata->base_addr + XEL_RSR_OFFSET);
reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
drvdata->base_addr + XEL_RSR_OFFSET);
}
/**
@ -323,7 +331,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
byte_count = ETH_FRAME_LEN;
/* Check if the expected buffer is available */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
@ -336,7 +344,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@ -347,16 +355,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
* has been transmitted */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
__raw_writel(reg_data, addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
return 0;
}
@ -371,7 +379,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
*
* Return: Total number of bytes received
*/
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
{
void __iomem *addr;
u16 length, proto_type;
@ -381,7 +389,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
/* Verify which buffer has valid data */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
if (drvdata->rx_ping_pong != 0)
@ -398,27 +406,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
return 0; /* No data was available */
/* Verify that buffer has valid data */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
XEL_RSR_RECV_DONE_MASK)
return 0; /* No data was available */
}
/* Get the protocol type of the ethernet frame that arrived */
proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
* or an IP packet or an ARP packet */
if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
length = ((ntohl(__raw_readl(addr +
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
@ -431,14 +440,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
reg_data &= ~XEL_RSR_RECV_DONE_MASK;
__raw_writel(reg_data, addr + XEL_RSR_OFFSET);
xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
return length;
}
@ -465,14 +477,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
/* Update the MAC address in the EmacLite */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
while ((__raw_readl(addr + XEL_TSR_OFFSET) &
while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
XEL_TSR_PROG_MAC_ADDR) != 0)
;
}
@ -605,7 +617,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
skb_reserve(skb, 2);
len = xemaclite_recv_data(lp, (u8 *) skb->data);
len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@ -642,32 +654,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
u32 tx_status;
/* Check if there is Rx Data available */
if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
XEL_RSR_RECV_DONE_MASK) ||
(__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
(xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
& XEL_RSR_RECV_DONE_MASK))
xemaclite_rx_handler(dev);
/* Check if the Transmission for the first buffer is completed */
tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
tx_complete = true;
}
@ -700,7 +712,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
/* wait for the MDIO interface to not be busy or timeout
after some time.
*/
while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
WARN_ON(1);
@ -736,17 +748,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(XEL_MDIOADDR_OP_MASK |
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
xemaclite_writel(XEL_MDIOADDR_OP_MASK |
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@ -783,13 +795,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction.
*/
ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(~XEL_MDIOADDR_OP_MASK &
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0;
}
@ -836,8 +848,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
/* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register.
*/
__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc();
if (!bus) {
@ -1141,8 +1153,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
dev_warn(dev, "No MAC address found\n");
/* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);

View file

@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops;

View file

@ -1851,6 +1851,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
else if (!skb_queue_empty(&tp->tx_queue) &&
!list_empty(&tp->tx_free))
napi_schedule(napi);
}
return work_done;
@ -2990,10 +2993,13 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
netif_wake_queue(netdev);
netif_info(tp, link, netdev, "carrier on\n");
}
} else {
if (netif_carrier_ok(netdev)) {
@ -3001,6 +3007,7 @@ static void set_carrier(struct r8152 *tp)
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
napi_enable(&tp->napi);
netif_info(tp, link, netdev, "carrier off\n");
}
}
}
@ -3385,12 +3392,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
if (!netif_running(netdev))
return 0;
netif_stop_queue(netdev);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) {
netif_stop_queue(netdev);
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control);
@ -3415,12 +3422,14 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.enable(tp);
rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control);
netif_wake_queue(netdev);
}
napi_enable(&tp->napi);
netif_wake_queue(netdev);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
return 0;
}

View file

@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
/* Private data structure */
struct sierra_net_data {
u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
u16 link_up; /* air link up or down */
u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
@ -122,6 +120,7 @@ struct param {
/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS 0x01
#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE 0x00
#define SIERRA_NET_COVERAGE_NOPACKET 0x01
@ -129,7 +128,8 @@ struct param {
/* LSI Session */
#define SIERRA_NET_SESSION_IDLE 0x00
/* LSI Link types */
#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
struct lsi_umts {
u8 protocol;
@ -137,9 +137,14 @@ struct lsi_umts {
__be16 length;
/* eventually use a union for the rest - assume umts for now */
u8 coverage;
u8 unused2[41];
u8 network_len; /* network name len */
u8 network[40]; /* network name (UCS2, bigendian) */
u8 session_state;
u8 unused3[33];
} __packed;
struct lsi_umts_single {
struct lsi_umts lsi;
u8 link_type;
u8 pdp_addr_len; /* NW-supplied PDP address len */
u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@ -158,10 +163,31 @@ struct lsi_umts {
u8 reserved[8];
} __packed;
struct lsi_umts_dual {
struct lsi_umts lsi;
u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */
u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
u8 unused4[23];
u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */
u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */
u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
u8 unused5[68];
} __packed;
#define SIERRA_NET_LSI_COMMON_LEN 4
#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);
@ -191,10 +217,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
dev->data[0] = (unsigned long)priv;
}
/* is packet IPv4 */
/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
return skb->protocol == cpu_to_be16(ETH_P_IP);
return skb->protocol == cpu_to_be16(ETH_P_IP) ||
skb->protocol == cpu_to_be16(ETH_P_IPV6);
}
/*
@ -350,46 +377,51 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
u32 expected_length;
if (datalen < sizeof(struct lsi_umts)) {
netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
__func__, datalen,
sizeof(struct lsi_umts));
if (datalen < sizeof(struct lsi_umts_single)) {
netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
__func__, datalen, sizeof(struct lsi_umts_single));
return -1;
}
if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
__func__, be16_to_cpu(lsi->length),
(u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
return -1;
}
/* Validate the protocol - only support UMTS for now */
if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
lsi->protocol);
return -1;
}
/* Validate the link type */
if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
lsi->link_type);
return -1;
}
/* Validate the coverage */
if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
|| lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
/* Validate the session state */
if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
netdev_err(dev->net, "Session idle, 0x%02x\n",
lsi->session_state);
lsi->session_state);
return 0;
}
/* Validate the protocol - only support UMTS for now */
if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
/* Validate the link type */
if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
single->link_type);
return -1;
}
expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
} else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
} else {
netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
lsi->protocol);
return -1;
}
if (be16_to_cpu(lsi->length) != expected_length) {
netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
__func__, be16_to_cpu(lsi->length), expected_length);
return -1;
}
/* Validate the coverage */
if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
@ -662,7 +694,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
u8 numendpoints;
u16 fwattr = 0;
int status;
struct ethhdr *eth;
struct sierra_net_data *priv;
static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@ -700,11 +731,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
/* we will have to manufacture ethernet headers, prepare template */
eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
eth->h_proto = cpu_to_be16(ETH_P_IP);
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
/* set context index initially to 0 - prepares tx hdr template */
@ -833,9 +859,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, hh.hdrlen);
/* We are going to accept this packet, prepare it */
memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
ETH_HLEN);
/* We are going to accept this packet, prepare it.
* In case protocol is IPv6, keep it, otherwise force IPv4.
*/
skb_reset_mac_header(skb);
if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
eth_zero_addr(eth_hdr(skb)->h_source);
memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Last packet in batch handled by usbnet */
if (hh.payload_len.word == skb->len)

View file

@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
printk(", irq %d", p->irq);
pr_cont(", irq %d", p->irq);
if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE;
@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
printk(" [");
#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
pr_cont(" [");
#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
{
int f = 0;
printmode(PCSPP);
@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
// printmode(DMA);
}
#undef printmode
printk("]\n");
pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,

View file

@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */

View file

@ -1185,8 +1185,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
struct cb_desc *cb_desc, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
dma_addr_t mapping;
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@ -1197,8 +1196,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
pTxFwInfo->TxRate, cb_desc);
if (pci_dma_mapping_error(priv->pdev, mapping))
netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
if (cb_desc->bAMPDUEnable) {
pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@ -1233,6 +1230,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
}
memset((u8 *)pdesc, 0, 12);
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pdev, mapping)) {
netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
return;
}
pdesc->LINIP = 0;
pdesc->CmdInit = 1;
pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;

View file

@ -412,6 +412,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
} while (server->tcpStatus == CifsNeedReconnect);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
return rc;
}
@ -421,18 +424,27 @@ cifs_echo_request(struct work_struct *work)
int rc;
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
unsigned long echo_interval;
/*
* We cannot send an echo if it is disabled or until the
* NEGOTIATE_PROTOCOL request is done, which is indicated by
* server->ops->need_neg() == true. Also, no need to ping if
* we got a response recently.
* If we need to renegotiate, set echo interval to zero to
* immediately call echo service where we can renegotiate.
*/
if (server->tcpStatus == CifsNeedNegotiate)
echo_interval = 0;
else
echo_interval = SMB_ECHO_INTERVAL;
/*
* We cannot send an echo if it is disabled.
* Also, no need to ping if we got a response recently.
*/
if (server->tcpStatus == CifsNeedReconnect ||
server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
server->tcpStatus == CifsExiting ||
server->tcpStatus == CifsNew ||
(server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
time_before(jiffies, server->lstrp + echo_interval - HZ))
goto requeue_echo;
rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;

View file

@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
if (invalidate)
set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
}
} else {
@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
/* Make sure any pending writes are cancelled. */
if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
fscache_invalidate_writes(cookie);
/* Reset the cookie state if it wasn't relinquished */
if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
atomic_inc(&cookie->n_active);

View file

@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
cookie->flags = 1 << FSCACHE_COOKIE_ENABLED;
spin_lock_init(&cookie->lock);
spin_lock_init(&cookie->stores_lock);
INIT_HLIST_HEAD(&cookie->backing_objects);
/* check the netfs type is not already present */

View file

@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))
@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);
static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
static WAIT_STATE(WAIT_FOR_INIT, "?INI",
TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@ -229,6 +230,10 @@ execute_work_state:
event = -1;
if (new_state == NO_TRANSIT) {
_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
if (unlikely(state == STATE(OBJECT_DEAD))) {
_leave(" [dead]");
return;
}
fscache_enqueue_object(object);
event_mask = object->oob_event_mask;
goto unmask_events;
@ -239,7 +244,7 @@ execute_work_state:
object->state = state = new_state;
if (state->work) {
if (unlikely(state->work == ((void *)2UL))) {
if (unlikely(state == STATE(OBJECT_DEAD))) {
_leave(" [dead]");
return;
}
@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
fscache_mark_object_dead(object);
object->oob_event_mask = 0;
if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
/* Reject any new read/write ops and abort any that are pending. */
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_cancel_all_ops(object);
}
if (list_empty(&object->dependents) &&
object->n_ops == 0 &&
object->n_children == 0)
@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
/*
* The object is dead. We can get here if an object gets queued by an event
* that would lead to its death (such as EV_KILL) when the dispatcher is
* already running (and so can be requeued) but hasn't yet cleared the event
* mask.
*/
static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
int event)
{
if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
&object->flags))
return NO_TRANSIT;
WARN(true, "FS-Cache object redispatched after death");
return NO_TRANSIT;
}

View file

@ -1072,6 +1072,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
case -NFS4ERR_BADXDR:
case -NFS4ERR_RESOURCE:
case -NFS4ERR_NOFILEHANDLE:
case -NFS4ERR_MOVED:
/* Non-seqid mutating errors */
return;
};

View file

@ -3145,6 +3145,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[PROC_NUMBUF];
int len;
cond_resched();
if (!has_pid_permissions(ns, iter.task, 2))
continue;

View file

@ -74,6 +74,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/major.h>
#include "internal.h"
static struct kmem_cache *romfs_inode_cachep;
@ -415,7 +416,22 @@ static void romfs_destroy_inode(struct inode *inode)
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
u64 id = 0;
/* When calling huge_encode_dev(),
* use sb->s_bdev->bd_dev when,
* - CONFIG_ROMFS_ON_BLOCK defined
* use sb->s_dev when,
* - CONFIG_ROMFS_ON_BLOCK undefined and
* - CONFIG_ROMFS_ON_MTD defined
* leave id as 0 when,
* - CONFIG_ROMFS_ON_BLOCK undefined and
* - CONFIG_ROMFS_ON_MTD undefined
*/
if (sb->s_bdev)
id = huge_encode_dev(sb->s_bdev->bd_dev);
else if (sb->s_dev)
id = huge_encode_dev(sb->s_dev);
buf->f_type = ROMFS_MAGIC;
buf->f_namelen = ROMFS_MAXFN;
@ -488,6 +504,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY | MS_NOATIME;
sb->s_op = &romfs_super_ops;
#ifdef CONFIG_ROMFS_ON_MTD
/* Use same dev ID from the underlying mtdblock device */
if (sb->s_mtd)
sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
#endif
/* read the image superblock and check it */
rsb = kmalloc(512, GFP_KERNEL);
if (!rsb)

View file

@ -360,6 +360,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */

View file

@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ... and so on.
*/
#define order_base_2(n) ilog2(roundup_pow_of_two(n))
static inline __attribute_const__
int __order_base_2(unsigned long n)
{
return n > 1 ? ilog2(n - 1) + 1 : 0;
}
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 0 || (n) == 1) ? 0 : \
ilog2((n) - 1) + 1) : \
__order_base_2(n) \
)
#endif /* _LINUX_LOG2_H */
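
The replacement above special-cases constant 0 and 1, which is what the changelog entry "log2: make order_base_2() behave correctly on const input value zero" refers to; non-constant input goes through __order_base_2(), which computes ilog2(n - 1) + 1 for n > 1. A self-contained userspace re-implementation (not the kernel header) showing the resulting values:

#include <stdio.h>

/* Same arithmetic as __order_base_2(): smallest 'order' with (1UL << order) >= n,
 * written with a GCC builtin instead of the kernel's ilog2().
 */
static int order_base_2_demo(unsigned long n)
{
	return n > 1 ? (int)(8 * sizeof(unsigned long)) - __builtin_clzl(n - 1) : 0;
}

int main(void)
{
	printf("%d %d %d %d %d\n",
	       order_base_2_demo(0),	/* 0, the previously broken case */
	       order_base_2_demo(1),	/* 0 */
	       order_base_2_demo(2),	/* 1 */
	       order_base_2_demo(3),	/* 2 */
	       order_base_2_demo(4096));/* 12 */
	return 0;
}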

View file

@ -744,6 +744,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
{
u32 hash;
/* @flowlabel may include more than a flow label, eg, the traffic class.
* Here we want only the flow label value.
*/
flowlabel &= IPV6_FLOWLABEL_MASK;
if (flowlabel ||
net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
(!autolabel &&
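
The hunk above masks the passed-in value with IPV6_FLOWLABEL_MASK before the "is a flow label already set?" test, because the argument can carry traffic-class bits in the upper part of the word. An illustrative host-order version of the masking (the kernel constant is the same 20-bit mask in network byte order):

#include <stdio.h>

int main(void)
{
	unsigned int flowlabel_mask = 0x000fffffu;	/* low 20 bits, host order for this demo */
	unsigned int flowinfo = 0x00b00000u;		/* made-up value: traffic class set, flow label zero */

	printf("unmasked: %#x (non-zero, so no label was auto-generated)\n", flowinfo);
	printf("masked:   %#x (zero, so the auto-label path is taken)\n",
	       flowinfo & flowlabel_mask);
	return 0;
}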

View file

@ -13,6 +13,7 @@
*
*/
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
@ -251,6 +252,8 @@ void kasan_report(unsigned long addr, size_t size,
if (likely(!kasan_report_enabled()))
return;
disable_trace_on_warning();
info.access_addr = (void *)addr;
info.access_size = size;
info.is_write = is_write;

View file

@ -886,9 +886,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
regbuf = vzalloc(reglen);
if (reglen && !regbuf)
return -ENOMEM;
regbuf = NULL;
if (reglen) {
regbuf = vzalloc(reglen);
if (!regbuf)
return -ENOMEM;
}
ops->get_regs(dev, &regs, regbuf);

View file

@ -1250,7 +1250,7 @@ void __init arp_init(void)
/*
* ax25 -> ASCII conversion
*/
static char *ax2asc2(ax25_address *a, char *buf)
static void ax2asc2(ax25_address *a, char *buf)
{
char c, *s;
int n;
@ -1272,10 +1272,10 @@ static char *ax2asc2(ax25_address *a, char *buf)
*s++ = n + '0';
*s++ = '\0';
if (*buf == '\0' || *buf == '-')
return "*";
return buf;
if (*buf == '\0' || *buf == '-') {
buf[0] = '*';
buf[1] = '\0';
}
}
#endif /* CONFIG_AX25 */
@ -1309,7 +1309,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
}
#endif
sprintf(tbuf, "%pI4", n->primary_key);
seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
read_unlock(&n->lock);
}
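
Two things change in the AX.25 hunk above: ax2asc2() now writes "*" into the caller's buffer instead of returning a string literal, and the /proc/net/arp line pads the hardware address with "%-17s" so short AX.25 callsigns no longer shift the remaining columns. A quick userspace illustration of the padding, using the new format string with made-up entries:

#include <stdio.h>

int main(void)
{
	/* addresses, hw types, flags and device names below are illustrative only */
	printf("%-16s 0x%-10x0x%-10x%-17s * %s\n",
	       "192.168.0.2", 0x1, 0x2, "52:54:00:12:34:56", "eth0");
	printf("%-16s 0x%-10x0x%-10x%-17s * %s\n",
	       "44.128.0.1", 0x3, 0x2, "M0ABC-1", "ax0");
	return 0;
}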

View file

@ -3263,9 +3263,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
}
if (idev) {
if (idev->if_flags & IF_READY)
/* device is already configured. */
if (idev->if_flags & IF_READY) {
/* device is already configured -
* but resend MLD reports, we might
* have roamed and need to update
* multicast snooping switches
*/
ipv6_mc_up(idev);
break;
}
idev->if_flags |= IF_READY;
}

View file

@ -76,18 +76,22 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
}
}
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type == IPV6_ADDR_ANY) {
if (ipv6_addr_any(&usin->sin6_addr)) {
/*
* connect to self
*/
usin->sin6_addr.s6_addr[15] = 0x01;
if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
&usin->sin6_addr);
else
usin->sin6_addr = in6addr_loopback;
}
addr_type = ipv6_addr_type(&usin->sin6_addr);
daddr = &usin->sin6_addr;
if (addr_type == IPV6_ADDR_MAPPED) {
if (addr_type & IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
if (__ipv6_only_sock(sk)) {

View file

@ -1004,6 +1004,9 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
}
}
#endif
if (ipv6_addr_v4mapped(&fl6->saddr) &&
!(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
return -EAFNOSUPPORT;
return 0;

View file

@ -149,8 +149,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr))
usin->sin6_addr.s6_addr[15] = 0x1;
if (ipv6_addr_any(&usin->sin6_addr)) {
if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
&usin->sin6_addr);
else
usin->sin6_addr = in6addr_loopback;
}
addr_type = ipv6_addr_type(&usin->sin6_addr);
@ -189,7 +194,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
* TCP over IPv4
*/
if (addr_type == IPV6_ADDR_MAPPED) {
if (addr_type & IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;

View file

@ -1136,6 +1136,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
if (ipv6_addr_any(daddr) &&
ipv6_addr_v4mapped(&np->saddr))
ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
daddr);
break;
case AF_INET:
goto do_udp_sendmsg;

View file

@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
struct sctp_transport *transport;
struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
union sctp_addr *laddr = (union sctp_addr *)addr;
struct sctp_transport *transport;
if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,

View file

@ -452,6 +452,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
if (!con)
return -EINVAL;
if (!test_bit(CF_CONNECTED, &con->flags)) {
conn_put(con);
return 0;
}
e = tipc_alloc_entry(data, len);
if (!e) {
conn_put(con);
@ -465,12 +470,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
if (test_bit(CF_CONNECTED, &con->flags)) {
if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
} else {
if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
}
return 0;
}
@ -494,7 +495,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
int ret;
spin_lock_bh(&con->outqueue_lock);
while (1) {
while (test_bit(CF_CONNECTED, &con->flags)) {
e = list_entry(con->outqueue.next, struct outqueue_entry,
list);
if ((struct list_head *) e == &con->outqueue)