Merge android-4.4.143 (7bbfac1) into msm-4.4

* refs/heads/tmp-7bbfac1
  Linux 4.4.143
  net/nfc: Avoid stalls when nfc_alloc_send_skb() returned NULL.
  rds: avoid unnecessary cong_update in loop transport
  KEYS: DNS: fix parsing multiple options
  netfilter: ebtables: reject non-bridge targets
  MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
  MIPS: Call dump_stack() from show_regs()
  rtlwifi: rtl8821ae: fix firmware is not ready to run
  net: cxgb3_main: fix potential Spectre v1
  net/mlx5: Fix command interface race in polling mode
  net_sched: blackhole: tell upper qdisc about dropped packets
  vhost_net: validate sock before trying to put its fd
  tcp: prevent bogus FRTO undos with non-SACK flows
  tcp: fix Fast Open key endianness
  r8152: napi hangup fix after disconnect
  qed: Limit msix vectors in kdump kernel to the minimum required count.
  net: sungem: fix rx checksum support
  net/mlx5: Fix incorrect raw command length parsing
  net: dccp: switch rx_tstamp_last_feedback to monotonic clock
  net: dccp: avoid crash in ccid3_hc_rx_send_feedback()
  atm: zatm: Fix potential Spectre v1
  crypto: crypto4xx - fix crypto4xx_build_pdr, crypto4xx_build_sdr leak
  crypto: crypto4xx - remove bad list_del
  bcm63xx_enet: do not write to random DMA channel on BCM6345
  bcm63xx_enet: correct clock usage
  ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent
  Revert "sit: reload iphdr in ipip6_rcv"
  x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
  compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
  compiler, clang: always inline when CONFIG_OPTIMIZE_INLINING is disabled
  compiler, clang: properly override 'inline' for clang
  compiler, clang: suppress warning for unused static inline functions

Change-Id: Ia4be0ff93c81aee090c38127014680460e8cc756
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Committed by Srinivasarao P on 2018-08-02 10:15:59 +05:30 as commit 508ac0adfc.
27 changed files, 315 insertions(+), 86 deletions(-).

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 142
+SUBLEVEL = 143
 EXTRAVERSION =
 NAME = Blurry Fish Butt

View file

@@ -633,21 +633,48 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
+static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
 static void arch_dump_stack(void *info)
 {
 	struct pt_regs *regs;
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
+	arch_spin_lock(&lock);
 	regs = get_irq_regs();
 
 	if (regs)
 		show_regs(regs);
+	else
+		dump_stack();
+	arch_spin_unlock(&lock);
 
-	dump_stack();
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
 }
 
 void arch_trigger_all_cpu_backtrace(bool include_self)
 {
-	smp_call_function(arch_dump_stack, NULL, 1);
+	struct call_single_data *csd;
+	int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
+
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = arch_dump_stack;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)

View file

@@ -345,6 +345,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)

View file

@@ -45,6 +45,65 @@
 #define _ASM_SI		__ASM_REG(si)
 #define _ASM_DI		__ASM_REG(di)
 
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1	_ASM_AX
+#define _ASM_ARG2	_ASM_DX
+#define _ASM_ARG3	_ASM_CX
+
+#define _ASM_ARG1L	eax
+#define _ASM_ARG2L	edx
+#define _ASM_ARG3L	ecx
+
+#define _ASM_ARG1W	ax
+#define _ASM_ARG2W	dx
+#define _ASM_ARG3W	cx
+
+#define _ASM_ARG1B	al
+#define _ASM_ARG2B	dl
+#define _ASM_ARG3B	cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1	_ASM_DI
+#define _ASM_ARG2	_ASM_SI
+#define _ASM_ARG3	_ASM_DX
+#define _ASM_ARG4	_ASM_CX
+#define _ASM_ARG5	r8
+#define _ASM_ARG6	r9
+
+#define _ASM_ARG1Q	rdi
+#define _ASM_ARG2Q	rsi
+#define _ASM_ARG3Q	rdx
+#define _ASM_ARG4Q	rcx
+#define _ASM_ARG5Q	r8
+#define _ASM_ARG6Q	r9
+
+#define _ASM_ARG1L	edi
+#define _ASM_ARG2L	esi
+#define _ASM_ARG3L	edx
+#define _ASM_ARG4L	ecx
+#define _ASM_ARG5L	r8d
+#define _ASM_ARG6L	r9d
+
+#define _ASM_ARG1W	di
+#define _ASM_ARG2W	si
+#define _ASM_ARG3W	dx
+#define _ASM_ARG4W	cx
+#define _ASM_ARG5W	r8w
+#define _ASM_ARG6W	r9w
+
+#define _ASM_ARG1B	dil
+#define _ASM_ARG2B	sil
+#define _ASM_ARG3B	dl
+#define _ASM_ARG4B	cl
+#define _ASM_ARG5B	r8b
+#define _ASM_ARG6B	r9b
+
+#endif
+
 /* Exception table entry */
 #ifdef __ASSEMBLY__
 # define _ASM_EXTABLE(from,to)			\
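These constants let one assembly source name the C calling-convention argument registers on both 32-bit and 64-bit builds: x86-64 SysV passes the first arguments in rdi/rsi/rdx/rcx/r8/r9, while 32-bit kernel code is compiled with -mregparm=3 so the first three arguments travel in eax/edx/ecx. A minimal userspace sketch of the per-arch selection these defines centralize (ASM_ARG1 here is a hypothetical stand-in, not the kernel macro):

#include <stdio.h>

/* Hypothetical stand-in for _ASM_ARG1: which register carries the
 * first integer argument. x86-64 SysV uses %rdi; 32-bit kernel code
 * is built with -mregparm=3, so the first argument is %eax (plain
 * 32-bit cdecl would pass it on the stack instead). */
#ifdef __x86_64__
# define ASM_ARG1 "rdi"
#else
# define ASM_ARG1 "eax"
#endif

int main(void)
{
	printf("first argument register: %%%s\n", ASM_ARG1);
	return 0;
}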

View file

@@ -1481,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 					return -EFAULT;
 				if (pool < 0 || pool > ZATM_LAST_POOL)
 					return -EINVAL;
+				pool = array_index_nospec(pool,
+							  ZATM_LAST_POOL + 1);
 				if (copy_from_user(&info,
 				    &((struct zatm_pool_req __user *) arg)->info,
 				    sizeof(info))) return -EFAULT;
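This hunk and the cxgb3 change further down apply the same Spectre v1 recipe: clamp the index after the bounds check so a mispredicted branch cannot speculatively index out of bounds. A minimal userspace sketch of the masking idea behind array_index_nospec() - the kernel's real implementation lives in include/linux/nospec.h and may use arch-specific code:

#include <stdio.h>
#include <stdint.h>

/* Branchless bounds mask: all ones when index < size, zero otherwise.
 * Same idea as the kernel's generic array_index_mask_nospec(); like
 * the kernel, it relies on arithmetic right shift of negative values. */
static inline size_t index_mask(size_t index, size_t size)
{
	return ~(size_t)((intptr_t)(index | (size - 1 - index)) >>
			 (sizeof(size_t) * 8 - 1));
}

static inline size_t index_nospec(size_t index, size_t size)
{
	return index & index_mask(index, size);
}

int main(void)
{
	static const int table[4] = { 10, 20, 30, 40 };
	size_t i = 2;

	if (i < 4)	/* the clamp below holds even if this branch
			 * is only taken speculatively */
		printf("%d\n", table[index_nospec(i, 4)]);
	return 0;
}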

View file

@@ -207,7 +207,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 				  dev->pdr_pa);
 		return -ENOMEM;
 	}
-	memset(dev->pdr, 0,  sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
 				   256 * PPC4XX_NUM_PD,
 				   &dev->shadow_sa_pool_pa,
@@ -240,13 +240,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
 
 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
 {
-	if (dev->pdr != NULL)
+	if (dev->pdr)
 		dma_free_coherent(dev->core_dev->device,
 				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
 				  dev->pdr, dev->pdr_pa);
+
 	if (dev->shadow_sa_pool)
 		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
 				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
 	if (dev->shadow_sr_pool)
 		dma_free_coherent(dev->core_dev->device,
 			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -416,12 +418,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
 
 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
 {
-	if (dev->sdr != NULL)
+	if (dev->sdr)
 		dma_free_coherent(dev->core_dev->device,
 				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
 				  dev->sdr, dev->sdr_pa);
 
-	if (dev->scatter_buffer_va != NULL)
+	if (dev->scatter_buffer_va)
 		dma_free_coherent(dev->core_dev->device,
 				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
 				  dev->scatter_buffer_va,
@@ -1029,12 +1031,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
 			break;
 		}
 
-		if (rc) {
-			list_del(&alg->entry);
+		if (rc)
 			kfree(alg);
-		} else {
+		else
 			list_add_tail(&alg->entry, &sec_dev->alg_list);
-		}
 	}
 
 	return 0;
@@ -1188,7 +1188,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
 	rc = crypto4xx_build_gdr(core_dev->dev);
 	if (rc)
-		goto err_build_gdr;
+		goto err_build_pdr;
 
 	rc = crypto4xx_build_sdr(core_dev->dev);
 	if (rc)
@@ -1230,12 +1230,11 @@ err_iomap:
 err_request_irq:
 	irq_dispose_mapping(core_dev->irq);
 	tasklet_kill(&core_dev->tasklet);
-	crypto4xx_destroy_sdr(core_dev->dev);
 err_build_sdr:
+	crypto4xx_destroy_sdr(core_dev->dev);
 	crypto4xx_destroy_gdr(core_dev->dev);
 err_build_gdr:
-	crypto4xx_destroy_pdr(core_dev->dev);
 err_build_pdr:
+	crypto4xx_destroy_pdr(core_dev->dev);
 	kfree(core_dev->dev);
 err_alloc_dev:
 	kfree(core_dev);

View file

@@ -1063,7 +1063,8 @@ static int bcm_enet_open(struct net_device *dev)
 	val = enet_readl(priv, ENET_CTL_REG);
 	val |= ENET_CTL_ENABLE_MASK;
 	enet_writel(priv, val, ENET_CTL_REG);
-	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+	if (priv->dma_has_sram)
+		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
 	enet_dmac_writel(priv, priv->dma_chan_en_mask,
 			 ENETDMAC_CHANCFG, priv->rx_chan);
@@ -1787,7 +1788,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		ret = PTR_ERR(priv->mac_clk);
 		goto out;
 	}
-	clk_prepare_enable(priv->mac_clk);
+	ret = clk_prepare_enable(priv->mac_clk);
+	if (ret)
+		goto out_put_clk_mac;
 
 	/* initialize default and fetch platform data */
 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1819,9 +1822,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		if (IS_ERR(priv->phy_clk)) {
 			ret = PTR_ERR(priv->phy_clk);
 			priv->phy_clk = NULL;
-			goto out_put_clk_mac;
+			goto out_disable_clk_mac;
 		}
-		clk_prepare_enable(priv->phy_clk);
+		ret = clk_prepare_enable(priv->phy_clk);
+		if (ret)
+			goto out_put_clk_phy;
 	}
 
 	/* do minimal hardware init to be able to probe mii bus */
@@ -1921,13 +1926,16 @@ out_free_mdio:
 out_uninit_hw:
 	/* turn off mdc clock */
 	enet_writel(priv, 0, ENET_MIISC_REG);
-	if (priv->phy_clk) {
+	if (priv->phy_clk)
 		clk_disable_unprepare(priv->phy_clk);
-		clk_put(priv->phy_clk);
-	}
 
-out_put_clk_mac:
+out_put_clk_phy:
+	if (priv->phy_clk)
+		clk_put(priv->phy_clk);
+
+out_disable_clk_mac:
 	clk_disable_unprepare(priv->mac_clk);
+out_put_clk_mac:
 	clk_put(priv->mac_clk);
 
 out:
 	free_netdev(dev);
@@ -2772,7 +2780,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 		ret = PTR_ERR(priv->mac_clk);
 		goto out_unmap;
 	}
-	clk_enable(priv->mac_clk);
+	ret = clk_prepare_enable(priv->mac_clk);
+	if (ret)
+		goto out_put_clk;
 
 	priv->rx_chan = 0;
 	priv->tx_chan = 1;
@@ -2793,7 +2803,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 
 	ret = register_netdev(dev);
 	if (ret)
-		goto out_put_clk;
+		goto out_disable_clk;
 
 	netif_carrier_off(dev);
 	platform_set_drvdata(pdev, dev);
@@ -2802,6 +2812,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
 
 	return 0;
 
+out_disable_clk:
+	clk_disable_unprepare(priv->mac_clk);
+
 out_put_clk:
 	clk_put(priv->mac_clk);
 
@@ -2833,6 +2846,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, resource_size(res));
 
+	clk_disable_unprepare(priv->mac_clk);
+	clk_put(priv->mac_clk);
+
 	free_netdev(dev);
 	return 0;
 }

View file

@@ -50,6 +50,7 @@
 #include <linux/stringify.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 #include <asm/uaccess.h>
 
 #include "common.h"
@@ -2256,6 +2257,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
 		if (t.qset_idx >= nqsets)
 			return -EINVAL;
+		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
 
 		q = &adapter->params.sge.qset[q1 + t.qset_idx];
 		t.rspq_size = q->rspq_size;

View file

@@ -643,6 +643,7 @@ static void cmd_work_handler(struct work_struct *work)
 	struct semaphore *sem;
 	unsigned long flags;
 	int alloc_ret;
+	int cmd_mode;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
@@ -688,6 +689,7 @@ static void cmd_work_handler(struct work_struct *work)
 	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
 	ent->ts1 = ktime_get_ns();
+	cmd_mode = cmd->mode;
 
 	/* ring doorbell after the descriptor is valid */
 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
@@ -695,7 +697,7 @@ static void cmd_work_handler(struct work_struct *work)
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 	mmiowb();
 	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING) {
+	if (cmd_mode == CMD_MODE_POLLING) {
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
@@ -1126,7 +1128,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
 	struct mlx5_core_dev *dev = filp->private_data;
 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen_str[8];
+	char outlen_str[8] = {0};
 	int outlen;
 	void *ptr;
 	int err;
@@ -1141,8 +1143,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(outlen_str, buf, count))
 		return -EFAULT;
 
-	outlen_str[7] = 0;
-
 	err = sscanf(outlen_str, "%d", &outlen);
 	if (err < 0)
 		return err;

View file

@@ -22,6 +22,7 @@
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
 #include <linux/qed/qed_if.h>
+#include <linux/crash_dump.h>
 
 #include "qed.h"
 #include "qed_sp.h"
@@ -634,6 +635,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+	if (is_kdump_kernel()) {
+		DP_INFO(cdev,
+			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+			cdev->int_params.in.min_msix_cnt);
+		cdev->int_params.in.num_vectors =
+			cdev->int_params.in.min_msix_cnt;
+	}
+
 	rc = qed_set_int_mode(cdev, false);
 	if (rc) {
 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");

View file

@@ -60,8 +60,7 @@
 #include <linux/sungem_phy.h>
 #include "sungem.h"
 
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
 
 #define DEFAULT_MSG	(NETIF_MSG_DRV		| \
 			 NETIF_MSG_PROBE	| \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
 	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
 	writel(val, gp->regs + RXDMA_CFG);
 	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
 		writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
 	struct net_device *dev = gp->dev;
 	int entry, drops, work_done = 0;
 	u32 done;
-	__sum16 csum;
 
 	if (netif_msg_rx_status(gp))
 		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
 			skb = copy_skb;
 		}
 
-		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-		skb->csum = csum_unfold(csum);
-		skb->ip_summed = CHECKSUM_COMPLETE;
+		if (likely(dev->features & NETIF_F_RXCSUM)) {
+			__sum16 csum;
+
+			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+			skb->csum = csum_unfold(csum);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
 		skb->protocol = eth_type_trans(skb, gp->dev);
 
 		napi_gro_receive(&gp->napi, skb);
@@ -1755,7 +1757,7 @@ static void gem_init_dma(struct gem *gp)
 	writel(0, gp->regs + TXDMA_KICK);
 
 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
 	writel(val, gp->regs + RXDMA_CFG);
 
 	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2973,8 +2975,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_drvdata(pdev, dev);
 
 	/* We can do scatter/gather and HW checksum */
-	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	dev->features = dev->hw_features;
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;

View file

@@ -3139,7 +3139,8 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
 	unregister_pm_notifier(&tp->pm_notifier);
 #endif
-	napi_disable(&tp->napi);
+	if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+		napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);

View file

@@ -135,7 +135,6 @@ found_alt:
 			  firmware->size);
 		rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
 	}
-	rtlpriv->rtlhal.fwsize = firmware->size;
 	release_firmware(firmware);
 }

View file

@@ -955,7 +955,8 @@ err_used:
 	if (ubufs)
 		vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-	sockfd_put(sock);
+	if (sock)
+		sockfd_put(sock);
 err_vq:
 	mutex_unlock(&vq->mutex);
 err:

View file

@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
 		"panic",	/* O2NM_FENCE_PANIC */
 };
 
+static inline void o2nm_lock_subsystem(void);
+static inline void o2nm_unlock_subsystem(void);
+
 struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
 {
 	struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
 {
 	/* through the first node_set .parent
 	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
-	return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+	if (node->nd_item.ci_parent)
+		return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+	else
+		return NULL;
 }
 
 enum {
@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
 				   size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	unsigned long tmp;
 	char *p = (char *)page;
 	int ret = 0;
@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
 		return -EINVAL; /* XXX */
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		o2nm_unlock_subsystem();
+		return -EINVAL;
+	}
+
 	write_lock(&cluster->cl_nodes_lock);
 	if (cluster->cl_nodes[tmp])
 		ret = -EEXIST;
@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
 		set_bit(tmp, cluster->cl_nodes_bitmap);
 	}
 	write_unlock(&cluster->cl_nodes_lock);
+	o2nm_unlock_subsystem();
+
 	if (ret)
 		return ret;
@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
 					    size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	int ret, i;
 	struct rb_node **p, *parent;
 	unsigned int octets[4];
@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
 		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
 	}
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		o2nm_unlock_subsystem();
+		return -EINVAL;
+	}
+
 	ret = 0;
 	write_lock(&cluster->cl_nodes_lock);
 	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
 		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
 	}
 	write_unlock(&cluster->cl_nodes_lock);
+	o2nm_unlock_subsystem();
+
 	if (ret)
 		return ret;
@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
 				     size_t count)
 {
 	struct o2nm_node *node = to_o2nm_node(item);
-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+	struct o2nm_cluster *cluster;
 	unsigned long tmp;
 	char *p = (char *)page;
 	ssize_t ret;
@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
 		return -EINVAL; /* XXX */
 
+	o2nm_lock_subsystem();
+	cluster = to_o2nm_cluster_from_node(node);
+	if (!cluster) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* the only failure case is trying to set a new local node
 	 * when a different one is already set */
 	if (tmp && tmp == cluster->cl_has_local &&
-	    cluster->cl_local_node != node->nd_num)
-		return -EBUSY;
+	    cluster->cl_local_node != node->nd_num) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	/* bring up the rx thread if we're setting the new local node. */
 	if (tmp && !cluster->cl_has_local) {
 		ret = o2net_start_listening(node);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
 	if (!tmp && cluster->cl_has_local &&
@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
 		cluster->cl_local_node = node->nd_num;
 	}
 
-	return count;
+	ret = count;
+out:
+	o2nm_unlock_subsystem();
+	return ret;
 }
 
 CONFIGFS_ATTR(o2nm_node_, num);
@@ -750,6 +787,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
 	},
 };
 
+static inline void o2nm_lock_subsystem(void)
+{
+	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
+static inline void o2nm_unlock_subsystem(void)
+{
+	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
 int o2nm_depend_item(struct config_item *item)
 {
 	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);

View file

@@ -64,6 +64,18 @@
 #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
+/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline	__attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
 /*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old.
@@ -71,19 +83,22 @@
  * -Wunused-function. This turns out to avoid the need for complex #ifdef
  * directives. Suppress the warning in clang as well by using "unused"
 * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
 */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline		__attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__	__attribute__((always_inline,unused)) notrace
-#define __inline __inline	__attribute__((always_inline,unused)) notrace
+#define inline \
+	inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline		__attribute__((unused)) notrace
-#define __inline__ __inline__	__attribute__((unused)) notrace
-#define __inline __inline	__attribute__((unused)) notrace
+#define inline inline	__attribute__((unused)) notrace __gnu_inline
 #endif
 
+#define __inline__ inline
+#define __inline inline
 #define __always_inline	inline __attribute__((always_inline))
 #define noinline	__attribute__((noinline))
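The effect of __gnu_inline is easiest to see outside the kernel: in C99/C11 mode, an extern inline definition emits an externally visible symbol in every translation unit that includes it, which is exactly the duplicate-symbol link failure this series heads off; the gnu89 rule restored by the attribute emits no symbol at all. A minimal userspace sketch for gcc/clang (add_one() is a hypothetical helper; always_inline keeps the example working even at -O0, mirroring the kernel's combined attribute list):

#include <stdio.h>

/* With gnu_inline, this definition is inline-only: no out-of-line
 * add_one symbol is emitted, so two .c files could share it from a
 * header without colliding at link time. Without the attribute, a
 * C99-mode compiler would emit an external definition here. */
extern inline __attribute__((gnu_inline, always_inline))
int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", add_one(41));	/* prints 42 */
	return 0;
}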

View file

@@ -404,6 +404,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
 	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
 	if (IS_ERR(watcher))
 		return PTR_ERR(watcher);
+
+	if (watcher->family != NFPROTO_BRIDGE) {
+		module_put(watcher->me);
+		return -ENOENT;
+	}
+
 	w->u.watcher = watcher;
 
 	par->target   = watcher;
@@ -724,6 +730,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
 		goto cleanup_watchers;
 	}
 
+	/* Reject UNSPEC, xtables verdicts/return values are incompatible */
+	if (target->family != NFPROTO_BRIDGE) {
+		module_put(target->me);
+		ret = -ENOENT;
+		goto cleanup_watchers;
+	}
+
 	t->u.target = target;
 	if (t->u.target == &ebt_standard_target) {
 		if (gap < sizeof(struct ebt_standard_target)) {

View file

@@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 {
 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
 	struct dccp_sock *dp = dccp_sk(sk);
-	ktime_t now = ktime_get_real();
+	ktime_t now = ktime_get();
 	s64 delta = 0;
 
 	switch (fbtype) {
@@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 	case CCID3_FBACK_PERIODIC:
 		delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
 		if (delta <= 0)
-			DCCP_BUG("delta (%ld) <= 0", (long)delta);
-		else
-			hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+			delta = 1;
+		hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
 		break;
 	default:
 		return;
 	}
 
-	ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+	ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
 		       hc->rx_x_recv, hc->rx_pinv);
 
 	hc->rx_tstamp_last_feedback = now;
@@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 static u32 ccid3_first_li(struct sock *sk)
 {
 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
-	u32 x_recv, p, delta;
+	u32 x_recv, p;
+	s64 delta;
 	u64 fval;
 
 	if (hc->rx_rtt == 0) {
@@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk)
 		hc->rx_rtt = DCCP_FALLBACK_RTT;
 	}
 
-	delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+	delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+	if (delta <= 0)
+		delta = 1;
 	x_recv = scaled_div32(hc->rx_bytes_recv, delta);
 	if (x_recv == 0) {		/* would also trigger divide-by-zero */
 		DCCP_WARN("X_recv==0\n");
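The substance of both DCCP fixes is the clock switch: ktime_get_real() follows wall time, which NTP or settimeofday can move backwards, yielding negative feedback intervals and the divide-by-zero guarded against above, while ktime_get() is monotonic. A userspace sketch of the same discipline, assuming POSIX clock_gettime():

#include <stdio.h>
#include <time.h>

static long long us_delta(const struct timespec *a, const struct timespec *b)
{
	return (a->tv_sec - b->tv_sec) * 1000000LL +
	       (a->tv_nsec - b->tv_nsec) / 1000;
}

int main(void)
{
	struct timespec t0, t1;
	long long delta;

	clock_gettime(CLOCK_MONOTONIC, &t0);	/* like ktime_get() */
	/* ... receive some packets ... */
	clock_gettime(CLOCK_MONOTONIC, &t1);

	delta = us_delta(&t1, &t0);	/* cannot go negative from clock jumps */
	if (delta <= 0)			/* same clamp as the patched code */
		delta = 1;
	printf("feedback interval: %lld us\n", delta);
	return 0;
}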

View file

@@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
 		opt++;
 		kdebug("options: '%s'", opt);
 		do {
+			int opt_len, opt_nlen;
 			const char *eq;
-			int opt_len, opt_nlen, opt_vlen, tmp;
+			char optval[128];
 
 			next_opt = memchr(opt, '#', end - opt) ?: end;
 			opt_len = next_opt - opt;
-			if (opt_len <= 0 || opt_len > 128) {
+			if (opt_len <= 0 || opt_len > sizeof(optval)) {
 				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
 						    opt_len);
 				return -EINVAL;
 			}
 
-			eq = memchr(opt, '=', opt_len) ?: end;
-			opt_nlen = eq - opt;
-			eq++;
-			opt_vlen = next_opt - eq; /* will be -1 if no value */
+			eq = memchr(opt, '=', opt_len);
+			if (eq) {
+				opt_nlen = eq - opt;
+				eq++;
+				memcpy(optval, eq, next_opt - eq);
+				optval[next_opt - eq] = '\0';
+			} else {
+				opt_nlen = opt_len;
+				optval[0] = '\0';
+			}
 
-			tmp = opt_vlen >= 0 ? opt_vlen : 0;
-			kdebug("option '%*.*s' val '%*.*s'",
-			       opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+			kdebug("option '%*.*s' val '%s'",
+			       opt_nlen, opt_nlen, opt, optval);
 
 			/* see if it's an error number representing a DNS error
 			 * that's to be recorded as the result in this key */
 			if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
 			    memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
 				kdebug("dns error number option");
-				if (opt_vlen <= 0)
-					goto bad_option_value;
 
-				ret = kstrtoul(eq, 10, &derrno);
+				ret = kstrtoul(optval, 10, &derrno);
 				if (ret < 0)
 					goto bad_option_value;
View file

@@ -232,8 +232,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
 {
 	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
 	struct tcp_fastopen_context *ctxt;
-	int ret;
 	u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+	__le32 key[4];
+	int ret, i;
 
 	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
 	if (!tbl.data)
@@ -242,11 +243,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
 	rcu_read_lock();
 	ctxt = rcu_dereference(tcp_fastopen_ctx);
 	if (ctxt)
-		memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+		memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
 	else
-		memset(user_key, 0, sizeof(user_key));
+		memset(key, 0, sizeof(key));
 	rcu_read_unlock();
 
+	for (i = 0; i < ARRAY_SIZE(key); i++)
+		user_key[i] = le32_to_cpu(key[i]);
+
 	snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
 		 user_key[0], user_key[1], user_key[2], user_key[3]);
 	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -262,12 +266,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
 		 * first invocation of tcp_fastopen_cookie_gen
 		 */
 		tcp_fastopen_init_key_once(false);
-		tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
+
+		for (i = 0; i < ARRAY_SIZE(user_key); i++)
+			key[i] = cpu_to_le32(user_key[i]);
+
+		tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
 	}
 
 bad_key:
 	pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-	       user_key[0], user_key[1], user_key[2], user_key[3],
+		 user_key[0], user_key[1], user_key[2], user_key[3],
 	       (char *)tbl.data, ret);
 	kfree(tbl.data);
 	return ret;
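The conversion loops above exist because the proc interface prints and parses the key as four %08x words (CPU-endian integers), while the crypto layer wants the same 16 raw bytes on every machine; pinning the stored words to little-endian makes one key string select the same cipher key on big- and little-endian hosts. A userspace sketch with a portable stand-in for le32_to_cpu():

#include <stdio.h>
#include <stdint.h>

/* Portable le32_to_cpu(): assemble the word from bytes, so the result
 * is identical regardless of host endianness. */
static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	uint8_t key[16];	/* raw cipher key bytes */
	uint32_t user_key[4];
	int i;

	for (i = 0; i < 16; i++)
		key[i] = (uint8_t)(i + 1);

	for (i = 0; i < 4; i++)
		user_key[i] = le32_load(key + 4 * i);

	/* same output on x86 and on a big-endian machine */
	printf("%08x-%08x-%08x-%08x\n",
	       user_key[0], user_key[1], user_key[2], user_key[3]);
	return 0;
}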

View file

@@ -3219,6 +3219,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 		if (tcp_is_reno(tp)) {
 			tcp_remove_reno_sacks(sk, pkts_acked);
+
+			/* If any of the cumulatively ACKed segments was
+			 * retransmitted, non-SACK case cannot confirm that
+			 * progress was due to original transmission due to
+			 * lack of TCPCB_SACKED_ACKED bits even if some of
+			 * the packets may have been never retransmitted.
+			 */
+			if (flag & FLAG_RETRANS_DATA_ACKED)
+				flag &= ~FLAG_ORIG_SACK_ACKED;
 		} else {
 			int delta;

View file

@@ -692,7 +692,6 @@ static int ipip6_rcv(struct sk_buff *skb)
 		if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
 			goto out;
 
-		iph = ip_hdr(skb);
 		err = IP_ECN_decapsulate(iph, skb);
 		if (unlikely(err)) {

View file

@@ -754,11 +754,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
 		pr_debug("Fragment %zd bytes remaining %zd",
 			 frag_len, remaining_len);
 
-		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
 					 frag_len + LLCP_HEADER_SIZE, &err);
 		if (pdu == NULL) {
-			pr_err("Could not allocate PDU\n");
-			continue;
+			pr_err("Could not allocate PDU (error=%d)\n", err);
+			len -= remaining_len;
+			if (len == 0)
+				len = err;
+			break;
 		}
 
 		pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);

View file

@@ -191,4 +191,5 @@ struct rds_transport rds_loop_transport = {
 	.inc_copy_to_user	= rds_message_inc_copy_to_user,
 	.inc_free		= rds_loop_inc_free,
 	.t_name			= "loopback",
+	.t_type			= RDS_TRANS_LOOP,
 };

View file

@@ -401,6 +401,11 @@ struct rds_notifier {
 	int			n_status;
 };
 
+/* Available as part of RDS core, so doesn't need to participate
+ * in get_preferred transport etc
+ */
+#define	RDS_TRANS_LOOP	3
+
 /**
  * struct rds_transport -  transport specific behavioural hooks
  *

View file

@@ -76,6 +76,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
 		return;
 
 	rs->rs_rcv_bytes += delta;
+
+	/* loop transport doesn't send/recv congestion updates */
+	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
+		return;
+
 	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
 
 	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "

View file

@@ -20,7 +20,7 @@
 static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	qdisc_drop(skb, sch);
-	return NET_XMIT_SUCCESS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
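For context on the new return value: a parent qdisc such as prio only bumps its queue length when the child enqueue returns exactly NET_XMIT_SUCCESS, so OR-ing in __NET_XMIT_BYPASS - a qdisc-internal flag above NET_XMIT_MASK that the device-transmit path masks off - stops the parent from accounting a packet the blackhole already freed, while the sender still sees success. A compressed userspace sketch of that contract; the constants match the kernel headers, but the helper names here are illustrative paraphrases, not kernel APIs:

#include <stdio.h>

#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_MASK		0x0f	/* qdisc-internal flags live above */
#define __NET_XMIT_BYPASS	0x0200

/* paraphrase of how __dev_xmit_skb() reports upward: high bits masked off */
#define sender_view(e)		((e) & NET_XMIT_MASK)

static int parent_enqueue(int child_ret, int *qlen)
{
	if (child_ret == NET_XMIT_SUCCESS)
		(*qlen)++;	/* the old blackhole return landed here,
				 * leaving a phantom packet queued */
	return child_ret;
}

int main(void)
{
	int qlen = 0;
	int ret = parent_enqueue(NET_XMIT_SUCCESS | __NET_XMIT_BYPASS, &qlen);

	printf("parent qlen: %d, sender sees: %d\n",
	       qlen, sender_view(ret));	/* prints 0, 0 */
	return 0;
}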